update
All checks were successful
Enterprise AI Code Review / ai-review (pull_request) Successful in 32s

This commit is contained in:
2025-12-28 14:10:04 +00:00
parent c17a61b27a
commit 69d9963597
17 changed files with 627 additions and 444 deletions

View File

@@ -96,7 +96,9 @@ class CodebaseAgent(BaseAgent):
def execute(self, context: AgentContext) -> AgentResult:
"""Execute codebase analysis."""
self.logger.info(f"Starting codebase analysis for {context.owner}/{context.repo}")
self.logger.info(
f"Starting codebase analysis for {context.owner}/{context.repo}"
)
actions_taken = []
@@ -192,7 +194,7 @@ class CodebaseAgent(BaseAgent):
# Check for docstrings (Python)
if ext == ".py":
if 'def ' in content and '"""' not in content:
if "def " in content and '"""' not in content:
metrics.missing_docstrings += 1
except Exception as e:
@@ -273,23 +275,45 @@ Be constructive and actionable. Focus on the most impactful improvements.
)
except Exception as e:
self.logger.error(f"AI analysis failed: {e}")
# Try to log the raw response if possible (requires accessing the last response)
# Since we don't have direct access here, we rely on having good logging in LLMClient if needed.
# But let's add a note to the summary.
self.logger.debug(f"Full error details: {type(e).__name__}: {str(e)}")
# Calculate basic health score from metrics
health_score = 70
if metrics.todo_count > 10:
health_score -= 10
if metrics.fixme_count > 5:
health_score -= 10
if metrics.deprecated_count > 3:
health_score -= 5
# Build recommendations based on metrics
recommendations = []
if metrics.todo_count > 5:
recommendations.append(
f"Review and address {metrics.todo_count} TODO comments"
)
if metrics.fixme_count > 0:
recommendations.append(
f"Review and fix {metrics.fixme_count} FIXME markers"
)
if metrics.deprecated_count > 0:
recommendations.append(
f"Update {metrics.deprecated_count} deprecated code sections"
)
if metrics.missing_docstrings > 5:
recommendations.append("Consider adding more documentation")
if not recommendations:
recommendations.append("Codebase appears well-maintained")
return CodebaseReport(
summary=f"Basic analysis complete (AI unavailable: {e})",
summary=f"Basic metrics analysis complete. {metrics.total_files} files analyzed across {len(metrics.languages)} languages.",
health_score=health_score,
metrics=metrics,
issues=[],
recommendations=["Manual review recommended"],
architecture_notes=[],
recommendations=recommendations,
architecture_notes=[
f"Primary languages: {', '.join(list(metrics.languages.keys())[:3])}"
],
)
def _get_key_files_content(
@@ -386,7 +410,11 @@ Be constructive and actionable. Focus on the most impactful improvements.
def _generate_report_body(self, report: CodebaseReport) -> str:
"""Generate the report issue body."""
health_emoji = "🟢" if report.health_score >= 80 else ("🟡" if report.health_score >= 60 else "🔴")
health_emoji = (
"🟢"
if report.health_score >= 80
else ("🟡" if report.health_score >= 60 else "🔴")
)
lines = [
f"{self.AI_DISCLAIMER}",
@@ -427,7 +455,11 @@ Be constructive and actionable. Focus on the most impactful improvements.
lines.append("")
for issue in report.issues[:10]:
severity = issue.get("severity", "MEDIUM")
emoji = "🔴" if severity == "HIGH" else ("🟡" if severity == "MEDIUM" else "🟢")
emoji = (
"🔴"
if severity == "HIGH"
else ("🟡" if severity == "MEDIUM" else "🟢")
)
lines.append(f"### [{severity}] {issue.get('category', 'General')}")
lines.append("")
lines.append(issue.get("description", ""))

View File

@@ -262,9 +262,7 @@ class IssueAgent(BaseAgent):
if labels_to_add:
try:
self.gitea.add_issue_labels(owner, repo, issue_index, labels_to_add)
return [
name for name, id in label_map.items() if id in labels_to_add
]
return [name for name, id in label_map.items() if id in labels_to_add]
except Exception as e:
self.logger.warning(f"Failed to add labels: {e}")
@@ -336,6 +334,8 @@ class IssueAgent(BaseAgent):
return self._command_explain(title, body)
elif command == "suggest":
return self._command_suggest(title, body)
elif command == "triage":
return self._command_triage(context, issue)
return f"{self.AI_DISCLAIMER}\n\nSorry, I don't understand the command `{command}`."
@@ -390,3 +390,33 @@ Be practical and concise."""
return f"{self.AI_DISCLAIMER}\n\n**Suggestions:**\n{response.content}"
except Exception as e:
return f"{self.AI_DISCLAIMER}\n\nSorry, I was unable to generate suggestions. Error: {e}"
def _command_triage(self, context: AgentContext, issue: dict) -> str:
    """Run a full triage pass over the issue and report the outcome.

    Analyzes the issue via the triage helper, optionally applies labels
    (controlled by the ``agents.issue.auto_label`` config flag, which
    defaults to enabled), and returns a markdown comment describing the
    result. Any failure is reported back as a disclaimer-prefixed message
    rather than raised.
    """
    issue_index = issue.get("number")
    title = issue.get("title", "")
    body = issue.get("body", "")
    author = issue.get("user", {}).get("login", "unknown")
    existing_labels = [label.get("name", "") for label in issue.get("labels", [])]

    try:
        # Run the triage analysis over the issue's metadata.
        triage = self._triage_issue(title, body, author, existing_labels)

        # Label application is opt-out: enabled unless config disables it.
        issue_cfg = self.config.get("agents", {}).get("issue", {})
        applied = []
        if issue_cfg.get("auto_label", True):
            applied = self._apply_labels(
                context.owner, context.repo, issue_index, triage
            )

        # Build the human-readable triage comment; append labels if any stuck.
        comment = self._generate_triage_comment(triage, issue)
        if applied:
            comment += f"\n\n**Labels Applied:** {', '.join(applied)}"
        return comment
    except Exception as e:
        return f"{self.AI_DISCLAIMER}\n\nSorry, I was unable to triage this issue. Error: {e}"

View File

@@ -346,7 +346,9 @@ class LLMClient:
config: Provider-specific configuration.
"""
if provider not in self.PROVIDERS:
raise ValueError(f"Unknown provider: {provider}. Available: {list(self.PROVIDERS.keys())}")
raise ValueError(
f"Unknown provider: {provider}. Available: {list(self.PROVIDERS.keys())}"
)
self.provider_name = provider
self.config = config or {}
@@ -405,6 +407,8 @@ class LLMClient:
Handles markdown code blocks and preamble text.
"""
import re
content = content.strip()
# Attempt 1: direct parse
@@ -413,16 +417,24 @@ class LLMClient:
except json.JSONDecodeError:
pass
# Attempt 2: Extract from markdown code blocks
# Attempt 2: Extract from markdown code blocks (improved regex)
if "```" in content:
# Find the JSON block
import re
match = re.search(r"```(?:json)?\s*([\s\S]*?)\s*```", content)
if match:
try:
return json.loads(match.group(1))
except json.JSONDecodeError:
pass
# Try multiple code block patterns
patterns = [
r"```json\s*\n([\s\S]*?)\n```", # ```json with newlines
r"```json\s*([\s\S]*?)```", # ```json without newlines
r"```\s*\n([\s\S]*?)\n```", # ``` with newlines
r"```\s*([\s\S]*?)```", # ``` without newlines
]
for pattern in patterns:
match = re.search(pattern, content)
if match:
try:
json_str = match.group(1).strip()
return json.loads(json_str)
except json.JSONDecodeError:
continue
# Attempt 3: Find first { and last }
try:
@@ -435,17 +447,22 @@ class LLMClient:
pass
# Attempt 4: Fix common JSON errors (comments, trailing commas)
# This is risky but helpful for LLM output
try:
# Remove comments
import re
json_str = re.sub(r"//.*", "", content)
json_str = re.sub(r"/\*[\s\S]*?\*/", "", json_str)
return json.loads(json_str)
except json.JSONDecodeError as e:
# If all attempts fail, raise an error with the content for debugging
snippet = content[:500] + "..." if len(content) > 500 else content
raise ValueError(f"Failed to parse JSON response: {e}. Raw content snippet: {snippet!r}")
# Try to extract JSON after cleaning
start = json_str.find("{")
end = json_str.rfind("}")
if start != -1 and end != -1:
json_str = json_str[start : end + 1]
return json.loads(json_str)
except json.JSONDecodeError:
pass
# If all attempts fail, raise an error with the content for debugging
snippet = content[:500] + "..." if len(content) > 500 else content
raise ValueError(f"Failed to parse JSON response: {snippet!r}")
@classmethod
def from_config(cls, config: dict) -> "LLMClient":
@@ -469,7 +486,9 @@ class LLMClient:
}
elif provider == "openrouter":
provider_config = {
"model": config.get("model", {}).get("openrouter", "anthropic/claude-3.5-sonnet"),
"model": config.get("model", {}).get(
"openrouter", "anthropic/claude-3.5-sonnet"
),
"temperature": config.get("temperature", 0),
"max_tokens": config.get("max_tokens", 16000),
}

View File

@@ -1,49 +1,49 @@
provider: openai # openai | openrouter | ollama
provider: openai # openai | openrouter | ollama
model:
openai: gpt-4.1-mini
openrouter: anthropic/claude-3.5-sonnet
ollama: codellama:13b
openai: gpt-4.1-mini
openrouter: anthropic/claude-3.5-sonnet
ollama: codellama:13b
temperature: 0
max_tokens: 4096
# Review settings
review:
fail_on_severity: HIGH
max_diff_lines: 800
inline_comments: true
security_scan: true
fail_on_severity: HIGH
max_diff_lines: 800
inline_comments: true
security_scan: true
# Agent settings
agents:
issue:
enabled: true
auto_label: true
auto_triage: true
duplicate_threshold: 0.85
events:
- opened
- labeled
pr:
enabled: true
inline_comments: true
security_scan: true
events:
- opened
- synchronize
codebase:
enabled: true
schedule: "0 0 * * 0" # Weekly on Sunday
chat:
enabled: true
name: "Bartender"
max_iterations: 5 # Max tool call iterations per chat
tools:
- search_codebase
- read_file
- search_web
searxng_url: "" # Set via SEARXNG_URL env var or here
issue:
enabled: true
auto_label: true
auto_triage: true
duplicate_threshold: 0.85
events:
- opened
- labeled
pr:
enabled: true
inline_comments: true
security_scan: true
events:
- opened
- synchronize
codebase:
enabled: true
schedule: "0 0 * * 0" # Weekly on Sunday
chat:
enabled: true
name: "Bartender"
max_iterations: 5 # Max tool call iterations per chat
tools:
- search_codebase
- read_file
- search_web
searxng_url: "" # Set via SEARXNG_URL env var or here
# Interaction settings
# CUSTOMIZE YOUR BOT NAME HERE!
@@ -56,41 +56,42 @@ agents:
# NOTE: Also update the workflow files (.github/workflows/ or .gitea/workflows/)
# to match this prefix in the 'if: contains(...)' condition
interaction:
respond_to_mentions: true
mention_prefix: "@bartender" # Change this to customize your bot's name!
commands:
- explain
- suggest
- security
- summarize
respond_to_mentions: true
mention_prefix: "@codebot" # Change this to customize your bot's name!
commands:
- explain
- suggest
- security
- summarize
- triage
# Enterprise settings
enterprise:
audit_log: true
audit_path: "/var/log/ai-review/"
metrics_enabled: true
rate_limit:
requests_per_minute: 30
max_concurrent: 4
audit_log: true
audit_path: "/var/log/ai-review/"
metrics_enabled: true
rate_limit:
requests_per_minute: 30
max_concurrent: 4
# Label mappings for auto-labeling
labels:
priority:
high: "priority: high"
medium: "priority: medium"
low: "priority: low"
type:
bug: "type: bug"
feature: "type: feature"
question: "type: question"
docs: "type: documentation"
status:
ai_approved: "ai-approved"
ai_changes_required: "ai-changes-required"
ai_reviewed: "ai-reviewed"
priority:
high: "priority: high"
medium: "priority: medium"
low: "priority: low"
type:
bug: "type: bug"
feature: "type: feature"
question: "type: question"
docs: "type: documentation"
status:
ai_approved: "ai-approved"
ai_changes_required: "ai-changes-required"
ai_reviewed: "ai-reviewed"
# Security scanning rules
security:
enabled: true
fail_on_high: true
rules_file: "security/security_rules.yml"
enabled: true
fail_on_high: true
rules_file: "security/security_rules.yml"