update
All checks were successful
Enterprise AI Code Review / ai-review (pull_request) Successful in 32s

This commit is contained in:
2025-12-28 14:10:04 +00:00
parent c17a61b27a
commit 69d9963597
17 changed files with 627 additions and 444 deletions

View File

@@ -96,7 +96,9 @@ class CodebaseAgent(BaseAgent):
def execute(self, context: AgentContext) -> AgentResult:
"""Execute codebase analysis."""
self.logger.info(f"Starting codebase analysis for {context.owner}/{context.repo}")
self.logger.info(
f"Starting codebase analysis for {context.owner}/{context.repo}"
)
actions_taken = []
@@ -192,7 +194,7 @@ class CodebaseAgent(BaseAgent):
# Check for docstrings (Python)
if ext == ".py":
if 'def ' in content and '"""' not in content:
if "def " in content and '"""' not in content:
metrics.missing_docstrings += 1
except Exception as e:
@@ -273,23 +275,45 @@ Be constructive and actionable. Focus on the most impactful improvements.
)
except Exception as e:
self.logger.error(f"AI analysis failed: {e}")
# Try to log the raw response if possible (requires accessing the last response)
# Since we don't have direct access here, we rely on having good logging in LLMClient if needed.
# But let's add a note to the summary.
self.logger.debug(f"Full error details: {type(e).__name__}: {str(e)}")
# Calculate basic health score from metrics
health_score = 70
if metrics.todo_count > 10:
health_score -= 10
if metrics.fixme_count > 5:
health_score -= 10
if metrics.deprecated_count > 3:
health_score -= 5
# Build recommendations based on metrics
recommendations = []
if metrics.todo_count > 5:
recommendations.append(
f"Review and address {metrics.todo_count} TODO comments"
)
if metrics.fixme_count > 0:
recommendations.append(
f"Review and fix {metrics.fixme_count} FIXME markers"
)
if metrics.deprecated_count > 0:
recommendations.append(
f"Update {metrics.deprecated_count} deprecated code sections"
)
if metrics.missing_docstrings > 5:
recommendations.append("Consider adding more documentation")
if not recommendations:
recommendations.append("Codebase appears well-maintained")
return CodebaseReport(
summary=f"Basic analysis complete (AI unavailable: {e})",
summary=f"Basic metrics analysis complete. {metrics.total_files} files analyzed across {len(metrics.languages)} languages.",
health_score=health_score,
metrics=metrics,
issues=[],
recommendations=["Manual review recommended"],
architecture_notes=[],
recommendations=recommendations,
architecture_notes=[
f"Primary languages: {', '.join(list(metrics.languages.keys())[:3])}"
],
)
def _get_key_files_content(
@@ -386,7 +410,11 @@ Be constructive and actionable. Focus on the most impactful improvements.
def _generate_report_body(self, report: CodebaseReport) -> str:
"""Generate the report issue body."""
health_emoji = "🟢" if report.health_score >= 80 else ("🟡" if report.health_score >= 60 else "🔴")
health_emoji = (
"🟢"
if report.health_score >= 80
else ("🟡" if report.health_score >= 60 else "🔴")
)
lines = [
f"{self.AI_DISCLAIMER}",
@@ -427,7 +455,11 @@ Be constructive and actionable. Focus on the most impactful improvements.
lines.append("")
for issue in report.issues[:10]:
severity = issue.get("severity", "MEDIUM")
emoji = "🔴" if severity == "HIGH" else ("🟡" if severity == "MEDIUM" else "🟢")
emoji = (
"🔴"
if severity == "HIGH"
else ("🟡" if severity == "MEDIUM" else "🟢")
)
lines.append(f"### [{severity}] {issue.get('category', 'General')}")
lines.append("")
lines.append(issue.get("description", ""))

View File

@@ -262,9 +262,7 @@ class IssueAgent(BaseAgent):
if labels_to_add:
try:
self.gitea.add_issue_labels(owner, repo, issue_index, labels_to_add)
return [
name for name, id in label_map.items() if id in labels_to_add
]
return [name for name, id in label_map.items() if id in labels_to_add]
except Exception as e:
self.logger.warning(f"Failed to add labels: {e}")
@@ -336,6 +334,8 @@ class IssueAgent(BaseAgent):
return self._command_explain(title, body)
elif command == "suggest":
return self._command_suggest(title, body)
elif command == "triage":
return self._command_triage(context, issue)
return f"{self.AI_DISCLAIMER}\n\nSorry, I don't understand the command `{command}`."
@@ -390,3 +390,33 @@ Be practical and concise."""
return f"{self.AI_DISCLAIMER}\n\n**Suggestions:**\n{response.content}"
except Exception as e:
return f"{self.AI_DISCLAIMER}\n\nSorry, I was unable to generate suggestions. Error: {e}"
def _command_triage(self, context: AgentContext, issue: dict) -> str:
    """Run a complete triage pass on *issue* and return the comment text.

    Analyzes the issue title/body, optionally applies the suggested labels
    (controlled by the ``agents.issue.auto_label`` config flag, which
    defaults to enabled), and builds the triage comment. On any failure,
    an apologetic message is returned instead of raising.
    """
    issue_title = issue.get("title", "")
    issue_body = issue.get("body", "")
    reporter = issue.get("user", {}).get("login", "unknown")
    current_labels = [entry.get("name", "") for entry in issue.get("labels", [])]
    number = issue.get("number")

    try:
        # Core triage analysis over the issue content.
        triage = self._triage_issue(issue_title, issue_body, reporter, current_labels)

        # Label application is opt-out via configuration.
        issue_cfg = self.config.get("agents", {}).get("issue", {})
        applied = []
        if issue_cfg.get("auto_label", True):
            applied = self._apply_labels(context.owner, context.repo, number, triage)

        comment = self._generate_triage_comment(triage, issue)
        if applied:
            comment += f"\n\n**Labels Applied:** {', '.join(applied)}"
        return comment
    except Exception as e:
        return f"{self.AI_DISCLAIMER}\n\nSorry, I was unable to triage this issue. Error: {e}"