just why not
All checks were successful
AI Codebase Quality Review / ai-codebase-review (push) Successful in 39s

This commit is contained in:
2026-01-07 21:19:46 +01:00
parent a1fe47cdf4
commit e8d28225e0
24 changed files with 6431 additions and 250 deletions

View File

@@ -365,9 +365,20 @@ class IssueAgent(BaseAgent):
"commands", ["explain", "suggest", "security", "summarize", "triage"]
)
# Also check for setup-labels command (not in config since it's a setup command)
if f"{mention_prefix} setup-labels" in body.lower():
return "setup-labels"
# Built-in commands not in config
builtin_commands = [
"setup-labels",
"check-deps",
"suggest-tests",
"refactor-suggest",
"architecture",
"arch-check",
]
# Check built-in commands first
for command in builtin_commands:
if f"{mention_prefix} {command}" in body.lower():
return command
for command in commands:
if f"{mention_prefix} {command}" in body.lower():
@@ -392,6 +403,14 @@ class IssueAgent(BaseAgent):
return self._command_triage(context, issue)
elif command == "setup-labels":
return self._command_setup_labels(context, issue)
elif command == "check-deps":
return self._command_check_deps(context)
elif command == "suggest-tests":
return self._command_suggest_tests(context)
elif command == "refactor-suggest":
return self._command_refactor_suggest(context)
elif command == "architecture" or command == "arch-check":
return self._command_architecture(context)
return f"{self.AI_DISCLAIMER}\n\nSorry, I don't understand the command `{command}`."
@@ -464,6 +483,12 @@ Be practical and concise."""
- `{mention_prefix} suggest` - Solution suggestions or next steps
- `{mention_prefix} security` - Security-focused analysis of the issue
### Code Quality & Security
- `{mention_prefix} check-deps` - Scan dependencies for security vulnerabilities
- `{mention_prefix} suggest-tests` - Suggest test cases for changed/new code
- `{mention_prefix} refactor-suggest` - Suggest refactoring opportunities
- `{mention_prefix} architecture` - Check architecture compliance (alias: `arch-check`)
### Interactive Chat
- `{mention_prefix} [question]` - Ask questions about the codebase (uses search & file reading tools)
- Example: `{mention_prefix} how does authentication work?`
@@ -494,9 +519,19 @@ PR reviews run automatically when you open or update a pull request. The bot pro
{mention_prefix} triage
```
**Get help understanding:**
**Check for dependency vulnerabilities:**
```
{mention_prefix} explain
{mention_prefix} check-deps
```
**Get test suggestions:**
```
{mention_prefix} suggest-tests
```
**Check architecture compliance:**
```
{mention_prefix} architecture
```
**Ask about the codebase:**
@@ -504,11 +539,6 @@ PR reviews run automatically when you open or update a pull request. The bot pro
{mention_prefix} how does the authentication system work?
```
**Setup repository labels:**
```
{mention_prefix} setup-labels
```
---
*For full documentation, see the [README](https://github.com/YourOrg/OpenRabbit/blob/main/README.md)*
@@ -854,3 +884,145 @@ PR reviews run automatically when you open or update a pull request. The bot pro
return f"{prefix} - {value}"
else: # colon or unknown
return base_name
def _command_check_deps(self, context: AgentContext) -> str:
    """Scan repository dependencies for known security vulnerabilities.

    Delegates to the DependencyAgent sub-agent and relays its report.

    Args:
        context: Event context forwarded unchanged to the sub-agent.

    Returns:
        The sub-agent's markdown report when available, otherwise a
        disclaimer-prefixed failure / unavailable message.
    """
    try:
        from agents.dependency_agent import DependencyAgent

        outcome = DependencyAgent(config=self.config).run(context)
        if not outcome.success:
            reason = outcome.error or outcome.message
            return f"{self.AI_DISCLAIMER}\n\n**Dependency Check Failed**\n\n{reason}"
        # Prefer the full report; fall back to the short status message.
        return outcome.data.get("report", f"{self.AI_DISCLAIMER}\n\n{outcome.message}")
    except ImportError:
        # The dependency agent is an optional component.
        return (
            f"{self.AI_DISCLAIMER}\n\n**Dependency Agent Not Available**\n\n"
            "The dependency security agent is not installed."
        )
    except Exception as e:
        self.logger.error(f"Dependency check failed: {e}")
        return f"{self.AI_DISCLAIMER}\n\n**Dependency Check Error**\n\n{e}"
def _command_suggest_tests(self, context: AgentContext) -> str:
    """Ask the test-coverage sub-agent to suggest tests for changed code.

    Args:
        context: Event context handed to ``TestCoverageAgent.run``.

    Returns:
        The sub-agent's markdown report when available, otherwise a
        disclaimer-prefixed failure / unavailable message.
    """
    try:
        from agents.test_coverage_agent import TestCoverageAgent

        run_result = TestCoverageAgent(config=self.config).run(context)
        if run_result.success:
            # "report" carries the rendered output; message is the fallback.
            default_reply = f"{self.AI_DISCLAIMER}\n\n{run_result.message}"
            return run_result.data.get("report", default_reply)
        reason = run_result.error or run_result.message
        return f"{self.AI_DISCLAIMER}\n\n**Test Suggestion Failed**\n\n{reason}"
    except ImportError:
        # Optional component; degrade gracefully when not installed.
        return (
            f"{self.AI_DISCLAIMER}\n\n**Test Coverage Agent Not Available**\n\n"
            "The test coverage agent is not installed."
        )
    except Exception as e:
        self.logger.error(f"Test suggestion failed: {e}")
        return f"{self.AI_DISCLAIMER}\n\n**Test Suggestion Error**\n\n{e}"
def _command_architecture(self, context: AgentContext) -> str:
    """Run the architecture-compliance sub-agent and relay its report.

    Args:
        context: Event context handed to ``ArchitectureAgent.run``.

    Returns:
        The sub-agent's markdown report when available, otherwise a
        disclaimer-prefixed failure / unavailable message.
    """
    try:
        from agents.architecture_agent import ArchitectureAgent

        checker = ArchitectureAgent(config=self.config)
        res = checker.run(context)
        if not res.success:
            # Surface whichever detail the sub-agent populated.
            detail = res.error or res.message
            return f"{self.AI_DISCLAIMER}\n\n**Architecture Check Failed**\n\n{detail}"
        return res.data.get("report", f"{self.AI_DISCLAIMER}\n\n{res.message}")
    except ImportError:
        # Optional component; degrade gracefully when not installed.
        return (
            f"{self.AI_DISCLAIMER}\n\n**Architecture Agent Not Available**\n\n"
            "The architecture compliance agent is not installed."
        )
    except Exception as e:
        self.logger.error(f"Architecture check failed: {e}")
        return f"{self.AI_DISCLAIMER}\n\n**Architecture Check Error**\n\n{e}"
def _command_refactor_suggest(self, context: AgentContext) -> str:
    """Produce LLM-backed refactoring suggestions for an issue.

    Builds a structured prompt from the issue title/body, asks the LLM
    for a JSON payload, and renders it as a markdown report with a
    priority table plus per-suggestion details.

    Args:
        context: Event payload; the issue dict is read from
            ``context.event_data``.

    Returns:
        Markdown report text, or a disclaimer-prefixed error message if
        the LLM call (or rendering) fails.
    """
    issue = context.event_data.get("issue", {})
    title = issue.get("title", "")
    body = issue.get("body", "")
    # The prompt requests strict JSON so the reply can be rendered below.
    prompt = f"""Analyze the following issue/context and suggest refactoring opportunities:
Issue Title: {title}
Issue Body: {body}
Based on common refactoring patterns, suggest:
1. Code smell detection (if any code is referenced)
2. Design pattern opportunities
3. Complexity reduction suggestions
4. DRY principle violations
5. SOLID principle improvements
Format your response as a structured report with actionable recommendations.
If no code is referenced in the issue, provide general refactoring guidance based on the context.
Return as JSON:
{{
"summary": "Brief summary of refactoring opportunities",
"suggestions": [
{{
"category": "Code Smell | Design Pattern | Complexity | DRY | SOLID",
"title": "Short title",
"description": "Detailed description",
"priority": "high | medium | low",
"effort": "small | medium | large"
}}
],
"general_advice": "Any general refactoring advice"
}}"""
    # Priority markers used in the recommendations table.
    badges = {"HIGH": "🔴", "MEDIUM": "🟡", "LOW": "🟢"}
    try:
        reply = self.call_llm_json(prompt)
        parts = [f"{self.AI_DISCLAIMER}\n", "## Refactoring Suggestions\n"]
        if reply.get("summary"):
            parts.append(f"**Summary:** {reply['summary']}\n")
        items = reply.get("suggestions", [])
        if items:
            parts.extend(
                [
                    "### Recommendations\n",
                    "| Priority | Category | Suggestion | Effort |",
                    "|----------|----------|------------|--------|",
                ]
            )
            for item in items:
                level = item.get("priority", "medium").upper()
                badge = badges.get(level, "")
                parts.append(
                    f"| {badge} {level} | {item.get('category', 'General')} | "
                    f"**{item.get('title', 'Suggestion')}** | {item.get('effort', 'medium')} |"
                )
            parts.append("")
            # One numbered paragraph per suggestion with its long description.
            parts.append("### Details\n")
            for idx, item in enumerate(items, 1):
                parts.append(f"**{idx}. {item.get('title', 'Suggestion')}**")
                parts.append(f"{item.get('description', 'No description')}\n")
        if reply.get("general_advice"):
            parts.append("### General Advice\n")
            parts.append(reply["general_advice"])
        return "\n".join(parts)
    except Exception as e:
        self.logger.error(f"Refactor suggestion failed: {e}")
        return (
            f"{self.AI_DISCLAIMER}\n\n**Refactor Suggestion Failed**\n\nError: {e}"
        )