All checks were successful
Enterprise AI Code Review / ai-review (pull_request) Successful in 39s
Implements code diff explainer that translates technical changes into plain language for non-technical stakeholders (PMs, designers, new team members). Features: - Plain-language explanations without jargon - File-by-file breakdown with 'what' and 'why' context - Architecture impact analysis - Breaking change detection - Perfect for onboarding and cross-functional reviews Implementation: - Added explain_diff.md prompt template with plain-language guidelines - Implemented _handle_explain_diff_command() in PRAgent - Added _format_diff_explanation() for readable markdown - Updated PRAgent.can_handle() to route explain-diff commands - Added 'explain-diff' to config.yml commands list Workflow Safety (prevents duplicate runs): - Added '@codebot explain-diff' to ai-comment-reply.yml conditions - Excluded from ai-chat.yml to prevent duplication - Only triggers on PR comments (not issues) - Manual command only (no automatic triggering) Testing: - 9 comprehensive tests in TestDiffExplanation class - Tests command detection, formatting, plain-language output - Verifies prompt formatting and empty section handling Documentation: - Updated README.md with explain-diff command and examples - Added detailed implementation guide in CLAUDE.md - Included plain-language rules and use cases Related: Milestone 2 high-priority feature - code diff explainer
1297 lines
41 KiB
Python
1297 lines
41 KiB
Python
"""Test Suite for AI Code Review Workflow
|
||
|
||
Tests for verifying prompt formatting, agent logic, and core functionality.
|
||
Run with: pytest tests/ -v
|
||
"""
|
||
|
||
import os
|
||
import sys
|
||
|
||
# Add the tools directory to path
|
||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "tools", "ai-review"))
|
||
|
||
import pytest
|
||
|
||
|
||
class TestPromptFormatting:
|
||
"""Test that all prompts can be formatted without errors."""
|
||
|
||
def get_prompt_path(self, name: str) -> str:
|
||
"""Get the full path to a prompt file."""
|
||
return os.path.join(
|
||
os.path.dirname(__file__),
|
||
"..",
|
||
"tools",
|
||
"ai-review",
|
||
"prompts",
|
||
f"{name}.md",
|
||
)
|
||
|
||
def load_prompt(self, name: str) -> str:
|
||
"""Load a prompt file."""
|
||
path = self.get_prompt_path(name)
|
||
with open(path) as f:
|
||
return f.read()
|
||
|
||
def test_issue_triage_prompt_formatting(self):
|
||
"""Test that issue_triage.md can be formatted with placeholders."""
|
||
prompt = self.load_prompt("issue_triage")
|
||
|
||
# This should NOT raise a KeyError
|
||
formatted = prompt.format(
|
||
title="Test Issue Title",
|
||
body="This is the issue body content",
|
||
author="testuser",
|
||
existing_labels="bug, urgent",
|
||
)
|
||
|
||
assert "Test Issue Title" in formatted
|
||
assert "This is the issue body content" in formatted
|
||
assert "testuser" in formatted
|
||
assert "bug, urgent" in formatted
|
||
# JSON example should still be present (curly braces escaped)
|
||
assert '"type"' in formatted
|
||
assert '"priority"' in formatted
|
||
|
||
def test_issue_response_prompt_formatting(self):
|
||
"""Test that issue_response.md can be formatted with placeholders."""
|
||
prompt = self.load_prompt("issue_response")
|
||
|
||
formatted = prompt.format(
|
||
issue_type="bug",
|
||
priority="high",
|
||
title="Bug Report",
|
||
body="Description of the bug",
|
||
triage_analysis="This is a high priority bug",
|
||
)
|
||
|
||
assert "bug" in formatted
|
||
assert "high" in formatted
|
||
assert "Bug Report" in formatted
|
||
# JSON example should still be present
|
||
assert '"comment"' in formatted
|
||
|
||
def test_base_prompt_no_placeholders(self):
|
||
"""Test that base.md loads correctly (no placeholders needed)."""
|
||
prompt = self.load_prompt("base")
|
||
|
||
# Should contain key elements
|
||
assert "security" in prompt.lower()
|
||
assert "JSON" in prompt
|
||
assert "severity" in prompt.lower()
|
||
|
||
def test_prompts_have_escaped_json(self):
|
||
"""Verify JSON examples use double curly braces."""
|
||
for prompt_name in ["issue_triage", "issue_response"]:
|
||
prompt = self.load_prompt(prompt_name)
|
||
|
||
# Check that format() doesn't fail
|
||
try:
|
||
# Try with minimal placeholders
|
||
if prompt_name == "issue_triage":
|
||
prompt.format(title="t", body="b", author="a", existing_labels="l")
|
||
elif prompt_name == "issue_response":
|
||
prompt.format(
|
||
issue_type="t",
|
||
priority="p",
|
||
title="t",
|
||
body="b",
|
||
triage_analysis="a",
|
||
)
|
||
except KeyError as e:
|
||
pytest.fail(f"Prompt {prompt_name} has unescaped curly braces: {e}")
|
||
|
||
|
||
class TestImports:
    """Smoke tests: every package module must be importable."""

    def test_import_agents(self):
        """Agent base types and concrete agents import cleanly."""
        from agents.base_agent import AgentContext, AgentResult, BaseAgent
        from agents.codebase_agent import CodebaseAgent
        from agents.issue_agent import IssueAgent
        from agents.pr_agent import PRAgent

        for imported in (BaseAgent, IssueAgent, PRAgent, CodebaseAgent):
            assert imported is not None

    def test_import_clients(self):
        """Gitea and LLM client classes import cleanly."""
        from clients.gitea_client import GiteaClient
        from clients.llm_client import LLMClient

        for imported in (GiteaClient, LLMClient):
            assert imported is not None

    def test_import_security(self):
        """The security scanner imports cleanly."""
        from security.security_scanner import SecurityScanner

        assert SecurityScanner is not None

    def test_import_enterprise(self):
        """Enterprise audit/metrics modules import cleanly."""
        from enterprise.audit_logger import AuditLogger
        from enterprise.metrics import MetricsCollector

        for imported in (AuditLogger, MetricsCollector):
            assert imported is not None

    def test_import_dispatcher(self):
        """The dispatcher imports cleanly."""
        from dispatcher import Dispatcher

        assert Dispatcher is not None
class TestSecurityScanner:
    """Exercise the security scanner's pattern detection."""

    @staticmethod
    def _findings_for(code: str) -> list:
        """Scan *code* as if it were test.py and return the findings."""
        from security.security_scanner import SecurityScanner

        scanner = SecurityScanner()
        return list(scanner.scan_content(code, "test.py"))

    def test_detects_hardcoded_secret(self):
        """A literal API key must yield at least one HIGH severity finding."""
        findings = self._findings_for(
            """
API_KEY = "sk-1234567890abcdef"
"""
        )
        assert len(findings) >= 1
        assert any(f.severity == "HIGH" for f in findings)

    def test_detects_eval(self):
        """Use of eval() on user input must be flagged."""
        findings = self._findings_for(
            """
result = eval(user_input)
"""
        )
        assert len(findings) >= 1
        assert any("eval" in f.rule_name.lower() for f in findings)

    def test_no_false_positives_on_clean_code(self):
        """Benign code must not produce any HIGH severity findings."""
        findings = self._findings_for(
            """
def hello():
    print("Hello, world!")
    return 42
"""
        )
        high_findings = [f for f in findings if f.severity == "HIGH"]
        assert len(high_findings) == 0
class TestAgentContext:
    """Test agent context and result dataclasses."""

    def test_agent_context_creation(self):
        """AgentContext stores its constructor fields verbatim."""
        from agents.base_agent import AgentContext

        ctx = AgentContext(
            owner="testowner",
            repo="testrepo",
            event_type="issues",
            event_data={"action": "opened"},
            config={},
        )

        assert ctx.owner == "testowner"
        assert ctx.repo == "testrepo"
        assert ctx.event_type == "issues"

    def test_agent_result_creation(self):
        """AgentResult stores success flag, message, data and actions."""
        from agents.base_agent import AgentResult

        outcome = AgentResult(
            success=True,
            message="Test passed",
            data={"key": "value"},
            actions_taken=["action1", "action2"],
        )

        assert outcome.success is True
        assert outcome.message == "Test passed"
        assert len(outcome.actions_taken) == 2
class TestMetrics:
    """Test metrics collection."""

    def test_counter_increment(self):
        """Counter starts at zero and inc() supports an optional step."""
        from enterprise.metrics import Counter

        counter = Counter("test_counter")
        assert counter.value == 0
        counter.inc()
        assert counter.value == 1
        counter.inc(5)
        assert counter.value == 6

    def test_histogram_observation(self):
        """Histogram tracks observation count and running sum."""
        import math

        from enterprise.metrics import Histogram

        hist = Histogram("test_histogram")
        hist.observe(0.1)
        hist.observe(0.5)
        hist.observe(1.0)

        assert hist.count == 3
        # 0.1 is not exactly representable in binary floating point, so an
        # exact `== 1.6` comparison is fragile; compare with a tolerance.
        assert math.isclose(hist.sum, 1.6)

    def test_metrics_collector_summary(self):
        """One successful request is reflected in the collector summary."""
        from enterprise.metrics import MetricsCollector

        collector = MetricsCollector()
        collector.record_request_start("TestAgent")
        collector.record_request_end("TestAgent", success=True, duration_seconds=0.5)

        summary = collector.get_summary()
        assert summary["requests"]["total"] == 1
        assert summary["requests"]["success"] == 1
class TestHelpCommand:
    """Test help command functionality."""

    @staticmethod
    def _agent_with(interaction: dict):
        """Build an IssueAgent with no clients and the given interaction config."""
        from agents.issue_agent import IssueAgent

        return IssueAgent(
            gitea_client=None,
            llm_client=None,
            config={"interaction": interaction},
        )

    def test_help_command_returns_text(self):
        """help must return a substantial block of text using the prefix."""
        agent = self._agent_with(
            {
                "mention_prefix": "@codebot",
                "commands": ["help", "triage", "explain"],
            }
        )

        help_text = agent._command_help()

        assert help_text is not None
        assert len(help_text) > 100
        assert "@codebot" in help_text
        assert "help" in help_text.lower()
        assert "triage" in help_text.lower()

    def test_help_includes_all_sections(self):
        """Every major section heading must appear in the help output."""
        help_text = self._agent_with({"mention_prefix": "@codebot"})._command_help()

        for section in (
            "Issue Triage",
            "Interactive Chat",
            "Setup & Utility",
            "Pull Request",
            "Quick Examples",
        ):
            assert section in help_text

    def test_help_uses_custom_bot_name(self):
        """The configured mention prefix replaces the default everywhere."""
        help_text = self._agent_with({"mention_prefix": "@mybot"})._command_help()

        assert "@mybot" in help_text
        assert "@codebot" not in help_text

    def test_help_includes_examples(self):
        """Example command names must be present in the help output."""
        help_text = self._agent_with({"mention_prefix": "@codebot"})._command_help()

        assert "triage" in help_text
        assert "explain" in help_text
        assert "setup-labels" in help_text
class TestLabelSetup:
    """Test label setup and schema detection."""

    # Regex patterns shared by the schema-detection tests below.
    LABEL_PATTERNS = {
        "prefix_slash": r"^(Kind|Type|Category)/(.+)$",
        "prefix_dash": r"^(Priority|Status|Reviewed) - (.+)$",
        "colon": r"^(type|priority|status): (.+)$",
    }

    @staticmethod
    def _agent(config: dict):
        """Build an IssueAgent with no clients around *config*."""
        from agents.issue_agent import IssueAgent

        return IssueAgent(gitea_client=None, llm_client=None, config=config)

    def test_detect_prefix_slash_schema(self):
        """Kind/Bug style labels are detected as the prefix_slash schema."""
        agent = self._agent({"label_patterns": dict(self.LABEL_PATTERNS)})

        labels = [
            {"name": "Kind/Bug", "color": "d73a4a"},
            {"name": "Kind/Feature", "color": "1d76db"},
            {"name": "Kind/Documentation", "color": "0075ca"},
            {"name": "Priority - High", "color": "d73a4a"},
            {"name": "Priority - Low", "color": "28a745"},
        ]

        schema = agent._detect_label_schema(labels)

        assert schema is not None
        assert schema["pattern"] == "prefix_slash"
        assert "type" in schema["categories"]
        assert "priority" in schema["categories"]
        assert len(schema["categories"]["type"]) == 3

    def test_detect_prefix_dash_schema(self):
        """'Priority - High' style labels map to the prefix_dash schema."""
        agent = self._agent({"label_patterns": dict(self.LABEL_PATTERNS)})

        labels = [
            {"name": "Priority - Critical", "color": "b60205"},
            {"name": "Priority - High", "color": "d73a4a"},
            {"name": "Status - Blocked", "color": "fef2c0"},
        ]

        schema = agent._detect_label_schema(labels)

        assert schema is not None
        assert schema["pattern"] == "prefix_dash"
        assert "priority" in schema["categories"]
        assert "status" in schema["categories"]

    def test_detect_colon_schema(self):
        """'type: bug' style labels map to the colon schema."""
        agent = self._agent({"label_patterns": dict(self.LABEL_PATTERNS)})

        labels = [
            {"name": "type: bug", "color": "d73a4a"},
            {"name": "type: feature", "color": "1d76db"},
            {"name": "priority: high", "color": "d73a4a"},
        ]

        schema = agent._detect_label_schema(labels)

        assert schema is not None
        assert schema["pattern"] == "colon"
        assert "type" in schema["categories"]
        assert "priority" in schema["categories"]

    def test_build_label_mapping(self):
        """Config aliases resolve to the repository's existing label names."""
        agent = self._agent(
            {
                "labels": {
                    "type": {
                        "bug": {"name": "type: bug", "aliases": ["Kind/Bug", "bug"]},
                        "feature": {
                            "name": "type: feature",
                            "aliases": ["Kind/Feature", "feature"],
                        },
                    },
                    "priority": {
                        "high": {
                            "name": "priority: high",
                            "aliases": ["Priority - High", "P1"],
                        }
                    },
                    "status": {},
                }
            }
        )

        existing_labels = [
            {"name": "Kind/Bug", "color": "d73a4a"},
            {"name": "Kind/Feature", "color": "1d76db"},
            {"name": "Priority - High", "color": "d73a4a"},
        ]

        schema = {
            "pattern": "prefix_slash",
            "categories": {
                "type": ["Kind/Bug", "Kind/Feature"],
                "priority": ["Priority - High"],
            },
        }

        mapping = agent._build_label_mapping(existing_labels, schema)

        assert "type" in mapping
        assert "bug" in mapping["type"]
        assert mapping["type"]["bug"] == "Kind/Bug"
        assert "feature" in mapping["type"]
        assert mapping["type"]["feature"] == "Kind/Feature"
        assert "priority" in mapping
        assert "high" in mapping["priority"]
        assert mapping["priority"]["high"] == "Priority - High"

    def test_suggest_label_name_prefix_slash(self):
        """Suggestions follow Category/Title-Case for the prefix_slash pattern."""
        agent = self._agent(
            {
                "labels": {
                    "type": {"bug": {"name": "type: bug", "color": "d73a4a"}},
                    "priority": {},
                    "status": {
                        "ai_approved": {"name": "ai-approved", "color": "28a745"}
                    },
                }
            }
        )

        # Type category.
        assert agent._suggest_label_name("type", "bug", "prefix_slash") == "Kind/Bug"

        # Status category.
        assert (
            agent._suggest_label_name("status", "ai_approved", "prefix_slash")
            == "Status/Ai Approved"
        )

    def test_suggest_label_name_prefix_dash(self):
        """Suggestions follow 'Category - Title' for the prefix_dash pattern."""
        agent = self._agent(
            {
                "labels": {
                    "type": {},
                    "priority": {
                        "high": {"name": "priority: high", "color": "d73a4a"}
                    },
                    "status": {},
                }
            }
        )

        assert (
            agent._suggest_label_name("priority", "high", "prefix_dash")
            == "Priority - High"
        )

    def test_get_label_config_backwards_compatibility(self):
        """A bare string label (legacy format) is normalized with defaults."""
        agent = self._agent(
            {
                "labels": {
                    "type": {
                        "bug": "type: bug"  # Old format
                    },
                    "priority": {},
                    "status": {},
                }
            }
        )

        config = agent._get_label_config("type", "bug")

        assert config["name"] == "type: bug"
        assert config["color"] == "1d76db"  # Default color
        assert config["aliases"] == []

    def test_get_label_config_new_format(self):
        """A dict label keeps explicit name, color, description and aliases."""
        agent = self._agent(
            {
                "labels": {
                    "type": {
                        "bug": {
                            "name": "type: bug",
                            "color": "d73a4a",
                            "description": "Something isn't working",
                            "aliases": ["Kind/Bug", "bug"],
                        }
                    },
                    "priority": {},
                    "status": {},
                }
            }
        )

        config = agent._get_label_config("type", "bug")

        assert config["name"] == "type: bug"
        assert config["color"] == "d73a4a"
        assert config["description"] == "Something isn't working"
        assert "Kind/Bug" in config["aliases"]
class TestPRSummaryGeneration:
|
||
"""Test PR summary generation functionality."""
|
||
|
||
def test_pr_summary_prompt_exists(self):
|
||
"""Verify pr_summary.md prompt file exists."""
|
||
prompt_path = os.path.join(
|
||
os.path.dirname(__file__),
|
||
"..",
|
||
"tools",
|
||
"ai-review",
|
||
"prompts",
|
||
"pr_summary.md",
|
||
)
|
||
assert os.path.exists(prompt_path), "pr_summary.md prompt file not found"
|
||
|
||
def test_pr_summary_prompt_formatting(self):
|
||
"""Test that pr_summary.md can be loaded without errors."""
|
||
prompt_path = os.path.join(
|
||
os.path.dirname(__file__),
|
||
"..",
|
||
"tools",
|
||
"ai-review",
|
||
"prompts",
|
||
"pr_summary.md",
|
||
)
|
||
with open(prompt_path) as f:
|
||
prompt = f.read()
|
||
|
||
# Check for key elements
|
||
assert "summary" in prompt.lower()
|
||
assert "change_type" in prompt.lower()
|
||
assert "files_affected" in prompt.lower()
|
||
assert "impact" in prompt.lower()
|
||
assert "JSON" in prompt
|
||
|
||
# Verify JSON examples use double curly braces (escaped)
|
||
# Should not raise KeyError when formatted with empty string
|
||
try:
|
||
formatted = prompt.format()
|
||
except KeyError as e:
|
||
pytest.fail(f"Prompt has unescaped placeholders: {e}")
|
||
|
||
def test_pr_agent_has_summary_marker(self):
|
||
"""Verify PRAgent has PR_SUMMARY_MARKER constant."""
|
||
from agents.pr_agent import PRAgent
|
||
|
||
assert hasattr(PRAgent, "PR_SUMMARY_MARKER")
|
||
assert PRAgent.PR_SUMMARY_MARKER == "<!-- AI_PR_SUMMARY -->"
|
||
|
||
def test_pr_agent_can_handle_summarize_command(self):
|
||
"""Test that PRAgent can handle @codebot summarize in PR comments."""
|
||
from agents.pr_agent import PRAgent
|
||
|
||
config = {
|
||
"agents": {
|
||
"pr": {
|
||
"enabled": True,
|
||
}
|
||
},
|
||
"interaction": {
|
||
"mention_prefix": "@codebot",
|
||
},
|
||
}
|
||
|
||
agent = PRAgent(config=config)
|
||
|
||
# Test summarize command in PR comment
|
||
event_data = {
|
||
"action": "created",
|
||
"issue": {
|
||
"number": 123,
|
||
"pull_request": {}, # Indicates this is a PR
|
||
},
|
||
"comment": {
|
||
"body": "@codebot summarize this PR please",
|
||
},
|
||
}
|
||
|
||
assert agent.can_handle("issue_comment", event_data) is True
|
||
|
||
def test_pr_agent_can_handle_summarize_case_insensitive(self):
|
||
"""Test that summarize command is case-insensitive."""
|
||
from agents.pr_agent import PRAgent
|
||
|
||
config = {
|
||
"agents": {
|
||
"pr": {
|
||
"enabled": True,
|
||
}
|
||
},
|
||
"interaction": {
|
||
"mention_prefix": "@codebot",
|
||
},
|
||
}
|
||
|
||
agent = PRAgent(config=config)
|
||
|
||
# Test various casings
|
||
for body in [
|
||
"@codebot SUMMARIZE",
|
||
"@codebot Summarize",
|
||
"@codebot SuMmArIzE",
|
||
]:
|
||
event_data = {
|
||
"action": "created",
|
||
"issue": {
|
||
"number": 123,
|
||
"pull_request": {},
|
||
},
|
||
"comment": {"body": body},
|
||
}
|
||
assert agent.can_handle("issue_comment", event_data) is True
|
||
|
||
def test_pr_agent_ignores_summarize_on_non_pr(self):
|
||
"""Test that summarize command is ignored on regular issues."""
|
||
from agents.pr_agent import PRAgent
|
||
|
||
config = {
|
||
"agents": {
|
||
"pr": {
|
||
"enabled": True,
|
||
}
|
||
},
|
||
"interaction": {
|
||
"mention_prefix": "@codebot",
|
||
},
|
||
}
|
||
|
||
agent = PRAgent(config=config)
|
||
|
||
# Regular issue (no pull_request field)
|
||
event_data = {
|
||
"action": "created",
|
||
"issue": {
|
||
"number": 123,
|
||
# No pull_request field
|
||
},
|
||
"comment": {
|
||
"body": "@codebot summarize",
|
||
},
|
||
}
|
||
|
||
assert agent.can_handle("issue_comment", event_data) is False
|
||
|
||
def test_format_pr_summary_structure(self):
|
||
"""Test _format_pr_summary generates correct markdown structure."""
|
||
from agents.pr_agent import PRAgent
|
||
|
||
agent = PRAgent(config={})
|
||
|
||
summary_data = {
|
||
"summary": "This PR adds a new feature",
|
||
"change_type": "Feature",
|
||
"key_changes": {
|
||
"added": ["New authentication module", "User login endpoint"],
|
||
"modified": ["Updated config file"],
|
||
"removed": ["Deprecated legacy auth"],
|
||
},
|
||
"files_affected": [
|
||
{
|
||
"path": "src/auth.py",
|
||
"description": "New authentication module",
|
||
"change_type": "added",
|
||
},
|
||
{
|
||
"path": "config.yml",
|
||
"description": "Added auth settings",
|
||
"change_type": "modified",
|
||
},
|
||
],
|
||
"impact": {
|
||
"scope": "medium",
|
||
"description": "Adds authentication without breaking existing features",
|
||
},
|
||
}
|
||
|
||
result = agent._format_pr_summary(summary_data)
|
||
|
||
# Verify structure
|
||
assert "## 📋 Pull Request Summary" in result
|
||
assert "This PR adds a new feature" in result
|
||
assert "**Type:** ✨ Feature" in result
|
||
assert "## Changes" in result
|
||
assert "**✅ Added:**" in result
|
||
assert "**📝 Modified:**" in result
|
||
assert "**❌ Removed:**" in result
|
||
assert "## Files Affected" in result
|
||
assert "➕ `src/auth.py`" in result
|
||
assert "📝 `config.yml`" in result
|
||
assert "## Impact" in result
|
||
assert "🟡 **Scope:** Medium" in result
|
||
|
||
def test_format_pr_summary_change_types(self):
|
||
"""Test that all change types have correct emojis."""
|
||
from agents.pr_agent import PRAgent
|
||
|
||
agent = PRAgent(config={})
|
||
|
||
change_types = {
|
||
"Feature": "✨",
|
||
"Bugfix": "🐛",
|
||
"Refactor": "♻️",
|
||
"Documentation": "📚",
|
||
"Testing": "🧪",
|
||
"Mixed": "🔀",
|
||
}
|
||
|
||
for change_type, expected_emoji in change_types.items():
|
||
summary_data = {
|
||
"summary": "Test",
|
||
"change_type": change_type,
|
||
"key_changes": {},
|
||
"files_affected": [],
|
||
"impact": {},
|
||
}
|
||
|
||
result = agent._format_pr_summary(summary_data)
|
||
assert f"**Type:** {expected_emoji} {change_type}" in result
|
||
|
||
def test_config_has_auto_summary_settings(self):
|
||
"""Verify config.yml has auto_summary configuration."""
|
||
config_path = os.path.join(
|
||
os.path.dirname(__file__), "..", "tools", "ai-review", "config.yml"
|
||
)
|
||
|
||
import yaml
|
||
|
||
with open(config_path) as f:
|
||
config = yaml.safe_load(f)
|
||
|
||
assert "agents" in config
|
||
assert "pr" in config["agents"]
|
||
assert "auto_summary" in config["agents"]["pr"]
|
||
assert "enabled" in config["agents"]["pr"]["auto_summary"]
|
||
assert "post_as_comment" in config["agents"]["pr"]["auto_summary"]
|
||
|
||
|
||
class TestChangelogGeneration:
|
||
"""Test changelog generation functionality."""
|
||
|
||
def test_changelog_prompt_exists(self):
|
||
"""Verify changelog.md prompt file exists."""
|
||
prompt_path = os.path.join(
|
||
os.path.dirname(__file__),
|
||
"..",
|
||
"tools",
|
||
"ai-review",
|
||
"prompts",
|
||
"changelog.md",
|
||
)
|
||
assert os.path.exists(prompt_path), "changelog.md prompt file not found"
|
||
|
||
def test_changelog_prompt_formatting(self):
|
||
"""Test that changelog.md can be formatted with placeholders."""
|
||
prompt_path = os.path.join(
|
||
os.path.dirname(__file__),
|
||
"..",
|
||
"tools",
|
||
"ai-review",
|
||
"prompts",
|
||
"changelog.md",
|
||
)
|
||
with open(prompt_path) as f:
|
||
prompt = f.read()
|
||
|
||
# Check for key elements
|
||
assert "changelog" in prompt.lower()
|
||
assert "added" in prompt.lower()
|
||
assert "changed" in prompt.lower()
|
||
assert "fixed" in prompt.lower()
|
||
assert "breaking" in prompt.lower()
|
||
assert "JSON" in prompt
|
||
|
||
# Should be able to format with pr_title and pr_description
|
||
try:
|
||
formatted = prompt.format(
|
||
pr_title="Test PR Title", pr_description="Test PR Description"
|
||
)
|
||
assert "Test PR Title" in formatted
|
||
assert "Test PR Description" in formatted
|
||
except KeyError as e:
|
||
pytest.fail(f"Prompt has unescaped placeholders: {e}")
|
||
|
||
def test_pr_agent_can_handle_changelog_command(self):
|
||
"""Test that PRAgent can handle @codebot changelog in PR comments."""
|
||
from agents.pr_agent import PRAgent
|
||
|
||
config = {
|
||
"agents": {
|
||
"pr": {
|
||
"enabled": True,
|
||
}
|
||
},
|
||
"interaction": {
|
||
"mention_prefix": "@codebot",
|
||
},
|
||
}
|
||
|
||
agent = PRAgent(config=config)
|
||
|
||
# Test changelog command in PR comment
|
||
event_data = {
|
||
"action": "created",
|
||
"issue": {
|
||
"number": 123,
|
||
"pull_request": {}, # Indicates this is a PR
|
||
},
|
||
"comment": {
|
||
"body": "@codebot changelog please",
|
||
},
|
||
}
|
||
|
||
assert agent.can_handle("issue_comment", event_data) is True
|
||
|
||
def test_pr_agent_can_handle_changelog_case_insensitive(self):
|
||
"""Test that changelog command is case-insensitive."""
|
||
from agents.pr_agent import PRAgent
|
||
|
||
config = {
|
||
"agents": {
|
||
"pr": {
|
||
"enabled": True,
|
||
}
|
||
},
|
||
"interaction": {
|
||
"mention_prefix": "@codebot",
|
||
},
|
||
}
|
||
|
||
agent = PRAgent(config=config)
|
||
|
||
# Test various casings
|
||
for body in [
|
||
"@codebot CHANGELOG",
|
||
"@codebot Changelog",
|
||
"@codebot ChAnGeLoG",
|
||
]:
|
||
event_data = {
|
||
"action": "created",
|
||
"issue": {
|
||
"number": 123,
|
||
"pull_request": {},
|
||
},
|
||
"comment": {"body": body},
|
||
}
|
||
assert agent.can_handle("issue_comment", event_data) is True
|
||
|
||
def test_pr_agent_ignores_changelog_on_non_pr(self):
|
||
"""Test that changelog command is ignored on regular issues."""
|
||
from agents.pr_agent import PRAgent
|
||
|
||
config = {
|
||
"agents": {
|
||
"pr": {
|
||
"enabled": True,
|
||
}
|
||
},
|
||
"interaction": {
|
||
"mention_prefix": "@codebot",
|
||
},
|
||
}
|
||
|
||
agent = PRAgent(config=config)
|
||
|
||
# Regular issue (no pull_request field)
|
||
event_data = {
|
||
"action": "created",
|
||
"issue": {
|
||
"number": 123,
|
||
# No pull_request field
|
||
},
|
||
"comment": {
|
||
"body": "@codebot changelog",
|
||
},
|
||
}
|
||
|
||
assert agent.can_handle("issue_comment", event_data) is False
|
||
|
||
def test_format_changelog_structure(self):
|
||
"""Test _format_changelog generates correct Keep a Changelog format."""
|
||
from agents.pr_agent import PRAgent
|
||
|
||
agent = PRAgent(config={})
|
||
|
||
changelog_data = {
|
||
"changelog": {
|
||
"added": ["User authentication system", "Password reset feature"],
|
||
"changed": ["Updated database schema", "Refactored login endpoint"],
|
||
"fixed": ["Session timeout bug", "Security vulnerability"],
|
||
"security": ["Fixed XSS vulnerability in user input"],
|
||
},
|
||
"breaking_changes": ["Removed legacy API endpoint /api/v1/old"],
|
||
"technical_details": {
|
||
"files_changed": 15,
|
||
"insertions": 450,
|
||
"deletions": 120,
|
||
"main_components": ["auth/", "api/users/", "database/"],
|
||
},
|
||
}
|
||
|
||
result = agent._format_changelog(changelog_data, 123)
|
||
|
||
# Verify structure
|
||
assert "## 📋 Changelog for PR #123" in result
|
||
assert "### ✨ Added" in result
|
||
assert "User authentication system" in result
|
||
assert "### 🔄 Changed" in result
|
||
assert "Updated database schema" in result
|
||
assert "### 🐛 Fixed" in result
|
||
assert "Session timeout bug" in result
|
||
assert "### 🔒 Security" in result
|
||
assert "Fixed XSS vulnerability" in result
|
||
assert "### ⚠️ BREAKING CHANGES" in result
|
||
assert "Removed legacy API endpoint" in result
|
||
assert "### 📊 Technical Details" in result
|
||
assert "Files changed:** 15" in result
|
||
assert "+450 / -120" in result
|
||
assert "auth/" in result
|
||
|
||
def test_format_changelog_empty_sections(self):
|
||
"""Test that empty sections are not included in output."""
|
||
from agents.pr_agent import PRAgent
|
||
|
||
agent = PRAgent(config={})
|
||
|
||
changelog_data = {
|
||
"changelog": {
|
||
"added": ["New feature"],
|
||
"changed": [],
|
||
"deprecated": [],
|
||
"removed": [],
|
||
"fixed": [],
|
||
"security": [],
|
||
},
|
||
"breaking_changes": [],
|
||
"technical_details": {},
|
||
}
|
||
|
||
result = agent._format_changelog(changelog_data, 123)
|
||
|
||
# Only Added section should be present
|
||
assert "### ✨ Added" in result
|
||
assert "New feature" in result
|
||
assert "### 🔄 Changed" not in result
|
||
assert "### 🗑️ Removed" not in result
|
||
assert "### 🐛 Fixed" not in result
|
||
assert "### ⚠️ BREAKING CHANGES" not in result
|
||
|
||
def test_config_has_changelog_command(self):
|
||
"""Verify config.yml has changelog command."""
|
||
config_path = os.path.join(
|
||
os.path.dirname(__file__), "..", "tools", "ai-review", "config.yml"
|
||
)
|
||
|
||
import yaml
|
||
|
||
with open(config_path) as f:
|
||
config = yaml.safe_load(f)
|
||
|
||
assert "interaction" in config
|
||
assert "commands" in config["interaction"]
|
||
assert "changelog" in config["interaction"]["commands"]
|
||
|
||
|
||
class TestDiffExplanation:
|
||
"""Test diff explanation functionality."""
|
||
|
||
def test_explain_diff_prompt_exists(self):
    """The explain_diff.md prompt template must ship with the tool."""
    prompts_dir = os.path.join(
        os.path.dirname(__file__), "..", "tools", "ai-review", "prompts"
    )
    path = os.path.join(prompts_dir, "explain_diff.md")
    assert os.path.exists(path), "explain_diff.md prompt file not found"
def test_explain_diff_prompt_formatting(self):
|
||
"""Test that explain_diff.md can be formatted with placeholders."""
|
||
prompt_path = os.path.join(
|
||
os.path.dirname(__file__),
|
||
"..",
|
||
"tools",
|
||
"ai-review",
|
||
"prompts",
|
||
"explain_diff.md",
|
||
)
|
||
with open(prompt_path) as f:
|
||
prompt = f.read()
|
||
|
||
# Check for key elements
|
||
assert "plain language" in prompt.lower() or "plain-language" in prompt.lower()
|
||
assert "overview" in prompt.lower()
|
||
assert "key changes" in prompt.lower() or "key_changes" in prompt.lower()
|
||
assert "architecture" in prompt.lower()
|
||
assert "JSON" in prompt
|
||
|
||
# Should be able to format with pr_title and pr_description
|
||
try:
|
||
formatted = prompt.format(
|
||
pr_title="Test PR Title", pr_description="Test PR Description"
|
||
)
|
||
assert "Test PR Title" in formatted
|
||
assert "Test PR Description" in formatted
|
||
except KeyError as e:
|
||
pytest.fail(f"Prompt has unescaped placeholders: {e}")
|
||
|
||
def test_pr_agent_can_handle_explain_diff_command(self):
|
||
"""Test that PRAgent can handle @codebot explain-diff in PR comments."""
|
||
from agents.pr_agent import PRAgent
|
||
|
||
config = {
|
||
"agents": {
|
||
"pr": {
|
||
"enabled": True,
|
||
}
|
||
},
|
||
"interaction": {
|
||
"mention_prefix": "@codebot",
|
||
},
|
||
}
|
||
|
||
agent = PRAgent(config=config)
|
||
|
||
# Test explain-diff command in PR comment
|
||
event_data = {
|
||
"action": "created",
|
||
"issue": {
|
||
"number": 123,
|
||
"pull_request": {}, # Indicates this is a PR
|
||
},
|
||
"comment": {
|
||
"body": "@codebot explain-diff please",
|
||
},
|
||
}
|
||
|
||
assert agent.can_handle("issue_comment", event_data) is True
|
||
|
||
def test_pr_agent_can_handle_explain_diff_case_insensitive(self):
|
||
"""Test that explain-diff command is case-insensitive."""
|
||
from agents.pr_agent import PRAgent
|
||
|
||
config = {
|
||
"agents": {
|
||
"pr": {
|
||
"enabled": True,
|
||
}
|
||
},
|
||
"interaction": {
|
||
"mention_prefix": "@codebot",
|
||
},
|
||
}
|
||
|
||
agent = PRAgent(config=config)
|
||
|
||
# Test various casings
|
||
for body in [
|
||
"@codebot EXPLAIN-DIFF",
|
||
"@codebot Explain-Diff",
|
||
"@codebot ExPlAiN-dIfF",
|
||
]:
|
||
event_data = {
|
||
"action": "created",
|
||
"issue": {
|
||
"number": 123,
|
||
"pull_request": {},
|
||
},
|
||
"comment": {"body": body},
|
||
}
|
||
assert agent.can_handle("issue_comment", event_data) is True
|
||
|
||
def test_pr_agent_ignores_explain_diff_on_non_pr(self):
|
||
"""Test that explain-diff command is ignored on regular issues."""
|
||
from agents.pr_agent import PRAgent
|
||
|
||
config = {
|
||
"agents": {
|
||
"pr": {
|
||
"enabled": True,
|
||
}
|
||
},
|
||
"interaction": {
|
||
"mention_prefix": "@codebot",
|
||
},
|
||
}
|
||
|
||
agent = PRAgent(config=config)
|
||
|
||
# Regular issue (no pull_request field)
|
||
event_data = {
|
||
"action": "created",
|
||
"issue": {
|
||
"number": 123,
|
||
# No pull_request field
|
||
},
|
||
"comment": {
|
||
"body": "@codebot explain-diff",
|
||
},
|
||
}
|
||
|
||
assert agent.can_handle("issue_comment", event_data) is False
|
||
|
||
def test_format_diff_explanation_structure(self):
|
||
"""Test _format_diff_explanation generates correct structure."""
|
||
from agents.pr_agent import PRAgent
|
||
|
||
agent = PRAgent(config={})
|
||
|
||
explanation_data = {
|
||
"overview": "This PR adds user authentication using JWT tokens",
|
||
"key_changes": [
|
||
{
|
||
"file": "auth/jwt.py",
|
||
"status": "new",
|
||
"explanation": "Creates JSON Web Tokens for authenticated users",
|
||
"why_it_matters": "Enables secure stateless authentication",
|
||
},
|
||
{
|
||
"file": "api/users.py",
|
||
"status": "modified",
|
||
"explanation": "Added login endpoint",
|
||
"why_it_matters": "Users can now authenticate",
|
||
},
|
||
],
|
||
"architecture_impact": {
|
||
"description": "Introduces authentication layer across all API endpoints",
|
||
"new_dependencies": ["PyJWT", "bcrypt"],
|
||
"affected_components": ["API", "Database"],
|
||
},
|
||
"breaking_changes": ["All API endpoints now require authentication"],
|
||
"technical_details": {
|
||
"files_changed": 5,
|
||
"insertions": 200,
|
||
"deletions": 10,
|
||
"main_components": ["auth/", "api/"],
|
||
},
|
||
}
|
||
|
||
result = agent._format_diff_explanation(explanation_data, 123)
|
||
|
||
# Verify structure
|
||
assert "## 📖 Code Changes Explained (PR #123)" in result
|
||
assert "### 🎯 Overview" in result
|
||
assert "user authentication using JWT tokens" in result
|
||
assert "### 🔍 What Changed" in result
|
||
assert "➕ `auth/jwt.py` (new)" in result
|
||
assert "📝 `api/users.py` (modified)" in result
|
||
assert "**What changed:**" in result
|
||
assert "**Why it matters:**" in result
|
||
assert "### 🏗️ Architecture Impact" in result
|
||
assert "PyJWT" in result
|
||
assert "### ⚠️ Breaking Changes" in result
|
||
assert "All API endpoints now require authentication" in result
|
||
assert "### 📊 Technical Summary" in result
|
||
assert "Files changed:** 5" in result
|
||
assert "+200 / -10" in result
|
||
|
||
def test_format_diff_explanation_empty_sections(self):
|
||
"""Test that empty sections are not included in output."""
|
||
from agents.pr_agent import PRAgent
|
||
|
||
agent = PRAgent(config={})
|
||
|
||
explanation_data = {
|
||
"overview": "Small bugfix",
|
||
"key_changes": [
|
||
{
|
||
"file": "app.py",
|
||
"status": "modified",
|
||
"explanation": "Fixed typo",
|
||
"why_it_matters": "",
|
||
}
|
||
],
|
||
"architecture_impact": {},
|
||
"breaking_changes": [],
|
||
"technical_details": {},
|
||
}
|
||
|
||
result = agent._format_diff_explanation(explanation_data, 123)
|
||
|
||
# Only overview and key changes should be present
|
||
assert "### 🎯 Overview" in result
|
||
assert "### 🔍 What Changed" in result
|
||
assert "### 🏗️ Architecture Impact" not in result
|
||
assert "### ⚠️ Breaking Changes" not in result
|
||
|
||
def test_config_has_explain_diff_command(self):
|
||
"""Verify config.yml has explain-diff command."""
|
||
config_path = os.path.join(
|
||
os.path.dirname(__file__), "..", "tools", "ai-review", "config.yml"
|
||
)
|
||
|
||
import yaml
|
||
|
||
with open(config_path) as f:
|
||
config = yaml.safe_load(f)
|
||
|
||
assert "interaction" in config
|
||
assert "commands" in config["interaction"]
|
||
assert "explain-diff" in config["interaction"]["commands"]
|
||
|
||
|
||
if __name__ == "__main__":
    # Allow running this file directly (python tests/<file>.py) in addition
    # to the normal `pytest tests/ -v` invocation.
    pytest.main([__file__, "-v"])