"""AI code-review bot: diffs the current branch against origin/main, asks a
configured LLM provider for a structured review, and creates or updates a
review comment on the corresponding Gitea pull request."""
import json
|
|
import os
|
|
import subprocess
|
|
import sys
|
|
|
|
import requests
|
|
import yaml
|
|
from comment import to_markdown
|
|
|
|
# Directory containing this script; config and prompts are resolved against it.
ROOT = os.path.dirname(__file__)

# Load the reviewer configuration once at import time.
# Use a context manager so the file handle is closed promptly (the original
# `yaml.safe_load(open(...))` leaked the handle) and be explicit about encoding.
with open(f"{ROOT}/config.yml", encoding="utf-8") as _cfg_file:
    CFG = yaml.safe_load(_cfg_file)

# Marker to identify the AI comment on later runs (hidden in rendered Markdown).
AI_MARKER = "<!-- AI_CODE_REVIEW -->"

# Disclaimer text to prepend
AI_DISCLAIMER = (
    "**Note:** This review was generated by an AI assistant. "
    "While it aims to be accurate and helpful, it may contain mistakes "
    "or miss important issues. Please verify all findings before taking action."
)
|
|
|
|
# -------------------------------
|
|
# Helper functions
|
|
# -------------------------------
|
|
|
|
|
|
def get_diff() -> str:
    """Return the git diff of HEAD against origin/main, limited by config.

    When the diff exceeds ``review.max_diff_lines`` lines, only the first
    ``max_diff_lines`` lines are returned (joined with newlines).
    """
    diff = subprocess.check_output(["git", "diff", "origin/main...HEAD"], text=True)
    limit = CFG["review"]["max_diff_lines"]
    lines = diff.splitlines()
    if len(lines) <= limit:
        return diff
    # Truncate oversized diffs so the prompt stays within model limits.
    return "\n".join(lines[:limit])
|
|
|
|
|
|
def build_prompt(diff: str) -> str:
    """Prepare the AI prompt by appending the diff to the base template.

    Args:
        diff: Unified diff text to be reviewed.

    Returns:
        The full prompt string sent to the LLM.
    """
    # Context manager closes the handle (original `open(...).read()` leaked it);
    # explicit encoding avoids platform-dependent decoding of the template.
    with open(f"{ROOT}/prompts/base.md", encoding="utf-8") as f:
        base = f.read()
    return f"{base}\n\nDIFF:\n{diff}"
|
|
|
|
|
|
def _chat_completion(url: str, api_key: str, payload: dict) -> str:
    """POST an OpenAI-style chat-completions request and return the reply text."""
    r = requests.post(
        url,
        headers={
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        },
        json=payload,
        timeout=60,
    )
    # Fail loudly on HTTP errors; otherwise an error payload surfaces later
    # as an opaque KeyError on "choices".
    r.raise_for_status()
    return r.json()["choices"][0]["message"]["content"]


def call_llm(prompt: str) -> str:
    """Call the configured LLM provider and return its raw text response.

    The provider is chosen by ``CFG["provider"]``; API keys / hosts come from
    the environment (OPENAI_API_KEY, OPENROUTER_API_KEY, OLLAMA_HOST).

    Raises:
        RuntimeError: When the configured provider is not recognized.
        requests.HTTPError: When the provider returns an HTTP error status.
    """
    provider = CFG["provider"]

    if provider == "openai":
        return _chat_completion(
            "https://api.openai.com/v1/chat/completions",
            os.environ["OPENAI_API_KEY"],
            {
                "model": CFG["model"]["openai"],
                "temperature": CFG["temperature"],
                "messages": [{"role": "user", "content": prompt}],
            },
        )

    if provider == "openrouter":
        return _chat_completion(
            "https://openrouter.ai/api/v1/chat/completions",
            os.environ["OPENROUTER_API_KEY"],
            {
                "model": CFG["model"]["openrouter"],
                "messages": [{"role": "user", "content": prompt}],
            },
        )

    if provider == "ollama":
        # Ollama uses its own /api/generate schema, not chat-completions.
        r = requests.post(
            f"{os.environ['OLLAMA_HOST']}/api/generate",
            json={
                "model": CFG["model"]["ollama"],
                "prompt": prompt,
                "stream": False,
            },
            timeout=120,  # local models can be slow; allow extra time
        )
        r.raise_for_status()
        return r.json()["response"]

    raise RuntimeError(f"Unknown provider: {provider!r}")
|
|
|
|
|
|
# -------------------------------
|
|
# Gitea PR comment functions
|
|
# -------------------------------
|
|
|
|
|
|
def find_existing_comment() -> int | None:
    """Find the existing AI review comment on the PR, if any.

    Lists the PR's issue comments via the Gitea API and scans their bodies
    for ``AI_MARKER``.

    Returns:
        The comment id to update, or ``None`` when no AI comment exists yet.

    Raises:
        requests.HTTPError: When the comment-list request fails.
    """
    url = (
        f"{os.environ['AI_REVIEW_API_URL']}/repos/"
        f"{os.environ['AI_REVIEW_REPO']}/issues/"
        f"{os.environ['AI_REVIEW_PR_NUMBER']}/comments"
    )

    r = requests.get(
        url,
        headers={"Authorization": f"token {os.environ['AI_REVIEW_TOKEN']}"},
        timeout=15,
    )
    # Without this, an API error payload would be iterated as if it were the
    # comment list, failing later with a confusing KeyError/TypeError.
    r.raise_for_status()

    for c in r.json():
        if AI_MARKER in c["body"]:
            return c["id"]

    return None
|
|
|
|
|
|
def upsert_pr_comment(markdown: str):
    """Create the AI review comment on the PR, or update it in place if present.

    Args:
        markdown: Full comment body (disclaimer + marker + rendered review).

    Raises:
        RuntimeError: When the Gitea API rejects the create/update request.
    """
    comment_id = find_existing_comment()
    headers = {
        "Authorization": f"token {os.environ['AI_REVIEW_TOKEN']}",
        "Content-Type": "application/json",
    }

    # timeout= added on both calls below: requests has no default timeout,
    # so a stalled API connection would hang the CI job indefinitely.
    if comment_id:
        # Edit the previous review comment rather than adding a new one.
        url = (
            f"{os.environ['AI_REVIEW_API_URL']}/repos/"
            f"{os.environ['AI_REVIEW_REPO']}/issues/comments/{comment_id}"
        )
        r = requests.patch(url, headers=headers, json={"body": markdown}, timeout=15)
    else:
        url = (
            f"{os.environ['AI_REVIEW_API_URL']}/repos/"
            f"{os.environ['AI_REVIEW_REPO']}/issues/"
            f"{os.environ['AI_REVIEW_PR_NUMBER']}/comments"
        )
        r = requests.post(url, headers=headers, json={"body": markdown}, timeout=15)

    if r.status_code not in (200, 201):
        raise RuntimeError(f"Failed to upsert PR comment: {r.text}")
|
|
|
|
|
|
# -------------------------------
|
|
# Main workflow
|
|
# -------------------------------
|
|
|
|
|
|
def main():
    """Run one review cycle: diff -> LLM -> PR comment -> CI gate."""
    diff = get_diff()
    if not diff.strip():
        # No changes against main; nothing to review.
        sys.exit(0)

    # NOTE(review): assumes the model returns bare JSON matching the schema
    # requested in prompts/base.md — confirm it never wraps it in code fences.
    review = json.loads(call_llm(build_prompt(diff)))

    # Render the structured review and tag it with the hidden marker so later
    # runs can locate and update this comment instead of posting a new one.
    body = AI_DISCLAIMER + "\n\n" + AI_MARKER + "\n" + to_markdown(review)
    upsert_pr_comment(body)

    # Gate CI: fail when severity reaches the configured threshold and the
    # model did not explicitly approve the change.
    threshold = CFG["review"]["fail_on_severity"]
    if review["overall_severity"] == threshold and not review.get("approval", False):
        sys.exit(1)
|
|
|
|
|
|
# Entry point when executed as a script (no side effects on import).
if __name__ == "__main__":
    main()
|