diff --git a/.env.example b/.env.example index 9e99d47..de8b2bb 100644 --- a/.env.example +++ b/.env.example @@ -7,7 +7,7 @@ DISCORD_TOKEN=your_discord_bot_token_here # =========================================== # AI Provider Configuration # =========================================== -# Available providers: "openai", "openrouter", "anthropic" +# Available providers: "openai", "openrouter", "anthropic", "gemini" AI_PROVIDER=openai # Model to use (e.g., gpt-4o, gpt-4o-mini, claude-3-5-sonnet, etc.) @@ -17,6 +17,7 @@ AI_MODEL=gpt-4o OPENAI_API_KEY=sk-xxx OPENROUTER_API_KEY=sk-or-xxx ANTHROPIC_API_KEY=sk-ant-xxx +GEMINI_API_KEY=xxx # Maximum tokens in AI response (100-4096) AI_MAX_TOKENS=1024 diff --git a/.gitea/workflows/ai-chat.yml b/.gitea/workflows/ai-chat.yml new file mode 100644 index 0000000..19b7918 --- /dev/null +++ b/.gitea/workflows/ai-chat.yml @@ -0,0 +1,61 @@ +name: AI Chat (Bartender) + +# WORKFLOW ROUTING: +# This workflow handles FREE-FORM questions/chat (no specific command) +# Other workflows: ai-issue-triage.yml (@codebot triage), ai-comment-reply.yml (specific commands) +# This is the FALLBACK for any @codebot mention that isn't a known command + +on: + issue_comment: + types: [created] + +# CUSTOMIZE YOUR BOT NAME: +# Change '@codebot' in all conditions below to match your config.yml mention_prefix +# Examples: '@bartender', '@uni', '@joey', '@codebot' + +jobs: + ai-chat: + # Only run if comment mentions the bot but NOT a specific command + # This prevents duplicate runs with ai-comment-reply.yml and ai-issue-triage.yml + # CRITICAL: Ignore bot's own comments to prevent infinite loops (bot username: Bartender) + if: | + github.event.comment.user.login != 'Bartender' && + contains(github.event.comment.body, '@codebot') && + !contains(github.event.comment.body, '@codebot triage') && + !contains(github.event.comment.body, '@codebot help') && + !contains(github.event.comment.body, '@codebot explain') && + !contains(github.event.comment.body, 
'@codebot suggest') && + !contains(github.event.comment.body, '@codebot security') && + !contains(github.event.comment.body, '@codebot summarize') && + !contains(github.event.comment.body, '@codebot changelog') && + !contains(github.event.comment.body, '@codebot explain-diff') && + !contains(github.event.comment.body, '@codebot review-again') && + !contains(github.event.comment.body, '@codebot setup-labels') + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: actions/checkout@v4 + with: + repository: Hiddenden/openrabbit + path: .ai-review + token: ${{ secrets.AI_REVIEW_TOKEN }} + + - uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - run: pip install requests pyyaml + + - name: Run AI Chat + env: + AI_REVIEW_TOKEN: ${{ secrets.AI_REVIEW_TOKEN }} + AI_REVIEW_REPO: ${{ gitea.repository }} + AI_REVIEW_API_URL: https://git.hiddenden.cafe/api/v1 + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }} + OLLAMA_HOST: ${{ secrets.OLLAMA_HOST }} + SEARXNG_URL: ${{ secrets.SEARXNG_URL }} + # SECURITY: pass the untrusted comment body via env, never inline into the script + COMMENT_BODY: ${{ gitea.event.comment.body }} + run: | + cd .ai-review/tools/ai-review + python main.py comment "${{ gitea.repository }}" "${{ gitea.event.issue.number }}" "$COMMENT_BODY" diff --git a/.gitea/workflows/ai-codebase-review.yml b/.gitea/workflows/ai-codebase-review.yml new file mode 100644 index 0000000..2269688 --- /dev/null +++ b/.gitea/workflows/ai-codebase-review.yml @@ -0,0 +1,58 @@ +name: AI Codebase Quality Review + +on: + # Weekly scheduled run + # schedule: + # - cron: "0 0 * * 0" # Every Sunday at midnight + + # Manual trigger + workflow_dispatch: + inputs: + report_type: + description: "Type of report to generate" + required: false + default: "full" + type: choice + options: + - full + - security + - quick + +jobs: + ai-codebase-review: + runs-on: ubuntu-latest + + steps: + # Checkout the repository + - uses: actions/checkout@v4 + with: + fetch-depth: 0 # Full history for analysis + + # Checkout central AI 
tooling + - uses: actions/checkout@v4 + with: + repository: Hiddenden/openrabbit + path: .ai-review + token: ${{ secrets.AI_REVIEW_TOKEN }} + + # Setup Python + - uses: actions/setup-python@v5 + with: + python-version: "3.11" + + # Install dependencies + - run: pip install requests pyyaml + + # Run AI codebase analysis + - name: Run AI Codebase Analysis + env: + AI_REVIEW_TOKEN: ${{ secrets.AI_REVIEW_TOKEN }} + AI_REVIEW_REPO: ${{ gitea.repository }} + AI_REVIEW_API_URL: https://git.hiddenden.cafe/api/v1 + + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }} + OLLAMA_HOST: ${{ secrets.OLLAMA_HOST }} + run: | + cd .ai-review/tools/ai-review + python main.py codebase ${{ gitea.repository }} diff --git a/.gitea/workflows/ai-comment-reply.yml b/.gitea/workflows/ai-comment-reply.yml new file mode 100644 index 0000000..11f70c3 --- /dev/null +++ b/.gitea/workflows/ai-comment-reply.yml @@ -0,0 +1,98 @@ +name: AI Comment Reply + +# WORKFLOW ROUTING: +# This workflow handles SPECIFIC commands: help, explain, suggest, security, summarize, changelog, explain-diff, review-again, setup-labels +# Other workflows: ai-issue-triage.yml (@codebot triage), ai-chat.yml (free-form questions) + +on: + issue_comment: + types: [created] + +# CUSTOMIZE YOUR BOT NAME: +# Change '@codebot' in the 'if' condition below to match your config.yml mention_prefix +# Examples: '@bartender', '@uni', '@joey', '@codebot' + +jobs: + ai-reply: + runs-on: ubuntu-latest + # Only run for specific commands (not free-form chat or triage) + # This prevents duplicate runs with ai-chat.yml and ai-issue-triage.yml + # CRITICAL: Ignore bot's own comments to prevent infinite loops (bot username: Bartender) + if: | + github.event.comment.user.login != 'Bartender' && + (contains(github.event.comment.body, '@codebot help') || + contains(github.event.comment.body, '@codebot explain') || + contains(github.event.comment.body, '@codebot suggest') || + 
contains(github.event.comment.body, '@codebot security') || + contains(github.event.comment.body, '@codebot summarize') || + contains(github.event.comment.body, '@codebot changelog') || + contains(github.event.comment.body, '@codebot explain-diff') || + contains(github.event.comment.body, '@codebot review-again') || + contains(github.event.comment.body, '@codebot setup-labels')) + steps: + - uses: actions/checkout@v4 + + - uses: actions/checkout@v4 + with: + repository: Hiddenden/openrabbit + path: .ai-review + token: ${{ secrets.AI_REVIEW_TOKEN }} + + - uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - run: pip install requests pyyaml + + - name: Run AI Comment Response + env: + AI_REVIEW_TOKEN: ${{ secrets.AI_REVIEW_TOKEN }} + AI_REVIEW_API_URL: https://git.hiddenden.cafe/api/v1 + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }} + OLLAMA_HOST: ${{ secrets.OLLAMA_HOST }} + run: | + cd .ai-review/tools/ai-review + + # Determine if this is a PR or issue comment + IS_PR="${{ gitea.event.issue.pull_request != null }}" + REPO="${{ gitea.repository }}" + ISSUE_NUMBER="${{ gitea.event.issue.number }}" + + # Validate inputs + if [ -z "$REPO" ] || [ -z "$ISSUE_NUMBER" ]; then + echo "Error: Missing required parameters" + exit 1 + fi + + # Validate repository format (owner/repo) + if ! 
echo "$REPO" | grep -qE '^[a-zA-Z0-9_-]+/[a-zA-Z0-9_-]+$'; then + echo "Error: Invalid repository format: $REPO" + exit 1 + fi + + if [ "$IS_PR" = "true" ]; then + # This is a PR comment - use safe dispatch with minimal event data + # Build minimal event payload (does not include sensitive user data) + EVENT_DATA=$(cat <=2.3.0 # AI Providers anthropic>=0.18.0 +google-genai>=1.0.0 openai>=1.12.0 # HTTP Client diff --git a/src/daemon_boyfriend/config.py b/src/daemon_boyfriend/config.py index b1f064c..0c19893 100644 --- a/src/daemon_boyfriend/config.py +++ b/src/daemon_boyfriend/config.py @@ -19,7 +19,7 @@ class Settings(BaseSettings): discord_token: str = Field(..., description="Discord bot token") # AI Provider Configuration - ai_provider: Literal["openai", "openrouter", "anthropic"] = Field( + ai_provider: Literal["openai", "openrouter", "anthropic", "gemini"] = Field( "openai", description="Which AI provider to use" ) ai_model: str = Field("gpt-4o", description="AI model to use") @@ -30,6 +30,7 @@ class Settings(BaseSettings): openai_api_key: str | None = Field(None, description="OpenAI API key") openrouter_api_key: str | None = Field(None, description="OpenRouter API key") anthropic_api_key: str | None = Field(None, description="Anthropic API key") + gemini_api_key: str | None = Field(None, description="Google Gemini API key") # Logging log_level: str = Field("INFO", description="Logging level") @@ -66,6 +67,7 @@ class Settings(BaseSettings): "openai": self.openai_api_key, "openrouter": self.openrouter_api_key, "anthropic": self.anthropic_api_key, + "gemini": self.gemini_api_key, } key = key_map.get(self.ai_provider) if not key: diff --git a/src/daemon_boyfriend/services/ai_service.py b/src/daemon_boyfriend/services/ai_service.py index fce3193..775ca28 100644 --- a/src/daemon_boyfriend/services/ai_service.py +++ b/src/daemon_boyfriend/services/ai_service.py @@ -9,6 +9,7 @@ from .providers import ( AIProvider, AIResponse, AnthropicProvider, + GeminiProvider, 
Message, OpenAIProvider, OpenRouterProvider, @@ -16,7 +17,7 @@ from .providers import ( logger = logging.getLogger(__name__) -ProviderType = Literal["openai", "openrouter", "anthropic"] +ProviderType = Literal["openai", "openrouter", "anthropic", "gemini"] class AIService: @@ -45,6 +46,7 @@ class AIService: "openai": OpenAIProvider, "openrouter": OpenRouterProvider, "anthropic": AnthropicProvider, + "gemini": GeminiProvider, } provider_class = providers.get(provider_type) diff --git a/src/daemon_boyfriend/services/providers/__init__.py b/src/daemon_boyfriend/services/providers/__init__.py index 71e99b5..b5e4b68 100644 --- a/src/daemon_boyfriend/services/providers/__init__.py +++ b/src/daemon_boyfriend/services/providers/__init__.py @@ -2,6 +2,7 @@ from .anthropic import AnthropicProvider from .base import AIProvider, AIResponse, Message +from .gemini import GeminiProvider from .openai import OpenAIProvider from .openrouter import OpenRouterProvider @@ -12,4 +13,5 @@ __all__ = [ "OpenAIProvider", "OpenRouterProvider", "AnthropicProvider", + "GeminiProvider", ] diff --git a/src/daemon_boyfriend/services/providers/gemini.py b/src/daemon_boyfriend/services/providers/gemini.py new file mode 100644 index 0000000..8ef1350 --- /dev/null +++ b/src/daemon_boyfriend/services/providers/gemini.py @@ -0,0 +1,71 @@ +"""Google Gemini provider implementation.""" + +import logging + +from google import genai +from google.genai import types + +from .base import AIProvider, AIResponse, Message + +logger = logging.getLogger(__name__) + + +class GeminiProvider(AIProvider): + """Google Gemini API provider.""" + + def __init__(self, api_key: str, model: str = "gemini-2.0-flash") -> None: + self.client = genai.Client(api_key=api_key) + self.model = model + + @property + def provider_name(self) -> str: + return "gemini" + + async def generate( + self, + messages: list[Message], + system_prompt: str | None = None, + max_tokens: int = 1024, + temperature: float = 0.7, + ) -> AIResponse: + 
"""Generate a response using Gemini.""" + # Build contents list (Gemini format) + contents = [] + for m in messages: + # Gemini uses "user" and "model" roles + role = "model" if m.role == "assistant" else m.role + contents.append(types.Content(role=role, parts=[types.Part(text=m.content)])) + + logger.debug(f"Sending {len(contents)} messages to Gemini") + + # Build config + config = types.GenerateContentConfig( + max_output_tokens=max_tokens, + temperature=temperature, + ) + if system_prompt: + config.system_instruction = system_prompt + + response = await self.client.aio.models.generate_content( + model=self.model, + contents=contents, + config=config, + ) + + # Extract text from response + content = response.text or "" + + # Build usage dict + usage = {} + if response.usage_metadata: + usage = { + "prompt_tokens": response.usage_metadata.prompt_token_count or 0, + "completion_tokens": response.usage_metadata.candidates_token_count or 0, + "total_tokens": response.usage_metadata.total_token_count or 0, + } + + return AIResponse( + content=content, + model=self.model, + usage=usage, + )