Merge pull request 'feature/google-gemini-provider' (#5) from feature/google-gemini-provider into main

Reviewed-on: MSC/Daemon-Boyfriend#5
This commit is contained in:
2026-01-11 19:41:09 +00:00
13 changed files with 403 additions and 7 deletions

View File

@@ -7,7 +7,7 @@ DISCORD_TOKEN=your_discord_bot_token_here
# =========================================== # ===========================================
# AI Provider Configuration # AI Provider Configuration
# =========================================== # ===========================================
# Available providers: "openai", "openrouter", "anthropic" # Available providers: "openai", "openrouter", "anthropic", "gemini"
AI_PROVIDER=openai AI_PROVIDER=openai
# Model to use (e.g., gpt-4o, gpt-4o-mini, claude-3-5-sonnet, etc.) # Model to use (e.g., gpt-4o, gpt-4o-mini, claude-3-5-sonnet, etc.)
@@ -17,6 +17,7 @@ AI_MODEL=gpt-4o
OPENAI_API_KEY=sk-xxx OPENAI_API_KEY=sk-xxx
OPENROUTER_API_KEY=sk-or-xxx OPENROUTER_API_KEY=sk-or-xxx
ANTHROPIC_API_KEY=sk-ant-xxx ANTHROPIC_API_KEY=sk-ant-xxx
GEMINI_API_KEY=xxx
# Maximum tokens in AI response (100-4096) # Maximum tokens in AI response (100-4096)
AI_MAX_TOKENS=1024 AI_MAX_TOKENS=1024

View File

@@ -0,0 +1,61 @@
name: AI Chat (Bartender)

# WORKFLOW ROUTING:
# This workflow handles FREE-FORM questions/chat (no specific command)
# Other workflows: ai-issue-triage.yml (@codebot triage), ai-comment-reply.yml (specific commands)
# This is the FALLBACK for any @codebot mention that isn't a known command

on:
  issue_comment:
    types: [created]

# CUSTOMIZE YOUR BOT NAME:
# Change '@codebot' in all conditions below to match your config.yml mention_prefix
# Examples: '@bartender', '@uni', '@joey', '@codebot'

jobs:
  ai-chat:
    # Only run if comment mentions the bot but NOT a specific command
    # This prevents duplicate runs with ai-comment-reply.yml and ai-issue-triage.yml
    # CRITICAL: Ignore bot's own comments to prevent infinite loops (bot username: Bartender)
    if: |
      github.event.comment.user.login != 'Bartender' &&
      contains(github.event.comment.body, '@codebot') &&
      !contains(github.event.comment.body, '@codebot triage') &&
      !contains(github.event.comment.body, '@codebot help') &&
      !contains(github.event.comment.body, '@codebot explain') &&
      !contains(github.event.comment.body, '@codebot suggest') &&
      !contains(github.event.comment.body, '@codebot security') &&
      !contains(github.event.comment.body, '@codebot summarize') &&
      !contains(github.event.comment.body, '@codebot changelog') &&
      !contains(github.event.comment.body, '@codebot explain-diff') &&
      !contains(github.event.comment.body, '@codebot review-again') &&
      !contains(github.event.comment.body, '@codebot setup-labels')
    runs-on: ubuntu-latest
    steps:
      # Checkout this repository
      - uses: actions/checkout@v4
      # Checkout the central AI tooling repository
      - uses: actions/checkout@v4
        with:
          repository: Hiddenden/openrabbit
          path: .ai-review
          token: ${{ secrets.AI_REVIEW_TOKEN }}
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      - run: pip install requests pyyaml
      - name: Run AI Chat
        env:
          AI_REVIEW_TOKEN: ${{ secrets.AI_REVIEW_TOKEN }}
          AI_REVIEW_REPO: ${{ gitea.repository }}
          AI_REVIEW_API_URL: https://git.hiddenden.cafe/api/v1
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}
          OLLAMA_HOST: ${{ secrets.OLLAMA_HOST }}
          SEARXNG_URL: ${{ secrets.SEARXNG_URL }}
          # SECURITY: the comment body is attacker-controlled. It must reach
          # the shell through an environment variable — interpolating
          # ${{ gitea.event.comment.body }} directly into `run:` allows
          # arbitrary command injection.
          COMMENT_BODY: ${{ gitea.event.comment.body }}
        run: |
          cd .ai-review/tools/ai-review
          python main.py comment "${{ gitea.repository }}" "${{ gitea.event.issue.number }}" "$COMMENT_BODY"

View File

@@ -0,0 +1,58 @@
name: AI Codebase Quality Review

on:
  # Weekly scheduled run (disabled — uncomment to enable)
  # schedule:
  #   - cron: "0 0 * * 0" # Every Sunday at midnight
  # Manual trigger
  workflow_dispatch:
    inputs:
      report_type:
        description: "Type of report to generate"
        required: false
        default: "full"
        type: choice
        options:
          - full
          - security
          - quick

jobs:
  ai-codebase-review:
    runs-on: ubuntu-latest
    steps:
      # Checkout the repository
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0 # Full history for analysis
      # Checkout central AI tooling
      - uses: actions/checkout@v4
        with:
          repository: Hiddenden/openrabbit
          path: .ai-review
          token: ${{ secrets.AI_REVIEW_TOKEN }}
      # Setup Python
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      # Install dependencies
      - run: pip install requests pyyaml
      # Run AI codebase analysis
      - name: Run AI Codebase Analysis
        env:
          AI_REVIEW_TOKEN: ${{ secrets.AI_REVIEW_TOKEN }}
          AI_REVIEW_REPO: ${{ gitea.repository }}
          AI_REVIEW_API_URL: https://git.hiddenden.cafe/api/v1
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}
          OLLAMA_HOST: ${{ secrets.OLLAMA_HOST }}
        run: |
          cd .ai-review/tools/ai-review
          python main.py codebase ${{ gitea.repository }}

View File

@@ -0,0 +1,98 @@
name: AI Comment Reply

# WORKFLOW ROUTING:
# This workflow handles SPECIFIC commands: help, explain, suggest, security, summarize, changelog, explain-diff, review-again, setup-labels
# Other workflows: ai-issue-triage.yml (@codebot triage), ai-chat.yml (free-form questions)

on:
  issue_comment:
    types: [created]

# CUSTOMIZE YOUR BOT NAME:
# Change '@codebot' in the 'if' condition below to match your config.yml mention_prefix
# Examples: '@bartender', '@uni', '@joey', '@codebot'

jobs:
  ai-reply:
    runs-on: ubuntu-latest
    # Only run for specific commands (not free-form chat or triage)
    # This prevents duplicate runs with ai-chat.yml and ai-issue-triage.yml
    # CRITICAL: Ignore bot's own comments to prevent infinite loops (bot username: Bartender)
    if: |
      github.event.comment.user.login != 'Bartender' &&
      (contains(github.event.comment.body, '@codebot help') ||
       contains(github.event.comment.body, '@codebot explain') ||
       contains(github.event.comment.body, '@codebot suggest') ||
       contains(github.event.comment.body, '@codebot security') ||
       contains(github.event.comment.body, '@codebot summarize') ||
       contains(github.event.comment.body, '@codebot changelog') ||
       contains(github.event.comment.body, '@codebot explain-diff') ||
       contains(github.event.comment.body, '@codebot review-again') ||
       contains(github.event.comment.body, '@codebot setup-labels'))
    steps:
      # Checkout this repository
      - uses: actions/checkout@v4
      # Checkout the central AI tooling repository
      - uses: actions/checkout@v4
        with:
          repository: Hiddenden/openrabbit
          path: .ai-review
          token: ${{ secrets.AI_REVIEW_TOKEN }}
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      - run: pip install requests pyyaml
      - name: Run AI Comment Response
        env:
          AI_REVIEW_TOKEN: ${{ secrets.AI_REVIEW_TOKEN }}
          AI_REVIEW_API_URL: https://git.hiddenden.cafe/api/v1
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}
          OLLAMA_HOST: ${{ secrets.OLLAMA_HOST }}
          # SECURITY: all event-derived values reach the shell via env vars.
          # Interpolating ${{ gitea.event.comment.body }} directly into the
          # script (even inside single quotes) allows command injection —
          # a single quote in a comment breaks out of the quoting.
          COMMENT_BODY: ${{ gitea.event.comment.body }}
          IS_PR: ${{ gitea.event.issue.pull_request != null }}
          REPO: ${{ gitea.repository }}
          ISSUE_NUMBER: ${{ gitea.event.issue.number }}
          COMMENT_ID: ${{ gitea.event.comment.id }}
        run: |
          cd .ai-review/tools/ai-review
          # Validate inputs
          if [ -z "$REPO" ] || [ -z "$ISSUE_NUMBER" ]; then
            echo "Error: Missing required parameters"
            exit 1
          fi
          # Validate repository format (owner/repo)
          if ! echo "$REPO" | grep -qE '^[a-zA-Z0-9_-]+/[a-zA-Z0-9_-]+$'; then
            echo "Error: Invalid repository format: $REPO"
            exit 1
          fi
          # Validate numeric identifiers before embedding them in JSON
          if ! echo "$ISSUE_NUMBER" | grep -qE '^[0-9]+$'; then
            echo "Error: Invalid issue number: $ISSUE_NUMBER"
            exit 1
          fi
          if [ "$IS_PR" = "true" ]; then
            # PR comment: dispatch with a minimal event payload.
            # jq builds the JSON from env-supplied values, so neither shell
            # nor JSON injection is possible via the comment body.
            if ! echo "$COMMENT_ID" | grep -qE '^[0-9]+$'; then
              echo "Error: Invalid comment id: $COMMENT_ID"
              exit 1
            fi
            EVENT_DATA=$(jq -n \
              --argjson number "$ISSUE_NUMBER" \
              --argjson id "$COMMENT_ID" \
              --arg body "$COMMENT_BODY" \
              '{action: "created", issue: {number: $number, pull_request: {}}, comment: {id: $id, body: $body}}')
            # Use safe dispatch utility
            python utils/safe_dispatch.py issue_comment "$REPO" "$EVENT_DATA"
          else
            # This is an issue comment - use the comment command
            python main.py comment "$REPO" "$ISSUE_NUMBER" "$COMMENT_BODY"
          fi

View File

@@ -0,0 +1,44 @@
name: AI Issue Triage

# WORKFLOW ROUTING:
# This workflow handles ONLY the 'triage' command
# Other workflows: ai-comment-reply.yml (specific commands), ai-chat.yml (free-form questions)

on:
  issue_comment:
    types: [created]

jobs:
  ai-triage:
    runs-on: ubuntu-latest
    # Only run if comment contains @codebot triage
    # CRITICAL: Ignore bot's own comments to prevent infinite loops (bot username: Bartender)
    if: |
      github.event.comment.user.login != 'Bartender' &&
      contains(github.event.comment.body, '@codebot triage')
    steps:
      # Checkout this repository
      - uses: actions/checkout@v4
      # Checkout the central AI tooling repository
      - uses: actions/checkout@v4
        with:
          repository: Hiddenden/openrabbit
          path: .ai-review
          token: ${{ secrets.AI_REVIEW_TOKEN }}
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      - run: pip install requests pyyaml
      - name: Run AI Issue Triage
        env:
          AI_REVIEW_TOKEN: ${{ secrets.AI_REVIEW_TOKEN }}
          AI_REVIEW_REPO: ${{ gitea.repository }}
          AI_REVIEW_API_URL: https://git.hiddenden.cafe/api/v1
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}
          OLLAMA_HOST: ${{ secrets.OLLAMA_HOST }}
        run: |
          cd .ai-review/tools/ai-review
          python main.py issue ${{ gitea.repository }} ${{ gitea.event.issue.number }}

View File

@@ -0,0 +1,53 @@
name: Enterprise AI Code Review

on:
  pull_request:
    types: [opened, synchronize]

jobs:
  ai-review:
    runs-on: ubuntu-latest
    steps:
      # Checkout the PR repository
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      # Checkout the CENTRAL AI tooling repo
      - uses: actions/checkout@v4
        with:
          repository: Hiddenden/openrabbit
          path: .ai-review
          token: ${{ secrets.AI_REVIEW_TOKEN }}
      # Setup Python
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      # Install dependencies
      - run: pip install requests pyyaml
      # Run the AI review
      - name: Run Enterprise AI Review
        env:
          AI_REVIEW_TOKEN: ${{ secrets.AI_REVIEW_TOKEN }}
          AI_REVIEW_REPO: ${{ gitea.repository }}
          AI_REVIEW_API_URL: https://git.hiddenden.cafe/api/v1
          AI_REVIEW_PR_NUMBER: ${{ gitea.event.pull_request.number }}
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}
          OLLAMA_HOST: ${{ secrets.OLLAMA_HOST }}
          # SECURITY: the PR title is attacker-controlled; pass it through
          # the environment instead of interpolating ${{ ... }} into the
          # shell script, which would allow command injection.
          PR_TITLE: ${{ gitea.event.pull_request.title }}
        run: |
          cd .ai-review/tools/ai-review
          python main.py pr "${{ gitea.repository }}" "${{ gitea.event.pull_request.number }}" \
            --title "$PR_TITLE"
      # Fail CI on HIGH severity (optional)
      - name: Check Review Result
        if: failure()
        run: |
          echo "AI Review found HIGH severity issues. Please address them before merging."
          exit 1

View File

@@ -25,9 +25,10 @@ This is a Discord bot that responds to @mentions with AI-generated responses (mu
### Provider Pattern ### Provider Pattern
The AI system uses a provider abstraction pattern: The AI system uses a provider abstraction pattern:
- `services/providers/base.py` defines `AIProvider` abstract class with `generate()` method - `services/providers/base.py` defines `AIProvider` abstract class with `generate()` method
- `services/providers/openai.py`, `openrouter.py`, `anthropic.py` implement the interface - `services/providers/openai.py`, `openrouter.py`, `anthropic.py`, `gemini.py` implement the interface
- `services/ai_service.py` is the factory that creates the correct provider based on `AI_PROVIDER` env var - `services/ai_service.py` is the factory that creates the correct provider based on `AI_PROVIDER` env var
- OpenRouter uses OpenAI's client with a different base URL - OpenRouter uses OpenAI's client with a different base URL
- Gemini uses the `google-genai` SDK
### Cog System ### Cog System
Discord functionality is in `cogs/`: Discord functionality is in `cogs/`:
@@ -45,4 +46,4 @@ All config flows through `config.py` using pydantic-settings. The `settings` sin
## Environment Variables ## Environment Variables
Required: `DISCORD_TOKEN`, plus one of `OPENAI_API_KEY`, `OPENROUTER_API_KEY`, or `ANTHROPIC_API_KEY` depending on `AI_PROVIDER` setting. Required: `DISCORD_TOKEN`, plus one of `OPENAI_API_KEY`, `OPENROUTER_API_KEY`, `ANTHROPIC_API_KEY`, or `GEMINI_API_KEY` depending on `AI_PROVIDER` setting.

View File

@@ -4,7 +4,7 @@ A customizable Discord bot that responds to @mentions with AI-generated response
## Features ## Features
- **Multi-Provider AI**: Supports OpenAI, OpenRouter, and Anthropic (Claude) - **Multi-Provider AI**: Supports OpenAI, OpenRouter, Anthropic (Claude), and Google Gemini
- **Fully Customizable**: Configure bot name, personality, and behavior - **Fully Customizable**: Configure bot name, personality, and behavior
- **Conversation Memory**: Remembers context per user - **Conversation Memory**: Remembers context per user
- **Easy Deployment**: Docker support included - **Easy Deployment**: Docker support included
@@ -50,10 +50,11 @@ All configuration is done via environment variables in `.env`.
| Variable | Description | | Variable | Description |
|----------|-------------| |----------|-------------|
| `DISCORD_TOKEN` | Your Discord bot token | | `DISCORD_TOKEN` | Your Discord bot token |
| `AI_PROVIDER` | `openai`, `openrouter`, or `anthropic` | | `AI_PROVIDER` | `openai`, `openrouter`, `anthropic`, or `gemini` |
| `OPENAI_API_KEY` | OpenAI API key (if using OpenAI) | | `OPENAI_API_KEY` | OpenAI API key (if using OpenAI) |
| `OPENROUTER_API_KEY` | OpenRouter API key (if using OpenRouter) | | `OPENROUTER_API_KEY` | OpenRouter API key (if using OpenRouter) |
| `ANTHROPIC_API_KEY` | Anthropic API key (if using Anthropic) | | `ANTHROPIC_API_KEY` | Anthropic API key (if using Anthropic) |
| `GEMINI_API_KEY` | Google Gemini API key (if using Gemini) |
### Bot Identity ### Bot Identity
@@ -129,6 +130,7 @@ Mention the bot in any channel:
| OpenAI | gpt-4o, gpt-4-turbo, gpt-3.5-turbo | Official OpenAI API | | OpenAI | gpt-4o, gpt-4-turbo, gpt-3.5-turbo | Official OpenAI API |
| OpenRouter | 100+ models | Access to Llama, Mistral, Claude, etc. | | OpenRouter | 100+ models | Access to Llama, Mistral, Claude, etc. |
| Anthropic | claude-3-5-sonnet, claude-3-opus | Direct Claude API | | Anthropic | claude-3-5-sonnet, claude-3-opus | Direct Claude API |
| Gemini | gemini-2.0-flash, gemini-1.5-pro | Google AI API |
## Project Structure ## Project Structure

View File

@@ -3,6 +3,7 @@ discord.py>=2.3.0
# AI Providers # AI Providers
anthropic>=0.18.0 anthropic>=0.18.0
google-genai>=1.0.0
openai>=1.12.0 openai>=1.12.0
# HTTP Client # HTTP Client

View File

@@ -19,7 +19,7 @@ class Settings(BaseSettings):
discord_token: str = Field(..., description="Discord bot token") discord_token: str = Field(..., description="Discord bot token")
# AI Provider Configuration # AI Provider Configuration
ai_provider: Literal["openai", "openrouter", "anthropic"] = Field( ai_provider: Literal["openai", "openrouter", "anthropic", "gemini"] = Field(
"openai", description="Which AI provider to use" "openai", description="Which AI provider to use"
) )
ai_model: str = Field("gpt-4o", description="AI model to use") ai_model: str = Field("gpt-4o", description="AI model to use")
@@ -30,6 +30,7 @@ class Settings(BaseSettings):
openai_api_key: str | None = Field(None, description="OpenAI API key") openai_api_key: str | None = Field(None, description="OpenAI API key")
openrouter_api_key: str | None = Field(None, description="OpenRouter API key") openrouter_api_key: str | None = Field(None, description="OpenRouter API key")
anthropic_api_key: str | None = Field(None, description="Anthropic API key") anthropic_api_key: str | None = Field(None, description="Anthropic API key")
gemini_api_key: str | None = Field(None, description="Google Gemini API key")
# Logging # Logging
log_level: str = Field("INFO", description="Logging level") log_level: str = Field("INFO", description="Logging level")
@@ -66,6 +67,7 @@ class Settings(BaseSettings):
"openai": self.openai_api_key, "openai": self.openai_api_key,
"openrouter": self.openrouter_api_key, "openrouter": self.openrouter_api_key,
"anthropic": self.anthropic_api_key, "anthropic": self.anthropic_api_key,
"gemini": self.gemini_api_key,
} }
key = key_map.get(self.ai_provider) key = key_map.get(self.ai_provider)
if not key: if not key:

View File

@@ -9,6 +9,7 @@ from .providers import (
AIProvider, AIProvider,
AIResponse, AIResponse,
AnthropicProvider, AnthropicProvider,
GeminiProvider,
Message, Message,
OpenAIProvider, OpenAIProvider,
OpenRouterProvider, OpenRouterProvider,
@@ -16,7 +17,7 @@ from .providers import (
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
ProviderType = Literal["openai", "openrouter", "anthropic"] ProviderType = Literal["openai", "openrouter", "anthropic", "gemini"]
class AIService: class AIService:
@@ -45,6 +46,7 @@ class AIService:
"openai": OpenAIProvider, "openai": OpenAIProvider,
"openrouter": OpenRouterProvider, "openrouter": OpenRouterProvider,
"anthropic": AnthropicProvider, "anthropic": AnthropicProvider,
"gemini": GeminiProvider,
} }
provider_class = providers.get(provider_type) provider_class = providers.get(provider_type)

View File

@@ -2,6 +2,7 @@
from .anthropic import AnthropicProvider from .anthropic import AnthropicProvider
from .base import AIProvider, AIResponse, Message from .base import AIProvider, AIResponse, Message
from .gemini import GeminiProvider
from .openai import OpenAIProvider from .openai import OpenAIProvider
from .openrouter import OpenRouterProvider from .openrouter import OpenRouterProvider
@@ -12,4 +13,5 @@ __all__ = [
"OpenAIProvider", "OpenAIProvider",
"OpenRouterProvider", "OpenRouterProvider",
"AnthropicProvider", "AnthropicProvider",
"GeminiProvider",
] ]

View File

@@ -0,0 +1,71 @@
"""Google Gemini provider implementation."""
import logging
from google import genai
from google.genai import types
from .base import AIProvider, AIResponse, Message
logger = logging.getLogger(__name__)
class GeminiProvider(AIProvider):
    """AI provider backed by Google's Gemini API via the google-genai SDK."""

    def __init__(self, api_key: str, model: str = "gemini-2.0-flash") -> None:
        # The google-genai Client exposes async operations under `client.aio`.
        self.client = genai.Client(api_key=api_key)
        self.model = model

    @property
    def provider_name(self) -> str:
        """Short identifier for this provider."""
        return "gemini"

    async def generate(
        self,
        messages: list[Message],
        system_prompt: str | None = None,
        max_tokens: int = 1024,
        temperature: float = 0.7,
    ) -> AIResponse:
        """Generate a response using Gemini.

        Args:
            messages: Conversation history using OpenAI-style roles.
            system_prompt: Optional system instruction for the model.
            max_tokens: Upper bound on tokens in the generated reply.
            temperature: Sampling temperature.

        Returns:
            AIResponse carrying the generated text, model name, and usage stats.
        """
        # Gemini's chat format uses "user"/"model" roles, so the OpenAI-style
        # "assistant" role is translated; any other role passes through as-is.
        contents = [
            types.Content(
                role="model" if msg.role == "assistant" else msg.role,
                parts=[types.Part(text=msg.content)],
            )
            for msg in messages
        ]
        logger.debug(f"Sending {len(contents)} messages to Gemini")

        gen_config = types.GenerateContentConfig(
            max_output_tokens=max_tokens,
            temperature=temperature,
        )
        if system_prompt:
            gen_config.system_instruction = system_prompt

        response = await self.client.aio.models.generate_content(
            model=self.model,
            contents=contents,
            config=gen_config,
        )

        # Token accounting is optional in the API response; fall back to {}.
        usage_stats: dict = {}
        meta = response.usage_metadata
        if meta:
            usage_stats = {
                "prompt_tokens": meta.prompt_token_count or 0,
                "completion_tokens": meta.candidates_token_count or 0,
                "total_tokens": meta.total_token_count or 0,
            }

        return AIResponse(
            content=response.text or "",
            model=self.model,
            usage=usage_stats,
        )