Rebrand and personalize the bot as 'Bartender' - a companion for those who love deeply and feel intensely. Major changes: - Rename package: daemon_boyfriend -> loyal_companion - New default personality: Bartender - wise, steady, non-judgmental - Grief-aware system prompt (no toxic positivity, attachment-informed) - New relationship levels: New Face -> Close Friend progression - Bartender-style mood modifiers (steady presence) - New fact types: attachment_pattern, grief_context, coping_mechanism - Lower mood decay (0.05) for emotional stability - Higher fact extraction rate (0.4) - Bartender pays attention Updated all imports, configs, Docker files, and documentation.
86 lines
2.5 KiB
Python
86 lines
2.5 KiB
Python
"""Google Gemini provider implementation."""
|
|
|
|
import logging
|
|
|
|
from google import genai
|
|
from google.genai import types
|
|
|
|
from .base import AIProvider, AIResponse, Message
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
class GeminiProvider(AIProvider):
    """Google Gemini API provider.

    Wraps the async ``google-genai`` client behind the provider-neutral
    :class:`AIProvider` interface, converting :class:`Message` objects into
    Gemini's ``Content``/``Part`` request format.
    """

    def __init__(self, api_key: str, model: str = "gemini-2.0-flash") -> None:
        """Create a Gemini client.

        Args:
            api_key: Gemini / Google AI Studio API key.
            model: Model identifier used for all requests.
        """
        self.client = genai.Client(api_key=api_key)
        self.model = model

    @property
    def provider_name(self) -> str:
        """Stable identifier for this provider."""
        return "gemini"

    def _build_message_parts(self, message: Message) -> list[types.Part]:
        """Build message parts, handling images if present.

        Images are placed before the text so the model receives the visual
        context ahead of the prompt that refers to it.
        """
        # One URI-backed part per attached image, then the text content.
        parts: list[types.Part] = [
            types.Part.from_uri(file_uri=image.url, mime_type=image.media_type)
            for image in message.images
        ]
        parts.append(types.Part(text=message.content))
        return parts

    async def generate(
        self,
        messages: list[Message],
        system_prompt: str | None = None,
        max_tokens: int = 1024,
        temperature: float = 0.7,
    ) -> AIResponse:
        """Generate a response using Gemini.

        Args:
            messages: Conversation history, oldest first.
            system_prompt: Optional system instruction; falsy values
                (None / empty string) are omitted from the request.
            max_tokens: Upper bound on generated tokens.
            temperature: Sampling temperature.

        Returns:
            AIResponse carrying the generated text, the model name, and a
            token-usage dict (empty when the API returns no usage metadata).
        """
        # Gemini only understands "user" and "model" roles, so map the
        # provider-neutral "assistant" role onto "model".
        contents = [
            types.Content(
                role="model" if m.role == "assistant" else m.role,
                parts=self._build_message_parts(m),
            )
            for m in messages
        ]

        # Lazy %-formatting: avoids building the string when DEBUG is off.
        logger.debug("Sending %d messages to Gemini", len(contents))

        # Pass system_instruction at construction rather than mutating the
        # config afterwards. `or None` keeps the original truthiness guard:
        # an empty prompt is treated as absent.
        config = types.GenerateContentConfig(
            max_output_tokens=max_tokens,
            temperature=temperature,
            system_instruction=system_prompt or None,
        )

        response = await self.client.aio.models.generate_content(
            model=self.model,
            contents=contents,
            config=config,
        )

        usage = {}
        if response.usage_metadata:
            # Individual counts may be None; normalize each to 0.
            usage = {
                "prompt_tokens": response.usage_metadata.prompt_token_count or 0,
                "completion_tokens": response.usage_metadata.candidates_token_count or 0,
                "total_tokens": response.usage_metadata.total_token_count or 0,
            }

        return AIResponse(
            content=response.text or "",  # response.text can be None
            model=self.model,
            usage=usage,
        )
|