AI implementation for OpenAI and Claude.
19
backend/Dockerfile.backend
Normal file
@@ -0,0 +1,19 @@
FROM python:3.11-slim

WORKDIR /app

# Install curl for health checks
RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*

# Install dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application
COPY app/ ./app/

# Expose port
EXPOSE 8000

# Run with uvicorn
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
0
backend/app/__init__.py
Normal file
0
backend/app/api/__init__.py
Normal file
69
backend/app/api/chat.py
Normal file
@@ -0,0 +1,69 @@
import json

from fastapi import APIRouter, HTTPException, status
from fastapi.responses import StreamingResponse

from ..config import settings
from ..models.schemas import ChatRequest, ChatResponse, ProviderListResponse
from ..services.provider_manager import provider_manager

router = APIRouter(prefix="/api/chat", tags=["chat"])


@router.post("/", response_model=ChatResponse)
async def chat(request: ChatRequest):
    """
    Non-streaming chat endpoint
    """
    try:
        provider = provider_manager.get_provider(request.provider)
        response = await provider.chat(request.message)

        return ChatResponse(message=response, provider=provider.get_provider_name())
    except ValueError as e:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e))
    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Error processing request: {str(e)}",
        )


@router.post("/stream")
async def chat_stream(request: ChatRequest):
    """
    Streaming chat endpoint - returns SSE (Server-Sent Events)
    """
    try:
        provider = provider_manager.get_provider(request.provider)

        async def event_generator():
            try:
                async for chunk in provider.chat_stream(request.message):
                    yield f"data: {json.dumps({'chunk': chunk})}\n\n"

                yield f"data: {json.dumps({'done': True})}\n\n"
            except Exception as e:
                yield f"data: {json.dumps({'error': str(e)})}\n\n"

        return StreamingResponse(
            event_generator(),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
            },
        )
    except ValueError as e:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e))


@router.get("/providers", response_model=ProviderListResponse)
async def list_providers():
    """
    List available providers
    """
    return ProviderListResponse(
        providers=provider_manager.get_available_providers(),
        default=settings.DEFAULT_PROVIDER,
    )
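For reference, a minimal client sketch for the streaming endpoint, using httpx (already in requirements.txt). It assumes the API is reachable at http://localhost:8000 and simply prints chunks as they arrive:

import asyncio
import json

import httpx


async def stream_chat(message: str) -> None:
    # POST to the SSE endpoint and consume the response line by line
    async with httpx.AsyncClient(timeout=None) as client:
        async with client.stream(
            "POST",
            "http://localhost:8000/api/chat/stream",
            json={"message": message},
        ) as response:
            async for line in response.aiter_lines():
                if not line.startswith("data: "):
                    continue  # skip blank separator lines between events
                event = json.loads(line[len("data: "):])
                if "error" in event:
                    raise RuntimeError(event["error"])
                if event.get("done"):
                    break
                print(event.get("chunk", ""), end="", flush=True)


asyncio.run(stream_chat("Hello!"))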
33
backend/app/config.py
Normal file
@@ -0,0 +1,33 @@
from typing import Optional

from pydantic_settings import BaseSettings


class Settings(BaseSettings):
    # API Keys
    ANTHROPIC_API_KEY: Optional[str] = None
    OPENAI_API_KEY: Optional[str] = None

    # Provider Settings
    DEFAULT_PROVIDER: str = "claude"
    CLAUDE_MODEL: str = "claude-3-5-sonnet-20241022"
    OPENAI_MODEL: str = "gpt-4-turbo-preview"

    # API Settings
    MAX_TOKENS: int = 4000
    TEMPERATURE: float = 0.7
    TIMEOUT: int = 60

    # CORS
    FRONTEND_URL: str = "http://localhost:3000"

    # Rate Limiting
    RATE_LIMIT_REQUESTS: int = 10
    RATE_LIMIT_WINDOW: int = 60

    class Config:
        env_file = ".env"
        case_sensitive = True


settings = Settings()
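Since Settings extends BaseSettings, every field can be overridden through the environment or the .env file (note the providers below currently hardcode max_tokens=4000 rather than reading settings.MAX_TOKENS). A minimal sketch of the override behavior, with hypothetical values:

import os

os.environ["DEFAULT_PROVIDER"] = "openai"  # hypothetical override
os.environ["MAX_TOKENS"] = "2000"

from app.config import Settings

s = Settings()
assert s.DEFAULT_PROVIDER == "openai"
assert s.MAX_TOKENS == 2000  # pydantic coerces the env string to int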
51
backend/app/main.py
Normal file
@@ -0,0 +1,51 @@
import logging

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse

from .api import chat
from .config import settings
from .services.provider_manager import provider_manager

# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI(
    title="DevDen API", description="AI chat backend for DevDen", version="1.0.0"
)

# CORS Configuration
app.add_middleware(
    CORSMiddleware,
    allow_origins=[settings.FRONTEND_URL, "http://localhost:3000"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Include routers
app.include_router(chat.router)


@app.get("/health")
async def health_check():
    """Health check endpoint"""
    return JSONResponse(
        content={
            "status": "healthy",
            "providers": provider_manager.get_available_providers(),
        }
    )


@app.on_event("startup")
async def startup_event():
    logger.info("DevDen API starting up...")
    logger.info(f"Available providers: {provider_manager.get_available_providers()}")


@app.on_event("shutdown")
async def shutdown_event():
    logger.info("DevDen API shutting down...")
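A quick smoke test against the health endpoint, assuming the server is running locally (e.g. via `uvicorn app.main:app --port 8000` or the Dockerfile above):

import httpx

resp = httpx.get("http://localhost:8000/health")
resp.raise_for_status()
print(resp.json())  # e.g. {"status": "healthy", "providers": ["claude", "openai"]}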
0
backend/app/models/__init__.py
Normal file
24
backend/app/models/schemas.py
Normal file
@@ -0,0 +1,24 @@
from typing import List, Optional

from pydantic import BaseModel, Field, validator


class ChatRequest(BaseModel):
    message: str = Field(..., min_length=1, max_length=10000)
    provider: Optional[str] = None

    @validator("message")
    def message_not_empty(cls, v):
        if not v.strip():
            raise ValueError("Message cannot be empty or whitespace")
        return v.strip()


class ChatResponse(BaseModel):
    message: str
    provider: str


class ProviderListResponse(BaseModel):
    providers: List[str]
    default: str
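The message validator both rejects whitespace-only input and normalizes valid input; a short sketch of the behavior:

from pydantic import ValidationError

from app.models.schemas import ChatRequest

req = ChatRequest(message="  hello  ")
assert req.message == "hello"  # validator strips surrounding whitespace

try:
    ChatRequest(message="   ")  # passes min_length=1 but fails the validator
except ValidationError as exc:
    print(exc)  # "Message cannot be empty or whitespace"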
0
backend/app/services/__init__.py
Normal file
27
backend/app/services/provider_base.py
Normal file
@@ -0,0 +1,27 @@
from abc import ABC, abstractmethod
from typing import AsyncGenerator


class AIProvider(ABC):
    """Abstract base class for AI providers"""

    def __init__(self, api_key: str, model: str):
        self.api_key = api_key
        self.model = model

    @abstractmethod
    async def chat(self, message: str, system_prompt: str = None) -> str:
        """Non-streaming chat"""
        pass

    @abstractmethod
    async def chat_stream(
        self, message: str, system_prompt: str = None
    ) -> AsyncGenerator[str, None]:
        """Streaming chat - yields chunks of text"""
        pass

    @abstractmethod
    def get_provider_name(self) -> str:
        """Return provider identifier"""
        pass
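A concrete provider only has to implement these three methods. For testing the routes without real API keys, a hypothetical in-memory fake (EchoProvider is not part of this commit) could look like:

from typing import AsyncGenerator

from app.services.provider_base import AIProvider


class EchoProvider(AIProvider):
    """Hypothetical test double that echoes the prompt back."""

    async def chat(self, message: str, system_prompt: str = None) -> str:
        return f"echo: {message}"

    async def chat_stream(
        self, message: str, system_prompt: str = None
    ) -> AsyncGenerator[str, None]:
        for word in message.split():
            yield word + " "

    def get_provider_name(self) -> str:
        return "echo"

Registering it is a single assignment: provider_manager.providers["echo"] = EchoProvider(api_key="-", model="-").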
41
backend/app/services/provider_claude.py
Normal file
@@ -0,0 +1,41 @@
from typing import AsyncGenerator

import anthropic

from .provider_base import AIProvider


class ClaudeProvider(AIProvider):
    def __init__(self, api_key: str, model: str):
        super().__init__(api_key, model)
        self.client = anthropic.AsyncAnthropic(api_key=api_key)

    async def chat(self, message: str, system_prompt: str = None) -> str:
        """Non-streaming chat"""
        messages = [{"role": "user", "content": message}]

        kwargs = {"model": self.model, "max_tokens": 4000, "messages": messages}

        if system_prompt:
            kwargs["system"] = system_prompt

        response = await self.client.messages.create(**kwargs)
        return response.content[0].text

    async def chat_stream(
        self, message: str, system_prompt: str = None
    ) -> AsyncGenerator[str, None]:
        """Streaming chat"""
        messages = [{"role": "user", "content": message}]

        kwargs = {"model": self.model, "max_tokens": 4000, "messages": messages}

        if system_prompt:
            kwargs["system"] = system_prompt

        async with self.client.messages.stream(**kwargs) as stream:
            async for text in stream.text_stream:
                yield text

    def get_provider_name(self) -> str:
        return "claude"
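The provider can also be exercised outside FastAPI; a sketch assuming ANTHROPIC_API_KEY is set in the environment:

import asyncio
import os

from app.services.provider_claude import ClaudeProvider


async def main() -> None:
    provider = ClaudeProvider(
        api_key=os.environ["ANTHROPIC_API_KEY"],
        model="claude-3-5-sonnet-20241022",
    )
    # Stream the reply chunk by chunk, exactly as the SSE endpoint does
    async for text in provider.chat_stream("Say hi in five words."):
        print(text, end="", flush=True)


asyncio.run(main())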
75
backend/app/services/provider_manager.py
Normal file
@@ -0,0 +1,75 @@
from typing import Optional

from ..config import settings
from .provider_base import AIProvider
from .provider_claude import ClaudeProvider
from .provider_openai import OpenAIProvider


class ProviderManager:
    """Manages provider selection and fallback logic"""

    def __init__(self):
        self.providers = {}
        self._initialize_providers()

    def _initialize_providers(self):
        """Initialize available providers based on API keys"""
        if settings.ANTHROPIC_API_KEY and settings.ANTHROPIC_API_KEY.strip():
            self.providers["claude"] = ClaudeProvider(
                api_key=settings.ANTHROPIC_API_KEY, model=settings.CLAUDE_MODEL
            )

        if settings.OPENAI_API_KEY and settings.OPENAI_API_KEY.strip():
            self.providers["openai"] = OpenAIProvider(
                api_key=settings.OPENAI_API_KEY, model=settings.OPENAI_MODEL
            )

    def get_provider(self, provider_name: Optional[str] = None) -> AIProvider:
        """
        Get a provider by name, or use default.
        Raises ValueError if provider not available.
        """
        name = provider_name or settings.DEFAULT_PROVIDER

        if name not in self.providers:
            raise ValueError(
                f"Provider '{name}' not available. "
                f"Available: {list(self.providers.keys())}"
            )

        return self.providers[name]

    def get_available_providers(self) -> list[str]:
        """Return list of available provider names"""
        return list(self.providers.keys())

    async def chat_with_fallback(
        self, message: str, preferred_provider: Optional[str] = None
    ) -> tuple[str, str]:
        """
        Try to chat with preferred provider, fallback to others if it fails.
        Returns (response, provider_used)
        """
        providers_to_try = [preferred_provider or settings.DEFAULT_PROVIDER] + [
            p
            for p in self.providers.keys()
            if p != (preferred_provider or settings.DEFAULT_PROVIDER)
        ]

        last_error = None

        for provider_name in providers_to_try:
            try:
                provider = self.get_provider(provider_name)
                response = await provider.chat(message)
                return response, provider_name
            except Exception as e:
                last_error = e
                continue

        raise Exception(f"All providers failed. Last error: {last_error}")


# Singleton instance
provider_manager = ProviderManager()
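Note that chat_with_fallback is not wired into the chat routes yet; called directly, it tries the preferred provider first and then every other configured one. A usage sketch:

import asyncio

from app.services.provider_manager import provider_manager


async def main() -> None:
    response, used = await provider_manager.chat_with_fallback(
        "Summarize this repo in one line.", preferred_provider="claude"
    )
    print(f"[{used}] {response}")


asyncio.run(main())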
48
backend/app/services/provider_openai.py
Normal file
@@ -0,0 +1,48 @@
from typing import AsyncGenerator

from openai import AsyncOpenAI

from .provider_base import AIProvider


class OpenAIProvider(AIProvider):
    def __init__(self, api_key: str, model: str):
        super().__init__(api_key, model)
        self.client = AsyncOpenAI(api_key=api_key)

    async def chat(self, message: str, system_prompt: str = None) -> str:
        """Non-streaming chat"""
        messages = []

        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})

        messages.append({"role": "user", "content": message})

        response = await self.client.chat.completions.create(
            model=self.model, messages=messages, max_tokens=4000
        )

        return response.choices[0].message.content

    async def chat_stream(
        self, message: str, system_prompt: str = None
    ) -> AsyncGenerator[str, None]:
        """Streaming chat"""
        messages = []

        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})

        messages.append({"role": "user", "content": message})

        stream = await self.client.chat.completions.create(
            model=self.model, messages=messages, max_tokens=4000, stream=True
        )

        async for chunk in stream:
            if chunk.choices[0].delta.content:
                yield chunk.choices[0].delta.content

    def get_provider_name(self) -> str:
        return "openai"
8
backend/requirements.txt
Normal file
@@ -0,0 +1,8 @@
fastapi>=0.109.0
uvicorn[standard]>=0.27.0
anthropic>=0.18.1
openai>=1.50.0
pydantic>=2.6.0
pydantic-settings>=2.1.0
python-dotenv>=1.0.0
httpx>=0.27.0