Make LLM provider HTTP timeouts configurable (replace hard-coded 120s/300s with `timeouts.llm` / `timeouts.ollama` config values)
All checks were successful
AI Codebase Quality Review / ai-codebase-review (push) Successful in 39s

This commit is contained in:
2026-01-07 21:19:46 +01:00
parent a1fe47cdf4
commit e8d28225e0
24 changed files with 6431 additions and 250 deletions

View File

@@ -77,11 +77,13 @@ class OpenAIProvider(BaseLLMProvider):
model: str = "gpt-4o-mini",
temperature: float = 0,
max_tokens: int = 4096,
timeout: int = 120,
):
self.api_key = api_key or os.environ.get("OPENAI_API_KEY", "")
self.model = model
self.temperature = temperature
self.max_tokens = max_tokens
self.timeout = timeout
self.api_url = "https://api.openai.com/v1/chat/completions"
def call(self, prompt: str, **kwargs) -> LLMResponse:
@@ -101,7 +103,7 @@ class OpenAIProvider(BaseLLMProvider):
"max_tokens": kwargs.get("max_tokens", self.max_tokens),
"messages": [{"role": "user", "content": prompt}],
},
timeout=120,
timeout=self.timeout,
)
response.raise_for_status()
data = response.json()
@@ -145,7 +147,7 @@ class OpenAIProvider(BaseLLMProvider):
"Content-Type": "application/json",
},
json=request_body,
timeout=120,
timeout=self.timeout,
)
response.raise_for_status()
data = response.json()
@@ -186,11 +188,13 @@ class OpenRouterProvider(BaseLLMProvider):
model: str = "anthropic/claude-3.5-sonnet",
temperature: float = 0,
max_tokens: int = 4096,
timeout: int = 120,
):
self.api_key = api_key or os.environ.get("OPENROUTER_API_KEY", "")
self.model = model
self.temperature = temperature
self.max_tokens = max_tokens
self.timeout = timeout
self.api_url = "https://openrouter.ai/api/v1/chat/completions"
def call(self, prompt: str, **kwargs) -> LLMResponse:
@@ -210,7 +214,7 @@ class OpenRouterProvider(BaseLLMProvider):
"max_tokens": kwargs.get("max_tokens", self.max_tokens),
"messages": [{"role": "user", "content": prompt}],
},
timeout=120,
timeout=self.timeout,
)
response.raise_for_status()
data = response.json()
@@ -254,7 +258,7 @@ class OpenRouterProvider(BaseLLMProvider):
"Content-Type": "application/json",
},
json=request_body,
timeout=120,
timeout=self.timeout,
)
response.raise_for_status()
data = response.json()
@@ -294,10 +298,12 @@ class OllamaProvider(BaseLLMProvider):
host: str | None = None,
model: str = "codellama:13b",
temperature: float = 0,
timeout: int = 300,
):
self.host = host or os.environ.get("OLLAMA_HOST", "http://localhost:11434")
self.model = model
self.temperature = temperature
self.timeout = timeout
def call(self, prompt: str, **kwargs) -> LLMResponse:
"""Call Ollama API."""
@@ -311,7 +317,7 @@ class OllamaProvider(BaseLLMProvider):
"temperature": kwargs.get("temperature", self.temperature),
},
},
timeout=300, # Longer timeout for local models
timeout=self.timeout,
)
response.raise_for_status()
data = response.json()
@@ -477,12 +483,18 @@ class LLMClient:
provider = config.get("provider", "openai")
provider_config = {}
# Get timeout configuration
timeouts = config.get("timeouts", {})
llm_timeout = timeouts.get("llm", 120)
ollama_timeout = timeouts.get("ollama", 300)
# Map config keys to provider-specific settings
if provider == "openai":
provider_config = {
"model": config.get("model", {}).get("openai", "gpt-4o-mini"),
"temperature": config.get("temperature", 0),
"max_tokens": config.get("max_tokens", 16000),
"timeout": llm_timeout,
}
elif provider == "openrouter":
provider_config = {
@@ -491,11 +503,13 @@ class LLMClient:
),
"temperature": config.get("temperature", 0),
"max_tokens": config.get("max_tokens", 16000),
"timeout": llm_timeout,
}
elif provider == "ollama":
provider_config = {
"model": config.get("model", {}).get("ollama", "codellama:13b"),
"temperature": config.get("temperature", 0),
"timeout": ollama_timeout,
}
return cls(provider=provider, config=provider_config)