Merge pull request 'fix/emtpy-loging' (#1) from fix/emtpy-loging into main

Reviewed-on: #1
This commit was merged in pull request #1.
This commit is contained in:
2026-01-16 12:18:27 +00:00
16 changed files with 1804 additions and 89 deletions

View File

@@ -13,11 +13,15 @@ OPENAI_MODEL=gpt-4o-mini
# API Configuration
MAX_TOKENS=4000
TEMPERATURE=0.7
# For local development use: http://localhost:3000
# For production use your domain with HTTPS: https://your-domain.com
FRONTEND_URL=http://localhost:3000
# Microsoft Entra ID (Azure AD)
# Create an app registration at: https://portal.azure.com/#view/Microsoft_AAD_RegisteredApps
# Add redirect URI: http://localhost:3000/auth/callback
# IMPORTANT: Azure requires HTTPS for non-localhost redirect URIs
# For localhost: http://localhost:3000/auth/callback
# For production: https://your-domain.com/auth/callback
ENTRA_TENANT_ID=your-tenant-id
ENTRA_CLIENT_ID=your-client-id
ENTRA_CLIENT_SECRET=your-client-secret

View File

@@ -0,0 +1,61 @@
name: AI Chat (Bartender)

# WORKFLOW ROUTING:
# This workflow handles FREE-FORM questions/chat (no specific command)
# Other workflows: ai-issue-triage.yml (@codebot triage), ai-comment-reply.yml (specific commands)
# This is the FALLBACK for any @codebot mention that isn't a known command
on:
  issue_comment:
    types: [created]

# CUSTOMIZE YOUR BOT NAME:
# Change '@codebot' in all conditions below to match your config.yml mention_prefix
# Examples: '@bartender', '@uni', '@joey', '@codebot'
jobs:
  ai-chat:
    # Only run if comment mentions the bot but NOT a specific command
    # This prevents duplicate runs with ai-comment-reply.yml and ai-issue-triage.yml
    # CRITICAL: Ignore bot's own comments to prevent infinite loops (bot username: Bartender)
    if: |
      github.event.comment.user.login != 'Bartender' &&
      contains(github.event.comment.body, '@codebot') &&
      !contains(github.event.comment.body, '@codebot triage') &&
      !contains(github.event.comment.body, '@codebot help') &&
      !contains(github.event.comment.body, '@codebot explain') &&
      !contains(github.event.comment.body, '@codebot suggest') &&
      !contains(github.event.comment.body, '@codebot security') &&
      !contains(github.event.comment.body, '@codebot summarize') &&
      !contains(github.event.comment.body, '@codebot changelog') &&
      !contains(github.event.comment.body, '@codebot explain-diff') &&
      !contains(github.event.comment.body, '@codebot review-again') &&
      !contains(github.event.comment.body, '@codebot setup-labels')
    runs-on: ubuntu-latest
    steps:
      # Checkout the repository the comment belongs to
      - uses: actions/checkout@v4
      # Checkout the central AI tooling repo
      - uses: actions/checkout@v4
        with:
          repository: Hiddenden/openrabbit
          path: .ai-review
          token: ${{ secrets.AI_REVIEW_TOKEN }}
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      - run: pip install requests pyyaml
      - name: Run AI Chat
        env:
          AI_REVIEW_TOKEN: ${{ secrets.AI_REVIEW_TOKEN }}
          AI_REVIEW_REPO: ${{ gitea.repository }}
          AI_REVIEW_API_URL: https://git.hiddenden.cafe/api/v1
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}
          OLLAMA_HOST: ${{ secrets.OLLAMA_HOST }}
          SEARXNG_URL: ${{ secrets.SEARXNG_URL }}
          # SECURITY: user-controlled values (especially the comment body) must be
          # passed through env vars, never interpolated with ${{ }} directly into
          # the run script — that is a shell command-injection vector.
          REPO: ${{ gitea.repository }}
          ISSUE_NUMBER: ${{ gitea.event.issue.number }}
          COMMENT_BODY: ${{ gitea.event.comment.body }}
        run: |
          cd .ai-review/tools/ai-review
          python main.py comment "$REPO" "$ISSUE_NUMBER" "$COMMENT_BODY"

View File

@@ -0,0 +1,58 @@
name: AI Codebase Quality Review
on:
  # # Weekly scheduled run
  # schedule:
  #   - cron: "0 0 * * 0"
  # Manual trigger
  workflow_dispatch:
    inputs:
      report_type:
        description: "Type of report to generate"
        required: false
        default: "full"
        type: choice
        options:
          - full
          - security
          - quick
jobs:
  ai-codebase-review:
    runs-on: ubuntu-latest
    steps:
      # Checkout the repository
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0 # Full history for analysis
      # Checkout central AI tooling
      - uses: actions/checkout@v4
        with:
          repository: Hiddenden/openrabbit
          path: .ai-review
          token: ${{ secrets.AI_REVIEW_TOKEN }}
      # Setup Python
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      # Install dependencies
      - run: pip install requests pyyaml
      # Run AI codebase analysis
      - name: Run AI Codebase Analysis
        env:
          AI_REVIEW_TOKEN: ${{ secrets.AI_REVIEW_TOKEN }}
          AI_REVIEW_REPO: ${{ gitea.repository }}
          AI_REVIEW_API_URL: https://git.hiddenden.cafe/api/v1
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}
          OLLAMA_HOST: ${{ secrets.OLLAMA_HOST }}
        run: |
          cd .ai-review/tools/ai-review
          # SECURITY: use the env var instead of interpolating ${{ }} into the
          # shell (avoids command injection and keeps the value safely quoted).
          python main.py codebase "$AI_REVIEW_REPO"

View File

@@ -0,0 +1,98 @@
name: AI Comment Reply

# WORKFLOW ROUTING:
# This workflow handles SPECIFIC commands: help, explain, suggest, security, summarize, changelog, explain-diff, review-again, setup-labels
# Other workflows: ai-issue-triage.yml (@codebot triage), ai-chat.yml (free-form questions)
on:
  issue_comment:
    types: [created]

# CUSTOMIZE YOUR BOT NAME:
# Change '@codebot' in the 'if' condition below to match your config.yml mention_prefix
# Examples: '@bartender', '@uni', '@joey', '@codebot'
jobs:
  ai-reply:
    runs-on: ubuntu-latest
    # Only run for specific commands (not free-form chat or triage)
    # This prevents duplicate runs with ai-chat.yml and ai-issue-triage.yml
    # CRITICAL: Ignore bot's own comments to prevent infinite loops (bot username: Bartender)
    if: |
      github.event.comment.user.login != 'Bartender' &&
      (contains(github.event.comment.body, '@codebot help') ||
       contains(github.event.comment.body, '@codebot explain') ||
       contains(github.event.comment.body, '@codebot suggest') ||
       contains(github.event.comment.body, '@codebot security') ||
       contains(github.event.comment.body, '@codebot summarize') ||
       contains(github.event.comment.body, '@codebot changelog') ||
       contains(github.event.comment.body, '@codebot explain-diff') ||
       contains(github.event.comment.body, '@codebot review-again') ||
       contains(github.event.comment.body, '@codebot setup-labels'))
    steps:
      # Checkout the repository the comment belongs to
      - uses: actions/checkout@v4
      # Checkout the central AI tooling repo
      - uses: actions/checkout@v4
        with:
          repository: Hiddenden/openrabbit
          path: .ai-review
          token: ${{ secrets.AI_REVIEW_TOKEN }}
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      - run: pip install requests pyyaml
      - name: Run AI Comment Response
        env:
          AI_REVIEW_TOKEN: ${{ secrets.AI_REVIEW_TOKEN }}
          AI_REVIEW_API_URL: https://git.hiddenden.cafe/api/v1
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}
          OLLAMA_HOST: ${{ secrets.OLLAMA_HOST }}
          # SECURITY: user-controlled values are exposed as env vars instead of
          # being interpolated with ${{ }} inside the run script. A comment body
          # containing a single quote or $(...) would otherwise break out of the
          # shell string and execute arbitrary commands.
          IS_PR: ${{ gitea.event.issue.pull_request != null }}
          REPO: ${{ gitea.repository }}
          ISSUE_NUMBER: ${{ gitea.event.issue.number }}
          COMMENT_ID: ${{ gitea.event.comment.id }}
          COMMENT_BODY: ${{ gitea.event.comment.body }}
        run: |
          cd .ai-review/tools/ai-review
          # Validate inputs
          if [ -z "$REPO" ] || [ -z "$ISSUE_NUMBER" ]; then
            echo "Error: Missing required parameters"
            exit 1
          fi
          # Validate repository format (owner/repo)
          if ! echo "$REPO" | grep -qE '^[a-zA-Z0-9_-]+/[a-zA-Z0-9_-]+$'; then
            echo "Error: Invalid repository format: $REPO"
            exit 1
          fi
          if [ "$IS_PR" = "true" ]; then
            # PR comment: build a minimal event payload (no sensitive user data).
            # jq --arg JSON-escapes the comment body safely; never splice the raw
            # body into a heredoc/JSON template.
            EVENT_DATA=$(jq -n \
              --argjson number "$ISSUE_NUMBER" \
              --argjson cid "$COMMENT_ID" \
              --arg body "$COMMENT_BODY" \
              '{action: "created", issue: {number: $number, pull_request: {}}, comment: {id: $cid, body: $body}}')
            # Use safe dispatch utility
            python utils/safe_dispatch.py issue_comment "$REPO" "$EVENT_DATA"
          else
            # This is an issue comment - use the comment command
            python main.py comment "$REPO" "$ISSUE_NUMBER" "$COMMENT_BODY"
          fi

View File

@@ -0,0 +1,44 @@
name: AI Issue Triage

# WORKFLOW ROUTING:
# This workflow handles ONLY the 'triage' command
# Other workflows: ai-comment-reply.yml (specific commands), ai-chat.yml (free-form questions)
on:
  issue_comment:
    types: [created]

jobs:
  ai-triage:
    runs-on: ubuntu-latest
    # Only run if comment contains @codebot triage
    # CRITICAL: Ignore bot's own comments to prevent infinite loops (bot username: Bartender)
    if: |
      github.event.comment.user.login != 'Bartender' &&
      contains(github.event.comment.body, '@codebot triage')
    steps:
      # Checkout the repository the comment belongs to
      - uses: actions/checkout@v4
      # Checkout the central AI tooling repo
      - uses: actions/checkout@v4
        with:
          repository: Hiddenden/openrabbit
          path: .ai-review
          token: ${{ secrets.AI_REVIEW_TOKEN }}
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      - run: pip install requests pyyaml
      - name: Run AI Issue Triage
        env:
          AI_REVIEW_TOKEN: ${{ secrets.AI_REVIEW_TOKEN }}
          AI_REVIEW_REPO: ${{ gitea.repository }}
          AI_REVIEW_API_URL: https://git.hiddenden.cafe/api/v1
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}
          OLLAMA_HOST: ${{ secrets.OLLAMA_HOST }}
          # SECURITY: pass event values via env vars, not ${{ }} interpolation
          # inside run (shell command-injection hardening).
          REPO: ${{ gitea.repository }}
          ISSUE_NUMBER: ${{ gitea.event.issue.number }}
        run: |
          cd .ai-review/tools/ai-review
          python main.py issue "$REPO" "$ISSUE_NUMBER"

View File

@@ -0,0 +1,53 @@
name: Enterprise AI Code Review
on:
  pull_request:
    types: [opened, synchronize]
jobs:
  ai-review:
    runs-on: ubuntu-latest
    steps:
      # Checkout the PR repository
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      # Checkout the CENTRAL AI tooling repo
      - uses: actions/checkout@v4
        with:
          repository: Hiddenden/openrabbit
          path: .ai-review
          token: ${{ secrets.AI_REVIEW_TOKEN }}
      # Setup Python
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      # Install dependencies
      - run: pip install requests pyyaml
      # Run the AI review
      - name: Run Enterprise AI Review
        env:
          AI_REVIEW_TOKEN: ${{ secrets.AI_REVIEW_TOKEN }}
          AI_REVIEW_REPO: ${{ gitea.repository }}
          AI_REVIEW_API_URL: https://git.hiddenden.cafe/api/v1
          AI_REVIEW_PR_NUMBER: ${{ gitea.event.pull_request.number }}
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}
          OLLAMA_HOST: ${{ secrets.OLLAMA_HOST }}
          # SECURITY: the PR title is attacker-controlled; pass it via env so a
          # title like `"; curl evil | sh; "` cannot inject into the shell.
          REPO: ${{ gitea.repository }}
          PR_NUMBER: ${{ gitea.event.pull_request.number }}
          PR_TITLE: ${{ gitea.event.pull_request.title }}
        run: |
          cd .ai-review/tools/ai-review
          python main.py pr "$REPO" "$PR_NUMBER" \
            --title "$PR_TITLE"
      # Fail CI on HIGH severity (optional)
      - name: Check Review Result
        if: failure()
        run: |
          echo "AI Review found HIGH severity issues. Please address them before merging."
          exit 1

View File

@@ -1,8 +1,12 @@
# Serve the DevDen frontend from a minimal nginx image.
FROM nginx:alpine

# Custom server config: API/auth proxying, SPA fallback, asset caching.
COPY default.conf /etc/nginx/conf.d/default.conf

# Static assets go into nginx's default docroot. A single COPY keeps the
# image to one static-content layer; contents are identical to copying
# each file individually.
COPY index.html auth-callback.html style.css script.js /usr/share/nginx/html/

View File

@@ -26,11 +26,14 @@ cp .env.example .env
```
3. **Set up Microsoft Entra ID** (Azure AD)
- Go to [Azure Portal - App Registrations](https://portal.azure.com/#view/Microsoft_AAD_RegisteredApps)
- Create a new app registration
- Add redirect URI: `http://localhost:3000/auth/callback`
- Generate a client secret under "Certificates & secrets"
- Copy Tenant ID, Client ID, and Client Secret to your `.env` file
- Sign in to the [Azure Portal - App Registrations](https://portal.azure.com/#view/Microsoft_AAD_RegisteredApps) page using an account that can manage Entra applications.
- Create a new app registration named something like “DevDen Auth” and choose “Accounts in this organizational directory only” (single tenant) unless you explicitly need multi-tenant access.
- Under **Authentication**, register the redirect URI `http://localhost:3000/auth/callback` (or your deployed URL if you are not running locally) and enable the **ID tokens (used for implicit flows)** checkbox.
- Click **Save**, then go to **API permissions** and add the **Microsoft Graph > User.Read (delegated)** permission, clicking **Grant admin consent** afterward so DevDen can read the signed-in user's profile.
- Open **Certificates & secrets**, create a new client secret, give it a descriptive name, and copy the value immediately (it is hidden after leaving the page).
- Capture the Tenant ID, Client ID, and the client secret value and paste them into your `.env` as `ENTRA_TENANT_ID`, `ENTRA_CLIENT_ID`, and `ENTRA_CLIENT_SECRET`. If you changed the redirect URI, also update `ENTRA_REDIRECT_URI` to match what you entered in Entra ID.
- Optionally configure application roles and group claims if you want to limit access to specific users/groups, then run `docker compose down && docker compose up -d --build` to ensure the backend reloads the updated secret values.
- Test the login flow by visiting `http://localhost:3000`, clicking **Login with Microsoft**, and confirming you are redirected back with a chat session. If you see a “redirect_uri_mismatch” error, double-check the URI in both the portal and your `.env` file.
4. **Start the services**
```bash

47
auth-callback.html Normal file
View File

@@ -0,0 +1,47 @@
<!-- OAuth interstitial page: nginx serves this at /auth/callback (see
     default.conf). It shows a spinner, then immediately bounces the browser
     back to the SPA root while preserving the OAuth query parameters
     (?code=...&state=...) so the frontend can finish the token exchange. -->
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>DevDen - Authentication</title>
<style>
/* Catppuccin-style dark palette, centered spinner layout. */
body {
margin: 0;
padding: 0;
display: flex;
justify-content: center;
align-items: center;
min-height: 100vh;
background: #1e1e2e;
color: #cdd6f4;
font-family: monospace;
}
.loader {
text-align: center;
}
.spinner {
border: 3px solid #313244;
border-top: 3px solid #cba6f7;
border-radius: 50%;
width: 40px;
height: 40px;
animation: spin 1s linear infinite;
margin: 0 auto 20px;
}
@keyframes spin {
0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}
</style>
</head>
<body>
<div class="loader">
<div class="spinner"></div>
<p>Completing authentication...</p>
</div>
<script>
// Redirect to root with query parameters preserved
// (window.location.search includes the leading '?', or is '' if absent).
window.location.href = '/' + window.location.search;
</script>
</body>
</html>

View File

@@ -1,3 +1,4 @@
import logging
from datetime import datetime, timedelta, timezone
import jwt
@@ -13,30 +14,60 @@ from ..models.schemas import (
UserResponse,
)
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
handlers=[
logging.FileHandler('/app/auth.log'),
logging.StreamHandler()
]
)
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/api/auth", tags=["auth"])
security = HTTPBearer(auto_error=False)
def get_msal_app():
"""Create MSAL confidential client application"""
if not all(
[
settings.ENTRA_TENANT_ID,
settings.ENTRA_CLIENT_ID,
settings.ENTRA_CLIENT_SECRET,
logger.info("Checking MSAL configuration")
required_settings = [
("ENTRA_TENANT_ID", settings.ENTRA_TENANT_ID),
("ENTRA_CLIENT_ID", settings.ENTRA_CLIENT_ID),
("ENTRA_CLIENT_SECRET", settings.ENTRA_CLIENT_SECRET),
]
):
missing_settings = [name for name, value in required_settings if not value]
if missing_settings:
logger.error(f"Missing required Entra ID settings: {missing_settings}")
return None
return msal.ConfidentialClientApplication(
logger.info("All Entra ID settings present, creating MSAL app")
try:
msal_app = msal.ConfidentialClientApplication(
client_id=settings.ENTRA_CLIENT_ID,
client_credential=settings.ENTRA_CLIENT_SECRET,
authority=f"https://login.microsoftonline.com/{settings.ENTRA_TENANT_ID}",
)
logger.info("MSAL application created successfully")
return msal_app
except Exception as e:
logger.error(f"Failed to create MSAL application: {e}")
return None
def create_jwt_token(user_data: dict) -> str:
"""Create JWT token with user data"""
logger.info("Creating JWT token", {
"user_id": user_data.get("oid") or user_data.get("sub"),
"user_name": user_data.get("name"),
"user_email": user_data.get("preferred_username")
})
try:
payload = {
"sub": user_data.get("oid") or user_data.get("sub"),
"name": user_data.get("name"),
@@ -44,74 +75,145 @@ def create_jwt_token(user_data: dict) -> str:
"exp": datetime.now(timezone.utc) + timedelta(hours=settings.JWT_EXPIRY_HOURS),
"iat": datetime.now(timezone.utc),
}
return jwt.encode(payload, settings.JWT_SECRET, algorithm=settings.JWT_ALGORITHM)
token = jwt.encode(payload, settings.JWT_SECRET, algorithm=settings.JWT_ALGORITHM)
logger.info("JWT token created successfully", {
"expires_in_hours": settings.JWT_EXPIRY_HOURS,
"algorithm": settings.JWT_ALGORITHM
})
return token
except Exception as e:
logger.error(f"Failed to create JWT token: {e}")
raise
def decode_jwt_token(token: str) -> dict:
"""Decode and validate JWT token"""
logger.info("Decoding JWT token")
try:
payload = jwt.decode(
token, settings.JWT_SECRET, algorithms=[settings.JWT_ALGORITHM]
)
logger.info("JWT token decoded successfully", {
"user_id": payload.get("sub"),
"user_name": payload.get("name"),
"expires_at": datetime.fromtimestamp(payload.get("exp", 0), timezone.utc).isoformat()
})
return payload
except jwt.ExpiredSignatureError:
except jwt.ExpiredSignatureError as e:
logger.warning("JWT token expired", {"error": str(e)})
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail="Token expired"
)
except jwt.InvalidTokenError:
except jwt.InvalidTokenError as e:
logger.warning("Invalid JWT token", {"error": str(e)})
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid token"
)
except Exception as e:
logger.error(f"Unexpected error decoding JWT token: {e}")
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail="Token validation failed"
)
async def get_current_user(
credentials: HTTPAuthorizationCredentials = Depends(security),
) -> dict:
"""Dependency to get current user from JWT token"""
logger.info("Getting current user from credentials")
if not credentials:
logger.warning("No credentials provided")
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail="Not authenticated"
)
logger.info("Credentials found, decoding token")
return decode_jwt_token(credentials.credentials)
@router.get("/login", response_model=AuthUrlResponse)
async def login():
"""Get Microsoft OAuth2 authorization URL"""
logger.info("Login endpoint called")
msal_app = get_msal_app()
if not msal_app:
logger.error("MSAL app not available for login")
raise HTTPException(
status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
detail="Authentication not configured. Please set ENTRA_TENANT_ID, ENTRA_CLIENT_ID, and ENTRA_CLIENT_SECRET.",
)
try:
logger.info("Generating authorization URL", {
"scopes": ["User.Read"],
"redirect_uri": settings.ENTRA_REDIRECT_URI
})
auth_url = msal_app.get_authorization_request_url(
scopes=["User.Read"], redirect_uri=settings.ENTRA_REDIRECT_URI
)
logger.info("Authorization URL generated successfully", {
"url_length": len(auth_url),
"url_start": auth_url[:100] + "..." if len(auth_url) > 100 else auth_url
})
return AuthUrlResponse(auth_url=auth_url)
except Exception as e:
logger.error(f"Failed to generate authorization URL: {e}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Failed to generate authorization URL"
)
@router.post("/callback", response_model=AuthCallbackResponse)
async def callback(request: AuthCallbackRequest):
"""Exchange authorization code for tokens"""
logger.info("Callback endpoint called", {
"code_length": len(request.code) if request.code else 0,
"code_start": request.code[:50] + "..." if request.code and len(request.code) > 50 else request.code
})
msal_app = get_msal_app()
if not msal_app:
logger.error("MSAL app not available for callback")
raise HTTPException(
status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
detail="Authentication not configured",
)
try:
logger.info("Exchanging authorization code for tokens", {
"scopes": ["User.Read"],
"redirect_uri": settings.ENTRA_REDIRECT_URI
})
result = msal_app.acquire_token_by_authorization_code(
code=request.code,
scopes=["User.Read"],
redirect_uri=settings.ENTRA_REDIRECT_URI,
)
logger.info("Token exchange result", {
"has_access_token": "access_token" in result,
"has_id_token": "id_token" in result,
"has_error": "error" in result,
"error": result.get("error"),
"error_description": result.get("error_description")
})
if "error" in result:
logger.error("Token exchange failed", {
"error": result.get("error"),
"error_description": result.get("error_description"),
"correlation_id": result.get("correlation_id")
})
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=f"Authentication failed: {result.get('error_description', result.get('error'))}",
@@ -119,11 +221,17 @@ async def callback(request: AuthCallbackRequest):
# Extract user info from ID token claims
id_token_claims = result.get("id_token_claims", {})
logger.info("ID token claims extracted", {
"claims_keys": list(id_token_claims.keys()),
"user_id": id_token_claims.get("oid") or id_token_claims.get("sub"),
"user_name": id_token_claims.get("name"),
"user_email": id_token_claims.get("preferred_username")
})
# Create our JWT token
token = create_jwt_token(id_token_claims)
return AuthCallbackResponse(
response_data = AuthCallbackResponse(
token=token,
user=UserResponse(
id=id_token_claims.get("oid") or id_token_claims.get("sub"),
@@ -132,10 +240,32 @@ async def callback(request: AuthCallbackRequest):
),
)
logger.info("Callback completed successfully", {
"user_id": response_data.user.id,
"user_name": response_data.user.name
})
return response_data
except HTTPException:
# Re-raise HTTP exceptions as-is
raise
except Exception as e:
logger.error(f"Unexpected error in callback: {e}", {"traceback": str(e)})
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Internal server error during authentication"
)
@router.get("/me", response_model=UserResponse)
async def me(current_user: dict = Depends(get_current_user)):
"""Get current user info"""
logger.info("Me endpoint called", {
"user_id": current_user.get("sub"),
"user_name": current_user.get("name")
})
return UserResponse(
id=current_user.get("sub"),
name=current_user.get("name"),
@@ -146,12 +276,15 @@ async def me(current_user: dict = Depends(get_current_user)):
@router.post("/logout")
async def logout():
"""Logout (client should clear token)"""
logger.info("Logout endpoint called")
return {"message": "Logged out successfully"}
@router.get("/status")
async def auth_status():
"""Check if authentication is configured"""
logger.info("Auth status endpoint called")
configured = all(
[
settings.ENTRA_TENANT_ID,
@@ -159,4 +292,14 @@ async def auth_status():
settings.ENTRA_CLIENT_SECRET,
]
)
status_info = {
"configured": configured,
"has_tenant_id": bool(settings.ENTRA_TENANT_ID),
"has_client_id": bool(settings.ENTRA_CLIENT_ID),
"has_client_secret": bool(settings.ENTRA_CLIENT_SECRET),
}
logger.info("Auth status checked", status_info)
return {"configured": configured}

View File

@@ -9,7 +9,14 @@ from .config import settings
from .services.provider_manager import provider_manager
# Setup logging
logging.basicConfig(level=logging.INFO)
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
handlers=[
logging.FileHandler('/app/devden.log'),
logging.StreamHandler()
]
)
logger = logging.getLogger(__name__)
app = FastAPI(
@@ -30,9 +37,25 @@ app.include_router(auth.router)
app.include_router(chat.router)
@app.exception_handler(Exception)
async def global_exception_handler(request, exc):
"""Global exception handler to log all errors"""
logger.error(f"Unhandled exception: {exc}", {
"url": str(request.url),
"method": request.method,
"headers": dict(request.headers),
"traceback": str(exc)
})
return JSONResponse(
status_code=500,
content={"detail": "Internal server error"}
)
@app.get("/health")
async def health_check():
"""Health check endpoint"""
logger.info("Health check requested")
return JSONResponse(
content={
"status": "healthy",
@@ -41,6 +64,18 @@ async def health_check():
)
@app.get("/logs")
async def get_logs():
"""Get recent log entries (for debugging)"""
try:
with open('/app/devden.log', 'r') as f:
lines = f.readlines()[-50:] # Last 50 lines
return {"logs": lines}
except Exception as e:
logger.error(f"Failed to read logs: {e}")
return {"error": "Failed to read logs"}
@app.on_event("startup")
async def startup_event():
logger.info("DevDen API starting up...")

56
default.conf Normal file
View File

@@ -0,0 +1,56 @@
# Frontend nginx vhost: serves the SPA's static files and proxies API/auth
# traffic to the FastAPI backend (docker-compose service "backend", port 8000).
server {
listen 80;
server_name localhost;
root /usr/share/nginx/html;
index index.html;
# Proxy API requests to backend
location /api/ {
proxy_pass http://backend:8000;
proxy_http_version 1.1;
# NOTE(review): Upgrade/Connection are set unconditionally; that is the
# WebSocket pattern, but it forces "Connection: upgrade" on plain requests
# too. If non-WS /api/ calls misbehave, switch to the map-based
# $connection_upgrade idiom — confirm whether the backend serves WS here.
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_cache_bypass $http_upgrade;
# Support for SSE (Server-Sent Events)
proxy_buffering off;
proxy_cache off;
}
# OAuth callback endpoint - redirect to root with query params
# Exact-match location: serves the static interstitial page, which then
# client-side redirects to / with the ?code=...&state=... intact.
location = /auth/callback {
try_files /auth-callback.html =404;
}
# Proxy other auth requests to backend
location /auth/ {
proxy_pass http://backend:8000;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
# Enable SPA routing - try to serve the file, then directory, then fallback to index.html
location / {
try_files $uri $uri/ /index.html;
}
# Cache static assets for better performance
# NOTE(review): regex locations take precedence over the plain "location /"
# prefix, so every *.js/*.css/image request is handled here; a missing asset
# returns nginx's default 404 (no try_files fallback), which is the usual
# intent for fingerprinted assets.
location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg)$ {
expires 1y;
add_header Cache-Control "public, immutable";
}
# Health check endpoint
# Prefix match — regex locations above cannot shadow it because "/health"
# has no matching file extension.
location /health {
access_log off;
return 200 "healthy\n";
add_header Content-Type text/plain;
}
}

View File

@@ -25,11 +25,11 @@ services:
- DEFAULT_PROVIDER=${DEFAULT_PROVIDER:-claude}
- CLAUDE_MODEL=${CLAUDE_MODEL:-claude-3-5-sonnet-20241022}
- OPENAI_MODEL=${OPENAI_MODEL:-gpt-4-turbo-preview}
- FRONTEND_URL=http://localhost:3000
- FRONTEND_URL=https://devden.hiddenden.cafe
- ENTRA_TENANT_ID=${ENTRA_TENANT_ID}
- ENTRA_CLIENT_ID=${ENTRA_CLIENT_ID}
- ENTRA_CLIENT_SECRET=${ENTRA_CLIENT_SECRET}
- ENTRA_REDIRECT_URI=${ENTRA_REDIRECT_URI:-http://localhost:3000/auth/callback}
- ENTRA_REDIRECT_URI=https://devden.hiddenden.cafe/auth/callback
- JWT_SECRET=${JWT_SECRET:-change-this-in-production}
- JWT_EXPIRY_HOURS=${JWT_EXPIRY_HOURS:-24}
env_file:

764
get-docker.sh Normal file
View File

@@ -0,0 +1,764 @@
#!/bin/sh
set -e
# Docker Engine for Linux installation script.
#
# This script is intended as a convenient way to configure docker's package
# repositories and to install Docker Engine. This script is not recommended
# for production environments. Before running this script, make yourself familiar
# with potential risks and limitations, and refer to the installation manual
# at https://docs.docker.com/engine/install/ for alternative installation methods.
#
# The script:
#
# - Requires `root` or `sudo` privileges to run.
# - Attempts to detect your Linux distribution and version and configure your
# package management system for you.
# - Doesn't allow you to customize most installation parameters.
# - Installs dependencies and recommendations without asking for confirmation.
# - Installs the latest stable release (by default) of Docker CLI, Docker Engine,
# Docker Buildx, Docker Compose, containerd, and runc. When using this script
# to provision a machine, this may result in unexpected major version upgrades
# of these packages. Always test upgrades in a test environment before
# deploying to your production systems.
# - Isn't designed to upgrade an existing Docker installation. When using the
# script to update an existing installation, dependencies may not be updated
# to the expected version, resulting in outdated versions.
#
# Source code is available at https://github.com/docker/docker-install/
#
# Usage
# ==============================================================================
#
# To install the latest stable versions of Docker CLI, Docker Engine, and their
# dependencies:
#
# 1. download the script
#
# $ curl -fsSL https://get.docker.com -o install-docker.sh
#
# 2. verify the script's content
#
# $ cat install-docker.sh
#
# 3. run the script with --dry-run to verify the steps it executes
#
# $ sh install-docker.sh --dry-run
#
# 4. run the script either as root, or using sudo to perform the installation.
#
# $ sudo sh install-docker.sh
#
# Command-line options
# ==============================================================================
#
# --version <VERSION>
# Use the --version option to install a specific version, for example:
#
# $ sudo sh install-docker.sh --version 23.0
#
# --channel <stable|test>
#
# Use the --channel option to install from an alternative installation channel.
# The following example installs the latest versions from the "test" channel,
# which includes pre-releases (alpha, beta, rc):
#
# $ sudo sh install-docker.sh --channel test
#
# Alternatively, use the script at https://test.docker.com, which uses the test
# channel as default.
#
# --mirror <Aliyun|AzureChinaCloud>
#
# Use the --mirror option to install from a mirror supported by this script.
# Available mirrors are "Aliyun" (https://mirrors.aliyun.com/docker-ce), and
# "AzureChinaCloud" (https://mirror.azure.cn/docker-ce), for example:
#
# $ sudo sh install-docker.sh --mirror AzureChinaCloud
#
# --setup-repo
#
# Use the --setup-repo option to configure Docker's package repositories without
# installing Docker packages. This is useful when you want to add the repository
# but install packages separately:
#
# $ sudo sh install-docker.sh --setup-repo
#
# Automatic Service Start
#
# By default, this script automatically starts the Docker daemon and enables the docker
# service after installation if systemd is used as init.
#
# If you prefer to start the service manually, use the --no-autostart option:
#
# $ sudo sh install-docker.sh --no-autostart
#
# Note: Starting the service requires appropriate privileges to manage system services.
#
# ==============================================================================
# Git commit from https://github.com/docker/docker-install when
# the script was uploaded (Should only be modified by upload job):
SCRIPT_COMMIT_SHA="f381ee68b32e515bb4dc034b339266aff1fbc460"

# strip "v" prefix if present
VERSION="${VERSION#v}"

# The channel to install from:
# * stable
# * test
DEFAULT_CHANNEL_VALUE="stable"
if [ -z "$CHANNEL" ]; then
	CHANNEL=$DEFAULT_CHANNEL_VALUE
fi

# Package mirror base URL; overridable via the DOWNLOAD_URL env var
# (and later by the --mirror option).
DEFAULT_DOWNLOAD_URL="https://download.docker.com"
if [ -z "$DOWNLOAD_URL" ]; then
	DOWNLOAD_URL=$DEFAULT_DOWNLOAD_URL
fi

# Name of the rpm repo file fetched for dnf/yum based distros.
DEFAULT_REPO_FILE="docker-ce.repo"
if [ -z "$REPO_FILE" ]; then
	REPO_FILE="$DEFAULT_REPO_FILE"
	# Automatically default to a staging repo for
	# a staging download url (download-stage.docker.com)
	case "$DOWNLOAD_URL" in
		*-stage*) REPO_FILE="docker-ce-staging.repo";;
	esac
fi
# Runtime flags: environment values are honoured as defaults, the CLI
# options parsed below override them.
mirror=''
DRY_RUN=${DRY_RUN:-}
REPO_ONLY=${REPO_ONLY:-0}
NO_AUTOSTART=${NO_AUTOSTART:-0}

# Parse command-line options. Options that take a value (--channel,
# --mirror, --version) shift their argument inside the case; the shared,
# guarded `shift` at the bottom of the loop consumes the option itself.
while [ $# -gt 0 ]; do
	case "$1" in
		--channel)
			CHANNEL="$2"
			shift
			;;
		--dry-run)
			DRY_RUN=1
			;;
		--mirror)
			mirror="$2"
			shift
			;;
		--version)
			VERSION="${2#v}"
			shift
			;;
		--setup-repo)
			# FIX: --setup-repo takes no argument, so it must not shift
			# here. The previous extra `shift`, combined with the shared
			# shift below, silently swallowed the option that followed
			# (e.g. `--setup-repo --dry-run` dropped --dry-run).
			REPO_ONLY=1
			;;
		--no-autostart)
			NO_AUTOSTART=1
			;;
		--*)
			# Unknown options are reported but not fatal.
			echo "Illegal option $1"
			;;
	esac
	# Guarded shift: never error out when the last argument was consumed
	# by an option that took a value.
	shift $(( $# > 0 ? 1 : 0 ))
done

# Resolve --mirror into a concrete download URL.
case "$mirror" in
	Aliyun)
		DOWNLOAD_URL="https://mirrors.aliyun.com/docker-ce"
		;;
	AzureChinaCloud)
		DOWNLOAD_URL="https://mirror.azure.cn/docker-ce"
		;;
	"")
		;;
	*)
		>&2 echo "unknown mirror '$mirror': use either 'Aliyun', or 'AzureChinaCloud'."
		exit 1
		;;
esac

# Validate the release channel.
case "$CHANNEL" in
	stable|test)
		;;
	*)
		>&2 echo "unknown CHANNEL '$CHANNEL': use either stable or test."
		exit 1
		;;
esac
command_exists() {
command -v "$@" > /dev/null 2>&1
}
# version_gte checks if the version specified in $VERSION is at least the given
# SemVer (Maj.Minor[.Patch]), or CalVer (YY.MM) version. It returns 0 (success)
# if $VERSION is either unset (=latest) or newer than or equal to the specified
# version, or returns 1 (fail) otherwise.
#
# examples:
#
# VERSION=23.0
# version_gte 23.0 // 0 (success)
# version_gte 20.10 // 0 (success)
# version_gte 19.03 // 0 (success)
# version_gte 26.1 // 1 (fail)
# version_gte MIN: succeed when the globally requested $VERSION is at
# least MIN. An unset/empty $VERSION means "latest", which satisfies any
# minimum; otherwise the comparison is delegated to version_compare.
version_gte() {
	[ -z "$VERSION" ] && return 0
	version_compare "$VERSION" "$1"
}
# version_compare compares two version strings (either SemVer (Major.Minor.Patch),
# or CalVer (YY.MM) version strings). It returns 0 (success) if version A is newer
# than or equal to version B, or 1 (fail) otherwise. Patch releases and pre-release
# (-alpha/-beta) are not taken into account
#
# examples:
#
# version_compare 23.0.0 20.10 // 0 (success)
# version_compare 23.0 20.10 // 0 (success)
# version_compare 20.10 19.03 // 0 (success)
# version_compare 20.10 20.10 // 0 (success)
# version_compare 19.03 20.10 // 1 (fail)
# version_compare A B: succeed (0) when version A is newer than or equal
# to version B, comparing only the major and minor components of a SemVer
# (Maj.Minor[.Patch]) or CalVer (YY.MM) string. Patch levels and
# pre-release suffixes are ignored. Runs in a subshell so `set +x` and
# the scratch variables don't leak into the caller.
version_compare() (
	set +x
	# Major components: everything before the first dot.
	major_a="${1%%.*}"
	major_b="${2%%.*}"
	if [ "$major_a" -lt "$major_b" ]; then
		return 1
	elif [ "$major_a" -gt "$major_b" ]; then
		return 0
	fi
	# Majors are equal: extract the minors. Like `cut -d. -f2`, a version
	# with no dot yields the whole string as its "minor" component.
	case "$1" in
		*.*) minor_a="${1#*.}"; minor_a="${minor_a%%.*}" ;;
		*)   minor_a="$1" ;;
	esac
	case "$2" in
		*.*) minor_b="${2#*.}"; minor_b="${minor_b%%.*}" ;;
		*)   minor_b="$2" ;;
	esac
	# Trim one leading zero so CalVer months ("05") compare numerically.
	minor_a="${minor_a#0}"
	minor_b="${minor_b#0}"
	[ "${minor_a:-0}" -ge "${minor_b:-0}" ]
)
# is_dry_run: succeed (0) when DRY_RUN holds a non-empty value, meaning
# commands should be echoed instead of executed.
is_dry_run() {
	test -n "$DRY_RUN"
}
# is_wsl: succeed when the kernel release string identifies Windows
# Subsystem for Linux ("microsoft" for WSL 2, "Microsoft" for WSL 1).
is_wsl() {
	uname -r | grep -qE 'microsoft|Microsoft'
}
# is_darwin: succeed when the kernel name reports macOS (Darwin).
is_darwin() {
	uname -s | grep -qE 'darwin|Darwin'
}
# deprecation_notice DISTRO VERSION: warn that the detected distribution
# release is EOL and unsupported by this script, then pause 10 seconds to
# give the user a chance to abort with Ctrl+C before installation continues.
deprecation_notice() {
	distro=$1
	distro_version=$2
	echo
	# \033[91;1m = bright red + bold; \033[0m resets attributes.
	printf "\033[91;1mDEPRECATION WARNING\033[0m\n"
	printf " This Linux distribution (\033[1m%s %s\033[0m) reached end-of-life and is no longer supported by this script.\n" "$distro" "$distro_version"
	echo " No updates or security fixes will be released for this distribution, and users are recommended"
	echo " to upgrade to a currently maintained version of $distro."
	echo
	printf "Press \033[1mCtrl+C\033[0m now to abort this script, or wait for the installation to continue."
	echo
	sleep 10
}
# get_distribution: print the distro ID from /etc/os-release (e.g.
# "ubuntu", "debian", "fedora"); prints an empty string when the file is
# missing or unreadable.
get_distribution() {
	lsb_dist=""
	# Every system that we officially support has /etc/os-release
	if [ -r /etc/os-release ]; then
		lsb_dist="$(. /etc/os-release && echo "$ID")"
	fi
	# Returning an empty string here should be alright since the
	# case statements don't act unless you provide an actual value
	echo "$lsb_dist"
}
# start_docker_daemon: enable and start the docker service via systemd
# when systemctl is available; otherwise just explain that the daemon
# cannot be auto-started (e.g. inside a container). Uses $sh_c (the
# privilege-escalation wrapper set up by do_install) and honours dry-run.
start_docker_daemon() {
	# Use systemctl if available (for systemd-based systems)
	if command_exists systemctl; then
		is_dry_run || >&2 echo "Using systemd to manage Docker service"
		if (
			is_dry_run || set -x
			$sh_c systemctl enable --now docker.service 2>/dev/null
		); then
			is_dry_run || echo "INFO: Docker daemon enabled and started" >&2
		else
			# Non-fatal: packages are installed even if the service
			# could not be enabled.
			is_dry_run || echo "WARNING: unable to enable the docker service" >&2
		fi
	else
		# No service management available (container environment)
		if ! is_dry_run; then
			>&2 echo "Note: Running in a container environment without service management"
			>&2 echo "Docker daemon cannot be started automatically in this environment"
			>&2 echo "The Docker packages have been installed successfully"
		fi
	fi
	>&2 echo
}
# echo_docker_as_nonroot: after a successful install, run a sanity-check
# `docker version` (when the daemon socket exists) and print guidance on
# rootless mode and the security implications of daemon access. No-op in
# dry-run mode.
echo_docker_as_nonroot() {
	if is_dry_run; then
		return
	fi
	# Best-effort smoke test; failure is ignored (daemon may not be up yet).
	if command_exists docker && [ -e /var/run/docker.sock ]; then
		(
			set -x
			$sh_c 'docker version'
		) || true
	fi
	# intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output
	echo
	echo "================================================================================"
	echo
	# Rootless mode only exists from 20.10 onwards.
	if version_gte "20.10"; then
		echo "To run Docker as a non-privileged user, consider setting up the"
		echo "Docker daemon in rootless mode for your user:"
		echo
		echo " dockerd-rootless-setuptool.sh install"
		echo
		echo "Visit https://docs.docker.com/go/rootless/ to learn about rootless mode."
		echo
	fi
	echo
	echo "To run the Docker daemon as a fully privileged service, but granting non-root"
	echo "users access, refer to https://docs.docker.com/go/daemon-access/"
	echo
	echo "WARNING: Access to the remote API on a privileged Docker daemon is equivalent"
	echo " to root access on the host. Refer to the 'Docker daemon attack surface'"
	echo " documentation for details: https://docs.docker.com/go/attack-surface/"
	echo
	echo "================================================================================"
	echo
}
# Check if this is a forked Linux distro
# check_forked: forked distros usually ship an lsb_release that supports
# `-u` (upstream info). When that works, rewrite the global $lsb_dist /
# $dist_version to the upstream values so the correct Docker repo is
# selected. Otherwise, map Debian-derivatives that self-identify oddly
# (e.g. OSMC) onto debian/raspbian and translate the numeric
# /etc/debian_version into a release codename.
check_forked() {
	# Check for lsb_release command existence, it usually exists in forked distros
	if command_exists lsb_release; then
		# Check if the `-u` option is supported
		set +e
		lsb_release -a -u > /dev/null 2>&1
		lsb_release_exit_code=$?
		set -e
		# Check if the command has exited successfully, it means we're in a forked distro
		if [ "$lsb_release_exit_code" = "0" ]; then
			# Print info about current distro
			cat <<-EOF
			You're using '$lsb_dist' version '$dist_version'.
			EOF
			# Get the upstream release info
			lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[:space:]')
			dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[:space:]')
			# Print info about upstream distro
			cat <<-EOF
			Upstream release is '$lsb_dist' version '$dist_version'.
			EOF
		else
			if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then
				if [ "$lsb_dist" = "osmc" ]; then
					# OSMC runs Raspbian
					lsb_dist=raspbian
				else
					# We're Debian and don't even know it!
					lsb_dist=debian
				fi
				# Map the numeric /etc/debian_version onto its codename.
				dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')"
				case "$dist_version" in
					13|14|forky)
						dist_version="trixie"
					;;
					12)
						dist_version="bookworm"
					;;
					11)
						dist_version="bullseye"
					;;
					10)
						dist_version="buster"
					;;
					9)
						dist_version="stretch"
					;;
					8)
						dist_version="jessie"
					;;
				esac
			fi
		fi
	fi
}
# do_install: main entry point. Detects the distribution and release,
# warns about EOL releases, configures Docker's package repository for
# the detected distro family (apt vs dnf/yum), and installs the Docker
# Engine packages (optionally pinned to $VERSION). Honours the globals
# DRY_RUN, REPO_ONLY and NO_AUTOSTART set by the CLI parser.
do_install() {
	echo "# Executing docker install script, commit: $SCRIPT_COMMIT_SHA"

	# Re-running over an existing install resets repo configuration, so
	# warn and give the user a 20-second window to abort.
	if command_exists docker; then
		cat >&2 <<-'EOF'
		Warning: the "docker" command appears to already exist on this system.
		If you already have Docker installed, this script can cause trouble, which is
		why we're displaying this warning and provide the opportunity to cancel the
		installation.
		If you installed the current Docker package using this script and are using it
		again to update Docker, you can ignore this message, but be aware that the
		script resets any custom changes in the deb and rpm repo configuration
		files to match the parameters passed to the script.
		You may press Ctrl+C now to abort this script.
		EOF
		( set -x; sleep 20 )
	fi

	# Build a privilege-escalation wrapper ($sh_c) for all system
	# commands: plain sh when root, otherwise sudo or su.
	user="$(id -un 2>/dev/null || true)"
	sh_c='sh -c'
	if [ "$user" != 'root' ]; then
		if command_exists sudo; then
			sh_c='sudo -E sh -c'
		elif command_exists su; then
			sh_c='su -c'
		else
			cat >&2 <<-'EOF'
			Error: this installer needs the ability to run commands as root.
			We are unable to find either "sudo" or "su" available to make this happen.
			EOF
			exit 1
		fi
	fi
	# In dry-run mode, commands are echoed instead of executed.
	if is_dry_run; then
		sh_c="echo"
	fi

	# perform some very rudimentary platform detection
	lsb_dist=$( get_distribution )
	lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"

	# WSL users are steered towards Docker Desktop (20s window to abort).
	if is_wsl; then
		echo
		echo "WSL DETECTED: We recommend using Docker Desktop for Windows."
		echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop/"
		echo
		cat >&2 <<-'EOF'
		You may press Ctrl+C now to abort this script.
		EOF
		( set -x; sleep 20 )
	fi

	# Resolve $dist_version (codename for deb-family, VERSION_ID elsewhere).
	case "$lsb_dist" in
		ubuntu)
			if command_exists lsb_release; then
				dist_version="$(lsb_release --codename | cut -f2)"
			fi
			if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then
				dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")"
			fi
		;;
		debian|raspbian)
			dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')"
			case "$dist_version" in
				13)
					dist_version="trixie"
				;;
				12)
					dist_version="bookworm"
				;;
				11)
					dist_version="bullseye"
				;;
				10)
					dist_version="buster"
				;;
				9)
					dist_version="stretch"
				;;
				8)
					dist_version="jessie"
				;;
			esac
		;;
		centos|rhel)
			if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
				dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
			fi
		;;
		*)
			if command_exists lsb_release; then
				dist_version="$(lsb_release --release | cut -f2)"
			fi
			if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
				dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
			fi
		;;
	esac

	# Check if this is a forked Linux distro
	check_forked

	# Print deprecation warnings for distro versions that recently reached EOL,
	# but may still be commonly used (especially LTS versions).
	case "$lsb_dist.$dist_version" in
		centos.8|centos.7|rhel.7)
			deprecation_notice "$lsb_dist" "$dist_version"
			;;
		debian.buster|debian.stretch|debian.jessie)
			deprecation_notice "$lsb_dist" "$dist_version"
			;;
		raspbian.buster|raspbian.stretch|raspbian.jessie)
			deprecation_notice "$lsb_dist" "$dist_version"
			;;
		ubuntu.focal|ubuntu.bionic|ubuntu.xenial|ubuntu.trusty)
			deprecation_notice "$lsb_dist" "$dist_version"
			;;
		ubuntu.oracular|ubuntu.mantic|ubuntu.lunar|ubuntu.kinetic|ubuntu.impish|ubuntu.hirsute|ubuntu.groovy|ubuntu.eoan|ubuntu.disco|ubuntu.cosmic)
			deprecation_notice "$lsb_dist" "$dist_version"
			;;
		fedora.*)
			if [ "$dist_version" -lt 41 ]; then
				deprecation_notice "$lsb_dist" "$dist_version"
			fi
			;;
	esac

	# Run setup for each distro accordingly
	case "$lsb_dist" in
		ubuntu|debian|raspbian)
			pre_reqs="ca-certificates curl"
			apt_repo="deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] $DOWNLOAD_URL/linux/$lsb_dist $dist_version $CHANNEL"
			# Configure the apt repository (key + sources list).
			(
				if ! is_dry_run; then
					set -x
				fi
				$sh_c 'apt-get -qq update >/dev/null'
				$sh_c "DEBIAN_FRONTEND=noninteractive apt-get -y -qq install $pre_reqs >/dev/null"
				$sh_c 'install -m 0755 -d /etc/apt/keyrings'
				$sh_c "curl -fsSL \"$DOWNLOAD_URL/linux/$lsb_dist/gpg\" -o /etc/apt/keyrings/docker.asc"
				$sh_c "chmod a+r /etc/apt/keyrings/docker.asc"
				$sh_c "echo \"$apt_repo\" > /etc/apt/sources.list.d/docker.list"
				$sh_c 'apt-get -qq update >/dev/null'
			)
			# --setup-repo: stop after configuring the repository.
			if [ "$REPO_ONLY" = "1" ]; then
				exit 0
			fi
			# Resolve a requested $VERSION to a concrete apt pin (=x.y.z...).
			pkg_version=""
			if [ -n "$VERSION" ]; then
				if is_dry_run; then
					echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
				else
					# Will work for incomplete versions IE (17.12), but may not actually grab the "latest" if in the test channel
					pkg_pattern="$(echo "$VERSION" | sed 's/-ce-/~ce~.*/g' | sed 's/-/.*/g')"
					search_command="apt-cache madison docker-ce | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3"
					pkg_version="$($sh_c "$search_command")"
					echo "INFO: Searching repository for VERSION '$VERSION'"
					echo "INFO: $search_command"
					if [ -z "$pkg_version" ]; then
						echo
						echo "ERROR: '$VERSION' not found amongst apt-cache madison results"
						echo
						exit 1
					fi
					if version_gte "18.09"; then
						search_command="apt-cache madison docker-ce-cli | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3"
						echo "INFO: $search_command"
						cli_pkg_version="=$($sh_c "$search_command")"
					fi
					pkg_version="=$pkg_version"
				fi
			fi
			# Assemble and install the package set for the target version.
			# ${var%=} drops a lone trailing "=" left when no pin was found.
			(
				pkgs="docker-ce${pkg_version%=}"
				if version_gte "18.09"; then
					# older versions didn't ship the cli and containerd as separate packages
					pkgs="$pkgs docker-ce-cli${cli_pkg_version%=} containerd.io"
				fi
				if version_gte "20.10"; then
					pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version"
				fi
				if version_gte "23.0"; then
					pkgs="$pkgs docker-buildx-plugin"
				fi
				if version_gte "28.2"; then
					pkgs="$pkgs docker-model-plugin"
				fi
				if ! is_dry_run; then
					set -x
				fi
				$sh_c "DEBIAN_FRONTEND=noninteractive apt-get -y -qq install $pkgs >/dev/null"
			)
			if [ "$NO_AUTOSTART" != "1" ]; then
				start_docker_daemon
			fi
			echo_docker_as_nonroot
			exit 0
			;;
		centos|fedora|rhel)
			if [ "$(uname -m)" = "s390x" ]; then
				echo "Effective v27.5, please consult RHEL distro statement for s390x support."
				exit 1
			fi
			repo_file_url="$DOWNLOAD_URL/linux/$lsb_dist/$REPO_FILE"
			# Configure the rpm repo via dnf5, dnf, or yum (in that order
			# of preference, depending on what's installed).
			(
				if ! is_dry_run; then
					set -x
				fi
				if command_exists dnf5; then
					$sh_c "dnf -y -q --setopt=install_weak_deps=False install dnf-plugins-core"
					$sh_c "dnf5 config-manager addrepo --overwrite --save-filename=docker-ce.repo --from-repofile='$repo_file_url'"
					if [ "$CHANNEL" != "stable" ]; then
						$sh_c "dnf5 config-manager setopt \"docker-ce-*.enabled=0\""
						$sh_c "dnf5 config-manager setopt \"docker-ce-$CHANNEL.enabled=1\""
					fi
					$sh_c "dnf makecache"
				elif command_exists dnf; then
					$sh_c "dnf -y -q --setopt=install_weak_deps=False install dnf-plugins-core"
					$sh_c "rm -f /etc/yum.repos.d/docker-ce.repo /etc/yum.repos.d/docker-ce-staging.repo"
					$sh_c "dnf config-manager --add-repo $repo_file_url"
					if [ "$CHANNEL" != "stable" ]; then
						$sh_c "dnf config-manager --set-disabled \"docker-ce-*\""
						$sh_c "dnf config-manager --set-enabled \"docker-ce-$CHANNEL\""
					fi
					$sh_c "dnf makecache"
				else
					$sh_c "yum -y -q install yum-utils"
					$sh_c "rm -f /etc/yum.repos.d/docker-ce.repo /etc/yum.repos.d/docker-ce-staging.repo"
					$sh_c "yum-config-manager --add-repo $repo_file_url"
					if [ "$CHANNEL" != "stable" ]; then
						$sh_c "yum-config-manager --disable \"docker-ce-*\""
						$sh_c "yum-config-manager --enable \"docker-ce-$CHANNEL\""
					fi
					$sh_c "yum makecache"
				fi
			)
			# --setup-repo: stop after configuring the repository.
			if [ "$REPO_ONLY" = "1" ]; then
				exit 0
			fi
			pkg_version=""
			if command_exists dnf; then
				pkg_manager="dnf"
				pkg_manager_flags="-y -q --best"
			else
				pkg_manager="yum"
				pkg_manager_flags="-y -q"
			fi
			# Resolve a requested $VERSION to a concrete rpm version string.
			if [ -n "$VERSION" ]; then
				if is_dry_run; then
					echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
				else
					if [ "$lsb_dist" = "fedora" ]; then
						pkg_suffix="fc$dist_version"
					else
						pkg_suffix="el"
					fi
					pkg_pattern="$(echo "$VERSION" | sed 's/-ce-/\\\\.ce.*/g' | sed 's/-/.*/g').*$pkg_suffix"
					search_command="$pkg_manager list --showduplicates docker-ce | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'"
					pkg_version="$($sh_c "$search_command")"
					echo "INFO: Searching repository for VERSION '$VERSION'"
					echo "INFO: $search_command"
					if [ -z "$pkg_version" ]; then
						echo
						echo "ERROR: '$VERSION' not found amongst $pkg_manager list results"
						echo
						exit 1
					fi
					if version_gte "18.09"; then
						# older versions don't support a cli package
						search_command="$pkg_manager list --showduplicates docker-ce-cli | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'"
						cli_pkg_version="$($sh_c "$search_command" | cut -d':' -f 2)"
					fi
					# Cut out the epoch and prefix with a '-'
					pkg_version="-$(echo "$pkg_version" | cut -d':' -f 2)"
				fi
			fi
			# Assemble and install the package set for the target version.
			(
				pkgs="docker-ce$pkg_version"
				if version_gte "18.09"; then
					# older versions didn't ship the cli and containerd as separate packages
					if [ -n "$cli_pkg_version" ]; then
						pkgs="$pkgs docker-ce-cli-$cli_pkg_version containerd.io"
					else
						pkgs="$pkgs docker-ce-cli containerd.io"
					fi
				fi
				if version_gte "20.10"; then
					pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version"
				fi
				if version_gte "23.0"; then
					pkgs="$pkgs docker-buildx-plugin docker-model-plugin"
				fi
				if ! is_dry_run; then
					set -x
				fi
				$sh_c "$pkg_manager $pkg_manager_flags install $pkgs"
			)
			if [ "$NO_AUTOSTART" != "1" ]; then
				start_docker_daemon
			fi
			echo_docker_as_nonroot
			exit 0
			;;
		sles)
			echo "Effective v27.5, please consult SLES distro statement for s390x support."
			exit 1
			;;
		*)
			if [ -z "$lsb_dist" ]; then
				if is_darwin; then
					echo
					echo "ERROR: Unsupported operating system 'macOS'"
					echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop"
					echo
					exit 1
				fi
			fi
			echo
			echo "ERROR: Unsupported distribution '$lsb_dist'"
			echo
			exit 1
			;;
	esac
	# Unreachable in practice: every branch above exits explicitly.
	exit 1
}
# wrapped up in a function so that we have some protection against only getting
# half the file during "curl | sh"
do_install

View File

@@ -53,6 +53,16 @@
</div>
</div>
<script src="script.js"></script>
<!-- Debug Panel (hidden by default) -->
<div id="debugPanel" style="display: none; position: fixed; top: 10px; right: 10px; background: rgba(0,0,0,0.9); color: white; padding: 10px; border-radius: 5px; z-index: 1000; font-family: monospace; font-size: 12px;">
<h4>Debug Tools</h4>
<button id="exportLogsBtn" style="margin: 5px; padding: 5px;">Export Frontend Logs</button>
<button id="clearLogsBtn" style="margin: 5px; padding: 5px;">Clear Logs</button>
<button id="viewBackendLogsBtn" style="margin: 5px; padding: 5px;">View Backend Logs</button>
<button id="closeDebugBtn" style="margin: 5px; padding: 5px;">Close</button>
<div id="debugOutput" style="margin-top: 10px; max-height: 200px; overflow-y: auto;"></div>
</div>
<script src="script.js?v=2"></script>
</body>
</html>

361
script.js
View File

@@ -1,3 +1,69 @@
// Enhanced logging utility: keeps a capped in-memory buffer of structured
// log entries, mirrors them to the console, and persists them to
// localStorage (best-effort) so they survive reloads/OAuth redirects.
class Logger {
  constructor() {
    this.logs = [];
    this.maxLogs = 1000; // cap so the buffer (and localStorage copy) stays bounded
  }

  // Normalize a data payload so it survives JSON.stringify. Error
  // instances serialize to "{}" by default, silently losing the message
  // and stack in the persisted/exported logs; convert them to a plain
  // object first. Other values are stored as-is.
  static serializeData(data) {
    if (data instanceof Error) {
      return { name: data.name, message: data.message, stack: data.stack };
    }
    return data;
  }

  /**
   * Record one log entry.
   * @param {string} level - 'info' | 'warn' | 'error'
   * @param {string} message - human-readable description
   * @param {*} [data] - optional structured payload (Errors are normalized)
   */
  log(level, message, data = null) {
    const timestamp = new Date().toISOString();
    const logEntry = {
      timestamp,
      level,
      message,
      data: Logger.serializeData(data),
      // Guarded so the logger also works outside a browser (e.g. tests).
      url: typeof window !== 'undefined' ? window.location.href : '',
    };
    this.logs.push(logEntry);
    if (this.logs.length > this.maxLogs) {
      this.logs.shift(); // drop the oldest entry
    }
    const consoleMethod = level === 'error' ? 'error' : level === 'warn' ? 'warn' : 'log';
    console[consoleMethod](`[${timestamp}] ${level.toUpperCase()}: ${message}`, data || '');
    // Save to localStorage for persistence; best-effort (quota errors or
    // a missing localStorage are reported, never fatal).
    try {
      localStorage.setItem('devden_logs', JSON.stringify(this.logs));
    } catch (e) {
      console.warn('Failed to save logs to localStorage:', e);
    }
  }

  info(message, data = null) { this.log('info', message, data); }
  warn(message, data = null) { this.log('warn', message, data); }
  error(message, data = null) { this.log('error', message, data); }

  // Download the collected logs as a pretty-printed JSON file (browser only).
  exportLogs() {
    const blob = new Blob([JSON.stringify(this.logs, null, 2)], { type: 'application/json' });
    const url = URL.createObjectURL(blob);
    const a = document.createElement('a');
    a.href = url;
    a.download = `devden_logs_${new Date().toISOString().split('T')[0]}.json`;
    a.click();
    URL.revokeObjectURL(url);
  }

  // Wipe both the in-memory buffer and the persisted copy (best-effort).
  clearLogs() {
    this.logs = [];
    try {
      localStorage.removeItem('devden_logs');
    } catch (e) {
      console.warn('Failed to clear logs from localStorage:', e);
    }
  }
}
// Single shared logger instance for the whole frontend.
const logger = new Logger();

// Load existing logs from localStorage
// (best-effort: restores log history across page reloads/OAuth redirects).
try {
  const savedLogs = localStorage.getItem('devden_logs');
  if (savedLogs) {
    logger.logs = JSON.parse(savedLogs);
    logger.info('Loaded existing logs from localStorage', { count: logger.logs.length });
  }
} catch (e) {
  logger.warn('Failed to load existing logs from localStorage:', e);
}
const loginScreen = document.getElementById("loginScreen");
const welcomeScreen = document.getElementById("welcomeScreen");
const chatScreen = document.getElementById("chatScreen");
@@ -6,97 +72,173 @@ const welcomeInput = document.getElementById("welcomeInput");
const chatInput = document.getElementById("chatInput");
const loginBtn = document.getElementById("loginBtn");
const API_URL = "http://localhost:8000";
// API URL is same as frontend (nginx proxies /api and /auth to backend)
const API_URL = window.location.origin;
let isInChat = false;
// Auth functions
function getToken() {
return localStorage.getItem("devden_token");
try {
const token = localStorage.getItem("devden_token");
logger.info('Retrieved token from localStorage', { hasToken: !!token });
return token;
} catch (error) {
logger.error('Failed to retrieve token from localStorage', error);
return null;
}
}
function setToken(token) {
try {
localStorage.setItem("devden_token", token);
logger.info('Token saved to localStorage');
} catch (error) {
logger.error('Failed to save token to localStorage', error);
}
}
function clearToken() {
try {
localStorage.removeItem("devden_token");
logger.info('Token cleared from localStorage');
} catch (error) {
logger.error('Failed to clear token from localStorage', error);
}
}
function showLoginScreen() {
try {
loginScreen.classList.remove("hidden");
welcomeScreen.classList.add("hidden");
chatScreen.classList.add("hidden");
logger.info('Switched to login screen');
} catch (error) {
logger.error('Failed to show login screen', error);
}
}
function showWelcomeScreen() {
try {
loginScreen.classList.add("hidden");
welcomeScreen.classList.remove("hidden");
chatScreen.classList.add("hidden");
welcomeInput.focus();
logger.info('Switched to welcome screen');
} catch (error) {
logger.error('Failed to show welcome screen', error);
}
}
function switchToChat() {
try {
loginScreen.classList.add("hidden");
welcomeScreen.classList.add("hidden");
chatScreen.classList.remove("hidden");
chatInput.focus();
isInChat = true;
logger.info('Switched to chat screen');
} catch (error) {
logger.error('Failed to switch to chat screen', error);
}
}
async function checkAuth() {
logger.info('Starting auth check');
const token = getToken();
if (!token) {
logger.info('No token found, showing login screen');
showLoginScreen();
return;
}
try {
logger.info('Making auth check request to /api/auth/me');
const response = await fetch(`${API_URL}/api/auth/me`, {
headers: { Authorization: `Bearer ${token}` },
});
logger.info('Auth check response received', {
status: response.status,
statusText: response.statusText,
ok: response.ok
});
if (response.ok) {
const userData = await response.json();
logger.info('Auth check successful', { user: userData });
showWelcomeScreen();
} else {
const errorText = await response.text();
logger.warn('Auth check failed', {
status: response.status,
statusText: response.statusText,
response: errorText
});
clearToken();
showLoginScreen();
}
} catch (error) {
console.error("Auth check failed:", error);
logger.error('Auth check request failed', {
error: error.message,
stack: error.stack
});
showLoginScreen();
}
}
async function handleLogin() {
loginBtn.disabled = true;
loginBtn.textContent = "Redirecting...";
logger.info('Login button clicked, starting login process');
try {
loginBtn.disabled = true;
loginBtn.textContent = "Checking auth config...";
logger.info('Disabled login button and updated text');
// Check if auth is configured
logger.info('Checking auth configuration via /api/auth/status');
const statusResponse = await fetch(`${API_URL}/api/auth/status`);
const statusData = await statusResponse.json();
logger.info('Auth status response', {
status: statusResponse.status,
configured: statusData.configured
});
if (!statusData.configured) {
alert(
"Authentication not configured. Please set ENTRA_TENANT_ID, ENTRA_CLIENT_ID, and ENTRA_CLIENT_SECRET in your .env file.",
);
const errorMsg = "Authentication not configured. Please set ENTRA_TENANT_ID, ENTRA_CLIENT_ID, and ENTRA_CLIENT_SECRET in your .env file.";
logger.error('Auth not configured', { response: statusData });
alert(errorMsg);
loginBtn.disabled = false;
loginBtn.textContent = "Sign in with Microsoft";
return;
}
loginBtn.textContent = "Getting auth URL...";
logger.info('Auth configured, requesting login URL from /api/auth/login');
// Get auth URL and redirect
const response = await fetch(`${API_URL}/api/auth/login`);
const data = await response.json();
logger.info('Login URL response', {
status: response.status,
hasAuthUrl: !!data.auth_url,
authUrl: data.auth_url ? data.auth_url.substring(0, 100) + '...' : null
});
if (data.auth_url) {
loginBtn.textContent = "Redirecting...";
logger.info('Redirecting to Microsoft OAuth URL');
window.location.href = data.auth_url;
} else {
throw new Error("No auth URL returned");
}
} catch (error) {
console.error("Login failed:", error);
logger.error('Login process failed', {
error: error.message,
stack: error.stack
});
alert("Login failed: " + error.message);
loginBtn.disabled = false;
loginBtn.textContent = "Sign in with Microsoft";
@@ -104,32 +246,81 @@ async function handleLogin() {
}
async function handleCallback() {
logger.info('Starting OAuth callback processing', { url: window.location.href });
const params = new URLSearchParams(window.location.search);
const code = params.get("code");
const sessionState = params.get("session_state");
const error = params.get("error");
const errorDescription = params.get("error_description");
if (!code) return false;
logger.info('Parsed URL parameters', {
hasCode: !!code,
hasSessionState: !!sessionState,
hasError: !!error,
codeLength: code ? code.length : 0,
sessionState: sessionState,
error: error,
errorDescription: errorDescription
});
// Check for OAuth errors
if (error) {
logger.error('OAuth error in callback URL', {
error: error,
errorDescription: errorDescription
});
alert(`Authentication failed: ${error} - ${errorDescription || 'Unknown error'}`);
return false;
}
if (!code) {
logger.warn('No authorization code found in callback URL');
return false;
}
try {
logger.info('Sending authorization code to backend /api/auth/callback');
const response = await fetch(`${API_URL}/api/auth/callback`, {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ code }),
});
logger.info('Callback response received', {
status: response.status,
statusText: response.statusText,
ok: response.ok
});
if (!response.ok) {
const error = await response.json();
throw new Error(error.detail || "Callback failed");
const errorData = await response.json();
logger.error('Callback request failed', {
status: response.status,
error: errorData
});
throw new Error(errorData.detail || "Callback failed");
}
const data = await response.json();
logger.info('Callback successful', {
hasToken: !!data.token,
hasUser: !!data.user,
user: data.user
});
setToken(data.token);
// Clean up URL
logger.info('Cleaning up URL (removing query parameters)');
window.history.replaceState({}, "", "/");
return true;
} catch (error) {
console.error("Callback failed:", error);
logger.error('Callback processing failed', {
error: error.message,
stack: error.stack
});
alert("Authentication failed: " + error.message);
return false;
}
@@ -288,14 +479,158 @@ chatInput.addEventListener("keydown", (e) => {
// Initialize
// Boot sequence: log environment info, sanity-check the stylesheet and
// required DOM nodes, then either finish an in-flight OAuth callback or
// fall back to a normal token-based auth check.
async function init() {
  logger.info('Application initialization started', {
    userAgent: navigator.userAgent,
    url: window.location.href,
    timestamp: new Date().toISOString()
  });

  // Check for CSS loading
  const styleLink = document.querySelector('link[rel="stylesheet"]');
  if (styleLink) {
    logger.info('CSS link found', { href: styleLink.href });
    styleLink.addEventListener('load', () => {
      logger.info('CSS loaded successfully');
    });
    styleLink.addEventListener('error', (e) => {
      logger.error('CSS failed to load', { href: styleLink.href, error: e });
    });
  } else {
    logger.error('CSS link not found in document');
  }

  // Check DOM elements
  const elements = ['loginScreen', 'welcomeScreen', 'chatScreen', 'chatMessages', 'welcomeInput', 'chatInput', 'loginBtn'];
  elements.forEach(id => {
    const el = document.getElementById(id);
    if (!el) {
      logger.error(`Required DOM element not found: ${id}`);
    } else {
      logger.info(`DOM element found: ${id}`);
    }
  });

  try {
    // Check for OAuth callback first
    logger.info('Checking for OAuth callback parameters');
    const callbackSuccess = await handleCallback();
    if (callbackSuccess) {
      logger.info('OAuth callback processed successfully, showing welcome screen');
      showWelcomeScreen();
    } else {
      logger.info('No OAuth callback or callback failed, checking existing auth');
      await checkAuth();
    }
  } catch (error) {
    logger.error('Initialization failed', {
      error: error.message,
      stack: error.stack
    });
  }
  logger.info('Application initialization completed');
}
// Add global error handler
// Funnel uncaught synchronous errors into the structured logger so they
// show up in exported log bundles, not just the browser console.
window.addEventListener('error', ({ message, filename, lineno, colno, error }) => {
  logger.error('Global JavaScript error', {
    message,
    filename,
    lineno,
    colno,
    error
  });
});
// Funnel promise rejections that no .catch()/try-catch handled into the log.
window.addEventListener('unhandledrejection', ({ reason, promise }) => {
  logger.error('Unhandled promise rejection', { reason, promise });
});
// Debug panel functionality
// Cache references to the debug-panel controls once at startup; the
// handlers and helpers below all close over these constants.
const debugPanel = document.getElementById('debugPanel');
const exportLogsBtn = document.getElementById('exportLogsBtn');
const clearLogsBtn = document.getElementById('clearLogsBtn');
const viewBackendLogsBtn = document.getElementById('viewBackendLogsBtn');
const closeDebugBtn = document.getElementById('closeDebugBtn');
const debugOutput = document.getElementById('debugOutput');
// Make the debug panel visible and record the action in the log.
function showDebugPanel() {
  debugPanel.style.setProperty('display', 'block');
  logger.info('Debug panel opened');
}
// Hide the debug panel and record the action in the log.
function hideDebugPanel() {
  debugPanel.style.setProperty('display', 'none');
  logger.info('Debug panel closed');
}
// Replace the debug-output text and keep the view pinned to the bottom.
function updateDebugOutput(text) {
  const out = debugOutput;
  out.textContent = text;
  out.scrollTop = out.scrollHeight;
}
// Event listeners for debug panel
// Export: trigger the frontend-log download and confirm it in the panel.
exportLogsBtn.addEventListener('click', function onExportLogsClick() {
  logger.exportLogs();
  updateDebugOutput('Frontend logs exported to download');
  logger.info('Logs exported via debug panel');
});
// Clear: wipe the stored frontend logs and confirm it in the panel.
clearLogsBtn.addEventListener('click', function onClearLogsClick() {
  logger.clearLogs();
  updateDebugOutput('Logs cleared');
  logger.info('Logs cleared via debug panel');
});
// Fetch the server-side logs from the API and render them in the panel.
viewBackendLogsBtn.addEventListener('click', async () => {
  try {
    updateDebugOutput('Loading backend logs...');
    const response = await fetch(`${API_URL}/logs`);
    if (!response.ok) {
      updateDebugOutput(`Failed to load backend logs: ${response.status}`);
      return;
    }
    const data = await response.json();
    if (data.logs) {
      updateDebugOutput(data.logs.join('\n'));
    } else if (data.error) {
      updateDebugOutput(`Error: ${data.error}`);
    } else {
      // Previously the "Loading backend logs..." text was left on screen
      // when the payload had neither `logs` nor `error`; show an explicit
      // fallback instead of appearing to hang.
      updateDebugOutput('Backend returned no logs');
    }
  } catch (error) {
    updateDebugOutput(`Error loading backend logs: ${error.message}`);
    // Use the same { error, stack } shape as the other logger.error calls
    // in this file, rather than passing the raw Error object.
    logger.error('Failed to load backend logs', {
      error: error.message,
      stack: error.stack
    });
  }
});
// Close button dismisses the debug panel.
closeDebugBtn.addEventListener('click', hideDebugPanel);
// Keyboard shortcuts
document.addEventListener('keydown', (e) => {
  // Ctrl+Shift+L: Export logs
  if (e.ctrlKey && e.shiftKey && e.key === 'L') {
    e.preventDefault();
    logger.exportLogs();
    logger.info('Logs exported via keyboard shortcut');
  }
  // The panel is only ever opened by showDebugPanel(), which sets the
  // inline display to 'block', so "open" means exactly that. The previous
  // comparison against 'none' misbehaved when the initial inline style was
  // '' (panel hidden by the stylesheet): the first toggle press was a
  // no-op and Escape fired for a panel that was not visible.
  // NOTE(review): assumes the panel starts hidden (stylesheet or inline
  // display:none) — confirm against the HTML.
  const panelOpen = debugPanel.style.display === 'block';
  // Ctrl+Shift+D: Toggle debug panel
  if (e.ctrlKey && e.shiftKey && e.key === 'D') {
    e.preventDefault();
    if (panelOpen) {
      hideDebugPanel();
    } else {
      showDebugPanel();
    }
  }
  // Escape: Close debug panel
  if (e.key === 'Escape' && panelOpen) {
    hideDebugPanel();
  }
});
// Kick off application startup (async; auth-phase errors are caught inside).
init();