fix: Remove ModerationResult and unused imports from AI services
Fix the ImportError caused by the removed ModerationResult class. Changes: remove ModerationResult from the ai/__init__.py exports; remove the unused ContentCategory enum and parse_categories function; remove unused imports from ai_moderation.py; clean up NullProvider so it only has an analyze_image method. Fixes the bot startup crash.
This commit is contained in:
@@ -8,7 +8,6 @@ import discord
|
||||
from discord.ext import commands
|
||||
|
||||
from guardden.bot import GuardDen
|
||||
from guardden.services.ai.base import ContentCategory, ModerationResult
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
"""AI services for content moderation."""
|
||||
|
||||
from guardden.services.ai.base import AIProvider, ModerationResult
|
||||
from guardden.services.ai.base import AIProvider, ImageAnalysisResult
|
||||
from guardden.services.ai.factory import create_ai_provider
|
||||
|
||||
__all__ = ["AIProvider", "ModerationResult", "create_ai_provider"]
|
||||
__all__ = ["AIProvider", "ImageAnalysisResult", "create_ai_provider"]
|
||||
|
||||
@@ -9,20 +9,6 @@ from enum import Enum
|
||||
from typing import Literal, TypeVar
|
||||
|
||||
|
||||
class ContentCategory(str, Enum):
    """Categories of problematic content.

    Mixes in ``str`` so members compare equal to their raw string values
    and serialize directly (e.g. into JSON payloads) — presumably how AI
    provider responses are mapped back to categories; confirm against
    callers.
    """

    SAFE = "safe"  # explicit "no issue found" marker, not absence of a category
    HARASSMENT = "harassment"
    HATE_SPEECH = "hate_speech"
    SEXUAL = "sexual"
    VIOLENCE = "violence"
    SELF_HARM = "self_harm"
    SPAM = "spam"
    SCAM = "scam"
    MISINFORMATION = "misinformation"
|
||||
|
||||
|
||||
class NSFWCategory(str, Enum):
|
||||
"""NSFW content subcategories with increasing severity."""
|
||||
|
||||
@@ -45,17 +31,6 @@ class RetryConfig:
|
||||
max_delay: float = 2.0
|
||||
|
||||
|
||||
def parse_categories(values: list[str]) -> list[ContentCategory]:
    """Parse category values into ContentCategory enums.

    Unknown values are silently skipped, mirroring the by-value Enum
    lookup that raises ValueError for unrecognized strings.
    """
    by_value = {member.value: member for member in ContentCategory}
    return [by_value[raw] for raw in values if raw in by_value]
|
||||
|
||||
|
||||
async def run_with_retries(
|
||||
operation: Callable[[], Awaitable[_T]],
|
||||
*,
|
||||
|
||||
@@ -11,21 +11,11 @@ logger = logging.getLogger(__name__)
|
||||
class NullProvider(AIProvider):
    """Null provider that does nothing (for when AI is disabled).

    Every method returns a default-constructed result object, so callers
    can treat it exactly like a real provider without branching on
    whether AI moderation is enabled (Null Object pattern).
    """

    async def moderate_text(self, content, context=None, sensitivity=50):
        """Return an empty ModerationResult without analyzing ``content``."""
        # NOTE(review): imported locally rather than at module top —
        # presumably to avoid a circular import with the base module; confirm.
        from guardden.services.ai.base import ModerationResult

        return ModerationResult()

    async def analyze_image(self, image_url, sensitivity=50):
        """Return an empty ImageAnalysisResult without fetching the image."""
        from guardden.services.ai.base import ImageAnalysisResult

        return ImageAnalysisResult()

    async def analyze_phishing(self, url, message_content=None):
        """Return an empty PhishingAnalysisResult without checking the URL."""
        from guardden.services.ai.base import PhishingAnalysisResult

        return PhishingAnalysisResult()

    async def close(self):
        """No-op: this provider holds no connections or resources."""
        pass
|
||||
|
||||
|
||||
Reference in New Issue
Block a user