Features: - Core moderation: warn, kick, ban, timeout, strike system - Automod: banned words filter, scam detection, anti-spam, link filtering - AI moderation: Claude/OpenAI integration, NSFW detection, phishing analysis - Verification system: button, captcha, math, emoji challenges - Rate limiting system with configurable scopes - Event logging: joins, leaves, message edits/deletes, voice activity - Per-guild configuration with caching - Docker deployment support Bug fixes applied: - Fixed await on session.delete() in guild_config.py - Fixed memory leak in AI moderation message tracking (use deque) - Added error handling to bot shutdown - Added error handling to timeout command - Removed unused Literal import - Added prefix validation - Added image analysis limit (3 per message) - Fixed test mock for SQLAlchemy model
120 lines · 4.1 KiB · Python
"""Tests for AI services."""
|
|
|
|
import pytest
|
|
|
|
from guardden.services.ai.base import ContentCategory, ModerationResult
|
|
from guardden.services.ai.factory import NullProvider, create_ai_provider
|
|
|
|
|
|
class TestModerationResult:
    """Unit tests for the severity computation on ModerationResult."""

    def test_severity_not_flagged(self) -> None:
        """An unflagged result always reports zero severity."""
        unflagged = ModerationResult(is_flagged=False, confidence=0.9)
        assert unflagged.severity == 0

    def test_severity_with_confidence(self) -> None:
        """With no categories, severity is confidence scaled by 50."""
        flagged = ModerationResult(is_flagged=True, confidence=0.8, categories=[])
        assert flagged.severity == 40  # 0.8 * 50

    def test_severity_high_category(self) -> None:
        """A high-severity category adds a 30-point bump."""
        flagged = ModerationResult(
            is_flagged=True,
            confidence=0.5,
            categories=[ContentCategory.HATE_SPEECH],
        )
        assert flagged.severity == 55  # 0.5 * 50 + 30

    def test_severity_medium_category(self) -> None:
        """A medium-severity category adds a 20-point bump."""
        flagged = ModerationResult(
            is_flagged=True,
            confidence=0.5,
            categories=[ContentCategory.HARASSMENT],
        )
        assert flagged.severity == 45  # 0.5 * 50 + 20

    def test_severity_multiple_categories(self) -> None:
        """Category bumps accumulate across multiple categories."""
        flagged = ModerationResult(
            is_flagged=True,
            confidence=0.5,
            categories=[ContentCategory.HATE_SPEECH, ContentCategory.VIOLENCE],
        )
        assert flagged.severity == 75  # 0.5 * 50 + 30 + 20

    def test_severity_capped_at_100(self) -> None:
        """Severity never exceeds 100, even when the raw sum would."""
        flagged = ModerationResult(
            is_flagged=True,
            confidence=1.0,
            categories=[
                ContentCategory.HATE_SPEECH,
                ContentCategory.SELF_HARM,
                ContentCategory.SCAM,
            ],
        )
        # Raw sum would be 50 + 30 + 30 + 30 = 140; implementation caps it.
        assert flagged.severity == 100
|
|
|
|
|
|
class TestNullProvider:
    """Verify the no-op NullProvider always returns permissive results."""

    @pytest.fixture
    def provider(self) -> NullProvider:
        """Build a fresh NullProvider for each test."""
        return NullProvider()

    @pytest.mark.asyncio
    async def test_moderate_text_returns_empty(self, provider: NullProvider) -> None:
        """Text moderation never flags anything."""
        outcome = await provider.moderate_text("test content")
        assert outcome.is_flagged is False

    @pytest.mark.asyncio
    async def test_analyze_image_returns_empty(self, provider: NullProvider) -> None:
        """Image analysis never marks content as NSFW."""
        outcome = await provider.analyze_image("http://example.com/image.jpg")
        assert outcome.is_nsfw is False

    @pytest.mark.asyncio
    async def test_analyze_phishing_returns_empty(self, provider: NullProvider) -> None:
        """Phishing analysis never marks a URL as phishing."""
        outcome = await provider.analyze_phishing("http://example.com")
        assert outcome.is_phishing is False
|
|
|
|
|
|
class TestFactory:
    """Tests for create_ai_provider dispatch and key validation."""

    def test_create_null_provider(self) -> None:
        """The "none" provider name yields a NullProvider."""
        assert isinstance(create_ai_provider("none"), NullProvider)

    def test_create_anthropic_without_key(self) -> None:
        """The anthropic provider cannot be built without an API key."""
        with pytest.raises(ValueError, match="API key required"):
            create_ai_provider("anthropic", None)

    def test_create_openai_without_key(self) -> None:
        """The openai provider cannot be built without an API key."""
        with pytest.raises(ValueError, match="API key required"):
            create_ai_provider("openai", None)

    def test_create_unknown_provider(self) -> None:
        """An unrecognized provider name is rejected."""
        with pytest.raises(ValueError, match="Unknown AI provider"):
            create_ai_provider("unknown", "key")  # type: ignore