Some checks failed
CI/CD Pipeline / Code Quality Checks (push) Failing after 6m9s
CI/CD Pipeline / Security Scanning (push) Successful in 26s
CI/CD Pipeline / Tests (3.11) (push) Failing after 5m24s
CI/CD Pipeline / Tests (3.12) (push) Failing after 5m23s
CI/CD Pipeline / Build Docker Image (push) Has been skipped
CI/CD Pipeline / Deploy to Staging (push) Has been skipped
CI/CD Pipeline / Deploy to Production (push) Has been skipped
CI/CD Pipeline / Notification (push) Successful in 1s
128 lines
4.4 KiB
Python
128 lines
4.4 KiB
Python
"""Tests for AI services."""
|
|
|
|
import pytest
|
|
|
|
from guardden.services.ai.base import ContentCategory, ModerationResult, parse_categories
|
|
from guardden.services.ai.factory import NullProvider, create_ai_provider
|
|
|
|
|
|
class TestModerationResult:
    """Unit tests covering the ``severity`` score of ``ModerationResult``.

    Scoring contract exercised here: an unflagged result scores 0; a flagged
    result scores ``confidence * 50`` plus a per-category bonus (30 for
    high-severity categories, 20 for medium), capped at 100.
    """

    def test_severity_not_flagged(self) -> None:
        """An unflagged result always scores 0, regardless of confidence."""
        unflagged = ModerationResult(is_flagged=False, confidence=0.9)
        assert unflagged.severity == 0

    def test_severity_with_confidence(self) -> None:
        """With no categories, severity is confidence scaled by 50."""
        flagged = ModerationResult(
            is_flagged=True,
            confidence=0.8,
            categories=[],
        )
        expected = 40  # 0.8 * 50
        assert flagged.severity == expected

    def test_severity_high_category(self) -> None:
        """A high-severity category adds a +30 bonus on top of confidence."""
        flagged = ModerationResult(
            is_flagged=True,
            confidence=0.5,
            categories=[ContentCategory.HATE_SPEECH],
        )
        expected = 55  # 0.5 * 50 + 30
        assert flagged.severity == expected

    def test_severity_medium_category(self) -> None:
        """A medium-severity category adds a +20 bonus on top of confidence."""
        flagged = ModerationResult(
            is_flagged=True,
            confidence=0.5,
            categories=[ContentCategory.HARASSMENT],
        )
        expected = 45  # 0.5 * 50 + 20
        assert flagged.severity == expected

    def test_severity_multiple_categories(self) -> None:
        """Category bonuses accumulate when several categories are present."""
        flagged = ModerationResult(
            is_flagged=True,
            confidence=0.5,
            categories=[ContentCategory.HATE_SPEECH, ContentCategory.VIOLENCE],
        )
        expected = 75  # 0.5 * 50 + 30 + 20
        assert flagged.severity == expected

    def test_severity_capped_at_100(self) -> None:
        """The score never exceeds 100 even when the raw sum would."""
        flagged = ModerationResult(
            is_flagged=True,
            confidence=1.0,
            categories=[
                ContentCategory.HATE_SPEECH,
                ContentCategory.SELF_HARM,
                ContentCategory.SCAM,
            ],
        )
        # Raw sum would be 50 + 30 + 30 + 30 = 140; clamped to 100.
        assert flagged.severity == 100
|
|
|
|
|
|
class TestParseCategories:
    """Tests for the ``parse_categories`` helper."""

    def test_parse_categories_filters_invalid(self) -> None:
        """Unrecognized category names are dropped; valid ones keep order."""
        parsed = parse_categories(["harassment", "unknown", "scam"])
        expected = [ContentCategory.HARASSMENT, ContentCategory.SCAM]
        assert parsed == expected
|
|
|
|
|
|
class TestNullProvider:
    """Tests verifying that ``NullProvider`` is a safe no-op backend.

    Every analysis method should resolve without raising and report a
    "nothing detected" result.
    """

    @pytest.fixture
    def provider(self) -> NullProvider:
        """Return a fresh NullProvider for each test."""
        return NullProvider()

    @pytest.mark.asyncio
    async def test_moderate_text_returns_empty(self, provider: NullProvider) -> None:
        """Text moderation never flags content."""
        moderation = await provider.moderate_text("test content")
        assert moderation.is_flagged is False

    @pytest.mark.asyncio
    async def test_analyze_image_returns_empty(self, provider: NullProvider) -> None:
        """Image analysis never reports NSFW content."""
        analysis = await provider.analyze_image("http://example.com/image.jpg")
        assert analysis.is_nsfw is False

    @pytest.mark.asyncio
    async def test_analyze_phishing_returns_empty(self, provider: NullProvider) -> None:
        """Phishing analysis never reports a phishing URL."""
        analysis = await provider.analyze_phishing("http://example.com")
        assert analysis.is_phishing is False
|
|
|
|
|
|
class TestFactory:
    """Tests for ``create_ai_provider``, the AI backend factory."""

    def test_create_null_provider(self) -> None:
        """The "none" provider name resolves to a NullProvider instance."""
        built = create_ai_provider("none")
        assert isinstance(built, NullProvider)

    def test_create_anthropic_without_key(self) -> None:
        """Requesting the anthropic backend with no key is a ValueError."""
        with pytest.raises(ValueError, match="API key required"):
            create_ai_provider("anthropic", None)

    def test_create_openai_without_key(self) -> None:
        """Requesting the openai backend with no key is a ValueError."""
        with pytest.raises(ValueError, match="API key required"):
            create_ai_provider("openai", None)

    def test_create_unknown_provider(self) -> None:
        """An unrecognized provider name is rejected with a ValueError."""
        with pytest.raises(ValueError, match="Unknown AI provider"):
            create_ai_provider("unknown", "key")  # type: ignore
|