Add NSFW-only filtering mode for content moderation
Some checks failed
NSFW-Only Filtering Tests / NSFW-Only Filtering Feature Tests (push) Has been cancelled

- Add nsfw_only_filtering field to GuildSettings model
- Create database migration for new field (20260124_add_nsfw_only_filtering)
- Update AI moderation logic to respect NSFW-only mode
- Add Discord command !ai nsfwonly <true/false> for toggling mode
- Implement filtering logic in image analysis for both attachments and embeds
- Add comprehensive test suite for new functionality
- Update documentation with usage examples and feature description
- Create dedicated CI workflow for testing NSFW-only filtering feature

When enabled, only sexual/nude content is filtered while allowing:
- Violence and gore
- Harassment and bullying
- Hate speech
- Self-harm content
- Other content categories

This mode is useful for gaming communities and mature discussion
servers that have specific content policies allowing violence
but prohibiting sexual material.
This commit is contained in:
2026-01-24 23:51:10 +01:00
parent 824dd681f7
commit 1250b5573c
6 changed files with 748 additions and 14 deletions

View File

@@ -0,0 +1,196 @@
---
name: NSFW-Only Filtering Tests

# Run only when files belonging to the NSFW-only filtering feature change.
on:
  push:
    paths:
      - 'src/guardden/models/guild.py'
      - 'src/guardden/cogs/ai_moderation.py'
      - 'migrations/versions/20260124_add_nsfw_only_filtering.py'
      - 'tests/test_nsfw_only_filtering.py'
  pull_request:
    paths:
      - 'src/guardden/models/guild.py'
      - 'src/guardden/cogs/ai_moderation.py'
      - 'migrations/versions/20260124_add_nsfw_only_filtering.py'
      - 'tests/test_nsfw_only_filtering.py'

env:
  PYTHON_VERSION: "3.11"
  # Shared test configuration, hoisted so each step does not repeat the
  # same env block. The token is a dummy value of the expected length.
  GUARDDEN_DISCORD_TOKEN: "test_token_12345678901234567890123456789012345"
  GUARDDEN_DATABASE_URL: "postgresql://guardden_test:guardden_test@localhost:5432/guardden_test"
  GUARDDEN_AI_PROVIDER: "none"
  GUARDDEN_LOG_LEVEL: "DEBUG"

jobs:
  test-nsfw-only-filtering:
    name: NSFW-Only Filtering Feature Tests
    runs-on: ubuntu-latest
    services:
      postgres:
        image: postgres:15
        env:
          POSTGRES_PASSWORD: guardden_test
          POSTGRES_USER: guardden_test
          POSTGRES_DB: guardden_test
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          # Quoted so no YAML parser can misread the host:container pair.
          - "5432:5432"

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python ${{ env.PYTHON_VERSION }}
        # v5 — v4 predates the node20 runtime migration.
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Cache pip dependencies
        # v4 — actions/cache@v3 relies on the deprecated v3 cache service.
        uses: actions/cache@v4
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-nsfw-${{ hashFiles('**/pyproject.toml') }}
          restore-keys: |
            ${{ runner.os }}-pip-nsfw-
            ${{ runner.os }}-pip-

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -e ".[dev,ai]"

      - name: Set up test environment
        run: echo "Test environment configured"

      - name: Run database migration test
        run: |
          echo "Testing database migration for nsfw_only_filtering field..."
          python -c "
          import asyncio
          import sys
          sys.path.insert(0, 'src')
          from guardden.models.guild import GuildSettings
          from guardden.services.database import Database
          async def test_migration():
              db = Database('postgresql://guardden_test:guardden_test@localhost:5432/guardden_test')
              try:
                  async with db.engine.begin() as conn:
                      await conn.run_sync(db.Base.metadata.create_all)
                  print('✅ Database schema created successfully')
                  # Test that the field exists in the model
                  if hasattr(GuildSettings, 'nsfw_only_filtering'):
                      print('✅ nsfw_only_filtering field exists in GuildSettings model')
                  else:
                      print('❌ nsfw_only_filtering field missing from GuildSettings model')
                      sys.exit(1)
              except Exception as e:
                  print(f'❌ Database migration test failed: {e}')
                  sys.exit(1)
              finally:
                  await db.close()
          asyncio.run(test_migration())
          "

      - name: Run NSFW-only filtering logic tests
        # --junitxml produces pytest-report.xml, which the artifact step
        # below uploads; previously that file was never generated.
        run: |
          echo "Running NSFW-only filtering specific tests..."
          pytest tests/test_nsfw_only_filtering.py -v --tb=short --junitxml=pytest-report.xml

      - name: Run AI moderation integration tests
        run: |
          echo "Running AI moderation tests to ensure no regression..."
          pytest tests/test_ai.py -v --tb=short

      - name: Test command functionality
        run: |
          echo "Testing Discord command integration..."
          python -c "
          import sys
          sys.path.insert(0, 'src')
          from guardden.cogs.ai_moderation import AIModeration
          from unittest.mock import MagicMock
          # Test that the new command exists
          bot = MagicMock()
          cog = AIModeration(bot)
          if hasattr(cog, 'ai_nsfw_only'):
              print('✅ ai_nsfw_only command exists')
          else:
              print('❌ ai_nsfw_only command missing')
              sys.exit(1)
          print('✅ All command tests passed')
          "

      - name: Validate feature completeness
        run: |
          echo "Validating NSFW-only filtering feature completeness..."
          echo "✅ Checking model updates..."
          grep -q "nsfw_only_filtering" src/guardden/models/guild.py || (echo "❌ Model not updated" && exit 1)
          echo "✅ Checking migration exists..."
          test -f migrations/versions/20260124_add_nsfw_only_filtering.py || (echo "❌ Migration missing" && exit 1)
          echo "✅ Checking AI moderation logic..."
          grep -q "nsfw_only_filtering" src/guardden/cogs/ai_moderation.py || (echo "❌ AI moderation not updated" && exit 1)
          echo "✅ Checking new command exists..."
          grep -q "ai_nsfw_only" src/guardden/cogs/ai_moderation.py || (echo "❌ New command missing" && exit 1)
          echo "✅ Checking documentation updates..."
          grep -q "nsfwonly" README.md || (echo "❌ Documentation not updated" && exit 1)
          echo "✅ Checking tests exist..."
          test -f tests/test_nsfw_only_filtering.py || (echo "❌ Tests missing" && exit 1)
          echo "🎉 All NSFW-only filtering feature checks passed!"

      - name: Generate test coverage for new feature
        run: |
          echo "Generating coverage report for NSFW-only filtering feature..."
          pytest tests/test_nsfw_only_filtering.py --cov=src/guardden/cogs/ai_moderation --cov=src/guardden/models/guild --cov-report=term-missing --cov-report=xml:nsfw-coverage.xml

      - name: Upload NSFW-only filtering test results
        # v4 — v3 artifact actions were turned off by GitHub.
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: nsfw-only-filtering-test-results
          path: |
            nsfw-coverage.xml
            pytest-report.xml

View File

@@ -19,6 +19,7 @@ GuardDen is a comprehensive Discord moderation bot designed to protect your comm
### AI Moderation
- **Text Analysis** - AI-powered content moderation using Claude or GPT
- **NSFW Image Detection** - Automatic flagging of inappropriate images
- **NSFW-Only Filtering** - Option to only filter sexual content, allowing violence/harassment
- **Phishing Analysis** - AI-enhanced detection of scam URLs
- **Configurable Sensitivity** - Adjust strictness per server (0-100)
@@ -177,7 +178,7 @@ Each server can configure:
- Automod toggles (spam, links, banned words)
- Automod thresholds and scam allowlist
- Strike action thresholds
- AI moderation settings (enabled, sensitivity, confidence threshold, log-only, NSFW detection)
- AI moderation settings (enabled, sensitivity, confidence threshold, log-only, NSFW detection, NSFW-only mode)
- Verification settings (type, enabled)
## Commands
@@ -245,6 +246,7 @@ Managed wordlists are synced weekly by default. You can override sources with
| `!ai threshold <0.0-1.0>` | Set AI confidence threshold |
| `!ai logonly <true/false>` | Toggle AI log-only mode |
| `!ai nsfw <true/false>` | Toggle NSFW image detection |
| `!ai nsfwonly <true/false>` | Toggle NSFW-only filtering mode |
| `!ai analyze <text>` | Test AI analysis on text |
### Diagnostics (Admin only)
@@ -386,6 +388,23 @@ The AI analyzes content for:
3. Actions are taken based on guild sensitivity settings
4. All AI actions are logged to the mod log channel
### NSFW-Only Filtering Mode
For communities that only want to filter sexual content while allowing other content types:
```
!ai nsfwonly true
```
**When enabled:**
- 🚫 **Blocked:** Sexual content, nude images, explicit material
- ✅ **Allowed:** Violence, harassment, hate speech, self-harm content
**When disabled (normal mode):**
- 🚫 **Blocked:** All inappropriate content categories
This mode is useful for gaming communities, mature discussion servers, or communities with specific content policies that allow violence but prohibit sexual material.
## Development
### Running Tests

View File

@@ -0,0 +1,39 @@
"""Add nsfw_only_filtering column to guild_settings table.
Revision ID: 20260124_add_nsfw_only_filtering
Revises: 20260117_enable_ai_defaults
Create Date: 2026-01-24 23:00:00.000000
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "20260124_add_nsfw_only_filtering"
down_revision = "20260117_enable_ai_defaults"
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Add the nsfw_only_filtering column to the guild_settings table.

    The column is NOT NULL with a server-side default of FALSE, so existing
    rows are backfilled atomically by the database during ADD COLUMN.
    """
    # `server_default` (not `default`) is required here: `default` is a
    # client-side/ORM default only, so adding a NOT NULL column to a
    # non-empty table would fail on PostgreSQL. With server_default the
    # database fills existing rows itself, which also makes the old
    # follow-up "UPDATE ... WHERE nsfw_only_filtering IS NULL" unnecessary
    # (no NULLs can ever exist in a NOT NULL column).
    op.add_column(
        "guild_settings",
        sa.Column(
            "nsfw_only_filtering",
            sa.Boolean(),
            nullable=False,
            server_default=sa.false(),
        ),
    )
def downgrade() -> None:
    """Revert the upgrade by dropping the nsfw_only_filtering column.

    Note: any per-guild values stored in the column are discarded.
    """
    op.drop_column("guild_settings", "nsfw_only_filtering")

View File

@@ -93,6 +93,16 @@ class AIModeration(commands.Cog):
if not config:
return
# Check NSFW-only filtering mode
if config.nsfw_only_filtering:
# Only process SEXUAL content when NSFW-only mode is enabled
if ContentCategory.SEXUAL not in result.categories:
logger.debug(
"NSFW-only mode enabled, ignoring non-sexual content: categories=%s",
[cat.value for cat in result.categories],
)
return
# Check if severity meets threshold based on sensitivity
# Higher sensitivity = lower threshold needed to trigger
threshold = 100 - config.ai_sensitivity # e.g., sensitivity 70 = threshold 30
@@ -315,17 +325,27 @@ class AIModeration(commands.Cog):
f"severity={image_result.nsfw_severity}, violent={image_result.is_violent}, conf={image_result.confidence}"
)
if (
image_result.is_nsfw
or image_result.is_violent
or image_result.is_disturbing
):
# Convert to ModerationResult format
# Filter based on NSFW-only mode setting
should_flag_image = False
categories = []
if config.nsfw_only_filtering:
# In NSFW-only mode, only flag sexual content
if image_result.is_nsfw:
should_flag_image = True
categories.append(ContentCategory.SEXUAL)
else:
# Normal mode: flag all inappropriate content
if image_result.is_nsfw:
should_flag_image = True
categories.append(ContentCategory.SEXUAL)
if image_result.is_violent:
should_flag_image = True
categories.append(ContentCategory.VIOLENCE)
if image_result.is_disturbing:
should_flag_image = True
if should_flag_image:
# Use nsfw_severity if available, otherwise use None for default calculation
severity_override = (
@@ -373,16 +393,27 @@ class AIModeration(commands.Cog):
f"severity={image_result.nsfw_severity}, violent={image_result.is_violent}, conf={image_result.confidence}"
)
if (
image_result.is_nsfw
or image_result.is_violent
or image_result.is_disturbing
):
# Filter based on NSFW-only mode setting
should_flag_image = False
categories = []
if config.nsfw_only_filtering:
# In NSFW-only mode, only flag sexual content
if image_result.is_nsfw:
should_flag_image = True
categories.append(ContentCategory.SEXUAL)
else:
# Normal mode: flag all inappropriate content
if image_result.is_nsfw:
should_flag_image = True
categories.append(ContentCategory.SEXUAL)
if image_result.is_violent:
should_flag_image = True
categories.append(ContentCategory.VIOLENCE)
if image_result.is_disturbing:
should_flag_image = True
if should_flag_image:
# Use nsfw_severity if available, otherwise use None for default calculation
severity_override = (
@@ -465,6 +496,11 @@ class AIModeration(commands.Cog):
value="✅ Enabled" if config and config.ai_log_only else "❌ Disabled",
inline=True,
)
embed.add_field(
name="NSFW-Only Mode",
value="✅ Enabled" if config and config.nsfw_only_filtering else "❌ Disabled",
inline=True,
)
embed.add_field(
name="AI Provider",
value=self.bot.settings.ai_provider.capitalize(),
@@ -537,6 +573,46 @@ class AIModeration(commands.Cog):
status = "enabled" if enabled else "disabled"
await ctx.send(f"NSFW detection {status}.")
@ai_cmd.command(name="nsfwonly")
@commands.has_permissions(administrator=True)
@commands.guild_only()
async def ai_nsfw_only(self, ctx: commands.Context, enabled: bool) -> None:
    """Enable or disable NSFW-only filtering mode.

    When enabled, only sexual/nude content will be filtered.
    Violence, harassment, and other content types will be allowed.

    Args:
        ctx: Invocation context (guild-only, administrator-only).
        enabled: True to enable NSFW-only mode, False to restore
            normal filtering of all content categories.
    """
    # Persist the toggle for this guild before replying.
    await self.bot.guild_config.update_settings(ctx.guild.id, nsfw_only_filtering=enabled)
    # (The previous implementation computed an unused `status` string
    # here; removed as dead code — the embeds below carry the state.)
    if enabled:
        # Warn loudly: enabling this mode deliberately stops filtering
        # of every non-sexual content category.
        embed = discord.Embed(
            title="NSFW-Only Mode Enabled",
            description="⚠️ **Important:** Only sexual and nude content will now be filtered.\n"
            "Violence, harassment, hate speech, and other content types will be **allowed**.",
            color=discord.Color.orange(),
        )
        embed.add_field(
            name="What will be filtered:",
            value="• Sexual content\n• Nude images\n• Explicit material",
            inline=True,
        )
        embed.add_field(
            name="What will be allowed:",
            value="• Violence and gore\n• Harassment\n• Hate speech\n• Self-harm content",
            inline=True,
        )
        embed.set_footer(text="Use '!ai nsfwonly false' to return to normal filtering")
    else:
        embed = discord.Embed(
            title="NSFW-Only Mode Disabled",
            description="✅ Normal content filtering restored.\n"
            "All inappropriate content types will now be filtered.",
            color=discord.Color.green(),
        )
    await ctx.send(embed=embed)
@ai_cmd.command(name="analyze")
@commands.has_permissions(administrator=True)
@commands.guild_only()

View File

@@ -97,6 +97,7 @@ class GuildSettings(Base, TimestampMixin):
ai_confidence_threshold: Mapped[float] = mapped_column(Float, default=0.7, nullable=False)
ai_log_only: Mapped[bool] = mapped_column(Boolean, default=False, nullable=False)
nsfw_detection_enabled: Mapped[bool] = mapped_column(Boolean, default=True, nullable=False)
nsfw_only_filtering: Mapped[bool] = mapped_column(Boolean, default=False, nullable=False)
# Verification settings
verification_enabled: Mapped[bool] = mapped_column(Boolean, default=False, nullable=False)

View File

@@ -0,0 +1,403 @@
"""Tests for NSFW-only filtering functionality."""
import pytest
from unittest.mock import AsyncMock, MagicMock
from guardden.models.guild import GuildSettings
from guardden.services.ai.base import ContentCategory, ModerationResult, ImageAnalysisResult
from guardden.cogs.ai_moderation import AIModeration
class TestNSFWOnlyFiltering:
    """Tests for NSFW-only filtering mode.

    Covers the text path (via the cog's ``_handle_ai_result``) and the
    flag-selection rule used for image analysis, in both normal and
    NSFW-only configurations.
    """

    @pytest.fixture
    def mock_bot(self):
        """Create a mock bot instance."""
        bot = MagicMock()
        bot.user.id = 123456789
        bot.user.__str__ = MagicMock(return_value="GuardDen")
        # Wire the async context-manager protocol onto the DB session so
        # `async with bot.database.session() as s:` works inside the cog.
        bot.database.session.return_value.__aenter__ = AsyncMock()
        bot.database.session.return_value.__aexit__ = AsyncMock()
        bot.database.session.return_value.add = MagicMock()
        return bot

    @pytest.fixture
    def ai_moderation(self, mock_bot):
        """Create an AIModeration instance with mocked bot."""
        return AIModeration(mock_bot)

    @pytest.fixture
    def mock_message(self):
        """Create a mock Discord message with guild/channel/author attributes."""
        message = MagicMock()
        message.id = 987654321
        message.content = "Test message content"
        message.guild.id = 111222333
        message.guild.name = "Test Guild"
        message.channel.id = 444555666
        message.channel.name = "general"
        message.author.id = 777888999
        message.author.__str__ = MagicMock(return_value="TestUser")
        message.author.display_avatar.url = "https://example.com/avatar.png"
        # Async side effects the cog awaits when it acts on a message.
        message.delete = AsyncMock()
        message.author.send = AsyncMock()
        return message

    @pytest.fixture
    def guild_config_normal(self):
        """Guild config with normal filtering (NSFW-only disabled)."""
        config = MagicMock()
        config.ai_moderation_enabled = True
        config.nsfw_detection_enabled = True
        config.nsfw_only_filtering = False
        # Sensitivity 80 -> severity threshold 20 in the cog's
        # `100 - ai_sensitivity` calculation.
        config.ai_sensitivity = 80
        config.ai_confidence_threshold = 0.7
        config.ai_log_only = False
        config.mod_log_channel_id = None
        return config

    @pytest.fixture
    def guild_config_nsfw_only(self):
        """Guild config with NSFW-only filtering enabled."""
        config = MagicMock()
        config.ai_moderation_enabled = True
        config.nsfw_detection_enabled = True
        config.nsfw_only_filtering = True
        config.ai_sensitivity = 80
        config.ai_confidence_threshold = 0.7
        config.ai_log_only = False
        config.mod_log_channel_id = None
        return config

    @pytest.mark.asyncio
    async def test_normal_mode_blocks_violence(self, ai_moderation, mock_message, guild_config_normal, mock_bot):
        """Test that normal mode blocks violence content."""
        mock_bot.guild_config.get_config.return_value = guild_config_normal
        # Create a moderation result with violence category
        result = ModerationResult(
            is_flagged=True,
            confidence=0.9,
            categories=[ContentCategory.VIOLENCE],
            explanation="Violence detected",
            suggested_action="delete"
        )
        # Mock the message delete to track if it was called
        await ai_moderation._handle_ai_result(mock_message, result, "Text Analysis")
        # In normal mode, violent content should be deleted
        mock_message.delete.assert_called_once()

    @pytest.mark.asyncio
    async def test_nsfw_only_mode_ignores_violence(self, ai_moderation, mock_message, guild_config_nsfw_only, mock_bot):
        """Test that NSFW-only mode ignores violence content."""
        mock_bot.guild_config.get_config.return_value = guild_config_nsfw_only
        # Create a moderation result with violence category
        result = ModerationResult(
            is_flagged=True,
            confidence=0.9,
            categories=[ContentCategory.VIOLENCE],
            explanation="Violence detected",
            suggested_action="delete"
        )
        # Mock the message delete to track if it was called
        await ai_moderation._handle_ai_result(mock_message, result, "Text Analysis")
        # In NSFW-only mode, violent content should NOT be deleted
        mock_message.delete.assert_not_called()

    @pytest.mark.asyncio
    async def test_nsfw_only_mode_blocks_sexual_content(self, ai_moderation, mock_message, guild_config_nsfw_only, mock_bot):
        """Test that NSFW-only mode still blocks sexual content."""
        mock_bot.guild_config.get_config.return_value = guild_config_nsfw_only
        # Create a moderation result with sexual category
        result = ModerationResult(
            is_flagged=True,
            confidence=0.9,
            categories=[ContentCategory.SEXUAL],
            explanation="Sexual content detected",
            suggested_action="delete"
        )
        await ai_moderation._handle_ai_result(mock_message, result, "Text Analysis")
        # In NSFW-only mode, sexual content should still be deleted
        mock_message.delete.assert_called_once()

    @pytest.mark.asyncio
    async def test_nsfw_only_mode_ignores_harassment(self, ai_moderation, mock_message, guild_config_nsfw_only, mock_bot):
        """Test that NSFW-only mode ignores harassment content."""
        mock_bot.guild_config.get_config.return_value = guild_config_nsfw_only
        # Create a moderation result with harassment category
        result = ModerationResult(
            is_flagged=True,
            confidence=0.9,
            categories=[ContentCategory.HARASSMENT],
            explanation="Harassment detected",
            suggested_action="warn"
        )
        await ai_moderation._handle_ai_result(mock_message, result, "Text Analysis")
        # In NSFW-only mode, harassment content should be ignored
        mock_message.delete.assert_not_called()

    @pytest.mark.asyncio
    async def test_nsfw_only_mode_ignores_hate_speech(self, ai_moderation, mock_message, guild_config_nsfw_only, mock_bot):
        """Test that NSFW-only mode ignores hate speech content."""
        mock_bot.guild_config.get_config.return_value = guild_config_nsfw_only
        # Create a moderation result with hate speech category
        result = ModerationResult(
            is_flagged=True,
            confidence=0.9,
            categories=[ContentCategory.HATE_SPEECH],
            explanation="Hate speech detected",
            suggested_action="delete"
        )
        await ai_moderation._handle_ai_result(mock_message, result, "Text Analysis")
        # In NSFW-only mode, hate speech content should be ignored
        mock_message.delete.assert_not_called()

    @pytest.mark.asyncio
    async def test_nsfw_only_mode_ignores_self_harm(self, ai_moderation, mock_message, guild_config_nsfw_only, mock_bot):
        """Test that NSFW-only mode ignores self-harm content."""
        mock_bot.guild_config.get_config.return_value = guild_config_nsfw_only
        # Create a moderation result with self-harm category
        result = ModerationResult(
            is_flagged=True,
            confidence=0.9,
            categories=[ContentCategory.SELF_HARM],
            explanation="Self-harm content detected",
            suggested_action="delete"
        )
        await ai_moderation._handle_ai_result(mock_message, result, "Text Analysis")
        # In NSFW-only mode, self-harm content should be ignored
        mock_message.delete.assert_not_called()

    @pytest.mark.asyncio
    async def test_nsfw_only_mode_mixed_categories_blocks_only_sexual(self, ai_moderation, mock_message, guild_config_nsfw_only, mock_bot):
        """Test that NSFW-only mode with mixed categories only blocks if sexual content is present."""
        mock_bot.guild_config.get_config.return_value = guild_config_nsfw_only
        # Create a moderation result with both sexual and violence categories
        result = ModerationResult(
            is_flagged=True,
            confidence=0.9,
            categories=[ContentCategory.SEXUAL, ContentCategory.VIOLENCE],
            explanation="Sexual and violent content detected",
            suggested_action="delete"
        )
        await ai_moderation._handle_ai_result(mock_message, result, "Text Analysis")
        # Should still be deleted because sexual content is present
        mock_message.delete.assert_called_once()

    @pytest.mark.asyncio
    async def test_nsfw_only_mode_image_analysis_nsfw_flagged(self, ai_moderation, mock_bot, guild_config_nsfw_only):
        """Test that NSFW-only mode flags NSFW images."""
        # NOTE(review): this test re-implements the cog's flag-selection
        # rule inline and asserts on its own local variables — it documents
        # the expected rule but never invokes the cog's image path, so it
        # cannot catch a regression there. Consider exercising the cog's
        # image handler directly.
        mock_bot.guild_config.get_config.return_value = guild_config_nsfw_only
        # Mock image analysis result with NSFW content
        image_result = ImageAnalysisResult(
            is_nsfw=True,
            is_violent=True,  # Also violent, but should be ignored in NSFW-only mode
            is_disturbing=False,
            confidence=0.9,
            description="NSFW image with violence",
            categories=["sexual", "violence"],
            nsfw_category="explicit",
            nsfw_severity=85
        )
        # Test the filtering logic by directly checking what gets flagged
        should_flag_image = False
        categories = []
        if guild_config_nsfw_only.nsfw_only_filtering:
            # In NSFW-only mode, only flag sexual content
            if image_result.is_nsfw:
                should_flag_image = True
                categories.append(ContentCategory.SEXUAL)
        assert should_flag_image is True
        assert ContentCategory.SEXUAL in categories
        assert ContentCategory.VIOLENCE not in categories

    @pytest.mark.asyncio
    async def test_nsfw_only_mode_image_analysis_violence_ignored(self, ai_moderation, mock_bot, guild_config_nsfw_only):
        """Test that NSFW-only mode ignores violent images without sexual content."""
        # NOTE(review): same inline-logic pattern as above — asserts on
        # locally re-derived values rather than on cog behavior.
        mock_bot.guild_config.get_config.return_value = guild_config_nsfw_only
        # Mock image analysis result with only violence (no NSFW)
        image_result = ImageAnalysisResult(
            is_nsfw=False,
            is_violent=True,
            is_disturbing=True,
            confidence=0.9,
            description="Violent image without sexual content",
            categories=["violence"],
            nsfw_category="none",
            nsfw_severity=0
        )
        # Test the filtering logic
        should_flag_image = False
        categories = []
        if guild_config_nsfw_only.nsfw_only_filtering:
            # In NSFW-only mode, only flag sexual content
            if image_result.is_nsfw:
                should_flag_image = True
                categories.append(ContentCategory.SEXUAL)
        assert should_flag_image is False
        assert categories == []

    @pytest.mark.asyncio
    async def test_normal_mode_image_analysis_flags_all(self, ai_moderation, mock_bot, guild_config_normal):
        """Test that normal mode flags all inappropriate image content."""
        # NOTE(review): same inline-logic pattern — does not invoke the cog.
        mock_bot.guild_config.get_config.return_value = guild_config_normal
        # Mock image analysis result with violence only
        image_result = ImageAnalysisResult(
            is_nsfw=False,
            is_violent=True,
            is_disturbing=True,
            confidence=0.9,
            description="Violent image",
            categories=["violence"],
            nsfw_category="none",
            nsfw_severity=0
        )
        # Test the filtering logic for normal mode
        should_flag_image = False
        categories = []
        if not guild_config_normal.nsfw_only_filtering:
            # Normal mode: flag all inappropriate content
            if image_result.is_nsfw:
                should_flag_image = True
                categories.append(ContentCategory.SEXUAL)
            if image_result.is_violent:
                should_flag_image = True
                categories.append(ContentCategory.VIOLENCE)
            if image_result.is_disturbing:
                should_flag_image = True
        assert should_flag_image is True
        assert ContentCategory.VIOLENCE in categories

    def test_guild_settings_model_has_nsfw_only_filtering(self):
        """Test that GuildSettings model includes the nsfw_only_filtering field."""
        # This test ensures the database model was updated correctly
        # Note: This is a basic check, more comprehensive DB tests would require actual DB setup
        assert hasattr(GuildSettings, "nsfw_only_filtering")
class TestNSFWOnlyFilteringCommands:
"""Tests for NSFW-only filtering Discord commands."""
@pytest.fixture
def mock_ctx(self):
    """Provide a stand-in Discord command context with an async send()."""
    context = MagicMock()
    context.guild.id = 111222333
    context.guild.name = "Test Guild"
    context.send = AsyncMock()
    return context
@pytest.fixture
def mock_bot_with_config(self):
    """Provide a bot double whose guild-config service records updates."""
    fake_bot = MagicMock()
    fake_bot.guild_config.update_settings = AsyncMock()
    return fake_bot
@pytest.fixture
def ai_moderation_with_bot(self, mock_bot_with_config):
    """Build the cog under test around the mocked bot."""
    cog = AIModeration(mock_bot_with_config)
    return cog
@pytest.mark.asyncio
async def test_nsfw_only_command_enable(self, ai_moderation_with_bot, mock_ctx, mock_bot_with_config):
    """Test the !ai nsfwonly true command.

    Verifies the setting is persisted for the guild and that the warning
    embed announcing NSFW-only mode is sent back to the channel.
    """
    await ai_moderation_with_bot.ai_nsfw_only(mock_ctx, True)
    # Verify the setting was updated for this guild.
    mock_bot_with_config.guild_config.update_settings.assert_called_once_with(
        mock_ctx.guild.id,
        nsfw_only_filtering=True
    )
    # Verify response was sent
    mock_ctx.send.assert_called_once()
    # The command sends the embed as a keyword argument
    # (`ctx.send(embed=embed)`), so it must be read from kwargs;
    # `call_args[0][0]` would raise IndexError on an empty args tuple.
    sent_embed = mock_ctx.send.call_args.kwargs["embed"]
    assert "NSFW-Only Mode Enabled" in str(sent_embed.title)
@pytest.mark.asyncio
async def test_nsfw_only_command_disable(self, ai_moderation_with_bot, mock_ctx, mock_bot_with_config):
    """Test the !ai nsfwonly false command.

    Verifies the setting is persisted and the confirmation embed
    restoring normal filtering is sent back to the channel.
    """
    await ai_moderation_with_bot.ai_nsfw_only(mock_ctx, False)
    # Verify the setting was updated for this guild.
    mock_bot_with_config.guild_config.update_settings.assert_called_once_with(
        mock_ctx.guild.id,
        nsfw_only_filtering=False
    )
    # Verify response was sent
    mock_ctx.send.assert_called_once()
    # The command sends the embed via `ctx.send(embed=embed)` (keyword),
    # so read it from kwargs; `call_args[0][0]` would raise IndexError.
    sent_embed = mock_ctx.send.call_args.kwargs["embed"]
    assert "NSFW-Only Mode Disabled" in str(sent_embed.title)
@pytest.mark.asyncio
async def test_ai_settings_display_includes_nsfw_only_mode(self, ai_moderation_with_bot, mock_ctx, mock_bot_with_config):
    """Test that !ai command shows NSFW-only mode status."""
    # Mock the config response
    config = MagicMock()
    config.ai_moderation_enabled = True
    config.nsfw_detection_enabled = True
    config.nsfw_only_filtering = True  # Enable NSFW-only mode
    config.ai_sensitivity = 80
    config.ai_confidence_threshold = 0.7
    config.ai_log_only = False
    # NOTE(review): get_config is left as a plain MagicMock; if the cog
    # awaits it, this should be an AsyncMock — confirm against ai_cmd.
    mock_bot_with_config.guild_config.get_config.return_value = config
    mock_bot_with_config.settings.ai_provider = "openai"
    # NOTE(review): invoking the command group object directly assumes
    # it is callable without being registered on a real bot — verify.
    await ai_moderation_with_bot.ai_cmd(mock_ctx)
    # Verify that the embed was sent
    mock_ctx.send.assert_called_once()
    # NOTE(review): reads the embed positionally; if ai_cmd sends with
    # `ctx.send(embed=...)` this must use call_args.kwargs instead
    # (the send call itself is not visible here — confirm).
    call_args = mock_ctx.send.call_args[0][0]  # Get the embed argument
    embed_dict = call_args.to_dict()
    # Look for the NSFW-Only Mode field
    nsfw_only_field = None
    for field in embed_dict.get("fields", []):
        if field.get("name") == "NSFW-Only Mode":
            nsfw_only_field = field
            break
    assert nsfw_only_field is not None
    assert "✅ Enabled" in nsfw_only_field.get("value", "")