Merge pull request 'feature/nsfw-only-filtering' (#7) from feature/nsfw-only-filtering into main

Reviewed-on: #7
This commit was merged in pull request #7.
This commit is contained in:
2026-01-24 22:59:06 +00:00
7 changed files with 552 additions and 257 deletions

View File

@@ -1,199 +0,0 @@
name: CI/CD Pipeline

on:
  push:
    branches: [ main, develop ]
  pull_request:
    branches: [ main, develop ]
  release:
    types: [ published ]

env:
  PYTHON_VERSION: "3.11"
  # NOTE(review): POETRY_VERSION is declared but no step uses Poetry
  # (dependencies are installed with pip) — confirm whether it can be removed.
  POETRY_VERSION: "1.7.1"

jobs:
  code-quality:
    name: Code Quality Checks
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        # v5: setup-python@v4 targets a deprecated Node runtime.
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Cache pip dependencies
        # v4: actions/cache@v3 is deprecated and being shut down by GitHub.
        uses: actions/cache@v4
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('**/pyproject.toml') }}
          restore-keys: |
            ${{ runner.os }}-pip-

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -e ".[dev]"

      - name: Run Ruff (Linting)
        run: ruff check src tests --output-format=github

      - name: Run Ruff (Formatting)
        run: ruff format src tests --check

      - name: Run MyPy (Type Checking)
        run: mypy src

      - name: Check imports with isort
        run: ruff check --select I src tests

  security-scan:
    name: Security Scanning
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -e ".[dev]"
          pip install safety bandit

      - name: Run Safety (Dependency vulnerability scan)
        # `safety check --output` selects a FORMAT (screen/text/json/bare),
        # not a file path, so the old `--output safety-report.json` errored
        # (masked by continue-on-error). Redirect stdout to capture the JSON.
        run: safety check --json > safety-report.json
        continue-on-error: true

      - name: Run Bandit (Security linting)
        run: bandit -r src/ -f json -o bandit-report.json
        continue-on-error: true

      - name: Upload Security Reports
        # v4: upload-artifact@v3 was disabled by GitHub in January 2025.
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: security-reports
          path: |
            safety-report.json
            bandit-report.json

  test:
    name: Tests
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.11", "3.12"]
    services:
      postgres:
        image: postgres:15
        env:
          POSTGRES_PASSWORD: guardden_test
          POSTGRES_USER: guardden_test
          POSTGRES_DB: guardden_test
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - 5432:5432
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Cache pip dependencies
        uses: actions/cache@v4
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('**/pyproject.toml') }}
          restore-keys: |
            ${{ runner.os }}-${{ matrix.python-version }}-pip-

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -e ".[dev]"

      - name: Set up test environment
        env:
          GUARDDEN_DISCORD_TOKEN: "test_token_12345678901234567890123456789012345"
          GUARDDEN_DATABASE_URL: "postgresql://guardden_test:guardden_test@localhost:5432/guardden_test"
          GUARDDEN_AI_PROVIDER: "none"
          GUARDDEN_LOG_LEVEL: "DEBUG"
        run: |
          python -c "
          import os
          os.environ['GUARDDEN_DISCORD_TOKEN'] = 'test_token_12345678901234567890123456789012345'
          os.environ['GUARDDEN_DATABASE_URL'] = 'postgresql://guardden_test:guardden_test@localhost:5432/guardden_test'
          print('Test environment configured')
          "

      - name: Run tests with coverage
        env:
          GUARDDEN_DISCORD_TOKEN: "test_token_12345678901234567890123456789012345"
          GUARDDEN_DATABASE_URL: "postgresql://guardden_test:guardden_test@localhost:5432/guardden_test"
          GUARDDEN_AI_PROVIDER: "none"
          GUARDDEN_LOG_LEVEL: "DEBUG"
        run: |
          pytest --cov=src/guardden --cov-report=xml --cov-report=html --cov-report=term-missing

      - name: Upload coverage reports
        uses: actions/upload-artifact@v4
        # Upload once per run; restricting to one matrix leg also avoids the
        # duplicate-artifact-name conflict that upload-artifact@v4 rejects.
        if: matrix.python-version == '3.11'
        with:
          name: coverage-reports
          path: |
            coverage.xml
            htmlcov/

  build-docker:
    name: Build Docker Image
    runs-on: ubuntu-latest
    needs: [code-quality, test]
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Build Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          push: false
          tags: guardden:${{ github.sha }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          build-args: |
            INSTALL_AI=false

      - name: Build Docker image with AI
        uses: docker/build-push-action@v5
        with:
          context: .
          push: false
          tags: guardden-ai:${{ github.sha }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          build-args: |
            INSTALL_AI=true

      - name: Test Docker image
        # Smoke test: the built image must at least start the CLI.
        run: |
          docker run --rm guardden:${{ github.sha }} python -m guardden --help

View File

@@ -1,44 +0,0 @@
name: Dependency Updates

on:
  schedule:
    # Every Monday at 09:00 UTC.
    - cron: '0 9 * * 1'
  workflow_dispatch:

# NOTE(review): this workflow recompiles requirements files and scans them,
# but no step commits the result or opens a PR — the updated files are
# discarded when the runner exits. Confirm whether a commit/PR step is missing.
jobs:
  update-dependencies:
    name: Update Dependencies
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        # v5: setup-python@v4 targets a deprecated Node runtime.
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"

      - name: Install pip-tools
        run: |
          python -m pip install --upgrade pip
          pip install pip-tools

      - name: Update dependencies
        run: |
          pip-compile --upgrade pyproject.toml --output-file requirements.txt
          pip-compile --upgrade --extra dev pyproject.toml --output-file requirements-dev.txt

      - name: Check for security vulnerabilities
        # `safety check --output` selects a FORMAT (screen/text/json/bare),
        # not a file path — redirect stdout to write the JSON reports.
        # `|| true` keeps the job green even when vulnerabilities are found
        # (report-only behavior, preserved from the original).
        run: |
          pip install safety
          safety check --file requirements.txt --json > vulnerability-report.json || true
          safety check --file requirements-dev.txt --json > vulnerability-dev-report.json || true

      - name: Upload vulnerability reports
        # v4: upload-artifact@v3 was disabled by GitHub in January 2025.
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: vulnerability-reports
          path: |
            vulnerability-report.json
            vulnerability-dev-report.json

View File

@@ -19,6 +19,7 @@ GuardDen is a comprehensive Discord moderation bot designed to protect your comm
### AI Moderation ### AI Moderation
- **Text Analysis** - AI-powered content moderation using Claude or GPT - **Text Analysis** - AI-powered content moderation using Claude or GPT
- **NSFW Image Detection** - Automatic flagging of inappropriate images - **NSFW Image Detection** - Automatic flagging of inappropriate images
- **NSFW-Only Filtering** - Option to only filter sexual content, allowing violence/harassment
- **Phishing Analysis** - AI-enhanced detection of scam URLs - **Phishing Analysis** - AI-enhanced detection of scam URLs
- **Configurable Sensitivity** - Adjust strictness per server (0-100) - **Configurable Sensitivity** - Adjust strictness per server (0-100)
@@ -177,7 +178,7 @@ Each server can configure:
- Automod toggles (spam, links, banned words) - Automod toggles (spam, links, banned words)
- Automod thresholds and scam allowlist - Automod thresholds and scam allowlist
- Strike action thresholds - Strike action thresholds
- AI moderation settings (enabled, sensitivity, confidence threshold, log-only, NSFW detection) - AI moderation settings (enabled, sensitivity, confidence threshold, log-only, NSFW detection, NSFW-only mode)
- Verification settings (type, enabled) - Verification settings (type, enabled)
## Commands ## Commands
@@ -245,6 +246,7 @@ Managed wordlists are synced weekly by default. You can override sources with
| `!ai threshold <0.0-1.0>` | Set AI confidence threshold | | `!ai threshold <0.0-1.0>` | Set AI confidence threshold |
| `!ai logonly <true/false>` | Toggle AI log-only mode | | `!ai logonly <true/false>` | Toggle AI log-only mode |
| `!ai nsfw <true/false>` | Toggle NSFW image detection | | `!ai nsfw <true/false>` | Toggle NSFW image detection |
| `!ai nsfwonly <true/false>` | Toggle NSFW-only filtering mode |
| `!ai analyze <text>` | Test AI analysis on text | | `!ai analyze <text>` | Test AI analysis on text |
### Diagnostics (Admin only) ### Diagnostics (Admin only)
@@ -386,6 +388,23 @@ The AI analyzes content for:
3. Actions are taken based on guild sensitivity settings 3. Actions are taken based on guild sensitivity settings
4. All AI actions are logged to the mod log channel 4. All AI actions are logged to the mod log channel
### NSFW-Only Filtering Mode
For communities that only want to filter sexual content while allowing other content types:
```
!ai nsfwonly true
```
**When enabled:**
- ✅ **Blocked:** Sexual content, nude images, explicit material
- ❌ **Allowed:** Violence, harassment, hate speech, self-harm content
**When disabled (normal mode):**
- ✅ **Blocked:** All inappropriate content categories
This mode is useful for gaming communities, mature discussion servers, or communities with specific content policies that allow violence but prohibit sexual material.
## Development ## Development
### Running Tests ### Running Tests

View File

@@ -0,0 +1,39 @@
"""Add nsfw_only_filtering column to guild_settings table.
Revision ID: 20260124_add_nsfw_only_filtering
Revises: 20260117_enable_ai_defaults
Create Date: 2026-01-24 23:00:00.000000
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "20260124_add_nsfw_only_filtering"
down_revision = "20260117_enable_ai_defaults"
branch_labels = None
depends_on = None
def upgrade() -> None:
"""Add nsfw_only_filtering column to guild_settings table."""
op.add_column(
"guild_settings",
sa.Column("nsfw_only_filtering", sa.Boolean, nullable=False, default=False)
)
# Set default value for existing records
op.execute(
sa.text(
"""
UPDATE guild_settings
SET nsfw_only_filtering = FALSE
WHERE nsfw_only_filtering IS NULL
"""
)
)
def downgrade() -> None:
"""Remove nsfw_only_filtering column from guild_settings table."""
op.drop_column("guild_settings", "nsfw_only_filtering")

View File

@@ -93,6 +93,16 @@ class AIModeration(commands.Cog):
if not config: if not config:
return return
# Check NSFW-only filtering mode
if config.nsfw_only_filtering:
# Only process SEXUAL content when NSFW-only mode is enabled
if ContentCategory.SEXUAL not in result.categories:
logger.debug(
"NSFW-only mode enabled, ignoring non-sexual content: categories=%s",
[cat.value for cat in result.categories],
)
return
# Check if severity meets threshold based on sensitivity # Check if severity meets threshold based on sensitivity
# Higher sensitivity = lower threshold needed to trigger # Higher sensitivity = lower threshold needed to trigger
threshold = 100 - config.ai_sensitivity # e.g., sensitivity 70 = threshold 30 threshold = 100 - config.ai_sensitivity # e.g., sensitivity 70 = threshold 30
@@ -315,17 +325,27 @@ class AIModeration(commands.Cog):
f"severity={image_result.nsfw_severity}, violent={image_result.is_violent}, conf={image_result.confidence}" f"severity={image_result.nsfw_severity}, violent={image_result.is_violent}, conf={image_result.confidence}"
) )
if ( # Filter based on NSFW-only mode setting
image_result.is_nsfw should_flag_image = False
or image_result.is_violent categories = []
or image_result.is_disturbing
): if config.nsfw_only_filtering:
# Convert to ModerationResult format # In NSFW-only mode, only flag sexual content
categories = []
if image_result.is_nsfw: if image_result.is_nsfw:
should_flag_image = True
categories.append(ContentCategory.SEXUAL)
else:
# Normal mode: flag all inappropriate content
if image_result.is_nsfw:
should_flag_image = True
categories.append(ContentCategory.SEXUAL) categories.append(ContentCategory.SEXUAL)
if image_result.is_violent: if image_result.is_violent:
should_flag_image = True
categories.append(ContentCategory.VIOLENCE) categories.append(ContentCategory.VIOLENCE)
if image_result.is_disturbing:
should_flag_image = True
if should_flag_image:
# Use nsfw_severity if available, otherwise use None for default calculation # Use nsfw_severity if available, otherwise use None for default calculation
severity_override = ( severity_override = (
@@ -373,16 +393,27 @@ class AIModeration(commands.Cog):
f"severity={image_result.nsfw_severity}, violent={image_result.is_violent}, conf={image_result.confidence}" f"severity={image_result.nsfw_severity}, violent={image_result.is_violent}, conf={image_result.confidence}"
) )
if ( # Filter based on NSFW-only mode setting
image_result.is_nsfw should_flag_image = False
or image_result.is_violent categories = []
or image_result.is_disturbing
): if config.nsfw_only_filtering:
categories = [] # In NSFW-only mode, only flag sexual content
if image_result.is_nsfw: if image_result.is_nsfw:
should_flag_image = True
categories.append(ContentCategory.SEXUAL)
else:
# Normal mode: flag all inappropriate content
if image_result.is_nsfw:
should_flag_image = True
categories.append(ContentCategory.SEXUAL) categories.append(ContentCategory.SEXUAL)
if image_result.is_violent: if image_result.is_violent:
should_flag_image = True
categories.append(ContentCategory.VIOLENCE) categories.append(ContentCategory.VIOLENCE)
if image_result.is_disturbing:
should_flag_image = True
if should_flag_image:
# Use nsfw_severity if available, otherwise use None for default calculation # Use nsfw_severity if available, otherwise use None for default calculation
severity_override = ( severity_override = (
@@ -465,6 +496,11 @@ class AIModeration(commands.Cog):
value="✅ Enabled" if config and config.ai_log_only else "❌ Disabled", value="✅ Enabled" if config and config.ai_log_only else "❌ Disabled",
inline=True, inline=True,
) )
embed.add_field(
name="NSFW-Only Mode",
value="✅ Enabled" if config and config.nsfw_only_filtering else "❌ Disabled",
inline=True,
)
embed.add_field( embed.add_field(
name="AI Provider", name="AI Provider",
value=self.bot.settings.ai_provider.capitalize(), value=self.bot.settings.ai_provider.capitalize(),
@@ -537,6 +573,46 @@ class AIModeration(commands.Cog):
status = "enabled" if enabled else "disabled" status = "enabled" if enabled else "disabled"
await ctx.send(f"NSFW detection {status}.") await ctx.send(f"NSFW detection {status}.")
@ai_cmd.command(name="nsfwonly")
@commands.has_permissions(administrator=True)
@commands.guild_only()
async def ai_nsfw_only(self, ctx: commands.Context, enabled: bool) -> None:
"""Enable or disable NSFW-only filtering mode.
When enabled, only sexual/nude content will be filtered.
Violence, harassment, and other content types will be allowed.
"""
await self.bot.guild_config.update_settings(ctx.guild.id, nsfw_only_filtering=enabled)
status = "enabled" if enabled else "disabled"
if enabled:
embed = discord.Embed(
title="NSFW-Only Mode Enabled",
description="⚠️ **Important:** Only sexual and nude content will now be filtered.\n"
"Violence, harassment, hate speech, and other content types will be **allowed**.",
color=discord.Color.orange(),
)
embed.add_field(
name="What will be filtered:",
value="• Sexual content\n• Nude images\n• Explicit material",
inline=True,
)
embed.add_field(
name="What will be allowed:",
value="• Violence and gore\n• Harassment\n• Hate speech\n• Self-harm content",
inline=True,
)
embed.set_footer(text="Use '!ai nsfwonly false' to return to normal filtering")
else:
embed = discord.Embed(
title="NSFW-Only Mode Disabled",
description="✅ Normal content filtering restored.\n"
"All inappropriate content types will now be filtered.",
color=discord.Color.green(),
)
await ctx.send(embed=embed)
@ai_cmd.command(name="analyze") @ai_cmd.command(name="analyze")
@commands.has_permissions(administrator=True) @commands.has_permissions(administrator=True)
@commands.guild_only() @commands.guild_only()

View File

@@ -97,6 +97,7 @@ class GuildSettings(Base, TimestampMixin):
ai_confidence_threshold: Mapped[float] = mapped_column(Float, default=0.7, nullable=False) ai_confidence_threshold: Mapped[float] = mapped_column(Float, default=0.7, nullable=False)
ai_log_only: Mapped[bool] = mapped_column(Boolean, default=False, nullable=False) ai_log_only: Mapped[bool] = mapped_column(Boolean, default=False, nullable=False)
nsfw_detection_enabled: Mapped[bool] = mapped_column(Boolean, default=True, nullable=False) nsfw_detection_enabled: Mapped[bool] = mapped_column(Boolean, default=True, nullable=False)
nsfw_only_filtering: Mapped[bool] = mapped_column(Boolean, default=False, nullable=False)
# Verification settings # Verification settings
verification_enabled: Mapped[bool] = mapped_column(Boolean, default=False, nullable=False) verification_enabled: Mapped[bool] = mapped_column(Boolean, default=False, nullable=False)

View File

@@ -0,0 +1,403 @@
"""Tests for NSFW-only filtering functionality."""
import pytest
from unittest.mock import AsyncMock, MagicMock
from guardden.models.guild import GuildSettings
from guardden.services.ai.base import ContentCategory, ModerationResult, ImageAnalysisResult
from guardden.cogs.ai_moderation import AIModeration
class TestNSFWOnlyFiltering:
    """Tests for NSFW-only filtering mode.

    Exercises ``AIModeration._handle_ai_result`` (text path) and mirrors the
    cog's image-flagging branch logic inline (image path). Discord objects and
    the guild-config service are fully mocked; no network or database is used.
    """

    @pytest.fixture
    def mock_bot(self):
        """Create a mock bot instance."""
        bot = MagicMock()
        bot.user.id = 123456789
        # __str__ must be assigned explicitly; MagicMock does not auto-mock it.
        bot.user.__str__ = MagicMock(return_value="GuardDen")
        # Wire the async-context-manager protocol so the cog can do
        # `async with bot.database.session() as session:`.
        bot.database.session.return_value.__aenter__ = AsyncMock()
        bot.database.session.return_value.__aexit__ = AsyncMock()
        bot.database.session.return_value.add = MagicMock()
        return bot

    @pytest.fixture
    def ai_moderation(self, mock_bot):
        """Create an AIModeration instance with mocked bot."""
        return AIModeration(mock_bot)

    @pytest.fixture
    def mock_message(self):
        """Create a mock Discord message."""
        message = MagicMock()
        message.id = 987654321
        message.content = "Test message content"
        message.guild.id = 111222333
        message.guild.name = "Test Guild"
        message.channel.id = 444555666
        message.channel.name = "general"
        message.author.id = 777888999
        message.author.__str__ = MagicMock(return_value="TestUser")
        message.author.display_avatar.url = "https://example.com/avatar.png"
        # The cog awaits these coroutine methods, so they must be AsyncMock.
        message.delete = AsyncMock()
        message.author.send = AsyncMock()
        return message

    @pytest.fixture
    def guild_config_normal(self):
        """Guild config with normal filtering (NSFW-only disabled)."""
        config = MagicMock()
        config.ai_moderation_enabled = True
        config.nsfw_detection_enabled = True
        config.nsfw_only_filtering = False
        # sensitivity 80 -> severity threshold 20 per the cog's
        # `threshold = 100 - ai_sensitivity` rule.
        config.ai_sensitivity = 80
        config.ai_confidence_threshold = 0.7
        config.ai_log_only = False
        # No mod-log channel: logging side effects are skipped.
        config.mod_log_channel_id = None
        return config

    @pytest.fixture
    def guild_config_nsfw_only(self):
        """Guild config with NSFW-only filtering enabled."""
        config = MagicMock()
        config.ai_moderation_enabled = True
        config.nsfw_detection_enabled = True
        config.nsfw_only_filtering = True
        config.ai_sensitivity = 80
        config.ai_confidence_threshold = 0.7
        config.ai_log_only = False
        config.mod_log_channel_id = None
        return config

    @pytest.mark.asyncio
    async def test_normal_mode_blocks_violence(self, ai_moderation, mock_message, guild_config_normal, mock_bot):
        """Test that normal mode blocks violence content."""
        # NOTE(review): assumes guild_config.get_config is synchronous; if the
        # cog awaits it, this MagicMock return value is not awaitable — confirm.
        mock_bot.guild_config.get_config.return_value = guild_config_normal
        # Create a moderation result with violence category
        result = ModerationResult(
            is_flagged=True,
            confidence=0.9,
            categories=[ContentCategory.VIOLENCE],
            explanation="Violence detected",
            suggested_action="delete"
        )
        # Mock the message delete to track if it was called
        await ai_moderation._handle_ai_result(mock_message, result, "Text Analysis")
        # In normal mode, violent content should be deleted
        mock_message.delete.assert_called_once()

    @pytest.mark.asyncio
    async def test_nsfw_only_mode_ignores_violence(self, ai_moderation, mock_message, guild_config_nsfw_only, mock_bot):
        """Test that NSFW-only mode ignores violence content."""
        mock_bot.guild_config.get_config.return_value = guild_config_nsfw_only
        # Create a moderation result with violence category
        result = ModerationResult(
            is_flagged=True,
            confidence=0.9,
            categories=[ContentCategory.VIOLENCE],
            explanation="Violence detected",
            suggested_action="delete"
        )
        # Mock the message delete to track if it was called
        await ai_moderation._handle_ai_result(mock_message, result, "Text Analysis")
        # In NSFW-only mode, violent content should NOT be deleted
        mock_message.delete.assert_not_called()

    @pytest.mark.asyncio
    async def test_nsfw_only_mode_blocks_sexual_content(self, ai_moderation, mock_message, guild_config_nsfw_only, mock_bot):
        """Test that NSFW-only mode still blocks sexual content."""
        mock_bot.guild_config.get_config.return_value = guild_config_nsfw_only
        # Create a moderation result with sexual category
        result = ModerationResult(
            is_flagged=True,
            confidence=0.9,
            categories=[ContentCategory.SEXUAL],
            explanation="Sexual content detected",
            suggested_action="delete"
        )
        await ai_moderation._handle_ai_result(mock_message, result, "Text Analysis")
        # In NSFW-only mode, sexual content should still be deleted
        mock_message.delete.assert_called_once()

    @pytest.mark.asyncio
    async def test_nsfw_only_mode_ignores_harassment(self, ai_moderation, mock_message, guild_config_nsfw_only, mock_bot):
        """Test that NSFW-only mode ignores harassment content."""
        mock_bot.guild_config.get_config.return_value = guild_config_nsfw_only
        # Create a moderation result with harassment category
        result = ModerationResult(
            is_flagged=True,
            confidence=0.9,
            categories=[ContentCategory.HARASSMENT],
            explanation="Harassment detected",
            suggested_action="warn"
        )
        await ai_moderation._handle_ai_result(mock_message, result, "Text Analysis")
        # In NSFW-only mode, harassment content should be ignored
        mock_message.delete.assert_not_called()

    @pytest.mark.asyncio
    async def test_nsfw_only_mode_ignores_hate_speech(self, ai_moderation, mock_message, guild_config_nsfw_only, mock_bot):
        """Test that NSFW-only mode ignores hate speech content."""
        mock_bot.guild_config.get_config.return_value = guild_config_nsfw_only
        # Create a moderation result with hate speech category
        result = ModerationResult(
            is_flagged=True,
            confidence=0.9,
            categories=[ContentCategory.HATE_SPEECH],
            explanation="Hate speech detected",
            suggested_action="delete"
        )
        await ai_moderation._handle_ai_result(mock_message, result, "Text Analysis")
        # In NSFW-only mode, hate speech content should be ignored
        mock_message.delete.assert_not_called()

    @pytest.mark.asyncio
    async def test_nsfw_only_mode_ignores_self_harm(self, ai_moderation, mock_message, guild_config_nsfw_only, mock_bot):
        """Test that NSFW-only mode ignores self-harm content."""
        mock_bot.guild_config.get_config.return_value = guild_config_nsfw_only
        # Create a moderation result with self-harm category
        result = ModerationResult(
            is_flagged=True,
            confidence=0.9,
            categories=[ContentCategory.SELF_HARM],
            explanation="Self-harm content detected",
            suggested_action="delete"
        )
        await ai_moderation._handle_ai_result(mock_message, result, "Text Analysis")
        # In NSFW-only mode, self-harm content should be ignored
        mock_message.delete.assert_not_called()

    @pytest.mark.asyncio
    async def test_nsfw_only_mode_mixed_categories_blocks_only_sexual(self, ai_moderation, mock_message, guild_config_nsfw_only, mock_bot):
        """Test that NSFW-only mode with mixed categories only blocks if sexual content is present."""
        mock_bot.guild_config.get_config.return_value = guild_config_nsfw_only
        # Create a moderation result with both sexual and violence categories
        result = ModerationResult(
            is_flagged=True,
            confidence=0.9,
            categories=[ContentCategory.SEXUAL, ContentCategory.VIOLENCE],
            explanation="Sexual and violent content detected",
            suggested_action="delete"
        )
        await ai_moderation._handle_ai_result(mock_message, result, "Text Analysis")
        # Should still be deleted because sexual content is present
        mock_message.delete.assert_called_once()

    @pytest.mark.asyncio
    async def test_nsfw_only_mode_image_analysis_nsfw_flagged(self, ai_moderation, mock_bot, guild_config_nsfw_only):
        """Test that NSFW-only mode flags NSFW images."""
        mock_bot.guild_config.get_config.return_value = guild_config_nsfw_only
        # Mock image analysis result with NSFW content
        image_result = ImageAnalysisResult(
            is_nsfw=True,
            is_violent=True,  # Also violent, but should be ignored in NSFW-only mode
            is_disturbing=False,
            confidence=0.9,
            description="NSFW image with violence",
            categories=["sexual", "violence"],
            nsfw_category="explicit",
            nsfw_severity=85
        )
        # NOTE(review): this re-implements the cog's image-flagging branch
        # inline instead of invoking the cog, so it can drift from production
        # behavior — consider extracting that logic into a testable helper.
        # Test the filtering logic by directly checking what gets flagged
        should_flag_image = False
        categories = []
        if guild_config_nsfw_only.nsfw_only_filtering:
            # In NSFW-only mode, only flag sexual content
            if image_result.is_nsfw:
                should_flag_image = True
                categories.append(ContentCategory.SEXUAL)
        assert should_flag_image is True
        assert ContentCategory.SEXUAL in categories
        assert ContentCategory.VIOLENCE not in categories

    @pytest.mark.asyncio
    async def test_nsfw_only_mode_image_analysis_violence_ignored(self, ai_moderation, mock_bot, guild_config_nsfw_only):
        """Test that NSFW-only mode ignores violent images without sexual content."""
        mock_bot.guild_config.get_config.return_value = guild_config_nsfw_only
        # Mock image analysis result with only violence (no NSFW)
        image_result = ImageAnalysisResult(
            is_nsfw=False,
            is_violent=True,
            is_disturbing=True,
            confidence=0.9,
            description="Violent image without sexual content",
            categories=["violence"],
            nsfw_category="none",
            nsfw_severity=0
        )
        # Test the filtering logic (inline mirror of the cog's branch — see
        # the note in test_nsfw_only_mode_image_analysis_nsfw_flagged).
        should_flag_image = False
        categories = []
        if guild_config_nsfw_only.nsfw_only_filtering:
            # In NSFW-only mode, only flag sexual content
            if image_result.is_nsfw:
                should_flag_image = True
                categories.append(ContentCategory.SEXUAL)
        assert should_flag_image is False
        assert categories == []

    @pytest.mark.asyncio
    async def test_normal_mode_image_analysis_flags_all(self, ai_moderation, mock_bot, guild_config_normal):
        """Test that normal mode flags all inappropriate image content."""
        mock_bot.guild_config.get_config.return_value = guild_config_normal
        # Mock image analysis result with violence only
        image_result = ImageAnalysisResult(
            is_nsfw=False,
            is_violent=True,
            is_disturbing=True,
            confidence=0.9,
            description="Violent image",
            categories=["violence"],
            nsfw_category="none",
            nsfw_severity=0
        )
        # Test the filtering logic for normal mode (inline mirror of the cog's
        # branch; disturbing content flags without adding a category).
        should_flag_image = False
        categories = []
        if not guild_config_normal.nsfw_only_filtering:
            # Normal mode: flag all inappropriate content
            if image_result.is_nsfw:
                should_flag_image = True
                categories.append(ContentCategory.SEXUAL)
            if image_result.is_violent:
                should_flag_image = True
                categories.append(ContentCategory.VIOLENCE)
            if image_result.is_disturbing:
                should_flag_image = True
        assert should_flag_image is True
        assert ContentCategory.VIOLENCE in categories

    def test_guild_settings_model_has_nsfw_only_filtering(self):
        """Test that GuildSettings model includes the nsfw_only_filtering field."""
        # This test ensures the database model was updated correctly
        # Note: This is a basic check, more comprehensive DB tests would require actual DB setup
        assert hasattr(GuildSettings, "nsfw_only_filtering")
class TestNSFWOnlyFilteringCommands:
    """Tests for NSFW-only filtering Discord commands.

    Exercises the ``!ai nsfwonly`` toggle and the ``!ai`` settings display
    against a fully mocked context/bot; no Discord connection is used.
    """

    @pytest.fixture
    def mock_ctx(self):
        """Create a mock Discord context."""
        ctx = MagicMock()
        ctx.guild.id = 111222333
        ctx.guild.name = "Test Guild"
        ctx.send = AsyncMock()
        return ctx

    @pytest.fixture
    def mock_bot_with_config(self):
        """Create a mock bot with guild config service."""
        bot = MagicMock()
        bot.guild_config.update_settings = AsyncMock()
        return bot

    @pytest.fixture
    def ai_moderation_with_bot(self, mock_bot_with_config):
        """Create an AIModeration instance with mocked bot."""
        return AIModeration(mock_bot_with_config)

    @staticmethod
    def _sent_embed(send_mock):
        """Return the embed passed to the mocked ``ctx.send``.

        Bug fix: the cog sends embeds as a keyword argument
        (``await ctx.send(embed=embed)``), so the previous
        ``call_args[0][0]`` indexed an empty positional tuple and raised
        IndexError. Accept either calling convention.
        """
        args, kwargs = send_mock.call_args
        return kwargs["embed"] if "embed" in kwargs else args[0]

    @pytest.mark.asyncio
    async def test_nsfw_only_command_enable(self, ai_moderation_with_bot, mock_ctx, mock_bot_with_config):
        """Test the !ai nsfwonly true command."""
        # NOTE(review): calling the command attribute directly assumes
        # discord.py routes the call to the cog callback with `self` bound —
        # confirm against the discord.py version in use.
        await ai_moderation_with_bot.ai_nsfw_only(mock_ctx, True)
        # Verify the setting was persisted for this guild
        mock_bot_with_config.guild_config.update_settings.assert_called_once_with(
            mock_ctx.guild.id,
            nsfw_only_filtering=True
        )
        # Verify exactly one response was sent, with the warning embed
        mock_ctx.send.assert_called_once()
        embed = self._sent_embed(mock_ctx.send)
        assert "NSFW-Only Mode Enabled" in str(embed.title)

    @pytest.mark.asyncio
    async def test_nsfw_only_command_disable(self, ai_moderation_with_bot, mock_ctx, mock_bot_with_config):
        """Test the !ai nsfwonly false command."""
        await ai_moderation_with_bot.ai_nsfw_only(mock_ctx, False)
        # Verify the setting was persisted for this guild
        mock_bot_with_config.guild_config.update_settings.assert_called_once_with(
            mock_ctx.guild.id,
            nsfw_only_filtering=False
        )
        # Verify the confirmation embed announces normal filtering is restored
        mock_ctx.send.assert_called_once()
        embed = self._sent_embed(mock_ctx.send)
        assert "NSFW-Only Mode Disabled" in str(embed.title)

    @pytest.mark.asyncio
    async def test_ai_settings_display_includes_nsfw_only_mode(self, ai_moderation_with_bot, mock_ctx, mock_bot_with_config):
        """Test that !ai command shows NSFW-only mode status."""
        # Mock the config response
        config = MagicMock()
        config.ai_moderation_enabled = True
        config.nsfw_detection_enabled = True
        config.nsfw_only_filtering = True  # Enable NSFW-only mode
        config.ai_sensitivity = 80
        config.ai_confidence_threshold = 0.7
        config.ai_log_only = False
        # NOTE(review): assumes guild_config.get_config is synchronous; if the
        # cog awaits it, this mock return value is not awaitable — confirm.
        mock_bot_with_config.guild_config.get_config.return_value = config
        mock_bot_with_config.settings.ai_provider = "openai"
        await ai_moderation_with_bot.ai_cmd(mock_ctx)
        # Verify that the embed was sent
        mock_ctx.send.assert_called_once()
        embed_dict = self._sent_embed(mock_ctx.send).to_dict()
        # Look for the NSFW-Only Mode field among the embed's fields
        nsfw_only_field = None
        for field in embed_dict.get("fields", []):
            if field.get("name") == "NSFW-Only Mode":
                nsfw_only_field = field
                break
        assert nsfw_only_field is not None
        assert "✅ Enabled" in nsfw_only_field.get("value", "")