quick commit
Some checks failed
CI/CD Pipeline / Code Quality Checks (push) Failing after 6m9s
CI/CD Pipeline / Security Scanning (push) Successful in 26s
CI/CD Pipeline / Tests (3.11) (push) Failing after 5m24s
CI/CD Pipeline / Tests (3.12) (push) Failing after 5m23s
CI/CD Pipeline / Build Docker Image (push) Has been skipped
CI/CD Pipeline / Deploy to Staging (push) Has been skipped
CI/CD Pipeline / Deploy to Production (push) Has been skipped
CI/CD Pipeline / Notification (push) Successful in 1s

This commit is contained in:
2026-01-17 20:24:43 +01:00
parent 95cc3cdb8f
commit 831eed8dbc
82 changed files with 8860 additions and 167 deletions

View File

@@ -2,12 +2,21 @@
import logging
from datetime import datetime, timedelta, timezone
from typing import Literal
import discord
from discord.ext import commands
from sqlalchemy import func, select
from guardden.bot import GuardDen
from guardden.services.automod import AutomodResult, AutomodService
from guardden.models import ModerationLog, Strike
from guardden.services.automod import (
AutomodResult,
AutomodService,
SpamConfig,
normalize_domain,
)
from guardden.utils.ratelimit import RateLimitExceeded
logger = logging.getLogger(__name__)
@@ -19,6 +28,135 @@ class Automod(commands.Cog):
self.bot = bot
self.automod = AutomodService()
def cog_check(self, ctx: commands.Context) -> bool:
    """Gate every automod command behind the bot's owner allowlist.

    Commands invoked outside a guild are always refused; otherwise the
    decision is delegated to the bot's owner-allowlist lookup.
    """
    in_guild = ctx.guild is not None
    return in_guild and self.bot.is_owner_allowed(ctx.author.id)
async def cog_before_invoke(self, ctx: commands.Context) -> None:
    """Apply per-command rate limiting before any automod command runs.

    Raises:
        RateLimitExceeded: when the limiter reports the caller is over
            its budget for this command.
    """
    command = ctx.command
    if command is None:
        return
    guild_id = None if ctx.guild is None else ctx.guild.id
    outcome = self.bot.rate_limiter.acquire_command(
        command.qualified_name,
        user_id=ctx.author.id,
        guild_id=guild_id,
        channel_id=ctx.channel.id,
    )
    if outcome.is_limited:
        raise RateLimitExceeded(outcome.reset_after)
async def cog_command_error(self, ctx: commands.Context, error: Exception) -> None:
    """Report rate-limit violations back to the invoking user.

    Any other error type is deliberately ignored here so the bot's
    global error handling can deal with it.
    """
    if not isinstance(error, RateLimitExceeded):
        return
    await ctx.send(
        f"You're being rate limited. Try again in {error.retry_after:.1f} seconds."
    )
def _spam_config(self, config) -> SpamConfig:
if not config:
return self.automod.default_spam_config
return SpamConfig(
message_rate_limit=config.message_rate_limit,
message_rate_window=config.message_rate_window,
duplicate_threshold=config.duplicate_threshold,
mention_limit=config.mention_limit,
mention_rate_limit=config.mention_rate_limit,
mention_rate_window=config.mention_rate_window,
)
async def _get_strike_count(self, guild_id: int, user_id: int) -> int:
    """Return the active strike-point total for a member in a guild.

    Sums ``Strike.points`` over active strikes; a member with no
    strikes yields 0 rather than ``None``.
    """
    stmt = (
        select(func.sum(Strike.points))
        .where(Strike.guild_id == guild_id)
        .where(Strike.user_id == user_id)
        .where(Strike.is_active == True)  # noqa: E712 -- SQLAlchemy column comparison
    )
    async with self.bot.database.session() as session:
        total = (await session.execute(stmt)).scalar()
    return total or 0
async def _add_strike(
    self,
    guild: discord.Guild,
    member: discord.Member,
    reason: str,
) -> int:
    """Record a one-point automod strike and return the member's new total.

    The strike is attributed to the bot itself as moderator (falling
    back to moderator id 0 before the bot user is available).

    NOTE(review): the total is re-read via ``_get_strike_count`` in a
    fresh session, which assumes the session context manager commits
    on exit -- confirm, otherwise the new strike is not counted.
    """
    bot_user = self.bot.user
    async with self.bot.database.session() as session:
        session.add(
            Strike(
                guild_id=guild.id,
                user_id=member.id,
                user_name=str(member),
                moderator_id=bot_user.id if bot_user else 0,
                reason=reason,
                points=1,
            )
        )
    return await self._get_strike_count(guild.id, member.id)
async def _apply_strike_actions(
self,
member: discord.Member,
total_strikes: int,
config,
) -> None:
if not config or not config.strike_actions:
return
for threshold, action_config in sorted(
config.strike_actions.items(), key=lambda item: int(item[0]), reverse=True
):
if total_strikes < int(threshold):
continue
action = action_config.get("action")
if action == "ban":
await member.ban(reason=f"Automod: {total_strikes} strikes")
elif action == "kick":
await member.kick(reason=f"Automod: {total_strikes} strikes")
elif action == "timeout":
duration = action_config.get("duration", 3600)
await member.timeout(
timedelta(seconds=duration),
reason=f"Automod: {total_strikes} strikes",
)
break
async def _log_database_action(
    self,
    message: discord.Message,
    result: AutomodResult,
) -> None:
    """Persist an automod action to the moderation log.

    The stored ``action`` reflects the most severe consequence in the
    result (timeout > strike > warn > plain delete); ``expires_at`` is
    only populated when a timeout duration is present.
    """
    if result.should_timeout:
        action = "timeout"
    elif result.should_strike:
        action = "strike"
    elif result.should_warn:
        action = "warn"
    else:
        action = "delete"
    expires_at = None
    if result.timeout_duration:
        expires_at = datetime.now(timezone.utc) + timedelta(
            seconds=result.timeout_duration
        )
    bot_user = self.bot.user
    async with self.bot.database.session() as session:
        session.add(
            ModerationLog(
                guild_id=message.guild.id,
                target_id=message.author.id,
                target_name=str(message.author),
                moderator_id=bot_user.id if bot_user else 0,
                moderator_name=str(bot_user) if bot_user else "GuardDen",
                action=action,
                reason=result.reason,
                duration=result.timeout_duration or None,
                expires_at=expires_at,
                channel_id=message.channel.id,
                message_id=message.id,
                message_content=message.content,
                is_automatic=True,
            )
        )
async def _handle_violation(
self,
message: discord.Message,
@@ -45,8 +183,15 @@ class Automod(commands.Cog):
logger.warning(f"Cannot timeout {message.author}: missing permissions")
# Log the action
await self._log_database_action(message, result)
await self._log_automod_action(message, result)
# Apply strike escalation if configured
if (result.should_warn or result.should_strike) and isinstance(message.author, discord.Member):
total = await self._add_strike(message.guild, message.author, result.reason)
config = await self.bot.guild_config.get_config(message.guild.id)
await self._apply_strike_actions(message.author, total, config)
# Notify the user via DM
try:
embed = discord.Embed(
@@ -136,13 +281,22 @@ class Automod(commands.Cog):
if banned_words:
result = self.automod.check_banned_words(message.content, banned_words)
spam_config = self._spam_config(config)
# Check scam links (if link filter enabled)
if not result and config.link_filter_enabled:
result = self.automod.check_scam_links(message.content)
result = self.automod.check_scam_links(
message.content,
allowlist=config.scam_allowlist,
)
# Check spam
if not result and config.anti_spam_enabled:
result = self.automod.check_spam(message, anti_spam_enabled=True)
result = self.automod.check_spam(
message,
anti_spam_enabled=True,
spam_config=spam_config,
)
# Check invite links (if link filter enabled)
if not result and config.link_filter_enabled:
@@ -194,20 +348,27 @@ class Automod(commands.Cog):
inline=True,
)
spam_config = self._spam_config(config)
# Show thresholds
embed.add_field(
name="Rate Limit",
value=f"{self.automod.message_rate_limit} msgs / {self.automod.message_rate_window}s",
value=f"{spam_config.message_rate_limit} msgs / {spam_config.message_rate_window}s",
inline=True,
)
embed.add_field(
name="Duplicate Threshold",
value=f"{self.automod.duplicate_threshold} same messages",
value=f"{spam_config.duplicate_threshold} same messages",
inline=True,
)
embed.add_field(
name="Mention Limit",
value=f"{self.automod.mention_limit} per message",
value=f"{spam_config.mention_limit} per message",
inline=True,
)
embed.add_field(
name="Mention Rate",
value=f"{spam_config.mention_rate_limit} mentions / {spam_config.mention_rate_window}s",
inline=True,
)
@@ -220,6 +381,82 @@ class Automod(commands.Cog):
await ctx.send(embed=embed)
@automod_cmd.command(name="threshold")
@commands.has_permissions(administrator=True)
@commands.guild_only()
async def automod_threshold(
    self,
    ctx: commands.Context,
    setting: Literal[
        "message_rate_limit",
        "message_rate_window",
        "duplicate_threshold",
        "mention_limit",
        "mention_rate_limit",
        "mention_rate_window",
    ],
    value: int,
) -> None:
    """Update a single automod threshold for this guild.

    The ``setting`` choices are enforced by the ``Literal`` converter
    before this body runs; only the positivity of ``value`` is checked
    here.
    """
    if value < 1:
        await ctx.send("Threshold values must be positive.")
        return
    await self.bot.guild_config.update_settings(ctx.guild.id, **{setting: value})
    await ctx.send(f"Updated `{setting}` to {value}.")
@automod_cmd.group(name="allowlist", invoke_without_command=True)
@commands.has_permissions(administrator=True)
@commands.guild_only()
async def automod_allowlist(self, ctx: commands.Context) -> None:
    """Show the scam link allowlist."""
    config = await self.bot.guild_config.get_config(ctx.guild.id)
    domains = sorted(config.scam_allowlist) if config else []
    if not domains:
        await ctx.send("No allowlisted domains configured.")
        return
    # Cap the listing at 20 entries to stay within message limits.
    lines = [f"- `{domain}`" for domain in domains[:20]]
    await ctx.send("Allowed domains:\n" + "\n".join(lines))
@automod_allowlist.command(name="add")
@commands.has_permissions(administrator=True)
@commands.guild_only()
async def automod_allowlist_add(self, ctx: commands.Context, domain: str) -> None:
    """Add a domain to the scam link allowlist."""
    normalized = normalize_domain(domain)
    if not normalized:
        await ctx.send("Provide a valid domain or URL to allowlist.")
        return
    config = await self.bot.guild_config.get_config(ctx.guild.id)
    current = list(config.scam_allowlist) if config else []
    if normalized in current:
        await ctx.send(f"`{normalized}` is already allowlisted.")
        return
    updated = current + [normalized]
    await self.bot.guild_config.update_settings(
        ctx.guild.id, scam_allowlist=updated
    )
    await ctx.send(f"Added `{normalized}` to the allowlist.")
@automod_allowlist.command(name="remove")
@commands.has_permissions(administrator=True)
@commands.guild_only()
async def automod_allowlist_remove(self, ctx: commands.Context, domain: str) -> None:
    """Remove a domain from the scam link allowlist."""
    normalized = normalize_domain(domain)
    if not normalized:
        # Consistent with the `add` subcommand -- without this guard an
        # invalid argument was echoed back as "`None` is not in the
        # allowlist", which is confusing.
        await ctx.send("Provide a valid domain or URL to remove.")
        return
    config = await self.bot.guild_config.get_config(ctx.guild.id)
    allowlist = list(config.scam_allowlist) if config else []
    if normalized not in allowlist:
        await ctx.send(f"`{normalized}` is not in the allowlist.")
        return
    allowlist.remove(normalized)
    await self.bot.guild_config.update_settings(ctx.guild.id, scam_allowlist=allowlist)
    await ctx.send(f"Removed `{normalized}` from the allowlist.")
@automod_cmd.command(name="test")
@commands.has_permissions(administrator=True)
@commands.guild_only()
@@ -235,7 +472,7 @@ class Automod(commands.Cog):
results.append(f"**Banned Words**: {result.reason}")
# Check scam links
result = self.automod.check_scam_links(text)
result = self.automod.check_scam_links(text, allowlist=config.scam_allowlist if config else [])
if result:
results.append(f"**Scam Detection**: {result.reason}")