From 136ae04388d91c855cd1f973f17ae0565daaef2f Mon Sep 17 00:00:00 2001
From: latte
Date: Sat, 24 Jan 2026 16:53:13 +0100
Subject: [PATCH] fix GIF issue

GIFs sent via Discord's GIF picker arrive as message embeds, not
attachments, so they bypassed NSFW image analysis entirely. Hoist the
images_analyzed counter out of the attachment-only branch and add an
embed-scanning pass (image or thumbnail URL) that shares the same
3-images-per-message budget. Also count embeds when deciding whether a
short message is worth analyzing at all.
---
 src/guardden/cogs/ai_moderation.py | 55 ++++++++++++++++++++++++++----
 1 file changed, 49 insertions(+), 6 deletions(-)

diff --git a/src/guardden/cogs/ai_moderation.py b/src/guardden/cogs/ai_moderation.py
index 51c35d6..64a50a6 100644
--- a/src/guardden/cogs/ai_moderation.py
+++ b/src/guardden/cogs/ai_moderation.py
@@ -54,8 +54,8 @@ class AIModeration(commands.Cog):
         if message.id in self._analyzed_messages:
             return False
 
-        # Skip short messages
-        if len(message.content) < 20 and not message.attachments:
+        # Skip short messages without media
+        if len(message.content) < 20 and not message.attachments and not message.embeds:
             return False
 
         # Skip messages from bots
@@ -106,9 +106,7 @@ class AIModeration(commands.Cog):
         # Determine action based on suggested action and severity
         should_delete = not log_only and result.suggested_action in ("delete", "timeout", "ban")
         should_timeout = (
-            not log_only
-            and result.suggested_action in ("timeout", "ban")
-            and result.severity > 70
+            not log_only and result.suggested_action in ("timeout", "ban") and result.severity > 70
         )
 
         timeout_duration: int | None = None
@@ -288,9 +286,9 @@ class AIModeration(commands.Cog):
             return  # Don't continue if already flagged
 
         # Analyze images if NSFW detection is enabled (limit to 3 per message)
+        images_analyzed = 0
         if config.nsfw_detection_enabled and message.attachments:
             logger.info(f"Checking {len(message.attachments)} attachments for NSFW content")
-            images_analyzed = 0
             for attachment in message.attachments:
                 if images_analyzed >= 3:
                     break
@@ -327,6 +325,51 @@ class AIModeration(commands.Cog):
                     await self._handle_ai_result(message, result, "Image Analysis")
                     return
 
+        # Also analyze images from embeds (GIFs from Discord's GIF picker use embeds)
+        if config.nsfw_detection_enabled and message.embeds:
+            for embed in message.embeds:
+                if images_analyzed >= 3:
+                    break
+
+                # Check embed image or thumbnail (GIFs often use thumbnail)
+                image_url = None
+                if embed.image and embed.image.url:
+                    image_url = embed.image.url
+                elif embed.thumbnail and embed.thumbnail.url:
+                    image_url = embed.thumbnail.url
+
+                if image_url:
+                    images_analyzed += 1
+                    logger.info(f"Analyzing embed image: {image_url[:80]}...")
+                    image_result = await self.bot.ai_provider.analyze_image(
+                        image_url=image_url,
+                        sensitivity=config.ai_sensitivity,
+                    )
+                    logger.info(
+                        f"Embed image result: nsfw={image_result.is_nsfw}, violent={image_result.is_violent}, conf={image_result.confidence}"
+                    )
+
+                    if (
+                        image_result.is_nsfw
+                        or image_result.is_violent
+                        or image_result.is_disturbing
+                    ):
+                        categories = []
+                        if image_result.is_nsfw:
+                            categories.append(ContentCategory.SEXUAL)
+                        if image_result.is_violent:
+                            categories.append(ContentCategory.VIOLENCE)
+
+                        result = ModerationResult(
+                            is_flagged=True,
+                            confidence=image_result.confidence,
+                            categories=categories,
+                            explanation=image_result.description,
+                            suggested_action="delete",
+                        )
+                        await self._handle_ai_result(message, result, "Image Analysis")
+                        return
+
         # Analyze URLs for phishing
         urls = URL_PATTERN.findall(message.content)
         allowlist = {normalize_domain(domain) for domain in config.scam_allowlist if domain}