fix GIF issue
Some checks failed
CI/CD Pipeline / Code Quality Checks (push) Failing after 4m56s
CI/CD Pipeline / Security Scanning (push) Successful in 16s
CI/CD Pipeline / Tests (3.11) (push) Successful in 9m43s
CI/CD Pipeline / Tests (3.12) (push) Successful in 9m36s
CI/CD Pipeline / Build Docker Image (push) Has been skipped
Some checks failed
CI/CD Pipeline / Code Quality Checks (push) Failing after 4m56s
CI/CD Pipeline / Security Scanning (push) Successful in 16s
CI/CD Pipeline / Tests (3.11) (push) Successful in 9m43s
CI/CD Pipeline / Tests (3.12) (push) Successful in 9m36s
CI/CD Pipeline / Build Docker Image (push) Has been skipped
This commit is contained in:
@@ -54,8 +54,8 @@ class AIModeration(commands.Cog):
|
|||||||
if message.id in self._analyzed_messages:
|
if message.id in self._analyzed_messages:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
# Skip short messages
|
# Skip short messages without media
|
||||||
if len(message.content) < 20 and not message.attachments:
|
if len(message.content) < 20 and not message.attachments and not message.embeds:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
# Skip messages from bots
|
# Skip messages from bots
|
||||||
@@ -106,9 +106,7 @@ class AIModeration(commands.Cog):
|
|||||||
# Determine action based on suggested action and severity
|
# Determine action based on suggested action and severity
|
||||||
should_delete = not log_only and result.suggested_action in ("delete", "timeout", "ban")
|
should_delete = not log_only and result.suggested_action in ("delete", "timeout", "ban")
|
||||||
should_timeout = (
|
should_timeout = (
|
||||||
not log_only
|
not log_only and result.suggested_action in ("timeout", "ban") and result.severity > 70
|
||||||
and result.suggested_action in ("timeout", "ban")
|
|
||||||
and result.severity > 70
|
|
||||||
)
|
)
|
||||||
timeout_duration: int | None = None
|
timeout_duration: int | None = None
|
||||||
|
|
||||||
@@ -288,9 +286,9 @@ class AIModeration(commands.Cog):
|
|||||||
return # Don't continue if already flagged
|
return # Don't continue if already flagged
|
||||||
|
|
||||||
# Analyze images if NSFW detection is enabled (limit to 3 per message)
|
# Analyze images if NSFW detection is enabled (limit to 3 per message)
|
||||||
|
images_analyzed = 0
|
||||||
if config.nsfw_detection_enabled and message.attachments:
|
if config.nsfw_detection_enabled and message.attachments:
|
||||||
logger.info(f"Checking {len(message.attachments)} attachments for NSFW content")
|
logger.info(f"Checking {len(message.attachments)} attachments for NSFW content")
|
||||||
images_analyzed = 0
|
|
||||||
for attachment in message.attachments:
|
for attachment in message.attachments:
|
||||||
if images_analyzed >= 3:
|
if images_analyzed >= 3:
|
||||||
break
|
break
|
||||||
@@ -327,6 +325,51 @@ class AIModeration(commands.Cog):
|
|||||||
await self._handle_ai_result(message, result, "Image Analysis")
|
await self._handle_ai_result(message, result, "Image Analysis")
|
||||||
return
|
return
|
||||||
|
|
||||||
|
# Also analyze images from embeds (GIFs from Discord's GIF picker use embeds)
|
||||||
|
if config.nsfw_detection_enabled and message.embeds:
|
||||||
|
for embed in message.embeds:
|
||||||
|
if images_analyzed >= 3:
|
||||||
|
break
|
||||||
|
|
||||||
|
# Check embed image or thumbnail (GIFs often use thumbnail)
|
||||||
|
image_url = None
|
||||||
|
if embed.image and embed.image.url:
|
||||||
|
image_url = embed.image.url
|
||||||
|
elif embed.thumbnail and embed.thumbnail.url:
|
||||||
|
image_url = embed.thumbnail.url
|
||||||
|
|
||||||
|
if image_url:
|
||||||
|
images_analyzed += 1
|
||||||
|
logger.info(f"Analyzing embed image: {image_url[:80]}...")
|
||||||
|
image_result = await self.bot.ai_provider.analyze_image(
|
||||||
|
image_url=image_url,
|
||||||
|
sensitivity=config.ai_sensitivity,
|
||||||
|
)
|
||||||
|
logger.info(
|
||||||
|
f"Embed image result: nsfw={image_result.is_nsfw}, violent={image_result.is_violent}, conf={image_result.confidence}"
|
||||||
|
)
|
||||||
|
|
||||||
|
if (
|
||||||
|
image_result.is_nsfw
|
||||||
|
or image_result.is_violent
|
||||||
|
or image_result.is_disturbing
|
||||||
|
):
|
||||||
|
categories = []
|
||||||
|
if image_result.is_nsfw:
|
||||||
|
categories.append(ContentCategory.SEXUAL)
|
||||||
|
if image_result.is_violent:
|
||||||
|
categories.append(ContentCategory.VIOLENCE)
|
||||||
|
|
||||||
|
result = ModerationResult(
|
||||||
|
is_flagged=True,
|
||||||
|
confidence=image_result.confidence,
|
||||||
|
categories=categories,
|
||||||
|
explanation=image_result.description,
|
||||||
|
suggested_action="delete",
|
||||||
|
)
|
||||||
|
await self._handle_ai_result(message, result, "Image Analysis")
|
||||||
|
return
|
||||||
|
|
||||||
# Analyze URLs for phishing
|
# Analyze URLs for phishing
|
||||||
urls = URL_PATTERN.findall(message.content)
|
urls = URL_PATTERN.findall(message.content)
|
||||||
allowlist = {normalize_domain(domain) for domain in config.scam_allowlist if domain}
|
allowlist = {normalize_domain(domain) for domain in config.scam_allowlist if domain}
|
||||||
|
|||||||
Reference in New Issue
Block a user