mantimon-tcg/backend/tests/unit/services/test_profanity_service.py
Cal Corum cd3efcb528 Implement ProfilePage and profanity filter for display names (F1-006)
ProfilePage implementation:
- Full profile page with avatar, editable display name, session count
- LinkedAccountCard and DisplayNameEditor components
- useProfile composable wrapping user store operations
- Support for linking/unlinking OAuth providers
- Logout and logout-all-devices functionality

Profanity service with bypass detection:
- Uses better-profanity library for base detection
- Enhanced to catch common bypass attempts:
  - Number suffixes/prefixes (shit123, 69fuck)
  - Leet-speak substitutions (sh1t, f@ck, $hit)
  - Separator characters (s.h.i.t, f-u-c-k)
- Integrated into PATCH /api/users/me endpoint
- 17 unit tests covering all normalization strategies

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-30 16:06:42 -06:00

212 lines
7.0 KiB
Python

"""Tests for profanity service.
Unit tests for the profanity filtering service that validates
user-generated content like display names.
"""
from app.services.profanity_service import (
censor_text,
contains_profanity,
validate_display_name,
validate_text,
)
class TestContainsProfanity:
    """Tests for the contains_profanity function."""

    def test_clean_text_returns_false(self) -> None:
        """
        Test that clean text is not flagged as profanity.

        Normal usernames and display names should pass validation
        without being incorrectly flagged.
        """
        for clean_name in ("PlayerOne", "CoolGamer123", "DragonMaster"):
            assert contains_profanity(clean_name) is False

    def test_profane_text_returns_true(self) -> None:
        """
        Test that profane text is correctly detected.

        Common profanity should be detected to prevent inappropriate
        usernames from being created.
        """
        # Words that are definitely in the default word list.
        for profane_word in ("shit", "fuck"):
            assert contains_profanity(profane_word) is True

    def test_mixed_case_profanity_detected(self) -> None:
        """
        Test that profanity is detected regardless of case.

        Users may try to bypass filters using mixed case, which
        should still be caught.
        """
        for variant in ("SHIT", "ShIt"):
            assert contains_profanity(variant) is True

    def test_empty_string_is_clean(self) -> None:
        """
        Test that empty strings are not flagged.

        Empty strings don't contain profanity by definition.
        """
        assert contains_profanity("") is False

    def test_profanity_with_number_suffix_detected(self) -> None:
        """
        Test that profanity followed by numbers is detected.

        Users commonly try to bypass filters by appending numbers
        to bad words (e.g., "shit123"). The filter should catch these.
        """
        for attempt in ("shit123", "fuck2024", "ass99"):
            assert contains_profanity(attempt) is True

    def test_profanity_with_number_prefix_detected(self) -> None:
        """
        Test that profanity preceded by numbers is detected.

        Numbers before bad words should also be caught.
        """
        for attempt in ("123shit", "69fuck"):
            assert contains_profanity(attempt) is True

    def test_leet_speak_profanity_detected(self) -> None:
        """
        Test that leet-speak substitutions are detected.

        Common character substitutions (@ for a, 1 for i, etc.)
        should be normalized and caught by the filter.
        """
        for attempt in ("sh1t", "f@ck", "a$$", "$hit"):
            assert contains_profanity(attempt) is True

    def test_profanity_with_separators_detected(self) -> None:
        """
        Test that profanity broken up with separators is detected.

        Users may insert dots, dashes, or underscores between letters
        to break up bad words. The filter should remove these and check.
        """
        for attempt in ("s.h.i.t", "f-u-c-k", "s_h_i_t"):
            assert contains_profanity(attempt) is True

    def test_clean_words_with_numbers_not_flagged(self) -> None:
        """
        Test that clean words with numbers are not incorrectly flagged.

        Normal gaming-style names with numbers should pass validation.
        """
        for clean_name in ("Player123", "Gamer2024", "Dragon99", "FireMage42"):
            assert contains_profanity(clean_name) is False
class TestValidateDisplayName:
    """Tests for the validate_display_name function."""

    def test_valid_name_returns_success(self) -> None:
        """
        Test that valid display names pass validation.

        Normal display names should return (True, None) indicating
        they are valid with no error message.
        """
        result = validate_display_name("GoodPlayer")
        assert result == (True, None)

    def test_profane_name_returns_error(self) -> None:
        """
        Test that profane display names return an error.

        Display names containing profanity should return (False, error)
        with a user-friendly error message.
        """
        ok, message = validate_display_name("shit123")
        assert ok is False
        assert message is not None
        assert "inappropriate" in message.lower()

    def test_error_message_is_user_friendly(self) -> None:
        """
        Test that error messages are suitable for display to users.

        The error message should be professional and not contain
        the actual profanity.
        """
        ok, message = validate_display_name("fuckface")
        assert ok is False
        assert message == "Display name contains inappropriate language"
class TestValidateText:
    """Tests for the generic validate_text function."""

    def test_valid_text_passes(self) -> None:
        """
        Test that clean text passes validation.

        Generic text validation should work the same as display name
        validation for clean content.
        """
        is_valid, error = validate_text("This is a clean message", "message")
        assert is_valid is True
        assert error is None

    def test_custom_field_name_in_error(self) -> None:
        """
        Test that the field name appears in error messages.

        The error message should reference the specific field that
        failed validation for clarity.
        """
        is_valid, error = validate_text("shit", "bio")
        assert is_valid is False
        # Guard first: if the service ever returns (False, None), this fails
        # as a clean assertion instead of a TypeError on the `in` check below.
        assert error is not None
        assert "Bio" in error  # Capitalized field name
class TestCensorText:
    """Tests for the censor_text function."""

    def test_profanity_is_censored(self) -> None:
        """
        Test that profanity is replaced with asterisks.

        The censor function should replace bad words while keeping
        the rest of the text intact.
        """
        result = censor_text("what the shit")
        assert "shit" not in result
        assert "****" in result

    def test_clean_text_unchanged(self) -> None:
        """
        Test that clean text is returned unchanged.

        Text without profanity should pass through the censor
        function without modification.
        """
        original = "This is a clean message"
        result = censor_text(original)
        assert result == original

    def test_custom_censor_char(self) -> None:
        """
        Test that custom censor characters are used.

        Users should be able to specify alternative censor
        characters for different display contexts.
        """
        result = censor_text("what the shit", censor_char="#")
        # The profane word must actually be removed — merely finding a '#'
        # somewhere in the output would not prove the censor ran.
        assert "shit" not in result
        assert "#" in result
        assert "*" not in result