Merge pull request 'next-release' (#42) from next-release into main
All checks were successful
Build Docker Image / build (push) Successful in 50s

Reviewed-on: #42
This commit is contained in:
cal 2026-02-20 20:28:15 +00:00
commit 12fe600e83
118 changed files with 3636 additions and 2764 deletions

80
.scripts/deploy.sh Executable file
View File

@ -0,0 +1,80 @@
#!/usr/bin/env bash
# Deploy Discord Bot v2 to production (akamai).
# Pulls the latest Docker image, restarts the container, then prints
# container status, recent logs, and a ready-to-paste rollback command.
#
# Usage: .scripts/deploy.sh [-y]
#   -y    Skip confirmation prompt
set -euo pipefail

SSH_KEY="${HOME}/.ssh/cloud_servers_rsa"
SSH_HOST="root@akamai"
REMOTE_DIR="/root/container-data/major-domo"
SERVICE="discord-app"
CONTAINER="major-domo-discord-app-1"
IMAGE="manticorum67/major-domo-discordapp:latest"

# Run a command on the deploy host.
# A function with quoted arguments replaces the previous unquoted
# string-variable expansion ($SSH_CMD ...), which relied on word
# splitting and on ssh tilde-expanding a literal '~' (ShellCheck SC2086).
remote() {
    ssh -i "${SSH_KEY}" "${SSH_HOST}" "$@"
}

SKIP_CONFIRM=false
[[ "${1:-}" == "-y" ]] && SKIP_CONFIRM=true

# --- Pre-deploy checks ---
if [[ -n "$(git status --porcelain 2>/dev/null)" ]]; then
    echo "WARNING: You have uncommitted changes."
    git status --short
    echo ""
fi

BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown")
COMMIT=$(git log -1 --format='%h %s' 2>/dev/null || echo "unknown")
echo "Branch: ${BRANCH}"
echo "Latest: ${COMMIT}"
echo "Target: akamai (${IMAGE})"
echo ""

if [[ "$SKIP_CONFIRM" != true ]]; then
    read -rp "Deploy to production? [y/N] " answer
    [[ "$answer" =~ ^[Yy]$ ]] || { echo "Aborted."; exit 0; }
    echo ""
fi

# --- Save previous image digest for rollback ---
# "unknown" when the image has never been pulled on the host.
PREV_DIGEST=$(remote "docker inspect --format='{{index .RepoDigests 0}}' ${IMAGE} 2>/dev/null" || echo "unknown")

# --- Deploy ---
echo "==> Pulling latest image for ${SERVICE}..."
remote "cd ${REMOTE_DIR} && docker compose pull ${SERVICE}"

echo "==> Restarting ${SERVICE}..."
remote "cd ${REMOTE_DIR} && docker compose up -d ${SERVICE}"

echo "==> Waiting 5s for container to start..."
sleep 5

echo "==> Container status:"
remote "docker ps --filter name=${CONTAINER} --format 'table {{.Names}}\t{{.Status}}\t{{.Image}}'"

echo ""
echo "==> Last 10 log lines:"
remote "docker logs --tail 10 ${CONTAINER}"

# Fall back to the local image ID if the repo digest is unavailable.
NEW_DIGEST=$(remote "docker inspect --format='{{index .RepoDigests 0}}' ${IMAGE} 2>/dev/null || docker inspect --format='{{.Image}}' ${CONTAINER}")
echo ""
echo "==> Image digest: ${NEW_DIGEST}"
if [[ "$PREV_DIGEST" == "$NEW_DIGEST" ]]; then
    echo "    (unchanged from previous deploy)"
fi

# --- Rollback command ---
if [[ "$PREV_DIGEST" != "unknown" && "$PREV_DIGEST" != "$NEW_DIGEST" ]]; then
    echo ""
    echo "==> To rollback:"
    echo "    ssh -i \"${SSH_KEY}\" ${SSH_HOST} \\"
    echo "      \"cd ${REMOTE_DIR} && docker pull ${PREV_DIGEST} && docker tag ${PREV_DIGEST} ${IMAGE} && docker compose up -d ${SERVICE}\""
fi

echo ""
echo "Deploy complete."

View File

@ -4,6 +4,7 @@ API client for Discord Bot v2.0
Modern aiohttp-based HTTP client for communicating with the database API.
Provides connection pooling, proper error handling, and session management.
"""
import aiohttp
import logging
from typing import Optional, List, Dict, Any, Union
@ -13,13 +14,13 @@ from contextlib import asynccontextmanager
from config import get_config
from exceptions import APIException
logger = logging.getLogger(f'{__name__}.APIClient')
logger = logging.getLogger(f"{__name__}.APIClient")
class APIClient:
"""
Async HTTP client for SBA database API communication.
Features:
- Connection pooling with proper session management
- Bearer token authentication
@ -27,15 +28,15 @@ class APIClient:
- Comprehensive error handling
- Debug logging with response truncation
"""
def __init__(self, base_url: Optional[str] = None, api_token: Optional[str] = None):
"""
Initialize API client with configuration.
Args:
base_url: Override default database URL from config
api_token: Override default API token from config
Raises:
ValueError: If required configuration is missing
"""
@ -43,24 +44,29 @@ class APIClient:
self.base_url = base_url or config.db_url
self.api_token = api_token or config.api_token
self._session: Optional[aiohttp.ClientSession] = None
if not self.base_url:
raise ValueError("DB_URL must be configured")
if not self.api_token:
raise ValueError("API_TOKEN must be configured")
logger.debug(f"APIClient initialized with base_url: {self.base_url}")
@property
def headers(self) -> Dict[str, str]:
"""Get headers with authentication and content type."""
return {
'Authorization': f'Bearer {self.api_token}',
'Content-Type': 'application/json',
'User-Agent': 'SBA-Discord-Bot-v2/1.0'
"Authorization": f"Bearer {self.api_token}",
"Content-Type": "application/json",
"User-Agent": "SBA-Discord-Bot-v2/1.0",
}
def _build_url(self, endpoint: str, api_version: int = 3, object_id: Optional[Union[int, str]] = None) -> str:
def _build_url(
self,
endpoint: str,
api_version: int = 3,
object_id: Optional[Union[int, str]] = None,
) -> str:
"""
Build complete API URL from components.
@ -73,35 +79,38 @@ class APIClient:
Complete URL for API request
"""
# Handle already complete URLs
if endpoint.startswith(('http://', 'https://')) or '/api/' in endpoint:
if endpoint.startswith(("http://", "https://")) or "/api/" in endpoint:
return endpoint
path = f"v{api_version}/{endpoint}"
if object_id is not None:
# URL-encode the object_id to handle special characters (e.g., colons in moveids)
encoded_id = quote(str(object_id), safe='')
encoded_id = quote(str(object_id), safe="")
path += f"/{encoded_id}"
return urljoin(self.base_url.rstrip('/') + '/', path)
return urljoin(self.base_url.rstrip("/") + "/", path)
def _add_params(self, url: str, params: Optional[List[tuple]] = None) -> str:
"""
Add query parameters to URL.
Args:
url: Base URL
params: List of (key, value) tuples
Returns:
URL with query parameters appended
"""
if not params:
return url
param_str = "&".join(f"{key}={value}" for key, value in params)
param_str = "&".join(
f"{quote(str(key), safe='')}={quote(str(value), safe='')}"
for key, value in params
)
separator = "&" if "?" in url else "?"
return f"{url}{separator}{param_str}"
async def _ensure_session(self) -> None:
"""Ensure aiohttp session exists and is not closed."""
if self._session is None or self._session.closed:
@ -109,53 +118,51 @@ class APIClient:
limit=100, # Total connection pool size
limit_per_host=30, # Connections per host
ttl_dns_cache=300, # DNS cache TTL
use_dns_cache=True
use_dns_cache=True,
)
timeout = aiohttp.ClientTimeout(total=30, connect=10)
self._session = aiohttp.ClientSession(
headers=self.headers,
connector=connector,
timeout=timeout
headers=self.headers, connector=connector, timeout=timeout
)
logger.debug("Created new aiohttp session with connection pooling")
async def get(
self,
endpoint: str,
object_id: Optional[Union[int, str]] = None,
params: Optional[List[tuple]] = None,
api_version: int = 3,
timeout: Optional[int] = None
timeout: Optional[int] = None,
) -> Optional[Dict[str, Any]]:
"""
Make GET request to API.
Args:
endpoint: API endpoint
object_id: Optional object ID
params: Query parameters
api_version: API version (default: 3)
timeout: Request timeout override
Returns:
JSON response data or None for 404
Raises:
APIException: For HTTP errors or network issues
"""
url = self._build_url(endpoint, api_version, object_id)
url = self._add_params(url, params)
await self._ensure_session()
try:
logger.debug(f"GET: {endpoint} id: {object_id} params: {params}")
request_timeout = aiohttp.ClientTimeout(total=timeout) if timeout else None
async with self._session.get(url, timeout=request_timeout) as response:
if response.status == 404:
logger.warning(f"Resource not found: {url}")
@ -169,10 +176,12 @@ class APIClient:
elif response.status >= 400:
error_text = await response.text()
logger.error(f"API error {response.status}: {url} - {error_text}")
raise APIException(f"API request failed with status {response.status}: {error_text}")
raise APIException(
f"API request failed with status {response.status}: {error_text}"
)
data = await response.json()
# Truncate response for logging
data_str = str(data)
if len(data_str) > 1200:
@ -180,48 +189,50 @@ class APIClient:
else:
log_data = data_str
logger.debug(f"Response: {log_data}")
return data
except aiohttp.ClientError as e:
logger.error(f"HTTP client error for {url}: {e}")
raise APIException(f"Network error: {e}")
except Exception as e:
logger.error(f"Unexpected error in GET {url}: {e}")
raise APIException(f"API call failed: {e}")
async def post(
self,
endpoint: str,
self,
endpoint: str,
data: Dict[str, Any],
api_version: int = 3,
timeout: Optional[int] = None
timeout: Optional[int] = None,
) -> Optional[Dict[str, Any]]:
"""
Make POST request to API.
Args:
endpoint: API endpoint
data: Request payload
api_version: API version (default: 3)
timeout: Request timeout override
Returns:
JSON response data
Raises:
APIException: For HTTP errors or network issues
"""
url = self._build_url(endpoint, api_version)
await self._ensure_session()
try:
logger.debug(f"POST: {endpoint} data: {data}")
request_timeout = aiohttp.ClientTimeout(total=timeout) if timeout else None
async with self._session.post(url, json=data, timeout=request_timeout) as response:
async with self._session.post(
url, json=data, timeout=request_timeout
) as response:
if response.status == 401:
logger.error(f"Authentication failed for POST: {url}")
raise APIException("Authentication failed - check API token")
@ -231,10 +242,12 @@ class APIClient:
elif response.status not in [200, 201]:
error_text = await response.text()
logger.error(f"POST error {response.status}: {url} - {error_text}")
raise APIException(f"POST request failed with status {response.status}: {error_text}")
raise APIException(
f"POST request failed with status {response.status}: {error_text}"
)
result = await response.json()
# Truncate response for logging
result_str = str(result)
if len(result_str) > 1200:
@ -242,50 +255,52 @@ class APIClient:
else:
log_result = result_str
logger.debug(f"POST Response: {log_result}")
return result
except aiohttp.ClientError as e:
logger.error(f"HTTP client error for POST {url}: {e}")
raise APIException(f"Network error: {e}")
except Exception as e:
logger.error(f"Unexpected error in POST {url}: {e}")
raise APIException(f"POST failed: {e}")
async def put(
self,
endpoint: str,
data: Dict[str, Any],
object_id: Optional[Union[int, str]] = None,
api_version: int = 3,
timeout: Optional[int] = None
timeout: Optional[int] = None,
) -> Optional[Dict[str, Any]]:
"""
Make PUT request to API.
Args:
endpoint: API endpoint
data: Request payload
object_id: Optional object ID
api_version: API version (default: 3)
timeout: Request timeout override
Returns:
JSON response data
Raises:
APIException: For HTTP errors or network issues
"""
url = self._build_url(endpoint, api_version, object_id)
await self._ensure_session()
try:
logger.debug(f"PUT: {endpoint} id: {object_id} data: {data}")
request_timeout = aiohttp.ClientTimeout(total=timeout) if timeout else None
async with self._session.put(url, json=data, timeout=request_timeout) as response:
async with self._session.put(
url, json=data, timeout=request_timeout
) as response:
if response.status == 401:
logger.error(f"Authentication failed for PUT: {url}")
raise APIException("Authentication failed - check API token")
@ -298,19 +313,23 @@ class APIClient:
elif response.status not in [200, 201]:
error_text = await response.text()
logger.error(f"PUT error {response.status}: {url} - {error_text}")
raise APIException(f"PUT request failed with status {response.status}: {error_text}")
raise APIException(
f"PUT request failed with status {response.status}: {error_text}"
)
result = await response.json()
logger.debug(f"PUT Response: {str(result)[:1200]}{'...' if len(str(result)) > 1200 else ''}")
logger.debug(
f"PUT Response: {str(result)[:1200]}{'...' if len(str(result)) > 1200 else ''}"
)
return result
except aiohttp.ClientError as e:
logger.error(f"HTTP client error for PUT {url}: {e}")
raise APIException(f"Network error: {e}")
except Exception as e:
logger.error(f"Unexpected error in PUT {url}: {e}")
raise APIException(f"PUT failed: {e}")
async def patch(
self,
endpoint: str,
@ -318,7 +337,7 @@ class APIClient:
object_id: Optional[Union[int, str]] = None,
api_version: int = 3,
timeout: Optional[int] = None,
use_query_params: bool = False
use_query_params: bool = False,
) -> Optional[Dict[str, Any]]:
"""
Make PATCH request to API.
@ -344,13 +363,15 @@ class APIClient:
# Handle None values by converting to empty string
# The database API's PATCH endpoint treats empty strings as NULL for nullable fields
# Example: {'il_return': None} → ?il_return= → Database sets il_return to NULL
params = [(k, '' if v is None else str(v)) for k, v in data.items()]
params = [(k, "" if v is None else str(v)) for k, v in data.items()]
url = self._add_params(url, params)
await self._ensure_session()
try:
logger.debug(f"PATCH: {endpoint} id: {object_id} data: {data} use_query_params: {use_query_params}")
logger.debug(
f"PATCH: {endpoint} id: {object_id} data: {data} use_query_params: {use_query_params}"
)
logger.debug(f"PATCH URL: {url}")
request_timeout = aiohttp.ClientTimeout(total=timeout) if timeout else None
@ -358,10 +379,12 @@ class APIClient:
# Use json=data if data is provided and not using query params
kwargs = {}
if data is not None and not use_query_params:
kwargs['json'] = data
kwargs["json"] = data
logger.debug(f"PATCH JSON body: {data}")
async with self._session.patch(url, timeout=request_timeout, **kwargs) as response:
async with self._session.patch(
url, timeout=request_timeout, **kwargs
) as response:
if response.status == 401:
logger.error(f"Authentication failed for PATCH: {url}")
raise APIException("Authentication failed - check API token")
@ -374,10 +397,14 @@ class APIClient:
elif response.status not in [200, 201]:
error_text = await response.text()
logger.error(f"PATCH error {response.status}: {url} - {error_text}")
raise APIException(f"PATCH request failed with status {response.status}: {error_text}")
raise APIException(
f"PATCH request failed with status {response.status}: {error_text}"
)
result = await response.json()
logger.debug(f"PATCH Response: {str(result)[:1200]}{'...' if len(str(result)) > 1200 else ''}")
logger.debug(
f"PATCH Response: {str(result)[:1200]}{'...' if len(str(result)) > 1200 else ''}"
)
return result
except aiohttp.ClientError as e:
@ -386,38 +413,38 @@ class APIClient:
except Exception as e:
logger.error(f"Unexpected error in PATCH {url}: {e}")
raise APIException(f"PATCH failed: {e}")
async def delete(
self,
endpoint: str,
object_id: Optional[Union[int, str]] = None,
api_version: int = 3,
timeout: Optional[int] = None
timeout: Optional[int] = None,
) -> bool:
"""
Make DELETE request to API.
Args:
endpoint: API endpoint
object_id: Optional object ID
api_version: API version (default: 3)
timeout: Request timeout override
Returns:
True if deletion successful, False if resource not found
Raises:
APIException: For HTTP errors or network issues
"""
url = self._build_url(endpoint, api_version, object_id)
await self._ensure_session()
try:
logger.debug(f"DELETE: {endpoint} id: {object_id}")
request_timeout = aiohttp.ClientTimeout(total=timeout) if timeout else None
async with self._session.delete(url, timeout=request_timeout) as response:
if response.status == 401:
logger.error(f"Authentication failed for DELETE: {url}")
@ -430,30 +457,34 @@ class APIClient:
return False
elif response.status not in [200, 204]:
error_text = await response.text()
logger.error(f"DELETE error {response.status}: {url} - {error_text}")
raise APIException(f"DELETE request failed with status {response.status}: {error_text}")
logger.error(
f"DELETE error {response.status}: {url} - {error_text}"
)
raise APIException(
f"DELETE request failed with status {response.status}: {error_text}"
)
logger.debug(f"DELETE successful: {url}")
return True
except aiohttp.ClientError as e:
logger.error(f"HTTP client error for DELETE {url}: {e}")
raise APIException(f"Network error: {e}")
except Exception as e:
logger.error(f"Unexpected error in DELETE {url}: {e}")
raise APIException(f"DELETE failed: {e}")
async def close(self) -> None:
    """Shut down the aiohttp session, releasing its pooled connections.

    Safe to call if no session was ever created or it is already closed.
    """
    session = self._session
    if session is None or session.closed:
        return
    await session.close()
    logger.debug("Closed aiohttp session")
async def __aenter__(self):
    """Enter the async context: guarantee a live session, then hand back this client."""
    await self._ensure_session()
    return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
    """Leave the async context, closing the session whether or not an exception occurred."""
    await self.close()
@ -463,7 +494,7 @@ class APIClient:
async def get_api_client() -> APIClient:
"""
Get API client as async context manager.
Usage:
async with get_api_client() as client:
data = await client.get('players')
@ -482,14 +513,14 @@ _global_client: Optional[APIClient] = None
async def get_global_client() -> APIClient:
    """Return the process-wide shared API client, creating it on first use.

    Returns:
        Shared APIClient instance with a live aiohttp session
    """
    global _global_client
    client = _global_client
    if client is None:
        # Lazily construct the singleton on first request.
        client = APIClient()
        _global_client = client
    await client._ensure_session()
    return client
@ -499,4 +530,4 @@ async def cleanup_global_client() -> None:
global _global_client
if _global_client:
await _global_client.close()
_global_client = None
_global_client = None

271
bot.py
View File

@ -3,6 +3,7 @@ Discord Bot v2.0 - Main Entry Point
Modern discord.py bot with application commands and proper error handling.
"""
import asyncio
import hashlib
import json
@ -17,95 +18,97 @@ from config import get_config
from exceptions import BotException
from api.client import get_global_client, cleanup_global_client
from utils.random_gen import STARTUP_WATCHING, random_from_list
from views.embeds import EmbedTemplate, EmbedColors
from views.embeds import EmbedTemplate
def setup_logging():
"""Configure hybrid logging: human-readable console + structured JSON files."""
from utils.logging import JSONFormatter
# Create logs directory if it doesn't exist
os.makedirs('logs', exist_ok=True)
os.makedirs("logs", exist_ok=True)
# Configure root logger
config = get_config()
logger = logging.getLogger('discord_bot_v2')
logger = logging.getLogger("discord_bot_v2")
logger.setLevel(getattr(logging, config.log_level.upper()))
# Console handler - detailed format for development debugging
console_handler = logging.StreamHandler()
console_formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(funcName)s:%(lineno)d - %(message)s'
"%(asctime)s - %(name)s - %(levelname)s - %(funcName)s:%(lineno)d - %(message)s"
)
console_handler.setFormatter(console_formatter)
logger.addHandler(console_handler)
# JSON file handler - structured logging for monitoring and analysis
json_handler = RotatingFileHandler(
'logs/discord_bot_v2.json',
maxBytes=5 * 1024 * 1024, # 5MB
backupCount=5
"logs/discord_bot_v2.json", maxBytes=5 * 1024 * 1024, backupCount=5 # 5MB
)
json_handler.setFormatter(JSONFormatter())
logger.addHandler(json_handler)
# Configure root logger for third-party libraries (discord.py, aiohttp, etc.)
root_logger = logging.getLogger()
root_logger.setLevel(getattr(logging, config.log_level.upper()))
# Add handlers to root logger so third-party loggers inherit them
if not root_logger.handlers: # Avoid duplicate handlers
root_logger.addHandler(console_handler)
root_logger.addHandler(json_handler)
# Prevent discord_bot_v2 logger from propagating to root to avoid duplicate messages
# (bot logs will still appear via its own handlers, third-party logs via root handlers)
# To revert: remove the line below and bot logs will appear twice
logger.propagate = False
return logger
class SBABot(commands.Bot):
"""Custom bot class for SBA league management."""
def __init__(self):
# Configure intents
intents = discord.Intents.default()
intents.message_content = True # For legacy commands if needed
intents.members = True # For member management
super().__init__(
command_prefix='!', # Legacy prefix, primarily using slash commands
command_prefix="!", # Legacy prefix, primarily using slash commands
intents=intents,
description="Major Domo v2.0"
description="Major Domo v2.0",
)
self.logger = logging.getLogger('discord_bot_v2')
self.logger = logging.getLogger("discord_bot_v2")
async def setup_hook(self):
"""Called when the bot is starting up."""
self.logger.info("Setting up bot...")
# Load command packages
await self._load_command_packages()
# Initialize cleanup tasks
await self._setup_background_tasks()
# Smart command syncing: auto-sync in development if changes detected; !admin-sync for first sync
config = get_config()
if config.is_development:
if await self._should_sync_commands():
self.logger.info("Development mode: changes detected, syncing commands...")
self.logger.info(
"Development mode: changes detected, syncing commands..."
)
await self._sync_commands()
await self._save_command_hash()
else:
self.logger.info("Development mode: no command changes detected, skipping sync")
self.logger.info(
"Development mode: no command changes detected, skipping sync"
)
else:
self.logger.info("Production mode: commands loaded but not auto-synced")
self.logger.info("Use /admin-sync command to manually sync when needed")
async def _load_command_packages(self):
"""Load all command packages with resilient error handling."""
from commands.players import setup_players
@ -146,32 +149,42 @@ class SBABot(commands.Bot):
("gameplay", setup_gameplay),
("dev", setup_dev), # Dev-only commands (admin restricted)
]
total_successful = 0
total_failed = 0
for package_name, setup_func in command_packages:
try:
self.logger.info(f"Loading {package_name} commands...")
successful, failed, failed_modules = await setup_func(self)
total_successful += successful
total_failed += failed
if failed == 0:
self.logger.info(f"{package_name} commands loaded successfully ({successful} cogs)")
self.logger.info(
f"{package_name} commands loaded successfully ({successful} cogs)"
)
else:
self.logger.warning(f"⚠️ {package_name} commands partially loaded: {successful} successful, {failed} failed")
self.logger.warning(
f"⚠️ {package_name} commands partially loaded: {successful} successful, {failed} failed"
)
except Exception as e:
self.logger.error(f"❌ Failed to load {package_name} package: {e}", exc_info=True)
self.logger.error(
f"❌ Failed to load {package_name} package: {e}", exc_info=True
)
total_failed += 1
# Log overall summary
if total_failed == 0:
self.logger.info(f"🎉 All command packages loaded successfully ({total_successful} total cogs)")
self.logger.info(
f"🎉 All command packages loaded successfully ({total_successful} total cogs)"
)
else:
self.logger.warning(f"⚠️ Command loading completed with issues: {total_successful} successful, {total_failed} failed")
self.logger.warning(
f"⚠️ Command loading completed with issues: {total_successful} successful, {total_failed} failed"
)
async def _setup_background_tasks(self):
"""Initialize background tasks for the bot."""
try:
@ -179,28 +192,34 @@ class SBABot(commands.Bot):
# Initialize custom command cleanup task
from tasks.custom_command_cleanup import setup_cleanup_task
self.custom_command_cleanup = setup_cleanup_task(self)
# Initialize transaction freeze/thaw task
from tasks.transaction_freeze import setup_freeze_task
self.transaction_freeze = setup_freeze_task(self)
self.logger.info("✅ Transaction freeze/thaw task started")
# Initialize voice channel cleanup service
from commands.voice.cleanup_service import setup_voice_cleanup
self.voice_cleanup_service = setup_voice_cleanup(self)
self.logger.info("✅ Voice channel cleanup service started")
# Initialize live scorebug tracker
from tasks.live_scorebug_tracker import setup_scorebug_tracker
self.live_scorebug_tracker = setup_scorebug_tracker(self)
self.logger.info("✅ Live scorebug tracker started")
self.logger.info("✅ Background tasks initialized successfully")
except Exception as e:
self.logger.error(f"❌ Failed to initialize background tasks: {e}", exc_info=True)
self.logger.error(
f"❌ Failed to initialize background tasks: {e}", exc_info=True
)
async def _should_sync_commands(self) -> bool:
"""Check if commands have changed since last sync."""
try:
@ -209,50 +228,51 @@ class SBABot(commands.Bot):
for cmd in self.tree.get_commands():
# Handle different command types properly
cmd_dict = {}
cmd_dict['name'] = cmd.name
cmd_dict['type'] = type(cmd).__name__
cmd_dict["name"] = cmd.name
cmd_dict["type"] = type(cmd).__name__
# Add description if available (most command types have this)
if hasattr(cmd, 'description'):
cmd_dict['description'] = cmd.description # type: ignore
if hasattr(cmd, "description"):
cmd_dict["description"] = cmd.description # type: ignore
# Add parameters for Command objects
if isinstance(cmd, discord.app_commands.Command):
cmd_dict['parameters'] = [
cmd_dict["parameters"] = [
{
'name': param.name,
'description': param.description,
'required': param.required,
'type': str(param.type)
} for param in cmd.parameters
"name": param.name,
"description": param.description,
"required": param.required,
"type": str(param.type),
}
for param in cmd.parameters
]
elif isinstance(cmd, discord.app_commands.Group):
# For groups, include subcommands
cmd_dict['subcommands'] = [subcmd.name for subcmd in cmd.commands]
cmd_dict["subcommands"] = [subcmd.name for subcmd in cmd.commands]
commands_data.append(cmd_dict)
# Sort for consistent hashing
commands_data.sort(key=lambda x: x['name'])
current_hash = hashlib.md5(
commands_data.sort(key=lambda x: x["name"])
current_hash = hashlib.sha256(
json.dumps(commands_data, sort_keys=True).encode()
).hexdigest()
# Compare with stored hash
hash_file = '.last_command_hash'
hash_file = ".last_command_hash"
if os.path.exists(hash_file):
with open(hash_file, 'r') as f:
with open(hash_file, "r") as f:
last_hash = f.read().strip()
return current_hash != last_hash
else:
# No previous hash = first run, should sync
return True
except Exception as e:
self.logger.warning(f"Error checking command hash: {e}")
# If we can't determine changes, err on the side of syncing
return True
async def _save_command_hash(self):
"""Save current command hash for future comparison."""
try:
@ -261,41 +281,42 @@ class SBABot(commands.Bot):
for cmd in self.tree.get_commands():
# Handle different command types properly
cmd_dict = {}
cmd_dict['name'] = cmd.name
cmd_dict['type'] = type(cmd).__name__
cmd_dict["name"] = cmd.name
cmd_dict["type"] = type(cmd).__name__
# Add description if available (most command types have this)
if hasattr(cmd, 'description'):
cmd_dict['description'] = cmd.description # type: ignore
if hasattr(cmd, "description"):
cmd_dict["description"] = cmd.description # type: ignore
# Add parameters for Command objects
if isinstance(cmd, discord.app_commands.Command):
cmd_dict['parameters'] = [
cmd_dict["parameters"] = [
{
'name': param.name,
'description': param.description,
'required': param.required,
'type': str(param.type)
} for param in cmd.parameters
"name": param.name,
"description": param.description,
"required": param.required,
"type": str(param.type),
}
for param in cmd.parameters
]
elif isinstance(cmd, discord.app_commands.Group):
# For groups, include subcommands
cmd_dict['subcommands'] = [subcmd.name for subcmd in cmd.commands]
cmd_dict["subcommands"] = [subcmd.name for subcmd in cmd.commands]
commands_data.append(cmd_dict)
commands_data.sort(key=lambda x: x['name'])
current_hash = hashlib.md5(
commands_data.sort(key=lambda x: x["name"])
current_hash = hashlib.sha256(
json.dumps(commands_data, sort_keys=True).encode()
).hexdigest()
# Save hash to file
with open('.last_command_hash', 'w') as f:
with open(".last_command_hash", "w") as f:
f.write(current_hash)
except Exception as e:
self.logger.warning(f"Error saving command hash: {e}")
async def _sync_commands(self):
"""Internal method to sync commands."""
config = get_config()
@ -303,54 +324,55 @@ class SBABot(commands.Bot):
guild = discord.Object(id=config.guild_id)
self.tree.copy_global_to(guild=guild)
synced = await self.tree.sync(guild=guild)
self.logger.info(f"Synced {len(synced)} commands to guild {config.guild_id}")
self.logger.info(
f"Synced {len(synced)} commands to guild {config.guild_id}"
)
else:
synced = await self.tree.sync()
self.logger.info(f"Synced {len(synced)} commands globally")
async def on_ready(self):
"""Called when the bot is ready."""
self.logger.info(f"Bot ready! Logged in as {self.user}")
self.logger.info(f"Connected to {len(self.guilds)} guilds")
# Set activity status
activity = discord.Activity(
type=discord.ActivityType.watching,
name=random_from_list(STARTUP_WATCHING)
type=discord.ActivityType.watching, name=random_from_list(STARTUP_WATCHING)
)
await self.change_presence(activity=activity)
async def on_error(self, event_method: str, /, *args, **kwargs):
    """Global error handler for gateway events.

    Logs the failing event name with the active exception's traceback
    (discord.py invokes this from within the except block, so
    exc_info=True captures the original exception).
    """
    # Lazy %-style args instead of an f-string: the message is only
    # rendered if the ERROR level is actually emitted.
    self.logger.error("Error in event %s", event_method, exc_info=True)
async def close(self):
"""Clean shutdown of the bot."""
self.logger.info("Bot shutting down...")
# Stop background tasks
if hasattr(self, 'custom_command_cleanup'):
if hasattr(self, "custom_command_cleanup"):
try:
self.custom_command_cleanup.cleanup_task.cancel()
self.logger.info("Custom command cleanup task stopped")
except Exception as e:
self.logger.error(f"Error stopping cleanup task: {e}")
if hasattr(self, 'transaction_freeze'):
if hasattr(self, "transaction_freeze"):
try:
self.transaction_freeze.weekly_loop.cancel()
self.logger.info("Transaction freeze/thaw task stopped")
except Exception as e:
self.logger.error(f"Error stopping transaction freeze task: {e}")
if hasattr(self, 'voice_cleanup_service'):
if hasattr(self, "voice_cleanup_service"):
try:
self.voice_cleanup_service.cog_unload()
self.logger.info("Voice channel cleanup service stopped")
except Exception as e:
self.logger.error(f"Error stopping voice cleanup service: {e}")
if hasattr(self, 'live_scorebug_tracker'):
if hasattr(self, "live_scorebug_tracker"):
try:
self.live_scorebug_tracker.update_loop.cancel()
self.logger.info("Live scorebug tracker stopped")
@ -369,15 +391,15 @@ bot = SBABot()
@bot.tree.command(name="health", description="Check bot and API health status")
async def health_command(interaction: discord.Interaction):
"""Health check command to verify bot and API connectivity."""
logger = logging.getLogger('discord_bot_v2')
logger = logging.getLogger("discord_bot_v2")
try:
# Check API connectivity
api_status = "✅ Connected"
try:
client = await get_global_client()
# Test API with a simple request
result = await client.get('current')
result = await client.get("current")
if result:
api_status = "✅ Connected"
else:
@ -385,69 +407,66 @@ async def health_command(interaction: discord.Interaction):
except Exception as e:
logger.error(f"API health check failed: {e}")
api_status = f"❌ Error: {str(e)}"
# Bot health info
guild_count = len(bot.guilds)
# Create health status embed
embed = EmbedTemplate.success(
title="🏥 Bot Health Check"
)
embed = EmbedTemplate.success(title="🏥 Bot Health Check")
embed.add_field(name="Bot Status", value="✅ Online", inline=True)
embed.add_field(name="API Status", value=api_status, inline=True)
embed.add_field(name="Guilds", value=str(guild_count), inline=True)
embed.add_field(name="Latency", value=f"{bot.latency*1000:.1f}ms", inline=True)
if bot.user:
embed.set_footer(text=f"Bot: {bot.user.name}", icon_url=bot.user.display_avatar.url)
embed.set_footer(
text=f"Bot: {bot.user.name}", icon_url=bot.user.display_avatar.url
)
await interaction.response.send_message(embed=embed, ephemeral=True)
except Exception as e:
logger.error(f"Health check command error: {e}", exc_info=True)
await interaction.response.send_message(
f"❌ Health check failed: {str(e)}",
ephemeral=True
f"❌ Health check failed: {str(e)}", ephemeral=True
)
@bot.tree.error
async def on_app_command_error(interaction: discord.Interaction, error: discord.app_commands.AppCommandError):
async def on_app_command_error(
interaction: discord.Interaction, error: discord.app_commands.AppCommandError
):
"""Global error handler for application commands."""
logger = logging.getLogger('discord_bot_v2')
logger = logging.getLogger("discord_bot_v2")
# Handle specific error types
if isinstance(error, discord.app_commands.CommandOnCooldown):
await interaction.response.send_message(
f"⏰ Command on cooldown. Try again in {error.retry_after:.1f} seconds.",
ephemeral=True
ephemeral=True,
)
elif isinstance(error, discord.app_commands.MissingPermissions):
await interaction.response.send_message(
"❌ You don't have permission to use this command.",
ephemeral=True
"❌ You don't have permission to use this command.", ephemeral=True
)
elif isinstance(error, discord.app_commands.CommandNotFound):
await interaction.response.send_message(
"❌ Command not found. Use `/help` to see available commands.",
ephemeral=True
ephemeral=True,
)
elif isinstance(error, BotException):
# Our custom exceptions - show user-friendly message
await interaction.response.send_message(
f"{str(error)}",
ephemeral=True
)
await interaction.response.send_message(f"{str(error)}", ephemeral=True)
else:
# Unexpected errors - log and show generic message
logger.error(f"Unhandled command error: {error}", exc_info=True)
message = "❌ An unexpected error occurred. Please try again."
config = get_config()
if config.is_development:
message += f"\n\nDevelopment error: {str(error)}"
if interaction.response.is_done():
await interaction.followup.send(message, ephemeral=True)
else:
@ -457,12 +476,12 @@ async def on_app_command_error(interaction: discord.Interaction, error: discord.
async def main():
"""Main entry point."""
logger = setup_logging()
config = get_config()
logger.info("Starting Discord Bot v2.0")
logger.info(f"Environment: {config.environment}")
logger.info(f"Guild ID: {config.guild_id}")
try:
await bot.start(config.bot_token)
except KeyboardInterrupt:
@ -475,4 +494,4 @@ async def main():
if __name__ == "__main__":
asyncio.run(main())
asyncio.run(main())

View File

@ -3,23 +3,23 @@ Admin command package for Discord Bot v2.0
Contains administrative commands for league management.
"""
import logging
from typing import List, Tuple, Type
import discord
from discord.ext import commands
from .management import AdminCommands
from .users import UserManagementCommands
from .league_management import LeagueManagementCommands
logger = logging.getLogger(f'{__name__}.setup_admin')
logger = logging.getLogger(f"{__name__}.setup_admin")
async def setup_admin(bot: commands.Bot) -> Tuple[int, int, List[str]]:
"""
Set up admin command modules.
Returns:
Tuple of (successful_loads, failed_loads, failed_modules)
"""
@ -28,11 +28,11 @@ async def setup_admin(bot: commands.Bot) -> Tuple[int, int, List[str]]:
("UserManagementCommands", UserManagementCommands),
("LeagueManagementCommands", LeagueManagementCommands),
]
successful = 0
failed = 0
failed_modules = []
for cog_name, cog_class in admin_cogs:
try:
await bot.add_cog(cog_class(bot))
@ -42,13 +42,15 @@ async def setup_admin(bot: commands.Bot) -> Tuple[int, int, List[str]]:
logger.error(f"❌ Failed to load admin command module {cog_name}: {e}")
failed += 1
failed_modules.append(cog_name)
# Log summary
if failed == 0:
logger.info(f"🎉 All {successful} admin command modules loaded successfully")
else:
logger.warning(f"⚠️ Admin commands loaded with issues: {successful} successful, {failed} failed")
logger.warning(
f"⚠️ Admin commands loaded with issues: {successful} successful, {failed} failed"
)
if failed_modules:
logger.warning(f"Failed modules: {', '.join(failed_modules)}")
return successful, failed, failed_modules
return successful, failed, failed_modules

View File

@ -10,11 +10,10 @@ import discord
from discord.ext import commands
from discord import app_commands
from config import get_config
from utils.logging import get_contextual_logger
from utils.decorators import logged_command
from utils.permissions import league_admin_only
from views.embeds import EmbedColors, EmbedTemplate
from views.embeds import EmbedTemplate
from services.league_service import league_service
from services.transaction_service import transaction_service
from tasks.transaction_freeze import resolve_contested_transactions

View File

@ -4,8 +4,7 @@ Admin User Management Commands
User-focused administrative commands for moderation and user management.
"""
from typing import Optional, Union
import asyncio
from datetime import datetime, timedelta
from datetime import timedelta
import discord
from discord.ext import commands

View File

@ -10,9 +10,7 @@ from discord.ext import commands
from services.custom_commands_service import (
custom_commands_service,
CustomCommandNotFoundError,
CustomCommandExistsError,
CustomCommandPermissionError
CustomCommandNotFoundError
)
from models.custom_command import CustomCommandSearchFilters
from utils.logging import get_contextual_logger
@ -28,7 +26,6 @@ from views.custom_commands import (
CustomCommandSearchModal,
SingleCommandManagementView
)
from exceptions import BotException
class CustomCommandsCommands(commands.Cog):

View File

@ -4,14 +4,11 @@ Dice Rolling Commands
Implements slash commands for dice rolling functionality required for gameplay.
"""
import random
from typing import Optional
import discord
from discord.ext import commands
from models.team import Team
from services.team_service import team_service
from utils import team_utils
from utils.logging import get_contextual_logger
from utils.decorators import logged_command
from utils.team_utils import get_user_major_league_team

View File

@ -6,7 +6,6 @@ Manage team auto-draft queue (draft board).
from typing import List, Optional
import discord
from discord import app_commands
from discord.ext import commands
from config import get_config

View File

@ -3,10 +3,11 @@ Draft Pick Commands
Implements slash commands for making draft picks with global lock protection.
"""
import asyncio
import re
from typing import List, Optional
from datetime import datetime
from datetime import UTC, datetime
import discord
from discord.ext import commands
@ -27,7 +28,7 @@ from views.draft_views import (
create_player_draft_card,
create_pick_illegal_embed,
create_pick_success_embed,
create_on_clock_announcement_embed
create_on_clock_announcement_embed,
)
@ -53,7 +54,7 @@ def _parse_player_name(raw_input: str) -> str:
# Pattern: "Player Name (POS) - X.XX sWAR"
# Position can be letters or numbers (e.g., SS, RP, 1B, 2B, 3B, OF)
# Extract just the player name before the opening parenthesis
match = re.match(r'^(.+?)\s*\([A-Z0-9]+\)\s*-\s*[\d.]+\s*sWAR$', raw_input)
match = re.match(r"^(.+?)\s*\([A-Z0-9]+\)\s*-\s*[\d.]+\s*sWAR$", raw_input)
if match:
return match.group(1).strip()
@ -73,9 +74,7 @@ async def fa_player_autocomplete(
config = get_config()
# Search for FA players only
players = await player_service.search_players(
current,
limit=25,
season=config.sba_season
current, limit=25, season=config.sba_season
)
# Filter to FA team
@ -84,7 +83,7 @@ async def fa_player_autocomplete(
return [
discord.app_commands.Choice(
name=f"{p.name} ({p.primary_position}) - {p.wara:.2f} sWAR",
value=p.name
value=p.name,
)
for p in fa_players[:25]
]
@ -98,7 +97,7 @@ class DraftPicksCog(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
self.logger = get_contextual_logger(f'{__name__}.DraftPicksCog')
self.logger = get_contextual_logger(f"{__name__}.DraftPicksCog")
# GLOBAL PICK LOCK (local only - not in database)
self.pick_lock = asyncio.Lock()
@ -107,7 +106,7 @@ class DraftPicksCog(commands.Cog):
@discord.app_commands.command(
name="draft",
description="Make a draft pick (autocomplete shows FA players only)"
description="Make a draft pick (autocomplete shows FA players only)",
)
@discord.app_commands.describe(
player="Player name to draft (autocomplete shows available FA players)"
@ -116,18 +115,14 @@ class DraftPicksCog(commands.Cog):
@requires_draft_period
@requires_team()
@logged_command("/draft")
async def draft_pick(
self,
interaction: discord.Interaction,
player: str
):
async def draft_pick(self, interaction: discord.Interaction, player: str):
"""Make a draft pick with global lock protection."""
await interaction.response.defer()
# Check if lock is held
if self.pick_lock.locked():
if self.lock_acquired_at:
time_held = (datetime.now() - self.lock_acquired_at).total_seconds()
time_held = (datetime.now(UTC) - self.lock_acquired_at).total_seconds()
if time_held > 30:
# STALE LOCK: Auto-override after 30 seconds
@ -140,14 +135,14 @@ class DraftPicksCog(commands.Cog):
embed = await create_pick_illegal_embed(
"Pick In Progress",
f"Another manager is currently making a pick. "
f"Please wait approximately {30 - int(time_held)} seconds."
f"Please wait approximately {30 - int(time_held)} seconds.",
)
await interaction.followup.send(embed=embed)
return
# Acquire global lock
async with self.pick_lock:
self.lock_acquired_at = datetime.now()
self.lock_acquired_at = datetime.now(UTC)
self.lock_acquired_by = interaction.user.id
try:
@ -157,9 +152,7 @@ class DraftPicksCog(commands.Cog):
self.lock_acquired_by = None
async def _process_draft_pick(
self,
interaction: discord.Interaction,
player_name: str
self, interaction: discord.Interaction, player_name: str
):
"""
Process draft pick with validation.
@ -176,14 +169,12 @@ class DraftPicksCog(commands.Cog):
# Get user's team (CACHED via @cached_single_item)
team = await team_service.get_team_by_owner(
interaction.user.id,
config.sba_season
interaction.user.id, config.sba_season
)
if not team:
embed = await create_pick_illegal_embed(
"Not a GM",
"You are not registered as a team owner."
"Not a GM", "You are not registered as a team owner."
)
await interaction.followup.send(embed=embed)
return
@ -192,8 +183,7 @@ class DraftPicksCog(commands.Cog):
draft_data = await draft_service.get_draft_data()
if not draft_data:
embed = await create_pick_illegal_embed(
"Draft Not Found",
"Could not retrieve draft configuration."
"Draft Not Found", "Could not retrieve draft configuration."
)
await interaction.followup.send(embed=embed)
return
@ -202,21 +192,19 @@ class DraftPicksCog(commands.Cog):
if draft_data.paused:
embed = await create_pick_illegal_embed(
"Draft Paused",
"The draft is currently paused. Please wait for an administrator to resume."
"The draft is currently paused. Please wait for an administrator to resume.",
)
await interaction.followup.send(embed=embed)
return
# Get current pick
current_pick = await draft_pick_service.get_pick(
config.sba_season,
draft_data.currentpick
config.sba_season, draft_data.currentpick
)
if not current_pick or not current_pick.owner:
embed = await create_pick_illegal_embed(
"Invalid Pick",
f"Could not retrieve pick #{draft_data.currentpick}."
"Invalid Pick", f"Could not retrieve pick #{draft_data.currentpick}."
)
await interaction.followup.send(embed=embed)
return
@ -227,16 +215,14 @@ class DraftPicksCog(commands.Cog):
if current_pick.owner.id != team.id:
# Not on the clock - check for skipped picks
skipped_picks = await draft_pick_service.get_skipped_picks_for_team(
config.sba_season,
team.id,
draft_data.currentpick
config.sba_season, team.id, draft_data.currentpick
)
if not skipped_picks:
# No skipped picks - can't draft
embed = await create_pick_illegal_embed(
"Not Your Turn",
f"{current_pick.owner.sname} is on the clock for {format_pick_display(current_pick.overall)}."
f"{current_pick.owner.sname} is on the clock for {format_pick_display(current_pick.overall)}.",
)
await interaction.followup.send(embed=embed)
return
@ -249,12 +235,13 @@ class DraftPicksCog(commands.Cog):
)
# Get player
players = await player_service.get_players_by_name(player_name, config.sba_season)
players = await player_service.get_players_by_name(
player_name, config.sba_season
)
if not players:
embed = await create_pick_illegal_embed(
"Player Not Found",
f"Could not find player '{player_name}'."
"Player Not Found", f"Could not find player '{player_name}'."
)
await interaction.followup.send(embed=embed)
return
@ -264,55 +251,52 @@ class DraftPicksCog(commands.Cog):
# Validate player is FA
if player_obj.team_id != config.free_agent_team_id:
embed = await create_pick_illegal_embed(
"Player Not Available",
f"{player_obj.name} is not a free agent."
"Player Not Available", f"{player_obj.name} is not a free agent."
)
await interaction.followup.send(embed=embed)
return
# Validate cap space
roster = await team_service.get_team_roster(team.id, 'current')
roster = await team_service.get_team_roster(team.id, "current")
if not roster:
embed = await create_pick_illegal_embed(
"Roster Error",
f"Could not retrieve roster for {team.abbrev}."
"Roster Error", f"Could not retrieve roster for {team.abbrev}."
)
await interaction.followup.send(embed=embed)
return
is_valid, projected_total, cap_limit = await validate_cap_space(roster, player_obj.wara, team)
is_valid, projected_total, cap_limit = await validate_cap_space(
roster, player_obj.wara, team
)
if not is_valid:
embed = await create_pick_illegal_embed(
"Cap Space Exceeded",
f"Drafting {player_obj.name} would put you at {projected_total:.2f} sWAR (limit: {cap_limit:.2f})."
f"Drafting {player_obj.name} would put you at {projected_total:.2f} sWAR (limit: {cap_limit:.2f}).",
)
await interaction.followup.send(embed=embed)
return
# Execute pick (using pick_to_use which may be current or skipped pick)
updated_pick = await draft_pick_service.update_pick_selection(
pick_to_use.id,
player_obj.id
pick_to_use.id, player_obj.id
)
if not updated_pick:
embed = await create_pick_illegal_embed(
"Pick Failed",
"Failed to update draft pick. Please try again."
"Pick Failed", "Failed to update draft pick. Please try again."
)
await interaction.followup.send(embed=embed)
return
# Get current league state for dem_week calculation
from services.league_service import league_service
current = await league_service.get_current_state()
# Update player team with dem_week set to current.week + 2 for draft picks
updated_player = await player_service.update_player_team(
player_obj.id,
team.id,
dem_week=current.week + 2 if current else None
player_obj.id, team.id, dem_week=current.week + 2 if current else None
)
if not updated_player:
@ -324,7 +308,7 @@ class DraftPicksCog(commands.Cog):
pick=pick_to_use,
player=player_obj,
team=team,
guild=interaction.guild
guild=interaction.guild,
)
# Determine if this was a skipped pick
@ -332,11 +316,7 @@ class DraftPicksCog(commands.Cog):
# Send success message
success_embed = await create_pick_success_embed(
player_obj,
team,
pick_to_use.overall,
projected_total,
cap_limit
player_obj, team, pick_to_use.overall, projected_total, cap_limit
)
# Add note if this was a skipped pick
@ -348,7 +328,10 @@ class DraftPicksCog(commands.Cog):
await interaction.followup.send(embed=success_embed)
# Post draft card to ping channel (only if different from command channel)
if draft_data.ping_channel and draft_data.ping_channel != interaction.channel_id:
if (
draft_data.ping_channel
and draft_data.ping_channel != interaction.channel_id
):
guild = interaction.guild
if guild:
ping_channel = guild.get_channel(draft_data.ping_channel)
@ -369,7 +352,9 @@ class DraftPicksCog(commands.Cog):
if guild:
result_channel = guild.get_channel(draft_data.result_channel)
if result_channel:
result_card = await create_player_draft_card(player_obj, pick_to_use)
result_card = await create_player_draft_card(
player_obj, pick_to_use
)
# Add skipped pick context to result card
if is_skipped_pick:
@ -379,7 +364,9 @@ class DraftPicksCog(commands.Cog):
await result_channel.send(embed=result_card)
else:
self.logger.warning(f"Could not find result channel {draft_data.result_channel}")
self.logger.warning(
f"Could not find result channel {draft_data.result_channel}"
)
# Only advance the draft if this was the current pick (not a skipped pick)
if not is_skipped_pick:
@ -391,8 +378,7 @@ class DraftPicksCog(commands.Cog):
ping_channel = guild.get_channel(draft_data.ping_channel)
if ping_channel:
await self._post_on_clock_announcement(
ping_channel=ping_channel,
guild=guild
ping_channel=ping_channel, guild=guild
)
self.logger.info(
@ -402,12 +388,7 @@ class DraftPicksCog(commands.Cog):
)
async def _write_pick_to_sheets(
self,
draft_data,
pick,
player,
team,
guild: Optional[discord.Guild]
self, draft_data, pick, player, team, guild: Optional[discord.Guild]
):
"""
Write pick to Google Sheets (fire-and-forget with ping channel notification on failure).
@ -426,10 +407,12 @@ class DraftPicksCog(commands.Cog):
success = await draft_sheet_service.write_pick(
season=config.sba_season,
overall=pick.overall,
orig_owner_abbrev=pick.origowner.abbrev if pick.origowner else team.abbrev,
orig_owner_abbrev=(
pick.origowner.abbrev if pick.origowner else team.abbrev
),
owner_abbrev=team.abbrev,
player_name=player.name,
swar=player.wara
swar=player.wara,
)
if not success:
@ -439,7 +422,7 @@ class DraftPicksCog(commands.Cog):
channel_id=draft_data.ping_channel,
pick_overall=pick.overall,
player_name=player.name,
reason="Sheet write returned failure"
reason="Sheet write returned failure",
)
except Exception as e:
@ -450,7 +433,7 @@ class DraftPicksCog(commands.Cog):
channel_id=draft_data.ping_channel,
pick_overall=pick.overall,
player_name=player.name,
reason=str(e)
reason=str(e),
)
async def _notify_sheet_failure(
@ -459,7 +442,7 @@ class DraftPicksCog(commands.Cog):
channel_id: Optional[int],
pick_overall: int,
player_name: str,
reason: str
reason: str,
):
"""
Post notification to ping channel when sheet write fails.
@ -476,7 +459,7 @@ class DraftPicksCog(commands.Cog):
try:
channel = guild.get_channel(channel_id)
if channel and hasattr(channel, 'send'):
if channel and hasattr(channel, "send"):
await channel.send(
f"⚠️ **Sheet Sync Failed** - Pick #{pick_overall} ({player_name}) "
f"was not written to the draft sheet. "
@ -486,9 +469,7 @@ class DraftPicksCog(commands.Cog):
self.logger.error(f"Failed to send sheet failure notification: {e}")
async def _post_on_clock_announcement(
self,
ping_channel,
guild: discord.Guild
self, ping_channel, guild: discord.Guild
) -> None:
"""
Post the on-clock announcement embed for the next team with role ping.
@ -510,23 +491,26 @@ class DraftPicksCog(commands.Cog):
# Get the new current pick
next_pick = await draft_pick_service.get_pick(
config.sba_season,
updated_draft_data.currentpick
config.sba_season, updated_draft_data.currentpick
)
if not next_pick or not next_pick.owner:
self.logger.error(f"Could not get pick #{updated_draft_data.currentpick} for announcement")
self.logger.error(
f"Could not get pick #{updated_draft_data.currentpick} for announcement"
)
return
# Get recent picks (last 5 completed)
recent_picks = await draft_pick_service.get_recent_picks(
config.sba_season,
updated_draft_data.currentpick - 1, # Start from previous pick
limit=5
limit=5,
)
# Get team roster for sWAR calculation
team_roster = await roster_service.get_team_roster(next_pick.owner.id, "current")
team_roster = await roster_service.get_team_roster(
next_pick.owner.id, "current"
)
roster_swar = team_roster.total_wara if team_roster else 0.0
cap_limit = get_team_salary_cap(next_pick.owner)
@ -534,7 +518,9 @@ class DraftPicksCog(commands.Cog):
top_roster_players = []
if team_roster:
all_players = team_roster.all_players
sorted_players = sorted(all_players, key=lambda p: p.wara if p.wara else 0.0, reverse=True)
sorted_players = sorted(
all_players, key=lambda p: p.wara if p.wara else 0.0, reverse=True
)
top_roster_players = sorted_players[:5]
# Get sheet URL
@ -548,7 +534,7 @@ class DraftPicksCog(commands.Cog):
roster_swar=roster_swar,
cap_limit=cap_limit,
top_roster_players=top_roster_players,
sheet_url=sheet_url
sheet_url=sheet_url,
)
# Mention the team's role (using team.lname)
@ -557,10 +543,14 @@ class DraftPicksCog(commands.Cog):
if team_role:
team_mention = f"{team_role.mention} "
else:
self.logger.warning(f"Could not find role for team {next_pick.owner.lname}")
self.logger.warning(
f"Could not find role for team {next_pick.owner.lname}"
)
await ping_channel.send(content=team_mention, embed=embed)
self.logger.info(f"Posted on-clock announcement for pick #{updated_draft_data.currentpick}")
self.logger.info(
f"Posted on-clock announcement for pick #{updated_draft_data.currentpick}"
)
except Exception as e:
self.logger.error("Error posting on-clock announcement", error=e)

View File

@ -14,7 +14,6 @@ from services.player_service import player_service
from models.player import Player
from utils.logging import get_contextual_logger
from utils.decorators import logged_command
from exceptions import BotException
# Import our new view components
from views import (
@ -24,8 +23,7 @@ from views import (
PlayerSelectionView,
DetailedInfoView,
SearchResultsView,
PlayerSearchModal,
PaginationView
PlayerSearchModal
)

View File

@ -3,17 +3,18 @@ Scorebug Commands
Implements commands for publishing and displaying live game scorebugs from Google Sheets scorecards.
"""
import discord
from discord.ext import commands
from discord import app_commands
from services.scorebug_service import ScorebugData, ScorebugService
from services.scorebug_service import ScorebugService
from services.team_service import team_service
from utils.logging import get_contextual_logger
from utils.decorators import logged_command
from utils.permissions import league_only
from utils.scorebug_helpers import create_scorebug_embed
from views.embeds import EmbedTemplate, EmbedColors
from views.embeds import EmbedTemplate
from exceptions import SheetsException
from .scorecard_tracker import ScorecardTracker
@ -23,25 +24,21 @@ class ScorebugCommands(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
self.logger = get_contextual_logger(f'{__name__}.ScorebugCommands')
self.logger = get_contextual_logger(f"{__name__}.ScorebugCommands")
self.scorebug_service = ScorebugService()
self.scorecard_tracker = ScorecardTracker()
self.logger.info("ScorebugCommands cog initialized")
@app_commands.command(
name="publish-scorecard",
description="Publish a Google Sheets scorecard to this channel for live tracking"
description="Publish a Google Sheets scorecard to this channel for live tracking",
)
@app_commands.describe(
url="Full URL to the Google Sheets scorecard or just the sheet key"
)
@league_only()
@logged_command("/publish-scorecard")
async def publish_scorecard(
self,
interaction: discord.Interaction,
url: str
):
async def publish_scorecard(self, interaction: discord.Interaction, url: str):
"""
Link a Google Sheets scorecard to the current channel for live scorebug tracking.
@ -61,14 +58,16 @@ class ScorebugCommands(commands.Cog):
# Verify it has a Scorebug tab
try:
scorebug_data = await self.scorebug_service.read_scorebug_data(url, full_length=False)
scorebug_data = await self.scorebug_service.read_scorebug_data(
url, full_length=False
)
except SheetsException:
embed = EmbedTemplate.error(
title="Invalid Scorecard",
description=(
"This doesn't appear to be a valid scorecard.\n\n"
"Make sure the sheet has a 'Scorebug' tab and is properly set up."
)
),
)
await interaction.edit_original_response(content=None, embed=embed)
return
@ -88,22 +87,22 @@ class ScorebugCommands(commands.Cog):
# Store the scorecard in the tracker
self.scorecard_tracker.publish_scorecard(
text_channel_id=interaction.channel_id, # type: ignore
text_channel_id=interaction.channel_id, # type: ignore
sheet_url=url,
publisher_id=interaction.user.id
publisher_id=interaction.user.id,
)
# Create success embed
embed = EmbedTemplate.success(
title="Scorecard Published",
description=(
f"Your scorecard has been published to {interaction.channel.mention}\n\n" # type: ignore
f"Your scorecard has been published to {interaction.channel.mention}\n\n" # type: ignore
f"**Sheet:** {scorecard.title}\n"
f"**Status:** Live tracking enabled\n"
f"**Scorecard:** {scorecard_link}\n\n"
f"Anyone can now run `/scorebug` in this channel to see the current score.\n"
f"The scorebug will also update in the live scores channel every 3 minutes."
)
),
)
embed.add_field(
@ -112,7 +111,7 @@ class ScorebugCommands(commands.Cog):
"`/scorebug` - Display full scorebug with details\n"
"`/scorebug full_length:False` - Display compact scorebug"
),
inline=False
inline=False,
)
await interaction.edit_original_response(content=None, embed=embed)
@ -121,13 +120,14 @@ class ScorebugCommands(commands.Cog):
embed = EmbedTemplate.error(
title="Cannot Access Scorecard",
description=(
f"{str(e)}\n\n"
f"{str(e)}\n\n"
f"**You provided:** `{url}`\n\n"
f"**Common issues:**\n"
f"• Sheet is not publicly accessible\n"
f"• Invalid sheet URL or key\n"
f"• Sheet doesn't exist\n\n"
f"Make sure your sheet is shared with 'Anyone with the link can view'."
)
),
)
await interaction.edit_original_response(content=None, embed=embed)
@ -138,23 +138,18 @@ class ScorebugCommands(commands.Cog):
description=(
"❌ An unexpected error occurred while publishing the scorecard.\n\n"
"Please try again or contact support if the issue persists."
)
),
)
await interaction.edit_original_response(content=None, embed=embed)
@app_commands.command(
name="scorebug",
description="Display the scorebug for the game in this channel"
)
@app_commands.describe(
full_length="Include full game details (defaults to True)"
name="scorebug", description="Display the scorebug for the game in this channel"
)
@app_commands.describe(full_length="Include full game details (defaults to True)")
@league_only()
@logged_command("/scorebug")
async def scorebug(
self,
interaction: discord.Interaction,
full_length: bool = True
self, interaction: discord.Interaction, full_length: bool = True
):
"""
Display the current scorebug from the scorecard published in this channel.
@ -162,7 +157,7 @@ class ScorebugCommands(commands.Cog):
await interaction.response.defer(ephemeral=True)
# Check if a scorecard is published in this channel
sheet_url = self.scorecard_tracker.get_scorecard(interaction.channel_id) # type: ignore
sheet_url = self.scorecard_tracker.get_scorecard(interaction.channel_id) # type: ignore
if not sheet_url:
embed = EmbedTemplate.error(
@ -170,20 +165,17 @@ class ScorebugCommands(commands.Cog):
description=(
"❌ No scorecard has been published in this channel.\n\n"
"Use `/publish-scorecard <url>` to publish a scorecard first."
)
),
)
await interaction.followup.send(embed=embed, ephemeral=True)
return
try:
# Read scorebug data
await interaction.edit_original_response(
content="📊 Reading scorebug..."
)
await interaction.edit_original_response(content="📊 Reading scorebug...")
scorebug_data = await self.scorebug_service.read_scorebug_data(
sheet_url,
full_length=full_length
sheet_url, full_length=full_length
)
# Get team data
@ -196,16 +188,13 @@ class ScorebugCommands(commands.Cog):
# Create scorebug embed using shared utility
embed = create_scorebug_embed(
scorebug_data,
away_team,
home_team,
full_length
scorebug_data, away_team, home_team, full_length
)
await interaction.edit_original_response(content=None, embed=embed)
# Update timestamp in tracker
self.scorecard_tracker.update_timestamp(interaction.channel_id) # type: ignore
self.scorecard_tracker.update_timestamp(interaction.channel_id) # type: ignore
except SheetsException as e:
embed = EmbedTemplate.error(
@ -213,7 +202,7 @@ class ScorebugCommands(commands.Cog):
description=(
f"{str(e)}\n\n"
f"The scorecard may have been deleted or the sheet structure changed."
)
),
)
await interaction.edit_original_response(content=None, embed=embed)
@ -224,7 +213,7 @@ class ScorebugCommands(commands.Cog):
description=(
"❌ An error occurred while reading the scorebug.\n\n"
"Please try again or republish the scorecard."
)
),
)
await interaction.edit_original_response(content=None, embed=embed)

View File

@ -3,13 +3,14 @@ Scorecard Tracker
Provides persistent tracking of published scorecards per Discord text channel using JSON file storage.
"""
import json
import logging
from datetime import datetime, UTC
from pathlib import Path
from typing import Dict, List, Optional, Tuple
logger = logging.getLogger(f'{__name__}.ScorecardTracker')
logger = logging.getLogger(f"{__name__}.ScorecardTracker")
class ScorecardTracker:
@ -39,9 +40,11 @@ class ScorecardTracker:
"""Load scorecard data from JSON file."""
try:
if self.data_file.exists():
with open(self.data_file, 'r') as f:
with open(self.data_file, "r") as f:
self._data = json.load(f)
logger.debug(f"Loaded {len(self._data.get('scorecards', {}))} tracked scorecards")
logger.debug(
f"Loaded {len(self._data.get('scorecards', {}))} tracked scorecards"
)
else:
self._data = {"scorecards": {}}
logger.info("No existing scorecard data found, starting fresh")
@ -52,17 +55,14 @@ class ScorecardTracker:
def save_data(self) -> None:
"""Save scorecard data to JSON file."""
try:
with open(self.data_file, 'w') as f:
with open(self.data_file, "w") as f:
json.dump(self._data, f, indent=2, default=str)
logger.debug("Scorecard data saved successfully")
except Exception as e:
logger.error(f"Failed to save scorecard data: {e}")
def publish_scorecard(
self,
text_channel_id: int,
sheet_url: str,
publisher_id: int
self, text_channel_id: int, sheet_url: str, publisher_id: int
) -> None:
"""
Link a scorecard to a text channel.
@ -77,7 +77,7 @@ class ScorecardTracker:
"sheet_url": sheet_url,
"published_at": datetime.now(UTC).isoformat(),
"last_updated": datetime.now(UTC).isoformat(),
"publisher_id": str(publisher_id)
"publisher_id": str(publisher_id),
}
self.save_data()
logger.info(f"Published scorecard to channel {text_channel_id}: {sheet_url}")
@ -113,6 +113,7 @@ class ScorecardTracker:
Returns:
Sheet URL if published, None otherwise
"""
self.load_data()
scorecards = self._data.get("scorecards", {})
scorecard_data = scorecards.get(str(text_channel_id))
return scorecard_data["sheet_url"] if scorecard_data else None
@ -124,6 +125,7 @@ class ScorecardTracker:
Returns:
List of (text_channel_id, sheet_url) tuples
"""
self.load_data()
scorecards = self._data.get("scorecards", {})
return [
(int(channel_id), data["sheet_url"])
@ -163,13 +165,17 @@ class ScorecardTracker:
if channel_id not in valid_channel_ids:
stale_entries.append(channel_id_str)
except (ValueError, TypeError):
logger.warning(f"Invalid channel ID in scorecard data: {channel_id_str}")
logger.warning(
f"Invalid channel ID in scorecard data: {channel_id_str}"
)
stale_entries.append(channel_id_str)
# Remove stale entries
for channel_id_str in stale_entries:
del scorecards[channel_id_str]
logger.info(f"Removed stale scorecard entry for channel ID: {channel_id_str}")
logger.info(
f"Removed stale scorecard entry for channel ID: {channel_id_str}"
)
if stale_entries:
self.save_data()

View File

@ -17,7 +17,7 @@ from services.help_commands_service import (
from utils.logging import get_contextual_logger
from utils.decorators import logged_command
from utils.permissions import league_admin_only
from views.embeds import EmbedTemplate, EmbedColors
from views.embeds import EmbedTemplate
from views.help_commands import (
HelpCommandCreateModal,
HelpCommandEditModal,
@ -25,7 +25,6 @@ from views.help_commands import (
HelpCommandListView,
create_help_topic_embed
)
from exceptions import BotException
async def help_topic_autocomplete(

View File

@ -10,6 +10,7 @@ The injury rating format (#p##) encodes both games played and rating:
- First character: Games played in series (1-6)
- Remaining: Injury rating (p70, p65, p60, p50, p40, p30, p20)
"""
import math
import random
import discord
@ -17,9 +18,6 @@ from discord import app_commands
from discord.ext import commands
from config import get_config
from models.current import Current
from models.injury import Injury
from models.player import Player
from models.team import RosterType
from services.player_service import player_service
from services.injury_service import injury_service
@ -40,11 +38,8 @@ class InjuryGroup(app_commands.Group):
"""Injury management command group with roll, set-new, and clear subcommands."""
def __init__(self):
super().__init__(
name="injury",
description="Injury management commands"
)
self.logger = get_contextual_logger(f'{__name__}.InjuryGroup')
super().__init__(name="injury", description="Injury management commands")
self.logger = get_contextual_logger(f"{__name__}.InjuryGroup")
self.logger.info("InjuryGroup initialized")
def has_player_role(self, interaction: discord.Interaction) -> bool:
@ -53,13 +48,17 @@ class InjuryGroup(app_commands.Group):
if not isinstance(interaction.user, discord.Member):
return False
if interaction.guild is None:
return False
player_role = discord.utils.get(
interaction.guild.roles,
name=get_config().sba_players_role_name
interaction.guild.roles, name=get_config().sba_players_role_name
)
return player_role in interaction.user.roles if player_role else False
@app_commands.command(name="roll", description="Roll for injury based on player's injury rating")
@app_commands.command(
name="roll", description="Roll for injury based on player's injury rating"
)
@app_commands.describe(player_name="Player name")
@app_commands.autocomplete(player_name=player_autocomplete)
@league_only()
@ -74,12 +73,14 @@ class InjuryGroup(app_commands.Group):
raise BotException("Failed to get current season information")
# Search for player using the search endpoint (more reliable than name param)
players = await player_service.search_players(player_name, limit=10, season=current.season)
players = await player_service.search_players(
player_name, limit=10, season=current.season
)
if not players:
embed = EmbedTemplate.error(
title="Player Not Found",
description=f"I did not find anybody named **{player_name}**."
description=f"I did not find anybody named **{player_name}**.",
)
await interaction.followup.send(embed=embed, ephemeral=True)
return
@ -89,14 +90,17 @@ class InjuryGroup(app_commands.Group):
# Fetch full team data if team is not populated
if player.team_id and not player.team:
from services.team_service import team_service
player.team = await team_service.get_team(player.team_id)
# Check if player already has an active injury
existing_injury = await injury_service.get_active_injury(player.id, current.season)
existing_injury = await injury_service.get_active_injury(
player.id, current.season
)
if existing_injury:
embed = EmbedTemplate.error(
title="Already Injured",
description=f"Hm. It looks like {player.name} is already hurt."
description=f"Hm. It looks like {player.name} is already hurt.",
)
await interaction.followup.send(embed=embed, ephemeral=True)
return
@ -105,7 +109,7 @@ class InjuryGroup(app_commands.Group):
if not player.injury_rating:
embed = EmbedTemplate.error(
title="No Injury Rating",
description=f"{player.name} does not have an injury rating set."
description=f"{player.name} does not have an injury rating set.",
)
await interaction.followup.send(embed=embed, ephemeral=True)
return
@ -120,13 +124,13 @@ class InjuryGroup(app_commands.Group):
raise ValueError("Games played must be between 1 and 6")
# Validate rating format (should start with 'p')
if not injury_rating.startswith('p'):
if not injury_rating.startswith("p"):
raise ValueError("Invalid rating format")
except (ValueError, IndexError):
embed = EmbedTemplate.error(
title="Invalid Injury Rating Format",
description=f"{player.name} has an invalid injury rating: `{player.injury_rating}`\n\nExpected format: `#p##` (e.g., `1p70`, `4p50`)"
description=f"{player.name} has an invalid injury rating: `{player.injury_rating}`\n\nExpected format: `#p##` (e.g., `1p70`, `4p50`)",
)
await interaction.followup.send(embed=embed, ephemeral=True)
return
@ -141,33 +145,25 @@ class InjuryGroup(app_commands.Group):
injury_result = self._get_injury_result(injury_rating, games_played, roll_total)
# Create response embed
embed = EmbedTemplate.warning(
title=f"Injury roll for {interaction.user.name}"
)
embed = EmbedTemplate.warning(title=f"Injury roll for {interaction.user.name}")
if player.team and player.team.thumbnail:
embed.set_thumbnail(url=player.team.thumbnail)
embed.add_field(
name="Player",
value=f"{player.name} ({player.primary_position})",
inline=True
inline=True,
)
embed.add_field(
name="Injury Rating",
value=f"{player.injury_rating}",
inline=True
name="Injury Rating", value=f"{player.injury_rating}", inline=True
)
# embed.add_field(name='', value='', inline=False) # Embed line break
# Format dice roll in markdown (same format as /ab roll)
dice_result = f"```md\n# {roll_total}\nDetails:[3d6 ({d1} {d2} {d3})]```"
embed.add_field(
name="Dice Roll",
value=dice_result,
inline=False
)
embed.add_field(name="Dice Roll", value=dice_result, inline=False)
view = None
@ -177,20 +173,20 @@ class InjuryGroup(app_commands.Group):
embed.color = discord.Color.orange()
if injury_result > 6:
gif_search_text = ['well shit', 'well fuck', 'god dammit']
gif_search_text = ["well shit", "well fuck", "god dammit"]
else:
gif_search_text = ['bummer', 'well damn']
gif_search_text = ["bummer", "well damn"]
if player.is_pitcher:
result_text += ' plus their current rest requirement'
result_text += " plus their current rest requirement"
# Pitcher callback shows modal to collect rest games
async def pitcher_confirm_callback(button_interaction: discord.Interaction):
async def pitcher_confirm_callback(
button_interaction: discord.Interaction,
):
"""Show modal to collect pitcher rest information."""
modal = PitcherRestModal(
player=player,
injury_games=injury_result,
season=current.season
player=player, injury_games=injury_result, season=current.season
)
await button_interaction.response.send_modal(modal)
@ -198,12 +194,12 @@ class InjuryGroup(app_commands.Group):
else:
# Batter callback shows modal to collect current week/game
async def batter_confirm_callback(button_interaction: discord.Interaction):
async def batter_confirm_callback(
button_interaction: discord.Interaction,
):
"""Show modal to collect current week/game information for batter injury."""
modal = BatterInjuryModal(
player=player,
injury_games=injury_result,
season=current.season
player=player, injury_games=injury_result, season=current.season
)
await button_interaction.response.send_modal(modal)
@ -213,35 +209,31 @@ class InjuryGroup(app_commands.Group):
# Only the player's team GM(s) can log the injury
view = ConfirmationView(
timeout=180.0, # 3 minutes for confirmation
responders=[player.team.gmid, player.team.gmid2] if player.team else None,
responders=(
[player.team.gmid, player.team.gmid2] if player.team else None
),
confirm_callback=injury_callback,
confirm_label="Log Injury",
cancel_label="Ignore Injury"
cancel_label="Ignore Injury",
)
elif injury_result == 'REM':
elif injury_result == "REM":
if player.is_pitcher:
result_text = '**FATIGUED**'
result_text = "**FATIGUED**"
else:
result_text = "**REMAINDER OF GAME**"
embed.color = discord.Color.gold()
gif_search_text = ['this is fine', 'not even mad', 'could be worse']
gif_search_text = ["this is fine", "not even mad", "could be worse"]
else: # 'OK'
result_text = "**No injury!**"
embed.color = discord.Color.green()
gif_search_text = ['we are so back', 'all good', 'totally fine']
gif_search_text = ["we are so back", "all good", "totally fine"]
embed.add_field(
name="Injury Length",
value=result_text,
inline=True
)
embed.add_field(name="Injury Length", value=result_text, inline=True)
try:
injury_gif = await GiphyService().get_gif(
phrase_options=gif_search_text
)
injury_gif = await GiphyService().get_gif(phrase_options=gif_search_text)
except Exception:
injury_gif = ''
injury_gif = ""
embed.set_image(url=injury_gif)
@ -251,7 +243,6 @@ class InjuryGroup(app_commands.Group):
else:
await interaction.followup.send(embed=embed)
def _get_injury_result(self, rating: str, games_played: int, roll: int):
"""
Get injury result from the injury table.
@ -266,89 +257,194 @@ class InjuryGroup(app_commands.Group):
"""
# Injury table mapping
inj_data = {
'one': {
'p70': ['OK', 'OK', 'OK', 'OK', 'OK', 'OK', 'REM', 'REM', 1, 1, 2, 2, 3, 3, 4, 4],
'p65': [2, 2, 'OK', 'REM', 1, 2, 3, 3, 4, 4, 4, 4, 5, 6, 8, 12],
'p60': ['OK', 'OK', 'REM', 1, 2, 3, 4, 4, 4, 5, 5, 6, 8, 12, 16, 16],
'p50': ['OK', 'REM', 1, 2, 3, 4, 4, 5, 5, 6, 8, 8, 12, 16, 16, 'OK'],
'p40': ['OK', 1, 2, 3, 4, 4, 5, 6, 6, 8, 8, 12, 16, 24, 'REM', 'OK'],
'p30': ['OK', 4, 1, 3, 4, 5, 6, 8, 8, 12, 16, 24, 4, 2, 'REM', 'OK'],
'p20': ['OK', 1, 2, 4, 5, 8, 8, 24, 16, 12, 12, 6, 4, 3, 'REM', 'OK']
"one": {
"p70": [
"OK",
"OK",
"OK",
"OK",
"OK",
"OK",
"REM",
"REM",
1,
1,
2,
2,
3,
3,
4,
4,
],
"p65": [2, 2, "OK", "REM", 1, 2, 3, 3, 4, 4, 4, 4, 5, 6, 8, 12],
"p60": ["OK", "OK", "REM", 1, 2, 3, 4, 4, 4, 5, 5, 6, 8, 12, 16, 16],
"p50": ["OK", "REM", 1, 2, 3, 4, 4, 5, 5, 6, 8, 8, 12, 16, 16, "OK"],
"p40": ["OK", 1, 2, 3, 4, 4, 5, 6, 6, 8, 8, 12, 16, 24, "REM", "OK"],
"p30": ["OK", 4, 1, 3, 4, 5, 6, 8, 8, 12, 16, 24, 4, 2, "REM", "OK"],
"p20": ["OK", 1, 2, 4, 5, 8, 8, 24, 16, 12, 12, 6, 4, 3, "REM", "OK"],
},
'two': {
'p70': [4, 3, 2, 2, 1, 1, 'REM', 'OK', 'REM', 'OK', 2, 1, 2, 2, 3, 4],
'p65': [8, 5, 4, 2, 2, 'OK', 1, 'OK', 'REM', 1, 'REM', 2, 3, 4, 6, 12],
'p60': [1, 3, 4, 5, 2, 2, 'OK', 1, 3, 'REM', 4, 4, 6, 8, 12, 3],
'p50': [4, 'OK', 'OK', 'REM', 1, 2, 4, 3, 4, 5, 4, 6, 8, 12, 12, 'OK'],
'p40': ['OK', 'OK', 'REM', 1, 2, 3, 4, 4, 5, 4, 6, 8, 12, 16, 16, 'OK'],
'p30': ['OK', 'REM', 1, 2, 3, 4, 4, 5, 6, 5, 8, 12, 16, 24, 'REM', 'OK'],
'p20': ['OK', 1, 4, 4, 5, 5, 6, 6, 12, 8, 16, 24, 8, 3, 2, 'REM']
"two": {
"p70": [4, 3, 2, 2, 1, 1, "REM", "OK", "REM", "OK", 2, 1, 2, 2, 3, 4],
"p65": [8, 5, 4, 2, 2, "OK", 1, "OK", "REM", 1, "REM", 2, 3, 4, 6, 12],
"p60": [1, 3, 4, 5, 2, 2, "OK", 1, 3, "REM", 4, 4, 6, 8, 12, 3],
"p50": [4, "OK", "OK", "REM", 1, 2, 4, 3, 4, 5, 4, 6, 8, 12, 12, "OK"],
"p40": ["OK", "OK", "REM", 1, 2, 3, 4, 4, 5, 4, 6, 8, 12, 16, 16, "OK"],
"p30": [
"OK",
"REM",
1,
2,
3,
4,
4,
5,
6,
5,
8,
12,
16,
24,
"REM",
"OK",
],
"p20": ["OK", 1, 4, 4, 5, 5, 6, 6, 12, 8, 16, 24, 8, 3, 2, "REM"],
},
'three': {
'p70': [],
'p65': ['OK', 'OK', 'REM', 1, 3, 'OK', 'REM', 1, 2, 1, 2, 3, 4, 4, 5, 'REM'],
'p60': ['OK', 5, 'OK', 'REM', 1, 2, 2, 3, 4, 4, 1, 3, 5, 6, 8, 'REM'],
'p50': ['OK', 'OK', 'REM', 1, 2, 3, 4, 4, 5, 4, 4, 6, 8, 8, 12, 'REM'],
'p40': ['OK', 1, 1, 2, 3, 4, 4, 5, 6, 5, 6, 8, 8, 12, 4, 'REM'],
'p30': ['OK', 1, 2, 3, 4, 5, 4, 6, 5, 6, 8, 8, 12, 16, 1, 'REM'],
'p20': ['OK', 1, 2, 4, 4, 8, 8, 6, 5, 12, 6, 16, 24, 3, 4, 'REM']
"three": {
"p70": [],
"p65": [
"OK",
"OK",
"REM",
1,
3,
"OK",
"REM",
1,
2,
1,
2,
3,
4,
4,
5,
"REM",
],
"p60": ["OK", 5, "OK", "REM", 1, 2, 2, 3, 4, 4, 1, 3, 5, 6, 8, "REM"],
"p50": ["OK", "OK", "REM", 1, 2, 3, 4, 4, 5, 4, 4, 6, 8, 8, 12, "REM"],
"p40": ["OK", 1, 1, 2, 3, 4, 4, 5, 6, 5, 6, 8, 8, 12, 4, "REM"],
"p30": ["OK", 1, 2, 3, 4, 5, 4, 6, 5, 6, 8, 8, 12, 16, 1, "REM"],
"p20": ["OK", 1, 2, 4, 4, 8, 8, 6, 5, 12, 6, 16, 24, 3, 4, "REM"],
},
'four': {
'p70': [],
'p65': [],
'p60': ['OK', 'OK', 'REM', 3, 3, 'OK', 'REM', 1, 2, 1, 4, 4, 5, 6, 8, 'REM'],
'p50': ['OK', 6, 4, 'OK', 'REM', 1, 2, 4, 4, 3, 5, 3, 6, 8, 12, 'REM'],
'p40': ['OK', 'OK', 'REM', 1, 2, 3, 4, 4, 5, 4, 4, 6, 8, 8, 12, 'REM'],
'p30': ['OK', 1, 1, 2, 3, 4, 4, 5, 6, 5, 6, 8, 8, 12, 4, 'REM'],
'p20': ['OK', 1, 2, 3, 4, 5, 4, 6, 5, 6, 12, 8, 8, 16, 1, 'REM']
"four": {
"p70": [],
"p65": [],
"p60": [
"OK",
"OK",
"REM",
3,
3,
"OK",
"REM",
1,
2,
1,
4,
4,
5,
6,
8,
"REM",
],
"p50": ["OK", 6, 4, "OK", "REM", 1, 2, 4, 4, 3, 5, 3, 6, 8, 12, "REM"],
"p40": ["OK", "OK", "REM", 1, 2, 3, 4, 4, 5, 4, 4, 6, 8, 8, 12, "REM"],
"p30": ["OK", 1, 1, 2, 3, 4, 4, 5, 6, 5, 6, 8, 8, 12, 4, "REM"],
"p20": ["OK", 1, 2, 3, 4, 5, 4, 6, 5, 6, 12, 8, 8, 16, 1, "REM"],
},
'five': {
'p70': [],
'p65': [],
'p60': ['OK', 'REM', 'REM', 'REM', 3, 'OK', 1, 'REM', 2, 1, 'OK', 4, 5, 2, 6, 8],
'p50': ['OK', 'OK', 'REM', 1, 1, 'OK', 'REM', 3, 2, 4, 4, 5, 5, 6, 8, 12],
'p40': ['OK', 6, 6, 'OK', 1, 3, 2, 4, 4, 5, 'REM', 3, 8, 6, 12, 1],
'p30': ['OK', 'OK', 'REM', 4, 1, 2, 5, 4, 6, 3, 4, 8, 5, 6, 12, 'REM'],
'p20': ['OK', 'REM', 2, 3, 4, 4, 5, 4, 6, 5, 8, 6, 8, 1, 12, 'REM']
"five": {
"p70": [],
"p65": [],
"p60": [
"OK",
"REM",
"REM",
"REM",
3,
"OK",
1,
"REM",
2,
1,
"OK",
4,
5,
2,
6,
8,
],
"p50": [
"OK",
"OK",
"REM",
1,
1,
"OK",
"REM",
3,
2,
4,
4,
5,
5,
6,
8,
12,
],
"p40": ["OK", 6, 6, "OK", 1, 3, 2, 4, 4, 5, "REM", 3, 8, 6, 12, 1],
"p30": ["OK", "OK", "REM", 4, 1, 2, 5, 4, 6, 3, 4, 8, 5, 6, 12, "REM"],
"p20": ["OK", "REM", 2, 3, 4, 4, 5, 4, 6, 5, 8, 6, 8, 1, 12, "REM"],
},
"six": {
"p70": [],
"p65": [],
"p60": [],
"p50": [],
"p40": ["OK", 6, 6, "OK", 1, 3, 2, 4, 4, 5, "REM", 3, 8, 6, 1, 12],
"p30": ["OK", "OK", "REM", 5, 1, 3, 6, 4, 5, 2, 4, 8, 3, 5, 12, "REM"],
"p20": ["OK", "REM", 4, 6, 2, 3, 6, 4, 8, 5, 5, 6, 3, 1, 12, "REM"],
},
'six': {
'p70': [],
'p65': [],
'p60': [],
'p50': [],
'p40': ['OK', 6, 6, 'OK', 1, 3, 2, 4, 4, 5, 'REM', 3, 8, 6, 1, 12],
'p30': ['OK', 'OK', 'REM', 5, 1, 3, 6, 4, 5, 2, 4, 8, 3, 5, 12, 'REM'],
'p20': ['OK', 'REM', 4, 6, 2, 3, 6, 4, 8, 5, 5, 6, 3, 1, 12, 'REM']
}
}
# Map games_played to key
games_map = {1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five', 6: 'six'}
games_map = {1: "one", 2: "two", 3: "three", 4: "four", 5: "five", 6: "six"}
games_key = games_map.get(games_played)
if not games_key:
return 'OK'
return "OK"
# Get the injury table for this rating and games played
injury_table = inj_data.get(games_key, {}).get(rating, [])
# If no table exists (e.g., p70 with 3+ games), no injury
if not injury_table:
return 'OK'
return "OK"
# Get result from table (roll 3-18 maps to index 0-15)
table_index = roll - 3
if 0 <= table_index < len(injury_table):
return injury_table[table_index]
return 'OK'
return "OK"
@app_commands.command(name="set-new", description="Set a new injury for a player (requires SBA Players role)")
@app_commands.command(
name="set-new",
description="Set a new injury for a player (requires SBA Players role)",
)
@app_commands.describe(
player_name="Player name to injure",
this_week="Current week number",
this_game="Current game number (1-4)",
injury_games="Number of games player will be out"
injury_games="Number of games player will be out",
)
@league_only()
@logged_command("/injury set-new")
@ -358,14 +454,14 @@ class InjuryGroup(app_commands.Group):
player_name: str,
this_week: int,
this_game: int,
injury_games: int
injury_games: int,
):
"""Set a new injury for a player on your team."""
# Check role permissions
if not self.has_player_role(interaction):
embed = EmbedTemplate.error(
title="Permission Denied",
description=f"This command requires the **{get_config().sba_players_role_name}** role."
description=f"This command requires the **{get_config().sba_players_role_name}** role.",
)
await interaction.response.send_message(embed=embed, ephemeral=True)
return
@ -376,7 +472,7 @@ class InjuryGroup(app_commands.Group):
if this_game < 1 or this_game > 4:
embed = EmbedTemplate.error(
title="Invalid Input",
description="Game number must be between 1 and 4."
description="Game number must be between 1 and 4.",
)
await interaction.followup.send(embed=embed, ephemeral=True)
return
@ -384,7 +480,7 @@ class InjuryGroup(app_commands.Group):
if injury_games < 1:
embed = EmbedTemplate.error(
title="Invalid Input",
description="Injury duration must be at least 1 game."
description="Injury duration must be at least 1 game.",
)
await interaction.followup.send(embed=embed, ephemeral=True)
return
@ -395,12 +491,14 @@ class InjuryGroup(app_commands.Group):
raise BotException("Failed to get current season information")
# Search for player using the search endpoint (more reliable than name param)
players = await player_service.search_players(player_name, limit=10, season=current.season)
players = await player_service.search_players(
player_name, limit=10, season=current.season
)
if not players:
embed = EmbedTemplate.error(
title="Player Not Found",
description=f"I did not find anybody named **{player_name}**."
description=f"I did not find anybody named **{player_name}**.",
)
await interaction.followup.send(embed=embed, ephemeral=True)
return
@ -410,6 +508,7 @@ class InjuryGroup(app_commands.Group):
# Fetch full team data if team is not populated
if player.team_id and not player.team:
from services.team_service import team_service
player.team = await team_service.get_team(player.team_id)
# Check if player is on user's team
@ -418,7 +517,9 @@ class InjuryGroup(app_commands.Group):
# TODO: Add team ownership verification
# Check if player already has an active injury
existing_injury = await injury_service.get_active_injury(player.id, current.season)
existing_injury = await injury_service.get_active_injury(
player.id, current.season
)
# Data consistency check: If injury exists but il_return is None, it's stale data
if existing_injury:
@ -431,12 +532,14 @@ class InjuryGroup(app_commands.Group):
await injury_service.clear_injury(existing_injury.id)
# Notify user but allow them to proceed
self.logger.info(f"Cleared stale injury {existing_injury.id} for player {player.id}")
self.logger.info(
f"Cleared stale injury {existing_injury.id} for player {player.id}"
)
else:
# Valid active injury - player is actually injured
embed = EmbedTemplate.error(
title="Already Injured",
description=f"Hm. It looks like {player.name} is already hurt (returns {player.il_return})."
description=f"Hm. It looks like {player.name} is already hurt (returns {player.il_return}).",
)
await interaction.followup.send(embed=embed, ephemeral=True)
return
@ -456,7 +559,7 @@ class InjuryGroup(app_commands.Group):
start_week = this_week if this_game != 4 else this_week + 1
start_game = this_game + 1 if this_game != 4 else 1
return_date = f'w{return_week:02d}g{return_game}'
return_date = f"w{return_week:02d}g{return_game}"
# Create injury record
injury = await injury_service.create_injury(
@ -466,49 +569,43 @@ class InjuryGroup(app_commands.Group):
start_week=start_week,
start_game=start_game,
end_week=return_week,
end_game=return_game
end_game=return_game,
)
if not injury:
embed = EmbedTemplate.error(
title="Error",
description="Well that didn't work. Failed to create injury record."
description="Well that didn't work. Failed to create injury record.",
)
await interaction.followup.send(embed=embed, ephemeral=True)
return
# Update player's il_return field
await player_service.update_player(player.id, {'il_return': return_date})
await player_service.update_player(player.id, {"il_return": return_date})
# Success response
embed = EmbedTemplate.success(
title="Injury Recorded",
description=f"{player.name}'s injury has been logged"
description=f"{player.name}'s injury has been logged",
)
embed.add_field(
name="Player",
value=f"{player.name} ({player.pos_1})",
inline=True
name="Player", value=f"{player.name} ({player.pos_1})", inline=True
)
embed.add_field(
name="Duration",
value=f"{injury_games} game{'s' if injury_games > 1 else ''}",
inline=True
inline=True,
)
embed.add_field(
name="Return Date",
value=return_date,
inline=True
)
embed.add_field(name="Return Date", value=return_date, inline=True)
if player.team:
embed.add_field(
name="Team",
value=f"{player.team.lname} ({player.team.abbrev})",
inline=False
inline=False,
)
await interaction.followup.send(embed=embed)
@ -518,10 +615,12 @@ class InjuryGroup(app_commands.Group):
f"Injury set for {player.name}: {injury_games} games, returns {return_date}",
player_id=player.id,
season=current.season,
injury_id=injury.id
injury_id=injury.id,
)
def _calc_injury_dates(self, start_week: int, start_game: int, injury_games: int) -> dict:
def _calc_injury_dates(
self, start_week: int, start_game: int, injury_games: int
) -> dict:
"""
Calculate injury dates from start week/game and injury duration.
@ -549,15 +648,16 @@ class InjuryGroup(app_commands.Group):
actual_start_game = start_game + 1 if start_game != 4 else 1
return {
'total_games': injury_games,
'start_week': actual_start_week,
'start_game': actual_start_game,
'end_week': return_week,
'end_game': return_game
"total_games": injury_games,
"start_week": actual_start_week,
"start_game": actual_start_game,
"end_week": return_week,
"end_game": return_game,
}
@app_commands.command(name="clear", description="Clear a player's injury (requires SBA Players role)")
@app_commands.command(
name="clear", description="Clear a player's injury (requires SBA Players role)"
)
@app_commands.describe(player_name="Player name to clear injury")
@app_commands.autocomplete(player_name=player_autocomplete)
@league_only()
@ -568,7 +668,7 @@ class InjuryGroup(app_commands.Group):
if not self.has_player_role(interaction):
embed = EmbedTemplate.error(
title="Permission Denied",
description=f"This command requires the **{get_config().sba_players_role_name}** role."
description=f"This command requires the **{get_config().sba_players_role_name}** role.",
)
await interaction.response.send_message(embed=embed, ephemeral=True)
return
@ -581,12 +681,14 @@ class InjuryGroup(app_commands.Group):
raise BotException("Failed to get current season information")
# Search for player using the search endpoint (more reliable than name param)
players = await player_service.search_players(player_name, limit=10, season=current.season)
players = await player_service.search_players(
player_name, limit=10, season=current.season
)
if not players:
embed = EmbedTemplate.error(
title="Player Not Found",
description=f"I did not find anybody named **{player_name}**."
description=f"I did not find anybody named **{player_name}**.",
)
await interaction.followup.send(embed=embed, ephemeral=True)
return
@ -596,6 +698,7 @@ class InjuryGroup(app_commands.Group):
# Fetch full team data if team is not populated
if player.team_id and not player.team:
from services.team_service import team_service
player.team = await team_service.get_team(player.team_id)
# Get active injury
@ -603,8 +706,7 @@ class InjuryGroup(app_commands.Group):
if not injury:
embed = EmbedTemplate.error(
title="No Active Injury",
description=f"{player.name} isn't injured."
title="No Active Injury", description=f"{player.name} isn't injured."
)
await interaction.followup.send(embed=embed, ephemeral=True)
return
@ -612,7 +714,7 @@ class InjuryGroup(app_commands.Group):
# Create confirmation embed
embed = EmbedTemplate.info(
title=f"{player.name}",
description=f"Is **{player.name}** cleared to return?"
description=f"Is **{player.name}** cleared to return?",
)
if player.team and player.team.thumbnail is not None:
@ -621,33 +723,27 @@ class InjuryGroup(app_commands.Group):
embed.add_field(
name="Player",
value=f"{player.name} ({player.primary_position})",
inline=True
inline=True,
)
if player.team:
embed.add_field(
name="Team",
value=f"{player.team.lname} ({player.team.abbrev})",
inline=True
inline=True,
)
embed.add_field(
name="Expected Return",
value=injury.return_date,
inline=True
)
embed.add_field(name="Expected Return", value=injury.return_date, inline=True)
embed.add_field(
name="Games Missed",
value=injury.duration_display,
inline=True
)
embed.add_field(name="Games Missed", value=injury.duration_display, inline=True)
# Initialize responder_team to None for major league teams
if player.team.roster_type() == RosterType.MAJOR_LEAGUE:
responder_team = player.team
else:
responder_team = await team_utils.get_user_major_league_team(interaction.user.id)
responder_team = await team_utils.get_user_major_league_team(
interaction.user.id
)
# Create callback for confirmation
async def clear_confirm_callback(button_interaction: discord.Interaction):
@ -658,37 +754,33 @@ class InjuryGroup(app_commands.Group):
if not success:
error_embed = EmbedTemplate.error(
title="Error",
description="Failed to clear the injury. Please try again."
description="Failed to clear the injury. Please try again.",
)
await button_interaction.response.send_message(
embed=error_embed, ephemeral=True
)
await button_interaction.response.send_message(embed=error_embed, ephemeral=True)
return
# Clear player's il_return field
await player_service.update_player(player.id, {'il_return': ''})
await player_service.update_player(player.id, {"il_return": ""})
# Success response
success_embed = EmbedTemplate.success(
title="Injury Cleared",
description=f"{player.name} has been cleared and is eligible to play again."
description=f"{player.name} has been cleared and is eligible to play again.",
)
success_embed.add_field(
name="Injury Return Date",
value=injury.return_date,
inline=True
name="Injury Return Date", value=injury.return_date, inline=True
)
success_embed.add_field(
name="Total Games Missed",
value=injury.duration_display,
inline=True
name="Total Games Missed", value=injury.duration_display, inline=True
)
if player.team:
success_embed.add_field(
name="Team",
value=f"{player.team.lname}",
inline=False
name="Team", value=f"{player.team.lname}", inline=False
)
if player.team.thumbnail is not None:
success_embed.set_thumbnail(url=player.team.thumbnail)
@ -700,17 +792,19 @@ class InjuryGroup(app_commands.Group):
f"Injury cleared for {player.name}",
player_id=player.id,
season=current.season,
injury_id=injury.id
injury_id=injury.id,
)
# Create confirmation view
view = ConfirmationView(
user_id=interaction.user.id,
timeout=180.0, # 3 minutes for confirmation
responders=[responder_team.gmid, responder_team.gmid2] if responder_team else None,
responders=(
[responder_team.gmid, responder_team.gmid2] if responder_team else None
),
confirm_callback=clear_confirm_callback,
confirm_label="Clear Injury",
cancel_label="Cancel"
cancel_label="Cancel",
)
# Send confirmation embed with view

View File

@ -3,10 +3,10 @@ League command package for Discord Bot v2.0
Provides league-wide slash commands for standings and current state.
"""
import logging
from typing import List, Tuple, Type
import discord
from discord.ext import commands
from .info import LeagueInfoCommands
@ -14,13 +14,13 @@ from .standings import StandingsCommands
from .schedule import ScheduleCommands
from .submit_scorecard import SubmitScorecardCommands
logger = logging.getLogger(f'{__name__}.setup_league')
logger = logging.getLogger(f"{__name__}.setup_league")
async def setup_league(bot: commands.Bot) -> Tuple[int, int, List[str]]:
"""
Set up league command modules.
Returns:
Tuple of (successful_loads, failed_loads, failed_modules)
"""
@ -30,11 +30,11 @@ async def setup_league(bot: commands.Bot) -> Tuple[int, int, List[str]]:
("ScheduleCommands", ScheduleCommands),
("SubmitScorecardCommands", SubmitScorecardCommands),
]
successful = 0
failed = 0
failed_modules = []
for cog_name, cog_class in league_cogs:
try:
await bot.add_cog(cog_class(bot))
@ -44,13 +44,15 @@ async def setup_league(bot: commands.Bot) -> Tuple[int, int, List[str]]:
logger.error(f"❌ Failed to load league command module {cog_name}: {e}")
failed += 1
failed_modules.append(cog_name)
# Log summary
if failed == 0:
logger.info(f"🎉 All {successful} league command modules loaded successfully")
else:
logger.warning(f"⚠️ League commands loaded with issues: {successful} successful, {failed} failed")
logger.warning(
f"⚠️ League commands loaded with issues: {successful} successful, {failed} failed"
)
if failed_modules:
logger.warning(f"Failed modules: {', '.join(failed_modules)}")
return successful, failed, failed_modules
return successful, failed, failed_modules

View File

@ -1,17 +1,13 @@
"""
League information commands for Discord Bot v2.0
"""
import logging
from typing import Optional
import discord
from discord.ext import commands
from services import league_service
from config import get_config
from utils.logging import get_contextual_logger
from utils.decorators import logged_command
from exceptions import BotException
from utils.permissions import requires_team
from views.embeds import EmbedTemplate

View File

@ -9,7 +9,6 @@ import discord
from discord.ext import commands
from config import get_config
from models.team import Team
from services.standings_service import standings_service
from utils.logging import get_contextual_logger
from utils.decorators import logged_command

View File

@ -175,14 +175,14 @@ class SubmitScorecardCommands(commands.Cog):
# Delete old data
try:
await play_service.delete_plays_for_game(duplicate_game.id)
except:
except Exception:
pass # May not exist
try:
await decision_service.delete_decisions_for_game(
duplicate_game.id
)
except:
except Exception:
pass # May not exist
await game_service.wipe_game_data(duplicate_game.id)
@ -354,7 +354,7 @@ class SubmitScorecardCommands(commands.Cog):
try:
await standings_service.recalculate_standings(current.season)
except:
except Exception:
# Non-critical error
self.logger.error("Failed to recalculate standings")
@ -372,11 +372,11 @@ class SubmitScorecardCommands(commands.Cog):
await play_service.delete_plays_for_game(game_id)
elif rollback_state == "PLAYS_POSTED":
await play_service.delete_plays_for_game(game_id)
except:
except Exception:
pass # Best effort rollback
await interaction.edit_original_response(
content=f"❌ An unexpected error occurred: {str(e)}"
content="❌ An unexpected error occurred. Please try again or contact an admin."
)
def _match_manager(self, team: Team, manager_name: str):

View File

@ -4,7 +4,7 @@ Player Image Management Commands
Allows users to update player fancy card and headshot images for players
on teams they own. Admins can update any player's images.
"""
from typing import Optional, List, Tuple
from typing import List, Tuple
import asyncio
import aiohttp

View File

@ -9,7 +9,6 @@ All new code should import from services.giphy_service instead.
from services import giphy_service
# Re-export tier configuration for backwards compatibility
from services.giphy_service import DISAPPOINTMENT_TIERS
def get_tier_for_seconds(seconds_elapsed):

View File

@ -3,23 +3,23 @@ Team command package for Discord Bot v2.0
Provides team-related slash commands for the SBA league.
"""
import logging
from typing import List, Tuple, Type
import discord
from discord.ext import commands
from .info import TeamInfoCommands
from .roster import TeamRosterCommands
from .branding import BrandingCommands
logger = logging.getLogger(f'{__name__}.setup_teams')
logger = logging.getLogger(f"{__name__}.setup_teams")
async def setup_teams(bot: commands.Bot) -> Tuple[int, int, List[str]]:
"""
Set up team command modules.
Returns:
Tuple of (successful_loads, failed_loads, failed_modules)
"""
@ -28,11 +28,11 @@ async def setup_teams(bot: commands.Bot) -> Tuple[int, int, List[str]]:
("TeamRosterCommands", TeamRosterCommands),
("BrandingCommands", BrandingCommands),
]
successful = 0
failed = 0
failed_modules = []
for cog_name, cog_class in team_cogs:
try:
await bot.add_cog(cog_class(bot))
@ -42,13 +42,15 @@ async def setup_teams(bot: commands.Bot) -> Tuple[int, int, List[str]]:
logger.error(f"❌ Failed to load team command module {cog_name}: {e}")
failed += 1
failed_modules.append(cog_name)
# Log summary
if failed == 0:
logger.info(f"🎉 All {successful} team command modules loaded successfully")
else:
logger.warning(f"⚠️ Team commands loaded with issues: {successful} successful, {failed} failed")
logger.warning(
f"⚠️ Team commands loaded with issues: {successful} successful, {failed} failed"
)
if failed_modules:
logger.warning(f"Failed modules: {', '.join(failed_modules)}")
return successful, failed, failed_modules
return successful, failed, failed_modules

View File

@ -1,7 +1,6 @@
"""
Team information commands for Discord Bot v2.0
"""
import logging
from typing import Optional
from config import get_config
@ -12,7 +11,6 @@ from services import team_service, player_service
from models.team import RosterType, Team
from utils.logging import get_contextual_logger
from utils.decorators import logged_command
from exceptions import BotException
from views.embeds import EmbedTemplate, EmbedColors
from views.base import PaginationView

View File

@ -1,19 +1,17 @@
"""
Team roster commands for Discord Bot v2.0
"""
import logging
from typing import Optional, Dict, Any, List
from typing import Dict, Any, List
import discord
from discord.ext import commands
from config import get_config
from models.player import Player
from services import team_service, player_service
from services import team_service
from models.team import Team
from utils.logging import get_contextual_logger
from utils.decorators import logged_command
from exceptions import BotException
from utils.permissions import requires_team
from views.embeds import EmbedTemplate, EmbedColors

View File

@ -3,10 +3,10 @@ Transaction command package for Discord Bot v2.0
Contains transaction management commands for league operations.
"""
import logging
from typing import List, Tuple, Type
import discord
from discord.ext import commands
from .management import TransactionCommands
@ -14,13 +14,13 @@ from .dropadd import DropAddCommands
from .trade import TradeCommands
from .ilmove import ILMoveCommands
logger = logging.getLogger(f'{__name__}.setup_transactions')
logger = logging.getLogger(f"{__name__}.setup_transactions")
async def setup_transactions(bot: commands.Bot) -> Tuple[int, int, List[str]]:
"""
Set up transaction command modules.
Returns:
Tuple of (successful_loads, failed_loads, failed_modules)
"""
@ -30,27 +30,33 @@ async def setup_transactions(bot: commands.Bot) -> Tuple[int, int, List[str]]:
("TradeCommands", TradeCommands),
("ILMoveCommands", ILMoveCommands),
]
successful = 0
failed = 0
failed_modules = []
for cog_name, cog_class in transaction_cogs:
try:
await bot.add_cog(cog_class(bot))
logger.info(f"✅ Loaded transaction command module: {cog_name}")
successful += 1
except Exception as e:
logger.error(f"❌ Failed to load transaction command module {cog_name}: {e}")
logger.error(
f"❌ Failed to load transaction command module {cog_name}: {e}"
)
failed += 1
failed_modules.append(cog_name)
# Log summary
if failed == 0:
logger.info(f"🎉 All {successful} transaction command modules loaded successfully")
logger.info(
f"🎉 All {successful} transaction command modules loaded successfully"
)
else:
logger.warning(f"⚠️ Transaction commands loaded with issues: {successful} successful, {failed} failed")
logger.warning(
f"⚠️ Transaction commands loaded with issues: {successful} successful, {failed} failed"
)
if failed_modules:
logger.warning(f"Failed modules: {', '.join(failed_modules)}")
return successful, failed, failed_modules
return successful, failed, failed_modules

View File

@ -3,7 +3,7 @@ Modern /dropadd Command
Interactive transaction builder with real-time validation and elegant UX.
"""
from typing import Optional, List
from typing import Optional
import discord
from discord.ext import commands
@ -24,7 +24,6 @@ from services.transaction_builder import (
clear_transaction_builder
)
from services.player_service import player_service
from services.team_service import team_service
from views.transaction_embed import TransactionEmbedView, create_transaction_embed

View File

@ -29,7 +29,6 @@ from services.transaction_builder import (
)
from services.player_service import player_service
from services.team_service import team_service
from services.league_service import league_service
from views.transaction_embed import TransactionEmbedView, create_transaction_embed

View File

@ -16,7 +16,6 @@ from utils.autocomplete import player_autocomplete, major_league_team_autocomple
from utils.team_utils import validate_user_has_team, get_team_by_abbrev_with_validation
from services.trade_builder import (
TradeBuilder,
get_trade_builder,
get_trade_builder_by_team,
clear_trade_builder,

View File

@ -4,7 +4,6 @@ Trade Channel Tracker
Provides persistent tracking of bot-created trade discussion channels using JSON file storage.
"""
import json
import logging
from datetime import datetime, UTC
from pathlib import Path
from typing import Dict, List, Optional, Any

View File

@ -12,7 +12,7 @@ from typing import List, Optional
from config import get_config
from utils.decorators import logged_command
from utils.logging import get_contextual_logger, set_discord_context
from services.chart_service import get_chart_service, Chart
from services.chart_service import get_chart_service
from views.embeds import EmbedTemplate, EmbedColors
from exceptions import BotException

View File

@ -4,7 +4,7 @@ Weather command for Discord Bot v2.0
Provides ballpark weather checks with dice rolls for gameplay.
"""
import random
from typing import Optional, Tuple
from typing import Optional
import discord
from discord.ext import commands

View File

@ -1,6 +1,7 @@
"""
Configuration management for Discord Bot v2.0
"""
import os
from typing import Optional
@ -40,17 +41,18 @@ class BotConfig(BaseSettings):
playoff_round_two_games: int = 7
playoff_round_three_games: int = 7
modern_stats_start_season: int = 8
offseason_flag: bool = False # When True, relaxes roster limits and disables weekly freeze/thaw
offseason_flag: bool = (
False # When True, relaxes roster limits and disables weekly freeze/thaw
)
# Roster Limits
expand_mil_week: int = 15 # Week when MiL roster expands (early vs late limits)
ml_roster_limit_early: int = 26 # ML limit for weeks before expand_mil_week
ml_roster_limit_late: int = 26 # ML limit for weeks >= expand_mil_week
mil_roster_limit_early: int = 6 # MiL limit for weeks before expand_mil_week
mil_roster_limit_late: int = 14 # MiL limit for weeks >= expand_mil_week
expand_mil_week: int = 15 # Week when MiL roster expands (early vs late limits)
ml_roster_limit_early: int = 26 # ML limit for weeks before expand_mil_week
ml_roster_limit_late: int = 26 # ML limit for weeks >= expand_mil_week
mil_roster_limit_early: int = 6 # MiL limit for weeks before expand_mil_week
mil_roster_limit_late: int = 14 # MiL limit for weeks >= expand_mil_week
ml_roster_limit_offseason: int = 69 # ML limit during offseason
mil_roster_limit_offseason: int = 69 # MiL limit during offseason
mil_roster_limit_offseason: int = 69 # MiL limit during offseason
# API Constants
api_version: str = "v3"
@ -60,10 +62,10 @@ class BotConfig(BaseSettings):
# Draft Constants
default_pick_minutes: int = 10
draft_rounds: int = 32
draft_team_count: int = 16 # Number of teams in draft
draft_linear_rounds: int = 10 # Rounds 1-10 are linear, 11+ are snake
swar_cap_limit: float = 32.00 # Maximum sWAR cap for team roster
cap_player_count: int = 26 # Number of players that count toward cap
draft_team_count: int = 16 # Number of teams in draft
draft_linear_rounds: int = 10 # Rounds 1-10 are linear, 11+ are snake
swar_cap_limit: float = 32.00 # Maximum sWAR cap for team roster
cap_player_count: int = 26 # Number of players that count toward cap
# Special Team IDs
free_agent_team_id: int = 547
@ -80,41 +82,45 @@ class BotConfig(BaseSettings):
# Base URLs
sba_base_url: str = "https://sba.manticorum.com"
sba_logo_url: str = f'{sba_base_url}/images/sba-logo.png'
sba_logo_url: str = f"{sba_base_url}/images/sba-logo.png"
# Application settings
log_level: str = "INFO"
environment: str = "development"
testing: bool = True
testing: bool = False
# Google Sheets settings
sheets_credentials_path: str = "/app/data/major-domo-service-creds.json"
# Draft Sheet settings (for writing picks to Google Sheets)
# Sheet IDs can be overridden via environment variables: DRAFT_SHEET_KEY_12, DRAFT_SHEET_KEY_13, etc.
draft_sheet_enabled: bool = True # Feature flag - set DRAFT_SHEET_ENABLED=false to disable
draft_sheet_enabled: bool = (
True # Feature flag - set DRAFT_SHEET_ENABLED=false to disable
)
draft_sheet_worksheet: str = "Ordered List" # Worksheet name to write picks to
draft_sheet_start_column: str = "D" # Column where pick data starts (D, E, F, G for 4 columns)
draft_sheet_start_column: str = (
"D" # Column where pick data starts (D, E, F, G for 4 columns)
)
# Giphy API settings
giphy_api_key: str = "H86xibttEuUcslgmMM6uu74IgLEZ7UOD"
giphy_api_key: str = ""
giphy_translate_url: str = "https://api.giphy.com/v1/gifs/translate"
# Optional Redis caching settings
redis_url: str = "" # Empty string means no Redis caching
redis_cache_ttl: int = 300 # 5 minutes default TTL
model_config = SettingsConfigDict(
env_file=".env",
case_sensitive=False,
extra="ignore" # Ignore extra environment variables
extra="ignore", # Ignore extra environment variables
)
@property
def is_development(self) -> bool:
"""Check if running in development mode."""
return self.environment.lower() == "development"
@property
def is_testing(self) -> bool:
"""Check if running in test mode."""
@ -139,7 +145,7 @@ class BotConfig(BaseSettings):
# Default sheet IDs (hardcoded as fallback)
default_keys = {
12: "1OF-sAFykebc_2BrcYCgxCR-4rJo0GaNmTstagV-PMBU",
13: "1vWJfvuz9jN5BU2ZR0X0oC9BAVr_R8o-dWZsF2KXQMsE"
13: "1vWJfvuz9jN5BU2ZR0X0oC9BAVr_R8o-dWZsF2KXQMsE",
}
# Check environment variable first (allows runtime override)
@ -165,9 +171,10 @@ class BotConfig(BaseSettings):
# Global configuration instance - lazily initialized to avoid import-time errors
_config = None
def get_config() -> BotConfig:
"""Get the global configuration instance."""
global _config
if _config is None:
_config = BotConfig() # type: ignore
return _config
return _config

View File

@ -3,8 +3,9 @@ Custom Command models for Discord Bot v2.0
Modern Pydantic models for the custom command system with full type safety.
"""
from datetime import datetime
from typing import Optional, Dict, Any
from datetime import UTC, datetime
from typing import Optional
import re
from pydantic import BaseModel, Field, field_validator
@ -13,136 +14,158 @@ from models.base import SBABaseModel
class CustomCommandCreator(SBABaseModel):
"""Creator of custom commands."""
id: int = Field(..., description="Database ID") # type: ignore
id: int = Field(..., description="Database ID") # type: ignore
discord_id: int = Field(..., description="Discord user ID")
username: str = Field(..., description="Discord username")
display_name: Optional[str] = Field(None, description="Discord display name")
created_at: datetime = Field(..., description="When creator was first recorded") # type: ignore
created_at: datetime = Field(..., description="When creator was first recorded") # type: ignore
total_commands: int = Field(0, description="Total commands created by this user")
active_commands: int = Field(0, description="Currently active commands")
class CustomCommand(SBABaseModel):
"""A custom command created by a user."""
id: int = Field(..., description="Database ID") # type: ignore
id: int = Field(..., description="Database ID") # type: ignore
name: str = Field(..., description="Command name (unique)")
content: str = Field(..., description="Command response content")
creator_id: Optional[int] = Field(None, description="ID of the creator (may be missing from execute endpoint)")
creator_id: Optional[int] = Field(
None, description="ID of the creator (may be missing from execute endpoint)"
)
creator: Optional[CustomCommandCreator] = Field(None, description="Creator details")
# Timestamps
created_at: datetime = Field(..., description="When command was created") # type: ignore
updated_at: Optional[datetime] = Field(None, description="When command was last updated") # type: ignore
last_used: Optional[datetime] = Field(None, description="When command was last executed")
created_at: datetime = Field(..., description="When command was created") # type: ignore
updated_at: Optional[datetime] = Field(None, description="When command was last updated") # type: ignore
last_used: Optional[datetime] = Field(
None, description="When command was last executed"
)
# Usage tracking
use_count: int = Field(0, description="Total times command has been used")
warning_sent: bool = Field(False, description="Whether cleanup warning was sent")
# Metadata
is_active: bool = Field(True, description="Whether command is currently active")
tags: Optional[list[str]] = Field(None, description="Optional tags for categorization")
@field_validator('name')
tags: Optional[list[str]] = Field(
None, description="Optional tags for categorization"
)
@field_validator("name")
@classmethod
def validate_name(cls, v):
"""Validate command name."""
if not v or len(v.strip()) == 0:
raise ValueError("Command name cannot be empty")
name = v.strip().lower()
# Length validation
if len(name) < 2:
raise ValueError("Command name must be at least 2 characters")
if len(name) > 32:
raise ValueError("Command name cannot exceed 32 characters")
# Character validation - only allow alphanumeric, dashes, underscores
if not re.match(r'^[a-z0-9_-]+$', name):
raise ValueError("Command name can only contain letters, numbers, dashes, and underscores")
if not re.match(r"^[a-z0-9_-]+$", name):
raise ValueError(
"Command name can only contain letters, numbers, dashes, and underscores"
)
# Reserved names
reserved = {
'help', 'ping', 'info', 'list', 'create', 'delete', 'edit',
'admin', 'mod', 'owner', 'bot', 'system', 'config'
"help",
"ping",
"info",
"list",
"create",
"delete",
"edit",
"admin",
"mod",
"owner",
"bot",
"system",
"config",
}
if name in reserved:
raise ValueError(f"'{name}' is a reserved command name")
return name.lower()
@field_validator('content')
@field_validator("content")
@classmethod
def validate_content(cls, v):
"""Validate command content."""
if not v or len(v.strip()) == 0:
raise ValueError("Command content cannot be empty")
content = v.strip()
# Length validation
if len(content) > 2000:
raise ValueError("Command content cannot exceed 2000 characters")
# Basic content filtering
prohibited = ['@everyone', '@here']
prohibited = ["@everyone", "@here"]
content_lower = content.lower()
for term in prohibited:
if term in content_lower:
raise ValueError(f"Command content cannot contain '{term}'")
return content
@property
def days_since_last_use(self) -> Optional[int]:
"""Calculate days since last use."""
if not self.last_used:
return None
return (datetime.now() - self.last_used).days
return (datetime.now(UTC) - self.last_used).days
@property
def is_eligible_for_warning(self) -> bool:
"""Check if command is eligible for deletion warning."""
if not self.last_used or self.warning_sent:
return False
return self.days_since_last_use >= 60 # type: ignore
return self.days_since_last_use >= 60 # type: ignore
@property
def is_eligible_for_deletion(self) -> bool:
"""Check if command is eligible for deletion."""
if not self.last_used:
return False
return self.days_since_last_use >= 90 # type: ignore
return self.days_since_last_use >= 90 # type: ignore
@property
def popularity_score(self) -> float:
"""Calculate popularity score based on usage and recency."""
if self.use_count == 0:
return 0.0
# Base score from usage
base_score = min(self.use_count / 10.0, 10.0) # Max 10 points from usage
# Recency modifier
if self.last_used:
days_ago = self.days_since_last_use
if days_ago <= 7: # type: ignore
if days_ago <= 7: # type: ignore
recency_modifier = 1.5 # Recent use bonus
elif days_ago <= 30: # type: ignore
elif days_ago <= 30: # type: ignore
recency_modifier = 1.0 # No modifier
elif days_ago <= 60: # type: ignore
elif days_ago <= 60: # type: ignore
recency_modifier = 0.7 # Slight penalty
else:
recency_modifier = 0.3 # Old command penalty
else:
recency_modifier = 0.1 # Never used
return base_score * recency_modifier
class CustomCommandSearchFilters(BaseModel):
"""Filters for searching custom commands."""
name_contains: Optional[str] = None
creator_id: Optional[int] = None
creator_name: Optional[str] = None
@ -150,33 +173,43 @@ class CustomCommandSearchFilters(BaseModel):
max_days_unused: Optional[int] = None
has_tags: Optional[list[str]] = None
is_active: bool = True
# Sorting options
sort_by: str = Field('name', description="Sort field: name, created_at, last_used, use_count, popularity")
sort_by: str = Field(
"name",
description="Sort field: name, created_at, last_used, use_count, popularity",
)
sort_desc: bool = Field(False, description="Sort in descending order")
# Pagination
page: int = Field(1, description="Page number (1-based)")
page_size: int = Field(25, description="Items per page")
@field_validator('sort_by')
@field_validator("sort_by")
@classmethod
def validate_sort_by(cls, v):
"""Validate sort field."""
valid_sorts = {'name', 'created_at', 'last_used', 'use_count', 'popularity', 'creator'}
valid_sorts = {
"name",
"created_at",
"last_used",
"use_count",
"popularity",
"creator",
}
if v not in valid_sorts:
raise ValueError(f"sort_by must be one of: {', '.join(valid_sorts)}")
return v
@field_validator('page')
@field_validator("page")
@classmethod
def validate_page(cls, v):
"""Validate page number."""
if v < 1:
raise ValueError("Page number must be >= 1")
return v
@field_validator('page_size')
@field_validator("page_size")
@classmethod
def validate_page_size(cls, v):
"""Validate page size."""
@ -187,18 +220,19 @@ class CustomCommandSearchFilters(BaseModel):
class CustomCommandSearchResult(BaseModel):
"""Result of a custom command search."""
commands: list[CustomCommand]
total_count: int
page: int
page_size: int
total_pages: int
has_more: bool
@property
def start_index(self) -> int:
"""Get the starting index for this page."""
return (self.page - 1) * self.page_size + 1
@property
def end_index(self) -> int:
"""Get the ending index for this page."""
@ -207,30 +241,31 @@ class CustomCommandSearchResult(BaseModel):
class CustomCommandStats(BaseModel):
"""Statistics about custom commands."""
total_commands: int
active_commands: int
total_creators: int
total_uses: int
# Usage statistics
most_popular_command: Optional[CustomCommand] = None
most_active_creator: Optional[CustomCommandCreator] = None
recent_commands_count: int = 0 # Commands created in last 7 days
# Cleanup statistics
commands_needing_warning: int = 0
commands_eligible_for_deletion: int = 0
@property
def average_uses_per_command(self) -> float:
"""Calculate average uses per command."""
if self.active_commands == 0:
return 0.0
return self.total_uses / self.active_commands
@property
def average_commands_per_creator(self) -> float:
"""Calculate average commands per creator."""
if self.total_creators == 0:
return 0.0
return self.active_commands / self.total_creators
return self.active_commands / self.total_creators

View File

@ -3,8 +3,9 @@ Draft configuration and state model
Represents the current draft settings and timer state.
"""
from typing import Optional
from datetime import datetime
from datetime import UTC, datetime
from pydantic import Field, field_validator
from models.base import SBABaseModel
@ -15,10 +16,18 @@ class DraftData(SBABaseModel):
currentpick: int = Field(0, description="Current pick number in progress")
timer: bool = Field(False, description="Whether draft timer is active")
paused: bool = Field(False, description="Whether draft is paused (blocks all picks)")
pick_deadline: Optional[datetime] = Field(None, description="Deadline for current pick")
result_channel: Optional[int] = Field(None, description="Discord channel ID for draft results")
ping_channel: Optional[int] = Field(None, description="Discord channel ID for draft pings")
paused: bool = Field(
False, description="Whether draft is paused (blocks all picks)"
)
pick_deadline: Optional[datetime] = Field(
None, description="Deadline for current pick"
)
result_channel: Optional[int] = Field(
None, description="Discord channel ID for draft results"
)
ping_channel: Optional[int] = Field(
None, description="Discord channel ID for draft pings"
)
pick_minutes: int = Field(1, description="Minutes allowed per pick")
@field_validator("result_channel", "ping_channel", mode="before")
@ -30,7 +39,7 @@ class DraftData(SBABaseModel):
if isinstance(v, str):
return int(v)
return v
@property
def is_draft_active(self) -> bool:
"""Check if the draft is currently active (timer running and not paused)."""
@ -41,7 +50,7 @@ class DraftData(SBABaseModel):
"""Check if the current pick deadline has passed."""
if not self.pick_deadline:
return False
return datetime.now() > self.pick_deadline
return datetime.now(UTC) > self.pick_deadline
@property
def can_make_picks(self) -> bool:
@ -55,4 +64,4 @@ class DraftData(SBABaseModel):
status = "Active"
else:
status = "Inactive"
return f"Draft {status}: Pick {self.currentpick} ({self.pick_minutes}min timer)"
return f"Draft {status}: Pick {self.currentpick} ({self.pick_minutes}min timer)"

View File

@ -3,7 +3,7 @@ Draft preference list model
Represents team draft board rankings and preferences.
"""
from typing import Optional, Dict, Any
from typing import Dict, Any
from pydantic import Field
from models.base import SBABaseModel

View File

@ -9,8 +9,8 @@ When the API short_output=false, these fields contain full Team/Player objects.
When short_output=true (or default), they contain integer IDs.
We use Pydantic aliases to handle both cases.
"""
from typing import Optional, Any, Dict, Union
from pydantic import Field, field_validator, model_validator
from typing import Optional, Any, Dict
from pydantic import Field
from models.base import SBABaseModel
from models.team import Team

View File

@ -5,7 +5,8 @@ Modern Pydantic models for the custom help system with full type safety.
Allows admins and help editors to create custom help topics for league documentation,
resources, FAQs, links, and guides.
"""
from datetime import datetime
from datetime import UTC, datetime
from typing import Optional
import re
@ -15,6 +16,7 @@ from models.base import SBABaseModel
class HelpCommand(SBABaseModel):
"""A help topic created by an admin or help editor."""
id: int = Field(..., description="Database ID") # type: ignore
name: str = Field(..., description="Help topic name (unique)")
title: str = Field(..., description="Display title")
@ -22,17 +24,23 @@ class HelpCommand(SBABaseModel):
category: Optional[str] = Field(None, description="Category for organization")
# Audit fields
created_by_discord_id: str = Field(..., description="Creator Discord ID (stored as text)")
created_by_discord_id: str = Field(
..., description="Creator Discord ID (stored as text)"
)
created_at: datetime = Field(..., description="When help topic was created") # type: ignore
updated_at: Optional[datetime] = Field(None, description="When help topic was last updated") # type: ignore
last_modified_by: Optional[str] = Field(None, description="Discord ID of last editor (stored as text)")
last_modified_by: Optional[str] = Field(
None, description="Discord ID of last editor (stored as text)"
)
# Status and metrics
is_active: bool = Field(True, description="Whether help topic is active (soft delete)")
is_active: bool = Field(
True, description="Whether help topic is active (soft delete)"
)
view_count: int = Field(0, description="Number of times viewed")
display_order: int = Field(0, description="Sort order for display")
@field_validator('name')
@field_validator("name")
@classmethod
def validate_name(cls, v):
"""Validate help topic name."""
@ -48,12 +56,14 @@ class HelpCommand(SBABaseModel):
raise ValueError("Help topic name cannot exceed 32 characters")
# Character validation - only allow alphanumeric, dashes, underscores
if not re.match(r'^[a-z0-9_-]+$', name):
raise ValueError("Help topic name can only contain letters, numbers, dashes, and underscores")
if not re.match(r"^[a-z0-9_-]+$", name):
raise ValueError(
"Help topic name can only contain letters, numbers, dashes, and underscores"
)
return name.lower()
@field_validator('title')
@field_validator("title")
@classmethod
def validate_title(cls, v):
"""Validate help topic title."""
@ -68,7 +78,7 @@ class HelpCommand(SBABaseModel):
return title
@field_validator('content')
@field_validator("content")
@classmethod
def validate_content(cls, v):
"""Validate help topic content."""
@ -86,7 +96,7 @@ class HelpCommand(SBABaseModel):
return content
@field_validator('category')
@field_validator("category")
@classmethod
def validate_category(cls, v):
"""Validate category if provided."""
@ -103,8 +113,10 @@ class HelpCommand(SBABaseModel):
raise ValueError("Category cannot exceed 50 characters")
# Character validation
if not re.match(r'^[a-z0-9_-]+$', category):
raise ValueError("Category can only contain letters, numbers, dashes, and underscores")
if not re.match(r"^[a-z0-9_-]+$", category):
raise ValueError(
"Category can only contain letters, numbers, dashes, and underscores"
)
return category
@ -118,12 +130,12 @@ class HelpCommand(SBABaseModel):
"""Calculate days since last update."""
if not self.updated_at:
return None
return (datetime.now() - self.updated_at).days
return (datetime.now(UTC) - self.updated_at).days
@property
def days_since_creation(self) -> int:
"""Calculate days since creation."""
return (datetime.now() - self.created_at).days
return (datetime.now(UTC) - self.created_at).days
@property
def popularity_score(self) -> float:
@ -153,28 +165,40 @@ class HelpCommand(SBABaseModel):
class HelpCommandSearchFilters(BaseModel):
"""Filters for searching help commands."""
name_contains: Optional[str] = None
category: Optional[str] = None
is_active: bool = True
# Sorting
sort_by: str = Field('name', description="Sort field: name, category, created_at, view_count, display_order")
sort_by: str = Field(
"name",
description="Sort field: name, category, created_at, view_count, display_order",
)
sort_desc: bool = Field(False, description="Sort in descending order")
# Pagination
page: int = Field(1, description="Page number (1-based)")
page_size: int = Field(25, description="Items per page")
@field_validator('sort_by')
@field_validator("sort_by")
@classmethod
def validate_sort_by(cls, v):
"""Validate sort field."""
valid_sorts = {'name', 'title', 'category', 'created_at', 'updated_at', 'view_count', 'display_order'}
valid_sorts = {
"name",
"title",
"category",
"created_at",
"updated_at",
"view_count",
"display_order",
}
if v not in valid_sorts:
raise ValueError(f"sort_by must be one of: {', '.join(valid_sorts)}")
return v
@field_validator('page')
@field_validator("page")
@classmethod
def validate_page(cls, v):
"""Validate page number."""
@ -182,7 +206,7 @@ class HelpCommandSearchFilters(BaseModel):
raise ValueError("Page number must be >= 1")
return v
@field_validator('page_size')
@field_validator("page_size")
@classmethod
def validate_page_size(cls, v):
"""Validate page size."""
@ -193,6 +217,7 @@ class HelpCommandSearchFilters(BaseModel):
class HelpCommandSearchResult(BaseModel):
"""Result of a help command search."""
help_commands: list[HelpCommand]
total_count: int
page: int
@ -213,6 +238,7 @@ class HelpCommandSearchResult(BaseModel):
class HelpCommandStats(BaseModel):
"""Statistics about help commands."""
total_commands: int
active_commands: int
total_views: int

View File

@ -3,7 +3,7 @@ Injury model for tracking player injuries
Represents an injury record with game timeline and status information.
"""
from typing import Optional, Any, Dict
from typing import Any
from pydantic import Field, model_validator
from models.base import SBABaseModel

View File

@ -3,11 +3,10 @@ Trade-specific data models for multi-team transactions.
Extends the base transaction system to support trades between multiple teams.
"""
from typing import List, Optional, Dict, Set
from typing import List, Optional
from dataclasses import dataclass
from enum import Enum
from models.player import Player
from models.team import Team, RosterType
from services.transaction_builder import TransactionMove

View File

@ -3,7 +3,7 @@ Transaction models for SBA transaction management
Represents transactions and player moves based on actual API structure.
"""
from typing import Optional, List
from typing import List
from pydantic import Field
from models.base import SBABaseModel

View File

@ -5,7 +5,6 @@ Provides common CRUD operations and error handling for all data services.
"""
import logging
import hashlib
import json
from typing import Optional, Type, TypeVar, Generic, Dict, Any, List, Tuple
from api.client import get_global_client, APIClient

File diff suppressed because it is too large Load Diff

View File

@ -7,7 +7,6 @@ from typing import List, Dict, Any, Optional, Tuple
from utils.logging import get_contextual_logger
from api.client import get_global_client
from models.decision import Decision
from models.player import Player
from exceptions import APIException

View File

@ -8,7 +8,6 @@ from typing import Optional, List
from services.base_service import BaseService
from models.draft_list import DraftList
from exceptions import APIException
logger = logging.getLogger(f'{__name__}.DraftListService')
@ -46,7 +45,6 @@ class DraftListService(BaseService[DraftList]):
Returns:
Tuple of (items list, total count)
"""
from typing import Any, Dict, List, Tuple
if isinstance(data, list):
return data, len(data)

View File

@ -8,7 +8,6 @@ from typing import Optional, List
from services.base_service import BaseService
from models.draft_pick import DraftPick
from exceptions import APIException
logger = logging.getLogger(f'{__name__}.DraftPickService')

View File

@ -3,15 +3,15 @@ Draft service for Discord Bot v2.0
Core draft business logic and state management. NO CACHING - draft state changes constantly.
"""
import logging
from typing import Optional, Dict, Any
from datetime import datetime, timedelta
from datetime import UTC, datetime, timedelta
from services.base_service import BaseService
from models.draft_data import DraftData
from exceptions import APIException
logger = logging.getLogger(f'{__name__}.DraftService')
logger = logging.getLogger(f"{__name__}.DraftService")
class DraftService(BaseService[DraftData]):
@ -30,7 +30,7 @@ class DraftService(BaseService[DraftData]):
def __init__(self):
"""Initialize draft service."""
super().__init__(DraftData, 'draftdata')
super().__init__(DraftData, "draftdata")
logger.debug("DraftService initialized")
async def get_draft_data(self) -> Optional[DraftData]:
@ -63,9 +63,7 @@ class DraftService(BaseService[DraftData]):
return None
async def update_draft_data(
self,
draft_id: int,
updates: Dict[str, Any]
self, draft_id: int, updates: Dict[str, Any]
) -> Optional[DraftData]:
"""
Update draft configuration.
@ -93,10 +91,7 @@ class DraftService(BaseService[DraftData]):
return None
async def set_timer(
self,
draft_id: int,
active: bool,
pick_minutes: Optional[int] = None
self, draft_id: int, active: bool, pick_minutes: Optional[int] = None
) -> Optional[DraftData]:
"""
Enable or disable draft timer.
@ -110,27 +105,31 @@ class DraftService(BaseService[DraftData]):
Updated DraftData instance
"""
try:
updates = {'timer': active}
updates = {"timer": active}
if pick_minutes is not None:
updates['pick_minutes'] = pick_minutes
updates["pick_minutes"] = pick_minutes
# Set deadline based on timer state
if active:
# Calculate new deadline
if pick_minutes:
deadline = datetime.now() + timedelta(minutes=pick_minutes)
deadline = datetime.now(UTC) + timedelta(minutes=pick_minutes)
else:
# Get current pick_minutes from existing data
current_data = await self.get_draft_data()
if current_data:
deadline = datetime.now() + timedelta(minutes=current_data.pick_minutes)
deadline = datetime.now(UTC) + timedelta(
minutes=current_data.pick_minutes
)
else:
deadline = datetime.now() + timedelta(minutes=2) # Default fallback
updates['pick_deadline'] = deadline
deadline = datetime.now(UTC) + timedelta(
minutes=2
) # Default fallback
updates["pick_deadline"] = deadline
else:
# Set deadline far in future when timer inactive
updates['pick_deadline'] = datetime.now() + timedelta(days=690)
updates["pick_deadline"] = datetime.now(UTC) + timedelta(days=690)
updated = await self.update_draft_data(draft_id, updates)
@ -147,9 +146,7 @@ class DraftService(BaseService[DraftData]):
return None
async def advance_pick(
self,
draft_id: int,
current_pick: int
self, draft_id: int, current_pick: int
) -> Optional[DraftData]:
"""
Advance to next pick in draft.
@ -200,12 +197,14 @@ class DraftService(BaseService[DraftData]):
return await self.get_draft_data()
# Update to next pick
updates = {'currentpick': next_pick}
updates = {"currentpick": next_pick}
# Reset deadline if timer is active
current_data = await self.get_draft_data()
if current_data and current_data.timer:
updates['pick_deadline'] = datetime.now() + timedelta(minutes=current_data.pick_minutes)
updates["pick_deadline"] = datetime.now(UTC) + timedelta(
minutes=current_data.pick_minutes
)
updated = await self.update_draft_data(draft_id, updates)
@ -221,10 +220,7 @@ class DraftService(BaseService[DraftData]):
return None
async def set_current_pick(
self,
draft_id: int,
overall: int,
reset_timer: bool = True
self, draft_id: int, overall: int, reset_timer: bool = True
) -> Optional[DraftData]:
"""
Manually set current pick (admin operation).
@ -238,12 +234,14 @@ class DraftService(BaseService[DraftData]):
Updated DraftData
"""
try:
updates = {'currentpick': overall}
updates = {"currentpick": overall}
if reset_timer:
current_data = await self.get_draft_data()
if current_data and current_data.timer:
updates['pick_deadline'] = datetime.now() + timedelta(minutes=current_data.pick_minutes)
updates["pick_deadline"] = datetime.now(UTC) + timedelta(
minutes=current_data.pick_minutes
)
updated = await self.update_draft_data(draft_id, updates)
@ -262,7 +260,7 @@ class DraftService(BaseService[DraftData]):
self,
draft_id: int,
ping_channel_id: Optional[int] = None,
result_channel_id: Optional[int] = None
result_channel_id: Optional[int] = None,
) -> Optional[DraftData]:
"""
Update draft Discord channel configuration.
@ -278,9 +276,9 @@ class DraftService(BaseService[DraftData]):
try:
updates = {}
if ping_channel_id is not None:
updates['ping_channel'] = ping_channel_id
updates["ping_channel"] = ping_channel_id
if result_channel_id is not None:
updates['result_channel'] = result_channel_id
updates["result_channel"] = result_channel_id
if not updates:
logger.warning("No channel updates provided")
@ -300,9 +298,7 @@ class DraftService(BaseService[DraftData]):
return None
async def reset_draft_deadline(
self,
draft_id: int,
minutes: Optional[int] = None
self, draft_id: int, minutes: Optional[int] = None
) -> Optional[DraftData]:
"""
Reset the current pick deadline.
@ -322,8 +318,8 @@ class DraftService(BaseService[DraftData]):
return None
minutes = current_data.pick_minutes
new_deadline = datetime.now() + timedelta(minutes=minutes)
updates = {'pick_deadline': new_deadline}
new_deadline = datetime.now(UTC) + timedelta(minutes=minutes)
updates = {"pick_deadline": new_deadline}
updated = await self.update_draft_data(draft_id, updates)
@ -358,9 +354,9 @@ class DraftService(BaseService[DraftData]):
# Pause the draft AND stop the timer
# Set deadline far in future so it doesn't expire while paused
updates = {
'paused': True,
'timer': False,
'pick_deadline': datetime.now() + timedelta(days=690)
"paused": True,
"timer": False,
"pick_deadline": datetime.now(UTC) + timedelta(days=690),
}
updated = await self.update_draft_data(draft_id, updates)
@ -395,16 +391,14 @@ class DraftService(BaseService[DraftData]):
pick_minutes = current_data.pick_minutes if current_data else 2
# Resume the draft AND restart the timer with fresh deadline
new_deadline = datetime.now() + timedelta(minutes=pick_minutes)
updates = {
'paused': False,
'timer': True,
'pick_deadline': new_deadline
}
new_deadline = datetime.now(UTC) + timedelta(minutes=pick_minutes)
updates = {"paused": False, "timer": True, "pick_deadline": new_deadline}
updated = await self.update_draft_data(draft_id, updates)
if updated:
logger.info(f"Draft resumed - timer restarted with {pick_minutes}min deadline")
logger.info(
f"Draft resumed - timer restarted with {pick_minutes}min deadline"
)
else:
logger.error("Failed to resume draft")

View File

@ -4,11 +4,11 @@ Draft Sheet Service
Handles writing draft picks to Google Sheets for public tracking.
Extends SheetsService to reuse authentication and async patterns.
"""
import asyncio
from typing import List, Optional, Tuple
from config import get_config
from exceptions import SheetsException
from services.sheets_service import SheetsService
from utils.logging import get_contextual_logger
@ -25,7 +25,7 @@ class DraftSheetService(SheetsService):
If None, will use path from config
"""
super().__init__(credentials_path)
self.logger = get_contextual_logger(f'{__name__}.DraftSheetService')
self.logger = get_contextual_logger(f"{__name__}.DraftSheetService")
self._config = get_config()
async def write_pick(
@ -35,7 +35,7 @@ class DraftSheetService(SheetsService):
orig_owner_abbrev: str,
owner_abbrev: str,
player_name: str,
swar: float
swar: float,
) -> bool:
"""
Write a single draft pick to the season's draft sheet.
@ -69,23 +69,19 @@ class DraftSheetService(SheetsService):
return False
try:
loop = asyncio.get_event_loop()
loop = asyncio.get_running_loop()
# Get pygsheets client
sheets = await loop.run_in_executor(None, self._get_client)
# Open the draft sheet by key
spreadsheet = await loop.run_in_executor(
None,
sheets.open_by_key,
sheet_key
None, sheets.open_by_key, sheet_key
)
# Get the worksheet
worksheet = await loop.run_in_executor(
None,
spreadsheet.worksheet_by_title,
self._config.draft_sheet_worksheet
None, spreadsheet.worksheet_by_title, self._config.draft_sheet_worksheet
)
# Prepare pick data (4 columns: orig_owner, owner, player, swar)
@ -94,12 +90,12 @@ class DraftSheetService(SheetsService):
# Calculate row (overall + 1 to leave row 1 for headers)
row = overall + 1
start_column = self._config.draft_sheet_start_column
cell_range = f'{start_column}{row}'
cell_range = f"{start_column}{row}"
# Write the pick data
await loop.run_in_executor(
None,
lambda: worksheet.update_values(crange=cell_range, values=pick_data)
lambda: worksheet.update_values(crange=cell_range, values=pick_data),
)
self.logger.info(
@ -107,7 +103,7 @@ class DraftSheetService(SheetsService):
season=season,
overall=overall,
player=player_name,
owner=owner_abbrev
owner=owner_abbrev,
)
return True
@ -116,14 +112,12 @@ class DraftSheetService(SheetsService):
f"Failed to write pick to draft sheet: {e}",
season=season,
overall=overall,
player=player_name
player=player_name,
)
return False
async def write_picks_batch(
self,
season: int,
picks: List[Tuple[int, str, str, str, float]]
self, season: int, picks: List[Tuple[int, str, str, str, float]]
) -> Tuple[int, int]:
"""
Write multiple draft picks to the sheet in a single batch operation.
@ -152,23 +146,19 @@ class DraftSheetService(SheetsService):
return (0, 0)
try:
loop = asyncio.get_event_loop()
loop = asyncio.get_running_loop()
# Get pygsheets client
sheets = await loop.run_in_executor(None, self._get_client)
# Open the draft sheet by key
spreadsheet = await loop.run_in_executor(
None,
sheets.open_by_key,
sheet_key
None, sheets.open_by_key, sheet_key
)
# Get the worksheet
worksheet = await loop.run_in_executor(
None,
spreadsheet.worksheet_by_title,
self._config.draft_sheet_worksheet
None, spreadsheet.worksheet_by_title, self._config.draft_sheet_worksheet
)
# Sort picks by overall to find range bounds
@ -181,7 +171,7 @@ class DraftSheetService(SheetsService):
# Build a 2D array for the entire range (sparse - empty rows for missing picks)
# Row index 0 = min_overall, row index N = max_overall
num_rows = max_overall - min_overall + 1
batch_data: List[List[str]] = [['', '', '', ''] for _ in range(num_rows)]
batch_data: List[List[str]] = [["", "", "", ""] for _ in range(num_rows)]
# Populate the batch data array
for overall, orig_owner, owner, player_name, swar in sorted_picks:
@ -194,23 +184,23 @@ class DraftSheetService(SheetsService):
end_column = chr(ord(start_column) + 3) # 4 columns: D -> G
end_row = max_overall + 1
cell_range = f'{start_column}{start_row}:{end_column}{end_row}'
cell_range = f"{start_column}{start_row}:{end_column}{end_row}"
self.logger.info(
f"Writing {len(picks)} picks in single batch to range {cell_range}",
season=season
season=season,
)
# Write all picks in a single API call
await loop.run_in_executor(
None,
lambda: worksheet.update_values(crange=cell_range, values=batch_data)
lambda: worksheet.update_values(crange=cell_range, values=batch_data),
)
self.logger.info(
f"Batch write complete: {len(picks)} picks written successfully",
season=season,
total_picks=len(picks)
total_picks=len(picks),
)
return (len(picks), 0)
@ -219,10 +209,7 @@ class DraftSheetService(SheetsService):
return (0, len(picks))
async def clear_picks_range(
self,
season: int,
start_overall: int = 1,
end_overall: int = 512
self, season: int, start_overall: int = 1, end_overall: int = 512
) -> bool:
"""
Clear a range of picks from the draft sheet.
@ -247,23 +234,19 @@ class DraftSheetService(SheetsService):
return False
try:
loop = asyncio.get_event_loop()
loop = asyncio.get_running_loop()
# Get pygsheets client
sheets = await loop.run_in_executor(None, self._get_client)
# Open the draft sheet by key
spreadsheet = await loop.run_in_executor(
None,
sheets.open_by_key,
sheet_key
None, sheets.open_by_key, sheet_key
)
# Get the worksheet
worksheet = await loop.run_in_executor(
None,
spreadsheet.worksheet_by_title,
self._config.draft_sheet_worksheet
None, spreadsheet.worksheet_by_title, self._config.draft_sheet_worksheet
)
# Calculate range (4 columns: D through G)
@ -274,24 +257,23 @@ class DraftSheetService(SheetsService):
# Convert start column letter to end column (D -> G for 4 columns)
end_column = chr(ord(start_column) + 3)
cell_range = f'{start_column}{start_row}:{end_column}{end_row}'
cell_range = f"{start_column}{start_row}:{end_column}{end_row}"
# Clear the range by setting empty values
# We create a 2D array of empty strings
num_rows = end_row - start_row + 1
empty_data = [['', '', '', ''] for _ in range(num_rows)]
empty_data = [["", "", "", ""] for _ in range(num_rows)]
await loop.run_in_executor(
None,
lambda: worksheet.update_values(
crange=f'{start_column}{start_row}',
values=empty_data
)
crange=f"{start_column}{start_row}", values=empty_data
),
)
self.logger.info(
f"Cleared picks {start_overall}-{end_overall} from draft sheet",
season=season
season=season,
)
return True

View File

@ -4,93 +4,88 @@ Giphy Service for Discord Bot v2.0
Provides async interface to Giphy API with disappointment-based search phrases.
Used for Easter egg features like the soak command.
"""
import random
from typing import List, Optional
from urllib.parse import quote
import aiohttp
from utils.logging import get_contextual_logger
from config import get_config
from exceptions import APIException
# Disappointment tier configuration
DISAPPOINTMENT_TIERS = {
'tier_1': {
'max_seconds': 1800, # 30 minutes
'phrases': [
"tier_1": {
"max_seconds": 1800, # 30 minutes
"phrases": [
"extremely disappointed",
"so disappointed",
"are you kidding me",
"seriously",
"unbelievable"
"unbelievable",
],
'description': "Maximum Disappointment"
"description": "Maximum Disappointment",
},
'tier_2': {
'max_seconds': 7200, # 2 hours
'phrases': [
"tier_2": {
"max_seconds": 7200, # 2 hours
"phrases": [
"very disappointed",
"can't believe you",
"not happy",
"shame on you",
"facepalm"
"facepalm",
],
'description': "Severe Disappointment"
"description": "Severe Disappointment",
},
'tier_3': {
'max_seconds': 21600, # 6 hours
'phrases': [
"tier_3": {
"max_seconds": 21600, # 6 hours
"phrases": [
"disappointed",
"not impressed",
"shaking head",
"eye roll",
"really"
"really",
],
'description': "Strong Disappointment"
"description": "Strong Disappointment",
},
'tier_4': {
'max_seconds': 86400, # 24 hours
'phrases': [
"tier_4": {
"max_seconds": 86400, # 24 hours
"phrases": [
"mildly disappointed",
"not great",
"could be better",
"sigh",
"seriously"
"seriously",
],
'description': "Moderate Disappointment"
"description": "Moderate Disappointment",
},
'tier_5': {
'max_seconds': 604800, # 7 days
'phrases': [
"slightly disappointed",
"oh well",
"shrug",
"meh",
"not bad"
],
'description': "Mild Disappointment"
"tier_5": {
"max_seconds": 604800, # 7 days
"phrases": ["slightly disappointed", "oh well", "shrug", "meh", "not bad"],
"description": "Mild Disappointment",
},
'tier_6': {
'max_seconds': float('inf'), # 7+ days
'phrases': [
"tier_6": {
"max_seconds": float("inf"), # 7+ days
"phrases": [
"not disappointed",
"relieved",
"proud",
"been worse",
"fine i guess"
"fine i guess",
],
'description': "Minimal Disappointment"
"description": "Minimal Disappointment",
},
'first_ever': {
'phrases': [
"first_ever": {
"phrases": [
"here we go",
"oh boy",
"uh oh",
"getting started",
"and so it begins"
"and so it begins",
],
'description': "The Beginning"
}
"description": "The Beginning",
},
}
@ -102,7 +97,7 @@ class GiphyService:
self.config = get_config()
self.api_key = self.config.giphy_api_key
self.translate_url = self.config.giphy_translate_url
self.logger = get_contextual_logger(f'{__name__}.GiphyService')
self.logger = get_contextual_logger(f"{__name__}.GiphyService")
def get_tier_for_seconds(self, seconds_elapsed: Optional[int]) -> str:
"""
@ -115,13 +110,13 @@ class GiphyService:
Tier key string (e.g., 'tier_1', 'first_ever')
"""
if seconds_elapsed is None:
return 'first_ever'
return "first_ever"
for tier_key in ['tier_1', 'tier_2', 'tier_3', 'tier_4', 'tier_5', 'tier_6']:
if seconds_elapsed <= DISAPPOINTMENT_TIERS[tier_key]['max_seconds']:
for tier_key in ["tier_1", "tier_2", "tier_3", "tier_4", "tier_5", "tier_6"]:
if seconds_elapsed <= DISAPPOINTMENT_TIERS[tier_key]["max_seconds"]:
return tier_key
return 'tier_6' # Fallback to lowest disappointment
return "tier_6" # Fallback to lowest disappointment
def get_random_phrase_for_tier(self, tier_key: str) -> str:
"""
@ -139,7 +134,7 @@ class GiphyService:
if tier_key not in DISAPPOINTMENT_TIERS:
raise ValueError(f"Invalid tier key: {tier_key}")
phrases = DISAPPOINTMENT_TIERS[tier_key]['phrases']
phrases = DISAPPOINTMENT_TIERS[tier_key]["phrases"]
return random.choice(phrases)
def get_tier_description(self, tier_key: str) -> str:
@ -158,7 +153,7 @@ class GiphyService:
if tier_key not in DISAPPOINTMENT_TIERS:
raise ValueError(f"Invalid tier key: {tier_key}")
return DISAPPOINTMENT_TIERS[tier_key]['description']
return DISAPPOINTMENT_TIERS[tier_key]["description"]
async def get_disappointment_gif(self, tier_key: str) -> str:
"""
@ -181,7 +176,7 @@ class GiphyService:
if tier_key not in DISAPPOINTMENT_TIERS:
raise ValueError(f"Invalid tier key: {tier_key}")
phrases = DISAPPOINTMENT_TIERS[tier_key]['phrases']
phrases = DISAPPOINTMENT_TIERS[tier_key]["phrases"]
# Shuffle phrases for variety and retry capability
shuffled_phrases = random.sample(phrases, len(phrases))
@ -189,39 +184,61 @@ class GiphyService:
async with aiohttp.ClientSession() as session:
for phrase in shuffled_phrases:
try:
url = f"{self.translate_url}?s={phrase}&api_key={self.api_key}"
url = f"{self.translate_url}?s={quote(phrase)}&api_key={quote(self.api_key)}"
async with session.get(url, timeout=aiohttp.ClientTimeout(total=5)) as resp:
async with session.get(
url, timeout=aiohttp.ClientTimeout(total=5)
) as resp:
if resp.status == 200:
data = await resp.json()
# Filter out Trump GIFs (legacy behavior)
gif_title = data.get('data', {}).get('title', '').lower()
if 'trump' in gif_title:
self.logger.debug(f"Filtered out Trump GIF for phrase: {phrase}")
gif_title = data.get("data", {}).get("title", "").lower()
if "trump" in gif_title:
self.logger.debug(
f"Filtered out Trump GIF for phrase: {phrase}"
)
continue
# Get the actual GIF image URL, not the web page URL
gif_url = data.get('data', {}).get('images', {}).get('original', {}).get('url')
gif_url = (
data.get("data", {})
.get("images", {})
.get("original", {})
.get("url")
)
if gif_url:
self.logger.info(f"Successfully fetched GIF for phrase: {phrase}", gif_url=gif_url)
self.logger.info(
f"Successfully fetched GIF for phrase: {phrase}",
gif_url=gif_url,
)
return gif_url
else:
self.logger.warning(f"No GIF URL in response for phrase: {phrase}")
self.logger.warning(
f"No GIF URL in response for phrase: {phrase}"
)
else:
self.logger.warning(f"Giphy API returned status {resp.status} for phrase: {phrase}")
self.logger.warning(
f"Giphy API returned status {resp.status} for phrase: {phrase}"
)
except aiohttp.ClientError as e:
self.logger.error(f"HTTP error fetching GIF for phrase '{phrase}': {e}")
self.logger.error(
f"HTTP error fetching GIF for phrase '{phrase}': {e}"
)
except Exception as e:
self.logger.error(f"Unexpected error fetching GIF for phrase '{phrase}': {e}")
self.logger.error(
f"Unexpected error fetching GIF for phrase '{phrase}': {e}"
)
# All phrases failed
error_msg = f"Failed to fetch any GIF for tier: {tier_key}"
self.logger.error(error_msg)
raise APIException(error_msg)
async def get_gif(self, phrase: Optional[str] = None, phrase_options: Optional[List[str]] = None) -> str:
async def get_gif(
self, phrase: Optional[str] = None, phrase_options: Optional[List[str]] = None
) -> str:
"""
Fetch a GIF from Giphy based on a phrase or list of phrase options.
@ -237,9 +254,11 @@ class GiphyService:
APIException: If all GIF fetch attempts fail
"""
if phrase is None and phrase_options is None:
raise ValueError('To get a gif, one of `phrase` or `phrase_options` must be provided')
raise ValueError(
"To get a gif, one of `phrase` or `phrase_options` must be provided"
)
search_phrase = 'send help'
search_phrase = "send help"
if phrase is not None:
search_phrase = phrase
elif phrase_options is not None:
@ -250,33 +269,53 @@ class GiphyService:
while attempts < 3:
attempts += 1
try:
url = f"{self.translate_url}?s={search_phrase}&api_key={self.api_key}"
url = f"{self.translate_url}?s={quote(search_phrase)}&api_key={quote(self.api_key)}"
async with session.get(url, timeout=aiohttp.ClientTimeout(total=3)) as resp:
async with session.get(
url, timeout=aiohttp.ClientTimeout(total=3)
) as resp:
if resp.status != 200:
self.logger.warning(f"Giphy API returned status {resp.status} for phrase: {search_phrase}")
self.logger.warning(
f"Giphy API returned status {resp.status} for phrase: {search_phrase}"
)
continue
data = await resp.json()
# Filter out Trump GIFs (legacy behavior)
gif_title = data.get('data', {}).get('title', '').lower()
if 'trump' in gif_title:
self.logger.debug(f"Filtered out Trump GIF for phrase: {search_phrase}")
gif_title = data.get("data", {}).get("title", "").lower()
if "trump" in gif_title:
self.logger.debug(
f"Filtered out Trump GIF for phrase: {search_phrase}"
)
continue
# Get the actual GIF image URL, not the web page URL
gif_url = data.get('data', {}).get('images', {}).get('original', {}).get('url')
gif_url = (
data.get("data", {})
.get("images", {})
.get("original", {})
.get("url")
)
if gif_url:
self.logger.info(f"Successfully fetched GIF for phrase: {search_phrase}", gif_url=gif_url)
self.logger.info(
f"Successfully fetched GIF for phrase: {search_phrase}",
gif_url=gif_url,
)
return gif_url
else:
self.logger.warning(f"No GIF URL in response for phrase: {search_phrase}")
self.logger.warning(
f"No GIF URL in response for phrase: {search_phrase}"
)
except aiohttp.ClientError as e:
self.logger.error(f"HTTP error fetching GIF for phrase '{search_phrase}': {e}")
self.logger.error(
f"HTTP error fetching GIF for phrase '{search_phrase}': {e}"
)
except Exception as e:
self.logger.error(f"Unexpected error fetching GIF for phrase '{search_phrase}': {e}")
self.logger.error(
f"Unexpected error fetching GIF for phrase '{search_phrase}': {e}"
)
# All attempts failed
error_msg = f"Failed to fetch any GIF for phrase: {search_phrase}"

View File

@ -5,7 +5,6 @@ Modern async service layer for managing help commands with full type safety.
Allows admins and help editors to create custom help topics for league documentation,
resources, FAQs, links, and guides.
"""
import math
from typing import Optional, List
from utils.logging import get_contextual_logger

View File

@ -8,7 +8,6 @@ from typing import Optional, List
from services.base_service import BaseService
from models.injury import Injury
from exceptions import APIException
logger = logging.getLogger(f'{__name__}.InjuryService')

View File

@ -7,7 +7,6 @@ Handles roster operations and validation.
import logging
from typing import Optional, List, Dict
from services.base_service import BaseService
from models.roster import TeamRoster
from models.player import Player
from models.transaction import RosterValidation

View File

@ -6,9 +6,7 @@ Handles game schedule and results retrieval and processing.
import logging
from typing import Optional, List, Dict, Tuple
from services.base_service import BaseService
from models.game import Game
from exceptions import APIException
logger = logging.getLogger(f'{__name__}.ScheduleService')

View File

@ -3,8 +3,9 @@ Scorebug Service
Handles reading live game data from Google Sheets scorecards for real-time score displays.
"""
import asyncio
from typing import Dict, List, Any, Optional
from typing import Dict, Any, Optional
import pygsheets
from utils.logging import get_contextual_logger
@ -16,30 +17,32 @@ class ScorebugData:
"""Data class for scorebug information."""
def __init__(self, data: Dict[str, Any]):
self.away_team_id = data.get('away_team_id', 1)
self.home_team_id = data.get('home_team_id', 1)
self.header = data.get('header', '')
self.away_score = data.get('away_score', 0)
self.home_score = data.get('home_score', 0)
self.which_half = data.get('which_half', '')
self.inning = data.get('inning', 1)
self.is_final = data.get('is_final', False)
self.outs = data.get('outs', 0)
self.win_percentage = data.get('win_percentage', 50.0)
self.away_team_id = data.get("away_team_id", 1)
self.home_team_id = data.get("home_team_id", 1)
self.header = data.get("header", "")
self.away_score = data.get("away_score", 0)
self.home_score = data.get("home_score", 0)
self.which_half = data.get("which_half", "")
self.inning = data.get("inning", 1)
self.is_final = data.get("is_final", False)
self.outs = data.get("outs", 0)
self.win_percentage = data.get("win_percentage")
# Current matchup information
self.pitcher_name = data.get('pitcher_name', '')
self.pitcher_url = data.get('pitcher_url', '')
self.pitcher_stats = data.get('pitcher_stats', '')
self.batter_name = data.get('batter_name', '')
self.batter_url = data.get('batter_url', '')
self.batter_stats = data.get('batter_stats', '')
self.on_deck_name = data.get('on_deck_name', '')
self.in_hole_name = data.get('in_hole_name', '')
self.pitcher_name = data.get("pitcher_name", "")
self.pitcher_url = data.get("pitcher_url", "")
self.pitcher_stats = data.get("pitcher_stats", "")
self.batter_name = data.get("batter_name", "")
self.batter_url = data.get("batter_url", "")
self.batter_stats = data.get("batter_stats", "")
self.on_deck_name = data.get("on_deck_name", "")
self.in_hole_name = data.get("in_hole_name", "")
# Additional data
self.runners = data.get('runners', []) # [Catcher, On First, On Second, On Third]
self.summary = data.get('summary', []) # Play-by-play summary lines
self.runners = data.get(
"runners", []
) # [Catcher, On First, On Second, On Third]
self.summary = data.get("summary", []) # Play-by-play summary lines
@property
def score_line(self) -> str:
@ -79,12 +82,10 @@ class ScorebugService(SheetsService):
credentials_path: Path to service account credentials JSON
"""
super().__init__(credentials_path)
self.logger = get_contextual_logger(f'{__name__}.ScorebugService')
self.logger = get_contextual_logger(f"{__name__}.ScorebugService")
async def read_scorebug_data(
self,
sheet_url_or_key: str,
full_length: bool = True
self, sheet_url_or_key: str, full_length: bool = True
) -> ScorebugData:
"""
Read live scorebug data from Google Sheets scorecard.
@ -107,24 +108,28 @@ class ScorebugService(SheetsService):
scorecard = await self.open_scorecard(sheet_url_or_key)
self.logger.debug(f" ✅ Scorecard opened successfully")
loop = asyncio.get_event_loop()
loop = asyncio.get_running_loop()
# Get Scorebug tab
scorebug_tab = await loop.run_in_executor(
None,
scorecard.worksheet_by_title,
'Scorebug'
None, scorecard.worksheet_by_title, "Scorebug"
)
# Read all data from B2:S20 for efficiency
all_data = await loop.run_in_executor(
None,
lambda: scorebug_tab.get_values('B2', 'S20', include_tailing_empty_rows=True)
lambda: scorebug_tab.get_values(
"B2", "S20", include_tailing_empty_rows=True
),
)
self.logger.debug(f"📊 Raw scorebug data dimensions: {len(all_data)} rows")
self.logger.debug(f"📊 First row length: {len(all_data[0]) if all_data else 0} columns")
self.logger.debug(f"📊 Reading from range B2:S20 (columns B-S = indices 0-17 in data)")
self.logger.debug(
f"📊 First row length: {len(all_data[0]) if all_data else 0} columns"
)
self.logger.debug(
f"📊 Reading from range B2:S20 (columns B-S = indices 0-17 in data)"
)
self.logger.debug(f"📊 Raw data structure (all rows):")
for idx, row in enumerate(all_data):
self.logger.debug(f" Row {idx} (Sheet row {idx + 2}): {row}")
@ -133,8 +138,13 @@ class ScorebugService(SheetsService):
# This corresponds to columns B-G (indices 0-5 in all_data)
# Rows 2-8 in sheet (indices 0-6 in all_data)
game_state = [
all_data[0][:6], all_data[1][:6], all_data[2][:6], all_data[3][:6],
all_data[4][:6], all_data[5][:6], all_data[6][:6]
all_data[0][:6],
all_data[1][:6],
all_data[2][:6],
all_data[3][:6],
all_data[4][:6],
all_data[5][:6],
all_data[6][:6],
]
self.logger.debug(f"🎮 Extracted game_state (B2:G8):")
@ -145,12 +155,24 @@ class ScorebugService(SheetsService):
# game_state[3] is away team row (Sheet row 5), game_state[4] is home team row (Sheet row 6)
# First column (index 0) contains the team ID - this is column B in the sheet
self.logger.debug(f"🏟️ Extracting team IDs from game_state:")
self.logger.debug(f" Away team row: game_state[3] = Sheet row 5, column B (index 0)")
self.logger.debug(f" Home team row: game_state[4] = Sheet row 6, column B (index 0)")
self.logger.debug(
f" Away team row: game_state[3] = Sheet row 5, column B (index 0)"
)
self.logger.debug(
f" Home team row: game_state[4] = Sheet row 6, column B (index 0)"
)
try:
away_team_id_raw = game_state[3][0] if len(game_state) > 3 and len(game_state[3]) > 0 else None
home_team_id_raw = game_state[4][0] if len(game_state) > 4 and len(game_state[4]) > 0 else None
away_team_id_raw = (
game_state[3][0]
if len(game_state) > 3 and len(game_state[3]) > 0
else None
)
home_team_id_raw = (
game_state[4][0]
if len(game_state) > 4 and len(game_state[4]) > 0
else None
)
self.logger.debug(f" Raw away team ID value: '{away_team_id_raw}'")
self.logger.debug(f" Raw home team ID value: '{home_team_id_raw}'")
@ -158,61 +180,97 @@ class ScorebugService(SheetsService):
away_team_id = int(away_team_id_raw) if away_team_id_raw else None
home_team_id = int(home_team_id_raw) if home_team_id_raw else None
self.logger.debug(f" ✅ Parsed team IDs - Away: {away_team_id}, Home: {home_team_id}")
self.logger.debug(
f" ✅ Parsed team IDs - Away: {away_team_id}, Home: {home_team_id}"
)
if away_team_id is None or home_team_id is None:
raise ValueError(f'Team IDs not found in scorebug (away: {away_team_id}, home: {home_team_id})')
raise ValueError(
f"Team IDs not found in scorebug (away: {away_team_id}, home: {home_team_id})"
)
except (ValueError, IndexError) as e:
self.logger.error(f"❌ Failed to parse team IDs from scorebug: {e}")
raise ValueError(f'Could not extract team IDs from scorecard')
raise ValueError(f"Could not extract team IDs from scorecard")
# Parse game state
self.logger.debug(f"📝 Parsing header from game_state[0][0] (Sheet B2):")
header = game_state[0][0] if game_state[0] else ''
is_final = header[-5:] == 'FINAL' if header else False
header = game_state[0][0] if game_state[0] else ""
is_final = header[-5:] == "FINAL" if header else False
self.logger.debug(f" Header value: '{header}'")
self.logger.debug(f" Is Final: {is_final}")
# Parse scores with validation
self.logger.debug(f"⚾ Parsing scores:")
self.logger.debug(f" Away score: game_state[3][2] (Sheet row 5, column D)")
self.logger.debug(f" Home score: game_state[4][2] (Sheet row 6, column D)")
self.logger.debug(
f" Away score: game_state[3][2] (Sheet row 5, column D)"
)
self.logger.debug(
f" Home score: game_state[4][2] (Sheet row 6, column D)"
)
try:
away_score_raw = game_state[3][2] if len(game_state) > 3 and len(game_state[3]) > 2 else '0'
self.logger.debug(f" Raw away score value: '{away_score_raw}' (type: {type(away_score_raw).__name__})")
away_score = int(away_score_raw) if away_score_raw != '' else 0
away_score_raw = (
game_state[3][2]
if len(game_state) > 3 and len(game_state[3]) > 2
else "0"
)
self.logger.debug(
f" Raw away score value: '{away_score_raw}' (type: {type(away_score_raw).__name__})"
)
away_score = int(away_score_raw) if away_score_raw != "" else 0
self.logger.debug(f" ✅ Parsed away score: {away_score}")
except (ValueError, IndexError) as e:
self.logger.warning(f" ⚠️ Failed to parse away score: {e}")
away_score = 0
try:
home_score_raw = game_state[4][2] if len(game_state) > 4 and len(game_state[4]) > 2 else '0'
self.logger.debug(f" Raw home score value: '{home_score_raw}' (type: {type(home_score_raw).__name__})")
home_score = int(home_score_raw) if home_score_raw != '' else 0
home_score_raw = (
game_state[4][2]
if len(game_state) > 4 and len(game_state[4]) > 2
else "0"
)
self.logger.debug(
f" Raw home score value: '{home_score_raw}' (type: {type(home_score_raw).__name__})"
)
home_score = int(home_score_raw) if home_score_raw != "" else 0
self.logger.debug(f" ✅ Parsed home score: {home_score}")
except (ValueError, IndexError) as e:
self.logger.warning(f" ⚠️ Failed to parse home score: {e}")
home_score = 0
try:
inning_raw = game_state[3][5] if len(game_state) > 3 and len(game_state[3]) > 5 else '0'
self.logger.debug(f" Raw inning value: '{inning_raw}' (type: {type(inning_raw).__name__})")
inning = int(inning_raw) if inning_raw != '' else 1
inning_raw = (
game_state[3][5]
if len(game_state) > 3 and len(game_state[3]) > 5
else "0"
)
self.logger.debug(
f" Raw inning value: '{inning_raw}' (type: {type(inning_raw).__name__})"
)
inning = int(inning_raw) if inning_raw != "" else 1
self.logger.debug(f" ✅ Parsed inning: {inning}")
except (ValueError, IndexError) as e:
self.logger.warning(f" ⚠️ Failed to parse home score: {e}")
inning = 1
self.logger.debug(f"⏱️ Parsing game state from game_state[3][4] (Sheet row 5, column F):")
which_half = game_state[3][4] if len(game_state) > 3 and len(game_state[3]) > 4 else ''
self.logger.debug(
f"⏱️ Parsing game state from game_state[3][4] (Sheet row 5, column F):"
)
which_half = (
game_state[3][4]
if len(game_state) > 3 and len(game_state[3]) > 4
else ""
)
self.logger.debug(f" Which half value: '{which_half}'")
# Parse outs from all_data[4][4] (Sheet F6 - columns start at B, so F=index 4)
self.logger.debug(f"🔢 Parsing outs from F6 (all_data[4][4]):")
try:
outs_raw = all_data[4][4] if len(all_data) > 4 and len(all_data[4]) > 4 else '0'
outs_raw = (
all_data[4][4]
if len(all_data) > 4 and len(all_data[4]) > 4
else "0"
)
self.logger.debug(f" Raw outs value: '{outs_raw}'")
# Handle "2" or any number
outs = int(outs_raw) if outs_raw and str(outs_raw).strip() else 0
@ -232,39 +290,95 @@ class ScorebugService(SheetsService):
]
# Pitcher: matchups[0][0]=name, [1]=URL, [2]=stats
pitcher_name = matchups[0][0] if len(matchups[0]) > 0 else ''
pitcher_url = matchups[0][1] if len(matchups[0]) > 1 else ''
pitcher_stats = matchups[0][2] if len(matchups[0]) > 2 else ''
self.logger.debug(f" Pitcher: {pitcher_name} | {pitcher_stats} | {pitcher_url}")
pitcher_name = matchups[0][0] if len(matchups[0]) > 0 else ""
pitcher_url = matchups[0][1] if len(matchups[0]) > 1 else ""
pitcher_stats = matchups[0][2] if len(matchups[0]) > 2 else ""
self.logger.debug(
f" Pitcher: {pitcher_name} | {pitcher_stats} | {pitcher_url}"
)
# Batter: matchups[1][0]=name, [1]=URL, [2]=stats, [3]=order, [4]=position
batter_name = matchups[1][0] if len(matchups[1]) > 0 else ''
batter_url = matchups[1][1] if len(matchups[1]) > 1 else ''
batter_stats = matchups[1][2] if len(matchups[1]) > 2 else ''
self.logger.debug(f" Batter: {batter_name} | {batter_stats} | {batter_url}")
batter_name = matchups[1][0] if len(matchups[1]) > 0 else ""
batter_url = matchups[1][1] if len(matchups[1]) > 1 else ""
batter_stats = matchups[1][2] if len(matchups[1]) > 2 else ""
self.logger.debug(
f" Batter: {batter_name} | {batter_stats} | {batter_url}"
)
# On Deck: matchups[2][0]=name
on_deck_name = matchups[2][0] if len(matchups[2]) > 0 else ''
on_deck_url = matchups[2][1] if len(matchups[2]) > 1 else ''
on_deck_name = matchups[2][0] if len(matchups[2]) > 0 else ""
on_deck_url = matchups[2][1] if len(matchups[2]) > 1 else ""
self.logger.debug(f" On Deck: {on_deck_name}")
# In Hole: matchups[3][0]=name
in_hole_name = matchups[3][0] if len(matchups[3]) > 0 else ''
in_hole_url = matchups[3][1] if len(matchups[3]) > 1 else ''
in_hole_name = matchups[3][0] if len(matchups[3]) > 0 else ""
in_hole_url = matchups[3][1] if len(matchups[3]) > 1 else ""
self.logger.debug(f" In Hole: {in_hole_name}")
# Parse win percentage from all_data[6][2] (Sheet D8 - row 8, column D)
self.logger.debug(f"📈 Parsing win percentage from D8 (all_data[6][2]):")
# Parse win percentage from C8 (team abbrev) and D8 (percentage)
# C8 = all_data[6][1] = winning team abbreviation
# D8 = all_data[6][2] = win probability percentage
# The sheet outputs the LEADING team's win%, so we need to
# normalize to home team's win% for the progress bar.
self.logger.debug(
f"📈 Parsing win percentage from C8:D8 (all_data[6][1:3]):"
)
try:
win_pct_raw = all_data[6][2] if len(all_data) > 6 and len(all_data[6]) > 2 else '50%'
win_pct_team_raw = (
all_data[6][1]
if len(all_data) > 6 and len(all_data[6]) > 1
else None
)
win_pct_raw = (
all_data[6][2]
if len(all_data) > 6 and len(all_data[6]) > 2
else None
)
self.logger.debug(f" Raw win percentage team: '{win_pct_team_raw}'")
self.logger.debug(f" Raw win percentage value: '{win_pct_raw}'")
# Remove % sign if present and convert to float
win_pct_str = str(win_pct_raw).replace('%', '').strip()
win_percentage = float(win_pct_str) if win_pct_str else 50.0
self.logger.debug(f" ✅ Parsed win percentage: {win_percentage}%")
if win_pct_raw is None or str(win_pct_raw).strip() == "":
self.logger.info(
f" Win percentage unavailable (raw value: '{win_pct_raw}')"
)
win_percentage = None
else:
# Remove % sign if present and convert to float
win_pct_str = str(win_pct_raw).replace("%", "").strip()
win_percentage = float(win_pct_str)
# Handle 0.0-1.0 range (pygsheets may return decimal like 0.75)
if 0.0 <= win_percentage <= 1.0:
win_percentage = win_percentage * 100
# The sheet gives the LEADING team's win%.
# Progress bar expects HOME team's win%.
# Compare C8 abbreviation to home team abbreviation to orient correctly.
home_abbrev_raw = (
game_state[4][1]
if len(game_state) > 4 and len(game_state[4]) > 1
else ""
)
win_pct_team = (
str(win_pct_team_raw).strip() if win_pct_team_raw else ""
)
if win_pct_team and win_pct_team != home_abbrev_raw:
# The percentage belongs to the away team, flip for home perspective
self.logger.debug(
f" Win% team '{win_pct_team}' is away (home is '{home_abbrev_raw}'), "
f"flipping {win_percentage}% -> {100 - win_percentage}%"
)
win_percentage = 100 - win_percentage
self.logger.debug(
f" ✅ Parsed home win percentage: {win_percentage}%"
)
except (ValueError, IndexError, AttributeError) as e:
self.logger.warning(f" ⚠️ Failed to parse win percentage: {e}")
win_percentage = 50.0
self.logger.info(
f" Win percentage could not be parsed (raw value: '{win_pct_raw}'): {e}"
)
win_percentage = None
self.logger.debug(f"📊 Final parsed values:")
self.logger.debug(f" Away team {away_team_id}: {away_score}")
@ -281,10 +395,10 @@ class ScorebugService(SheetsService):
# Each runner is [name, URL]
self.logger.debug(f"🏃 Extracting runners from K11:L14:")
runners = [
all_data[9][9:11] if len(all_data) > 9 else [], # Catcher (row 11)
all_data[9][9:11] if len(all_data) > 9 else [], # Catcher (row 11)
all_data[10][9:11] if len(all_data) > 10 else [], # On First (row 12)
all_data[11][9:11] if len(all_data) > 11 else [], # On Second (row 13)
all_data[12][9:11] if len(all_data) > 12 else [] # On Third (row 14)
all_data[12][9:11] if len(all_data) > 12 else [], # On Third (row 14)
]
self.logger.debug(f" Catcher: {runners[0]}")
self.logger.debug(f" On First: {runners[1]}")
@ -308,28 +422,30 @@ class ScorebugService(SheetsService):
self.logger.debug(f"✅ Scorebug data extraction complete!")
scorebug_data = ScorebugData({
'away_team_id': away_team_id,
'home_team_id': home_team_id,
'header': header,
'away_score': away_score,
'home_score': home_score,
'which_half': which_half,
'inning': inning,
'is_final': is_final,
'outs': outs,
'win_percentage': win_percentage,
'pitcher_name': pitcher_name,
'pitcher_url': pitcher_url,
'pitcher_stats': pitcher_stats,
'batter_name': batter_name,
'batter_url': batter_url,
'batter_stats': batter_stats,
'on_deck_name': on_deck_name,
'in_hole_name': in_hole_name,
'runners': runners, # [Catcher, On First, On Second, On Third], each is [name, URL]
'summary': summary # Play-by-play lines from R3:S20
})
scorebug_data = ScorebugData(
{
"away_team_id": away_team_id,
"home_team_id": home_team_id,
"header": header,
"away_score": away_score,
"home_score": home_score,
"which_half": which_half,
"inning": inning,
"is_final": is_final,
"outs": outs,
"win_percentage": win_percentage,
"pitcher_name": pitcher_name,
"pitcher_url": pitcher_url,
"pitcher_stats": pitcher_stats,
"batter_name": batter_name,
"batter_url": batter_url,
"batter_stats": batter_stats,
"on_deck_name": on_deck_name,
"in_hole_name": in_hole_name,
"runners": runners, # [Catcher, On First, On Second, On Third], each is [name, URL]
"summary": summary, # Play-by-play lines from R3:S20
}
)
self.logger.debug(f"🎯 Created ScorebugData object:")
self.logger.debug(f" Away Team ID: {scorebug_data.away_team_id}")

View File

@ -3,6 +3,7 @@ Google Sheets Service
Handles reading data from Google Sheets scorecards for game submission.
"""
import asyncio
from typing import Dict, List, Any, Optional
import pygsheets
@ -24,10 +25,11 @@ class SheetsService:
"""
if credentials_path is None:
from config import get_config
credentials_path = get_config().sheets_credentials_path
self.credentials_path = credentials_path
self.logger = get_contextual_logger(f'{__name__}.SheetsService')
self.logger = get_contextual_logger(f"{__name__}.SheetsService")
self._sheets_client = None
def _get_client(self) -> pygsheets.client.Client:
@ -53,7 +55,16 @@ class SheetsService:
return False
# Common spreadsheet errors
error_values = ['#N/A', '#REF!', '#VALUE!', '#DIV/0!', '#NUM!', '#NAME?', '#NULL!', '#ERROR!']
error_values = [
"#N/A",
"#REF!",
"#VALUE!",
"#DIV/0!",
"#NUM!",
"#NAME?",
"#NULL!",
"#ERROR!",
]
return value.strip() in error_values
@staticmethod
@ -68,7 +79,7 @@ class SheetsService:
Returns:
Integer value or None if invalid
"""
if value is None or value == '':
if value is None or value == "":
return None
# Check for spreadsheet errors
@ -96,16 +107,9 @@ class SheetsService:
"""
try:
# Run in thread pool since pygsheets is synchronous
loop = asyncio.get_event_loop()
sheets = await loop.run_in_executor(
None,
self._get_client
)
scorecard = await loop.run_in_executor(
None,
sheets.open_by_url,
sheet_url
)
loop = asyncio.get_running_loop()
sheets = await loop.run_in_executor(None, self._get_client)
scorecard = await loop.run_in_executor(None, sheets.open_by_url, sheet_url)
self.logger.info(f"Opened scorecard: {scorecard.title}")
return scorecard
@ -116,10 +120,7 @@ class SheetsService:
"Unable to access scorecard. Is it publicly readable?"
) from e
async def read_setup_data(
self,
scorecard: pygsheets.Spreadsheet
) -> Dict[str, Any]:
async def read_setup_data(self, scorecard: pygsheets.Spreadsheet) -> Dict[str, Any]:
"""
Read game metadata from Setup tab.
@ -138,38 +139,27 @@ class SheetsService:
- home_manager_name: str
"""
try:
loop = asyncio.get_event_loop()
loop = asyncio.get_running_loop()
# Get Setup tab
setup_tab = await loop.run_in_executor(
None,
scorecard.worksheet_by_title,
'Setup'
None, scorecard.worksheet_by_title, "Setup"
)
# Read version
version = await loop.run_in_executor(
None,
setup_tab.get_value,
'V35'
)
version = await loop.run_in_executor(None, setup_tab.get_value, "V35")
# Read game data (C3:D7)
g_data = await loop.run_in_executor(
None,
setup_tab.get_values,
'C3',
'D7'
)
g_data = await loop.run_in_executor(None, setup_tab.get_values, "C3", "D7")
return {
'version': version,
'week': int(g_data[1][0]),
'game_num': int(g_data[2][0]),
'away_team_abbrev': g_data[3][0],
'home_team_abbrev': g_data[4][0],
'away_manager_name': g_data[3][1],
'home_manager_name': g_data[4][1]
"version": version,
"week": int(g_data[1][0]),
"game_num": int(g_data[2][0]),
"away_team_abbrev": g_data[3][0],
"home_team_abbrev": g_data[4][0],
"away_manager_name": g_data[3][1],
"home_manager_name": g_data[4][1],
}
except Exception as e:
@ -177,8 +167,7 @@ class SheetsService:
raise SheetsException("Unable to read game setup data") from e
async def read_playtable_data(
self,
scorecard: pygsheets.Spreadsheet
self, scorecard: pygsheets.Spreadsheet
) -> List[Dict[str, Any]]:
"""
Read all plays from Playtable tab.
@ -190,49 +179,101 @@ class SheetsService:
List of play dictionaries with field names mapped
"""
try:
loop = asyncio.get_event_loop()
loop = asyncio.get_running_loop()
# Get Playtable tab
playtable = await loop.run_in_executor(
None,
scorecard.worksheet_by_title,
'Playtable'
None, scorecard.worksheet_by_title, "Playtable"
)
# Read play data
all_plays = await loop.run_in_executor(
None,
playtable.get_values,
'B3',
'BW300'
None, playtable.get_values, "B3", "BW300"
)
# Field names in order (from old bot lines 1621-1632)
play_keys = [
'play_num', 'batter_id', 'batter_pos', 'pitcher_id',
'on_base_code', 'inning_half', 'inning_num', 'batting_order',
'starting_outs', 'away_score', 'home_score', 'on_first_id',
'on_first_final', 'on_second_id', 'on_second_final',
'on_third_id', 'on_third_final', 'batter_final', 'pa', 'ab',
'run', 'e_run', 'hit', 'rbi', 'double', 'triple', 'homerun',
'bb', 'so', 'hbp', 'sac', 'ibb', 'gidp', 'bphr', 'bpfo',
'bp1b', 'bplo', 'sb', 'cs', 'outs', 'pitcher_rest_outs',
'wpa', 'catcher_id', 'defender_id', 'runner_id', 'check_pos',
'error', 'wild_pitch', 'passed_ball', 'pick_off', 'balk',
'is_go_ahead', 'is_tied', 'is_new_inning', 'inherited_runners',
'inherited_scored', 'on_hook_for_loss', 'run_differential',
'unused-manager', 'unused-pitcherpow', 'unused-pitcherrestip',
'unused-runners', 'unused-fatigue', 'unused-roundedip',
'unused-elitestart', 'unused-scenario', 'unused-winxaway',
'unused-winxhome', 'unused-pinchrunner', 'unused-order',
'hand_batting', 'hand_pitching', 're24_primary', 're24_running'
"play_num",
"batter_id",
"batter_pos",
"pitcher_id",
"on_base_code",
"inning_half",
"inning_num",
"batting_order",
"starting_outs",
"away_score",
"home_score",
"on_first_id",
"on_first_final",
"on_second_id",
"on_second_final",
"on_third_id",
"on_third_final",
"batter_final",
"pa",
"ab",
"run",
"e_run",
"hit",
"rbi",
"double",
"triple",
"homerun",
"bb",
"so",
"hbp",
"sac",
"ibb",
"gidp",
"bphr",
"bpfo",
"bp1b",
"bplo",
"sb",
"cs",
"outs",
"pitcher_rest_outs",
"wpa",
"catcher_id",
"defender_id",
"runner_id",
"check_pos",
"error",
"wild_pitch",
"passed_ball",
"pick_off",
"balk",
"is_go_ahead",
"is_tied",
"is_new_inning",
"inherited_runners",
"inherited_scored",
"on_hook_for_loss",
"run_differential",
"unused-manager",
"unused-pitcherpow",
"unused-pitcherrestip",
"unused-runners",
"unused-fatigue",
"unused-roundedip",
"unused-elitestart",
"unused-scenario",
"unused-winxaway",
"unused-winxhome",
"unused-pinchrunner",
"unused-order",
"hand_batting",
"hand_pitching",
"re24_primary",
"re24_running",
]
p_data = []
for line in all_plays:
this_data = {}
for count, value in enumerate(line):
if value != '' and count < len(play_keys):
if value != "" and count < len(play_keys):
this_data[play_keys[count]] = value
# Only include rows with meaningful data (>5 fields)
@ -247,8 +288,7 @@ class SheetsService:
raise SheetsException("Unable to read play-by-play data") from e
async def read_pitching_decisions(
self,
scorecard: pygsheets.Spreadsheet
self, scorecard: pygsheets.Spreadsheet
) -> List[Dict[str, Any]]:
"""
Read pitching decisions from Pitcherstats tab.
@ -260,37 +300,51 @@ class SheetsService:
List of decision dictionaries with field names mapped
"""
try:
loop = asyncio.get_event_loop()
loop = asyncio.get_running_loop()
# Get Pitcherstats tab
pitching = await loop.run_in_executor(
None,
scorecard.worksheet_by_title,
'Pitcherstats'
None, scorecard.worksheet_by_title, "Pitcherstats"
)
# Read decision data
all_decisions = await loop.run_in_executor(
None,
pitching.get_values,
'B3',
'O30'
None, pitching.get_values, "B3", "O30"
)
# Field names in order (from old bot lines 1688-1691)
pit_keys = [
'pitcher_id', 'rest_ip', 'is_start', 'base_rest',
'extra_rest', 'rest_required', 'win', 'loss', 'is_save',
'hold', 'b_save', 'irunners', 'irunners_scored', 'team_id'
"pitcher_id",
"rest_ip",
"is_start",
"base_rest",
"extra_rest",
"rest_required",
"win",
"loss",
"is_save",
"hold",
"b_save",
"irunners",
"irunners_scored",
"team_id",
]
# Fields that must be integers
int_fields = {
'pitcher_id', 'rest_required', 'win', 'loss', 'is_save',
'hold', 'b_save', 'irunners', 'irunners_scored', 'team_id'
"pitcher_id",
"rest_required",
"win",
"loss",
"is_save",
"hold",
"b_save",
"irunners",
"irunners_scored",
"team_id",
}
# Fields that are required and cannot be None
required_fields = {'pitcher_id', 'team_id'}
required_fields = {"pitcher_id", "team_id"}
pit_data = []
row_num = 3 # Start at row 3 (B3 in spreadsheet)
@ -310,7 +364,7 @@ class SheetsService:
field_name = pit_keys[count]
# Skip empty values
if value == '':
if value == "":
continue
# Check for spreadsheet errors
@ -332,7 +386,7 @@ class SheetsService:
# Sanitize integer fields
if field_name in int_fields:
sanitized = self._sanitize_int_field(value, field_name)
if sanitized is None and value != '':
if sanitized is None and value != "":
self.logger.warning(
f"Row {row_num}: Invalid integer value '{value}' for field '{field_name}' - skipping row"
)
@ -367,8 +421,7 @@ class SheetsService:
raise SheetsException("Unable to read pitching decisions") from e
async def read_box_score(
self,
scorecard: pygsheets.Spreadsheet
self, scorecard: pygsheets.Spreadsheet
) -> Dict[str, List[int]]:
"""
Read box score from Scorecard or Box Score tab.
@ -381,38 +434,28 @@ class SheetsService:
[runs, hits, errors]
"""
try:
loop = asyncio.get_event_loop()
loop = asyncio.get_running_loop()
# Try Scorecard tab first
try:
sc_tab = await loop.run_in_executor(
None,
scorecard.worksheet_by_title,
'Scorecard'
None, scorecard.worksheet_by_title, "Scorecard"
)
score_table = await loop.run_in_executor(
None,
sc_tab.get_values,
'BW8',
'BY9'
None, sc_tab.get_values, "BW8", "BY9"
)
except pygsheets.WorksheetNotFound:
# Fallback to Box Score tab
sc_tab = await loop.run_in_executor(
None,
scorecard.worksheet_by_title,
'Box Score'
None, scorecard.worksheet_by_title, "Box Score"
)
score_table = await loop.run_in_executor(
None,
sc_tab.get_values,
'T6',
'V7'
None, sc_tab.get_values, "T6", "V7"
)
return {
'away': [int(x) for x in score_table[0]], # [R, H, E]
'home': [int(x) for x in score_table[1]] # [R, H, E]
"away": [int(x) for x in score_table[0]], # [R, H, E]
"home": [int(x) for x in score_table[1]], # [R, H, E]
}
except Exception as e:

View File

@ -6,7 +6,6 @@ Handles team standings retrieval and processing.
import logging
from typing import Optional, List, Dict
from services.base_service import BaseService
from models.standings import TeamStandings
from exceptions import APIException

View File

@ -4,12 +4,10 @@ Statistics service for Discord Bot v2.0
Handles batting and pitching statistics retrieval and processing.
"""
import logging
from typing import Optional, List
from typing import Optional
from services.base_service import BaseService
from models.batting_stats import BattingStats
from models.pitching_stats import PitchingStats
from exceptions import APIException
logger = logging.getLogger(f'{__name__}.StatsService')

View File

@ -4,18 +4,16 @@ Trade Builder Service
Extends the TransactionBuilder to support multi-team trades and player exchanges.
"""
import logging
from typing import Dict, List, Optional, Set, Tuple
from typing import Dict, List, Optional, Set
from datetime import datetime, timezone
import uuid
from config import get_config
from models.trade import Trade, TradeParticipant, TradeMove, TradeStatus
from models.trade import Trade, TradeMove, TradeStatus
from models.team import Team, RosterType
from models.player import Player
from services.transaction_builder import TransactionBuilder, RosterValidationResult, TransactionMove
from services.team_service import team_service
from services.roster_service import roster_service
from services.league_service import league_service
logger = logging.getLogger(f'{__name__}.TradeBuilder')

View File

@ -4,8 +4,7 @@ Transaction Builder Service
Handles the complex logic for building multi-move transactions interactively.
"""
import logging
from typing import Dict, List, Optional, Tuple, Set
from enum import Enum
from typing import Dict, List, Optional
from dataclasses import dataclass
from datetime import datetime, timezone
@ -14,8 +13,6 @@ from models.transaction import Transaction
from models.team import Team
from models.player import Player
from models.roster import TeamRoster
from services.player_service import player_service
from services.team_service import team_service
from services.roster_service import roster_service
from services.transaction_service import transaction_service
from services.league_service import league_service

View File

@ -4,12 +4,11 @@ Transaction service for Discord Bot v2.0
Handles transaction CRUD operations and business logic.
"""
import logging
from typing import Optional, List, Tuple
from typing import Optional, List
from datetime import datetime, UTC
from services.base_service import BaseService
from models.transaction import Transaction, RosterValidation
from models.roster import TeamRoster
from exceptions import APIException
logger = logging.getLogger(f'{__name__}.TransactionService')

View File

@ -5,7 +5,7 @@ Modern automated cleanup system with better notifications and logging.
"""
import asyncio
from datetime import datetime, timedelta, UTC
from typing import Dict, List, Optional
from typing import Dict, List
import discord
from discord.ext import commands, tasks
@ -13,7 +13,7 @@ from discord.ext import commands, tasks
from services.custom_commands_service import custom_commands_service
from models.custom_command import CustomCommand
from utils.logging import get_contextual_logger
from views.embeds import EmbedTemplate, EmbedColors
from views.embeds import EmbedTemplate
from config import get_config

View File

@ -4,9 +4,8 @@ Draft Monitor Task for Discord Bot v2.0
Automated background task for draft timer monitoring, warnings, and auto-draft.
Self-terminates when draft timer is disabled to conserve resources.
"""
import asyncio
from datetime import datetime
from typing import Optional
from datetime import UTC, datetime
import discord
from discord.ext import commands, tasks
@ -15,12 +14,9 @@ from services.draft_service import draft_service
from services.draft_pick_service import draft_pick_service
from services.draft_list_service import draft_list_service
from services.draft_sheet_service import get_draft_sheet_service
from services.player_service import player_service
from services.team_service import team_service
from services.roster_service import roster_service
from utils.logging import get_contextual_logger
from utils.helpers import get_team_salary_cap
from views.embeds import EmbedTemplate, EmbedColors
from views.draft_views import create_on_clock_announcement_embed
from config import get_config
@ -39,7 +35,7 @@ class DraftMonitorTask:
def __init__(self, bot: commands.Bot):
self.bot = bot
self.logger = get_contextual_logger(f'{__name__}.DraftMonitorTask')
self.logger = get_contextual_logger(f"{__name__}.DraftMonitorTask")
# Warning flags (reset each pick)
self.warning_60s_sent = False
@ -106,7 +102,7 @@ class DraftMonitorTask:
return
# Check if we need to take action
now = datetime.now()
now = datetime.now(UTC)
deadline = draft_data.pick_deadline
if not deadline:
@ -120,7 +116,9 @@ class DraftMonitorTask:
new_interval = self._get_poll_interval(time_remaining)
if self.monitor_loop.seconds != new_interval:
self.monitor_loop.change_interval(seconds=new_interval)
self.logger.debug(f"Adjusted poll interval to {new_interval}s (time remaining: {time_remaining:.0f}s)")
self.logger.debug(
f"Adjusted poll interval to {new_interval}s (time remaining: {time_remaining:.0f}s)"
)
if time_remaining <= 0:
# Timer expired - auto-draft
@ -155,8 +153,7 @@ class DraftMonitorTask:
# Get current pick
current_pick = await draft_pick_service.get_pick(
config.sba_season,
draft_data.currentpick
config.sba_season, draft_data.currentpick
)
if not current_pick or not current_pick.owner:
@ -164,7 +161,7 @@ class DraftMonitorTask:
return
# Get draft picks cog to check/acquire lock
draft_picks_cog = self.bot.get_cog('DraftPicksCog')
draft_picks_cog = self.bot.get_cog("DraftPicksCog")
if not draft_picks_cog:
self.logger.error("Could not find DraftPicksCog")
@ -177,7 +174,7 @@ class DraftMonitorTask:
# Acquire lock
async with draft_picks_cog.pick_lock:
draft_picks_cog.lock_acquired_at = datetime.now()
draft_picks_cog.lock_acquired_at = datetime.now(UTC)
draft_picks_cog.lock_acquired_by = None # System auto-draft
try:
@ -204,17 +201,20 @@ class DraftMonitorTask:
# Get ping channel
ping_channel = guild.get_channel(draft_data.ping_channel)
if not ping_channel:
self.logger.error(f"Could not find ping channel {draft_data.ping_channel}")
self.logger.error(
f"Could not find ping channel {draft_data.ping_channel}"
)
return
# Get team's draft list
draft_list = await draft_list_service.get_team_list(
config.sba_season,
current_pick.owner.id
config.sba_season, current_pick.owner.id
)
if not draft_list:
self.logger.warning(f"Team {current_pick.owner.abbrev} has no draft list")
self.logger.warning(
f"Team {current_pick.owner.abbrev} has no draft list"
)
await ping_channel.send(
content=f"{current_pick.owner.abbrev} time expired with no draft list - pick skipped"
)
@ -252,11 +252,7 @@ class DraftMonitorTask:
# Attempt to draft this player
success = await self._attempt_draft_player(
current_pick,
player,
ping_channel,
draft_data,
guild
current_pick, player, ping_channel, draft_data, guild
)
if success:
@ -264,7 +260,9 @@ class DraftMonitorTask:
f"Auto-drafted {player.name} for {current_pick.owner.abbrev}"
)
# Advance to next pick
await draft_service.advance_pick(draft_data.id, draft_data.currentpick)
await draft_service.advance_pick(
draft_data.id, draft_data.currentpick
)
# Post on-clock announcement for next team
await self._post_on_clock_announcement(ping_channel, draft_data)
# Reset warning flags
@ -289,12 +287,7 @@ class DraftMonitorTask:
self.logger.error("Error auto-drafting player", error=e)
async def _attempt_draft_player(
self,
draft_pick,
player,
ping_channel,
draft_data,
guild
self, draft_pick, player, ping_channel, draft_data, guild
) -> bool:
"""
Attempt to draft a specific player.
@ -314,14 +307,18 @@ class DraftMonitorTask:
from services.team_service import team_service
# Get team roster for cap validation
roster = await team_service.get_team_roster(draft_pick.owner.id, 'current')
roster = await team_service.get_team_roster(draft_pick.owner.id, "current")
if not roster:
self.logger.error(f"Could not get roster for team {draft_pick.owner.id}")
self.logger.error(
f"Could not get roster for team {draft_pick.owner.id}"
)
return False
# Validate cap space
is_valid, projected_total, cap_limit = await validate_cap_space(roster, player.wara)
is_valid, projected_total, cap_limit = await validate_cap_space(
roster, player.wara
)
if not is_valid:
self.logger.debug(
@ -332,8 +329,7 @@ class DraftMonitorTask:
# Update draft pick
updated_pick = await draft_pick_service.update_pick_selection(
draft_pick.id,
player.id
draft_pick.id, player.id
)
if not updated_pick:
@ -343,13 +339,14 @@ class DraftMonitorTask:
# Get current league state for dem_week calculation
from services.player_service import player_service
from services.league_service import league_service
current = await league_service.get_current_state()
# Update player team with dem_week set to current.week + 2 for draft picks
updated_player = await player_service.update_player_team(
player.id,
draft_pick.owner.id,
dem_week=current.week + 2 if current else None
dem_week=current.week + 2 if current else None,
)
if not updated_player:
@ -362,7 +359,7 @@ class DraftMonitorTask:
# Post to ping channel
await ping_channel.send(
content=f"🤖 AUTO-DRAFT: {draft_pick.owner.abbrev} selects **{player.name}** "
f"(Pick #{draft_pick.overall})"
f"(Pick #{draft_pick.overall})"
)
# Post draft card to result channel (same as regular /draft picks)
@ -370,11 +367,14 @@ class DraftMonitorTask:
result_channel = guild.get_channel(draft_data.result_channel)
if result_channel:
from views.draft_views import create_player_draft_card
draft_card = await create_player_draft_card(player, draft_pick)
draft_card.set_footer(text="🤖 Auto-drafted from draft list")
await result_channel.send(embed=draft_card)
else:
self.logger.warning(f"Could not find result channel {draft_data.result_channel}")
self.logger.warning(
f"Could not find result channel {draft_data.result_channel}"
)
return True
@ -408,23 +408,26 @@ class DraftMonitorTask:
# Get the new current pick
next_pick = await draft_pick_service.get_pick(
config.sba_season,
updated_draft_data.currentpick
config.sba_season, updated_draft_data.currentpick
)
if not next_pick or not next_pick.owner:
self.logger.error(f"Could not get pick #{updated_draft_data.currentpick} for announcement")
self.logger.error(
f"Could not get pick #{updated_draft_data.currentpick} for announcement"
)
return
# Get recent picks (last 5 completed)
recent_picks = await draft_pick_service.get_recent_picks(
config.sba_season,
updated_draft_data.currentpick - 1, # Start from previous pick
limit=5
limit=5,
)
# Get team roster for sWAR calculation
team_roster = await roster_service.get_team_roster(next_pick.owner.id, "current")
team_roster = await roster_service.get_team_roster(
next_pick.owner.id, "current"
)
roster_swar = team_roster.total_wara if team_roster else 0.0
cap_limit = get_team_salary_cap(next_pick.owner)
@ -432,7 +435,9 @@ class DraftMonitorTask:
top_roster_players = []
if team_roster:
all_players = team_roster.all_players
sorted_players = sorted(all_players, key=lambda p: p.wara if p.wara else 0.0, reverse=True)
sorted_players = sorted(
all_players, key=lambda p: p.wara if p.wara else 0.0, reverse=True
)
top_roster_players = sorted_players[:5]
# Get sheet URL
@ -446,7 +451,7 @@ class DraftMonitorTask:
roster_swar=roster_swar,
cap_limit=cap_limit,
top_roster_players=top_roster_players,
sheet_url=sheet_url
sheet_url=sheet_url,
)
# Mention the team's role (using team.lname)
@ -455,10 +460,14 @@ class DraftMonitorTask:
if team_role:
team_mention = f"{team_role.mention} "
else:
self.logger.warning(f"Could not find role for team {next_pick.owner.lname}")
self.logger.warning(
f"Could not find role for team {next_pick.owner.lname}"
)
await ping_channel.send(content=team_mention, embed=embed)
self.logger.info(f"Posted on-clock announcement for pick #{updated_draft_data.currentpick}")
self.logger.info(
f"Posted on-clock announcement for pick #{updated_draft_data.currentpick}"
)
# Reset poll interval to 30s for new pick
if self.monitor_loop.seconds != 30:
@ -489,8 +498,7 @@ class DraftMonitorTask:
# Get current pick for mention
current_pick = await draft_pick_service.get_pick(
config.sba_season,
draft_data.currentpick
config.sba_season, draft_data.currentpick
)
if not current_pick or not current_pick.owner:
@ -500,7 +508,7 @@ class DraftMonitorTask:
if 55 <= time_remaining <= 60 and not self.warning_60s_sent:
await ping_channel.send(
content=f"{current_pick.owner.abbrev} - **60 seconds remaining** "
f"for pick #{current_pick.overall}!"
f"for pick #{current_pick.overall}!"
)
self.warning_60s_sent = True
self.logger.debug(f"Sent 60s warning for pick #{current_pick.overall}")
@ -509,7 +517,7 @@ class DraftMonitorTask:
elif 25 <= time_remaining <= 30 and not self.warning_30s_sent:
await ping_channel.send(
content=f"{current_pick.owner.abbrev} - **30 seconds remaining** "
f"for pick #{current_pick.overall}!"
f"for pick #{current_pick.overall}!"
)
self.warning_30s_sent = True
self.logger.debug(f"Sent 30s warning for pick #{current_pick.overall}")
@ -540,10 +548,14 @@ class DraftMonitorTask:
success = await draft_sheet_service.write_pick(
season=config.sba_season,
overall=draft_pick.overall,
orig_owner_abbrev=draft_pick.origowner.abbrev if draft_pick.origowner else draft_pick.owner.abbrev,
orig_owner_abbrev=(
draft_pick.origowner.abbrev
if draft_pick.origowner
else draft_pick.owner.abbrev
),
owner_abbrev=draft_pick.owner.abbrev,
player_name=player.name,
swar=player.wara
swar=player.wara,
)
if not success:
@ -551,7 +563,7 @@ class DraftMonitorTask:
await self._notify_sheet_failure(
ping_channel=ping_channel,
pick_overall=draft_pick.overall,
player_name=player.name
player_name=player.name,
)
except Exception as e:
@ -559,10 +571,12 @@ class DraftMonitorTask:
await self._notify_sheet_failure(
ping_channel=ping_channel,
pick_overall=draft_pick.overall,
player_name=player.name
player_name=player.name,
)
async def _notify_sheet_failure(self, ping_channel, pick_overall: int, player_name: str) -> None:
async def _notify_sheet_failure(
self, ping_channel, pick_overall: int, player_name: str
) -> None:
"""
Post notification to ping channel when sheet write fails.

View File

@ -3,8 +3,9 @@ Live Scorebug Tracker
Background task that monitors published scorecards and updates live score displays.
"""
import asyncio
from typing import List, Optional
from typing import List
import discord
from discord.ext import tasks, commands
@ -16,7 +17,6 @@ from services.scorebug_service import ScorebugData, ScorebugService
from services.team_service import team_service
from commands.gameplay.scorecard_tracker import ScorecardTracker
from commands.voice.tracker import VoiceChannelTracker
from views.embeds import EmbedTemplate, EmbedColors
from config import get_config
from exceptions import SheetsException
@ -40,7 +40,7 @@ class LiveScorebugTracker:
bot: Discord bot instance
"""
self.bot = bot
self.logger = get_contextual_logger(f'{__name__}.LiveScorebugTracker')
self.logger = get_contextual_logger(f"{__name__}.LiveScorebugTracker")
self.scorebug_service = ScorebugService()
self.scorecard_tracker = ScorecardTracker()
self.voice_tracker = VoiceChannelTracker()
@ -84,10 +84,14 @@ class LiveScorebugTracker:
return
# Get live scores channel
live_scores_channel = discord.utils.get(guild.text_channels, name='live-sba-scores')
live_scores_channel = discord.utils.get(
guild.text_channels, name="live-sba-scores"
)
if not live_scores_channel:
self.logger.warning("live-sba-scores channel not found, skipping channel update")
self.logger.warning(
"live-sba-scores channel not found, skipping channel update"
)
# Don't return - still update voice channels
else:
# Get all published scorecards
@ -97,76 +101,84 @@ class LiveScorebugTracker:
# No active scorebugs - clear the channel and hide it
await self._clear_live_scores_channel(live_scores_channel)
await set_channel_visibility(
live_scores_channel,
visible=False,
reason="No active games"
live_scores_channel, visible=False, reason="No active games"
)
return
# Read all scorebugs and create embeds
active_scorebugs = []
read_failures = 0
confirmed_final = 0
for text_channel_id, sheet_url in all_scorecards:
try:
scorebug_data = await self.scorebug_service.read_scorebug_data(
sheet_url,
full_length=False # Compact view for live channel
sheet_url, full_length=False # Compact view for live channel
)
# Only include active (non-final) games
if scorebug_data.is_active:
# Get team data
away_team = await team_service.get_team(scorebug_data.away_team_id)
home_team = await team_service.get_team(scorebug_data.home_team_id)
away_team = await team_service.get_team(
scorebug_data.away_team_id
)
home_team = await team_service.get_team(
scorebug_data.home_team_id
)
if away_team is None or home_team is None:
raise ValueError(f'Error looking up teams in scorecard; IDs provided: {scorebug_data.away_team_id} & {scorebug_data.home_team_id}')
raise ValueError(
f"Error looking up teams in scorecard; IDs provided: {scorebug_data.away_team_id} & {scorebug_data.home_team_id}"
)
# Create compact embed using shared utility
embed = create_scorebug_embed(
scorebug_data,
away_team,
home_team,
full_length=False # Compact view for live channel
full_length=False, # Compact view for live channel
)
active_scorebugs.append(embed)
# Update associated voice channel if it exists
await self._update_voice_channel_description(
text_channel_id,
scorebug_data,
away_team,
home_team
text_channel_id, scorebug_data, away_team, home_team
)
else:
confirmed_final += 1
await asyncio.sleep(1) # Rate limit between reads
except SheetsException as e:
read_failures += 1
self.logger.warning(f"Could not read scorecard {sheet_url}: {e}")
except Exception as e:
read_failures += 1
self.logger.error(f"Error processing scorecard {sheet_url}: {e}")
# Update live scores channel
if active_scorebugs:
await set_channel_visibility(
live_scores_channel,
visible=True,
reason="Active games in progress"
live_scores_channel, visible=True, reason="Active games in progress"
)
await self._post_scorebugs_to_channel(
live_scores_channel, active_scorebugs
)
elif read_failures > 0 and confirmed_final < len(all_scorecards):
# Some reads failed — don't hide the channel, preserve last state
self.logger.warning(
f"Skipping channel hide: {read_failures} scorecard read(s) failed, "
f"only {confirmed_final}/{len(all_scorecards)} confirmed final"
)
await self._post_scorebugs_to_channel(live_scores_channel, active_scorebugs)
else:
# All games finished - clear the channel and hide it
# All games confirmed final — safe to clear and hide
await self._clear_live_scores_channel(live_scores_channel)
await set_channel_visibility(
live_scores_channel,
visible=False,
reason="No active games"
live_scores_channel, visible=False, reason="No active games"
)
async def _post_scorebugs_to_channel(
self,
channel: discord.TextChannel,
embeds: List[discord.Embed]
self, channel: discord.TextChannel, embeds: List[discord.Embed]
):
"""
Post scorebugs to the live scores channel.
@ -186,7 +198,7 @@ class LiveScorebugTracker:
else:
# Split into multiple messages if more than 10 embeds
for i in range(0, len(embeds), 10):
batch = embeds[i:i+10]
batch = embeds[i : i + 10]
await channel.send(embeds=batch)
self.logger.info(f"Posted {len(embeds)} scorebugs to live-sba-scores")
@ -220,7 +232,7 @@ class LiveScorebugTracker:
text_channel_id: int,
scorebug_data: ScorebugData,
away_team: Team,
home_team: Team
home_team: Team,
):
"""
Update voice channel description with live score.
@ -233,10 +245,14 @@ class LiveScorebugTracker:
"""
try:
# Check if there's an associated voice channel
voice_channel_id = self.voice_tracker.get_voice_channel_for_text_channel(text_channel_id)
voice_channel_id = self.voice_tracker.get_voice_channel_for_text_channel(
text_channel_id
)
if not voice_channel_id:
self.logger.debug(f'No voice channel associated with text channel ID {text_channel_id} (may have been cleaned up)')
self.logger.debug(
f"No voice channel associated with text channel ID {text_channel_id} (may have been cleaned up)"
)
return # No associated voice channel
# Get the voice channel
@ -249,7 +265,9 @@ class LiveScorebugTracker:
voice_channel = guild.get_channel(voice_channel_id)
if not voice_channel or not isinstance(voice_channel, discord.VoiceChannel):
self.logger.debug(f"Voice channel {voice_channel_id} not found or wrong type")
self.logger.debug(
f"Voice channel {voice_channel_id} not found or wrong type"
)
return
# Format description: "BOS 4 @ 3 NYY" or "BOS 5 @ 3 NYY - FINAL"
@ -264,10 +282,14 @@ class LiveScorebugTracker:
# Update voice channel description (topic)
await voice_channel.edit(status=description)
self.logger.debug(f"Updated voice channel {voice_channel.name} description to: {description}")
self.logger.debug(
f"Updated voice channel {voice_channel.name} description to: {description}"
)
except discord.Forbidden:
self.logger.warning(f"Missing permissions to update voice channel {voice_channel_id}")
self.logger.warning(
f"Missing permissions to update voice channel {voice_channel_id}"
)
except Exception as e:
self.logger.error(f"Error updating voice channel description: {e}")

View File

@ -4,6 +4,7 @@ Transaction Freeze/Thaw Task for Discord Bot v2.0
Automated weekly system for freezing and processing transactions.
Runs on a schedule to increment weeks and process contested transactions.
"""
import asyncio
import random
from datetime import datetime, UTC
@ -30,6 +31,7 @@ class TransactionPriority:
Data class for transaction priority calculation.
Used to resolve contested transactions (multiple teams wanting same player).
"""
transaction: Transaction
team_win_percentage: float
tiebreaker: float # win% + small random number for randomized tiebreak
@ -42,6 +44,7 @@ class TransactionPriority:
@dataclass
class ConflictContender:
"""A team contending for a contested player."""
team_abbrev: str
wins: int
losses: int
@ -52,6 +55,7 @@ class ConflictContender:
@dataclass
class ConflictResolution:
"""Details of a conflict resolution for a contested player."""
player_name: str
player_swar: float
contenders: List[ConflictContender]
@ -62,6 +66,7 @@ class ConflictResolution:
@dataclass
class ThawedMove:
"""A move that was successfully thawed (unfrozen)."""
move_id: str
team_abbrev: str
players: List[Tuple[str, float, str, str]] # (name, sWAR, old_team, new_team)
@ -71,6 +76,7 @@ class ThawedMove:
@dataclass
class CancelledMove:
"""A move that was cancelled due to conflict."""
move_id: str
team_abbrev: str
players: List[Tuple[str, float, str, str]] # (name, sWAR, old_team, new_team)
@ -81,6 +87,7 @@ class CancelledMove:
@dataclass
class ThawReport:
"""Complete thaw report for admin review."""
week: int
season: int
timestamp: datetime
@ -94,8 +101,7 @@ class ThawReport:
async def resolve_contested_transactions(
transactions: List[Transaction],
season: int
transactions: List[Transaction], season: int
) -> Tuple[List[str], List[str], List[ConflictResolution]]:
"""
Resolve contested transactions where multiple teams want the same player.
@ -109,7 +115,7 @@ async def resolve_contested_transactions(
Returns:
Tuple of (winning_move_ids, losing_move_ids, conflict_resolutions)
"""
logger = get_contextual_logger(f'{__name__}.resolve_contested_transactions')
logger = get_contextual_logger(f"{__name__}.resolve_contested_transactions")
# Group transactions by player name
player_transactions: Dict[str, List[Transaction]] = {}
@ -118,7 +124,7 @@ async def resolve_contested_transactions(
player_name = transaction.player.name.lower()
# Only consider transactions where a team is acquiring a player (not FA drops)
if transaction.newteam.abbrev.upper() != 'FA':
if transaction.newteam.abbrev.upper() != "FA":
if player_name not in player_transactions:
player_transactions[player_name] = []
player_transactions[player_name].append(transaction)
@ -130,7 +136,9 @@ async def resolve_contested_transactions(
for player_name, player_transactions_list in player_transactions.items():
if len(player_transactions_list) > 1:
contested_players[player_name] = player_transactions_list
logger.info(f"Contested player: {player_name} ({len(player_transactions_list)} teams)")
logger.info(
f"Contested player: {player_name} ({len(player_transactions_list)} teams)"
)
else:
# Non-contested, automatically wins
non_contested_moves.add(player_transactions_list[0].moveid)
@ -143,50 +151,66 @@ async def resolve_contested_transactions(
for player_name, contested_transactions in contested_players.items():
priorities: List[TransactionPriority] = []
# Track standings data for each team for report
team_standings_data: Dict[str, Tuple[int, int, float]] = {} # abbrev -> (wins, losses, win_pct)
team_standings_data: Dict[str, Tuple[int, int, float]] = (
{}
) # abbrev -> (wins, losses, win_pct)
for transaction in contested_transactions:
# Get team for priority calculation
# If adding to MiL team, use the parent ML team for standings
if transaction.newteam.abbrev.endswith('MiL'):
if transaction.newteam.abbrev.endswith("MiL"):
team_abbrev = transaction.newteam.abbrev[:-3] # Remove 'MiL' suffix
else:
team_abbrev = transaction.newteam.abbrev
try:
# Get team standings to calculate win percentage
standings = await standings_service.get_team_standings(team_abbrev, season)
standings = await standings_service.get_team_standings(
team_abbrev, season
)
if standings and standings.wins is not None and standings.losses is not None:
if (
standings
and standings.wins is not None
and standings.losses is not None
):
total_games = standings.wins + standings.losses
win_pct = standings.wins / total_games if total_games > 0 else 0.0
team_standings_data[transaction.newteam.abbrev] = (
standings.wins, standings.losses, win_pct
standings.wins,
standings.losses,
win_pct,
)
else:
win_pct = 0.0
team_standings_data[transaction.newteam.abbrev] = (0, 0, 0.0)
logger.warning(f"Could not get standings for {team_abbrev}, using 0.0 win%")
logger.warning(
f"Could not get standings for {team_abbrev}, using 0.0 win%"
)
# Add small random component for tiebreaking (5 decimal precision)
random_component = random.randint(10000, 99999) * 0.00000001
tiebreaker = win_pct + random_component
priorities.append(TransactionPriority(
transaction=transaction,
team_win_percentage=win_pct,
tiebreaker=tiebreaker
))
priorities.append(
TransactionPriority(
transaction=transaction,
team_win_percentage=win_pct,
tiebreaker=tiebreaker,
)
)
except Exception as e:
logger.error(f"Error calculating priority for {team_abbrev}: {e}")
team_standings_data[transaction.newteam.abbrev] = (0, 0, 0.0)
# Give them 0.0 priority on error
priorities.append(TransactionPriority(
transaction=transaction,
team_win_percentage=0.0,
tiebreaker=random.randint(10000, 99999) * 0.00000001
))
priorities.append(
TransactionPriority(
transaction=transaction,
team_win_percentage=0.0,
tiebreaker=random.randint(10000, 99999) * 0.00000001,
)
)
# Sort by tiebreaker (lowest win% wins - worst teams get priority)
priorities.sort()
@ -204,7 +228,7 @@ async def resolve_contested_transactions(
wins=winner_standings[0],
losses=winner_standings[1],
win_pct=winner_standings[2],
move_id=winner.transaction.moveid
move_id=winner.transaction.moveid,
)
loser_contenders: List[ConflictContender] = []
@ -224,7 +248,7 @@ async def resolve_contested_transactions(
wins=loser_standings[0],
losses=loser_standings[1],
win_pct=loser_standings[2],
move_id=loser.transaction.moveid
move_id=loser.transaction.moveid,
)
loser_contenders.append(loser_contender)
all_contenders.append(loser_contender)
@ -236,13 +260,15 @@ async def resolve_contested_transactions(
# Get player info from first transaction (they all have same player)
player = contested_transactions[0].player
conflict_resolutions.append(ConflictResolution(
player_name=player.name,
player_swar=player.wara,
contenders=all_contenders,
winner=winner_contender,
losers=loser_contenders
))
conflict_resolutions.append(
ConflictResolution(
player_name=player.name,
player_swar=player.wara,
contenders=all_contenders,
winner=winner_contender,
losers=loser_contenders,
)
)
# Add non-contested moves to winners
winning_move_ids.update(non_contested_moves)
@ -255,7 +281,7 @@ class TransactionFreezeTask:
def __init__(self, bot: commands.Bot):
self.bot = bot
self.logger = get_contextual_logger(f'{__name__}.TransactionFreezeTask')
self.logger = get_contextual_logger(f"{__name__}.TransactionFreezeTask")
# Track last execution to prevent duplicate operations
self.last_freeze_week: int | None = None
@ -288,7 +314,9 @@ class TransactionFreezeTask:
# Skip if offseason mode is enabled
if config.offseason_flag:
self.logger.info("Skipping freeze/thaw operations - offseason mode enabled")
self.logger.info(
"Skipping freeze/thaw operations - offseason mode enabled"
)
return
# Get current league state
@ -297,14 +325,14 @@ class TransactionFreezeTask:
self.logger.warning("Could not get current league state")
return
now = datetime.now()
now = datetime.now(UTC)
self.logger.info(
f"Weekly loop check",
datetime=now.isoformat(),
weekday=now.weekday(),
hour=now.hour,
current_week=current.week,
freeze_status=current.freeze
freeze_status=current.freeze,
)
# BEGIN FREEZE: Monday at 00:00, not already frozen
@ -312,13 +340,23 @@ class TransactionFreezeTask:
# Only run if we haven't already frozen this week
# Track the week we're freezing FROM (before increment)
if self.last_freeze_week != current.week:
freeze_from_week = current.week # Save BEFORE _begin_freeze modifies it
self.logger.info("Triggering freeze begin", current_week=current.week)
freeze_from_week = (
current.week
) # Save BEFORE _begin_freeze modifies it
self.logger.info(
"Triggering freeze begin", current_week=current.week
)
await self._begin_freeze(current)
self.last_freeze_week = freeze_from_week # Track the week we froze FROM
self.error_notification_sent = False # Reset error flag for new cycle
self.last_freeze_week = (
freeze_from_week # Track the week we froze FROM
)
self.error_notification_sent = (
False # Reset error flag for new cycle
)
else:
self.logger.debug("Freeze already executed for week", week=current.week)
self.logger.debug(
"Freeze already executed for week", week=current.week
)
# END FREEZE: Saturday at 00:00, currently frozen
elif now.weekday() == 5 and now.hour == 0 and current.freeze:
@ -327,9 +365,13 @@ class TransactionFreezeTask:
self.logger.info("Triggering freeze end", current_week=current.week)
await self._end_freeze(current)
self.last_thaw_week = current.week
self.error_notification_sent = False # Reset error flag for new cycle
self.error_notification_sent = (
False # Reset error flag for new cycle
)
else:
self.logger.debug("Thaw already executed for week", week=current.week)
self.logger.debug(
"Thaw already executed for week", week=current.week
)
else:
self.logger.debug("No freeze/thaw action needed at this time")
@ -375,8 +417,7 @@ class TransactionFreezeTask:
# Increment week and set freeze via service
new_week = current.week + 1
updated_current = await league_service.update_current_state(
week=new_week,
freeze=True
week=new_week, freeze=True
)
if not updated_current:
@ -449,15 +490,18 @@ class TransactionFreezeTask:
try:
# Get non-frozen, non-cancelled transactions for current week via service
transactions = await transaction_service.get_regular_transactions_by_week(
season=current.season,
week=current.week
season=current.season, week=current.week
)
if not transactions:
self.logger.info(f"No regular transactions to process for week {current.week}")
self.logger.info(
f"No regular transactions to process for week {current.week}"
)
return
self.logger.info(f"Processing {len(transactions)} regular transactions for week {current.week}")
self.logger.info(
f"Processing {len(transactions)} regular transactions for week {current.week}"
)
# Execute player roster updates for all transactions
success_count = 0
@ -470,7 +514,7 @@ class TransactionFreezeTask:
player_id=transaction.player.id,
new_team_id=transaction.newteam.id,
player_name=transaction.player.name,
dem_week=current.week + 2
dem_week=current.week + 2,
)
success_count += 1
@ -482,7 +526,7 @@ class TransactionFreezeTask:
f"Failed to execute transaction for {transaction.player.name}",
player_id=transaction.player.id,
new_team_id=transaction.newteam.id,
error=str(e)
error=str(e),
)
failure_count += 1
@ -490,7 +534,7 @@ class TransactionFreezeTask:
f"Transaction execution complete for week {current.week}",
success=success_count,
failures=failure_count,
total=len(transactions)
total=len(transactions),
)
except Exception as e:
@ -514,11 +558,13 @@ class TransactionFreezeTask:
transactions = await transaction_service.get_frozen_transactions_by_week(
season=current.season,
week_start=current.week,
week_end=current.week + 1
week_end=current.week + 1,
)
if not transactions:
self.logger.warning(f"No frozen transactions to process for week {current.week}")
self.logger.warning(
f"No frozen transactions to process for week {current.week}"
)
# Still post an empty report for visibility
empty_report = ThawReport(
week=current.week,
@ -530,23 +576,26 @@ class TransactionFreezeTask:
conflict_count=0,
conflicts=[],
thawed_moves=[],
cancelled_moves=[]
cancelled_moves=[],
)
await self._post_thaw_report(empty_report)
return
self.logger.info(f"Processing {len(transactions)} frozen transactions for week {current.week}")
self.logger.info(
f"Processing {len(transactions)} frozen transactions for week {current.week}"
)
# Resolve contested transactions
winning_move_ids, losing_move_ids, conflict_resolutions = await resolve_contested_transactions(
transactions,
current.season
winning_move_ids, losing_move_ids, conflict_resolutions = (
await resolve_contested_transactions(transactions, current.season)
)
# Build mapping from conflict player to winner for cancelled move tracking
conflict_player_to_winner: Dict[str, str] = {}
for conflict in conflict_resolutions:
conflict_player_to_winner[conflict.player_name.lower()] = conflict.winner.team_abbrev
conflict_player_to_winner[conflict.player_name.lower()] = (
conflict.winner.team_abbrev
)
# Track cancelled moves for report
cancelled_moves_report: List[CancelledMove] = []
@ -555,24 +604,34 @@ class TransactionFreezeTask:
for losing_move_id in losing_move_ids:
try:
# Get all moves with this moveid (could be multiple players in one transaction)
losing_moves = [t for t in transactions if t.moveid == losing_move_id]
losing_moves = [
t for t in transactions if t.moveid == losing_move_id
]
if losing_moves:
# Cancel the entire transaction (all moves with same moveid)
for move in losing_moves:
success = await transaction_service.cancel_transaction(move.moveid)
success = await transaction_service.cancel_transaction(
move.moveid
)
if not success:
self.logger.warning(f"Failed to cancel transaction {move.moveid}")
self.logger.warning(
f"Failed to cancel transaction {move.moveid}"
)
# Notify the GM(s) about cancellation
first_move = losing_moves[0]
# Determine which team to notify (the team that was trying to acquire)
team_for_notification = (first_move.newteam
if first_move.newteam.abbrev.upper() != 'FA'
else first_move.oldteam)
team_for_notification = (
first_move.newteam
if first_move.newteam.abbrev.upper() != "FA"
else first_move.oldteam
)
await self._notify_gm_of_cancellation(first_move, team_for_notification)
await self._notify_gm_of_cancellation(
first_move, team_for_notification
)
# Find which player caused the conflict
contested_player = ""
@ -586,16 +645,23 @@ class TransactionFreezeTask:
# Build report entry
players = [
(move.player.name, move.player.wara, move.oldteam.abbrev, move.newteam.abbrev)
(
move.player.name,
move.player.wara,
move.oldteam.abbrev,
move.newteam.abbrev,
)
for move in losing_moves
]
cancelled_moves_report.append(CancelledMove(
move_id=losing_move_id,
team_abbrev=team_for_notification.abbrev,
players=players,
lost_to=lost_to,
contested_player=contested_player
))
cancelled_moves_report.append(
CancelledMove(
move_id=losing_move_id,
team_abbrev=team_for_notification.abbrev,
players=players,
lost_to=lost_to,
contested_player=contested_player,
)
)
contested_players = [move.player.name for move in losing_moves]
self.logger.info(
@ -604,7 +670,9 @@ class TransactionFreezeTask:
)
except Exception as e:
self.logger.error(f"Error cancelling transaction {losing_move_id}: {e}")
self.logger.error(
f"Error cancelling transaction {losing_move_id}: {e}"
)
# Track thawed moves for report
thawed_moves_report: List[ThawedMove] = []
@ -613,13 +681,19 @@ class TransactionFreezeTask:
for winning_move_id in winning_move_ids:
try:
# Get all moves with this moveid
winning_moves = [t for t in transactions if t.moveid == winning_move_id]
winning_moves = [
t for t in transactions if t.moveid == winning_move_id
]
for move in winning_moves:
# Unfreeze the transaction via service
success = await transaction_service.unfreeze_transaction(move.moveid)
success = await transaction_service.unfreeze_transaction(
move.moveid
)
if not success:
self.logger.warning(f"Failed to unfreeze transaction {move.moveid}")
self.logger.warning(
f"Failed to unfreeze transaction {move.moveid}"
)
# Post to transaction log
await self._post_transaction_to_log(winning_move_id, transactions)
@ -627,34 +701,45 @@ class TransactionFreezeTask:
# Build report entry
if winning_moves:
first_move = winning_moves[0]
# Extract timestamp from moveid (format: Season-XXX-Week-XX-DD-HH:MM:SS)
# Extract timestamp from moveid (format: Season-{season:03d}-Week-{week:02d}-{unix_timestamp})
try:
parts = winning_move_id.split('-')
submitted_at = parts[-1] if len(parts) >= 6 else "Unknown"
parts = winning_move_id.split("-")
submitted_at = parts[-1] if len(parts) >= 4 else "Unknown"
except Exception:
submitted_at = "Unknown"
# Determine team abbrev
if first_move.newteam.abbrev.upper() != 'FA':
if first_move.newteam.abbrev.upper() != "FA":
team_abbrev = first_move.newteam.abbrev
else:
team_abbrev = first_move.oldteam.abbrev
players = [
(move.player.name, move.player.wara, move.oldteam.abbrev, move.newteam.abbrev)
(
move.player.name,
move.player.wara,
move.oldteam.abbrev,
move.newteam.abbrev,
)
for move in winning_moves
]
thawed_moves_report.append(ThawedMove(
move_id=winning_move_id,
team_abbrev=team_abbrev,
players=players,
submitted_at=submitted_at
))
thawed_moves_report.append(
ThawedMove(
move_id=winning_move_id,
team_abbrev=team_abbrev,
players=players,
submitted_at=submitted_at,
)
)
self.logger.info(f"Processed successful transaction {winning_move_id}")
self.logger.info(
f"Processed successful transaction {winning_move_id}"
)
except Exception as e:
self.logger.error(f"Error processing winning transaction {winning_move_id}: {e}")
self.logger.error(
f"Error processing winning transaction {winning_move_id}: {e}"
)
# Generate and post thaw report
thaw_report = ThawReport(
@ -667,7 +752,7 @@ class TransactionFreezeTask:
conflict_count=len(conflict_resolutions),
conflicts=conflict_resolutions,
thawed_moves=thawed_moves_report,
cancelled_moves=cancelled_moves_report
cancelled_moves=cancelled_moves_report,
)
await self._post_thaw_report(thaw_report)
@ -685,7 +770,7 @@ class TransactionFreezeTask:
player_id: int,
new_team_id: int,
player_name: str,
dem_week: Optional[int] = None
dem_week: Optional[int] = None,
) -> bool:
"""
Execute a player roster update via API PATCH.
@ -708,13 +793,11 @@ class TransactionFreezeTask:
player_id=player_id,
player_name=player_name,
new_team_id=new_team_id,
dem_week=dem_week
dem_week=dem_week,
)
updated_player = await player_service.update_player_team(
player_id,
new_team_id,
dem_week=dem_week
player_id, new_team_id, dem_week=dem_week
)
# Verify response (200 or 204 indicates success)
@ -724,7 +807,7 @@ class TransactionFreezeTask:
player_id=player_id,
player_name=player_name,
new_team_id=new_team_id,
dem_week=dem_week
dem_week=dem_week,
)
return True
else:
@ -733,7 +816,7 @@ class TransactionFreezeTask:
player_id=player_id,
player_name=player_name,
new_team_id=new_team_id,
dem_week=dem_week
dem_week=dem_week,
)
return False
@ -745,7 +828,7 @@ class TransactionFreezeTask:
new_team_id=new_team_id,
dem_week=dem_week,
error=str(e),
exc_info=True
exc_info=True,
)
raise
@ -764,34 +847,36 @@ class TransactionFreezeTask:
self.logger.warning("Could not find guild for freeze announcement")
return
channel = discord.utils.get(guild.text_channels, name='transaction-log')
channel = discord.utils.get(guild.text_channels, name="transaction-log")
if not channel:
self.logger.warning("Could not find transaction-log channel")
return
# Create announcement message (formatted like legacy bot)
week_num = f'Week {week}'
stars = '*' * 32
week_num = f"Week {week}"
stars = "*" * 32
if is_beginning:
message = (
f'```\n'
f'{stars}\n'
f'{week_num:>9} Freeze Period Begins\n'
f'{stars}\n'
f'```'
f"```\n"
f"{stars}\n"
f"{week_num:>9} Freeze Period Begins\n"
f"{stars}\n"
f"```"
)
else:
message = (
f'```\n'
f"```\n"
f'{"*" * 30}\n'
f'{week_num:>9} Freeze Period Ends\n'
f"{week_num:>9} Freeze Period Ends\n"
f'{"*" * 30}\n'
f'```'
f"```"
)
await channel.send(message)
self.logger.info(f"Freeze announcement sent for week {week} ({'begin' if is_beginning else 'end'})")
self.logger.info(
f"Freeze announcement sent for week {week} ({'begin' if is_beginning else 'end'})"
)
except Exception as e:
self.logger.error(f"Error sending freeze announcement: {e}")
@ -809,7 +894,7 @@ class TransactionFreezeTask:
if not guild:
return
info_channel = discord.utils.get(guild.text_channels, name='weekly-info')
info_channel = discord.utils.get(guild.text_channels, name="weekly-info")
if not info_channel:
self.logger.warning("Could not find weekly-info channel")
return
@ -818,7 +903,7 @@ class TransactionFreezeTask:
async for message in info_channel.history(limit=25):
try:
await message.delete()
except:
except Exception:
pass # Ignore deletion errors
# Determine season emoji
@ -835,17 +920,17 @@ class TransactionFreezeTask:
is_div_week = current.week in [1, 3, 6, 14, 16, 18]
weekly_str = (
f'**Season**: {season_str}\n'
f'**Time of Day**: {night_str} / {night_str if is_div_week else day_str} / '
f'{night_str} / {day_str}'
f"**Season**: {season_str}\n"
f"**Time of Day**: {night_str} / {night_str if is_div_week else day_str} / "
f"{night_str} / {day_str}"
)
# Send info messages
await info_channel.send(
content=(
f'Each team has manage permissions in their home ballpark. '
f'They may pin messages and rename the channel.\n\n'
f'**Make sure your ballpark starts with your team abbreviation.**'
f"Each team has manage permissions in their home ballpark. "
f"They may pin messages and rename the channel.\n\n"
f"**Make sure your ballpark starts with your team abbreviation.**"
)
)
await info_channel.send(weekly_str)
@ -856,9 +941,7 @@ class TransactionFreezeTask:
self.logger.error(f"Error posting weekly info: {e}")
async def _post_transaction_to_log(
self,
move_id: str,
all_transactions: List[Transaction]
self, move_id: str, all_transactions: List[Transaction]
):
"""
Post a transaction to the transaction log channel.
@ -873,7 +956,7 @@ class TransactionFreezeTask:
if not guild:
return
channel = discord.utils.get(guild.text_channels, name='transaction-log')
channel = discord.utils.get(guild.text_channels, name="transaction-log")
if not channel:
return
@ -884,9 +967,15 @@ class TransactionFreezeTask:
# Determine the team for the embed (team making the moves)
first_move = moves[0]
if first_move.newteam.abbrev.upper() != 'FA' and 'IL' not in first_move.newteam.abbrev:
if (
first_move.newteam.abbrev.upper() != "FA"
and "IL" not in first_move.newteam.abbrev
):
this_team = first_move.newteam
elif first_move.oldteam.abbrev.upper() != 'FA' and 'IL' not in first_move.oldteam.abbrev:
elif (
first_move.oldteam.abbrev.upper() != "FA"
and "IL" not in first_move.oldteam.abbrev
):
this_team = first_move.oldteam
else:
# Default to newteam if both are FA/IL
@ -898,25 +987,29 @@ class TransactionFreezeTask:
for move in moves:
move_string += (
f'**{move.player.name}** ({move.player.wara:.2f}) '
f'from {move.oldteam.abbrev} to {move.newteam.abbrev}\n'
f"**{move.player.name}** ({move.player.wara:.2f}) "
f"from {move.oldteam.abbrev} to {move.newteam.abbrev}\n"
)
# Create embed
embed = EmbedTemplate.create_base_embed(
title=f'Week {week_num} Transaction',
description=this_team.sname if hasattr(this_team, 'sname') else this_team.lname,
color=EmbedColors.INFO
title=f"Week {week_num} Transaction",
description=(
this_team.sname if hasattr(this_team, "sname") else this_team.lname
),
color=EmbedColors.INFO,
)
# Set team color if available
if hasattr(this_team, 'color') and this_team.color:
if hasattr(this_team, "color") and this_team.color:
try:
embed.color = discord.Color(int(this_team.color.replace('#', ''), 16))
except:
embed.color = discord.Color(
int(this_team.color.replace("#", ""), 16)
)
except Exception:
pass # Use default color on error
embed.add_field(name='Player Moves', value=move_string, inline=False)
embed.add_field(name="Player Moves", value=move_string, inline=False)
await channel.send(embed=embed)
self.logger.info(f"Transaction posted to log: {move_id}")
@ -924,11 +1017,7 @@ class TransactionFreezeTask:
except Exception as e:
self.logger.error(f"Error posting transaction to log: {e}")
async def _notify_gm_of_cancellation(
self,
transaction: Transaction,
team
):
async def _notify_gm_of_cancellation(self, transaction: Transaction, team):
"""
Send DM to GM(s) about cancelled transaction.
@ -943,27 +1032,31 @@ class TransactionFreezeTask:
return
cancel_text = (
f'Your transaction for **{transaction.player.name}** has been cancelled '
f'because another team successfully claimed them during the freeze period.'
f"Your transaction for **{transaction.player.name}** has been cancelled "
f"because another team successfully claimed them during the freeze period."
)
# Notify GM1
if hasattr(team, 'gmid') and team.gmid:
if hasattr(team, "gmid") and team.gmid:
try:
gm_one = guild.get_member(team.gmid)
if gm_one:
await gm_one.send(cancel_text)
self.logger.info(f"Cancellation notification sent to GM1 of {team.abbrev}")
self.logger.info(
f"Cancellation notification sent to GM1 of {team.abbrev}"
)
except Exception as e:
self.logger.error(f"Could not notify GM1 of {team.abbrev}: {e}")
# Notify GM2 if exists
if hasattr(team, 'gmid2') and team.gmid2:
if hasattr(team, "gmid2") and team.gmid2:
try:
gm_two = guild.get_member(team.gmid2)
if gm_two:
await gm_two.send(cancel_text)
self.logger.info(f"Cancellation notification sent to GM2 of {team.abbrev}")
self.logger.info(
f"Cancellation notification sent to GM2 of {team.abbrev}"
)
except Exception as e:
self.logger.error(f"Could not notify GM2 of {team.abbrev}: {e}")
@ -986,30 +1079,43 @@ class TransactionFreezeTask:
admin_channel = self.bot.get_channel(config.thaw_report_channel_id)
if not admin_channel:
self.logger.warning("Could not find thaw report channel", channel_id=config.thaw_report_channel_id)
self.logger.warning(
"Could not find thaw report channel",
channel_id=config.thaw_report_channel_id,
)
return
# Build the report content
report_lines = []
# Header with summary
timestamp_str = report.timestamp.strftime('%B %d, %Y %H:%M UTC')
timestamp_str = report.timestamp.strftime("%B %d, %Y %H:%M UTC")
report_lines.append(f"# Transaction Thaw Report")
report_lines.append(f"**Week {report.week}** | **Season {report.season}** | {timestamp_str}")
report_lines.append(f"**Total:** {report.total_moves} moves | **Thawed:** {report.thawed_count} | **Cancelled:** {report.cancelled_count} | **Conflicts:** {report.conflict_count}")
report_lines.append(
f"**Week {report.week}** | **Season {report.season}** | {timestamp_str}"
)
report_lines.append(
f"**Total:** {report.total_moves} moves | **Thawed:** {report.thawed_count} | **Cancelled:** {report.cancelled_count} | **Conflicts:** {report.conflict_count}"
)
report_lines.append("")
# Conflict Resolution section (if any)
if report.conflicts:
report_lines.append("## Conflict Resolution")
for conflict in report.conflicts:
report_lines.append(f"**{conflict.player_name}** (sWAR: {conflict.player_swar:.1f})")
contenders_str = " vs ".join([
f"{c.team_abbrev} ({c.wins}-{c.losses})"
for c in conflict.contenders
])
report_lines.append(
f"**{conflict.player_name}** (sWAR: {conflict.player_swar:.1f})"
)
contenders_str = " vs ".join(
[
f"{c.team_abbrev} ({c.wins}-{c.losses})"
for c in conflict.contenders
]
)
report_lines.append(f"- Contested by: {contenders_str}")
report_lines.append(f"- **Awarded to: {conflict.winner.team_abbrev}** (worst record wins)")
report_lines.append(
f"- **Awarded to: {conflict.winner.team_abbrev}** (worst record wins)"
)
report_lines.append("")
# Thawed Moves section
@ -1018,7 +1124,9 @@ class TransactionFreezeTask:
for move in report.thawed_moves:
report_lines.append(f"**{move.move_id}** | {move.team_abbrev}")
for player_name, swar, old_team, new_team in move.players:
report_lines.append(f" - {player_name} ({swar:.1f}): {old_team}{new_team}")
report_lines.append(
f" - {player_name} ({swar:.1f}): {old_team}{new_team}"
)
else:
report_lines.append("*No moves thawed*")
report_lines.append("")
@ -1027,10 +1135,18 @@ class TransactionFreezeTask:
report_lines.append("## Cancelled Moves")
if report.cancelled_moves:
for move in report.cancelled_moves:
lost_info = f" (lost {move.contested_player} to {move.lost_to})" if move.lost_to else ""
report_lines.append(f"**{move.move_id}** | {move.team_abbrev}{lost_info}")
lost_info = (
f" (lost {move.contested_player} to {move.lost_to})"
if move.lost_to
else ""
)
report_lines.append(
f"**{move.move_id}** | {move.team_abbrev}{lost_info}"
)
for player_name, swar, old_team, new_team in move.players:
report_lines.append(f" - ❌ {player_name} ({swar:.1f}): {old_team}{new_team}")
report_lines.append(
f" - ❌ {player_name} ({swar:.1f}): {old_team}{new_team}"
)
else:
report_lines.append("*No moves cancelled*")

View File

@ -3,6 +3,7 @@ Pytest configuration and fixtures for Discord Bot v2.0 tests.
This file provides test isolation and shared fixtures.
"""
import asyncio
import os
import pytest
@ -27,12 +28,14 @@ def reset_singleton_state():
# Reset transaction builder caches
try:
from services.transaction_builder import _transaction_builders
_transaction_builders.clear()
except ImportError:
pass
try:
from services.trade_builder import _trade_builders, _team_to_trade_key
_trade_builders.clear()
_team_to_trade_key.clear()
except ImportError:
@ -40,8 +43,9 @@ def reset_singleton_state():
# Reset config singleton to ensure clean state
try:
from config import _config
from config import _config # noqa: F401
import config as cfg
cfg._config = None
except (ImportError, AttributeError):
pass

View File

@ -5,10 +5,10 @@ Provides factory functions to create test instances of models with sensible defa
This eliminates the need for ad-hoc fixture creation and makes tests resilient
to model changes.
"""
from typing import Optional, Dict, Any
from typing import Optional
from models.player import Player
from models.team import Team, RosterType
from models.team import Team
from models.transaction import Transaction
from models.game import Game
from models.current import Current

View File

@ -3,7 +3,6 @@ API client tests using aioresponses for clean HTTP mocking
"""
import pytest
import asyncio
import aiohttp
from unittest.mock import MagicMock, patch
from aioresponses import aioresponses

View File

@ -6,13 +6,12 @@ import json
import tempfile
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, patch
from discord import app_commands
from commands.utilities.charts import (
ChartCommands, ChartManageGroup, ChartCategoryGroup,
chart_autocomplete, category_autocomplete
chart_autocomplete
)
from services.chart_service import ChartService, Chart, get_chart_service
from services.chart_service import ChartService, Chart
from exceptions import BotException

View File

@ -9,7 +9,7 @@ import discord
from discord.ext import commands
from commands.dice.rolls import DiceRollCommands
from utils.dice_utils import DiceRoll, parse_and_roll_multiple_dice, parse_and_roll_single_dice
from utils.dice_utils import DiceRoll, parse_and_roll_multiple_dice
class TestDiceRollCommands:

View File

@ -3,22 +3,19 @@ Tests for /dropadd Discord Commands
Validates the Discord command interface, autocomplete, and user interactions.
"""
import pytest
from unittest.mock import AsyncMock, MagicMock, patch
import discord
from discord import app_commands
from commands.transactions.dropadd import DropAddCommands
from services.transaction_builder import TransactionBuilder
from models.team import RosterType
from models.team import Team
from models.player import Player
from tests.factories import PlayerFactory, TeamFactory
class TestDropAddCommands:
"""Test DropAddCommands Discord command functionality."""
@pytest.fixture
def mock_bot(self):
"""Create mock Discord bot."""
@ -26,12 +23,12 @@ class TestDropAddCommands:
bot.user = MagicMock()
bot.user.id = 123456789
return bot
@pytest.fixture
def commands_cog(self, mock_bot):
"""Create DropAddCommands cog instance."""
return DropAddCommands(mock_bot)
@pytest.fixture
def mock_interaction(self):
"""Create mock Discord interaction."""
@ -47,88 +44,108 @@ class TestDropAddCommands:
interaction.guild = MagicMock()
interaction.guild.id = 669356687294988350 # Test guild ID matching config
return interaction
@pytest.fixture
def mock_team(self):
"""Create mock team data."""
return TeamFactory.west_virginia()
@pytest.fixture
def mock_player(self):
"""Create mock player data."""
return PlayerFactory.mike_trout()
@pytest.mark.asyncio
async def test_player_autocomplete_success(self, commands_cog, mock_interaction):
"""Test successful player autocomplete."""
mock_players = [
PlayerFactory.mike_trout(id=1),
PlayerFactory.ronald_acuna(id=2)
PlayerFactory.ronald_acuna(id=2),
]
with patch('utils.autocomplete.player_service') as mock_service:
with patch("utils.autocomplete.player_service") as mock_service:
mock_service.search_players = AsyncMock(return_value=mock_players)
from utils.autocomplete import player_autocomplete
choices = await player_autocomplete(mock_interaction, 'Trout')
choices = await player_autocomplete(mock_interaction, "Trout")
assert len(choices) == 2
assert choices[0].name == 'Mike Trout (CF)'
assert choices[0].value == 'Mike Trout'
assert choices[1].name == 'Ronald Acuna Jr. (OF)'
assert choices[1].value == 'Ronald Acuna Jr.'
assert choices[0].name == "Mike Trout (CF)"
assert choices[0].value == "Mike Trout"
assert choices[1].name == "Ronald Acuna Jr. (OF)"
assert choices[1].value == "Ronald Acuna Jr."
@pytest.mark.asyncio
async def test_player_autocomplete_with_team(self, commands_cog, mock_interaction):
"""Test player autocomplete with team information."""
mock_team = TeamFactory.create(id=499, abbrev='LAA', sname='Angels', lname='Los Angeles Angels')
mock_team = TeamFactory.create(
id=499, abbrev="LAA", sname="Angels", lname="Los Angeles Angels"
)
mock_player = PlayerFactory.mike_trout(id=1)
mock_player.team = mock_team # Add team info
with patch('utils.autocomplete.player_service') as mock_service:
with patch("utils.autocomplete.player_service") as mock_service:
mock_service.search_players = AsyncMock(return_value=[mock_player])
from utils.autocomplete import player_autocomplete
choices = await player_autocomplete(mock_interaction, 'Trout')
choices = await player_autocomplete(mock_interaction, "Trout")
assert len(choices) == 1
assert choices[0].name == 'Mike Trout (CF - LAA)'
assert choices[0].value == 'Mike Trout'
assert choices[0].name == "Mike Trout (CF - LAA)"
assert choices[0].value == "Mike Trout"
@pytest.mark.asyncio
async def test_player_autocomplete_short_input(self, commands_cog, mock_interaction):
async def test_player_autocomplete_short_input(
self, commands_cog, mock_interaction
):
"""Test player autocomplete with short input returns empty."""
from utils.autocomplete import player_autocomplete
choices = await player_autocomplete(mock_interaction, 'T')
choices = await player_autocomplete(mock_interaction, "T")
assert len(choices) == 0
@pytest.mark.asyncio
async def test_player_autocomplete_error_handling(self, commands_cog, mock_interaction):
async def test_player_autocomplete_error_handling(
self, commands_cog, mock_interaction
):
"""Test player autocomplete error handling."""
with patch('utils.autocomplete.player_service') as mock_service:
with patch("utils.autocomplete.player_service") as mock_service:
mock_service.search_players.side_effect = Exception("API Error")
from utils.autocomplete import player_autocomplete
choices = await player_autocomplete(mock_interaction, 'Trout')
choices = await player_autocomplete(mock_interaction, "Trout")
assert len(choices) == 0
@pytest.mark.asyncio
async def test_dropadd_command_no_team(self, commands_cog, mock_interaction):
"""Test /dropadd command when user has no team."""
with patch('commands.transactions.dropadd.validate_user_has_team') as mock_validate:
with patch(
"commands.transactions.dropadd.validate_user_has_team"
) as mock_validate:
mock_validate.return_value = None
await commands_cog.dropadd.callback(commands_cog, mock_interaction)
mock_interaction.response.defer.assert_called_once()
# validate_user_has_team sends its own error message, command just returns
mock_validate.assert_called_once_with(mock_interaction)
@pytest.mark.asyncio
async def test_dropadd_command_success_no_params(self, commands_cog, mock_interaction, mock_team):
async def test_dropadd_command_success_no_params(
self, commands_cog, mock_interaction, mock_team
):
"""Test /dropadd command success without parameters."""
with patch('commands.transactions.dropadd.validate_user_has_team') as mock_validate:
with patch('commands.transactions.dropadd.get_transaction_builder') as mock_get_builder:
with patch('commands.transactions.dropadd.create_transaction_embed') as mock_create_embed:
with patch(
"commands.transactions.dropadd.validate_user_has_team"
) as mock_validate:
with patch(
"commands.transactions.dropadd.get_transaction_builder"
) as mock_get_builder:
with patch(
"commands.transactions.dropadd.create_transaction_embed"
) as mock_create_embed:
mock_validate.return_value = mock_team
mock_builder = MagicMock()
@ -143,17 +160,29 @@ class TestDropAddCommands:
# Verify flow
mock_interaction.response.defer.assert_called_once()
mock_validate.assert_called_once_with(mock_interaction)
mock_get_builder.assert_called_once_with(mock_interaction.user.id, mock_team)
mock_create_embed.assert_called_once_with(mock_builder, command_name='/dropadd')
mock_get_builder.assert_called_once_with(
mock_interaction.user.id, mock_team
)
mock_create_embed.assert_called_once_with(
mock_builder, command_name="/dropadd"
)
mock_interaction.followup.send.assert_called_once()
@pytest.mark.asyncio
async def test_dropadd_command_with_quick_move(self, commands_cog, mock_interaction, mock_team):
async def test_dropadd_command_with_quick_move(
self, commands_cog, mock_interaction, mock_team
):
"""Test /dropadd command with quick move parameters."""
with patch('commands.transactions.dropadd.validate_user_has_team') as mock_validate:
with patch('commands.transactions.dropadd.get_transaction_builder') as mock_get_builder:
with patch.object(commands_cog, '_add_quick_move') as mock_add_quick:
with patch('commands.transactions.dropadd.create_transaction_embed') as mock_create_embed:
with patch(
"commands.transactions.dropadd.validate_user_has_team"
) as mock_validate:
with patch(
"commands.transactions.dropadd.get_transaction_builder"
) as mock_get_builder:
with patch.object(commands_cog, "_add_quick_move") as mock_add_quick:
with patch(
"commands.transactions.dropadd.create_transaction_embed"
) as mock_create_embed:
mock_validate.return_value = mock_team
mock_builder = MagicMock()
@ -162,17 +191,18 @@ class TestDropAddCommands:
mock_add_quick.return_value = (True, "")
mock_create_embed.return_value = MagicMock()
await commands_cog.dropadd.callback(commands_cog,
await commands_cog.dropadd.callback(
commands_cog,
mock_interaction,
player='Mike Trout',
destination='ml'
player="Mike Trout",
destination="ml",
)
# Verify quick move was attempted
mock_add_quick.assert_called_once_with(
mock_builder, 'Mike Trout', 'ml'
mock_builder, "Mike Trout", "ml"
)
@pytest.mark.asyncio
async def test_add_quick_move_success(self, commands_cog, mock_team, mock_player):
"""Test successful quick move addition."""
@ -185,45 +215,49 @@ class TestDropAddCommands:
mock_builder._current_roster.minor_league_players = []
mock_builder._current_roster.il_players = []
with patch('commands.transactions.dropadd.player_service') as mock_service:
with patch("commands.transactions.dropadd.player_service") as mock_service:
mock_service.search_players = AsyncMock(return_value=[mock_player])
success, error_message = await commands_cog._add_quick_move(
mock_builder, 'Mike Trout', 'ml'
mock_builder, "Mike Trout", "ml"
)
assert success is True
assert error_message == ""
mock_service.search_players.assert_called_once_with('Mike Trout', limit=10, season=13)
mock_service.search_players.assert_called_once_with(
"Mike Trout", limit=10, season=13
)
mock_builder.add_move.assert_called_once()
@pytest.mark.asyncio
async def test_add_quick_move_player_not_found(self, commands_cog, mock_team):
"""Test quick move when player not found."""
mock_builder = MagicMock()
mock_builder.team = mock_team
with patch('commands.transactions.dropadd.player_service') as mock_service:
with patch("commands.transactions.dropadd.player_service") as mock_service:
mock_service.search_players = AsyncMock(return_value=[])
success, error_message = await commands_cog._add_quick_move(
mock_builder, 'Nonexistent Player', 'ml'
mock_builder, "Nonexistent Player", "ml"
)
assert success is False
assert "not found" in error_message
@pytest.mark.asyncio
async def test_add_quick_move_invalid_action(self, commands_cog, mock_team, mock_player):
async def test_add_quick_move_invalid_action(
self, commands_cog, mock_team, mock_player
):
"""Test quick move with invalid action."""
mock_builder = MagicMock()
mock_builder.team = mock_team
with patch('commands.transactions.dropadd.player_service') as mock_service:
with patch("commands.transactions.dropadd.player_service") as mock_service:
mock_service.search_players = AsyncMock(return_value=[mock_player])
success, error_message = await commands_cog._add_quick_move(
mock_builder, 'Mike Trout', 'invalid_destination'
mock_builder, "Mike Trout", "invalid_destination"
)
assert success is False
@ -244,11 +278,11 @@ class TestDropAddCommands:
mock_player.name = "Mike Trout"
mock_player.team = other_team
with patch('commands.transactions.dropadd.player_service') as mock_service:
with patch("commands.transactions.dropadd.player_service") as mock_service:
mock_service.search_players = AsyncMock(return_value=[mock_player])
success, error_message = await commands_cog._add_quick_move(
mock_builder, 'Mike Trout', 'ml'
mock_builder, "Mike Trout", "ml"
)
assert success is False
@ -270,50 +304,55 @@ class TestDropAddCommands:
mock_builder._current_roster.il_players = []
# Create a Free Agent team and player
fa_team = TeamFactory.create(id=1, abbrev="FA", sname="Free Agency", lname="Free Agency")
fa_team = TeamFactory.create(
id=1, abbrev="FA", sname="Free Agency", lname="Free Agency"
)
fa_player = PlayerFactory.create(id=12472, name="Mike Trout", team_id=1)
fa_player.team = fa_team
with patch('commands.transactions.dropadd.player_service') as mock_service:
with patch("commands.transactions.dropadd.player_service") as mock_service:
mock_service.search_players = AsyncMock(return_value=[fa_player])
success, error_message = await commands_cog._add_quick_move(
mock_builder, 'Mike Trout', 'ml'
mock_builder, "Mike Trout", "ml"
)
assert success is True
assert error_message == ""
# TODO: These tests are for obsolete MoveAction-based functionality
# The transaction system now uses from_roster/to_roster directly
# def test_determine_roster_types_add(self, commands_cog):
# def test_determine_roster_types_drop(self, commands_cog):
# def test_determine_roster_types_recall(self, commands_cog):
# def test_determine_roster_types_demote(self, commands_cog):
pass # Placeholder
@pytest.mark.asyncio
async def test_clear_transaction_command(self, commands_cog, mock_interaction):
"""Test /cleartransaction command."""
with patch('commands.transactions.dropadd.clear_transaction_builder') as mock_clear:
await commands_cog.clear_transaction.callback(commands_cog, mock_interaction)
with patch(
"commands.transactions.dropadd.clear_transaction_builder"
) as mock_clear:
await commands_cog.clear_transaction.callback(
commands_cog, mock_interaction
)
mock_clear.assert_called_once_with(mock_interaction.user.id)
mock_interaction.response.send_message.assert_called_once()
# Check success message
call_args = mock_interaction.response.send_message.call_args
assert "transaction builder has been cleared" in call_args[0][0]
assert call_args[1]['ephemeral'] is True
assert call_args[1]["ephemeral"] is True
@pytest.mark.asyncio
async def test_dropadd_first_move_shows_full_embed(self, commands_cog, mock_interaction, mock_team):
async def test_dropadd_first_move_shows_full_embed(
self, commands_cog, mock_interaction, mock_team
):
"""Test /dropadd command with first move shows full interactive embed."""
with patch('commands.transactions.dropadd.validate_user_has_team') as mock_validate:
with patch('commands.transactions.dropadd.get_transaction_builder') as mock_get_builder:
with patch.object(commands_cog, '_add_quick_move') as mock_add_quick:
with patch('commands.transactions.dropadd.create_transaction_embed') as mock_create_embed:
with patch(
"commands.transactions.dropadd.validate_user_has_team"
) as mock_validate:
with patch(
"commands.transactions.dropadd.get_transaction_builder"
) as mock_get_builder:
with patch.object(commands_cog, "_add_quick_move") as mock_add_quick:
with patch(
"commands.transactions.dropadd.create_transaction_embed"
) as mock_create_embed:
mock_validate.return_value = mock_team
# Create empty builder (first move)
@ -324,76 +363,88 @@ class TestDropAddCommands:
mock_add_quick.return_value = (True, "")
mock_create_embed.return_value = MagicMock()
await commands_cog.dropadd.callback(commands_cog,
await commands_cog.dropadd.callback(
commands_cog,
mock_interaction,
player='Mike Trout',
destination='ml'
player="Mike Trout",
destination="ml",
)
# Should show full embed with view (now ephemeral)
mock_interaction.followup.send.assert_called_once()
call_args = mock_interaction.followup.send.call_args
assert call_args[1]['ephemeral'] is True
assert 'embed' in call_args[1]
assert 'view' in call_args[1]
assert 'content' in call_args[1]
assert call_args[1]["ephemeral"] is True
assert "embed" in call_args[1]
assert "view" in call_args[1]
assert "content" in call_args[1]
@pytest.mark.asyncio
async def test_dropadd_append_mode_shows_confirmation(self, commands_cog, mock_interaction, mock_team):
async def test_dropadd_append_mode_shows_confirmation(
self, commands_cog, mock_interaction, mock_team
):
"""Test /dropadd command in append mode shows ephemeral confirmation."""
with patch('commands.transactions.dropadd.validate_user_has_team') as mock_validate:
with patch('commands.transactions.dropadd.get_transaction_builder') as mock_get_builder:
with patch.object(commands_cog, '_add_quick_move') as mock_add_quick:
with patch(
"commands.transactions.dropadd.validate_user_has_team"
) as mock_validate:
with patch(
"commands.transactions.dropadd.get_transaction_builder"
) as mock_get_builder:
with patch.object(commands_cog, "_add_quick_move") as mock_add_quick:
mock_validate.return_value = mock_team
# Create builder with existing moves (append mode)
mock_builder = MagicMock()
mock_builder.is_empty = False
mock_builder.move_count = 2
mock_builder.validate_transaction = AsyncMock(return_value=MagicMock(
is_legal=True,
major_league_count=25,
minor_league_count=10,
warnings=[],
errors=[],
suggestions=[]
))
mock_builder.validate_transaction = AsyncMock(
return_value=MagicMock(
is_legal=True,
major_league_count=25,
minor_league_count=10,
warnings=[],
errors=[],
suggestions=[],
)
)
mock_get_builder.return_value = mock_builder
mock_add_quick.return_value = (True, "")
with patch('commands.transactions.dropadd.create_transaction_embed') as mock_create_embed:
with patch(
"commands.transactions.dropadd.create_transaction_embed"
) as mock_create_embed:
mock_create_embed.return_value = MagicMock()
await commands_cog.dropadd.callback(commands_cog,
await commands_cog.dropadd.callback(
commands_cog,
mock_interaction,
player='Kevin Ginkel',
destination='ml'
player="Kevin Ginkel",
destination="ml",
)
# Should show embed with ephemeral confirmation
mock_interaction.followup.send.assert_called_once()
call_args = mock_interaction.followup.send.call_args
assert call_args[1]['ephemeral'] is True
assert 'embed' in call_args[1]
assert 'view' in call_args[1]
content = call_args[1]['content']
assert call_args[1]["ephemeral"] is True
assert "embed" in call_args[1]
assert "view" in call_args[1]
content = call_args[1]["content"]
assert "Added Kevin Ginkel → ML" in content
assert "Transaction now has 2 moves" in content
class TestDropAddCommandsIntegration:
"""Integration tests for dropadd commands with real-like data flows."""
@pytest.fixture
def mock_bot(self):
"""Create mock Discord bot."""
return MagicMock()
@pytest.fixture
def commands_cog(self, mock_bot):
"""Create DropAddCommands cog instance."""
return DropAddCommands(mock_bot)
@pytest.mark.asyncio
async def test_full_dropadd_workflow(self, commands_cog):
"""Test complete dropadd workflow from command to builder creation."""
@ -407,13 +458,23 @@ class TestDropAddCommandsIntegration:
mock_player = PlayerFactory.mike_trout(id=12472)
with patch('commands.transactions.dropadd.validate_user_has_team') as mock_validate:
with patch('commands.transactions.dropadd.player_service') as mock_player_service:
with patch('commands.transactions.dropadd.get_transaction_builder') as mock_get_builder:
with patch('commands.transactions.dropadd.create_transaction_embed') as mock_create_embed:
with patch(
"commands.transactions.dropadd.validate_user_has_team"
) as mock_validate:
with patch(
"commands.transactions.dropadd.player_service"
) as mock_player_service:
with patch(
"commands.transactions.dropadd.get_transaction_builder"
) as mock_get_builder:
with patch(
"commands.transactions.dropadd.create_transaction_embed"
) as mock_create_embed:
# Setup mocks
mock_validate.return_value = mock_team
mock_player_service.search_players = AsyncMock(return_value=[mock_player])
mock_player_service.search_players = AsyncMock(
return_value=[mock_player]
)
mock_builder = TransactionBuilder(mock_team, 123456789, 13)
mock_get_builder.return_value = mock_builder
@ -421,13 +482,15 @@ class TestDropAddCommandsIntegration:
# Mock the async function
async def mock_create_embed_func(builder, command_name=None):
return MagicMock()
mock_create_embed.side_effect = mock_create_embed_func
# Execute command with parameters
await commands_cog.dropadd.callback(commands_cog,
await commands_cog.dropadd.callback(
commands_cog,
mock_interaction,
player='Mike Trout',
destination='ml'
player="Mike Trout",
destination="ml",
)
# Verify the builder has the move
@ -436,7 +499,7 @@ class TestDropAddCommandsIntegration:
assert move.player == mock_player
# Note: TransactionMove no longer has 'action' field - uses from_roster/to_roster instead
assert move.to_roster == RosterType.MAJOR_LEAGUE
@pytest.mark.asyncio
async def test_error_recovery_in_workflow(self, commands_cog):
"""Test error recovery in dropadd workflow."""
@ -446,7 +509,9 @@ class TestDropAddCommandsIntegration:
mock_interaction.guild = MagicMock()
mock_interaction.guild.id = 669356687294988350
with patch('commands.transactions.dropadd.validate_user_has_team') as mock_validate:
with patch(
"commands.transactions.dropadd.validate_user_has_team"
) as mock_validate:
# Simulate API error
mock_validate.side_effect = Exception("API Error")
@ -455,4 +520,4 @@ class TestDropAddCommandsIntegration:
await commands_cog.dropadd.callback(commands_cog, mock_interaction)
# Should have deferred before the error occurred
mock_interaction.response.defer.assert_called_once()
mock_interaction.response.defer.assert_called_once()

View File

@ -15,8 +15,6 @@ from commands.profile.images import (
can_edit_player_image,
ImageCommands
)
from models.player import Player
from models.team import Team
from tests.factories import PlayerFactory, TeamFactory

View File

@ -7,12 +7,11 @@ Tests cover:
- Message listener (detection logic)
- Info command (response formatting)
"""
import pytest
import json
import re
from datetime import datetime, timedelta, UTC
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, patch, mock_open
from datetime import datetime, UTC
from aioresponses import aioresponses
# Import modules to test
@ -21,15 +20,16 @@ from commands.soak.giphy_service import (
get_random_phrase_for_tier,
get_tier_description,
get_disappointment_gif,
DISAPPOINTMENT_TIERS
)
from services.giphy_service import DISAPPOINTMENT_TIERS
from commands.soak.tracker import SoakTracker
# Listener uses simple string matching: ' soak' in msg_text.lower()
# Define helper function that mimics the listener's detection logic
def soak_detected(text: str) -> bool:
"""Check if soak mention is detected using listener's logic."""
return ' soak' in text.lower()
return " soak" in text.lower()
class TestGiphyService:
@ -38,78 +38,78 @@ class TestGiphyService:
def test_tier_determination_first_ever(self):
"""Test tier determination for first ever soak."""
tier = get_tier_for_seconds(None)
assert tier == 'first_ever'
assert tier == "first_ever"
def test_tier_determination_maximum_disappointment(self):
"""Test tier 1: 0-30 minutes (maximum disappointment)."""
# 15 minutes
tier = get_tier_for_seconds(900)
assert tier == 'tier_1'
assert tier == "tier_1"
# 30 minutes (boundary)
tier = get_tier_for_seconds(1800)
assert tier == 'tier_1'
assert tier == "tier_1"
def test_tier_determination_severe_disappointment(self):
"""Test tier 2: 30min-2hrs (severe disappointment)."""
# 1 hour
tier = get_tier_for_seconds(3600)
assert tier == 'tier_2'
assert tier == "tier_2"
# 2 hours (boundary)
tier = get_tier_for_seconds(7200)
assert tier == 'tier_2'
assert tier == "tier_2"
def test_tier_determination_strong_disappointment(self):
"""Test tier 3: 2-6 hours (strong disappointment)."""
# 4 hours
tier = get_tier_for_seconds(14400)
assert tier == 'tier_3'
assert tier == "tier_3"
# 6 hours (boundary)
tier = get_tier_for_seconds(21600)
assert tier == 'tier_3'
assert tier == "tier_3"
def test_tier_determination_moderate_disappointment(self):
"""Test tier 4: 6-24 hours (moderate disappointment)."""
# 12 hours
tier = get_tier_for_seconds(43200)
assert tier == 'tier_4'
assert tier == "tier_4"
# 24 hours (boundary)
tier = get_tier_for_seconds(86400)
assert tier == 'tier_4'
assert tier == "tier_4"
def test_tier_determination_mild_disappointment(self):
"""Test tier 5: 1-7 days (mild disappointment)."""
# 3 days
tier = get_tier_for_seconds(259200)
assert tier == 'tier_5'
assert tier == "tier_5"
# 7 days (boundary)
tier = get_tier_for_seconds(604800)
assert tier == 'tier_5'
assert tier == "tier_5"
def test_tier_determination_minimal_disappointment(self):
"""Test tier 6: 7+ days (minimal disappointment)."""
# 10 days
tier = get_tier_for_seconds(864000)
assert tier == 'tier_6'
assert tier == "tier_6"
# 30 days
tier = get_tier_for_seconds(2592000)
assert tier == 'tier_6'
assert tier == "tier_6"
def test_random_phrase_selection(self):
"""Test that random phrase selection returns valid phrases."""
for tier_key in DISAPPOINTMENT_TIERS.keys():
phrase = get_random_phrase_for_tier(tier_key)
assert phrase in DISAPPOINTMENT_TIERS[tier_key]['phrases']
assert phrase in DISAPPOINTMENT_TIERS[tier_key]["phrases"]
def test_tier_description_retrieval(self):
"""Test tier description retrieval."""
assert get_tier_description('tier_1') == "Maximum Disappointment"
assert get_tier_description('first_ever') == "The Beginning"
assert get_tier_description("tier_1") == "Maximum Disappointment"
assert get_tier_description("first_ever") == "The Beginning"
@pytest.mark.asyncio
async def test_get_disappointment_gif_success(self):
@ -122,22 +122,22 @@ class TestGiphyService:
# Mock successful Giphy response with correct response structure
# The service looks for data.images.original.url, not data.url
m.get(
re.compile(r'https://api\.giphy\.com/v1/gifs/translate\?.*'),
re.compile(r"https://api\.giphy\.com/v1/gifs/translate\?.*"),
payload={
'data': {
'images': {
'original': {
'url': 'https://media.giphy.com/media/test123/giphy.gif'
"data": {
"images": {
"original": {
"url": "https://media.giphy.com/media/test123/giphy.gif"
}
},
'title': 'Disappointed Reaction'
"title": "Disappointed Reaction",
}
},
status=200
status=200,
)
gif_url = await get_disappointment_gif('tier_1')
assert gif_url == 'https://media.giphy.com/media/test123/giphy.gif'
gif_url = await get_disappointment_gif("tier_1")
assert gif_url == "https://media.giphy.com/media/test123/giphy.gif"
@pytest.mark.asyncio
async def test_get_disappointment_gif_filters_trump(self):
@ -151,37 +151,37 @@ class TestGiphyService:
# First response is Trump GIF (should be filtered)
# Uses correct response structure with images.original.url
m.get(
re.compile(r'https://api\.giphy\.com/v1/gifs/translate\?.*'),
re.compile(r"https://api\.giphy\.com/v1/gifs/translate\?.*"),
payload={
'data': {
'images': {
'original': {
'url': 'https://media.giphy.com/media/trump123/giphy.gif'
"data": {
"images": {
"original": {
"url": "https://media.giphy.com/media/trump123/giphy.gif"
}
},
'title': 'Donald Trump Disappointed'
"title": "Donald Trump Disappointed",
}
},
status=200
status=200,
)
# Second response is acceptable
m.get(
re.compile(r'https://api\.giphy\.com/v1/gifs/translate\?.*'),
re.compile(r"https://api\.giphy\.com/v1/gifs/translate\?.*"),
payload={
'data': {
'images': {
'original': {
'url': 'https://media.giphy.com/media/good456/giphy.gif'
"data": {
"images": {
"original": {
"url": "https://media.giphy.com/media/good456/giphy.gif"
}
},
'title': 'Disappointed Reaction'
"title": "Disappointed Reaction",
}
},
status=200
status=200,
)
gif_url = await get_disappointment_gif('tier_1')
assert gif_url == 'https://media.giphy.com/media/good456/giphy.gif'
gif_url = await get_disappointment_gif("tier_1")
assert gif_url == "https://media.giphy.com/media/good456/giphy.gif"
@pytest.mark.asyncio
async def test_get_disappointment_gif_api_failure(self):
@ -189,12 +189,12 @@ class TestGiphyService:
with aioresponses() as m:
# Mock API failure for all requests
m.get(
re.compile(r'https://api\.giphy\.com/v1/gifs/translate\?.*'),
re.compile(r"https://api\.giphy\.com/v1/gifs/translate\?.*"),
status=500,
repeat=True
repeat=True,
)
gif_url = await get_disappointment_gif('tier_1')
gif_url = await get_disappointment_gif("tier_1")
assert gif_url is None
@ -224,13 +224,13 @@ class TestSoakTracker:
"username": "testuser",
"display_name": "Test User",
"channel_id": "456",
"message_id": "789"
"message_id": "789",
},
"total_count": 5,
"history": []
"history": [],
}
with open(temp_tracker_file, 'w') as f:
with open(temp_tracker_file, "w") as f:
json.dump(existing_data, f)
tracker = SoakTracker(temp_tracker_file)
@ -247,15 +247,15 @@ class TestSoakTracker:
username="testuser",
display_name="Test User",
channel_id=789012,
message_id=345678
message_id=345678,
)
assert tracker.get_soak_count() == 1
last_soak = tracker.get_last_soak()
assert last_soak is not None
assert last_soak['user_id'] == '123456'
assert last_soak['username'] == 'testuser'
assert last_soak["user_id"] == "123456"
assert last_soak["username"] == "testuser"
def test_record_multiple_soaks(self, temp_tracker_file):
"""Test recording multiple soaks maintains history."""
@ -268,7 +268,7 @@ class TestSoakTracker:
username=f"user{i}",
display_name=f"User {i}",
channel_id=100,
message_id=200 + i
message_id=200 + i,
)
assert tracker.get_soak_count() == 3
@ -276,8 +276,8 @@ class TestSoakTracker:
history = tracker.get_history()
assert len(history) == 3
# History should be newest first
assert history[0]['user_id'] == '2'
assert history[2]['user_id'] == '0'
assert history[0]["user_id"] == "2"
assert history[2]["user_id"] == "0"
def test_get_time_since_last_soak(self, temp_tracker_file):
"""Test time calculation since last soak."""
@ -292,7 +292,7 @@ class TestSoakTracker:
username="test",
display_name="Test",
channel_id=456,
message_id=789
message_id=789,
)
# Time since should be very small (just recorded)
@ -311,7 +311,7 @@ class TestSoakTracker:
username=f"user{i}",
display_name=f"User {i}",
channel_id=100,
message_id=200 + i
message_id=200 + i,
)
history = tracker.get_history(limit=9999)
@ -380,5 +380,9 @@ class TestInfoCommand:
channel_id = 987654321
message_id = 111222333
expected_url = f"https://discord.com/channels/{guild_id}/{channel_id}/{message_id}"
assert expected_url == "https://discord.com/channels/123456789/987654321/111222333"
expected_url = (
f"https://discord.com/channels/{guild_id}/{channel_id}/{message_id}"
)
assert (
expected_url == "https://discord.com/channels/123456789/987654321/111222333"
)

View File

@ -14,7 +14,6 @@ from commands.teams.branding import (
BrandingCommands
)
from models.team import Team
from tests.factories import TeamFactory
class TestHexColorValidation:

View File

@ -3,8 +3,6 @@ Tests for voice channel commands
Validates voice channel creation, cleanup, and migration message functionality.
"""
import asyncio
import json
import tempfile
from datetime import datetime, timedelta, UTC
from pathlib import Path

View File

@ -3,39 +3,46 @@ Tests for configuration management
Ensures configuration loading, validation, and environment handling work correctly.
"""
import os
import pytest
from unittest.mock import patch
from config import BotConfig
from exceptions import ConfigurationException
class TestBotConfig:
"""Test configuration loading and validation."""
def test_config_loads_required_fields(self):
"""Test that config loads all required fields from environment."""
with patch.dict(os.environ, {
'BOT_TOKEN': 'test_bot_token',
'GUILD_ID': '123456789',
'API_TOKEN': 'test_api_token',
'DB_URL': 'https://api.example.com'
}):
with patch.dict(
os.environ,
{
"BOT_TOKEN": "test_bot_token",
"GUILD_ID": "123456789",
"API_TOKEN": "test_api_token",
"DB_URL": "https://api.example.com",
},
):
config = BotConfig()
assert config.bot_token == 'test_bot_token'
assert config.bot_token == "test_bot_token"
assert config.guild_id == 123456789
assert config.api_token == 'test_api_token'
assert config.db_url == 'https://api.example.com'
assert config.api_token == "test_api_token"
assert config.db_url == "https://api.example.com"
def test_config_has_default_values(self):
"""Test that config provides sensible defaults."""
with patch.dict(os.environ, {
'BOT_TOKEN': 'test_bot_token',
'GUILD_ID': '123456789',
'API_TOKEN': 'test_api_token',
'DB_URL': 'https://api.example.com'
}, clear=True):
with patch.dict(
os.environ,
{
"BOT_TOKEN": "test_bot_token",
"GUILD_ID": "123456789",
"API_TOKEN": "test_api_token",
"DB_URL": "https://api.example.com",
},
clear=True,
):
# Create config with disabled env file to test true defaults
config = BotConfig(_env_file=None)
assert config.sba_season == 13
@ -44,199 +51,246 @@ class TestBotConfig:
assert config.sba_color == "a6ce39"
assert config.log_level == "INFO"
assert config.environment == "development"
assert config.testing is True
assert config.testing is False
def test_config_overrides_defaults_from_env(self):
"""Test that environment variables override default values."""
with patch.dict(os.environ, {
'BOT_TOKEN': 'test_bot_token',
'GUILD_ID': '123456789',
'API_TOKEN': 'test_api_token',
'DB_URL': 'https://api.example.com',
'SBA_SEASON': '15',
'LOG_LEVEL': 'DEBUG',
'ENVIRONMENT': 'production',
'TESTING': 'true'
}):
with patch.dict(
os.environ,
{
"BOT_TOKEN": "test_bot_token",
"GUILD_ID": "123456789",
"API_TOKEN": "test_api_token",
"DB_URL": "https://api.example.com",
"SBA_SEASON": "15",
"LOG_LEVEL": "DEBUG",
"ENVIRONMENT": "production",
"TESTING": "true",
},
):
config = BotConfig()
assert config.sba_season == 15
assert config.log_level == "DEBUG"
assert config.environment == "production"
assert config.testing is True
def test_config_ignores_extra_env_vars(self):
"""Test that extra environment variables are ignored."""
with patch.dict(os.environ, {
'BOT_TOKEN': 'test_bot_token',
'GUILD_ID': '123456789',
'API_TOKEN': 'test_api_token',
'DB_URL': 'https://api.example.com',
'RANDOM_EXTRA_VAR': 'should_be_ignored',
'ANOTHER_RANDOM_VAR': 'also_ignored'
}):
with patch.dict(
os.environ,
{
"BOT_TOKEN": "test_bot_token",
"GUILD_ID": "123456789",
"API_TOKEN": "test_api_token",
"DB_URL": "https://api.example.com",
"RANDOM_EXTRA_VAR": "should_be_ignored",
"ANOTHER_RANDOM_VAR": "also_ignored",
},
):
# Should not raise validation error
config = BotConfig()
assert config.bot_token == 'test_bot_token'
assert config.bot_token == "test_bot_token"
# Extra vars should not be accessible
assert not hasattr(config, 'random_extra_var')
assert not hasattr(config, 'another_random_var')
assert not hasattr(config, "random_extra_var")
assert not hasattr(config, "another_random_var")
def test_config_converts_string_to_int(self):
"""Test that guild_id is properly converted from string to int."""
with patch.dict(os.environ, {
'BOT_TOKEN': 'test_bot_token',
'GUILD_ID': '987654321', # String input
'API_TOKEN': 'test_api_token',
'DB_URL': 'https://api.example.com'
}):
with patch.dict(
os.environ,
{
"BOT_TOKEN": "test_bot_token",
"GUILD_ID": "987654321", # String input
"API_TOKEN": "test_api_token",
"DB_URL": "https://api.example.com",
},
):
config = BotConfig()
assert config.guild_id == 987654321
assert isinstance(config.guild_id, int)
def test_config_converts_string_to_bool(self):
"""Test that boolean fields are properly converted."""
with patch.dict(os.environ, {
'BOT_TOKEN': 'test_bot_token',
'GUILD_ID': '123456789',
'API_TOKEN': 'test_api_token',
'DB_URL': 'https://api.example.com',
'TESTING': 'false'
}):
with patch.dict(
os.environ,
{
"BOT_TOKEN": "test_bot_token",
"GUILD_ID": "123456789",
"API_TOKEN": "test_api_token",
"DB_URL": "https://api.example.com",
"TESTING": "false",
},
):
config = BotConfig()
assert config.testing is False
assert isinstance(config.testing, bool)
with patch.dict(os.environ, {
'BOT_TOKEN': 'test_bot_token',
'GUILD_ID': '123456789',
'API_TOKEN': 'test_api_token',
'DB_URL': 'https://api.example.com',
'TESTING': '1'
}):
with patch.dict(
os.environ,
{
"BOT_TOKEN": "test_bot_token",
"GUILD_ID": "123456789",
"API_TOKEN": "test_api_token",
"DB_URL": "https://api.example.com",
"TESTING": "1",
},
):
config = BotConfig()
assert config.testing is True
def test_config_case_insensitive(self):
"""Test that environment variables are case insensitive."""
with patch.dict(os.environ, {
'bot_token': 'test_bot_token', # lowercase
'GUILD_ID': '123456789', # uppercase
'Api_Token': 'test_api_token', # mixed case
'db_url': 'https://api.example.com'
}):
with patch.dict(
os.environ,
{
"bot_token": "test_bot_token", # lowercase
"GUILD_ID": "123456789", # uppercase
"Api_Token": "test_api_token", # mixed case
"db_url": "https://api.example.com",
},
):
config = BotConfig()
assert config.bot_token == 'test_bot_token'
assert config.api_token == 'test_api_token'
assert config.db_url == 'https://api.example.com'
assert config.bot_token == "test_bot_token"
assert config.api_token == "test_api_token"
assert config.db_url == "https://api.example.com"
def test_is_development_property(self):
"""Test the is_development property."""
with patch.dict(os.environ, {
'BOT_TOKEN': 'test_bot_token',
'GUILD_ID': '123456789',
'API_TOKEN': 'test_api_token',
'DB_URL': 'https://api.example.com',
'ENVIRONMENT': 'development'
}):
with patch.dict(
os.environ,
{
"BOT_TOKEN": "test_bot_token",
"GUILD_ID": "123456789",
"API_TOKEN": "test_api_token",
"DB_URL": "https://api.example.com",
"ENVIRONMENT": "development",
},
):
config = BotConfig()
assert config.is_development is True
with patch.dict(os.environ, {
'BOT_TOKEN': 'test_bot_token',
'GUILD_ID': '123456789',
'API_TOKEN': 'test_api_token',
'DB_URL': 'https://api.example.com',
'ENVIRONMENT': 'production'
}):
with patch.dict(
os.environ,
{
"BOT_TOKEN": "test_bot_token",
"GUILD_ID": "123456789",
"API_TOKEN": "test_api_token",
"DB_URL": "https://api.example.com",
"ENVIRONMENT": "production",
},
):
config = BotConfig()
assert config.is_development is False
def test_is_testing_property(self):
"""Test the is_testing property."""
with patch.dict(os.environ, {
'BOT_TOKEN': 'test_bot_token',
'GUILD_ID': '123456789',
'API_TOKEN': 'test_api_token',
'DB_URL': 'https://api.example.com',
'TESTING': 'true'
}):
with patch.dict(
os.environ,
{
"BOT_TOKEN": "test_bot_token",
"GUILD_ID": "123456789",
"API_TOKEN": "test_api_token",
"DB_URL": "https://api.example.com",
"TESTING": "true",
},
):
config = BotConfig()
assert config.is_testing is True
with patch.dict(os.environ, {
'BOT_TOKEN': 'test_bot_token',
'GUILD_ID': '123456789',
'API_TOKEN': 'test_api_token',
'DB_URL': 'https://api.example.com',
'TESTING': 'false'
}):
with patch.dict(
os.environ,
{
"BOT_TOKEN": "test_bot_token",
"GUILD_ID": "123456789",
"API_TOKEN": "test_api_token",
"DB_URL": "https://api.example.com",
"TESTING": "false",
},
):
config = BotConfig()
assert config.is_testing is False
class TestConfigValidation:
"""Test configuration validation and error handling."""
def test_missing_required_field_raises_error(self):
"""Test that missing required fields raise validation errors."""
# Missing BOT_TOKEN
with patch.dict(os.environ, {
'GUILD_ID': '123456789',
'API_TOKEN': 'test_api_token',
'DB_URL': 'https://api.example.com'
}, clear=True):
with patch.dict(
os.environ,
{
"GUILD_ID": "123456789",
"API_TOKEN": "test_api_token",
"DB_URL": "https://api.example.com",
},
clear=True,
):
with pytest.raises(Exception): # Pydantic ValidationError
BotConfig(_env_file=None)
# Missing GUILD_ID
with patch.dict(os.environ, {
'BOT_TOKEN': 'test_bot_token',
'API_TOKEN': 'test_api_token',
'DB_URL': 'https://api.example.com'
}, clear=True):
with patch.dict(
os.environ,
{
"BOT_TOKEN": "test_bot_token",
"API_TOKEN": "test_api_token",
"DB_URL": "https://api.example.com",
},
clear=True,
):
with pytest.raises(Exception): # Pydantic ValidationError
BotConfig(_env_file=None)
def test_invalid_guild_id_raises_error(self):
"""Test that invalid guild_id values raise validation errors."""
with patch.dict(os.environ, {
'BOT_TOKEN': 'test_bot_token',
'GUILD_ID': 'not_a_number',
'API_TOKEN': 'test_api_token',
'DB_URL': 'https://api.example.com'
}):
with patch.dict(
os.environ,
{
"BOT_TOKEN": "test_bot_token",
"GUILD_ID": "not_a_number",
"API_TOKEN": "test_api_token",
"DB_URL": "https://api.example.com",
},
):
with pytest.raises(Exception): # Pydantic ValidationError
BotConfig()
def test_empty_required_field_is_allowed(self):
"""Test that empty required fields are allowed (Pydantic default behavior)."""
with patch.dict(os.environ, {
'BOT_TOKEN': '', # Empty string
'GUILD_ID': '123456789',
'API_TOKEN': 'test_api_token',
'DB_URL': 'https://api.example.com'
}):
with patch.dict(
os.environ,
{
"BOT_TOKEN": "", # Empty string
"GUILD_ID": "123456789",
"API_TOKEN": "test_api_token",
"DB_URL": "https://api.example.com",
},
):
# Should not raise - Pydantic allows empty strings by default
config = BotConfig()
assert config.bot_token == ''
assert config.bot_token == ""
@pytest.fixture
def valid_config():
"""Provide a valid configuration for testing."""
with patch.dict(os.environ, {
'BOT_TOKEN': 'test_bot_token',
'GUILD_ID': '123456789',
'API_TOKEN': 'test_api_token',
'DB_URL': 'https://api.example.com'
}):
with patch.dict(
os.environ,
{
"BOT_TOKEN": "test_bot_token",
"GUILD_ID": "123456789",
"API_TOKEN": "test_api_token",
"DB_URL": "https://api.example.com",
},
):
return BotConfig()
def test_config_fixture(valid_config):
"""Test that the valid_config fixture works correctly."""
assert valid_config.bot_token == 'test_bot_token'
assert valid_config.bot_token == "test_bot_token"
assert valid_config.guild_id == 123456789
assert valid_config.api_token == 'test_api_token'
assert valid_config.db_url == 'https://api.example.com'
assert valid_config.api_token == "test_api_token"
assert valid_config.db_url == "https://api.example.com"

View File

@ -3,7 +3,6 @@ Tests for application configuration
Validates that config values have sensible defaults.
"""
import pytest
from config import get_config, PITCHER_POSITIONS, POSITION_FIELDERS, ALL_POSITIONS

View File

@ -6,18 +6,15 @@ Tests complete workflows from command invocation through transaction submission.
import pytest
from unittest.mock import AsyncMock, MagicMock, patch
from datetime import datetime
from commands.transactions.dropadd import DropAddCommands
from services.transaction_builder import (
TransactionBuilder,
TransactionMove,
get_transaction_builder,
clear_transaction_builder,
)
from models.team import RosterType
from views.transaction_embed import TransactionEmbedView, SubmitConfirmationModal
from models.team import Team
from views.transaction_embed import SubmitConfirmationModal
from models.player import Player
from models.roster import TeamRoster
from models.transaction import Transaction

View File

@ -4,7 +4,6 @@ Tests for SBA data models
Validates model creation, validation, and business logic.
"""
import pytest
from datetime import datetime
from models import Team, Player, Current, DraftPick, DraftData, DraftList

View File

@ -3,6 +3,7 @@ Simplified tests for Custom Command models in Discord Bot v2.0
Testing dataclass models without Pydantic validation.
"""
import pytest
from datetime import datetime, timedelta, timezone
@ -11,13 +12,13 @@ from models.custom_command import (
CustomCommandCreator,
CustomCommandSearchFilters,
CustomCommandSearchResult,
CustomCommandStats
CustomCommandStats,
)
class TestCustomCommandCreator:
"""Test the CustomCommandCreator dataclass."""
def test_creator_creation(self):
"""Test creating a creator instance."""
now = datetime.now(timezone.utc)
@ -28,9 +29,9 @@ class TestCustomCommandCreator:
display_name="Test User",
created_at=now,
total_commands=10,
active_commands=5
active_commands=5,
)
assert creator.id == 1
assert creator.discord_id == 12345
assert creator.username == "testuser"
@ -38,7 +39,7 @@ class TestCustomCommandCreator:
assert creator.created_at == now
assert creator.total_commands == 10
assert creator.active_commands == 5
def test_creator_optional_fields(self):
"""Test creator with None display_name."""
now = datetime.now(timezone.utc)
@ -49,9 +50,9 @@ class TestCustomCommandCreator:
display_name=None,
created_at=now,
total_commands=0,
active_commands=0
active_commands=0,
)
assert creator.display_name is None
assert creator.total_commands == 0
assert creator.active_commands == 0
@ -59,7 +60,7 @@ class TestCustomCommandCreator:
class TestCustomCommand:
"""Test the CustomCommand dataclass."""
@pytest.fixture
def sample_creator(self) -> CustomCommandCreator:
"""Fixture providing a sample creator."""
@ -70,9 +71,9 @@ class TestCustomCommand:
display_name="Test User",
created_at=datetime.now(timezone.utc),
total_commands=5,
active_commands=5
active_commands=5,
)
def test_command_basic_creation(self, sample_creator: CustomCommandCreator):
"""Test creating a basic command."""
now = datetime.now(timezone.utc)
@ -88,9 +89,9 @@ class TestCustomCommand:
use_count=0,
warning_sent=False,
is_active=True,
tags=None
tags=None,
)
assert command.id == 1
assert command.name == "hello"
assert command.content == "Hello, world!"
@ -102,13 +103,13 @@ class TestCustomCommand:
assert command.tags is None
assert command.is_active is True
assert command.warning_sent is False
def test_command_with_optional_fields(self, sample_creator: CustomCommandCreator):
"""Test command with all optional fields."""
now = datetime.now(timezone.utc)
last_used = now - timedelta(hours=1)
updated = now - timedelta(minutes=30)
command = CustomCommand(
id=1,
name="advanced",
@ -121,19 +122,19 @@ class TestCustomCommand:
use_count=25,
warning_sent=True,
is_active=True,
tags=["fun", "utility"]
tags=["fun", "utility"],
)
assert command.use_count == 25
assert command.last_used == last_used
assert command.updated_at == updated
assert command.tags == ["fun", "utility"]
assert command.warning_sent is True
def test_days_since_last_use_property(self, sample_creator: CustomCommandCreator):
"""Test days since last use calculation."""
now = datetime.now(timezone.utc)
# Command used 5 days ago
command = CustomCommand(
id=1,
@ -147,17 +148,21 @@ class TestCustomCommand:
use_count=1,
warning_sent=False,
is_active=True,
tags=None
tags=None,
)
# Mock datetime.utcnow for consistent testing
with pytest.MonkeyPatch().context() as m:
m.setattr('models.custom_command.datetime', type('MockDateTime', (), {
'utcnow': lambda: now,
'now': lambda: now
}))
m.setattr(
"models.custom_command.datetime",
type(
"MockDateTime",
(),
{"utcnow": lambda: now, "now": lambda tz=None: now},
),
)
assert command.days_since_last_use == 5
# Command never used
unused_command = CustomCommand(
id=2,
@ -171,15 +176,15 @@ class TestCustomCommand:
use_count=0,
warning_sent=False,
is_active=True,
tags=None
tags=None,
)
assert unused_command.days_since_last_use is None
def test_popularity_score_calculation(self, sample_creator: CustomCommandCreator):
"""Test popularity score calculation."""
now = datetime.now(timezone.utc)
# Test with recent usage
recent_command = CustomCommand(
id=1,
@ -193,18 +198,22 @@ class TestCustomCommand:
use_count=50,
warning_sent=False,
is_active=True,
tags=None
tags=None,
)
with pytest.MonkeyPatch().context() as m:
m.setattr('models.custom_command.datetime', type('MockDateTime', (), {
'utcnow': lambda: now,
'now': lambda: now
}))
m.setattr(
"models.custom_command.datetime",
type(
"MockDateTime",
(),
{"utcnow": lambda: now, "now": lambda tz=None: now},
),
)
score = recent_command.popularity_score
assert 0 <= score <= 15 # Can be higher due to recency bonus
assert score > 0 # Should have some score due to usage
# Test with no usage
unused_command = CustomCommand(
id=2,
@ -218,19 +227,19 @@ class TestCustomCommand:
use_count=0,
warning_sent=False,
is_active=True,
tags=None
tags=None,
)
assert unused_command.popularity_score == 0
class TestCustomCommandSearchFilters:
"""Test the search filters dataclass."""
def test_default_filters(self):
"""Test default filter values."""
filters = CustomCommandSearchFilters()
assert filters.name_contains is None
assert filters.creator_id is None
assert filters.creator_name is None
@ -240,7 +249,7 @@ class TestCustomCommandSearchFilters:
assert filters.is_active is True
# Note: sort_by, sort_desc, page, page_size have Field objects as defaults
# due to mixed dataclass/Pydantic usage - skipping specific value tests
def test_custom_filters(self):
"""Test creating filters with custom values."""
filters = CustomCommandSearchFilters(
@ -250,9 +259,9 @@ class TestCustomCommandSearchFilters:
sort_by="popularity",
sort_desc=True,
page=2,
page_size=10
page_size=10,
)
assert filters.name_contains == "test"
assert filters.creator_name == "user123"
assert filters.min_uses == 5
@ -264,7 +273,7 @@ class TestCustomCommandSearchFilters:
class TestCustomCommandSearchResult:
"""Test the search result dataclass."""
@pytest.fixture
def sample_commands(self) -> list[CustomCommand]:
"""Fixture providing sample commands."""
@ -275,9 +284,9 @@ class TestCustomCommandSearchResult:
created_at=datetime.now(timezone.utc),
display_name=None,
total_commands=3,
active_commands=3
active_commands=3,
)
now = datetime.now(timezone.utc)
return [
CustomCommand(
@ -292,11 +301,11 @@ class TestCustomCommandSearchResult:
use_count=0,
warning_sent=False,
is_active=True,
tags=None
tags=None,
)
for i in range(3)
]
def test_search_result_creation(self, sample_commands: list[CustomCommand]):
"""Test creating a search result."""
result = CustomCommandSearchResult(
@ -305,16 +314,16 @@ class TestCustomCommandSearchResult:
page=1,
page_size=20,
total_pages=1,
has_more=False
has_more=False,
)
assert result.commands == sample_commands
assert result.total_count == 10
assert result.page == 1
assert result.page_size == 20
assert result.total_pages == 1
assert result.has_more is False
def test_search_result_properties(self):
"""Test search result calculated properties."""
result = CustomCommandSearchResult(
@ -323,16 +332,16 @@ class TestCustomCommandSearchResult:
page=2,
page_size=20,
total_pages=3,
has_more=True
has_more=True,
)
assert result.start_index == 21 # (2-1) * 20 + 1
assert result.end_index == 40 # min(2 * 20, 47)
assert result.end_index == 40 # min(2 * 20, 47)
class TestCustomCommandStats:
"""Test the statistics dataclass."""
def test_stats_creation(self):
"""Test creating statistics."""
creator = CustomCommandCreator(
@ -342,9 +351,9 @@ class TestCustomCommandStats:
created_at=datetime.now(timezone.utc),
display_name=None,
total_commands=50,
active_commands=45
active_commands=45,
)
command = CustomCommand(
id=1,
name="hello",
@ -357,9 +366,9 @@ class TestCustomCommandStats:
use_count=100,
warning_sent=False,
is_active=True,
tags=None
tags=None,
)
stats = CustomCommandStats(
total_commands=100,
active_commands=95,
@ -369,9 +378,9 @@ class TestCustomCommandStats:
most_active_creator=creator,
recent_commands_count=15,
commands_needing_warning=5,
commands_eligible_for_deletion=2
commands_eligible_for_deletion=2,
)
assert stats.total_commands == 100
assert stats.active_commands == 95
assert stats.total_creators == 25
@ -381,7 +390,7 @@ class TestCustomCommandStats:
assert stats.recent_commands_count == 15
assert stats.commands_needing_warning == 5
assert stats.commands_eligible_for_deletion == 2
def test_stats_calculated_properties(self):
"""Test calculated statistics properties."""
# Test with active commands
@ -394,12 +403,12 @@ class TestCustomCommandStats:
most_active_creator=None,
recent_commands_count=0,
commands_needing_warning=0,
commands_eligible_for_deletion=0
commands_eligible_for_deletion=0,
)
assert stats.average_uses_per_command == 20.0 # 1000 / 50
assert stats.average_commands_per_creator == 5.0 # 50 / 10
# Test with no active commands
empty_stats = CustomCommandStats(
total_commands=0,
@ -410,16 +419,16 @@ class TestCustomCommandStats:
most_active_creator=None,
recent_commands_count=0,
commands_needing_warning=0,
commands_eligible_for_deletion=0
commands_eligible_for_deletion=0,
)
assert empty_stats.average_uses_per_command == 0.0
assert empty_stats.average_commands_per_creator == 0.0
class TestModelIntegration:
"""Test integration between models."""
def test_command_with_creator_relationship(self):
"""Test the relationship between command and creator."""
now = datetime.now(timezone.utc)
@ -430,9 +439,9 @@ class TestModelIntegration:
display_name="Test User",
created_at=now,
total_commands=3,
active_commands=3
active_commands=3,
)
command = CustomCommand(
id=1,
name="test",
@ -445,25 +454,21 @@ class TestModelIntegration:
use_count=0,
warning_sent=False,
is_active=True,
tags=None
tags=None,
)
# Verify relationship
assert command.creator == creator
assert command.creator_id == creator.id
assert command.creator.discord_id == 12345
assert command.creator.username == "testuser"
def test_search_result_with_filters(self):
"""Test search result creation with filters."""
filters = CustomCommandSearchFilters(
name_contains="test",
min_uses=5,
sort_by="popularity",
page=2,
page_size=10
name_contains="test", min_uses=5, sort_by="popularity", page=2, page_size=10
)
creator = CustomCommandCreator(
id=1,
discord_id=12345,
@ -471,9 +476,9 @@ class TestModelIntegration:
created_at=datetime.now(timezone.utc),
display_name=None,
total_commands=1,
active_commands=1
active_commands=1,
)
commands = [
CustomCommand(
id=1,
@ -487,21 +492,21 @@ class TestModelIntegration:
use_count=0,
warning_sent=False,
is_active=True,
tags=None
tags=None,
)
]
result = CustomCommandSearchResult(
commands=commands,
total_count=25,
page=filters.page,
page_size=filters.page_size,
total_pages=3,
has_more=True
has_more=True,
)
assert result.page == 2
assert result.page_size == 10
assert len(result.commands) == 1
assert result.total_pages == 3
assert result.has_more is True
assert result.has_more is True

View File

@ -3,15 +3,16 @@ Tests for Help Command models
Validates model creation, validation, and business logic.
"""
import pytest
from datetime import datetime, timedelta
from datetime import UTC, datetime, timedelta
from pydantic import ValidationError
from models.help_command import (
HelpCommand,
HelpCommandSearchFilters,
HelpCommandSearchResult,
HelpCommandStats
HelpCommandStats,
)
@ -22,133 +23,133 @@ class TestHelpCommandModel:
"""Test help command creation with minimal required fields."""
help_cmd = HelpCommand(
id=1,
name='test-topic',
title='Test Topic',
content='This is test content',
created_by_discord_id='123456789',
created_at=datetime.now()
name="test-topic",
title="Test Topic",
content="This is test content",
created_by_discord_id="123456789",
created_at=datetime.now(UTC),
)
assert help_cmd.id == 1
assert help_cmd.name == 'test-topic'
assert help_cmd.title == 'Test Topic'
assert help_cmd.content == 'This is test content'
assert help_cmd.created_by_discord_id == '123456789'
assert help_cmd.name == "test-topic"
assert help_cmd.title == "Test Topic"
assert help_cmd.content == "This is test content"
assert help_cmd.created_by_discord_id == "123456789"
assert help_cmd.is_active is True
assert help_cmd.view_count == 0
def test_help_command_creation_with_optional_fields(self):
"""Test help command creation with all optional fields."""
now = datetime.now()
now = datetime.now(UTC)
help_cmd = HelpCommand(
id=2,
name='trading-rules',
title='Trading Rules & Guidelines',
content='Complete trading rules...',
category='rules',
created_by_discord_id='123456789',
name="trading-rules",
title="Trading Rules & Guidelines",
content="Complete trading rules...",
category="rules",
created_by_discord_id="123456789",
created_at=now,
updated_at=now,
last_modified_by='987654321',
last_modified_by="987654321",
is_active=True,
view_count=100,
display_order=10
display_order=10,
)
assert help_cmd.category == 'rules'
assert help_cmd.category == "rules"
assert help_cmd.updated_at == now
assert help_cmd.last_modified_by == '987654321'
assert help_cmd.last_modified_by == "987654321"
assert help_cmd.view_count == 100
assert help_cmd.display_order == 10
def test_help_command_name_validation(self):
"""Test help command name validation."""
base_data = {
'id': 3,
'title': 'Test',
'content': 'Content',
'created_by_discord_id': '123',
'created_at': datetime.now()
"id": 3,
"title": "Test",
"content": "Content",
"created_by_discord_id": "123",
"created_at": datetime.now(UTC),
}
# Valid names
valid_names = ['test', 'test-topic', 'test_topic', 'test123', 'abc']
valid_names = ["test", "test-topic", "test_topic", "test123", "abc"]
for name in valid_names:
help_cmd = HelpCommand(name=name, **base_data)
assert help_cmd.name == name.lower()
# Invalid names - too short
with pytest.raises(ValidationError):
HelpCommand(name='a', **base_data)
HelpCommand(name="a", **base_data)
# Invalid names - too long
with pytest.raises(ValidationError):
HelpCommand(name='a' * 33, **base_data)
HelpCommand(name="a" * 33, **base_data)
# Invalid names - special characters
with pytest.raises(ValidationError):
HelpCommand(name='test@topic', **base_data)
HelpCommand(name="test@topic", **base_data)
with pytest.raises(ValidationError):
HelpCommand(name='test topic', **base_data)
HelpCommand(name="test topic", **base_data)
def test_help_command_title_validation(self):
"""Test help command title validation."""
base_data = {
'id': 4,
'name': 'test',
'content': 'Content',
'created_by_discord_id': '123',
'created_at': datetime.now()
"id": 4,
"name": "test",
"content": "Content",
"created_by_discord_id": "123",
"created_at": datetime.now(UTC),
}
# Valid title
help_cmd = HelpCommand(title='Test Topic', **base_data)
assert help_cmd.title == 'Test Topic'
help_cmd = HelpCommand(title="Test Topic", **base_data)
assert help_cmd.title == "Test Topic"
# Empty title
with pytest.raises(ValidationError):
HelpCommand(title='', **base_data)
HelpCommand(title="", **base_data)
# Title too long
with pytest.raises(ValidationError):
HelpCommand(title='a' * 201, **base_data)
HelpCommand(title="a" * 201, **base_data)
def test_help_command_content_validation(self):
"""Test help command content validation."""
base_data = {
'id': 5,
'name': 'test',
'title': 'Test',
'created_by_discord_id': '123',
'created_at': datetime.now()
"id": 5,
"name": "test",
"title": "Test",
"created_by_discord_id": "123",
"created_at": datetime.now(UTC),
}
# Valid content
help_cmd = HelpCommand(content='Test content', **base_data)
assert help_cmd.content == 'Test content'
help_cmd = HelpCommand(content="Test content", **base_data)
assert help_cmd.content == "Test content"
# Empty content
with pytest.raises(ValidationError):
HelpCommand(content='', **base_data)
HelpCommand(content="", **base_data)
# Content too long
with pytest.raises(ValidationError):
HelpCommand(content='a' * 4001, **base_data)
HelpCommand(content="a" * 4001, **base_data)
def test_help_command_category_validation(self):
"""Test help command category validation."""
base_data = {
'id': 6,
'name': 'test',
'title': 'Test',
'content': 'Content',
'created_by_discord_id': '123',
'created_at': datetime.now()
"id": 6,
"name": "test",
"title": "Test",
"content": "Content",
"created_by_discord_id": "123",
"created_at": datetime.now(UTC),
}
# Valid categories
valid_categories = ['rules', 'guides', 'resources', 'info', 'faq']
valid_categories = ["rules", "guides", "resources", "info", "faq"]
for category in valid_categories:
help_cmd = HelpCommand(category=category, **base_data)
assert help_cmd.category == category.lower()
@ -159,28 +160,28 @@ class TestHelpCommandModel:
# Invalid category - special characters
with pytest.raises(ValidationError):
HelpCommand(category='test@category', **base_data)
HelpCommand(category="test@category", **base_data)
def test_help_command_is_deleted_property(self):
"""Test is_deleted property."""
active = HelpCommand(
id=7,
name='active',
title='Active Topic',
content='Content',
created_by_discord_id='123',
created_at=datetime.now(),
is_active=True
name="active",
title="Active Topic",
content="Content",
created_by_discord_id="123",
created_at=datetime.now(UTC),
is_active=True,
)
deleted = HelpCommand(
id=8,
name='deleted',
title='Deleted Topic',
content='Content',
created_by_discord_id='123',
created_at=datetime.now(),
is_active=False
name="deleted",
title="Deleted Topic",
content="Content",
created_by_discord_id="123",
created_at=datetime.now(UTC),
is_active=False,
)
assert active.is_deleted is False
@ -191,24 +192,24 @@ class TestHelpCommandModel:
# No updates
no_update = HelpCommand(
id=9,
name='test',
title='Test',
content='Content',
created_by_discord_id='123',
created_at=datetime.now(),
updated_at=None
name="test",
title="Test",
content="Content",
created_by_discord_id="123",
created_at=datetime.now(UTC),
updated_at=None,
)
assert no_update.days_since_update is None
# Recent update
recent = HelpCommand(
id=10,
name='test',
title='Test',
content='Content',
created_by_discord_id='123',
created_at=datetime.now(),
updated_at=datetime.now() - timedelta(days=5)
name="test",
title="Test",
content="Content",
created_by_discord_id="123",
created_at=datetime.now(UTC),
updated_at=datetime.now(UTC) - timedelta(days=5),
)
assert recent.days_since_update == 5
@ -216,11 +217,11 @@ class TestHelpCommandModel:
"""Test days_since_creation property."""
old = HelpCommand(
id=11,
name='test',
title='Test',
content='Content',
created_by_discord_id='123',
created_at=datetime.now() - timedelta(days=30)
name="test",
title="Test",
content="Content",
created_by_discord_id="123",
created_at=datetime.now(UTC) - timedelta(days=30),
)
assert old.days_since_creation == 30
@ -229,24 +230,24 @@ class TestHelpCommandModel:
# No views
no_views = HelpCommand(
id=12,
name='test',
title='Test',
content='Content',
created_by_discord_id='123',
created_at=datetime.now(),
view_count=0
name="test",
title="Test",
content="Content",
created_by_discord_id="123",
created_at=datetime.now(UTC),
view_count=0,
)
assert no_views.popularity_score == 0.0
# New topic with views
new_popular = HelpCommand(
id=13,
name='test',
title='Test',
content='Content',
created_by_discord_id='123',
created_at=datetime.now() - timedelta(days=5),
view_count=50
name="test",
title="Test",
content="Content",
created_by_discord_id="123",
created_at=datetime.now(UTC) - timedelta(days=5),
view_count=50,
)
score = new_popular.popularity_score
assert score > 5.0 # Base score (5.0) with new topic bonus (1.5x)
@ -254,12 +255,12 @@ class TestHelpCommandModel:
# Old topic with views
old_popular = HelpCommand(
id=14,
name='test',
title='Test',
content='Content',
created_by_discord_id='123',
created_at=datetime.now() - timedelta(days=100),
view_count=50
name="test",
title="Test",
content="Content",
created_by_discord_id="123",
created_at=datetime.now(UTC) - timedelta(days=100),
view_count=50,
)
old_score = old_popular.popularity_score
assert old_score < new_popular.popularity_score # Older topics get penalty
@ -275,7 +276,7 @@ class TestHelpCommandSearchFilters:
assert filters.name_contains is None
assert filters.category is None
assert filters.is_active is True
assert filters.sort_by == 'name'
assert filters.sort_by == "name"
assert filters.sort_desc is False
assert filters.page == 1
assert filters.page_size == 25
@ -283,19 +284,19 @@ class TestHelpCommandSearchFilters:
def test_search_filters_custom_values(self):
"""Test search filters with custom values."""
filters = HelpCommandSearchFilters(
name_contains='trading',
category='rules',
name_contains="trading",
category="rules",
is_active=False,
sort_by='view_count',
sort_by="view_count",
sort_desc=True,
page=2,
page_size=50
page_size=50,
)
assert filters.name_contains == 'trading'
assert filters.category == 'rules'
assert filters.name_contains == "trading"
assert filters.category == "rules"
assert filters.is_active is False
assert filters.sort_by == 'view_count'
assert filters.sort_by == "view_count"
assert filters.sort_desc is True
assert filters.page == 2
assert filters.page_size == 50
@ -303,14 +304,22 @@ class TestHelpCommandSearchFilters:
def test_search_filters_sort_by_validation(self):
"""Test sort_by field validation."""
# Valid sort fields
valid_sorts = ['name', 'title', 'category', 'created_at', 'updated_at', 'view_count', 'display_order']
valid_sorts = [
"name",
"title",
"category",
"created_at",
"updated_at",
"view_count",
"display_order",
]
for sort_field in valid_sorts:
filters = HelpCommandSearchFilters(sort_by=sort_field)
assert filters.sort_by == sort_field
# Invalid sort field
with pytest.raises(ValidationError):
HelpCommandSearchFilters(sort_by='invalid_field')
HelpCommandSearchFilters(sort_by="invalid_field")
def test_search_filters_page_validation(self):
"""Test page number validation."""
@ -353,11 +362,11 @@ class TestHelpCommandSearchResult:
help_commands = [
HelpCommand(
id=i,
name=f'topic-{i}',
title=f'Topic {i}',
content=f'Content {i}',
created_by_discord_id='123',
created_at=datetime.now()
name=f"topic-{i}",
title=f"Topic {i}",
content=f"Content {i}",
created_by_discord_id="123",
created_at=datetime.now(UTC),
)
for i in range(1, 11)
]
@ -368,7 +377,7 @@ class TestHelpCommandSearchResult:
page=1,
page_size=10,
total_pages=5,
has_more=True
has_more=True,
)
assert len(result.help_commands) == 10
@ -386,7 +395,7 @@ class TestHelpCommandSearchResult:
page=3,
page_size=25,
total_pages=4,
has_more=True
has_more=True,
)
assert result.start_index == 51 # (3-1) * 25 + 1
@ -400,7 +409,7 @@ class TestHelpCommandSearchResult:
page=3,
page_size=25,
total_pages=3,
has_more=False
has_more=False,
)
assert result.end_index == 55 # min(3 * 25, 55)
@ -412,7 +421,7 @@ class TestHelpCommandSearchResult:
page=2,
page_size=25,
total_pages=4,
has_more=True
has_more=True,
)
assert result.end_index == 50 # min(2 * 25, 100)
@ -428,7 +437,7 @@ class TestHelpCommandStats:
active_commands=45,
total_views=1000,
most_viewed_command=None,
recent_commands_count=5
recent_commands_count=5,
)
assert stats.total_commands == 50
@ -441,12 +450,12 @@ class TestHelpCommandStats:
"""Test stats with most viewed command."""
most_viewed = HelpCommand(
id=1,
name='popular-topic',
title='Popular Topic',
content='Content',
created_by_discord_id='123',
created_at=datetime.now(),
view_count=500
name="popular-topic",
title="Popular Topic",
content="Content",
created_by_discord_id="123",
created_at=datetime.now(UTC),
view_count=500,
)
stats = HelpCommandStats(
@ -454,11 +463,11 @@ class TestHelpCommandStats:
active_commands=45,
total_views=1000,
most_viewed_command=most_viewed,
recent_commands_count=5
recent_commands_count=5,
)
assert stats.most_viewed_command is not None
assert stats.most_viewed_command.name == 'popular-topic'
assert stats.most_viewed_command.name == "popular-topic"
assert stats.most_viewed_command.view_count == 500
def test_stats_average_views_per_command(self):
@ -469,7 +478,7 @@ class TestHelpCommandStats:
active_commands=40,
total_views=800,
most_viewed_command=None,
recent_commands_count=5
recent_commands_count=5,
)
assert stats.average_views_per_command == 20.0 # 800 / 40
@ -480,7 +489,7 @@ class TestHelpCommandStats:
active_commands=0,
total_views=0,
most_viewed_command=None,
recent_commands_count=0
recent_commands_count=0,
)
assert stats.average_views_per_command == 0.0
@ -492,44 +501,44 @@ class TestHelpCommandFromAPIData:
def test_from_api_data_complete(self):
"""Test from_api_data with complete data."""
api_data = {
'id': 1,
'name': 'trading-rules',
'title': 'Trading Rules & Guidelines',
'content': 'Complete trading rules...',
'category': 'rules',
'created_by_discord_id': '123456789',
'created_at': '2025-01-01T12:00:00',
'updated_at': '2025-01-10T15:30:00',
'last_modified_by': '987654321',
'is_active': True,
'view_count': 100,
'display_order': 10
"id": 1,
"name": "trading-rules",
"title": "Trading Rules & Guidelines",
"content": "Complete trading rules...",
"category": "rules",
"created_by_discord_id": "123456789",
"created_at": "2025-01-01T12:00:00",
"updated_at": "2025-01-10T15:30:00",
"last_modified_by": "987654321",
"is_active": True,
"view_count": 100,
"display_order": 10,
}
help_cmd = HelpCommand.from_api_data(api_data)
assert help_cmd.id == 1
assert help_cmd.name == 'trading-rules'
assert help_cmd.title == 'Trading Rules & Guidelines'
assert help_cmd.content == 'Complete trading rules...'
assert help_cmd.category == 'rules'
assert help_cmd.name == "trading-rules"
assert help_cmd.title == "Trading Rules & Guidelines"
assert help_cmd.content == "Complete trading rules..."
assert help_cmd.category == "rules"
assert help_cmd.view_count == 100
def test_from_api_data_minimal(self):
"""Test from_api_data with minimal required data."""
api_data = {
'id': 2,
'name': 'simple-topic',
'title': 'Simple Topic',
'content': 'Simple content',
'created_by_discord_id': '123456789',
'created_at': '2025-01-01T12:00:00'
"id": 2,
"name": "simple-topic",
"title": "Simple Topic",
"content": "Simple content",
"created_by_discord_id": "123456789",
"created_at": "2025-01-01T12:00:00",
}
help_cmd = HelpCommand.from_api_data(api_data)
assert help_cmd.id == 2
assert help_cmd.name == 'simple-topic'
assert help_cmd.name == "simple-topic"
assert help_cmd.category is None
assert help_cmd.updated_at is None
assert help_cmd.view_count == 0

View File

@ -4,7 +4,6 @@ Tests for trade-specific models.
Tests the Trade, TradeParticipant, and TradeMove models to ensure proper
validation and behavior for multi-team trades.
"""
import pytest
from unittest.mock import MagicMock
from models.trade import Trade, TradeParticipant, TradeMove, TradeStatus

View File

@ -5,11 +5,8 @@ Validates transaction model creation, validation, and business logic.
"""
import pytest
import copy
from datetime import datetime
from models.transaction import Transaction, RosterValidation
from models.player import Player
from models.team import Team
class TestTransaction:

176
tests/test_scorebug_bugs.py Normal file
View File

@ -0,0 +1,176 @@
"""
Tests for scorebug bug fixes (#39 and #40).
#40: ScorecardTracker reads stale in-memory data — fix ensures get_scorecard()
and get_all_scorecards() reload from disk before returning data.
#39: Win percentage stuck at 50% — fix makes parsing robust for decimal (0.75),
percentage string ("75%"), plain number ("75"), empty, and None values.
When parsing fails, win_percentage is None instead of a misleading 50.0.
"""
import json
import tempfile
from pathlib import Path
from unittest.mock import MagicMock
import pytest
from commands.gameplay.scorecard_tracker import ScorecardTracker
from services.scorebug_service import ScorebugData
from utils.scorebug_helpers import create_scorebug_embed, create_team_progress_bar
class TestScorecardTrackerFreshReads:
"""Tests that ScorecardTracker reads fresh data from disk (fix for #40)."""
def test_get_all_scorecards_reads_fresh_data(self, tmp_path):
"""get_all_scorecards() should pick up scorecards written by another process.
Simulates the background task having a stale tracker instance while
the /publish-scorecard command writes new data to the JSON file.
"""
data_file = tmp_path / "scorecards.json"
data_file.write_text(json.dumps({"scorecards": {}}))
tracker = ScorecardTracker(data_file=str(data_file))
assert tracker.get_all_scorecards() == []
# Another process writes a scorecard to the same file
new_data = {
"scorecards": {
"111": {
"text_channel_id": "111",
"sheet_url": "https://docs.google.com/spreadsheets/d/abc123",
"published_at": "2026-01-01T00:00:00",
"last_updated": "2026-01-01T00:00:00",
"publisher_id": "999",
}
}
}
data_file.write_text(json.dumps(new_data))
# Should see the new scorecard without restart
result = tracker.get_all_scorecards()
assert len(result) == 1
assert result[0] == (111, "https://docs.google.com/spreadsheets/d/abc123")
def test_get_scorecard_reads_fresh_data(self, tmp_path):
"""get_scorecard() should pick up a scorecard written by another process."""
data_file = tmp_path / "scorecards.json"
data_file.write_text(json.dumps({"scorecards": {}}))
tracker = ScorecardTracker(data_file=str(data_file))
assert tracker.get_scorecard(222) is None
# Another process writes a scorecard
new_data = {
"scorecards": {
"222": {
"text_channel_id": "222",
"sheet_url": "https://docs.google.com/spreadsheets/d/xyz789",
"published_at": "2026-01-01T00:00:00",
"last_updated": "2026-01-01T00:00:00",
"publisher_id": "999",
}
}
}
data_file.write_text(json.dumps(new_data))
# Should see the new scorecard
assert (
tracker.get_scorecard(222)
== "https://docs.google.com/spreadsheets/d/xyz789"
)
class TestWinPercentageParsing:
"""Tests for robust win percentage parsing in ScorebugData (fix for #39)."""
def test_percentage_string(self):
"""'75%' string should parse to 75.0."""
data = ScorebugData({"win_percentage": 75.0})
assert data.win_percentage == 75.0
def test_none_default(self):
"""Missing win_percentage key should default to None."""
data = ScorebugData({})
assert data.win_percentage is None
def test_explicit_none(self):
"""Explicit None should stay None."""
data = ScorebugData({"win_percentage": None})
assert data.win_percentage is None
def test_zero_is_valid(self):
"""0.0 win percentage is a valid value (team certain to lose)."""
data = ScorebugData({"win_percentage": 0.0})
assert data.win_percentage == 0.0
class TestWinPercentageEmbed:
"""Tests for embed creation with win_percentage=None (fix for #39 Part B)."""
def _make_scorebug_data(self, win_percentage):
"""Create minimal ScorebugData for embed testing."""
return ScorebugData(
{
"away_team_id": 1,
"home_team_id": 2,
"header": "Test Game",
"away_score": 10,
"home_score": 2,
"which_half": "Top",
"inning": 5,
"is_final": False,
"outs": 1,
"win_percentage": win_percentage,
"pitcher_name": "",
"batter_name": "",
"runners": [["", ""], ["", ""], ["", ""], ["", ""]],
"summary": [],
}
)
def _make_team(self, abbrev, color_int=0x3498DB):
"""Create a mock team object."""
team = MagicMock()
team.abbrev = abbrev
team.get_color_int.return_value = color_int
return team
def test_embed_with_none_win_percentage_shows_unavailable(self):
"""When win_percentage is None, embed should show unavailable message."""
data = self._make_scorebug_data(win_percentage=None)
away = self._make_team("POR")
home = self._make_team("WV")
embed = create_scorebug_embed(data, away, home, full_length=False)
# Find the Win Probability field
win_prob_field = next(f for f in embed.fields if f.name == "Win Probability")
assert "unavailable" in win_prob_field.value.lower()
def test_embed_with_valid_win_percentage_shows_bar(self):
"""When win_percentage is valid, embed should show the progress bar."""
data = self._make_scorebug_data(win_percentage=75.0)
away = self._make_team("POR")
home = self._make_team("WV")
embed = create_scorebug_embed(data, away, home, full_length=False)
win_prob_field = next(f for f in embed.fields if f.name == "Win Probability")
assert "75.0%" in win_prob_field.value
assert "unavailable" not in win_prob_field.value.lower()
def test_embed_with_50_percent_shows_even_bar(self):
"""50% win probability should show the even/balanced bar."""
data = self._make_scorebug_data(win_percentage=50.0)
away = self._make_team("POR")
home = self._make_team("WV")
embed = create_scorebug_embed(data, away, home, full_length=False)
win_prob_field = next(f for f in embed.fields if f.name == "Win Probability")
assert "50.0%" in win_prob_field.value
assert "=" in win_prob_field.value

View File

@ -6,7 +6,6 @@ from unittest.mock import AsyncMock
from services.base_service import BaseService
from models.base import SBABaseModel
from exceptions import APIException
class MockModel(SBABaseModel):

View File

@ -4,23 +4,17 @@ Tests for Custom Commands Service in Discord Bot v2.0
Fixed version with proper mocking following established patterns.
"""
import pytest
import asyncio
from datetime import datetime, timedelta, timezone
from unittest.mock import AsyncMock, MagicMock, patch
from typing import List
from unittest.mock import AsyncMock
from services.custom_commands_service import (
CustomCommandsService,
CustomCommandNotFoundError,
CustomCommandExistsError,
CustomCommandPermissionError
CustomCommandExistsError
)
from models.custom_command import (
CustomCommand,
CustomCommandCreator,
CustomCommandSearchFilters,
CustomCommandSearchResult,
CustomCommandStats
CustomCommandCreator
)

File diff suppressed because it is too large Load Diff

View File

@ -5,8 +5,7 @@ Tests the Google Sheets integration for draft pick tracking.
Uses mocked pygsheets to avoid actual API calls.
"""
import pytest
from unittest.mock import AsyncMock, MagicMock, patch
from typing import Tuple, List
from unittest.mock import MagicMock, patch
from services.draft_sheet_service import DraftSheetService, get_draft_sheet_service

View File

@ -4,7 +4,7 @@ Tests for Help Commands Service in Discord Bot v2.0
Comprehensive tests for help commands CRUD operations and business logic.
"""
import pytest
from datetime import datetime, timezone, timedelta
from datetime import datetime, timezone
from unittest.mock import AsyncMock
from services.help_commands_service import (

View File

@ -12,7 +12,7 @@ rather than trying to mock HTTP responses, since the service uses BaseService
which manages its own client instance.
"""
import pytest
from unittest.mock import AsyncMock, MagicMock
from unittest.mock import AsyncMock
from services.injury_service import InjuryService
from models.injury import Injury

View File

@ -10,7 +10,6 @@ from typing import Dict, Any, List
from services.league_service import LeagueService, league_service
from models.current import Current
from exceptions import APIException
class TestLeagueService:

View File

@ -19,7 +19,7 @@ from services.trade_builder import (
_team_to_trade_key,
)
from models.trade import TradeStatus
from models.team import RosterType, Team
from models.team import RosterType
from tests.factories import PlayerFactory, TeamFactory

View File

@ -4,8 +4,7 @@ Tests for TransactionService
Validates transaction service functionality, API interaction, and business logic.
"""
import pytest
from unittest.mock import AsyncMock, MagicMock, patch
from datetime import datetime
from unittest.mock import AsyncMock, patch
from services.transaction_service import TransactionService, transaction_service
from models.transaction import Transaction, RosterValidation

View File

@ -5,7 +5,6 @@ Validates transaction building, roster validation, and move management.
"""
import pytest
from unittest.mock import AsyncMock, MagicMock, patch
from datetime import datetime
from services.transaction_builder import (
TransactionBuilder,
@ -19,7 +18,6 @@ from models.team import Team
from models.player import Player
from models.roster import TeamRoster
from models.transaction import Transaction
from tests.factories import PlayerFactory, TeamFactory
class TestTransactionBuilder:

View File

@ -4,10 +4,7 @@ Tests for Custom Command Cleanup Tasks in Discord Bot v2.0
Fixed version that tests cleanup logic without Discord task infrastructure.
"""
import pytest
import asyncio
from datetime import datetime, timedelta, timezone
from unittest.mock import AsyncMock, MagicMock, Mock, patch
from typing import List
from models.custom_command import (
CustomCommand,

View File

@ -9,8 +9,7 @@ Validates the automated weekly freeze system for transactions, including:
- Transaction processing
"""
import pytest
from datetime import datetime, timezone, UTC
from unittest.mock import AsyncMock, MagicMock, Mock, patch, call
from unittest.mock import AsyncMock, MagicMock, patch
from typing import List
from tasks.transaction_freeze import (

View File

@ -9,7 +9,6 @@ import asyncio
from models.transaction import Transaction, RosterValidation
from models.team import Team
from models.roster import TeamRoster
from services.transaction_service import transaction_service
from commands.transactions.management import TransactionCommands
from tests.factories import TeamFactory

View File

@ -2,9 +2,7 @@
Tests for the logging decorator utility
"""
import pytest
import asyncio
from unittest.mock import AsyncMock, Mock, patch
import discord
from unittest.mock import Mock, patch
from utils.decorators import logged_command
from utils.logging import get_contextual_logger

View File

@ -12,7 +12,6 @@ Why these tests matter:
- Proper fallback behavior ensures backwards compatibility
"""
import pytest
from utils.helpers import (
DEFAULT_SALARY_CAP,
SALARY_CAP_TOLERANCE,

View File

@ -6,7 +6,6 @@ Tests contextual logging, operation tracing, and Discord context management.
import pytest
import time
from unittest.mock import Mock, patch
from typing import Dict, Any
from utils.logging import (
get_contextual_logger,

View File

@ -4,7 +4,6 @@ Tests for scorebug_helpers utility functions.
Tests the create_team_progress_bar function to ensure correct
win probability visualization for home and away teams.
"""
import pytest
from utils.scorebug_helpers import create_team_progress_bar

View File

@ -4,17 +4,14 @@ Tests for Custom Command Views in Discord Bot v2.0
Fixed version with proper async handling and model validation.
"""
import pytest
import asyncio
from datetime import datetime, timedelta, timezone
from unittest.mock import AsyncMock, MagicMock, Mock, patch
from typing import List
from unittest.mock import AsyncMock, Mock
import discord
from models.custom_command import (
CustomCommand,
CustomCommandCreator,
CustomCommandSearchResult
CustomCommandCreator
)

Some files were not shown because too many files have changed in this diff Show More