- Fix TypeError in check_steal_opportunity by properly mocking catcher defense
- Correct tag_from_third test calculation to account for all adjustment conditions
- Fix pitcher replacement test by setting appropriate allowed runners threshold
- Add comprehensive test coverage for AI service business logic
- Implement VS Code testing panel configuration with pytest integration
- Create pytest.ini for consistent test execution and warning management
- Add test isolation guidelines and factory pattern implementation (see the fixture sketch after this list)
- Establish 102 passing tests with zero failures

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
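The factory pattern mentioned above is not shown in this file, so here is a minimal, hedged sketch of how such a factory fixture could look for the ManagerAi model. The fixture name `manager_ai_factory`, its defaults, and its cleanup strategy are assumptions for illustration, not the committed implementation; it reuses the `session` fixture defined in the test module below.

```python
# Hypothetical sketch only: a factory fixture for isolated ManagerAi test data.
import pytest

from app.models.manager_ai import ManagerAi


@pytest.fixture
def manager_ai_factory(session):
    """Build and persist ManagerAi rows with overridable defaults."""
    created = []

    def _make(**overrides):
        values = {"name": "Factory AI"}
        values.update(overrides)
        ai = ManagerAi(**values)
        session.add(ai)
        session.commit()
        session.refresh(ai)
        created.append(ai)
        return ai

    yield _make

    # Delete rows created by this test so later tests start from a clean table.
    for ai in created:
        session.delete(ai)
    session.commit()
```

A test would then call `manager_ai_factory(steal=10)` instead of constructing and committing a ManagerAi by hand, which keeps per-test data independent.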
"""
|
|
Unit tests for ManagerAi model.
|
|
|
|
Tests data validation, field constraints, and model behavior.
|
|
"""
|
|
|
|
import pytest
|
|
from pydantic import ValidationError
|
|
from sqlmodel import Session, SQLModel, create_engine
|
|
|
|
from app.models.manager_ai import ManagerAi, ManagerAiBase
|
|
|
|
|
|
@pytest.fixture
|
|
def test_db():
|
|
"""Create PostgreSQL test database connection."""
|
|
# Use test database on port 5434
|
|
test_url = "postgresql://paper_dynasty_user:paper_dynasty_test_password@localhost:5434/paper_dynasty_test"
|
|
engine = create_engine(test_url, echo=False)
|
|
SQLModel.metadata.create_all(engine)
|
|
return engine
|
|
|
|
|
|
@pytest.fixture
|
|
def session(test_db):
|
|
"""Create database session for testing."""
|
|
with Session(test_db) as session:
|
|
yield session
|
|
# Clean up after each test
|
|
session.rollback()
|
|
|
|
|
|
class TestManagerAiBase:
|
|
"""Test ManagerAiBase model validation."""
|
|
|
|
def test_create_with_defaults(self):
|
|
"""Test creating ManagerAi with default values."""
|
|
ai = ManagerAiBase(name="Test AI")
|
|
|
|
assert ai.name == "Test AI"
|
|
assert ai.steal == 5
|
|
assert ai.running == 5
|
|
assert ai.hold == 5
|
|
assert ai.catcher_throw == 5
|
|
assert ai.uncapped_home == 5
|
|
assert ai.uncapped_third == 5
|
|
assert ai.uncapped_trail == 5
|
|
assert ai.bullpen_matchup == 5
|
|
assert ai.behind_aggression == 5
|
|
assert ai.ahead_aggression == 5
|
|
assert ai.decide_throw == 5
|
|
|
|
def test_create_with_custom_values(self):
|
|
"""Test creating ManagerAi with custom values."""
|
|
ai = ManagerAiBase(
|
|
name="Aggressive AI",
|
|
steal=10,
|
|
running=8,
|
|
hold=3,
|
|
behind_aggression=9,
|
|
ahead_aggression=2
|
|
)
|
|
|
|
assert ai.name == "Aggressive AI"
|
|
assert ai.steal == 10
|
|
assert ai.running == 8
|
|
assert ai.hold == 3
|
|
assert ai.behind_aggression == 9
|
|
assert ai.ahead_aggression == 2
|
|
|
|
def test_validate_field_ranges(self):
|
|
"""Test field validation constraints."""
|
|
# Valid values at boundaries
|
|
ai = ManagerAiBase(
|
|
name="Boundary Test",
|
|
steal=1,
|
|
running=10,
|
|
hold=1
|
|
)
|
|
assert ai.steal == 1
|
|
assert ai.running == 10
|
|
assert ai.hold == 1
|
|
|
|
def test_invalid_field_values(self):
|
|
"""Test that invalid field values raise ValidationError."""
|
|
# Values below minimum
|
|
with pytest.raises(ValidationError) as exc_info:
|
|
ManagerAiBase(name="Invalid", steal=0)
|
|
assert "Input should be greater than or equal to 1" in str(exc_info.value)
|
|
|
|
# Values above maximum
|
|
with pytest.raises(ValidationError) as exc_info:
|
|
ManagerAiBase(name="Invalid", steal=11)
|
|
assert "Input should be less than or equal to 10" in str(exc_info.value)
|
|
|
|
def test_required_name_field(self):
|
|
"""Test that name field is required."""
|
|
with pytest.raises(ValidationError) as exc_info:
|
|
ManagerAiBase()
|
|
assert "Field required" in str(exc_info.value)
|
|
|
|
|
|
class TestManagerAi:
|
|
"""Test ManagerAi table model."""
|
|
|
|
def test_create_and_save(self, session):
|
|
"""Test creating and saving ManagerAi to database."""
|
|
ai = ManagerAi(
|
|
name="Test AI",
|
|
steal=7,
|
|
running=6,
|
|
hold=4
|
|
)
|
|
|
|
session.add(ai)
|
|
session.commit()
|
|
session.refresh(ai)
|
|
|
|
assert ai.id is not None
|
|
assert ai.name == "Test AI"
|
|
assert ai.steal == 7
|
|
|
|
def test_retrieve_from_database(self, session):
|
|
"""Test retrieving ManagerAi from database."""
|
|
# Create and save
|
|
ai = ManagerAi(name="Retrieval Test", steal=8)
|
|
session.add(ai)
|
|
session.commit()
|
|
|
|
# Retrieve
|
|
retrieved = session.get(ManagerAi, ai.id)
|
|
assert retrieved is not None
|
|
assert retrieved.name == "Retrieval Test"
|
|
assert retrieved.steal == 8
|
|
|
|
def test_update_values(self, session):
|
|
"""Test updating ManagerAi values."""
|
|
ai = ManagerAi(name="Update Test")
|
|
session.add(ai)
|
|
session.commit()
|
|
|
|
# Update values
|
|
ai.steal = 9
|
|
ai.running = 7
|
|
session.commit()
|
|
|
|
# Verify updates
|
|
session.refresh(ai)
|
|
assert ai.steal == 9
|
|
assert ai.running == 7
|
|
|
|
def test_multiple_instances(self, session):
|
|
"""Test creating multiple ManagerAi instances."""
|
|
ai1 = ManagerAi(name="Balanced", steal=5, running=5)
|
|
ai2 = ManagerAi(name="Aggressive", steal=10, running=10)
|
|
ai3 = ManagerAi(name="Conservative", steal=1, running=1)
|
|
|
|
session.add_all([ai1, ai2, ai3])
|
|
session.commit()
|
|
|
|
# Verify all saved with different IDs
|
|
assert ai1.id != ai2.id != ai3.id
|
|
assert ai1.name == "Balanced"
|
|
assert ai2.steal == 10
|
|
assert ai3.running == 1
|
|
|
|
def test_field_descriptions(self):
|
|
"""Test that field descriptions are properly set."""
|
|
ai = ManagerAi(name="Description Test")
|
|
|
|
# Access field descriptions through the model class using Pydantic v2
|
|
fields = ManagerAi.model_fields
|
|
assert "AI steal aggression level" in str(fields['steal'])
|
|
assert "AI base running aggression" in str(fields['running'])
|
|
assert "AI pitcher hold tendency" in str(fields['hold'])
|
|
|
|
|
|
class TestManagerAiPresets:
|
|
"""Test creating preset ManagerAi configurations."""
|
|
|
|
def test_balanced_preset(self, session):
|
|
"""Test creating a balanced AI preset."""
|
|
balanced = ManagerAi(name="Balanced")
|
|
session.add(balanced)
|
|
session.commit()
|
|
|
|
# All defaults should be 5 (balanced)
|
|
assert all(getattr(balanced, field) == 5 for field in [
|
|
'steal', 'running', 'hold', 'catcher_throw',
|
|
'uncapped_home', 'uncapped_third', 'uncapped_trail',
|
|
'bullpen_matchup', 'behind_aggression', 'ahead_aggression',
|
|
'decide_throw'
|
|
])
|
|
|
|
def test_yolo_preset(self, session):
|
|
"""Test creating an aggressive 'YOLO' AI preset."""
|
|
yolo = ManagerAi(
|
|
name="Yolo",
|
|
steal=10,
|
|
running=10,
|
|
hold=5,
|
|
catcher_throw=10,
|
|
uncapped_home=10,
|
|
uncapped_third=10,
|
|
uncapped_trail=10,
|
|
bullpen_matchup=3,
|
|
behind_aggression=10,
|
|
ahead_aggression=10,
|
|
decide_throw=10
|
|
)
|
|
session.add(yolo)
|
|
session.commit()
|
|
|
|
assert yolo.steal == 10
|
|
assert yolo.running == 10
|
|
assert yolo.bullpen_matchup == 3 # Conservative on bullpen
|
|
assert yolo.behind_aggression == 10
|
|
assert yolo.ahead_aggression == 10
|
|
|
|
def test_safe_preset(self, session):
|
|
"""Test creating a conservative 'Safe' AI preset."""
|
|
safe = ManagerAi(
|
|
name="Safe",
|
|
steal=3,
|
|
running=3,
|
|
hold=8,
|
|
catcher_throw=5,
|
|
uncapped_home=5,
|
|
uncapped_third=3,
|
|
uncapped_trail=5,
|
|
bullpen_matchup=8,
|
|
behind_aggression=5,
|
|
ahead_aggression=1,
|
|
decide_throw=1
|
|
)
|
|
session.add(safe)
|
|
session.commit()
|
|
|
|
assert safe.steal == 3
|
|
assert safe.running == 3
|
|
assert safe.hold == 8 # High hold tendency
|
|
assert safe.bullpen_matchup == 8 # Conservative bullpen usage
|
|
assert safe.ahead_aggression == 1 # Very conservative when ahead
|
|
assert safe.decide_throw == 1 |