Card Evolution Phase 1a: Schema & Data Foundation #104

Merged
cal merged 8 commits from feature/card-evolution-phase1a into card-evolution 2026-03-18 16:06:44 +00:00
10 changed files with 2151 additions and 140 deletions

View File

@ -1194,6 +1194,155 @@ if not SKIP_TABLE_CREATION:
db.create_tables([ScoutOpportunity, ScoutClaim], safe=True)
class PlayerSeasonStats(BaseModel):
    """Accumulated batting and pitching totals, one row per (player, team, season)."""

    player = ForeignKeyField(Player)
    team = ForeignKeyField(Team)
    season = IntegerField()
    # Batting stats
    games_batting = IntegerField(default=0)  # games with at least one plate appearance
    pa = IntegerField(default=0)
    ab = IntegerField(default=0)
    hits = IntegerField(default=0)
    doubles = IntegerField(default=0)
    triples = IntegerField(default=0)
    hr = IntegerField(default=0)
    bb = IntegerField(default=0)
    hbp = IntegerField(default=0)
    so = IntegerField(default=0)
    rbi = IntegerField(default=0)
    runs = IntegerField(default=0)
    sb = IntegerField(default=0)
    cs = IntegerField(default=0)
    # Pitching stats
    games_pitching = IntegerField(default=0)
    outs = IntegerField(default=0)  # pitcher outs recorded
    k = IntegerField(default=0)
    bb_allowed = IntegerField(default=0)
    hits_allowed = IntegerField(default=0)
    hr_allowed = IntegerField(default=0)
    wins = IntegerField(default=0)
    losses = IntegerField(default=0)
    saves = IntegerField(default=0)
    holds = IntegerField(default=0)
    blown_saves = IntegerField(default=0)
    # Meta — most recent game folded into this row, and when it happened
    last_game = ForeignKeyField(StratGame, null=True)
    last_updated_at = DateTimeField(null=True)

    class Meta:
        database = db
        table_name = "player_season_stats"
# One stats row per (player, team, season) combination.
player_season_stats_unique_index = ModelIndex(
    PlayerSeasonStats,
    (PlayerSeasonStats.player, PlayerSeasonStats.team, PlayerSeasonStats.season),
    unique=True,
)
PlayerSeasonStats.add_index(player_season_stats_unique_index)

# Fast lookup by team + season (e.g. leaderboard-style queries).
player_season_stats_team_season_index = ModelIndex(
    PlayerSeasonStats,
    (PlayerSeasonStats.team, PlayerSeasonStats.season),
    unique=False,
)
PlayerSeasonStats.add_index(player_season_stats_team_season_index)

# Fast lookup by player across seasons.
player_season_stats_player_season_index = ModelIndex(
    PlayerSeasonStats,
    (PlayerSeasonStats.player, PlayerSeasonStats.season),
    unique=False,
)
PlayerSeasonStats.add_index(player_season_stats_player_season_index)

if not SKIP_TABLE_CREATION:
    db.create_tables([PlayerSeasonStats], safe=True)
class EvolutionTrack(BaseModel):
    """A named evolution progression: a metric formula plus four tier thresholds."""

    name = CharField(unique=True)
    card_type = CharField()  # 'batter', 'sp', 'rp'
    formula = CharField()  # e.g. "pa + tb * 2"
    # Cumulative metric value required to reach each tier (1-4).
    t1_threshold = IntegerField()
    t2_threshold = IntegerField()
    t3_threshold = IntegerField()
    t4_threshold = IntegerField()

    class Meta:
        database = db
        table_name = "evolution_track"
class EvolutionCardState(BaseModel):
    """Per-card evolution progress: track, current tier, and running metric value."""

    player = ForeignKeyField(Player)
    team = ForeignKeyField(Team)
    track = ForeignKeyField(EvolutionTrack)
    current_tier = IntegerField(default=0)  # 0-4
    current_value = FloatField(default=0.0)
    fully_evolved = BooleanField(default=False)
    last_evaluated_at = DateTimeField(null=True)

    class Meta:
        database = db
        table_name = "evolution_card_state"


# One evolution state per card — a (player, team) pair identifies a card.
evolution_card_state_index = ModelIndex(
    EvolutionCardState,
    (EvolutionCardState.player, EvolutionCardState.team),
    unique=True,
)
EvolutionCardState.add_index(evolution_card_state_index)
class EvolutionTierBoost(BaseModel):
    """A stat boost granted at a given tier of a track; a tier may grant several."""

    track = ForeignKeyField(EvolutionTrack)
    tier = IntegerField()  # 1-4
    boost_type = CharField()  # e.g. 'rating', 'stat'
    boost_target = CharField()  # e.g. 'contact_vl', 'power_vr'
    boost_value = FloatField(default=0.0)

    class Meta:
        database = db
        table_name = "evolution_tier_boost"


# Prevent duplicate boost definitions for the same track/tier/type/target.
evolution_tier_boost_index = ModelIndex(
    EvolutionTierBoost,
    (
        EvolutionTierBoost.track,
        EvolutionTierBoost.tier,
        EvolutionTierBoost.boost_type,
        EvolutionTierBoost.boost_target,
    ),
    unique=True,
)
EvolutionTierBoost.add_index(evolution_tier_boost_index)
class EvolutionCosmetic(BaseModel):
    """Catalogue of unlockable visual treatments gated by a minimum tier."""

    name = CharField(unique=True)
    tier_required = IntegerField(default=0)
    cosmetic_type = CharField()  # 'frame', 'badge', 'theme'
    css_class = CharField(null=True)
    asset_url = CharField(null=True)

    class Meta:
        database = db
        table_name = "evolution_cosmetic"


if not SKIP_TABLE_CREATION:
    db.create_tables(
        [EvolutionTrack, EvolutionCardState, EvolutionTierBoost, EvolutionCosmetic],
        safe=True,
    )
db.close()
# scout_db = SqliteDatabase(

View File

@ -1,5 +1,29 @@
[
{"name": "Batter", "card_type": "batter", "formula": "pa+tb*2", "t1": 37, "t2": 149, "t3": 448, "t4": 896},
{"name": "Starting Pitcher", "card_type": "sp", "formula": "ip+k", "t1": 10, "t2": 40, "t3": 120, "t4": 240},
{"name": "Relief Pitcher", "card_type": "rp", "formula": "ip+k", "t1": 3, "t2": 12, "t3": 35, "t4": 70}
{
"name": "Batter Track",
"card_type": "batter",
"formula": "pa + tb * 2",
"t1_threshold": 37,
"t2_threshold": 149,
"t3_threshold": 448,
"t4_threshold": 896
},
{
"name": "Starting Pitcher Track",
"card_type": "sp",
"formula": "ip + k",
"t1_threshold": 10,
"t2_threshold": 40,
"t3_threshold": 120,
"t4_threshold": 240
},
{
"name": "Relief Pitcher Track",
"card_type": "rp",
"formula": "ip + k",
"t1_threshold": 3,
"t2_threshold": 12,
"t3_threshold": 35,
"t4_threshold": 70
}
]

View File

@ -1,41 +1,66 @@
"""Seed data fixture for EvolutionTrack.
"""Seed script for EvolutionTrack records.
Inserts the three universal evolution tracks (Batter, Starting Pitcher,
Relief Pitcher) if they do not already exist. Safe to call multiple times
thanks to get_or_create depends on WP-01 (EvolutionTrack model) to run.
Loads track definitions from evolution_tracks.json and upserts them into the
database using get_or_create keyed on name. Existing tracks have their
thresholds and formula updated to match the JSON in case values have changed.
Can be run standalone:
python -m app.seed.evolution_tracks
"""
import json
import os
import logging
from pathlib import Path
_JSON_PATH = os.path.join(os.path.dirname(__file__), "evolution_tracks.json")
from app.db_engine import EvolutionTrack
logger = logging.getLogger(__name__)
_JSON_PATH = Path(__file__).parent / "evolution_tracks.json"
def load_tracks():
"""Return the locked list of evolution track dicts from the JSON fixture."""
with open(_JSON_PATH) as fh:
return json.load(fh)
def seed_evolution_tracks() -> list[EvolutionTrack]:
"""Upsert evolution tracks from JSON seed data.
def seed(model_class=None):
"""Insert evolution tracks that are not yet in the database.
Args:
model_class: Peewee model with get_or_create support. Defaults to
``app.db_engine.EvolutionTrack`` (imported lazily so this module
can be imported before WP-01 lands).
Returns:
List of (instance, created) tuples from get_or_create.
Returns a list of EvolutionTrack instances that were created or updated.
"""
if model_class is None:
from app.db_engine import EvolutionTrack as model_class # noqa: PLC0415
raw = _JSON_PATH.read_text(encoding="utf-8")
track_defs = json.loads(raw)
results = []
for track in load_tracks():
instance, created = model_class.get_or_create(
card_type=track["card_type"],
defaults=track,
results: list[EvolutionTrack] = []
for defn in track_defs:
track, created = EvolutionTrack.get_or_create(
name=defn["name"],
defaults={
"card_type": defn["card_type"],
"formula": defn["formula"],
"t1_threshold": defn["t1_threshold"],
"t2_threshold": defn["t2_threshold"],
"t3_threshold": defn["t3_threshold"],
"t4_threshold": defn["t4_threshold"],
},
)
results.append((instance, created))
if not created:
# Update mutable fields in case the JSON values changed.
track.card_type = defn["card_type"]
track.formula = defn["formula"]
track.t1_threshold = defn["t1_threshold"]
track.t2_threshold = defn["t2_threshold"]
track.t3_threshold = defn["t3_threshold"]
track.t4_threshold = defn["t4_threshold"]
track.save()
action = "created" if created else "updated"
logger.info("[%s] %s (card_type=%s)", action, track.name, track.card_type)
results.append(track)
return results
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
logger.info("Seeding evolution tracks...")
tracks = seed_evolution_tracks()
logger.info("Done. %d track(s) processed.", len(tracks))

View File

@ -0,0 +1,484 @@
"""
season_stats.py Incremental PlayerSeasonStats update logic.
Called once per completed StratGame to accumulate batting and pitching
statistics into the player_season_stats table.
Idempotency limitation: re-delivery of a game is detected by checking
whether any PlayerSeasonStats row still carries that game_id as last_game.
This guard only works if no later game has been processed for the same
players if game G+1 is processed first, a re-delivery of game G will
bypass the guard and double-count stats. A persistent processed-game
ledger is needed for full idempotency across out-of-order re-delivery
(see issue #105).
Peewee upsert strategy:
- SQLite: on_conflict_replace() simplest path, deletes + re-inserts
- PostgreSQL: on_conflict() with EXCLUDED true atomic increment via SQL
"""
import logging
import os
from collections import defaultdict
from datetime import datetime
from peewee import EXCLUDED
from app.db_engine import (
db,
Decision,
PlayerSeasonStats,
StratGame,
StratPlay,
)
logger = logging.getLogger(__name__)
DATABASE_TYPE = os.environ.get("DATABASE_TYPE", "sqlite").lower()
def _build_batting_groups(plays):
"""
Aggregate per-play batting stats by (batter_id, batter_team_id).
Only plays where pa > 0 are counted toward games_batting, but all
play-level stat fields are accumulated regardless of pa value so
that rare edge cases (e.g. sac bunt without official PA) are
correctly included in the totals.
Returns a dict keyed by (batter_id, batter_team_id) with stat dicts.
"""
groups = defaultdict(
lambda: {
"games_batting": 0,
"pa": 0,
"ab": 0,
"hits": 0,
"doubles": 0,
"triples": 0,
"hr": 0,
"bb": 0,
"hbp": 0,
"so": 0,
"rbi": 0,
"runs": 0,
"sb": 0,
"cs": 0,
"appeared": False, # tracks whether batter appeared at all in this game
}
)
for play in plays:
batter_id = play.batter_id
batter_team_id = play.batter_team_id
if batter_id is None:
continue
key = (batter_id, batter_team_id)
g = groups[key]
g["pa"] += play.pa
g["ab"] += play.ab
g["hits"] += play.hit
g["doubles"] += play.double
g["triples"] += play.triple
g["hr"] += play.homerun
g["bb"] += play.bb
g["hbp"] += play.hbp
g["so"] += play.so
g["rbi"] += play.rbi
g["runs"] += play.run
g["sb"] += play.sb
g["cs"] += play.cs
if play.pa > 0 and not g["appeared"]:
g["games_batting"] = 1
g["appeared"] = True
# Clean up the helper flag before returning
for key in groups:
del groups[key]["appeared"]
return groups
def _build_pitching_groups(plays):
"""
Aggregate per-play pitching stats by (pitcher_id, pitcher_team_id).
Stats on StratPlay are recorded from the batter's perspective, so
when accumulating pitcher stats we collect:
- outs pitcher outs recorded (directly on play)
- so strikeouts (batter's so = pitcher's k)
- hit hits allowed
- bb+hbp base-on-balls allowed
- homerun home runs allowed
games_pitching counts unique pitchers who appeared (at least one
play as pitcher), capped at 1 per game since this function processes
a single game.
Returns a dict keyed by (pitcher_id, pitcher_team_id) with stat dicts.
"""
groups = defaultdict(
lambda: {
"games_pitching": 1, # pitcher appeared in this game by definition
"outs": 0,
"k": 0,
"hits_allowed": 0,
"bb_allowed": 0,
"hr_allowed": 0,
# Decision stats added later
"wins": 0,
"losses": 0,
"saves": 0,
"holds": 0,
"blown_saves": 0,
}
)
for play in plays:
pitcher_id = play.pitcher_id
pitcher_team_id = play.pitcher_team_id
key = (pitcher_id, pitcher_team_id)
g = groups[key]
g["outs"] += play.outs
g["k"] += play.so
g["hits_allowed"] += play.hit
g["bb_allowed"] += play.bb + play.hbp
g["hr_allowed"] += play.homerun
return groups
def _apply_decisions(pitching_groups, decisions):
"""
Merge Decision rows into the pitching stat groups.
Each Decision belongs to exactly one pitcher in the game, containing
win/loss/save/hold/blown-save flags and the is_start indicator.
"""
for decision in decisions:
pitcher_id = decision.pitcher_id
pitcher_team_id = decision.pitcher_team_id
key = (pitcher_id, pitcher_team_id)
# Pitcher may have a Decision without plays (rare edge case for
# games where the Decision was recorded without StratPlay rows).
# Initialise a zeroed entry if not already present.
if key not in pitching_groups:
pitching_groups[key] = {
"games_pitching": 1,
"outs": 0,
"k": 0,
"hits_allowed": 0,
"bb_allowed": 0,
"hr_allowed": 0,
"wins": 0,
"losses": 0,
"saves": 0,
"holds": 0,
"blown_saves": 0,
}
g = pitching_groups[key]
g["wins"] += decision.win
g["losses"] += decision.loss
g["saves"] += decision.is_save
g["holds"] += decision.hold
g["blown_saves"] += decision.b_save
def _upsert_postgres(player_id, team_id, season, game_id, batting, pitching):
    """
    PostgreSQL upsert using ON CONFLICT ... DO UPDATE with column-level
    increments.

    Each stat column is incremented by the value from the EXCLUDED
    (incoming) row, so two games processed concurrently for the same
    player cannot overwrite each other's accumulated totals.

    Args:
        player_id: Player PK the stats belong to.
        team_id: Team PK the player was on for this game.
        season: Season number of the stats row.
        game_id: StratGame PK recorded in last_game.
        batting: Stat dict from _build_batting_groups (may be empty).
        pitching: Stat dict from _build_pitching_groups /
            _apply_decisions (may be empty).
    """
    # NOTE(review): naive local time, not UTC — confirm timezone policy.
    now = datetime.now()
    # Incoming per-game contribution; missing keys default to 0 so an
    # empty batting or pitching dict contributes nothing.
    row = {
        "player_id": player_id,
        "team_id": team_id,
        "season": season,
        "games_batting": batting.get("games_batting", 0),
        "pa": batting.get("pa", 0),
        "ab": batting.get("ab", 0),
        "hits": batting.get("hits", 0),
        "doubles": batting.get("doubles", 0),
        "triples": batting.get("triples", 0),
        "hr": batting.get("hr", 0),
        "bb": batting.get("bb", 0),
        "hbp": batting.get("hbp", 0),
        "so": batting.get("so", 0),
        "rbi": batting.get("rbi", 0),
        "runs": batting.get("runs", 0),
        "sb": batting.get("sb", 0),
        "cs": batting.get("cs", 0),
        "games_pitching": pitching.get("games_pitching", 0),
        "outs": pitching.get("outs", 0),
        "k": pitching.get("k", 0),
        "hits_allowed": pitching.get("hits_allowed", 0),
        "bb_allowed": pitching.get("bb_allowed", 0),
        "hr_allowed": pitching.get("hr_allowed", 0),
        "wins": pitching.get("wins", 0),
        "losses": pitching.get("losses", 0),
        "saves": pitching.get("saves", 0),
        "holds": pitching.get("holds", 0),
        "blown_saves": pitching.get("blown_saves", 0),
        "last_game_id": game_id,
        "last_updated_at": now,
    }
    # Incrementable stat columns (all batting + pitching accumulators)
    increment_cols = [
        "games_batting",
        "pa",
        "ab",
        "hits",
        "doubles",
        "triples",
        "hr",
        "bb",
        "hbp",
        "so",
        "rbi",
        "runs",
        "sb",
        "cs",
        "games_pitching",
        "outs",
        "k",
        "hits_allowed",
        "bb_allowed",
        "hr_allowed",
        "wins",
        "losses",
        "saves",
        "holds",
        "blown_saves",
    ]
    # Conflict target matches the unique (player, team, season) index.
    conflict_target = [
        PlayerSeasonStats.player,
        PlayerSeasonStats.team,
        PlayerSeasonStats.season,
    ]
    # Build the update dict: increment accumulators, overwrite metadata.
    # EXCLUDED[col] references the value of the row that failed to insert.
    update_dict = {}
    for col in increment_cols:
        field_obj = getattr(PlayerSeasonStats, col)
        update_dict[field_obj] = field_obj + EXCLUDED[col]
    update_dict[PlayerSeasonStats.last_game] = EXCLUDED["last_game_id"]
    update_dict[PlayerSeasonStats.last_updated_at] = EXCLUDED["last_updated_at"]
    PlayerSeasonStats.insert(
        player=player_id,
        team=team_id,
        season=season,
        games_batting=row["games_batting"],
        pa=row["pa"],
        ab=row["ab"],
        hits=row["hits"],
        doubles=row["doubles"],
        triples=row["triples"],
        hr=row["hr"],
        bb=row["bb"],
        hbp=row["hbp"],
        so=row["so"],
        rbi=row["rbi"],
        runs=row["runs"],
        sb=row["sb"],
        cs=row["cs"],
        games_pitching=row["games_pitching"],
        outs=row["outs"],
        k=row["k"],
        hits_allowed=row["hits_allowed"],
        bb_allowed=row["bb_allowed"],
        hr_allowed=row["hr_allowed"],
        wins=row["wins"],
        losses=row["losses"],
        saves=row["saves"],
        holds=row["holds"],
        blown_saves=row["blown_saves"],
        last_game=game_id,
        last_updated_at=now,
    ).on_conflict(
        conflict_target=conflict_target,
        action="update",
        update=update_dict,
    ).execute()
def _upsert_sqlite(player_id, team_id, season, game_id, batting, pitching):
    """
    SQLite upsert: read-modify-write inside the caller's atomic() block.

    Peewee's on_conflict() cannot express EXCLUDED-based increments on
    SQLite, so the row is fetched (or created) and every accumulator is
    bumped in Python. This is safe because the entire
    update_season_stats() call is wrapped in db.atomic().
    """
    now = datetime.now()
    stats_row, _ = PlayerSeasonStats.get_or_create(
        player_id=player_id,
        team_id=team_id,
        season=season,
    )
    batting_fields = (
        "games_batting", "pa", "ab", "hits", "doubles", "triples", "hr",
        "bb", "hbp", "so", "rbi", "runs", "sb", "cs",
    )
    pitching_fields = (
        "games_pitching", "outs", "k", "hits_allowed", "bb_allowed",
        "hr_allowed", "wins", "losses", "saves", "holds", "blown_saves",
    )
    # Increment each accumulator by this game's contribution; missing
    # keys default to 0 so an empty batting/pitching dict adds nothing.
    for contribution, fields in ((batting, batting_fields), (pitching, pitching_fields)):
        for name in fields:
            setattr(stats_row, name, getattr(stats_row, name) + contribution.get(name, 0))
    stats_row.last_game_id = game_id
    stats_row.last_updated_at = now
    stats_row.save()
def update_season_stats(game_id: int) -> dict:
    """
    Accumulate per-game batting and pitching stats into PlayerSeasonStats.

    This function is safe to call exactly once per game. If called again
    for the same game_id while it is still the most-recently-processed
    game for at least one affected player (detected by checking last_game
    FK), it returns early without modifying any data.

    Limitation: the guard only detects re-delivery if no later game has
    been processed for the same players. Out-of-order re-delivery (e.g.
    game G re-delivered after game G+1 was already processed) will not be
    caught and will silently double-count stats. See issue #105 for the
    planned ProcessedGame ledger fix.

    Algorithm:
        1. Fetch StratGame to get the season.
        2. Guard against re-processing via last_game_id check.
        3. Collect all StratPlay rows for the game.
        4. Group batting stats by (batter_id, batter_team_id).
        5. Group pitching stats by (pitcher_id, pitcher_team_id).
        6. Merge Decision rows into pitching groups.
        7. Upsert each player's contribution using either:
           - PostgreSQL: atomic SQL increment via ON CONFLICT DO UPDATE
           - SQLite: read-modify-write inside a transaction

    Args:
        game_id: Primary key of the StratGame to process.

    Returns:
        Summary dict with keys: game_id, season, batters_updated,
        pitchers_updated. If the game was already processed, also
        includes "skipped": True.

    Raises:
        StratGame.DoesNotExist: If no StratGame row matches game_id.
    """
    logger.info("update_season_stats: starting for game_id=%d", game_id)
    # Step 1 — Fetch the game to get season
    game = StratGame.get_by_id(game_id)
    season = game.season
    with db.atomic():
        # Step 2 — Double-count prevention: check if any row still
        # carries this game_id as last_game. Note: only detects replay
        # of the most-recently-processed game; out-of-order re-delivery
        # bypasses this guard (see issue #105).
        already_processed = (
            PlayerSeasonStats.select()
            .where(PlayerSeasonStats.last_game == game_id)
            .exists()
        )
        if already_processed:
            logger.info(
                "update_season_stats: game_id=%d already processed, skipping",
                game_id,
            )
            return {
                "game_id": game_id,
                "season": season,
                "batters_updated": 0,
                "pitchers_updated": 0,
                "skipped": True,
            }
        # Step 3 — Load plays
        plays = list(StratPlay.select().where(StratPlay.game == game_id))
        logger.debug(
            "update_season_stats: game_id=%d loaded %d plays", game_id, len(plays)
        )
        # Steps 4 & 5 — Aggregate batting and pitching groups
        batting_groups = _build_batting_groups(plays)
        pitching_groups = _build_pitching_groups(plays)
        # Step 6 — Merge Decision rows into pitching groups
        decisions = list(Decision.select().where(Decision.game == game_id))
        _apply_decisions(pitching_groups, decisions)
        # Collect all unique player keys across both perspectives.
        # A two-way player (batter who also pitched, or vice-versa) gets
        # a single combined row in PlayerSeasonStats.
        all_keys = set(batting_groups.keys()) | set(pitching_groups.keys())
        batters_updated = 0
        pitchers_updated = 0
        # Step 7 — Dispatch to the engine-appropriate upsert strategy.
        upsert_fn = (
            _upsert_postgres if DATABASE_TYPE == "postgresql" else _upsert_sqlite
        )
        for player_id, team_id in all_keys:
            batting = batting_groups.get((player_id, team_id), {})
            pitching = pitching_groups.get((player_id, team_id), {})
            upsert_fn(player_id, team_id, season, game_id, batting, pitching)
            if batting:
                batters_updated += 1
            if pitching:
                pitchers_updated += 1
        logger.info(
            "update_season_stats: game_id=%d complete — "
            "batters_updated=%d pitchers_updated=%d",
            game_id,
            batters_updated,
            pitchers_updated,
        )
        return {
            "game_id": game_id,
            "season": season,
            "batters_updated": batters_updated,
            "pitchers_updated": pitchers_updated,
        }

View File

@ -0,0 +1,203 @@
-- Migration: Add card evolution tables and column extensions
-- Date: 2026-03-17
-- Issue: WP-04
-- Purpose: Support the Card Evolution system — tracks player season stats,
--          evolution tracks with tier thresholds, per-card evolution state,
--          tier-based stat boosts, and cosmetic unlocks. Also extends the
--          card, battingcard, and pitchingcard tables with variant and
--          image_url columns required by the evolution display layer.
--
-- NOTE: uses ALTER TABLE ... ADD COLUMN IF NOT EXISTS, which requires
--       PostgreSQL 9.6 or newer.
--
-- Run on dev first, verify with:
--   SELECT count(*) FROM player_season_stats;
--   SELECT count(*) FROM evolution_track;
--   SELECT count(*) FROM evolution_card_state;
--   SELECT count(*) FROM evolution_tier_boost;
--   SELECT count(*) FROM evolution_cosmetic;
--   SELECT column_name FROM information_schema.columns
--     WHERE table_name IN ('card', 'battingcard', 'pitchingcard')
--       AND column_name IN ('variant', 'image_url')
--     ORDER BY table_name, column_name;
--
-- Rollback: See DROP/ALTER statements at bottom of file

-- ============================================
-- FORWARD MIGRATION (single transaction)
-- ============================================
BEGIN;

-- --------------------------------------------
-- Table 1: player_season_stats
-- Accumulates per-player per-team per-season
-- batting and pitching totals for evolution
-- formula evaluation.
-- --------------------------------------------
CREATE TABLE IF NOT EXISTS player_season_stats (
    id SERIAL PRIMARY KEY,
    player_id INTEGER NOT NULL REFERENCES player(player_id) ON DELETE CASCADE,
    team_id INTEGER NOT NULL REFERENCES team(id) ON DELETE CASCADE,
    season INTEGER NOT NULL,
    -- Batting stats
    games_batting INTEGER NOT NULL DEFAULT 0,
    pa INTEGER NOT NULL DEFAULT 0,
    ab INTEGER NOT NULL DEFAULT 0,
    hits INTEGER NOT NULL DEFAULT 0,
    doubles INTEGER NOT NULL DEFAULT 0,
    triples INTEGER NOT NULL DEFAULT 0,
    hr INTEGER NOT NULL DEFAULT 0,
    bb INTEGER NOT NULL DEFAULT 0,
    hbp INTEGER NOT NULL DEFAULT 0,
    so INTEGER NOT NULL DEFAULT 0,
    rbi INTEGER NOT NULL DEFAULT 0,
    runs INTEGER NOT NULL DEFAULT 0,
    sb INTEGER NOT NULL DEFAULT 0,
    cs INTEGER NOT NULL DEFAULT 0,
    -- Pitching stats
    games_pitching INTEGER NOT NULL DEFAULT 0,
    outs INTEGER NOT NULL DEFAULT 0,
    k INTEGER NOT NULL DEFAULT 0,
    bb_allowed INTEGER NOT NULL DEFAULT 0,
    hits_allowed INTEGER NOT NULL DEFAULT 0,
    hr_allowed INTEGER NOT NULL DEFAULT 0,
    wins INTEGER NOT NULL DEFAULT 0,
    losses INTEGER NOT NULL DEFAULT 0,
    saves INTEGER NOT NULL DEFAULT 0,
    holds INTEGER NOT NULL DEFAULT 0,
    blown_saves INTEGER NOT NULL DEFAULT 0,
    -- Meta
    last_game_id INTEGER REFERENCES stratgame(id) ON DELETE SET NULL,
    last_updated_at TIMESTAMP
);

-- One row per player per team per season
CREATE UNIQUE INDEX IF NOT EXISTS player_season_stats_player_team_season_uniq
    ON player_season_stats (player_id, team_id, season);

-- Fast lookup by team + season (e.g. leaderboard queries)
CREATE INDEX IF NOT EXISTS player_season_stats_team_season_idx
    ON player_season_stats (team_id, season);

-- Fast lookup by player across seasons
CREATE INDEX IF NOT EXISTS player_season_stats_player_season_idx
    ON player_season_stats (player_id, season);

-- --------------------------------------------
-- Table 2: evolution_track
-- Defines the available evolution tracks
-- (e.g. "HR Mastery", "Ace SP"), their
-- metric formula, and the four tier thresholds.
-- --------------------------------------------
CREATE TABLE IF NOT EXISTS evolution_track (
    id SERIAL PRIMARY KEY,
    name VARCHAR(255) UNIQUE NOT NULL,
    card_type VARCHAR(50) NOT NULL, -- 'batter', 'sp', or 'rp'
    formula VARCHAR(255) NOT NULL, -- e.g. 'hr', 'k_per_9', 'ops'
    t1_threshold INTEGER NOT NULL,
    t2_threshold INTEGER NOT NULL,
    t3_threshold INTEGER NOT NULL,
    t4_threshold INTEGER NOT NULL
);

-- --------------------------------------------
-- Table 3: evolution_card_state
-- Records each card's current evolution tier,
-- running metric value, and the track it
-- belongs to. One state row per card (player
-- + team combination uniquely identifies a
-- card in a given season).
-- --------------------------------------------
CREATE TABLE IF NOT EXISTS evolution_card_state (
    id SERIAL PRIMARY KEY,
    player_id INTEGER NOT NULL REFERENCES player(player_id) ON DELETE CASCADE,
    team_id INTEGER NOT NULL REFERENCES team(id) ON DELETE CASCADE,
    track_id INTEGER NOT NULL REFERENCES evolution_track(id) ON DELETE CASCADE,
    current_tier INTEGER NOT NULL DEFAULT 0,
    current_value DOUBLE PRECISION NOT NULL DEFAULT 0.0,
    fully_evolved BOOLEAN NOT NULL DEFAULT FALSE,
    last_evaluated_at TIMESTAMP
);

-- One evolution state per card (player + team)
CREATE UNIQUE INDEX IF NOT EXISTS evolution_card_state_player_team_uniq
    ON evolution_card_state (player_id, team_id);

-- --------------------------------------------
-- Table 4: evolution_tier_boost
-- Defines the stat boosts unlocked at each
-- tier within a track. A single tier may
-- grant multiple boosts (e.g. +1 HR and
-- +1 power rating).
-- --------------------------------------------
CREATE TABLE IF NOT EXISTS evolution_tier_boost (
    id SERIAL PRIMARY KEY,
    track_id INTEGER NOT NULL REFERENCES evolution_track(id) ON DELETE CASCADE,
    tier INTEGER NOT NULL, -- 1-4
    boost_type VARCHAR(50) NOT NULL, -- e.g. 'rating_bump', 'display_only'
    boost_target VARCHAR(50) NOT NULL, -- e.g. 'hr_rating', 'contact_rating'
    boost_value DOUBLE PRECISION NOT NULL DEFAULT 0.0
);

-- Prevent duplicate boost definitions for the same track/tier/type/target
CREATE UNIQUE INDEX IF NOT EXISTS evolution_tier_boost_track_tier_type_target_uniq
    ON evolution_tier_boost (track_id, tier, boost_type, boost_target);

-- --------------------------------------------
-- Table 5: evolution_cosmetic
-- Catalogue of unlockable visual treatments
-- (borders, foils, badges, etc.) tied to
-- minimum tier requirements.
-- --------------------------------------------
CREATE TABLE IF NOT EXISTS evolution_cosmetic (
    id SERIAL PRIMARY KEY,
    name VARCHAR(255) UNIQUE NOT NULL,
    tier_required INTEGER NOT NULL DEFAULT 0,
    cosmetic_type VARCHAR(50) NOT NULL, -- e.g. 'border', 'foil', 'badge'
    css_class VARCHAR(255),
    asset_url VARCHAR(500)
);

-- --------------------------------------------
-- Column extensions for existing tables
-- --------------------------------------------

-- Track which visual variant a card is displaying
-- (NULL = base card, 1+ = evolved variants)
ALTER TABLE card ADD COLUMN IF NOT EXISTS variant INTEGER DEFAULT NULL;

-- Store pre-rendered or externally-hosted card image URLs
ALTER TABLE battingcard ADD COLUMN IF NOT EXISTS image_url VARCHAR(500);
ALTER TABLE pitchingcard ADD COLUMN IF NOT EXISTS image_url VARCHAR(500);

COMMIT;

-- ============================================
-- VERIFICATION QUERIES
-- ============================================
-- \d player_season_stats
-- \d evolution_track
-- \d evolution_card_state
-- \d evolution_tier_boost
-- \d evolution_cosmetic
-- SELECT indexname FROM pg_indexes
--   WHERE tablename IN (
--     'player_season_stats',
--     'evolution_card_state',
--     'evolution_tier_boost'
--   )
--   ORDER BY tablename, indexname;
-- SELECT column_name, data_type FROM information_schema.columns
--   WHERE table_name IN ('card', 'battingcard', 'pitchingcard')
--     AND column_name IN ('variant', 'image_url')
--   ORDER BY table_name, column_name;

-- ============================================
-- ROLLBACK (if needed)
-- Run in reverse dependency order: column drops
-- first, then child tables before parents.
-- ============================================
-- ALTER TABLE pitchingcard DROP COLUMN IF EXISTS image_url;
-- ALTER TABLE battingcard DROP COLUMN IF EXISTS image_url;
-- ALTER TABLE card DROP COLUMN IF EXISTS variant;
-- DROP TABLE IF EXISTS evolution_cosmetic CASCADE;
-- DROP TABLE IF EXISTS evolution_tier_boost CASCADE;
-- DROP TABLE IF EXISTS evolution_card_state CASCADE;
-- DROP TABLE IF EXISTS evolution_track CASCADE;
-- DROP TABLE IF EXISTS player_season_stats CASCADE;

3
ruff.toml Normal file
View File

@ -0,0 +1,3 @@
[lint]
# db_engine.py uses `from peewee import *` intentionally — suppress the
# star-import warnings it would otherwise trigger:
#   F403: `from module import *` used; unable to detect undefined names
#   F405: name may be undefined, or defined from star imports
ignore = ["F403", "F405"]

View File

@ -1,14 +1,171 @@
"""Pytest configuration for the paper-dynasty-database test suite.
"""
Shared test fixtures for the Paper Dynasty database test suite.
Sets DATABASE_TYPE=postgresql before any app module is imported so that
db_engine.py sets SKIP_TABLE_CREATION=True and does not try to mutate the
production SQLite file during test collection. Each test module is
responsible for binding models to its own in-memory database.
Uses in-memory SQLite with foreign_keys pragma enabled. Each test
gets a fresh set of tables via the setup_test_db fixture (autouse).
All models are bound to the in-memory database before table creation
so that no connection to the real storage/pd_master.db occurs during
tests.
"""
import os
import pytest
from peewee import SqliteDatabase
# Set DATABASE_TYPE=postgresql so that the module-level SKIP_TABLE_CREATION
# flag is True. This prevents db_engine.py from calling create_tables()
# against the real storage/pd_master.db during import — those calls would
# fail if indexes already exist and would also contaminate the dev database.
# The PooledPostgresqlDatabase object is created but never actually connects
# because our fixture rebinds all models to an in-memory SQLite db before
# any query is executed.
os.environ["DATABASE_TYPE"] = "postgresql"
# Provide dummy credentials so PooledPostgresqlDatabase can be instantiated
# without raising a configuration error (it will not actually be used).
os.environ.setdefault("POSTGRES_PASSWORD", "test-dummy")
from app.db_engine import (
Rarity,
Event,
Cardset,
MlbPlayer,
Player,
Team,
PackType,
Pack,
Card,
Roster,
RosterSlot,
StratGame,
StratPlay,
Decision,
PlayerSeasonStats,
EvolutionTrack,
EvolutionCardState,
EvolutionTierBoost,
EvolutionCosmetic,
ScoutOpportunity,
ScoutClaim,
)
_test_db = SqliteDatabase(":memory:", pragmas={"foreign_keys": 1})
# All models in dependency order (parents before children) so that
# create_tables and drop_tables work without FK violations.
_TEST_MODELS = [
Rarity,
Event,
Cardset,
MlbPlayer,
Player,
Team,
PackType,
Pack,
Card,
Roster,
RosterSlot,
StratGame,
StratPlay,
Decision,
ScoutOpportunity,
ScoutClaim,
PlayerSeasonStats,
EvolutionTrack,
EvolutionCardState,
EvolutionTierBoost,
EvolutionCosmetic,
]
@pytest.fixture(autouse=True)
def setup_test_db():
    """Give every test a fresh, isolated in-memory schema.

    Autouse, so tests never need to request this fixture explicitly. All
    models are rebound to the shared in-memory SQLite database and their
    tables created before the test body runs. Afterwards the tables are
    dropped in reverse dependency order — which doubles as an early check
    for accidental FK reference-direction bugs.
    """
    _test_db.bind(_TEST_MODELS)
    _test_db.connect()
    _test_db.create_tables(_TEST_MODELS)
    yield _test_db
    _test_db.drop_tables(_TEST_MODELS[::-1], safe=True)
    _test_db.close()
# ---------------------------------------------------------------------------
# Minimal shared fixtures — create just enough data for FK dependencies
# ---------------------------------------------------------------------------
@pytest.fixture
def rarity():
    """A single Common rarity row used as FK seed for Player rows."""
    common = Rarity.create(value=1, name="Common", color="#ffffff")
    return common
@pytest.fixture
def player(rarity):
    """A minimal Player row with all required (non-nullable) columns filled.

    Player.p_name is the real column name (not 'name'). Every FK and
    non-nullable varchar field is supplied so SQLite's NOT NULL
    constraints are satisfied even with foreign_keys=ON.
    """
    test_cardset = Cardset.create(
        name="Test Set",
        description="Test cardset",
        total_cards=100,
    )
    player_attrs = {
        "p_name": "Test Player",
        "rarity": rarity,
        "cardset": test_cardset,
        "set_num": 1,
        "pos_1": "1B",
        "image": "https://example.com/image.png",
        "mlbclub": "TST",
        "franchise": "TST",
        "description": "A test player",
    }
    return Player.create(**player_attrs)
@pytest.fixture
def team():
    """A minimal Team row with every required column populated.

    Note: Team's real columns are abbrev/lname/sname/gmid/gmname/gsheet/
    wallet/team_value/collection_value — not the simplified 'name'/
    'user_id' shorthand used in the spec, which referred to these
    underlying columns by friendlier names.
    """
    return Team.create(
        abbrev="TST",
        lname="Test Team",
        sname="Test",
        gmid=100000001,
        gmname="testuser",
        gsheet="https://docs.google.com/spreadsheets/test",
        wallet=500,
        team_value=1000,
        collection_value=1000,
        season=11,
        is_ai=False,
    )
@pytest.fixture
def track():
    """A minimal EvolutionTrack for batter cards (locked WP-03 thresholds)."""
    thresholds = dict(t1_threshold=37, t2_threshold=149, t3_threshold=448, t4_threshold=896)
    return EvolutionTrack.create(
        name="Batter Track",
        card_type="batter",
        formula="pa + tb * 2",
        **thresholds,
    )

View File

@ -0,0 +1,332 @@
"""
Tests for evolution-related models and PlayerSeasonStats.
Covers WP-01 acceptance criteria:
- EvolutionTrack: CRUD and unique-name constraint
- EvolutionCardState: CRUD, defaults, unique-(player,team) constraint,
and FK resolution back to EvolutionTrack
- EvolutionTierBoost: CRUD and unique-(track, tier, boost_type, boost_target)
- EvolutionCosmetic: CRUD and unique-name constraint
- PlayerSeasonStats: CRUD with defaults, unique-(player, team, season),
and in-place stat accumulation
Each test class is self-contained: fixtures from conftest.py supply the
minimal parent rows needed to satisfy FK constraints, and every assertion
targets a single, clearly-named behaviour so failures are easy to trace.
"""
import pytest
from peewee import IntegrityError
from playhouse.shortcuts import model_to_dict
from app.db_engine import (
EvolutionCardState,
EvolutionCosmetic,
EvolutionTierBoost,
EvolutionTrack,
PlayerSeasonStats,
)
# ---------------------------------------------------------------------------
# EvolutionTrack
# ---------------------------------------------------------------------------
class TestEvolutionTrack:
    """Tests for the EvolutionTrack model.

    EvolutionTrack defines a named progression path (formula + tier
    thresholds) for one card type. Its name column is UNIQUE so
    accidental duplicates are rejected at the database level.
    """

    def test_create_track(self, track):
        """Creating a track persists all fields and they round-trip correctly.

        Reads back via model_to_dict (recurse=False) so the raw column
        values — not Python-object representations — are what is compared.
        """
        row = model_to_dict(track, recurse=False)
        expected = {
            "name": "Batter Track",
            "card_type": "batter",
            "formula": "pa + tb * 2",
            "t1_threshold": 37,
            "t2_threshold": 149,
            "t3_threshold": 448,
            "t4_threshold": 896,
        }
        for column, value in expected.items():
            assert row[column] == value, column

    def test_track_unique_name(self, track):
        """Inserting a second track with the same name raises IntegrityError.

        The UNIQUE constraint on EvolutionTrack.name must stop two tracks
        sharing an identifier — the name is used as a human-readable key
        throughout the evolution system.
        """
        duplicate = dict(
            name="Batter Track",  # collides with the `track` fixture row
            card_type="sp",
            formula="outs * 3",
            t1_threshold=10,
            t2_threshold=40,
            t3_threshold=120,
            t4_threshold=240,
        )
        with pytest.raises(IntegrityError):
            EvolutionTrack.create(**duplicate)
# ---------------------------------------------------------------------------
# EvolutionCardState
# ---------------------------------------------------------------------------
class TestEvolutionCardState:
    """Tests for EvolutionCardState, which tracks per-player evolution progress.

    One row = one card (player) owned by one team, linked to a specific
    EvolutionTrack. The row records the current tier (0-4), accumulated
    progress value, and whether the card is fully evolved.
    """

    def test_create_card_state(self, player, team, track):
        """Creation stores the FKs and applies the documented defaults.

        Defaults under test:
            current_tier      0     (fresh card, no tier unlocked yet)
            current_value     0.0   (no formula progress accumulated)
            fully_evolved     False (evolution not complete at creation)
            last_evaluated_at None  (never evaluated yet)
        """
        created = EvolutionCardState.create(player=player, team=team, track=track)
        row = EvolutionCardState.get_by_id(created.id)
        assert row.player_id == player.player_id
        assert row.team_id == team.id
        assert row.track_id == track.id
        assert row.current_tier == 0
        assert row.current_value == 0.0
        assert row.fully_evolved is False
        assert row.last_evaluated_at is None

    def test_card_state_unique_player_team(self, player, team, track):
        """A second state for the same (player, team) pair raises IntegrityError.

        The unique index on (player, team) means each player card has at
        most one evolution state per team roster slot — no duplicate
        progress rows for the same physical card.
        """
        identity = dict(player=player, team=team, track=track)
        EvolutionCardState.create(**identity)
        with pytest.raises(IntegrityError):
            EvolutionCardState.create(**identity)

    def test_card_state_fk_track(self, player, team, track):
        """card_state.track resolves back to the original EvolutionTrack.

        Confirms the FK is wired correctly: Peewee returns an object with
        the same primary key and name as the track used during creation.
        """
        state_id = EvolutionCardState.create(player=player, team=team, track=track).id
        linked = EvolutionCardState.get_by_id(state_id).track
        assert linked.id == track.id
        assert linked.name == "Batter Track"
# ---------------------------------------------------------------------------
# EvolutionTierBoost
# ---------------------------------------------------------------------------
class TestEvolutionTierBoost:
    """Tests for EvolutionTierBoost, the per-tier stat/rating bonus table.

    Each row maps a (track, tier) combination to a single boost — the
    specific stat or rating column to buff and by how much. The four-
    column unique constraint prevents double-booking the same boost slot.
    """

    def test_create_tier_boost(self, track):
        """Creating a boost row persists all fields accurately.

        boost_type, boost_target, and boost_value must be stored and
        retrieved without modification.
        """
        created = EvolutionTierBoost.create(
            track=track,
            tier=1,
            boost_type="rating",
            boost_target="contact_vl",
            boost_value=1.5,
        )
        row = EvolutionTierBoost.get_by_id(created.id)
        expected = {
            "track_id": track.id,
            "tier": 1,
            "boost_type": "rating",
            "boost_target": "contact_vl",
            "boost_value": 1.5,
        }
        for attr, value in expected.items():
            assert getattr(row, attr) == value, attr

    def test_tier_boost_unique_constraint(self, track):
        """Duplicate (track, tier, boost_type, boost_target) raises IntegrityError.

        The four-column unique index ensures a single boost slot (e.g.
        Tier-2 power_vr rating) cannot be defined twice for the same
        track, which would make evolution evaluation ambiguous.
        """
        identity = dict(track=track, tier=2, boost_type="rating", boost_target="power_vr")
        EvolutionTierBoost.create(boost_value=2.0, **identity)
        with pytest.raises(IntegrityError):
            # Different value, same identity columns — still rejected.
            EvolutionTierBoost.create(boost_value=3.0, **identity)
# ---------------------------------------------------------------------------
# EvolutionCosmetic
# ---------------------------------------------------------------------------
class TestEvolutionCosmetic:
    """Tests for EvolutionCosmetic, decorative unlocks tied to evolution tiers.

    Cosmetics are purely visual rewards (frames, badges, themes) that a
    card unlocks at a required tier. The name column is the stable
    identifier and carries a UNIQUE constraint.
    """

    def test_create_cosmetic(self):
        """All columns — including optional css_class/asset_url — round-trip."""
        created = EvolutionCosmetic.create(
            name="Gold Frame",
            tier_required=2,
            cosmetic_type="frame",
            css_class="evo-frame-gold",
            asset_url="https://cdn.example.com/frames/gold.png",
        )
        row = EvolutionCosmetic.get_by_id(created.id)
        assert row.name == "Gold Frame"
        assert row.tier_required == 2
        assert row.cosmetic_type == "frame"
        assert row.css_class == "evo-frame-gold"
        assert row.asset_url == "https://cdn.example.com/frames/gold.png"

    def test_cosmetic_unique_name(self):
        """A second cosmetic with an existing name raises IntegrityError.

        The UNIQUE constraint on EvolutionCosmetic.name prevents duplicate
        cosmetic definitions that could make tier-unlock lookups ambiguous.
        """
        EvolutionCosmetic.create(
            name="Silver Badge",
            tier_required=1,
            cosmetic_type="badge",
        )
        with pytest.raises(IntegrityError):
            EvolutionCosmetic.create(
                name="Silver Badge",  # collides with the row above
                tier_required=3,
                cosmetic_type="badge",
            )
# ---------------------------------------------------------------------------
# PlayerSeasonStats
# ---------------------------------------------------------------------------
class TestPlayerSeasonStats:
    """Tests for PlayerSeasonStats, the per-season accumulation table.

    One row aggregates game-by-game batting and pitching stats for one
    player on one team in one season. The three-column unique constraint
    prevents double-counting and guarantees a single authoritative row
    per (player, team, season) combination.
    """

    def test_create_season_stats(self, player, team):
        """Explicitly-set batting columns round-trip; unset columns use defaults.

        Also verifies the integer stat defaults (all 0) for columns not
        provided — the initial state before any games are processed —
        plus the nullable meta columns defaulting to None.
        """
        batting = dict(
            games_batting=5,
            pa=20,
            ab=18,
            hits=6,
            doubles=1,
            triples=0,
            hr=2,
            bb=2,
            hbp=0,
            so=4,
            rbi=5,
            runs=3,
            sb=1,
            cs=0,
        )
        created = PlayerSeasonStats.create(player=player, team=team, season=11, **batting)
        row = PlayerSeasonStats.get_by_id(created.id)
        assert row.player_id == player.player_id
        assert row.team_id == team.id
        assert row.season == 11
        for attr, value in dict(games_batting=5, pa=20, hits=6, hr=2).items():
            assert getattr(row, attr) == value, attr
        # Pitching columns were never supplied — confirm default zeroes.
        for attr in ("games_pitching", "outs", "wins", "saves"):
            assert getattr(row, attr) == 0, attr
        # Nullable meta columns
        assert row.last_game is None
        assert row.last_updated_at is None

    def test_season_stats_unique_constraint(self, player, team):
        """A second row for the same (player, team, season) raises IntegrityError.

        The three-column unique index guarantees exactly one accumulation
        row per combination, preventing duplicate aggregation that would
        inflate totals.
        """
        PlayerSeasonStats.create(player=player, team=team, season=11)
        with pytest.raises(IntegrityError):
            PlayerSeasonStats.create(player=player, team=team, season=11)

    def test_season_stats_increment(self, player, team):
        """Fetch-add-save on an existing row persists the change.

        Simulates the stats accumulator's pattern: fetch the row, apply
        the game delta, save. A subsequent read must reflect the update.
        """
        row = PlayerSeasonStats.create(player=player, team=team, season=11, hits=10)
        row.hits += 3
        row.save()
        assert PlayerSeasonStats.get_by_id(row.id).hits == 13

View File

@ -1,119 +1,159 @@
"""Tests for the evolution track seed data fixture (WP-03).
"""
Tests for app/seed/evolution_tracks.py seed_evolution_tracks().
Unit tests verify the JSON fixture is correctly formed without touching any
database. The integration test binds a minimal in-memory EvolutionTrack
model (mirroring the schema WP-01 will add to db_engine) to an in-memory
SQLite database, calls seed(), and verifies idempotency.
What: Verify that the JSON-driven seed function correctly creates, counts,
and idempotently updates EvolutionTrack rows in the database.
Why: The seed is the single source of truth for track configuration. A
regression here (duplicates, wrong thresholds, missing formula) would
silently corrupt evolution scoring for every card in the system.
Each test operates on a fresh in-memory SQLite database provided by the
autouse `setup_test_db` fixture in conftest.py. The seed reads its data
from `app/seed/evolution_tracks.json` on disk, so the tests also serve as
a light integration check between the JSON file and the Peewee model.
"""
import json
from pathlib import Path
import pytest
from peewee import CharField, IntegerField, Model, SqliteDatabase
from app.seed.evolution_tracks import load_tracks, seed
from app.db_engine import EvolutionTrack
from app.seed.evolution_tracks import seed_evolution_tracks
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
_test_db = SqliteDatabase(":memory:")
# Path to the JSON fixture that the seed reads from at runtime
_JSON_PATH = Path(__file__).parent.parent / "app" / "seed" / "evolution_tracks.json"
class EvolutionTrackStub(Model):
"""Minimal EvolutionTrack model for integration tests.
@pytest.fixture
def json_tracks():
"""Load the raw JSON definitions so tests can assert against them.
Mirrors the schema that WP-01 will add to db_engine so the integration
test can run without WP-01 being merged.
This avoids hardcoding expected values if the JSON changes, tests
automatically follow without needing manual updates.
"""
name = CharField()
card_type = CharField(unique=True)
formula = CharField()
t1 = IntegerField()
t2 = IntegerField()
t3 = IntegerField()
t4 = IntegerField()
class Meta:
database = _test_db
table_name = "evolution_track"
return json.loads(_JSON_PATH.read_text(encoding="utf-8"))
@pytest.fixture(autouse=True)
def _db():
    """Bind and create the stub table; drop it after each test.

    reuse_if_open avoids an OperationalError when a prior test left the
    shared in-memory connection open.
    """
    _test_db.connect(reuse_if_open=True)
    _test_db.create_tables([EvolutionTrackStub])
    yield
    _test_db.drop_tables([EvolutionTrackStub])
def test_seed_creates_three_tracks(json_tracks):
"""After one seed call, exactly 3 EvolutionTrack rows must exist.
# ---------------------------------------------------------------------------
# Unit tests — JSON fixture only, no database
# ---------------------------------------------------------------------------
def test_three_tracks_in_seed_data():
    """load_tracks() must return exactly 3 evolution tracks."""
    tracks = load_tracks()
    assert len(tracks) == 3
def test_card_types_are_exactly_batter_sp_rp():
    """The set of card_type values must be exactly {'batter', 'sp', 'rp'}."""
    observed = {definition["card_type"] for definition in load_tracks()}
    assert observed == {"batter", "sp", "rp"}
def test_all_thresholds_positive_and_ascending():
    """Each track must have t1 < t2 < t3 < t4, all positive."""
    for definition in load_tracks():
        tiers = [definition[key] for key in ("t1", "t2", "t3", "t4")]
        assert tiers[0] > 0
        assert tiers[0] < tiers[1] < tiers[2] < tiers[3]
def test_all_tracks_have_non_empty_formula():
    """Every track must have a non-empty formula string."""
    for definition in load_tracks():
        formula = definition["formula"]
        assert isinstance(formula, str) and formula.strip()
def test_tier_thresholds_match_locked_values():
    """Threshold values must exactly match the locked design spec."""
    by_type = {definition["card_type"]: definition for definition in load_tracks()}
    # Locked values from the design spec, as (t1, t2, t3, t4) per card type.
    locked = {
        "batter": (37, 149, 448, 896),
        "sp": (10, 40, 120, 240),
        "rp": (3, 12, 35, 70),
    }
    for card_type, expected in locked.items():
        definition = by_type[card_type]
        for key, value in zip(("t1", "t2", "t3", "t4"), expected):
            assert definition[key] == value, f"{card_type}.{key}"
# ---------------------------------------------------------------------------
# Integration test — uses the stub model + in-memory SQLite
# ---------------------------------------------------------------------------
def test_seed_is_idempotent():
"""Calling seed() twice must not create duplicate rows (get_or_create).
First call: all three tracks created (created=True for each).
Second call: all three already exist (created=False for each).
Both calls succeed without error.
Why: The JSON currently defines three card-type tracks (batter, sp, rp).
If the count is wrong the system would either be missing tracks
(evolution disabled for a card type) or have phantom extras.
"""
results_first = seed(model_class=EvolutionTrackStub)
assert len(results_first) == 3
assert all(created for _, created in results_first)
seed_evolution_tracks()
assert EvolutionTrack.select().count() == 3
results_second = seed(model_class=EvolutionTrackStub)
assert len(results_second) == 3
assert not any(created for _, created in results_second)
assert EvolutionTrackStub.select().count() == 3
def test_seed_correct_card_types(json_tracks):
    """The set of card_type values persisted must match the JSON exactly.

    Why: card_type is used as a discriminator throughout the evolution
    engine. An unexpected value (e.g. 'pitcher' instead of 'sp') would
    cause track-lookup misses and silently skip evolution scoring for
    that role.
    """
    seed_evolution_tracks()
    persisted = {row.card_type for row in EvolutionTrack.select()}
    declared = {definition["card_type"] for definition in json_tracks}
    assert persisted == declared
def test_seed_thresholds_ascending():
    """For every track, t1 < t2 < t3 < t4.

    Why: The evolution engine uses these thresholds as tier boundaries.
    If they are not strictly ascending, tier comparisons would produce
    incorrect or undefined results (e.g. a player could simultaneously
    satisfy tier 3 and not satisfy tier 2).
    """
    seed_evolution_tracks()
    for row in EvolutionTrack.select():
        t1, t2, t3, t4 = (
            row.t1_threshold,
            row.t2_threshold,
            row.t3_threshold,
            row.t4_threshold,
        )
        assert t1 < t2, f"{row.name}: t1 ({t1}) >= t2 ({t2})"
        assert t2 < t3, f"{row.name}: t2 ({t2}) >= t3 ({t3})"
        assert t3 < t4, f"{row.name}: t3 ({t3}) >= t4 ({t4})"
def test_seed_thresholds_positive():
    """All tier threshold values must be strictly greater than zero.

    Why: A zero or negative threshold would mean a card starts the game
    already evolved (tier >= 1 at 0 accumulated stat points), bypassing
    the entire progression system.
    """
    seed_evolution_tracks()
    for row in EvolutionTrack.select():
        for label, value in (
            ("t1", row.t1_threshold),
            ("t2", row.t2_threshold),
            ("t3", row.t3_threshold),
            ("t4", row.t4_threshold),
        ):
            assert value > 0, f"{row.name}: {label}_threshold is not positive"
def test_seed_formula_present():
    """Every persisted track must have a non-empty formula string.

    Why: The formula is evaluated at runtime to compute a player's
    evolution score. An empty formula would either raise during eval or
    silently score 0 for every player, halting all evolution progress.
    """
    seed_evolution_tracks()
    for row in EvolutionTrack.select():
        formula = row.formula
        assert formula and formula.strip(), (
            f"{row.name}: formula is empty or whitespace-only"
        )
def test_seed_idempotent():
    """Calling seed_evolution_tracks() twice must still yield exactly 3 rows.

    Why: The seed is designed to be safe to re-run (e.g. in a migration
    or CI bootstrap). Duplicate inserts on a second call would raise an
    IntegrityError on the unique name in PostgreSQL, and in SQLite would
    silently create phantom rows that corrupt tier-lookup joins.
    """
    for _ in range(2):
        seed_evolution_tracks()
    assert EvolutionTrack.select().count() == 3
def test_seed_updates_on_rerun(json_tracks):
    """A second seed call must restore a manually changed threshold to the JSON value.

    What: Seed once, mutate one threshold directly in the DB, then seed
    again; the threshold must be back at the JSON-defined value.

    Why: The seed must act as the authoritative source of truth. If
    re-seeding did not overwrite local changes, configuration drift could
    build up silently and production would diverge from the checked-in
    JSON with no visible error.
    """
    seed_evolution_tracks()

    target = json_tracks[0]
    name_filter = EvolutionTrack.name == target["name"]

    # Corrupt the first track's t1_threshold well away from the JSON value.
    row = EvolutionTrack.get(name_filter)
    corrupted_value = row.t1_threshold + 9999
    row.t1_threshold = corrupted_value
    row.save()
    # Confirm the corruption took effect before re-seeding.
    assert EvolutionTrack.get(name_filter).t1_threshold == corrupted_value

    # Re-seed — must restore the JSON value.
    seed_evolution_tracks()
    restored = EvolutionTrack.get(name_filter)
    assert restored.t1_threshold == target["t1_threshold"], (
        f"Expected t1_threshold={target['t1_threshold']} after re-seed, "
        f"got {restored.t1_threshold}"
    )

View File

@ -0,0 +1,594 @@
"""
Tests for app/services/season_stats.py update_season_stats().
What: Verify that the incremental stat accumulation function correctly
aggregates StratPlay and Decision rows into PlayerSeasonStats, handles
duplicate calls idempotently, and accumulates stats across multiple games.
Why: This is the core bookkeeping engine for card evolution scoring. A
double-count bug, a missed Decision merge, or a team-isolation failure
would silently produce wrong stats that would then corrupt every
evolution tier calculation downstream.
Test data is created using real Peewee models (no mocking) against the
in-memory SQLite database provided by the autouse setup_test_db fixture
in conftest.py. All Player and Team creation uses the actual required
column set discovered from the model definition in db_engine.py.
"""
import app.services.season_stats as _season_stats_module
import pytest
from app.db_engine import (
Cardset,
Decision,
Player,
PlayerSeasonStats,
Rarity,
StratGame,
StratPlay,
Team,
)
from app.services.season_stats import update_season_stats
from tests.conftest import _test_db
# ---------------------------------------------------------------------------
# Module-level patch: redirect season_stats.db to the test database
# ---------------------------------------------------------------------------
# season_stats.py holds a module-level reference to the `db` object imported
# from db_engine. When test models are rebound to _test_db via bind(), the
# `db` object inside season_stats still points at the original production db
# (SQLite file or PostgreSQL). We replace it here so that db.atomic() in
# update_season_stats() operates on the same in-memory connection that the
# test fixtures write to.
# NOTE(review): this rebinding happens once at import time and persists for
# the whole session — presumably safe because every test in this suite uses
# _test_db; confirm if season_stats is ever exercised against a real db.
_season_stats_module.db = _test_db
# ---------------------------------------------------------------------------
# Helper factories
# ---------------------------------------------------------------------------
def _make_cardset():
    """Return the shared "Test Set" Cardset row, creating it on first use."""
    cardset, _created = Cardset.get_or_create(
        name="Test Set",
        defaults={"description": "Test cardset", "total_cards": 100},
    )
    return cardset
def _make_rarity():
    """Return the Common rarity singleton, creating it on first use."""
    rarity, _created = Rarity.get_or_create(
        value=1, name="Common", defaults={"color": "#ffffff"}
    )
    return rarity
def _make_player(name: str, pos: str = "1B") -> Player:
    """Create a Player row with all required (non-nullable) columns satisfied.

    Why a helper: Player has many non-nullable varchar columns (image,
    mlbclub, franchise, description) plus required FKs to Rarity and
    Cardset. Centralizing creation keeps test fixtures concise and
    consistent.
    """
    return Player.create(
        p_name=name,
        rarity=_make_rarity(),
        cardset=_make_cardset(),
        set_num=1,
        pos_1=pos,
        description=f"Test player: {name}",
        image="https://example.com/image.png",
        mlbclub="TST",
        franchise="TST",
    )
def _make_team(abbrev: str, gmid: int, season: int = 11) -> Team:
    """Create a Team row with all required (non-nullable) columns satisfied."""
    identity = dict(
        abbrev=abbrev,
        sname=abbrev,
        lname=f"Team {abbrev}",
        gmid=gmid,
        gmname=f"gm_{abbrev.lower()}",
    )
    economics = dict(wallet=500, team_value=1000, collection_value=1000)
    return Team.create(
        gsheet="https://docs.google.com/spreadsheets/test",
        season=season,
        is_ai=False,
        **identity,
        **economics,
    )
def make_play(game, play_num, batter, batter_team, pitcher, pitcher_team, **stats):
    """Create a StratPlay row with explicit zero/neutral values for every stat.

    Why the explicit baseline: StratPlay's IntegerField columns already
    default to 0 at the model level, but supplying them here documents the
    baseline state of a play and keeps this helper stable if model
    defaults ever change. Keyword overrides in **stats win over the
    baseline.
    """
    baseline = dict(
        on_base_code="000",
        inning_half="top",
        inning_num=1,
        batting_order=1,
        starting_outs=0,
        away_score=0,
        home_score=0,
        pa=0,
        ab=0,
        hit=0,
        run=0,
        double=0,
        triple=0,
        homerun=0,
        bb=0,
        so=0,
        hbp=0,
        rbi=0,
        sb=0,
        cs=0,
        outs=0,
        sac=0,
        ibb=0,
        gidp=0,
        bphr=0,
        bpfo=0,
        bp1b=0,
        bplo=0,
    )
    return StratPlay.create(
        game=game,
        play_num=play_num,
        batter=batter,
        batter_team=batter_team,
        pitcher=pitcher,
        pitcher_team=pitcher_team,
        **{**baseline, **stats},
    )
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
@pytest.fixture
def team_a():
    """The away-side team used across season-stats tests."""
    return _make_team("TMA", gmid=1001)
@pytest.fixture
def team_b():
    """The home-side team used across season-stats tests."""
    return _make_team("TMB", gmid=1002)
@pytest.fixture
def player_batter():
    """A batter-type player for team A."""
    return _make_player("Batter One", pos="CF")
@pytest.fixture
def player_pitcher():
    """A pitcher-type player for team B."""
    return _make_player("Pitcher One", pos="SP")
@pytest.fixture
def game(team_a, team_b):
    """A ranked season-11 game with team A away and team B home."""
    return StratGame.create(
        season=11,
        game_type="ranked",
        away_team=team_a,
        home_team=team_b,
    )
# ---------------------------------------------------------------------------
# Tests
# ---------------------------------------------------------------------------
def test_single_game_batting_stats(team_a, team_b, player_batter, player_pitcher, game):
    """Batting stat totals from StratPlay rows are correctly accumulated.

    What: Create four plate appearances (a single, a home run, a strikeout,
    and a walk) for one batter. After update_season_stats(), the
    PlayerSeasonStats row should reflect the exact sum of all play fields.

    Why: This is the core of the batting aggregation pipeline. If any field
    mapping is wrong (e.g. 'hit' mapped to 'doubles' instead of 'hits'),
    evolution scoring and leaderboards would silently report bad stats.
    """
    # (play_num, per-play stat deltas): single, home run, strikeout, walk.
    plays = [
        (1, dict(pa=1, ab=1, hit=1, outs=0)),
        (2, dict(pa=1, ab=1, hit=1, homerun=1, rbi=1, run=1, outs=0)),
        (3, dict(pa=1, ab=1, so=1, outs=1)),
        (4, dict(pa=1, bb=1, outs=0)),
    ]
    for play_num, deltas in plays:
        make_play(
            game, play_num, player_batter, team_a, player_pitcher, team_b, **deltas
        )

    result = update_season_stats(game.id)
    assert result["batters_updated"] >= 1

    stats = PlayerSeasonStats.get(
        PlayerSeasonStats.player == player_batter,
        PlayerSeasonStats.team == team_a,
        PlayerSeasonStats.season == 11,
    )
    expected = dict(
        pa=4, ab=3, hits=2, hr=1, so=1, bb=1, rbi=1, runs=1, games_batting=1
    )
    for column, value in expected.items():
        assert getattr(stats, column) == value, column
def test_single_game_pitching_stats(
    team_a, team_b, player_batter, player_pitcher, game
):
    """Pitching stat totals (outs, k, hits_allowed, bb_allowed) are correct.

    What: The same plays that create batting stats for the batter also
    feed the pitcher's opposing stats; this checks that
    _build_pitching_groups() correctly inverts batter-perspective fields.

    Why: The batter's 'so' becomes the pitcher's 'k', the batter's 'hit'
    becomes 'hits_allowed', etc. Any transposition in this mapping would
    corrupt pitcher stats silently.
    """
    # From the batter's perspective: strikeout, single, walk.
    plays = [
        (1, dict(pa=1, ab=1, so=1, outs=1)),
        (2, dict(pa=1, ab=1, hit=1, outs=0)),
        (3, dict(pa=1, bb=1, outs=0)),
    ]
    for play_num, deltas in plays:
        make_play(
            game, play_num, player_batter, team_a, player_pitcher, team_b, **deltas
        )

    update_season_stats(game.id)

    stats = PlayerSeasonStats.get(
        PlayerSeasonStats.player == player_pitcher,
        PlayerSeasonStats.team == team_b,
        PlayerSeasonStats.season == 11,
    )
    assert stats.outs == 1  # one strikeout = one out recorded
    assert stats.k == 1  # batter's so → pitcher's k
    assert stats.hits_allowed == 1  # batter's hit → pitcher hits_allowed
    assert stats.bb_allowed == 1  # batter's bb → pitcher bb_allowed
    assert stats.games_pitching == 1
def test_decision_integration(team_a, team_b, player_batter, player_pitcher, game):
    """Decision.win=1 for a pitcher results in wins=1 in PlayerSeasonStats.

    What: Add one StratPlay so the pitcher appears in pitching_groups, then
    create a Decision row recording a win. Call update_season_stats() and
    verify the wins column is 1.

    Why: Decisions live in a separate table from StratPlay. If
    _apply_decisions() fails to merge them (wrong FK lookup, key mismatch),
    pitchers would always show 0 wins/losses/saves regardless of actual
    game outcomes, breaking standings and evolution criteria.
    """
    # One play is enough to register the pitcher in the pitching groups.
    make_play(
        game, 1, player_batter, team_a, player_pitcher, team_b, pa=1, ab=1, outs=1
    )
    Decision.create(
        season=11,
        game=game,
        pitcher=player_pitcher,
        pitcher_team=team_b,
        win=1,
        loss=0,
        is_save=0,
        hold=0,
        b_save=0,
        is_start=True,
    )

    update_season_stats(game.id)

    stats = PlayerSeasonStats.get(
        PlayerSeasonStats.player == player_pitcher,
        PlayerSeasonStats.team == team_b,
        PlayerSeasonStats.season == 11,
    )
    assert stats.wins == 1
    assert stats.losses == 0
def test_double_count_prevention(team_a, team_b, player_batter, player_pitcher, game):
    """Calling update_season_stats() twice for the same game must not double the stats.

    What: Process a game once (pa=3), then immediately call the function
    again with the same game_id. The second call detects, via the
    PlayerSeasonStats.last_game FK check, that this game is still the
    most-recently-processed game and returns early with 'skipped'=True.
    The resulting pa should still be 3, not 6.

    Why: The bot infrastructure may deliver game-complete events more than
    once (network retries, message replays); the guard prevents
    double-counting when the replayed game is still the last one processed
    for those players. Note: this only covers same-game immediate replay;
    out-of-order re-delivery (game G after G+1) is a known limitation
    tracked in issue #105.
    """
    for play_num in (1, 2, 3):
        make_play(
            game, play_num, player_batter, team_a, player_pitcher, team_b,
            pa=1, ab=1, outs=1,
        )

    first_pass = update_season_stats(game.id)
    assert "skipped" not in first_pass

    replay = update_season_stats(game.id)
    assert replay.get("skipped") is True
    assert replay["batters_updated"] == 0
    assert replay["pitchers_updated"] == 0

    stats = PlayerSeasonStats.get(
        PlayerSeasonStats.player == player_batter,
        PlayerSeasonStats.team == team_a,
        PlayerSeasonStats.season == 11,
    )
    # Must still be 3, not 6
    assert stats.pa == 3
def test_two_games_accumulate(team_a, team_b, player_batter, player_pitcher):
    """Stats from two separate games are summed into one season row.

    What: Process game 1 (pa=2) and then game 2 (pa=3) for the same
        batter/team pair; afterwards the single PlayerSeasonStats row
        must read pa=5 with games_batting=2.
    Why: PlayerSeasonStats is a season-long accumulator, not a per-game
        snapshot. If the upsert overwrote instead of incrementing, a
        player's line would only ever reflect their most recent game.
    """
    def _new_game():
        # Fresh ranked game between the same two franchises.
        return StratGame.create(
            season=11, game_type="ranked", away_team=team_a, home_team=team_b
        )

    def _record_plays(target_game, count):
        # One generic out-producing plate appearance per sequence number.
        for seq in range(1, count + 1):
            make_play(
                target_game,
                seq,
                player_batter,
                team_a,
                player_pitcher,
                team_b,
                pa=1,
                ab=1,
                outs=1,
            )

    game_one = _new_game()
    game_two = _new_game()
    _record_plays(game_one, 2)
    _record_plays(game_two, 3)
    update_season_stats(game_one.id)
    update_season_stats(game_two.id)
    row = PlayerSeasonStats.get(
        (PlayerSeasonStats.player == player_batter)
        & (PlayerSeasonStats.team == team_a)
        & (PlayerSeasonStats.season == 11)
    )
    # 2 + 3 plate appearances across two counted games.
    assert row.pa == 5
    assert row.games_batting == 2
def test_two_team_game(team_a, team_b):
    """Each (player, team) pairing in a game gets its own isolated row.

    What: Build a batter/pitcher pair for each team and play both halves
        of an inning: team A's batter faces team B's pitcher (top) and
        vice versa (bottom). After update_season_stats(), both batters
        and both pitchers must have correct, isolated stats rows.
    Why: Correct (player, team) attribution is a key guarantee — a bad
        join could file stats under the wrong franchise or merge a
        player's line with an opponent's.
    """
    batter_a = _make_player("Batter A", pos="CF")
    pitcher_a = _make_player("Pitcher A", pos="SP")
    batter_b = _make_player("Batter B", pos="CF")
    pitcher_b = _make_player("Pitcher B", pos="SP")
    game = StratGame.create(
        season=11, game_type="ranked", away_team=team_a, home_team=team_b
    )
    # (seq, batter, batting team, pitcher, pitching team, half, outcome kwargs)
    plays = [
        (1, batter_a, team_a, pitcher_b, team_b, "top", {"hit": 1, "outs": 0}),
        (2, batter_a, team_a, pitcher_b, team_b, "top", {"so": 1, "outs": 1}),
        (3, batter_b, team_b, pitcher_a, team_a, "bottom", {"bb": 1, "outs": 0}),
    ]
    for seq, batter, bat_team, pitcher, pitch_team, half, outcome in plays:
        make_play(
            game,
            seq,
            batter,
            bat_team,
            pitcher,
            pitch_team,
            pa=1,
            ab=1,
            inning_half=half,
            **outcome,
        )
    update_season_stats(game.id)

    def _row(player, team):
        # Fetch the stats row for one (player, team) combination.
        return PlayerSeasonStats.get(
            (PlayerSeasonStats.player == player)
            & (PlayerSeasonStats.team == team)
        )

    # Team A's batter: 2 PA, 1 hit, 1 strikeout.
    stats_ba = _row(batter_a, team_a)
    assert stats_ba.pa == 2
    assert stats_ba.hits == 1
    assert stats_ba.so == 1
    # Team B's batter: 1 PA, 1 walk.
    stats_bb = _row(batter_b, team_b)
    assert stats_bb.pa == 1
    assert stats_bb.bb == 1
    # Team B's pitcher (faced team A's batter): 1 hit allowed, 1 K.
    stats_pb = _row(pitcher_b, team_b)
    assert stats_pb.hits_allowed == 1
    assert stats_pb.k == 1
    # Team A's pitcher (faced team B's batter): 1 walk allowed.
    stats_pa = _row(pitcher_a, team_a)
    assert stats_pa.bb_allowed == 1