Compare commits

...

69 Commits

Author SHA1 Message Date
cal
87599a67d5 Merge pull request 'docs: refractor card art mockup and visual spec' (#53) from wip/refractor-card-art into main 2026-04-04 03:13:34 +00:00
cal
e266f814ca Merge branch 'main' into wip/refractor-card-art 2026-04-04 03:13:29 +00:00
Cal Corum
f329d74ed8 docs: refractor tier mockup — diamond indicator, effects, and visual spec
Interactive mockup for refractor card art with:
- 4-quadrant diamond tier indicator (baseball base-path fill order)
- Metallic sheen + pulse glow effect (approved combo)
- Tier colors: T1 orange, T2 red, T3 purple, T4 blue-flame
- T3 gold shimmer sweep, T4 prismatic rainbow + dual glow + bar shimmer
- Cherry-pick reference: docs/refractor-visual-spec.md

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-03 19:33:31 -05:00
cal
c1f06eb9c7 Merge pull request 'fix: scrub exposed credentials from docs' (#52) from fix/scrub-exposed-credentials into main 2026-04-01 18:02:01 +00:00
cal
d6b65594b8 Merge branch 'main' into fix/scrub-exposed-credentials 2026-04-01 18:01:56 +00:00
Cal Corum
94fd72344d fix: scrub exposed API token from docs
Replace real API token in PD_CARDS_CLI_REFERENCE.md example command
with placeholder value to prevent credential exposure in the repo.

Closes #50
2026-04-01 11:58:58 -05:00
Cal Corum
43aff3568f fix: increase API timeouts to prevent bulk query failures
db_calls.py default timeouts raised from 3s to 30s across all methods
(db_get, url_get, db_patch, db_post, db_put). scouting_batters.py
fetch_data now passes timeout=120 for large card rating queries.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-30 07:53:58 -05:00
cal
eaf4bdbd6c docs: refractor Phase 2 design validation spec (#51) 2026-03-24 21:09:06 +00:00
Cal Corum
6f67cfec9a docs: address PR #51 review — rarity naming, OPS threshold, truncation invariant
- Add rarity name cross-reference table in Background section mapping PRD
  display names (Replacement/Reserve/Starter/All-Star/MVP/Hall of Fame) to
  codebase names (Common/Bronze/Silver/Gold/Diamond/HoF) with IDs
- Fix T4-2: correct Gold OPS threshold from 0.700 to 0.900 (confirmed in
  rarity_thresholds.py); add note that 0.700 is the Bronze floor
- Fix T4-1: restate truncation invariant as a single precise assertion —
  sum(columns) == 108 - truncated_amount — instead of two independent checks
  that can both pass while the sum is wrong for unrelated reasons

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-24 16:07:18 -05:00
Cal Corum
f2c09d09e6 docs: add Refractor Phase 2 design validation spec
Seven pre-implementation test cases covering: 108-sum invariant
preservation under profile-based boosts, D20 probability shift
magnitude at T4, pipeline collision risk between T4 rarity upgrade
and live-series post_player_updates, HoF rarity cap (non-contiguous
ID ladder), RP T1 achievability, SP/RP/batter T4 parity, and the
cross-season stat accumulation design decision that must be confirmed
before Phase 2 code is written.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-24 09:07:09 -05:00
cal
8c00bacf59 Merge pull request 'feat: add return type annotations to async DB functions (#20)' (#47) from ai/paper-dynasty-card-creation-20 into main 2026-03-23 19:52:02 +00:00
cal
de9604364c Merge branch 'main' into ai/paper-dynasty-card-creation-20 2026-03-23 19:51:48 +00:00
cal
aa8306e844 Merge pull request 'fix: apply timeout parameter to all aiohttp sessions in db_calls.py (#4)' (#41) from ai/paper-dynasty-card-creation#4 into main 2026-03-23 13:25:37 +00:00
cal
d7c6e6da27 Merge branch 'main' into ai/paper-dynasty-card-creation#4 2026-03-23 13:25:19 +00:00
cal
4392f6c07f Merge pull request 'fix: add @pytest.mark.asyncio to async test methods (#21)' (#30) from ai/paper-dynasty-card-creation#21 into main 2026-03-23 13:25:11 +00:00
cal
3612b0710b Merge branch 'main' into ai/paper-dynasty-card-creation#4 2026-03-23 13:24:43 +00:00
cal
424b7da78d Merge branch 'main' into ai/paper-dynasty-card-creation#21 2026-03-23 13:24:40 +00:00
Cal Corum
82a8dac950 fix: correct url_get return type annotation to dict
url_get returns await resp.json() which is a dict, not aiohttp.ClientResponse.
The wrong annotation was introduced in the original PR and would mislead
static analysis tools into expecting a response object from callers.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-23 08:14:24 -05:00
cal
461a469374 Merge pull request 'feat: implement tweak_archetype() and manual_adjustments() (#12)' (#49) from ai/paper-dynasty-card-creation-12 into main 2026-03-23 12:42:00 +00:00
Cal Corum
962b9cf6f1 feat: implement tweak_archetype() and manual_adjustments() (#12)
Closes #12

- tweak_archetype(): prompts user for updated archetype stats (avg/obp/slg/bb%/k% vs L and R, power and batted-ball profile, baserunning for batters), then recalculates D20 card ratings via the existing calculator
- manual_adjustments(): prompts user to choose a split (vs L or vs R), displays all 22 D20 chance fields with running total, accepts field-number + value edits, and warns if total deviates from 108

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-23 07:41:46 -05:00
cal
f2f70bfce5 Merge pull request 'fix: use archetype role ratings in pitcher card creation (#11)' (#46) from ai/paper-dynasty-card-creation#11 into main 2026-03-23 12:41:20 +00:00
Cal Corum
50ee2d0446 fix: use archetype role ratings in pitcher card creation (#11)
Closes #11

`starter_rating`, `relief_rating`, and `closer_rating` were hardcoded
stubs (5/5/None) in `create_pitching_card`. The chosen `PitcherArchetype`
already carries these values; now they are propagated through `card_data`
when the pitcher workflow builds its initial dict and consumed correctly
when writing the pitching card record to the database.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-23 07:41:07 -05:00
cal
7286fd2203 Merge pull request 'fix: derive SEASON_PCT from date range instead of hardcoding half-season (#9)' (#36) from ai/paper-dynasty-card-creation#9 into main 2026-03-23 12:40:47 +00:00
Cal Corum
63a30bd434 fix: derive SEASON_PCT from date range instead of hardcoding half-season (#9)
Closes #9

Previously SEASON_PCT was hardcoded to 81/162 (~0.5) while END_DATE was
set to 20050731 (~65% through the season). Running retrosheet_data.py
directly (without the CLI which overrides SEASON_PCT at runtime) would
silently generate cards using half-season normalizations on stats covering
a larger portion of the season.

Fix: move START_DATE/END_DATE before SEASON_PCT and derive SEASON_PCT
from the date range using SEASON_END_DATE (2005 regular season end).
Now changing END_DATE automatically produces the correct SEASON_PCT.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-23 07:40:35 -05:00
cal
770f296938 Merge pull request 'fix: remove import-time derived globals in retrosheet_data.py (#14)' (#48) from ai/paper-dynasty-card-creation#14 into main 2026-03-23 12:37:59 +00:00
Cal Corum
d43927258a fix: remove import-time derived globals in retrosheet_data.py (#14)
Closes #14

Five globals (MIN_PA_VL, MIN_PA_VR, MIN_TBF_VL, MIN_TBF_VR, CARDSET_ID)
were derived from PLAYER_DESCRIPTION at module load time, creating a
hidden ordering dependency: any value baked in before the CLI overrides
PLAYER_DESCRIPTION would be silently wrong if a caller relied on the
derived relationship. The CLI explicitly sets all of them anyway, so
replacing with scalar defaults makes the module self-contained and safe.

Also collapses LAST_WEEK_RATIO dead ternary (both branches were 0.0).

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-23 07:37:46 -05:00
cal
fd142c27d2 Merge pull request 'fix: replace wildcard import from db_calls_card_creation (#13)' (#34) from ai/paper-dynasty-card-creation-13 into main 2026-03-23 12:37:29 +00:00
Cal Corum
df6e96bc76 fix: replace wildcard import from db_calls_card_creation (#13)
Closes #13

Replace `from db_calls_card_creation import *` with an explicit
`from db_calls_card_creation import PitcherData`. Only PitcherData
is referenced in creation_helpers.py; the wildcard was also
pulling in all Peewee ORM internals via a transitive
`from peewee import *`, polluting the namespace.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-23 07:37:16 -05:00
cal
dd42f35674 Merge pull request 'fix: use logger.exception() in calculate_pitcher_ratings error handler' (#33) from ai/paper-dynasty-card-creation#17 into main 2026-03-23 12:35:47 +00:00
Cal Corum
9e48616274 fix: use logger.exception() in calculate_pitcher_ratings error handler
Replaces logger.error() with logger.exception() so the full stack trace
is captured when a pitcher card fails to generate, making it possible to
diagnose the root cause rather than just seeing the error message.

Closes #17

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-23 07:35:31 -05:00
cal
deaa43432b Merge pull request 'fix: correct Dict[str, any] to Dict[str, Any] in type annotations (#15)' (#31) from ai/paper-dynasty-card-creation-15 into main 2026-03-23 12:12:28 +00:00
cal
3fd07b6d89 Merge branch 'main' into ai/paper-dynasty-card-creation-15 2026-03-23 12:12:18 +00:00
cal
55f2eda888 Merge pull request 'chore: pin peewee and polars to exact versions (#24)' (#32) from ai/paper-dynasty-card-creation-24 into main 2026-03-23 12:12:07 +00:00
cal
a432d37850 Merge branch 'main' into ai/paper-dynasty-card-creation-24 2026-03-23 12:11:56 +00:00
dde163e2fb Merge pull request 'fix: narrow swallowed exception in get_pitching_peripherals() (#10)' (#35) from ai/paper-dynasty-card-creation#10 into main 2026-03-23 03:53:18 +00:00
f485241dd7 Merge branch 'main' into ai/paper-dynasty-card-creation#10 2026-03-23 03:53:10 +00:00
6d0497431f Merge pull request 'fix: remove dead LAST_WEEK_RATIO ternary — both branches are 0.0 (#19)' (#45) from ai/paper-dynasty-card-creation-19 into main 2026-03-23 03:52:58 +00:00
f5cb72cc26 Merge branch 'main' into ai/paper-dynasty-card-creation-19 2026-03-23 03:52:52 +00:00
f67d111a66 Merge pull request 'fix: remove test_positions_df non-test that always passes (#16)' (#43) from ai/paper-dynasty-card-creation-16 into main 2026-03-23 03:52:48 +00:00
230f3e79ce Merge branch 'main' into ai/paper-dynasty-card-creation-16 2026-03-23 03:52:41 +00:00
ecc62a0521 Merge pull request 'fix: correct get_of() opposite-field direction for switch hitters' (#40) from ai/paper-dynasty-card-creation#5 into main 2026-03-23 03:52:38 +00:00
992feba79e Merge branch 'main' into ai/paper-dynasty-card-creation#5 2026-03-23 03:52:32 +00:00
57c379a8e0 Merge branch 'main' into ai/paper-dynasty-card-creation#10 2026-03-23 03:52:23 +00:00
e413fd5cc8 Merge pull request 'fix: return default 8 on XBT% parse error in running() (#8)' (#37) from ai/paper-dynasty-card-creation#8 into main 2026-03-23 03:52:19 +00:00
6a6767f5d8 Merge branch 'main' into ai/paper-dynasty-card-creation#8 2026-03-23 03:52:13 +00:00
2b955dd8f7 Merge pull request 'fix: resolve unreachable duplicate elif 'DO*' branch in result_string() (#6)' (#39) from ai/paper-dynasty-card-creation#6 into main 2026-03-23 03:51:33 +00:00
0e66ff71e7 Merge branch 'main' into ai/paper-dynasty-card-creation-19 2026-03-23 03:51:06 +00:00
b55820eec8 Merge branch 'main' into ai/paper-dynasty-card-creation-16 2026-03-23 03:51:01 +00:00
b4a3e4b865 Merge branch 'main' into ai/paper-dynasty-card-creation#5 2026-03-23 03:50:56 +00:00
bb546c6ded Merge branch 'main' into ai/paper-dynasty-card-creation#10 2026-03-23 03:50:51 +00:00
5c7c613813 Merge branch 'main' into ai/paper-dynasty-card-creation#8 2026-03-23 03:50:47 +00:00
cbfcba5e26 Merge branch 'main' into ai/paper-dynasty-card-creation#6 2026-03-23 03:50:40 +00:00
006b48e60f Merge pull request 'fix: use player_id instead of key_bbref in create_pit_position() (#7)' (#38) from ai/paper-dynasty-card-creation#7 into main 2026-03-23 03:50:38 +00:00
5e135ff554 Merge branch 'main' into ai/paper-dynasty-card-creation#7 2026-03-23 03:50:35 +00:00
602151fb16 Merge pull request 'Remove hardcoded secrets, load API token from env' (#29) from fix/2-3-security-hardcoded-secrets into main 2026-03-23 03:50:07 +00:00
6c20f93901 Merge branch 'main' into fix/2-3-security-hardcoded-secrets 2026-03-23 03:50:00 +00:00
Cal Corum
0c0eece972 feat: add return type annotations to async DB functions (#20)
Closes #20

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-21 12:33:01 -05:00
Cal Corum
937620e2e9 fix: remove dead LAST_WEEK_RATIO ternary — both branches are 0.0 (#19)
Closes #19

The conditional `0.0 if PLAYER_DESCRIPTION == 'Live' else 0.0` is dead
code: both branches evaluate to the same value. Simplified to a direct
assignment.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-21 07:03:38 -05:00
Cal Corum
5b8d027d46 fix: remove test_positions_df non-test that always passes (#16)
Closes #16

Deleted test_positions_df which called an async function synchronously
(returning a coroutine, not a DataFrame) and asserted True == True.
Zero coverage. Also removed the now-unused pd_positions_df import.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-21 02:02:10 -05:00
Cal Corum
bd1809261e fix: apply timeout parameter to all aiohttp sessions in db_calls.py (#4)
Closes #4

Every async DB function accepted a `timeout` parameter but never passed
it to aiohttp, causing scripts to hang indefinitely if the API became
unresponsive. Fixed by passing `aiohttp.ClientTimeout(total=timeout)` to
each `aiohttp.ClientSession()` constructor across all six functions:
db_get, url_get, db_patch, db_post, db_put, db_delete.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-21 01:03:03 -05:00
Cal Corum
a2e374cd4f fix: correct get_of() opposite-field direction for switch hitters
Switch hitters batting vs LHP hit right-handed (pull=lf, oppo=rf).
Switch hitters batting vs RHP hit left-handed (pull=rf, oppo=lf).
Copy-paste error had both pull_side branches returning the same value.

Closes #5

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-21 00:02:01 -05:00
Cal Corum
b52c5418db fix: resolve unreachable duplicate elif 'DO*' branch in result_string() (#6)
The second `elif "DO*" in data_string` was dead code — the first always
matched, so `spaces -= 2` for the DO** variant was silently skipped.
Fix: check "DO**" first (spaces -= 2), then "DO*" (spaces -= 1).

Closes #6

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-20 23:33:06 -05:00
Cal Corum
1d96223c78 fix: use player_id instead of key_bbref in create_pit_position() (#7)
Closes #7

The fallback branch of create_pit_position() used `int(df_data["key_bbref"])`
which always raises ValueError for string IDs like 'verlaju01'. The exception
was silently swallowed, causing pitchers without defensive stats to receive no
position record at all.

Fix: use `int(float(df_data["player_id"]))` to match the pattern used in
create_pitching_card() in the same file.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-20 23:03:43 -05:00
Cal Corum
8e24b4e686 fix: return default 8 on XBT% parse error in running() (#8)
Closes #8

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-20 21:32:14 -05:00
Cal Corum
46fdde3d02 fix: narrow swallowed exception in get_pitching_peripherals() (#10)
Closes #10

Replace `except Exception: pass` with `except KeyError: pass` so only
the expected missing-attribute case (`cell["data-append-csv"]` not
present) is silently skipped. Network errors, encoding issues, and
other unexpected exceptions will now propagate instead of being hidden.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-20 20:02:55 -05:00
Cal Corum
09cb942435 chore: pin peewee and polars to exact versions (#24)
Closes #24

Pins the two unpinned dependencies in requirements.txt:
- peewee (unversioned → 3.19.0)
- polars (unversioned → 1.36.1)

All other dependencies were already pinned with ==.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-20 16:33:00 -05:00
Cal Corum
b39d3283fd fix: correct Dict[str, any] to Dict[str, Any] in type annotations (#15)
Closes #15

`any` (lowercase) refers to the builtin function, not `typing.Any`.
Added `Any` to the `typing` imports in both files and updated the
`cardset` parameter annotation accordingly.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-20 16:03:45 -05:00
Cal Corum
ee4dae0985 fix: add @pytest.mark.asyncio to async test methods (#21)
Closes #21

All 14 async test methods in tests/test_automated_data_fetcher.py were
missing @pytest.mark.asyncio. Without it, pytest collects them and
silently passes without executing the coroutine body, providing no
coverage.

Added explicit @pytest.mark.asyncio to each async def test_* method.
This makes the async intent unambiguous and is robust against any
future asyncio_mode configuration changes.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-20 14:33:52 -05:00
Cal Corum
e3220bf337 Remove hardcoded secrets, load API token from environment
- Replace hardcoded PD API bearer token in db_calls.py with dotenv/env var
- Delete scripts/supabase_doodling.py (dead scratch file with hardcoded Supabase JWT)
- Add python-dotenv dependency and .env.example template
- Consolidate check_prod_missing_ratings.py to import AUTH_TOKEN from db_calls
- Hard fail if PD_API_TOKEN is missing to prevent silent auth failures

Fixes #2, Fixes #3

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-20 12:38:01 -05:00
19 changed files with 3274 additions and 199 deletions

View File

@ -1,8 +1,14 @@
import asyncio
import sys
from pathlib import Path
import aiohttp
import pandas as pd
AUTH_TOKEN = {"Authorization": "Bearer Tp3aO3jhYve5NJF1IqOmJTmk"}
# Add project root so we can import db_calls
sys.path.insert(0, str(Path(__file__).resolve().parents[2]))
from db_calls import AUTH_TOKEN
PROD_URL = "https://pd.manticorum.com/api"

2
.env.example Normal file
View File

@ -0,0 +1,2 @@
# Paper Dynasty API
PD_API_TOKEN=your-bearer-token-here

View File

@ -573,7 +573,7 @@ def stealing_line(steal_data: dict):
else:
good_jump = "2-12"
return f'{"*" if sd[2] else ""}{good_jump}/- ({sd[1] if sd[1] else "-"}-{sd[0] if sd[0] else "-"})'
return f"{'*' if sd[2] else ''}{good_jump}/- ({sd[1] if sd[1] else '-'}-{sd[0] if sd[0] else '-'})"
def running(extra_base_pct: str):
@ -583,7 +583,7 @@ def running(extra_base_pct: str):
xb_pct = float(extra_base_pct.strip("%")) / 80
except Exception as e:
logger.error(f"calcs_batter running - {e}")
xb_pct = 20
return 8
return max(min(round(6 + (10 * xb_pct)), 17), 8)
@ -693,11 +693,11 @@ def get_batter_ratings(df_data) -> List[dict]:
logger.debug(
f"all on base: {vl.hbp + vl.walk + vl.total_hits()} / all chances: {vl.total_chances()}"
f'{"*******ERROR ABOVE*******" if vl.hbp + vl.walk + vl.total_hits() != vl.total_chances() else ""}'
f"{'*******ERROR ABOVE*******' if vl.hbp + vl.walk + vl.total_hits() != vl.total_chances() else ''}"
)
logger.debug(
f"all on base: {vr.hbp + vr.walk + vr.total_hits()} / all chances: {vr.total_chances()}"
f'{"*******ERROR ABOVE*******" if vr.hbp + vr.walk + vr.total_hits() != vr.total_chances() else ""}'
f"{'*******ERROR ABOVE*******' if vr.hbp + vr.walk + vr.total_hits() != vr.total_chances() else ''}"
)
vl.calculate_strikeouts(df_data["SO_vL"], df_data["AB_vL"], df_data["H_vL"])

View File

@ -3,7 +3,7 @@ import urllib.parse
import pandas as pd
import numpy as np
from typing import Dict
from typing import Any, Dict
from creation_helpers import (
get_all_pybaseball_ids,
sanitize_name,
@ -158,8 +158,8 @@ async def create_new_players(
{
"p_name": f"{f_name} {l_name}",
"cost": NEW_PLAYER_COST,
"image": f'{card_base_url}/{df_data["player_id"]}/battingcard'
f'{urllib.parse.quote("?d=")}{release_dir}',
"image": f"{card_base_url}/{df_data['player_id']}/battingcard"
f"{urllib.parse.quote('?d=')}{release_dir}",
"mlbclub": CLUB_LIST[df_data["Tm_vL"]],
"franchise": FRANCHISE_LIST[df_data["Tm_vL"]],
"cardset_id": cardset["id"],
@ -302,7 +302,7 @@ async def calculate_batting_ratings(offense_stats: pd.DataFrame, to_post: bool):
async def post_player_updates(
cardset: Dict[str, any],
cardset: Dict[str, Any],
card_base_url: str,
release_dir: str,
player_desc: str,
@ -432,8 +432,8 @@ async def post_player_updates(
[
(
"image",
f'{card_base_url}/{df_data["player_id"]}/battingcard'
f'{urllib.parse.quote("?d=")}{release_dir}',
f"{card_base_url}/{df_data['player_id']}/battingcard"
f"{urllib.parse.quote('?d=')}{release_dir}",
)
]
)

View File

@ -10,7 +10,7 @@ import requests
import time
from db_calls import db_get
from db_calls_card_creation import *
from db_calls_card_creation import PitcherData
from bs4 import BeautifulSoup
# Card Creation Constants
@ -533,7 +533,7 @@ def get_pitching_peripherals(season: int):
row_data.append(player_id)
if len(headers) == 0:
col_names.append("key_bbref")
except Exception:
except KeyError:
pass
row_data.append(cell.text)
if len(headers) == 0:
@ -595,21 +595,21 @@ def legal_splits(tot_chances):
def result_string(tba_data, row_num, split_min=None, split_max=None):
bold1 = f'{"<b>" if tba_data["bold"] else ""}'
bold2 = f'{"</b>" if tba_data["bold"] else ""}'
row_string = f'{"<b> </b>" if int(row_num) < 10 else ""}{row_num}'
bold1 = f"{'<b>' if tba_data['bold'] else ''}"
bold2 = f"{'</b>' if tba_data['bold'] else ''}"
row_string = f"{'<b> </b>' if int(row_num) < 10 else ''}{row_num}"
if TESTING:
print(
f'adding {tba_data["string"]} to row {row_num} / '
f"adding {tba_data['string']} to row {row_num} / "
f"split_min: {split_min} / split_max: {split_max}"
)
# No splits; standard result
if not split_min:
return f'{bold1}{row_string}-{tba_data["string"]}{bold2}'
return f"{bold1}{row_string}-{tba_data['string']}{bold2}"
# With splits
split_nums = f'{split_min if split_min != 20 else ""}{"-" if split_min != 20 else ""}{split_max}'
split_nums = f"{split_min if split_min != 20 else ''}{'-' if split_min != 20 else ''}{split_max}"
data_string = (
tba_data["sm-string"] if "sm-string" in tba_data.keys() else tba_data["string"]
)
@ -618,10 +618,10 @@ def result_string(tba_data, row_num, split_min=None, split_max=None):
spaces -= 3
elif "SI**" in data_string:
spaces += 1
elif "DO**" in data_string:
spaces -= 2
elif "DO*" in data_string:
spaces -= 1
elif "DO*" in data_string:
spaces -= 2
elif "so" in data_string:
spaces += 3
elif "gb" in data_string:
@ -638,41 +638,39 @@ def result_string(tba_data, row_num, split_min=None, split_max=None):
row_output = "<b> </b>"
if TESTING:
print(f"row_output: {row_output}")
return f'{bold1}{row_output}{data_string}{" " * spaces}{split_nums}{bold2}'
return f"{bold1}{row_output}{data_string}{' ' * spaces}{split_nums}{bold2}"
def result_data(
tba_data, row_num, tba_data_bottom=None, top_split_max=None, fatigue=False
):
ret_data = {}
top_bold1 = f'{"<b>" if tba_data["bold"] else ""}'
top_bold2 = f'{"</b>" if tba_data["bold"] else ""}'
top_bold1 = f"{'<b>' if tba_data['bold'] else ''}"
top_bold2 = f"{'</b>' if tba_data['bold'] else ''}"
bot_bold1 = None
bot_bold2 = None
if tba_data_bottom:
bot_bold1 = f'{"<b>" if tba_data_bottom["bold"] else ""}'
bot_bold2 = f'{"</b>" if tba_data_bottom["bold"] else ""}'
bot_bold1 = f"{'<b>' if tba_data_bottom['bold'] else ''}"
bot_bold2 = f"{'</b>' if tba_data_bottom['bold'] else ''}"
if tba_data_bottom is None:
ret_data["2d6"] = f"{top_bold1}{int(row_num)}-{top_bold2}"
ret_data["splits"] = f"{top_bold1}{top_bold2}"
ret_data["result"] = (
f"{top_bold1}"
f'{tba_data["string"]}{"" if fatigue else ""}'
f"{top_bold2}"
f"{top_bold1}{tba_data['string']}{'' if fatigue else ''}{top_bold2}"
)
else:
ret_data["2d6"] = f"{top_bold1}{int(row_num)}-{top_bold2}\n"
ret_data["splits"] = (
f'{top_bold1}1{"-" if top_split_max != 1 else ""}'
f'{top_split_max if top_split_max != 1 else ""}{top_bold2}\n'
f'{bot_bold1}{top_split_max+1}{"-20" if top_split_max != 19 else ""}{bot_bold2}'
f"{top_bold1}1{'-' if top_split_max != 1 else ''}"
f"{top_split_max if top_split_max != 1 else ''}{top_bold2}\n"
f"{bot_bold1}{top_split_max + 1}{'-20' if top_split_max != 19 else ''}{bot_bold2}"
)
ret_data["result"] = (
f'{top_bold1}{tba_data["sm-string"] if "sm-string" in tba_data.keys() else tba_data["string"]}'
f"{top_bold1}{tba_data['sm-string'] if 'sm-string' in tba_data.keys() else tba_data['string']}"
f"{top_bold2}\n"
f"{bot_bold1}"
f'{tba_data_bottom["sm-string"] if "sm-string" in tba_data_bottom.keys() else tba_data_bottom["string"]}'
f"{tba_data_bottom['sm-string'] if 'sm-string' in tba_data_bottom.keys() else tba_data_bottom['string']}"
f"{bot_bold2}"
)
@ -688,9 +686,9 @@ def get_of(batter_hand, pitcher_hand, pull_side=True):
if batter_hand == "S":
if pitcher_hand == "L":
return "rf" if pull_side else "rf"
return "lf" if pull_side else "rf"
else:
return "lf" if pull_side else "lf"
return "rf" if pull_side else "lf"
def get_col(col_num):
@ -729,7 +727,7 @@ def get_position_string(all_pos: list, inc_p: bool):
for x in all_pos:
if x.position == "OF":
of_arm = f'{"+" if "-" not in x.arm else ""}{x.arm}'
of_arm = f"{'+' if '-' not in x.arm else ''}{x.arm}"
of_error = x.error
of_innings = x.innings
elif x.position == "CF":
@ -744,7 +742,7 @@ def get_position_string(all_pos: list, inc_p: bool):
elif x.position == "C":
all_def.append(
(
f'c-{x.range}({"+" if int(x.arm) >= 0 else ""}{x.arm}) e{x.error} T-{x.overthrow}(pb-{x.pb})',
f"c-{x.range}({'+' if int(x.arm) >= 0 else ''}{x.arm}) e{x.error} T-{x.overthrow}(pb-{x.pb})",
x.innings,
)
)
@ -1079,7 +1077,7 @@ def mlbteam_and_franchise(mlbam_playerid):
p_data["franchise"] = normalize_franchise(data["currentTeam"]["name"])
else:
logger.error(
f'Could not set team for {mlbam_playerid}; received {data["currentTeam"]["name"]}'
f"Could not set team for {mlbam_playerid}; received {data['currentTeam']['name']}"
)
else:
logger.error(
@ -1222,5 +1220,5 @@ def get_hand(df_data):
else:
return "R"
except Exception:
logger.error(f'Error in get_hand for {df_data["Name"]}')
logger.error(f"Error in get_hand for {df_data['Name']}")
return "R"

View File

@ -6,6 +6,7 @@ baseball archetypes with iterative review and refinement.
"""
import asyncio
import copy
import sys
from typing import Literal
from datetime import datetime
@ -179,7 +180,12 @@ class CustomCardCreator:
else:
calc = PitcherRatingCalculator(archetype)
ratings = calc.calculate_ratings(pitchingcard_id=0) # Temp ID
card_data = {"ratings": ratings}
card_data = {
"ratings": ratings,
"starter_rating": archetype.starter_rating,
"relief_rating": archetype.relief_rating,
"closer_rating": archetype.closer_rating,
}
# Step 4: Review and tweak loop
final_data = await self.review_and_tweak(
@ -347,7 +353,7 @@ class CustomCardCreator:
vs_hand = rating["vs_hand"]
print(f"\nVS {vs_hand}{'HP' if player_type == 'batter' else 'HB'}:")
print(
f" AVG: {rating['avg']:.3f} OBP: {rating['obp']:.3f} SLG: {rating['slg']:.3f} OPS: {rating['obp']+rating['slg']:.3f}"
f" AVG: {rating['avg']:.3f} OBP: {rating['obp']:.3f} SLG: {rating['slg']:.3f} OPS: {rating['obp'] + rating['slg']:.3f}"
)
# Show hit distribution
@ -364,7 +370,7 @@ class CustomCardCreator:
+ rating["bp_single"]
)
print(
f" Hits: {total_hits:.1f} (HR: {rating['homerun']:.1f} 3B: {rating['triple']:.1f} 2B: {rating['double_pull']+rating['double_two']+rating['double_three']:.1f} 1B: {total_hits - rating['homerun'] - rating['bp_homerun'] - rating['triple'] - rating['double_pull'] - rating['double_two'] - rating['double_three']:.1f})"
f" Hits: {total_hits:.1f} (HR: {rating['homerun']:.1f} 3B: {rating['triple']:.1f} 2B: {rating['double_pull'] + rating['double_two'] + rating['double_three']:.1f} 1B: {total_hits - rating['homerun'] - rating['bp_homerun'] - rating['triple'] - rating['double_pull'] - rating['double_two'] - rating['double_three']:.1f})"
)
# Show walk/strikeout
@ -389,7 +395,7 @@ class CustomCardCreator:
)
)
print(
f" Outs: {outs:.1f} (K: {rating['strikeout']:.1f} LD: {rating['lineout']:.1f} FB: {rating['flyout_a']+rating['flyout_bq']+rating['flyout_lf_b']+rating['flyout_rf_b']:.1f} GB: {rating['groundout_a']+rating['groundout_b']+rating['groundout_c']:.1f})"
f" Outs: {outs:.1f} (K: {rating['strikeout']:.1f} LD: {rating['lineout']:.1f} FB: {rating['flyout_a'] + rating['flyout_bq'] + rating['flyout_lf_b'] + rating['flyout_rf_b']:.1f} GB: {rating['groundout_a'] + rating['groundout_b'] + rating['groundout_c']:.1f})"
)
# Calculate and display total OPS
@ -420,10 +426,68 @@ class CustomCardCreator:
print("-" * 70)
print("\nAdjust key percentages (press Enter to keep current value):\n")
# TODO: Implement percentage tweaking
# For now, return unchanged
print("(Feature coming soon - manual adjustments available in option 3)")
return card_data
def prompt_float(label: str, current: float) -> float:
val = input(f" {label} [{current:.3f}]: ").strip()
if not val:
return current
try:
return float(val)
except ValueError:
print(" Invalid value, keeping current.")
return current
def prompt_int(label: str, current: int) -> int:
val = input(f" {label} [{current}]: ").strip()
if not val:
return current
try:
return int(val)
except ValueError:
print(" Invalid value, keeping current.")
return current
arch = copy.copy(archetype)
print("--- vs RHP/RHB ---")
arch.avg_vs_r = prompt_float("AVG vs R", arch.avg_vs_r)
arch.obp_vs_r = prompt_float("OBP vs R", arch.obp_vs_r)
arch.slg_vs_r = prompt_float("SLG vs R", arch.slg_vs_r)
arch.bb_pct_vs_r = prompt_float("BB% vs R", arch.bb_pct_vs_r)
arch.k_pct_vs_r = prompt_float("K% vs R", arch.k_pct_vs_r)
print("\n--- vs LHP/LHB ---")
arch.avg_vs_l = prompt_float("AVG vs L", arch.avg_vs_l)
arch.obp_vs_l = prompt_float("OBP vs L", arch.obp_vs_l)
arch.slg_vs_l = prompt_float("SLG vs L", arch.slg_vs_l)
arch.bb_pct_vs_l = prompt_float("BB% vs L", arch.bb_pct_vs_l)
arch.k_pct_vs_l = prompt_float("K% vs L", arch.k_pct_vs_l)
print("\n--- Power Profile ---")
arch.hr_per_hit = prompt_float("HR/Hit", arch.hr_per_hit)
arch.triple_per_hit = prompt_float("3B/Hit", arch.triple_per_hit)
arch.double_per_hit = prompt_float("2B/Hit", arch.double_per_hit)
print("\n--- Batted Ball Profile ---")
arch.gb_pct = prompt_float("GB%", arch.gb_pct)
arch.fb_pct = prompt_float("FB%", arch.fb_pct)
arch.ld_pct = prompt_float("LD%", arch.ld_pct)
if player_type == "batter":
print("\n--- Baserunning ---")
arch.speed_rating = prompt_int("Speed (1-10)", arch.speed_rating) # type: ignore[arg-type]
arch.steal_jump = prompt_int("Jump (1-10)", arch.steal_jump) # type: ignore[arg-type]
arch.xbt_pct = prompt_float("XBT%", arch.xbt_pct) # type: ignore[union-attr]
# Recalculate card ratings with the modified archetype
if player_type == "batter":
calc = BatterRatingCalculator(arch) # type: ignore[arg-type]
ratings = calc.calculate_ratings(battingcard_id=0)
baserunning = calc.calculate_baserunning()
return {"ratings": ratings, "baserunning": baserunning}
else:
calc_p = PitcherRatingCalculator(arch) # type: ignore[arg-type]
ratings = calc_p.calculate_ratings(pitchingcard_id=0)
return {"ratings": ratings}
async def manual_adjustments(
self, player_type: Literal["batter", "pitcher"], card_data: dict
@ -434,10 +498,99 @@ class CustomCardCreator:
print("-" * 70)
print("\nDirectly edit D20 chances (must sum to 108):\n")
# TODO: Implement manual adjustments
# For now, return unchanged
print("(Feature coming soon)")
return card_data
D20_FIELDS = [
"homerun",
"bp_homerun",
"triple",
"double_three",
"double_two",
"double_pull",
"single_two",
"single_one",
"single_center",
"bp_single",
"hbp",
"walk",
"strikeout",
"lineout",
"popout",
"flyout_a",
"flyout_bq",
"flyout_lf_b",
"flyout_rf_b",
"groundout_a",
"groundout_b",
"groundout_c",
]
# Choose which split to edit
print("Which split to edit?")
for i, rating in enumerate(card_data["ratings"]):
vs = rating["vs_hand"]
print(f" {i + 1}. vs {vs}{'HP' if player_type == 'batter' else 'HB'}")
while True:
choice = input("\nSelect split (1-2): ").strip()
try:
idx = int(choice) - 1
if 0 <= idx < len(card_data["ratings"]):
break
else:
print("Invalid choice.")
except ValueError:
print("Invalid input.")
result = copy.deepcopy(card_data)
rating = result["ratings"][idx]
while True:
vs = rating["vs_hand"]
print(
f"\n--- VS {vs}{'HP' if player_type == 'batter' else 'HB'} D20 Chances ---"
)
total = 0.0
for i, field in enumerate(D20_FIELDS, 1):
val = rating[field]
print(f" {i:2d}. {field:<20s}: {val:.2f}")
total += val
print(f"\n Total: {total:.2f} (target: 108.00)")
user_input = input(
"\nEnter field number and new value (e.g. '1 3.5'), or 'done': "
).strip()
if user_input.lower() in ("done", "q", ""):
break
parts = user_input.split()
if len(parts) != 2:
print(" Enter a field number and a value separated by a space.")
continue
try:
field_idx = int(parts[0]) - 1
new_val = float(parts[1])
except ValueError:
print(" Invalid input.")
continue
if not (0 <= field_idx < len(D20_FIELDS)):
print(f" Field number must be between 1 and {len(D20_FIELDS)}.")
continue
if new_val < 0:
print(" Value cannot be negative.")
continue
rating[D20_FIELDS[field_idx]] = new_val
total = sum(rating[f] for f in D20_FIELDS)
if abs(total - 108.0) > 0.01:
print(
f"\nWarning: Total is {total:.2f} (expected 108.00). "
"Ratings saved but card probabilities may be incorrect."
)
return result
async def create_database_records(
self,
@ -580,9 +733,9 @@ class CustomCardCreator:
"name_first": player_info["name_first"],
"name_last": player_info["name_last"],
"hand": player_info["hand"],
"starter_rating": 5, # TODO: Get from archetype
"relief_rating": 5, # TODO: Get from archetype
"closer_rating": None, # TODO: Get from archetype
"starter_rating": card_data["starter_rating"],
"relief_rating": card_data["relief_rating"],
"closer_rating": card_data["closer_rating"],
}
]
}

View File

@ -1,10 +1,18 @@
import os
import aiohttp
import pybaseball as pb
from dotenv import load_dotenv
from typing import Literal
from typing import Literal, Optional
from exceptions import logger
AUTH_TOKEN = {"Authorization": "Bearer Tp3aO3jhYve5NJF1IqOmJTmk"}
load_dotenv()
_token = os.environ.get("PD_API_TOKEN")
if not _token:
raise EnvironmentError("PD_API_TOKEN environment variable is required")
AUTH_TOKEN = {"Authorization": f"Bearer {_token}"}
DB_URL = "https://pd.manticorum.com/api"
master_debug = True
alt_database = None
@ -25,7 +33,7 @@ def param_char(other_params):
def get_req_url(
endpoint: str, api_ver: int = 2, object_id: int = None, params: list = None
):
req_url = f'{DB_URL}/v{api_ver}/{endpoint}{"/" if object_id is not None else ""}{object_id if object_id is not None else ""}'
req_url = f"{DB_URL}/v{api_ver}/{endpoint}{'/' if object_id is not None else ''}{object_id if object_id is not None else ''}"
if params:
other_params = False
@ -39,11 +47,11 @@ def get_req_url(
def log_return_value(log_string: str):
if master_debug:
logger.info(
f'return: {log_string[:1200]}{" [ S N I P P E D ]" if len(log_string) > 1200 else ""}\n'
f"return: {log_string[:1200]}{' [ S N I P P E D ]' if len(log_string) > 1200 else ''}\n"
)
else:
logger.debug(
f'return: {log_string[:1200]}{" [ S N I P P E D ]" if len(log_string) > 1200 else ""}\n'
f"return: {log_string[:1200]}{' [ S N I P P E D ]' if len(log_string) > 1200 else ''}\n"
)
@ -53,13 +61,15 @@ async def db_get(
object_id: int = None,
params: list = None,
none_okay: bool = True,
timeout: int = 3,
):
timeout: int = 30,
) -> Optional[dict]:
req_url = get_req_url(endpoint, api_ver=api_ver, object_id=object_id, params=params)
log_string = f"get:\n{endpoint} id: {object_id} params: {params}"
logger.info(log_string) if master_debug else logger.debug(log_string)
async with aiohttp.ClientSession(headers=AUTH_TOKEN) as session:
async with aiohttp.ClientSession(
headers=AUTH_TOKEN, timeout=aiohttp.ClientTimeout(total=timeout)
) as session:
async with session.get(req_url) as r:
logger.info(f"session info: {r}")
if r.status == 200:
@ -76,11 +86,13 @@ async def db_get(
raise ValueError(f"DB: {e}")
async def url_get(url: str, timeout: int = 3):
async def url_get(url: str, timeout: int = 30) -> dict:
log_string = f"get:\n{url}"
logger.info(log_string) if master_debug else logger.debug(log_string)
async with aiohttp.ClientSession() as session:
async with aiohttp.ClientSession(
timeout=aiohttp.ClientTimeout(total=timeout)
) as session:
async with session.get(url) as r:
if r.status == 200:
log_string = "200 received"
@ -93,13 +105,15 @@ async def url_get(url: str, timeout: int = 3):
async def db_patch(
endpoint: str, object_id: int, params: list, api_ver: int = 2, timeout: int = 3
):
endpoint: str, object_id: int, params: list, api_ver: int = 2, timeout: int = 30
) -> dict:
req_url = get_req_url(endpoint, api_ver=api_ver, object_id=object_id, params=params)
log_string = f"patch:\n{endpoint} {params}"
logger.info(log_string) if master_debug else logger.debug(log_string)
async with aiohttp.ClientSession(headers=AUTH_TOKEN) as session:
async with aiohttp.ClientSession(
headers=AUTH_TOKEN, timeout=aiohttp.ClientTimeout(total=timeout)
) as session:
async with session.patch(req_url) as r:
if r.status == 200:
js = await r.json()
@ -112,13 +126,15 @@ async def db_patch(
async def db_post(
endpoint: str, api_ver: int = 2, payload: dict = None, timeout: int = 3
):
endpoint: str, api_ver: int = 2, payload: dict = None, timeout: int = 30
) -> dict:
req_url = get_req_url(endpoint, api_ver=api_ver)
log_string = f"post:\n{endpoint} payload: {payload}\ntype: {type(payload)}"
logger.info(log_string) if master_debug else logger.debug(log_string)
async with aiohttp.ClientSession(headers=AUTH_TOKEN) as session:
async with aiohttp.ClientSession(
headers=AUTH_TOKEN, timeout=aiohttp.ClientTimeout(total=timeout)
) as session:
async with session.post(req_url, json=payload) as r:
if r.status == 200:
js = await r.json()
@ -131,13 +147,15 @@ async def db_post(
async def db_put(
endpoint: str, api_ver: int = 2, payload: dict = None, timeout: int = 3
):
endpoint: str, api_ver: int = 2, payload: dict = None, timeout: int = 30
) -> dict:
req_url = get_req_url(endpoint, api_ver=api_ver)
log_string = f"put:\n{endpoint} payload: {payload}\ntype: {type(payload)}"
logger.info(log_string) if master_debug else logger.debug(log_string)
async with aiohttp.ClientSession(headers=AUTH_TOKEN) as session:
async with aiohttp.ClientSession(
headers=AUTH_TOKEN, timeout=aiohttp.ClientTimeout(total=timeout)
) as session:
async with session.put(req_url, json=payload) as r:
if r.status == 200:
js = await r.json()
@ -149,12 +167,14 @@ async def db_put(
raise ValueError(f"DB: {e}")
async def db_delete(endpoint: str, object_id: int, api_ver: int = 2, timeout=3):
async def db_delete(endpoint: str, object_id: int, api_ver: int = 2, timeout=3) -> dict:
req_url = get_req_url(endpoint, api_ver=api_ver, object_id=object_id)
log_string = f"delete:\n{endpoint} {object_id}"
logger.info(log_string) if master_debug else logger.debug(log_string)
async with aiohttp.ClientSession(headers=AUTH_TOKEN) as session:
async with aiohttp.ClientSession(
headers=AUTH_TOKEN, timeout=aiohttp.ClientTimeout(total=timeout)
) as session:
async with session.delete(req_url) as r:
if r.status == 200:
js = await r.json()
@ -183,4 +203,4 @@ def get_player_data(
def player_desc(this_player) -> str:
if this_player["p_name"] in this_player["description"]:
return this_player["description"]
return f'{this_player["description"]} {this_player["p_name"]}'
return f"{this_player['description']} {this_player['p_name']}"

View File

@ -418,7 +418,7 @@ pd-cards upload s3 --cardset <name> [OPTIONS]
cd /mnt/NV2/Development/paper-dynasty/database
DATABASE_TYPE=postgresql POSTGRES_HOST=10.10.0.42 POSTGRES_DB=paperdynasty_dev \
POSTGRES_USER=sba_admin POSTGRES_PASSWORD=<pw> POSTGRES_PORT=5432 \
API_TOKEN=Tp3aO3jhYve5NJF1IqOmJTmk \
API_TOKEN=your-api-token-here \
uvicorn app.main:app --host 0.0.0.0 --port 8000
# Terminal 2: Upload with local rendering

View File

@ -0,0 +1,468 @@
# Refractor Phase 2 — Design Validation Spec
## Purpose
This document captures the design validation test cases that must be verified before and during
Phase 2 (rating boosts) of the Refractor card progression system. Phase 1 — tracking,
milestone evaluation, and tier state persistence — is implemented. Phase 2 adds the rating boost
application logic (`apply_evolution_boosts`), rarity upgrade at T4, and variant hash creation.
**When to reference this document:**
- Before beginning Phase 2 implementation: review all cases to understand the design constraints
and edge cases the implementation must handle.
- During implementation: use each test case as an acceptance gate before the corresponding
feature is considered complete.
- During code review: each case documents the "risk if failed" so reviewers can assess whether
a proposed implementation correctly handles that scenario.
- After Phase 2 ships: run the cases as a regression checklist before any future change to the
boost logic, rarity assignment, or milestone evaluator.
## Background: Rating Model
Batter cards have 22 outcome columns summing to exactly 108 chances (derived from the D20
probability system: 2d6 x 3 columns x 6 rows). Each Refractor tier (T1 through T4) awards a
1.0-chance budget — a flat shift from out columns to positive-outcome columns. The total
accumulated budget across all four tiers is 4.0 chances, equal to approximately 3.7% of the
108-chance total (4 / 108 ≈ 0.037).
**Rarity naming cross-reference:** The PRD chapters (`prd-evolution/`) use the player-facing
display names. The codebase and this spec use the internal names from `rarity_thresholds.py`.
They map as follows:
| PRD / Display Name | Codebase Name | ID |
|---|---|---|
| Replacement | Common | 5 |
| Reserve | Bronze | 4 |
| Starter | Silver | 3 |
| All-Star | Gold | 2 |
| MVP | Diamond | 1 |
| Hall of Fame | HoF | 99 |
All rarity references in this spec use the codebase names.
Rarity IDs in the codebase (from `rarity_thresholds.py`):
| Rarity Name | ID |
|---|---|
| Common | 5 |
| Bronze | 4 |
| Silver | 3 |
| Gold | 2 |
| Diamond | 1 |
| Hall of Fame | 99 |
The special value `99` for Hall of Fame means a naive `rarity_id + 1` increment is incorrect;
the upgrade logic must use an ordered rarity ladder, not arithmetic.
---
## Test Cases
---
### T4-1: 108-sum preservation under profile-based boosts
**Status:** Pending — Phase 2
**Scenario:**
`apply_evolution_boosts(card_ratings, boost_tier, player_profile)` redistributes 1.0 chance per
tier across outcome columns according to the player's detected profile (power hitter, contact
hitter, patient hitter, starting pitcher, relief pitcher). Every combination of profile and tier
must leave the 22-column sum exactly equal to 108 after the boost is applied. This must hold for
all four tier applications, cumulative as well as individual.
The edge case: a batter card where `flyout_a = 0`. The power and contact hitter profiles draw
reductions from out columns including `flyout_a`. If the preferred reduction column is at zero,
the implementation must not produce a negative value and must not silently drop the remainder of
the budget. The 0-floor cap is enforced per column (see `05-rating-boosts.md` section 5.1:
"Truncated points are lost, not redistributed").
Verify:
- After each of T1, T2, T3, T4 boost applications, `sum(all outcome columns) == 108` exactly.
- A card with `flyout_a = 0` does not raise an error and does not produce a column below 0.
- When truncation occurs (column already at 0), the lost budget is discarded, not moved
  elsewhere — under truncation the post-boost sum falls below 108 by exactly the truncated
  amount, and in no case may the sum ever exceed 108.
**Expected Outcome:**
Sum remains 108 after every boost under non-truncation conditions. Under truncation conditions
(a column hits 0), the final column sum must equal exactly `108 - truncated_amount` — where
`truncated_amount` is the portion of the 1.0-chance budget that was dropped due to the 0-floor
cap. This is a single combined assertion: `sum(columns) == 108 - truncated_amount`. Checking
"sum <= 108" and "truncated amount was discarded" as two independent conditions is insufficient
— a test can pass both checks while the sum is wrong for an unrelated reason (e.g., a positive
column also lost value due to a bug). No column value falls below 0.
**Risk If Failed:**
A broken 108-sum produces invalid game probabilities. The D20 engine derives per-outcome
probabilities from `column / 108`. If the sum drifts above or below 108, every outcome
probability on that card is subtly wrong for every future game that uses it. This error silently
corrupts game results without any visible failure.
**Files Involved:**
- `docs/prd-evolution/05-rating-boosts.md` — boost budget, profile definitions, cap behavior
- Phase 2: `pd_cards/evo/boost_profiles.py` (to be created) — `apply_evolution_boosts`
- `batters/creation.py` — `battingcardratings` model column set (22 columns)
- `pitchers/creation.py` — `pitchingcardratings` model column set (18 columns + 9 x-checks)
---
### T4-2: D20 probability shift at T4
**Status:** Pending — Phase 2
**Scenario:**
Take a representative Bronze-rarity batter (e.g., a player with total OPS near 0.730,
`homerun` ≈ 1.2, `single_one` ≈ 4.0, `walk` ≈ 3.0 in the base ratings). Apply all four
tier boosts cumulatively, distributing the total 4.0-chance budget across positive-outcome
columns (HR, singles, walk) with equal reductions from out columns. Calculate the resulting
absolute and relative probability change per D20 roll outcome.
Design target: the full T4 evolution shifts approximately 3.7% of all outcomes from outs to
positive results (4.0 / 108 = 0.037). The shift should be perceptible to a player reviewing
their card stats but should not fundamentally alter the card's tier or role. A Bronze batter
does not become a Gold batter through evolution — they become an evolved Bronze batter.
Worked example for validation reference:
- Pre-evolution: `homerun = 1.2` → probability per D20 = 1.2 / 108 ≈ 1.11%
- Post T4 with +0.5 to homerun per tier (4 tiers × 0.5 = +2.0 total): `homerun = 3.2`
→ probability per D20 = 3.2 / 108 ≈ 2.96% — an increase of ~1.85 percentage points
- Across all positive outcomes: total shift = 4.0 / 108 ≈ 3.7%
**Expected Outcome:**
The cumulative 4.0-chance shift produces a ~3.7% total movement from negative to positive
outcomes. No single outcome column increases by more than 2.5 chances across the full T4
journey under any profile. The card remains recognizably Bronze — it does not cross the Gold
OPS threshold (0.900 for 2024/2025 thresholds; confirmed in `rarity_thresholds.py`
`BATTER_THRESHOLDS_2024.gold` and `BATTER_THRESHOLDS_2025.gold`) unless it was already near
the boundary. Note: 0.700 is the Bronze floor (`bronze` field), not the Gold threshold.
**Risk If Failed:**
If the shift is too large, evolution becomes a rarity bypass — players grind low-rarity cards
to simulate an upgrade they cannot earn through pack pulls. If the shift is too small, the
system feels unrewarding and players lose motivation to complete tiers. Either miscalibration
undermines the core design intent.
**Files Involved:**
- `docs/prd-evolution/05-rating-boosts.md` — section 5.2 (boost budgets), section 5.3 (profiles)
- `rarity_thresholds.py` — OPS boundary values used to assess whether evolution crosses a rarity
threshold as a side effect (it should not for mid-range cards)
- Phase 2: `pd_cards/evo/boost_profiles.py` — boost distribution logic
---
### T4-3: T4 rarity upgrade — pipeline collision risk
**Status:** Pending — Phase 2
**Scenario:**
The Refractor T4 rarity upgrade (`player.rarity_id` incremented by one ladder step) and the
live-series `post_player_updates()` rarity assignment (OPS-threshold-based, in
`batters/creation.py`) both write to the same `rarity_id` field on the player record. A
collision occurs when both run against the same player:
1. Player completes Refractor T4. Evolution system upgrades rarity: Bronze (4) → Silver (3).
`evolution_card_state.final_rarity_id = 3` is written as an audit record.
2. Live-series update runs two weeks later. `post_player_updates()` recalculates OPS → maps to
Bronze (4) → writes `rarity_id = 4` to the player record.
3. The T4 rarity upgrade is silently overwritten. The player's card reverts to Bronze. The
`evolution_card_state` record still shows `final_rarity_id = 3` but the live card is Bronze.
This is a conflict between two independent systems both writing to the same field without
awareness of each other. The current live-series pipeline has no concept of evolution state.
Proposed resolution strategies (document and evaluate; do not implement during Phase 2 spec):
- **Guard clause in `post_player_updates()`:** Before writing `rarity_id`, check
`evolution_card_state.final_rarity_id` for the player. If an evolution upgrade is on record,
apply `max(ops_rarity, final_rarity_id_ladder_position)` — never downgrade past the T4 result.
- **Separate evolution rarity field:** Add `evolution_rarity_bump` (int, default 0) to the
card model. The game engine resolves effective rarity as `base_rarity + bump`. Live-series
updates only touch `base_rarity`; the bump is immutable once T4 is reached.
- **Deferred rarity upgrade:** T4 does not write `rarity_id` immediately. Instead, it sets a
flag on `evolution_card_state`. `post_player_updates()` checks the flag and applies the bump
after its own rarity calculation, ensuring the evolution upgrade layers on top of the current
OPS-derived rarity rather than competing with it.
**Expected Outcome:**
Phase 2 must implement one of these strategies (or an alternative that provides equivalent
protection). The collision scenario must be explicitly tested: evolve a Bronze card to T4,
run a live-series update that maps the same player to Bronze, confirm the displayed rarity is
Silver or higher — not Bronze.
**Risk If Failed:**
Live-series updates silently revert T4 rarity upgrades. Players invest significant game time
reaching T4, receive the visual rarity upgrade, then lose it after the next live-series run
with no explanation. This is one of the highest-trust violations the system can produce — a
reward that disappears invisibly.
**Files Involved:**
- `batters/creation.py` — `post_player_updates()` (lines ~304–480)
- `pitchers/creation.py` — equivalent `post_player_updates()` for pitchers
- `docs/prd-evolution/05-rating-boosts.md` — section 5.4 (rarity upgrade at T4), note on live
series interaction
- Phase 2: `pd_cards/evo/tier_completion.py` (to be created) — T4 completion handler
- Database: `evolution_card_state` table, `final_rarity_id` column
---
### T4-4: T4 rarity cap for HoF cards
**Status:** Pending — Phase 2
**Scenario:**
A player card currently at Hall of Fame rarity (`rarity_id = 99`) completes Refractor T4. The
design specifies: HoF cards receive the T4 rating boost deltas (1.0 chance shift) but do not
receive a rarity upgrade. The rarity stays at 99.
The implementation must handle this without producing an invalid rarity value. The rarity ID
sequence in `rarity_thresholds.py` is non-contiguous — the IDs are:
```
5 (Common) → 4 (Bronze) → 3 (Silver) → 2 (Gold) → 1 (Diamond) → 99 (Hall of Fame)
```
A naive `rarity_id + 1` would produce `100`, which is not a valid rarity. A lookup-table
approach on the ordered ladder must be used instead. At `99` (HoF), the ladder returns `99`
(no-op). Additionally, Diamond (1) cards that complete T4 should upgrade to HoF (99), not to
`rarity_id = 0` or any other invalid value.
**Expected Outcome:**
- `rarity_id = 99` (HoF): T4 boost applied, rarity unchanged at 99.
- `rarity_id = 1` (Diamond): T4 boost applied, rarity upgrades to 99 (HoF).
- `rarity_id = 2` (Gold): T4 boost applied, rarity upgrades to 1 (Diamond).
- `rarity_id = 3` (Silver): T4 boost applied, rarity upgrades to 2 (Gold).
- `rarity_id = 4` (Bronze): T4 boost applied, rarity upgrades to 3 (Silver).
- `rarity_id = 5` (Common): T4 boost applied, rarity upgrades to 4 (Bronze).
- No card ever receives `rarity_id` outside the set {1, 2, 3, 4, 5, 99}.
**Risk If Failed:**
An invalid rarity ID (e.g., 0, 100, or None) propagates into the game engine and Discord bot
display layer. Cards with invalid rarities may render incorrectly, break sort/filter operations
in pack-opening UX, or cause exceptions in code paths that switch on rarity values.
**Files Involved:**
- `rarity_thresholds.py` — authoritative rarity ID definitions
- `docs/prd-evolution/05-rating-boosts.md` — section 5.4 (HoF cap behavior)
- Phase 2: `pd_cards/evo/tier_completion.py` — rarity ladder lookup, T4 completion handler
- Database: `evolution_card_state.final_rarity_id`
---
### T4-5: RP T1 achievability in realistic timeframe
**Status:** Pending — Phase 2
**Scenario:**
The Relief Pitcher track formula is `IP + K` with a T1 threshold of 3. The design intent is
"almost any active reliever hits this" in approximately 2 appearances (from `04-milestones.md`
section 4.2). The scenario to validate: a reliever who records 4 outs (1.33 IP) with 1 K in an
appearance scores `1.33 + 1 = 2.33` — below T1. This reliever needs another appearance before
reaching T1.
The validation question is whether this is a blocking problem. If typical active RP usage
(5+ team game appearances) reliably produces T1 within a few sessions of play, the design is
sound. If a reliever can appear 4–5 times and still not reach T1 due to short, low-strikeout
outings (e.g., a pure groundball closer who throws 1.0 IP / 0 K per outing), the threshold
may be too high for the RP role to feel rewarding.
Reference calibration data from Season 10 (via `evo_milestone_simulator.py`): ~94% of all
relievers reached T1 under the IP+K formula with the threshold of 3. However, this is based on
a full or near-full season of data. The question is whether early-season RP usage (first 3–5
team games) produces T1 reliably.
Worked example for a pure-groundball closer:
- 5 appearances × (1.0 IP + 0 K) = 5.0 — reaches T1 (threshold 3) after appearance 3
- 5 appearances × (0.2 IP + 0 K) = 1.0 — does not reach T1 after 5 appearances
The second case (mop-up reliever with minimal usage) is expected to not reach T1 quickly, and
the design accepts this. What is NOT acceptable: a dedicated closer or setup man with 2+ IP per
session failing to reach T1 after 5+ appearances.
**Expected Outcome:**
A reliever averaging 1.0+ IP per appearance reaches T1 after 3 appearances. A reliever
averaging 0.5+ IP per appearance reaches T1 after 5–6 appearances. A reliever with fewer than
3 total appearances in a season is not expected to reach T1 — this is acceptable. The ~94%
Season 10 T1 rate confirms the threshold is calibrated correctly for active relievers.
**Risk If Failed:**
If active relievers (regular bullpen roles) cannot reach T1 within 5–10 team games, the
Refractor system is effectively dead for RP cards from launch. Players who pick up RP cards
expecting progression will see no reward for multiple play sessions, creating a negative first
impression of the entire system.
**Files Involved:**
- `docs/prd-evolution/04-milestones.md` — section 4.2 (RP track thresholds and design intent),
section 4.3 (Season 10 calibration data)
- `scripts/evo_milestone_simulator.py` — `formula_rp_ip_k`, `simulate_tiers` — re-run against
current season data to validate T1 achievability in early-season usage windows
- Database: `evolution_track` table — threshold values (admin-tunable, no code change required
if recalibration is needed)
---
### T4-6: SP/RP T4 parity with batters
**Status:** Pending — Phase 2
**Scenario:**
The T4 thresholds are:
| Position | T4 Threshold | Formula |
|---|---|---|
| Batter | 896 | PA + (TB x 2) |
| Starting Pitcher | 240 | IP + K |
| Relief Pitcher | 70 | IP + K |
These were calibrated against Season 10 production data using `evo_milestone_simulator.py`.
The calibration target was approximately 3% of active players reaching T4 over a full season
across all position types. The validation here is that this parity holds: one position type
does not trivially farm Superfractors while another cannot reach T2 without extraordinary
performance.
The specific risk: SP T4 requires 240 IP+K across the full season. Top Season 10 SPs (Harang:
163, deGrom: 143) were on pace for T4 at the time of measurement but had not crossed 240 yet.
If the final-season data shows a spike (e.g., 10–15% of SPs reaching T4 vs. 3% of batters),
the SP threshold needs adjustment. Conversely, if no reliever reaches T4 in a full season
where 94% reach T1, the RP T4 threshold of 70 may be achievable only by top closers in
extreme usage scenarios.
Validation requires re-running `evo_milestone_simulator.py --season <current>` with the final
season data for all three position types and comparing T4 reach percentages. Accepted tolerance:
T4 reach rate within 2x across position types (e.g., if batters are at 3%, SP and RP should be
between 1.5% and 6%).
**Expected Outcome:**
All three position types produce T4 rates between 1% and 6% over a full season of active play.
No position type produces T4 rates above 10% (trivially farmable) or below 0.5% (effectively
unachievable). SP and RP T4 rates should be comparable because their thresholds were designed
together with the same 3% target in mind.
**Risk If Failed:**
If SP is easy (T4 in half a season) while RP is hard (T4 only for elite closers), then SP card
owners extract disproportionate value from the system. The Refractor system's balance premise
— "same tier, same reward, regardless of position" — breaks down, undermining player confidence
in the fairness of the progression.
**Files Involved:**
- `docs/prd-evolution/04-milestones.md` — section 4.3 (Season 10 calibration table)
- `scripts/evo_milestone_simulator.py` — primary validation tool; run with `--all-formulas
--pitchers-only` and `--batters-only` flags against final season data
- Database: `evolution_track` table — thresholds are admin-tunable; recalibration does not
require a code deployment
---
### T4-7: Cross-season stat accumulation — design confirmation
**Status:** Pending — Phase 2
**Scenario:**
The milestone evaluator (Phase 1, already implemented) queries `BattingSeasonStats` and
`PitchingSeasonStats` and SUMs the formula metric across all rows for a given
`(player_id, team_id)` pair, regardless of season number. This means a player's Refractor
progress is cumulative across seasons: if a player reaches 400 batter points in Season 10 and
another 400 in Season 11, their total is 800 — within range of T4 (threshold: 896).
This design must be confirmed as intentional before Phase 2 is implemented, because it has
significant downstream implications:
1. **Progress does not reset between seasons.** A player who earns a card across multiple
seasons continues progressing the same Refractor state. Season boundaries are invisible to
the evaluator.
2. **New teams start from zero.** If a player trades away a card and acquires a new copy of the
same player, the new card's `evolution_card_state` row starts at T0. The stat accumulation
query is scoped to `(player_id, team_id)`, so historical stats from the previous owner are
not inherited.
3. **Live-series stat updates do not retroactively change progress.** The evaluator reads
finalized season stat rows. If a player's Season 10 stats are adjusted via a data correction,
the evaluator will pick up the change on the next evaluation run — progress could shift
backward if a data correction removes a game's stats.
4. **The "full season" targets in the design docs (e.g., "T4 requires ~120 games") assume
cumulative multi-season play, not a single season.** At ~7.5 batter points per game, T4 of
896 requires approximately 120 in-game appearances. A player who plays 40 games per season
across three seasons reaches T4 in their third season.
This is the confirmed intended design per `04-milestones.md`: "Cumulative within a season —
progress never resets mid-season." The document does not explicitly state "cumulative across
seasons," but the evaluator implementation (SUM across all rows, no season filter) makes this
behavior implicit. This test case exists to surface that ambiguity and require an explicit
design decision before Phase 2 ships.
**Expected Outcome:**
Before Phase 2 implementation begins, the design intent must be explicitly confirmed in writing
(update `04-milestones.md` section 4.1 with a cross-season statement) or the evaluator query
must be updated to add a season boundary. The options are:
- **Option A (current behavior — accumulate across seasons):** Document explicitly. The
Refractor journey can span multiple seasons. Long-term card holders are rewarded for loyalty.
- **Option B (reset per season):** Add a season filter to the evaluator query. Refractor
progress resets at season start. T4 is achievable within a single full season. Cards earned
mid-season have a natural catch-up disadvantage.
This spec takes no position on which option is correct. It records that the choice exists,
that the current implementation defaults to Option A, and that Phase 2 must not be built on
an unexamined assumption about which option is in effect.
**Risk If Failed:**
If Option A is unintentional and players discover their Refractor progress carries over across
seasons before it is documented as a feature, they will optimize around it in ways the design
did not anticipate (e.g., holding cards across seasons purely to farm Refractor tiers). If
Option B is unintentional and progress resets each season without warning, players who invested
heavily in T3 at season end will be angry when their progress disappears.
**Files Involved:**
- `docs/prd-evolution/04-milestones.md` — section 4.1 (design principles) — **requires update
to state the cross-season policy explicitly**
- Phase 1 (implemented): `pd_cards/evo/evaluator.py` — stat accumulation query; inspect the
WHERE clause for any season filter
- Database: `BattingSeasonStats`, `PitchingSeasonStats` — confirm schema includes `season`
column and whether the evaluator query filters on it
- Database: `evolution_card_state` — confirm there is no season-reset logic in the state
management layer
---
## Summary Status
| ID | Title | Status |
|---|---|---|
| T4-1 | 108-sum preservation under profile-based boosts | Pending — Phase 2 |
| T4-2 | D20 probability shift at T4 | Pending — Phase 2 |
| T4-3 | T4 rarity upgrade — pipeline collision risk | Pending — Phase 2 |
| T4-4 | T4 rarity cap for HoF cards | Pending — Phase 2 |
| T4-5 | RP T1 achievability in realistic timeframe | Pending — Phase 2 |
| T4-6 | SP/RP T4 parity with batters | Pending — Phase 2 |
| T4-7 | Cross-season stat accumulation — design confirmation | Pending — Phase 2 |
All cases are unblocked pending Phase 2 implementation. T4-7 requires a design decision before
any Phase 2 code is written. T4-3 requires a resolution strategy to be selected before the T4
completion handler is implemented.

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,243 @@
# Refractor Tier Visual Spec — Cherry-Pick Reference
Approved effects from `wip/refractor-card-art` mockup (`docs/refractor-tier-mockup.html`).
This document is the handoff reference for applying these visuals to the production card renderer.
---
## 1. Tier Diamond Indicator
A 4-quadrant diamond icon centered at the intersection of the left/right column headers.
Replaces all per-tier emoji badges.
### Positioning & Structure
- **Position**: `left: 600px; top: 78.5px` (centered between column header top/bottom borders)
- **Size**: `19px × 19px` (tips fit within header row bounds)
- **Rotation**: `transform: translate(-50%, -50%) rotate(45deg)`
- **Layout**: CSS Grid `2×2`, `gap: 2px`
- **Background (gap color)**: `rgba(0,0,0,0.75)` with `border-radius: 2px`
- **Base shadow**: `0 0 0 1.5px rgba(0,0,0,0.7), 0 2px 5px rgba(0,0,0,0.5)`
- **z-index**: 20
### Fill Order (Baseball Base Path)
Quadrants fill progressively following the base path: **1st → 2nd → 3rd → Home**.
| Tier | Quadrants Filled | Visual |
|------|-----------------|--------|
| T0 (Base) | 0 — no diamond shown | — |
| T1 | 1st base (right) | ◇ with right quadrant filled |
| T2 | 1st + 2nd (right + top) | ◇ with two quadrants |
| T3 | 1st + 2nd + 3rd (right + top + left) | ◇ with three quadrants |
| T4 | All 4 (full diamond) | ◆ fully filled |
### Grid-to-Visual Mapping (after 45° rotation)
The CSS grid positions map to visual positions as follows:
| Grid Slot | Visual Position | Base |
|-----------|----------------|------|
| div 1 (top-left) | TOP | 2nd base |
| div 2 (top-right) | RIGHT | 1st base |
| div 3 (bottom-left) | LEFT | 3rd base |
| div 4 (bottom-right) | BOTTOM | Home plate |
**Render order in HTML**: `[2nd, 1st, 3rd, home]` (matches grid slot order above).
### Quadrant Fill Styling
Unfilled quads: `background: rgba(0,0,0,0.3)` (dark placeholder).
Filled quads use a gradient + inset shadow for depth:
```css
/* Standard filled quad */
.diamond-quad.filled {
background: linear-gradient(135deg, {highlight} 0%, {color} 50%, {color-darkened-75%} 100%);
box-shadow:
inset 0 1px 2px rgba(255,255,255,0.45),
inset 0 -1px 2px rgba(0,0,0,0.35),
inset 1px 0 2px rgba(255,255,255,0.15);
}
```
### Approved Effect: Metallic Sheen + Pulse Glow
The approved diamond effect combines metallic inset highlights with an animated glow pulse.
**Metallic gradient** (replaces standard gradient on filled quads):
```css
background: linear-gradient(135deg,
rgba(255,255,255,0.9) 0%,
{highlight} 20%,
{color} 50%,
{color-darkened-60%} 80%,
{highlight} 100%);
```
**Metallic inset shadows** (boosted highlights):
```css
.diamond-quad.metallic.filled {
box-shadow:
inset 0 1px 3px rgba(255,255,255,0.7),
inset 0 -1px 2px rgba(0,0,0,0.5),
inset 1px 0 3px rgba(255,255,255,0.3),
inset -1px 0 2px rgba(0,0,0,0.2);
}
```
**Glow pulse animation** (tight diameter, applied to `.tier-diamond` container):
```css
@keyframes diamond-glow-pulse {
0% { box-shadow:
0 0 0 1.5px rgba(0,0,0,0.7),
0 2px 5px rgba(0,0,0,0.5),
0 0 5px 1px var(--diamond-glow-color);
}
50% { box-shadow:
0 0 0 1.5px rgba(0,0,0,0.5),
0 2px 4px rgba(0,0,0,0.3),
0 0 8px 3px var(--diamond-glow-color),
0 0 14px 5px color-mix(in srgb, var(--diamond-glow-color) 25%, transparent);
}
100% { /* same as 0% */ }
}
.tier-diamond.diamond-glow {
animation: diamond-glow-pulse 2s ease-in-out infinite;
}
```
Metallic effect **automatically** enables the glow pulse (no separate toggle needed in production).
---
## 2. Tier Diamond Colors
| Tier | Color (body) | Highlight (bright edge) | Glow Color | Intent |
|------|-------------|------------------------|------------|--------|
| T1 | `#d46a1a` | `#f0a050` | `#d46a1a` | Orange |
| T2 | `#b82020` | `#e85050` | `#b82020` | Red |
| T3 | `#7b2d8e` | `#b860d0` | `#7b2d8e` | Purple |
| T4 | `#1a6af0` | `#60b0ff` | `#1a6af0` | Blue flame |
Progression: warm → hot → regal → transcendent.
---
## 3. T3 Gold Shimmer Sweep (Header Animation)
A single narrow gold stripe sweeps left-to-right across the card header.
- **Duration**: 2.5s loop, ease-in-out
- **Gradient**: 105° diagonal, peak opacity 0.38
- **Key colors**: `rgba(255,240,140,0.18)` → `rgba(255,220,80,0.38)` → `rgba(255,200,60,0.30)`
- **Scope**: Header only (`.card-header` has `overflow: hidden`)
- **z-index**: 5
```css
@keyframes t3-shimmer {
0% { transform: translateX(-130%); }
100% { transform: translateX(230%); }
}
```
### Playwright APNG Capture
For static card rendering, the shimmer position is driven by `--anim-progress` (0.0–1.0) instead of CSS animation. Playwright captures 8 frames to produce an APNG.
---
## 4. T4 Superfractor — Layered Animation System
T4 stacks four independent effect layers for a premium look qualitatively different from T3.
### Layer 1: Prismatic Rainbow Header Sweep
- Seamless loop using a 200%-wide element with two mirrored rainbow bands
- `translateX(-50%)` over 6s linear = continuous wrap
- Colors: red → gold → green → blue → violet → pink, all at ~0.28 opacity
- z-index: 1 (behind header text at z-index: 2)
- Header children get `position: relative; z-index: 2` to sit above rainbow
### Layer 2+3: Gold/Teal Dual Glow Pulse
- Applied via `::before` on the card element
- Gold and teal in opposition: when gold brightens, teal dims and vice versa
- 2s ease-in-out loop
- Inset box-shadows (`45px 12px` gold, `80px 5px` teal)
- z-index: 4
```css
@keyframes t4-dual-pulse {
0% { box-shadow: inset 0 0 45px 12px rgba(201,169,78,0.40),
inset 0 0 80px 5px rgba(45,212,191,0.08); }
50% { box-shadow: inset 0 0 45px 12px rgba(201,169,78,0.08),
inset 0 0 80px 5px rgba(45,212,191,0.38); }
100% { /* same as 0% */ }
}
```
### Layer 4: Column Bar Shimmer
- White highlight (`rgba(255,255,255,0.28)`) sweeps across each column header bar
- 1.6s ease-in-out loop, staggered by -0.25s per bar for a ripple effect
- 6 bars total (3 left group, 3 right group)
---
## 5. T4b Variant — Full-Card Rainbow
Same as T4 but the prismatic rainbow covers the entire card height (not just header).
- Applied via `::after` on `.pd-card` instead of `.card-header::after`
- Slightly reduced opacity (0.18–0.22 vs 0.28–0.32)
- z-index: 6 (above content)
- Dual glow pulse uses a separate `.dual-pulse-overlay` div at 2.8s (slightly slower)
- Column bar shimmer identical to T4
**Status**: Experimental variant. May or may not ship — kept as an option.
---
## 6. Corner Accents (T4 Only)
L-shaped corner brackets on all four card corners.
- **Color**: `#c9a94e` (gold)
- **Size**: 35px arms, 3px thick
- **Implementation**: Four absolutely-positioned divs with two-sided borders each
- **z-index**: 6
---
## 7. Implementation Notes for Production
### What to port
1. **Diamond indicator CSS** (`.tier-diamond`, `.diamond-quad`, keyframes) → add to card template stylesheet
2. **Diamond HTML generation** → add to Playwright card renderer (4 divs in a grid)
3. **Metallic + glow effect** → always apply metallic class + glow animation to filled diamonds
4. **T3 shimmer** → APNG capture with `--anim-progress` variable (8 frames)
5. **T4 layered effects** → APNG capture with `--anim-progress` driving all 4 layers
6. **Diamond colors** → store in tier config or derive from tier level
7. **Corner accents** → T4 only, simple border divs
### What NOT to port
- The mockup control panel UI (sliders, dropdowns, color pickers)
- The `diamondEffect` dropdown with 5 options (we chose metallic — hardcode it)
- The separate `diamondGlow` toggle (metallic always includes glow)
- Border preset / header type controls (these are already in production tier configs)
- T4b full-card rainbow (unless explicitly promoted later)
### Database/API considerations
The diamond fill count is already derivable from the tier level — no new database fields needed:
- `refractor_tier = 1` → `diamondFill = 1`, color = orange
- `refractor_tier = 2` → `diamondFill = 2`, color = red
- `refractor_tier = 3` → `diamondFill = 3`, color = purple
- `refractor_tier = 4` → `diamondFill = 4`, color = blue-flame
Diamond colors are purely visual (CSS) — they don't need to be stored.

View File

@ -1,7 +1,7 @@
import datetime
import urllib.parse
import pandas as pd
from typing import Dict
from typing import Any, Dict
from creation_helpers import (
get_all_pybaseball_ids,
@ -196,8 +196,8 @@ async def create_new_players(
{
"p_name": f"{f_name} {l_name}",
"cost": NEW_PLAYER_COST,
"image": f'{card_base_url}/{df_data["player_id"]}/'
f'pitchingcard{urllib.parse.quote("?d=")}{release_dir}',
"image": f"{card_base_url}/{df_data['player_id']}/"
f"pitchingcard{urllib.parse.quote('?d=')}{release_dir}",
"mlbclub": CLUB_LIST[df_data["Tm_vL"]],
"franchise": FRANCHISE_LIST[df_data["Tm_vL"]],
"cardset_id": cardset["id"],
@ -268,7 +268,7 @@ async def calculate_pitching_cards(
def create_pitching_card(df_data):
logger.info(
f'Creating pitching card for {df_data["name_first"]} {df_data["name_last"]} / fg ID: {df_data["key_fangraphs"]}'
f"Creating pitching card for {df_data['name_first']} {df_data['name_last']} / fg ID: {df_data['key_fangraphs']}"
)
pow_data = cde.pow_ratings(
float(df_data["Inn_def"]), df_data["GS"], df_data["G"]
@ -298,11 +298,13 @@ async def calculate_pitching_cards(
int(df_data["GF"]), int(df_data["SV"]), int(df_data["G"])
),
"hand": df_data["pitch_hand"],
"batting": f'#1W{df_data["pitch_hand"]}-C',
"batting": f"#1W{df_data['pitch_hand']}-C",
}
)
except Exception as e:
logger.error(f'Skipping fg ID {df_data["key_fangraphs"]} due to: {e}')
except Exception:
logger.exception(
f"Skipping fg ID {df_data['key_fangraphs']} due to exception"
)
print("Calculating pitching cards...")
pitching_stats.apply(create_pitching_card, axis=1)
@ -333,7 +335,7 @@ async def create_position(
def create_pit_position(df_data):
if df_data["key_bbref"] in df_p.index:
logger.debug(f'Running P stats for {df_data["p_name"]}')
logger.debug(f"Running P stats for {df_data['p_name']}")
pit_positions.append(
{
"player_id": int(df_data["player_id"]),
@ -355,7 +357,7 @@ async def create_position(
try:
pit_positions.append(
{
"player_id": int(df_data["key_bbref"]),
"player_id": int(float(df_data["player_id"])),
"position": "P",
"innings": 1,
"range": 5,
@ -364,7 +366,7 @@ async def create_position(
)
except Exception:
logger.error(
f'Could not create pitcher position for {df_data["key_bbref"]}'
f"Could not create pitcher position for {df_data['key_bbref']}"
)
print("Calculating pitcher fielding lines now...")
@ -386,7 +388,7 @@ async def calculate_pitcher_ratings(pitching_stats: pd.DataFrame, post_pitchers:
pitching_ratings.extend(cpi.get_pitcher_ratings(df_data))
except Exception:
logger.error(
f'Could not create a pitching card for {df_data["key_fangraphs"]}'
f"Could not create a pitching card for {df_data['key_fangraphs']}"
)
print("Calculating card ratings...")
@ -400,7 +402,7 @@ async def calculate_pitcher_ratings(pitching_stats: pd.DataFrame, post_pitchers:
async def post_player_updates(
cardset: Dict[str, any],
cardset: Dict[str, Any],
player_description: str,
card_base_url: str,
release_dir: str,
@ -525,8 +527,8 @@ async def post_player_updates(
[
(
"image",
f'{card_base_url}/{df_data["player_id"]}/pitchingcard'
f'{urllib.parse.quote("?d=")}{release_dir}',
f"{card_base_url}/{df_data['player_id']}/pitchingcard"
f"{urllib.parse.quote('?d=')}{release_dir}",
)
]
)

View File

@ -23,6 +23,8 @@ dependencies = [
"pydantic>=2.9.0",
# AWS
"boto3>=1.35.0",
# Environment
"python-dotenv>=1.0.0",
# Scraping
"beautifulsoup4>=4.12.0",
"lxml>=5.0.0",

View File

@ -23,9 +23,9 @@ multidict==6.1.0
numpy==2.1.2
packaging==24.1
pandas==2.2.3
peewee
peewee==3.19.0
pillow==11.0.0
polars
polars==1.36.1
pluggy==1.5.0
propcache==0.2.0
# pyarrow==17.0.0

View File

@ -53,21 +53,30 @@ PROMO_INCLUSION_RETRO_IDS = [
# 'haraa001', # Aaron Harang (SP)
# 'hofft001', # Trevor Hoffman (RP)
]
MIN_PA_VL = 20 if "live" in PLAYER_DESCRIPTION.lower() else 1 # 1 for PotM
MIN_PA_VR = 40 if "live" in PLAYER_DESCRIPTION.lower() else 1 # 1 for PotM
MIN_TBF_VL = MIN_PA_VL
MIN_TBF_VR = MIN_PA_VR
CARDSET_ID = (
27 if "live" in PLAYER_DESCRIPTION.lower() else 28
) # 27: 2005 Live, 28: 2005 Promos
MIN_PA_VL = 20 # 1 for PotM
MIN_PA_VR = 40 # 1 for PotM
MIN_TBF_VL = 20
MIN_TBF_VR = 40
CARDSET_ID = 27 # 27: 2005 Live, 28: 2005 Promos
# Per-Update Parameters
SEASON_PCT = 81 / 162 # Through end of July (~half season)
START_DATE = 20050403 # YYYYMMDD format - 2005 Opening Day
# END_DATE = 20050531 # YYYYMMDD format - May PotM
END_DATE = 20050731 # End of July 2005
SEASON_END_DATE = 20051002 # 2005 regular season end date (used to derive SEASON_PCT)
SEASON_PCT = min(
(
datetime.datetime.strptime(str(END_DATE), "%Y%m%d")
- datetime.datetime.strptime(str(START_DATE), "%Y%m%d")
).days
/ (
datetime.datetime.strptime(str(SEASON_END_DATE), "%Y%m%d")
- datetime.datetime.strptime(str(START_DATE), "%Y%m%d")
).days,
1.0,
)
POST_DATA = True
LAST_WEEK_RATIO = 0.0 if PLAYER_DESCRIPTION == "Live" else 0.0
LAST_WEEK_RATIO = 0.0
LAST_TWOWEEKS_RATIO = 0.0
LAST_MONTH_RATIO = 0.0
@ -1429,7 +1438,7 @@ def calc_pitching_cards(ps: pd.DataFrame, season_pct: float) -> pd.DataFrame:
"closer_rating": [
cpi.closer_rating(int(row["GF"]), int(row["SV"]), int(row["G"]))
],
"batting": [f'#1W{row["pitch_hand"].upper()}-C'],
"batting": [f"#1W{row['pitch_hand'].upper()}-C"],
}
)
return y.loc[0]
@ -1598,7 +1607,7 @@ def calc_positions(bs: pd.DataFrame) -> pd.DataFrame:
]:
if row["key_bbref"] in pos_df.index:
logger.info(
f'Running {position} stats for {row["use_name"]} {row["last_name"]}'
f"Running {position} stats for {row['use_name']} {row['last_name']}"
)
try:
if "bis_runs_total" in pos_df.columns:
@ -1865,8 +1874,8 @@ async def get_or_post_players(
def new_player_payload(row, ratings_df: pd.DataFrame):
return {
"p_name": f'{row["use_name"]} {row["last_name"]}',
"cost": f'{ratings_df.loc[row['key_bbref']]["cost"]}',
"p_name": f"{row['use_name']} {row['last_name']}",
"cost": f"{ratings_df.loc[row['key_bbref']]['cost']}",
"image": "change-me",
"mlbclub": CLUB_LIST[row["Tm"]],
"franchise": FRANCHISE_LIST[row["Tm"]],
@ -1916,11 +1925,11 @@ async def get_or_post_players(
# Update positions for existing players too
all_pos = get_player_record_pos(def_rat_df, row)
patch_params = [
("cost", f'{bat_rat_df.loc[row['key_bbref']]["cost"]}'),
("cost", f"{bat_rat_df.loc[row['key_bbref']]['cost']}"),
("rarity_id", int(bat_rat_df.loc[row["key_bbref"]]["rarity_id"])),
(
"image",
f'{CARD_BASE_URL}{player_id}/battingcard{urllib.parse.quote("?d=")}{RELEASE_DIRECTORY}',
f"{CARD_BASE_URL}{player_id}/battingcard{urllib.parse.quote('?d=')}{RELEASE_DIRECTORY}",
),
]
# Add position updates - set all 8 slots to clear any old positions
@ -1964,7 +1973,7 @@ async def get_or_post_players(
params=[
(
"image",
f'{CARD_BASE_URL}{player_id}/battingcard{urllib.parse.quote("?d=")}{RELEASE_DIRECTORY}',
f"{CARD_BASE_URL}{player_id}/battingcard{urllib.parse.quote('?d=')}{RELEASE_DIRECTORY}",
)
],
)
@ -2003,11 +2012,11 @@ async def get_or_post_players(
# Determine pitcher positions based on ratings
patch_params = [
("cost", f'{pit_rat_df.loc[row['key_bbref']]["cost"]}'),
("cost", f"{pit_rat_df.loc[row['key_bbref']]['cost']}"),
("rarity_id", int(pit_rat_df.loc[row["key_bbref"]]["rarity_id"])),
(
"image",
f'{CARD_BASE_URL}{player_id}/pitchingcard{urllib.parse.quote("?d=")}{RELEASE_DIRECTORY}',
f"{CARD_BASE_URL}{player_id}/pitchingcard{urllib.parse.quote('?d=')}{RELEASE_DIRECTORY}",
),
]
@ -2081,7 +2090,7 @@ async def get_or_post_players(
params=[
(
"image",
f'{CARD_BASE_URL}{player_id}/pitchingcard{urllib.parse.quote("?d=")}{RELEASE_DIRECTORY}',
f"{CARD_BASE_URL}{player_id}/pitchingcard{urllib.parse.quote('?d=')}{RELEASE_DIRECTORY}",
)
],
)
@ -2105,10 +2114,10 @@ async def get_or_post_players(
raise KeyError("Could not get players - not enough stat DFs were supplied")
pd.DataFrame(player_deltas[1:], columns=player_deltas[0]).to_csv(
f'{"batter" if bstat_df is not None else "pitcher"}-deltas.csv'
f"{'batter' if bstat_df is not None else 'pitcher'}-deltas.csv"
)
pd.DataFrame(new_players[1:], columns=new_players[0]).to_csv(
f'new-{"batter" if bstat_df is not None else "pitcher"}s.csv'
f"new-{'batter' if bstat_df is not None else 'pitcher'}s.csv"
)
players_df = pd.DataFrame(all_players).set_index("bbref_id")
@ -2280,7 +2289,7 @@ async def post_positions(pos_df: pd.DataFrame, delete_existing: bool = False):
deleted_count += 1
except Exception as e:
logger.warning(
f'Failed to delete cardposition {pos["id"]}: {e}'
f"Failed to delete cardposition {pos['id']}: {e}"
)
logger.info(f"Deleted {deleted_count} positions for players in current run")

View File

@ -96,7 +96,7 @@ def build_c_throw(all_positions, pos_code):
async def fetch_data(data):
    """Run one DB query for a (endpoint, params) pair and return the result.

    data: two-element sequence — data[0] is the REST endpoint name,
    data[1] is the list/dict of query parameters for that endpoint.
    """
    start_time = log_time("start", print_to_console=False)
    # timeout=120: bulk card-rating queries can far exceed the client's
    # default timeout; the diff render had duplicated the old (no-timeout)
    # call above this one — only the timeout=120 call is intended.
    this_query = await db_get(endpoint=data[0], params=data[1], timeout=120)
    log_time("end", print_to_console=False, start_time=start_time)
    return this_query

View File

@ -1,75 +0,0 @@
from typing import Literal
import requests
from exceptions import logger, log_exception
import os

# SECURITY: never hardcode credentials here. A live Supabase service-role
# JWT was previously committed in this dict — it must be treated as
# compromised and rotated. The key is now read from the environment.
_SUPABASE_KEY = os.environ.get("SUPABASE_SERVICE_ROLE_KEY", "")
AUTH_TOKEN = {
    "Authorization": f"Bearer {_SUPABASE_KEY}",
    "apikey": _SUPABASE_KEY,
}
# Base URL of the Supabase PostgREST endpoint (not secret, unlike the key).
DB_URL = "https://cnphpnuvhjvqzkcbwzdk.supabase.co/rest/v1"
def get_req_url(endpoint: str, params: list = None):
    """Build the request URL for *endpoint*, with an optional query string.

    params: optional list of (key, value) pairs appended as ?k1=v1&k2=v2.

    Bug fix: the old code initialised the URL with a trailing "?" AND
    prefixed the first parameter with another "?", yielding "endpoint??k=v".
    The query string is now joined correctly with a single "?".
    Note: values are not URL-encoded here — callers pre-quote as needed.
    """
    req_url = f"{DB_URL}/{endpoint}"
    if params:
        req_url += "?" + "&".join(f"{key}={value}" for key, value in params)
    return req_url
def log_return_value(log_string: str, log_type: Literal["info", "debug"]):
    """Log a query's return payload, truncated to 1200 characters.

    log_type selects the logger level ("info" or "debug"); anything
    else falls through to debug.
    """
    marker = " [ S N I P P E D ]" if len(log_string) > 1200 else ""
    message = f"return: {log_string[:1200]}{marker}\n"
    if log_type == "info":
        logger.info(message)
    else:
        logger.debug(message)
def db_get(
    endpoint: str,
    params: dict = None,
    limit: int = 1000,
    offset: int = 0,
    none_okay: bool = True,
    timeout: int = 3,
):
    """GET *endpoint* from the REST API and return the decoded JSON body.

    params: extra query parameters forwarded to requests.
    limit/offset: paging controls appended to the URL.
    none_okay: when True, an empty list result is returned as None;
        when False, the empty result is reported via log_exception.
    timeout: per-request timeout in seconds.

    NOTE(review): on a non-2xx status this logs via log_exception and still
    attempts response.json() — presumably log_exception raises or the error
    body is JSON; confirm before relying on the error path.
    """
    req_url = f"{DB_URL}/{endpoint}?limit={limit}&offset={offset}"
    logger.info(f"HTTP GET: {req_url}, params: {params}")
    # Bug fix: the timeout parameter was accepted but never forwarded, so
    # slow bulk queries could hang indefinitely regardless of its value.
    response = requests.request(
        "GET", req_url, params=params, headers=AUTH_TOKEN, timeout=timeout
    )
    logger.info(response)
    if response.status_code != requests.codes.ok:
        log_exception(Exception, response.text)
    data = response.json()
    if isinstance(data, list) and len(data) == 0:
        if none_okay:
            return None
        log_exception(Exception, "Query returned no results and none_okay = False")
    return data
# async with aiohttp.ClientSession(headers=AUTH_TOKEN) as session:
# async with session.get(req_url) as r:
# logger.info(f'session info: {r}')
# if r.status == 200:
# js = await r.json()
# log_return_value(f'{js}')
# return js
# elif none_okay:
# e = await r.text()
# logger.error(e)
# return None
# else:
# e = await r.text()
# logger.error(e)
# raise ValueError(f'DB: {e}')

View File

@ -170,6 +170,7 @@ class TestDataFetcher:
@patch("automated_data_fetcher.pb.batting_stats_bref")
@patch("automated_data_fetcher.pb.pitching_stats_bref")
@pytest.mark.asyncio
async def test_fetch_baseball_reference_data(
self,
mock_pitching,
@ -206,6 +207,7 @@ class TestDataFetcher:
@patch("automated_data_fetcher.pb.batting_stats")
@patch("automated_data_fetcher.pb.pitching_stats")
@pytest.mark.asyncio
async def test_fetch_fangraphs_data(
self,
mock_pitching,
@ -231,6 +233,7 @@ class TestDataFetcher:
@patch("automated_data_fetcher.pb.batting_stats_range")
@patch("automated_data_fetcher.pb.pitching_stats_range")
@pytest.mark.asyncio
async def test_fetch_fangraphs_data_with_dates(
self,
mock_pitching,
@ -253,6 +256,7 @@ class TestDataFetcher:
mock_pitching.assert_called_once_with(start_date, end_date)
@patch("automated_data_fetcher.get_all_pybaseball_ids")
@pytest.mark.asyncio
async def test_get_active_players_existing_function(self, mock_get_ids, fetcher):
"""Test getting player IDs using existing function"""
mock_get_ids.return_value = ["12345", "67890", "11111"]
@ -264,6 +268,7 @@ class TestDataFetcher:
@patch("automated_data_fetcher.get_all_pybaseball_ids")
@patch("automated_data_fetcher.pb.batting_stats")
@pytest.mark.asyncio
async def test_get_active_players_fallback(
self, mock_batting, mock_get_ids, fetcher, sample_batting_data
):
@ -279,6 +284,7 @@ class TestDataFetcher:
assert result == expected_ids
@patch("automated_data_fetcher.pb.get_splits")
@pytest.mark.asyncio
async def test_fetch_player_splits(
self, mock_get_splits, fetcher, sample_splits_data
):
@ -333,6 +339,7 @@ class TestLiveSeriesDataFetcher:
@patch.object(DataFetcher, "fetch_baseball_reference_data")
@patch.object(DataFetcher, "fetch_fangraphs_data")
@pytest.mark.asyncio
async def test_fetch_live_data(self, mock_fg_data, mock_bref_data, live_fetcher):
"""Test fetching live series data"""
# Mock return values
@ -360,6 +367,7 @@ class TestUtilityFunctions:
"""Test cases for utility functions"""
@patch("automated_data_fetcher.DataFetcher")
@pytest.mark.asyncio
async def test_fetch_season_data(self, mock_fetcher_class):
"""Test fetch_season_data function"""
# Create mock fetcher instance
@ -389,6 +397,7 @@ class TestUtilityFunctions:
assert any("AUTOMATED DOWNLOAD COMPLETE" in call for call in print_calls)
@patch("automated_data_fetcher.LiveSeriesDataFetcher")
@pytest.mark.asyncio
async def test_fetch_live_series_data(self, mock_fetcher_class):
"""Test fetch_live_series_data function"""
# Create mock fetcher instance
@ -416,6 +425,7 @@ class TestErrorHandling:
return DataFetcher(2023, "Season")
@patch("automated_data_fetcher.pb.pitching_stats_bref")
@pytest.mark.asyncio
async def test_fetch_baseball_reference_data_error(self, mock_pitching, fetcher):
"""Test error handling in Baseball Reference data fetch"""
# Mock function to raise an exception
@ -425,6 +435,7 @@ class TestErrorHandling:
await fetcher.fetch_baseball_reference_data()
@patch("automated_data_fetcher.pb.batting_stats")
@pytest.mark.asyncio
async def test_fetch_fangraphs_data_error(self, mock_batting, fetcher):
"""Test error handling in FanGraphs data fetch"""
# Mock function to raise an exception
@ -435,6 +446,7 @@ class TestErrorHandling:
@patch("automated_data_fetcher.get_all_pybaseball_ids")
@patch("automated_data_fetcher.pb.batting_stats")
@pytest.mark.asyncio
async def test_get_active_players_complete_failure(
self, mock_batting, mock_get_ids, fetcher
):
@ -449,6 +461,7 @@ class TestErrorHandling:
assert result == []
@patch("automated_data_fetcher.pb.get_splits")
@pytest.mark.asyncio
async def test_fetch_player_splits_individual_errors(
self, mock_get_splits, fetcher
):
@ -479,6 +492,7 @@ class TestIntegration:
"""Integration tests that require network access"""
@pytest.mark.skip(reason="Requires network access and may be slow")
@pytest.mark.asyncio
async def test_real_data_fetch(self):
"""Test fetching real data from pybaseball (skip by default)"""
fetcher = DataFetcher(2022, "Season") # Use a complete season

View File

@ -1,10 +1,4 @@
from creation_helpers import pd_positions_df, mround, sanitize_chance_output
def test_positions_df():
cardset_19_pos = pd_positions_df(19)
assert True == True
from creation_helpers import mround, sanitize_chance_output
def test_mround():