Compare commits

..

1 Commits

Author SHA1 Message Date
Cal Corum
ecbe29f507 fix: remove debug print(req.scope) from get_docs route (#32)
All checks were successful
Build Docker Image / build (pull_request) Successful in 3m2s
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-03 09:05:07 -06:00
84 changed files with 1821 additions and 14357 deletions

4
.env
View File

@ -59,9 +59,9 @@ API_TOKEN=Tp3aO3jhYve5NJF1IqOmJTmk
# PRIVATE_IN_SCHEMA=true
# Testing mode
# Set to 'True' to use development database URL (pddev.manticorum.com)
# Set to 'False' to use development database URL (pddev.manticorum.com)
# Leave unset or set to any other value for production
TESTING=True
TESTING=TRUE
# =============================================================================
# EXAMPLE CONFIGURATIONS

View File

@ -1,48 +1,30 @@
# Gitea Actions: Docker Build, Push, and Notify
#
# CI/CD pipeline for Paper Dynasty Database API:
# - Triggered by pushing a CalVer tag (e.g., 2026.3.11) or "dev" tag
# - CalVer tags push with version + "production" Docker tags
# - "dev" tag pushes with "dev" Docker tag for the dev environment
# - Builds Docker images on every push/PR
# - Auto-generates CalVer version (YYYY.MM.BUILD) on main branch merges
# - Pushes to Docker Hub and creates git tag on main
# - Sends Discord notifications on success/failure
#
# To release: git tag 2026.3.11 && git push origin 2026.3.11
# To deploy dev: git tag -f dev && git push origin dev --force
name: Build Docker Image
on:
push:
tags:
- '20*' # matches CalVer tags like 2026.3.11
- 'dev' # dev environment builds
branches:
- main
pull_request:
branches:
- main
jobs:
build:
runs-on: ubuntu-latest
container:
volumes:
- pd-buildx-cache:/opt/buildx-cache
steps:
- name: Checkout code
uses: https://github.com/actions/checkout@v4
with:
fetch-depth: 0
- name: Extract version from tag
id: version
run: |
VERSION=${GITHUB_REF#refs/tags/}
SHA_SHORT=$(git rev-parse --short HEAD)
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "sha_short=$SHA_SHORT" >> $GITHUB_OUTPUT
echo "timestamp=$(date -u +%Y-%m-%dT%H:%M:%SZ)" >> $GITHUB_OUTPUT
if [ "$VERSION" = "dev" ]; then
echo "environment=dev" >> $GITHUB_OUTPUT
else
echo "environment=production" >> $GITHUB_OUTPUT
fi
fetch-depth: 0 # Full history for tag counting
- name: Set up Docker Buildx
uses: https://github.com/docker/setup-buildx-action@v3
@ -53,52 +35,80 @@ jobs:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push Docker image
- name: Generate CalVer version
id: calver
uses: cal/gitea-actions/calver@main
# Dev build: push with dev + dev-SHA tags (PR/feature branches)
- name: Build Docker image (dev)
if: github.ref != 'refs/heads/main'
uses: https://github.com/docker/build-push-action@v5
with:
context: .
push: true
tags: |
manticorum67/paper-dynasty-database:${{ steps.version.outputs.version }}
manticorum67/paper-dynasty-database:${{ steps.version.outputs.environment }}
cache-from: type=local,src=/opt/buildx-cache/pd-database
cache-to: type=local,dest=/opt/buildx-cache/pd-database-new,mode=max
manticorum67/paper-dynasty-database:dev
manticorum67/paper-dynasty-database:dev-${{ steps.calver.outputs.sha_short }}
cache-from: type=registry,ref=manticorum67/paper-dynasty-database:buildcache
cache-to: type=registry,ref=manticorum67/paper-dynasty-database:buildcache,mode=max
- name: Rotate cache
run: |
rm -rf /opt/buildx-cache/pd-database
mv /opt/buildx-cache/pd-database-new /opt/buildx-cache/pd-database
# Production build: push with latest + CalVer tags (main only)
- name: Build Docker image (production)
if: github.ref == 'refs/heads/main'
uses: https://github.com/docker/build-push-action@v5
with:
context: .
push: true
tags: |
manticorum67/paper-dynasty-database:latest
manticorum67/paper-dynasty-database:${{ steps.calver.outputs.version }}
manticorum67/paper-dynasty-database:${{ steps.calver.outputs.version_sha }}
cache-from: type=registry,ref=manticorum67/paper-dynasty-database:buildcache
cache-to: type=registry,ref=manticorum67/paper-dynasty-database:buildcache,mode=max
- name: Tag release
if: success() && github.ref == 'refs/heads/main'
uses: cal/gitea-actions/gitea-tag@main
with:
version: ${{ steps.calver.outputs.version }}
token: ${{ github.token }}
- name: Build Summary
run: |
echo "## Docker Build Successful" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Version:** \`${{ steps.version.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Image Tags:**" >> $GITHUB_STEP_SUMMARY
echo "- \`manticorum67/paper-dynasty-database:${{ steps.version.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
echo "- \`manticorum67/paper-dynasty-database:${{ steps.version.outputs.environment }}\`" >> $GITHUB_STEP_SUMMARY
echo "- \`manticorum67/paper-dynasty-database:latest\`" >> $GITHUB_STEP_SUMMARY
echo "- \`manticorum67/paper-dynasty-database:${{ steps.calver.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
echo "- \`manticorum67/paper-dynasty-database:${{ steps.calver.outputs.version_sha }}\`" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Build Details:**" >> $GITHUB_STEP_SUMMARY
echo "- Commit: \`${{ steps.version.outputs.sha_short }}\`" >> $GITHUB_STEP_SUMMARY
echo "- Timestamp: \`${{ steps.version.outputs.timestamp }}\`" >> $GITHUB_STEP_SUMMARY
echo "- Branch: \`${{ steps.calver.outputs.branch }}\`" >> $GITHUB_STEP_SUMMARY
echo "- Commit: \`${{ github.sha }}\`" >> $GITHUB_STEP_SUMMARY
echo "- Timestamp: \`${{ steps.calver.outputs.timestamp }}\`" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "Pull with: \`docker pull manticorum67/paper-dynasty-database:${{ steps.version.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
if [ "${{ github.ref }}" == "refs/heads/main" ]; then
echo "Pushed to Docker Hub!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "Pull with: \`docker pull manticorum67/paper-dynasty-database:latest\`" >> $GITHUB_STEP_SUMMARY
else
echo "_PR build - image not pushed to Docker Hub_" >> $GITHUB_STEP_SUMMARY
fi
- name: Discord Notification - Success
if: success()
if: success() && github.ref == 'refs/heads/main'
uses: cal/gitea-actions/discord-notify@main
with:
webhook_url: ${{ secrets.DISCORD_WEBHOOK }}
title: "Paper Dynasty Database"
status: success
version: ${{ steps.version.outputs.version }}
image_tag: ${{ steps.version.outputs.version }}
commit_sha: ${{ steps.version.outputs.sha_short }}
timestamp: ${{ steps.version.outputs.timestamp }}
version: ${{ steps.calver.outputs.version }}
image_tag: ${{ steps.calver.outputs.version_sha }}
commit_sha: ${{ steps.calver.outputs.sha_short }}
timestamp: ${{ steps.calver.outputs.timestamp }}
- name: Discord Notification - Failure
if: failure()
if: failure() && github.ref == 'refs/heads/main'
uses: cal/gitea-actions/discord-notify@main
with:
webhook_url: ${{ secrets.DISCORD_WEBHOOK }}

View File

@ -1,31 +0,0 @@
#!/bin/bash
#
# Install git hooks for this repository
#

repo_root=$(git rev-parse --show-toplevel 2>/dev/null)
[ -n "$repo_root" ] || { echo "Error: Not in a git repository"; exit 1; }

HOOKS_DIR="$repo_root/.githooks"
GIT_HOOKS_DIR="$repo_root/.git/hooks"

echo "Installing git hooks..."

# Copy the tracked pre-commit hook into .git/hooks and mark it executable.
if [ -f "$HOOKS_DIR/pre-commit" ]; then
    cp "$HOOKS_DIR/pre-commit" "$GIT_HOOKS_DIR/pre-commit"
    chmod +x "$GIT_HOOKS_DIR/pre-commit"
    echo "Installed pre-commit hook"
else
    echo "pre-commit hook not found in $HOOKS_DIR"
fi

echo ""
echo "The pre-commit hook will:"
echo " - Auto-fix ruff lint violations (unused imports, formatting, etc.)"
echo " - Block commits only on truly unfixable issues"
echo ""
echo "To bypass in emergency: git commit --no-verify"

View File

@ -1,53 +0,0 @@
#!/bin/bash
#
# Pre-commit hook: ruff lint check on staged Python files.
# Catches syntax errors, unused imports, and basic issues before commit.
# To bypass in emergency: git commit --no-verify
#
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

REPO_ROOT=$(git rev-parse --show-toplevel)
cd "$REPO_ROOT" || exit 1

# Capture the staged file list newline-separated. A previous version used
# `git diff -z` here, but NUL bytes do not survive $(...) command
# substitution in bash — they are stripped, which silently concatenated all
# filenames into one argument for the later `xargs -0` calls. Converting
# newlines to NULs with `tr` at pipe time keeps `xargs -0` safe for
# filenames containing spaces (filenames with embedded newlines remain
# unsupported, as with any newline-separated list).
STAGED_PY=$(git diff --cached --name-only --diff-filter=ACM -- '*.py')
if [ -z "$STAGED_PY" ]; then
    exit 0
fi

echo "ruff check on staged files..."

# Stash unstaged changes so ruff only operates on staged content.
# Without this, ruff --fix runs on the full working tree file (staged +
# unstaged), and the subsequent git add would silently include unstaged
# changes in the commit — breaking git add -p workflows.
STASHED=0
if git stash --keep-index -q 2>/dev/null; then
    STASHED=1
fi

# Auto-fix what we can, then re-stage the fixed files
printf '%s' "$STAGED_PY" | tr '\n' '\0' | xargs -0 ruff check --fix --exit-zero
printf '%s' "$STAGED_PY" | tr '\n' '\0' | xargs -0 git add

# Restore unstaged changes
if [ $STASHED -eq 1 ]; then
    git stash pop -q
fi

# Now check for remaining unfixable issues
printf '%s' "$STAGED_PY" | tr '\n' '\0' | xargs -0 ruff check
RUFF_EXIT=$?

if [ $RUFF_EXIT -ne 0 ]; then
    echo ""
    echo -e "${RED}Pre-commit checks failed (unfixable issues). Commit blocked.${NC}"
    echo -e "${YELLOW}To bypass (not recommended): git commit --no-verify${NC}"
    exit 1
fi

echo -e "${GREEN}All checks passed.${NC}"
exit 0

2
.gitignore vendored
View File

@ -59,8 +59,6 @@ pyenv.cfg
pyvenv.cfg
docker-compose.override.yml
docker-compose.*.yml
.run-local.pid
.env.local
*.db
venv
.claude/

View File

@ -1,6 +1,6 @@
# Paper Dynasty Database API
FastAPI backend for baseball card game data. Peewee ORM with PostgreSQL.
FastAPI backend for baseball card game data. Peewee ORM with SQLite (WAL mode).
## Commands
@ -14,7 +14,7 @@ docker build -t paper-dynasty-db . # Build image
## Architecture
- **Routers**: Domain-based in `app/routers_v2/` (cards, players, teams, packs, stats, gauntlets, scouting)
- **ORM**: Peewee with PostgreSQL
- **ORM**: Peewee with SQLite (`storage/pd_master.db`, WAL journaling)
- **Card images**: Playwright/Chromium renders HTML templates → screenshots (see `routers_v2/players.py`)
- **Logging**: Rotating files in `logs/database/{date}.log`
@ -42,14 +42,14 @@ docker build -t paper-dynasty-db . # Build image
- **API docs**: `/api/docs` and `/api/redoc`
### Key Env Vars
`API_TOKEN`, `LOG_LEVEL`, `DATABASE_TYPE`, `POSTGRES_HOST`, `POSTGRES_DB`, `POSTGRES_USER`, `POSTGRES_PASSWORD`
`API_TOKEN`, `LOG_LEVEL`, `DATABASE_TYPE` (sqlite/postgresql), `POSTGRES_HOST`, `POSTGRES_DB`, `POSTGRES_USER`, `POSTGRES_PASSWORD`
### Common Issues
- 502 Bad Gateway → API container crashed; check `docker logs pd_api`
- Card image generation failures → Playwright/Chromium issue; check for missing dependencies
- SQLite locking (dev) → WAL mode should prevent, but check for long-running writes
- DB connection errors → verify `POSTGRES_HOST` points to correct container name
- **CI/CD**: Gitea Actions on CalVer tag push — builds Docker image and pushes to Docker Hub
- **Release**: `git tag YYYY.M.BUILD && git push origin YYYY.M.BUILD` → CI builds + pushes image + notifies Discord
- **CI/CD**: Gitea Actions on PR to `main` — builds Docker image, auto-generates CalVer version (`YYYY.MM.BUILD`) on merge
## Important

View File

@ -1,12 +1,41 @@
FROM python:3.11-slim-bookworm
FROM tiangolo/uvicorn-gunicorn-fastapi:latest
WORKDIR /usr/src/app
# Chrome dependency installation
# RUN apt-get update && apt-get install -y \
# fonts-liberation \
# libasound2 \
# libatk-bridge2.0-0 \
# libatk1.0-0 \
# libatspi2.0-0 \
# libcups2 \
# libdbus-1-3 \
# libdrm2 \
# libgbm1 \
# libgtk-3-0 \
# # libgtk-4-1 \
# libnspr4 \
# libnss3 \
# libwayland-client0 \
# libxcomposite1 \
# libxdamage1 \
# libxfixes3 \
# libxkbcommon0 \
# libxrandr2 \
# xdg-utils \
# libu2f-udev \
# libvulkan1
# # Chrome installation
# RUN curl -LO https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
# RUN apt-get install -y ./google-chrome-stable_current_amd64.deb
# RUN rm google-chrome-stable_current_amd64.deb
# # Check chrome version
# RUN echo "Chrome: " && google-chrome --version
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt
RUN playwright install chromium
RUN playwright install-deps chromium
COPY ./app /usr/src/app/app
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "80"]
COPY ./app /app/app

View File

@ -30,20 +30,20 @@ if DATABASE_TYPE.lower() == "postgresql":
autorollback=True, # Automatically rollback failed transactions
)
else:
# SQLite configuration for local development only.
# Production always uses PostgreSQL (see DATABASE_TYPE env var).
#
# synchronous=0 (OFF): SQLite skips fsync() after every write, maximising
# throughput at the cost of durability — a hard crash could corrupt the DB.
# This is an acceptable trade-off in dev where data loss is tolerable and
# write speed matters. WAL journal mode reduces (but does not eliminate)
# the corruption window by keeping the main database file consistent while
# writes land in the WAL file first.
# Default SQLite configuration for local development
db = SqliteDatabase(
"storage/pd_master.db",
pragmas={"journal_mode": "wal", "cache_size": -1 * 64000, "synchronous": 0},
)
date = f"{datetime.now().year}-{datetime.now().month}-{datetime.now().day}"
log_level = logging.INFO if os.environ.get("LOG_LEVEL") == "INFO" else "WARN"
logging.basicConfig(
filename=f"logs/database/{date}.log",
format="%(asctime)s - database - %(levelname)s - %(message)s",
level=log_level,
)
# 2025, 2005
ranked_cardsets = [24, 25, 26, 27, 28, 29]
LIVE_CARDSET_ID = 27
@ -474,7 +474,6 @@ class Card(BaseModel):
team = ForeignKeyField(Team, null=True)
pack = ForeignKeyField(Pack, null=True)
value = IntegerField(default=0)
variant = IntegerField(null=True, default=None)
def __str__(self):
if self.player:
@ -499,34 +498,51 @@ class Roster(BaseModel):
team = ForeignKeyField(Team)
name = CharField()
roster_num = IntegerField()
card_1 = ForeignKeyField(Card)
card_2 = ForeignKeyField(Card)
card_3 = ForeignKeyField(Card)
card_4 = ForeignKeyField(Card)
card_5 = ForeignKeyField(Card)
card_6 = ForeignKeyField(Card)
card_7 = ForeignKeyField(Card)
card_8 = ForeignKeyField(Card)
card_9 = ForeignKeyField(Card)
card_10 = ForeignKeyField(Card)
card_11 = ForeignKeyField(Card)
card_12 = ForeignKeyField(Card)
card_13 = ForeignKeyField(Card)
card_14 = ForeignKeyField(Card)
card_15 = ForeignKeyField(Card)
card_16 = ForeignKeyField(Card)
card_17 = ForeignKeyField(Card)
card_18 = ForeignKeyField(Card)
card_19 = ForeignKeyField(Card)
card_20 = ForeignKeyField(Card)
card_21 = ForeignKeyField(Card)
card_22 = ForeignKeyField(Card)
card_23 = ForeignKeyField(Card)
card_24 = ForeignKeyField(Card)
card_25 = ForeignKeyField(Card)
card_26 = ForeignKeyField(Card)
def __str__(self):
return f"{self.team} Roster"
def get_cards(self):
return (
Card.select()
.join(RosterSlot)
.where(RosterSlot.roster == self)
.order_by(RosterSlot.slot)
)
# def get_cards(self, team):
# all_cards = Card.select().where(Card.roster == self)
# this_roster = []
# return [this_roster.card1, this_roster.card2, this_roster.card3, this_roster.card4, this_roster.card5,
# this_roster.card6, this_roster.card7, this_roster.card8, this_roster.card9, this_roster.card10,
# this_roster.card11, this_roster.card12, this_roster.card13, this_roster.card14, this_roster.card15,
# this_roster.card16, this_roster.card17, this_roster.card18, this_roster.card19, this_roster.card20,
# this_roster.card21, this_roster.card22, this_roster.card23, this_roster.card24, this_roster.card25,
# this_roster.card26]
class Meta:
database = db
table_name = "roster"
class RosterSlot(BaseModel):
    """One card occupying one numbered slot of a Roster.

    Normalizes the roster-card relationship into rows (roster, slot, card);
    the unique (roster, slot) index guarantees each slot holds at most one
    card per roster.
    """

    roster = ForeignKeyField(Roster, backref="slots")
    slot = IntegerField()  # position within the roster; uniqueness enforced below
    card = ForeignKeyField(Card, backref="roster_slots")

    class Meta:
        database = db
        table_name = "rosterslot"
        # Unique composite index on (roster, slot).
        indexes = ((("roster", "slot"), True),)
class Result(BaseModel):
away_team = ForeignKeyField(Team)
home_team = ForeignKeyField(Team)
@ -728,7 +744,6 @@ if not SKIP_TABLE_CREATION:
db.create_tables(
[
Roster,
RosterSlot,
BattingStat,
PitchingStat,
Result,
@ -756,7 +771,6 @@ class BattingCard(BaseModel):
running = IntegerField()
offense_col = IntegerField()
hand = CharField(default="R")
image_url = CharField(null=True, max_length=500)
class Meta:
database = db
@ -826,7 +840,6 @@ class PitchingCard(BaseModel):
batting = CharField(null=True)
offense_col = IntegerField()
hand = CharField(default="R")
image_url = CharField(null=True, max_length=500)
class Meta:
database = db
@ -912,13 +925,7 @@ CardPosition.add_index(pos_index)
if not SKIP_TABLE_CREATION:
db.create_tables(
[
BattingCard,
BattingCardRatings,
PitchingCard,
PitchingCardRatings,
CardPosition,
],
[BattingCard, BattingCardRatings, PitchingCard, PitchingCardRatings, CardPosition],
safe=True,
)
@ -1053,235 +1060,8 @@ decision_index = ModelIndex(Decision, (Decision.game, Decision.pitcher), unique=
Decision.add_index(decision_index)
class BattingSeasonStats(BaseModel):
    """Season-level batting totals, keyed by (player, team, season).

    Field names follow standard baseball box-score abbreviations
    (pa = plate appearances, ab = at-bats, bb = walks, hbp = hit by pitch,
    sac = sacrifices, ibb = intentional walks, gidp = grounded into double
    play, sb = stolen bases, cs = caught stealing).
    """

    player = ForeignKeyField(Player)
    team = ForeignKeyField(Team)
    season = IntegerField()
    games = IntegerField(default=0)
    pa = IntegerField(default=0)
    ab = IntegerField(default=0)
    hits = IntegerField(default=0)
    doubles = IntegerField(default=0)
    triples = IntegerField(default=0)
    hr = IntegerField(default=0)
    rbi = IntegerField(default=0)
    runs = IntegerField(default=0)
    bb = IntegerField(default=0)
    strikeouts = IntegerField(default=0)
    hbp = IntegerField(default=0)
    sac = IntegerField(default=0)
    ibb = IntegerField(default=0)
    gidp = IntegerField(default=0)
    sb = IntegerField(default=0)
    cs = IntegerField(default=0)
    # NOTE(review): presumably the last game folded into these totals, for
    # incremental aggregation — confirm against the stats update job.
    last_game = ForeignKeyField(StratGame, null=True)
    last_updated_at = DateTimeField(null=True)

    class Meta:
        database = db
        table_name = "batting_season_stats"


# One row per (player, team, season); non-unique indexes support
# team-scoped and player-scoped season queries.
bss_unique_index = ModelIndex(
    BattingSeasonStats,
    (BattingSeasonStats.player, BattingSeasonStats.team, BattingSeasonStats.season),
    unique=True,
)
BattingSeasonStats.add_index(bss_unique_index)
bss_team_season_index = ModelIndex(
    BattingSeasonStats,
    (BattingSeasonStats.team, BattingSeasonStats.season),
    unique=False,
)
BattingSeasonStats.add_index(bss_team_season_index)
bss_player_season_index = ModelIndex(
    BattingSeasonStats,
    (BattingSeasonStats.player, BattingSeasonStats.season),
    unique=False,
)
BattingSeasonStats.add_index(bss_player_season_index)
class PitchingSeasonStats(BaseModel):
    """Season-level pitching totals, keyed by (player, team, season).

    Field names follow standard baseball abbreviations (bb = walks allowed,
    hbp = batters hit by pitch, hr_allowed = home runs allowed). `outs`
    presumably stores innings pitched as total outs — confirm with the
    aggregation code.
    """

    player = ForeignKeyField(Player)
    team = ForeignKeyField(Team)
    season = IntegerField()
    games = IntegerField(default=0)
    games_started = IntegerField(default=0)
    outs = IntegerField(default=0)
    strikeouts = IntegerField(default=0)
    bb = IntegerField(default=0)
    hits_allowed = IntegerField(default=0)
    runs_allowed = IntegerField(default=0)
    earned_runs = IntegerField(default=0)
    hr_allowed = IntegerField(default=0)
    hbp = IntegerField(default=0)
    wild_pitches = IntegerField(default=0)
    balks = IntegerField(default=0)
    wins = IntegerField(default=0)
    losses = IntegerField(default=0)
    holds = IntegerField(default=0)
    saves = IntegerField(default=0)
    blown_saves = IntegerField(default=0)
    # NOTE(review): presumably the last game folded into these totals, for
    # incremental aggregation — confirm against the stats update job.
    last_game = ForeignKeyField(StratGame, null=True)
    last_updated_at = DateTimeField(null=True)

    class Meta:
        database = db
        table_name = "pitching_season_stats"


# One row per (player, team, season); non-unique indexes support
# team-scoped and player-scoped season queries.
pitss_unique_index = ModelIndex(
    PitchingSeasonStats,
    (PitchingSeasonStats.player, PitchingSeasonStats.team, PitchingSeasonStats.season),
    unique=True,
)
PitchingSeasonStats.add_index(pitss_unique_index)
pitss_team_season_index = ModelIndex(
    PitchingSeasonStats,
    (PitchingSeasonStats.team, PitchingSeasonStats.season),
    unique=False,
)
PitchingSeasonStats.add_index(pitss_team_season_index)
pitss_player_season_index = ModelIndex(
    PitchingSeasonStats,
    (PitchingSeasonStats.player, PitchingSeasonStats.season),
    unique=False,
)
PitchingSeasonStats.add_index(pitss_player_season_index)
class ProcessedGame(BaseModel):
    """Marker row: the referenced StratGame has been processed.

    The game itself is the primary key, so a game can be marked processed
    at most once.
    """

    game = ForeignKeyField(StratGame, primary_key=True)
    processed_at = DateTimeField(default=datetime.now)

    class Meta:
        database = db
        table_name = "processed_game"


# safe=True: existing tables are left untouched (CREATE TABLE IF NOT EXISTS).
if not SKIP_TABLE_CREATION:
    db.create_tables(
        [
            StratGame,
            StratPlay,
            Decision,
            BattingSeasonStats,
            PitchingSeasonStats,
            ProcessedGame,
        ],
        safe=True,
    )
class ScoutOpportunity(BaseModel):
    """A scouting offer exposing a set of cards for other teams to claim."""

    pack = ForeignKeyField(Pack, null=True)
    opener_team = ForeignKeyField(Team)
    card_ids = CharField()  # JSON array of card IDs
    # NOTE(review): epoch timestamps — units (seconds vs milliseconds) not
    # visible here; confirm against the scouting router.
    expires_at = BigIntegerField()
    created = BigIntegerField()

    class Meta:
        database = db
        table_name = "scout_opportunity"


class ScoutClaim(BaseModel):
    """One team's claim of a card from a ScoutOpportunity."""

    scout_opportunity = ForeignKeyField(ScoutOpportunity)
    card = ForeignKeyField(Card)
    claimed_by_team = ForeignKeyField(Team)
    created = BigIntegerField()  # NOTE(review): epoch timestamp — confirm units

    class Meta:
        database = db
        table_name = "scout_claim"


# Unique (opportunity, team): each team may claim at most once per opportunity.
scout_claim_index = ModelIndex(
    ScoutClaim,
    (ScoutClaim.scout_opportunity, ScoutClaim.claimed_by_team),
    unique=True,
)
ScoutClaim.add_index(scout_claim_index)

# safe=True: existing tables are left untouched.
if not SKIP_TABLE_CREATION:
    db.create_tables([ScoutOpportunity, ScoutClaim], safe=True)
class RefractorTrack(BaseModel):
    """An evolution track: a stat formula plus four tier thresholds.

    Card states (RefractorCardState) evaluate `formula` against player
    stats and advance tiers as the computed value crosses t1..t4.
    """

    name = CharField(unique=True)
    card_type = CharField()  # 'batter', 'sp', 'rp'
    formula = CharField()  # e.g. "pa + tb * 2"
    t1_threshold = IntegerField()
    t2_threshold = IntegerField()
    t3_threshold = IntegerField()
    t4_threshold = IntegerField()

    class Meta:
        database = db
        table_name = "refractor_track"
class RefractorCardState(BaseModel):
    """Current evolution progress of a (player, team) pair on a RefractorTrack."""

    player = ForeignKeyField(Player)
    team = ForeignKeyField(Team)
    track = ForeignKeyField(RefractorTrack)
    current_tier = IntegerField(default=0)  # 0-4
    current_value = FloatField(default=0.0)  # last computed formula value
    fully_evolved = BooleanField(default=False)
    last_evaluated_at = DateTimeField(null=True)
    # NOTE(review): presumably links to the card variant produced by
    # evolution — confirm against the refractor router.
    variant = IntegerField(null=True)

    class Meta:
        database = db
        table_name = "refractor_card_state"


# One state row per (player, team); the team-only index supports
# per-team listings.
refractor_card_state_index = ModelIndex(
    RefractorCardState,
    (RefractorCardState.player, RefractorCardState.team),
    unique=True,
)
RefractorCardState.add_index(refractor_card_state_index)
refractor_card_state_team_index = ModelIndex(
    RefractorCardState,
    (RefractorCardState.team,),
    unique=False,
)
RefractorCardState.add_index(refractor_card_state_team_index)
class RefractorBoostAudit(BaseModel):
    """Audit record of one tier boost applied to a RefractorCardState.

    Rows are deleted along with their card state (on_delete CASCADE).
    """

    card_state = ForeignKeyField(RefractorCardState, on_delete="CASCADE")
    tier = IntegerField()  # 1-4
    battingcard = ForeignKeyField(BattingCard, null=True)
    pitchingcard = ForeignKeyField(PitchingCard, null=True)
    variant_created = IntegerField()
    boost_delta_json = (
        TextField()
    )  # JSONB in PostgreSQL; TextField for SQLite test compat
    applied_at = DateTimeField(default=datetime.now)

    class Meta:
        database = db
        table_name = "refractor_boost_audit"


# safe=True: existing tables are left untouched.
if not SKIP_TABLE_CREATION:
    db.create_tables(
        [
            RefractorTrack,
            RefractorCardState,
            RefractorBoostAudit,
        ],
        safe=True,
    )
db.create_tables([StratGame, StratPlay, Decision], safe=True)
db.close()

View File

@ -1,72 +1,74 @@
import datetime
import hmac
import logging
import os
import requests
from fastapi.security import OAuth2PasswordBearer
date = f'{datetime.datetime.now().year}-{datetime.datetime.now().month}-{datetime.datetime.now().day}'
LOG_DATA = {
'filename': f'logs/database/{date}.log',
'format': '%(asctime)s - database - %(levelname)s - %(message)s',
'log_level': logging.INFO if os.environ.get('LOG_LEVEL') == 'INFO' else 'WARN'
}
logging.basicConfig(
filename=LOG_DATA['filename'],
format=LOG_DATA['format'],
level=LOG_DATA['log_level']
)
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
master_debug = False
DB_URL = "https://pd.manticorum.com/api/"
DB_URL = 'https://pd.manticorum.com/api/'
AUTH_TOKEN = f'{os.environ.get("API_TOKEN")}'
AUTH_HEADER = {"Authorization": f"Bearer {AUTH_TOKEN}"}
AUTH_HEADER = {'Authorization': f'Bearer {AUTH_TOKEN}'}
priv_help = (
False
if not os.environ.get("PRIVATE_IN_SCHEMA")
else os.environ.get("PRIVATE_IN_SCHEMA").upper()
)
priv_help = False if not os.environ.get('PRIVATE_IN_SCHEMA') else os.environ.get('PRIVATE_IN_SCHEMA').upper()
PRIVATE_IN_SCHEMA = True if priv_help else False
if os.environ.get("TESTING") == "True":
DB_URL = "https://pddev.manticorum.com/api/"
if os.environ.get('TESTING') == 'False':
DB_URL = 'https://pddev.manticorum.com/api/'
def valid_token(token):
return hmac.compare_digest(token, AUTH_TOKEN)
return token == AUTH_TOKEN
def int_timestamp(datetime_obj: datetime.datetime) -> int:
    """Convert a datetime to an integer Unix timestamp in milliseconds.

    The annotation previously read `datetime`, which (given this module's
    `import datetime`) named the module rather than the class.

    Naive datetimes are interpreted in the local timezone, per
    datetime.timestamp(); pass an aware datetime for unambiguous results.
    """
    return int(datetime.datetime.timestamp(datetime_obj) * 1000)
def mround(x, prec=2, base=0.05):
def mround(x, prec=2, base=.05):
return round(base * round(float(x) / base), prec)
def param_char(other_params):
if other_params:
return "&"
return '&'
else:
return "?"
return '?'
def get_req_url(
endpoint: str, api_ver: int = 2, object_id: int = None, params: list = None
):
def get_req_url(endpoint: str, api_ver: int = 2, object_id: int = None, params: list = None):
req_url = f'{DB_URL}/v{api_ver}/{endpoint}{"/" if object_id is not None else ""}{object_id if object_id is not None else ""}'
if params:
other_params = False
for x in params:
req_url += f"{param_char(other_params)}{x[0]}={x[1]}"
req_url += f'{param_char(other_params)}{x[0]}={x[1]}'
other_params = True
return req_url
async def db_get(
endpoint: str,
api_ver: int = 2,
object_id: int = None,
params: list = None,
none_okay: bool = True,
timeout: int = 3,
):
async def db_get(endpoint: str, api_ver: int = 2, object_id: int = None, params: list = None, none_okay: bool = True,
timeout: int = 3):
req_url = get_req_url(endpoint, api_ver=api_ver, object_id=object_id, params=params)
log_string = f"get:\n{endpoint} id: {object_id} params: {params}"
log_string = f'get:\n{endpoint} id: {object_id} params: {params}'
logging.info(log_string) if master_debug else logging.debug(log_string)
retries = 0
@ -75,51 +77,37 @@ async def db_get(
resp = requests.get(req_url, timeout=timeout)
break
except requests.ReadTimeout as e:
logging.error(
f"Get Timeout: {req_url} / retries: {retries} / timeout: {timeout}"
)
logging.error(f'Get Timeout: {req_url} / retries: {retries} / timeout: {timeout}')
if retries > 1:
raise ConnectionError(
f"DB: The internet was a bit too slow for me to grab the data I needed. Please "
f"hang on a few extra seconds and try again."
)
raise ConnectionError(f'DB: The internet was a bit too slow for me to grab the data I needed. Please '
f'hang on a few extra seconds and try again.')
timeout += [2, 5][retries]
retries += 1
if resp.status_code == 200:
data = resp.json()
log_string = f"{data}"
log_string = f'{data}'
if master_debug:
logging.info(
f'return: {log_string[:1200]}{" [ S N I P P E D ]" if len(log_string) > 1200 else ""}'
)
logging.info(f'return: {log_string[:1200]}{" [ S N I P P E D ]" if len(log_string) > 1200 else ""}')
else:
logging.debug(
f'return: {log_string[:1200]}{" [ S N I P P E D ]" if len(log_string) > 1200 else ""}'
)
logging.debug(f'return: {log_string[:1200]}{" [ S N I P P E D ]" if len(log_string) > 1200 else ""}')
return data
elif none_okay:
data = resp.json()
log_string = f"{data}"
log_string = f'{data}'
if master_debug:
logging.info(
f'return: {log_string[:1200]}{" [ S N I P P E D ]" if len(log_string) > 1200 else ""}'
)
logging.info(f'return: {log_string[:1200]}{" [ S N I P P E D ]" if len(log_string) > 1200 else ""}')
else:
logging.debug(
f'return: {log_string[:1200]}{" [ S N I P P E D ]" if len(log_string) > 1200 else ""}'
)
logging.debug(f'return: {log_string[:1200]}{" [ S N I P P E D ]" if len(log_string) > 1200 else ""}')
return None
else:
logging.warning(resp.text)
raise ValueError(f"DB: {resp.text}")
raise ValueError(f'DB: {resp.text}')
async def db_patch(
endpoint: str, object_id: int, params: list, api_ver: int = 2, timeout: int = 3
):
async def db_patch(endpoint: str, object_id: int, params: list, api_ver: int = 2, timeout: int = 3):
req_url = get_req_url(endpoint, api_ver=api_ver, object_id=object_id, params=params)
log_string = f"patch:\n{endpoint} {params}"
log_string = f'patch:\n{endpoint} {params}'
logging.info(log_string) if master_debug else logging.debug(log_string)
retries = 0
@ -128,80 +116,60 @@ async def db_patch(
resp = requests.patch(req_url, headers=AUTH_HEADER, timeout=timeout)
break
except requests.Timeout as e:
logging.error(
f"Patch Timeout: {req_url} / retries: {retries} / timeout: {timeout}"
)
logging.error(f'Patch Timeout: {req_url} / retries: {retries} / timeout: {timeout}')
if retries > 1:
raise ConnectionError(
f"DB: The internet was a bit too slow for me to grab the data I needed. Please "
f"hang on a few extra seconds and try again."
)
raise ConnectionError(f'DB: The internet was a bit too slow for me to grab the data I needed. Please '
f'hang on a few extra seconds and try again.')
timeout += [min(3, timeout), min(5, timeout)][retries]
retries += 1
if resp.status_code == 200:
data = resp.json()
log_string = f"{data}"
log_string = f'{data}'
if master_debug:
logging.info(
f'return: {log_string[:1200]}{" [ S N I P P E D ]" if len(log_string) > 1200 else ""}'
)
logging.info(f'return: {log_string[:1200]}{" [ S N I P P E D ]" if len(log_string) > 1200 else ""}')
else:
logging.debug(
f'return: {log_string[:1200]}{" [ S N I P P E D ]" if len(log_string) > 1200 else ""}'
)
logging.debug(f'return: {log_string[:1200]}{" [ S N I P P E D ]" if len(log_string) > 1200 else ""}')
return data
else:
logging.warning(resp.text)
raise ValueError(f"DB: {resp.text}")
raise ValueError(f'DB: {resp.text}')
async def db_post(
endpoint: str, api_ver: int = 2, payload: dict = None, timeout: int = 3
):
async def db_post(endpoint: str, api_ver: int = 2, payload: dict = None, timeout: int = 3):
req_url = get_req_url(endpoint, api_ver=api_ver)
log_string = f"post:\n{endpoint} payload: {payload}\ntype: {type(payload)}"
log_string = f'post:\n{endpoint} payload: {payload}\ntype: {type(payload)}'
logging.info(log_string) if master_debug else logging.debug(log_string)
retries = 0
while True:
try:
resp = requests.post(
req_url, json=payload, headers=AUTH_HEADER, timeout=timeout
)
resp = requests.post(req_url, json=payload, headers=AUTH_HEADER, timeout=timeout)
break
except requests.Timeout as e:
logging.error(
f"Post Timeout: {req_url} / retries: {retries} / timeout: {timeout}"
)
logging.error(f'Post Timeout: {req_url} / retries: {retries} / timeout: {timeout}')
if retries > 1:
raise ConnectionError(
f"DB: The internet was a bit too slow for me to grab the data I needed. Please "
f"hang on a few extra seconds and try again."
)
raise ConnectionError(f'DB: The internet was a bit too slow for me to grab the data I needed. Please '
f'hang on a few extra seconds and try again.')
timeout += [min(3, timeout), min(5, timeout)][retries]
retries += 1
if resp.status_code == 200:
data = resp.json()
log_string = f"{data}"
log_string = f'{data}'
if master_debug:
logging.info(
f'return: {log_string[:1200]}{" [ S N I P P E D ]" if len(log_string) > 1200 else ""}'
)
logging.info(f'return: {log_string[:1200]}{" [ S N I P P E D ]" if len(log_string) > 1200 else ""}')
else:
logging.debug(
f'return: {log_string[:1200]}{" [ S N I P P E D ]" if len(log_string) > 1200 else ""}'
)
logging.debug(f'return: {log_string[:1200]}{" [ S N I P P E D ]" if len(log_string) > 1200 else ""}')
return data
else:
logging.warning(resp.text)
raise ValueError(f"DB: {resp.text}")
raise ValueError(f'DB: {resp.text}')
async def db_delete(endpoint: str, object_id: int, api_ver: int = 2, timeout=3):
req_url = get_req_url(endpoint, api_ver=api_ver, object_id=object_id)
log_string = f"delete:\n{endpoint} {object_id}"
log_string = f'delete:\n{endpoint} {object_id}'
logging.info(log_string) if master_debug else logging.debug(log_string)
retries = 0
@ -210,29 +178,21 @@ async def db_delete(endpoint: str, object_id: int, api_ver: int = 2, timeout=3):
resp = requests.delete(req_url, headers=AUTH_HEADER, timeout=timeout)
break
except requests.ReadTimeout as e:
logging.error(
f"Delete Timeout: {req_url} / retries: {retries} / timeout: {timeout}"
)
logging.error(f'Delete Timeout: {req_url} / retries: {retries} / timeout: {timeout}')
if retries > 1:
raise ConnectionError(
f"DB: The internet was a bit too slow for me to grab the data I needed. Please "
f"hang on a few extra seconds and try again."
)
raise ConnectionError(f'DB: The internet was a bit too slow for me to grab the data I needed. Please '
f'hang on a few extra seconds and try again.')
timeout += [min(3, timeout), min(5, timeout)][retries]
retries += 1
if resp.status_code == 200:
data = resp.json()
log_string = f"{data}"
log_string = f'{data}'
if master_debug:
logging.info(
f'return: {log_string[:1200]}{" [ S N I P P E D ]" if len(log_string) > 1200 else ""}'
)
logging.info(f'return: {log_string[:1200]}{" [ S N I P P E D ]" if len(log_string) > 1200 else ""}')
else:
logging.debug(
f'return: {log_string[:1200]}{" [ S N I P P E D ]" if len(log_string) > 1200 else ""}'
)
logging.debug(f'return: {log_string[:1200]}{" [ S N I P P E D ]" if len(log_string) > 1200 else ""}')
return True
else:
logging.warning(resp.text)
raise ValueError(f"DB: {resp.text}")
raise ValueError(f'DB: {resp.text}')

View File

@ -1,25 +1,11 @@
import logging
import os
from contextlib import asynccontextmanager
from datetime import datetime
from fastapi import FastAPI, Request
from fastapi.openapi.docs import get_swagger_ui_html
from fastapi.openapi.utils import get_openapi
_log_date = f"{datetime.now().year}-{datetime.now().month}-{datetime.now().day}"
logging.basicConfig(
filename=f"logs/database/{_log_date}.log",
format="%(asctime)s - database - %(levelname)s - %(message)s",
level=logging.INFO if os.environ.get("LOG_LEVEL") == "INFO" else logging.WARNING,
)
# from fastapi.staticfiles import StaticFiles
# from fastapi.templating import Jinja2Templates
from .db_engine import db # noqa: E402
from .routers_v2.players import get_browser, shutdown_browser # noqa: E402
from .routers_v2 import ( # noqa: E402
from .routers_v2 import (
current,
awards,
teams,
@ -49,25 +35,10 @@ from .routers_v2 import ( # noqa: E402
mlbplayers,
stratgame,
stratplays,
scout_opportunities,
scout_claims,
refractor,
season_stats,
)
@asynccontextmanager
async def lifespan(app):
# Startup: warm up the persistent Chromium browser
await get_browser()
yield
# Shutdown: clean up browser and playwright
await shutdown_browser()
app = FastAPI(
# root_path='/api',
lifespan=lifespan,
responses={404: {"description": "Not found"}},
docs_url="/api/docs",
redoc_url="/api/redoc",
@ -105,21 +76,6 @@ app.include_router(mlbplayers.router)
app.include_router(stratgame.router)
app.include_router(stratplays.router)
app.include_router(decisions.router)
app.include_router(scout_opportunities.router)
app.include_router(scout_claims.router)
app.include_router(refractor.router)
app.include_router(season_stats.router)
@app.middleware("http")
async def db_session_middleware(request: Request, call_next):
try:
db.connect(reuse_if_open=True)
response = await call_next(request)
return response
finally:
if not db.is_closed():
db.close()
@app.get("/api/docs", include_in_schema=False)
@ -131,4 +87,4 @@ async def get_docs(req: Request):
@app.get("/api/openapi.json", include_in_schema=False)
async def openapi():
return get_openapi(title="Paper Dynasty API", version="0.1.1", routes=app.routes)
return get_openapi(title="Paper Dynasty API", version=f"0.1.1", routes=app.routes)

View File

View File

@ -1,7 +0,0 @@
"""Season stats ORM models.
Models are defined in db_engine alongside all other Peewee models; this
module re-exports them so callers can import from `app.models.season_stats`.
"""
from ..db_engine import BattingSeasonStats, PitchingSeasonStats # noqa: F401

View File

@ -1,9 +1,14 @@
from fastapi import APIRouter, Depends, HTTPException
import logging
from ..db_engine import Player
from ..dependencies import oauth2_scheme, valid_token, PRIVATE_IN_SCHEMA
from ..db_engine import db, Player
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA, PRIVATE_IN_SCHEMA
logging.basicConfig(
filename=LOG_DATA['filename'],
format=LOG_DATA['format'],
level=LOG_DATA['log_level']
)
router = APIRouter(
prefix='/api/v2/admin',
@ -14,7 +19,8 @@ router = APIRouter(
@router.post('/stl-fix', include_in_schema=PRIVATE_IN_SCHEMA)
async def stl_cardinals_fix(token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning('Bad Token: [REDACTED]')
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail='You are not authorized to post. This event has been logged.'
@ -23,6 +29,7 @@ async def stl_cardinals_fix(token: str = Depends(oauth2_scheme)):
p_query = Player.update(mlbclub='St Louis Cardinals', franchise='St Louis Cardinals').where(
Player.mlbclub == 'St. Louis Cardinals'
).execute()
db.close()
return {'detail': f'Removed the period from St Louis'}

View File

@ -4,17 +4,25 @@ import logging
import pydantic
from pandas import DataFrame
from ..db_engine import Award, model_to_dict, DoesNotExist
from ..dependencies import oauth2_scheme, valid_token, PRIVATE_IN_SCHEMA
from ..db_engine import db, Award, model_to_dict
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA, PRIVATE_IN_SCHEMA
logging.basicConfig(
filename=LOG_DATA['filename'],
format=LOG_DATA['format'],
level=LOG_DATA['log_level']
)
router = APIRouter(prefix="/api/v2/awards", tags=["awards"])
router = APIRouter(
prefix='/api/v2/awards',
tags=['awards']
)
class AwardModel(pydantic.BaseModel):
name: str
season: int
timing: str = "In-Season"
timing: str = 'In-Season'
card_id: Optional[int] = None
team_id: Optional[int] = None
image: Optional[str] = None
@ -25,21 +33,16 @@ class AwardReturnList(pydantic.BaseModel):
awards: list[AwardModel]
@router.get("")
@router.get('')
async def get_awards(
name: Optional[str] = None,
season: Optional[int] = None,
timing: Optional[str] = None,
card_id: Optional[int] = None,
team_id: Optional[int] = None,
image: Optional[str] = None,
csv: Optional[bool] = None,
limit: int = 100,
):
name: Optional[str] = None, season: Optional[int] = None, timing: Optional[str] = None,
card_id: Optional[int] = None, team_id: Optional[int] = None, image: Optional[str] = None,
csv: Optional[bool] = None):
all_awards = Award.select().order_by(Award.id)
if all_awards.count() == 0:
raise HTTPException(status_code=404, detail="There are no awards to filter")
db.close()
raise HTTPException(status_code=404, detail=f'There are no awards to filter')
if name is not None:
all_awards = all_awards.where(Award.name == name)
@ -54,74 +57,59 @@ async def get_awards(
if image is not None:
all_awards = all_awards.where(Award.image == image)
limit = max(0, min(limit, 500))
total_count = all_awards.count() if not csv else 0
all_awards = all_awards.limit(limit)
if csv:
data_list = [["id", "name", "season", "timing", "card", "team", "image"]]
data_list = [['id', 'name', 'season', 'timing', 'card', 'team', 'image']]
for line in all_awards:
data_list.append(
[
line.id,
line.name,
line.season,
line.timing,
line.card,
line.team,
line.image,
]
)
data_list.append([
line.id, line.name, line.season, line.timing, line.card, line.team, line.image
])
return_val = DataFrame(data_list).to_csv(header=False, index=False)
return Response(content=return_val, media_type="text/csv")
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = {"count": total_count, "awards": []}
return_val = {'count': all_awards.count(), 'awards': []}
for x in all_awards:
return_val["awards"].append(model_to_dict(x))
return_val['awards'].append(model_to_dict(x))
db.close()
return return_val
@router.get("/{award_id}")
@router.get('/{award_id}')
async def get_one_award(award_id, csv: Optional[bool] = None):
try:
this_award = Award.get_by_id(award_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No award found with id {award_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No award found with id {award_id}')
if csv:
data_list = [
["id", "name", "season", "timing", "card", "team", "image"],
[
this_award.id,
this_award.name,
this_award.season,
this_award.timing,
this_award.card,
this_award.team,
this_award.image,
],
['id', 'name', 'season', 'timing', 'card', 'team', 'image'],
[this_award.id, this_award.name, this_award.season, this_award.timing, this_award.card,
this_award.team, this_award.image]
]
return_val = DataFrame(data_list).to_csv(header=False, index=False)
return Response(content=return_val, media_type="text/csv")
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = model_to_dict(this_award)
db.close()
return return_val
@router.post("", include_in_schema=PRIVATE_IN_SCHEMA)
@router.post('', include_in_schema=PRIVATE_IN_SCHEMA)
async def post_awards(award: AwardModel, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to post awards. This event has been logged.",
detail='You are not authorized to post awards. This event has been logged.'
)
this_award = Award(
@ -130,40 +118,43 @@ async def post_awards(award: AwardModel, token: str = Depends(oauth2_scheme)):
timing=award.season,
card_id=award.card_id,
team_id=award.team_id,
image=award.image,
image=award.image
)
saved = this_award.save()
if saved == 1:
return_val = model_to_dict(this_award)
db.close()
return return_val
else:
db.close()
raise HTTPException(
status_code=418,
detail="Well slap my ass and call me a teapot; I could not save that roster",
detail='Well slap my ass and call me a teapot; I could not save that roster'
)
@router.delete("/{award_id}", include_in_schema=PRIVATE_IN_SCHEMA)
@router.delete('/{award_id}', include_in_schema=PRIVATE_IN_SCHEMA)
async def delete_award(award_id, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to delete awards. This event has been logged.",
detail='You are not authorized to delete awards. This event has been logged.'
)
try:
this_award = Award.get_by_id(award_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No award found with id {award_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No award found with id {award_id}')
count = this_award.delete_instance()
db.close()
if count == 1:
raise HTTPException(
status_code=200, detail=f"Award {award_id} has been deleted"
)
raise HTTPException(status_code=200, detail=f'Award {award_id} has been deleted')
else:
raise HTTPException(status_code=500, detail=f"Award {award_id} was not deleted")
raise HTTPException(status_code=500, detail=f'Award {award_id} was not deleted')

View File

@ -6,20 +6,19 @@ import logging
import pydantic
from pandas import DataFrame
from ..db_engine import (
db,
BattingStat,
model_to_dict,
fn,
Card,
Player,
Current,
DoesNotExist,
from ..db_engine import db, BattingStat, model_to_dict, fn, Card, Player, Current
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA, PRIVATE_IN_SCHEMA
logging.basicConfig(
filename=LOG_DATA['filename'],
format=LOG_DATA['format'],
level=LOG_DATA['log_level']
)
from ..dependencies import oauth2_scheme, valid_token, PRIVATE_IN_SCHEMA
router = APIRouter(prefix="/api/v2/batstats", tags=["Pre-Season 7 Batting Stats"])
router = APIRouter(
prefix='/api/v2/batstats',
tags=['Pre-Season 7 Batting Stats']
)
class BatStat(pydantic.BaseModel):
@ -56,7 +55,7 @@ class BatStat(pydantic.BaseModel):
csc: Optional[int] = 0
week: int
season: int
created: Optional[int] = int(datetime.timestamp(datetime.now()) * 1000)
created: Optional[int] = int(datetime.timestamp(datetime.now())*1000)
game_id: int
@ -69,20 +68,10 @@ class BatStatReturnList(pydantic.BaseModel):
stats: list[BatStat]
@router.get("", response_model=BatStatReturnList)
@router.get('', response_model=BatStatReturnList)
async def get_batstats(
card_id: int = None,
player_id: int = None,
team_id: int = None,
vs_team_id: int = None,
week: int = None,
season: int = None,
week_start: int = None,
week_end: int = None,
created: int = None,
csv: bool = None,
limit: Optional[int] = 100,
):
card_id: int = None, player_id: int = None, team_id: int = None, vs_team_id: int = None, week: int = None,
season: int = None, week_start: int = None, week_end: int = None, created: int = None, csv: bool = None):
all_stats = BattingStat.select().join(Card).join(Player).order_by(BattingStat.id)
if season is not None:
@ -114,123 +103,43 @@ async def get_batstats(
# db.close()
# raise HTTPException(status_code=404, detail=f'No batting stats found')
limit = max(0, min(limit, 500))
total_count = all_stats.count() if not csv else 0
all_stats = all_stats.limit(limit)
if csv:
data_list = [
[
"id",
"card_id",
"player_id",
"cardset",
"team",
"vs_team",
"pos",
"pa",
"ab",
"run",
"hit",
"rbi",
"double",
"triple",
"hr",
"bb",
"so",
"hbp",
"sac",
"ibb",
"gidp",
"sb",
"cs",
"bphr",
"bpfo",
"bp1b",
"bplo",
"xch",
"xhit",
"error",
"pb",
"sbc",
"csc",
"week",
"season",
"created",
"game_id",
"roster_num",
]
]
data_list = [['id', 'card_id', 'player_id', 'cardset', 'team', 'vs_team', 'pos', 'pa', 'ab', 'run', 'hit', 'rbi', 'double',
'triple', 'hr', 'bb', 'so', 'hbp', 'sac', 'ibb', 'gidp', 'sb', 'cs', 'bphr', 'bpfo', 'bp1b',
'bplo', 'xch', 'xhit', 'error', 'pb', 'sbc', 'csc', 'week', 'season', 'created', 'game_id', 'roster_num']]
for line in all_stats:
data_list.append(
[
line.id,
line.card.id,
line.card.player.player_id,
line.card.player.cardset.name,
line.team.abbrev,
line.vs_team.abbrev,
line.pos,
line.pa,
line.ab,
line.run,
line.hit,
line.rbi,
line.double,
line.triple,
line.hr,
line.bb,
line.so,
line.hbp,
line.sac,
line.ibb,
line.gidp,
line.sb,
line.cs,
line.bphr,
line.bpfo,
line.bp1b,
line.bplo,
line.xch,
line.xhit,
line.error,
line.pb,
line.sbc,
line.csc,
line.week,
line.season,
line.created,
line.game_id,
line.roster_num,
line.id, line.card.id, line.card.player.player_id, line.card.player.cardset.name, line.team.abbrev, line.vs_team.abbrev,
line.pos, line.pa, line.ab, line.run, line.hit, line.rbi, line.double, line.triple, line.hr,
line.bb, line.so, line.hbp, line.sac, line.ibb, line.gidp, line.sb, line.cs, line.bphr, line.bpfo,
line.bp1b, line.bplo, line.xch, line.xhit, line.error, line.pb, line.sbc, line.csc, line.week,
line.season, line.created, line.game_id, line.roster_num
]
)
return_val = DataFrame(data_list).to_csv(header=False, index=False)
return Response(content=return_val, media_type="text/csv")
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = {"count": total_count, "stats": []}
return_val = {'count': all_stats.count(), 'stats': []}
for x in all_stats:
return_val["stats"].append(model_to_dict(x, recurse=False))
return_val['stats'].append(model_to_dict(x, recurse=False))
db.close()
return return_val
@router.get("/player/{player_id}", response_model=BatStat)
@router.get('/player/{player_id}', response_model=BatStat)
async def get_player_stats(
player_id: int,
team_id: int = None,
vs_team_id: int = None,
week_start: int = None,
week_end: int = None,
csv: bool = None,
):
all_stats = (
BattingStat.select(fn.COUNT(BattingStat.created).alias("game_count"))
.join(Card)
.group_by(BattingStat.card)
.where(BattingStat.card.player == player_id)
).scalar()
player_id: int, team_id: int = None, vs_team_id: int = None, week_start: int = None, week_end: int = None,
csv: bool = None):
all_stats = (BattingStat
.select(fn.COUNT(BattingStat.created).alias('game_count'))
.join(Card)
.group_by(BattingStat.card)
.where(BattingStat.card.player == player_id)).scalar()
if team_id is not None:
all_stats = all_stats.where(BattingStat.team_id == team_id)
@ -244,82 +153,40 @@ async def get_player_stats(
if csv:
data_list = [
[
"pa",
"ab",
"run",
"hit",
"rbi",
"double",
"triple",
"hr",
"bb",
"so",
"hbp",
"sac",
"ibb",
"gidp",
"sb",
"cs",
"bphr",
"bpfo",
"bp1b",
"bplo",
"xch",
"xhit",
"error",
"pb",
"sbc",
"csc",
],
[
all_stats.pa_sum,
all_stats.ab_sum,
all_stats.run,
all_stats.hit_sum,
all_stats.rbi_sum,
all_stats.double_sum,
all_stats.triple_sum,
all_stats.hr_sum,
all_stats.bb_sum,
all_stats.so_sum,
all_stats.hbp_sum,
all_stats.sac,
all_stats.ibb_sum,
all_stats.gidp_sum,
all_stats.sb_sum,
all_stats.cs_sum,
all_stats.bphr_sum,
all_stats.bpfo_sum,
all_stats.bp1b_sum,
all_stats.bplo_sum,
all_stats.xch,
all_stats.xhit_sum,
all_stats.error_sum,
all_stats.pb_sum,
all_stats.sbc_sum,
all_stats.csc_sum,
],
'pa', 'ab', 'run', 'hit', 'rbi', 'double', 'triple', 'hr', 'bb', 'so', 'hbp', 'sac', 'ibb', 'gidp',
'sb', 'cs', 'bphr', 'bpfo', 'bp1b', 'bplo', 'xch', 'xhit', 'error', 'pb', 'sbc', 'csc',
],[
all_stats.pa_sum, all_stats.ab_sum, all_stats.run, all_stats.hit_sum, all_stats.rbi_sum,
all_stats.double_sum, all_stats.triple_sum, all_stats.hr_sum, all_stats.bb_sum, all_stats.so_sum,
all_stats.hbp_sum, all_stats.sac, all_stats.ibb_sum, all_stats.gidp_sum, all_stats.sb_sum,
all_stats.cs_sum, all_stats.bphr_sum, all_stats.bpfo_sum, all_stats.bp1b_sum, all_stats.bplo_sum,
all_stats.xch, all_stats.xhit_sum, all_stats.error_sum, all_stats.pb_sum, all_stats.sbc_sum,
all_stats.csc_sum
]
]
return_val = DataFrame(data_list).to_csv(header=False, index=False)
return Response(content=return_val, media_type="text/csv")
db.close()
return Response(content=return_val, media_type='text/csv')
else:
logging.debug(f"stat pull query: {all_stats}\n")
logging.debug(f'stat pull query: {all_stats}\n')
# logging.debug(f'result 0: {all_stats[0]}\n')
for x in all_stats:
logging.debug(f"this_line: {model_to_dict(x)}")
logging.debug(f'this_line: {model_to_dict(x)}')
return_val = model_to_dict(all_stats[0])
db.close()
return return_val
@router.post("", include_in_schema=PRIVATE_IN_SCHEMA)
@router.post('', include_in_schema=PRIVATE_IN_SCHEMA)
async def post_batstats(stats: BattingStatModel, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to post stats. This event has been logged.",
detail='You are not authorized to post stats. This event has been logged.'
)
new_stats = []
@ -358,40 +225,40 @@ async def post_batstats(stats: BattingStatModel, token: str = Depends(oauth2_sch
csc=x.csc,
week=x.week,
season=x.season,
created=datetime.fromtimestamp(x.created / 1000)
if x.created
else datetime.now(),
game_id=x.game_id,
created=datetime.fromtimestamp(x.created / 1000) if x.created else datetime.now(),
game_id=x.game_id
)
new_stats.append(this_stat)
with db.atomic():
BattingStat.bulk_create(new_stats, batch_size=15)
db.close()
raise HTTPException(
status_code=200, detail=f"{len(new_stats)} batting lines have been added"
)
raise HTTPException(status_code=200, detail=f'{len(new_stats)} batting lines have been added')
@router.delete("/{stat_id}", include_in_schema=PRIVATE_IN_SCHEMA)
@router.delete('/{stat_id}', include_in_schema=PRIVATE_IN_SCHEMA)
async def delete_batstat(stat_id, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to delete stats. This event has been logged.",
detail='You are not authorized to delete stats. This event has been logged.'
)
try:
this_stat = BattingStat.get_by_id(stat_id)
except DoesNotExist:
raise HTTPException(status_code=404, detail=f"No stat found with id {stat_id}")
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No stat found with id {stat_id}')
count = this_stat.delete_instance()
db.close()
if count == 1:
raise HTTPException(status_code=200, detail=f"Stat {stat_id} has been deleted")
raise HTTPException(status_code=200, detail=f'Stat {stat_id} has been deleted')
else:
raise HTTPException(status_code=500, detail=f"Stat {stat_id} was not deleted")
raise HTTPException(status_code=500, detail=f'Stat {stat_id} was not deleted')
# @app.get('/api/v1/plays/batting')
@ -596,3 +463,4 @@ async def delete_batstat(stat_id, token: str = Depends(oauth2_scheme)):
# }
# db.close()
# return return_stats

View File

@ -2,7 +2,8 @@ import os
from fastapi import APIRouter, Depends, HTTPException, Query, Response
from fastapi.responses import FileResponse
from typing import Literal, List
from scipy import stats
from typing import Literal, Optional, List
import logging
import pandas as pd
import pydantic
@ -12,14 +13,21 @@ from ..db_engine import (
db,
BattingCardRatings,
model_to_dict,
chunked,
BattingCard,
Player,
query_to_csv,
Team,
CardPosition,
)
from ..db_helpers import upsert_batting_card_ratings
from ..dependencies import oauth2_scheme, valid_token, PRIVATE_IN_SCHEMA
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA, PRIVATE_IN_SCHEMA
logging.basicConfig(
filename=LOG_DATA["filename"],
format=LOG_DATA["format"],
level=LOG_DATA["log_level"],
)
router = APIRouter(prefix="/api/v2/battingcardratings", tags=["battingcardratings"])
RATINGS_FILE = "storage/batting-ratings.csv"
@ -145,17 +153,17 @@ async def get_card_ratings(
vs_hand: Literal["R", "L", "vR", "vL"] = None,
short_output: bool = False,
csv: bool = False,
limit: int = 100,
):
this_team = Team.get_or_none(Team.id == team_id)
logging.debug(f"Team: {this_team} / has_guide: {this_team.has_guide}")
if this_team is None or ts != this_team.team_hash() or this_team.has_guide != 1:
logging.warning(f"Team_id {team_id} attempted to pull ratings")
db.close()
raise HTTPException(
status_code=401, detail="You are not authorized to pull card ratings."
)
# elif not valid_token(token):
# logging.warning('Bad Token: [REDACTED]')
# logging.warning(f'Bad Token: {token}')
# db.close()
# raise HTTPException(
# status_code=401,
@ -179,9 +187,6 @@ async def get_card_ratings(
)
all_ratings = all_ratings.where(BattingCardRatings.battingcard << set_cards)
total_count = all_ratings.count() if not csv else 0
all_ratings = all_ratings.limit(max(0, min(limit, 500)))
if csv:
# return_val = query_to_csv(all_ratings)
return_vals = [model_to_dict(x) for x in all_ratings]
@ -190,17 +195,19 @@ async def get_card_ratings(
x["player_id"] = x["battingcard"]["player"]["player_id"]
del x["battingcard"], x["player"]
db.close()
return Response(
content=pd.DataFrame(return_vals).to_csv(index=False), media_type="text/csv"
)
else:
return_val = {
"count": total_count,
"count": all_ratings.count(),
"ratings": [
model_to_dict(x, recurse=not short_output) for x in all_ratings
],
}
db.close()
return return_val
@ -285,7 +292,7 @@ def get_scouting_dfs(cardset_id: list = None):
)
]
),
name="Arm OF",
name=f"Arm OF",
)
)
series_list.append(
@ -296,7 +303,7 @@ def get_scouting_dfs(cardset_id: list = None):
for x in positions.where(CardPosition.position == "C")
]
),
name="Arm C",
name=f"Arm C",
)
)
series_list.append(
@ -307,7 +314,7 @@ def get_scouting_dfs(cardset_id: list = None):
for x in positions.where(CardPosition.position == "C")
]
),
name="PB C",
name=f"PB C",
)
)
series_list.append(
@ -318,9 +325,10 @@ def get_scouting_dfs(cardset_id: list = None):
for x in positions.where(CardPosition.position == "C")
]
),
name="Throw C",
name=f"Throw C",
)
)
db.close()
logging.debug(f"series_list: {series_list}")
return bat_df.join(series_list)
@ -332,15 +340,16 @@ async def get_card_scouting(team_id: int, ts: str):
logging.debug(f"Team: {this_team} / has_guide: {this_team.has_guide}")
if this_team is None or ts != this_team.team_hash() or this_team.has_guide != 1:
logging.warning(f"Team_id {team_id} attempted to pull ratings")
db.close()
return (
"Your team does not have the ratings guide enabled. If you have purchased a copy ping Cal to "
"make sure it is enabled on your team. If you are interested you can pick it up here (thank you!): "
"https://ko-fi.com/manticorum/shop"
)
if os.path.isfile("storage/batting-ratings.csv"):
if os.path.isfile(f"storage/batting-ratings.csv"):
return FileResponse(
path="storage/batting-ratings.csv",
path=f"storage/batting-ratings.csv",
media_type="text/csv",
# headers=headers
)
@ -353,12 +362,13 @@ async def get_card_scouting(team_id: int, ts: str):
@router.post("/calculate/scouting", include_in_schema=PRIVATE_IN_SCHEMA)
async def post_calc_scouting(token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401, detail="You are not authorized to calculate card ratings."
)
logging.warning("Re-calculating batting ratings\n\n")
logging.warning(f"Re-calculating batting ratings\n\n")
output = get_scouting_dfs()
first = ["player_id", "player_name", "cardset_name", "rarity", "hand", "variant"]
@ -374,9 +384,9 @@ async def post_calc_scouting(token: str = Depends(oauth2_scheme)):
@router.get("/basic")
async def get_basic_scouting(cardset_id: list = Query(default=None)):
if os.path.isfile("storage/batting-basic.csv"):
if os.path.isfile(f"storage/batting-basic.csv"):
return FileResponse(
path="storage/batting-basic.csv",
path=f"storage/batting-basic.csv",
media_type="text/csv",
# headers=headers
)
@ -389,12 +399,13 @@ async def get_basic_scouting(cardset_id: list = Query(default=None)):
@router.post("/calculate/basic", include_in_schema=PRIVATE_IN_SCHEMA)
async def post_calc_basic(token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401, detail="You are not authorized to calculate basic ratings."
)
logging.warning("Re-calculating basic batting ratings\n\n")
logging.warning(f"Re-calculating basic batting ratings\n\n")
raw_data = get_scouting_dfs()
logging.debug(f"output: {raw_data}")
@ -635,18 +646,21 @@ async def post_calc_basic(token: str = Depends(oauth2_scheme)):
@router.get("/{ratings_id}")
async def get_one_rating(ratings_id: int, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401, detail="You are not authorized to pull card ratings."
)
this_rating = BattingCardRatings.get_or_none(BattingCardRatings.id == ratings_id)
if this_rating is None:
db.close()
raise HTTPException(
status_code=404, detail=f"BattingCardRating id {ratings_id} not found"
)
r_data = model_to_dict(this_rating)
db.close()
return r_data
@ -658,7 +672,8 @@ async def get_player_ratings(
token: str = Depends(oauth2_scheme),
):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401, detail="You are not authorized to pull card ratings."
)
@ -671,23 +686,23 @@ async def get_player_ratings(
if variant is not None:
all_cards = all_cards.where(BattingCard.variant << variant)
all_ratings = (
BattingCardRatings.select()
.where(BattingCardRatings.battingcard << all_cards)
.order_by(BattingCardRatings.id)
)
all_ratings = BattingCardRatings.select().where(
BattingCardRatings.battingcard << all_cards
).order_by(BattingCardRatings.id)
return_val = {
"count": all_ratings.count(),
"ratings": [model_to_dict(x, recurse=not short_output) for x in all_ratings],
}
db.close()
return return_val
@router.put("", include_in_schema=PRIVATE_IN_SCHEMA)
async def put_ratings(ratings: RatingsList, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401, detail="You are not authorized to post card ratings."
)
@ -715,24 +730,28 @@ async def put_ratings(ratings: RatingsList, token: str = Depends(oauth2_scheme))
# Use PostgreSQL-compatible upsert helper
upsert_batting_card_ratings(new_ratings, batch_size=30)
db.close()
return f"Updated ratings: {updates}; new ratings: {len(new_ratings)}"
@router.delete("/{ratings_id}", include_in_schema=PRIVATE_IN_SCHEMA)
async def delete_rating(ratings_id: int, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401, detail="You are not authorized to post card ratings."
)
this_rating = BattingCardRatings.get_or_none(BattingCardRatings.id == ratings_id)
if this_rating is None:
db.close()
raise HTTPException(
status_code=404, detail=f"BattingCardRating id {ratings_id} not found"
)
count = this_rating.delete_instance()
db.close()
if count == 1:
return f"Rating {this_rating} has been deleted"

View File

@ -5,10 +5,15 @@ from typing import Literal, Optional, List
import logging
import pydantic
from ..db_engine import db, BattingCard, model_to_dict, fn, Player, MlbPlayer
from ..db_engine import db, BattingCard, model_to_dict, fn, chunked, Player, MlbPlayer
from ..db_helpers import upsert_batting_cards
from ..dependencies import oauth2_scheme, valid_token
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA
logging.basicConfig(
filename=LOG_DATA["filename"],
format=LOG_DATA["format"],
level=LOG_DATA["log_level"],
)
router = APIRouter(prefix="/api/v2/battingcards", tags=["battingcards"])
@ -60,6 +65,7 @@ async def get_batting_cards(
"count": all_cards.count(),
"cards": [model_to_dict(x, recurse=not short_output) for x in all_cards],
}
db.close()
return return_val
@ -67,11 +73,13 @@ async def get_batting_cards(
async def get_one_card(card_id: int):
this_card = BattingCard.get_or_none(BattingCard.id == card_id)
if this_card is None:
db.close()
raise HTTPException(
status_code=404, detail=f"BattingCard id {card_id} not found"
)
r_card = model_to_dict(this_card)
db.close()
return r_card
@ -91,13 +99,15 @@ async def get_player_cards(
"count": all_cards.count(),
"cards": [model_to_dict(x, recurse=not short_output) for x in all_cards],
}
db.close()
return return_val
@router.put("")
async def put_cards(cards: BattingCardList, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to post batting cards. This event has been logged.",
@ -147,6 +157,7 @@ async def put_cards(cards: BattingCardList, token: str = Depends(oauth2_scheme))
# Use PostgreSQL-compatible upsert helper
upsert_batting_cards(new_cards, batch_size=30)
db.close()
return f"Updated cards: {updates}; new cards: {len(new_cards)}"
@ -165,7 +176,8 @@ async def patch_card(
token: str = Depends(oauth2_scheme),
):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to patch batting cards. This event has been logged.",
@ -173,6 +185,7 @@ async def patch_card(
this_card = BattingCard.get_or_none(BattingCard.id == card_id)
if this_card is None:
db.close()
raise HTTPException(
status_code=404, detail=f"BattingCard id {card_id} not found"
)
@ -198,8 +211,10 @@ async def patch_card(
if this_card.save() == 1:
return_val = model_to_dict(this_card)
db.close()
return return_val
else:
db.close()
raise HTTPException(
status_code=418,
detail="Well slap my ass and call me a teapot; I could not save that card",
@ -209,7 +224,8 @@ async def patch_card(
@router.delete("/{card_id}")
async def delete_card(card_id: int, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to delete batting cards. This event has been logged.",
@ -217,11 +233,13 @@ async def delete_card(card_id: int, token: str = Depends(oauth2_scheme)):
this_card = BattingCard.get_or_none(BattingCard.id == card_id)
if this_card is None:
db.close()
raise HTTPException(
status_code=404, detail=f"BattingCard id {card_id} not found"
)
count = this_card.delete_instance()
db.close()
if count == 1:
return f"Card {this_card} has been deleted"
@ -234,7 +252,8 @@ async def delete_card(card_id: int, token: str = Depends(oauth2_scheme)):
@router.delete("")
async def delete_all_cards(token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to delete batting cards. This event has been logged.",

View File

@ -4,10 +4,15 @@ import logging
import pydantic
from pydantic import root_validator
from ..db_engine import db, CardPosition, model_to_dict, Player, fn
from ..db_engine import db, CardPosition, model_to_dict, chunked, Player, fn
from ..db_helpers import upsert_card_positions
from ..dependencies import oauth2_scheme, valid_token
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA
logging.basicConfig(
filename=LOG_DATA["filename"],
format=LOG_DATA["format"],
level=LOG_DATA["log_level"],
)
router = APIRouter(prefix="/api/v2/cardpositions", tags=["cardpositions"])
@ -51,7 +56,6 @@ async def get_card_positions(
cardset_id: list = Query(default=None),
short_output: Optional[bool] = False,
sort: Optional[str] = "innings-desc",
limit: int = 100,
):
all_pos = (
CardPosition.select()
@ -87,13 +91,11 @@ async def get_card_positions(
elif sort == "range-asc":
all_pos = all_pos.order_by(CardPosition.range, CardPosition.id)
limit = max(0, min(limit, 500))
all_pos = all_pos.limit(limit)
return_val = {
"count": all_pos.count(),
"positions": [model_to_dict(x, recurse=not short_output) for x in all_pos],
}
db.close()
return return_val
@ -101,18 +103,21 @@ async def get_card_positions(
async def get_one_position(position_id: int):
this_pos = CardPosition.get_or_none(CardPosition.id == position_id)
if this_pos is None:
db.close()
raise HTTPException(
status_code=404, detail=f"CardPosition id {position_id} not found"
)
r_data = model_to_dict(this_pos)
db.close()
return r_data
@router.put("")
async def put_positions(positions: PositionList, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to post card positions. This event has been logged.",
@ -144,13 +149,15 @@ async def put_positions(positions: PositionList, token: str = Depends(oauth2_sch
# Use PostgreSQL-compatible upsert helper
upsert_card_positions(new_cards, batch_size=30)
db.close()
return f"Updated cards: {updates}; new cards: {len(new_cards)}"
@router.delete("/{position_id}")
async def delete_position(position_id: int, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to delete card positions. This event has been logged.",
@ -158,11 +165,13 @@ async def delete_position(position_id: int, token: str = Depends(oauth2_scheme))
this_pos = CardPosition.get_or_none(CardPosition.id == position_id)
if this_pos is None:
db.close()
raise HTTPException(
status_code=404, detail=f"CardPosition id {position_id} not found"
)
count = this_pos.delete_instance()
db.close()
if count == 1:
return f"Card Position {this_pos} has been deleted"

View File

@ -4,21 +4,19 @@ import logging
import pydantic
from pandas import DataFrame
from ..db_engine import (
db,
Card,
model_to_dict,
Team,
Player,
Pack,
Paperdex,
CARDSETS,
DoesNotExist,
)
from ..dependencies import oauth2_scheme, valid_token
from ..services.refractor_init import _determine_card_type, initialize_card_refractor
from ..db_engine import db, Card, model_to_dict, Team, Player, Pack, Paperdex, CARDSETS
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA
router = APIRouter(prefix="/api/v2/cards", tags=["cards"])
logging.basicConfig(
filename=LOG_DATA['filename'],
format=LOG_DATA['format'],
level=LOG_DATA['log_level']
)
router = APIRouter(
prefix='/api/v2/cards',
tags=['cards']
)
class CardPydantic(pydantic.BaseModel):
@ -33,20 +31,12 @@ class CardModel(pydantic.BaseModel):
cards: List[CardPydantic]
@router.get("")
@router.get('')
async def get_cards(
player_id: Optional[int] = None,
team_id: Optional[int] = None,
pack_id: Optional[int] = None,
value: Optional[int] = None,
min_value: Optional[int] = None,
max_value: Optional[int] = None,
variant: Optional[int] = None,
order_by: Optional[str] = None,
limit: Optional[int] = None,
dupes: Optional[bool] = None,
csv: Optional[bool] = None,
):
player_id: Optional[int] = None, team_id: Optional[int] = None, pack_id: Optional[int] = None,
value: Optional[int] = None, min_value: Optional[int] = None, max_value: Optional[int] = None, variant: Optional[int] = None,
order_by: Optional[str] = None, limit: Optional[int] = None, dupes: Optional[bool] = None,
csv: Optional[bool] = None):
all_cards = Card.select()
# if all_cards.count() == 0:
@ -56,26 +46,23 @@ async def get_cards(
if team_id is not None:
try:
this_team = Team.get_by_id(team_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No team found with id {team_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No team found with id {team_id}')
all_cards = all_cards.where(Card.team == this_team)
if player_id is not None:
try:
this_player = Player.get_by_id(player_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No player found with id {player_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No player found with id {player_id}')
all_cards = all_cards.where(Card.player == this_player)
if pack_id is not None:
try:
this_pack = Pack.get_by_id(pack_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No pack found with id {pack_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No pack found with id {pack_id}')
all_cards = all_cards.where(Card.pack == this_pack)
if value is not None:
all_cards = all_cards.where(Card.value == value)
@ -86,7 +73,7 @@ async def get_cards(
if max_value is not None:
all_cards = all_cards.where(Card.value <= max_value)
if order_by is not None:
if order_by.lower() == "new":
if order_by.lower() == 'new':
all_cards = all_cards.order_by(-Card.id)
else:
all_cards = all_cards.order_by(Card.id)
@ -94,10 +81,8 @@ async def get_cards(
all_cards = all_cards.limit(limit)
if dupes:
if team_id is None:
raise HTTPException(
status_code=400, detail="Dupe checking must include a team_id"
)
logging.debug("dupe check")
raise HTTPException(status_code=400, detail='Dupe checking must include a team_id')
logging.debug(f'dupe check')
p_query = Card.select(Card.player).where(Card.team_id == team_id)
seen = set()
dupes = []
@ -113,90 +98,81 @@ async def get_cards(
# raise HTTPException(status_code=404, detail=f'No cards found')
if csv:
data_list = [
["id", "player", "cardset", "rarity", "team", "pack", "value"]
] # , 'variant']]
data_list = [['id', 'player', 'cardset', 'rarity', 'team', 'pack', 'value']] #, 'variant']]
for line in all_cards:
data_list.append(
[
line.id,
line.player.p_name,
line.player.cardset,
line.player.rarity,
line.team.abbrev,
line.pack,
line.id, line.player.p_name, line.player.cardset, line.player.rarity, line.team.abbrev, line.pack,
line.value, # line.variant
]
)
return_val = DataFrame(data_list).to_csv(header=False, index=False)
return Response(content=return_val, media_type="text/csv")
db.close()
return Response(content=return_val, media_type='text/csv')
else:
card_list = list(all_cards)
player_ids = [c.player_id for c in card_list if c.player_id is not None]
dex_by_player = {}
if player_ids:
for row in Paperdex.select().where(Paperdex.player_id << player_ids):
dex_by_player.setdefault(row.player_id, []).append(row)
return_val = {"count": len(card_list), "cards": []}
for x in card_list:
return_val = {'count': all_cards.count(), 'cards': []}
for x in all_cards:
this_record = model_to_dict(x)
logging.debug(f"this_record: {this_record}")
logging.debug(f'this_record: {this_record}')
entries = dex_by_player.get(x.player_id, [])
this_record["player"]["paperdex"] = {
"count": len(entries),
"paperdex": [model_to_dict(y, recurse=False) for y in entries],
}
this_dex = Paperdex.select().where(Paperdex.player == x)
this_record['player']['paperdex'] = {'count': this_dex.count(), 'paperdex': []}
for y in this_dex:
this_record['player']['paperdex']['paperdex'].append(model_to_dict(y, recurse=False))
return_val["cards"].append(this_record)
return_val['cards'].append(this_record)
# return_val['cards'].append(model_to_dict(x))
db.close()
return return_val
@router.get("/{card_id}")
@router.get('/{card_id}')
async def v1_cards_get_one(card_id, csv: Optional[bool] = False):
try:
this_card = Card.get_by_id(card_id)
except DoesNotExist:
raise HTTPException(status_code=404, detail=f"No card found with id {card_id}")
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No card found with id {card_id}')
if csv:
data_list = [
["id", "player", "team", "pack", "value"],
[
this_card.id,
this_card.player,
this_card.team.abbrev,
this_card.pack,
this_card.value,
],
['id', 'player', 'team', 'pack', 'value', 'roster1', 'roster2', 'roster3'],
[this_card.id, this_card.player, this_card.team.abbrev, this_card.pack, this_card.value,
this_card.roster1.name, this_card.roster2.name, this_card.roster3.name]
]
return_val = DataFrame(data_list).to_csv(header=False, index=False)
return Response(content=return_val, media_type="text/csv")
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = model_to_dict(this_card)
db.close()
return return_val
@router.post("")
@router.post('')
async def v1_cards_post(cards: CardModel, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to post cards. This event has been logged.",
detail='You are not authorized to post cards. This event has been logged.'
)
last_card = Card.select(Card.id).order_by(-Card.id).limit(1)
lc_id = last_card[0].id
new_cards = []
player_ids = []
inc_dex = True
this_team = Team.get_by_id(cards.cards[0].team_id)
if this_team.is_ai or "Gauntlet" in this_team.abbrev:
if this_team.is_ai or 'Gauntlet' in this_team.abbrev:
inc_dex = False
# new_dex = []
@ -216,34 +192,18 @@ async def v1_cards_post(cards: CardModel, token: str = Depends(oauth2_scheme)):
with db.atomic():
Card.bulk_create(new_cards, batch_size=15)
cost_query = Player.update(cost=Player.cost + 1).where(
Player.player_id << player_ids
)
cost_query = Player.update(cost=Player.cost + 1).where(Player.player_id << player_ids)
cost_query.execute()
# sheets.post_new_cards(SHEETS_AUTH, lc_id)
db.close()
# WP-10: initialize refractor state for each new card (fire-and-forget)
for x in cards.cards:
try:
this_player = Player.get_by_id(x.player_id)
card_type = _determine_card_type(this_player)
initialize_card_refractor(x.player_id, x.team_id, card_type)
except Exception:
logging.exception(
"refractor hook: unexpected error for player_id=%s team_id=%s",
x.player_id,
x.team_id,
)
raise HTTPException(
status_code=200, detail=f"{len(new_cards)} cards have been added"
)
raise HTTPException(status_code=200, detail=f'{len(new_cards)} cards have been added')
# @router.post('/ai-update')
# async def v1_cards_ai_update(token: str = Depends(oauth2_scheme)):
# if not valid_token(token):
# logging.warning('Bad Token: [REDACTED]')
# logging.warning(f'Bad Token: {token}')
# db.close()
# raise HTTPException(
# status_code=401,
@ -254,27 +214,22 @@ async def v1_cards_post(cards: CardModel, token: str = Depends(oauth2_scheme)):
# raise HTTPException(status_code=200, detail=f'Just sent AI cards to sheets')
@router.post("/legal-check/{rarity_name}")
@router.post('/legal-check/{rarity_name}')
async def v1_cards_legal_check(
rarity_name: str,
card_id: list = Query(default=None),
token: str = Depends(oauth2_scheme),
):
rarity_name: str, card_id: list = Query(default=None), token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
raise HTTPException(status_code=401, detail="Unauthorized")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail='Unauthorized'
)
if rarity_name not in CARDSETS.keys():
return f"Rarity name {rarity_name} not a valid check"
return f'Rarity name {rarity_name} not a valid check'
# Handle case where card_id is passed as a stringified list
if (
card_id
and len(card_id) == 1
and isinstance(card_id[0], str)
and card_id[0].startswith("[")
):
if card_id and len(card_id) == 1 and isinstance(card_id[0], str) and card_id[0].startswith('['):
import ast
try:
card_id = [int(x) for x in ast.literal_eval(card_id[0])]
except (ValueError, SyntaxError):
@ -284,86 +239,82 @@ async def v1_cards_legal_check(
all_cards = Card.select().where(Card.id << card_id)
for x in all_cards:
if x.player.cardset_id not in CARDSETS[rarity_name]["human"]:
if x.player.cardset_id not in CARDSETS[rarity_name]['human']:
if x.player.p_name in x.player.description:
bad_cards.append(x.player.description)
else:
bad_cards.append(f"{x.player.description} {x.player.p_name}")
bad_cards.append(f'{x.player.description} {x.player.p_name}')
return {"count": len(bad_cards), "bad_cards": bad_cards}
return {'count': len(bad_cards), 'bad_cards': bad_cards}
@router.post("/post-update/{starting_id}")
@router.post('/post-update/{starting_id}')
async def v1_cards_post_update(starting_id: int, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to update card lists. This event has been logged.",
detail='You are not authorized to update card lists. This event has been logged.'
)
# sheets.post_new_cards(SHEETS_AUTH, starting_id)
raise HTTPException(
status_code=200,
detail=f"Just sent cards to sheets starting at ID {starting_id}",
)
db.close()
raise HTTPException(status_code=200, detail=f'Just sent cards to sheets starting at ID {starting_id}')
@router.post("/post-delete")
@router.post('/post-delete')
async def v1_cards_post_delete(del_ids: str, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to delete card lists. This event has been logged.",
detail='You are not authorized to delete card lists. This event has been logged.'
)
logging.info(f"del_ids: {del_ids} / type: {type(del_ids)}")
logging.info(f'del_ids: {del_ids} / type: {type(del_ids)}')
# sheets.post_deletion(SHEETS_AUTH, del_ids.split(','))
@router.post("/wipe-team/{team_id}")
@router.post('/wipe-team/{team_id}')
async def v1_cards_wipe_team(team_id: int, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to wipe teams. This event has been logged.",
detail='You are not authorized to wipe teams. This event has been logged.'
)
try:
this_team = Team.get_by_id(team_id)
except DoesNotExist:
logging.error(f"/cards/wipe-team/{team_id} - could not find team")
raise HTTPException(status_code=404, detail=f"Team {team_id} not found")
except Exception as e:
logging.error(f'/cards/wipe-team/{team_id} - could not find team')
raise HTTPException(status_code=404, detail=f'Team {team_id} not found')
t_query = Card.update(team=None).where(Card.team == this_team).execute()
return f"Wiped {t_query} cards"
db.close()
return f'Wiped {t_query} cards'
@router.patch("/{card_id}")
@router.patch('/{card_id}')
async def v1_cards_patch(
card_id,
player_id: Optional[int] = None,
team_id: Optional[int] = None,
pack_id: Optional[int] = None,
value: Optional[int] = None,
variant: Optional[int] = None,
roster1_id: Optional[int] = None,
roster2_id: Optional[int] = None,
roster3_id: Optional[int] = None,
token: str = Depends(oauth2_scheme),
):
card_id, player_id: Optional[int] = None, team_id: Optional[int] = None, pack_id: Optional[int] = None,
value: Optional[int] = None, variant: Optional[int] = None, roster1_id: Optional[int] = None, roster2_id: Optional[int] = None,
roster3_id: Optional[int] = None, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to patch cards. This event has been logged.",
detail='You are not authorized to patch cards. This event has been logged.'
)
try:
this_card = Card.get_by_id(card_id)
except DoesNotExist:
raise HTTPException(status_code=404, detail=f"No card found with id {card_id}")
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No card found with id {card_id}')
if player_id is not None:
this_card.player_id = player_id
@ -387,30 +338,35 @@ async def v1_cards_patch(
if this_card.save() == 1:
return_val = model_to_dict(this_card)
db.close()
return return_val
else:
db.close()
raise HTTPException(
status_code=418,
detail="Well slap my ass and call me a teapot; I could not save that rarity",
detail='Well slap my ass and call me a teapot; I could not save that rarity'
)
@router.delete("/{card_id}")
@router.delete('/{card_id}')
async def v1_cards_delete(card_id, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to delete packs. This event has been logged.",
detail='You are not authorized to delete packs. This event has been logged.'
)
try:
this_card = Card.get_by_id(card_id)
except DoesNotExist:
raise HTTPException(status_code=404, detail=f"No cards found with id {card_id}")
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No cards found with id {card_id}')
count = this_card.delete_instance()
db.close()
if count == 1:
raise HTTPException(status_code=200, detail=f"Card {card_id} has been deleted")
raise HTTPException(status_code=200, detail=f'Card {card_id} has been deleted')
else:
raise HTTPException(status_code=500, detail=f"Card {card_id} was not deleted")
raise HTTPException(status_code=500, detail=f'Card {card_id} was not deleted')

View File

@ -4,9 +4,14 @@ import logging
import pydantic
from pandas import DataFrame
from ..db_engine import Cardset, model_to_dict, fn, Event, DoesNotExist
from ..dependencies import oauth2_scheme, valid_token
from ..db_engine import db, Cardset, model_to_dict, fn, Event
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA
logging.basicConfig(
filename=LOG_DATA['filename'],
format=LOG_DATA['format'],
level=LOG_DATA['log_level']
)
router = APIRouter(
prefix='/api/v2/cardsets',
@ -31,6 +36,7 @@ async def get_cardsets(
all_cardsets = Cardset.select().order_by(Cardset.id)
if all_cardsets.count() == 0:
db.close()
raise HTTPException(status_code=404, detail=f'There are no cardsets to filter')
if name is not None:
@ -41,7 +47,7 @@ async def get_cardsets(
try:
this_event = Event.get_by_id(event_id)
all_cardsets = all_cardsets.where(Cardset.event == this_event)
except DoesNotExist as e:
except Exception as e:
logging.error(f'Failed to find event {event_id}: {e}')
raise HTTPException(status_code=404, detail=f'Event id {event_id} not found')
if in_packs is not None:
@ -50,6 +56,7 @@ async def get_cardsets(
all_cardsets = all_cardsets.where(Cardset.ranked_legal == ranked_legal)
if all_cardsets.count() == 0:
db.close()
raise HTTPException(status_code=404, detail=f'No cardsets found')
if csv:
@ -65,6 +72,7 @@ async def get_cardsets(
)
return_val = DataFrame(data_list).to_csv(header=False, index=False)
db.close()
return Response(content=return_val, media_type='text/csv')
else:
@ -72,6 +80,7 @@ async def get_cardsets(
for x in all_cardsets:
return_val['cardsets'].append(model_to_dict(x))
db.close()
return return_val
@ -104,8 +113,9 @@ async def search_cardsets(
try:
this_event = Event.get_by_id(event_id)
all_cardsets = all_cardsets.where(Cardset.event == this_event)
except DoesNotExist as e:
except Exception as e:
logging.error(f'Failed to find event {event_id}: {e}')
db.close()
raise HTTPException(status_code=404, detail=f'Event id {event_id} not found')
# Convert to list for sorting
@ -143,6 +153,7 @@ async def search_cardsets(
'cardsets': [model_to_dict(x) for x in limited_results]
}
db.close()
return return_val
@ -150,7 +161,8 @@ async def search_cardsets(
async def get_one_cardset(cardset_id, csv: Optional[bool] = False):
try:
this_cardset = Cardset.get_by_id(cardset_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No cardset found with id {cardset_id}')
if csv:
@ -160,16 +172,19 @@ async def get_one_cardset(cardset_id, csv: Optional[bool] = False):
]
return_val = DataFrame(data_list).to_csv(header=False, index=False)
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = model_to_dict(this_cardset)
db.close()
return return_val
@router.post('')
async def post_cardsets(cardset: CardsetModel, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning('Bad Token: [REDACTED]')
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail='You are not authorized to post cardsets. This event has been logged.'
@ -177,6 +192,7 @@ async def post_cardsets(cardset: CardsetModel, token: str = Depends(oauth2_schem
dupe_set = Cardset.get_or_none(Cardset.name == cardset.name)
if dupe_set:
db.close()
raise HTTPException(status_code=400, detail=f'There is already a cardset using {cardset.name}')
this_cardset = Cardset(**cardset.__dict__)
@ -184,6 +200,7 @@ async def post_cardsets(cardset: CardsetModel, token: str = Depends(oauth2_schem
saved = this_cardset.save()
if saved == 1:
return_val = model_to_dict(this_cardset)
db.close()
return return_val
else:
raise HTTPException(
@ -198,14 +215,16 @@ async def patch_cardsets(
for_purchase: Optional[bool] = None, total_cards: Optional[int] = None, ranked_legal: Optional[bool] = None,
token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning('Bad Token: [REDACTED]')
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail='You are not authorized to patch cardsets. This event has been logged.'
)
try:
this_cardset = Cardset.get_by_id(cardset_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No cardset found with id {cardset_id}')
if name is not None:
@ -223,6 +242,7 @@ async def patch_cardsets(
if this_cardset.save() == 1:
return_val = model_to_dict(this_cardset)
db.close()
return return_val
else:
raise HTTPException(
@ -234,17 +254,20 @@ async def patch_cardsets(
@router.delete('/{cardset_id}')
async def delete_cardsets(cardset_id, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning('Bad Token: [REDACTED]')
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail='You are not authorized to delete cardsets. This event has been logged.'
)
try:
this_cardset = Cardset.get_by_id(cardset_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No cardset found with id {cardset_id}')
count = this_cardset.delete_instance()
db.close()
if count == 1:
raise HTTPException(status_code=200, detail=f'Cardset {cardset_id} has been deleted')

View File

@ -4,9 +4,14 @@ from typing import Optional
import logging
import pydantic
from ..db_engine import Current, model_to_dict, DoesNotExist
from ..dependencies import oauth2_scheme, valid_token, PRIVATE_IN_SCHEMA
from ..db_engine import db, Current, model_to_dict
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA, PRIVATE_IN_SCHEMA
logging.basicConfig(
filename=LOG_DATA['filename'],
format=LOG_DATA['format'],
level=LOG_DATA['log_level']
)
router = APIRouter(
prefix='/api/v2/current',
@ -35,9 +40,11 @@ async def get_current(season: Optional[int] = None, csv: Optional[bool] = False)
]
return_val = DataFrame(current_list).to_csv(header=False, index=False)
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = model_to_dict(current)
db.close()
return return_val
@ -45,7 +52,8 @@ async def get_current(season: Optional[int] = None, csv: Optional[bool] = False)
async def get_one_current(current_id, csv: Optional[bool] = False):
try:
current = Current.get_by_id(current_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No current found with id {current_id}')
if csv:
@ -55,16 +63,19 @@ async def get_one_current(current_id, csv: Optional[bool] = False):
]
return_val = DataFrame(current_list).to_csv(header=False, index=False)
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = model_to_dict(current)
db.close()
return return_val
@router.post('', include_in_schema=PRIVATE_IN_SCHEMA)
async def post_current(current: CurrentModel, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning('Bad Token: [REDACTED]')
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail='You are not authorized to post current. This event has been logged.'
@ -72,6 +83,7 @@ async def post_current(current: CurrentModel, token: str = Depends(oauth2_scheme
dupe_curr = Current.get_or_none(Current.season == current.season)
if dupe_curr:
db.close()
raise HTTPException(status_code=400, detail=f'There is already a current for season {current.season}')
this_curr = Current(
@ -84,6 +96,7 @@ async def post_current(current: CurrentModel, token: str = Depends(oauth2_scheme
saved = this_curr.save()
if saved == 1:
return_val = model_to_dict(this_curr)
db.close()
return return_val
else:
raise HTTPException(status_code=418, detail='Well slap my ass and call me a teapot; I could not save that team')
@ -95,14 +108,16 @@ async def patch_current(
gsheet_template: Optional[str] = None, gsheet_version: Optional[str] = None,
live_scoreboard: Optional[int] = None, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning('Bad Token: [REDACTED]')
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail='You are not authorized to patch current. This event has been logged.'
)
try:
current = Current.get_by_id(current_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No current found with id {current_id}')
if season is not None:
@ -118,6 +133,7 @@ async def patch_current(
if current.save() == 1:
return_val = model_to_dict(current)
db.close()
return return_val
else:
raise HTTPException(
@ -129,17 +145,20 @@ async def patch_current(
@router.delete('/{current_id}', include_in_schema=PRIVATE_IN_SCHEMA)
async def delete_current(current_id, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning('Bad Token: [REDACTED]')
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail='You are not authorized to delete current. This event has been logged.'
)
try:
this_curr = Current.get_by_id(current_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No current found with id {current_id}')
count = this_curr.delete_instance()
db.close()
if count == 1:
raise HTTPException(status_code=200, detail=f'Current {current_id} has been deleted')

View File

@ -1,5 +1,6 @@
from fastapi import APIRouter, Depends, HTTPException, Query, Response
from typing import List, Optional
from typing import List, Optional, Literal
import copy
import logging
import pandas as pd
import pydantic
@ -10,14 +11,20 @@ from ..db_engine import (
StratGame,
Player,
model_to_dict,
chunked,
fn,
Team,
Card,
StratPlay,
)
from ..db_helpers import upsert_decisions
from ..dependencies import oauth2_scheme, valid_token
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA
logging.basicConfig(
filename=LOG_DATA["filename"],
format=LOG_DATA["format"],
level=LOG_DATA["log_level"],
)
router = APIRouter(prefix="/api/v2/decisions", tags=["decisions"])
@ -105,6 +112,7 @@ async def get_decisions(
"count": all_dec.count(),
"decisions": [model_to_dict(x, recurse=not short_output) for x in all_dec],
}
db.close()
if csv:
return_vals = return_dec["decisions"]
@ -128,6 +136,7 @@ async def get_decisions(
exclude = first + ["lob_all", "lob_all_rate", "lob_2outs", "rbi%"]
output = output[first + [col for col in output.columns if col not in exclude]]
db.close()
return Response(
content=pd.DataFrame(output).to_csv(index=False), media_type="text/csv"
)
@ -180,6 +189,7 @@ async def get_decisions_for_rest(
return_dec.append(this_val)
db.close()
return Response(
content=pd.DataFrame(return_dec).to_csv(index=False, header=False),
media_type="text/csv",
@ -201,11 +211,12 @@ async def patch_decision(
token: str = Depends(oauth2_scheme),
):
if not valid_token(token):
logging.warning("patch_decision - Bad Token: [REDACTED]")
logging.warning(f"patch_decision - Bad Token: {token}")
raise HTTPException(status_code=401, detail="Unauthorized")
this_dec = Decision.get_or_none(Decision.id == decision_id)
if this_dec is None:
db.close()
raise HTTPException(
status_code=404, detail=f"Decision ID {decision_id} not found"
)
@ -231,8 +242,10 @@ async def patch_decision(
if this_dec.save() == 1:
d_result = model_to_dict(this_dec)
db.close()
return d_result
else:
db.close()
raise HTTPException(
status_code=500, detail=f"Unable to patch decision {decision_id}"
)
@ -241,7 +254,7 @@ async def patch_decision(
@router.post("")
async def post_decisions(dec_list: DecisionList, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("post_decisions - Bad Token: [REDACTED]")
logging.warning(f"post_decisions - Bad Token: {token}")
raise HTTPException(status_code=401, detail="Unauthorized")
new_dec = []
@ -264,6 +277,7 @@ async def post_decisions(dec_list: DecisionList, token: str = Depends(oauth2_sch
with db.atomic():
# Use PostgreSQL-compatible upsert helper
upsert_decisions(new_dec, batch_size=10)
db.close()
return f"Inserted {len(new_dec)} decisions"
@ -271,16 +285,18 @@ async def post_decisions(dec_list: DecisionList, token: str = Depends(oauth2_sch
@router.delete("/{decision_id}")
async def delete_decision(decision_id: int, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("delete_decision - Bad Token: [REDACTED]")
logging.warning(f"delete_decision - Bad Token: {token}")
raise HTTPException(status_code=401, detail="Unauthorized")
this_dec = Decision.get_or_none(Decision.id == decision_id)
if this_dec is None:
db.close()
raise HTTPException(
status_code=404, detail=f"Decision ID {decision_id} not found"
)
count = this_dec.delete_instance()
db.close()
if count == 1:
return f"Decision {decision_id} has been deleted"
@ -293,14 +309,16 @@ async def delete_decision(decision_id: int, token: str = Depends(oauth2_scheme))
@router.delete("/game/{game_id}")
async def delete_decisions_game(game_id: int, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("delete_decisions_game - Bad Token: [REDACTED]")
logging.warning(f"delete_decisions_game - Bad Token: {token}")
raise HTTPException(status_code=401, detail="Unauthorized")
this_game = StratGame.get_or_none(StratGame.id == game_id)
if not this_game:
db.close()
raise HTTPException(status_code=404, detail=f"Game ID {game_id} not found")
count = Decision.delete().where(Decision.game == this_game).execute()
db.close()
if count > 0:
return f"Deleted {count} decisions matching Game ID {game_id}"

View File

@ -4,11 +4,19 @@ import logging
import pydantic
from pandas import DataFrame
from ..db_engine import Event, model_to_dict, fn, DoesNotExist
from ..dependencies import oauth2_scheme, valid_token
from ..db_engine import db, Event, model_to_dict, fn
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA
logging.basicConfig(
filename=LOG_DATA['filename'],
format=LOG_DATA['format'],
level=LOG_DATA['log_level']
)
router = APIRouter(prefix="/api/v2/events", tags=["events"])
router = APIRouter(
prefix='/api/v2/events',
tags=['events']
)
class EventModel(pydantic.BaseModel):
@ -20,102 +28,83 @@ class EventModel(pydantic.BaseModel):
active: Optional[bool] = False
@router.get("")
@router.get('')
async def v1_events_get(
name: Optional[str] = None,
in_desc: Optional[str] = None,
active: Optional[bool] = None,
csv: Optional[bool] = None,
limit: Optional[int] = 100,
):
name: Optional[str] = None, in_desc: Optional[str] = None, active: Optional[bool] = None,
csv: Optional[bool] = None):
all_events = Event.select().order_by(Event.id)
if name is not None:
all_events = all_events.where(fn.Lower(Event.name) == name.lower())
if in_desc is not None:
all_events = all_events.where(
(fn.Lower(Event.short_desc).contains(in_desc.lower()))
| (fn.Lower(Event.long_desc).contains(in_desc.lower()))
(fn.Lower(Event.short_desc).contains(in_desc.lower())) |
(fn.Lower(Event.long_desc).contains(in_desc.lower()))
)
if active is not None:
all_events = all_events.where(Event.active == active)
total_count = all_events.count() if not csv else 0
all_events = all_events.limit(max(0, min(limit, 500)))
if csv:
data_list = [
["id", "name", "short_desc", "long_desc", "url", "thumbnail", "active"]
]
data_list = [['id', 'name', 'short_desc', 'long_desc', 'url', 'thumbnail', 'active']]
for line in all_events:
data_list.append(
[
line.id,
line.name,
line.short_desc,
line.long_desc,
line.url,
line.thumbnail,
line.active,
line.id, line.name, line.short_desc, line.long_desc, line.url, line.thumbnail, line.active
]
)
return_val = DataFrame(data_list).to_csv(header=False, index=False)
return Response(content=return_val, media_type="text/csv")
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = {"count": total_count, "events": []}
return_val = {'count': all_events.count(), 'events': []}
for x in all_events:
return_val["events"].append(model_to_dict(x))
return_val['events'].append(model_to_dict(x))
db.close()
return return_val
@router.get("/{event_id}")
@router.get('/{event_id}')
async def v1_events_get_one(event_id, csv: Optional[bool] = False):
try:
this_event = Event.get_by_id(event_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No event found with id {event_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No event found with id {event_id}')
if csv:
data_list = [
["id", "name", "short_desc", "long_desc", "url", "thumbnail", "active"],
[
this_event.id,
this_event.name,
this_event.short_desc,
this_event.long_desc,
this_event.url,
this_event.thumbnail,
this_event.active,
],
['id', 'name', 'short_desc', 'long_desc', 'url', 'thumbnail', 'active'],
[this_event.id, this_event.name, this_event.short_desc, this_event.long_desc, this_event.url,
this_event.thumbnail, this_event.active]
]
return_val = DataFrame(data_list).to_csv(header=False, index=False)
return Response(content=return_val, media_type="text/csv")
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = model_to_dict(this_event)
db.close()
return return_val
@router.post("")
@router.post('')
async def v1_events_post(event: EventModel, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to post events. This event has been logged.",
detail='You are not authorized to post events. This event has been logged.'
)
dupe_event = Event.get_or_none(Event.name == event.name)
if dupe_event:
raise HTTPException(
status_code=400, detail=f"There is already an event using {event.name}"
)
db.close()
raise HTTPException(status_code=400, detail=f'There is already an event using {event.name}')
this_event = Event(
name=event.name,
@ -123,43 +112,39 @@ async def v1_events_post(event: EventModel, token: str = Depends(oauth2_scheme))
long_desc=event.long_desc,
url=event.url,
thumbnail=event.thumbnail,
active=event.active,
active=event.active
)
saved = this_event.save()
if saved == 1:
return_val = model_to_dict(this_event)
db.close()
return return_val
else:
db.close()
raise HTTPException(
status_code=418,
detail="Well slap my ass and call me a teapot; I could not save that cardset",
detail='Well slap my ass and call me a teapot; I could not save that cardset'
)
@router.patch("/{event_id}")
@router.patch('/{event_id}')
async def v1_events_patch(
event_id,
name: Optional[str] = None,
short_desc: Optional[str] = None,
long_desc: Optional[str] = None,
url: Optional[str] = None,
thumbnail: Optional[str] = None,
active: Optional[bool] = None,
token: str = Depends(oauth2_scheme),
):
event_id, name: Optional[str] = None, short_desc: Optional[str] = None, long_desc: Optional[str] = None,
url: Optional[str] = None, thumbnail: Optional[str] = None, active: Optional[bool] = None,
token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to patch events. This event has been logged.",
detail='You are not authorized to patch events. This event has been logged.'
)
try:
this_event = Event.get_by_id(event_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No event found with id {event_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No event found with id {event_id}')
if name is not None:
this_event.name = name
@ -176,34 +161,35 @@ async def v1_events_patch(
if this_event.save() == 1:
return_val = model_to_dict(this_event)
db.close()
return return_val
else:
db.close()
raise HTTPException(
status_code=418,
detail="Well slap my ass and call me a teapot; I could not save that event",
detail='Well slap my ass and call me a teapot; I could not save that event'
)
@router.delete("/{event_id}")
@router.delete('/{event_id}')
async def v1_events_delete(event_id, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to delete events. This event has been logged.",
detail='You are not authorized to delete events. This event has been logged.'
)
try:
this_event = Event.get_by_id(event_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No event found with id {event_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No event found with id {event_id}')
count = this_event.delete_instance()
db.close()
if count == 1:
raise HTTPException(
status_code=200, detail=f"Event {event_id} has been deleted"
)
raise HTTPException(status_code=200, detail=f'Event {event_id} has been deleted')
else:
raise HTTPException(status_code=500, detail=f"Event {event_id} was not deleted")
raise HTTPException(status_code=500, detail=f'Event {event_id} was not deleted')

View File

@ -4,11 +4,19 @@ import logging
import pydantic
from pandas import DataFrame
from ..db_engine import GameRewards, model_to_dict, DoesNotExist
from ..dependencies import oauth2_scheme, valid_token
from ..db_engine import db, GameRewards, model_to_dict
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA
logging.basicConfig(
filename=LOG_DATA['filename'],
format=LOG_DATA['format'],
level=LOG_DATA['log_level']
)
router = APIRouter(prefix="/api/v2/gamerewards", tags=["gamerewards"])
router = APIRouter(
prefix='/api/v2/gamerewards',
tags=['gamerewards']
)
class GameRewardModel(pydantic.BaseModel):
@ -18,15 +26,10 @@ class GameRewardModel(pydantic.BaseModel):
money: Optional[int] = None
@router.get("")
@router.get('')
async def v1_gamerewards_get(
name: Optional[str] = None,
pack_type_id: Optional[int] = None,
player_id: Optional[int] = None,
money: Optional[int] = None,
csv: Optional[bool] = None,
limit: int = 100,
):
name: Optional[str] = None, pack_type_id: Optional[int] = None, player_id: Optional[int] = None,
money: Optional[int] = None, csv: Optional[bool] = None):
all_rewards = GameRewards.select().order_by(GameRewards.id)
# if all_rewards.count() == 0:
@ -42,111 +45,98 @@ async def v1_gamerewards_get(
if money is not None:
all_rewards = all_rewards.where(GameRewards.money == money)
limit = max(0, min(limit, 500))
total_count = all_rewards.count() if not csv else 0
all_rewards = all_rewards.limit(limit)
if csv:
data_list = [["id", "pack_type_id", "player_id", "money"]]
data_list = [['id', 'pack_type_id', 'player_id', 'money']]
for line in all_rewards:
data_list.append(
[
line.id,
line.pack_type_id if line.pack_type else None,
line.player_id if line.player else None,
line.money,
]
)
data_list.append([
line.id, line.pack_type_id if line.pack_type else None, line.player_id if line.player else None,
line.money
])
return_val = DataFrame(data_list).to_csv(header=False, index=False)
return Response(content=return_val, media_type="text/csv")
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = {"count": total_count, "gamerewards": []}
return_val = {'count': all_rewards.count(), 'gamerewards': []}
for x in all_rewards:
return_val["gamerewards"].append(model_to_dict(x))
return_val['gamerewards'].append(model_to_dict(x))
db.close()
return return_val
@router.get("/{gameaward_id}")
@router.get('/{gameaward_id}')
async def v1_gamerewards_get_one(gamereward_id, csv: Optional[bool] = None):
try:
this_game_reward = GameRewards.get_by_id(gamereward_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No game reward found with id {gamereward_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No game reward found with id {gamereward_id}')
if csv:
data_list = [
["id", "pack_type_id", "player_id", "money"],
[
this_game_reward.id,
this_game_reward.pack_type_id if this_game_reward.pack_type else None,
this_game_reward.player_id if this_game_reward.player else None,
this_game_reward.money,
],
['id', 'pack_type_id', 'player_id', 'money'],
[this_game_reward.id, this_game_reward.pack_type_id if this_game_reward.pack_type else None,
this_game_reward.player_id if this_game_reward.player else None, this_game_reward.money]
]
return_val = DataFrame(data_list).to_csv(header=False, index=False)
return Response(content=return_val, media_type="text/csv")
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = model_to_dict(this_game_reward)
db.close()
return return_val
@router.post("")
async def v1_gamerewards_post(
game_reward: GameRewardModel, token: str = Depends(oauth2_scheme)
):
@router.post('')
async def v1_gamerewards_post(game_reward: GameRewardModel, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to post game rewards. This event has been logged.",
detail='You are not authorized to post game rewards. This event has been logged.'
)
this_award = GameRewards(
name=game_reward.name,
pack_type_id=game_reward.pack_type_id,
player_id=game_reward.player_id,
money=game_reward.money,
money=game_reward.money
)
saved = this_award.save()
if saved == 1:
return_val = model_to_dict(this_award)
db.close()
return return_val
else:
db.close()
raise HTTPException(
status_code=418,
detail="Well slap my ass and call me a teapot; I could not save that roster",
detail='Well slap my ass and call me a teapot; I could not save that roster'
)
@router.patch("/{game_reward_id}")
@router.patch('/{game_reward_id}')
async def v1_gamerewards_patch(
game_reward_id: int,
name: Optional[str] = None,
pack_type_id: Optional[int] = None,
player_id: Optional[int] = None,
money: Optional[int] = None,
token: str = Depends(oauth2_scheme),
):
game_reward_id: int, name: Optional[str] = None, pack_type_id: Optional[int] = None,
player_id: Optional[int] = None, money: Optional[int] = None, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to patch gamerewards. This event has been logged.",
detail='You are not authorized to patch gamerewards. This event has been logged.'
)
try:
this_game_reward = GameRewards.get_by_id(game_reward_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No game reward found with id {game_reward_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No game reward found with id {game_reward_id}')
if name is not None:
this_game_reward.name = name
@ -168,36 +158,35 @@ async def v1_gamerewards_patch(
if this_game_reward.save() == 1:
return_val = model_to_dict(this_game_reward)
db.close()
return return_val
else:
raise HTTPException(
status_code=418,
detail="Well slap my ass and call me a teapot; I could not save that rarity",
detail='Well slap my ass and call me a teapot; I could not save that rarity'
)
@router.delete("/{gamereward_id}")
@router.delete('/{gamereward_id}')
async def v1_gamerewards_delete(gamereward_id, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to delete awards. This event has been logged.",
detail='You are not authorized to delete awards. This event has been logged.'
)
try:
this_award = GameRewards.get_by_id(gamereward_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No award found with id {gamereward_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No award found with id {gamereward_id}')
count = this_award.delete_instance()
db.close()
if count == 1:
raise HTTPException(
status_code=200, detail=f"Game Reward {gamereward_id} has been deleted"
)
raise HTTPException(status_code=200, detail=f'Game Reward {gamereward_id} has been deleted')
else:
raise HTTPException(
status_code=500, detail=f"Game Reward {gamereward_id} was not deleted"
)
raise HTTPException(status_code=500, detail=f'Game Reward {gamereward_id} was not deleted')

View File

@ -3,10 +3,15 @@ from typing import Optional, List
import logging
import pydantic
from ..db_engine import db, GauntletReward, model_to_dict, DatabaseError, DoesNotExist
from ..db_engine import db, GauntletReward, model_to_dict, chunked, DatabaseError
from ..db_helpers import upsert_gauntlet_rewards
from ..dependencies import oauth2_scheme, valid_token
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA
logging.basicConfig(
filename=LOG_DATA["filename"],
format=LOG_DATA["format"],
level=LOG_DATA["log_level"],
)
router = APIRouter(prefix="/api/v2/gauntletrewards", tags=["gauntletrewards"])
@ -30,7 +35,6 @@ async def v1_gauntletreward_get(
reward_id: list = Query(default=None),
win_num: Optional[int] = None,
loss_max: Optional[int] = None,
limit: int = 100,
):
all_rewards = GauntletReward.select().order_by(GauntletReward.id)
@ -47,14 +51,11 @@ async def v1_gauntletreward_get(
all_rewards = all_rewards.order_by(-GauntletReward.loss_max, GauntletReward.win_num)
limit = max(0, min(limit, 500))
total_count = all_rewards.count()
all_rewards = all_rewards.limit(limit)
return_val = {"count": total_count, "rewards": []}
return_val = {"count": all_rewards.count(), "rewards": []}
for x in all_rewards:
return_val["rewards"].append(model_to_dict(x))
db.close()
return return_val
@ -62,13 +63,15 @@ async def v1_gauntletreward_get(
async def v1_gauntletreward_get_one(gauntletreward_id):
try:
this_reward = GauntletReward.get_by_id(gauntletreward_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(
status_code=404,
detail=f"No gauntlet reward found with id {gauntletreward_id}",
)
return_val = model_to_dict(this_reward)
db.close()
return return_val
@ -83,7 +86,8 @@ async def v1_gauntletreward_patch(
token: str = Depends(oauth2_scheme),
):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to patch gauntlet rewards. This event has been logged.",
@ -91,6 +95,7 @@ async def v1_gauntletreward_patch(
this_reward = GauntletReward.get_or_none(GauntletReward.id == gauntletreward_id)
if this_reward is None:
db.close()
raise KeyError(f"Gauntlet Reward ID {gauntletreward_id} not found")
if gauntlet_id is not None:
@ -106,8 +111,10 @@ async def v1_gauntletreward_patch(
if this_reward.save():
r_curr = model_to_dict(this_reward)
db.close()
return r_curr
else:
db.close()
raise DatabaseError(f"Unable to patch gauntlet reward {gauntletreward_id}")
@ -116,7 +123,8 @@ async def v1_gauntletreward_post(
gauntletreward: GauntletRewardList, token: str = Depends(oauth2_scheme)
):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to post gauntlets. This event has been logged.",
@ -129,6 +137,7 @@ async def v1_gauntletreward_post(
with db.atomic():
# Use PostgreSQL-compatible upsert helper
upsert_gauntlet_rewards(all_rewards, batch_size=15)
db.close()
return f"Inserted {len(all_rewards)} gauntlet rewards"

View File

@ -4,11 +4,19 @@ from typing import Optional
import logging
import pydantic
from ..db_engine import GauntletRun, model_to_dict, DatabaseError, DoesNotExist
from ..dependencies import oauth2_scheme, valid_token
from ..db_engine import db, GauntletRun, model_to_dict, DatabaseError
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA
logging.basicConfig(
filename=LOG_DATA['filename'],
format=LOG_DATA['format'],
level=LOG_DATA['log_level']
)
router = APIRouter(prefix="/api/v2/gauntletruns", tags=["notifs"])
router = APIRouter(
prefix='/api/v2/gauntletruns',
tags=['notifs']
)
class GauntletRunModel(pydantic.BaseModel):
@ -21,25 +29,13 @@ class GauntletRunModel(pydantic.BaseModel):
ended: Optional[int] = None
@router.get("")
@router.get('')
async def get_gauntletruns(
team_id: list = Query(default=None),
wins: Optional[int] = None,
wins_min: Optional[int] = None,
wins_max: Optional[int] = None,
losses: Optional[int] = None,
losses_min: Optional[int] = None,
losses_max: Optional[int] = None,
gsheet: Optional[str] = None,
created_after: Optional[int] = None,
created_before: Optional[int] = None,
ended_after: Optional[int] = None,
ended_before: Optional[int] = None,
is_active: Optional[bool] = None,
gauntlet_id: list = Query(default=None),
season: list = Query(default=None),
limit: int = 100,
):
team_id: list = Query(default=None), wins: Optional[int] = None, wins_min: Optional[int] = None,
wins_max: Optional[int] = None, losses: Optional[int] = None, losses_min: Optional[int] = None,
losses_max: Optional[int] = None, gsheet: Optional[str] = None, created_after: Optional[int] = None,
created_before: Optional[int] = None, ended_after: Optional[int] = None, ended_before: Optional[int] = None,
is_active: Optional[bool] = None, gauntlet_id: list = Query(default=None), season: list = Query(default=None)):
all_gauntlets = GauntletRun.select().order_by(GauntletRun.id)
if team_id is not None:
@ -82,48 +78,44 @@ async def get_gauntletruns(
if season is not None:
all_gauntlets = all_gauntlets.where(GauntletRun.team.season << season)
limit = max(0, min(limit, 500))
return_val = {"count": all_gauntlets.count(), "runs": []}
for x in all_gauntlets.limit(limit):
return_val["runs"].append(model_to_dict(x))
return_val = {'count': all_gauntlets.count(), 'runs': []}
for x in all_gauntlets:
return_val['runs'].append(model_to_dict(x))
db.close()
return return_val
@router.get("/{gauntletrun_id}")
@router.get('/{gauntletrun_id}')
async def get_one_gauntletrun(gauntletrun_id):
try:
this_gauntlet = GauntletRun.get_by_id(gauntletrun_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No gauntlet found with id {gauntletrun_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No gauntlet found with id {gauntletrun_id}')
return_val = model_to_dict(this_gauntlet)
db.close()
return return_val
@router.patch("/{gauntletrun_id}")
@router.patch('/{gauntletrun_id}')
async def patch_gauntletrun(
gauntletrun_id,
team_id: Optional[int] = None,
wins: Optional[int] = None,
losses: Optional[int] = None,
gsheet: Optional[str] = None,
created: Optional[bool] = None,
ended: Optional[bool] = None,
token: str = Depends(oauth2_scheme),
):
gauntletrun_id, team_id: Optional[int] = None, wins: Optional[int] = None, losses: Optional[int] = None,
gsheet: Optional[str] = None, created: Optional[bool] = None, ended: Optional[bool] = None,
token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to patch gauntlet runs. This event has been logged.",
detail='You are not authorized to patch gauntlet runs. This event has been logged.'
)
this_run = GauntletRun.get_or_none(GauntletRun.id == gauntletrun_id)
if this_run is None:
raise KeyError(f"Gauntlet Run ID {gauntletrun_id} not found")
db.close()
raise KeyError(f'Gauntlet Run ID {gauntletrun_id} not found')
if team_id is not None:
this_run.team_id = team_id
@ -146,44 +138,48 @@ async def patch_gauntletrun(
if this_run.save():
r_curr = model_to_dict(this_run)
db.close()
return r_curr
else:
raise DatabaseError(f"Unable to patch gauntlet run {gauntletrun_id}")
db.close()
raise DatabaseError(f'Unable to patch gauntlet run {gauntletrun_id}')
@router.post("")
async def post_gauntletrun(
gauntletrun: GauntletRunModel, token: str = Depends(oauth2_scheme)
):
@router.post('')
async def post_gauntletrun(gauntletrun: GauntletRunModel, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to post gauntlets. This event has been logged.",
detail='You are not authorized to post gauntlets. This event has been logged.'
)
run_data = gauntletrun.dict()
# Convert milliseconds timestamps to datetime for PostgreSQL
if run_data.get("created"):
run_data["created"] = datetime.fromtimestamp(run_data["created"] / 1000)
if run_data.get('created'):
run_data['created'] = datetime.fromtimestamp(run_data['created'] / 1000)
else:
run_data["created"] = datetime.now()
if run_data.get("ended"):
run_data["ended"] = datetime.fromtimestamp(run_data["ended"] / 1000)
run_data['created'] = datetime.now()
if run_data.get('ended'):
run_data['ended'] = datetime.fromtimestamp(run_data['ended'] / 1000)
else:
run_data["ended"] = None
run_data['ended'] = None
this_run = GauntletRun(**run_data)
if this_run.save():
r_run = model_to_dict(this_run)
db.close()
return r_run
else:
raise DatabaseError("Unable to post gauntlet run")
db.close()
raise DatabaseError(f'Unable to post gauntlet run')
@router.delete("/{gauntletrun_id}")
@router.delete('/{gauntletrun_id}')
async def delete_gauntletrun(gauntletrun_id):
if GauntletRun.delete_by_id(gauntletrun_id) == 1:
return f"Deleted gauntlet run ID {gauntletrun_id}"
return f'Deleted gauntlet run ID {gauntletrun_id}'
raise DatabaseError(f'Unable to delete gauntlet run {gauntletrun_id}')
raise DatabaseError(f"Unable to delete gauntlet run {gauntletrun_id}")

View File

@ -5,19 +5,27 @@ from fastapi import APIRouter, Depends, HTTPException, Response, Query
from typing import Optional, List
import logging
import pydantic
from pandas import DataFrame
from ..db_engine import (
db,
MlbPlayer,
Player,
BattingCard,
PitchingCard,
model_to_dict,
fn,
chunked,
query_to_csv,
)
from ..db_helpers import upsert_mlb_players
from ..dependencies import oauth2_scheme, valid_token
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA
logging.basicConfig(
filename=LOG_DATA["filename"],
format=LOG_DATA["format"],
level=LOG_DATA["log_level"],
)
router = APIRouter(prefix="/api/v2/mlbplayers", tags=["mlbplayers"])
@ -29,7 +37,7 @@ class PlayerModel(pydantic.BaseModel):
key_fangraphs: int = None
key_bbref: str = None
key_retro: str = None
offense_col: int = pydantic.Field(default_factory=lambda: random.randint(1, 3))
offense_col: int = random.randint(1, 3)
class PlayerList(pydantic.BaseModel):
@ -73,7 +81,6 @@ async def get_players(
key_mlbam: list = Query(default=None),
offense_col: list = Query(default=None),
csv: Optional[bool] = False,
limit: int = 100,
):
all_players = MlbPlayer.select().order_by(MlbPlayer.id)
@ -102,17 +109,16 @@ async def get_players(
if offense_col is not None:
all_players = all_players.where(MlbPlayer.offense_col << offense_col)
total_count = all_players.count() if not csv else 0
all_players = all_players.limit(max(0, min(limit, 500)))
if csv:
return_val = query_to_csv(all_players)
db.close()
return Response(content=return_val, media_type="text/csv")
return_val = {
"count": total_count,
"count": all_players.count(),
"players": [model_to_dict(x) for x in all_players],
}
db.close()
return return_val
@ -120,11 +126,13 @@ async def get_players(
async def get_one_player(player_id: int):
this_player = MlbPlayer.get_or_none(MlbPlayer.id == player_id)
if this_player is None:
db.close()
raise HTTPException(
status_code=404, detail=f"MlbPlayer id {player_id} not found"
)
r_data = model_to_dict(this_player)
db.close()
return r_data
@ -141,7 +149,8 @@ async def patch_player(
token: str = Depends(oauth2_scheme),
):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to patch mlb players. This event has been logged.",
@ -149,6 +158,7 @@ async def patch_player(
this_player = MlbPlayer.get_or_none(MlbPlayer.id == player_id)
if this_player is None:
db.close()
raise HTTPException(
status_code=404, detail=f"MlbPlayer id {player_id} not found"
)
@ -170,8 +180,10 @@ async def patch_player(
if this_player.save() == 1:
return_val = model_to_dict(this_player)
db.close()
return return_val
else:
db.close()
raise HTTPException(
status_code=418,
detail="Well slap my ass and call me a teapot; I could not save that player",
@ -181,7 +193,8 @@ async def patch_player(
@router.post("")
async def post_players(players: PlayerList, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to post mlb players. This event has been logged.",
@ -196,6 +209,7 @@ async def post_players(players: PlayerList, token: str = Depends(oauth2_scheme))
| (MlbPlayer.key_bbref == x.key_bbref)
)
if dupes.count() > 0:
db.close()
raise HTTPException(
status_code=400,
detail=f"{x.first_name} {x.last_name} has a key already in the database",
@ -207,6 +221,7 @@ async def post_players(players: PlayerList, token: str = Depends(oauth2_scheme))
# Use PostgreSQL-compatible upsert helper
# Note: Duplicate check is already done above, so this is effectively just insert
upsert_mlb_players(new_players, batch_size=15)
db.close()
return f"Inserted {len(new_players)} new MLB players"
@ -214,7 +229,8 @@ async def post_players(players: PlayerList, token: str = Depends(oauth2_scheme))
@router.post("/one")
async def post_one_player(player: PlayerModel, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to post mlb players. This event has been logged.",
@ -226,9 +242,10 @@ async def post_one_player(player: PlayerModel, token: str = Depends(oauth2_schem
| (MlbPlayer.key_bbref == player.key_bbref)
)
if dupes.count() > 0:
logging.info("POST /mlbplayers/one - dupes found:")
logging.info(f"POST /mlbplayers/one - dupes found:")
for x in dupes:
logging.info(f"{x}")
db.close()
raise HTTPException(
status_code=400,
detail=f"{player.first_name} {player.last_name} has a key already in the database",
@ -238,6 +255,7 @@ async def post_one_player(player: PlayerModel, token: str = Depends(oauth2_schem
saved = new_player.save()
if saved == 1:
return_val = model_to_dict(new_player)
db.close()
return return_val
else:
raise HTTPException(
@ -249,7 +267,8 @@ async def post_one_player(player: PlayerModel, token: str = Depends(oauth2_schem
@router.delete("/{player_id}")
async def delete_player(player_id: int, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to delete mlb players. This event has been logged.",
@ -257,11 +276,13 @@ async def delete_player(player_id: int, token: str = Depends(oauth2_scheme)):
this_player = MlbPlayer.get_or_none(MlbPlayer.id == player_id)
if this_player is None:
db.close()
raise HTTPException(
status_code=404, detail=f"MlbPlayer id {player_id} not found"
)
count = this_player.delete_instance()
db.close()
if count == 1:
raise HTTPException(
@ -279,7 +300,8 @@ async def update_columns(
mlbplayer_id: Optional[int] = None, token: str = Depends(oauth2_scheme)
):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to update mlb players. This event has been logged.",
@ -305,6 +327,7 @@ async def update_columns(
logging.info(f"Updated {count} batting cards for {x.first_name} {x.last_name}")
update_card_urls(x)
db.close()
return f"Updated {total_count} batting cards"
@ -314,7 +337,8 @@ async def update_names(
mlbplayer_id: Optional[int] = None, token: str = Depends(oauth2_scheme)
):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to update mlb players. This event has been logged.",
@ -336,13 +360,14 @@ async def update_names(
logging.info(f"Update {count} player records for {x.first_name} {x.last_name}")
update_card_urls(x)
db.close()
return f"Updated {total_count} names"
# @router.post('/link-players')
# async def post_players(token: str = Depends(oauth2_scheme)):
# if not valid_token(token):
# logging.warning('Bad Token: [REDACTED]')
# logging.warning(f'Bad Token: {token}')
# db.close()
# raise HTTPException(
# status_code=401,

View File

@ -5,11 +5,19 @@ import logging
import pydantic
from pandas import DataFrame
from ..db_engine import Notification, model_to_dict, fn, DoesNotExist
from ..dependencies import oauth2_scheme, valid_token
from ..db_engine import db, Notification, model_to_dict, fn
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA
logging.basicConfig(
filename=LOG_DATA['filename'],
format=LOG_DATA['format'],
level=LOG_DATA['log_level']
)
router = APIRouter(prefix="/api/v2/notifs", tags=["notifs"])
router = APIRouter(
prefix='/api/v2/notifs',
tags=['notifs']
)
class NotifModel(pydantic.BaseModel):
@ -18,30 +26,20 @@ class NotifModel(pydantic.BaseModel):
desc: Optional[str] = None
field_name: str
message: str
about: Optional[str] = "blank"
about: Optional[str] = 'blank'
ack: Optional[bool] = False
@router.get("")
@router.get('')
async def get_notifs(
created_after: Optional[int] = None,
title: Optional[str] = None,
desc: Optional[str] = None,
field_name: Optional[str] = None,
in_desc: Optional[str] = None,
about: Optional[str] = None,
ack: Optional[bool] = None,
csv: Optional[bool] = None,
limit: Optional[int] = 100,
):
if limit is not None:
limit = max(0, min(limit, 500))
created_after: Optional[int] = None, title: Optional[str] = None, desc: Optional[str] = None,
field_name: Optional[str] = None, in_desc: Optional[str] = None, about: Optional[str] = None,
ack: Optional[bool] = None, csv: Optional[bool] = None):
all_notif = Notification.select().order_by(Notification.id)
if all_notif.count() == 0:
raise HTTPException(
status_code=404, detail="There are no notifications to filter"
)
db.close()
raise HTTPException(status_code=404, detail=f'There are no notifications to filter')
if created_after is not None:
# Convert milliseconds timestamp to datetime for PostgreSQL comparison
@ -54,90 +52,68 @@ async def get_notifs(
if field_name is not None:
all_notif = all_notif.where(Notification.field_name == field_name)
if in_desc is not None:
all_notif = all_notif.where(
fn.Lower(Notification.desc).contains(in_desc.lower())
)
all_notif = all_notif.where(fn.Lower(Notification.desc).contains(in_desc.lower()))
if about is not None:
all_notif = all_notif.where(Notification.about == about)
if ack is not None:
all_notif = all_notif.where(Notification.ack == ack)
total_count = all_notif.count()
if limit is not None:
all_notif = all_notif.limit(limit)
if csv:
data_list = [
["id", "created", "title", "desc", "field_name", "message", "about", "ack"]
]
data_list = [['id', 'created', 'title', 'desc', 'field_name', 'message', 'about', 'ack']]
for line in all_notif:
data_list.append(
[
line.id,
line.created,
line.title,
line.desc,
line.field_name,
line.message,
line.about,
line.ack,
]
)
data_list.append([
line.id, line.created, line.title, line.desc, line.field_name, line.message, line.about, line.ack
])
return_val = DataFrame(data_list).to_csv(header=False, index=False)
return Response(content=return_val, media_type="text/csv")
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = {"count": total_count, "notifs": []}
return_val = {'count': all_notif.count(), 'notifs': []}
for x in all_notif:
return_val["notifs"].append(model_to_dict(x))
return_val['notifs'].append(model_to_dict(x))
db.close()
return return_val
@router.get("/{notif_id}")
@router.get('/{notif_id}')
async def get_one_notif(notif_id, csv: Optional[bool] = None):
try:
this_notif = Notification.get_by_id(notif_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No notification found with id {notif_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No notification found with id {notif_id}')
if csv:
data_list = [
["id", "created", "title", "desc", "field_name", "message", "about", "ack"],
[
this_notif.id,
this_notif.created,
this_notif.title,
this_notif.desc,
this_notif.field_name,
this_notif.message,
this_notif.about,
this_notif.ack,
],
['id', 'created', 'title', 'desc', 'field_name', 'message', 'about', 'ack'],
[this_notif.id, this_notif.created, this_notif.title, this_notif.desc, this_notif.field_name,
this_notif.message, this_notif.about, this_notif.ack]
]
return_val = DataFrame(data_list).to_csv(header=False, index=False)
return Response(content=return_val, media_type="text/csv")
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = model_to_dict(this_notif)
db.close()
return return_val
@router.post("")
@router.post('')
async def post_notif(notif: NotifModel, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to post notifications. This event has been logged.",
detail='You are not authorized to post notifications. This event has been logged.'
)
logging.info(f"new notif: {notif}")
logging.info(f'new notif: {notif}')
this_notif = Notification(
created=datetime.fromtimestamp(notif.created / 1000),
title=notif.title,
@ -150,38 +126,33 @@ async def post_notif(notif: NotifModel, token: str = Depends(oauth2_scheme)):
saved = this_notif.save()
if saved == 1:
return_val = model_to_dict(this_notif)
db.close()
return return_val
else:
db.close()
raise HTTPException(
status_code=418,
detail="Well slap my ass and call me a teapot; I could not save that notification",
detail='Well slap my ass and call me a teapot; I could not save that notification'
)
@router.patch("/{notif_id}")
@router.patch('/{notif_id}')
async def patch_notif(
notif_id,
created: Optional[int] = None,
title: Optional[str] = None,
desc: Optional[str] = None,
field_name: Optional[str] = None,
message: Optional[str] = None,
about: Optional[str] = None,
ack: Optional[bool] = None,
token: str = Depends(oauth2_scheme),
):
notif_id, created: Optional[int] = None, title: Optional[str] = None, desc: Optional[str] = None,
field_name: Optional[str] = None, message: Optional[str] = None, about: Optional[str] = None,
ack: Optional[bool] = None, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to patch notifications. This event has been logged.",
detail='You are not authorized to patch notifications. This event has been logged.'
)
try:
this_notif = Notification.get_by_id(notif_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No notification found with id {notif_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No notification found with id {notif_id}')
if title is not None:
this_notif.title = title
@ -200,36 +171,34 @@ async def patch_notif(
if this_notif.save() == 1:
return_val = model_to_dict(this_notif)
db.close()
return return_val
else:
raise HTTPException(
status_code=418,
detail="Well slap my ass and call me a teapot; I could not save that rarity",
detail='Well slap my ass and call me a teapot; I could not save that rarity'
)
@router.delete("/{notif_id}")
@router.delete('/{notif_id}')
async def delete_notif(notif_id, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to delete notifications. This event has been logged.",
detail='You are not authorized to delete notifications. This event has been logged.'
)
try:
this_notif = Notification.get_by_id(notif_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No notification found with id {notif_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No notification found with id {notif_id}')
count = this_notif.delete_instance()
db.close()
if count == 1:
raise HTTPException(
status_code=200, detail=f"Notification {notif_id} has been deleted"
)
raise HTTPException(status_code=200, detail=f'Notification {notif_id} has been deleted')
else:
raise HTTPException(
status_code=500, detail=f"Notification {notif_id} was not deleted"
)
raise HTTPException(status_code=500, detail=f'Notification {notif_id} was not deleted')

View File

@ -6,9 +6,14 @@ import logging
import pydantic
from pandas import DataFrame
from ..db_engine import db, Cardset, model_to_dict, Pack, Team, PackType, DoesNotExist
from ..dependencies import oauth2_scheme, valid_token
from ..db_engine import db, Cardset, model_to_dict, Pack, Team, PackType
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA
logging.basicConfig(
filename=LOG_DATA['filename'],
format=LOG_DATA['format'],
level=LOG_DATA['log_level']
)
router = APIRouter(
prefix='/api/v2/packs',
@ -36,25 +41,29 @@ async def get_packs(
all_packs = Pack.select()
if all_packs.count() == 0:
db.close()
raise HTTPException(status_code=404, detail=f'There are no packs to filter')
if team_id is not None:
try:
this_team = Team.get_by_id(team_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No team found with id {team_id}')
all_packs = all_packs.where(Pack.team == this_team)
if pack_type_id is not None:
try:
this_pack_type = PackType.get_by_id(pack_type_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No pack type found with id {pack_type_id}')
all_packs = all_packs.where(Pack.pack_type == this_pack_type)
if pack_team_id is not None:
try:
this_pack_team = Team.get_by_id(pack_team_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No team found with id {pack_team_id}')
all_packs = all_packs.where(Pack.pack_team == this_pack_team)
elif exact_match:
@ -63,7 +72,8 @@ async def get_packs(
if pack_cardset_id is not None:
try:
this_pack_cardset = Cardset.get_by_id(pack_cardset_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No cardset found with id {pack_cardset_id}')
all_packs = all_packs.where(Pack.pack_cardset == this_pack_cardset)
elif exact_match:
@ -93,6 +103,7 @@ async def get_packs(
)
return_val = DataFrame(data_list).to_csv(header=False, index=False)
db.close()
return Response(content=return_val, media_type='text/csv')
else:
@ -100,14 +111,16 @@ async def get_packs(
for x in all_packs:
return_val['packs'].append(model_to_dict(x))
db.close()
return return_val
@router.get('/{pack_id}')
async def get_one_pack(pack_id: int, csv: Optional[bool] = False):
async def get_one_pack(pack_id, csv: Optional[bool] = False):
try:
this_pack = Pack.get_by_id(pack_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No pack found with id {pack_id}')
if csv:
@ -118,17 +131,20 @@ async def get_one_pack(pack_id: int, csv: Optional[bool] = False):
]
return_val = DataFrame(data_list).to_csv(header=False, index=False)
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = model_to_dict(this_pack)
db.close()
return return_val
@router.post('')
async def post_pack(packs: PackModel, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning('Bad Token: [REDACTED]')
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail='You are not authorized to post packs. This event has been logged.'
@ -147,6 +163,7 @@ async def post_pack(packs: PackModel, token: str = Depends(oauth2_scheme)):
with db.atomic():
Pack.bulk_create(new_packs, batch_size=15)
db.close()
raise HTTPException(status_code=200, detail=f'{len(new_packs)} packs have been added')
@ -154,7 +171,8 @@ async def post_pack(packs: PackModel, token: str = Depends(oauth2_scheme)):
@router.post('/one')
async def post_one_pack(pack: PackPydantic, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning('Bad Token: [REDACTED]')
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail='You are not authorized to post packs. This event has been logged.'
@ -171,6 +189,7 @@ async def post_one_pack(pack: PackPydantic, token: str = Depends(oauth2_scheme))
saved = this_pack.save()
if saved == 1:
return_val = model_to_dict(this_pack)
db.close()
return return_val
else:
raise HTTPException(
@ -184,14 +203,16 @@ async def patch_pack(
pack_id, team_id: Optional[int] = None, pack_type_id: Optional[int] = None, open_time: Optional[int] = None,
pack_team_id: Optional[int] = None, pack_cardset_id: Optional[int] = None, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning('Bad Token: [REDACTED]')
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail='You are not authorized to patch packs. This event has been logged.'
)
try:
this_pack = Pack.get_by_id(pack_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No pack found with id {pack_id}')
if team_id is not None:
@ -216,6 +237,7 @@ async def patch_pack(
if this_pack.save() == 1:
return_val = model_to_dict(this_pack)
db.close()
return return_val
else:
raise HTTPException(
@ -227,17 +249,20 @@ async def patch_pack(
@router.delete('/{pack_id}')
async def delete_pack(pack_id, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning('Bad Token: [REDACTED]')
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail='You are not authorized to delete packs. This event has been logged.'
)
try:
this_pack = Pack.get_by_id(pack_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No packs found with id {pack_id}')
count = this_pack.delete_instance()
db.close()
if count == 1:
raise HTTPException(status_code=200, detail=f'Pack {pack_id} has been deleted')

View File

@ -4,9 +4,14 @@ import logging
import pydantic
from pandas import DataFrame
from ..db_engine import PackType, model_to_dict, fn, DoesNotExist
from ..dependencies import oauth2_scheme, valid_token
from ..db_engine import db, PackType, model_to_dict, fn
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA
logging.basicConfig(
filename=LOG_DATA['filename'],
format=LOG_DATA['format'],
level=LOG_DATA['log_level']
)
router = APIRouter(
prefix='/api/v2/packtypes',
@ -29,6 +34,7 @@ async def get_packtypes(
all_packtypes = PackType.select().order_by(PackType.id)
if all_packtypes.count() == 0:
db.close()
raise HTTPException(status_code=404, detail=f'There are no packtypes to filter')
if name is not None:
@ -54,6 +60,7 @@ async def get_packtypes(
)
return_val = DataFrame(data_list).to_csv(header=False, index=False)
db.close()
return Response(content=return_val, media_type='text/csv')
else:
@ -61,6 +68,7 @@ async def get_packtypes(
for x in all_packtypes:
return_val['packtypes'].append(model_to_dict(x))
db.close()
return return_val
@ -68,7 +76,8 @@ async def get_packtypes(
async def get_one_packtype(packtype_id, csv: Optional[bool] = False):
try:
this_packtype = PackType.get_by_id(packtype_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No packtype found with id {packtype_id}')
if csv:
@ -78,17 +87,20 @@ async def get_one_packtype(packtype_id, csv: Optional[bool] = False):
]
return_val = DataFrame(data_list).to_csv(header=False, index=False)
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = model_to_dict(this_packtype)
db.close()
return return_val
@router.post('')
async def post_packtypes(packtype: PacktypeModel, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning('Bad Token: [REDACTED]')
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail='You are not authorized to post packtypes. This event has been logged.'
@ -96,6 +108,7 @@ async def post_packtypes(packtype: PacktypeModel, token: str = Depends(oauth2_sc
dupe_packtype = PackType.get_or_none(PackType.name == packtype.name)
if dupe_packtype:
db.close()
raise HTTPException(status_code=400, detail=f'There is already a packtype using {packtype.name}')
this_packtype = PackType(
@ -109,6 +122,7 @@ async def post_packtypes(packtype: PacktypeModel, token: str = Depends(oauth2_sc
saved = this_packtype.save()
if saved == 1:
return_val = model_to_dict(this_packtype)
db.close()
return return_val
else:
raise HTTPException(
@ -122,14 +136,16 @@ async def patch_packtype(
packtype_id, name: Optional[str] = None, card_count: Optional[int] = None, description: Optional[str] = None,
cost: Optional[int] = None, available: Optional[bool] = None, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning('Bad Token: [REDACTED]')
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail='You are not authorized to patch packtypes. This event has been logged.'
)
try:
this_packtype = PackType.get_by_id(packtype_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No packtype found with id {packtype_id}')
if name is not None:
@ -145,6 +161,7 @@ async def patch_packtype(
if this_packtype.save() == 1:
return_val = model_to_dict(this_packtype)
db.close()
return return_val
else:
raise HTTPException(
@ -156,17 +173,20 @@ async def patch_packtype(
@router.delete('/{packtype_id}')
async def delete_packtype(packtype_id, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning('Bad Token: [REDACTED]')
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail='You are not authorized to delete packtypes. This event has been logged.'
)
try:
this_packtype = PackType.get_by_id(packtype_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No packtype found with id {packtype_id}')
count = this_packtype.delete_instance()
db.close()
if count == 1:
raise HTTPException(status_code=200, detail=f'Packtype {packtype_id} has been deleted')

View File

@ -5,40 +5,44 @@ import logging
import pydantic
from pandas import DataFrame
from ..db_engine import Paperdex, model_to_dict, Player, Cardset, Team, DoesNotExist
from ..dependencies import oauth2_scheme, valid_token
from ..db_engine import db, Paperdex, model_to_dict, Player, Cardset, Team
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA
logging.basicConfig(
filename=LOG_DATA['filename'],
format=LOG_DATA['format'],
level=LOG_DATA['log_level']
)
router = APIRouter(prefix="/api/v2/paperdex", tags=["paperdex"])
router = APIRouter(
prefix='/api/v2/paperdex',
tags=['paperdex']
)
class PaperdexModel(pydantic.BaseModel):
team_id: int
player_id: int
created: Optional[int] = int(datetime.timestamp(datetime.now()) * 1000)
created: Optional[int] = int(datetime.timestamp(datetime.now())*1000)
@router.get("")
@router.get('')
async def get_paperdex(
team_id: Optional[int] = None,
player_id: Optional[int] = None,
created_after: Optional[int] = None,
cardset_id: Optional[int] = None,
created_before: Optional[int] = None,
flat: Optional[bool] = False,
csv: Optional[bool] = None,
limit: int = 100,
):
team_id: Optional[int] = None, player_id: Optional[int] = None, created_after: Optional[int] = None,
cardset_id: Optional[int] = None, created_before: Optional[int] = None, flat: Optional[bool] = False,
csv: Optional[bool] = None):
all_dex = Paperdex.select().join(Player).join(Cardset).order_by(Paperdex.id)
if all_dex.count() == 0:
raise HTTPException(status_code=404, detail="There are no paperdex to filter")
db.close()
raise HTTPException(status_code=404, detail=f'There are no paperdex to filter')
if team_id is not None:
all_dex = all_dex.where(Paperdex.team_id == team_id)
if player_id is not None:
all_dex = all_dex.where(Paperdex.player_id == player_id)
if cardset_id is not None:
all_sets = Cardset.select().where(Cardset.id == cardset_id)
all_dex = all_dex.where(Paperdex.player.cardset.id == cardset_id)
if created_after is not None:
# Convert milliseconds timestamp to datetime for PostgreSQL comparison
@ -53,103 +57,102 @@ async def get_paperdex(
# db.close()
# raise HTTPException(status_code=404, detail=f'No paperdex found')
limit = max(0, min(limit, 500))
all_dex = all_dex.limit(limit)
if csv:
data_list = [["id", "team_id", "player_id", "created"]]
data_list = [['id', 'team_id', 'player_id', 'created']]
for line in all_dex:
data_list.append(
[line.id, line.team.id, line.player.player_id, line.created]
[
line.id, line.team.id, line.player.player_id, line.created
]
)
return_val = DataFrame(data_list).to_csv(header=False, index=False)
return Response(content=return_val, media_type="text/csv")
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = {"count": all_dex.count(), "paperdex": []}
return_val = {'count': all_dex.count(), 'paperdex': []}
for x in all_dex:
return_val["paperdex"].append(model_to_dict(x, recurse=not flat))
return_val['paperdex'].append(model_to_dict(x, recurse=not flat))
db.close()
return return_val
@router.get("/{paperdex_id}")
@router.get('/{paperdex_id}')
async def get_one_paperdex(paperdex_id, csv: Optional[bool] = False):
try:
this_dex = Paperdex.get_by_id(paperdex_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No paperdex found with id {paperdex_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No paperdex found with id {paperdex_id}')
if csv:
data_list = [
["id", "team_id", "player_id", "created"],
[this_dex.id, this_dex.team.id, this_dex.player.id, this_dex.created],
['id', 'team_id', 'player_id', 'created'],
[this_dex.id, this_dex.team.id, this_dex.player.id, this_dex.created]
]
return_val = DataFrame(data_list).to_csv(header=False, index=False)
return Response(content=return_val, media_type="text/csv")
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = model_to_dict(this_dex)
db.close()
return return_val
@router.post("")
@router.post('')
async def post_paperdex(paperdex: PaperdexModel, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to post paperdex. This event has been logged.",
detail='You are not authorized to post paperdex. This event has been logged.'
)
dupe_dex = Paperdex.get_or_none(
Paperdex.team_id == paperdex.team_id, Paperdex.player_id == paperdex.player_id
)
dupe_dex = Paperdex.get_or_none(Paperdex.team_id == paperdex.team_id, Paperdex.player_id == paperdex.player_id)
if dupe_dex:
return_val = model_to_dict(dupe_dex)
db.close()
return return_val
this_dex = Paperdex(
team_id=paperdex.team_id,
player_id=paperdex.player_id,
created=datetime.fromtimestamp(paperdex.created / 1000),
created=datetime.fromtimestamp(paperdex.created / 1000)
)
saved = this_dex.save()
if saved == 1:
return_val = model_to_dict(this_dex)
db.close()
return return_val
else:
raise HTTPException(
status_code=418,
detail="Well slap my ass and call me a teapot; I could not save that dex",
detail='Well slap my ass and call me a teapot; I could not save that dex'
)
@router.patch("/{paperdex_id}")
@router.patch('/{paperdex_id}')
async def patch_paperdex(
paperdex_id,
team_id: Optional[int] = None,
player_id: Optional[int] = None,
created: Optional[int] = None,
token: str = Depends(oauth2_scheme),
):
paperdex_id, team_id: Optional[int] = None, player_id: Optional[int] = None, created: Optional[int] = None,
token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to patch paperdex. This event has been logged.",
detail='You are not authorized to patch paperdex. This event has been logged.'
)
try:
this_dex = Paperdex.get_by_id(paperdex_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No paperdex found with id {paperdex_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No paperdex found with id {paperdex_id}')
if team_id is not None:
this_dex.team_id = team_id
@ -160,47 +163,49 @@ async def patch_paperdex(
if this_dex.save() == 1:
return_val = model_to_dict(this_dex)
db.close()
return return_val
else:
raise HTTPException(
status_code=418,
detail="Well slap my ass and call me a teapot; I could not save that rarity",
detail='Well slap my ass and call me a teapot; I could not save that rarity'
)
@router.delete("/{paperdex_id}")
@router.delete('/{paperdex_id}')
async def delete_paperdex(paperdex_id, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to delete rewards. This event has been logged.",
detail='You are not authorized to delete rewards. This event has been logged.'
)
try:
this_dex = Paperdex.get_by_id(paperdex_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No paperdex found with id {paperdex_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No paperdex found with id {paperdex_id}')
count = this_dex.delete_instance()
db.close()
if count == 1:
raise HTTPException(
status_code=200, detail=f"Paperdex {this_dex} has been deleted"
)
raise HTTPException(status_code=200, detail=f'Paperdex {this_dex} has been deleted')
else:
raise HTTPException(
status_code=500, detail=f"Paperdex {this_dex} was not deleted"
)
raise HTTPException(status_code=500, detail=f'Paperdex {this_dex} was not deleted')
@router.post("/wipe-ai")
@router.post('/wipe-ai')
async def wipe_ai_paperdex(token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
raise HTTPException(status_code=401, detail="Unauthorized")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail='Unauthorized'
)
g_teams = Team.select().where(Team.abbrev.contains("Gauntlet"))
g_teams = Team.select().where(Team.abbrev.contains('Gauntlet'))
count = Paperdex.delete().where(Paperdex.team << g_teams).execute()
return f"Deleted {count} records"
return f'Deleted {count} records'

View File

@ -2,7 +2,7 @@ import os
from fastapi import APIRouter, Depends, HTTPException, Query, Response
from fastapi.responses import FileResponse
from typing import Literal, List
from typing import Literal, Optional, List
import logging
import pandas as pd
import pydantic
@ -12,6 +12,7 @@ from ..db_engine import (
db,
PitchingCardRatings,
model_to_dict,
chunked,
PitchingCard,
Player,
query_to_csv,
@ -19,8 +20,13 @@ from ..db_engine import (
CardPosition,
)
from ..db_helpers import upsert_pitching_card_ratings
from ..dependencies import oauth2_scheme, valid_token
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA
logging.basicConfig(
filename=LOG_DATA["filename"],
format=LOG_DATA["format"],
level=LOG_DATA["log_level"],
)
router = APIRouter(prefix="/api/v2/pitchingcardratings", tags=["pitchingcardratings"])
RATINGS_FILE = "storage/pitching-ratings.csv"
@ -143,11 +149,11 @@ async def get_card_ratings(
short_output: bool = False,
csv: bool = False,
cardset_id: list = Query(default=None),
limit: int = 100,
token: str = Depends(oauth2_scheme),
):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401, detail="You are not authorized to pull card ratings."
)
@ -169,20 +175,19 @@ async def get_card_ratings(
)
all_ratings = all_ratings.where(PitchingCardRatings.pitchingcard << set_cards)
total_count = all_ratings.count() if not csv else 0
all_ratings = all_ratings.limit(max(0, min(limit, 500)))
if csv:
return_val = query_to_csv(all_ratings)
db.close()
return Response(content=return_val, media_type="text/csv")
else:
return_val = {
"count": total_count,
"count": all_ratings.count(),
"ratings": [
model_to_dict(x, recurse=not short_output) for x in all_ratings
],
}
db.close()
return return_val
@ -235,12 +240,13 @@ def get_scouting_dfs(cardset_id: list = None):
series_list = [
pd.Series(
dict([(x.player.player_id, x.range) for x in positions]), name="Range P"
dict([(x.player.player_id, x.range) for x in positions]), name=f"Range P"
),
pd.Series(
dict([(x.player.player_id, x.error) for x in positions]), name="Error P"
dict([(x.player.player_id, x.error) for x in positions]), name=f"Error P"
),
]
db.close()
logging.debug(f"series_list: {series_list}")
return pit_df.join(series_list)
@ -252,6 +258,7 @@ async def get_card_scouting(team_id: int, ts: str):
logging.debug(f"Team: {this_team} / has_guide: {this_team.has_guide}")
if this_team is None or ts != this_team.team_hash() or this_team.has_guide != 1:
logging.warning(f"Team_id {team_id} attempted to pull ratings")
db.close()
return (
"Your team does not have the ratings guide enabled. If you have purchased a copy ping Cal to "
"make sure it is enabled on your team. If you are interested you can pick it up here (thank you!): "
@ -273,12 +280,13 @@ async def get_card_scouting(team_id: int, ts: str):
@router.post("/calculate/scouting")
async def post_calc_scouting(token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401, detail="You are not authorized to calculate card ratings."
)
logging.warning("Re-calculating pitching ratings\n\n")
logging.warning(f"Re-calculating pitching ratings\n\n")
output = get_scouting_dfs()
first = ["player_id", "player_name", "cardset_name", "rarity", "hand", "variant"]
@ -309,12 +317,13 @@ async def get_basic_scouting():
@router.post("/calculate/basic")
async def post_calc_basic(token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401, detail="You are not authorized to calculate basic ratings."
)
logging.warning("Re-calculating basic pitching ratings\n\n")
logging.warning(f"Re-calculating basic pitching ratings\n\n")
raw_data = get_scouting_dfs()
logging.debug(f"output: {raw_data}")
@ -488,18 +497,21 @@ async def post_calc_basic(token: str = Depends(oauth2_scheme)):
@router.get("/{ratings_id}")
async def get_one_rating(ratings_id: int, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401, detail="You are not authorized to pull card ratings."
)
this_rating = PitchingCardRatings.get_or_none(PitchingCardRatings.id == ratings_id)
if this_rating is None:
db.close()
raise HTTPException(
status_code=404, detail=f"PitchingCardRating id {ratings_id} not found"
)
r_data = model_to_dict(this_rating)
db.close()
return r_data
@ -523,13 +535,15 @@ async def get_player_ratings(
"count": all_ratings.count(),
"ratings": [model_to_dict(x, recurse=not short_output) for x in all_ratings],
}
db.close()
return return_val
@router.put("")
async def put_ratings(ratings: RatingsList, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401, detail="You are not authorized to post card ratings."
)
@ -557,24 +571,28 @@ async def put_ratings(ratings: RatingsList, token: str = Depends(oauth2_scheme))
# Use PostgreSQL-compatible upsert helper
upsert_pitching_card_ratings(new_ratings, batch_size=30)
db.close()
return f"Updated ratings: {updates}; new ratings: {len(new_ratings)}"
@router.delete("/{ratings_id}")
async def delete_rating(ratings_id: int, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401, detail="You are not authorized to post card ratings."
)
this_rating = PitchingCardRatings.get_or_none(PitchingCardRatings.id == ratings_id)
if this_rating is None:
db.close()
raise HTTPException(
status_code=404, detail=f"PitchingCardRating id {ratings_id} not found"
)
count = this_rating.delete_instance()
db.close()
if count == 1:
return f"Rating {this_rating} has been deleted"

View File

@ -5,10 +5,15 @@ from typing import Literal, Optional, List
import logging
import pydantic
from ..db_engine import db, PitchingCard, model_to_dict, Player, fn, MlbPlayer
from ..db_engine import db, PitchingCard, model_to_dict, chunked, Player, fn, MlbPlayer
from ..db_helpers import upsert_pitching_cards
from ..dependencies import oauth2_scheme, valid_token
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA
logging.basicConfig(
filename=LOG_DATA["filename"],
format=LOG_DATA["format"],
level=LOG_DATA["log_level"],
)
router = APIRouter(prefix="/api/v2/pitchingcards", tags=["pitchingcards"])
@ -57,6 +62,7 @@ async def get_pitching_cards(
"count": all_cards.count(),
"cards": [model_to_dict(x, recurse=not short_output) for x in all_cards],
}
db.close()
return return_val
@ -64,11 +70,13 @@ async def get_pitching_cards(
async def get_one_card(card_id: int):
this_card = PitchingCard.get_or_none(PitchingCard.id == card_id)
if this_card is None:
db.close()
raise HTTPException(
status_code=404, detail=f"PitchingCard id {card_id} not found"
)
r_card = model_to_dict(this_card)
db.close()
return r_card
@ -88,13 +96,15 @@ async def get_player_cards(
"count": all_cards.count(),
"cards": [model_to_dict(x, recurse=not short_output) for x in all_cards],
}
db.close()
return return_val
@router.put("")
async def put_cards(cards: PitchingCardList, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to post pitching cards. This event has been logged.",
@ -143,6 +153,7 @@ async def put_cards(cards: PitchingCardList, token: str = Depends(oauth2_scheme)
# Use PostgreSQL-compatible upsert helper
upsert_pitching_cards(new_cards, batch_size=30)
db.close()
return f"Updated cards: {updates}; new cards: {len(new_cards)}"
@ -159,7 +170,8 @@ async def patch_card(
token: str = Depends(oauth2_scheme),
):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to patch pitching cards. This event has been logged.",
@ -167,6 +179,7 @@ async def patch_card(
this_card = PitchingCard.get_or_none(PitchingCard.id == card_id)
if this_card is None:
db.close()
raise HTTPException(
status_code=404, detail=f"PitchingCard id {card_id} not found"
)
@ -188,8 +201,10 @@ async def patch_card(
if this_card.save() == 1:
return_val = model_to_dict(this_card)
db.close()
return return_val
else:
db.close()
raise HTTPException(
status_code=418,
detail="Well slap my ass and call me a teapot; I could not save that card",
@ -199,7 +214,8 @@ async def patch_card(
@router.delete("/{card_id}")
async def delete_card(card_id: int, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to delete pitching cards. This event has been logged.",
@ -207,9 +223,11 @@ async def delete_card(card_id: int, token: str = Depends(oauth2_scheme)):
this_card = PitchingCard.get_or_none(PitchingCard.id == card_id)
if this_card is None:
db.close()
raise HTTPException(status_code=404, detail=f"Pitching id {card_id} not found")
count = this_card.delete_instance()
db.close()
if count == 1:
return f"Card {this_card} has been deleted"
@ -222,7 +240,8 @@ async def delete_card(card_id: int, token: str = Depends(oauth2_scheme)):
@router.delete("")
async def delete_all_cards(token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to delete pitching cards. This event has been logged.",

View File

@ -5,19 +5,19 @@ import logging
import pydantic
from pandas import DataFrame
from ..db_engine import (
db,
PitchingStat,
model_to_dict,
Card,
Player,
Current,
DoesNotExist,
from ..db_engine import db, PitchingStat, model_to_dict, Card, Player, Current
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA
logging.basicConfig(
filename=LOG_DATA['filename'],
format=LOG_DATA['format'],
level=LOG_DATA['log_level']
)
from ..dependencies import oauth2_scheme, valid_token
router = APIRouter(prefix="/api/v2/pitstats", tags=["pitstats"])
router = APIRouter(
prefix='/api/v2/pitstats',
tags=['pitstats']
)
class PitStat(pydantic.BaseModel):
@ -45,7 +45,7 @@ class PitStat(pydantic.BaseModel):
bsv: Optional[int] = 0
week: int
season: int
created: Optional[int] = int(datetime.timestamp(datetime.now()) * 1000)
created: Optional[int] = int(datetime.timestamp(datetime.now())*1000)
game_id: int
@ -53,23 +53,13 @@ class PitchingStatModel(pydantic.BaseModel):
stats: List[PitStat]
@router.get("")
@router.get('')
async def get_pit_stats(
card_id: int = None,
player_id: int = None,
team_id: int = None,
vs_team_id: int = None,
week: int = None,
season: int = None,
week_start: int = None,
week_end: int = None,
created: int = None,
gs: bool = None,
csv: bool = None,
limit: Optional[int] = 100,
):
card_id: int = None, player_id: int = None, team_id: int = None, vs_team_id: int = None, week: int = None,
season: int = None, week_start: int = None, week_end: int = None, created: int = None, gs: bool = None,
csv: bool = None):
all_stats = PitchingStat.select().join(Card).join(Player).order_by(PitchingStat.id)
logging.debug(f"pit query:\n\n{all_stats}")
logging.debug(f'pit query:\n\n{all_stats}')
if season is not None:
all_stats = all_stats.where(PitchingStat.season == season)
@ -98,100 +88,46 @@ async def get_pit_stats(
if gs is not None:
all_stats = all_stats.where(PitchingStat.gs == 1 if gs else 0)
total_count = all_stats.count() if not csv else 0
all_stats = all_stats.limit(max(0, min(limit, 500)))
# if all_stats.count() == 0:
# db.close()
# raise HTTPException(status_code=404, detail=f'No pitching stats found')
if csv:
data_list = [
[
"id",
"card_id",
"player_id",
"cardset",
"team",
"vs_team",
"ip",
"hit",
"run",
"erun",
"so",
"bb",
"hbp",
"wp",
"balk",
"hr",
"ir",
"irs",
"gs",
"win",
"loss",
"hold",
"sv",
"bsv",
"week",
"season",
"created",
"game_id",
"roster_num",
]
]
data_list = [['id', 'card_id', 'player_id', 'cardset', 'team', 'vs_team', 'ip', 'hit', 'run', 'erun', 'so', 'bb', 'hbp',
'wp', 'balk', 'hr', 'ir', 'irs', 'gs', 'win', 'loss', 'hold', 'sv', 'bsv', 'week', 'season',
'created', 'game_id', 'roster_num']]
for line in all_stats:
data_list.append(
[
line.id,
line.card.id,
line.card.player.player_id,
line.card.player.cardset.name,
line.team.abbrev,
line.vs_team.abbrev,
line.ip,
line.hit,
line.run,
line.erun,
line.so,
line.bb,
line.hbp,
line.wp,
line.balk,
line.hr,
line.ir,
line.irs,
line.gs,
line.win,
line.loss,
line.hold,
line.sv,
line.bsv,
line.week,
line.season,
line.created,
line.game_id,
line.roster_num,
line.id, line.card.id, line.card.player.player_id, line.card.player.cardset.name, line.team.abbrev,
line.vs_team.abbrev, line.ip, line.hit,
line.run, line.erun, line.so, line.bb, line.hbp, line.wp, line.balk, line.hr, line.ir, line.irs,
line.gs, line.win, line.loss, line.hold, line.sv, line.bsv, line.week, line.season, line.created,
line.game_id, line.roster_num
]
)
return_val = DataFrame(data_list).to_csv(header=False, index=False)
return Response(content=return_val, media_type="text/csv")
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = {"count": total_count, "stats": []}
return_val = {'count': all_stats.count(), 'stats': []}
for x in all_stats:
return_val["stats"].append(model_to_dict(x, recurse=False))
return_val['stats'].append(model_to_dict(x, recurse=False))
db.close()
return return_val
@router.post("")
@router.post('')
async def post_pitstat(stats: PitchingStatModel, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to post stats. This event has been logged.",
detail='You are not authorized to post stats. This event has been logged.'
)
new_stats = []
@ -221,37 +157,37 @@ async def post_pitstat(stats: PitchingStatModel, token: str = Depends(oauth2_sch
bsv=x.bsv,
week=x.week,
season=x.season,
created=datetime.fromtimestamp(x.created / 1000)
if x.created
else datetime.now(),
game_id=x.game_id,
created=datetime.fromtimestamp(x.created / 1000) if x.created else datetime.now(),
game_id=x.game_id
)
new_stats.append(this_stat)
with db.atomic():
PitchingStat.bulk_create(new_stats, batch_size=15)
db.close()
raise HTTPException(
status_code=200, detail=f"{len(new_stats)} pitching lines have been added"
)
raise HTTPException(status_code=200, detail=f'{len(new_stats)} pitching lines have been added')
@router.delete("/{stat_id}")
@router.delete('/{stat_id}')
async def delete_pitstat(stat_id, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to delete stats. This event has been logged.",
detail='You are not authorized to delete stats. This event has been logged.'
)
try:
this_stat = PitchingStat.get_by_id(stat_id)
except DoesNotExist:
raise HTTPException(status_code=404, detail=f"No stat found with id {stat_id}")
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No stat found with id {stat_id}')
count = this_stat.delete_instance()
db.close()
if count == 1:
raise HTTPException(status_code=200, detail=f"Stat {stat_id} has been deleted")
raise HTTPException(status_code=200, detail=f'Stat {stat_id} has been deleted')
else:
raise HTTPException(status_code=500, detail=f"Stat {stat_id} was not deleted")
raise HTTPException(status_code=500, detail=f'Stat {stat_id} was not deleted')

View File

@ -1,17 +1,17 @@
import datetime
import os.path
import base64
import pandas as pd
from fastapi import APIRouter, Depends, HTTPException, Request, Response, Query
from fastapi.responses import FileResponse
from fastapi.templating import Jinja2Templates
from html2image import Html2Image
from typing import Optional, List, Literal
import logging
import pydantic
from pandas import DataFrame
import asyncio as _asyncio
from playwright.async_api import async_playwright, Browser, Playwright
from playwright.async_api import async_playwright
from ..card_creation import get_batter_card_data, get_pitcher_card_data
from ..db_engine import (
@ -19,6 +19,7 @@ from ..db_engine import (
Player,
model_to_dict,
fn,
chunked,
Paperdex,
Cardset,
Rarity,
@ -28,68 +29,10 @@ from ..db_engine import (
PitchingCardRatings,
CardPosition,
MlbPlayer,
DoesNotExist,
)
from ..db_helpers import upsert_players
from ..dependencies import oauth2_scheme, valid_token
from ..services.refractor_boost import compute_variant_hash
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA
# ---------------------------------------------------------------------------
# Persistent browser instance (WP-02)
# ---------------------------------------------------------------------------
_browser: Browser | None = None
_playwright: Playwright | None = None
_browser_lock = _asyncio.Lock()
async def get_browser() -> Browser:
"""Get or create persistent Chromium browser instance.
Reuses a single browser across all card renders, eliminating the ~1-1.5s
per-request launch/teardown overhead. Automatically reconnects if the
browser process has died.
Uses an asyncio.Lock to prevent concurrent requests from racing to
launch multiple Chromium processes.
"""
global _browser, _playwright
async with _browser_lock:
if _browser is None or not _browser.is_connected():
if _playwright is not None:
try:
await _playwright.stop()
except Exception:
pass
_playwright = await async_playwright().start()
_browser = await _playwright.chromium.launch(
args=["--no-sandbox", "--disable-dev-shm-usage"]
)
return _browser
async def shutdown_browser():
"""Clean shutdown of the persistent browser.
Called by the FastAPI lifespan handler on application exit so the
Chromium process is not left orphaned.
"""
global _browser, _playwright
if _browser:
try:
await _browser.close()
except Exception:
pass
_browser = None
if _playwright:
try:
await _playwright.stop()
except Exception:
pass
_playwright = None
# ---------------------------------------------------------------------------
# Franchise normalization: Convert city+team names to city-agnostic team names
# This enables cross-era player matching (e.g., 'Oakland Athletics' -> 'Athletics')
FRANCHISE_NORMALIZE = {
@ -133,18 +76,11 @@ def normalize_franchise(franchise: str) -> str:
return FRANCHISE_NORMALIZE.get(titled, titled)
def resolve_refractor_tier(player_id: int, variant: int) -> int:
"""Determine the refractor tier (0-4) from a player's variant hash.
Pure math no DB query needed. Returns 0 for base cards or unknown variants.
"""
if variant == 0:
return 0
for tier in range(1, 5):
if compute_variant_hash(player_id, tier) == variant:
return tier
return 0
logging.basicConfig(
filename=LOG_DATA["filename"],
format=LOG_DATA["format"],
level=LOG_DATA["log_level"],
)
router = APIRouter(prefix="/api/v2/players", tags=["players"])
@ -216,7 +152,8 @@ async def get_players(
):
all_players = Player.select()
if all_players.count() == 0:
raise HTTPException(status_code=404, detail="There are no players to filter")
db.close()
raise HTTPException(status_code=404, detail=f"There are no players to filter")
if name is not None:
all_players = all_players.where(fn.Lower(Player.p_name) == name.lower())
@ -302,6 +239,7 @@ async def get_players(
if csv:
card_vals = [model_to_dict(x) for x in all_players]
db.close()
for x in card_vals:
x["player_name"] = x["p_name"]
@ -390,6 +328,7 @@ async def get_players(
# return_val['players'].append(model_to_dict(x, recurse=not flat))
db.close()
return return_val
@ -540,6 +479,7 @@ async def get_random_player(
)
return_val = DataFrame(data_list).to_csv(header=False, index=False)
db.close()
return Response(content=return_val, media_type="text/csv")
else:
@ -557,6 +497,7 @@ async def get_random_player(
return_val["players"].append(this_record)
# return_val['players'].append(model_to_dict(x))
db.close()
return return_val
@ -650,14 +591,16 @@ async def search_players(
return_val["players"].append(this_record)
db.close()
return return_val
@router.get("/{player_id}")
async def get_one_player(player_id: int, csv: Optional[bool] = False):
async def get_one_player(player_id, csv: Optional[bool] = False):
try:
this_player = Player.get_by_id(player_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(
status_code=404, detail=f"No player found with id {player_id}"
)
@ -689,6 +632,7 @@ async def get_one_player(player_id: int, csv: Optional[bool] = False):
"description",
]
]
return_val = DataFrame(data_list).to_csv(header=False, index=False)
data_list.append(
[
this_player.id,
@ -715,8 +659,8 @@ async def get_one_player(player_id: int, csv: Optional[bool] = False):
this_player.description,
]
)
return_val = DataFrame(data_list).to_csv(header=False, index=False)
db.close()
return Response(content=return_val, media_type="text/csv")
else:
return_val = model_to_dict(this_player)
@ -724,6 +668,7 @@ async def get_one_player(player_id: int, csv: Optional[bool] = False):
return_val["paperdex"] = {"count": this_dex.count(), "paperdex": []}
for x in this_dex:
return_val["paperdex"]["paperdex"].append(model_to_dict(x, recurse=False))
db.close()
return return_val
@ -737,19 +682,17 @@ async def get_batter_card(
variant: int = 0,
d: str = None,
html: Optional[bool] = False,
tier: Optional[int] = Query(
None, ge=0, le=4, description="Override refractor tier for preview (dev only)"
),
):
try:
this_player = Player.get_by_id(player_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(
status_code=404, detail=f"No player found with id {player_id}"
)
headers = {"Cache-Control": "public, max-age=86400"}
_filename = (
filename = (
f"{this_player.description} {this_player.p_name} {card_type} {d}-v{variant}"
)
if (
@ -757,8 +700,8 @@ async def get_batter_card(
f"storage/cards/cardset-{this_player.cardset.id}/{card_type}/{player_id}-{d}-v{variant}.png"
)
and html is False
and tier is None
):
db.close()
return FileResponse(
path=f"storage/cards/cardset-{this_player.cardset.id}/{card_type}/{player_id}-{d}-v{variant}.png",
media_type="image/png",
@ -804,9 +747,6 @@ async def get_batter_card(
card_data["cardset_name"] = this_player.cardset.name
else:
card_data["cardset_name"] = this_player.description
card_data["refractor_tier"] = (
tier if tier is not None else resolve_refractor_tier(player_id, variant)
)
card_data["request"] = request
html_response = templates.TemplateResponse("player_card.html", card_data)
@ -844,13 +784,11 @@ async def get_batter_card(
card_data["cardset_name"] = this_player.cardset.name
else:
card_data["cardset_name"] = this_player.description
card_data["refractor_tier"] = (
tier if tier is not None else resolve_refractor_tier(player_id, variant)
)
card_data["request"] = request
html_response = templates.TemplateResponse("player_card.html", card_data)
if html:
db.close()
return html_response
updates = 0
@ -881,17 +819,16 @@ async def get_batter_card(
logging.debug(f"body:\n{html_response.body.decode('UTF-8')}")
file_path = f"storage/cards/cardset-{this_player.cardset.id}/{card_type}/{player_id}-{d}-v{variant}.png"
browser = await get_browser()
page = await browser.new_page(viewport={"width": 1280, "height": 720})
try:
async with async_playwright() as p:
browser = await p.chromium.launch()
page = await browser.new_page()
await page.set_content(html_response.body.decode("UTF-8"))
await page.screenshot(
path=file_path,
type="png",
clip={"x": 0.0, "y": 0, "width": 1200, "height": 600},
)
finally:
await page.close()
await browser.close()
# hti = Html2Image(
# browser='chrome',
@ -906,6 +843,7 @@ async def get_batter_card(
# save_as=f'{player_id}-{d}-v{variant}.png'
# )
db.close()
return FileResponse(path=file_path, media_type="image/png", headers=headers)
@ -943,7 +881,8 @@ async def v1_players_patch(
token: str = Depends(oauth2_scheme),
):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to patch players. This event has been logged.",
@ -951,7 +890,8 @@ async def v1_players_patch(
try:
this_player = Player.get_by_id(player_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(
status_code=404, detail=f"No player found with id {player_id}"
)
@ -974,7 +914,8 @@ async def v1_players_patch(
if cardset_id is not None:
try:
this_cardset = Cardset.get_by_id(cardset_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(
status_code=404, detail=f"No cardset found with id {cardset_id}"
)
@ -982,7 +923,8 @@ async def v1_players_patch(
if rarity_id is not None:
try:
this_rarity = Rarity.get_by_id(rarity_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(
status_code=404, detail=f"No rarity found with id {rarity_id}"
)
@ -1044,6 +986,7 @@ async def v1_players_patch(
if this_player.save() == 1:
return_val = model_to_dict(this_player)
db.close()
return return_val
else:
raise HTTPException(
@ -1055,7 +998,8 @@ async def v1_players_patch(
@router.put("")
async def put_players(players: PlayerModel, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to post players. This event has been logged.",
@ -1124,6 +1068,7 @@ async def put_players(players: PlayerModel, token: str = Depends(oauth2_scheme))
with db.atomic():
# Use PostgreSQL-compatible upsert helper (preserves SQLite compatibility)
upsert_players(new_players, batch_size=15)
db.close()
# sheets.update_all_players(SHEETS_AUTH)
raise HTTPException(
@ -1134,7 +1079,8 @@ async def put_players(players: PlayerModel, token: str = Depends(oauth2_scheme))
@router.post("")
async def post_players(new_player: PlayerPydantic, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to post players. This event has been logged.",
@ -1145,6 +1091,7 @@ async def post_players(new_player: PlayerPydantic, token: str = Depends(oauth2_s
& (Player.cardset_id == new_player.cardset_id)
)
if dupe_query.count() != 0:
db.close()
raise HTTPException(
status_code=400,
detail=f"This appears to be a duplicate with player {dupe_query[0].player_id}",
@ -1157,6 +1104,7 @@ async def post_players(new_player: PlayerPydantic, token: str = Depends(oauth2_s
p_id = Player.insert(new_player.dict()).execute()
return_val = model_to_dict(Player.get_by_id(p_id))
db.close()
return return_val
@ -1165,7 +1113,8 @@ async def post_image_reset(
player_id: int, dev: bool = False, token: str = Depends(oauth2_scheme)
):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to modify players. This event has been logged.",
@ -1173,6 +1122,7 @@ async def post_image_reset(
this_player = Player.get_or_none(Player.player_id == player_id)
if this_player is None:
db.close()
raise HTTPException(status_code=404, detail=f"Player ID {player_id} not found")
now = datetime.datetime.now()
@ -1193,13 +1143,15 @@ async def post_image_reset(
this_player.save()
r_player = model_to_dict(this_player)
db.close()
return r_player
@router.delete("/{player_id}")
async def delete_player(player_id: int, token: str = Depends(oauth2_scheme)):
async def delete_player(player_id, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to delete players. This event has been logged.",
@ -1207,12 +1159,14 @@ async def delete_player(player_id: int, token: str = Depends(oauth2_scheme)):
try:
this_player = Player.get_by_id(player_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(
status_code=404, detail=f"No player found with id {player_id}"
)
count = this_player.delete_instance()
db.close()
if count == 1:
raise HTTPException(

View File

@ -4,9 +4,14 @@ import logging
import pydantic
from pandas import DataFrame
from ..db_engine import Rarity, model_to_dict, fn, DoesNotExist
from ..dependencies import oauth2_scheme, valid_token
from ..db_engine import db, Rarity, model_to_dict, fn
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA
logging.basicConfig(
filename=LOG_DATA['filename'],
format=LOG_DATA['format'],
level=LOG_DATA['log_level']
)
router = APIRouter(
prefix='/api/v2/rarities',
@ -26,6 +31,7 @@ async def get_rarities(value: Optional[int] = None, name: Optional[str] = None,
all_rarities = Rarity.select().order_by(Rarity.id)
if all_rarities.count() == 0:
db.close()
raise HTTPException(status_code=404, detail=f'There are no rarities to filter')
if value is not None:
@ -38,6 +44,7 @@ async def get_rarities(value: Optional[int] = None, name: Optional[str] = None,
all_rarities = all_rarities.where(Rarity.value <= max_value)
if all_rarities.count() == 0:
db.close()
raise HTTPException(status_code=404, detail=f'No rarities found')
if csv:
@ -50,6 +57,7 @@ async def get_rarities(value: Optional[int] = None, name: Optional[str] = None,
)
return_val = DataFrame(data_list).to_csv(header=False, index=False)
db.close()
return Response(content=return_val, media_type='text/csv')
else:
@ -57,6 +65,7 @@ async def get_rarities(value: Optional[int] = None, name: Optional[str] = None,
for x in all_rarities:
return_val['rarities'].append(model_to_dict(x))
db.close()
return return_val
@ -64,7 +73,8 @@ async def get_rarities(value: Optional[int] = None, name: Optional[str] = None,
async def get_one_rarity(rarity_id, csv: Optional[bool] = False):
try:
this_rarity = Rarity.get_by_id(rarity_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No rarity found with id {rarity_id}')
if csv:
@ -77,16 +87,19 @@ async def get_one_rarity(rarity_id, csv: Optional[bool] = False):
)
return_val = DataFrame(data_list).to_csv(header=False, index=False)
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = model_to_dict(this_rarity)
db.close()
return return_val
@router.post('')
async def post_rarity(rarity: RarityModel, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning('Bad Token: [REDACTED]')
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail='You are not authorized to post rarities. This event has been logged.'
@ -94,6 +107,7 @@ async def post_rarity(rarity: RarityModel, token: str = Depends(oauth2_scheme)):
dupe_team = Rarity.get_or_none(Rarity.name)
if dupe_team:
db.close()
raise HTTPException(status_code=400, detail=f'There is already a rarity using {rarity.name}')
this_rarity = Rarity(
@ -105,6 +119,7 @@ async def post_rarity(rarity: RarityModel, token: str = Depends(oauth2_scheme)):
saved = this_rarity.save()
if saved == 1:
return_val = model_to_dict(this_rarity)
db.close()
return return_val
else:
raise HTTPException(
@ -118,14 +133,16 @@ async def patch_rarity(
rarity_id, value: Optional[int] = None, name: Optional[str] = None, color: Optional[str] = None,
token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning('Bad Token: [REDACTED]')
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail='You are not authorized to patch rarities. This event has been logged.'
)
try:
this_rarity = Rarity.get_by_id(rarity_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No rarity found with id {rarity_id}')
if value is not None:
@ -137,6 +154,7 @@ async def patch_rarity(
if this_rarity.save() == 1:
return_val = model_to_dict(this_rarity)
db.close()
return return_val
else:
raise HTTPException(
@ -148,17 +166,20 @@ async def patch_rarity(
@router.delete('/{rarity_id}')
async def v1_rarities_delete(rarity_id, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning('Bad Token: [REDACTED]')
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail='You are not authorized to delete rarities. This event has been logged.'
)
try:
this_rarity = Rarity.get_by_id(rarity_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No rarity found with id {rarity_id}')
count = this_rarity.delete_instance()
db.close()
if count == 1:
raise HTTPException(status_code=200, detail=f'Rarity {rarity_id} has been deleted')

View File

@ -1,434 +0,0 @@
import os
from fastapi import APIRouter, Depends, HTTPException, Query
import logging
from typing import Optional
from ..db_engine import model_to_dict
from ..dependencies import oauth2_scheme, valid_token
from ..services.refractor_init import initialize_card_refractor, _determine_card_type
# Module-scoped logger so records emitted here carry this module's name.
logger = logging.getLogger(__name__)
# All refractor endpoints are mounted under /api/v2/refractor.
router = APIRouter(prefix="/api/v2/refractor", tags=["refractor"])
# Tier -> threshold attribute name. Index = current_tier; value is the
# attribute on RefractorTrack whose value is the *next* threshold to reach.
# Tier 4 is fully evolved so there is no next threshold (None sentinel).
_NEXT_THRESHOLD_ATTR = {
    0: "t1_threshold",
    1: "t2_threshold",
    2: "t3_threshold",
    3: "t4_threshold",
    4: None,
}
def _build_card_state_response(state, player_name=None) -> dict:
    """Serialise a RefractorCardState into the standard API response shape.

    The response is a flat dict: player_id / team_id as plain integers, the
    full track row nested under 'track', plus two derived fields:

    - 'next_threshold': the threshold for the tier one above the current one
      (None once the card is fully evolved, i.e. tier 4).
    - 'progress_pct': current_value as a percentage of next_threshold,
      rounded to one decimal (None when fully evolved or the threshold is 0).

    When *player_name* is supplied (e.g. from a list endpoint's join) it is
    added under 'player_name'; otherwise the key is omitted entirely.

    model_to_dict(recurse=False) is used for the track so FK fields stay as
    ids instead of nested objects.
    """
    track_row = state.track

    # Work out the next tier's threshold; tier 4 maps to None in the table.
    threshold_attr = _NEXT_THRESHOLD_ATTR.get(state.current_tier)
    threshold = getattr(track_row, threshold_attr) if threshold_attr else None

    # Percentage toward the next tier; only meaningful for a positive threshold.
    pct = None
    if threshold is not None and threshold > 0:
        pct = round(state.current_value / threshold * 100, 1)

    # Timestamps may arrive as datetime objects or raw values; normalise to
    # an ISO string when possible, otherwise pass through (falsy -> None).
    last_eval = state.last_evaluated_at
    if hasattr(last_eval, "isoformat"):
        last_eval = last_eval.isoformat()
    else:
        last_eval = last_eval or None

    payload = {
        "player_id": state.player_id,
        "team_id": state.team_id,
        "current_tier": state.current_tier,
        "current_value": state.current_value,
        "fully_evolved": state.fully_evolved,
        "last_evaluated_at": last_eval,
        "track": model_to_dict(track_row, recurse=False),
        "next_threshold": threshold,
        "progress_pct": pct,
    }
    if player_name is not None:
        payload["player_name"] = player_name
    return payload
@router.get("/tracks")
async def list_tracks(
card_type: Optional[str] = Query(default=None),
token: str = Depends(oauth2_scheme),
):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
raise HTTPException(status_code=401, detail="Unauthorized")
from ..db_engine import RefractorTrack
query = RefractorTrack.select()
if card_type is not None:
query = query.where(RefractorTrack.card_type == card_type)
items = [model_to_dict(t, recurse=False) for t in query]
return {"count": len(items), "items": items}
@router.get("/tracks/{track_id}")
async def get_track(track_id: int, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
raise HTTPException(status_code=401, detail="Unauthorized")
from ..db_engine import RefractorTrack
try:
track = RefractorTrack.get_by_id(track_id)
except Exception:
raise HTTPException(status_code=404, detail=f"Track {track_id} not found")
return model_to_dict(track, recurse=False)
@router.get("/cards")
async def list_card_states(
team_id: int = Query(...),
card_type: Optional[str] = Query(default=None),
tier: Optional[int] = Query(default=None, ge=0, le=4),
season: Optional[int] = Query(default=None),
progress: Optional[str] = Query(default=None),
evaluated_only: bool = Query(default=True),
limit: int = Query(default=10, ge=1, le=100),
offset: int = Query(default=0, ge=0),
token: str = Depends(oauth2_scheme),
):
"""List RefractorCardState rows for a team, with optional filters and pagination.
Required:
team_id -- filter to this team's cards; returns empty list if team has no states
Optional filters:
card_type -- one of 'batter', 'sp', 'rp'; filters by RefractorTrack.card_type
tier -- filter by current_tier (0-4)
season -- filter to players who have batting or pitching season stats in that
season (EXISTS subquery against batting/pitching_season_stats)
progress -- 'close' = only cards within 80% of their next tier threshold;
fully evolved cards are always excluded from this filter
evaluated_only -- default True; when True, excludes cards where last_evaluated_at
is NULL (cards created but never run through the evaluator).
Set to False to include all rows, including zero-value placeholders.
Pagination:
limit -- page size (1-100, default 10)
offset -- items to skip (default 0)
Response: {"count": N, "items": [...]}
count is the total matching rows before limit/offset.
Each item includes player_name and progress_pct in addition to the
standard single-card response fields.
Sort order: current_tier DESC, current_value DESC.
"""
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
raise HTTPException(status_code=401, detail="Unauthorized")
from ..db_engine import (
RefractorCardState,
RefractorTrack,
Player,
BattingSeasonStats,
PitchingSeasonStats,
fn,
Case,
JOIN,
)
query = (
RefractorCardState.select(RefractorCardState, RefractorTrack, Player)
.join(RefractorTrack)
.switch(RefractorCardState)
.join(
Player, JOIN.LEFT_OUTER, on=(RefractorCardState.player == Player.player_id)
)
.where(RefractorCardState.team == team_id)
.order_by(
RefractorCardState.current_tier.desc(),
RefractorCardState.current_value.desc(),
)
)
if card_type is not None:
query = query.where(RefractorTrack.card_type == card_type)
if tier is not None:
query = query.where(RefractorCardState.current_tier == tier)
if season is not None:
batter_exists = BattingSeasonStats.select().where(
(BattingSeasonStats.player == RefractorCardState.player)
& (BattingSeasonStats.team == RefractorCardState.team)
& (BattingSeasonStats.season == season)
)
pitcher_exists = PitchingSeasonStats.select().where(
(PitchingSeasonStats.player == RefractorCardState.player)
& (PitchingSeasonStats.team == RefractorCardState.team)
& (PitchingSeasonStats.season == season)
)
query = query.where(fn.EXISTS(batter_exists) | fn.EXISTS(pitcher_exists))
if progress == "close":
next_threshold_expr = Case(
RefractorCardState.current_tier,
(
(0, RefractorTrack.t1_threshold),
(1, RefractorTrack.t2_threshold),
(2, RefractorTrack.t3_threshold),
(3, RefractorTrack.t4_threshold),
),
None,
)
query = query.where(
(RefractorCardState.fully_evolved == False) # noqa: E712
& (RefractorCardState.current_value >= next_threshold_expr * 0.8)
)
if evaluated_only:
query = query.where(RefractorCardState.last_evaluated_at.is_null(False))
total = query.count()
items = []
for state in query.offset(offset).limit(limit):
player_name = None
try:
player_name = state.player.p_name
except Exception:
pass
items.append(_build_card_state_response(state, player_name=player_name))
return {"count": total, "items": items}
@router.get("/cards/{card_id}")
async def get_card_state(card_id: int, token: str = Depends(oauth2_scheme)):
    """Look up the RefractorCardState behind a Card.id.

    The Card row supplies the (player_id, team_id) pair; because duplicate
    cards for the same player+team share a single state row (unique
    (player, team) constraint), any card_id for that player on that team
    resolves to the same state.

    Raises:
        HTTPException 401: invalid bearer token.
        HTTPException 404: unknown card_id, or no state row exists yet.
    """
    if not valid_token(token):
        logging.warning("Bad Token: [REDACTED]")
        raise HTTPException(status_code=401, detail="Unauthorized")

    from ..db_engine import Card, RefractorCardState, RefractorTrack, DoesNotExist

    # Resolve the card to its (player, team) pair first.
    try:
        card = Card.get_by_id(card_id)
    except DoesNotExist:
        raise HTTPException(status_code=404, detail=f"Card {card_id} not found")

    # One query resolves both the state row and its track.
    state_query = (
        RefractorCardState.select(RefractorCardState, RefractorTrack)
        .join(RefractorTrack)
        .where(
            (RefractorCardState.player == card.player_id)
            & (RefractorCardState.team == card.team_id)
        )
    )
    try:
        state = state_query.get()
    except DoesNotExist:
        raise HTTPException(
            status_code=404,
            detail=f"No refractor state for card {card_id}",
        )
    return _build_card_state_response(state)
@router.post("/cards/{card_id}/evaluate")
async def evaluate_card(card_id: int, token: str = Depends(oauth2_scheme)):
    """Force-recalculate refractor state for a card from career stats.

    Resolves card_id to (player_id, team_id), then recomputes the refractor
    tier from all player_season_stats rows for that pair. Idempotent.

    Raises:
        HTTPException 401: invalid bearer token.
        HTTPException 404: unknown card_id, or the evaluator rejects the pair.
    """
    if not valid_token(token):
        logging.warning("Bad Token: [REDACTED]")
        raise HTTPException(status_code=401, detail="Unauthorized")
    from ..db_engine import Card, DoesNotExist
    from ..services.refractor_evaluator import evaluate_card as _evaluate

    # Catch only the missing-row case. The previous broad `except Exception`
    # masked unexpected DB/driver errors as 404s; those should surface as
    # 500s instead (matches get_card_state's handling).
    try:
        card = Card.get_by_id(card_id)
    except DoesNotExist:
        raise HTTPException(status_code=404, detail=f"Card {card_id} not found")
    try:
        result = _evaluate(card.player_id, card.team_id)
    except ValueError as exc:
        # Evaluator signals "pair not evaluable" via ValueError.
        raise HTTPException(status_code=404, detail=str(exc))
    return result
@router.post("/evaluate-game/{game_id}")
async def evaluate_game(game_id: int, token: str = Depends(oauth2_scheme)):
    """Evaluate refractor state for all players who appeared in a game.

    Finds all unique (player_id, team_id) pairs from the game's StratPlay rows,
    then for each pair that has a RefractorCardState, re-computes the refractor
    tier. Pairs without a state row are auto-initialized on-the-fly via
    initialize_card_refractor (idempotent). Per-player errors are logged but
    do not abort the batch.

    Returns:
        dict with "evaluated" (int count of pairs evaluated) and "tier_ups"
        (list of dicts describing each committed tier-up).

    Raises:
        HTTPException 401: invalid bearer token.
    """
    if not valid_token(token):
        logging.warning("Bad Token: [REDACTED]")
        raise HTTPException(status_code=401, detail="Unauthorized")
    from ..db_engine import RefractorCardState, Player, StratPlay
    from ..services.refractor_boost import apply_tier_boost
    from ..services.refractor_evaluator import evaluate_card

    # Collect every distinct (player, team) pair that appeared in the game,
    # from both the batter and pitcher side of each play.
    plays = list(StratPlay.select().where(StratPlay.game == game_id))
    pairs: set[tuple[int, int]] = set()
    for play in plays:
        if play.batter_id is not None:
            pairs.add((play.batter_id, play.batter_team_id))
        if play.pitcher_id is not None:
            pairs.add((play.pitcher_id, play.pitcher_team_id))
    evaluated = 0
    tier_ups = []
    # Boost application can be disabled operationally; anything other than
    # the literal string "false" (case-insensitive) leaves it enabled.
    boost_enabled = os.environ.get("REFRACTOR_BOOST_ENABLED", "true").lower() != "false"
    for player_id, team_id in pairs:
        try:
            state = RefractorCardState.get_or_none(
                (RefractorCardState.player_id == player_id)
                & (RefractorCardState.team_id == team_id)
            )
            if state is None:
                # No state row yet — auto-initialize one. Failure here is
                # non-fatal; the pair is simply skipped this batch.
                try:
                    player = Player.get_by_id(player_id)
                    card_type = _determine_card_type(player)
                    state = initialize_card_refractor(player_id, team_id, card_type)
                except Exception:
                    logger.warning(
                        f"Refractor auto-init failed for player={player_id} "
                        f"team={team_id} — skipping"
                    )
            if state is None:
                continue
            old_tier = state.current_tier
            # Use dry_run=True so that current_tier is NOT written here.
            # apply_tier_boost() writes current_tier + variant atomically on
            # tier-up. If no tier-up occurs, apply_tier_boost is not called
            # and the tier stays at old_tier (correct behaviour).
            result = evaluate_card(player_id, team_id, dry_run=True)
            evaluated += 1
            # Use computed_tier (what the formula says) to detect tier-ups.
            computed_tier = result.get("computed_tier", old_tier)
            if computed_tier > old_tier:
                # Resolve a display name for the notification; "Unknown" if
                # the player row cannot be fetched.
                player_name = "Unknown"
                try:
                    p = Player.get_by_id(player_id)
                    player_name = p.p_name
                except Exception:
                    pass
                # Phase 2: Apply rating boosts for each tier gained.
                # apply_tier_boost() writes current_tier + variant atomically.
                # If it fails, current_tier stays at old_tier — automatic retry next game.
                boost_result = None
                if not boost_enabled:
                    # Boost disabled via REFRACTOR_BOOST_ENABLED=false.
                    # Skip notification — current_tier was not written (dry_run),
                    # so reporting a tier-up would be a false notification.
                    continue
                card_type = state.track.card_type if state.track else None
                if card_type:
                    # Walk tier-by-tier so a mid-sequence failure leaves the
                    # already-committed tiers intact and reportable.
                    last_successful_tier = old_tier
                    failing_tier = old_tier + 1
                    try:
                        for tier in range(old_tier + 1, computed_tier + 1):
                            failing_tier = tier
                            boost_result = apply_tier_boost(
                                player_id, team_id, tier, card_type
                            )
                            last_successful_tier = tier
                    except Exception as boost_exc:
                        logger.warning(
                            f"Refractor boost failed for player={player_id} "
                            f"team={team_id} tier={failing_tier}: {boost_exc}"
                        )
                    # Report only the tiers that actually succeeded.
                    # If none succeeded, skip the tier_up notification entirely.
                    if last_successful_tier == old_tier:
                        continue
                    # At least one intermediate tier was committed; report that.
                    computed_tier = last_successful_tier
                else:
                    # No card_type means no track — skip boost and skip notification.
                    # A false tier-up notification must not be sent when the boost
                    # was never applied (current_tier was never written to DB).
                    logger.warning(
                        f"Refractor boost skipped for player={player_id} "
                        f"team={team_id}: no card_type on track"
                    )
                    continue
                tier_up_entry = {
                    "player_id": player_id,
                    "team_id": team_id,
                    "player_name": player_name,
                    "old_tier": old_tier,
                    "new_tier": computed_tier,
                    "current_value": result.get("current_value", 0),
                    "track_name": state.track.name if state.track else "Unknown",
                }
                # Non-breaking addition: include boost info when available.
                if boost_result:
                    tier_up_entry["variant_created"] = boost_result.get(
                        "variant_created"
                    )
                tier_ups.append(tier_up_entry)
        except Exception as exc:
            # Per-player failures are logged and skipped; the batch continues.
            logger.warning(
                f"Refractor eval failed for player={player_id} team={team_id}: {exc}"
            )
    return {"evaluated": evaluated, "tier_ups": tier_ups}

View File

@ -4,11 +4,19 @@ import logging
import pydantic
from pandas import DataFrame
from ..db_engine import Result, model_to_dict, Team, DataError, DoesNotExist
from ..dependencies import oauth2_scheme, valid_token
from ..db_engine import db, Result, model_to_dict, Team, DataError
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA
logging.basicConfig(
filename=LOG_DATA['filename'],
format=LOG_DATA['format'],
level=LOG_DATA['log_level']
)
router = APIRouter(prefix="/api/v2/results", tags=["results"])
router = APIRouter(
prefix='/api/v2/results',
tags=['results']
)
class ResultModel(pydantic.BaseModel):
@ -28,29 +36,15 @@ class ResultModel(pydantic.BaseModel):
game_type: str
@router.get("")
@router.get('')
async def get_results(
away_team_id: Optional[int] = None,
home_team_id: Optional[int] = None,
team_one_id: Optional[int] = None,
team_two_id: Optional[int] = None,
away_score_min: Optional[int] = None,
away_score_max: Optional[int] = None,
home_score_min: Optional[int] = None,
home_score_max: Optional[int] = None,
bothscore_min: Optional[int] = None,
bothscore_max: Optional[int] = None,
season: Optional[int] = None,
week: Optional[int] = None,
week_start: Optional[int] = None,
week_end: Optional[int] = None,
ranked: Optional[bool] = None,
short_game: Optional[bool] = None,
game_type: Optional[str] = None,
vs_ai: Optional[bool] = None,
csv: Optional[bool] = None,
limit: int = 100,
):
away_team_id: Optional[int] = None, home_team_id: Optional[int] = None, team_one_id: Optional[int] = None,
team_two_id: Optional[int] = None, away_score_min: Optional[int] = None, away_score_max: Optional[int] = None,
home_score_min: Optional[int] = None, home_score_max: Optional[int] = None, bothscore_min: Optional[int] = None,
bothscore_max: Optional[int] = None, season: Optional[int] = None, week: Optional[int] = None,
week_start: Optional[int] = None, week_end: Optional[int] = None, ranked: Optional[bool] = None,
short_game: Optional[bool] = None, game_type: Optional[str] = None, vs_ai: Optional[bool] = None,
csv: Optional[bool] = None):
all_results = Result.select()
# if all_results.count() == 0:
@ -61,41 +55,33 @@ async def get_results(
try:
this_team = Team.get_by_id(away_team_id)
all_results = all_results.where(Result.away_team == this_team)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No team found with id {away_team_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No team found with id {away_team_id}')
if home_team_id is not None:
try:
this_team = Team.get_by_id(home_team_id)
all_results = all_results.where(Result.home_team == this_team)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No team found with id {home_team_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No team found with id {home_team_id}')
if team_one_id is not None:
try:
this_team = Team.get_by_id(team_one_id)
all_results = all_results.where(
(Result.home_team == this_team) | (Result.away_team == this_team)
)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No team found with id {team_one_id}"
)
all_results = all_results.where((Result.home_team == this_team) | (Result.away_team == this_team))
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No team found with id {team_one_id}')
if team_two_id is not None:
try:
this_team = Team.get_by_id(team_two_id)
all_results = all_results.where(
(Result.home_team == this_team) | (Result.away_team == this_team)
)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No team found with id {team_two_id}"
)
all_results = all_results.where((Result.home_team == this_team) | (Result.away_team == this_team))
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No team found with id {team_two_id}')
if away_score_min is not None:
all_results = all_results.where(Result.away_score >= away_score_min)
@ -110,14 +96,10 @@ async def get_results(
all_results = all_results.where(Result.home_score <= home_score_max)
if bothscore_min is not None:
all_results = all_results.where(
(Result.home_score >= bothscore_min) & (Result.away_score >= bothscore_min)
)
all_results = all_results.where((Result.home_score >= bothscore_min) & (Result.away_score >= bothscore_min))
if bothscore_max is not None:
all_results = all_results.where(
(Result.home_score <= bothscore_max) & (Result.away_score <= bothscore_max)
)
all_results = all_results.where((Result.home_score <= bothscore_max) & (Result.away_score <= bothscore_max))
if season is not None:
all_results = all_results.where(Result.season == season)
@ -141,9 +123,6 @@ async def get_results(
all_results = all_results.where(Result.game_type == game_type)
all_results = all_results.order_by(Result.id)
limit = max(0, min(limit, 500))
total_count = all_results.count() if not csv else 0
all_results = all_results.limit(limit)
# Not functional
# if vs_ai is not None:
# AwayTeam = Team.alias()
@ -164,115 +143,65 @@ async def get_results(
# logging.info(f'Result Query:\n\n{all_results}')
if csv:
data_list = [
[
"id",
"away_abbrev",
"home_abbrev",
"away_score",
"home_score",
"away_tv",
"home_tv",
"game_type",
"season",
"week",
"short_game",
"ranked",
]
]
data_list = [['id', 'away_abbrev', 'home_abbrev', 'away_score', 'home_score', 'away_tv', 'home_tv',
'game_type', 'season', 'week', 'short_game', 'ranked']]
for line in all_results:
data_list.append(
[
line.id,
line.away_team.abbrev,
line.home_team.abbrev,
line.away_score,
line.home_score,
line.away_team_value,
line.home_team_value,
line.game_type if line.game_type else "minor-league",
line.season,
line.week,
line.short_game,
line.ranked,
]
)
data_list.append([
line.id, line.away_team.abbrev, line.home_team.abbrev, line.away_score, line.home_score,
line.away_team_value, line.home_team_value, line.game_type if line.game_type else 'minor-league',
line.season, line.week, line.short_game, line.ranked
])
return_val = DataFrame(data_list).to_csv(header=False, index=False)
return Response(content=return_val, media_type="text/csv")
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = {"count": total_count, "results": []}
return_val = {'count': all_results.count(), 'results': []}
for x in all_results:
return_val["results"].append(model_to_dict(x))
return_val['results'].append(model_to_dict(x))
db.close()
return return_val
@router.get("/{result_id}")
@router.get('/{result_id}')
async def get_one_results(result_id, csv: Optional[bool] = None):
try:
this_result = Result.get_by_id(result_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No result found with id {result_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No result found with id {result_id}')
if csv:
data_list = [
[
"id",
"away_abbrev",
"home_abbrev",
"away_score",
"home_score",
"away_tv",
"home_tv",
"game_type",
"season",
"week",
"game_type",
],
[
this_result.id,
this_result.away_team.abbrev,
this_result.away_team.abbrev,
this_result.away_score,
this_result.home_score,
this_result.away_team_value,
this_result.home_team_value,
this_result.game_type if this_result.game_type else "minor-league",
this_result.season,
this_result.week,
this_result.game_type,
],
['id', 'away_abbrev', 'home_abbrev', 'away_score', 'home_score', 'away_tv', 'home_tv', 'game_type',
'season', 'week', 'game_type'],
[this_result.id, this_result.away_team.abbrev, this_result.away_team.abbrev, this_result.away_score,
this_result.home_score, this_result.away_team_value, this_result.home_team_value,
this_result.game_type if this_result.game_type else 'minor-league',
this_result.season, this_result.week, this_result.game_type]
]
return_val = DataFrame(data_list).to_csv(header=False, index=False)
return Response(content=return_val, media_type="text/csv")
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = model_to_dict(this_result)
db.close()
return return_val
@router.get("/team/{team_id}")
@router.get('/team/{team_id}')
async def get_team_results(
team_id: int,
season: Optional[int] = None,
week: Optional[int] = None,
csv: Optional[bool] = False,
):
all_results = (
Result.select()
.where((Result.away_team_id == team_id) | (Result.home_team_id == team_id))
.order_by(Result.id)
)
team_id: int, season: Optional[int] = None, week: Optional[int] = None, csv: Optional[bool] = False):
all_results = Result.select().where((Result.away_team_id == team_id) | (Result.home_team_id == team_id)).order_by(Result.id)
try:
this_team = Team.get_by_id(team_id)
except DoesNotExist:
logging.error(f"Unknown team id {team_id} trying to pull team results")
raise HTTPException(404, f"Team id {team_id} not found")
except Exception as e:
logging.error(f'Unknown team id {team_id} trying to pull team results')
raise HTTPException(404, f'Team id {team_id} not found')
if season is not None:
all_results = all_results.where(Result.season == season)
@ -309,38 +238,34 @@ async def get_team_results(
if csv:
data_list = [
[
"team_id",
"ranked_wins",
"ranked_losses",
"casual_wins",
"casual_losses",
"team_ranking",
],
[team_id, r_wins, r_loss, c_wins, c_loss, this_team.ranking],
['team_id', 'ranked_wins', 'ranked_losses', 'casual_wins', 'casual_losses', 'team_ranking'],
[team_id, r_wins, r_loss, c_wins, c_loss, this_team.ranking]
]
return_val = DataFrame(data_list).to_csv(header=False, index=False)
return Response(content=return_val, media_type="text/csv")
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = {
"team": model_to_dict(this_team),
"ranked_wins": r_wins,
"ranked_losses": r_loss,
"casual_wins": c_wins,
"casual_losses": c_loss,
'team': model_to_dict(this_team),
'ranked_wins': r_wins,
'ranked_losses': r_loss,
'casual_wins': c_wins,
'casual_losses': c_loss,
}
db.close()
return return_val
@router.post("")
@router.post('')
async def post_result(result: ResultModel, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to post results. This event has been logged.",
detail='You are not authorized to post results. This event has been logged.'
)
this_result = Result(**result.__dict__)
@ -348,28 +273,26 @@ async def post_result(result: ResultModel, token: str = Depends(oauth2_scheme)):
if result.ranked:
if not result.away_team_ranking:
error = f"Ranked game did not include away team ({result.away_team_id}) ranking."
db.close()
error = f'Ranked game did not include away team ({result.away_team_id}) ranking.'
logging.error(error)
raise DataError(error)
if not result.home_team_ranking:
error = f"Ranked game did not include home team ({result.home_team_id}) ranking."
db.close()
error = f'Ranked game did not include home team ({result.home_team_id}) ranking.'
logging.error(error)
raise DataError(error)
k_value = 20 if result.short_game else 60
ratio = (result.home_team_ranking - result.away_team_ranking) / 400
exp_score = 1 / (1 + (10**ratio))
exp_score = 1 / (1 + (10 ** ratio))
away_win = True if result.away_score > result.home_score else False
total_delta = k_value * exp_score
high_delta = (
total_delta * exp_score
if exp_score > 0.5
else total_delta * (1 - exp_score)
)
high_delta = total_delta * exp_score if exp_score > .5 else total_delta * (1 - exp_score)
low_delta = total_delta - high_delta
# exp_score > .5 means away team is favorite
if exp_score > 0.5 and away_win:
if exp_score > .5 and away_win:
final_delta = low_delta
away_delta = low_delta * 3
home_delta = -low_delta
@ -377,7 +300,7 @@ async def post_result(result: ResultModel, token: str = Depends(oauth2_scheme)):
final_delta = high_delta
away_delta = high_delta * 3
home_delta = -high_delta
elif exp_score <= 0.5 and not away_win:
elif exp_score <= .5 and not away_win:
final_delta = low_delta
away_delta = -low_delta
home_delta = low_delta * 3
@ -390,59 +313,50 @@ async def post_result(result: ResultModel, token: str = Depends(oauth2_scheme)):
away_delta = 0
home_delta = 0
logging.debug(
f"/results ranking deltas\n\nk_value: {k_value} / ratio: {ratio} / "
f"exp_score: {exp_score} / away_win: {away_win} / total_delta: {total_delta} / "
f"high_delta: {high_delta} / low_delta: {low_delta} / final_delta: {final_delta} / "
)
logging.debug(f'/results ranking deltas\n\nk_value: {k_value} / ratio: {ratio} / '
f'exp_score: {exp_score} / away_win: {away_win} / total_delta: {total_delta} / '
f'high_delta: {high_delta} / low_delta: {low_delta} / final_delta: {final_delta} / ')
away_team = Team.get_by_id(result.away_team_id)
away_team.ranking += away_delta
away_team.save()
logging.info(f"Just updated {away_team.abbrev} ranking to {away_team.ranking}")
logging.info(f'Just updated {away_team.abbrev} ranking to {away_team.ranking}')
home_team = Team.get_by_id(result.home_team_id)
home_team.ranking += home_delta
home_team.save()
logging.info(f"Just updated {home_team.abbrev} ranking to {home_team.ranking}")
logging.info(f'Just updated {home_team.abbrev} ranking to {home_team.ranking}')
if saved == 1:
return_val = model_to_dict(this_result)
db.close()
return return_val
else:
db.close()
raise HTTPException(
status_code=418,
detail="Well slap my ass and call me a teapot; I could not save that roster",
detail='Well slap my ass and call me a teapot; I could not save that roster'
)
@router.patch("/{result_id}")
@router.patch('/{result_id}')
async def patch_result(
result_id,
away_team_id: Optional[int] = None,
home_team_id: Optional[int] = None,
away_score: Optional[int] = None,
home_score: Optional[int] = None,
away_team_value: Optional[int] = None,
home_team_value: Optional[int] = None,
scorecard: Optional[str] = None,
week: Optional[int] = None,
season: Optional[int] = None,
short_game: Optional[bool] = None,
game_type: Optional[str] = None,
token: str = Depends(oauth2_scheme),
):
result_id, away_team_id: Optional[int] = None, home_team_id: Optional[int] = None,
away_score: Optional[int] = None, home_score: Optional[int] = None, away_team_value: Optional[int] = None,
home_team_value: Optional[int] = None, scorecard: Optional[str] = None, week: Optional[int] = None,
season: Optional[int] = None, short_game: Optional[bool] = None, game_type: Optional[str] = None,
token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to patch results. This event has been logged.",
detail='You are not authorized to patch results. This event has been logged.'
)
try:
this_result = Result.get_by_id(result_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No result found with id {result_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No result found with id {result_id}')
if away_team_id is not None:
this_result.away_team_id = away_team_id
@ -482,36 +396,36 @@ async def patch_result(
if this_result.save() == 1:
return_val = model_to_dict(this_result)
db.close()
return return_val
else:
db.close()
raise HTTPException(
status_code=418,
detail="Well slap my ass and call me a teapot; I could not save that event",
detail='Well slap my ass and call me a teapot; I could not save that event'
)
@router.delete("/{result_id}")
@router.delete('/{result_id}')
async def delete_result(result_id, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to post results. This event has been logged.",
detail='You are not authorized to post results. This event has been logged.'
)
try:
this_result = Result.get_by_id(result_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No result found with id {result_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No result found with id {result_id}')
count = this_result.delete_instance()
db.close()
if count == 1:
raise HTTPException(
status_code=200, detail=f"Result {result_id} has been deleted"
)
raise HTTPException(status_code=200, detail=f'Result {result_id} has been deleted')
else:
raise HTTPException(
status_code=500, detail=f"Result {result_id} was not deleted"
)
raise HTTPException(status_code=500, detail=f'Result {result_id} was not deleted')

View File

@ -5,11 +5,19 @@ import logging
import pydantic
from pandas import DataFrame
from ..db_engine import Reward, model_to_dict, fn, DoesNotExist
from ..dependencies import oauth2_scheme, valid_token
from ..db_engine import db, Reward, model_to_dict, fn
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA
logging.basicConfig(
filename=LOG_DATA['filename'],
format=LOG_DATA['format'],
level=LOG_DATA['log_level']
)
router = APIRouter(prefix="/api/v2/rewards", tags=["rewards"])
router = APIRouter(
prefix='/api/v2/rewards',
tags=['rewards']
)
class RewardModel(pydantic.BaseModel):
@ -17,23 +25,20 @@ class RewardModel(pydantic.BaseModel):
season: int
week: int
team_id: int
created: Optional[int] = int(datetime.timestamp(datetime.now()) * 1000)
created: Optional[int] = int(datetime.timestamp(datetime.now())*1000)
@router.get("")
@router.get('')
async def get_rewards(
name: Optional[str] = None,
in_name: Optional[str] = None,
team_id: Optional[int] = None,
season: Optional[int] = None,
week: Optional[int] = None,
created_after: Optional[int] = None,
flat: Optional[bool] = False,
csv: Optional[bool] = None,
limit: Optional[int] = 100,
):
name: Optional[str] = None, in_name: Optional[str] = None, team_id: Optional[int] = None,
season: Optional[int] = None, week: Optional[int] = None, created_after: Optional[int] = None,
flat: Optional[bool] = False, csv: Optional[bool] = None):
all_rewards = Reward.select().order_by(Reward.id)
if all_rewards.count() == 0:
db.close()
raise HTTPException(status_code=404, detail=f'There are no rewards to filter')
if name is not None:
all_rewards = all_rewards.where(fn.Lower(Reward.name) == name.lower())
if team_id is not None:
@ -49,106 +54,100 @@ async def get_rewards(
if week is not None:
all_rewards = all_rewards.where(Reward.week == week)
total_count = all_rewards.count()
if total_count == 0:
raise HTTPException(status_code=404, detail="No rewards found")
limit = max(0, min(limit, 500))
all_rewards = all_rewards.limit(limit)
if all_rewards.count() == 0:
db.close()
raise HTTPException(status_code=404, detail=f'No rewards found')
if csv:
data_list = [["id", "name", "team", "daily", "created"]]
data_list = [['id', 'name', 'team', 'daily', 'created']]
for line in all_rewards:
data_list.append(
[line.id, line.name, line.team.id, line.daily, line.created]
[
line.id, line.name, line.team.id, line.daily, line.created
]
)
return_val = DataFrame(data_list).to_csv(header=False, index=False)
return Response(content=return_val, media_type="text/csv")
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = {"count": total_count, "rewards": []}
return_val = {'count': all_rewards.count(), 'rewards': []}
for x in all_rewards:
return_val["rewards"].append(model_to_dict(x, recurse=not flat))
return_val['rewards'].append(model_to_dict(x, recurse=not flat))
db.close()
return return_val
@router.get("/{reward_id}")
@router.get('/{reward_id}')
async def get_one_reward(reward_id, csv: Optional[bool] = False):
try:
this_reward = Reward.get_by_id(reward_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No reward found with id {reward_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No reward found with id {reward_id}')
if csv:
data_list = [
["id", "name", "card_count", "description"],
[
this_reward.id,
this_reward.name,
this_reward.team.id,
this_reward.daily,
this_reward.created,
],
['id', 'name', 'card_count', 'description'],
[this_reward.id, this_reward.name, this_reward.team.id, this_reward.daily, this_reward.created]
]
return_val = DataFrame(data_list).to_csv(header=False, index=False)
return Response(content=return_val, media_type="text/csv")
db.close()
return Response(content=return_val, media_type='text/csv')
else:
return_val = model_to_dict(this_reward)
db.close()
return return_val
@router.post("")
@router.post('')
async def post_rewards(reward: RewardModel, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to post rewards. This event has been logged.",
detail='You are not authorized to post rewards. This event has been logged.'
)
reward_data = reward.dict()
# Convert milliseconds timestamp to datetime for PostgreSQL
if reward_data.get("created"):
reward_data["created"] = datetime.fromtimestamp(reward_data["created"] / 1000)
if reward_data.get('created'):
reward_data['created'] = datetime.fromtimestamp(reward_data['created'] / 1000)
this_reward = Reward(**reward_data)
saved = this_reward.save()
if saved == 1:
return_val = model_to_dict(this_reward)
db.close()
return return_val
else:
raise HTTPException(
status_code=418,
detail="Well slap my ass and call me a teapot; I could not save that cardset",
detail='Well slap my ass and call me a teapot; I could not save that cardset'
)
@router.patch("/{reward_id}")
@router.patch('/{reward_id}')
async def patch_reward(
reward_id,
name: Optional[str] = None,
team_id: Optional[int] = None,
created: Optional[int] = None,
token: str = Depends(oauth2_scheme),
):
reward_id, name: Optional[str] = None, team_id: Optional[int] = None, created: Optional[int] = None,
token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to patch rewards. This event has been logged.",
detail='You are not authorized to patch rewards. This event has been logged.'
)
try:
this_reward = Reward.get_by_id(reward_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No reward found with id {reward_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No reward found with id {reward_id}')
if name is not None:
this_reward.name = name
@ -160,36 +159,36 @@ async def patch_reward(
if this_reward.save() == 1:
return_val = model_to_dict(this_reward)
db.close()
return return_val
else:
raise HTTPException(
status_code=418,
detail="Well slap my ass and call me a teapot; I could not save that rarity",
detail='Well slap my ass and call me a teapot; I could not save that rarity'
)
@router.delete("/{reward_id}")
@router.delete('/{reward_id}')
async def delete_reward(reward_id, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to delete rewards. This event has been logged.",
detail='You are not authorized to delete rewards. This event has been logged.'
)
try:
this_reward = Reward.get_by_id(reward_id)
except DoesNotExist:
raise HTTPException(
status_code=404, detail=f"No reward found with id {reward_id}"
)
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f'No reward found with id {reward_id}')
count = this_reward.delete_instance()
db.close()
if count == 1:
raise HTTPException(
status_code=200, detail=f"Reward {reward_id} has been deleted"
)
raise HTTPException(status_code=200, detail=f'Reward {reward_id} has been deleted')
else:
raise HTTPException(
status_code=500, detail=f"Reward {reward_id} was not deleted"
)
raise HTTPException(status_code=500, detail=f'Reward {reward_id} was not deleted')

View File

@ -1,99 +0,0 @@
from datetime import datetime
from fastapi import APIRouter, Depends, HTTPException
from typing import Optional
import logging
import pydantic
from ..db_engine import ScoutClaim, model_to_dict
from ..dependencies import oauth2_scheme, valid_token
router = APIRouter(prefix="/api/v2/scout_claims", tags=["scout_claims"])
class ScoutClaimModel(pydantic.BaseModel):
    """Request body for creating a scout claim via POST /api/v2/scout_claims."""

    # id of the ScoutOpportunity being claimed.
    scout_opportunity_id: int
    # id of the Card taken by the claim.
    card_id: int
    # id of the Team making the claim.
    claimed_by_team_id: int
@router.get("")
async def get_scout_claims(
    scout_opportunity_id: Optional[int] = None,
    claimed_by_team_id: Optional[int] = None,
    limit: Optional[int] = 100,
):
    """List scout claims ordered by id, with optional filters and a capped limit."""
    claims = ScoutClaim.select().order_by(ScoutClaim.id)
    if scout_opportunity_id is not None:
        claims = claims.where(ScoutClaim.scout_opportunity_id == scout_opportunity_id)
    if claimed_by_team_id is not None:
        claims = claims.where(ScoutClaim.claimed_by_team_id == claimed_by_team_id)
    # Count before applying the limit so "count" reflects all matches.
    total_count = claims.count()
    if limit is not None:
        # Clamp page size into [0, 500].
        claims = claims.limit(max(0, min(limit, 500)))
    rows = [model_to_dict(claim, recurse=False) for claim in claims]
    return {"count": total_count, "results": rows}
@router.get("/{claim_id}")
async def get_one_scout_claim(claim_id: int):
    """Fetch a single scout claim by primary key; 404 when it cannot be loaded."""
    try:
        found = ScoutClaim.get_by_id(claim_id)
    except Exception:
        # Any lookup failure (missing row, bad id) is reported as not-found.
        raise HTTPException(
            status_code=404, detail=f"No scout claim found with id {claim_id}"
        )
    return model_to_dict(found)
@router.post("")
async def post_scout_claim(claim: ScoutClaimModel, token: str = Depends(oauth2_scheme)):
    """Create a scout claim; `created` is stamped with current epoch millis.

    Raises:
        HTTPException 401: invalid bearer token.
        HTTPException 418: the row failed to save.
    """
    if not valid_token(token):
        # Security: never write the raw bearer token into log files
        # (consistent with the "[REDACTED]" convention used elsewhere).
        logging.warning("Bad Token: [REDACTED]")
        raise HTTPException(
            status_code=401,
            detail="You are not authorized to post scout claims. This event has been logged.",
        )
    claim_data = claim.dict()
    # Epoch milliseconds, matching the integer "created" column convention.
    claim_data["created"] = int(datetime.timestamp(datetime.now()) * 1000)
    this_claim = ScoutClaim(**claim_data)
    saved = this_claim.save()
    if saved == 1:
        return model_to_dict(this_claim)
    else:
        raise HTTPException(status_code=418, detail="Could not save scout claim")
@router.delete("/{claim_id}")
async def delete_scout_claim(claim_id: int, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning(f"Bad Token: {token}")
raise HTTPException(
status_code=401,
detail="You are not authorized to delete scout claims. This event has been logged.",
)
try:
claim = ScoutClaim.get_by_id(claim_id)
except Exception:
raise HTTPException(
status_code=404, detail=f"No scout claim found with id {claim_id}"
)
count = claim.delete_instance()
if count == 1:
raise HTTPException(
status_code=200, detail=f"Scout claim {claim_id} has been deleted"
)
else:
raise HTTPException(
status_code=500, detail=f"Scout claim {claim_id} was not deleted"
)

View File

@ -1,127 +0,0 @@
import json
from datetime import datetime
from fastapi import APIRouter, Depends, HTTPException
from typing import Optional, List
import logging
import pydantic
from ..db_engine import ScoutOpportunity, ScoutClaim, model_to_dict
from ..dependencies import oauth2_scheme, valid_token
router = APIRouter(prefix="/api/v2/scout_opportunities", tags=["scout_opportunities"])
class ScoutOpportunityModel(pydantic.BaseModel):
    """Request body for creating a scout opportunity (POST /api/v2/scout_opportunities)."""

    # Pack that produced this opportunity, if any
    pack_id: Optional[int] = None
    # Team that opened the pack / owns the opportunity
    opener_team_id: int
    # Cards offered by this opportunity; persisted as a JSON string column
    card_ids: List[int]
    # Expiry timestamp; presumably epoch milliseconds like `created` — TODO confirm
    expires_at: int
    # Creation time (epoch ms); filled in server-side when omitted
    created: Optional[int] = None
def opportunity_to_dict(opp, recurse=True):
    """Serialize a ScoutOpportunity, decoding the JSON-encoded card_ids column."""
    as_dict = model_to_dict(opp, recurse=recurse)
    raw_ids = as_dict.get("card_ids")
    if isinstance(raw_ids, str):
        as_dict["card_ids"] = json.loads(raw_ids)
    return as_dict
@router.get("")
async def get_scout_opportunities(
claimed: Optional[bool] = None,
expired_before: Optional[int] = None,
opener_team_id: Optional[int] = None,
limit: Optional[int] = 100,
):
limit = max(0, min(limit, 500))
query = ScoutOpportunity.select().order_by(ScoutOpportunity.id)
if opener_team_id is not None:
query = query.where(ScoutOpportunity.opener_team_id == opener_team_id)
if expired_before is not None:
query = query.where(ScoutOpportunity.expires_at < expired_before)
if claimed is not None:
# Check whether any scout_claims exist for each opportunity
claim_subquery = ScoutClaim.select(ScoutClaim.scout_opportunity)
if claimed:
query = query.where(ScoutOpportunity.id.in_(claim_subquery))
else:
query = query.where(ScoutOpportunity.id.not_in(claim_subquery))
total_count = query.count()
query = query.limit(limit)
results = [opportunity_to_dict(x, recurse=False) for x in query]
return {"count": total_count, "results": results}
@router.get("/{opportunity_id}")
async def get_one_scout_opportunity(opportunity_id: int):
try:
opp = ScoutOpportunity.get_by_id(opportunity_id)
except Exception:
raise HTTPException(
status_code=404,
detail=f"No scout opportunity found with id {opportunity_id}",
)
return opportunity_to_dict(opp)
@router.post("")
async def post_scout_opportunity(
opportunity: ScoutOpportunityModel, token: str = Depends(oauth2_scheme)
):
if not valid_token(token):
logging.warning(f"Bad Token: {token}")
raise HTTPException(
status_code=401,
detail="You are not authorized to post scout opportunities. This event has been logged.",
)
opp_data = opportunity.dict()
opp_data["card_ids"] = json.dumps(opp_data["card_ids"])
if opp_data["created"] is None:
opp_data["created"] = int(datetime.timestamp(datetime.now()) * 1000)
this_opp = ScoutOpportunity(**opp_data)
saved = this_opp.save()
if saved == 1:
return opportunity_to_dict(this_opp)
else:
raise HTTPException(status_code=418, detail="Could not save scout opportunity")
@router.delete("/{opportunity_id}")
async def delete_scout_opportunity(
opportunity_id: int, token: str = Depends(oauth2_scheme)
):
if not valid_token(token):
logging.warning(f"Bad Token: {token}")
raise HTTPException(
status_code=401,
detail="You are not authorized to delete scout opportunities. This event has been logged.",
)
try:
opp = ScoutOpportunity.get_by_id(opportunity_id)
except Exception:
raise HTTPException(
status_code=404,
detail=f"No scout opportunity found with id {opportunity_id}",
)
count = opp.delete_instance()
if count == 1:
raise HTTPException(
status_code=200,
detail=f"Scout opportunity {opportunity_id} has been deleted",
)
else:
raise HTTPException(
status_code=500,
detail=f"Scout opportunity {opportunity_id} was not deleted",
)

View File

@ -1,20 +1,33 @@
from fastapi import APIRouter, Depends, HTTPException, Query
import csv
from datetime import datetime
from fastapi import APIRouter, Depends, HTTPException, Response, Query
from typing import Optional
import logging
import pydantic
import pandas as pd
from ..db_engine import Player
from ..dependencies import oauth2_scheme, valid_token
from ..db_engine import db, model_to_dict, fn, query_to_csv, complex_data_to_csv, Player, BattingCardRatings
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA, int_timestamp
from ..player_scouting import get_player_ids
router = APIRouter(prefix="/api/v2/scouting", tags=["scouting"])
logging.basicConfig(
filename=LOG_DATA['filename'],
format=LOG_DATA['format'],
level=LOG_DATA['log_level']
)
router = APIRouter(
prefix='/api/v2/scouting',
tags=['scouting']
)
class BattingFiles(pydantic.BaseModel):
vl_basic: str = "vl-basic.csv"
vl_rate: str = "vl-rate.csv"
vr_basic: str = "vr-basic.csv"
vr_rate: str = "vr-rate.csv"
running: str = "running.csv"
vl_basic: str = 'vl-basic.csv'
vl_rate: str = 'vl-rate.csv'
vr_basic: str = 'vr-basic.csv'
vr_rate: str = 'vr-rate.csv'
running: str = 'running.csv'
# def csv_file_to_dataframe(filename: str) -> pd.DataFrame | None:
@ -24,15 +37,66 @@ class BattingFiles(pydantic.BaseModel):
# for row in reader:
@router.get("/playerkeys")
@router.get('/playerkeys')
async def get_player_keys(player_id: list = Query(default=None)):
all_keys = []
for x in player_id:
this_player = Player.get_or_none(Player.player_id == x)
if this_player is not None:
this_keys = get_player_ids(this_player.bbref_id, id_type="bbref")
this_keys = get_player_ids(this_player.bbref_id, id_type='bbref')
if this_keys is not None:
all_keys.append(this_keys)
return_val = {"count": len(all_keys), "keys": [dict(x) for x in all_keys]}
return_val = {'count': len(all_keys), 'keys': [
dict(x) for x in all_keys
]}
db.close()
return return_val
@router.post('/live-update/batting')
def live_update_batting(files: BattingFiles, cardset_id: int, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail='You are not authorized to initiate live updates.'
)
data = {} # <fg id>: { 'vL': [combined vl stat data], 'vR': [combined vr stat data] }
for row in files.vl_basic:
if row['pa'] >= 20:
data[row['fgid']]['vL'] = row
for row in files.vl_rate:
if row['fgid'] in data.keys():
data[row['fgid']]['vL'].extend(row)
for row in files.vr_basic:
if row['pa'] >= 40 and row['fgid'] in data.keys():
data[row['fgid']]['vR'] = row
for row in files.vr_rate:
if row['fgid'] in data.keys():
data[row['fgid']]['vR'].extend(row)
for x in data.items():
pass
# Create BattingCardRating object for vL
# Create BattingCardRating object for vR
# Read running stats and create/update BattingCard object
return files.dict()
@router.post('/live-update/pitching')
def live_update_pitching(files: BattingFiles, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning(f'Bad Token: {token}')
db.close()
raise HTTPException(
status_code=401,
detail='You are not authorized to initiate live updates.'
)
return files.dict()

View File

@ -1,69 +0,0 @@
"""Season stats API endpoints.
Covers WP-13 (Post-Game Callback Integration):
POST /api/v2/season-stats/update-game/{game_id}
Delegates to app.services.season_stats.update_season_stats() which
recomputes full-season stats from all StratPlay and Decision rows for
every player who appeared in the game, then writes those totals into
batting_season_stats and pitching_season_stats.
Idempotency is enforced by the service layer: re-delivery of the same
game_id returns {"updated": 0, "skipped": true} without modifying stats.
Pass force=true to bypass the idempotency guard and force recalculation.
"""
import logging
from fastapi import APIRouter, Depends, HTTPException
from ..dependencies import oauth2_scheme, valid_token
router = APIRouter(prefix="/api/v2/season-stats", tags=["season-stats"])
logger = logging.getLogger(__name__)
@router.post("/update-game/{game_id}")
async def update_game_season_stats(
game_id: int, force: bool = False, token: str = Depends(oauth2_scheme)
):
"""Recalculate season stats from all StratPlay and Decision rows for a game.
Calls update_season_stats(game_id, force=force) from the service layer which:
- Recomputes full-season totals from all StratPlay rows for each player
- Aggregates Decision rows for pitching win/loss/save/hold stats
- Writes totals into batting_season_stats and pitching_season_stats
- Guards against redundant work via the ProcessedGame ledger
Query params:
- force: if true, bypasses the idempotency guard and reprocesses a
previously seen game_id (useful for correcting stats after data fixes)
Response: {"updated": N, "skipped": false}
- N: total player_season_stats rows upserted (batters + pitchers)
- skipped: true when this game_id was already processed and force=false
Errors from the service are logged but re-raised as 500 so the bot
knows to retry.
"""
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
raise HTTPException(status_code=401, detail="Unauthorized")
from ..services.season_stats import update_season_stats
try:
result = update_season_stats(game_id, force=force)
except Exception as exc:
logger.error("update-game/%d failed: %s", game_id, exc, exc_info=True)
raise HTTPException(
status_code=500,
detail=f"Season stats update failed for game {game_id}: {exc}",
)
updated = result.get("batters_updated", 0) + result.get("pitchers_updated", 0)
return {
"updated": updated,
"skipped": result.get("skipped", False),
}

View File

@ -1,14 +1,23 @@
from fastapi import APIRouter, Depends, HTTPException, Query, Response
from typing import Optional, List
from typing import Literal, Optional, List
import logging
import pandas as pd
import pydantic
from pydantic import validator
from ..db_engine import StratGame, model_to_dict, fn
from ..dependencies import oauth2_scheme, valid_token
from ..db_engine import db, StratGame, model_to_dict, chunked, PitchingCard, Player, query_to_csv, Team, fn
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA
logging.basicConfig(
filename=LOG_DATA['filename'],
format=LOG_DATA['format'],
level=LOG_DATA['log_level']
)
router = APIRouter(prefix="/api/v2/games", tags=["games"])
router = APIRouter(
prefix='/api/v2/games',
tags=['games']
)
class GameModel(pydantic.BaseModel):
@ -32,22 +41,13 @@ class GameList(pydantic.BaseModel):
games: List[GameModel]
@router.get("")
@router.get('')
async def get_games(
season: list = Query(default=None),
forfeit: Optional[bool] = None,
away_team_id: list = Query(default=None),
home_team_id: list = Query(default=None),
team1_id: list = Query(default=None),
team2_id: list = Query(default=None),
game_type: list = Query(default=None),
ranked: Optional[bool] = None,
short_game: Optional[bool] = None,
csv: Optional[bool] = False,
short_output: bool = False,
gauntlet_id: Optional[int] = None,
limit: int = 100,
):
season: list = Query(default=None), forfeit: Optional[bool] = None, away_team_id: list = Query(default=None),
home_team_id: list = Query(default=None), team1_id: list = Query(default=None),
team2_id: list = Query(default=None), game_type: list = Query(default=None), ranked: Optional[bool] = None,
short_game: Optional[bool] = None, csv: Optional[bool] = False, short_output: bool = False,
gauntlet_id: Optional[int] = None):
all_games = StratGame.select().order_by(StratGame.id)
if season is not None:
@ -74,71 +74,54 @@ async def get_games(
if short_game is not None:
all_games = all_games.where(StratGame.short_game == short_game)
if gauntlet_id is not None:
all_games = all_games.where(
StratGame.game_type.contains(f"gauntlet-{gauntlet_id}")
)
total_count = all_games.count() if not csv else 0
all_games = all_games.limit(max(0, min(limit, 500)))
all_games = all_games.where(StratGame.game_type.contains(f'gauntlet-{gauntlet_id}'))
if csv:
return_vals = [model_to_dict(x) for x in all_games]
for x in return_vals:
x["away_abbrev"] = x["away_team"]["abbrev"]
x["home_abbrev"] = x["home_team"]["abbrev"]
del x["away_team"], x["home_team"]
x['away_abbrev'] = x['away_team']['abbrev']
x['home_abbrev'] = x['home_team']['abbrev']
del x['away_team'], x['home_team']
output = pd.DataFrame(return_vals)[
[
"id",
"away_abbrev",
"home_abbrev",
"away_score",
"home_score",
"away_team_value",
"home_team_value",
"game_type",
"season",
"week",
"short_game",
"ranked",
]
]
db.close()
output = pd.DataFrame(return_vals)[[
'id', 'away_abbrev', 'home_abbrev', 'away_score', 'home_score', 'away_team_value', 'home_team_value',
'game_type', 'season', 'week', 'short_game', 'ranked'
]]
return Response(content=output.to_csv(index=False), media_type="text/csv")
return Response(content=output.to_csv(index=False), media_type='text/csv')
return_val = {
"count": total_count,
"games": [model_to_dict(x, recurse=not short_output) for x in all_games],
}
return_val = {'count': all_games.count(), 'games': [
model_to_dict(x, recurse=not short_output) for x in all_games
]}
db.close()
return return_val
@router.get("/{game_id}")
@router.get('/{game_id}')
async def get_one_game(game_id: int):
this_game = StratGame.get_or_none(StratGame.id == game_id)
if not this_game:
raise HTTPException(status_code=404, detail=f"StratGame ID {game_id} not found")
db.close()
raise HTTPException(status_code=404, detail=f'StratGame ID {game_id} not found')
g_result = model_to_dict(this_game)
db.close()
return g_result
@router.patch("/{game_id}")
@router.patch('/{game_id}')
async def patch_game(
game_id: int,
game_type: Optional[str] = None,
away_score: Optional[int] = None,
home_score: Optional[int] = None,
token: str = Depends(oauth2_scheme),
):
game_id: int, game_type: Optional[str] = None, away_score: Optional[int] = None,
home_score: Optional[int] = None, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("patch_game - Bad Token: [REDACTED]")
raise HTTPException(status_code=401, detail="Unauthorized")
logging.warning(f'patch_game - Bad Token: {token}')
raise HTTPException(status_code=401, detail='Unauthorized')
this_game = StratGame.get_or_none(StratGame.id == game_id)
if not this_game:
raise HTTPException(status_code=404, detail=f"StratGame ID {game_id} not found")
db.close()
raise HTTPException(status_code=404, detail=f'StratGame ID {game_id} not found')
if away_score is not None:
this_game.away_score = away_score
@ -149,45 +132,50 @@ async def patch_game(
if this_game.save() == 1:
g_result = model_to_dict(this_game)
db.close()
return g_result
else:
raise HTTPException(status_code=500, detail=f"Unable to patch game {game_id}")
db.close()
raise HTTPException(status_code=500, detail=f'Unable to patch game {game_id}')
@router.post("")
@router.post('')
async def post_game(this_game: GameModel, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("post_games - Bad Token: [REDACTED]")
raise HTTPException(status_code=401, detail="Unauthorized")
logging.warning(f'post_games - Bad Token: {token}')
raise HTTPException(status_code=401, detail='Unauthorized')
this_game = StratGame(**this_game.dict())
saved = this_game.save()
if saved == 1:
return_val = model_to_dict(this_game)
db.close()
return return_val
else:
raise HTTPException(
status_code=418,
detail="Well slap my ass and call me a teapot; I could not save that game",
detail='Well slap my ass and call me a teapot; I could not save that game'
)
@router.delete("/{game_id}")
@router.delete('/{game_id}')
async def delete_game(game_id: int, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("delete_game - Bad Token: [REDACTED]")
raise HTTPException(status_code=401, detail="Unauthorized")
logging.warning(f'delete_game - Bad Token: {token}')
raise HTTPException(status_code=401, detail='Unauthorized')
this_game = StratGame.get_or_none(StratGame.id == game_id)
if not this_game:
raise HTTPException(status_code=404, detail=f"StratGame ID {game_id} not found")
db.close()
raise HTTPException(status_code=404, detail=f'StratGame ID {game_id} not found')
count = this_game.delete_instance()
db.close()
if count == 1:
return f"StratGame {game_id} has been deleted"
return f'StratGame {game_id} has been deleted'
else:
raise HTTPException(
status_code=500, detail=f"StratGame {game_id} could not be deleted"
)
raise HTTPException(status_code=500, detail=f'StratGame {game_id} could not be deleted')

View File

@ -13,14 +13,21 @@ from ..db_engine import (
Team,
Player,
model_to_dict,
chunked,
fn,
SQL,
Case,
complex_data_to_csv,
Decision,
)
from ..db_helpers import upsert_strat_plays
from ..dependencies import oauth2_scheme, valid_token
from ..dependencies import oauth2_scheme, valid_token, LOG_DATA
logging.basicConfig(
filename=LOG_DATA["filename"],
format=LOG_DATA["format"],
level=LOG_DATA["log_level"],
)
router = APIRouter(prefix="/api/v2/plays", tags=["plays"])
@ -360,6 +367,7 @@ async def get_plays(
x["runner_team"],
)
db.close()
return Response(
content=pd.DataFrame(return_vals).to_csv(index=False), media_type="text/csv"
)
@ -368,6 +376,7 @@ async def get_plays(
"count": all_plays.count(),
"plays": [model_to_dict(x, recurse=not short_output) for x in all_plays],
}
db.close()
return return_plays
@ -799,10 +808,12 @@ async def get_batting_totals(
exclude = first + ["lob_all", "lob_all_rate", "lob_2outs", "rbi%"]
output = output[first + [col for col in output.columns if col not in exclude]]
db.close()
return Response(
content=pd.DataFrame(output).to_csv(index=False), media_type="text/csv"
)
db.close()
return return_stats
@ -1164,6 +1175,7 @@ async def get_pitching_totals(
"rbi%": rbi_rate,
}
)
db.close()
if csv:
return_vals = return_stats["stats"]
@ -1197,6 +1209,7 @@ async def get_pitching_totals(
exclude = first + ["lob_2outs", "rbi%"]
output = output[first + [col for col in output.columns if col not in exclude]]
db.close()
return Response(
content=pd.DataFrame(output).to_csv(index=False), media_type="text/csv"
)
@ -1214,6 +1227,7 @@ async def get_game_summary(
):
this_game = StratGame.get_or_none(StratGame.id == game_id)
if this_game is None:
db.close()
raise HTTPException(status_code=404, detail=f"Game {game_id} not found")
game_plays = StratPlay.select().where(StratPlay.game_id == game_id)
@ -1390,10 +1404,12 @@ async def get_game_summary(
@router.get("/{play_id}")
async def get_one_play(play_id: int):
play = StratPlay.get_or_none(StratPlay.id == play_id)
if play is None:
if StratPlay.get_or_none(StratPlay.id == play_id) is None:
db.close()
raise HTTPException(status_code=404, detail=f"Play ID {play_id} not found")
return model_to_dict(play)
r_play = model_to_dict(StratPlay.get_by_id(play_id))
db.close()
return r_play
@router.patch("/{play_id}")
@ -1401,21 +1417,23 @@ async def patch_play(
play_id: int, new_play: PlayModel, token: str = Depends(oauth2_scheme)
):
if not valid_token(token):
logging.warning("patch_play - Bad Token: [REDACTED]")
logging.warning(f"patch_play - Bad Token: {token}")
raise HTTPException(status_code=401, detail="Unauthorized")
if StratPlay.get_or_none(StratPlay.id == play_id) is None:
db.close()
raise HTTPException(status_code=404, detail=f"Play ID {play_id} not found")
StratPlay.update(**new_play.dict()).where(StratPlay.id == play_id).execute()
r_play = model_to_dict(StratPlay.get_by_id(play_id))
db.close()
return r_play
@router.post("")
async def post_plays(p_list: PlayList, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("post_plays - Bad Token: [REDACTED]")
logging.warning(f"post_plays - Bad Token: {token}")
raise HTTPException(status_code=401, detail="Unauthorized")
new_plays = []
@ -1458,6 +1476,7 @@ async def post_plays(p_list: PlayList, token: str = Depends(oauth2_scheme)):
with db.atomic():
# Use PostgreSQL-compatible upsert helper
upsert_strat_plays(new_plays, batch_size=20)
db.close()
return f"Inserted {len(new_plays)} plays"
@ -1465,14 +1484,16 @@ async def post_plays(p_list: PlayList, token: str = Depends(oauth2_scheme)):
@router.delete("/{play_id}")
async def delete_play(play_id: int, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("delete_play - Bad Token: [REDACTED]")
logging.warning(f"delete_play - Bad Token: {token}")
raise HTTPException(status_code=401, detail="Unauthorized")
this_play = StratPlay.get_or_none(StratPlay.id == play_id)
if not this_play:
db.close()
raise HTTPException(status_code=404, detail=f"Play ID {play_id} not found")
count = this_play.delete_instance()
db.close()
if count == 1:
return f"Play {play_id} has been deleted"
@ -1485,14 +1506,16 @@ async def delete_play(play_id: int, token: str = Depends(oauth2_scheme)):
@router.delete("/game/{game_id}")
async def delete_plays_game(game_id: int, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("delete_plays_game - Bad Token: [REDACTED]")
logging.warning(f"delete_plays_game - Bad Token: {token}")
raise HTTPException(status_code=401, detail="Unauthorized")
this_game = StratGame.get_or_none(StratGame.id == game_id)
if not this_game:
db.close()
raise HTTPException(status_code=404, detail=f"Game ID {game_id} not found")
count = StratPlay.delete().where(StratPlay.game == this_game).execute()
db.close()
if count > 0:
return f"Deleted {count} plays matching Game ID {game_id}"

View File

@ -31,14 +31,21 @@ from ..db_engine import (
PitchingCardRatings,
StratGame,
LIVE_PROMO_CARDSET_ID,
DoesNotExist,
)
from ..dependencies import (
oauth2_scheme,
valid_token,
LOG_DATA,
int_timestamp,
PRIVATE_IN_SCHEMA,
)
logging.basicConfig(
filename=LOG_DATA["filename"],
format=LOG_DATA["format"],
level=LOG_DATA["log_level"],
)
router = APIRouter(prefix="/api/v2/teams", tags=["teams"])
@ -132,18 +139,18 @@ async def get_teams(
if ranking_max is not None:
all_teams = all_teams.where(Team.ranking <= ranking_max)
if ranking_max is not None:
all_teams = all_teams.where(Team.ranking <= ranking_max)
if has_guide is not None:
# Use boolean comparison (PostgreSQL-compatible)
if not has_guide:
all_teams = all_teams.where(Team.has_guide == False) # noqa: E712
all_teams = all_teams.where(Team.has_guide == False)
else:
all_teams = all_teams.where(Team.has_guide == True) # noqa: E712
all_teams = all_teams.where(Team.has_guide == True)
if is_ai is not None:
if not is_ai:
all_teams = all_teams.where(Team.is_ai == False) # noqa: E712
else:
all_teams = all_teams.where(Team.is_ai == True) # noqa: E712
all_teams = all_teams.where(Team.is_ai)
if event_id is not None:
all_teams = all_teams.where(Team.event_id == event_id)
@ -156,6 +163,7 @@ async def get_teams(
if csv:
return_val = query_to_csv(all_teams, exclude=[Team.career])
db.close()
return Response(content=return_val, media_type="text/csv")
else:
@ -163,16 +171,16 @@ async def get_teams(
for x in all_teams:
return_teams["teams"].append(model_to_dict(x))
db.close()
return return_teams
@router.get("/{team_id}")
async def get_one_team(
team_id: int, inc_packs: bool = True, csv: Optional[bool] = False
):
async def get_one_team(team_id, inc_packs: bool = True, csv: Optional[bool] = False):
try:
this_team = Team.get_by_id(team_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f"No team found with id {team_id}")
p_query = Pack.select().where(
@ -187,6 +195,7 @@ async def get_one_team(
if inc_packs:
return_val["sealed_packs"] = [model_to_dict(x) for x in p_query]
db.close()
return return_val
@ -254,27 +263,29 @@ def get_scouting_dfs(allowed_players, position: str):
if position in ["LF", "CF", "RF"]:
series_list.append(
pd.Series(
dict([(x.player.player_id, x.arm) for x in positions]), name="Arm OF"
dict([(x.player.player_id, x.arm) for x in positions]), name=f"Arm OF"
)
)
elif position == "C":
series_list.append(
pd.Series(
dict([(x.player.player_id, x.arm) for x in positions]), name="Arm C"
dict([(x.player.player_id, x.arm) for x in positions]), name=f"Arm C"
)
)
series_list.append(
pd.Series(
dict([(x.player.player_id, x.pb) for x in positions]), name="PB C"
dict([(x.player.player_id, x.pb) for x in positions]), name=f"PB C"
)
)
series_list.append(
pd.Series(
dict([(x.player.player_id, x.overthrow) for x in positions]),
name="Throw C",
name=f"Throw C",
)
)
db.close()
def get_total_ops(df_data):
ops_vl = df_data["obp_vl"] + df_data["slg_vl"]
ops_vr = df_data["obp_vr"] + df_data["slg_vr"]
@ -300,9 +311,11 @@ async def get_team_lineup(
"""
this_team = Team.get_or_none(Team.id == team_id)
if this_team is None:
db.close()
raise HTTPException(status_code=404, detail=f"Team id {team_id} not found")
if difficulty_name not in CARDSETS.keys() and difficulty_name != "exhibition":
db.close()
raise HTTPException(
status_code=400,
detail=f"Difficulty name {difficulty_name} not a valid check",
@ -314,11 +327,12 @@ async def get_team_lineup(
all_players = Player.select().where(Player.franchise == this_team.sname)
if difficulty_name == "exhibition":
logging.info("pulling an exhibition lineup")
logging.info(f"pulling an exhibition lineup")
if cardset_id is None:
db.close()
raise HTTPException(
status_code=400,
detail="Must provide at least one cardset_id for exhibition lineups",
detail=f"Must provide at least one cardset_id for exhibition lineups",
)
legal_players = all_players.where(Player.cardset_id << cardset_id)
@ -356,35 +370,17 @@ async def get_team_lineup(
"DH": {"player": None, "vl": None, "vr": None, "ops": 0},
}
# Batch-fetch BattingCards and ratings for all candidate players to avoid
# per-player DB round trips inside the lineup construction loop below.
if backup_players is not None:
_batch_bcards = BattingCard.select().where(
(BattingCard.player << legal_players)
| (BattingCard.player << backup_players)
)
else:
_batch_bcards = BattingCard.select().where(BattingCard.player << legal_players)
_batting_cards_by_player = {bc.player_id: bc for bc in _batch_bcards}
_all_bratings = (
BattingCardRatings.select().where(
BattingCardRatings.battingcard << list(_batting_cards_by_player.values())
)
if _batting_cards_by_player
else []
)
_ratings_by_card_hand = {}
for _r in _all_bratings:
_ratings_by_card_hand.setdefault(_r.battingcard_id, {})[_r.vs_hand] = _r
def get_bratings(player_id):
this_bcard = _batting_cards_by_player.get(player_id)
card_ratings = (
_ratings_by_card_hand.get(this_bcard.id, {}) if this_bcard else {}
this_bcard = BattingCard.get_or_none(BattingCard.player_id == player_id)
vl_ratings = BattingCardRatings.get_or_none(
BattingCardRatings.battingcard == this_bcard,
BattingCardRatings.vs_hand == "L",
)
vl_ratings = card_ratings.get("L")
vr_ratings = card_ratings.get("R")
vl_ops = vl_ratings.obp + vl_ratings.slg
vr_ratings = BattingCardRatings.get_or_none(
BattingCardRatings.battingcard == this_bcard,
BattingCardRatings.vs_hand == "R",
)
vr_ops = vr_ratings.obp + vr_ratings.slg
return (
model_to_dict(vl_ratings),
@ -404,17 +400,17 @@ async def get_team_lineup(
# if x.battingcard.player.p_name not in player_names:
# starting_nine['DH'] = x.battingcard.player
# break
logging.debug("Searching for a DH!")
logging.debug(f"Searching for a DH!")
dh_query = legal_players.order_by(Player.cost.desc())
for x in dh_query:
logging.debug(f"checking {x.p_name} for {position}")
if x.p_name not in player_names and "P" not in x.pos_1:
logging.debug("adding!")
logging.debug(f"adding!")
starting_nine["DH"]["player"] = model_to_dict(x)
try:
vl, vr, total_ops = get_bratings(x.player_id)
except AttributeError:
logging.debug("Could not find batting lines")
except AttributeError as e:
logging.debug(f"Could not find batting lines")
else:
# starting_nine[position]['vl'] = vl
# starting_nine[position]['vr'] = vr
@ -429,12 +425,12 @@ async def get_team_lineup(
for x in dh_query:
logging.debug(f"checking {x.p_name} for {position}")
if x.p_name not in player_names:
logging.debug("adding!")
logging.debug(f"adding!")
starting_nine["DH"]["player"] = model_to_dict(x)
try:
vl, vr, total_ops = get_bratings(x.player_id)
except AttributeError:
logging.debug("Could not find batting lines")
except AttributeError as e:
logging.debug(f"Could not find batting lines")
else:
vl, vr, total_ops = get_bratings(x.player_id)
starting_nine[position]["vl"] = vl["obp"] + vl["slg"]
@ -464,7 +460,7 @@ async def get_team_lineup(
x.player.p_name not in player_names
and x.player.p_name.lower() != pitcher_name
):
logging.debug("adding!")
logging.debug(f"adding!")
starting_nine[position]["player"] = model_to_dict(x.player)
vl, vr, total_ops = get_bratings(x.player.player_id)
starting_nine[position]["vl"] = vl
@ -542,7 +538,7 @@ async def get_team_lineup(
x.player.p_name not in player_names
and x.player.p_name.lower() != pitcher_name
):
logging.debug("adding!")
logging.debug(f"adding!")
starting_nine[position]["player"] = model_to_dict(x.player)
vl, vr, total_ops = get_bratings(x.player.player_id)
starting_nine[position]["vl"] = vl["obp"] + vl["slg"]
@ -600,26 +596,20 @@ def sort_pitchers(pitching_card_query) -> DataFrame | None:
pitcher_df = pd.DataFrame(all_s).set_index("player", drop=False)
logging.debug(f"pitcher_df: {pitcher_df}")
card_ids = pitcher_df["id"].tolist()
ratings_map = {
(r.pitchingcard_id, r.vs_hand): r
for r in PitchingCardRatings.select().where(
(PitchingCardRatings.pitchingcard_id << card_ids)
& (PitchingCardRatings.vs_hand << ["L", "R"])
)
}
def get_total_ops(df_data):
vlval = ratings_map.get((df_data["id"], "L"))
vrval = ratings_map.get((df_data["id"], "R"))
vlval = PitchingCardRatings.get_or_none(
PitchingCardRatings.pitchingcard_id == df_data["id"],
PitchingCardRatings.vs_hand == "L",
)
vrval = PitchingCardRatings.get_or_none(
PitchingCardRatings.pitchingcard_id == df_data["id"],
PitchingCardRatings.vs_hand == "R",
)
if vlval is None or vrval is None:
return float("inf")
ops_vl = vlval.obp + vlval.slg
ops_vr = vrval.obp + vrval.slg
# Weight the weaker split (higher OPS allowed) so platoon weaknesses are penalized.
# Starters face both LHH and RHH, so vulnerability against either hand matters.
return (ops_vr + ops_vl + max(ops_vl, ops_vr)) / 3
# TODO: should this be max??
return (ops_vr + ops_vl + min(ops_vl, ops_vr)) / 3
pitcher_df["total_ops"] = pitcher_df.apply(get_total_ops, axis=1)
return pitcher_df.sort_values(by="total_ops")
@ -638,9 +628,11 @@ async def get_team_sp(
)
this_team = Team.get_or_none(Team.id == team_id)
if this_team is None:
db.close()
raise HTTPException(status_code=404, detail=f"Team id {team_id} not found")
if difficulty_name not in CARDSETS.keys() and difficulty_name != "exhibition":
db.close()
raise HTTPException(
status_code=400,
detail=f"Difficulty name {difficulty_name} not a valid check",
@ -649,11 +641,12 @@ async def get_team_sp(
all_players = Player.select().where(Player.franchise == this_team.sname)
if difficulty_name == "exhibition":
logging.info("pulling an exhibition lineup")
logging.info(f"pulling an exhibition lineup")
if cardset_id is None:
db.close()
raise HTTPException(
status_code=400,
detail="Must provide at least one cardset_id for exhibition lineups",
detail=f"Must provide at least one cardset_id for exhibition lineups",
)
legal_players = all_players.where(Player.cardset_id << cardset_id)
@ -683,25 +676,19 @@ async def get_team_sp(
starter_df = pd.DataFrame(all_s).set_index("player", drop=False)
logging.debug(f"starter_df: {starter_df}")
card_ids = starter_df["id"].tolist()
ratings_map = {
(r.pitchingcard_id, r.vs_hand): r
for r in PitchingCardRatings.select().where(
(PitchingCardRatings.pitchingcard_id << card_ids)
& (PitchingCardRatings.vs_hand << ["L", "R"])
)
}
def get_total_ops(df_data):
vlval = ratings_map.get((df_data["id"], "L"))
vrval = ratings_map.get((df_data["id"], "R"))
vlval = PitchingCardRatings.get_or_none(
PitchingCardRatings.pitchingcard_id == df_data["id"],
PitchingCardRatings.vs_hand == "L",
)
vrval = PitchingCardRatings.get_or_none(
PitchingCardRatings.pitchingcard_id == df_data["id"],
PitchingCardRatings.vs_hand == "R",
)
if vlval is None or vrval is None:
return float("inf")
ops_vl = vlval.obp + vlval.slg
ops_vr = vrval.obp + vrval.slg
# Weight the weaker split (higher OPS allowed) so platoon weaknesses are penalized.
return (ops_vr + ops_vl + max(ops_vl, ops_vr)) / 3
return (ops_vr + ops_vl + min(ops_vl, ops_vr)) / 3
starter_df["total_ops"] = starter_df.apply(get_total_ops, axis=1)
return starter_df.sort_values(by="total_ops")
@ -720,11 +707,13 @@ async def get_team_sp(
if all_starters is not None and len(all_starters.index) >= sp_rank:
this_player_id = all_starters.iloc[sp_rank - 1].player
this_player = model_to_dict(Player.get_by_id(this_player_id), recurse=False)
db.close()
return this_player
if all_starters is not None and len(all_starters.index) > 0:
this_player_id = all_starters.iloc[len(all_starters.index) - 1].player
this_player = model_to_dict(Player.get_by_id(this_player_id), recurse=False)
db.close()
return this_player
# Include backup cardsets
@ -737,11 +726,13 @@ async def get_team_sp(
if all_starters is not None and len(all_starters.index) >= sp_rank:
this_player_id = all_starters.iloc[sp_rank - 1].player
this_player = model_to_dict(Player.get_by_id(this_player_id), recurse=False)
db.close()
return this_player
if all_starters is not None and len(all_starters.index) > 0:
this_player_id = all_starters.iloc[len(all_starters.index) - 1].player
this_player = model_to_dict(Player.get_by_id(this_player_id), recurse=False)
db.close()
return this_player
raise HTTPException(
@ -764,9 +755,11 @@ async def get_team_rp(
)
this_team = Team.get_or_none(Team.id == team_id)
if this_team is None:
db.close()
raise HTTPException(status_code=404, detail=f"Team id {team_id} not found")
if difficulty_name not in CARDSETS.keys() and difficulty_name != "exhibition":
db.close()
raise HTTPException(
status_code=400,
detail=f"Difficulty name {difficulty_name} not a valid check",
@ -778,11 +771,12 @@ async def get_team_rp(
)
if difficulty_name == "exhibition":
logging.info("pulling an exhibition RP")
logging.info(f"pulling an exhibition RP")
if cardset_id is None:
db.close()
raise HTTPException(
status_code=400,
detail="Must provide at least one cardset_id for exhibition lineups",
detail=f"Must provide at least one cardset_id for exhibition lineups",
)
legal_players = all_players.where(Player.cardset_id << cardset_id)
@ -851,6 +845,7 @@ async def get_team_rp(
this_player = model_to_dict(
Player.get_by_id(this_player_id), recurse=False
)
db.close()
return this_player
elif need == "setup":
@ -875,6 +870,7 @@ async def get_team_rp(
this_player = model_to_dict(
Player.get_by_id(this_player_id), recurse=False
)
db.close()
return this_player
elif need == "length" or len(used_pitcher_ids) > 4:
@ -908,6 +904,7 @@ async def get_team_rp(
this_player = model_to_dict(
Player.get_by_id(this_player_id), recurse=False
)
db.close()
return this_player
elif need == "middle":
@ -932,9 +929,10 @@ async def get_team_rp(
this_player = model_to_dict(
Player.get_by_id(this_player_id), recurse=False
)
db.close()
return this_player
logging.info("Falling to last chance pitcher")
logging.info(f"Falling to last chance pitcher")
all_relievers = sort_pitchers(
PitchingCard.select()
.join(Player)
@ -947,6 +945,7 @@ async def get_team_rp(
if all_relievers is not None:
this_player_id = all_relievers.iloc[len(all_relievers.index) - 1].player
this_player = model_to_dict(Player.get_by_id(this_player_id), recurse=False)
db.close()
return this_player
raise HTTPException(status_code=400, detail=f"No RP found for Team {team_id}")
@ -957,7 +956,7 @@ async def get_team_record(team_id: int, season: int):
all_games = StratGame.select().where(
((StratGame.away_team_id == team_id) | (StratGame.home_team_id == team_id))
& (StratGame.season == season)
& (StratGame.short_game == False) # noqa: E712
& (StratGame.short_game == False)
)
template = {
@ -1032,6 +1031,7 @@ async def get_team_record(team_id: int, season: int):
# team_games = lg_query.where((StratGame.away_team_id == x) | (StratGame.home_team_id == x))
# for game in team_games:
db.close()
return standings
@ -1039,16 +1039,21 @@ async def get_team_record(team_id: int, season: int):
async def team_buy_players(team_id: int, ids: str, ts: str):
try:
this_team = Team.get_by_id(team_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f"No team found with id {team_id}")
if ts != this_team.team_hash():
logging.warning(f"Bad Team Secret: {ts} ({this_team.team_hash()})")
db.close()
raise HTTPException(
status_code=401,
detail=f"You are not authorized to buy {this_team.abbrev} cards. This event has been logged.",
)
last_card = Card.select(Card.id).order_by(-Card.id).limit(1)
lc_id = last_card[0].id
all_ids = ids.split(",")
conf_message = ""
total_cost = 0
@ -1056,7 +1061,8 @@ async def team_buy_players(team_id: int, ids: str, ts: str):
if player_id != "":
try:
this_player = Player.get_by_id(player_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(
status_code=404,
detail=f"No player found with id {player_id} /// "
@ -1069,6 +1075,7 @@ async def team_buy_players(team_id: int, ids: str, ts: str):
f"{this_player} was not purchased. {this_team.lname} only has {this_team.wallet}₼, but "
f"{this_player} costs {this_player.cost}₼."
)
db.close()
raise HTTPException(
200,
detail=f"{this_player} was not purchased. {this_team.lname} only has {this_team.wallet}₼, but "
@ -1095,7 +1102,7 @@ async def team_buy_players(team_id: int, ids: str, ts: str):
if this_player.rarity.value >= 2:
new_notif = Notification(
created=datetime.now(),
title="Price Change",
title=f"Price Change",
desc="Modified by buying and selling",
field_name=f"{this_player.description} "
f"{this_player.p_name if this_player.p_name not in this_player.description else ''}",
@ -1124,18 +1131,21 @@ async def team_buy_packs(
):
try:
this_packtype = PackType.get_by_id(packtype_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(
status_code=404, detail=f"No pack type found with id {packtype_id}"
)
try:
this_team = Team.get_by_id(team_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f"No team found with id {team_id}")
if ts != this_team.team_hash():
logging.warning(f"Bad Team Secret: {ts} ({this_team.team_hash()})")
db.close()
logging.warning(
f"team: {this_team} / pack_type: {this_packtype} / secret: {ts} / "
f"actual: {this_team.team_hash()}"
@ -1148,6 +1158,7 @@ async def team_buy_packs(
# check wallet balance
total_cost = this_packtype.cost * quantity
if this_team.wallet < total_cost:
db.close()
raise HTTPException(
200,
detail=f"{this_packtype} was not purchased. {this_team.lname} only has {this_team.wallet} bucks, but "
@ -1175,6 +1186,7 @@ async def team_buy_packs(
with db.atomic():
Pack.bulk_create(all_packs, batch_size=15)
db.close()
raise HTTPException(
status_code=200,
@ -1187,11 +1199,13 @@ async def team_buy_packs(
async def team_sell_cards(team_id: int, ids: str, ts: str):
try:
this_team = Team.get_by_id(team_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f"No team found with id {team_id}")
if ts != this_team.team_hash():
logging.warning(f"Bad Team Secret: {ts} ({this_team.team_hash()})")
db.close()
raise HTTPException(
status_code=401,
detail=f"You are not authorized to sell {this_team.abbrev} cards. This event has been logged.",
@ -1205,7 +1219,8 @@ async def team_sell_cards(team_id: int, ids: str, ts: str):
if card_id != "":
try:
this_card = Card.get_by_id(card_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(
status_code=404, detail=f"No card found with id {card_id}"
)
@ -1239,7 +1254,7 @@ async def team_sell_cards(team_id: int, ids: str, ts: str):
if this_player.rarity.value >= 2:
new_notif = Notification(
created=datetime.now(),
title="Price Change",
title=f"Price Change",
desc="Modified by buying and selling",
field_name=f"{this_player.description} "
f"{this_player.p_name if this_player.p_name not in this_player.description else ''}",
@ -1273,10 +1288,12 @@ async def get_team_cards(team_id, csv: Optional[bool] = True):
"""
try:
this_team = Team.get_by_id(team_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f"No team found with id {team_id}")
if not csv:
db.close()
raise HTTPException(
status_code=400,
detail="The /teams/{team_id}/cards endpoint only supports csv output.",
@ -1290,9 +1307,11 @@ async def get_team_cards(team_id, csv: Optional[bool] = True):
.order_by(-Card.player.rarity.value, Card.player.p_name)
)
if all_cards.count() == 0:
raise HTTPException(status_code=404, detail="No cards found")
db.close()
raise HTTPException(status_code=404, detail=f"No cards found")
card_vals = [model_to_dict(x) for x in all_cards]
db.close()
for x in card_vals:
x.update(x["player"])
@ -1336,7 +1355,8 @@ async def get_team_cards(team_id, csv: Optional[bool] = True):
@router.post("", include_in_schema=PRIVATE_IN_SCHEMA)
async def post_team(team: TeamModel, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to post teams. This event has been logged.",
@ -1344,6 +1364,7 @@ async def post_team(team: TeamModel, token: str = Depends(oauth2_scheme)):
dupe_team = Team.get_or_none(Team.season == team.season, Team.abbrev == team.abbrev)
if dupe_team:
db.close()
raise HTTPException(
status_code=400,
detail=f"There is already a season {team.season} team using {team.abbrev}",
@ -1371,6 +1392,7 @@ async def post_team(team: TeamModel, token: str = Depends(oauth2_scheme)):
saved = this_team.save()
if saved == 1:
return_team = model_to_dict(this_team)
db.close()
return return_team
else:
raise HTTPException(
@ -1382,18 +1404,20 @@ async def post_team(team: TeamModel, token: str = Depends(oauth2_scheme)):
@router.post("/new-season/{new_season}", include_in_schema=PRIVATE_IN_SCHEMA)
async def team_season_update(new_season: int, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to post teams. This event has been logged.",
)
Team.update(
r_query = Team.update(
ranking=1000, season=new_season, wallet=Team.wallet + 250, has_guide=False
).execute()
current = Current.latest()
current.season = new_season
current.save()
db.close()
return {
"detail": f"Team rankings, season, guides, and wallets updated for season {new_season}"
@ -1405,7 +1429,8 @@ async def team_update_money(
team_id: int, delta: int, token: str = Depends(oauth2_scheme)
):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to adjust wallets. This event has been logged.",
@ -1413,13 +1438,15 @@ async def team_update_money(
try:
this_team = Team.get_by_id(team_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f"No team found with id {team_id}")
this_team.wallet += delta
if this_team.save() == 1:
return_team = model_to_dict(this_team)
db.close()
return return_team
else:
raise HTTPException(
@ -1450,14 +1477,16 @@ async def patch_team(
abbrev: Optional[str] = None,
):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to delete teams. This event has been logged.",
)
try:
this_team = Team.get_by_id(team_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f"No team found with id {team_id}")
if abbrev is not None:
@ -1501,6 +1530,7 @@ async def patch_team(
if this_team.save() == 1:
return_team = model_to_dict(this_team)
db.close()
return return_team
else:
raise HTTPException(
@ -1512,73 +1542,22 @@ async def patch_team(
@router.delete("/{team_id}", include_in_schema=PRIVATE_IN_SCHEMA)
async def delete_team(team_id, token: str = Depends(oauth2_scheme)):
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
logging.warning(f"Bad Token: {token}")
db.close()
raise HTTPException(
status_code=401,
detail="You are not authorized to delete teams. This event has been logged.",
)
try:
this_team = Team.get_by_id(team_id)
except DoesNotExist:
except Exception:
db.close()
raise HTTPException(status_code=404, detail=f"No team found with id {team_id}")
count = this_team.delete_instance()
db.close()
if count == 1:
raise HTTPException(status_code=200, detail=f"Team {team_id} has been deleted")
else:
raise HTTPException(status_code=500, detail=f"Team {team_id} was not deleted")
@router.get("/{team_id}/refractors")
async def list_team_refractors(
team_id: int,
card_type: Optional[str] = Query(default=None),
tier: Optional[int] = Query(default=None),
page: int = Query(default=1, ge=1),
per_page: int = Query(default=10, ge=1, le=100),
token: str = Depends(oauth2_scheme),
):
"""List all RefractorCardState rows for a team, with optional filters.
Joins RefractorCardState to RefractorTrack so that card_type filtering
works without a second query. Results are paginated via page/per_page
(1-indexed pages); items are ordered by player_id for stable ordering.
Query parameters:
card_type -- filter to states whose track.card_type matches (e.g. 'batter', 'sp')
tier -- filter to states at a specific current_tier (0-4)
page -- 1-indexed page number (default 1)
per_page -- items per page (default 10, max 100)
Response shape:
{"count": N, "items": [card_state_with_threshold_context, ...]}
Each item in 'items' has the same shape as GET /refractor/cards/{card_id}.
"""
if not valid_token(token):
logging.warning("Bad Token: [REDACTED]")
raise HTTPException(status_code=401, detail="Unauthorized")
from ..db_engine import RefractorCardState, RefractorTrack
from ..routers_v2.refractor import _build_card_state_response
query = (
RefractorCardState.select(RefractorCardState, RefractorTrack)
.join(RefractorTrack)
.where(RefractorCardState.team == team_id)
.order_by(RefractorCardState.player_id)
)
if card_type is not None:
query = query.where(RefractorTrack.card_type == card_type)
if tier is not None:
query = query.where(RefractorCardState.current_tier == tier)
total = query.count()
offset = (page - 1) * per_page
page_query = query.offset(offset).limit(per_page)
items = [_build_card_state_response(state) for state in page_query]
return {"count": total, "items": items}

View File

View File

@ -1,29 +0,0 @@
[
{
"name": "Batter Track",
"card_type": "batter",
"formula": "pa + tb * 2",
"t1_threshold": 37,
"t2_threshold": 149,
"t3_threshold": 448,
"t4_threshold": 896
},
{
"name": "Starting Pitcher Track",
"card_type": "sp",
"formula": "ip + k",
"t1_threshold": 10,
"t2_threshold": 40,
"t3_threshold": 120,
"t4_threshold": 240
},
{
"name": "Relief Pitcher Track",
"card_type": "rp",
"formula": "ip + k",
"t1_threshold": 3,
"t2_threshold": 12,
"t3_threshold": 35,
"t4_threshold": 70
}
]

View File

@ -1,66 +0,0 @@
"""Seed script for RefractorTrack records.
Loads track definitions from refractor_tracks.json and upserts them into the
database using get_or_create keyed on name. Existing tracks have their
thresholds and formula updated to match the JSON in case values have changed.
Can be run standalone:
python -m app.seed.refractor_tracks
"""
import json
import logging
from pathlib import Path
from app.db_engine import RefractorTrack
logger = logging.getLogger(__name__)
_JSON_PATH = Path(__file__).parent / "refractor_tracks.json"
def seed_refractor_tracks() -> list[RefractorTrack]:
    """Upsert refractor tracks from JSON seed data.

    Returns a list of RefractorTrack instances that were created or updated.
    """
    track_defs = json.loads(_JSON_PATH.read_text(encoding="utf-8"))

    # Fields that may change between releases and must be re-synced on
    # every run for rows that already exist.
    mutable_fields = (
        "card_type",
        "formula",
        "t1_threshold",
        "t2_threshold",
        "t3_threshold",
        "t4_threshold",
    )

    processed: list[RefractorTrack] = []
    for defn in track_defs:
        defaults = {field: defn[field] for field in mutable_fields}
        track, created = RefractorTrack.get_or_create(
            name=defn["name"], defaults=defaults
        )
        if not created:
            # Row pre-existed: push current JSON values onto it so edits
            # to thresholds/formula propagate.
            for field in mutable_fields:
                setattr(track, field, defn[field])
            track.save()
        logger.info(
            "[%s] %s (card_type=%s)",
            "created" if created else "updated",
            track.name,
            track.card_type,
        )
        processed.append(track)
    return processed
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
logger.info("Seeding refractor tracks...")
tracks = seed_refractor_tracks()
logger.info("Done. %d track(s) processed.", len(tracks))

View File

@ -1,119 +0,0 @@
"""Formula engine for refractor value computation (WP-09).
Three pure functions that compute a numeric refractor value from career stats,
plus helpers for formula dispatch and tier classification.
Stats attributes expected by each formula:
compute_batter_value: pa, hits, doubles, triples, hr (from BattingSeasonStats)
compute_sp_value: outs, strikeouts (from PitchingSeasonStats)
compute_rp_value: outs, strikeouts (from PitchingSeasonStats)
"""
from typing import Protocol
class BatterStats(Protocol):
    """Structural type for the batter stats object read by compute_batter_value.

    Per the module docstring, these attributes come from BattingSeasonStats.
    """

    pa: int
    hits: int
    doubles: int
    triples: int
    hr: int
class PitcherStats(Protocol):
    """Structural type for the pitcher stats object read by the SP/RP formulas.

    Per the module docstring, these attributes come from PitchingSeasonStats.
    Note: 'outs' is raw outs recorded; formulas divide by 3 to obtain IP.
    """

    outs: int
    strikeouts: int
# ---------------------------------------------------------------------------
# Core formula functions
# ---------------------------------------------------------------------------
def compute_batter_value(stats) -> float:
    """Refractor value for a batter: PA + 2 * TB.

    Total bases are rebuilt from the hit breakdown:
    TB = 1B + 2*2B + 3*3B + 4*HR, with singles inferred as
    hits - doubles - triples - hr.
    """
    extra_base_hits = stats.doubles + stats.triples + stats.hr
    base_singles = stats.hits - extra_base_hits
    total_bases = (
        base_singles
        + stats.doubles * 2
        + stats.triples * 3
        + stats.hr * 4
    )
    return float(stats.pa + 2 * total_bases)
def _pitcher_value(stats) -> float:
return stats.outs / 3 + stats.strikeouts
def compute_sp_value(stats) -> float:
"""IP + K where IP = outs / 3."""
return _pitcher_value(stats)
def compute_rp_value(stats) -> float:
"""IP + K (same formula as SP; thresholds differ)."""
return _pitcher_value(stats)
# ---------------------------------------------------------------------------
# Dispatch and tier helpers
# ---------------------------------------------------------------------------
# Maps card_type -> formula function; consumed by compute_value_for_track.
_FORMULA_DISPATCH = {
    "batter": compute_batter_value,
    "sp": compute_sp_value,
    "rp": compute_rp_value,
}
def compute_value_for_track(card_type: str, stats) -> float:
    """Compute the refractor value for *stats* using the formula for *card_type*.

    Args:
        card_type: One of 'batter', 'sp', 'rp'.
        stats: Object exposing the attributes the chosen formula reads.

    Raises:
        ValueError: If card_type has no registered formula.
    """
    if card_type not in _FORMULA_DISPATCH:
        raise ValueError(f"Unknown card_type: {card_type!r}")
    formula = _FORMULA_DISPATCH[card_type]
    return formula(stats)
def tier_from_value(value: float, track) -> int:
    """Classify a computed value into a refractor tier (0-4) for a track.

    Boundaries are inclusive on the lower end:
        T0: value < t1
        T1: t1 <= value < t2
        T2: t2 <= value < t3
        T3: t3 <= value < t4
        T4: value >= t4

    Args:
        value: Computed formula value.
        track: Object (or dict-like) with t1_threshold..t4_threshold
            attributes/keys.
    """
    # Normalise threshold access: seed fixtures are plain dicts, live
    # tracks are model instances with attributes.
    if isinstance(track, dict):
        thresholds = [track[f"t{n}_threshold"] for n in (1, 2, 3, 4)]
    else:
        thresholds = [getattr(track, f"t{n}_threshold") for n in (1, 2, 3, 4)]

    # Walk tiers from highest to lowest; the first threshold met wins.
    for tier in (4, 3, 2, 1):
        if value >= thresholds[tier - 1]:
            return tier
    return 0

View File

@ -1,698 +0,0 @@
"""Refractor rating boost service (Phase 2).
Pure functions for computing boosted card ratings when a player
reaches a new Refractor tier. The module-level 'db' variable is used by
apply_tier_boost() for atomic writes; tests patch this reference to redirect
writes to a shared-memory SQLite database.
Batter boost: fixed +0.5 to four offensive columns per tier.
Pitcher boost: 1.5 TB-budget priority algorithm per tier.
"""
from decimal import Decimal, ROUND_HALF_UP
import hashlib
import json
import logging
# Module-level db reference imported lazily so that this module can be
# imported before app.db_engine is fully initialised (e.g. in tests that
# patch DATABASE_TYPE before importing db_engine).
# Tests that need to redirect DB writes should patch this attribute at module
# level: `import app.services.refractor_boost as m; m.db = test_db`.
db = None


def _get_db():
    """Return the module-level db, importing lazily on first use.

    Once resolved — or pre-set by a test patching the module attribute —
    the cached reference is returned on every subsequent call.
    """
    global db
    if db is None:
        from app.db_engine import db as _db  # noqa: PLC0415

        db = _db
    return db
logger = logging.getLogger(__name__)
# ---------------------------------------------------------------------------
# Batter constants
# ---------------------------------------------------------------------------
# Fixed per-tier additions to the four boosted offensive columns.
# Totals +2.00, matching the -2.00 removed by BATTER_NEGATIVE_DELTAS so the
# 108-chance card sum is preserved (see apply_batter_boost).
BATTER_POSITIVE_DELTAS: dict[str, Decimal] = {
    "homerun": Decimal("0.50"),
    "double_pull": Decimal("0.50"),
    "single_one": Decimal("0.50"),
    "walk": Decimal("0.50"),
}

# Per-tier reductions that fund the positive deltas; 0-floored at apply time.
BATTER_NEGATIVE_DELTAS: dict[str, Decimal] = {
    "strikeout": Decimal("-1.50"),
    "groundout_a": Decimal("-0.50"),
}

# All 22 outcome columns that must sum to 108.
BATTER_OUTCOME_COLUMNS: list[str] = [
    "homerun",
    "bp_homerun",
    "triple",
    "double_three",
    "double_two",
    "double_pull",
    "single_two",
    "single_one",
    "single_center",
    "bp_single",
    "hbp",
    "walk",
    "strikeout",
    "lineout",
    "popout",
    "flyout_a",
    "flyout_bq",
    "flyout_lf_b",
    "flyout_rf_b",
    "groundout_a",
    "groundout_b",
    "groundout_c",
]
# ---------------------------------------------------------------------------
# Pitcher constants
# ---------------------------------------------------------------------------
# (column, tb_cost) pairs in priority order.
# apply_pitcher_boost drains these front-to-back, converting chances into
# strikeouts until the TB budget runs out.
PITCHER_PRIORITY: list[tuple[str, int]] = [
    ("double_cf", 2),
    ("double_three", 2),
    ("double_two", 2),
    ("single_center", 1),
    ("single_two", 1),
    ("single_one", 1),
    ("bp_single", 1),
    ("walk", 1),
    ("homerun", 4),
    ("bp_homerun", 4),
    ("triple", 3),
    ("hbp", 1),
]

# All 18 variable outcome columns (sum to 79; x-checks add 29 for 108 total).
PITCHER_OUTCOME_COLUMNS: list[str] = [
    "homerun",
    "bp_homerun",
    "triple",
    "double_three",
    "double_two",
    "double_cf",
    "single_two",
    "single_one",
    "single_center",
    "bp_single",
    "hbp",
    "walk",
    "strikeout",
    "flyout_lf_b",
    "flyout_cf_b",
    "flyout_rf_b",
    "groundout_a",
    "groundout_b",
]

# Cross-check columns that are NEVER modified by the boost algorithm.
PITCHER_XCHECK_COLUMNS: list[str] = [
    "xcheck_p",
    "xcheck_c",
    "xcheck_1b",
    "xcheck_2b",
    "xcheck_3b",
    "xcheck_ss",
    "xcheck_lf",
    "xcheck_cf",
    "xcheck_rf",
]

# Default TB budget per boost tier (see apply_pitcher_boost's tb_budget arg).
PITCHER_TB_BUDGET = Decimal("1.5")
# ---------------------------------------------------------------------------
# Batter boost
# ---------------------------------------------------------------------------
def apply_batter_boost(ratings_dict: dict) -> dict:
    """Apply one Refractor tier boost to a batter's outcome ratings.

    Adds fixed positive deltas to four offensive columns (homerun, double_pull,
    single_one, walk) while funding that increase by reducing strikeout and
    groundout_a. A 0-floor is enforced on negative columns: if the full
    reduction cannot be taken, positive deltas are scaled proportionally so that
    the invariant (22 columns sum to 108.0) is always preserved.

    Args:
        ratings_dict: Dict containing at minimum all 22 BATTER_OUTCOME_COLUMNS
            as numeric (int or float) values.

    Returns:
        New dict with the same keys as ratings_dict, with boosted outcome column
        values as floats. All other keys are passed through unchanged.

    Raises:
        KeyError: If any BATTER_OUTCOME_COLUMNS key is missing from ratings_dict.
    """
    # Shallow copy so the caller's dict is never mutated.
    result = dict(ratings_dict)

    # Step 1 — convert the 22 outcome columns to Decimal for precise arithmetic.
    # Decimal(str(x)) uses the value's shortest decimal repr rather than the
    # exact binary expansion of the float.
    ratings: dict[str, Decimal] = {
        col: Decimal(str(result[col])) for col in BATTER_OUTCOME_COLUMNS
    }

    # Step 2 — apply negative deltas with 0-floor, tracking how much was
    # actually removed versus how much was requested.
    total_requested_reduction = Decimal("0")
    total_actually_reduced = Decimal("0")
    for col, delta in BATTER_NEGATIVE_DELTAS.items():
        requested = abs(delta)
        total_requested_reduction += requested
        # 0-floor: never take more than the column currently holds.
        actual = min(requested, ratings[col])
        ratings[col] -= actual
        total_actually_reduced += actual

    # Step 3 — check whether any truncation occurred.
    total_truncated = total_requested_reduction - total_actually_reduced

    # Step 4 — scale positive deltas if we couldn't take the full reduction.
    if total_truncated > Decimal("0"):
        # Positive additions must equal what was actually reduced so the
        # 108-sum is preserved.
        total_requested_addition = sum(BATTER_POSITIVE_DELTAS.values())
        if total_requested_addition > Decimal("0"):
            scale = total_actually_reduced / total_requested_addition
        else:
            scale = Decimal("0")
        logger.warning(
            "refractor_boost: batter truncation occurred — "
            "requested_reduction=%.4f actually_reduced=%.4f scale=%.6f",
            float(total_requested_reduction),
            float(total_actually_reduced),
            float(scale),
        )
        # Quantize the first N-1 deltas independently, then assign the last
        # delta as the remainder so the total addition equals
        # total_actually_reduced exactly (no quantize drift across 4 ops).
        pos_cols = list(BATTER_POSITIVE_DELTAS.keys())
        positive_deltas = {}
        running_sum = Decimal("0")
        for col in pos_cols[:-1]:
            scaled = (BATTER_POSITIVE_DELTAS[col] * scale).quantize(
                Decimal("0.000001"), rounding=ROUND_HALF_UP
            )
            positive_deltas[col] = scaled
            running_sum += scaled
        # Remainder for the final column; clamped at 0 in case rounding
        # pushed running_sum past the target.
        last_delta = total_actually_reduced - running_sum
        positive_deltas[pos_cols[-1]] = max(last_delta, Decimal("0"))
    else:
        # Full reduction was possible — use the fixed deltas as-is.
        positive_deltas = BATTER_POSITIVE_DELTAS

    # Step 5 — apply (possibly scaled) positive deltas.
    for col, delta in positive_deltas.items():
        ratings[col] += delta

    # Write boosted values back as floats.
    for col in BATTER_OUTCOME_COLUMNS:
        result[col] = float(ratings[col])
    return result
# ---------------------------------------------------------------------------
# Pitcher boost
# ---------------------------------------------------------------------------
def apply_pitcher_boost(ratings_dict: dict, tb_budget: float = 1.5) -> dict:
    """Apply one Refractor tier boost to a pitcher's outcome ratings.

    Iterates through PITCHER_PRIORITY in order, converting as many outcome
    chances as the TB budget allows into strikeouts. The TB cost per chance
    varies by outcome type (e.g. a double costs 2 TB budget units, a single
    costs 1). The strikeout column absorbs all converted chances.

    X-check columns (xcheck_p through xcheck_rf) are never touched.

    Args:
        ratings_dict: Dict containing at minimum all 18 PITCHER_OUTCOME_COLUMNS
            as numeric (int or float) values.
        tb_budget: Total base budget available for this boost tier. Defaults
            to 1.5 (PITCHER_TB_BUDGET).

    Returns:
        New dict with the same keys as ratings_dict, with boosted outcome column
        values as floats. All other keys are passed through unchanged.

    Raises:
        KeyError: If any PITCHER_OUTCOME_COLUMNS key is missing from ratings_dict.
    """
    # Shallow copy so the caller's dict is never mutated.
    result = dict(ratings_dict)

    # Step 1 — convert outcome columns to Decimal, set remaining budget.
    ratings: dict[str, Decimal] = {
        col: Decimal(str(result[col])) for col in PITCHER_OUTCOME_COLUMNS
    }
    remaining = Decimal(str(tb_budget))

    # Step 2 — iterate priority list, draining budget.
    for col, tb_cost in PITCHER_PRIORITY:
        # Skip columns with nothing left to convert.
        if ratings[col] <= Decimal("0"):
            continue
        tb_cost_d = Decimal(str(tb_cost))
        # The budget may only cover a fractional number of chances; Decimal
        # division keeps that remainder exact.
        max_chances = remaining / tb_cost_d
        chances_to_take = min(ratings[col], max_chances)
        ratings[col] -= chances_to_take
        ratings["strikeout"] += chances_to_take
        remaining -= chances_to_take * tb_cost_d
        if remaining <= Decimal("0"):
            break

    # Step 3 — warn if budget was not fully spent (rare, indicates all priority
    # columns were already at zero).
    if remaining > Decimal("0"):
        logger.warning(
            "refractor_boost: pitcher TB budget not fully spent — "
            "remaining=%.4f of tb_budget=%.4f",
            float(remaining),
            tb_budget,
        )

    # Write boosted values back as floats.
    for col in PITCHER_OUTCOME_COLUMNS:
        result[col] = float(ratings[col])
    return result
# ---------------------------------------------------------------------------
# Variant hash
# ---------------------------------------------------------------------------
def compute_variant_hash(
player_id: int,
refractor_tier: int,
cosmetics: list[str] | None = None,
) -> int:
"""Compute a stable, deterministic variant identifier for a boosted card.
Hashes the combination of player_id, refractor_tier, and an optional sorted
list of cosmetic identifiers to produce a compact integer suitable for use
as a database variant key. The result is derived from the first 8 hex
characters of a SHA-256 digest, so collisions are extremely unlikely in
practice.
variant=0 is reserved and will never be returned; any hash that resolves to
0 is remapped to 1.
Args:
player_id: Player primary key.
refractor_tier: Refractor tier (04) the card has reached.
cosmetics: Optional list of cosmetic tag strings (e.g. special art
identifiers). Order is normalised callers need not sort.
Returns:
A positive integer in the range [1, 2^32 - 1].
"""
inputs = {
"player_id": player_id,
"refractor_tier": refractor_tier,
"cosmetics": sorted(cosmetics or []),
}
raw = hashlib.sha256(json.dumps(inputs, sort_keys=True).encode()).hexdigest()
result = int(raw[:8], 16)
return result if result != 0 else 1 # variant=0 is reserved
# ---------------------------------------------------------------------------
# Display stat helpers
# ---------------------------------------------------------------------------
def compute_batter_display_stats(ratings: dict) -> dict:
    """Compute avg/obp/slg display stats from batter outcome chances.

    Mirrors the BattingCardRatingsModel Pydantic validator formulas so that
    variant card display stats stay consistent with the boosted chance
    values. Every rate uses the full 108-chance card as denominator, and
    ballpark (bp_*) outcomes count at half weight.

    Args:
        ratings: Dict containing at minimum all BATTER_OUTCOME_COLUMNS as
            numeric (int or float) values.

    Returns:
        Dict with keys 'avg', 'obp', 'slg' as floats.
    """
    # (hit weight, total-base weight) per hit-type column.
    weights = {
        "homerun": (1.0, 4.0),
        "bp_homerun": (0.5, 2.0),
        "triple": (1.0, 3.0),
        "double_three": (1.0, 2.0),
        "double_two": (1.0, 2.0),
        "double_pull": (1.0, 2.0),
        "single_two": (1.0, 1.0),
        "single_one": (1.0, 1.0),
        "single_center": (1.0, 1.0),
        "bp_single": (0.5, 0.5),
    }
    hit_chances = 0.0
    total_bases = 0.0
    for col, (hit_weight, tb_weight) in weights.items():
        hit_chances += ratings[col] * hit_weight
        total_bases += ratings[col] * tb_weight
    avg = hit_chances / 108
    obp = avg + (ratings["hbp"] + ratings["walk"]) / 108
    slg = total_bases / 108
    return {"avg": avg, "obp": obp, "slg": slg}
def compute_pitcher_display_stats(ratings: dict) -> dict:
    """Compute avg/obp/slg from pitcher outcome columns.

    Uses the same formulas as the PitchingCardRatingsModel Pydantic validator
    so that variant card display stats are always consistent with the boosted
    chance values. All denominators are 108 (the full card chance total).

    Args:
        ratings: Dict containing at minimum all PITCHER_OUTCOME_COLUMNS as
            numeric (int or float) values.

    Returns:
        Dict with keys 'avg', 'obp', 'slg' as floats.
    """
    # (column, hit weight, total-base weight) in formula order — identical to
    # the batter table except the pitcher card uses double_cf, not double_pull.
    hit_terms = (
        ("homerun", 1.0, 4.0),
        ("bp_homerun", 0.5, 2.0),
        ("triple", 1.0, 3.0),
        ("double_three", 1.0, 2.0),
        ("double_two", 1.0, 2.0),
        ("double_cf", 1.0, 2.0),
        ("single_two", 1.0, 1.0),
        ("single_one", 1.0, 1.0),
        ("single_center", 1.0, 1.0),
        ("bp_single", 0.5, 0.5),
    )
    avg = sum(ratings[col] * hit_w for col, hit_w, _ in hit_terms) / 108
    slg = sum(ratings[col] * base_w for col, _, base_w in hit_terms) / 108
    obp = (ratings["hbp"] + ratings["walk"]) / 108 + avg
    return {"avg": avg, "obp": obp, "slg": slg}
# ---------------------------------------------------------------------------
# Orchestration: apply_tier_boost
# ---------------------------------------------------------------------------
def apply_tier_boost(
    player_id: int,
    team_id: int,
    new_tier: int,
    card_type: str,
    _batting_card_model=None,
    _batting_ratings_model=None,
    _pitching_card_model=None,
    _pitching_ratings_model=None,
    _card_model=None,
    _state_model=None,
    _audit_model=None,
) -> dict:
    """Create a boosted variant card for a tier-up.

    IMPORTANT: This function is the SOLE writer of current_tier on
    RefractorCardState when a tier-up occurs. The evaluator computes
    the new tier but does NOT write it — this function writes tier +
    variant + audit atomically inside a single db.atomic() block.
    If this function fails, the tier stays at its old value and will
    be retried on the next game evaluation.

    Orchestrates the full flow (card creation outside atomic; state
    mutations inside db.atomic()):
    1. Determine source variant (variant=0 for T1, previous tier's hash for T2+)
    2. Fetch source card and ratings rows
    3. Apply boost formula (batter or pitcher) per vs_hand split
    4. Assert 108-sum after boost for both batters and pitchers
    5. Compute new variant hash
    6. Create new card row with new variant (idempotency: skip if exists)
    7. Create new ratings rows for both vs_hand splits (idempotency: skip if exists)
    8. Inside db.atomic():
       a. Write RefractorBoostAudit record
       b. Update RefractorCardState: current_tier, variant, fully_evolved
       c. Propagate variant to all Card rows for (player_id, team_id)

    Args:
        player_id: Player primary key.
        team_id: Team primary key.
        new_tier: The tier being reached (1-4).
        card_type: One of 'batter', 'sp', 'rp'.
        _batting_card_model: Injectable stub for BattingCard (used in tests).
        _batting_ratings_model: Injectable stub for BattingCardRatings.
        _pitching_card_model: Injectable stub for PitchingCard.
        _pitching_ratings_model: Injectable stub for PitchingCardRatings.
        _card_model: Injectable stub for Card.
        _state_model: Injectable stub for RefractorCardState.
        _audit_model: Injectable stub for RefractorBoostAudit.

    Returns:
        Dict with 'variant_created' (int) and 'boost_deltas' (per-split dict).

    Raises:
        ValueError: If card_type is invalid, if the source card or ratings
            are missing, or if RefractorCardState is not found for
            (player_id, team_id).
    """
    # Lazy model imports — same pattern as refractor_evaluator.py.
    if _batting_card_model is None:
        from app.db_engine import BattingCard as _batting_card_model  # noqa: PLC0415
    if _batting_ratings_model is None:
        from app.db_engine import BattingCardRatings as _batting_ratings_model  # noqa: PLC0415
    if _pitching_card_model is None:
        from app.db_engine import PitchingCard as _pitching_card_model  # noqa: PLC0415
    if _pitching_ratings_model is None:
        from app.db_engine import PitchingCardRatings as _pitching_ratings_model  # noqa: PLC0415
    if _card_model is None:
        from app.db_engine import Card as _card_model  # noqa: PLC0415
    if _state_model is None:
        from app.db_engine import RefractorCardState as _state_model  # noqa: PLC0415
    if _audit_model is None:
        from app.db_engine import RefractorBoostAudit as _audit_model  # noqa: PLC0415
    _db = _get_db()
    if card_type not in ("batter", "sp", "rp"):
        raise ValueError(
            f"Invalid card_type={card_type!r}; expected one of 'batter', 'sp', 'rp'"
        )
    # Batter vs pitcher selects the card/ratings models and the FK column name.
    is_batter = card_type == "batter"
    CardModel = _batting_card_model if is_batter else _pitching_card_model
    RatingsModel = _batting_ratings_model if is_batter else _pitching_ratings_model
    fk_field = "battingcard" if is_batter else "pitchingcard"
    # 1. Determine source variant.
    # T1 boosts the base card (variant=0); T2+ boost the previous tier's
    # variant. NOTE(review): compute_variant_hash is called without cosmetics
    # here, so tier hashes assume no cosmetic tags — confirm intended.
    if new_tier == 1:
        source_variant = 0
    else:
        source_variant = compute_variant_hash(player_id, new_tier - 1)
    # 2. Fetch source card and ratings rows.
    source_card = CardModel.get_or_none(
        (CardModel.player == player_id) & (CardModel.variant == source_variant)
    )
    if source_card is None:
        raise ValueError(
            f"No {'batting' if is_batter else 'pitching'}card for "
            f"player={player_id} variant={source_variant}"
        )
    ratings_rows = list(
        RatingsModel.select().where(getattr(RatingsModel, fk_field) == source_card.id)
    )
    if not ratings_rows:
        raise ValueError(f"No ratings rows for card_id={source_card.id}")
    # 3. Apply boost to each vs_hand split.
    boost_fn = apply_batter_boost if is_batter else apply_pitcher_boost
    outcome_cols = BATTER_OUTCOME_COLUMNS if is_batter else PITCHER_OUTCOME_COLUMNS
    boosted_splits: dict[str, dict] = {}
    for row in ratings_rows:
        # Build the ratings dict: outcome columns + (pitcher) x-check columns.
        ratings_dict: dict = {col: getattr(row, col) for col in outcome_cols}
        if not is_batter:
            for col in PITCHER_XCHECK_COLUMNS:
                ratings_dict[col] = getattr(row, col)
        boosted = boost_fn(ratings_dict)
        # 4. Assert 108-sum invariant after boost (Peewee bypasses Pydantic validators).
        if is_batter:
            boosted_sum = sum(boosted[col] for col in BATTER_OUTCOME_COLUMNS)
        else:
            boosted_sum = sum(boosted[col] for col in PITCHER_OUTCOME_COLUMNS) + sum(
                boosted[col] for col in PITCHER_XCHECK_COLUMNS
            )
        # Tolerance of 0.01 absorbs float rounding from the boost formula.
        if abs(boosted_sum - 108.0) >= 0.01:
            raise ValueError(
                f"108-sum invariant violated after boost for player={player_id} "
                f"vs_hand={row.vs_hand}: sum={boosted_sum:.6f}"
            )
        boosted_splits[row.vs_hand] = boosted
    # 5. Compute new variant hash.
    new_variant = compute_variant_hash(player_id, new_tier)
    # 6. Create new card row (idempotency: skip if exists).
    # Card/ratings creation happens OUTSIDE db.atomic() on purpose (see
    # docstring); a partial write here is repaired by the get_or_none checks
    # on retry.
    existing_card = CardModel.get_or_none(
        (CardModel.player == player_id) & (CardModel.variant == new_variant)
    )
    if existing_card is not None:
        new_card = existing_card
    else:
        # Non-outcome fields are copied verbatim from the source card; only
        # the chance columns change on a boost.
        if is_batter:
            clone_fields = [
                "steal_low",
                "steal_high",
                "steal_auto",
                "steal_jump",
                "bunting",
                "hit_and_run",
                "running",
                "offense_col",
                "hand",
            ]
        else:
            clone_fields = [
                "balk",
                "wild_pitch",
                "hold",
                "starter_rating",
                "relief_rating",
                "closer_rating",
                "batting",
                "offense_col",
                "hand",
            ]
        card_data: dict = {
            "player": player_id,
            "variant": new_variant,
            "image_url": None,  # No rendered image for variant cards yet.
        }
        for fname in clone_fields:
            card_data[fname] = getattr(source_card, fname)
        new_card = CardModel.create(**card_data)
    # 7. Create new ratings rows for each split (idempotency: skip if exists).
    display_stats_fn = (
        compute_batter_display_stats if is_batter else compute_pitcher_display_stats
    )
    for vs_hand, boosted_ratings in boosted_splits.items():
        existing_ratings = RatingsModel.get_or_none(
            (getattr(RatingsModel, fk_field) == new_card.id)
            & (RatingsModel.vs_hand == vs_hand)
        )
        if existing_ratings is not None:
            continue  # Idempotency: already written.
        ratings_data: dict = {
            fk_field: new_card.id,
            "vs_hand": vs_hand,
        }
        # Outcome columns (boosted values).
        ratings_data.update({col: boosted_ratings[col] for col in outcome_cols})
        # X-check columns for pitchers (unchanged by boost, copy from boosted dict).
        if not is_batter:
            for col in PITCHER_XCHECK_COLUMNS:
                ratings_data[col] = boosted_ratings[col]
        # Direction rates for batters: copy from source row.
        if is_batter:
            source_row = next(r for r in ratings_rows if r.vs_hand == vs_hand)
            for rate_col in ("pull_rate", "center_rate", "slap_rate"):
                ratings_data[rate_col] = getattr(source_row, rate_col)
        # Compute fresh display stats from boosted chance columns.
        display_stats = display_stats_fn(boosted_ratings)
        ratings_data.update(display_stats)
        RatingsModel.create(**ratings_data)
    # 8. Load card state — needed for atomic state mutations.
    card_state = _state_model.get_or_none(
        (_state_model.player == player_id) & (_state_model.team == team_id)
    )
    if card_state is None:
        raise ValueError(
            f"No refractor_card_state for player={player_id} team={team_id}"
        )
    # All state mutations in a single atomic block.
    with _db.atomic():
        # 8a. Write audit record.
        # boost_delta_json stores per-split boosted values including x-check columns
        # for pitchers so the full card can be reconstructed from the audit.
        # default=str makes non-JSON-native values (e.g. Decimal) serializable.
        audit_data: dict = {
            "card_state": card_state.id,
            "tier": new_tier,
            "variant_created": new_variant,
            "boost_delta_json": json.dumps(boosted_splits, default=str),
        }
        if is_batter:
            audit_data["battingcard"] = new_card.id
        else:
            audit_data["pitchingcard"] = new_card.id
        # Audit idempotency: one record per (card_state, tier).
        existing_audit = _audit_model.get_or_none(
            (_audit_model.card_state == card_state.id) & (_audit_model.tier == new_tier)
        )
        if existing_audit is None:
            _audit_model.create(**audit_data)
        # 8b. Update RefractorCardState — this is the SOLE tier write on tier-up.
        card_state.current_tier = new_tier
        card_state.fully_evolved = new_tier >= 4
        card_state.variant = new_variant
        card_state.save()
        # 8c. Propagate variant to all Card rows for (player_id, team_id).
        _card_model.update(variant=new_variant).where(
            (_card_model.player == player_id) & (_card_model.team == team_id)
        ).execute()
    logger.debug(
        "refractor_boost: applied T%s boost for player=%s team=%s variant=%s",
        new_tier,
        player_id,
        team_id,
        new_variant,
    )
    return {
        "variant_created": new_variant,
        "boost_deltas": dict(boosted_splits),
    }

View File

@ -1,232 +0,0 @@
"""Refractor evaluator service (WP-08).
Force-recalculates a card's refractor state from career totals.
evaluate_card() is the main entry point:
1. Load career totals: SUM all BattingSeasonStats/PitchingSeasonStats rows for (player_id, team_id)
2. Determine track from card_state.track
3. Compute formula value (delegated to formula engine, WP-09)
4. Compare value to track thresholds to determine new_tier
5. Update card_state.current_value = computed value
6. Update card_state.current_tier = max(current_tier, new_tier) — no regression
(SKIPPED when dry_run=True)
7. Update card_state.fully_evolved = (current_tier >= 4)
(SKIPPED when dry_run=True)
8. Update card_state.last_evaluated_at = NOW()
When dry_run=True, only steps 5 and 8 are written (current_value and
last_evaluated_at). Steps 6–7 (current_tier and fully_evolved) are intentionally
skipped so that the evaluate-game endpoint can detect a pending tier-up and
delegate the tier write to apply_tier_boost(), which writes tier + variant
atomically. The return dict always includes both "computed_tier" (what the
formula says the tier should be) and "computed_fully_evolved" (whether the
computed tier implies full evolution) so callers can make decisions without
reading the database again.
Idempotent: calling multiple times with the same data produces the same result.
Depends on WP-05 (RefractorCardState), WP-07 (BattingSeasonStats/PitchingSeasonStats),
and WP-09 (formula engine). Models and formula functions are imported lazily so
this module can be imported before those PRs merge.
"""
from datetime import datetime
import logging
class _CareerTotals:
"""Aggregated career stats for a (player_id, team_id) pair.
Passed to the formula engine as a stats-duck-type object with the attributes
required by compute_value_for_track:
batter: pa, hits, doubles, triples, hr
sp/rp: outs, strikeouts
"""
__slots__ = ("pa", "hits", "doubles", "triples", "hr", "outs", "strikeouts")
def __init__(self, pa, hits, doubles, triples, hr, outs, strikeouts):
self.pa = pa
self.hits = hits
self.doubles = doubles
self.triples = triples
self.hr = hr
self.outs = outs
self.strikeouts = strikeouts
def evaluate_card(
    player_id: int,
    team_id: int,
    dry_run: bool = False,
    _stats_model=None,
    _state_model=None,
    _compute_value_fn=None,
    _tier_from_value_fn=None,
) -> dict:
    """Force-recalculate a card's refractor tier from career stats.

    Sums all BattingSeasonStats or PitchingSeasonStats rows (based on
    card_type) for (player_id, team_id) across all seasons, then delegates
    formula computation and tier classification to the formula engine. The
    result is written back to refractor_card_state and returned as a dict.

    current_tier never decreases (no regression):
        card_state.current_tier = max(card_state.current_tier, new_tier)

    When dry_run=True, only current_value and last_evaluated_at are written —
    current_tier and fully_evolved are NOT updated. This allows the caller
    (evaluate-game endpoint) to detect a tier-up and delegate the tier write
    to apply_tier_boost(), which writes tier + variant atomically. The return
    dict always includes "computed_tier" (what the formula says the tier should
    be) in addition to "current_tier" (what is actually stored in the DB).

    Args:
        player_id: Player primary key.
        team_id: Team primary key.
        dry_run: When True, skip writing current_tier and fully_evolved so
            that apply_tier_boost() can write them atomically with variant
            creation. Defaults to False (existing behaviour for the manual
            /evaluate endpoint).
        _stats_model: Override for BattingSeasonStats/PitchingSeasonStats
            (used in tests to inject a stub model with all stat fields).
        _state_model: Override for RefractorCardState (used in tests to avoid
            importing from db_engine before WP-05 merges).
        _compute_value_fn: Override for formula_engine.compute_value_for_track
            (used in tests to avoid importing formula_engine before WP-09 merges).
        _tier_from_value_fn: Override for formula_engine.tier_from_value
            (used in tests).

    Returns:
        Dict with current_tier, computed_tier, current_value, fully_evolved,
        last_evaluated_at (ISO-8601 string). "computed_tier" reflects what
        the formula computed; "current_tier" reflects what is stored in the DB
        (which may differ when dry_run=True and a tier-up is pending).

    Raises:
        ValueError: If no refractor_card_state row exists for (player_id, team_id).
    """
    if _state_model is None:
        from app.db_engine import RefractorCardState as _state_model  # noqa: PLC0415
    if _compute_value_fn is None or _tier_from_value_fn is None:
        from app.services.formula_engine import (  # noqa: PLC0415
            compute_value_for_track,
            tier_from_value,
        )
        if _compute_value_fn is None:
            _compute_value_fn = compute_value_for_track
        if _tier_from_value_fn is None:
            _tier_from_value_fn = tier_from_value
    # 1. Load card state
    # NOTE(review): queries via .player_id/.team_id while apply_tier_boost
    # uses .player/.team on the same model — Peewee accepts both for FK
    # fields, but the two call sites should agree; confirm intended.
    card_state = _state_model.get_or_none(
        (_state_model.player_id == player_id) & (_state_model.team_id == team_id)
    )
    if card_state is None:
        raise ValueError(
            f"No refractor_card_state for player_id={player_id} team_id={team_id}"
        )
    # 2. Load career totals from the appropriate season stats table
    if _stats_model is not None:
        # Test override: use the injected stub model for all fields
        rows = list(
            _stats_model.select().where(
                (_stats_model.player_id == player_id)
                & (_stats_model.team_id == team_id)
            )
        )
        totals = _CareerTotals(
            pa=sum(r.pa for r in rows),
            hits=sum(r.hits for r in rows),
            doubles=sum(r.doubles for r in rows),
            triples=sum(r.triples for r in rows),
            hr=sum(r.hr for r in rows),
            outs=sum(r.outs for r in rows),
            strikeouts=sum(r.strikeouts for r in rows),
        )
    else:
        from app.db_engine import (
            BattingSeasonStats,
            PitchingSeasonStats,
        )  # noqa: PLC0415
        card_type = card_state.track.card_type
        if card_type == "batter":
            rows = list(
                BattingSeasonStats.select().where(
                    (BattingSeasonStats.player == player_id)
                    & (BattingSeasonStats.team == team_id)
                )
            )
            # Batters have no outs total; pitch-only fields default to 0.
            totals = _CareerTotals(
                pa=sum(r.pa for r in rows),
                hits=sum(r.hits for r in rows),
                doubles=sum(r.doubles for r in rows),
                triples=sum(r.triples for r in rows),
                hr=sum(r.hr for r in rows),
                outs=0,
                strikeouts=sum(r.strikeouts for r in rows),
            )
        else:
            rows = list(
                PitchingSeasonStats.select().where(
                    (PitchingSeasonStats.player == player_id)
                    & (PitchingSeasonStats.team == team_id)
                )
            )
            # Pitchers only contribute outs/strikeouts; batting fields are 0.
            totals = _CareerTotals(
                pa=0,
                hits=0,
                doubles=0,
                triples=0,
                hr=0,
                outs=sum(r.outs for r in rows),
                strikeouts=sum(r.strikeouts for r in rows),
            )
    # 3. Determine track
    track = card_state.track
    # 4. Compute formula value and new tier
    value = _compute_value_fn(track.card_type, totals)
    new_tier = _tier_from_value_fn(value, track)
    # 5-8. Update card state.
    # NOTE(review): datetime.now() is naive (no tz) — confirm the column and
    # all callers expect local time rather than UTC.
    now = datetime.now()
    computed_tier = new_tier
    computed_fully_evolved = computed_tier >= 4
    # Always update value and timestamp; current_tier and fully_evolved are
    # skipped when dry_run=True so that apply_tier_boost() can write them
    # atomically with variant creation on tier-up.
    card_state.current_value = value
    card_state.last_evaluated_at = now
    if not dry_run:
        card_state.current_tier = max(card_state.current_tier, new_tier)
        card_state.fully_evolved = card_state.current_tier >= 4
    card_state.save()
    # NOTE(review): uses the root logger (logging.debug), not a module logger.
    logging.debug(
        "refractor_eval: player=%s team=%s value=%.2f computed_tier=%s "
        "stored_tier=%s dry_run=%s",
        player_id,
        team_id,
        value,
        computed_tier,
        card_state.current_tier,
        dry_run,
    )
    return {
        "player_id": player_id,
        "team_id": team_id,
        "current_value": card_state.current_value,
        "current_tier": card_state.current_tier,
        "computed_tier": computed_tier,
        "computed_fully_evolved": computed_fully_evolved,
        "fully_evolved": card_state.fully_evolved,
        "last_evaluated_at": card_state.last_evaluated_at.isoformat(),
    }

View File

@ -1,138 +0,0 @@
"""
WP-10: Pack opening hook refractor_card_state initialization.
Public API
----------
initialize_card_refractor(player_id, team_id, card_type)
Get-or-create a RefractorCardState for the (player_id, team_id) pair.
Returns the state instance on success, or None if initialization fails
(missing track, integrity error, etc.). Never raises.
_determine_card_type(player)
Pure function: inspect player.pos_1 and return 'sp', 'rp', or 'batter'.
Exported so the cards router and tests can call it directly.
Design notes
------------
- The function is intentionally fire-and-forget from the caller's perspective.
All exceptions are caught and logged; pack opening is never blocked.
- No RefractorProgress rows are created here. Progress accumulation is a
separate concern handled by the stats-update pipeline (WP-07/WP-08).
- AI teams and Gauntlet teams skip Paperdex insertion (cards.py pattern);
  we do NOT replicate that exclusion here — all teams get a refractor state
so that future rule changes don't require back-filling.
"""
import logging
from typing import Optional
from app.db_engine import DoesNotExist, RefractorCardState, RefractorTrack
logger = logging.getLogger(__name__)
def _determine_card_type(player) -> str:
"""Map a player's primary position to a refractor card_type string.
Rules (from WP-10 spec):
- pos_1 contains 'SP' -> 'sp'
- pos_1 contains 'RP' or 'CP' -> 'rp'
- anything else -> 'batter'
Args:
player: Any object with a ``pos_1`` attribute (Player model or stub).
Returns:
One of the strings 'batter', 'sp', 'rp'.
"""
pos = (player.pos_1 or "").upper()
if "SP" in pos:
return "sp"
if "RP" in pos or "CP" in pos:
return "rp"
return "batter"
def initialize_card_refractor(
    player_id: int,
    team_id: int,
    card_type: str,
) -> Optional[RefractorCardState]:
    """Get-or-create a RefractorCardState for a newly acquired card.

    Idempotent entry point called by the cards POST endpoint after each
    card insert. An existing state row for the (player_id, team_id) pair
    is returned untouched — refractor progress is never reset. All
    failures are logged and swallowed so that pack opening is never
    blocked by refractor bookkeeping.

    Args:
        player_id: Primary key of the Player row (Player.player_id).
        team_id: Primary key of the Team row (Team.id).
        card_type: One of 'batter', 'sp', 'rp'. Selects which
            RefractorTrack the new state is attached to.

    Returns:
        The existing or freshly created RefractorCardState, or None if
        initialization could not complete (missing track seed data,
        unexpected DB error, etc.).
    """
    # Resolve the track first — without it there is nothing to create.
    try:
        track = RefractorTrack.get(RefractorTrack.card_type == card_type)
    except DoesNotExist:
        logger.warning(
            "refractor_init: no RefractorTrack found for card_type=%r "
            "(player_id=%s, team_id=%s) — skipping state creation",
            card_type,
            player_id,
            team_id,
        )
        return None
    except Exception:
        logger.exception(
            "refractor_init: unexpected error fetching track "
            "(card_type=%r, player_id=%s, team_id=%s)",
            card_type,
            player_id,
            team_id,
        )
        return None
    # Fresh states start at tier 0 with zero progress.
    initial_state = {
        "track": track,
        "current_tier": 0,
        "current_value": 0.0,
        "fully_evolved": False,
    }
    try:
        row, was_created = RefractorCardState.get_or_create(
            player_id=player_id,
            team_id=team_id,
            defaults=initial_state,
        )
        if was_created:
            logger.debug(
                "refractor_init: created RefractorCardState id=%s "
                "(player_id=%s, team_id=%s, card_type=%r)",
                row.id,
                player_id,
                team_id,
                card_type,
            )
        else:
            logger.debug(
                "refractor_init: state already exists id=%s "
                "(player_id=%s, team_id=%s) — no-op",
                row.id,
                player_id,
                team_id,
            )
        return row
    except Exception:
        logger.exception(
            "refractor_init: failed to get_or_create state "
            "(player_id=%s, team_id=%s, card_type=%r)",
            player_id,
            team_id,
            card_type,
        )
        return None

View File

@ -1,453 +0,0 @@
"""
season_stats.py Full-recalculation BattingSeasonStats and PitchingSeasonStats update logic.
Called once per completed StratGame to recompute the full season batting and
pitching statistics for every player who appeared in that game, then write
those totals to the batting_season_stats and pitching_season_stats tables.
Unlike the previous incremental (delta) approach, each call recomputes totals
from scratch by aggregating all StratPlay rows for the player+team+season
triple. This eliminates double-counting on re-delivery and makes every row a
faithful snapshot of the full season to date.
Idempotency: re-delivery of a game is detected via the ProcessedGame ledger
table, keyed on game_id.
- First call: records the ledger entry and proceeds with recalculation.
- Subsequent calls without force=True: return early with "skipped": True.
- force=True: skips the early-return check and recalculates anyway (useful
for correcting data after retroactive stat adjustments).
Upsert strategy: get_or_create + field assignment + save(). Because we are
writing the full recomputed total rather than adding a delta, there is no
risk of concurrent-write skew between games. A single unified path works for
both SQLite and PostgreSQL.
"""
import logging
from datetime import datetime
from peewee import Case, fn
from app.db_engine import (
db,
BattingSeasonStats,
Decision,
PitchingSeasonStats,
ProcessedGame,
StratGame,
StratPlay,
)
logger = logging.getLogger(__name__)
def _get_player_pairs(game_id: int) -> tuple[set, set]:
    """
    Return the sets of (player_id, team_id) pairs that appeared in the game.

    Queries StratPlay for all rows belonging to game_id and extracts:
    - batting_pairs: set of (batter_id, batter_team_id), excluding rows where
      batter_id is None (e.g. automatic outs, walk-off plays without a PA).
    - pitching_pairs: set of (pitcher_id, pitcher_team_id) from all plays
      (pitcher is always present), plus any pitchers from the Decision table
      who may not have StratPlay rows (rare edge case).

    Args:
        game_id: Primary key of the StratGame to query.

    Returns:
        Tuple of (batting_pairs, pitching_pairs) where each element is a set
        of (int, int) tuples.
    """
    # Materialize once so both comprehensions below share a single query.
    play_rows = list(
        StratPlay.select(
            StratPlay.batter,
            StratPlay.batter_team,
            StratPlay.pitcher,
            StratPlay.pitcher_team,
        )
        .where(StratPlay.game == game_id)
        .tuples()
    )
    batting_pairs: set[tuple[int, int]] = {
        (batter, batter_team)
        for batter, batter_team, _pitcher, _pitcher_team in play_rows
        if batter is not None
    }
    pitching_pairs: set[tuple[int, int]] = {
        (pitcher, pitcher_team)
        for _batter, _batter_team, pitcher, pitcher_team in play_rows
        if pitcher is not None
    }
    # Include pitchers who have a Decision but no StratPlay rows for this game
    # (rare edge case, e.g. a pitcher credited with a decision without recording
    # any plays — the old code handled this explicitly in _apply_decisions).
    pitching_pairs.update(
        Decision.select(Decision.pitcher, Decision.pitcher_team)
        .where(Decision.game == game_id)
        .tuples()
    )
    return batting_pairs, pitching_pairs
def _recalc_batting(player_id: int, team_id: int, season: int) -> dict:
    """
    Recompute full-season batting totals for a player+team+season triple.

    Aggregates every StratPlay row where batter == player_id and
    batter_team == team_id across all games in the given season.

    games counts only games where the player had at least one official PA
    (pa > 0): the CASE expression yields NULL when pa == 0, and
    COUNT(DISTINCT ...) ignores NULLs, so only game IDs with pa > 0
    contribute.

    Args:
        player_id: FK to the player record.
        team_id: FK to the team record.
        season: Integer season year.

    Returns:
        Dict with keys matching BattingSeasonStats columns; all values are
        native Python ints (defaulting to 0 if no rows matched).
    """
    agg = (
        StratPlay.select(
            fn.COUNT(
                Case(None, [(StratPlay.pa > 0, StratPlay.game)], None).distinct()
            ).alias("games"),
            fn.SUM(StratPlay.pa).alias("pa"),
            fn.SUM(StratPlay.ab).alias("ab"),
            fn.SUM(StratPlay.hit).alias("hits"),
            fn.SUM(StratPlay.double).alias("doubles"),
            fn.SUM(StratPlay.triple).alias("triples"),
            fn.SUM(StratPlay.homerun).alias("hr"),
            fn.SUM(StratPlay.rbi).alias("rbi"),
            fn.SUM(StratPlay.run).alias("runs"),
            fn.SUM(StratPlay.bb).alias("bb"),
            fn.SUM(StratPlay.so).alias("strikeouts"),
            fn.SUM(StratPlay.hbp).alias("hbp"),
            fn.SUM(StratPlay.sac).alias("sac"),
            fn.SUM(StratPlay.ibb).alias("ibb"),
            fn.SUM(StratPlay.gidp).alias("gidp"),
            fn.SUM(StratPlay.sb).alias("sb"),
            fn.SUM(StratPlay.cs).alias("cs"),
        )
        .join(StratGame, on=(StratPlay.game == StratGame.id))
        .where(
            StratPlay.batter == player_id,
            StratPlay.batter_team == team_id,
            StratGame.season == season,
        )
        .dicts()
        .first()
    ) or {}
    columns = (
        "games", "pa", "ab", "hits", "doubles", "triples", "hr", "rbi",
        "runs", "bb", "strikeouts", "hbp", "sac", "ibb", "gidp", "sb", "cs",
    )
    # SUM over zero rows yields NULL/None; coerce every field to int 0.
    return {col: agg.get(col) or 0 for col in columns}
def _recalc_pitching(player_id: int, team_id: int, season: int) -> dict:
    """
    Recompute full-season pitching totals for a player+team+season triple.

    Aggregates every StratPlay row where pitcher == player_id and
    pitcher_team == team_id across all games in the given season. games counts
    all distinct games in which the pitcher appeared (any play qualifies).

    Stats derived from StratPlay (from the batter-perspective columns):
    - outs          = SUM(outs)
    - strikeouts    = SUM(so)   batter SO = pitcher K
    - hits_allowed  = SUM(hit)
    - bb            = SUM(bb)   walks allowed
    - hbp           = SUM(hbp)
    - hr_allowed    = SUM(homerun)
    - wild_pitches  = SUM(wild_pitch)
    - balks         = SUM(balk)

    Fields not available from StratPlay (runs_allowed, earned_runs) default
    to 0. Decision-level fields (wins, losses, etc.) are populated separately
    by _recalc_decisions() and merged in the caller.

    Args:
        player_id: FK to the player record.
        team_id: FK to the team record.
        season: Integer season year.

    Returns:
        Dict with keys matching PitchingSeasonStats columns (excluding
        decision fields, which are filled by _recalc_decisions).
    """
    agg = (
        StratPlay.select(
            fn.COUNT(StratPlay.game.distinct()).alias("games"),
            fn.SUM(StratPlay.outs).alias("outs"),
            fn.SUM(StratPlay.so).alias("strikeouts"),
            fn.SUM(StratPlay.hit).alias("hits_allowed"),
            fn.SUM(StratPlay.bb).alias("bb"),
            fn.SUM(StratPlay.hbp).alias("hbp"),
            fn.SUM(StratPlay.homerun).alias("hr_allowed"),
            fn.SUM(StratPlay.wild_pitch).alias("wild_pitches"),
            fn.SUM(StratPlay.balk).alias("balks"),
        )
        .join(StratGame, on=(StratPlay.game == StratGame.id))
        .where(
            StratPlay.pitcher == player_id,
            StratPlay.pitcher_team == team_id,
            StratGame.season == season,
        )
        .dicts()
        .first()
    ) or {}
    columns = (
        "games", "outs", "strikeouts", "hits_allowed", "bb", "hbp",
        "hr_allowed", "wild_pitches", "balks",
    )
    # SUM over zero rows yields NULL/None; coerce every field to int 0.
    totals = {col: agg.get(col) or 0 for col in columns}
    # Not derivable from play-by-play data.
    totals["runs_allowed"] = 0
    totals["earned_runs"] = 0
    return totals
def _recalc_decisions(player_id: int, team_id: int, season: int) -> dict:
    """
    Recompute full-season decision totals for a pitcher+team+season triple.

    Aggregates all Decision rows for the pitcher across the season. Decision
    rows are keyed by (pitcher, pitcher_team, season) independently of the
    StratPlay table, so this query is separate from _recalc_pitching().

    Decision.is_start is a BooleanField; CAST to INTEGER before summing to
    ensure correct arithmetic across SQLite (True/False) and PostgreSQL
    (boolean).

    Args:
        player_id: FK to the player record (pitcher).
        team_id: FK to the team record.
        season: Integer season year.

    Returns:
        Dict with keys: wins, losses, holds, saves, blown_saves,
        games_started. All values are native Python ints.
    """
    agg = (
        Decision.select(
            fn.SUM(Decision.win).alias("wins"),
            fn.SUM(Decision.loss).alias("losses"),
            fn.SUM(Decision.hold).alias("holds"),
            fn.SUM(Decision.is_save).alias("saves"),
            fn.SUM(Decision.b_save).alias("blown_saves"),
            fn.SUM(Decision.is_start.cast("INTEGER")).alias("games_started"),
        )
        .where(
            Decision.pitcher == player_id,
            Decision.pitcher_team == team_id,
            Decision.season == season,
        )
        .dicts()
        .first()
    ) or {}
    columns = (
        "wins", "losses", "holds", "saves", "blown_saves", "games_started",
    )
    # SUM over zero rows yields NULL/None; coerce every field to int 0.
    return {col: agg.get(col) or 0 for col in columns}
def update_season_stats(game_id: int, force: bool = False) -> dict:
    """
    Recompute full-season batting and pitching stats for every player in the game.

    This is a from-scratch recalculation, not an incremental update: for each
    player+team pair that appears in the game, the season totals are rebuilt
    by querying all StratPlay rows for the player+team+season triple, and the
    result overwrites whatever was previously stored — no additive delta.

    Algorithm:
        1. Fetch StratGame to get the season.
        2. Check the ProcessedGame ledger:
           - Already processed and force=False -> return early (skipped=True).
           - Already processed and force=True  -> continue (overwrite allowed).
           - Not yet processed -> create the ledger entry.
        3. Determine (player_id, team_id) pairs via _get_player_pairs().
        4. For each batting pair: recompute season totals, then get_or_create
           BattingSeasonStats and overwrite all fields.
        5. For each pitching pair: recompute season play totals and decision
           totals, merge, then get_or_create PitchingSeasonStats and
           overwrite all fields.

    Args:
        game_id: Primary key of the StratGame to process.
        force: If True, re-process even if the game was previously recorded
            in the ProcessedGame ledger. Useful for correcting stats after
            retroactive data adjustments.

    Returns:
        Dict with keys:
            game_id          - echoed back
            season           - season integer from StratGame
            batters_updated  - number of BattingSeasonStats rows written
            pitchers_updated - number of PitchingSeasonStats rows written
            skipped          - True only when the game was already processed
                               and force=False; absent otherwise.

    Raises:
        StratGame.DoesNotExist: If no StratGame row matches game_id.
    """
    logger.info("update_season_stats: starting for game_id=%d force=%s", game_id, force)
    game = StratGame.get_by_id(game_id)
    season = game.season

    # Field names shared between the _recalc_* result dicts and the model
    # attributes, so each season-stats row can be overwritten with a loop.
    batting_fields = (
        "games", "pa", "ab", "hits", "doubles", "triples", "hr", "rbi",
        "runs", "bb", "strikeouts", "hbp", "sac", "ibb", "gidp", "sb", "cs",
    )
    play_fields = (
        "games", "outs", "strikeouts", "bb", "hits_allowed", "runs_allowed",
        "earned_runs", "hr_allowed", "hbp", "wild_pitches", "balks",
    )
    decision_fields = (
        "games_started", "wins", "losses", "holds", "saves", "blown_saves",
    )

    with db.atomic():
        # Idempotency check via ProcessedGame ledger.
        _, created = ProcessedGame.get_or_create(game_id=game_id)
        if not created and not force:
            logger.info(
                "update_season_stats: game_id=%d already processed, skipping",
                game_id,
            )
            return {
                "game_id": game_id,
                "season": season,
                "batters_updated": 0,
                "pitchers_updated": 0,
                "skipped": True,
            }
        if not created and force:
            logger.info(
                "update_season_stats: game_id=%d already processed, force=True — recalculating",
                game_id,
            )

        batting_pairs, pitching_pairs = _get_player_pairs(game_id)
        logger.debug(
            "update_season_stats: game_id=%d found %d batting pairs, %d pitching pairs",
            game_id,
            len(batting_pairs),
            len(pitching_pairs),
        )
        stamp = datetime.now()

        # Recompute and overwrite batting season stats for each batter.
        batters_updated = 0
        for batter_id, squad_id in batting_pairs:
            totals = _recalc_batting(batter_id, squad_id, season)
            record, _ = BattingSeasonStats.get_or_create(
                player_id=batter_id,
                team_id=squad_id,
                season=season,
            )
            for field in batting_fields:
                setattr(record, field, totals[field])
            record.last_game_id = game_id
            record.last_updated_at = stamp
            record.save()
            batters_updated += 1

        # Recompute and overwrite pitching season stats for each pitcher.
        pitchers_updated = 0
        for pitcher_id, squad_id in pitching_pairs:
            play_totals = _recalc_pitching(pitcher_id, squad_id, season)
            decision_totals = _recalc_decisions(pitcher_id, squad_id, season)
            record, _ = PitchingSeasonStats.get_or_create(
                player_id=pitcher_id,
                team_id=squad_id,
                season=season,
            )
            for field in play_fields:
                setattr(record, field, play_totals[field])
            for field in decision_fields:
                setattr(record, field, decision_totals[field])
            record.last_game_id = game_id
            record.last_updated_at = stamp
            record.save()
            pitchers_updated += 1

    logger.info(
        "update_season_stats: game_id=%d complete — "
        "batters_updated=%d pitchers_updated=%d",
        game_id,
        batters_updated,
        pitchers_updated,
    )
    return {
        "game_id": game_id,
        "season": season,
        "batters_updated": batters_updated,
        "pitchers_updated": pitchers_updated,
    }

View File

@ -1,57 +0,0 @@
-- Migration: Add scout_opportunity and scout_claim tables
-- Date: 2026-03-04
-- Issue: #44
-- Purpose: Support the scouting feature where players can scout cards
--          from other teams' opened packs within a 30-minute window.
--
-- Run on dev first, verify with:
--   SELECT count(*) FROM scout_opportunity;
--   SELECT count(*) FROM scout_claim;
--
-- Rollback: See DROP statements at bottom of file

-- ============================================
-- FORWARD MIGRATION
-- ============================================
BEGIN;

-- Parent record: one row per opened pack that is eligible for scouting.
CREATE TABLE IF NOT EXISTS scout_opportunity (
    id SERIAL PRIMARY KEY,
    -- Pack may be deleted later; SET NULL keeps the opportunity row for history.
    pack_id INTEGER REFERENCES pack(id) ON DELETE SET NULL,
    opener_team_id INTEGER NOT NULL REFERENCES team(id) ON DELETE CASCADE,
    card_ids VARCHAR(255) NOT NULL,  -- JSON array of card IDs, e.g. "[10, 11, 12]"
    expires_at BIGINT NOT NULL,      -- Unix ms timestamp, 30 min after creation
    created BIGINT NOT NULL          -- Unix ms timestamp
);

-- Child record: a single team's claim on one card from an opportunity.
CREATE TABLE IF NOT EXISTS scout_claim (
    id SERIAL PRIMARY KEY,
    scout_opportunity_id INTEGER NOT NULL REFERENCES scout_opportunity(id) ON DELETE CASCADE,
    card_id INTEGER NOT NULL REFERENCES card(id) ON DELETE CASCADE,
    claimed_by_team_id INTEGER NOT NULL REFERENCES team(id) ON DELETE CASCADE,
    created BIGINT NOT NULL          -- Unix ms timestamp, auto-set on creation
);

-- Unique constraint: one claim per team per opportunity
CREATE UNIQUE INDEX IF NOT EXISTS scout_claim_opportunity_team_uniq
    ON scout_claim (scout_opportunity_id, claimed_by_team_id);

-- Index for the common query: find unclaimed, expired opportunities
CREATE INDEX IF NOT EXISTS scout_opportunity_expires_at_idx
    ON scout_opportunity (expires_at);

COMMIT;

-- ============================================
-- VERIFICATION QUERIES
-- ============================================
-- \d scout_opportunity
-- \d scout_claim
-- SELECT indexname FROM pg_indexes WHERE tablename IN ('scout_opportunity', 'scout_claim');

-- ============================================
-- ROLLBACK (if needed)
-- ============================================
-- DROP TABLE IF EXISTS scout_claim CASCADE;
-- DROP TABLE IF EXISTS scout_opportunity CASCADE;

View File

@ -1,241 +0,0 @@
-- Migration: Add card evolution tables and column extensions
-- Date: 2026-03-17
-- Issue: WP-04
-- Purpose: Support the Card Evolution system — creates batting_season_stats
--          and pitching_season_stats for per-player stat accumulation, plus
--          evolution tracks with tier thresholds, per-card evolution state,
--          tier-based stat boosts, and cosmetic unlocks. Also extends the
--          card, battingcard, and pitchingcard tables with variant and
--          image_url columns required by the evolution display layer.
--
-- Run on dev first, verify with:
--   SELECT count(*) FROM batting_season_stats;
--   SELECT count(*) FROM pitching_season_stats;
--   SELECT count(*) FROM evolution_track;
--   SELECT count(*) FROM evolution_card_state;
--   SELECT count(*) FROM evolution_tier_boost;
--   SELECT count(*) FROM evolution_cosmetic;
--   SELECT column_name FROM information_schema.columns
--     WHERE table_name IN ('card', 'battingcard', 'pitchingcard')
--     AND column_name IN ('variant', 'image_url')
--     ORDER BY table_name, column_name;
--
-- Rollback: See DROP/ALTER statements at bottom of file

-- ============================================
-- FORWARD MIGRATION
-- ============================================
BEGIN;

-- --------------------------------------------
-- Table 1: batting_season_stats
--   Accumulates per-player per-team per-season
--   batting totals for evolution formula evaluation
--   and leaderboard queries.
-- --------------------------------------------
CREATE TABLE IF NOT EXISTS batting_season_stats (
    id SERIAL PRIMARY KEY,
    player_id INTEGER NOT NULL REFERENCES player(player_id) ON DELETE CASCADE,
    team_id INTEGER NOT NULL REFERENCES team(id) ON DELETE CASCADE,
    season INTEGER NOT NULL,
    games INTEGER NOT NULL DEFAULT 0,
    pa INTEGER NOT NULL DEFAULT 0,
    ab INTEGER NOT NULL DEFAULT 0,
    hits INTEGER NOT NULL DEFAULT 0,
    doubles INTEGER NOT NULL DEFAULT 0,
    triples INTEGER NOT NULL DEFAULT 0,
    hr INTEGER NOT NULL DEFAULT 0,
    rbi INTEGER NOT NULL DEFAULT 0,
    runs INTEGER NOT NULL DEFAULT 0,
    bb INTEGER NOT NULL DEFAULT 0,
    strikeouts INTEGER NOT NULL DEFAULT 0,
    hbp INTEGER NOT NULL DEFAULT 0,
    sac INTEGER NOT NULL DEFAULT 0,
    ibb INTEGER NOT NULL DEFAULT 0,
    gidp INTEGER NOT NULL DEFAULT 0,
    sb INTEGER NOT NULL DEFAULT 0,
    cs INTEGER NOT NULL DEFAULT 0,
    -- Last game folded into these totals; kept for incremental-update audit.
    last_game_id INTEGER REFERENCES stratgame(id) ON DELETE SET NULL,
    last_updated_at TIMESTAMP
);

-- One row per player per team per season
CREATE UNIQUE INDEX IF NOT EXISTS batting_season_stats_player_team_season_uniq
    ON batting_season_stats (player_id, team_id, season);

-- Fast lookup by team + season (e.g. leaderboard queries)
CREATE INDEX IF NOT EXISTS batting_season_stats_team_season_idx
    ON batting_season_stats (team_id, season);

-- Fast lookup by player across seasons
CREATE INDEX IF NOT EXISTS batting_season_stats_player_season_idx
    ON batting_season_stats (player_id, season);

-- --------------------------------------------
-- Table 2: pitching_season_stats
--   Accumulates per-player per-team per-season
--   pitching totals for evolution formula evaluation
--   and leaderboard queries.
-- --------------------------------------------
CREATE TABLE IF NOT EXISTS pitching_season_stats (
    id SERIAL PRIMARY KEY,
    player_id INTEGER NOT NULL REFERENCES player(player_id) ON DELETE CASCADE,
    team_id INTEGER NOT NULL REFERENCES team(id) ON DELETE CASCADE,
    season INTEGER NOT NULL,
    games INTEGER NOT NULL DEFAULT 0,
    games_started INTEGER NOT NULL DEFAULT 0,
    outs INTEGER NOT NULL DEFAULT 0,
    strikeouts INTEGER NOT NULL DEFAULT 0,
    bb INTEGER NOT NULL DEFAULT 0,
    hits_allowed INTEGER NOT NULL DEFAULT 0,
    runs_allowed INTEGER NOT NULL DEFAULT 0,
    earned_runs INTEGER NOT NULL DEFAULT 0,
    hr_allowed INTEGER NOT NULL DEFAULT 0,
    hbp INTEGER NOT NULL DEFAULT 0,
    wild_pitches INTEGER NOT NULL DEFAULT 0,
    balks INTEGER NOT NULL DEFAULT 0,
    wins INTEGER NOT NULL DEFAULT 0,
    losses INTEGER NOT NULL DEFAULT 0,
    holds INTEGER NOT NULL DEFAULT 0,
    saves INTEGER NOT NULL DEFAULT 0,
    blown_saves INTEGER NOT NULL DEFAULT 0,
    -- Last game folded into these totals; kept for incremental-update audit.
    last_game_id INTEGER REFERENCES stratgame(id) ON DELETE SET NULL,
    last_updated_at TIMESTAMP
);

-- One row per player per team per season
CREATE UNIQUE INDEX IF NOT EXISTS pitching_season_stats_player_team_season_uniq
    ON pitching_season_stats (player_id, team_id, season);

-- Fast lookup by team + season (e.g. leaderboard queries)
CREATE INDEX IF NOT EXISTS pitching_season_stats_team_season_idx
    ON pitching_season_stats (team_id, season);

-- Fast lookup by player across seasons
CREATE INDEX IF NOT EXISTS pitching_season_stats_player_season_idx
    ON pitching_season_stats (player_id, season);

-- --------------------------------------------
-- Table 3: evolution_track
--   Defines the available evolution tracks
--   (e.g. "HR Mastery", "Ace SP"), their
--   metric formula, and the four tier thresholds.
-- --------------------------------------------
CREATE TABLE IF NOT EXISTS evolution_track (
    id SERIAL PRIMARY KEY,
    name VARCHAR(255) UNIQUE NOT NULL,
    card_type VARCHAR(50) NOT NULL,  -- 'batter', 'sp', or 'rp'
    formula VARCHAR(255) NOT NULL,   -- e.g. 'hr', 'k_per_9', 'ops'
    t1_threshold INTEGER NOT NULL,
    t2_threshold INTEGER NOT NULL,
    t3_threshold INTEGER NOT NULL,
    t4_threshold INTEGER NOT NULL
);

-- --------------------------------------------
-- Table 4: evolution_card_state
--   Records each card's current evolution tier,
--   running metric value, and the track it
--   belongs to. One state row per card (player
--   + team combination uniquely identifies a
--   card in a given season).
-- --------------------------------------------
CREATE TABLE IF NOT EXISTS evolution_card_state (
    id SERIAL PRIMARY KEY,
    player_id INTEGER NOT NULL REFERENCES player(player_id) ON DELETE CASCADE,
    team_id INTEGER NOT NULL REFERENCES team(id) ON DELETE CASCADE,
    track_id INTEGER NOT NULL REFERENCES evolution_track(id) ON DELETE CASCADE,
    current_tier INTEGER NOT NULL DEFAULT 0,
    current_value DOUBLE PRECISION NOT NULL DEFAULT 0.0,
    fully_evolved BOOLEAN NOT NULL DEFAULT FALSE,
    last_evaluated_at TIMESTAMP
);

-- One evolution state per card (player + team)
CREATE UNIQUE INDEX IF NOT EXISTS evolution_card_state_player_team_uniq
    ON evolution_card_state (player_id, team_id);

-- --------------------------------------------
-- Table 5: evolution_tier_boost
--   Defines the stat boosts unlocked at each
--   tier within a track. A single tier may
--   grant multiple boosts (e.g. +1 HR and
--   +1 power rating).
-- --------------------------------------------
CREATE TABLE IF NOT EXISTS evolution_tier_boost (
    id SERIAL PRIMARY KEY,
    track_id INTEGER NOT NULL REFERENCES evolution_track(id) ON DELETE CASCADE,
    tier INTEGER NOT NULL,           -- 1-4
    boost_type VARCHAR(50) NOT NULL, -- e.g. 'rating_bump', 'display_only'
    boost_target VARCHAR(50) NOT NULL, -- e.g. 'hr_rating', 'contact_rating'
    boost_value DOUBLE PRECISION NOT NULL DEFAULT 0.0
);

-- Prevent duplicate boost definitions for the same track/tier/type/target
CREATE UNIQUE INDEX IF NOT EXISTS evolution_tier_boost_track_tier_type_target_uniq
    ON evolution_tier_boost (track_id, tier, boost_type, boost_target);

-- --------------------------------------------
-- Table 6: evolution_cosmetic
--   Catalogue of unlockable visual treatments
--   (borders, foils, badges, etc.) tied to
--   minimum tier requirements.
-- --------------------------------------------
CREATE TABLE IF NOT EXISTS evolution_cosmetic (
    id SERIAL PRIMARY KEY,
    name VARCHAR(255) UNIQUE NOT NULL,
    tier_required INTEGER NOT NULL DEFAULT 0,
    cosmetic_type VARCHAR(50) NOT NULL, -- e.g. 'border', 'foil', 'badge'
    css_class VARCHAR(255),
    asset_url VARCHAR(500)
);

-- --------------------------------------------
-- Column extensions for existing tables
-- --------------------------------------------
-- Track which visual variant a card is displaying
-- (NULL = base card, 1+ = evolved variants)
ALTER TABLE card ADD COLUMN IF NOT EXISTS variant INTEGER DEFAULT NULL;

-- Store pre-rendered or externally-hosted card image URLs
ALTER TABLE battingcard ADD COLUMN IF NOT EXISTS image_url VARCHAR(500);
ALTER TABLE pitchingcard ADD COLUMN IF NOT EXISTS image_url VARCHAR(500);

COMMIT;

-- ============================================
-- VERIFICATION QUERIES
-- ============================================
-- \d batting_season_stats
-- \d pitching_season_stats
-- \d evolution_track
-- \d evolution_card_state
-- \d evolution_tier_boost
-- \d evolution_cosmetic
-- SELECT indexname FROM pg_indexes
--   WHERE tablename IN (
--     'batting_season_stats',
--     'pitching_season_stats',
--     'evolution_card_state',
--     'evolution_tier_boost'
--   )
--   ORDER BY tablename, indexname;
-- SELECT column_name, data_type FROM information_schema.columns
--   WHERE table_name IN ('card', 'battingcard', 'pitchingcard')
--   AND column_name IN ('variant', 'image_url')
--   ORDER BY table_name, column_name;

-- ============================================
-- ROLLBACK (if needed)
-- ============================================
-- Reverse order of creation: columns first, then dependent tables.
-- ALTER TABLE pitchingcard DROP COLUMN IF EXISTS image_url;
-- ALTER TABLE battingcard DROP COLUMN IF EXISTS image_url;
-- ALTER TABLE card DROP COLUMN IF EXISTS variant;
-- DROP TABLE IF EXISTS evolution_cosmetic CASCADE;
-- DROP TABLE IF EXISTS evolution_tier_boost CASCADE;
-- DROP TABLE IF EXISTS evolution_card_state CASCADE;
-- DROP TABLE IF EXISTS evolution_track CASCADE;
-- DROP TABLE IF EXISTS pitching_season_stats CASCADE;
-- DROP TABLE IF EXISTS batting_season_stats CASCADE;

View File

@ -1,26 +0,0 @@
-- Migration: Add processed_game ledger for full update_season_stats() idempotency
-- Date: 2026-03-18
-- Issue: #105
-- Purpose: Replace the last_game FK check in update_season_stats() with an
--          atomic INSERT into processed_game. This prevents out-of-order
--          re-delivery (game G re-delivered after G+1 was already processed)
--          from bypassing the guard and double-counting stats.
BEGIN;

-- game_id doubles as the primary key: the ledger holds at most one row per
-- game, so a second processing attempt is detectable as an existing row.
CREATE TABLE IF NOT EXISTS processed_game (
    game_id INTEGER PRIMARY KEY REFERENCES stratgame(id) ON DELETE CASCADE,
    processed_at TIMESTAMP NOT NULL DEFAULT NOW()
);

COMMIT;

-- ============================================
-- VERIFICATION QUERIES
-- ============================================
-- \d processed_game

-- ============================================
-- ROLLBACK (if needed)
-- ============================================
-- DROP TABLE IF EXISTS processed_game;

View File

@ -1,19 +0,0 @@
-- Migration: Rename evolution tables to refractor tables
-- Date: 2026-03-23
--
-- Renames all four evolution system tables to the refractor naming convention.
-- This migration corresponds to the application-level rename from
-- EvolutionTrack/EvolutionCardState/EvolutionTierBoost/EvolutionCosmetic
-- to RefractorTrack/RefractorCardState/RefractorTierBoost/RefractorCosmetic.
--
-- The table renames are performed in order that respects foreign key
-- dependencies (referenced tables first, then referencing tables).
--
-- NOTE(review): unlike the sibling migrations, this file has no explicit
-- BEGIN/COMMIT wrapper — confirm the runner applies it transactionally.
-- NOTE(review): ALTER TABLE ... RENAME does not rename backing SERIAL
-- sequences or implicitly-named constraints (e.g. the UNIQUE on name);
-- confirm whether any tooling depends on those retaining the evolution_*
-- prefix.
ALTER TABLE evolution_track RENAME TO refractor_track;
ALTER TABLE evolution_card_state RENAME TO refractor_card_state;
ALTER TABLE evolution_tier_boost RENAME TO refractor_tier_boost;
ALTER TABLE evolution_cosmetic RENAME TO refractor_cosmetic;

-- Rename indexes to match new table names
ALTER INDEX IF EXISTS evolution_card_state_player_team_uniq RENAME TO refractor_card_state_player_team_uniq;
ALTER INDEX IF EXISTS evolution_tier_boost_track_tier_type_target_uniq RENAME TO refractor_tier_boost_track_tier_type_target_uniq;

View File

@ -1,19 +0,0 @@
-- Migration: Add team_id index to refractor_card_state
-- Date: 2026-03-25
--
-- Adds a non-unique index on refractor_card_state.team_id to support the new
-- GET /api/v2/refractor/cards list endpoint, which filters by team as its
-- primary discriminator and is called on every /refractor status bot command.
--
-- The existing unique index is on (player_id, team_id) with player leading,
-- so team-only queries cannot use it efficiently.
BEGIN;

CREATE INDEX IF NOT EXISTS idx_refractor_card_state_team
    ON refractor_card_state (team_id);

COMMIT;

-- Rollback:
-- DROP INDEX IF EXISTS idx_refractor_card_state_team;

View File

@ -1,47 +0,0 @@
-- Migration: Refractor Phase 2 — rating boost support
-- Date: 2026-03-28
-- Purpose: Extends the Refractor system to track and audit rating boosts
--          applied at each tier-up. Adds a variant column to
--          refractor_card_state (mirrors card.variant for promoted copies)
--          and creates the refractor_boost_audit table to record the
--          boost delta, source card, and variant assigned at each tier.
--
-- Tables affected:
--   refractor_card_state — new column: variant INTEGER
--   refractor_boost_audit — new table
--
-- Run on dev first, verify with:
--   SELECT column_name FROM information_schema.columns
--     WHERE table_name = 'refractor_card_state'
--     AND column_name = 'variant';
--   SELECT count(*) FROM refractor_boost_audit;
--
-- Rollback: See DROP/ALTER statements at bottom of file
BEGIN;

-- Verify card.variant column exists (should be from Phase 1 migration).
-- If not present, uncomment:
-- ALTER TABLE card ADD COLUMN IF NOT EXISTS variant INTEGER DEFAULT NULL;

-- New columns on refractor_card_state (additive, no data migration needed)
ALTER TABLE refractor_card_state ADD COLUMN IF NOT EXISTS variant INTEGER;

-- Boost audit table: records what was applied at each tier-up
CREATE TABLE IF NOT EXISTS refractor_boost_audit (
    id SERIAL PRIMARY KEY,
    card_state_id INTEGER NOT NULL REFERENCES refractor_card_state(id) ON DELETE CASCADE,
    tier SMALLINT NOT NULL,
    -- Exactly one of battingcard_id / pitchingcard_id is expected to be set,
    -- depending on card type — TODO confirm; no CHECK constraint enforces it.
    battingcard_id INTEGER REFERENCES battingcard(id),
    pitchingcard_id INTEGER REFERENCES pitchingcard(id),
    variant_created INTEGER NOT NULL,
    boost_delta_json JSONB NOT NULL,
    applied_at TIMESTAMP NOT NULL DEFAULT NOW(),
    UNIQUE(card_state_id, tier)  -- Prevent duplicate audit records on retry
);

COMMIT;

-- Rollback:
-- DROP TABLE IF EXISTS refractor_boost_audit;
-- ALTER TABLE refractor_card_state DROP COLUMN IF EXISTS variant;

View File

@ -1,7 +0,0 @@
-- Drop orphaned RefractorTierBoost and RefractorCosmetic tables.
-- These were speculative schema from the initial Refractor design that were
-- never used — boosts are hardcoded in refractor_boost.py and tier visuals
-- are embedded in CSS templates. Both tables have zero rows on dev and prod.
-- refractor_tier_boost is dropped first: it carries the FK to
-- refractor_track, while refractor_cosmetic is standalone.
DROP TABLE IF EXISTS refractor_tier_boost;
DROP TABLE IF EXISTS refractor_cosmetic;

View File

@ -1,65 +0,0 @@
"""
Migration: Replace 26 FK columns on Roster with RosterSlot junction table.
Creates the `rosterslot` table and migrates existing lineup data from the
card_1..card_26 columns. Safe to re-run (skips rosters already migrated).
Usage:
python migrations/migrate_roster_junction_table.py
"""
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
from app.db_engine import db, Roster, RosterSlot
SLOTS = 26
def migrate():
    """Copy legacy card_1..card_26 roster columns into RosterSlot rows.

    Idempotent: any roster that already has at least one RosterSlot row is
    counted as skipped and left untouched, so the script can be re-run.
    """
    db.connect(reuse_if_open=True)
    # Create the junction table on first run; no-op afterwards.
    db.create_tables([RosterSlot], safe=True)

    # Read raw rows from the old schema via plain SQL so we don't depend on
    # the ORM model knowing about the legacy card_N columns.
    cursor = db.execute_sql("SELECT * FROM roster")
    col_names = [description[0] for description in cursor.description]

    migrated = 0
    skipped = 0
    with db.atomic():
        for raw_row in cursor.fetchall():
            record = dict(zip(col_names, raw_row))
            roster_pk = record["id"]

            # Skip rosters that already have slot rows (previous run).
            if RosterSlot.select().where(RosterSlot.roster == roster_pk).exists():
                skipped += 1
                continue

            pending = []
            for slot_num in range(1, SLOTS + 1):
                if (card_pk := record.get(f"card_{slot_num}_id")) is not None:
                    pending.append(
                        {"roster": roster_pk, "slot": slot_num, "card": card_pk}
                    )
            if pending:
                RosterSlot.insert_many(pending).execute()
            migrated += 1

    print(f"Migration complete: {migrated} rosters migrated, {skipped} already done.")
    db.close()


if __name__ == "__main__":
    migrate()

View File

@ -1,5 +0,0 @@
[tool.ruff]
[tool.ruff.lint]
# db_engine.py uses `from peewee import *` throughout — a pre-existing
# codebase pattern. Suppress wildcard-import warnings for that file only.
per-file-ignores = { "app/db_engine.py" = ["F401", "F403", "F405"], "app/main.py" = ["E402", "F541"] }

View File

@ -1,14 +1,15 @@
pydantic==1.10.21
fastapi==0.111.1
uvicorn==0.30.6
peewee==3.17.9
psycopg2-binary==2.9.9
python-multipart==0.0.9
numpy==1.26.4
pandas==2.2.3
pygsheets==2.0.6
pybaseball==2.2.7
requests==2.32.3
html2image==2.0.6
jinja2==3.1.4
playwright==1.45.1
pydantic==1.*
fastapi
uvicorn
peewee
psycopg2-binary # PostgreSQL adapter for Python
python-multipart
numpy<2
pandas
pygsheets
pybaseball
requests
html2image
jinja2
playwright

View File

@ -1,3 +0,0 @@
[lint]
# db_engine.py uses `from peewee import *` intentionally — suppress star-import warnings
ignore = ["F403", "F405"]

View File

@ -1,132 +0,0 @@
#!/usr/bin/env bash
# run-local.sh — Spin up the Paper Dynasty Database API locally for testing.
#
# Connects to the dev PostgreSQL on the homelab (10.10.0.42) so you get real
# card data for rendering. Playwright Chromium must be installed locally
# (it already is on this workstation).
#
# Usage:
# ./run-local.sh # start on default port 8000
# ./run-local.sh 8001 # start on custom port
# ./run-local.sh --stop # kill a running instance
#
# Card rendering test URLs (after startup):
# HTML preview: http://localhost:8000/api/v2/players/{id}/battingcard/{date}/{variant}?html=True
# PNG render: http://localhost:8000/api/v2/players/{id}/battingcard/{date}/{variant}
# API docs: http://localhost:8000/api/docs
# Fail fast on errors, unset variables, and broken pipelines; run from repo root.
set -euo pipefail
cd "$(dirname "$0")"

PORT="${1:-8000}"
PIDFILE=".run-local.pid"
LOGFILE="logs/database/run-local.log"

# ── Stop mode ────────────────────────────────────────────────────────────────
if [[ "${1:-}" == "--stop" ]]; then
    if [[ -f "$PIDFILE" ]]; then
        pid=$(cat "$PIDFILE")
        if kill -0 "$pid" 2>/dev/null; then
            kill "$pid"
            echo "Stopped local API (PID $pid)"
        else
            echo "PID $pid not running (stale pidfile)"
        fi
        rm -f "$PIDFILE"
    else
        echo "No pidfile found — nothing to stop"
    fi
    exit 0
fi

# ── Pre-flight checks ───────────────────────────────────────────────────────
# Refuse to double-start if the pidfile points at a live process.
if [[ -f "$PIDFILE" ]] && kill -0 "$(cat "$PIDFILE")" 2>/dev/null; then
    echo "Already running (PID $(cat "$PIDFILE")). Use './run-local.sh --stop' first."
    exit 1
fi

# Check Python deps are importable
python -c "import fastapi, peewee, playwright" 2>/dev/null || {
    echo "Missing Python dependencies. Install with: pip install -r requirements.txt"
    exit 1
}

# Check Playwright Chromium is available
python -c "
from playwright.sync_api import sync_playwright
p = sync_playwright().start()
print(p.chromium.executable_path)
p.stop()
" >/dev/null 2>&1 || {
    echo "Playwright Chromium not installed. Run: playwright install chromium"
    exit 1
}

# Check dev DB is reachable (plain TCP connect; no credentials needed)
DB_HOST="${POSTGRES_HOST_LOCAL:-10.10.0.42}"
python -c "
import socket, sys
s = socket.create_connection((sys.argv[1], 5432), timeout=3)
s.close()
" "$DB_HOST" 2>/dev/null || {
    echo "Cannot reach dev PostgreSQL at ${DB_HOST}:5432 — is the homelab up?"
    exit 1
}

# ── Ensure directories exist ────────────────────────────────────────────────
mkdir -p logs/database
mkdir -p storage/cards

# ── Launch ───────────────────────────────────────────────────────────────────
echo "Starting Paper Dynasty Database API on http://localhost:${PORT}"
echo "  DB: paperdynasty_dev @ 10.10.0.42"
echo "  Logs: ${LOGFILE}"
echo ""

# Load .env, then .env.local overrides (for passwords not in version control)
set -a
# shellcheck source=/dev/null
[[ -f .env ]] && source .env
[[ -f .env.local ]] && source .env.local
set +a

# Override DB host to point at the dev server's IP (not Docker network name)
export DATABASE_TYPE=postgresql
export POSTGRES_HOST="$DB_HOST"
export POSTGRES_PORT="${POSTGRES_PORT:-5432}"
export POSTGRES_DB="${POSTGRES_DB:-paperdynasty_dev}"
export POSTGRES_USER="${POSTGRES_USER:-sba_admin}"
export LOG_LEVEL=INFO
export TESTING=True

if [[ -z "${POSTGRES_PASSWORD:-}" || "$POSTGRES_PASSWORD" == "your_production_password" ]]; then
    echo "ERROR: POSTGRES_PASSWORD not set or is the placeholder value."
    echo "Create .env.local with: POSTGRES_PASSWORD=<actual password>"
    exit 1
fi

uvicorn app.main:app \
    --host 0.0.0.0 \
    --port "$PORT" \
    --reload \
    --reload-dir app \
    --reload-dir storage/templates \
    2>&1 | tee "$LOGFILE" &
# NOTE(review): because the backgrounded command is a pipeline, $! is the PID
# of its last stage (tee), not uvicorn — confirm that killing it actually
# terminates the server, or capture uvicorn's PID directly.
echo $! >"$PIDFILE"

# Give the server a moment to start before verifying the process survived.
sleep 2
if kill -0 "$(cat "$PIDFILE")" 2>/dev/null; then
    echo ""
    echo "API running (PID $(cat "$PIDFILE"))."
    echo ""
    echo "Quick test URLs:"
    echo "  API docs: http://localhost:${PORT}/api/docs"
    echo "  Health: curl -s http://localhost:${PORT}/api/v2/players/1/battingcard?html=True"
    echo ""
    echo "Stop with: ./run-local.sh --stop"
else
    echo "Failed to start — check ${LOGFILE}"
    rm -f "$PIDFILE"
    exit 1
fi

View File

@ -2,26 +2,12 @@
<html lang="en">
<head>
{% include 'style.html' %}
{% include 'tier_style.html' %}
<link rel="preconnect" href="https://fonts.googleapis.com">
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link href="https://fonts.googleapis.com/css2?family=Open+Sans:wght@300;400;700&family=Source+Sans+3:wght@400;700&display=swap" rel="stylesheet">
</head>
<body>
<div id="fullCard" style="width: 1200px; height: 600px;">
{% if refractor_tier is defined and refractor_tier > 0 %}
{%- set diamond_colors = {
1: {'color': '#1a6b1a', 'highlight': '#40b040'},
2: {'color': '#2070b0', 'highlight': '#50a0e8'},
3: {'color': '#a82020', 'highlight': '#e85050'},
4: {'color': '#6b2d8e', 'highlight': '#a060d0'},
} -%}
{%- set dc = diamond_colors[refractor_tier] -%}
{%- set filled_bg = 'linear-gradient(135deg, ' ~ dc.highlight ~ ' 0%, ' ~ dc.color ~ ' 50%, ' ~ dc.color ~ ' 100%)' -%}
<div class="tier-diamond{% if refractor_tier == 4 %} diamond-glow{% endif %}">
<div class="diamond-quad{% if refractor_tier >= 2 %} filled{% endif %}" {% if refractor_tier >= 2 %}style="background: {{ filled_bg }};"{% endif %}></div>
<div class="diamond-quad{% if refractor_tier >= 1 %} filled{% endif %}" {% if refractor_tier >= 1 %}style="background: {{ filled_bg }};"{% endif %}></div>
<div class="diamond-quad{% if refractor_tier >= 3 %} filled{% endif %}" {% if refractor_tier >= 3 %}style="background: {{ filled_bg }};"{% endif %}></div>
<div class="diamond-quad{% if refractor_tier >= 4 %} filled{% endif %}" {% if refractor_tier >= 4 %}style="background: {{ filled_bg }};"{% endif %}></div>
</div>
{% endif %}
<div id="header" class="row-wrapper header-text border-bot" style="height: 65px">
<!-- <div id="headerLeft" style="flex-grow: 3; height: auto">-->
<div id="headerLeft" style="width: 477px; height: auto">

File diff suppressed because one or more lines are too long

View File

@ -1,216 +0,0 @@
<style>
#fullCard {
position: relative;
overflow: hidden;
}
</style>
{% if refractor_tier is defined and refractor_tier > 0 %}
<style>
.tier-diamond {
position: absolute;
left: 597px;
top: 78.5px;
transform: translate(-50%, -50%) rotate(45deg);
display: grid;
grid-template: 1fr 1fr / 1fr 1fr;
gap: 2px;
z-index: 20;
pointer-events: none;
background: rgba(0,0,0,0.75);
border-radius: 2px;
box-shadow: 0 0 0 1.5px rgba(0,0,0,0.7), 0 2px 5px rgba(0,0,0,0.5);
}
.diamond-quad {
width: 19px;
height: 19px;
background: rgba(0,0,0,0.3);
}
.diamond-quad.filled {
box-shadow: inset 0 1px 2px rgba(255,255,255,0.45),
inset 0 -1px 2px rgba(0,0,0,0.35),
inset 1px 0 2px rgba(255,255,255,0.15);
}
{% if refractor_tier == 1 %}
/* T1 — Base Chrome */
#header {
background: linear-gradient(135deg, rgba(185,195,210,0.25) 0%, rgba(210,218,228,0.35) 50%, rgba(185,195,210,0.25) 100%), #ffffff;
}
.border-bot {
border-bottom-color: #8e9baf;
border-bottom-width: 4px;
}
#resultHeader.border-bot {
border-bottom-width: 3px;
}
.border-right-thick {
border-right-color: #8e9baf;
}
.border-right-thin {
border-right-color: #8e9baf;
}
.vline {
border-left-color: #8e9baf;
}
{% elif refractor_tier == 2 %}
/* T2 — Refractor */
#header {
background: linear-gradient(135deg, rgba(100,155,230,0.28) 0%, rgba(155,90,220,0.18) 25%, rgba(90,200,210,0.24) 50%, rgba(185,80,170,0.16) 75%, rgba(100,155,230,0.28) 100%), #ffffff;
}
#fullCard {
box-shadow: inset 0 0 14px 3px rgba(90,143,207,0.22);
}
.border-bot {
border-bottom-color: #7a9cc4;
border-bottom-width: 4px;
}
#resultHeader .border-right-thick {
border-right-width: 6px;
}
.border-right-thick {
border-right-color: #7a9cc4;
}
.border-right-thin {
border-right-color: #7a9cc4;
border-right-width: 3px;
}
.vline {
border-left-color: #7a9cc4;
}
.blue-gradient {
background-image: linear-gradient(to right, rgba(60,110,200,1), rgba(100,55,185,0.55), rgba(60,110,200,1));
}
.red-gradient {
background-image: linear-gradient(to right, rgba(190,35,80,1), rgba(165,25,100,0.55), rgba(190,35,80,1));
}
{% elif refractor_tier == 3 %}
/* T3 — Gold Refractor */
#header {
background: linear-gradient(135deg, rgba(195,155,35,0.26) 0%, rgba(235,200,70,0.2) 50%, rgba(195,155,35,0.26) 100%), #ffffff;
overflow: hidden;
position: relative;
}
#fullCard {
box-shadow: inset 0 0 16px 4px rgba(200,165,48,0.22);
}
.border-bot {
border-bottom-color: #c9a94e;
border-bottom-width: 4px;
}
.border-right-thick {
border-right-color: #c9a94e;
}
.border-right-thin {
border-right-color: #c9a94e;
border-right-width: 3px;
}
.vline {
border-left-color: #c9a94e;
}
.blue-gradient {
background-image: linear-gradient(to right, rgba(195,160,40,1), rgba(220,185,60,0.55), rgba(195,160,40,1));
}
.red-gradient {
background-image: linear-gradient(to right, rgba(195,160,40,1), rgba(220,185,60,0.55), rgba(195,160,40,1));
}
/* T3 shimmer animation — paused for static PNG capture */
@keyframes t3-shimmer {
0% { transform: translateX(-130%); }
100% { transform: translateX(230%); }
}
#header::after {
content: '';
position: absolute;
top: 0; left: 0; right: 0; bottom: 0;
background: linear-gradient(
105deg,
transparent 38%,
rgba(255,240,140,0.18) 44%,
rgba(255,220,80,0.38) 50%,
rgba(255,200,60,0.30) 53%,
rgba(255,240,140,0.14) 58%,
transparent 64%
);
pointer-events: none;
z-index: 5;
animation: t3-shimmer 2.5s ease-in-out infinite;
animation-play-state: paused;
}
{% elif refractor_tier == 4 %}
/* T4 — Superfractor */
#header {
background: #ffffff;
overflow: hidden;
position: relative;
}
#fullCard {
box-shadow: inset 0 0 22px 6px rgba(45,212,191,0.28), inset 0 0 39px 9px rgba(200,165,48,0.15);
}
.border-bot {
border-bottom-color: #c9a94e;
border-bottom-width: 4px;
}
.border-right-thick {
border-right-color: #c9a94e;
}
.border-right-thin {
border-right-color: #c9a94e;
}
.vline {
border-left-color: #c9a94e;
}
.blue-gradient {
background-image: linear-gradient(to right, rgba(195,160,40,1), rgba(220,185,60,0.55), rgba(195,160,40,1));
}
.red-gradient {
background-image: linear-gradient(to right, rgba(195,160,40,1), rgba(220,185,60,0.55), rgba(195,160,40,1));
}
/* T4 prismatic header sweep — paused for static PNG capture */
@keyframes t4-prismatic-sweep {
0% { transform: translateX(0%); }
100% { transform: translateX(-50%); }
}
#header::after {
content: '';
position: absolute;
top: 0; left: 0;
width: 200%; height: 100%;
background: linear-gradient(135deg,
transparent 2%, rgba(255,100,100,0.28) 8%, rgba(255,200,50,0.32) 14%,
rgba(100,255,150,0.30) 20%, rgba(50,190,255,0.32) 26%, rgba(140,80,255,0.28) 32%,
rgba(255,100,180,0.24) 38%, transparent 44%,
transparent 52%, rgba(255,100,100,0.28) 58%, rgba(255,200,50,0.32) 64%,
rgba(100,255,150,0.30) 70%, rgba(50,190,255,0.32) 76%, rgba(140,80,255,0.28) 82%,
rgba(255,100,180,0.24) 88%, transparent 94%
);
z-index: 1;
pointer-events: none;
animation: t4-prismatic-sweep 6s linear infinite;
animation-play-state: paused;
}
#header > * { z-index: 2; }
/* T4 diamond glow pulse — paused for static PNG */
@keyframes diamond-glow-pulse {
0%, 100% { box-shadow: 0 0 0 1.5px rgba(0,0,0,0.7), 0 2px 5px rgba(0,0,0,0.5),
0 0 8px 2px rgba(107,45,142,0.6); }
50% { box-shadow: 0 0 0 1.5px rgba(0,0,0,0.5), 0 2px 4px rgba(0,0,0,0.3),
0 0 14px 5px rgba(107,45,142,0.8),
0 0 24px 8px rgba(107,45,142,0.3); }
}
.tier-diamond.diamond-glow {
animation: diamond-glow-pulse 2s ease-in-out infinite;
animation-play-state: paused;
}
{% endif %}
</style>
{% endif %}

View File

View File

@ -1,214 +0,0 @@
"""
Shared test fixtures for the Paper Dynasty database test suite.
Uses in-memory SQLite with foreign_keys pragma enabled. Each test
gets a fresh set of tables via the setup_test_db fixture (autouse).
All models are bound to the in-memory database before table creation
so that no connection to the real storage/pd_master.db occurs during
tests.
"""
import os
import pytest
import psycopg2
from peewee import SqliteDatabase
# Set DATABASE_TYPE=postgresql so that the module-level SKIP_TABLE_CREATION
# flag is True. This prevents db_engine.py from calling create_tables()
# against the real storage/pd_master.db during import — those calls would
# fail if indexes already exist and would also contaminate the dev database.
# The PooledPostgresqlDatabase object is created but never actually connects
# because our fixture rebinds all models to an in-memory SQLite db before
# any query is executed.
os.environ["DATABASE_TYPE"] = "postgresql"
# Provide dummy credentials so PooledPostgresqlDatabase can be instantiated
# without raising a configuration error (it will not actually be used).
os.environ.setdefault("POSTGRES_PASSWORD", "test-dummy")
from app.db_engine import (
Rarity,
Event,
Cardset,
MlbPlayer,
Player,
Team,
PackType,
Pack,
Card,
Roster,
RosterSlot,
StratGame,
StratPlay,
Decision,
BattingSeasonStats,
PitchingSeasonStats,
ProcessedGame,
BattingCard,
PitchingCard,
RefractorTrack,
RefractorCardState,
RefractorBoostAudit,
ScoutOpportunity,
ScoutClaim,
)
# In-memory SQLite shared by every unit test; the foreign_keys pragma is ON
# so FK constraint violations surface during tests instead of being ignored.
_test_db = SqliteDatabase(":memory:", pragmas={"foreign_keys": 1})
# All models in dependency order (parents before children) so that
# create_tables and drop_tables work without FK violations.
_TEST_MODELS = [
    Rarity,
    Event,
    Cardset,
    MlbPlayer,
    Player,
    Team,
    PackType,
    Pack,
    Card,
    Roster,
    RosterSlot,
    StratGame,
    StratPlay,
    Decision,
    BattingSeasonStats,
    PitchingSeasonStats,
    ProcessedGame,
    ScoutOpportunity,
    ScoutClaim,
    RefractorTrack,
    RefractorCardState,
    BattingCard,
    PitchingCard,
    RefractorBoostAudit,
]
@pytest.fixture(autouse=True)
def setup_test_db():
    """Bind all models to in-memory SQLite and create tables.

    The fixture is autouse so every test automatically gets a fresh,
    isolated database schema without needing to request it explicitly.
    Tables are dropped in reverse dependency order after each test to
    keep the teardown clean and to catch any accidental FK reference
    direction bugs early.

    Yields:
        The bound in-memory SqliteDatabase instance.
    """
    _test_db.bind(_TEST_MODELS)
    # reuse_if_open guards against a previously-leaked open connection,
    # which would otherwise make connect() raise OperationalError and
    # cascade failures through the whole session. This matches the
    # evaluator-test fixture, which already passes reuse_if_open=True.
    _test_db.connect(reuse_if_open=True)
    _test_db.create_tables(_TEST_MODELS)
    yield _test_db
    _test_db.drop_tables(list(reversed(_TEST_MODELS)), safe=True)
    _test_db.close()
# ---------------------------------------------------------------------------
# Minimal shared fixtures — create just enough data for FK dependencies
# ---------------------------------------------------------------------------
@pytest.fixture
def rarity():
    """Seed one Common rarity row, used as the FK target for Player rows."""
    attrs = {"value": 1, "name": "Common", "color": "#ffffff"}
    return Rarity.create(**attrs)
@pytest.fixture
def player(rarity):
    """A minimal Player row with every required (non-nullable) column filled.

    Note: Player.p_name is the real column name (not 'name'). All FK and
    non-nullable varchar fields are supplied so that SQLite's NOT NULL
    constraints are satisfied even with foreign_keys=ON.
    """
    cardset = Cardset.create(
        name="Test Set",
        description="Test cardset",
        total_cards=100,
    )
    fields = dict(
        p_name="Test Player",
        rarity=rarity,
        cardset=cardset,
        set_num=1,
        pos_1="1B",
        image="https://example.com/image.png",
        mlbclub="TST",
        franchise="TST",
        description="A test player",
    )
    return Player.create(**fields)
@pytest.fixture
def team():
    """A minimal Team row.

    Team's real columns are abbrev/lname/sname/gmid/gmname/gsheet/wallet/
    team_value/collection_value — not the simplified 'name'/'user_id'
    shorthand used in the spec.
    """
    fields = dict(
        abbrev="TST",
        sname="Test",
        lname="Test Team",
        gmid=100000001,
        gmname="testuser",
        gsheet="https://docs.google.com/spreadsheets/test",
        wallet=500,
        team_value=1000,
        collection_value=1000,
        season=11,
        is_ai=False,
    )
    return Team.create(**fields)
@pytest.fixture
def track():
    """A minimal RefractorTrack for batter cards (WP seed thresholds)."""
    thresholds = dict(
        t1_threshold=37,
        t2_threshold=149,
        t3_threshold=448,
        t4_threshold=896,
    )
    return RefractorTrack.create(
        name="Batter Track",
        card_type="batter",
        formula="pa + tb * 2",
        **thresholds,
    )
# ---------------------------------------------------------------------------
# PostgreSQL integration fixture (used by test_refractor_*_api.py)
# ---------------------------------------------------------------------------
@pytest.fixture(scope="session")
def pg_conn():
    """Session-scoped psycopg2 connection for PostgreSQL integration tests.

    Connection parameters come from the standard POSTGRES_* env vars that
    the CI workflow injects when a postgres service container is running.
    When POSTGRES_HOST is unset the whole session is skipped via
    pytest.skip, keeping local runs clean.

    The connection is shared for the entire session because the integration
    test modules use module-scoped fixtures that rely on it; a per-test
    connection would break those fixtures. Teardown closes the connection
    once all tests have finished.
    """
    host = os.environ.get("POSTGRES_HOST")
    if not host:
        pytest.skip("POSTGRES_HOST not set — PostgreSQL integration tests skipped")
    env = os.environ.get
    conn = psycopg2.connect(
        host=host,
        port=int(env("POSTGRES_PORT", "5432")),
        dbname=env("POSTGRES_DB", "paper_dynasty"),
        user=env("POSTGRES_USER", "postgres"),
        password=env("POSTGRES_PASSWORD", ""),
    )
    conn.autocommit = False
    yield conn
    conn.close()

View File

@ -1,323 +0,0 @@
"""Tests for the formula engine (WP-09).
Unit tests only no database required. Stats inputs are simple namespace
objects whose attributes match what BattingSeasonStats/PitchingSeasonStats expose.
Tier thresholds used (from refractor_tracks.json seed data):
Batter: t1=37, t2=149, t3=448, t4=896
SP: t1=10, t2=40, t3=120, t4=240
RP: t1=3, t2=12, t3=35, t4=70
"""
from types import SimpleNamespace
import pytest
from app.services.formula_engine import (
compute_batter_value,
compute_rp_value,
compute_sp_value,
compute_value_for_track,
tier_from_value,
)
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def batter_stats(**kwargs):
    """Build a batter stats namespace; any unspecified counting stat is 0."""
    base = {"pa": 0, "hits": 0, "doubles": 0, "triples": 0, "hr": 0}
    return SimpleNamespace(**{**base, **kwargs})
def pitcher_stats(**kwargs):
    """Build a pitcher stats namespace; any unspecified counting stat is 0."""
    base = {"outs": 0, "strikeouts": 0}
    return SimpleNamespace(**{**base, **kwargs})
def track_dict(card_type: str) -> dict:
    """Return the locked tier-threshold dict for the given card_type.

    Raises KeyError for any card_type other than 'batter', 'sp', 'rp'.
    """
    t1, t2, t3, t4 = {
        "batter": (37, 149, 448, 896),
        "sp": (10, 40, 120, 240),
        "rp": (3, 12, 35, 70),
    }[card_type]
    return {
        "card_type": card_type,
        "t1_threshold": t1,
        "t2_threshold": t2,
        "t3_threshold": t3,
        "t4_threshold": t4,
    }
def track_ns(card_type: str):
    """Return an attribute-style (namespace) track for the given card_type."""
    fields = track_dict(card_type)
    return SimpleNamespace(**fields)
# ---------------------------------------------------------------------------
# compute_batter_value
# ---------------------------------------------------------------------------
def test_batter_formula_single_and_double():
    """4 PA, 1 single, 1 double: PA=4, TB=1+2=3, value = 4 + 3×2 = 10."""
    assert compute_batter_value(batter_stats(pa=4, hits=2, doubles=1)) == 10.0


def test_batter_formula_no_hits():
    """4 PA, 0 hits: TB=0, value = 4 + 0 = 4."""
    assert compute_batter_value(batter_stats(pa=4)) == 4.0


def test_batter_formula_hr_heavy():
    """4 PA, 2 HR: TB = 0 singles + 4×2 = 8, value = 4 + 8×2 = 20."""
    assert compute_batter_value(batter_stats(pa=4, hits=2, hr=2)) == 20.0
# ---------------------------------------------------------------------------
# compute_sp_value
# ---------------------------------------------------------------------------
def test_sp_formula_standard():
    """18 outs + 5 K: IP = 18/3 = 6.0, value = 6.0 + 5 = 11.0."""
    assert compute_sp_value(pitcher_stats(outs=18, strikeouts=5)) == 11.0


# ---------------------------------------------------------------------------
# compute_rp_value
# ---------------------------------------------------------------------------
def test_rp_formula_standard():
    """3 outs + 2 K: IP = 3/3 = 1.0, value = 1.0 + 2 = 3.0."""
    assert compute_rp_value(pitcher_stats(outs=3, strikeouts=2)) == 3.0


# ---------------------------------------------------------------------------
# Zero stats — every formula must return exactly 0.0 on an empty line
# ---------------------------------------------------------------------------
def test_batter_zero_stats_returns_zero():
    """All-zero batter stats must return 0.0."""
    assert compute_batter_value(batter_stats()) == 0.0


def test_sp_zero_stats_returns_zero():
    """All-zero SP stats must return 0.0."""
    assert compute_sp_value(pitcher_stats()) == 0.0


def test_rp_zero_stats_returns_zero():
    """All-zero RP stats must return 0.0."""
    assert compute_rp_value(pitcher_stats()) == 0.0
# ---------------------------------------------------------------------------
# Formula dispatch by track name
# ---------------------------------------------------------------------------
def test_dispatch_batter():
    """compute_value_for_track('batter', ...) delegates to compute_batter_value."""
    s = batter_stats(pa=4, hits=2, doubles=1)
    assert compute_value_for_track("batter", s) == compute_batter_value(s)


def test_dispatch_sp():
    """compute_value_for_track('sp', ...) delegates to compute_sp_value."""
    s = pitcher_stats(outs=18, strikeouts=5)
    assert compute_value_for_track("sp", s) == compute_sp_value(s)


def test_dispatch_rp():
    """compute_value_for_track('rp', ...) delegates to compute_rp_value."""
    s = pitcher_stats(outs=3, strikeouts=2)
    assert compute_value_for_track("rp", s) == compute_rp_value(s)


def test_dispatch_unknown_raises():
    """An unrecognised card_type must raise ValueError."""
    with pytest.raises(ValueError, match="Unknown card_type"):
        compute_value_for_track("dh", batter_stats())
# ---------------------------------------------------------------------------
# tier_from_value — batter thresholds (t1=37, t2=149, t3=448, t4=896)
# ---------------------------------------------------------------------------
def test_tier_exact_t1_boundary():
    """value=37 is exactly t1 for batter → T1 (inclusive lower bound)."""
    batter = track_dict("batter")
    assert tier_from_value(37, batter) == 1


def test_tier_just_below_t1():
    """value=36 is just below t1=37 for batter → T0."""
    batter = track_dict("batter")
    assert tier_from_value(36, batter) == 0


def test_tier_t4_boundary():
    """value=896 is exactly t4 for batter → T4."""
    batter = track_dict("batter")
    assert tier_from_value(896, batter) == 4


def test_tier_above_t4():
    """value above t4 still returns T4 (fully evolved is the ceiling)."""
    batter = track_dict("batter")
    assert tier_from_value(1000, batter) == 4


def test_tier_t2_boundary():
    """value=149 is exactly t2 for batter → T2."""
    batter = track_dict("batter")
    assert tier_from_value(149, batter) == 2


def test_tier_t3_boundary():
    """value=448 is exactly t3 for batter → T3."""
    batter = track_dict("batter")
    assert tier_from_value(448, batter) == 3


def test_tier_accepts_namespace_track():
    """tier_from_value must work with attribute-style tracks (Peewee models)."""
    assert tier_from_value(37, track_ns("batter")) == 1
# ---------------------------------------------------------------------------
# T1-1: Negative singles guard in compute_batter_value
# ---------------------------------------------------------------------------
def test_batter_negative_singles_component():
    """hits=1, doubles=1, triples=1, hr=0 produces singles=-1.

    The formula computes singles = hits - doubles - triples - hr, so this
    physically impossible stat line yields singles = -1, which is still
    valid arithmetic input. The test documents the formula's actual
    behaviour on an incoherent line so callers know that no clamping or
    guard exists; if a guard is added later, this test catches the change.

        singles = 1 - 1 - 1 - 0 = -1
        tb = (-1)*1 + 1*2 + 1*3 + 0*4 = 4
        value = pa + tb*2 = 0 + 4*2 = 8
    """
    result = compute_batter_value(batter_stats(hits=1, doubles=1, triples=1, hr=0))
    # singles will be -1; the formula does NOT clamp, so TB = 4 and value = 8.0
    assert result == 8.0, (
        f"Expected 8.0 (negative singles flows through unclamped), got {result}"
    )


def test_batter_negative_singles_is_not_clamped():
    """A singles value below zero is NOT clamped to zero by the formula.

    Confirms that singles < 0 propagates into TB rather than being floored
    at 0. With clamping, tb would be 0*1 + 1*2 + 1*3 = 5 and the value
    10.0, not 8.0. If someone adds `singles = max(0, ...)` in a refactor,
    this assertion fails immediately, surfacing the behaviour change.
    """
    unclamped_value = compute_batter_value(
        batter_stats(hits=1, doubles=1, triples=1, hr=0)
    )
    clamped_value = 10.0  # tb = 0+2+3 = 5 → value = 10.0 if singles clamped
    assert unclamped_value != clamped_value, (
        "Formula appears to clamp negative singles — behaviour has changed"
    )
# ---------------------------------------------------------------------------
# T1-2: Tier boundary precision with float SP values
# ---------------------------------------------------------------------------
def test_sp_tier_just_below_t1_outs29():
    """SP with outs=29 → IP=9.666..., below the T1 threshold (10) → T0.

    Floating-point IP values accumulate slowly for pitchers; a bug that
    truncated or rounded IP upward could cause premature tier advancement.
    Verifies tier_from_value uses >= (not >) and handles non-integer values.
    """
    v = compute_sp_value(pitcher_stats(outs=29, strikeouts=0))
    assert v == pytest.approx(29 / 3)  # 9.6666...
    assert v < 10.0                    # strictly below T1
    assert tier_from_value(v, track_dict("sp")) == 0


def test_sp_tier_exactly_t1_outs30():
    """SP with outs=30 → IP=10.0, exactly at the T1 threshold → T1.

    An off-by-one or strictly-greater-than comparison would classify this
    as T0 instead of T1; the boundary value must promote to its tier.
    """
    v = compute_sp_value(pitcher_stats(outs=30, strikeouts=0))
    assert v == 10.0
    assert tier_from_value(v, track_dict("sp")) == 1


def test_sp_float_value_at_exact_t2_boundary():
    """SP value exactly at the T2 threshold (40.0) → T2.

    outs=120 → IP=40.0 → value=40.0. Validates that every tier threshold
    uses an inclusive lower-bound comparison for floats, not just T1.
    """
    v = compute_sp_value(pitcher_stats(outs=120, strikeouts=0))
    assert v == 40.0
    assert tier_from_value(v, track_dict("sp")) == 2


def test_sp_float_value_just_below_t2():
    """SP value just below T2 (39.666...) stays at T1.

    outs=119 → IP=39.666..., strictly less than T2=40, so the tier should
    remain 1 (already past T1=10). Sub-threshold float values must not be
    promoted by floating-point comparison imprecision.
    """
    v = compute_sp_value(pitcher_stats(outs=119, strikeouts=0))
    assert v == pytest.approx(119 / 3)  # 39.666...
    assert v < 40.0
    assert tier_from_value(v, track_dict("sp")) == 1

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,804 +0,0 @@
"""Tests for the refractor evaluator service (WP-08).
Unit tests verify tier assignment, advancement, partial progress, idempotency,
full refractor tier, and no-regression behaviour without touching any database,
using stub Peewee models bound to an in-memory SQLite database.
The formula engine (WP-09) and Peewee models (WP-05/WP-07) are not imported
from db_engine/formula_engine; instead the tests supply minimal stubs and
inject them via the _stats_model, _state_model, _compute_value_fn, and
_tier_from_value_fn overrides on evaluate_card().
Stub track thresholds (batter):
T1: 37 T2: 149 T3: 448 T4: 896
Useful reference values:
value=30 T0 (below T1=37)
value=50 T1 (37 <= 50 < 149)
value=100 T1 (stays T1; T2 threshold is 149)
value=160 T2 (149 <= 160 < 448)
value=900 T4 (>= 896) fully_evolved
"""
import pytest
from datetime import datetime
from peewee import (
BooleanField,
CharField,
DateTimeField,
FloatField,
ForeignKeyField,
IntegerField,
Model,
SqliteDatabase,
)
from app.services.refractor_evaluator import evaluate_card
# ---------------------------------------------------------------------------
# Stub models — mirror WP-01/WP-04/WP-07 schema without importing db_engine
# ---------------------------------------------------------------------------
# In-memory SQLite backing the stub models. NOTE(review): unlike
# conftest.py, no foreign_keys pragma is enabled here, so FK constraints
# are NOT enforced in these tests — presumably intentional for stub
# simplicity; confirm if FK enforcement matters for evaluator tests.
_test_db = SqliteDatabase(":memory:")
class TrackStub(Model):
    """Minimal RefractorTrack stub for evaluator tests."""

    # One track per card archetype; unique so card_type lookups are unambiguous.
    card_type = CharField(unique=True)
    # Cumulative value thresholds for tiers 1-4 (inclusive lower bounds,
    # per the >= comparisons in _tier_from_value below).
    t1_threshold = IntegerField()
    t2_threshold = IntegerField()
    t3_threshold = IntegerField()
    t4_threshold = IntegerField()

    class Meta:
        database = _test_db
        # Same table name as the real model so injected queries line up.
        table_name = "refractor_track"
class CardStateStub(Model):
    """Minimal RefractorCardState stub for evaluator tests."""

    # Plain integers (not FKs) — tests address cards by raw ids.
    player_id = IntegerField()
    team_id = IntegerField()
    track = ForeignKeyField(TrackStub)
    # Evaluation state: tier/value start at zero, not yet evolved/evaluated.
    current_tier = IntegerField(default=0)
    current_value = FloatField(default=0.0)
    fully_evolved = BooleanField(default=False)
    last_evaluated_at = DateTimeField(null=True)

    class Meta:
        database = _test_db
        table_name = "refractor_card_state"
        # Unique composite index: one state row per (player, team) pair.
        indexes = ((("player_id", "team_id"), True),)
class StatsStub(Model):
    """Minimal PlayerSeasonStats stub for evaluator tests."""

    # Row identity: per (player, team, season); the evaluator sums rows
    # across seasons for a given player/team pair.
    player_id = IntegerField()
    team_id = IntegerField()
    season = IntegerField()
    # Batter counting stats (used by the batter formula).
    pa = IntegerField(default=0)
    hits = IntegerField(default=0)
    doubles = IntegerField(default=0)
    triples = IntegerField(default=0)
    hr = IntegerField(default=0)
    # Pitcher counting stats (used by the sp/rp formula).
    outs = IntegerField(default=0)
    strikeouts = IntegerField(default=0)

    class Meta:
        database = _test_db
        table_name = "player_season_stats"
# ---------------------------------------------------------------------------
# Formula stubs — avoid importing app.services.formula_engine before WP-09
# ---------------------------------------------------------------------------
def _compute_value(card_type: str, stats) -> float:
"""Stub compute_value_for_track: returns pa for batter, outs/3+k for pitchers."""
if card_type == "batter":
singles = stats.hits - stats.doubles - stats.triples - stats.hr
tb = singles + 2 * stats.doubles + 3 * stats.triples + 4 * stats.hr
return float(stats.pa + tb * 2)
return stats.outs / 3 + stats.strikeouts
def _tier_from_value(value: float, track) -> int:
"""Stub tier_from_value using TrackStub fields t1_threshold/t2_threshold/etc."""
if isinstance(track, dict):
t1, t2, t3, t4 = (
track["t1_threshold"],
track["t2_threshold"],
track["t3_threshold"],
track["t4_threshold"],
)
else:
t1, t2, t3, t4 = (
track.t1_threshold,
track.t2_threshold,
track.t3_threshold,
track.t4_threshold,
)
if value >= t4:
return 4
if value >= t3:
return 3
if value >= t2:
return 2
if value >= t1:
return 1
return 0
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
@pytest.fixture(autouse=True)
def _db():
    """Create the stub tables before each test and drop them afterwards."""
    _test_db.connect(reuse_if_open=True)
    models = [TrackStub, CardStateStub, StatsStub]
    _test_db.create_tables(models)
    yield
    # Drop children before parents so FK references never dangle.
    _test_db.drop_tables(list(reversed(models)))
@pytest.fixture()
def batter_track():
    """Batter track with the locked seed thresholds (37/149/448/896)."""
    thresholds = dict(
        t1_threshold=37,
        t2_threshold=149,
        t3_threshold=448,
        t4_threshold=896,
    )
    return TrackStub.create(card_type="batter", **thresholds)
@pytest.fixture()
def sp_track():
    """Starting-pitcher track with the locked seed thresholds (10/40/120/240)."""
    thresholds = dict(
        t1_threshold=10,
        t2_threshold=40,
        t3_threshold=120,
        t4_threshold=240,
    )
    return TrackStub.create(card_type="sp", **thresholds)
def _make_state(player_id, team_id, track, current_tier=0, current_value=0.0):
    """Create a CardStateStub row that has not yet been evaluated."""
    row = dict(
        player_id=player_id,
        team_id=team_id,
        track=track,
        current_tier=current_tier,
        current_value=current_value,
        fully_evolved=False,
        last_evaluated_at=None,
    )
    return CardStateStub.create(**row)
def _make_stats(player_id, team_id, season, **kwargs):
    """Create a StatsStub row for (player, team, season); stats via kwargs."""
    identity = {"player_id": player_id, "team_id": team_id, "season": season}
    return StatsStub.create(**identity, **kwargs)
def _eval(player_id, team_id, dry_run: bool = False):
    """Run evaluate_card with every stub model and formula injected."""
    overrides = dict(
        _stats_model=StatsStub,
        _state_model=CardStateStub,
        _compute_value_fn=_compute_value,
        _tier_from_value_fn=_tier_from_value,
    )
    return evaluate_card(player_id, team_id, dry_run=dry_run, **overrides)
# ---------------------------------------------------------------------------
# Unit tests
# ---------------------------------------------------------------------------
class TestTierAssignment:
    """Tier assigned from computed value against track thresholds."""

    def test_value_below_t1_stays_t0(self, batter_track):
        """value=30 is below T1 threshold (37) → tier stays 0."""
        _make_state(1, 1, batter_track)
        _make_stats(1, 1, 1, pa=30)  # pa=30, no hits → value = 30 < 37
        assert _eval(1, 1)["current_tier"] == 0

    def test_value_at_t1_threshold_assigns_tier_1(self, batter_track):
        """value=50 → T1 (37 <= 50 < 149)."""
        _make_state(1, 1, batter_track)
        _make_stats(1, 1, 1, pa=50)  # pa=50, no hits → value = 50
        assert _eval(1, 1)["current_tier"] == 1

    def test_tier_advancement_to_t2(self, batter_track):
        """value=160 → T2 (149 <= 160 < 448)."""
        _make_state(1, 1, batter_track)
        _make_stats(1, 1, 1, pa=160)  # pa=160, no hits → value = 160
        assert _eval(1, 1)["current_tier"] == 2

    def test_partial_progress_stays_t1(self, batter_track):
        """value=100 with T2=149 → stays T1, does not advance to T2."""
        _make_state(1, 1, batter_track)
        _make_stats(1, 1, 1, pa=100)  # value = 100 < T2 threshold (149)
        outcome = _eval(1, 1)
        assert outcome["current_tier"] == 1
        assert outcome["fully_evolved"] is False

    def test_fully_evolved_at_t4(self, batter_track):
        """value >= T4 (896) → tier=4 and fully_evolved=True."""
        _make_state(1, 1, batter_track)
        _make_stats(1, 1, 1, pa=900)  # value = 900 >= 896
        outcome = _eval(1, 1)
        assert outcome["current_tier"] == 4
        assert outcome["fully_evolved"] is True
class TestNoRegression:
    """current_tier never decreases."""

    def test_tier_never_decreases(self, batter_track):
        """If current_tier=2 and new value only warrants T1, tier stays 2."""
        _make_state(1, 1, batter_track, current_tier=2, current_value=160.0)
        _make_stats(1, 1, 1, pa=50)  # value=50 → would be T1, current is T2
        assert _eval(1, 1)["current_tier"] == 2  # no regression

    def test_tier_advances_when_value_improves(self, batter_track):
        """If current_tier=1 and new value warrants T3, tier advances to 3."""
        _make_state(1, 1, batter_track, current_tier=1, current_value=50.0)
        _make_stats(1, 1, 1, pa=500)  # value = 500 >= 448 → T3
        assert _eval(1, 1)["current_tier"] == 3
class TestIdempotency:
    """Calling evaluate_card twice with same stats returns the same result."""

    def test_idempotent_same_result(self, batter_track):
        """Two evaluations with identical stats produce the same tier and value."""
        _make_state(1, 1, batter_track)
        _make_stats(1, 1, 1, pa=160)
        first = _eval(1, 1)
        second = _eval(1, 1)
        for key in ("current_tier", "current_value", "fully_evolved"):
            assert first[key] == second[key]

    def test_idempotent_at_fully_evolved(self, batter_track):
        """Repeated evaluation at T4 remains fully_evolved=True."""
        _make_state(1, 1, batter_track)
        _make_stats(1, 1, 1, pa=900)
        _eval(1, 1)  # first pass reaches T4
        outcome = _eval(1, 1)
        assert outcome["current_tier"] == 4
        assert outcome["fully_evolved"] is True
class TestCareerTotals:
    """Stats are summed across all seasons for the player/team pair."""

    def test_multi_season_stats_summed(self, batter_track):
        """Stats from two seasons are aggregated into a single career total."""
        _make_state(1, 1, batter_track)
        _make_stats(1, 1, 1, pa=80)  # season 1
        _make_stats(1, 1, 2, pa=90)  # season 2 → total pa=170 → value=170 → T2
        outcome = _eval(1, 1)
        assert outcome["current_tier"] == 2
        assert outcome["current_value"] == 170.0

    def test_zero_stats_stays_t0(self, batter_track):
        """No stats rows → all zeros → value=0 → tier=0."""
        _make_state(1, 1, batter_track)
        outcome = _eval(1, 1)
        assert outcome["current_tier"] == 0
        assert outcome["current_value"] == 0.0

    def test_other_team_stats_not_included(self, batter_track):
        """Stats for the same player on a different team are not counted."""
        _make_state(1, 1, batter_track)
        _make_stats(1, 1, 1, pa=50)
        _make_stats(1, 2, 1, pa=200)  # same player, different team — ignored
        outcome = _eval(1, 1)
        assert outcome["current_tier"] == 1  # only pa=50 counted → value=50 → T1
        assert outcome["current_value"] == 50.0
class TestFullyEvolvedPersistence:
    """T2-1: fully_evolved=True is preserved even when stats drop or are absent."""

    def test_fully_evolved_persists_when_stats_zeroed(self, batter_track):
        """Card at T4/fully_evolved=True stays fully_evolved after stats are removed.

        What: Seed a RefractorCardState at tier=4 with fully_evolved=True,
        then evaluate with no season stats rows (zero career totals). The
        evaluator computes value=0 -> new_tier=0, but current_tier must stay
        at 4 (no regression) and fully_evolved must remain True.

        Why: fully_evolved is a permanent achievement flag — it must not be
        revoked if a team's stats are rolled back, corrected, or simply not
        yet imported. The no-regression rule (max(current, new)) prevents
        tier demotion; this confirms fully_evolved gets the same protection.
        """
        _make_state(1, 1, batter_track, current_tier=4, current_value=900.0)
        # Deliberately no _make_stats call — career totals are all zeros.
        result = _eval(1, 1)
        assert result["current_tier"] == 4, (
            f"Expected tier=4 (no regression), got {result['current_tier']}"
        )
        assert result["fully_evolved"] is True, (
            "fully_evolved was reset to False after re-evaluation with zero stats"
        )

    def test_fully_evolved_persists_with_partial_stats(self, batter_track):
        """Card at T4 stays fully_evolved even with stats below T1.

        Same setup as above, but with a season row giving value=30 (below
        T1=37). The computed tier would be 0, yet current_tier must not
        regress from 4 — no-regression applies whether stats are zero or
        merely insufficient for the achieved tier.
        """
        _make_state(1, 1, batter_track, current_tier=4, current_value=900.0)
        _make_stats(1, 1, 1, pa=30)  # value=30 < T1=37 → computed tier=0
        result = _eval(1, 1)
        assert result["current_tier"] == 4
        assert result["fully_evolved"] is True
class TestMissingState:
    """ValueError when no card state exists for (player_id, team_id)."""

    def test_missing_state_raises(self, batter_track):
        """evaluate_card raises ValueError when no state row exists."""
        # No card state is created for (99, 99).
        with pytest.raises(ValueError, match="No refractor_card_state"):
            _eval(99, 99)
class TestReturnShape:
    """Return dict has the expected keys and types."""

    # The full public return contract of evaluate_card.
    EXPECTED_KEYS = frozenset({
        "player_id",
        "team_id",
        "current_tier",
        "computed_tier",
        "computed_fully_evolved",
        "current_value",
        "fully_evolved",
        "last_evaluated_at",
    })

    def test_return_keys(self, batter_track):
        """Result dict contains all expected keys.

        Phase 2 addition: 'computed_tier' sits alongside 'current_tier' so
        that evaluate-game can detect tier-ups without writing the tier
        (dry_run=True path). Both keys must always be present.
        """
        _make_state(1, 1, batter_track)
        assert set(_eval(1, 1).keys()) == self.EXPECTED_KEYS

    def test_last_evaluated_at_is_iso_string(self, batter_track):
        """last_evaluated_at is a non-empty, parseable ISO-8601 string."""
        _make_state(1, 1, batter_track)
        ts = _eval(1, 1)["last_evaluated_at"]
        assert isinstance(ts, str) and len(ts) > 0
        datetime.fromisoformat(ts)  # raises ValueError if not ISO-8601
class TestFullyEvolvedFlagCorrection:
    """T3-7: evaluate_card repairs a fully_evolved flag that disagrees with the tier.

    If a migration or external script leaves fully_evolved=True while
    current_tier < 4, the evaluator must not trust the stored flag. It
    re-derives fully_evolved from the tier it just computed (after the
    no-regression max() step), silently healing the corruption.
    """

    def test_fully_evolved_flag_corrected_when_tier_below_4(self, batter_track):
        """A corrupted fully_evolved=True at tier 3 is flipped back to False.

        Setup: store fully_evolved=True with current_tier=3 (inconsistent,
        since only T4 cards can be fully evolved) plus stats worth value=500,
        squarely inside the T3 band (448 <= 500 < 896).

        Expected pipeline:
          computed value 500 -> new_tier 3
          no-regression: max(3, 3) -> tier stays 3
          fully_evolved := (3 >= 4) -> False, correcting the stored flag

        Because the evaluator always recomputes the flag from the final
        tier, the corruption is repaired on the next evaluation with no
        separate migration.
        """
        corrupt = CardStateStub.create(
            player_id=1,
            team_id=1,
            track=batter_track,
            current_tier=3,
            current_value=500.0,
            fully_evolved=True,  # deliberately inconsistent with tier=3
            last_evaluated_at=None,
        )
        # pa=500 with no hits -> value = 500 + 0 = 500 (T3 band)
        _make_stats(1, 1, 1, pa=500)
        result = _eval(1, 1)
        assert result["current_tier"] == 3, (
            f"Expected tier=3 after evaluation with value=500, got {result['current_tier']}"
        )
        assert result["fully_evolved"] is False, (
            "fully_evolved should have been corrected to False for tier=3, "
            f"got {result['fully_evolved']}"
        )
        # The correction must be persisted, not merely reported
        refreshed = CardStateStub.get_by_id(corrupt.id)
        assert refreshed.fully_evolved is False, (
            "fully_evolved was not persisted as False after correction"
        )

    def test_fully_evolved_flag_preserved_when_tier_reaches_4(self, batter_track):
        """The flag legitimately stays True when new stats lift the card to T4.

        Same corrupted starting point (fully_evolved=True, tier=3), but the
        stats now compute to value=900 (>= T4 threshold 896):

          computed value 900 -> new_tier 4
          no-regression: max(3, 4) -> tier advances to 4
          fully_evolved := (4 >= 4) -> True

        This proves the re-derivation yields True when the card genuinely
        reaches T4, independent of whatever flag was stored beforehand.
        """
        CardStateStub.create(
            player_id=1,
            team_id=1,
            track=batter_track,
            current_tier=3,
            current_value=500.0,
            fully_evolved=True,  # stored value is ignored; flag is re-derived
            last_evaluated_at=None,
        )
        _make_stats(1, 1, 1, pa=900)  # value=900 clears T4=896
        result = _eval(1, 1)
        assert result["current_tier"] == 4, (
            f"Expected tier=4 for value=900, got {result['current_tier']}"
        )
        assert result["fully_evolved"] is True, (
            f"Expected fully_evolved=True for tier=4, got {result['fully_evolved']}"
        )
class TestMultiTeamStatIsolation:
    """T3-8: refractor progress is scoped to a single (player, team) pair.

    The evaluator filters BattingSeasonStats on player_id AND team_id, so a
    player with stats for two franchises must carry two fully independent
    refractor progressions — never a pooled cross-team total.
    """

    def test_multi_team_same_season_stats_isolated(self, batter_track):
        """Each team's card sees only its own stats, never the combined total.

        Setup: one player, same season, pa=80 on team 1 and pa=120 on team 2,
        with one RefractorCardState per team. Expectations:
          - team 1: value 80  -> T1 (37 <= 80 < 149)
          - team 2: value 120 -> T1 (37 <= 120 < 149)
          - neither equals the pooled total (200, which would be T2)

        Why it matters: without the team_id filter the pooled 200 crosses
        the T2 threshold (149) and BOTH cards would wrongly land on T2. A
        player traded between teams must keep a separate progression per
        franchise, so the inputs are chosen such that cross-contamination
        visibly flips the tier.
        """
        _make_state(player_id=1, team_id=1, track=batter_track)
        _make_state(player_id=1, team_id=2, track=batter_track)
        # Per-team stats; the pooled total (pa=200 -> T2) must never be seen
        _make_stats(player_id=1, team_id=1, season=11, pa=80)
        _make_stats(player_id=1, team_id=2, season=11, pa=120)
        result_team1 = _eval(player_id=1, team_id=1)
        result_team2 = _eval(player_id=1, team_id=2)
        # Team 1: only its own pa=80 is counted
        assert result_team1["current_value"] == 80.0, (
            f"Team 1 value should be 80.0 (its own stats only), "
            f"got {result_team1['current_value']}"
        )
        assert result_team1["current_tier"] == 1, (
            f"Team 1 tier should be T1 for value=80, got {result_team1['current_tier']}"
        )
        # Team 2: only its own pa=120 is counted
        assert result_team2["current_value"] == 120.0, (
            f"Team 2 value should be 120.0 (its own stats only), "
            f"got {result_team2['current_value']}"
        )
        assert result_team2["current_tier"] == 1, (
            f"Team 2 tier should be T1 for value=120, got {result_team2['current_tier']}"
        )
        # If stats had been pooled, at least one card would sit on T2
        assert (
            result_team1["current_tier"] != 2 and result_team2["current_tier"] != 2
        ), (
            "At least one team was incorrectly assigned T2 — stats may have been combined"
        )

    def test_multi_team_different_seasons_isolated(self, batter_track):
        """Multi-season aggregation stays per-team as well.

        Setup: two seasons of stats for each of two teams:
          - team 1: 90 + 70  = 160 -> T2 (149 <= 160 < 448)
          - team 2: 100 + 80 = 180 -> T2 (149 <= 180 < 448)

        A bug that pooled everything would yield 340 for both teams — still
        T2, so the tier alone would not catch it, but the per-team values
        (160/180 vs 340) would. The assertions therefore pin current_value
        exactly, catching that class of cross-contamination bug.
        """
        _make_state(player_id=1, team_id=1, track=batter_track)
        _make_state(player_id=1, team_id=2, track=batter_track)
        # Team 1 across two seasons: 90 + 70 = 160
        _make_stats(player_id=1, team_id=1, season=10, pa=90)
        _make_stats(player_id=1, team_id=1, season=11, pa=70)
        # Team 2 across two seasons: 100 + 80 = 180
        _make_stats(player_id=1, team_id=2, season=10, pa=100)
        _make_stats(player_id=1, team_id=2, season=11, pa=80)
        result_team1 = _eval(player_id=1, team_id=1)
        result_team2 = _eval(player_id=1, team_id=2)
        assert result_team1["current_value"] == 160.0, (
            f"Team 1 multi-season value should be 160.0, got {result_team1['current_value']}"
        )
        assert result_team1["current_tier"] == 2, (
            f"Team 1 tier should be T2 for value=160, got {result_team1['current_tier']}"
        )
        assert result_team2["current_value"] == 180.0, (
            f"Team 2 multi-season value should be 180.0, got {result_team2['current_value']}"
        )
        assert result_team2["current_tier"] == 2, (
            f"Team 2 tier should be T2 for value=180, got {result_team2['current_tier']}"
        )
class TestDryRun:
    """dry_run=True persists current_value and last_evaluated_at only.

    current_tier and fully_evolved are deliberately NOT written, so that
    apply_tier_boost() can later commit tier + variant in one atomic step.

    Every test seeds a tier-0 card with stats worth value=160, which falls
    in the T2 band of the stub batter thresholds (T1=37, T2=149, T3=448,
    T4=896) — a guaranteed pending tier-up, making the difference between
    dry and non-dry behaviour obvious.
    """

    @staticmethod
    def _reload(player_id, team_id):
        """Fetch the persisted card-state row straight from the database."""
        return CardStateStub.get(
            (CardStateStub.player_id == player_id)
            & (CardStateStub.team_id == team_id)
        )

    def test_dry_run_does_not_write_current_tier(self, batter_track):
        """The stored current_tier stays at 0 despite a computed T2.

        Persisting the tier here would break atomicity: if apply_tier_boost()
        later failed, the tier would be advanced with no matching variant,
        leaving the card in an inconsistent state.
        """
        _make_state(1, 1, batter_track, current_tier=0)
        _make_stats(1, 1, 1, pa=160)
        _eval(1, 1, dry_run=True)
        row = self._reload(1, 1)
        assert row.current_tier == 0, (
            f"dry_run should not write current_tier; expected 0, got {row.current_tier}"
        )

    def test_dry_run_does_not_write_fully_evolved(self, batter_track):
        """The stored fully_evolved flag stays False even for a computed T4.

        The flag tracks current_tier and must be committed together with it
        by apply_tier_boost(); writing it here could desync flag and tier if
        the boost subsequently fails.
        """
        _make_state(1, 1, batter_track, current_tier=0)
        _make_stats(1, 1, 1, pa=900)  # value=900 clears T4; non-dry would set the flag
        _eval(1, 1, dry_run=True)
        row = self._reload(1, 1)
        assert row.fully_evolved is False, (
            "dry_run should not write fully_evolved; expected False, "
            f"got {row.fully_evolved}"
        )

    def test_dry_run_writes_current_value(self, batter_track):
        """current_value IS persisted: formula progress is always safe to write.

        It carries no game-logic atomicity requirement, so it is updated on
        every evaluation regardless of dry_run.
        """
        _make_state(1, 1, batter_track, current_value=0.0)
        _make_stats(1, 1, 1, pa=160)
        _eval(1, 1, dry_run=True)
        row = self._reload(1, 1)
        assert row.current_value == 160.0, (
            f"dry_run should still write current_value; expected 160.0, "
            f"got {row.current_value}"
        )

    def test_dry_run_writes_last_evaluated_at(self, batter_track):
        """last_evaluated_at IS persisted: it is pure bookkeeping.

        The timestamp feeds scheduling/audit and must always reflect the
        latest evaluation attempt, independent of any tier writes.
        """
        _make_state(1, 1, batter_track)
        _make_stats(1, 1, 1, pa=160)
        _eval(1, 1, dry_run=True)
        row = self._reload(1, 1)
        assert row.last_evaluated_at is not None, (
            "dry_run should still write last_evaluated_at; got None"
        )

    def test_dry_run_returns_computed_tier(self, batter_track):
        """The result reports computed_tier=2 while current_tier stays 0.

        Callers detect a pending tier-up precisely from this divergence, so
        both keys must be present and correct for the evaluate-game endpoint
        to gate apply_tier_boost().
        """
        _make_state(1, 1, batter_track, current_tier=0)
        _make_stats(1, 1, 1, pa=160)
        result = _eval(1, 1, dry_run=True)
        assert result["computed_tier"] == 2, (
            f"computed_tier should reflect formula result T2; got {result['computed_tier']}"
        )
        assert result["current_tier"] == 0, (
            f"current_tier should reflect unchanged DB value 0; got {result['current_tier']}"
        )

    def test_dry_run_returns_computed_fully_evolved(self, batter_track):
        """computed_fully_evolved mirrors (computed_tier >= 4), not the DB flag.

        Two sub-cases:
          - stats value=160 (T2): computed_fully_evolved is False
          - stats value=900 (T4): computed_fully_evolved is True
        The stored fully_evolved stays False in both, since dry_run never
        writes the tier. The key spares callers a re-query or recomputation
        when deciding whether the pending tier-up completes the card.
        """
        # Sub-case 1: a T2 computation reports False
        _make_state(1, 1, batter_track, current_tier=0)
        _make_stats(1, 1, 1, pa=160)
        result = _eval(1, 1, dry_run=True)
        assert result["computed_fully_evolved"] is False, (
            f"computed_fully_evolved should be False for T2; got {result['computed_fully_evolved']}"
        )
        assert result["fully_evolved"] is False, (
            "stored fully_evolved should remain False after dry_run"
        )
        # Wipe all rows, then sub-case 2: a T4 computation reports True
        CardStateStub.delete().execute()
        StatsStub.delete().execute()
        _make_state(1, 1, batter_track, current_tier=0)
        _make_stats(1, 1, 1, pa=900)  # value=900 -> T4
        result2 = _eval(1, 1, dry_run=True)
        assert result2["computed_fully_evolved"] is True, (
            f"computed_fully_evolved should be True for T4; got {result2['computed_fully_evolved']}"
        )
        assert result2["fully_evolved"] is False, (
            "stored fully_evolved should remain False after dry_run even at T4"
        )

View File

@ -1,370 +0,0 @@
"""
Tests for WP-10: refractor_card_state initialization on pack opening.
Covers `app/services/refractor_init.py` — the `initialize_card_refractor`
function that creates a RefractorCardState row when a card is first acquired.
Test strategy:
- Unit tests for `_determine_card_type` cover all three branches (batter,
SP, RP/CP) using plain objects so no database round-trip is needed.
- Integration tests run against the in-memory SQLite database (conftest.py
autouse fixture) and exercise the full get_or_create path.
Why we test idempotency:
Pack-opening can post duplicate cards (e.g. the same player ID appears in
two separate pack insertions). The get_or_create guarantee means the second
call must be a no-op it must not reset current_tier/current_value of a
card that has already started evolving.
Why we test cross-player isolation:
Two different players with the same team must each get their own
RefractorCardState row. A bug that checked only team_id would share state
across players, so we assert that state.player_id matches.
"""
import pytest
from app.db_engine import (
Cardset,
RefractorCardState,
RefractorTrack,
Player,
)
from app.services.refractor_init import _determine_card_type, initialize_card_refractor
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
class _FakePlayer:
    """Duck-typed Player substitute for the pure unit tests.

    _determine_card_type only reads ``pos_1``, so a one-attribute object
    is sufficient; no database row (and no FK setup) is required.
    """

    def __init__(self, pos_1: str):
        self.pos_1 = pos_1
def _make_player(rarity, pos_1: str) -> Player:
    """Create a minimal Player row with the given pos_1 value.

    A fresh, uniquely-named Cardset is created per call so that players
    are independent of each other and can be created in separate test
    cases without FK or unique-name conflicts.

    Fix: the previous implementation derived the set name from
    ``id(pos_1)``. CPython interns short string literals, so two calls
    with the same pos_1 could receive the same id and collide on the
    Cardset name. A per-function monotonic counter guarantees a unique
    name on every call.
    """
    # Function-attribute counter: persists across calls without a module global.
    seq = getattr(_make_player, "_seq", 0) + 1
    _make_player._seq = seq
    cardset = Cardset.create(
        name=f"Set-{pos_1}-{seq}",
        description="Test",
        total_cards=1,
    )
    return Player.create(
        p_name=f"Player {pos_1}",
        rarity=rarity,
        cardset=cardset,
        set_num=1,
        pos_1=pos_1,
        image="https://example.com/img.png",
        mlbclub="TST",
        franchise="TST",
        description="test",
    )
def _make_track(card_type: str) -> RefractorTrack:
    """Build a RefractorTrack row for the given card_type.

    The thresholds are small, arbitrary values: the code under test
    selects tracks by card_type only and never interprets these numbers.
    """
    thresholds = {
        "t1_threshold": 10,
        "t2_threshold": 40,
        "t3_threshold": 120,
        "t4_threshold": 240,
    }
    return RefractorTrack.create(
        name=f"Track-{card_type}",
        card_type=card_type,
        formula="pa",
        **thresholds,
    )
# ---------------------------------------------------------------------------
# Unit tests — _determine_card_type (no DB required)
# ---------------------------------------------------------------------------
class TestDetermineCardType:
    """Unit tests for _determine_card_type, the pure pos_1 -> card_type mapper.

    The function accepts anything exposing a pos_1 attribute and returns
    one of 'batter', 'sp', or 'rp'. Using _FakePlayer keeps these tests
    free of any database round-trip, so failures are cheap and immediate.
    """

    def test_starting_pitcher(self):
        """'SP' — the canonical starting-pitcher string in Player.pos_1 — maps to 'sp'."""
        assert _determine_card_type(_FakePlayer("SP")) == "sp"

    def test_relief_pitcher(self):
        """'RP' maps to 'rp': relievers follow a separate, lower-threshold track."""
        assert _determine_card_type(_FakePlayer("RP")) == "rp"

    def test_closer_pitcher(self):
        """'CP' maps to 'rp': the spec explicitly puts closers on the RP track."""
        assert _determine_card_type(_FakePlayer("CP")) == "rp"

    def test_infielder_is_batter(self):
        """'1B' maps to 'batter': every non-pitcher position falls through here."""
        assert _determine_card_type(_FakePlayer("1B")) == "batter"

    def test_catcher_is_batter(self):
        """'C' maps to 'batter'."""
        assert _determine_card_type(_FakePlayer("C")) == "batter"

    def test_dh_is_batter(self):
        """'DH' maps to 'batter': no defensive rating, but batting stats accrue."""
        assert _determine_card_type(_FakePlayer("DH")) == "batter"

    def test_outfielder_is_batter(self):
        """'CF' maps to 'batter'."""
        assert _determine_card_type(_FakePlayer("CF")) == "batter"
# ---------------------------------------------------------------------------
# Integration tests — initialize_card_refractor
# ---------------------------------------------------------------------------
class TestDetermineCardTypeEdgeCases:
    """T2-2: boundary inputs for _determine_card_type, parametrized.

    Exercises the edge cases from the PO review: plain batter positions
    (DH, C, 2B), empty string, None, and the compound 'SP/RP'. Because the
    implementation tests for 'SP' before 'RP'/'CP', 'SP/RP' must resolve
    to 'sp'.
    """

    @pytest.mark.parametrize(
        "pos_1, expected",
        [
            # Plain batter positions
            ("DH", "batter"),
            ("C", "batter"),
            ("2B", "batter"),
            # Empty / None — fall through to batter default
            ("", "batter"),
            (None, "batter"),
            # Compound string containing 'SP' first — must resolve to 'sp'
            # because _determine_card_type checks "SP" in pos.upper() before RP/CP
            ("SP/RP", "sp"),
        ],
    )
    def test_position_mapping(self, pos_1, expected):
        """Each pos_1 value maps to its expected card_type.

        None survives via the `(player.pos_1 or "").upper()` guard in the
        implementation and defaults to 'batter'. Mapping errors matter
        because card_type selects the RefractorTrack — a wrong string
        silently assigns the wrong thresholds to a player's entire
        refractor journey. Parametrization makes every edge case an
        independently reported failure.
        """
        actual = _determine_card_type(_FakePlayer(pos_1))
        assert actual == expected, (
            f"pos_1={pos_1!r}: expected {expected!r}, got {actual!r}"
        )
class TestInitializeCardEvolution:
    """Integration tests for initialize_card_refractor on in-memory SQLite.

    The conftest autouse fixture supplies a clean database per test; the
    seed_tracks fixture below guarantees a track exists for every card_type
    so the happy path is reachable regardless of player position.
    """

    @pytest.fixture(autouse=True)
    def seed_tracks(self):
        """Insert one RefractorTrack per card_type before every test.

        initialize_card_refractor looks the track up by card_type. With
        seed rows missing it must degrade gracefully (log and return None),
        but having all three present lets each test exercise the happy path
        without repeating setup.
        """
        self.batter_track = _make_track("batter")
        self.sp_track = _make_track("sp")
        self.rp_track = _make_track("rp")

    def test_first_card_creates_state(self, rarity, team):
        """First acquisition creates a zeroed state (WP-10 acceptance criteria).

        current_tier == 0, current_value == 0.0, fully_evolved == False,
        and the state links to the track matching the card_type (batter).
        """
        player = _make_player(rarity, "2B")
        state = initialize_card_refractor(player.player_id, team.id, "batter")
        assert state is not None
        assert state.player_id == player.player_id
        assert state.team_id == team.id
        assert state.track_id == self.batter_track.id
        assert state.current_tier == 0
        assert state.current_value == 0.0
        assert state.fully_evolved is False

    def test_duplicate_card_skips_creation(self, rarity, team):
        """Re-initializing the same (player_id, team_id) never resets progress.

        The get_or_create guarantee protects cards that already started
        evolving: the second call must leave current_tier/current_value
        untouched and must not insert a second row.
        """
        player = _make_player(rarity, "SS")
        first = initialize_card_refractor(player.player_id, team.id, "batter")
        assert first is not None
        # Pretend the card has already progressed partway
        first.current_tier = 2
        first.current_value = 250.0
        first.save()
        # Duplicate acquisition: must be a no-op
        second = initialize_card_refractor(player.player_id, team.id, "batter")
        assert second is not None
        # Exactly one row exists for the pair
        row_count = (
            RefractorCardState.select()
            .where(
                (RefractorCardState.player == player)
                & (RefractorCardState.team == team)
            )
            .count()
        )
        assert row_count == 1
        # Progress was not reset by the second call
        refreshed = RefractorCardState.get_by_id(first.id)
        assert refreshed.current_tier == 2
        assert refreshed.current_value == 250.0

    def test_different_player_creates_new_state(self, rarity, team):
        """Two players on one team get two distinct state rows.

        Uniqueness is on (player_id, team_id); a bug keyed on team_id alone
        would share state across players, so the player_id linkage is
        asserted explicitly.
        """
        player_a = _make_player(rarity, "LF")
        player_b = _make_player(rarity, "RF")
        state_a = initialize_card_refractor(player_a.player_id, team.id, "batter")
        state_b = initialize_card_refractor(player_b.player_id, team.id, "batter")
        assert state_a is not None and state_b is not None
        assert state_a.id != state_b.id
        assert state_a.player_id == player_a.player_id
        assert state_b.player_id == player_b.player_id

    def test_sp_card_gets_sp_track(self, rarity, team):
        """card_type='sp' links the state to the sp track, not the batter track.

        Mirrors the router hook, which resolves card_type via
        _determine_card_type first and passes it in explicitly.
        """
        player = _make_player(rarity, "SP")
        state = initialize_card_refractor(player.player_id, team.id, "sp")
        assert state is not None
        assert state.track_id == self.sp_track.id

    def test_rp_card_gets_rp_track(self, rarity, team):
        """card_type='rp' (the RP and CP positions) links to the rp track."""
        player = _make_player(rarity, "RP")
        state = initialize_card_refractor(player.player_id, team.id, "rp")
        assert state is not None
        assert state.track_id == self.rp_track.id

    def test_missing_track_returns_none(self, rarity, team):
        """With no matching track, the function returns None instead of raising.

        The safe-failure path: missing seed data must not abort pack
        opening, so the function logs the problem and returns None for the
        caller to tolerate. The sp track is deleted here to simulate the
        misconfiguration.
        """
        player = _make_player(rarity, "SP")
        self.sp_track.delete_instance()
        assert initialize_card_refractor(player.player_id, team.id, "sp") is None

    def test_card_type_from_pos1_batter(self, rarity, team):
        """End-to-end: pos_1='3B' resolves via _determine_card_type to the batter track."""
        player = _make_player(rarity, "3B")
        state = initialize_card_refractor(
            player.player_id, team.id, _determine_card_type(player)
        )
        assert state is not None
        assert state.track_id == self.batter_track.id

    def test_card_type_from_pos1_sp(self, rarity, team):
        """End-to-end: pos_1='SP' resolves to the sp track."""
        player = _make_player(rarity, "SP")
        state = initialize_card_refractor(
            player.player_id, team.id, _determine_card_type(player)
        )
        assert state is not None
        assert state.track_id == self.sp_track.id

    def test_card_type_from_pos1_rp(self, rarity, team):
        """End-to-end: pos_1='CP' (closer) resolves to the rp track."""
        player = _make_player(rarity, "CP")
        state = initialize_card_refractor(
            player.player_id, team.id, _determine_card_type(player)
        )
        assert state is not None
        assert state.track_id == self.rp_track.id

View File

@ -1,215 +0,0 @@
"""
Tests for refractor-related models and BattingSeasonStats.
Covers WP-01 acceptance criteria:
- RefractorTrack: CRUD and unique-name constraint
- RefractorCardState: CRUD, defaults, unique-(player,team) constraint,
and FK resolution back to RefractorTrack
- BattingSeasonStats: CRUD with defaults, unique-(player, team, season),
and in-place stat accumulation
Each test class is self-contained: fixtures from conftest.py supply the
minimal parent rows needed to satisfy FK constraints, and every assertion
targets a single, clearly-named behaviour so failures are easy to trace.
"""
import pytest
from peewee import IntegrityError
from playhouse.shortcuts import model_to_dict
from app.db_engine import (
BattingSeasonStats,
RefractorCardState,
RefractorTrack,
)
# ---------------------------------------------------------------------------
# RefractorTrack
# ---------------------------------------------------------------------------
class TestRefractorTrack:
    """Tests for the RefractorTrack model.

    A track is a named progression path (formula + four tier thresholds)
    for one card type. Its name column is UNIQUE so duplicate tracks are
    rejected at the database layer.
    """

    def test_create_track(self, track):
        """All track fields round-trip through the database unchanged.

        model_to_dict(recurse=False) exposes the raw column values, so the
        comparison checks stored data rather than Python-side wrappers.
        """
        data = model_to_dict(track, recurse=False)
        expected = {
            "name": "Batter Track",
            "card_type": "batter",
            "formula": "pa + tb * 2",
            "t1_threshold": 37,
            "t2_threshold": 149,
            "t3_threshold": 448,
            "t4_threshold": 896,
        }
        for column, value in expected.items():
            assert data[column] == value

    def test_track_unique_name(self, track):
        """A duplicate track name is rejected with IntegrityError.

        The name serves as a human-readable key across the evolution
        system, so the UNIQUE constraint must hold even when every other
        column differs.
        """
        with pytest.raises(IntegrityError):
            RefractorTrack.create(
                name="Batter Track",  # collides with the fixture's track
                card_type="sp",
                formula="outs * 3",
                t1_threshold=10,
                t2_threshold=40,
                t3_threshold=120,
                t4_threshold=240,
            )
# ---------------------------------------------------------------------------
# RefractorCardState
# ---------------------------------------------------------------------------
class TestRefractorCardState:
    """Tests for RefractorCardState, the per-card refractor progress row.

    One row represents one card (player) on one team, tied to a specific
    RefractorTrack, recording the current tier (0-4), the accumulated
    progress value, and the fully-evolved flag.
    """

    def test_create_card_state(self, player, team, track):
        """A freshly-created state carries the documented defaults.

        current_tier=0 (no tier unlocked), current_value=0.0 (no formula
        progress), fully_evolved=False, last_evaluated_at=None (never
        evaluated).
        """
        created = RefractorCardState.create(player=player, team=team, track=track)
        fetched = RefractorCardState.get_by_id(created.id)
        assert fetched.player_id == player.player_id
        assert fetched.team_id == team.id
        assert fetched.track_id == track.id
        assert fetched.current_tier == 0
        assert fetched.current_value == 0.0
        assert fetched.fully_evolved is False
        assert fetched.last_evaluated_at is None

    def test_card_state_unique_player_team(self, player, team, track):
        """A second row for one (player, team) pair raises IntegrityError.

        The unique index guarantees at most one refractor progression per
        physical card, preventing duplicate progress rows.
        """
        RefractorCardState.create(player=player, team=team, track=track)
        with pytest.raises(IntegrityError):
            RefractorCardState.create(player=player, team=team, track=track)

    def test_card_state_fk_track(self, player, team, track):
        """The track FK resolves back to the original RefractorTrack.

        Peewee must return an object whose primary key and name match the
        track supplied at creation time, confirming the relationship is
        correctly wired.
        """
        created = RefractorCardState.create(player=player, team=team, track=track)
        linked = RefractorCardState.get_by_id(created.id).track
        assert linked.id == track.id
        assert linked.name == "Batter Track"
# ---------------------------------------------------------------------------
# BattingSeasonStats
# ---------------------------------------------------------------------------
class TestBattingSeasonStats:
    """Tests for BattingSeasonStats, the per-season batting accumulator.

    One row aggregates a player's batting line for one team in one season;
    the (player, team, season) unique constraint keeps that row singular so
    stats are never double-counted.
    """

    def test_create_season_stats(self, player, team):
        """Explicitly-supplied stat columns round-trip; meta fields stay NULL.

        Also covers the initial state before any games are processed: the
        nullable last_game / last_updated_at columns default to None.
        """
        line = dict(
            games=5,
            pa=20,
            ab=18,
            hits=6,
            doubles=1,
            triples=0,
            hr=2,
            bb=2,
            hbp=0,
            strikeouts=4,
            rbi=5,
            runs=3,
            sb=1,
            cs=0,
        )
        created = BattingSeasonStats.create(
            player=player, team=team, season=11, **line
        )
        fetched = BattingSeasonStats.get_by_id(created.id)
        assert fetched.player_id == player.player_id
        assert fetched.team_id == team.id
        assert fetched.season == 11
        assert fetched.games == 5
        assert fetched.pa == 20
        assert fetched.hits == 6
        assert fetched.hr == 2
        assert fetched.strikeouts == 4
        # Nullable meta fields remain unset
        assert fetched.last_game is None
        assert fetched.last_updated_at is None

    def test_season_stats_unique_constraint(self, player, team):
        """A second (player, team, season) row raises IntegrityError.

        Exactly one accumulation row may exist per combination; a duplicate
        would silently inflate season totals.
        """
        BattingSeasonStats.create(player=player, team=team, season=11)
        with pytest.raises(IntegrityError):
            BattingSeasonStats.create(player=player, team=team, season=11)

    def test_season_stats_increment(self, player, team):
        """fetch -> add delta -> save persists the updated stat.

        Mirrors the accumulator's usual write pattern and confirms save()
        flushes the change so subsequent reads see the new value.
        """
        row = BattingSeasonStats.create(player=player, team=team, season=11, hits=10)
        row.hits += 3
        row.save()
        assert BattingSeasonStats.get_by_id(row.id).hits == 13

View File

@ -1,242 +0,0 @@
"""
Tests for app/seed/refractor_tracks.py seed_refractor_tracks().
What: Verify that the JSON-driven seed function correctly creates, counts,
and idempotently updates RefractorTrack rows in the database.
Why: The seed is the single source of truth for track configuration. A
regression here (duplicates, wrong thresholds, missing formula) would
silently corrupt refractor scoring for every card in the system.
Each test operates on a fresh in-memory SQLite database provided by the
autouse `setup_test_db` fixture in conftest.py. The seed reads its data
from `app/seed/refractor_tracks.json` on disk, so the tests also serve as
a light integration check between the JSON file and the Peewee model.
"""
import json
from pathlib import Path
import pytest
from app.db_engine import RefractorTrack
from app.seed.refractor_tracks import seed_refractor_tracks
# Location of the JSON fixture that the seed reads from at runtime
_JSON_PATH = Path(__file__).parent.parent / "app" / "seed" / "refractor_tracks.json"


@pytest.fixture
def json_tracks():
    """Load the raw JSON track definitions so tests can assert against them.

    Reading the file instead of hardcoding expected values means that when
    the JSON changes, the tests automatically follow without manual updates.
    """
    with _JSON_PATH.open(encoding="utf-8") as handle:
        return json.load(handle)
def test_seed_creates_three_tracks(json_tracks):
    """After one seed call, the row count matches the JSON definitions (3 today).

    Why: The JSON currently defines three card-type tracks (batter, sp, rp).
    A wrong count means tracks are either missing (refractor disabled for a
    card type) or phantom extras exist.

    Fix: the json_tracks fixture was accepted but unused while the expected
    count was hardcoded to 3. Asserting against len(json_tracks) keeps the
    test in lockstep with the JSON file, matching the fixture's stated
    purpose of avoiding hardcoded expectations.
    """
    seed_refractor_tracks()
    assert RefractorTrack.select().count() == len(json_tracks)
def test_seed_correct_card_types(json_tracks):
    """The set of persisted card_type values must match the JSON exactly.

    Why: card_type is the discriminator used throughout the refractor
    engine. An unexpected value (e.g. 'pitcher' instead of 'sp') would
    cause track-lookup misses and silently skip refractor scoring for
    that role.
    """
    seed_refractor_tracks()
    wanted = set()
    for definition in json_tracks:
        wanted.add(definition["card_type"])
    stored = {row.card_type for row in RefractorTrack.select()}
    assert stored == wanted
def test_seed_thresholds_ascending():
    """For every track, t1 < t2 < t3 < t4.

    Why: The refractor engine uses the thresholds as tier boundaries.
    A non-ascending sequence would make tier comparisons incorrect or
    undefined (e.g. a player could satisfy tier 3 while failing tier 2).
    """
    seed_refractor_tracks()
    for track in RefractorTrack.select():
        tiers = [
            ("t1", track.t1_threshold),
            ("t2", track.t2_threshold),
            ("t3", track.t3_threshold),
            ("t4", track.t4_threshold),
        ]
        # Compare each adjacent pair in the chain
        for (lo_name, lo_value), (hi_name, hi_value) in zip(tiers, tiers[1:]):
            assert lo_value < hi_value, (
                f"{track.name}: {lo_name} ({lo_value}) >= {hi_name} ({hi_value})"
            )
def test_seed_thresholds_positive():
    """All tier threshold values must be strictly greater than zero.

    Why: A zero or negative threshold would mean a card starts the game
    already evolved (tier >= 1 at 0 accumulated stat points), bypassing
    the entire refractor progression system.
    """
    seed_refractor_tracks()
    tier_attrs = ("t1_threshold", "t2_threshold", "t3_threshold", "t4_threshold")
    for track in RefractorTrack.select():
        for attr in tier_attrs:
            assert getattr(track, attr) > 0, f"{track.name}: {attr} is not positive"
def test_seed_formula_present():
    """Every persisted track must carry a non-empty formula string.

    Why: The formula is evaluated at runtime to compute a player's
    refractor score. An empty formula would either raise during
    evaluation or silently score every player 0, halting all refractor
    progress.
    """
    seed_refractor_tracks()
    for row in RefractorTrack.select():
        non_blank = bool(row.formula) and bool(row.formula.strip())
        assert non_blank, f"{row.name}: formula is empty or whitespace-only"
def test_seed_idempotent():
    """Calling seed_refractor_tracks() twice must still yield exactly 3 rows.

    Why: The seed must be safe to re-run (e.g. during a migration or CI
    bootstrap). Duplicate inserts on the second call would violate the
    unique name constraint in PostgreSQL, and in SQLite would silently
    create phantom rows that corrupt tier-lookup joins.
    """
    for _ in range(2):
        seed_refractor_tracks()
    assert RefractorTrack.select().count() == 3
# ---------------------------------------------------------------------------
# T1-4: Seed threshold ordering invariant (t1 < t2 < t3 < t4 + all positive)
# ---------------------------------------------------------------------------
def test_seed_all_thresholds_strictly_ascending_after_seed():
    """After seeding, every track satisfies 0 < t1 < t2 < t3 < t4.

    What: Call seed_refractor_tracks(), then assert that all four
    thresholds are strictly positive and that the full ordering chain
    t1 < t2 < t3 < t4 holds for every row in the database.

    Why: The refractor tier engine uses these thresholds as exclusive
    partition points. Any out-of-order or zero threshold makes tier
    assignment incorrect or undefined. This is the authoritative
    invariant guard — a bad JSON edit fails loudly here before any cards
    are affected. It complements the earlier test_seed_thresholds_ascending
    by combining ordering and positivity in one explicit assertion block
    with verbose failure messages.
    """
    seed_refractor_tracks()
    for track in RefractorTrack.select():
        values = {
            "t1": track.t1_threshold,
            "t2": track.t2_threshold,
            "t3": track.t3_threshold,
            "t4": track.t4_threshold,
        }
        for tier, value in values.items():
            assert value > 0, (
                f"{track.name}: {tier}_threshold={value} is not positive"
            )
        chain_ok = values["t1"] < values["t2"] < values["t3"] < values["t4"]
        assert chain_ok, (
            f"{track.name}: thresholds are not strictly ascending: "
            f"t1={values['t1']}, t2={values['t2']}, "
            f"t3={values['t3']}, t4={values['t4']}"
        )
# ---------------------------------------------------------------------------
# T2-10: Duplicate card_type tracks guard
# ---------------------------------------------------------------------------
def test_seed_each_card_type_has_exactly_one_track():
    """Each card_type must appear exactly once across all RefractorTrack rows.

    What: After seeding, tally rows per card_type and require a count of
    exactly 1 for every type present.

    Why: Tracks are looked up by card_type (e.g.
    RefractorTrack.get(card_type='batter')). A duplicated card_type makes
    Peewee's .get() raise MultipleObjectsReturned, crashing every pack
    opening and card evaluation for that type. This test is the
    uniqueness contract that surfaces seed bugs or DB drift immediately.
    """
    seed_refractor_tracks()
    # Tally occurrences per card_type in Python instead of a SQL GROUP BY
    tally = {}
    for row in RefractorTrack.select():
        tally[row.card_type] = tally.get(row.card_type, 0) + 1
    for card_type, count in tally.items():
        assert count == 1, (
            f"card_type={card_type!r} has {count} tracks; expected exactly 1"
        )
def test_seed_updates_on_rerun(json_tracks):
    """Re-seeding must restore a manually changed threshold to the JSON value.

    What: Seed once, corrupt a threshold directly in the DB, then seed
    again and verify the threshold matches the JSON definition once more.

    Why: The seed is the authoritative source of truth. If a re-run did
    not overwrite local edits, configuration drift would accumulate
    silently and the production database would diverge from the
    checked-in JSON with no visible error.
    """
    seed_refractor_tracks()
    reference = json_tracks[0]
    track_name = reference["name"]

    victim = RefractorTrack.get(RefractorTrack.name == track_name)
    bogus_t1 = victim.t1_threshold + 9999
    victim.t1_threshold = bogus_t1
    victim.save()

    # Prove the corruption landed before re-seeding
    recheck = RefractorTrack.get(RefractorTrack.name == track_name)
    assert recheck.t1_threshold == bogus_t1

    # Re-seed — should restore the JSON value
    seed_refractor_tracks()
    healed = RefractorTrack.get(RefractorTrack.name == track_name)
    assert healed.t1_threshold == reference["t1_threshold"], (
        f"Expected t1_threshold={reference['t1_threshold']} after re-seed, "
        f"got {healed.t1_threshold}"
    )

File diff suppressed because it is too large Load Diff

View File

@ -1,332 +0,0 @@
"""Integration tests for the refractor track catalog API endpoints (WP-06).
Tests cover:
GET /api/v2/refractor/tracks
GET /api/v2/refractor/tracks/{track_id}
All tests require a live PostgreSQL connection (POSTGRES_HOST env var) and
assume the refractor schema migration (WP-04) has already been applied.
Tests auto-skip when POSTGRES_HOST is not set.
Test data is inserted via psycopg2 before the test module runs and deleted
afterwards so the tests are repeatable. ON CONFLICT keeps the table clean
even if a previous run did not complete teardown.
Tier 3 tests (T3-1) in this file use a SQLite-backed TestClient so they run
without a PostgreSQL connection. They test the card_type filter edge cases:
an unrecognised card_type string and an empty string should both return an
empty list (200 with count=0) rather than an error.
"""
import os
import pytest
from fastapi import FastAPI, Request
from fastapi.testclient import TestClient
from peewee import SqliteDatabase
os.environ.setdefault("API_TOKEN", "test-token")
from app.db_engine import ( # noqa: E402
BattingSeasonStats,
Card,
Cardset,
Decision,
Event,
MlbPlayer,
Pack,
PackType,
PitchingSeasonStats,
Player,
ProcessedGame,
Rarity,
RefractorCardState,
RefractorTrack,
Roster,
RosterSlot,
ScoutClaim,
ScoutOpportunity,
StratGame,
StratPlay,
Team,
)
# Integration tests require a live PostgreSQL; the whole module skips when
# POSTGRES_HOST is unset.
POSTGRES_HOST = os.environ.get("POSTGRES_HOST")
_skip_no_pg = pytest.mark.skipif(
    not POSTGRES_HOST, reason="POSTGRES_HOST not set — integration tests skipped"
)
# Bearer-token header reused by every authenticated request in this module.
AUTH_HEADER = {"Authorization": f"Bearer {os.environ.get('API_TOKEN', 'test-token')}"}
# (name, card_type, formula, t1..t4 thresholds) rows upserted by seeded_tracks.
_SEED_TRACKS = [
    ("Batter", "batter", "pa+tb*2", 37, 149, 448, 896),
    ("Starting Pitcher", "sp", "ip+k", 10, 40, 120, 240),
    ("Relief Pitcher", "rp", "ip+k", 3, 12, 35, 70),
]
@pytest.fixture(scope="module")
def seeded_tracks(pg_conn):
    """Upsert three canonical evolution tracks; delete them after the module.

    ON CONFLICT DO UPDATE makes the fixture safe even when rows survive
    from a prior run whose teardown never completed. Yields the list of
    upserted row IDs.
    """
    cursor = pg_conn.cursor()
    upsert_sql = """
            INSERT INTO refractor_track
                (name, card_type, formula, t1_threshold, t2_threshold, t3_threshold, t4_threshold)
            VALUES (%s, %s, %s, %s, %s, %s, %s)
            ON CONFLICT (card_type) DO UPDATE SET
                name = EXCLUDED.name,
                formula = EXCLUDED.formula,
                t1_threshold = EXCLUDED.t1_threshold,
                t2_threshold = EXCLUDED.t2_threshold,
                t3_threshold = EXCLUDED.t3_threshold,
                t4_threshold = EXCLUDED.t4_threshold
            RETURNING id
            """
    row_ids = []
    for track_row in _SEED_TRACKS:
        cursor.execute(upsert_sql, track_row)
        row_ids.append(cursor.fetchone()[0])
    pg_conn.commit()
    yield row_ids
    # Teardown: remove exactly the rows this fixture upserted
    cursor.execute("DELETE FROM refractor_track WHERE id = ANY(%s)", (row_ids,))
    pg_conn.commit()
@pytest.fixture(scope="module")
def client():
    """FastAPI TestClient backed by the real PostgreSQL database."""
    from app.main import app as live_app

    with TestClient(live_app) as test_client:
        yield test_client
@_skip_no_pg
def test_list_tracks_returns_count_3(client, seeded_tracks):
    """GET /tracks returns all three seeded tracks with count=3.

    After seeding batter/sp/rp the table should hold exactly those three
    rows (no other test module inserts tracks).
    """
    response = client.get("/api/v2/refractor/tracks", headers=AUTH_HEADER)
    assert response.status_code == 200
    payload = response.json()
    assert payload["count"] == 3
    assert len(payload["items"]) == 3
@_skip_no_pg
def test_filter_by_card_type(client, seeded_tracks):
    """card_type=sp filter returns exactly 1 track with card_type 'sp'."""
    response = client.get("/api/v2/refractor/tracks?card_type=sp", headers=AUTH_HEADER)
    assert response.status_code == 200
    payload = response.json()
    assert payload["count"] == 1
    assert payload["items"][0]["card_type"] == "sp"
@_skip_no_pg
def test_get_single_track_with_thresholds(client, seeded_tracks):
    """GET /tracks/{id} returns a track dict with formula and t1-t4 thresholds."""
    batter_id = seeded_tracks[0]  # first seeded row is the batter track
    response = client.get(f"/api/v2/refractor/tracks/{batter_id}", headers=AUTH_HEADER)
    assert response.status_code == 200
    payload = response.json()
    assert payload["card_type"] == "batter"
    assert payload["formula"] == "pa+tb*2"
    for field in ("t1_threshold", "t2_threshold", "t3_threshold", "t4_threshold"):
        assert field in payload, f"Missing field: {field}"
    assert payload["t1_threshold"] == 37
    assert payload["t4_threshold"] == 896
@_skip_no_pg
def test_404_for_nonexistent_track(client, seeded_tracks):
    """GET /tracks/999999 returns 404 when the track does not exist."""
    response = client.get("/api/v2/refractor/tracks/999999", headers=AUTH_HEADER)
    assert response.status_code == 404
@_skip_no_pg
def test_auth_required(client, seeded_tracks):
    """Requests without a Bearer token return 401 for both endpoints."""
    assert client.get("/api/v2/refractor/tracks").status_code == 401
    first_id = seeded_tracks[0]
    assert client.get(f"/api/v2/refractor/tracks/{first_id}").status_code == 401
# ===========================================================================
# SQLite-backed tests for T3-1: invalid card_type query parameter
#
# These tests run without a PostgreSQL connection. They verify that the
# card_type filter on GET /api/v2/refractor/tracks handles values that match
# no known track (an unrecognised string, an empty string) gracefully: the
# endpoint must return 200 with {"count": 0, "items": []}, not a 4xx/5xx.
# ===========================================================================
# Shared-memory SQLite database so the app under test and this module see
# the same data; uri=True enables the file:...?cache=shared form, and the
# foreign_keys pragma enforces FK constraints during the tests.
_track_api_db = SqliteDatabase(
    "file:trackapitest?mode=memory&cache=shared",
    uri=True,
    pragmas={"foreign_keys": 1},
)

# All models bound for the T3-1 tests. Order matters: teardown drops them
# in reverse (see setup_track_api_db), so parents are listed before the
# models that reference them.
_TRACK_API_MODELS = [
    Rarity,
    Event,
    Cardset,
    MlbPlayer,
    Player,
    Team,
    PackType,
    Pack,
    Card,
    Roster,
    RosterSlot,
    StratGame,
    StratPlay,
    Decision,
    ScoutOpportunity,
    ScoutClaim,
    BattingSeasonStats,
    PitchingSeasonStats,
    ProcessedGame,
    RefractorTrack,
    RefractorCardState,
]
@pytest.fixture
def setup_track_api_db():
    """Bind track-API test models to shared-memory SQLite and create tables.

    Seeds exactly two tracks (batter, sp) so the filter tests query a
    non-empty table — proving the WHERE predicate excludes rows rather
    than the table simply being empty. Tables are dropped on teardown in
    reverse dependency order.

    Fix: removed the redundant `autouse=False` decorator argument — it is
    the pytest default, and spelling it out suggests a deliberate override
    that does not exist.
    """
    _track_api_db.bind(_TRACK_API_MODELS)
    _track_api_db.connect(reuse_if_open=True)
    _track_api_db.create_tables(_TRACK_API_MODELS)
    # Seed two real tracks so the table is not empty
    RefractorTrack.get_or_create(
        name="T3-1 Batter Track",
        defaults=dict(
            card_type="batter",
            formula="pa + tb * 2",
            t1_threshold=37,
            t2_threshold=149,
            t3_threshold=448,
            t4_threshold=896,
        ),
    )
    RefractorTrack.get_or_create(
        name="T3-1 SP Track",
        defaults=dict(
            card_type="sp",
            formula="ip + k",
            t1_threshold=10,
            t2_threshold=40,
            t3_threshold=120,
            t4_threshold=240,
        ),
    )
    yield _track_api_db
    _track_api_db.drop_tables(list(reversed(_TRACK_API_MODELS)), safe=True)
def _build_track_api_app() -> FastAPI:
    """Minimal FastAPI app containing only the refractor router for T3-1 tests."""
    from app.routers_v2.refractor import router as refractor_router

    test_app = FastAPI()

    @test_app.middleware("http")
    async def db_middleware(request: Request, call_next):
        # Guarantee a live SQLite connection for every handled request.
        _track_api_db.connect(reuse_if_open=True)
        return await call_next(request)

    test_app.include_router(refractor_router)
    return test_app
@pytest.fixture
def track_api_client(setup_track_api_db):
    """FastAPI TestClient for the SQLite-backed T3-1 track filter tests."""
    app_under_test = _build_track_api_app()
    with TestClient(app_under_test) as test_client:
        yield test_client
# ---------------------------------------------------------------------------
# T3-1a: card_type=foo (unrecognised value) returns empty list
# ---------------------------------------------------------------------------
def test_invalid_card_type_returns_empty_list(setup_track_api_db, track_api_client):
    """GET /tracks?card_type=foo returns 200 with count=0, not a 4xx/5xx.

    What: Query the track list with a card_type value ('foo') that matches
    no row in refractor_track. The table contains batter and sp tracks, so
    an empty result proves the filter was applied — a full list would mean
    it was ignored.

    Why: The endpoint applies `WHERE card_type == card_type` when the
    parameter is not None. An unrecognised value is a valid no-match
    query — the contract is an empty list, not a validation error.
    Returning a 422 Unprocessable Entity or 500 here would break clients
    that probe for tracks by card type before knowing which types are
    registered.
    """
    response = track_api_client.get(
        "/api/v2/refractor/tracks?card_type=foo", headers=AUTH_HEADER
    )
    assert response.status_code == 200
    payload = response.json()
    assert payload["count"] == 0, (
        f"Expected count=0 for unknown card_type 'foo', got {payload['count']}"
    )
    assert payload["items"] == [], (
        f"Expected empty items list for unknown card_type 'foo', got {payload['items']}"
    )
# ---------------------------------------------------------------------------
# T3-1b: card_type= (empty string) returns empty list
# ---------------------------------------------------------------------------
def test_empty_string_card_type_returns_empty_list(
    setup_track_api_db, track_api_client
):
    """GET /tracks?card_type= (empty string) returns 200 with count=0.

    What: Pass an empty string as the card_type query parameter. No track
    has card_type='', so the response must be an empty list with count=0.

    Why: An empty string is not None — FastAPI passes it through as ''
    rather than treating it as an absent parameter. The WHERE predicate
    `card_type == ''` produces no matches, which is the correct silent
    no-results behaviour. This guards against regressions where an empty
    string might be mishandled as a None/absent value and accidentally
    return all tracks, or raise a server error.
    """
    response = track_api_client.get(
        "/api/v2/refractor/tracks?card_type=", headers=AUTH_HEADER
    )
    assert response.status_code == 200
    payload = response.json()
    assert payload["count"] == 0, (
        f"Expected count=0 for empty card_type string, got {payload['count']}"
    )
    assert payload["items"] == [], (
        f"Expected empty items list for empty card_type string, got {payload['items']}"
    )

View File

@ -1,451 +0,0 @@
"""Tests for BattingSeasonStats and PitchingSeasonStats Peewee models.
Unit tests verify model structure and defaults on unsaved instances without
touching a database. Integration tests use an in-memory SQLite database to
verify table creation, unique constraints, indexes, and the delta-update
(increment) pattern.
"""
import pytest
from peewee import SqliteDatabase, IntegrityError
from app.models.season_stats import BattingSeasonStats, PitchingSeasonStats
from app.db_engine import Rarity, Event, Cardset, MlbPlayer, Player, Team, StratGame
# Dependency order matters for FK resolution: parents first so that
# create_tables can wire foreign keys, and drop_tables (run reversed in
# setup_test_db) removes children before parents.
_TEST_MODELS = [
    Rarity,
    Event,
    Cardset,
    MlbPlayer,
    Player,
    Team,
    StratGame,
    BattingSeasonStats,
    PitchingSeasonStats,
]

# Fresh in-memory SQLite database; foreign_keys pragma on so FK
# constraints are actually enforced during the tests.
_test_db = SqliteDatabase(":memory:", pragmas={"foreign_keys": 1})
@pytest.fixture(autouse=True)
def setup_test_db():
    """Bind all models to an in-memory SQLite database and create tables.

    Tables are dropped after each test so every test starts clean.
    """
    _test_db.bind(_TEST_MODELS)
    _test_db.create_tables(_TEST_MODELS)
    yield _test_db
    # Drop children before parents to satisfy FK constraints.
    _test_db.drop_tables(list(reversed(_TEST_MODELS)), safe=True)
# ── Fixture helpers ─────────────────────────────────────────────────────────
def make_rarity():
    """Create a minimal Rarity row, used as an FK target by make_player."""
    return Rarity.create(value=1, name="Common", color="#ffffff")
def make_cardset():
    """Create a minimal Cardset row, used as an FK target by make_player."""
    return Cardset.create(name="2025", description="2025 Season", total_cards=100)
def make_player(cardset, rarity, player_id=1):
    """Create a Player with the full required column set.

    player_id is parameterized so a test can create several distinct
    players that share the same cardset/rarity fixtures.
    """
    return Player.create(
        player_id=player_id,
        p_name="Test Player",
        cost=100,
        image="test.png",
        mlbclub="BOS",
        franchise="Boston",
        cardset=cardset,
        set_num=1,
        rarity=rarity,
        pos_1="OF",
        description="Test",
    )
def make_team(abbrev="TEST", gmid=123456789):
    """Create a Team row.

    abbrev and gmid are parameterized so tests that need more than one
    team can give each a distinct value (see test_different_team_allowed).
    """
    return Team.create(
        abbrev=abbrev,
        sname=abbrev,
        lname=f"Team {abbrev}",
        gmid=gmid,
        gmname="testuser",
        gsheet="https://example.com",
        wallet=1000,
        team_value=1000,
        collection_value=1000,
        season=1,
    )
def make_game(home_team, away_team, season=10):
    """Create a StratGame of type 'ranked' between the two given teams."""
    return StratGame.create(
        season=season,
        game_type="ranked",
        away_team=away_team,
        home_team=home_team,
    )
def make_batting_stats(player, team, season=10, **kwargs):
    """Create a BattingSeasonStats row; extra stat columns pass through kwargs."""
    return BattingSeasonStats.create(player=player, team=team, season=season, **kwargs)
def make_pitching_stats(player, team, season=10, **kwargs):
    """Create a PitchingSeasonStats row; extra stat columns pass through kwargs."""
    return PitchingSeasonStats.create(player=player, team=team, season=season, **kwargs)
# ── Shared column-list constants ─────────────────────────────────────────────
# Integer batting aggregate columns expected on BattingSeasonStats.
_BATTING_STAT_COLS = [
    "games",
    "pa",
    "ab",
    "hits",
    "doubles",
    "triples",
    "hr",
    "rbi",
    "runs",
    "bb",
    "strikeouts",
    "hbp",
    "sac",
    "ibb",
    "gidp",
    "sb",
    "cs",
]

# Integer pitching aggregate columns expected on PitchingSeasonStats.
_PITCHING_STAT_COLS = [
    "games",
    "games_started",
    "outs",
    "strikeouts",
    "bb",
    "hits_allowed",
    "runs_allowed",
    "earned_runs",
    "hr_allowed",
    "hbp",
    "wild_pitches",
    "balks",
    "wins",
    "losses",
    "holds",
    "saves",
    "blown_saves",
]

# Identity columns shared by both season-stats tables.
_KEY_COLS = ["player", "team", "season"]
# Bookkeeping columns shared by both season-stats tables.
_META_COLS = ["last_game", "last_updated_at"]
# ── Shared index helper ───────────────────────────────────────────────────────
def _get_index_columns(db_conn, table: str) -> set:
"""Return a set of frozensets, each being the column set of one index."""
indexes = db_conn.execute_sql(f"PRAGMA index_list({table})").fetchall()
result = set()
for idx in indexes:
idx_name = idx[1]
cols = db_conn.execute_sql(f"PRAGMA index_info({idx_name})").fetchall()
result.add(frozenset(col[2] for col in cols))
return result
# ── Unit: column completeness ────────────────────────────────────────────────
class TestBattingColumnCompleteness:
    """All required columns are present in BattingSeasonStats."""

    EXPECTED_COLS = _BATTING_STAT_COLS
    KEY_COLS = _KEY_COLS
    META_COLS = _META_COLS

    def _require_columns(self, names, label):
        # Every listed name must appear among the model's declared fields.
        fields = BattingSeasonStats._meta.fields
        for name in names:
            assert name in fields, f"Missing {label} column: {name}"

    def test_stat_columns_present(self):
        """All batting aggregate columns are present."""
        self._require_columns(self.EXPECTED_COLS, "batting")

    def test_key_columns_present(self):
        """player, team, and season columns are present."""
        self._require_columns(self.KEY_COLS, "key")

    def test_meta_columns_present(self):
        """Meta columns last_game and last_updated_at are present."""
        self._require_columns(self.META_COLS, "meta")
class TestPitchingColumnCompleteness:
    """All required columns are present in PitchingSeasonStats."""

    EXPECTED_COLS = _PITCHING_STAT_COLS
    KEY_COLS = _KEY_COLS
    META_COLS = _META_COLS

    def _require_columns(self, names, label):
        # Every listed name must appear among the model's declared fields.
        fields = PitchingSeasonStats._meta.fields
        for name in names:
            assert name in fields, f"Missing {label} column: {name}"

    def test_stat_columns_present(self):
        """All pitching aggregate columns are present."""
        self._require_columns(self.EXPECTED_COLS, "pitching")

    def test_key_columns_present(self):
        """player, team, and season columns are present."""
        self._require_columns(self.KEY_COLS, "key")

    def test_meta_columns_present(self):
        """Meta columns last_game and last_updated_at are present."""
        self._require_columns(self.META_COLS, "meta")
# ── Unit: default values ─────────────────────────────────────────────────────
class TestBattingDefaultValues:
    """All integer stat columns default to 0; nullable meta fields default to None."""

    INT_STAT_COLS = _BATTING_STAT_COLS

    def test_all_int_columns_default_to_zero(self):
        """Every integer stat column defaults to 0 on an unsaved instance."""
        blank = BattingSeasonStats()
        for name in self.INT_STAT_COLS:
            value = getattr(blank, name)
            assert value == 0, f"Column {name!r} default is {value!r}, expected 0"

    def test_last_game_defaults_to_none(self):
        """last_game FK is nullable and defaults to None."""
        assert BattingSeasonStats().last_game_id is None

    def test_last_updated_at_defaults_to_none(self):
        """last_updated_at defaults to None."""
        assert BattingSeasonStats().last_updated_at is None
class TestPitchingDefaultValues:
    """All integer stat columns default to 0; nullable meta fields default to None."""

    INT_STAT_COLS = _PITCHING_STAT_COLS

    def test_all_int_columns_default_to_zero(self):
        """Every integer stat column defaults to 0 on an unsaved instance."""
        blank = PitchingSeasonStats()
        for name in self.INT_STAT_COLS:
            value = getattr(blank, name)
            assert value == 0, f"Column {name!r} default is {value!r}, expected 0"

    def test_last_game_defaults_to_none(self):
        """last_game FK is nullable and defaults to None."""
        assert PitchingSeasonStats().last_game_id is None

    def test_last_updated_at_defaults_to_none(self):
        """last_updated_at defaults to None."""
        assert PitchingSeasonStats().last_updated_at is None
# ── Integration: unique constraint ───────────────────────────────────────────
class TestBattingUniqueConstraint:
    """UNIQUE on (player_id, team_id, season) is enforced at the DB level."""

    @staticmethod
    def _player():
        # Build the FK chain (rarity + cardset -> player) for one test.
        rarity = make_rarity()
        cardset = make_cardset()
        return make_player(cardset, rarity)

    def test_duplicate_raises(self):
        """Inserting a second row for the same (player, team, season) raises IntegrityError."""
        player = self._player()
        team = make_team()
        make_batting_stats(player, team, season=10)
        with pytest.raises(IntegrityError):
            make_batting_stats(player, team, season=10)

    def test_different_season_allowed(self):
        """Same (player, team) in a different season creates a separate row."""
        player = self._player()
        team = make_team()
        make_batting_stats(player, team, season=10)
        second_row = make_batting_stats(player, team, season=11)
        assert second_row.id is not None

    def test_different_team_allowed(self):
        """Same (player, season) on a different team creates a separate row."""
        player = self._player()
        first_team = make_team("TM1", gmid=111)
        second_team = make_team("TM2", gmid=222)
        make_batting_stats(player, first_team, season=10)
        second_row = make_batting_stats(player, second_team, season=10)
        assert second_row.id is not None
class TestPitchingUniqueConstraint:
    """UNIQUE on (player_id, team_id, season) is enforced at the DB level."""

    @staticmethod
    def _player():
        # Build the FK chain (rarity + cardset -> player) for one test.
        rarity = make_rarity()
        cardset = make_cardset()
        return make_player(cardset, rarity)

    def test_duplicate_raises(self):
        """Inserting a second row for the same (player, team, season) raises IntegrityError."""
        player = self._player()
        team = make_team()
        make_pitching_stats(player, team, season=10)
        with pytest.raises(IntegrityError):
            make_pitching_stats(player, team, season=10)

    def test_different_season_allowed(self):
        """Same (player, team) in a different season creates a separate row."""
        player = self._player()
        team = make_team()
        make_pitching_stats(player, team, season=10)
        second_row = make_pitching_stats(player, team, season=11)
        assert second_row.id is not None
# ── Integration: delta update pattern ───────────────────────────────────────
class TestBattingDeltaUpdate:
    """Batting stats can be incremented (delta update) without replacing existing values."""

    def test_increment_batting_stats(self):
        """Updating pa and hits increments correctly."""
        rarity = make_rarity()
        cardset = make_cardset()
        player = make_player(cardset, rarity)
        team = make_team()
        row = make_batting_stats(player, team, season=10, pa=5, hits=2)
        key_match = (
            (BattingSeasonStats.player == player)
            & (BattingSeasonStats.team == team)
            & (BattingSeasonStats.season == 10)
        )
        BattingSeasonStats.update(
            pa=BattingSeasonStats.pa + 3,
            hits=BattingSeasonStats.hits + 1,
        ).where(key_match).execute()
        after = BattingSeasonStats.get_by_id(row.id)
        assert after.pa == 8
        assert after.hits == 3

    def test_last_game_fk_is_nullable(self):
        """last_game FK can be set to a StratGame instance or left NULL."""
        rarity = make_rarity()
        cardset = make_cardset()
        player = make_player(cardset, rarity)
        team = make_team()
        row = make_batting_stats(player, team, season=10)
        assert row.last_game_id is None
        game = make_game(home_team=team, away_team=team)
        update_query = BattingSeasonStats.update(last_game=game).where(
            BattingSeasonStats.id == row.id
        )
        update_query.execute()
        assert BattingSeasonStats.get_by_id(row.id).last_game_id == game.id
class TestPitchingDeltaUpdate:
    """Pitching stats can be incremented (delta update) without replacing existing values."""

    def test_increment_pitching_stats(self):
        """Updating outs and strikeouts increments correctly."""
        rarity = make_rarity()
        cardset = make_cardset()
        player = make_player(cardset, rarity)
        team = make_team()
        row = make_pitching_stats(player, team, season=10, outs=9, strikeouts=3)
        key_match = (
            (PitchingSeasonStats.player == player)
            & (PitchingSeasonStats.team == team)
            & (PitchingSeasonStats.season == 10)
        )
        PitchingSeasonStats.update(
            outs=PitchingSeasonStats.outs + 6,
            strikeouts=PitchingSeasonStats.strikeouts + 2,
        ).where(key_match).execute()
        after = PitchingSeasonStats.get_by_id(row.id)
        assert after.outs == 15
        assert after.strikeouts == 5

    def test_last_game_fk_is_nullable(self):
        """last_game FK can be set to a StratGame instance or left NULL."""
        rarity = make_rarity()
        cardset = make_cardset()
        player = make_player(cardset, rarity)
        team = make_team()
        row = make_pitching_stats(player, team, season=10)
        assert row.last_game_id is None
        game = make_game(home_team=team, away_team=team)
        update_query = PitchingSeasonStats.update(last_game=game).where(
            PitchingSeasonStats.id == row.id
        )
        update_query.execute()
        assert PitchingSeasonStats.get_by_id(row.id).last_game_id == game.id
# ── Integration: index existence ─────────────────────────────────────────────
class TestBattingIndexExistence:
    """Required indexes exist on batting_season_stats."""

    TABLE = "batting_season_stats"

    def test_unique_index_on_player_team_season(self, setup_test_db):
        """A unique index covering (player_id, team_id, season) exists."""
        found = _get_index_columns(setup_test_db, self.TABLE)
        assert frozenset({"player_id", "team_id", "season"}) in found

    def test_index_on_team_season(self, setup_test_db):
        """An index covering (team_id, season) exists."""
        found = _get_index_columns(setup_test_db, self.TABLE)
        assert frozenset({"team_id", "season"}) in found

    def test_index_on_player_season(self, setup_test_db):
        """An index covering (player_id, season) exists."""
        found = _get_index_columns(setup_test_db, self.TABLE)
        assert frozenset({"player_id", "season"}) in found
class TestPitchingIndexExistence:
    """Required indexes exist on pitching_season_stats."""

    # Table under inspection; shared by every assertion in the class.
    TABLE = "pitching_season_stats"

    def test_unique_index_on_player_team_season(self, setup_test_db):
        """A unique index covering (player_id, team_id, season) exists."""
        covered = _get_index_columns(setup_test_db, self.TABLE)
        assert frozenset(("player_id", "team_id", "season")) in covered

    def test_index_on_team_season(self, setup_test_db):
        """An index covering (team_id, season) exists."""
        covered = _get_index_columns(setup_test_db, self.TABLE)
        assert frozenset(("team_id", "season")) in covered

    def test_index_on_player_season(self, setup_test_db):
        """An index covering (player_id, season) exists."""
        covered = _get_index_columns(setup_test_db, self.TABLE)
        assert frozenset(("player_id", "season")) in covered

View File

@ -1,910 +0,0 @@
"""
Tests for app/services/season_stats.py update_season_stats().
What: Verify that the full-recalculation stat engine correctly aggregates
StratPlay and Decision rows into BattingSeasonStats and PitchingSeasonStats,
handles duplicate calls idempotently, accumulates stats across multiple games,
and supports forced reprocessing for self-healing.
Why: This is the core bookkeeping engine for card evolution scoring. A
double-count bug, a missed Decision merge, or a team-isolation failure
would silently produce wrong stats that would then corrupt every
evolution tier calculation downstream.
Test data is created using real Peewee models (no mocking) against the
in-memory SQLite database provided by the autouse setup_test_db fixture
in conftest.py. All Player and Team creation uses the actual required
column set discovered from the model definition in db_engine.py.
"""
import app.services.season_stats as _season_stats_module
import pytest
from app.db_engine import (
BattingSeasonStats,
Cardset,
Decision,
PitchingSeasonStats,
Player,
Rarity,
StratGame,
StratPlay,
Team,
)
from app.services.season_stats import update_season_stats
from tests.conftest import _test_db
# ---------------------------------------------------------------------------
# Module-level patch: redirect season_stats.db to the test database
# ---------------------------------------------------------------------------
# season_stats.py holds a module-level reference to the `db` object imported
# from db_engine. When test models are rebound to _test_db via bind(), the
# `db` object inside season_stats still points at the original production db
# (SQLite file or PostgreSQL). We replace it here so that db.atomic() in
# update_season_stats() operates on the same in-memory connection that the
# test fixtures write to. Done at import time so every test in this module
# runs against the patched reference.
_season_stats_module.db = _test_db
# ---------------------------------------------------------------------------
# Helper factories
# ---------------------------------------------------------------------------
def _make_cardset():
    """Fetch (or lazily create) the shared test Cardset row, keyed by name."""
    return Cardset.get_or_create(
        name="Test Set",
        defaults={"description": "Test cardset", "total_cards": 100},
    )[0]
def _make_rarity():
    """Fetch (or lazily create) the Common rarity singleton row."""
    return Rarity.get_or_create(
        value=1, name="Common", defaults={"color": "#ffffff"}
    )[0]
def _make_player(name: str, pos: str = "1B") -> Player:
    """Create a Player row satisfying every non-nullable column.

    Player requires several non-nullable varchar columns (image, mlbclub,
    franchise, description) plus FKs to Rarity and Cardset; centralizing
    those here keeps the test fixtures short and consistent.
    """
    required = {
        "p_name": name,
        "rarity": _make_rarity(),
        "cardset": _make_cardset(),
        "set_num": 1,
        "pos_1": pos,
        "image": "https://example.com/image.png",
        "mlbclub": "TST",
        "franchise": "TST",
        "description": f"Test player: {name}",
    }
    return Player.create(**required)
def _make_team(abbrev: str, gmid: int, season: int = 11) -> Team:
    """Create a Team row satisfying every non-nullable column."""
    fields = {
        "abbrev": abbrev,
        "sname": abbrev,
        "lname": "Team " + abbrev,
        "gmid": gmid,
        "gmname": "gm_" + abbrev.lower(),
        "gsheet": "https://docs.google.com/spreadsheets/test",
        "wallet": 500,
        "team_value": 1000,
        "collection_value": 1000,
        "season": season,
        "is_ai": False,
    }
    return Team.create(**fields)
def make_play(game, play_num, batter, batter_team, pitcher, pitcher_team, **stats):
    """Create a StratPlay row, zero-filling every stat column not overridden.

    Situational columns get a neutral baseline (empty bases, top of the
    inning, 0-0 score) and every counting-stat column starts at 0, so each
    call site spells out only the stats that matter for the scenario under
    test. Keyword overrides in ``stats`` take precedence over the baseline,
    which keeps this helper stable even if model-level defaults change.
    """
    baseline = {
        "on_base_code": "000",
        "inning_half": "top",
        "inning_num": 1,
        "batting_order": 1,
        "starting_outs": 0,
        "away_score": 0,
        "home_score": 0,
    }
    # Counting stats default to zero unless the caller says otherwise.
    for column in (
        "pa", "ab", "hit", "run", "double", "triple", "homerun", "bb",
        "so", "hbp", "rbi", "sb", "cs", "outs", "sac", "ibb", "gidp",
        "bphr", "bpfo", "bp1b", "bplo",
    ):
        baseline[column] = 0
    return StratPlay.create(
        game=game,
        play_num=play_num,
        batter=batter,
        batter_team=batter_team,
        pitcher=pitcher,
        pitcher_team=pitcher_team,
        **{**baseline, **stats},
    )
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
@pytest.fixture
def team_a():
    """Team "TMA" — used as the away-side team in most test games."""
    return _make_team("TMA", gmid=1001)
@pytest.fixture
def team_b():
    """Team "TMB" — used as the home-side team in most test games."""
    return _make_team("TMB", gmid=1002)
@pytest.fixture
def player_batter():
    """A batter-type player (CF) batting for team A in the tests below."""
    return _make_player("Batter One", pos="CF")
@pytest.fixture
def player_pitcher():
    """A pitcher-type player (SP) pitching for team B in the tests below."""
    return _make_player("Pitcher One", pos="SP")
@pytest.fixture
def game(team_a, team_b):
    """A season-11 ranked StratGame with team A away and team B home."""
    return StratGame.create(
        season=11,
        game_type="ranked",
        away_team=team_a,
        home_team=team_b,
    )
# ---------------------------------------------------------------------------
# Tests — Existing behavior (kept)
# ---------------------------------------------------------------------------
def test_single_game_batting_stats(team_a, team_b, player_batter, player_pitcher, game):
    """Batting stat totals from StratPlay rows are correctly accumulated.

    What: Four plate appearances (single, home run, strikeout, walk) feed
    update_season_stats(); the resulting BattingSeasonStats row must equal
    the exact per-field sum of those plays.
    Why: This is the core batting aggregation path. A wrong field mapping
    (e.g. 'hit' landing in 'doubles') would silently corrupt evolution
    scoring and leaderboards downstream.
    """
    pa_events = [
        dict(pa=1, ab=1, hit=1, outs=0),                           # single
        dict(pa=1, ab=1, hit=1, homerun=1, rbi=1, run=1, outs=0),  # home run
        dict(pa=1, ab=1, so=1, outs=1),                            # strikeout
        dict(pa=1, bb=1, outs=0),                                  # walk
    ]
    for num, event in enumerate(pa_events, start=1):
        make_play(game, num, player_batter, team_a, player_pitcher, team_b, **event)

    outcome = update_season_stats(game.id)
    assert outcome["batters_updated"] >= 1

    row = BattingSeasonStats.get(
        BattingSeasonStats.player == player_batter,
        BattingSeasonStats.team == team_a,
        BattingSeasonStats.season == 11,
    )
    expected = dict(
        pa=4, ab=3, hits=2, hr=1, strikeouts=1, bb=1, rbi=1, runs=1, games=1
    )
    for field, value in expected.items():
        assert getattr(row, field) == value, field
def test_single_game_pitching_stats(
    team_a, team_b, player_batter, player_pitcher, game
):
    """Pitching stat totals (outs, k, hits_allowed, bb_allowed) are correct.

    What: The same plays that feed batting stats are the source for the
    opposing pitcher's line; verifies _recalc_pitching() inverts the
    batter-perspective fields correctly.
    Why: The batter's 'so' becomes the pitcher's 'strikeouts', the batter's
    'hit' becomes 'hits_allowed', etc. — any transposition here would
    silently corrupt pitcher stats.
    """
    pa_events = [
        dict(pa=1, ab=1, so=1, outs=1),   # strikeout
        dict(pa=1, ab=1, hit=1, outs=0),  # single
        dict(pa=1, bb=1, outs=0),         # walk
    ]
    for num, event in enumerate(pa_events, start=1):
        make_play(game, num, player_batter, team_a, player_pitcher, team_b, **event)

    update_season_stats(game.id)

    row = PitchingSeasonStats.get(
        PitchingSeasonStats.player == player_pitcher,
        PitchingSeasonStats.team == team_b,
        PitchingSeasonStats.season == 11,
    )
    assert row.outs == 1          # the lone strikeout is the only out recorded
    assert row.strikeouts == 1    # batter's so → pitcher's strikeouts
    assert row.hits_allowed == 1  # batter's hit → pitcher's hits_allowed
    assert row.bb == 1            # batter's bb → pitcher's walks allowed
    assert row.games == 1
def test_decision_integration(team_a, team_b, player_batter, player_pitcher, game):
    """Decision.win=1 for a pitcher results in wins=1 in PitchingSeasonStats.

    What: One play establishes the pitcher in the pitching pairs, then a
    Decision row records a win; update_season_stats() must surface wins=1.
    Why: Decisions live in a separate table from StratPlay. If the merge in
    _recalc_decisions() breaks (wrong FK, key mismatch), every pitcher would
    show 0 wins/losses/saves, breaking standings and evolution criteria.
    """
    make_play(
        game, 1, player_batter, team_a, player_pitcher, team_b,
        pa=1, ab=1, outs=1,
    )
    Decision.create(
        season=11,
        game=game,
        pitcher=player_pitcher,
        pitcher_team=team_b,
        win=1,
        loss=0,
        is_save=0,
        hold=0,
        b_save=0,
        is_start=True,
    )

    update_season_stats(game.id)

    row = PitchingSeasonStats.get(
        PitchingSeasonStats.player == player_pitcher,
        PitchingSeasonStats.team == team_b,
        PitchingSeasonStats.season == 11,
    )
    assert (row.wins, row.losses) == (1, 0)
def test_double_count_prevention(team_a, team_b, player_batter, player_pitcher, game):
    """Calling update_season_stats() twice for the same game must not double stats.

    What: Process a game once (pa=3), then call again with the same game_id.
    The second call hits the ProcessedGame ledger and returns early with
    'skipped'=True; pa must remain 3, not 6.
    Why: Game-complete events may be delivered more than once (retries,
    message replays); the ledger guarantees idempotency for replays.
    """
    for play_num in (1, 2, 3):
        make_play(
            game, play_num, player_batter, team_a, player_pitcher, team_b,
            pa=1, ab=1, outs=1,
        )

    initial = update_season_stats(game.id)
    assert "skipped" not in initial

    replay = update_season_stats(game.id)
    assert replay.get("skipped") is True
    assert replay["batters_updated"] == 0
    assert replay["pitchers_updated"] == 0

    row = BattingSeasonStats.get(
        BattingSeasonStats.player == player_batter,
        BattingSeasonStats.team == team_a,
        BattingSeasonStats.season == 11,
    )
    # Must still be 3, not 6.
    assert row.pa == 3
def test_two_games_accumulate(team_a, team_b, player_batter, player_pitcher):
    """Stats from two separate games sum into one BattingSeasonStats row.

    What: Game 1 contributes 2 PAs and game 2 contributes 3 for the same
    batter/team; after processing both, the row must show pa=5, games=2.
    Why: BattingSeasonStats is a season-long accumulator — the full
    recalculation reads every StratPlay row for the season, so processing
    game 2 recomputes with all 5 PAs included.
    """
    game_one = StratGame.create(
        season=11, game_type="ranked", away_team=team_a, home_team=team_b
    )
    game_two = StratGame.create(
        season=11, game_type="ranked", away_team=team_a, home_team=team_b
    )

    for target_game, pa_count in ((game_one, 2), (game_two, 3)):
        for play_num in range(1, pa_count + 1):
            make_play(
                target_game, play_num, player_batter, team_a,
                player_pitcher, team_b,
                pa=1, ab=1, outs=1,
            )

    update_season_stats(game_one.id)
    update_season_stats(game_two.id)

    row = BattingSeasonStats.get(
        BattingSeasonStats.player == player_batter,
        BattingSeasonStats.team == team_a,
        BattingSeasonStats.season == 11,
    )
    assert row.pa == 5
    assert row.games == 2
def test_two_team_game(team_a, team_b):
    """Players from both teams in a game each get their own stats row.

    What: A batter+pitcher pair per team; team A bats against team B's
    pitcher and vice versa in the same game. Both batters and both pitchers
    must end up with correct, isolated stats rows.
    Why: Stats must be attributed to the right (player, team) combination;
    a team-attribution bug would file a player's stats under the wrong
    franchise or merge them with an opponent's row.
    """
    batter_a = _make_player("Batter A", pos="CF")
    pitcher_a = _make_player("Pitcher A", pos="SP")
    batter_b = _make_player("Batter B", pos="CF")
    pitcher_b = _make_player("Pitcher B", pos="SP")

    game = StratGame.create(
        season=11, game_type="ranked", away_team=team_a, home_team=team_b
    )

    # (play_num, batter, batter_team, pitcher, pitcher_team, extra stats)
    plays = [
        # Team A bats against team B's pitcher (away half).
        (1, batter_a, team_a, pitcher_b, team_b,
         dict(pa=1, ab=1, hit=1, outs=0, inning_half="top")),
        (2, batter_a, team_a, pitcher_b, team_b,
         dict(pa=1, ab=1, so=1, outs=1, inning_half="top")),
        # Team B bats against team A's pitcher (home half).
        (3, batter_b, team_b, pitcher_a, team_a,
         dict(pa=1, ab=1, bb=1, outs=0, inning_half="bottom")),
    ]
    for num, batter, b_team, pitcher, p_team, extra in plays:
        make_play(game, num, batter, b_team, pitcher, p_team, **extra)

    update_season_stats(game.id)

    # Team A's batter: 2 PA, 1 hit, 1 SO.
    row_ba = BattingSeasonStats.get(
        BattingSeasonStats.player == batter_a,
        BattingSeasonStats.team == team_a,
    )
    assert (row_ba.pa, row_ba.hits, row_ba.strikeouts) == (2, 1, 1)

    # Team B's batter: 1 PA, 1 BB.
    row_bb = BattingSeasonStats.get(
        BattingSeasonStats.player == batter_b,
        BattingSeasonStats.team == team_b,
    )
    assert (row_bb.pa, row_bb.bb) == (1, 1)

    # Team B's pitcher (faced team A's batter): 1 hit allowed, 1 strikeout.
    row_pb = PitchingSeasonStats.get(
        PitchingSeasonStats.player == pitcher_b,
        PitchingSeasonStats.team == team_b,
    )
    assert (row_pb.hits_allowed, row_pb.strikeouts) == (1, 1)

    # Team A's pitcher (faced team B's batter): 1 BB allowed.
    row_pa = PitchingSeasonStats.get(
        PitchingSeasonStats.player == pitcher_a,
        PitchingSeasonStats.team == team_a,
    )
    assert row_pa.bb == 1
def test_out_of_order_replay_prevented(team_a, team_b, player_batter, player_pitcher):
    """Out-of-order processing and re-delivery produce correct stats.

    What: Process game G+1 (2 PAs) before game G (3 PAs). Full recalculation
    reads all StratPlay rows for the season on each call, so the totals are
    correct regardless of order; re-delivering game G is skipped by the
    ProcessedGame ledger and stats stay at 5.
    Why: Full recalculation makes out-of-order delivery inherently safe,
    while the ledger still prevents redundant work.
    """
    game_g = StratGame.create(
        season=11, game_type="ranked", away_team=team_a, home_team=team_b
    )
    game_g1 = StratGame.create(
        season=11, game_type="ranked", away_team=team_a, home_team=team_b
    )

    for target_game, pa_count in ((game_g, 3), (game_g1, 2)):
        for play_num in range(1, pa_count + 1):
            make_play(
                target_game, play_num, player_batter, team_a,
                player_pitcher, team_b,
                pa=1, ab=1, outs=1,
            )

    # Process G+1 first, then G — simulates out-of-order delivery.
    update_season_stats(game_g1.id)
    update_season_stats(game_g.id)

    query = (
        BattingSeasonStats.player == player_batter,
        BattingSeasonStats.team == team_a,
        BattingSeasonStats.season == 11,
    )
    assert BattingSeasonStats.get(*query).pa == 5  # 3 (G) + 2 (G+1)

    # Re-deliver game G — must be blocked by the ProcessedGame ledger.
    replay = update_season_stats(game_g.id)
    assert replay.get("skipped") is True

    # Stats must remain at 5, not 8.
    assert BattingSeasonStats.get(*query).pa == 5
# ---------------------------------------------------------------------------
# Tests — New (force recalc / idempotency / self-healing)
# ---------------------------------------------------------------------------
def test_force_recalc(team_a, team_b, player_batter, player_pitcher, game):
    """Reprocessing with force=True after normal processing does not double stats.

    What: Process a game (3 singles), then reprocess with force=True. The
    recalculation writes totals, not deltas, so pa stays 3.
    Why: force=True bypasses the ProcessedGame ledger skip; since the
    underlying plays are unchanged, identical totals prove the replacement
    upsert is safe.
    """
    for play_num in range(1, 4):
        make_play(
            game, play_num, player_batter, team_a, player_pitcher, team_b,
            pa=1, ab=1, hit=1, outs=0,
        )

    initial = update_season_stats(game.id)
    assert initial["batters_updated"] >= 1
    assert "skipped" not in initial

    # Force reprocess — must NOT double the stats.
    forced = update_season_stats(game.id, force=True)
    assert "skipped" not in forced
    assert forced["batters_updated"] >= 1

    row = BattingSeasonStats.get(
        BattingSeasonStats.player == player_batter,
        BattingSeasonStats.team == team_a,
        BattingSeasonStats.season == 11,
    )
    assert (row.pa, row.hits, row.games) == (3, 3, 1)
def test_idempotent_reprocessing(team_a, team_b, player_batter, player_pitcher, game):
    """Two consecutive force=True calls produce identical stats.

    What: Force-process the same game twice; both calls recompute from
    scratch, so the second result must match the first exactly.
    Why: Admin scripts and retry loops may force-reprocess repeatedly; the
    recalculation engine must be stable under repetition.
    """
    for idx in range(4):
        whiffed = idx % 2 == 0  # alternate strikeouts and singles
        make_play(
            game, idx + 1, player_batter, team_a, player_pitcher, team_b,
            pa=1, ab=1,
            so=int(whiffed),
            hit=int(not whiffed),
            outs=int(whiffed),
        )

    query = (
        BattingSeasonStats.player == player_batter,
        BattingSeasonStats.team == team_a,
        BattingSeasonStats.season == 11,
    )

    update_season_stats(game.id, force=True)
    first = BattingSeasonStats.get(*query)
    snapshot = (first.pa, first.hits, first.strikeouts)

    update_season_stats(game.id, force=True)
    second = BattingSeasonStats.get(*query)
    assert (second.pa, second.hits, second.strikeouts) == snapshot
def test_partial_reprocessing_heals(
    team_a, team_b, player_batter, player_pitcher, game
):
    """Force reprocessing corrects manually corrupted stats.

    What: Process a game (pa=3, hits=2), manually corrupt the stats row
    (pa=999 etc.), then force-reprocess. The row must be healed back to the
    correct totals.
    Why: This is the self-healing benefit of full recalculation — force=True
    recomputes from the source-of-truth StratPlay data and overwrites
    whatever state the row is in.
    """
    pa_events = [
        dict(pa=1, ab=1, hit=1, outs=0),            # single
        dict(pa=1, ab=1, hit=1, double=1, outs=0),  # double
        dict(pa=1, ab=1, so=1, outs=1),             # strikeout
    ]
    for num, event in enumerate(pa_events, start=1):
        make_play(game, num, player_batter, team_a, player_pitcher, team_b, **event)

    update_season_stats(game.id)

    query = (
        BattingSeasonStats.player == player_batter,
        BattingSeasonStats.team == team_a,
        BattingSeasonStats.season == 11,
    )

    # Verify the correct initial state.
    row = BattingSeasonStats.get(*query)
    assert (row.pa, row.hits, row.doubles) == (3, 2, 1)

    # Corrupt the row by hand.
    row.pa = 999
    row.hits = 0
    row.doubles = 50
    row.save()
    assert BattingSeasonStats.get_by_id(row.id).pa == 999

    # Force reprocess — must heal the corruption.
    update_season_stats(game.id, force=True)

    healed = BattingSeasonStats.get(*query)
    assert (healed.pa, healed.hits, healed.doubles) == (3, 2, 1)
    assert healed.strikeouts == 1
    assert healed.games == 1
def test_decision_only_pitcher(team_a, team_b, player_batter, player_pitcher, game):
    """A pitcher with a Decision but no StratPlay rows still gets stats recorded.

    What: A relief pitcher is credited a win via Decision without appearing
    in any play. After update_season_stats() they must have a
    PitchingSeasonStats row with wins=1 and all play-derived stats at 0.
    Why: Rare game-simulation edge cases can award a decision to a pitcher
    with no recorded plays; Decision-scanned pitchers must be included in
    _get_player_pairs() or they would vanish from the stats entirely.
    """
    reliever = _make_player("Relief Pitcher", pos="RP")

    # The main pitcher records the only play of the game.
    make_play(
        game, 1, player_batter, team_a, player_pitcher, team_b,
        pa=1, ab=1, outs=1,
    )
    # The relief pitcher gets a Decision but NO StratPlay rows.
    Decision.create(
        season=11,
        game=game,
        pitcher=reliever,
        pitcher_team=team_b,
        win=1,
        loss=0,
        is_save=0,
        hold=0,
        b_save=0,
        is_start=False,
    )

    update_season_stats(game.id)

    row = PitchingSeasonStats.get(
        PitchingSeasonStats.player == reliever,
        PitchingSeasonStats.team == team_b,
        PitchingSeasonStats.season == 11,
    )
    assert row.wins == 1
    assert row.games == 0  # no plays, so COUNT(DISTINCT game) = 0
    assert row.outs == 0
    assert row.strikeouts == 0
    assert row.games_started == 0