Sync: remove paper-dynasty skill files, add templates, update settings/plugins/sessions

This commit is contained in:
Cal Corum 2026-03-26 02:00:52 -05:00
parent 0fa8486e93
commit 1922d25469
57 changed files with 1341 additions and 7271 deletions

View File

@ -1,5 +1,11 @@
{
"allow": [
"git pull*"
"git pull*",
"pd-pr *",
"pd-plan *",
"python *pytest*",
"git tag*",
"docker compose*",
"docker pull*"
]
}

File diff suppressed because one or more lines are too long

View File

@ -1,5 +1,5 @@
{
"fetchedAt": "2026-03-24T04:00:47.945Z",
"fetchedAt": "2026-03-26T06:30:48.407Z",
"plugins": [
{
"plugin": "code-review@claude-plugins-official",

View File

@ -23,10 +23,10 @@
"playground@claude-plugins-official": [
{
"scope": "user",
"installPath": "/home/cal/.claude/plugins/cache/claude-plugins-official/playground/15268f03d2f5",
"version": "15268f03d2f5",
"installPath": "/home/cal/.claude/plugins/cache/claude-plugins-official/playground/0fa8486e9383",
"version": "0fa8486e9383",
"installedAt": "2026-02-18T19:51:28.422Z",
"lastUpdated": "2026-03-23T20:15:51.541Z",
"lastUpdated": "2026-03-25T06:30:49.672Z",
"gitCommitSha": "261ce4fba4f2c314c490302158909a32e5889c88"
}
],
@ -43,10 +43,10 @@
"frontend-design@claude-plugins-official": [
{
"scope": "user",
"installPath": "/home/cal/.claude/plugins/cache/claude-plugins-official/frontend-design/15268f03d2f5",
"version": "15268f03d2f5",
"installPath": "/home/cal/.claude/plugins/cache/claude-plugins-official/frontend-design/0fa8486e9383",
"version": "0fa8486e9383",
"installedAt": "2026-02-22T05:53:45.091Z",
"lastUpdated": "2026-03-23T20:15:51.536Z",
"lastUpdated": "2026-03-25T06:30:49.667Z",
"gitCommitSha": "aa296ec81e8ccb49c9784f167c2c0aa625a86cec"
}
],
@ -63,10 +63,10 @@
"session@agent-toolkit": [
{
"scope": "user",
"installPath": "/home/cal/.claude/plugins/cache/agent-toolkit/session/3.7.0",
"version": "3.7.0",
"installPath": "/home/cal/.claude/plugins/cache/agent-toolkit/session/3.7.1",
"version": "3.7.1",
"installedAt": "2026-03-18T23:37:09.034Z",
"lastUpdated": "2026-03-23T18:00:49.746Z",
"lastUpdated": "2026-03-24T17:30:49.073Z",
"gitCommitSha": "8c6e15ce7c51ae53121ec12d8dceee3c8bf936c6"
}
],
@ -163,10 +163,10 @@
"session-history-analyzer@agent-toolkit": [
{
"scope": "user",
"installPath": "/home/cal/.claude/plugins/cache/agent-toolkit/session-history-analyzer/1.0.0",
"version": "1.0.0",
"installPath": "/home/cal/.claude/plugins/cache/agent-toolkit/session-history-analyzer/1.0.1",
"version": "1.0.1",
"installedAt": "2026-03-21T03:55:36.988Z",
"lastUpdated": "2026-03-21T03:55:36.988Z",
"lastUpdated": "2026-03-24T17:30:49.067Z",
"gitCommitSha": "266237bb258d111433f099d86d735bd9e780569e"
}
],

View File

@ -5,7 +5,7 @@
"url": "https://github.com/anthropics/claude-plugins-official.git"
},
"installLocation": "/home/cal/.claude/plugins/marketplaces/claude-plugins-official",
"lastUpdated": "2026-03-23T15:53:46.553Z"
"lastUpdated": "2026-03-25T18:16:49.555Z"
},
"claude-code-plugins": {
"source": {
@ -13,7 +13,7 @@
"repo": "anthropics/claude-code"
},
"installLocation": "/home/cal/.claude/plugins/marketplaces/claude-code-plugins",
"lastUpdated": "2026-03-24T04:01:11.681Z"
"lastUpdated": "2026-03-26T07:00:49.839Z"
},
"agent-toolkit": {
"source": {
@ -21,7 +21,7 @@
"repo": "St0nefish/agent-toolkit"
},
"installLocation": "/home/cal/.claude/plugins/marketplaces/agent-toolkit",
"lastUpdated": "2026-03-24T03:30:48.201Z",
"lastUpdated": "2026-03-26T07:00:47.396Z",
"autoUpdate": true
},
"cal-claude-plugins": {

@ -1 +1 @@
Subproject commit 070f1d7f7485084a5336c6635593482c22c4387d
Subproject commit 61db86967f904b6b10e6ff9603dd1a13138f319e

@ -1 +1 @@
Subproject commit 6aadfbdca2c29f498f579509a56000e4e8daaf90
Subproject commit a0d9b87038e72d8a523b61c152ec53299ac6fe94

1
sessions/122493.json Normal file
View File

@ -0,0 +1 @@
{"pid":122493,"sessionId":"1cc72a81-2b7e-49a2-be60-c694fb06dbdc","cwd":"/home/cal/work","startedAt":1774469966155,"kind":"interactive"}

View File

@ -1 +0,0 @@
{"pid":1794866,"sessionId":"0fa5054d-b5c6-4499-b59c-9a0f8fae56f5","cwd":"/mnt/NV2/Development/paper-dynasty","startedAt":1774267485456}

View File

@ -1 +0,0 @@
{"pid":1841495,"sessionId":"d582937f-e7b1-4131-8046-993531618bc2","cwd":"/mnt/NV2/Development/claude-home","startedAt":1774271490814}

View File

@ -1 +0,0 @@
{"pid":2073728,"sessionId":"1e366762-a0e9-4620-b5d4-352b18bf4603","cwd":"/home/cal/work","startedAt":1774287133224}

View File

@ -1 +0,0 @@
{"pid":2085347,"sessionId":"c9852a3c-c4ff-4914-a22f-a6853f93d712","cwd":"/mnt/NV2/Development/major-domo","startedAt":1774287409057}

View File

@ -1 +0,0 @@
{"pid":2369320,"sessionId":"5296e222-0748-4619-acbe-b8c7e5b5f297","cwd":"/mnt/NV2/Development/cookbook","startedAt":1774303725948}

1
sessions/3202888.json Normal file
View File

@ -0,0 +1 @@
{"pid":3202888,"sessionId":"dcbc837f-59e9-49b1-acc9-4ada74f24d54","cwd":"/mnt/NV2/Development/mlb-the-show","startedAt":1774374370733}

1
sessions/3993643.json Normal file
View File

@ -0,0 +1 @@
{"pid":3993643,"sessionId":"e04a9d2e-e3d9-4690-ae24-702915201601","cwd":"/mnt/NV2/Development/paper-dynasty","startedAt":1774449677246,"kind":"interactive","name":"replace-tier-symbols-readable"}

1
sessions/555030.json Normal file
View File

@ -0,0 +1 @@
{"pid":555030,"sessionId":"0ab2989c-4cae-4704-946b-4bfc3b1521cc","cwd":"/mnt/NV2/Development/claude-home","startedAt":1774502266569,"kind":"interactive","entrypoint":"cli"}

1
sessions/597115.json Normal file
View File

@ -0,0 +1 @@
{"pid":597115,"sessionId":"a39c9150-df2c-4ac9-a8e4-b0dab9481d0e","cwd":"/home/cal","startedAt":1774508443770,"kind":"interactive","entrypoint":"sdk-cli"}

1
sessions/597413.json Normal file
View File

@ -0,0 +1 @@
{"pid":597413,"sessionId":"29f5d5e7-6bde-4b6a-9678-addb8d1a07a0","cwd":"/home/cal","startedAt":1774508446703,"kind":"interactive","entrypoint":"sdk-cli"}

View File

@ -7,21 +7,6 @@
},
"permissions": {
"allow": [
"Bash",
"Read(*)",
"MultiEdit(*)",
"Glob(*)",
"Grep(*)",
"LS(*)",
"WebFetch(domain:*)",
"WebSearch",
"NotebookRead(*)",
"NotebookEdit(*)",
"TodoWrite(*)",
"ExitPlanMode(*)",
"Task(*)",
"Skill(*)",
"SlashCommand(*)",
"mcp__n8n-mcp__*",
"mcp__gitea-mcp__*",
"mcp__tui-driver__*"
@ -31,8 +16,6 @@
"Bash(rm -rf /*)",
"Bash(rm -rf ~)",
"Bash(rm -rf $HOME)",
"Bash(rm -rf $PAI_HOME)",
"Bash(rm -rf $PAI_DIR)",
"Bash(sudo rm -rf /)",
"Bash(sudo rm -rf /*)",
"Bash(fork bomb)",
@ -47,19 +30,7 @@
},
"enableAllProjectMcpServers": false,
"enabledMcpjsonServers": [],
"hooks": {
"SubagentStop": [
{
"hooks": [
{
"type": "command",
"command": "/home/cal/.claude/hooks/notify-subagent-done.sh",
"timeout": 10
}
]
}
]
},
"hooks": {},
"statusLine": {
"type": "command",
"command": "bash ~/.claude/plugins/marketplaces/agent-toolkit/plugins-claude/statusline/scripts/statusline.sh"
@ -97,6 +68,7 @@
}
},
"autoUpdatesChannel": "latest",
"skipDangerousModePermissionPrompt": true,
"voiceEnabled": true
"voiceEnabled": true,
"autoDreamEnabled": true,
"skipDangerousModePermissionPrompt": true
}

View File

@ -0,0 +1,106 @@
# Extending Shared Skills to a New Project
This directory contains shared workflow templates that can be reused across projects.
Each project provides its own config; the templates provide the logic.
## How It Works
```
~/.claude/skills/_templates/ ← shared workflow logic (this directory)
pr-pipeline-workflow.md ← review→fix→merge steps
release-workflow.md ← CalVer tag→push steps
release-core.sh ← parameterized release script
EXTENDING.md ← this file
<project>/.claude/skills/ ← per-project config
pr-pipeline/SKILL.md ← agent names, repo mapping, → template pointer
release/SKILL.md ← service mapping, deploy commands, → template pointer
release/release.sh ← thin wrapper calling release-core.sh
release/release-config.env ← bash-sourceable service/image map
```
## Adding pr-pipeline to a New Project
1. Create `<project>/.claude/skills/pr-pipeline/SKILL.md`:
```markdown
---
name: pr-pipeline
description: Review-fix-merge pipeline for <Project Name> PRs.
---
# PR Pipeline — <Project Name>
## Config
| Key | Value |
|---|---|
| REVIEWER_AGENT | `pr-reviewer` |
| REVIEWER_MODEL | `sonnet` |
| FIXER_AGENT | `engineer` |
| FIXER_MODEL | `sonnet` |
| MERGER_AGENT | `<project-ops-agent>` |
| MERGER_MODEL | `sonnet` |
### Repo Mapping
| Short name | Gitea repo | Owner |
|---|---|---|
| `database` | `<project>-database` | `cal` |
| `discord` | `<project>-v2` | `cal` |
## Workflow
Follow the workflow defined in `~/.claude/skills/_templates/pr-pipeline-workflow.md`,
substituting the config values above.
```
**Key decisions:**
- `MERGER_AGENT` must match an agent defined in the project's `.claude/agents/` directory
- `REVIEWER_AGENT` and `FIXER_AGENT` are typically global agents (`pr-reviewer`, `engineer`)
- Repo mapping short names are arbitrary — pick whatever feels natural for the project
## Adding release to a New Project
1. Create `<project>/.claude/skills/release/release-config.env`:
```bash
BASEDIR="/mnt/NV2/Development/<project>"
declare -A SERVICE_DIRS=(
[database]="database"
[discord]="discord-app-v2"
)
declare -A SERVICE_IMAGES=(
[database]="manticorum67/<project>-database"
[discord]="manticorum67/<project>-discordapp"
)
```
2. Create `<project>/.claude/skills/release/release.sh`:
```bash
#!/usr/bin/env bash
exec bash ~/.claude/skills/_templates/release-core.sh \
--config "$(dirname "$0")/release-config.env" "$@"
```
3. Create `<project>/.claude/skills/release/SKILL.md` with:
- Config section (BASEDIR, service mapping table, deploy commands)
- Usage/examples section with the correct paths
- Workflow section pointing to `~/.claude/skills/_templates/release-workflow.md`
**Key decisions:**
- Only list services that need **manual** CalVer release in the config. If a repo auto-tags on merge (like some CI setups), omit it from SERVICE_DIRS.
- Deploy commands are project-specific — list them in the SKILL.md so Claude knows how to deploy after tagging.
- `SERVICE_IMAGES` can omit services that don't produce Docker images (e.g., CLI-only tools).
## Modifying Shared Workflow Logic
Edit the template files in `~/.claude/skills/_templates/`. Changes propagate to all projects automatically.
**Rules:**
- Templates must not contain project-specific strings (no repo names, paths, or agent names)
- Use `{PLACEHOLDER}` notation for values that come from project config
- Test changes against at least the Paper Dynasty skills before considering them stable

View File

@ -0,0 +1,71 @@
# PR Pipeline — Shared Workflow
Automated review-fix-merge cycle. The calling skill MUST define a **Config** section with
the values listed under "Required Config" before referencing this workflow.
## Required Config
| Key | Description |
|---|---|
| `REPO_MAPPING` | Table: short name → Gitea repo → owner |
| `REVIEWER_AGENT` | Agent type for code reviews (e.g., `pr-reviewer`) |
| `REVIEWER_MODEL` | Model for reviewer agent |
| `FIXER_AGENT` | Agent type for fixing review findings (e.g., `engineer`) |
| `FIXER_MODEL` | Model for fixer agent |
| `MERGER_AGENT` | Agent type for merging (e.g., `pd-ops`) |
| `MERGER_MODEL` | Model for merger agent |
## Usage
```
/pr-pipeline <repo-short-name> <pr_numbers...>
```
Resolve `<repo-short-name>` via the REPO_MAPPING table in the calling skill's Config section.
## Workflow
For each PR, run this cycle (max 3 iterations to prevent infinite loops):
### Step 1: Review
Launch a `{REVIEWER_AGENT}` agent (model: `{REVIEWER_MODEL}`) against the PR.
The reviewer posts a formal review via Gitea API.
### Step 2: Evaluate
- If **APPROVED** → go to Step 4
- If **REQUEST_CHANGES** → go to Step 3
- If this is iteration 3 and still not approved → stop, report the remaining issues to the user
### Step 3: Fix
Launch a `{FIXER_AGENT}` agent (model: `{FIXER_MODEL}`) to address the reviewer's feedback. The engineer should:
1. Check out the PR's head branch
2. Read the review comments from Gitea
3. **Actually fix every issue** — make the code changes, don't defer them
4. Run tests (`python -m pytest tests/ --tb=short -q`)
5. Commit and push to the PR branch
Then return to Step 1 (re-review).
**Engineer fix rules:**
- **Fix it, don't punt it.** Every reviewer finding should result in a code change, not a TODO, FIXME, or "will address in follow-up PR." The goal is to get to APPROVED, not to get to merge by deferring work.
- **Simple changes go in now.** Renames, missing null guards, docstring updates, import fixes, test updates — these are not "follow-up" material. Do them.
- **Only escalate to the user** if a fix requires a design decision (e.g., changing an API contract, choosing between two valid approaches) or if the reviewer's suggestion is wrong/misguided. In that case, stop and ask rather than guessing.
- **Don't introduce new features** while fixing. Stay scoped to what the reviewer flagged.
- **If the reviewer flagged something as "non-blocking" or "suggestion"**, still fix it if it's simple (< 5 minutes of work). Only skip if it's genuinely out of scope for this PR.
- **Create Gitea issues for anything not fixed.** If a reviewer recommendation is legitimately out of scope or requires a design decision, create a Gitea issue capturing the finding so it doesn't get lost. Reference the PR number in the issue body. Nothing should silently disappear.
### Step 4: Merge
Launch a `{MERGER_AGENT}` agent (model: `{MERGER_MODEL}`) to merge the PR.
The merger handles the approval dance and branch cleanup.
## Important Rules
- **Max 3 review cycles** per PR. If still failing after 3 rounds, stop and report.
- **Run PRs in parallel** when they are independent (different repos or no dependency).
- **Run PRs sequentially** when one depends on another (noted in PR description).
- **Never skip the review step** — even "obvious" fixes get reviewed.
- **Report progress** after each step completes — don't go silent during long pipelines.

170
skills/_templates/release-core.sh Executable file
View File

@ -0,0 +1,170 @@
#!/usr/bin/env bash
# Shared Release Script — CalVer tag and push for any project.
# Called by per-project release.sh wrappers.
#
# Usage: release-core.sh --config <config-file> <service> [version]
#
# Config file format (bash-sourceable):
#   BASEDIR="/mnt/NV2/Development/my-project"
#   declare -A SERVICE_DIRS=([database]="database" [discord]="discord-app")
#   declare -A SERVICE_IMAGES=([database]="manticorum67/my-database" [discord]="manticorum67/my-discord")
#
# Behavior:
#   - <version> omitted  → auto-increments next CalVer tag (YYYY.M.BUILD)
#   - <version> "dev"    → force-updates the floating `dev` tag
#   - <version> explicit → validated against CalVer format, must not already exist
#
# Environment:
#   SKIP_CONFIRM=1  skips the interactive confirmation prompt.
#
# shellcheck disable=SC2086
set -euo pipefail

# --- Parse args ---
# Only --config is recognized as an option; the first non-option arg stops
# parsing so <service> [version] fall through to positional handling below.
CONFIG_FILE=""
while [[ $# -gt 0 ]]; do
  case "$1" in
    --config)
      CONFIG_FILE="$2"
      shift 2
      ;;
    *) break ;;
  esac
done

if [[ -z "$CONFIG_FILE" || ! -f "$CONFIG_FILE" ]]; then
  echo "Error: config file not found: ${CONFIG_FILE:-<not specified>}"
  echo "Usage: release-core.sh --config <config-file> <service> [version]"
  exit 1
fi

# shellcheck source=/dev/null
source "$CONFIG_FILE"

# Validate config — BASEDIR is mandatory; SERVICE_IMAGES may legitimately
# omit services that produce no Docker image.
if [[ -z "${BASEDIR:-}" ]]; then
  echo "Error: BASEDIR not set in config file"
  exit 1
fi

# --- Service resolution ---
SERVICE="${1:-}"
VERSION="${2:-}"

if [[ -z "$SERVICE" ]]; then
  echo "Usage: release-core.sh --config <config> <service> [version]"
  echo ""
  echo "Available services: ${!SERVICE_DIRS[*]}"
  exit 1
fi

SERVICE_DIR="${SERVICE_DIRS[$SERVICE]:-}"
if [[ -z "$SERVICE_DIR" ]]; then
  echo "Error: unknown service '$SERVICE'"
  echo "Available services: ${!SERVICE_DIRS[*]}"
  exit 1
fi

IMAGE="${SERVICE_IMAGES[$SERVICE]:-}"
REPO_DIR="$BASEDIR/$SERVICE_DIR"

if [[ ! -d "$REPO_DIR/.git" ]]; then
  echo "Error: $REPO_DIR is not a git repository"
  exit 1
fi

cd "$REPO_DIR"

# --- Validation ---
# Releases are cut from main only, from a clean tree, fast-forwarded to origin.
CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
if [[ "$CURRENT_BRANCH" != "main" ]]; then
  echo "Error: not on main branch (currently on '$CURRENT_BRANCH')"
  echo "Switch to main before releasing."
  exit 1
fi

# NOTE: this catches staged and unstaged modifications to tracked files;
# untracked files are not detected (they don't affect what gets tagged).
if ! git diff --quiet HEAD 2>/dev/null; then
  echo "Error: uncommitted changes in $REPO_DIR"
  echo "Commit or stash changes before releasing."
  exit 1
fi

echo "Pulling latest main..."
git pull --ff-only origin main 2>/dev/null || {
  echo "Error: could not fast-forward main. Resolve divergence first."
  exit 1
}

# --- Version ---
# Three paths: "dev" (floating tag), omitted (auto-increment), explicit.
IS_DEV=0
if [[ "$VERSION" == "dev" ]]; then
  IS_DEV=1
elif [[ -z "$VERSION" ]]; then
  YEAR=$(date +%Y)
  MONTH=$(date +%-m)   # no leading zero, matching CalVer format YYYY.M.BUILD
  # Highest existing build number for the current year.month, or start at 1.
  LATEST=$(git tag --list "${YEAR}.${MONTH}.*" --sort=-version:refname | head -1)
  if [[ -n "$LATEST" ]]; then
    BUILD=$(echo "$LATEST" | cut -d. -f3)
    BUILD=$((BUILD + 1))
  else
    BUILD=1
  fi
  VERSION="${YEAR}.${MONTH}.${BUILD}"
fi

if [[ "$IS_DEV" -eq 0 ]]; then
  if ! [[ "$VERSION" =~ ^20[0-9]{2}\.[0-9]{1,2}\.[0-9]+$ ]]; then
    echo "Error: invalid CalVer format '$VERSION'"
    echo "Expected: YYYY.M.BUILD (e.g., 2026.3.42) or 'dev'"
    exit 1
  fi
  # Check specifically for an existing *tag*. A bare `git rev-parse "$VERSION"`
  # would also match branches or abbreviated commit SHAs with the same name,
  # producing a false "tag already exists" abort.
  if git rev-parse -q --verify "refs/tags/$VERSION" &>/dev/null; then
    echo "Error: tag '$VERSION' already exists"
    exit 1
  fi
fi

# --- Summary ---
echo ""
echo "=== Release Summary ==="
echo "  Service: $SERVICE"
echo "  Repo:    $REPO_DIR"
echo "  Branch:  main ($(git rev-parse --short HEAD))"
echo "  Version: $VERSION"
if [[ "$IS_DEV" -eq 1 ]]; then
  echo "  Environment: dev (force-updating tag)"
fi
if [[ -n "$IMAGE" ]]; then
  echo "  Image:   $IMAGE:$VERSION"
fi
echo ""

# --- Confirm ---
if [[ "${SKIP_CONFIRM:-}" != "1" ]]; then
  read -r -p "Proceed? [y/N] " REPLY
  if [[ ! "$REPLY" =~ ^[Yy]$ ]]; then
    echo "Aborted."
    exit 0
  fi
fi

# --- Tag and push ---
if [[ "$IS_DEV" -eq 1 ]]; then
  echo "Force-updating dev tag..."
  git tag -f dev
  git push origin dev --force 2>&1
else
  echo "Creating tag $VERSION..."
  git tag "$VERSION"
  echo "Pushing tag..."
  git push origin "$VERSION" 2>&1
fi

echo ""
if [[ -n "$IMAGE" ]]; then
  echo "CI will build and push: $IMAGE:$VERSION"
fi
echo ""
echo "Done. Tagged $SERVICE as $VERSION."

View File

@ -0,0 +1,51 @@
# Release — Shared Workflow
Tags a sub-project with a CalVer version and pushes to trigger CI/CD.
The calling skill MUST define a **Config** section with the values listed under "Required Config".
## Required Config
| Key | Description |
|---|---|
| `BASEDIR` | Absolute path to the project root (e.g., `/mnt/NV2/Development/paper-dynasty`) |
| `SERVICE_MAPPING` | Table: short name → local directory → Docker image (if applicable) |
| `DEPLOY_COMMANDS` | Per-environment deploy commands (optional — listed in the calling skill) |
## Usage
```
/release <service> [version]
```
- **service**: A short name from the SERVICE_MAPPING table
- **version**: CalVer tag (e.g., `2026.3.42`), `dev`, or omitted to auto-increment
## What It Does
1. Validates the service name against SERVICE_MAPPING
2. Checks repo state (must be on `main`, clean working tree)
3. Pulls latest `main` via fast-forward
4. Determines version:
- **Omitted**: auto-generates next CalVer (`YYYY.M.BUILD`)
- **Explicit**: validates CalVer format, checks tag doesn't exist
- **`dev`**: force-updates the `dev` tag
5. Creates the git tag (or force-updates `dev`)
6. Pushes the tag to origin — triggers CI/CD pipeline
## Environments
- **CalVer tags** (e.g., `2026.3.6`) → production Docker tags
- **`dev` tag** → dev environment Docker tags
## After Releasing
If DEPLOY_COMMANDS are defined in the calling skill's config, show them to the user.
Do not auto-execute deploy commands — always confirm first.
## Script
The calling skill should provide a `release.sh` that calls the shared script:
```bash
bash ~/.claude/skills/_templates/release-core.sh --config <path-to-config> <service> [version]
```

1
skills/paper-dynasty Symbolic link
View File

@ -0,0 +1 @@
/mnt/NV2/Development/paper-dynasty/.claude/skills/paper-dynasty

View File

@ -1,156 +0,0 @@
# Paper Dynasty - Unified Skill
Complete Paper Dynasty baseball card game management with workflow-based architecture.
## Structure
```
paper-dynasty/
├── SKILL.md # Main skill documentation (read by Claude Code)
├── README.md # This file
├── api_client.py # Shared API client for all operations
├── workflows/
│ ├── gauntlet-cleanup.md # Gauntlet team cleanup workflow
│ └── card-generation.md # Weekly card generation workflow
└── scripts/
├── gauntlet_cleanup.py # Gauntlet cleanup script
├── generate_summary.py # Card update summary generator
└── validate_database.py # Database validation tool
```
## Key Features
### 1. Unified API Client
- Single, reusable `PaperDynastyAPI` class
- Handles authentication, environments (prod/dev)
- Methods for all common operations
- Used by all scripts and workflows
### 2. Workflow-Based Architecture
- **Structured workflows** for repeatable tasks
- **Flexible ad-hoc** queries for creative requests
- Workflows are documented separately for clarity
- Scripts can be used standalone or via skill activation
### 3. Comprehensive Context
- Full API endpoint documentation
- Database structure and relationships
- Safety considerations
- Common request patterns
## Quick Start
### Setup Environment
```bash
# Required for API access
export API_TOKEN='your-api-token'
export DATABASE='prod' # or 'dev'
```
### Using the API Client
```python
from api_client import PaperDynastyAPI
api = PaperDynastyAPI(environment='prod')
# Get a team
team = api.get_team(abbrev='SKB')
# List cards
cards = api.list_cards(team_id=team['id'])
# Gauntlet operations
runs = api.list_gauntlet_runs(event_id=8, active_only=True)
```
### Using Scripts
```bash
cd ~/.claude/skills/paper-dynasty/scripts
# List gauntlet runs
python gauntlet_cleanup.py list --event-id 8 --active-only
# Wipe gauntlet team
python gauntlet_cleanup.py wipe --team-abbrev Gauntlet-SKB --event-id 8
```
## Available Workflows
### Gauntlet Team Cleanup
**Documentation**: `workflows/gauntlet-cleanup.md`
Clean up temporary gauntlet teams:
- Wipe cards (unassign from team)
- Delete packs
- End active runs
- Preserve historical data
### Weekly Card Generation
**Documentation**: `workflows/card-generation.md`
Generate and update player cards:
- Update date constants
- Generate card images
- Validate database
- Upload to S3
- Create scouting CSVs
- Generate release summary
## Advantages Over Fragmented Skills
**Before** (fragmented):
- `paper-dynasty-cards` - Card generation only
- `paper-dynasty-gauntlet` - Gauntlet cleanup only
- Duplicated API client code
- Couldn't handle creative queries
- Limited cross-functional operations
**After** (unified):
- ✅ Single skill with comprehensive context
- ✅ Shared, reusable API client
- ✅ Structured workflows for common tasks
- ✅ Flexible ad-hoc query handling
- ✅ Better discoverability and maintainability
## Example Interactions
**Structured (uses workflow)**:
```
User: "Clean up gauntlet team SKB"
→ Follows gauntlet-cleanup workflow
```
**Ad-Hoc (uses API client directly)**:
```
User: "How many cards does team SKB have?"
→ api.get_team(abbrev='SKB')
→ api.list_cards(team_id=X)
→ Returns count
```
**Creative (combines API calls)**:
```
User: "Show me all teams with active gauntlet runs"
→ api.list_gauntlet_runs(active_only=True)
→ Formats and displays team list
```
## Migration Notes
This skill consolidates:
- `~/.claude/skills/paper-dynasty-cards/` → `workflows/card-generation.md`
- `~/.claude/skills/paper-dynasty-gauntlet/` → `workflows/gauntlet-cleanup.md`
Old skills can be removed after verifying the unified skill works correctly.
## See Also
- **Main Documentation**: `SKILL.md`
- **API Client**: `api_client.py`
- **Workflows**: `workflows/`
- **Scripts**: `scripts/`
- **Database API**: `/mnt/NV2/Development/paper-dynasty/database/`
- **Discord Bot**: `/mnt/NV2/Development/paper-dynasty/discord-app/`

View File

@ -1,386 +0,0 @@
---
name: paper-dynasty
description: Paper Dynasty baseball card game management. USE WHEN user mentions Paper Dynasty, gauntlet teams, player cards, scouting reports, pack distribution, rewards, or card game database operations.
---
# Paper Dynasty - Baseball Card Game Management
**SCOPE**: Only use in paper-dynasty, paper-dynasty-database repos. Do not activate in unrelated projects.
---
## When to Activate This Skill
**Database Operations**:
- "Sync prod to dev" / "Copy production database"
- "Pull production data to dev"
**Structured Operations**:
- "Clean up gauntlet team [name]"
- "Generate weekly cards" / "run card workflow"
- "Update scouting" / "regenerate scouting reports"
- "Distribute [N] packs to all teams"
- "Wipe team [abbrev] cards"
**Discord Bot Troubleshooting**:
- "Check pd-discord logs" / "restart bot"
- "Is the bot running?" / "bot status"
**Ad-Hoc Queries**:
- "How many cards does team SKB have?"
- "Show me all MVP rarity players"
- "List all teams in season 5"
- "Find active gauntlet runs"
**Ecosystem & Cross-Project**:
- "PD status" / "ecosystem status" / "what needs work"
- "Show PD ecosystem status" / "What's the status across all projects?"
**Growth & Engagement**:
- "growth roadmap" / "engagement" / "user retention"
> **For deployment**, use the `deploy` skill instead.
---
## What is Paper Dynasty?
A baseball card TCG with digital player cards, team collection, game simulation, and gauntlet tournaments.
**Key Components**:
- **API**: `pd.manticorum.com` (prod) / `pddev.manticorum.com` (dev)
- **Discord Bot**: On `sba-bots` server
- **Card Generation**: `/mnt/NV2/Development/paper-dynasty/card-creation/`
---
## Critical Rules
### CLI-First for ALL Operations
**ALWAYS** use CLI tools first — they handle auth, formatting, and error handling
**NEVER** write raw Python API calls when a CLI command exists
**NEVER** access local SQLite directly
**paperdomo CLI** (queries, packs, gauntlet, teams):
```bash
python ~/.claude/skills/paper-dynasty/cli.py [command]
```
**pd-cards CLI** (card generation, scouting, uploads):
```bash
cd /mnt/NV2/Development/paper-dynasty/card-creation
pd-cards [command]
```
Only fall back to the Python API (`api_client.py`) for complex multi-step operations that the CLI doesn't cover (e.g., batch cleanup loops, custom card creation).
**For CLI reference**: `reference/cli-overview.md` (links to per-command files)
---
## Workflows
| Workflow | Trigger | Quick Command |
|----------|---------|---------------|
| **Database Sync** | "Sync prod to dev" | `~/.claude/skills/paper-dynasty/scripts/sync_prod_to_dev.sh` |
| **Gauntlet Cleanup** | "Clean up gauntlet team X" | `$PD gauntlet cleanup Gauntlet-X -e N -y` |
| **Pack Distribution** | "Give N packs to everyone" | `$PD pack distribute --num N` |
| **Scouting Update** | "Update scouting" | `pd-cards scouting all -c 27` |
| **Card Generation (Retrosheet)** | "Generate cards for 2005" | **Use `retrosheet-card-update` agent** |
| **Card Generation (Live Series)** | "Update live series cards" | **Use `live-series-card-update` agent** |
| **Custom Cards** | "Create custom player" | `pd-cards custom preview name` |
| **S3 Upload** | "Upload cards to S3" | `pd-cards upload s3 -c "2005 Live"` |
| **Bot Troubleshooting** | "Check bot logs" | `ssh sba-bots "docker logs paper-dynasty_discord-app_1 --tail 100"` |
**Detailed workflow docs**: `workflows/` directory
### Gauntlet Cleanup Safety
**Safe to clean**: Gauntlet teams (temporary), completed runs, eliminated teams
**Never clean**: Regular season teams, teams with active gameplay, before tournament ends
| Data | Action | Reversible? |
|------|--------|-------------|
| Cards | Unassigned (team = NULL) | Yes (reassign) |
| Packs | Deleted | No |
| Run Record | Ended (timestamp set) | Kept in DB |
| Team/Results/Stats | Preserved | Kept in DB |
**Batch cleanup** (all active runs in an event):
```python
runs = api.list_gauntlet_runs(event_id=8, active_only=True)
for run in runs:
team_id = run['team']['id']
api.wipe_team_cards(team_id)
for pack in api.list_packs(team_id=team_id):
api.delete_pack(pack['id'])
api.end_gauntlet_run(run['id'])
```
### Database Sync Notes
**Restore a dev backup**:
```bash
BACKUP_FILE=~/.paper-dynasty/db-backups/paperdynasty_dev_YYYYMMDD_HHMMSS.sql
ssh pd-database "docker exec -i sba_postgres psql -U sba_admin -d paperdynasty_dev" < "$BACKUP_FILE"
```
**Manual fallback** (if script fails):
```bash
ssh akamai "docker exec sba_postgres pg_dump -U pd_admin -d pd_master --clean --if-exists" > /tmp/pd_prod_dump.sql
ssh pd-database "docker exec -i sba_postgres psql -U sba_admin -d paperdynasty_dev" < /tmp/pd_prod_dump.sql
```
**Large databases**: Pipe through `gzip`/`gunzip` for compressed transfer.
---
## Discord Bot
**Server**: `sba-bots` (10.10.0.88) | **Container**: `paper-dynasty_discord-app_1` | **Compose Dir**: `/home/cal/container-data/paper-dynasty/`
Related services: PostgreSQL (`paper-dynasty_db_1`), Adminer (`paper-dynasty_adminer_1`, port 8080 — user: `postgres`, pass: `example`, server: `db`)
```bash
# Check status
ssh sba-bots "docker ps --filter name=paper-dynasty"
# View logs
ssh sba-bots "docker logs paper-dynasty_discord-app_1 --tail 100"
# Follow logs
ssh sba-bots "docker logs paper-dynasty_discord-app_1 -f --tail 50"
# Restart bot
ssh sba-bots "cd /home/cal/container-data/paper-dynasty && docker compose restart discord-app"
# Database CLI
ssh sba-bots "docker exec -it paper-dynasty_db_1 psql -U postgres"
```
**Key env vars** (in docker-compose.yml): `BOT_TOKEN`, `GUILD_ID`, `API_TOKEN`, `DATABASE` (Prod/Dev), `LOG_LEVEL`, `DB_URL` (usually `db`), `SCOREBOARD_CHANNEL`
---
## Common Patterns
```bash
PD="python ~/.claude/skills/paper-dynasty/cli.py"
# Teams
$PD team get SKB # Single team details
$PD team list --season 10 # All teams in a season
$PD team cards SKB # Team's card collection
# Players
$PD player get 12785 # Single player details
$PD player list --rarity MVP --cardset 27 # Filtered player list
# Packs
$PD status # Packs opened today
$PD pack list --team SKB --unopened # Team's unopened packs
$PD pack distribute --num 10 # Give 10 packs to all teams
$PD pack distribute --num 5 --exclude CAR # Exclude a team
# Gauntlet
$PD gauntlet list --active # Active gauntlet runs
$PD gauntlet teams --active # Active gauntlet teams
$PD gauntlet cleanup Gauntlet-SKB -e 9 -y # Wipe a gauntlet team
```
Add `--json` to any command for machine-readable output. Add `--env dev` for dev database.
---
## Quick Reference
### Environment Setup
```bash
export API_TOKEN='your-token'
export DATABASE='prod' # or 'dev'
```
### Key IDs
- **Current Live Cardset**: 27 (2005 Live)
- **Default Pack Type**: 1 (Standard)
- **Rarities**: Replacement < Reserve < Starter < All-Star < MVP < Hall of Fame
**For full schema**: `reference/database-schema.md`
### pd-cards Commands
```bash
cd /mnt/NV2/Development/paper-dynasty/card-creation
pd-cards custom list/preview/submit # Custom cards
pd-cards scouting all -c 27 # Scouting reports
pd-cards retrosheet process 2005 -c 27 -d Live # Card generation
pd-cards upload s3 -c "2005 Live" # S3 upload
```
### paperdomo Commands
```bash
PD="python ~/.claude/skills/paper-dynasty/cli.py"
$PD health # API health check
$PD status # Packs opened today
$PD team list/get/cards # Team operations
$PD player get/list # Player operations
$PD pack list/today/distribute # Pack operations
$PD gauntlet list/teams/cleanup # Gauntlet operations
```
---
## File Structure
```
~/.claude/skills/paper-dynasty/
├── SKILL.md # This file (routing & quick reference)
├── api_client.py # Python API client
├── cli.py # paperdomo CLI
├── reference/
│ ├── database-schema.md # Models, cardsets, pack types, rarities
│ ├── api-reference.md # Endpoints, authentication, client examples
│ ├── cli-overview.md # CLI routing table — load this first
│ └── cli/
│ ├── team.md # team list/get/cards
│ ├── pack.md # pack list/today/distribute + pack type IDs
│ ├── player.md # player get/list
│ ├── gauntlet.md # gauntlet list/teams/cleanup
│ └── pd-cards.md # custom/scouting/retrosheet/upload/live-series
├── workflows/
│ ├── card-generation.md # Retrosheet reference (pipeline now in retrosheet-card-update agent)
│ ├── live-series-update.md # Live series reference (pipeline now in live-series-card-update agent)
│ ├── card_utilities.py # Card refresh pipeline (fetch → S3 → update)
│ ├── custom-card-creation.md # Archetypes, manual creation, rating rules
│ └── TROUBLESHOOTING.md # Card rendering issues
└── scripts/
├── distribute_packs.py
├── gauntlet_cleanup.py
└── validate_database.py
```
**Related Codebases**:
- Database/API: `/mnt/NV2/Development/paper-dynasty/database/`
- Discord Bot: `/mnt/NV2/Development/paper-dynasty/discord-app/`
- Card Creation: `/mnt/NV2/Development/paper-dynasty/card-creation/`
---
## When to Load Additional Context
| Need | Load |
|------|------|
| Database model details | `reference/database-schema.md` |
| API endpoints & client usage | `reference/api-reference.md` |
| CLI command reference | `reference/cli-overview.md` → load `cli/team.md`, `cli/pack.md`, `cli/player.md`, `cli/gauntlet.md`, or `cli/pd-cards.md` |
| Retrosheet card workflow / PotM | **Use `retrosheet-card-update` agent** (ref: `workflows/card-generation.md`) |
| Live series workflow / PotM | **Use `live-series-card-update` agent** (ref: `workflows/live-series-update.md`) |
| Card rendering issues | `workflows/TROUBLESHOOTING.md` |
---
## Ecosystem Dashboard
Provides a cross-project view of all Paper Dynasty Gitea repos in a single terminal dashboard.
**Script**: `~/.claude/skills/paper-dynasty/scripts/ecosystem_status.sh`
**Trigger phrases**:
- "Show PD ecosystem status"
- "What's the status across all projects?"
- "PD status" / "ecosystem status" / "what needs work"
**Usage**:
```bash
# Requires GITEA_TOKEN in env (or auto-reads from gitea-mcp config)
~/.claude/skills/paper-dynasty/scripts/ecosystem_status.sh
```
**What it shows**:
- Open issue count per repo
- Open PR count per repo
- Latest commit SHA + date per repo
- Recent commits (last 3) per repo with author and message
- Open issue titles grouped by repo (with labels)
- Open PR titles grouped by repo (with branch info)
- Cross-repo totals
**Repos covered**: paper-dynasty-database, paper-dynasty-discord, paper-dynasty-card-creation, paper-dynasty-website
**Auth**: Uses `GITEA_TOKEN` env var. If unset, attempts to read from `~/.config/claude-code/mcp-servers/gitea-mcp.json`.
---
## Initiative Tracker (`pd-plan`)
Local SQLite database tracking cross-project initiatives, priorities, and status.
**CLI**: `python ~/.claude/skills/paper-dynasty/plan/cli.py [command]`
**Database**: `~/.claude/skills/paper-dynasty/plan/initiatives.db`
**Trigger phrases**:
- "what should I work on" / "what's the priority"
- "initiative status" / "pd-plan" / "show priorities"
- "update initiative" / "mark done"
**Quick reference**:
```bash
PDP="python ~/.claude/skills/paper-dynasty/plan/cli.py"
$PDP summary # Dashboard — run at session start
$PDP list # All active initiatives
$PDP list --phase 1 # Phase 1 only
$PDP list --repo discord # Filter by repo
$PDP next # Highest priority non-blocked item
$PDP next --repo discord # Next for a specific repo
$PDP show 1 # Full details + activity log
$PDP add "Title" --phase 1 --priority 20 --impact retention --size M --repos discord
$PDP update 3 --status in_progress --actor pd-discord
$PDP update 3 --note "Merged 8 PRs" --actor pd-ops
$PDP update 3 --link "discord#104" # Append linked issue
$PDP done 3 --actor pd-ops # Mark complete
$PDP list --json # Machine-readable output
```
**Session startup**: Always run `pd-plan summary` at the start of a Paper Dynasty session to understand current priorities.
---
## Growth Roadmap
High-level roadmap for Paper Dynasty player growth, engagement, and retention strategies.
**File**: `/mnt/NV2/Development/paper-dynasty/ROADMAP.md`
**Trigger phrases**:
- "growth roadmap" / "engagement" / "user retention"
- "what's planned" / "next features"
Load the roadmap file for context before discussing growth priorities, feature planning, or retention strategies. Use `pd-plan` for current status of specific initiatives.
---
## Specialized Agents
Dispatch work to these agents for their respective domains. Do not do their work inline — launch them explicitly.
| Agent | Model | Domain | Dispatch When |
|-------|-------|--------|---------------|
| `pd-database` | Opus | Database/API | Schema changes, endpoints, migrations, data model |
| `pd-discord` | Opus | Discord bot | Commands, gameplay engine, bot UX |
| `pd-cards` | Opus | Card pipeline | Card generation, ratings, scouting, rendering |
| `pd-growth` | Opus | Product growth | Engagement, retention, roadmap prioritization |
| `pd-ops` | Sonnet | Release ops | Merging PRs, deploys, branch cleanup, process |
PO agents (Opus) decide **what** to build. `pd-ops` ensures it **ships correctly**. Implementation is delegated to `engineer`, `issue-worker`, or `swarm-coder`.
**How to dispatch**: Mention the agent name explicitly, e.g. "use the pd-cards agent to regenerate scouting" or "dispatch to pd-database for this migration".
---
**Last Updated**: 2026-03-22
**Version**: 2.8 (Added pd-plan initiative tracker, pd-ops agent, updated agent table with models)

View File

@ -1,731 +0,0 @@
#!/usr/bin/env python3
"""
Paper Dynasty API Client
Shared API client for all Paper Dynasty operations.
Provides methods for interacting with teams, players, cards, gauntlets, and more.
Environment Variables:
API_TOKEN: Bearer token for API authentication (required)
DATABASE: 'prod' or 'dev' (default: dev)
"""
import os
import sys
from typing import Optional, Dict, List, Any
import requests
class PaperDynastyAPI:
"""
Paper Dynasty API client for remote database access
Usage:
api = PaperDynastyAPI(environment='prod')
# Get a team
team = api.get_team(abbrev='SKB')
# List gauntlet runs
runs = api.list_gauntlet_runs(event_id=8, active_only=True)
# Wipe team cards
api.wipe_team_cards(team_id=464)
"""
def __init__(
self,
environment: str = "dev",
token: Optional[str] = None,
verbose: bool = False,
):
"""
Initialize API client
Args:
environment: 'prod' or 'dev'
token: API token (defaults to API_TOKEN env var). Only required for write operations (POST/PATCH/DELETE).
verbose: Print request/response details
"""
self.env = environment.lower()
self.base_url = (
"https://pd.manticorum.com/api"
if "prod" in self.env
else "https://pddev.manticorum.com/api"
)
self.token = token or os.getenv("API_TOKEN")
self.verbose = verbose
self.headers = {"Content-Type": "application/json"}
if self.token:
self.headers["Authorization"] = f"Bearer {self.token}"
def _require_token(self):
"""Raise if no API token is set (needed for write operations)"""
if not self.token:
raise ValueError(
"API_TOKEN required for write operations. "
"Set it with: export API_TOKEN='your-token-here'"
)
def _log(self, message: str):
"""Print message if verbose mode enabled"""
if self.verbose:
print(f"[API] {message}")
def _build_url(
self,
endpoint: str,
api_ver: int = 2,
object_id: Optional[int] = None,
params: Optional[List] = None,
) -> str:
"""Build API URL with parameters"""
url = f"{self.base_url}/v{api_ver}/{endpoint}"
if object_id is not None:
url += f"/{object_id}"
if params:
param_strs = [f"{k}={v}" for k, v in params]
url += "?" + "&".join(param_strs)
return url
# ====================
# Low-level HTTP methods
# ====================
def get(
self,
endpoint: str,
object_id: Optional[int] = None,
params: Optional[List] = None,
timeout: int = 10,
) -> Dict:
"""GET request to API"""
url = self._build_url(endpoint, object_id=object_id, params=params)
self._log(f"GET {url}")
response = requests.get(url, headers=self.headers, timeout=timeout)
response.raise_for_status()
return response.json()
def post(
self, endpoint: str, payload: Optional[Dict] = None, timeout: int = 10
) -> Any:
"""POST request to API"""
self._require_token()
url = self._build_url(endpoint)
self._log(f"POST {url}")
response = requests.post(
url, headers=self.headers, json=payload, timeout=timeout
)
response.raise_for_status()
return response.json() if response.text else {}
def patch(
self, endpoint: str, object_id: int, params: List, timeout: int = 10
) -> Dict:
"""PATCH request to API"""
self._require_token()
url = self._build_url(endpoint, object_id=object_id, params=params)
self._log(f"PATCH {url}")
response = requests.patch(url, headers=self.headers, timeout=timeout)
response.raise_for_status()
return response.json()
def delete(self, endpoint: str, object_id: int, timeout: int = 10) -> str:
"""DELETE request to API"""
self._require_token()
url = self._build_url(endpoint, object_id=object_id)
self._log(f"DELETE {url}")
response = requests.delete(url, headers=self.headers, timeout=timeout)
response.raise_for_status()
return response.text
# ====================
# Team Operations
# ====================
def get_team(
self, team_id: Optional[int] = None, abbrev: Optional[str] = None
) -> Dict:
"""
Get a team by ID or abbreviation
Args:
team_id: Team ID
abbrev: Team abbreviation (e.g., 'SKB')
Returns:
Team dict
"""
if team_id:
return self.get("teams", object_id=team_id)
elif abbrev:
result = self.get("teams", params=[("abbrev", abbrev.upper())])
teams = result.get("teams", [])
if not teams:
raise ValueError(f"Team '{abbrev}' not found")
return teams[0]
else:
raise ValueError("Must provide team_id or abbrev")
def list_teams(
self, season: Optional[int] = None, event_id: Optional[int] = None
) -> List[Dict]:
"""
List teams
Args:
season: Filter by season
event_id: Filter by event
Returns:
List of team dicts
"""
params = []
if season:
params.append(("season", season))
if event_id:
params.append(("event", event_id))
result = self.get("teams", params=params if params else None)
return result.get("teams", [])
# ====================
# Card Operations
# ====================
def wipe_team_cards(self, team_id: int) -> Any:
"""
Wipe all cards for a team (unassigns them)
Args:
team_id: Team ID
Returns:
API response
"""
return self.post(f"cards/wipe-team/{team_id}")
def list_cards(
self, team_id: Optional[int] = None, player_id: Optional[int] = None
) -> List[Dict]:
"""
List cards. At least one filter is required to avoid massive unfiltered queries.
Args:
team_id: Filter by team
player_id: Filter by player
Returns:
List of card dicts
"""
if not team_id and not player_id:
raise ValueError(
"list_cards requires at least one filter (team_id or player_id)"
)
params = []
if team_id:
params.append(("team_id", team_id))
if player_id:
params.append(("player_id", player_id))
result = self.get("cards", params=params if params else None)
return result.get("cards", [])
# ====================
# Pack Operations
# ====================
def list_packs(
self,
team_id: Optional[int] = None,
opened: Optional[bool] = None,
new_to_old: bool = False,
limit: Optional[int] = None,
timeout: int = 10,
) -> List[Dict]:
"""
List packs
Args:
team_id: Filter by team
opened: Filter by opened status (True=opened, False=unopened)
new_to_old: Sort newest to oldest (default: False)
limit: Maximum number of results (e.g., 200, 1000, 2000)
timeout: Request timeout in seconds (default: 10, increase for large queries)
Returns:
List of pack dicts
Examples:
# Get 200 most recently opened packs
packs = api.list_packs(opened=True, new_to_old=True, limit=200)
# Get unopened packs for a team
packs = api.list_packs(team_id=69, opened=False)
# Large query with extended timeout
packs = api.list_packs(opened=True, limit=2000, timeout=30)
"""
if team_id is None and opened is None:
raise ValueError(
"list_packs requires at least one filter (team_id or opened)"
)
params = []
if team_id:
params.append(("team_id", team_id))
if opened is not None:
params.append(("opened", "true" if opened else "false"))
if new_to_old:
params.append(("new_to_old", "true"))
if limit:
params.append(("limit", str(limit)))
result = self.get("packs", params=params if params else None, timeout=timeout)
return result.get("packs", [])
def delete_pack(self, pack_id: int) -> str:
"""
Delete a pack
Args:
pack_id: Pack ID
Returns:
Success message
"""
return self.delete("packs", object_id=pack_id)
def update_pack(
self,
pack_id: int,
pack_cardset_id: Optional[int] = None,
pack_team_id: Optional[int] = None,
pack_type_id: Optional[int] = None,
) -> Dict:
"""
Update pack properties (PATCH)
Args:
pack_id: Pack ID
pack_cardset_id: Update pack cardset (use -1 to clear)
pack_team_id: Update pack team (use -1 to clear)
pack_type_id: Update pack type
Returns:
Updated pack dict
Example:
# Fix missing cardset on Team Choice pack
api.update_pack(pack_id=21207, pack_cardset_id=27)
"""
params = []
if pack_cardset_id is not None:
params.append(("pack_cardset_id", pack_cardset_id))
if pack_team_id is not None:
params.append(("pack_team_id", pack_team_id))
if pack_type_id is not None:
params.append(("pack_type_id", pack_type_id))
return self.patch("packs", object_id=pack_id, params=params)
def create_packs(self, packs: List[Dict]) -> Any:
"""
Create packs (bulk distribution)
Args:
packs: List of pack dicts with keys: team_id, pack_type_id, pack_cardset_id
Returns:
API response
Example:
# Give 5 Standard packs to team 31
api.create_packs([
{'team_id': 31, 'pack_type_id': 1, 'pack_cardset_id': None}
for _ in range(5)
])
"""
payload = {"packs": packs}
return self.post("packs", payload=payload)
    def get_packs_opened_today(self, limit: int = 2000, timeout: int = 30) -> Dict:
        """
        Get analytics on packs opened today
        Args:
            limit: Number of recent packs to check (default: 2000)
            timeout: Request timeout in seconds (default: 30)
        Returns:
            Dict with keys:
            - total: Total packs opened today
            - teams: List of dicts with team info and pack counts
            - note: Warning if limit was reached
        Example:
            result = api.get_packs_opened_today()
            print(f"{result['total']} packs opened by {len(result['teams'])} teams")
        """
        from datetime import datetime, timezone
        from collections import defaultdict
        # Get recent opened packs
        packs = self.list_packs(
            opened=True, new_to_old=True, limit=limit, timeout=timeout
        )
        # Today's date (UTC)
        today = datetime.now(timezone.utc).date()
        # Count packs by team
        teams_data = defaultdict(
            lambda: {"count": 0, "abbrev": "", "lname": "", "first": None, "last": None}
        )
        total = 0
        for pack in packs:
            if pack.get("open_time"):
                try:
                    # open_time is treated as epoch milliseconds (divided by
                    # 1000 to get seconds) and interpreted in UTC.
                    open_dt = datetime.fromtimestamp(
                        pack["open_time"] / 1000, tz=timezone.utc
                    )
                    if open_dt.date() == today:
                        total += 1
                        team_id = pack["team"]["id"]
                        teams_data[team_id]["abbrev"] = pack["team"]["abbrev"]
                        teams_data[team_id]["lname"] = pack["team"]["lname"]
                        teams_data[team_id]["count"] += 1
                        # Track earliest and latest open timestamps per team.
                        if (
                            teams_data[team_id]["first"] is None
                            or open_dt < teams_data[team_id]["first"]
                        ):
                            teams_data[team_id]["first"] = open_dt
                        if (
                            teams_data[team_id]["last"] is None
                            or open_dt > teams_data[team_id]["last"]
                        ):
                            teams_data[team_id]["last"] = open_dt
                except Exception:
                    # Best-effort analytics: a malformed pack record is
                    # skipped rather than failing the whole report.
                    pass
        # Format results
        teams_list = []
        for team_id, data in teams_data.items():
            teams_list.append(
                {
                    "team_id": team_id,
                    "abbrev": data["abbrev"],
                    "name": data["lname"],
                    "packs": data["count"],
                    "first_pack": data["first"].isoformat() if data["first"] else None,
                    "last_pack": data["last"].isoformat() if data["last"] else None,
                }
            )
        # Sort by pack count
        teams_list.sort(key=lambda x: x["packs"], reverse=True)
        result = {"total": total, "teams": teams_list, "date": today.isoformat()}
        # The query window is the most recent `limit` opened packs, so if we
        # got exactly `limit` rows there may be more opens we did not see.
        if len(packs) == limit:
            result["note"] = f"Hit limit of {limit} packs - actual count may be higher"
        return result
    def distribute_packs(
        self,
        num_packs: int = 5,
        exclude_team_abbrev: Optional[List[str]] = None,
        pack_type_id: int = 1,
        season: Optional[int] = None,
        cardset_id: Optional[int] = None,
    ) -> Dict:
        """
        Distribute packs to all human-controlled teams
        Args:
            num_packs: Number of packs to give to each team (default: 5)
            exclude_team_abbrev: List of team abbreviations to exclude (default: None)
            pack_type_id: Pack type ID (default: 1 = Standard packs)
            season: Season to distribute for (default: current season)
            cardset_id: Cardset ID for pack types that require it (e.g., Promo Choice = type 9)
        Returns:
            Dict with keys:
            - total_packs: Total packs distributed
            - teams_count: Number of teams that received packs
            - teams: List of teams that received packs
        Example:
            # Give 10 packs to all teams
            result = api.distribute_packs(num_packs=10)
            # Give 11 packs to all teams except CAR
            result = api.distribute_packs(num_packs=11, exclude_team_abbrev=['CAR'])
        """
        if exclude_team_abbrev is None:
            exclude_team_abbrev = []
        # Convert to uppercase for case-insensitive matching
        exclude_team_abbrev = [abbrev.upper() for abbrev in exclude_team_abbrev]
        # Get current season if not specified
        if season is None:
            current = self.get("current")
            season = current["season"]
        self._log(f"Distributing {num_packs} packs to season {season} teams")
        # Get all teams for season
        all_teams = self.list_teams(season=season)
        # Filter for human-controlled teams only (AI teams and gauntlet
        # teams never receive distribution packs).
        qualifying_teams = []
        for team in all_teams:
            if not team["is_ai"] and "gauntlet" not in team["abbrev"].lower():
                # Check if team is in exclusion list
                if team["abbrev"].upper() in exclude_team_abbrev:
                    self._log(f"Excluding team {team['abbrev']}: {team['sname']}")
                    continue
                qualifying_teams.append(team)
        self._log(f"Found {len(qualifying_teams)} qualifying teams")
        if exclude_team_abbrev:
            self._log(f"Excluded teams: {', '.join(exclude_team_abbrev)}")
        # Distribute packs to each team
        total_packs = 0
        for team in qualifying_teams:
            self._log(f"Giving {num_packs} packs to {team['abbrev']} ({team['sname']})")
            # Create pack payload
            packs = [
                {
                    "team_id": team["id"],
                    "pack_type_id": pack_type_id,
                    "pack_cardset_id": cardset_id,
                }
                for _ in range(num_packs)
            ]
            try:
                self.create_packs(packs)
                total_packs += num_packs
                self._log(
                    f" ✓ Successfully gave {num_packs} packs to {team['abbrev']}"
                )
            except Exception as e:
                # Re-raise after logging: a partial distribution should stop
                # here so the operator can see which team failed.
                self._log(f" ✗ Failed to give packs to {team['abbrev']}: {e}")
                raise
        result = {
            "total_packs": total_packs,
            "teams_count": len(qualifying_teams),
            "teams": qualifying_teams,
        }
        self._log(
            f"Distribution complete: {total_packs} packs to {len(qualifying_teams)} teams"
        )
        return result
# ====================
# Gauntlet Operations
# ====================
def list_gauntlet_runs(
self,
event_id: Optional[int] = None,
team_id: Optional[int] = None,
active_only: bool = False,
) -> List[Dict]:
"""
List gauntlet runs
Args:
event_id: Filter by event
team_id: Filter by team
active_only: Only show active runs
Returns:
List of run dicts
"""
params = []
if event_id:
params.append(("gauntlet_id", event_id))
if team_id:
params.append(("team_id", team_id))
if active_only:
params.append(("is_active", "true"))
result = self.get("gauntletruns", params=params if params else None)
return result.get("runs", [])
def end_gauntlet_run(self, run_id: int) -> Dict:
"""
End a gauntlet run by setting ended timestamp
Args:
run_id: Run ID
Returns:
Updated run dict
"""
return self.patch("gauntletruns", object_id=run_id, params=[("ended", "true")])
# ====================
# Player Operations
# ====================
def get_player(self, player_id: int) -> Dict:
"""
Get a player by ID
Args:
player_id: Player ID
Returns:
Player dict
"""
return self.get("players", object_id=player_id)
def list_players(
self,
cardset_id: Optional[int] = None,
rarity: Optional[str] = None,
timeout: int = 30,
) -> List[Dict]:
"""
List players. At least one filter is required to avoid massive unfiltered queries.
Args:
cardset_id: Filter by cardset
rarity: Filter by rarity
timeout: Request timeout in seconds (default: 30, player lists are large)
Returns:
List of player dicts
"""
if not cardset_id and not rarity:
raise ValueError(
"list_players requires at least one filter (cardset_id or rarity)"
)
params = []
if cardset_id:
params.append(("cardset", cardset_id))
if rarity:
params.append(("rarity", rarity))
result = self.get("players", params=params, timeout=timeout)
return result.get("players", [])
# ====================
# Result/Stats Operations
# ====================
def list_results(
self, season: Optional[int] = None, team_id: Optional[int] = None
) -> List[Dict]:
"""
List game results. At least one filter is required to avoid massive unfiltered queries.
Args:
season: Filter by season
team_id: Filter by team
Returns:
List of result dicts
"""
if not season and not team_id:
raise ValueError(
"list_results requires at least one filter (season or team_id)"
)
params = []
if season:
params.append(("season", season))
if team_id:
params.append(("team_id", team_id))
result = self.get("results", params=params if params else None)
return result.get("results", [])
# ====================
# Helper Methods
# ====================
    def find_gauntlet_teams(
        self, event_id: Optional[int] = None, active_only: bool = False
    ) -> List[Dict]:
        """
        Find gauntlet teams (teams with 'Gauntlet' in abbrev)
        Args:
            event_id: Filter by event
            active_only: Only teams with active runs
        Returns:
            List of team dicts with run information
        """
        if active_only:
            # Get active runs, then get teams
            runs = self.list_gauntlet_runs(event_id=event_id, active_only=True)
            teams_with_runs = []
            for run in runs:
                # NOTE: mutates the team dict embedded in the run payload,
                # attaching the run under 'active_run'.
                team = run["team"]
                team["active_run"] = run
                teams_with_runs.append(team)
            return teams_with_runs
        else:
            # Get all teams with 'Gauntlet' in name (case-sensitive match
            # on the abbreviation).
            all_teams = self.list_teams()
            gauntlet_teams = [t for t in all_teams if "Gauntlet" in t.get("abbrev", "")]
            # Optionally add run info
            if event_id:
                runs = self.list_gauntlet_runs(event_id=event_id)
                # Index runs by their team id for O(1) lookup.
                run_by_team = {r["team"]["id"]: r for r in runs}
                for team in gauntlet_teams:
                    if team["id"] in run_by_team:
                        team["run"] = run_by_team[team["id"]]
            return gauntlet_teams
def main():
    """Smoke-test entry point: connect to the chosen environment and list
    active gauntlet teams as a connectivity example."""
    import argparse
    parser = argparse.ArgumentParser(description="Paper Dynasty API Client")
    parser.add_argument(
        "--env", choices=["prod", "dev"], default="dev", help="Environment"
    )
    parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
    args = parser.parse_args()
    try:
        api = PaperDynastyAPI(environment=args.env, verbose=args.verbose)
        print(f"✓ Connected to {args.env.upper()} database: {api.base_url}")
        # Example: List gauntlet teams
        print("\nExample: Listing gauntlet teams...")
        teams = api.find_gauntlet_teams(active_only=True)
        print(f"Found {len(teams)} active gauntlet teams")
    except ValueError as e:
        # ValueError covers missing-token and not-found cases from the client.
        print(f"❌ Error: {e}")
        sys.exit(1)
if __name__ == "__main__":
    main()

View File

@ -1,731 +0,0 @@
#!/usr/bin/env python3
"""
Paper Dynasty CLI - Baseball Card Game Management
A command-line interface for the Paper Dynasty API, primarily for use with Claude Code.
Usage:
pd status
pd team list
pd team get SKB
pd team cards SKB
pd pack today
pd pack distribute --num 10
pd gauntlet list --event-id 8 --active
pd gauntlet cleanup Gauntlet-SKB --event-id 8 --yes
Environment:
API_TOKEN: Required. Bearer token for API authentication.
"""
import json
import os
import sys
from typing import Annotated, List, Optional
import typer
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
# Import the existing API client from same directory
sys.path.insert(0, os.path.dirname(__file__))
from api_client import PaperDynastyAPI
# ============================================================================
# App Setup
# ============================================================================
# Root Typer application; sub-apps below group commands by resource.
app = typer.Typer(
    name="pd",
    help="Paper Dynasty Baseball Card Game CLI",
    no_args_is_help=True,
)
team_app = typer.Typer(help="Team operations")
pack_app = typer.Typer(help="Pack operations")
gauntlet_app = typer.Typer(help="Gauntlet operations")
player_app = typer.Typer(help="Player operations")
app.add_typer(team_app, name="team")
app.add_typer(pack_app, name="pack")
app.add_typer(gauntlet_app, name="gauntlet")
app.add_typer(player_app, name="player")
# Shared rich console used by every output helper and command.
console = Console()
class State:
    """Global state for API client and settings"""
    # API client instance; populated by the app callback before any command runs.
    api: Optional[PaperDynastyAPI] = None
    # When True, commands emit JSON instead of rich tables/panels.
    json_output: bool = False
# Single module-level state instance shared by all commands.
state = State()
# ============================================================================
# Output Helpers
# ============================================================================
def output_json(data):
    """Pretty-print any serializable payload as JSON on the shared console."""
    serialized = json.dumps(data, indent=2, default=str)
    console.print_json(serialized)
def output_table(
    title: str, columns: List[str], rows: List[List], show_lines: bool = False
):
    """Render rows as a rich table; None cells display as empty strings."""
    table = Table(
        title=title, show_header=True, header_style="bold cyan", show_lines=show_lines
    )
    for heading in columns:
        table.add_column(heading)
    for row in rows:
        rendered = ["" if cell is None else str(cell) for cell in row]
        table.add_row(*rendered)
    console.print(table)
def handle_error(e: Exception, context: str = ""):
    """Map common API failures to friendly console messages, then exit(1)."""
    text = str(e)
    if "401" in text:
        console.print("[red]Error:[/red] Unauthorized. Check your API_TOKEN.")
    elif "404" in text:
        console.print(f"[red]Error:[/red] Not found. {context}")
    elif "Connection" in text or "ConnectionError" in text:
        console.print(
            "[red]Error:[/red] Cannot connect to API. Check network and --env setting."
        )
    else:
        # Fall back to the raw exception text for anything unrecognized.
        console.print(f"[red]Error:[/red] {e}")
    raise typer.Exit(1)
# ============================================================================
# Main Callback (Global Options)
# ============================================================================
@app.callback()
def main(
    env: Annotated[
        str, typer.Option("--env", help="Environment: prod or dev")
    ] = "prod",
    json_output: Annotated[bool, typer.Option("--json", help="Output as JSON")] = False,
    verbose: Annotated[
        bool, typer.Option("--verbose", "-v", help="Verbose output")
    ] = False,
):
    """Paper Dynasty Baseball Card Game CLI"""
    # Runs before every command: build the API client (note the default
    # environment here is 'prod') and record the output mode on shared state.
    state.api = PaperDynastyAPI(environment=env, verbose=verbose)
    state.json_output = json_output
# ============================================================================
# Status & Health Commands
# ============================================================================
@app.command()
def status():
    """Show packs opened today summary"""
    try:
        result = state.api.get_packs_opened_today()
        # JSON mode short-circuits all rich rendering.
        if state.json_output:
            output_json(result)
            return
        console.print(
            f"\n[bold cyan]Packs Opened Today ({result['date']})[/bold cyan]\n"
        )
        console.print(f"[bold]Total:[/bold] {result['total']} packs\n")
        if result["teams"]:
            rows = []
            for t in result["teams"]:
                rows.append([t["abbrev"], t["name"], t["packs"]])
            output_table("By Team", ["Abbrev", "Team", "Packs"], rows)
        else:
            console.print("[dim]No packs opened today[/dim]")
        # The analytics helper attaches a 'note' when its pack-scan limit
        # was hit and counts may be understated.
        if result.get("note"):
            console.print(f"\n[yellow]Note:[/yellow] {result['note']}")
    except Exception as e:
        handle_error(e)
@app.command()
def health():
    """Check that the API answers by issuing a cheap team-list query."""
    try:
        # Any successful round-trip proves the API is reachable.
        team_rows = state.api.list_teams()
        console.print(f"[green]API is healthy[/green] ({state.api.base_url})")
        console.print(f"[dim]Found {len(team_rows)} teams[/dim]")
    except Exception as e:
        handle_error(e)
# ============================================================================
# Team Commands
# ============================================================================
@team_app.command("list")
def team_list(
    season: Annotated[
        Optional[int], typer.Option("--season", "-s", help="Filter by season")
    ] = None,
):
    """List all teams"""
    try:
        teams = state.api.list_teams(season=season)
        # JSON mode returns the unfiltered API payload, including gauntlet teams.
        if state.json_output:
            output_json(teams)
            return
        if not teams:
            console.print("[yellow]No teams found[/yellow]")
            return
        # Filter out gauntlet teams for cleaner display
        regular_teams = [t for t in teams if "Gauntlet" not in t.get("abbrev", "")]
        rows = []
        for t in regular_teams:
            rows.append(
                [
                    t["abbrev"],
                    t.get("sname", ""),
                    t.get("season", ""),
                    t.get("wallet", 0),
                    t.get("ranking", "N/A"),
                    "AI" if t.get("is_ai") else "Human",
                ]
            )
        title = "Teams"
        if season:
            title += f" - Season {season}"
        output_table(
            title, ["Abbrev", "Name", "Season", "Wallet", "Rank", "Type"], rows
        )
    except Exception as e:
        handle_error(e)
@team_app.command("get")
def team_get(
    abbrev: Annotated[str, typer.Argument(help="Team abbreviation")],
):
    """Get team details"""
    try:
        team = state.api.get_team(abbrev=abbrev.upper())
        if state.json_output:
            output_json(team)
            return
        # Render the team's key fields in a bordered panel.
        panel = Panel(
            f"[bold]ID:[/bold] {team['id']}\n"
            f"[bold]Abbreviation:[/bold] {team['abbrev']}\n"
            f"[bold]Short Name:[/bold] {team.get('sname', 'N/A')}\n"
            f"[bold]Full Name:[/bold] {team.get('lname', 'N/A')}\n"
            f"[bold]Season:[/bold] {team.get('season', 'N/A')}\n"
            f"[bold]Wallet:[/bold] ${team.get('wallet', 0)}\n"
            f"[bold]Ranking:[/bold] {team.get('ranking', 'N/A')}\n"
            f"[bold]Type:[/bold] {'AI' if team.get('is_ai') else 'Human'}",
            title=f"Team: {team.get('lname', abbrev)}",
            border_style="green",
        )
        console.print(panel)
    except ValueError as e:
        # The API client raises ValueError for an unknown abbreviation.
        console.print(f"[red]Error:[/red] {e}")
        raise typer.Exit(1)
    except Exception as e:
        handle_error(e, f"Team '{abbrev}' may not exist.")
@team_app.command("cards")
def team_cards(
    abbrev: Annotated[str, typer.Argument(help="Team abbreviation")],
    limit: Annotated[int, typer.Option("--limit", "-n", help="Max cards to show")] = 50,
):
    """List team's cards"""
    try:
        # Resolve the abbreviation to a team id, then fetch its cards.
        team = state.api.get_team(abbrev=abbrev.upper())
        cards = state.api.list_cards(team_id=team["id"])
        if state.json_output:
            output_json(cards)
            return
        if not cards:
            console.print(f"[yellow]Team {abbrev} has no cards[/yellow]")
            return
        rows = []
        # Only the first `limit` cards are rendered; the footer notes the rest.
        for c in cards[:limit]:
            player = c.get("player", {})
            rows.append(
                [
                    c["id"],
                    player.get("p_name", "Unknown"),
                    player.get("rarity", ""),
                    c.get("value", 0),
                ]
            )
        output_table(
            f"Cards for {team.get('lname', abbrev)} ({len(cards)} total)",
            ["Card ID", "Player", "Rarity", "Value"],
            rows,
        )
        if len(cards) > limit:
            console.print(
                f"\n[dim]Showing {limit} of {len(cards)} cards. Use --limit to see more.[/dim]"
            )
    except ValueError as e:
        console.print(f"[red]Error:[/red] {e}")
        raise typer.Exit(1)
    except Exception as e:
        handle_error(e)
# ============================================================================
# Pack Commands
# ============================================================================
@pack_app.command("list")
def pack_list(
    team: Annotated[
        Optional[str], typer.Option("--team", "-t", help="Filter by team abbrev")
    ] = None,
    opened: Annotated[
        Optional[bool],
        typer.Option("--opened/--unopened", help="Filter by opened status"),
    ] = None,
    limit: Annotated[int, typer.Option("--limit", "-n", help="Max packs to show")] = 50,
):
    """List packs"""
    try:
        team_id = None
        team_name = None
        # Resolve the abbreviation to a numeric id for the API filter.
        if team:
            team_obj = state.api.get_team(abbrev=team.upper())
            team_id = team_obj["id"]
            team_name = team_obj.get("sname", team)
        packs = state.api.list_packs(
            team_id=team_id, opened=opened, new_to_old=True, limit=limit
        )
        if state.json_output:
            output_json(packs)
            return
        if not packs:
            console.print("[yellow]No packs found[/yellow]")
            return
        rows = []
        for p in packs:
            pack_team = p.get("team", {})
            pack_type = p.get("pack_type", {})
            # A pack is considered opened when it carries an open_time.
            is_opened = "Yes" if p.get("open_time") else "No"
            rows.append(
                [
                    p["id"],
                    pack_team.get("abbrev", "N/A"),
                    pack_type.get("name", "Unknown"),
                    is_opened,
                ]
            )
        # Build the title from whichever filters were applied.
        title = "Packs"
        if team_name:
            title += f" - {team_name}"
        if opened is True:
            title += " (Opened)"
        elif opened is False:
            title += " (Unopened)"
        output_table(title, ["Pack ID", "Team", "Type", "Opened"], rows)
    except ValueError as e:
        console.print(f"[red]Error:[/red] {e}")
        raise typer.Exit(1)
    except Exception as e:
        handle_error(e)
@pack_app.command("today")
def pack_today():
    """Show packs opened today analytics"""
    # Delegates to the top-level `status` command, which already prints the
    # packs-opened-today summary; kept as an alias under `pack` for discoverability.
    status()
@pack_app.command("distribute")
def pack_distribute(
    num: Annotated[
        int, typer.Option("--num", "-n", help="Number of packs per team")
    ] = 5,
    exclude: Annotated[
        Optional[List[str]],
        typer.Option("--exclude", "-x", help="Team abbrevs to exclude"),
    ] = None,
    pack_type: Annotated[
        int,
        typer.Option(
            "--pack-type",
            help="1=Standard, 2=Starter, 3=Premium, 4=Check-In, 5=MVP, 6=All Star, 7=Mario, 8=Team Choice, 9=Promo Choice",
        ),
    ] = 1,
    cardset: Annotated[
        Optional[int],
        typer.Option(
            "--cardset", "-c", help="Cardset ID (required for Promo Choice packs)"
        ),
    ] = None,
    dry_run: Annotated[
        bool, typer.Option("--dry-run", help="Show what would be done")
    ] = False,
):
    """Distribute packs to all human teams"""
    try:
        if dry_run:
            # Preview only: mirror the qualification rules (human teams in the
            # current season, excluding gauntlet teams and any explicitly
            # excluded abbreviations) without creating any packs.
            current = state.api.get("current")
            season = current["season"]
            all_teams = state.api.list_teams(season=season)
            exclude_upper = [e.upper() for e in (exclude or [])]
            qualifying = [
                t
                for t in all_teams
                if not t["is_ai"]
                and "gauntlet" not in t["abbrev"].lower()
                and t["abbrev"].upper() not in exclude_upper
            ]
            console.print(
                "\n[bold cyan]Pack Distribution Preview (DRY RUN)[/bold cyan]\n"
            )
            console.print(f"[bold]Packs per team:[/bold] {num}")
            console.print(f"[bold]Pack type:[/bold] {pack_type}")
            if cardset is not None:
                console.print(f"[bold]Cardset ID:[/bold] {cardset}")
            console.print(f"[bold]Teams:[/bold] {len(qualifying)}")
            console.print(f"[bold]Total packs:[/bold] {num * len(qualifying)}")
            if exclude:
                console.print(f"[bold]Excluded:[/bold] {', '.join(exclude)}")
            console.print("\n[bold]Qualifying teams:[/bold]")
            for t in qualifying:
                # Use .get for sname like the other commands do; some team
                # records may omit the short name.
                console.print(f"  - {t['abbrev']}: {t.get('sname', '')}")
            return
        result = state.api.distribute_packs(
            num_packs=num,
            exclude_team_abbrev=exclude,
            pack_type_id=pack_type,
            cardset_id=cardset,
        )
        if state.json_output:
            output_json(result)
            return
        console.print("\n[green]Distribution complete![/green]")
        console.print(f"[bold]Total packs:[/bold] {result['total_packs']}")
        console.print(f"[bold]Teams:[/bold] {result['teams_count']}")
        if exclude:
            console.print(f"[bold]Excluded:[/bold] {', '.join(exclude)}")
    except Exception as e:
        handle_error(e)
# ============================================================================
# Gauntlet Commands
# ============================================================================
@gauntlet_app.command("list")
def gauntlet_list(
    event_id: Annotated[
        Optional[int], typer.Option("--event-id", "-e", help="Filter by event ID")
    ] = None,
    active: Annotated[
        bool, typer.Option("--active", "-a", help="Only active runs")
    ] = False,
):
    """List gauntlet runs"""
    try:
        runs = state.api.list_gauntlet_runs(event_id=event_id, active_only=active)
        if state.json_output:
            output_json(runs)
            return
        if not runs:
            console.print("[yellow]No gauntlet runs found[/yellow]")
            return
        # A run with no `ended` timestamp is still in progress.
        rows = [
            [
                run["id"],
                run.get("team", {}).get("abbrev", "N/A"),
                run.get("wins", 0),
                run.get("losses", 0),
                run.get("gauntlet", {}).get("id", "N/A"),
                "Active" if run.get("ended") is None else "Ended",
            ]
            for run in runs
        ]
        title = "Gauntlet Runs"
        if event_id:
            title += f" - Event {event_id}"
        if active:
            title += " (Active Only)"
        output_table(title, ["Run ID", "Team", "W", "L", "Event", "Status"], rows)
    except Exception as e:
        handle_error(e)
@gauntlet_app.command("teams")
def gauntlet_teams(
    event_id: Annotated[
        Optional[int], typer.Option("--event-id", "-e", help="Filter by event ID")
    ] = None,
    active: Annotated[
        bool, typer.Option("--active", "-a", help="Only teams with active runs")
    ] = False,
):
    """List gauntlet teams"""
    try:
        teams = state.api.find_gauntlet_teams(event_id=event_id, active_only=active)
        if state.json_output:
            output_json(teams)
            return
        if not teams:
            console.print("[yellow]No gauntlet teams found[/yellow]")
            return
        rows = []
        for entry in teams:
            # Prefer the active run; fall back to the most recent run record.
            run = entry.get("active_run") or entry.get("run", {})
            if run:
                wins = run.get("wins", "-")
                losses = run.get("losses", "-")
            else:
                wins = losses = "-"
            rows.append(
                [entry["id"], entry["abbrev"], entry.get("sname", ""), wins, losses]
            )
        title = "Gauntlet Teams" + (" (Active)" if active else "")
        output_table(title, ["Team ID", "Abbrev", "Name", "W", "L"], rows)
    except Exception as e:
        handle_error(e)
@gauntlet_app.command("cleanup")
def gauntlet_cleanup(
    team_abbrev: Annotated[
        str, typer.Argument(help="Team abbreviation (e.g., Gauntlet-SKB)")
    ],
    event_id: Annotated[
        int, typer.Option("--event-id", "-e", help="Event ID (required)")
    ],
    yes: Annotated[bool, typer.Option("--yes", "-y", help="Skip confirmation")] = False,
):
    """Clean up a gauntlet team (wipe cards, delete packs, end run)"""
    try:
        # Find the team
        team = state.api.get_team(abbrev=team_abbrev)
        team_id = team["id"]
        # Gather what would be affected so the preview is accurate.
        cards = state.api.list_cards(team_id=team_id)
        packs = state.api.list_packs(team_id=team_id, opened=False)
        # Find active run
        runs = state.api.list_gauntlet_runs(
            event_id=event_id, team_id=team_id, active_only=True
        )
        active_run = runs[0] if runs else None
        console.print(f"\n[bold cyan]Gauntlet Cleanup: {team_abbrev}[/bold cyan]\n")
        console.print(f"[bold]Team ID:[/bold] {team_id}")
        console.print(f"[bold]Cards to wipe:[/bold] {len(cards)}")
        console.print(f"[bold]Packs to delete:[/bold] {len(packs)}")
        console.print(
            f"[bold]Active run:[/bold] {'Yes (ID: ' + str(active_run['id']) + ')' if active_run else 'No'}"
        )
        if not yes:
            console.print("\n[yellow]This is a destructive operation![/yellow]")
            console.print("Use --yes flag to confirm.")
            # BUG FIX: `raise typer.Exit(0)` here was swallowed by the
            # `except Exception` handler below (typer.Exit subclasses
            # Exception via click's Exit/RuntimeError), which routed the
            # clean "not confirmed" path through handle_error. Returning
            # gives the intended zero exit status.
            return
        # Perform cleanup
        results = []
        # 1. Wipe cards (unassigns them from the team; reversible by reassigning)
        if cards:
            state.api.wipe_team_cards(team_id)
            results.append(f"Wiped {len(cards)} cards")
        # 2. Delete unopened packs (irreversible)
        for pack in packs:
            state.api.delete_pack(pack["id"])
        if packs:
            results.append(f"Deleted {len(packs)} packs")
        # 3. End the active gauntlet run, if any (run record stays in DB)
        if active_run:
            state.api.end_gauntlet_run(active_run["id"])
            results.append(f"Ended run {active_run['id']}")
        console.print("\n[green]Cleanup complete![/green]")
        for r in results:
            console.print(f"  - {r}")
    except ValueError as e:
        console.print(f"[red]Error:[/red] {e}")
        raise typer.Exit(1)
    except Exception as e:
        handle_error(e)
# ============================================================================
# Player Commands
# ============================================================================
@player_app.command("get")
def player_get(
    player_id: Annotated[int, typer.Argument(help="Player ID")],
):
    """Get player by ID"""
    try:
        player = state.api.get_player(player_id=player_id)
        if state.json_output:
            output_json(player)
            return
        # Collect the populated position slots (pos_1 .. pos_8).
        positions = [
            pos for pos in (player.get(f"pos_{i}") for i in range(1, 9)) if pos
        ]
        cardset = player.get("cardset", {})
        # Rarity may be a plain string or a nested object.
        rarity = player.get("rarity", {})
        rarity_name = rarity.get("name", "N/A") if isinstance(rarity, dict) else rarity
        details = (
            f"[bold]ID:[/bold] {player['player_id']}\n"
            f"[bold]Name:[/bold] {player.get('p_name', 'Unknown')}\n"
            f"[bold]Rarity:[/bold] {rarity_name}\n"
            f"[bold]Cost:[/bold] {player.get('cost', 0)}\n"
            f"[bold]Positions:[/bold] {', '.join(positions) if positions else 'N/A'}\n"
            f"[bold]Cardset:[/bold] {cardset.get('name', 'N/A')} (ID: {cardset.get('id', 'N/A')})\n"
            f"[bold]Hand:[/bold] {player.get('hand', 'N/A')}"
        )
        console.print(
            Panel(
                details,
                title=f"Player: {player.get('p_name', 'Unknown')}",
                border_style="blue",
            )
        )
    except Exception as e:
        handle_error(e, f"Player ID {player_id} may not exist.")
@player_app.command("list")
def player_list(
    rarity: Annotated[
        Optional[str], typer.Option("--rarity", "-r", help="Filter by rarity")
    ] = None,
    cardset: Annotated[
        Optional[int], typer.Option("--cardset", "-c", help="Filter by cardset ID")
    ] = None,
    limit: Annotated[
        int, typer.Option("--limit", "-n", help="Max players to show")
    ] = 50,
):
    """List players"""
    try:
        players = state.api.list_players(cardset_id=cardset, rarity=rarity)
        if state.json_output:
            output_json(players)
            return
        if not players:
            console.print("[yellow]No players found[/yellow]")
            return
        rows = []
        for entry in players[:limit]:
            # Rarity may arrive as a plain string or a nested object.
            raw_rarity = entry.get("rarity", {})
            display_rarity = (
                raw_rarity.get("name", "") if isinstance(raw_rarity, dict) else raw_rarity
            )
            rows.append(
                [
                    entry["player_id"],
                    entry.get("p_name", "Unknown"),
                    display_rarity,
                    entry.get("cost", 0),
                    entry.get("cardset", {}).get("name", "N/A"),
                ]
            )
        title_bits = ["Players"]
        if rarity:
            title_bits.append(f"- {rarity}")
        if cardset:
            title_bits.append(f"- Cardset {cardset}")
        output_table(
            " ".join(title_bits), ["ID", "Name", "Rarity", "Cost", "Cardset"], rows
        )
        if len(players) > limit:
            console.print(
                f"\n[dim]Showing {limit} of {len(players)} players. Use --limit to see more.[/dim]"
            )
    except Exception as e:
        handle_error(e)
# ============================================================================
# Entry Point
# ============================================================================
if __name__ == "__main__":
    # Launch the Typer application (dispatches to the command groups above).
    app()

File diff suppressed because it is too large Load Diff

View File

@ -1,5 +0,0 @@
Metadata-Version: 2.4
Name: pd-plan
Version: 1.0.0
Summary: Paper Dynasty initiative tracker — local SQLite CLI for cross-project priorities
Requires-Python: >=3.10

View File

@ -1,7 +0,0 @@
cli.py
pyproject.toml
pd_plan.egg-info/PKG-INFO
pd_plan.egg-info/SOURCES.txt
pd_plan.egg-info/dependency_links.txt
pd_plan.egg-info/entry_points.txt
pd_plan.egg-info/top_level.txt

View File

@ -1,2 +0,0 @@
[console_scripts]
pd-plan = cli:main

View File

@ -1,12 +0,0 @@
[project]
name = "pd-plan"
version = "1.0.0"
description = "Paper Dynasty initiative tracker — local SQLite CLI for cross-project priorities"
requires-python = ">=3.10"
[project.scripts]
pd-plan = "cli:main"
[build-system]
requires = ["setuptools>=68.0"]
build-backend = "setuptools.build_meta"

View File

@ -1,195 +0,0 @@
# Paper Dynasty API Reference
**Load this when**: You need API endpoint details, authentication setup, or Python client examples.
---
## Authentication
All API requests require Bearer token:
```bash
export API_TOKEN='your-token-here'
```
Headers:
```python
{'Authorization': f'Bearer {API_TOKEN}'}
```
---
## Environments
| Environment | URL | When to Use |
|-------------|-----|-------------|
| **Production** | `https://pd.manticorum.com/api/v2/` | Default for all operations |
| **Development** | `https://pddev.manticorum.com/api/v2/` | Testing only |
Set via:
```bash
export DATABASE='prod' # or 'dev'
```
---
## Key Endpoints
### Teams
- `GET /teams` - List teams (params: `season`, `abbrev`, `event`)
- `GET /teams/{id}` - Get team by ID
### Cards
- `GET /cards` - List cards (params: `team_id`, `player_id`)
- `POST /cards/wipe-team/{team_id}` - Unassign all team cards
### Packs
- `GET /packs` - List packs
- Params: `team_id`, `opened` (true/false), `new_to_old` (true/false), `limit` (e.g., 200, 1000, 2000)
- Example: `/packs?opened=true&new_to_old=true&limit=200` (200 most recently opened packs)
- Note: Use extended timeout for large limits (>1000)
- `POST /packs` - Create packs (bulk distribution)
- Payload: `{'packs': [{'team_id': int, 'pack_type_id': int, 'pack_cardset_id': int|None}]}`
- `DELETE /packs/{id}` - Delete pack
### Gauntlet Runs
- `GET /gauntletruns` - List runs (params: `gauntlet_id`, `team_id`, `is_active`)
- `PATCH /gauntletruns/{id}?ended=true` - End run
### Players
- `GET /players` - List players (params: `cardset`, `rarity`)
- `GET /players/{id}` - Get player by ID
### Results
- `GET /results` - List game results (params: `season`, `team_id`)
### Stats
- `GET /batstats` - Batting statistics
- `GET /pitstats` - Pitching statistics
---
## Using the API Client
### Basic Setup
```python
from api_client import PaperDynastyAPI
# Initialize (reads API_TOKEN from environment)
api = PaperDynastyAPI(environment='prod', verbose=True)
# Or provide token directly
api = PaperDynastyAPI(environment='dev', token='your-token')
```
### Common Operations
```python
# Get a team
team = api.get_team(abbrev='SKB')
team = api.get_team(team_id=69)
# List teams
all_teams = api.list_teams()
season_teams = api.list_teams(season=5)
# List cards for a team
cards = api.list_cards(team_id=69)
# Find gauntlet teams with active runs
active_gauntlet_teams = api.find_gauntlet_teams(event_id=8, active_only=True)
# List gauntlet runs
runs = api.list_gauntlet_runs(event_id=8, active_only=True)
# Get player info
player = api.get_player(player_id=12345)
# List all MVP players
mvp_players = api.list_players(rarity='MVP')
```
### Gauntlet Operations
```python
# Find team
team = api.get_team(abbrev='Gauntlet-SKB')
# Wipe cards
api.wipe_team_cards(team_id=team['id'])
# Delete packs
packs = api.list_packs(team_id=team['id'])
for pack in packs:
api.delete_pack(pack['id'])
# End run
runs = api.list_gauntlet_runs(team_id=team['id'], active_only=True)
for run in runs:
api.end_gauntlet_run(run['id'])
```
### Pack Distribution
```python
# Distribute packs to all teams
result = api.distribute_packs(num_packs=10)
print(f"Distributed {result['total_packs']} to {result['teams_count']} teams")
# Distribute with exclusions
result = api.distribute_packs(num_packs=11, exclude_team_abbrev=['CAR', 'SKB'])
# Create specific packs for one team
api.create_packs([
{'team_id': 31, 'pack_type_id': 1, 'pack_cardset_id': None}
for _ in range(5)
])
# Create Team Choice pack with specific cardset
api.create_packs([{
'team_id': 31,
'pack_type_id': 8, # Team Choice
'pack_cardset_id': 27 # 2005 Live
}])
```
### Analytics Operations
```python
# Get packs opened today (built-in analytics)
result = api.get_packs_opened_today()
print(f"{result['total']} packs opened by {len(result['teams'])} teams")
for team in result['teams']:
print(f" {team['abbrev']}: {team['packs']} packs")
# Get recent opened packs
recent_packs = api.list_packs(opened=True, new_to_old=True, limit=200)
# Get unopened packs for a team
unopened = api.list_packs(team_id=69, opened=False)
# Large query with extended timeout
all_recent = api.list_packs(opened=True, limit=2000, timeout=30)
# Custom analytics (filter by specific criteria)
from datetime import datetime, timezone, timedelta
packs = api.list_packs(opened=True, limit=1000, timeout=30)
yesterday = (datetime.now(timezone.utc) - timedelta(days=1)).date()
yesterday_packs = [
p for p in packs
if p.get('open_time') and
datetime.fromtimestamp(p['open_time']/1000, tz=timezone.utc).date() == yesterday
]
```
---
## API Client Location
**File**: `~/.claude/skills/paper-dynasty/api_client.py`
**Test connection**:
```bash
cd ~/.claude/skills/paper-dynasty
python api_client.py --env prod --verbose
```

View File

@ -1,59 +0,0 @@
# Paper Dynasty CLI Overview
**Load this when**: You need to know which CLI command to use. Load the linked file for full syntax and flags.
---
## Global Options
These apply to all `paperdomo` commands:
```bash
--env prod|dev # Environment (default: prod)
--json # Output as JSON
--verbose / -v # Show API request details
--yes / -y # Skip confirmation for destructive operations
```
---
## paperdomo Command Groups
**Shell alias**: `paperdomo` | **Full path**: `python ~/.claude/skills/paper-dynasty/cli.py`
| Command Group | What it does | Details |
|---------------|-------------|---------|
| `status` | Packs opened today summary | (inline — no subfile needed) |
| `health` | API health check | (inline — no subfile needed) |
| `team` | List teams, get details, view team cards | Load `cli/team.md` |
| `pack` | List packs, today's analytics, distribute to teams | Load `cli/pack.md` |
| `player` | Get player by ID, list/filter players | Load `cli/player.md` |
| `gauntlet` | List runs, list teams, cleanup gauntlet teams | Load `cli/gauntlet.md` |
```bash
# Quick inline commands (no extra file needed)
python ~/.claude/skills/paper-dynasty/cli.py status # Packs opened today
python ~/.claude/skills/paper-dynasty/cli.py health # API health check
```
---
## pd-cards Command Groups
**Location**: `/mnt/NV2/Development/paper-dynasty/card-creation`
| Command Group | What it does | Details |
|---------------|-------------|---------|
| `custom` | List, preview, submit, create custom cards | Load `cli/pd-cards.md` |
| `scouting` | Generate scouting reports for batters/pitchers | Load `cli/pd-cards.md` |
| `retrosheet` | Process season stats, validate positions, arm/defense ratings | Load `cli/pd-cards.md` |
| `upload` | S3 uploads, refresh card images, validate | Load `cli/pd-cards.md` |
| `live-series` | Update and check live series status | Load `cli/pd-cards.md` |
---
## Quick Status Reference
- **Current Live Cardset**: 27 (2005 Live)
- **Default Pack Type**: 1 (Standard)
- **Rarities**: Replacement < Reserve < Starter < All-Star < MVP < Hall of Fame

View File

@ -1,52 +0,0 @@
# paperdomo gauntlet Commands
**Load this when**: You need gauntlet list, teams, or cleanup command syntax.
```bash
PD="python ~/.claude/skills/paper-dynasty/cli.py"
```
---
## Commands
```bash
$PD gauntlet list [--event-id N] [--active] # List gauntlet runs
$PD gauntlet teams [--event-id N] [--active]        # List gauntlet teams
$PD gauntlet cleanup TEAM_ABBREV --event-id N --yes # Cleanup a gauntlet team
```
## Examples
```bash
$PD gauntlet list # All gauntlet runs
$PD gauntlet list --active # Active runs only
$PD gauntlet list --event-id 8 # Runs for event 8
$PD gauntlet list --event-id 8 --active # Active runs in event 8
$PD gauntlet teams # All gauntlet teams
$PD gauntlet teams --active # Active gauntlet teams only
$PD gauntlet cleanup Gauntlet-SKB --event-id 8 --yes # Wipe team (skip confirm)
$PD gauntlet cleanup Gauntlet-SKB -e 9 -y # Short flags
```
## Cleanup Safety
**Safe to clean**: Gauntlet teams (temporary), completed runs, eliminated teams
**Never clean**: Regular season teams, teams with active gameplay, before tournament ends
Cleanup effects:
| Data | Action | Reversible? |
|------|--------|-------------|
| Cards | Unassigned (team = NULL) | Yes (reassign) |
| Packs | Deleted | No |
| Run Record | Ended (timestamp set) | Kept in DB |
| Team/Results/Stats | Preserved | Kept in DB |
## Global Options
```bash
--env prod|dev # Environment (default: prod)
--json # Output as JSON
--verbose / -v # Show API request details
--yes / -y # Skip confirmation prompt
```

View File

@ -1,58 +0,0 @@
# paperdomo pack Commands
**Load this when**: You need pack list, today, or distribute command syntax, or pack type IDs.
```bash
PD="python ~/.claude/skills/paper-dynasty/cli.py"
```
---
## Commands
```bash
$PD pack list [--team SKB] [--opened] [--unopened] # List packs with optional filters
$PD pack today # Analytics for packs opened today
$PD pack distribute --num N # Distribute N packs to all teams
$PD pack distribute --num N --exclude ABBREV # Distribute with team exclusion
```
## Examples
```bash
$PD pack list # All packs
$PD pack list --team SKB # SKB's packs only
$PD pack list --team SKB --unopened # SKB's unopened packs
$PD pack today # Today's open summary
$PD pack distribute --num 10 # Give 10 packs to every team
$PD pack distribute --num 11 --exclude CAR # Skip CAR team
```
## Global Options
```bash
--env prod|dev # Environment (default: prod)
--json # Output as JSON
--verbose / -v # Show API request details
--yes / -y # Skip confirmation for destructive operations
```
---
## Pack Types Reference
Use these IDs when creating packs via the API or scripts.
| ID | Name | Description |
|----|------|-------------|
| 1 | Standard | Default pack type |
| 2 | Starter | Starter deck pack |
| 3 | Premium | Premium pack |
| 4 | Check-In Player | Daily check-in reward |
| 5 | MVP | MVP-only pack |
| 6 | All Star | All-Star pack |
| 7 | Mario | Special Mario pack |
| 8 | Team Choice | 1 card, choice of 4 from selected MLB club |
| 9 | Promo Choice | Promotional choice pack |
**Default Pack Type**: 1 (Standard) for most operations

View File

@ -1,80 +0,0 @@
# pd-cards CLI Reference
**Load this when**: You need pd-cards command syntax for custom cards, scouting, retrosheet, S3 uploads, or live series.
**Location**: `/mnt/NV2/Development/paper-dynasty/card-creation`
```bash
cd /mnt/NV2/Development/paper-dynasty/card-creation
```
---
## Custom Cards
```bash
pd-cards custom list # List profiles
pd-cards custom preview <name> # Preview ratings
pd-cards custom submit <name> # Submit to DB
pd-cards custom new -n "Name" -t batter -h L # New template
```
---
## Scouting Reports
```bash
pd-cards scouting all -c 27 # All reports for cardset 27
pd-cards scouting batters -c 27 -c 29 # Batters only (multiple cardsets)
pd-cards scouting pitchers -c 27 # Pitchers only
```
---
## Retrosheet Processing
```bash
pd-cards retrosheet process 2005 -c 27 -d Live # Full season processing
pd-cards retrosheet validate 27 # Check positions for cardset 27
pd-cards retrosheet arms 2005 -e events.csv # OF arm ratings
pd-cards retrosheet defense 2005 --output "dir/" # Fetch defense stats
```
**Retrosheet Flags**:
- `--end YYYYMMDD` - End date for data processing
- `--start YYYYMMDD` - Start date for data processing
- `--season-pct FLOAT` - Season percentage (0.0-1.0)
- `--cardset-id, -c INT` - Target cardset ID
- `--description, -d TEXT` - "Live" or "Month PotM"
- `--dry-run, -n` - Preview without database changes
- `--last-week-ratio FLOAT` - Recency bias for last week
- `--last-twoweeks-ratio FLOAT` - Recency bias for last 2 weeks
- `--last-month-ratio FLOAT` - Recency bias for last month
---
## S3 Uploads
```bash
pd-cards upload s3 -c "2005 Live" # Upload to S3
pd-cards upload s3 -c "2005 Live" --limit 10 # Test with limit
pd-cards upload refresh -c "2005 Live" # Regenerate card images
pd-cards upload check -c "2005 Live" # Validate only
```
**Upload Flags**:
- `--cardset, -c` - Cardset name (required)
- `--start-id` - Resume from player ID
- `--limit, -l` - Max cards to process
- `--no-upload` - Validate only, no upload
- `--skip-batters` / `--skip-pitchers` - Skip card types
- `--dry-run, -n` - Preview mode
---
## Live Series
```bash
pd-cards live-series update -c "2025 Season" -g 81 # Update with 81 games played
pd-cards live-series status # Check live series status
```

View File

@ -1,38 +0,0 @@
# paperdomo player Commands
**Load this when**: You need player get or list command syntax.
```bash
PD="python ~/.claude/skills/paper-dynasty/cli.py"
```
---
## Commands
```bash
$PD player get ID # Get player by numeric ID
$PD player list [--rarity RARITY] [--cardset ID] # List/filter players
```
## Examples
```bash
$PD player get 12345 # Player with ID 12345
$PD player get 12785 --json # Machine-readable output
$PD player list --rarity "Hall of Fame" # All Hall of Fame players
$PD player list --rarity MVP --cardset 27 # MVP players in cardset 27
$PD player list --cardset 27 # All players in cardset 27
```
## Rarity Values (lowest to highest)
`Replacement` < `Reserve` < `Starter` < `All-Star` < `MVP` < `Hall of Fame`
## Global Options
```bash
--env prod|dev # Environment (default: prod)
--json # Output as JSON
--verbose / -v # Show API request details
```

View File

@ -1,35 +0,0 @@
# paperdomo team Commands
**Load this when**: You need team list, get, or cards command syntax.
```bash
PD="python ~/.claude/skills/paper-dynasty/cli.py"
```
---
## Commands
```bash
$PD team list [--season N] # List all teams, optionally filtered by season
$PD team get SKB # Get details for a specific team (by abbrev)
$PD team cards SKB # List cards owned by a team
```
## Examples
```bash
$PD team list # All teams
$PD team list --season 10 # Teams in season 10
$PD team get SKB # SKB team details
$PD team cards SKB # SKB's card collection
$PD team cards SKB --json # Machine-readable output
```
## Global Options
```bash
--env prod|dev # Environment (default: prod)
--json # Output as JSON
--verbose / -v # Show API request details
```

View File

@ -1,104 +0,0 @@
# Paper Dynasty Database Schema Reference
**Load this when**: You need details about database models, cardset IDs, pack types, or rarity values.
---
## Core Models
**Teams** (`Team`):
- Regular teams (season-based, permanent)
- Gauntlet teams (temporary, event-specific, abbrev contains "Gauntlet")
- Fields: `id`, `abbrev`, `lname`, `season`, `wallet`, `ranking`
**Players** (`Player`):
- Baseball players with card variants
- Fields: `player_id`, `p_name`, `cardset`, `rarity`, `cost`, positions
- Linked to: `BattingCard`, `PitchingCard`, `CardPosition`
**Cards** (`Card`):
- Individual card instances owned by teams
- Fields: `id`, `player`, `team`, `pack`, `value`
- Team can be NULL (unassigned/wiped cards)
**Packs** (`Pack`):
- Card packs owned by teams
- Fields: `id`, `team`, `pack_type`, `open_time`
**Gauntlet Runs** (`GauntletRun`):
- Tournament run tracking
- Fields: `id`, `team`, `gauntlet` (event_id), `wins`, `losses`, `created`, `ended`
- Active run: `ended == 0`
- Ended run: `ended > 0` (timestamp)
**Results** (`Result`):
- Game outcomes
- Fields: `away_team`, `home_team`, scores, `season`, `week`, `ranked`
**Stats**:
- `BattingStat`: Plate appearance stats per game
- `PitchingStat`: Pitching stats per game
---
## Cardsets
**Current Live Cardset**: 27 (2005 Live)
**Known Cardsets** (ID: Name):
| ID | Name | Status |
|----|------|--------|
| 24 | 2025 Season | Ranked Legal, In Packs |
| 25 | 2025 Promos | Ranked Legal, In Packs |
| 26 | 2025 Custom | Ranked Legal, In Packs |
| 27 | 2005 Live | Ranked Legal, In Packs |
| 28 | 2005 Promos | Ranked Legal, In Packs |
| 29 | 2005 Custom | Ranked Legal, In Packs |
**Ranked Legal Cardsets**: [24, 25, 26, 27, 28, 29]
**Gauntlet Events**: Have specific cardset configurations
*Note: Cardset IDs are static - expand this reference as we discover new cardsets via `api.get('cardsets')`*
---
## Pack Types
**Complete Pack Type Reference** (ID: Name):
| ID | Name | Description |
|----|------|-------------|
| 1 | Standard | Default pack type |
| 2 | Starter | Starter deck pack |
| 3 | Premium | Premium pack |
| 4 | Check-In Player | Daily check-in reward |
| 5 | MVP | MVP-only pack |
| 6 | All Star | All-Star pack |
| 7 | Mario | Special Mario pack |
| 8 | Team Choice | 1 card, choice of 4 from selected MLB club |
| 9 | Promo Choice | Promotional choice pack |
**Default Pack Type**: 1 (Standard) for most operations
---
## Rarities
From lowest to highest:
| Rank | Rarity |
|------|--------|
| 1 | Replacement |
| 2 | Reserve |
| 3 | Starter |
| 4 | All-Star |
| 5 | MVP |
| 6 | Hall of Fame |
---
## Related Files
- **Full Schema**: `/mnt/NV2/Development/paper-dynasty/database/app/db_engine.py`
- **API Models**: `/mnt/NV2/Development/paper-dynasty/database/app/routers_v2/`

View File

@ -1,38 +0,0 @@
# Paper Dynasty Scripts
Portable scripts for Paper Dynasty operations.
## Available Scripts
### distribute_packs.py
Distribute packs to all human-controlled teams in the league.
**Usage**:
```bash
# Dev environment
python distribute_packs.py --num-packs 10
# Production
DATABASE=prod python distribute_packs.py --num-packs 11 --exclude-team-abbrev CAR
```
**Features**:
- Automatically filters AI teams and gauntlet teams
- Supports team exclusions
- Works with both prod and dev environments
- Uses Paper Dynasty API client for all operations
**Original Location**: `/mnt/NV2/Development/paper-dynasty/discord-app/manual_pack_distribution.py`
### gauntlet_cleanup.py
Clean up gauntlet teams after events (wipe cards, delete packs, end runs).
### validate_database.py
Validate database integrity and relationships.
### generate_summary.py
Generate release summaries for card updates.

View File

@ -1,134 +0,0 @@
#!/usr/bin/env python3
"""
Distribute packs to all human-controlled teams in Paper Dynasty
Standalone script for pack distribution that can be used from the Paper Dynasty skill.
Works with both production and development environments.
This script uses the Paper Dynasty API client for all operations.
"""
import argparse
import logging
import os
import sys
from pathlib import Path
# Add parent directory to path to import api_client
sys.path.insert(0, str(Path(__file__).parent.parent))
from api_client import PaperDynastyAPI
# Set up logging
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger("pack_distribution")
def distribute_packs(
    num_packs: int = 5,
    exclude_team_abbrev: "list[str] | None" = None,
    pack_type_id: int = 1,
    cardset_id: "int | None" = None,
):
    """Distribute packs to all human-controlled teams using the Paper Dynasty API client.

    Args:
        num_packs: Number of packs to give to each team (default: 5)
        exclude_team_abbrev: List of team abbreviations to exclude (default: None)
        pack_type_id: Pack type ID (default: 1 = Standard packs)
        cardset_id: Cardset ID for pack types that require it (e.g., Promo Choice = type 9)

    Exits the process with status 1 on configuration or API errors.
    """
    # Fixed annotations: these parameters accept None, so they are declared
    # optional instead of the previous `list[str] = None` / `int = None`.
    if exclude_team_abbrev is None:
        exclude_team_abbrev = []
    # Environment selection via DATABASE=prod|dev (defaults to dev for safety).
    database_env = os.getenv("DATABASE", "dev").lower()
    try:
        # Initialize API client (reads API_TOKEN from the environment)
        api = PaperDynastyAPI(environment=database_env, verbose=True)
        # Delegate the actual distribution to the API client
        result = api.distribute_packs(
            num_packs=num_packs,
            exclude_team_abbrev=exclude_team_abbrev,
            pack_type_id=pack_type_id,
            cardset_id=cardset_id,
        )
        # Log final summary
        logger.info(
            f"\n🎉 All done! Distributed {result['total_packs']} packs to {result['teams_count']} teams"
        )
    except ValueError as e:
        # Raised for configuration problems (e.g. missing API_TOKEN)
        logger.error(f"Configuration error: {e}")
        sys.exit(1)
    except Exception as e:
        logger.error(f"Unexpected error: {e}")
        sys.exit(1)
if __name__ == "__main__":
    # CLI wrapper: parse arguments, then delegate to distribute_packs().
    parser = argparse.ArgumentParser(
        description="Distribute packs to all human-controlled teams",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
# Give 5 packs to all teams (default)
python distribute_packs.py
# Give 10 packs to all teams
python distribute_packs.py --num-packs 10
# Give 5 packs, excluding certain teams
python distribute_packs.py --exclude-team-abbrev NYY BOS
# Give 3 packs, excluding one team
python distribute_packs.py --num-packs 3 --exclude-team-abbrev LAD
# Production environment
DATABASE=prod python distribute_packs.py --num-packs 10
# Exclude specific team in production
DATABASE=prod python distribute_packs.py --num-packs 11 --exclude-team-abbrev CAR
Environment Variables:
API_TOKEN - Required: API authentication token
DATABASE - Optional: 'dev' (default) or 'prod'
""",
    )
    parser.add_argument(
        "--num-packs",
        type=int,
        default=5,
        help="Number of packs to give to each team (default: 5)",
    )
    parser.add_argument(
        "--exclude-team-abbrev",
        nargs="*",
        default=[],
        help="Team abbreviations to exclude (space-separated, e.g., NYY BOS LAD)",
    )
    parser.add_argument(
        "--pack-type-id",
        type=int,
        default=1,
        help="Pack type ID (default: 1 = Standard packs)",
    )
    parser.add_argument(
        "--cardset-id",
        type=int,
        default=None,
        help="Cardset ID for pack types that require it (e.g., Promo Choice = type 9)",
    )
    args = parser.parse_args()
    # Hand the parsed options straight through to the worker function.
    distribute_packs(
        num_packs=args.num_packs,
        exclude_team_abbrev=args.exclude_team_abbrev,
        pack_type_id=args.pack_type_id,
        cardset_id=args.cardset_id,
    )

View File

@ -1,273 +0,0 @@
#!/usr/bin/env bash
# ecosystem_status.sh — Paper Dynasty cross-project dashboard
# Usage: GITEA_TOKEN=<token> ./ecosystem_status.sh
# or: ./ecosystem_status.sh (auto-reads from gitea-mcp config if available)
set -euo pipefail

# ---------------------------------------------------------------------------
# Auth
# ---------------------------------------------------------------------------
if [[ -z "${GITEA_TOKEN:-}" ]]; then
    # Try to pull token from the gitea-mcp config (standard claude-code location)
    GITEA_MCP_CONFIG="${HOME}/.config/claude-code/mcp-servers/gitea-mcp.json"
    if [[ -f "$GITEA_MCP_CONFIG" ]]; then
        # FIX: the python helper reads GITEA_MCP_CONFIG from its environment,
        # but a plain shell assignment is NOT exported to child processes, so
        # the lookup always saw an empty path and silently failed. Pass the
        # variable explicitly via an env-prefix on the command.
        GITEA_TOKEN=$(GITEA_MCP_CONFIG="$GITEA_MCP_CONFIG" python3 -c "
import json, sys, os
cfg_path = os.environ.get('GITEA_MCP_CONFIG', '')
try:
    cfg = json.load(open(cfg_path))
    env = cfg.get('env', {})
    print(env.get('GITEA_TOKEN', env.get('GITEA_API_TOKEN', '')))
except Exception:
    print('')
" 2>/dev/null)
    fi
fi

# Hard requirement: without a token every API call below would 401.
if [[ -z "${GITEA_TOKEN:-}" ]]; then
    echo "ERROR: GITEA_TOKEN not set and could not be read from gitea-mcp config." >&2
    echo "       Set it with: export GITEA_TOKEN=your-token" >&2
    exit 1
fi

API_BASE="https://git.manticorum.com/api/v1"
AUTH_HEADER="Authorization: token ${GITEA_TOKEN}"

# Repositories included in the dashboard, in display order.
REPOS=(
    "cal/paper-dynasty-database"
    "cal/paper-dynasty-discord"
    "cal/paper-dynasty-card-creation"
    "cal/paper-dynasty-website"
    "cal/paper-dynasty-gameplay-webapp"
    "cal/paper-dynasty-apiproxy"
)
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
gitea_get() {
    # Fetch ${API_BASE}/<endpoint> with the auth header.
    # Prints the JSON body on success, or the literal string "null" when
    # curl fails (network error or non-2xx status via -f).
    local endpoint="$1"
    if ! curl -sf -H "$AUTH_HEADER" -H "Content-Type: application/json" \
            "${API_BASE}/${endpoint}" 2>/dev/null; then
        echo "null"
    fi
}
count_items() {
    # Print the length of a JSON array, or "?" for anything that is not a
    # parseable JSON list (including the "null" sentinel from gitea_get).
    local payload="$1"
    case "$payload" in
        ""|null)
            echo "?"
            return
            ;;
    esac
    python3 -c "
import json, sys
data = json.loads(sys.argv[1])
print(len(data) if isinstance(data, list) else '?')
" "$payload" 2>/dev/null || echo "?"
}
# ---------------------------------------------------------------------------
# Banner
# ---------------------------------------------------------------------------
TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
echo ""
echo "╔══════════════════════════════════════════════════════════╗"
echo "║ PAPER DYNASTY — ECOSYSTEM STATUS DASHBOARD ║"
printf "║ %-56s ║\n" "$TIMESTAMP"
echo "╚══════════════════════════════════════════════════════════╝"
# ---------------------------------------------------------------------------
# Per-repo summary table
# ---------------------------------------------------------------------------
echo ""
printf "%-36s %6s %5s %s\n" "REPOSITORY" "ISSUES" "PRs" "LATEST COMMIT"
printf "%-36s %6s %5s %s\n" "────────────────────────────────────" "──────" "─────" "────────────────────────────────"
TOTAL_ISSUES=0
TOTAL_PRS=0
declare -A ALL_ISSUES_JSON
declare -A ALL_PRS_JSON
for REPO in "${REPOS[@]}"; do
SHORT_NAME="${REPO#cal/}"
ISSUES_JSON=$(gitea_get "repos/${REPO}/issues?type=issues&state=open&limit=50")
ISSUE_COUNT=$(count_items "$ISSUES_JSON")
ALL_ISSUES_JSON["$REPO"]="$ISSUES_JSON"
PRS_JSON=$(gitea_get "repos/${REPO}/pulls?state=open&limit=50")
PR_COUNT=$(count_items "$PRS_JSON")
ALL_PRS_JSON["$REPO"]="$PRS_JSON"
COMMITS_JSON=$(gitea_get "repos/${REPO}/commits?limit=1")
LATEST_SHA="n/a"
LATEST_DATE=""
if [[ "$COMMITS_JSON" != "null" && -n "$COMMITS_JSON" ]]; then
LATEST_SHA=$(python3 -c "
import json,sys
data=json.loads(sys.argv[1])
if isinstance(data,list) and data:
print(data[0].get('sha','')[:7])
else:
print('n/a')
" "$COMMITS_JSON" 2>/dev/null || echo "n/a")
LATEST_DATE=$(python3 -c "
import json,sys
data=json.loads(sys.argv[1])
if isinstance(data,list) and data:
ts=data[0].get('commit',{}).get('committer',{}).get('date','')
print(ts[:10] if ts else '')
else:
print('')
" "$COMMITS_JSON" 2>/dev/null || echo "")
fi
if [[ "$ISSUE_COUNT" =~ ^[0-9]+$ ]]; then
TOTAL_ISSUES=$((TOTAL_ISSUES + ISSUE_COUNT))
fi
if [[ "$PR_COUNT" =~ ^[0-9]+$ ]]; then
TOTAL_PRS=$((TOTAL_PRS + PR_COUNT))
fi
COMMIT_LABEL="${LATEST_SHA}${LATEST_DATE:+ [${LATEST_DATE}]}"
printf "%-36s %6s %5s %s\n" "$SHORT_NAME" "$ISSUE_COUNT" "$PR_COUNT" "$COMMIT_LABEL"
done
echo ""
printf " TOTALS: %d open issues, %d open PRs across %d repos\n" \
"$TOTAL_ISSUES" "$TOTAL_PRS" "${#REPOS[@]}"
# ---------------------------------------------------------------------------
# Recent commits — last 3 per repo
# ---------------------------------------------------------------------------
echo ""
echo "═══════════════════════════════════════════════════════════════"
echo " RECENT COMMITS (last 3 per repo)"
echo "═══════════════════════════════════════════════════════════════"
for REPO in "${REPOS[@]}"; do
SHORT_NAME="${REPO#cal/}"
echo ""
echo "${SHORT_NAME}"
COMMITS_JSON=$(gitea_get "repos/${REPO}/commits?limit=3")
if [[ "$COMMITS_JSON" == "null" || -z "$COMMITS_JSON" ]]; then
echo " (could not fetch commits)"
continue
fi
python3 -c "
import json, sys
data = json.loads(sys.argv[1])
if not isinstance(data, list) or not data:
print(' (no commits)')
sys.exit(0)
for c in data:
sha = c.get('sha', '')[:7]
msg = c.get('commit', {}).get('message', '').split('\n')[0][:58]
ts = c.get('commit', {}).get('committer', {}).get('date', '')[:10]
author = c.get('commit', {}).get('committer', {}).get('name', 'unknown')[:16]
print(f' {sha} {ts} {author:<16} {msg}')
" "$COMMITS_JSON"
done
# ---------------------------------------------------------------------------
# Open issues detail
# ---------------------------------------------------------------------------
echo ""
echo "═══════════════════════════════════════════════════════════════"
echo " OPEN ISSUES"
echo "═══════════════════════════════════════════════════════════════"
FOUND_ISSUES=false
for REPO in "${REPOS[@]}"; do
SHORT_NAME="${REPO#cal/}"
ISSUES_JSON="${ALL_ISSUES_JSON[$REPO]}"
ISSUE_COUNT=$(count_items "$ISSUES_JSON")
if [[ "$ISSUE_COUNT" == "0" || "$ISSUE_COUNT" == "?" ]]; then
continue
fi
echo ""
echo "${SHORT_NAME} (${ISSUE_COUNT} open)"
FOUND_ISSUES=true
python3 -c "
import json, sys
data = json.loads(sys.argv[1])
if not isinstance(data, list):
print(' (error reading issues)')
sys.exit(0)
for i in data[:10]:
num = i.get('number', '?')
title = i.get('title', '(no title)')[:65]
labels = ', '.join(l.get('name','') for l in i.get('labels',[]))
lstr = f' [{labels}]' if labels else ''
print(f' #{num:<4} {title}{lstr}')
if len(data) > 10:
print(f' ... and {len(data)-10} more')
" "$ISSUES_JSON"
done
if [[ "$FOUND_ISSUES" == "false" ]]; then
echo ""
echo " (no open issues across all repos)"
fi
# ---------------------------------------------------------------------------
# Open PRs detail
# ---------------------------------------------------------------------------
echo ""
echo "═══════════════════════════════════════════════════════════════"
echo " OPEN PULL REQUESTS"
echo "═══════════════════════════════════════════════════════════════"
FOUND_PRS=false
for REPO in "${REPOS[@]}"; do
SHORT_NAME="${REPO#cal/}"
PRS_JSON="${ALL_PRS_JSON[$REPO]}"
PR_COUNT=$(count_items "$PRS_JSON")
if [[ "$PR_COUNT" == "0" || "$PR_COUNT" == "?" ]]; then
continue
fi
echo ""
echo "${SHORT_NAME} (${PR_COUNT} open)"
FOUND_PRS=true
python3 -c "
import json, sys
data = json.loads(sys.argv[1])
if not isinstance(data, list):
print(' (error reading PRs)')
sys.exit(0)
for pr in data:
num = pr.get('number', '?')
title = pr.get('title', '(no title)')[:65]
head = pr.get('head', {}).get('label', '')
base = pr.get('base', {}).get('label', '')
print(f' #{num:<4} {title}')
if head and base:
print(f' {head} → {base}')
" "$PRS_JSON"
done
if [[ "$FOUND_PRS" == "false" ]]; then
echo ""
echo " (no open PRs across all repos)"
fi
echo ""
echo "═══════════════════════════════════════════════════════════════"
echo " Done. Gitea: https://git.manticorum.com/cal"
echo "═══════════════════════════════════════════════════════════════"
echo ""

View File

@ -1,236 +0,0 @@
#!/usr/bin/env python3
"""
Paper Dynasty Gauntlet Team Cleanup
Uses the shared Paper Dynasty API client to clean up gauntlet teams.
Usage:
# List active gauntlet runs in event 8
python gauntlet_cleanup.py list --event-id 8 --active-only
# Wipe a gauntlet team (cards + packs + end run)
python gauntlet_cleanup.py wipe --team-abbrev Gauntlet-SKB --event-id 8
"""
import argparse
import os
import sys
from datetime import datetime
from typing import Optional
# Import shared API client from parent directory
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from api_client import PaperDynastyAPI
def list_gauntlet_runs(api: PaperDynastyAPI, event_id: Optional[int] = None, active_only: bool = False):
    """Print a formatted listing of gauntlet runs, optionally filtered.

    Exits the process with status 1 if the API call fails.
    """
    print("\n🏆 GAUNTLET RUNS")
    print("=" * 80)
    try:
        runs = api.list_gauntlet_runs(event_id=event_id, active_only=active_only)
        if runs:
            for run in runs:
                team = run['team']
                # ended == 0 means the run is still in progress.
                is_active = run['ended'] == 0
                status = "ACTIVE" if is_active else "ENDED"
                # Timestamps come back in milliseconds since epoch.
                created = datetime.fromtimestamp(run['created'] / 1000)
                print(f"\n[{status}] Team: {team['abbrev']} - {team['lname']}")
                print(f"  Run ID: {run['id']}")
                print(f"  Team ID: {team['id']}")
                print(f"  Event: {run['gauntlet_id']}")
                print(f"  Record: {run['wins']}-{run['losses']}")
                print(f"  Created: {created}")
                if not is_active:
                    ended = datetime.fromtimestamp(run['ended'] / 1000)
                    print(f"  Ended: {ended}")
        else:
            filter_desc = f" in event {event_id}" if event_id else ""
            status_desc = " active" if active_only else ""
            print(f"No{status_desc} gauntlet runs found{filter_desc}")
    except Exception as e:
        print(f"❌ Error: {e}")
        sys.exit(1)
    print("=" * 80)
def wipe_gauntlet_team(
    api: PaperDynastyAPI,
    team_abbrev: Optional[str] = None,
    team_id: Optional[int] = None,
    event_id: Optional[int] = None,
    skip_confirmation: bool = False
):
    """
    Wipe gauntlet team data: cards, packs, and optionally end run

    Args:
        api: PaperDynastyAPI instance
        team_abbrev: Team abbreviation (e.g., 'Gauntlet-SKB')
        team_id: Team ID (use this OR team_abbrev)
        event_id: If provided, end the active run in this event
        skip_confirmation: Skip user confirmation prompt

    Returns:
        dict with keys 'cards_wiped' (bool), 'packs_deleted' (int) and
        'runs_ended' (int) summarizing what actually succeeded.
    """
    # Find the team; lookup failure is fatal since nothing else can proceed.
    try:
        if team_id:
            team = api.get_team(team_id=team_id)
        elif team_abbrev:
            team = api.get_team(abbrev=team_abbrev)
        else:
            print("❌ Must provide either --team-abbrev or --team-id")
            sys.exit(1)
    except Exception as e:
        print(f"❌ Error finding team: {e}")
        sys.exit(1)
    # Re-derive both identifiers from the authoritative team record.
    team_id = team['id']
    team_abbrev = team['abbrev']
    print(f"\n✓ Found team: {team['lname']} (ID: {team_id}, Abbrev: {team_abbrev})")
    # Show what will be done before asking for confirmation.
    print("\n⚠️ CLEANUP OPERATIONS:")
    print(f" - Wipe all cards for team {team_abbrev}")
    print(f" - Delete all packs for team {team_abbrev}")
    if event_id:
        print(f" - End active gauntlet run in event {event_id}")
    # Interactive confirmation unless --yes/-y was passed (automation).
    if not skip_confirmation:
        response = input("\nType 'yes' to continue: ")
        if response.lower() != 'yes':
            print("Aborted.")
            sys.exit(0)
    # Running tally of what actually succeeded; the steps below are
    # best-effort — a failure in one does not abort the remaining steps.
    stats = {
        'cards_wiped': False,
        'packs_deleted': 0,
        'runs_ended': 0
    }
    # Step 1: Wipe cards
    print("\n📦 Wiping cards...")
    try:
        result = api.wipe_team_cards(team_id)
        print(f" ✓ Cards wiped: {result}")
        stats['cards_wiped'] = True
    except Exception as e:
        print(f" ❌ Failed to wipe cards: {e}")
    # Step 2: Delete packs (one API call per pack)
    print("\n🎁 Deleting packs...")
    try:
        packs = api.list_packs(team_id=team_id)
        print(f" Found {len(packs)} packs")
        for pack in packs:
            api.delete_pack(pack['id'])
            stats['packs_deleted'] += 1
        print(f" ✓ Deleted {stats['packs_deleted']} packs")
    except Exception as e:
        print(f" ❌ Failed to delete packs: {e}")
    # Step 3: End gauntlet run (only when an event was specified)
    if event_id:
        print(f"\n🏃 Ending gauntlet run in event {event_id}...")
        try:
            runs = api.list_gauntlet_runs(team_id=team_id, event_id=event_id, active_only=True)
            if not runs:
                print(f" ⚠️ No active run found for team {team_abbrev} in event {event_id}")
            else:
                for run in runs:
                    api.end_gauntlet_run(run['id'])
                    stats['runs_ended'] += 1
                    print(f" ✓ Ended run ID {run['id']} (Record: {run['wins']}-{run['losses']})")
        except Exception as e:
            print(f" ❌ Failed to end run: {e}")
    # Summary
    print("\n" + "="*60)
    print("📊 CLEANUP SUMMARY:")
    # FIX: the status expression previously chose between two empty string
    # literals (emoji characters lost), so the line rendered blank either
    # way. Restore explicit pass/fail indicators.
    print(f" Cards wiped: {'✅' if stats['cards_wiped'] else '❌'}")
    print(f" Packs deleted: {stats['packs_deleted']}")
    print(f" Runs ended: {stats['runs_ended']}")
    print("="*60)
    return stats
def main():
    """CLI entry point: parse arguments, build the API client, dispatch to
    the list/wipe subcommand."""
    parser = argparse.ArgumentParser(
        description='Paper Dynasty Gauntlet Team Cleanup',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Environment Variables:
    API_TOKEN   Bearer token for API authentication (required)
    DATABASE    'prod' or 'dev' (default: dev)
Examples:
    # Set up environment
    export API_TOKEN='your-token-here'
    export DATABASE='prod'  # or 'dev'
    # List all active runs in event 8
    python gauntlet_cleanup.py list --event-id 8 --active-only
    # Wipe team by abbreviation
    python gauntlet_cleanup.py wipe --team-abbrev Gauntlet-SKB --event-id 8
    # Wipe team by ID
    python gauntlet_cleanup.py wipe --team-id 464 --event-id 8
    # Skip confirmation (for automation)
    python gauntlet_cleanup.py wipe --team-abbrev Gauntlet-SKB --event-id 8 --yes
"""
    )
    parser.add_argument('command', choices=['list', 'wipe'], help='Command to execute')
    parser.add_argument('--team-abbrev', type=str, help='Team abbreviation (e.g., Gauntlet-SKB)')
    parser.add_argument('--team-id', type=int, help='Team ID')
    parser.add_argument('--event-id', type=int, help='Gauntlet event ID')
    parser.add_argument('--active-only', action='store_true', help='Only show active runs (for list command)')
    parser.add_argument('--yes', '-y', action='store_true', help='Skip confirmation prompt')
    parser.add_argument('--env', choices=['prod', 'dev'], help='Override DATABASE environment variable')
    parser.add_argument('--verbose', '-v', action='store_true', help='Verbose API output')
    args = parser.parse_args()
    # Determine environment: an explicit --env flag wins over DATABASE.
    env = args.env or os.getenv('DATABASE', 'dev')
    # Initialize API client (raises ValueError for bad config, e.g. no token).
    try:
        api = PaperDynastyAPI(environment=env, verbose=args.verbose)
        print(f"🌐 Using {env.upper()} database: {api.base_url}")
    except ValueError as e:
        print(f"❌ {e}")
        sys.exit(1)
    # Execute command
    if args.command == 'list':
        list_gauntlet_runs(api, event_id=args.event_id, active_only=args.active_only)
    elif args.command == 'wipe':
        # wipe needs some way to identify the target team.
        if not args.team_abbrev and not args.team_id:
            print("❌ Error: wipe command requires --team-abbrev or --team-id")
            parser.print_help()
            sys.exit(1)
        wipe_gauntlet_team(
            api,
            team_abbrev=args.team_abbrev,
            team_id=args.team_id,
            event_id=args.event_id,
            skip_confirmation=args.yes
        )


if __name__ == '__main__':
    main()

View File

@ -1,149 +0,0 @@
#!/usr/bin/env python3
"""
Generate summary report for Paper Dynasty card update
Collects statistics and notable changes for release notes.
Uses the Paper Dynasty API instead of direct database access.
Usage:
python generate_summary.py [--cardset-id 24] [--env prod]
"""
import sys
import argparse
from pathlib import Path
from datetime import datetime
from typing import List, Dict, Tuple
sys.path.insert(0, str(Path(__file__).parent.parent))
from api_client import PaperDynastyAPI
def get_card_counts(api: PaperDynastyAPI, cardset_id: int) -> Dict[str, int]:
    """Return batting/pitching/total card counts for a cardset via the API.

    Relies on the API response envelope exposing a "count" field; a missing
    field counts as 0.
    """
    params = [("cardset_id", cardset_id)]
    counts = {
        "batting": api.get("battingcards", params=params).get("count", 0),
        "pitching": api.get("pitchingcards", params=params).get("count", 0),
    }
    counts["total"] = counts["batting"] + counts["pitching"]
    return counts
def get_player_count(api: PaperDynastyAPI, cardset_id: int) -> int:
    """Return the number of players in the cardset.

    Best-effort: any API failure yields 0 rather than aborting the summary.
    """
    try:
        return len(api.list_players(cardset_id=cardset_id))
    except Exception:
        return 0
def get_date_range(card_creation_dir: Path) -> Tuple[str, str]:
    """Extract (START_DATE, END_DATE) from retrosheet_data.py.

    Scans for simple assignments of the form ``START_DATE = "YYYY-MM-DD"``.
    Returns ("Unknown", "Unknown") when the file is missing, and "Unknown"
    for either value that cannot be found.

    Fixes over the previous version:
    - matches on the exact assignment target, so commented-out lines or
      other mentions of the names no longer match;
    - splits on the first '=' only, so values containing '=' survive;
    - strips trailing inline '#' comments before removing quotes.
    """
    retrosheet_file = card_creation_dir / "retrosheet_data.py"
    if not retrosheet_file.exists():
        return "Unknown", "Unknown"

    dates = {"START_DATE": "Unknown", "END_DATE": "Unknown"}
    for line in retrosheet_file.read_text().split('\n'):
        if '=' not in line:
            continue
        key, _, value = line.partition('=')
        key = key.strip()
        if key in dates:
            # Drop any inline comment, then surrounding quotes/whitespace.
            dates[key] = value.split('#', 1)[0].strip().strip('"\'')
    return dates["START_DATE"], dates["END_DATE"]
def generate_markdown_summary(
    counts: Dict[str, int],
    player_count: int,
    date_range: Tuple[str, str],
    csv_files: List[str],
    cardset_id: int,
    env: str,
) -> str:
    """Render the card-update summary as a markdown document string."""
    today = datetime.now().strftime('%Y-%m-%d')
    start_date, end_date = date_range
    # Fixed header/overview section.
    header = [
        f"# Paper Dynasty Card Update - {today}",
        "",
        "## Overview",
        f"- **Total Cards**: {counts['batting']} batting, {counts['pitching']} pitching",
        f"- **Total Players**: {player_count}",
        f"- **Data Range**: {start_date} to {end_date}",
        f"- **Cardset ID**: {cardset_id} ({env})",
        "",
        "## Files Generated",
        "- ✅ Card images uploaded to S3",
        "- ✅ Scouting CSVs transferred to database server",
    ]
    # One sub-bullet per generated CSV.
    file_bullets = [f"  - {csv_file}" for csv_file in csv_files]
    # Fixed validation/footer section.
    footer = [
        "",
        "## Validation",
        "- ✅ No negative groundball_b values",
        "- ✅ All required fields populated",
        "- ✅ Database integrity checks passed",
        "",
        "---",
        "Generated by Claude Code - Paper Dynasty Cards Skill",
    ]
    return "\n".join(header + file_bullets + footer)
def main():
    """Collect stats from the API and emit the markdown summary to stdout,
    plus a timestamped copy in ~/.claude/scratchpad/."""
    parser = argparse.ArgumentParser(
        description="Generate summary report for Paper Dynasty card update"
    )
    parser.add_argument(
        "--cardset-id", type=int, default=24,
        help="Cardset ID to summarize (default: 24, the live cardset)"
    )
    parser.add_argument(
        "--env", choices=["prod", "dev"], default="prod",
        help="API environment (default: prod)"
    )
    args = parser.parse_args()
    api = PaperDynastyAPI(environment=args.env)
    # Collect data from API
    counts = get_card_counts(api, args.cardset_id)
    player_count = get_player_count(api, args.cardset_id)
    # Get date range from retrosheet_data.py if present
    # NOTE(review): hard-coded local checkout path — only meaningful on the
    # machine this skill runs on.
    card_creation_dir = Path("/mnt/NV2/Development/paper-dynasty/card-creation")
    date_range = get_date_range(card_creation_dir)
    # Get CSV files from scouting directory (empty list when dir is absent)
    scouting_dir = card_creation_dir / "scouting"
    csv_files = sorted([f.name for f in scouting_dir.glob("*.csv")]) if scouting_dir.exists() else []
    # Generate summary
    summary = generate_markdown_summary(
        counts, player_count, date_range, csv_files, args.cardset_id, args.env
    )
    # Print to stdout
    print(summary)
    # Save to file — timestamped name so repeated runs never overwrite.
    output_file = Path.home() / ".claude" / "scratchpad" / f"{datetime.now().strftime('%Y%m%d_%H%M%S')}_card_update_summary.md"
    output_file.parent.mkdir(parents=True, exist_ok=True)
    output_file.write_text(summary)
    # Path notice goes to stderr so stdout remains clean markdown for piping.
    print(f"\n✅ Summary saved to: {output_file}", file=sys.stderr)


if __name__ == "__main__":
    main()

View File

@ -1,847 +0,0 @@
#!/usr/bin/env python3
"""
Paper Dynasty Smoke Test
Comprehensive deployment verification for the Database API and Discord Bot.
Tests endpoint availability, data integrity, and key features like card rendering
and the Refractor (evolution) system.
Usage:
python smoke_test.py # Test dev environment
python smoke_test.py --env prod # Test production
python smoke_test.py --env dev --verbose # Show response details
python smoke_test.py --category core # Run only core tests
Exit codes:
0 = all tests passed
1 = one or more tests failed
"""
import argparse
import json
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional
sys.path.insert(0, str(Path(__file__).parent.parent))
from api_client import PaperDynastyAPI
# ANSI colors
GREEN = "\033[92m"
RED = "\033[91m"
YELLOW = "\033[93m"
CYAN = "\033[96m"
DIM = "\033[2m"
BOLD = "\033[1m"
RESET = "\033[0m"
@dataclass
class TestResult:
    # Outcome of a single smoke-test check.
    name: str                          # human-readable check name
    category: str                      # grouping key (core, teams, cards, ...)
    passed: bool                       # True on pass — skipped checks are also recorded as passed
    status_code: Optional[int] = None  # HTTP status when a request completed
    detail: str = ""                   # short result text / failure reason
    duration_ms: float = 0             # wall-clock duration of the check
@dataclass
class SmokeTestRunner:
    # Drives all endpoint checks against one environment and collects results.
    env: str = "dev"                    # target environment: "dev" or "prod"
    verbose: bool = False               # include response snippets in detail text
    categories: Optional[list] = None   # restrict to these categories; None = all
    mode: str = "quick"                 # "quick" or "full" (display only; run_all takes mode)
    results: list = field(default_factory=list)  # TestResult list, filled by run_all()
    api: PaperDynastyAPI = field(init=False)     # shared client, built in __post_init__

    def __post_init__(self):
        # Construct the API client once per runner from env/verbose settings.
        self.api = PaperDynastyAPI(environment=self.env, verbose=self.verbose)
def check(
self,
name: str,
category: str,
endpoint: str,
*,
params: Optional[list] = None,
expect_list: bool = False,
min_count: int = 0,
expect_keys: Optional[list] = None,
timeout: int = 30,
requires_auth: bool = False,
) -> Optional[TestResult]:
"""Run a single endpoint check."""
if self.categories and category not in self.categories:
return None
import requests
if requires_auth and not self.api.token:
return TestResult(
name=name,
category=category,
passed=True,
detail="skipped (no API_TOKEN)",
duration_ms=0,
)
start = time.time()
try:
url = self.api._build_url(endpoint, params=params)
raw = requests.get(url, headers=self.api.headers, timeout=timeout)
elapsed = (time.time() - start) * 1000
status = raw.status_code
if status != 200:
return TestResult(
name=name,
category=category,
passed=False,
status_code=status,
detail=f"HTTP {status}",
duration_ms=elapsed,
)
data = raw.json()
# API returns {"count": N, "<resource>": [...]} — unwrap
if expect_list and isinstance(data, dict):
# Find the list value in the response dict
lists = [v for v in data.values() if isinstance(v, list)]
if lists:
data = lists[0]
else:
return TestResult(
name=name,
category=category,
passed=False,
status_code=status,
detail=f"no list found in response keys: {list(data.keys())}",
duration_ms=elapsed,
)
# Validate response shape
if expect_list:
if not isinstance(data, list):
return TestResult(
name=name,
category=category,
passed=False,
status_code=status,
detail=f"expected list, got {type(data).__name__}",
duration_ms=elapsed,
)
if len(data) < min_count:
return TestResult(
name=name,
category=category,
passed=False,
status_code=status,
detail=f"expected >= {min_count} items, got {len(data)}",
duration_ms=elapsed,
)
detail = f"{len(data)} items"
elif expect_keys:
if isinstance(data, list):
obj = data[0] if data else {}
else:
obj = data
missing = [k for k in expect_keys if k not in obj]
if missing:
return TestResult(
name=name,
category=category,
passed=False,
status_code=status,
detail=f"missing keys: {missing}",
duration_ms=elapsed,
)
detail = "schema ok"
else:
detail = "ok"
if self.verbose and isinstance(data, list):
detail += (
f" (first: {json.dumps(data[0], default=str)[:80]}...)"
if data
else ""
)
elif self.verbose and isinstance(data, dict):
detail += f" ({json.dumps(data, default=str)[:80]}...)"
return TestResult(
name=name,
category=category,
passed=True,
status_code=status,
detail=detail,
duration_ms=elapsed,
)
except requests.exceptions.Timeout:
elapsed = (time.time() - start) * 1000
return TestResult(
name=name,
category=category,
passed=False,
detail=f"timeout after {timeout}s",
duration_ms=elapsed,
)
except requests.exceptions.ConnectionError:
elapsed = (time.time() - start) * 1000
return TestResult(
name=name,
category=category,
passed=False,
detail="connection refused",
duration_ms=elapsed,
)
except Exception as e:
elapsed = (time.time() - start) * 1000
return TestResult(
name=name,
category=category,
passed=False,
detail=str(e)[:100],
duration_ms=elapsed,
)
def check_url(
self,
name: str,
category: str,
url: str,
*,
timeout: int = 30,
expect_content_type: Optional[str] = None,
) -> Optional[TestResult]:
"""Check a raw URL (not through the API client)."""
if self.categories and category not in self.categories:
return None
import requests
start = time.time()
try:
raw = requests.get(url, timeout=timeout)
elapsed = (time.time() - start) * 1000
if raw.status_code != 200:
return TestResult(
name=name,
category=category,
passed=False,
status_code=raw.status_code,
detail=f"HTTP {raw.status_code}",
duration_ms=elapsed,
)
detail = f"{len(raw.content)} bytes"
if expect_content_type:
ct = raw.headers.get("content-type", "")
if expect_content_type not in ct:
return TestResult(
name=name,
category=category,
passed=False,
status_code=raw.status_code,
detail=f"expected {expect_content_type}, got {ct}",
duration_ms=elapsed,
)
return TestResult(
name=name,
category=category,
passed=True,
status_code=raw.status_code,
detail=detail,
duration_ms=elapsed,
)
except Exception as e:
elapsed = (time.time() - start) * 1000
return TestResult(
name=name,
category=category,
passed=False,
detail=str(e)[:100],
duration_ms=elapsed,
)
def _fetch_id(
self, endpoint: str, params: Optional[list] = None, requires_auth: bool = False
) -> Optional[int]:
"""Fetch the first item's ID from a list endpoint."""
import requests
if requires_auth and not self.api.token:
return None
try:
url = self.api._build_url(endpoint, params=params)
raw = requests.get(url, headers=self.api.headers, timeout=10)
if raw.status_code != 200:
return None
data = raw.json()
if isinstance(data, dict):
lists = [v for v in data.values() if isinstance(v, list)]
data = lists[0] if lists else []
if data and isinstance(data, list) and "id" in data[0]:
return data[0]["id"]
except Exception:
pass
return None
def run_all(self, mode: str = "quick"):
"""Run smoke test checks. mode='quick' for core, 'full' for everything."""
base = self.api.base_url
# Pre-fetch IDs for by-ID lookups (full mode only)
if mode == "full":
team_id = self._fetch_id("teams", params=[("limit", 1)])
player_id = self._fetch_id("players", params=[("limit", 1)])
card_id = self._fetch_id("cards", params=[("limit", 1)])
game_id = self._fetch_id("games")
result_id = self._fetch_id("results")
track_id = self._fetch_id("evolution/tracks", requires_auth=True)
else:
team_id = player_id = card_id = game_id = result_id = track_id = None
# ── QUICK: fast, reliable endpoints — deployment canary ──
tests = [
self.check_url("API docs", "core", f"{base}/docs", timeout=5),
self.check(
"Current season/week", "core", "current", expect_keys=["season", "week"]
),
self.check("Rarities", "core", "rarities", expect_list=True, min_count=5),
self.check("Cardsets", "core", "cardsets", expect_list=True, min_count=1),
self.check(
"Pack types", "core", "packtypes", expect_list=True, min_count=1
),
self.check_url(
"OpenAPI schema",
"core",
f"{base}/openapi.json",
expect_content_type="application/json",
),
self.check(
"Teams",
"teams",
"teams",
params=[("limit", 5)],
expect_list=True,
min_count=1,
),
self.check(
"Team by abbrev",
"teams",
"teams",
params=[("abbrev", "SKB")],
expect_list=True,
),
self.check(
"Players",
"players",
"players",
params=[("limit", 5)],
expect_list=True,
min_count=1,
),
self.check(
"Player search",
"players",
"players/search",
params=[("q", "Judge"), ("limit", 3)],
expect_list=True,
),
self.check(
"Batting cards",
"cards",
"battingcards",
params=[("limit", 5)],
expect_list=True,
min_count=1,
),
self.check(
"Pitching cards",
"cards",
"pitchingcards",
params=[("limit", 5)],
expect_list=True,
min_count=1,
),
self.check(
"Packs",
"economy",
"packs",
params=[("limit", 5)],
expect_list=True,
min_count=1,
),
self.check("Events", "economy", "events", expect_list=True),
self.check(
"Scout opportunities",
"scouting",
"scout_opportunities",
expect_list=True,
),
self.check(
"Evolution tracks",
"refractor",
"evolution/tracks",
expect_list=True,
min_count=1,
requires_auth=True,
),
]
# ── FULL: all endpoints, by-ID lookups, sub-resources ──
if mode == "full":
tests.extend(
[
# Core extras
self.check(
"Cardset search",
"core",
"cardsets/search",
params=[("q", "Live"), ("limit", 3)],
expect_list=True,
),
# Team sub-resources
*(
[
self.check(
f"Team by ID ({team_id})",
"teams",
f"teams/{team_id}",
expect_keys=["id", "abbrev"],
),
self.check(
"Team cards",
"teams",
f"teams/{team_id}/cards",
expect_list=True,
),
self.check(
"Team evolutions",
"teams",
f"teams/{team_id}/evolutions",
expect_list=True,
requires_auth=True,
),
self.check(
"Team lineup",
"teams",
f"teams/{team_id}/lineup/default",
expect_list=True,
),
self.check(
"Team SP lineup",
"teams",
f"teams/{team_id}/sp/default",
expect_list=True,
),
self.check(
"Team RP lineup",
"teams",
f"teams/{team_id}/rp/default",
expect_list=True,
),
self.check(
"Team season record",
"teams",
f"teams/{team_id}/season-record/11",
),
]
if team_id
else []
),
# Player extras
self.check(
"Random player",
"players",
"players/random",
expect_keys=["id", "p_name"],
),
*(
[
self.check(
f"Player by ID ({player_id})",
"players",
f"players/{player_id}",
expect_keys=["id", "p_name"],
),
]
if player_id
else []
),
# Card extras
self.check(
"Cards list",
"cards",
"cards",
params=[("limit", 5)],
expect_list=True,
min_count=1,
),
self.check(
"Batting card ratings",
"cards",
"battingcardratings",
expect_list=True,
requires_auth=True,
),
self.check(
"Pitching card ratings",
"cards",
"pitchingcardratings",
expect_list=True,
requires_auth=True,
),
self.check(
"Card positions",
"cards",
"cardpositions",
expect_list=True,
),
*(
[
self.check(
f"Card by ID ({card_id})",
"cards",
f"cards/{card_id}",
expect_keys=["id"],
),
]
if card_id
else []
),
*(
[
self.check(
"Batting card by player",
"cards",
f"battingcards/player/{player_id}",
expect_list=True,
),
]
if player_id
else []
),
# Games & results
self.check(
"Games list",
"games",
"games",
expect_list=True,
min_count=1,
),
self.check(
"Results list",
"games",
"results",
expect_list=True,
min_count=1,
),
self.check(
"Plays list",
"games",
"plays",
params=[("limit", 5)],
expect_list=True,
min_count=1,
),
self.check(
"Plays batting agg",
"games",
"plays/batting",
params=[("limit", 5)],
expect_list=True,
),
self.check(
"Plays pitching agg",
"games",
"plays/pitching",
params=[("limit", 5)],
expect_list=True,
),
*(
[
self.check(
f"Game by ID ({game_id})",
"games",
f"games/{game_id}",
expect_keys=["id"],
),
self.check(
"Game summary", "games", f"plays/game-summary/{game_id}"
),
]
if game_id
else []
),
*(
[
self.check(
f"Result by ID ({result_id})",
"games",
f"results/{result_id}",
expect_keys=["id"],
),
]
if result_id
else []
),
*(
[
self.check(
"Team W/L record",
"games",
f"results/team/{team_id}",
params=[("season", 11)],
),
]
if team_id
else []
),
# Economy extras
self.check(
"Rewards",
"economy",
"rewards",
expect_list=True,
),
self.check(
"Game rewards",
"economy",
"gamerewards",
expect_list=True,
min_count=1,
),
self.check(
"Gauntlet rewards",
"economy",
"gauntletrewards",
expect_list=True,
),
self.check(
"Gauntlet runs",
"economy",
"gauntletruns",
expect_list=True,
),
self.check(
"Awards",
"economy",
"awards",
expect_list=True,
),
self.check(
"Notifications",
"economy",
"notifs",
expect_list=True,
),
# Scouting extras
self.check(
"Scout claims",
"scouting",
"scout_claims",
expect_list=True,
),
self.check(
"MLB players",
"scouting",
"mlbplayers",
expect_list=True,
),
self.check(
"Paperdex",
"scouting",
"paperdex",
expect_list=True,
),
*(
[
self.check(
"Scouting player keys",
"scouting",
"scouting/playerkeys",
params=[("player_id", player_id)],
expect_list=True,
requires_auth=True,
),
]
if player_id
else []
),
# Stats
self.check(
"Batting stats",
"stats",
"batstats",
expect_list=True,
),
self.check(
"Pitching stats",
"stats",
"pitstats",
expect_list=True,
),
self.check(
"Decisions",
"stats",
"decisions",
params=[("limit", 5)],
expect_list=True,
),
self.check(
"Decisions rest",
"stats",
"decisions/rest",
params=[("limit", 5)],
expect_list=True,
),
# Refractor extras
*(
[
self.check(
f"Evolution track ({track_id})",
"refractor",
f"evolution/tracks/{track_id}",
expect_keys=["id", "name"],
requires_auth=True,
),
]
if track_id
else []
),
*(
[
self.check(
"Evolution card state",
"refractor",
f"evolution/cards/{card_id}",
requires_auth=True,
),
]
if card_id
else []
),
]
)
# Filter out None results (skipped categories)
self.results = [t for t in tests if t is not None]
    def print_results(self):
        """Pretty-print all results grouped by category.

        Returns True when no check failed (skips count as passes).
        """
        passed = sum(1 for r in self.results if r.passed)
        failed = sum(1 for r in self.results if not r.passed)
        total = len(self.results)
        print(
            f"\n{BOLD}Paper Dynasty Smoke Test — {self.env.upper()} ({self.mode}){RESET}"
        )
        print(f"{DIM}{self.api.base_url}{RESET}\n")
        current_category = None
        for r in self.results:
            # Emit a category header whenever the category changes
            # (results arrive in category order from run_all).
            if r.category != current_category:
                current_category = r.category
                print(f"  {CYAN}{current_category.upper()}{RESET}")
            icon = f"{GREEN}PASS{RESET}" if r.passed else f"{RED}FAIL{RESET}"
            timing = f"{DIM}{r.duration_ms:6.0f}ms{RESET}"
            # Skipped checks are recorded as passed; render them distinctly.
            if "skipped" in r.detail:
                icon = f"{YELLOW}SKIP{RESET}"
            print(f"  {icon} {r.name:<30} {timing} {DIM}{r.detail}{RESET}")
        print(f"\n  {BOLD}Results:{RESET} ", end="")
        if failed == 0:
            print(f"{GREEN}{passed}/{total} passed{RESET}")
        else:
            print(
                f"{RED}{failed} failed{RESET}, {GREEN}{passed} passed{RESET} of {total}"
            )
        return failed == 0
def as_json(self) -> str:
    """Serialize all test results to a pretty-printed JSON array string.

    Each element carries the test name, category, pass flag, HTTP status,
    detail text, and duration rounded to one decimal place (milliseconds).
    """
    payload = []
    for result in self.results:
        payload.append(
            {
                "name": result.name,
                "category": result.category,
                "passed": result.passed,
                "status_code": result.status_code,
                "detail": result.detail,
                "duration_ms": round(result.duration_ms, 1),
            }
        )
    return json.dumps(payload, indent=2)
def main():
    """CLI entry point: parse options, run the smoke tests, and exit 0/1."""
    arg_parser = argparse.ArgumentParser(description="Paper Dynasty deployment smoke test")
    arg_parser.add_argument(
        "--env",
        default="dev",
        choices=["dev", "prod"],
        help="Environment to test (default: dev)",
    )
    arg_parser.add_argument(
        "--verbose", "-v", action="store_true", help="Show response details"
    )
    arg_parser.add_argument("--json", action="store_true", help="Output results as JSON")
    arg_parser.add_argument(
        "mode",
        nargs="?",
        default="quick",
        choices=["quick", "full"],
        help="Test mode: quick (16 checks, ~5s) or full (50+ checks, ~2min)",
    )
    arg_parser.add_argument(
        "--category",
        "-c",
        action="append",
        choices=[
            "core",
            "teams",
            "players",
            "cards",
            "games",
            "economy",
            "scouting",
            "stats",
            "refractor",
        ],
        help="Run only specific categories (can repeat)",
    )
    opts = arg_parser.parse_args()
    # Build the runner for the chosen environment and execute all checks.
    runner = SmokeTestRunner(
        env=opts.env,
        verbose=opts.verbose,
        categories=opts.category,
        mode=opts.mode,
    )
    runner.run_all(mode=opts.mode)
    if opts.json:
        # JSON mode: machine-readable output; pass/fail derived directly.
        print(runner.as_json())
        success = all(r.passed for r in runner.results)
    else:
        # Human mode: print_results both renders and reports overall status.
        success = runner.print_results()
    sys.exit(0 if success else 1)
# Entry point when executed directly as a script (not on import).
if __name__ == "__main__":
    main()

View File

@ -1,222 +0,0 @@
#!/bin/bash
#
# Paper Dynasty Database Sync - Production to Dev
#
# Syncs production PostgreSQL database (akamai) to dev environment (pd-database)
#
# Usage:
#   ./sync_prod_to_dev.sh [--dry-run] [--no-backup] [--yes]
#
# Options:
#   --dry-run    Show what would happen without making changes
#   --no-backup  Skip creating backup of dev database before restore
#   --yes, -y    Skip the interactive confirmation prompt
#
set -e

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
DUMP_FILE="/tmp/pd_prod_dump_${TIMESTAMP}.sql"
BACKUP_DIR="$HOME/.paper-dynasty/db-backups"

# Production database info
PROD_HOST="akamai"
PROD_CONTAINER="sba_postgres"
PROD_DB="pd_master"
PROD_USER="pd_admin"
# SECURITY FIX: the production password was previously hard-coded here in
# version control. No command in this script actually uses it (all psql /
# pg_dump calls authenticate via `docker exec` on the remote host), so it
# is now read from the environment and defaults to empty.
PROD_PASSWORD="${PD_PROD_DB_PASSWORD:-}"

# Dev database info
DEV_HOST="pd-database"
DEV_CONTAINER="sba_postgres"
DEV_DB="paperdynasty_dev"
DEV_USER="sba_admin"

# Parse arguments
DRY_RUN=false
NO_BACKUP=false
SKIP_CONFIRM=false

while [[ $# -gt 0 ]]; do
    case $1 in
        --dry-run)
            DRY_RUN=true
            shift
            ;;
        --no-backup)
            NO_BACKUP=true
            shift
            ;;
        --yes|-y)
            SKIP_CONFIRM=true
            shift
            ;;
        *)
            echo "Unknown option: $1"
            echo "Usage: $0 [--dry-run] [--no-backup] [--yes]"
            exit 1
            ;;
    esac
done

# Colors for output
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Print an informational message with a blue [INFO] tag.
log_info() {
    echo -e "${BLUE}[INFO]${NC} $1"
}
# Print a success message with a green [SUCCESS] tag.
log_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}
# Print a warning message with a yellow [WARNING] tag.
log_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1"
}
# Print an error message with a red [ERROR] tag.
log_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}
# Step 1: Verify connectivity to both databases before doing anything.
log_info "Verifying connectivity to production and dev databases..."
if ! ssh -q "$PROD_HOST" "docker exec $PROD_CONTAINER psql -U $PROD_USER -d $PROD_DB -c 'SELECT 1' > /dev/null 2>&1"; then
    log_error "Cannot connect to production database on $PROD_HOST"
    exit 1
fi
if ! ssh -q "$DEV_HOST" "docker exec $DEV_CONTAINER psql -U $DEV_USER -d $DEV_DB -c 'SELECT 1' > /dev/null 2>&1"; then
    log_error "Cannot connect to dev database on $DEV_HOST"
    exit 1
fi
log_success "Database connectivity verified"

# Step 2: Gather size/table statistics for the before/after summary.
log_info "Fetching database statistics..."
PROD_SIZE=$(ssh "$PROD_HOST" "docker exec $PROD_CONTAINER psql -U $PROD_USER -d $PROD_DB -t -c \"SELECT pg_size_pretty(pg_database_size('$PROD_DB'));\"" | xargs)
PROD_TABLES=$(ssh "$PROD_HOST" "docker exec $PROD_CONTAINER psql -U $PROD_USER -d $PROD_DB -t -c \"SELECT count(*) FROM information_schema.tables WHERE table_schema = 'public';\"" | xargs)
DEV_SIZE=$(ssh "$DEV_HOST" "docker exec $DEV_CONTAINER psql -U $DEV_USER -d $DEV_DB -t -c \"SELECT pg_size_pretty(pg_database_size('$DEV_DB'));\"" | xargs)
DEV_TABLES=$(ssh "$DEV_HOST" "docker exec $DEV_CONTAINER psql -U $DEV_USER -d $DEV_DB -t -c \"SELECT count(*) FROM information_schema.tables WHERE table_schema = 'public';\"" | xargs)

echo ""
log_info "Production Database (${PROD_HOST}):"
echo " Database: $PROD_DB"
echo " Size: $PROD_SIZE"
echo " Tables: $PROD_TABLES"
echo ""
log_info "Dev Database (${DEV_HOST}):"
echo " Database: $DEV_DB"
echo " Size: $DEV_SIZE"
echo " Tables: $DEV_TABLES"
echo ""

# Dry-run mode: describe the plan and stop before any destructive step.
if [ "$DRY_RUN" = true ]; then
    log_warning "DRY RUN MODE - No changes will be made"
    echo ""
    log_info "Would perform the following steps:"
    echo " 1. Create pg_dump of production database ($PROD_SIZE)"
    if [ "$NO_BACKUP" = false ]; then
        echo " 2. Backup current dev database to $BACKUP_DIR"
    fi
    echo " 3. Drop and recreate dev database schema"
    echo " 4. Restore production dump to dev database"
    echo " 5. Verify restored database"
    echo " 6. Clean up temporary files"
    exit 0
fi

# Confirmation prompt (bypassed with --yes).
if [ "$SKIP_CONFIRM" = false ]; then
    log_warning "This will REPLACE the dev database with production data!"
    read -p "Continue? (yes/no): " -r
    echo
    if [[ ! $REPLY =~ ^[Yy][Ee][Ss]$ ]]; then
        log_info "Aborted by user"
        exit 0
    fi
else
    log_warning "Skipping confirmation (--yes flag provided)"
fi

# Step 3: Ensure the backup directory exists.
mkdir -p "$BACKUP_DIR"

# Step 4: Backup current dev database (optional).
# BUGFIX: initialize BACKUP_FILE so later references are safe under --no-backup
# (it was previously unset in that case, producing a misleading message).
BACKUP_FILE=""
if [ "$NO_BACKUP" = false ]; then
    log_info "Creating backup of current dev database..."
    BACKUP_FILE="$BACKUP_DIR/paperdynasty_dev_${TIMESTAMP}.sql"
    ssh "$DEV_HOST" "docker exec $DEV_CONTAINER pg_dump -U $DEV_USER -d $DEV_DB --clean --if-exists" > "$BACKUP_FILE"
    if [ -f "$BACKUP_FILE" ]; then
        BACKUP_SIZE=$(du -h "$BACKUP_FILE" | cut -f1)
        log_success "Dev database backed up to $BACKUP_FILE ($BACKUP_SIZE)"
    else
        log_error "Failed to create backup"
        exit 1
    fi
fi

# Step 5: Create production database dump.
log_info "Creating production database dump..."
ssh "$PROD_HOST" "docker exec $PROD_CONTAINER pg_dump -U $PROD_USER -d $PROD_DB --clean --if-exists" > "$DUMP_FILE"
if [ ! -f "$DUMP_FILE" ]; then
    log_error "Failed to create production dump"
    exit 1
fi
DUMP_SIZE=$(du -h "$DUMP_FILE" | cut -f1)
log_success "Production dump created: $DUMP_FILE ($DUMP_SIZE)"

# Step 6: Restore the dump into the dev database.
log_info "Restoring production dump to dev database..."
if ssh "$DEV_HOST" "docker exec -i $DEV_CONTAINER psql -U $DEV_USER -d $DEV_DB" < "$DUMP_FILE"; then
    log_success "Database restored successfully"
else
    log_error "Failed to restore database"
    # BUGFIX: only mention the backup if one was actually created.
    if [ -n "$BACKUP_FILE" ]; then
        log_warning "Backup is available at: $BACKUP_FILE"
    fi
    exit 1
fi

# Step 7: Verify restoration by re-reading size and table counts.
log_info "Verifying restored database..."
NEW_SIZE=$(ssh "$DEV_HOST" "docker exec $DEV_CONTAINER psql -U $DEV_USER -d $DEV_DB -t -c \"SELECT pg_size_pretty(pg_database_size('$DEV_DB'));\"" | xargs)
NEW_TABLES=$(ssh "$DEV_HOST" "docker exec $DEV_CONTAINER psql -U $DEV_USER -d $DEV_DB -t -c \"SELECT count(*) FROM information_schema.tables WHERE table_schema = 'public';\"" | xargs)

echo ""
log_info "Dev Database After Restore:"
echo " Database: $DEV_DB"
echo " Size: $NEW_SIZE (was $DEV_SIZE)"
echo " Tables: $NEW_TABLES (was $DEV_TABLES)"
echo ""

# Step 8: Clean up the temporary dump file.
log_info "Cleaning up temporary files..."
rm -f "$DUMP_FILE"
log_success "Temporary dump file removed"

# Step 9: Final summary.
echo ""
log_success "Database sync complete!"
echo ""
log_info "Summary:"
echo " Production ($PROD_HOST): $PROD_SIZE, $PROD_TABLES tables"
echo " Dev ($DEV_HOST): $NEW_SIZE, $NEW_TABLES tables"
if [ "$NO_BACKUP" = false ]; then
    echo " Backup: $BACKUP_FILE"
fi
echo ""
log_info "Dev database is now synchronized with production"

View File

@ -1,157 +0,0 @@
#!/usr/bin/env python3
"""
Database Validation Script for Paper Dynasty Card Generation
Checks for common errors in card data via the API before deployment:
- Missing batting or pitching cards for a cardset
- Players with no corresponding card record
- Rarity distribution sanity check
Usage:
python validate_database.py [--cardset-id 24] [--env prod]
"""
import sys
import argparse
from pathlib import Path
from typing import List, Dict
sys.path.insert(0, str(Path(__file__).parent.parent))
from api_client import PaperDynastyAPI
class ValidationError:
    """Describes one category of invalid records found during validation."""

    def __init__(self, entity: str, issue: str, count: int, examples: List[str]):
        # entity: API table/collection where the problem was found
        self.entity = entity
        # issue: human-readable description of the problem
        self.issue = issue
        # count: total number of affected records
        self.count = count
        # examples: sample record descriptions (only the first 5 are displayed)
        self.examples = examples

    def __str__(self):
        header = f"{self.entity}: {self.issue} ({self.count} records)"
        # Cap the listing at five examples; summarize any remainder.
        shown = [f" - {example}" for example in self.examples[:5]]
        footer = [f" ... and {self.count - 5} more"] if self.count > 5 else []
        return "\n".join([header, *shown, *footer])
def get_card_counts(api: PaperDynastyAPI, cardset_id: int) -> Dict[str, int]:
    """Get total card counts for the cardset from the API.

    Queries the battingcards and pitchingcards endpoints and returns their
    reported counts (defaulting to 0 when the response has no 'count' key).
    """
    batting_resp = api.get("battingcards", params=[("cardset_id", cardset_id)])
    pitching_resp = api.get("pitchingcards", params=[("cardset_id", cardset_id)])
    return {
        "batting": batting_resp.get("count", 0),
        "pitching": pitching_resp.get("count", 0),
    }
def validate_card_counts(api: PaperDynastyAPI, cardset_id: int) -> List[ValidationError]:
    """Check that batting and pitching cards exist for the cardset.

    Returns one ValidationError per card type with a zero count.
    """
    counts = get_card_counts(api, cardset_id)
    errors = []
    # A cardset with zero cards of either type is always an error.
    for kind, entity in (("batting", "battingcards"), ("pitching", "pitchingcards")):
        if counts[kind] == 0:
            errors.append(ValidationError(
                entity, f"No {kind} cards found for cardset {cardset_id}", 0, []
            ))
    return errors
def validate_player_coverage(api: PaperDynastyAPI, cardset_id: int) -> List[ValidationError]:
    """Check that every player in the cardset has at least one card"""
    errors = []
    # If the roster cannot be fetched at all, no further checks are
    # meaningful: report a single error and return early.
    try:
        players = api.list_players(cardset_id=cardset_id)
    except Exception as e:
        errors.append(ValidationError("players", f"Could not fetch players: {e}", 0, []))
        return errors
    if not players:
        errors.append(ValidationError(
            "players", f"No players found for cardset {cardset_id}", 0, []
        ))
        return errors
    # NOTE(review): assumes both endpoints return {'cards': [...]} where each
    # card may embed a 'player' object keyed by 'player_id' — confirm against
    # the API schema.
    batting_data = api.get("battingcards", params=[("cardset_id", cardset_id)])
    pitching_data = api.get("pitchingcards", params=[("cardset_id", cardset_id)])
    # Player IDs that own at least one card of the respective type.
    batting_player_ids = {
        c["player"]["player_id"]
        for c in batting_data.get("cards", [])
        if c.get("player")
    }
    pitching_player_ids = {
        c["player"]["player_id"]
        for c in pitching_data.get("cards", [])
        if c.get("player")
    }
    all_card_player_ids = batting_player_ids | pitching_player_ids
    # A player is uncovered when no batting or pitching card references them.
    uncovered = [
        p for p in players
        if p["player_id"] not in all_card_player_ids
    ]
    if uncovered:
        # 'rarity' may arrive as a nested dict or a plain value depending on
        # the endpoint; handle both when building example strings.
        examples = [
            f"{p.get('p_name', 'Unknown')} (ID: {p['player_id']}, rarity: {p.get('rarity', {}).get('name', '?') if isinstance(p.get('rarity'), dict) else p.get('rarity', '?')})"
            for p in uncovered
        ]
        errors.append(ValidationError(
            "players", "Players with no batting or pitching card", len(uncovered), examples
        ))
    return errors
def main():
    """CLI entry point: validate one cardset via the API and exit 0/1.

    Exits with status 1 when any validation error is found, 0 otherwise.
    """
    parser = argparse.ArgumentParser(
        description="Validate Paper Dynasty card data via the API"
    )
    parser.add_argument(
        "--cardset-id", type=int, default=24,
        help="Cardset ID to validate (default: 24, the live cardset)"
    )
    parser.add_argument(
        "--env", choices=["prod", "dev"], default="prod",
        help="API environment (default: prod)"
    )
    args = parser.parse_args()
    api = PaperDynastyAPI(environment=args.env)
    print(f"🔍 Validating cardset {args.cardset_id} ({args.env})")
    print()
    counts = get_card_counts(api, args.cardset_id)
    # FIX: this was an f-string with no placeholders (lint F541).
    print("📊 Total Cards:")
    print(f" - Batting: {counts['batting']}")
    print(f" - Pitching: {counts['pitching']}")
    print()
    # Run all validators and aggregate their findings.
    all_errors = []
    all_errors.extend(validate_card_counts(api, args.cardset_id))
    all_errors.extend(validate_player_coverage(api, args.cardset_id))
    if all_errors:
        print("❌ VALIDATION FAILED")
        print()
        for error in all_errors:
            print(error)
        print()
        print(f"Total issues: {len(all_errors)}")
        sys.exit(1)
    else:
        print("✅ VALIDATION PASSED")
        print("No errors found")
        sys.exit(0)
# Entry point when executed directly as a script (not on import).
if __name__ == "__main__":
    main()

View File

@ -1,373 +0,0 @@
# Paper Dynasty Card Troubleshooting Guide
**Load this context only when debugging card issues**
Use this guide when:
- Cards display incorrect data despite correct database values
- Switch hitter handedness shows wrong
- Positions appear incorrect on cards
- Rarity calculations seem off
- Images won't regenerate
---
## Card Caching Issues
### Symptom
Database has correct values but card images show old/incorrect data
### Root Cause
The Paper Dynasty API caches generated card images by date parameter. Once generated for a specific date, the cached image is served even if database data changes.
### Solution
Use a future date to force regeneration:
```python
# Instead of today
/v2/players/{id}/battingcard?d=2025-11-11 # ❌ Returns cached image
# Use tomorrow or future date
/v2/players/{id}/battingcard?d=2025-11-12 # ✅ Forces fresh generation
```
### Verification Steps
1. **Check database value**:
```python
api.get('battingcards', params=[('player_id', 12785)])
# Verify 'hand' field is correct
```
2. **Test with tomorrow's date**:
```bash
curl "https://pd.manticorum.com/api/v2/players/12785/battingcard?d=2025-11-12&html=true"
# Check if handedness displays correctly
```
3. **If correct with new date → cache issue**:
- Regenerate all affected cards with cache-bust date
- Upload to S3
- Update player.image URLs
4. **If still wrong → code or database issue**:
- Check card rendering code in `/mnt/NV2/Development/paper-dynasty/database/app/card_creation.py`
- Verify database field is being read correctly
---
## Switch Hitter Handedness
### Common Issues
#### Issue 1: Shows 'R' instead of 'S'
**Cause**: Cached card image from before handedness was corrected
**Fix**:
```python
from workflows.card_utilities import regenerate_cards_for_players
switch_hitters = [12785, 12788, 12854, ...] # List of affected IDs
regenerate_cards_for_players(
player_ids=switch_hitters,
cardset_id=27,
cache_bust_date="2025-11-12", # Tomorrow
upload_to_s3=True,
update_player_records=True
)
```
#### Issue 2: Database has wrong handedness
**Cause**: Card creation code didn't detect switch hitter correctly
**Check**:
```python
# Look at raw FanGraphs data
import pandas as pd
vrhp = pd.read_csv('data-input/2005 Live Cardset/vrhp-basic.csv')
vlhp = pd.read_csv('data-input/2005 Live Cardset/vlhp-basic.csv')
player = vrhp[vrhp['key_bbref'] == 'willibe02']
# Check if PA counts are similar vs both sides
```
**Fix location**: `/mnt/NV2/Development/paper-dynasty/card-creation/batters/creation.py`
- Look for handedness detection logic around PA vs L/R comparisons
#### Issue 3: Player name has "#" suffix
**Symptom**: Player shows as "Bernie Williams #" in database
**Cause**: CSV had duplicate player names, script added "#" to dedupe
**Impact**: May affect handedness detection if logic uses name parsing
**Fix**: Remove "#" suffix from player names in database
---
## Position Assignment Problems
### Symptom
Outfielders show as DH, or positions seem wrong on cards
### Root Cause
Usually one of:
1. Defense CSV files missing or malformed
2. Column name mismatch in defense data
3. Defensive calculation logic error
### Diagnostic Steps
#### Step 1: Check cardpositions table
```python
api = PaperDynastyAPI(environment='prod')
positions = api.get('cardpositions', params=[('player_id', 12854)])
# Should show LF, CF, RF for outfielders
# If all show DH → defensive ratings didn't calculate
```
#### Step 2: Verify defense CSV files exist
```bash
cd /mnt/NV2/Development/paper-dynasty/card-creation/data-input/2005\ Live\ Cardset/
ls defense_*.csv
# Should have: defense_c.csv, defense_1b.csv, defense_2b.csv,
# defense_3b.csv, defense_ss.csv, defense_lf.csv,
# defense_cf.csv, defense_rf.csv
```
#### Step 3: Check defense CSV column names
```python
import pandas as pd
df = pd.read_csv('defense_cf.csv')
print(df.columns.tolist())
# Required columns:
# - key_bbref (player ID)
# - Inn_def (innings at position)
# - tz_runs_total (or bis_runs_total)
# - fielding_perc
# - For catchers: caught_stealing_perc
# - For others: PO (putouts)
```
#### Step 4: Check retrosheet_data.py logic
Look for column name checks around lines 889, 926, 947:
```python
# WRONG - checks if column exists in batter row
if 'tz_runs_total' in row: # ❌
# CORRECT - checks if column exists in defense dataframe
if 'tz_runs_total' in pos_df.columns: # ✅
```
### Common Fixes
#### Fix 1: Column name mismatch
```python
# In retrosheet_data.py or similar
# Change from:
of_run_rating = 'tz_runs_outfield' # ❌ Column doesn't exist
# To:
of_run_rating = 'bis_runs_outfield' if 'bis_runs_outfield' in pos_df.columns else 'tz_runs_total'
```
#### Fix 2: Regenerate with correct defense files
1. Fix defense CSV files
2. Re-run `retrosheet_data.py`
3. Verify positions with: `./scripts/check_positions.sh 27`
#### Fix 3: Clear old cardpositions
The `post_positions()` function now DELETEs all existing cardpositions before posting new ones, preventing stale DH positions from persisting.
---
## Rarity Calculation Issues
### Symptom
Players have wrong rarity or show as Common when they should be higher
### Root Cause
Usually one of:
1. Missing ratings data (LEFT JOIN preserves players without ratings)
2. OPS threshold mismatch between years
3. Ratings DataFrame merge corrupted dictionary columns
### Diagnostic Steps
#### Step 1: Check if player has ratings
```python
api.get('battingcardratings', params=[('battingcard_id', 5977)])
# Should return 2 records (vs L and vs R)
```
#### Step 2: Check OPS calculation
```python
# In card creation logs, look for:
# "WARNING: Player {id} has no ratings, assigning default"
# Players without ratings get:
# - Batters: Common rarity (5), OPS 0.612
# - Pitchers: Common rarity (5), OPS-against 0.702
```
#### Step 3: Verify year-specific thresholds
Check `/mnt/NV2/Development/paper-dynasty/card-creation/rarity_thresholds.py`:
```python
# 2024 and earlier use different thresholds than 2025+
if SEASON <= 2024:
BATTER_THRESHOLDS = {6: 1.050, 5: 0.900, ...}
else:
BATTER_THRESHOLDS = {6: 1.000, 5: 0.850, ...}
```
### Common Fixes
#### Fix 1: Regenerate ratings
If ratings are missing, re-run card creation to calculate them
#### Fix 2: Check DataFrame merge
In card creation code, when merging ratings:
```python
# WRONG - merges entire DataFrame, corrupts dict columns
full_card_df = full_card_df.merge(ratings_df)
# CORRECT - merge only needed columns
full_card_df = full_card_df.merge(
ratings_df[['key_bbref', 'player_id', 'battingcard_id']]
)
```
---
## Image Won't Generate
### Symptom
API returns 404 or error when requesting card image
### Possible Causes
#### Cause 1: No batting/pitching card record
```python
# Check if card exists
api.get('battingcards', params=[('player_id', 12785)])
# Should have count > 0
```
**Fix**: Run card creation script to generate card records
#### Cause 2: Missing ratings
```python
# Check ratings exist
api.get('battingcardratings', params=[('battingcard_id', 5977)])
# Should return 2 records (vs L and vs R)
```
**Fix**: Ratings are usually created during card generation. Re-run creation script.
#### Cause 3: Wrong card type
Using `/battingcard` for a pitcher or vice versa
**Fix**: Check player positions to determine card type
#### Cause 4: Variant doesn't exist
Most players have variant 0, but some may not
**Fix**: Try variant=0 explicitly: `/battingcard?variant=0&d=2025-11-11`
---
## S3 Upload Failures
### Symptom
Local card generated successfully but S3 upload fails
### Diagnostic Steps
#### Step 1: Check AWS credentials
```bash
aws sts get-caller-identity
# Should show your AWS account info
```
#### Step 2: Test S3 access
```bash
aws s3 ls s3://paper-dynasty/cards/cardset-027/ --region us-east-1
# Should list existing cards
```
#### Step 3: Check IAM permissions
Required permissions:
- `s3:PutObject`
- `s3:GetObject`
- `s3:ListBucket`
### Common Fixes
#### Fix 1: Configure AWS credentials
```bash
aws configure
# Or set environment variables:
export AWS_ACCESS_KEY_ID=xxx
export AWS_SECRET_ACCESS_KEY=xxx
export AWS_DEFAULT_REGION=us-east-1
```
#### Fix 2: Wrong bucket/region
Verify:
- Bucket: `paper-dynasty`
- Region: `us-east-1`
#### Fix 3: File permissions
Ensure local file is readable:
```bash
ls -la /tmp/switch_hitter_cards/player-12785-battingcard.png
chmod 644 /tmp/switch_hitter_cards/player-12785-battingcard.png
```
---
## Verification Tools
### Check Positions Script
```bash
cd /mnt/NV2/Development/paper-dynasty/card-creation
./scripts/check_positions.sh 27
# Flags issues like:
# - Too many DHs (should be <5 for full season)
# - Missing outfield positions
# - Mismatches between player.pos_X and cardpositions
```
### Verify Switch Hitters
```python
from workflows.card_utilities import verify_switch_hitters
results = verify_switch_hitters(cardset_id=27, environment='prod')
# Shows which switch hitters need card refresh
```
### Check Rarity Distribution
```bash
cd /mnt/NV2/Development/paper-dynasty/card-creation
python analyze_cardset_rarity.py
# Shows player counts by rarity (should follow expected distribution)
```
---
## When to Load This Guide
Load this troubleshooting context when:
- User reports cards showing incorrect data
- Debugging card generation failures
- Investigating position assignment issues
- Verifying switch hitter detection
- Troubleshooting S3 uploads
- User asks about card caching behavior
**Otherwise**, keep this context unloaded to save tokens.

View File

@ -1,223 +0,0 @@
# Card Generation Workflow
## Pre-Flight
Ask the user before starting:
1. **Refresh or new date range?** (refresh keeps existing config)
2. **Which environment?** (prod or dev)
3. **Which cardset?** (e.g., 27 for "2005 Live")
4. **Season progress?** (games played or date range for season-pct calculation)
All commands run from `/mnt/NV2/Development/paper-dynasty/card-creation/`.
## Steps
```bash
# 1. Verify config (dry-run shows settings without executing)
pd-cards retrosheet process <year> -c <cardset_id> -d <description> \
--start <YYYYMMDD> --end <YYYYMMDD> --season-pct <0.0-1.0> --dry-run
# 2. Generate cards (POSTs player data to API)
pd-cards retrosheet process <year> -c <cardset_id> -d <description> \
--start <YYYYMMDD> --end <YYYYMMDD> --season-pct <0.0-1.0>
# 3. Validate positions (DH count MUST be <5; high DH = defense calc failure)
pd-cards retrosheet validate <cardset_id>
# 4. Generate images WITHOUT upload (triggers rendering; groundball_b bug can occur here)
pd-cards upload check -c "<cardset name>"
# 5. CRITICAL: Validate database for negative groundball_b — STOP if errors found
# (see "Bug Prevention" section below)
# 6. Upload to S3
pd-cards upload s3 -c "<cardset name>"
# 7. Generate scouting reports (ALWAYS run without --cardset-id to cover all cardsets)
pd-cards scouting all
# 8. Upload scouting CSVs to production server
pd-cards scouting upload
```
### CLI Parameter Reference
| Parameter | Description | Example |
|-----------|-------------|---------|
| `--start` | Season start date (YYYYMMDD) | `--start 20050403` |
| `--end` | Data cutoff date (YYYYMMDD) | `--end 20050815` |
| `--season-pct` | Fraction of season completed (0.0-1.0) | `--season-pct 0.728` |
| `--min-pa-vl` | Min plate appearances vs LHP (default: 20 Live, 1 PotM) | `--min-pa-vl 20` |
| `--min-pa-vr` | Min plate appearances vs RHP (default: 40 Live, 1 PotM) | `--min-pa-vr 40` |
| `--last-twoweeks-ratio` | Recency bias weight (auto-enabled at 0.2 after May 30) | `--last-twoweeks-ratio 0.2` |
| `--dry-run` / `-n` | Preview without saving to database | |
### Example: 2005 Live Series Update (Mid-August)
```bash
pd-cards retrosheet process 2005 -c 27 -d Live --start 20050403 --end 20050815 --season-pct 0.728 --dry-run
pd-cards retrosheet process 2005 -c 27 -d Live --start 20050403 --end 20050815 --season-pct 0.728
pd-cards retrosheet validate 27
pd-cards upload check -c "2005 Live"
# Run groundball_b validation (step 5)
pd-cards upload s3 -c "2005 Live"
pd-cards scouting all
pd-cards scouting upload
```
---
## Bug Prevention: The Double-Run Pattern
Card image generation (step 4) can create **negative groundball_b values** that crash game simulation. The prevention strategy:
1. **Step 4**: Run `upload check` (no S3 upload) — triggers image rendering and caches images
2. **Step 5**: Query database for negative groundball_b — **STOP if any found**
3. **Step 6**: Run `upload s3` — uploads the already-cached (validated) images. Fast because images are cached from step 4.
**Never skip step 5.** Broken cards uploaded to S3 affect all players immediately.
### Step 5 Validation Script
There is no CLI command for this validation yet. Run this Python script via `uv run python -c`:
```python
uv run python -c "
from db_calls import db_get
import asyncio
async def check_cards():
result = await db_get('battingcards', params=[('cardset', CARDSET_ID)])
cards = result.get('cards', [])
errors = []
for card in cards:
player = card.get('player', {})
pid = player.get('player_id', card.get('id'))
gb = card.get('groundball_b')
if gb is not None and gb < 0:
errors.append(f'Player {pid}: groundball_b = {gb}')
for field in ['gb_b', 'fb_b', 'ld_b']:
val = card.get(field)
if val is not None and (val < 0 or val > 100):
errors.append(f'Player {pid}: {field} = {val}')
if errors:
print('ERRORS FOUND:')
print('\n'.join(errors))
print('\nDO NOT PROCEED — fix data and re-run step 2')
else:
print(f'Validation passed — {len(cards)} batting cards checked, no issues')
asyncio.run(check_cards())
"
```
**Note:** Replace `CARDSET_ID` with the actual cardset ID (e.g., 27). The API returns `{'count': N, 'cards': [...]}` — always use `result.get('cards', [])` to extract the card list.
---
## Architecture
- `retrosheet_data.py` processes Retrosheet play-by-play data, calculates ratings, POSTs to API
- API stores cards in production database; cards are rendered on-demand via URL
- nginx caches rendered card images by date parameter (`?d=YYYY-MM-DD`)
- All operations are idempotent and safe to re-run
**Data sources**: Retrosheet events CSV, Baseball Reference defense CSVs (`data-input/`), FanGraphs splits (if needed)
**Required input files**:
- `data-input/retrosheet/retrosheets_events_*.csv`
- `data-input/<cardset name>/defense_*.csv` (defense_c.csv, defense_1b.csv, etc.)
- `data-input/<cardset name>/pitching.csv`, `running.csv`
**Scouting output**: 4 CSVs in `scouting/` — `batting-basic.csv`, `batting-ratings.csv`, `pitching-basic.csv`, `pitching-ratings.csv`
---
## Common Issues
**"No players found" after successful run**: Wrong database environment, wrong CARDSET_ID, or DATE mismatch. Check `alt_database` in `db_calls.py`. For promos, ensure PROMO_INCLUSION_RETRO_IDS is populated.
**High DH count (50+ players)**: Defense calculation failed. Check defense CSVs exist and column names match (`tz_runs_total` not `tz_runs_outfield`). Re-run step 2 after fixing.
**S3 upload fails**: Check `~/.aws/credentials`, verify cards render at API URL manually, re-run (idempotent).
**"surplus of X.XX chances" / "Adding X.XX results"**: Normal rounding adjustments in card generation — informational, not errors.
---
## Players of the Month (PotM) Variant
PotM cards use the same retrosheet pipeline but with a narrower date range, a promo cardset, and a curated player list.
### Key Differences from Full Cardset
| Setting | Full Cardset | PotM |
|---------|-------------|------|
| `--description` | `Live` | `<Month> PotM` (e.g., `April PotM`) |
| `--cardset-id` | Live cardset (e.g., 27) | Promo cardset (e.g., 28) |
| `--start` / `--end` | Full season range | Single month (e.g., `20050401` - `20050430`) |
| `--min-pa-vl` / `--min-pa-vr` | 20 / 40 (auto) | 1 / 1 (auto when description != "Live") |
| Player filtering | All qualifying players | Only `PROMO_INCLUSION_RETRO_IDS` |
| Position updates | Yes | Skipped (promo players keep existing positions) |
### PotM Pre-Flight Checklist
1. **Choose players** — Typically 2 IF, 2 OF, 1 SP, 1 RP per league (AL/NL)
2. **Get Retro IDs** — Look up each player's `key_retro` (e.g., `rodra001` for A-Rod)
3. **Determine date range** — First and last day of the month in `YYYYMMDD` format
4. **Confirm promo cardset ID** — Usually a separate cardset from the live one
### PotM Steps
```bash
# 1. Dry-run to verify config
pd-cards retrosheet process <year> -c <promo_cardset_id> \
-d "<Month> PotM" \
--start <YYYYMMDD> --end <YYYYMMDD> \
--dry-run
# 2. Generate promo cards
pd-cards retrosheet process <year> -c <promo_cardset_id> \
-d "<Month> PotM" \
--start <YYYYMMDD> --end <YYYYMMDD>
# 3. Validate (expect higher DH count — promo players may lack defense data for short windows)
pd-cards retrosheet validate <promo_cardset_id>
# 4-5. Image validation (same as full cardset — check, validate groundball_b, then upload)
pd-cards upload check -c "<promo cardset name>"
# Run groundball_b validation (step 5 from main workflow)
pd-cards upload s3 -c "<promo cardset name>"
# 6-7. Scouting reports — ALWAYS regenerate for ALL cardsets (no --cardset-id filter)
pd-cards scouting all
pd-cards scouting upload
```
### PotM-Specific Gotchas
- **`PROMO_INCLUSION_RETRO_IDS` must be populated** — If description is not "Live", retrosheet_data.py filters to only these IDs. Empty list = 0 players generated.
- **Don't mix Live and PotM** — If `PROMO_INCLUSION_RETRO_IDS` has entries but description is "Live", the script warns and exits.
- **Description protection** — Once a player has a PotM description (e.g., "April PotM"), it is never overwritten by subsequent live series runs. Promo cardset descriptions are also protected: existing cards keep their original month.
- **Scouting must cover ALL cardsets** — PotM players appear in scouting alongside live players. Always run `pd-cards scouting all` without `--cardset-id` to avoid overwriting the unified scouting data with partial results.
### Example: May 2005 PotM
```bash
# Players: A-Rod (IF), Delgado (IF), Mench (OF), Abreu (OF), Colon (SP), Ryan (RP), Harang (SP), Hoffman (RP)
# Retro IDs configured in retrosheet_data.py PROMO_INCLUSION_RETRO_IDS
pd-cards retrosheet process 2005 -c 28 -d "May PotM" --start 20050501 --end 20050531 --dry-run
pd-cards retrosheet process 2005 -c 28 -d "May PotM" --start 20050501 --end 20050531
pd-cards retrosheet validate 28
pd-cards upload check -c "2005 Promos"
# Run groundball_b validation
pd-cards upload s3 -c "2005 Promos"
pd-cards scouting all
pd-cards scouting upload
```
---
**Last Updated**: 2026-02-15
**Version**: 3.2 (Fixed scouting commands to use CLI, fixed groundball_b validation script, added CLI parameter reference and example)

View File

@ -1,438 +0,0 @@
#!/usr/bin/env python3
"""
Card Refresh Utility Functions
Reusable functions for regenerating and uploading player cards to S3.
HOW CACHING WORKS:
The API caches generated card images by the `d=YYYY-M-D` query parameter.
Once a card is rendered for a given date, subsequent requests return the cached
image even if the underlying database data changed. To force regeneration,
pass a future date (typically tomorrow) as cache_bust_date.
TROUBLESHOOTING STALE CARDS:
1. Verify the database has correct values (battingcards / pitchingcards tables)
2. Ensure cache_bust_date is a future date (not today or earlier)
3. Confirm S3 upload succeeded (file exists in s3://paper-dynasty/cards/...)
4. Confirm player.image URL was updated with the new date
5. Clear browser cache / use incognito — CloudFront may also cache
S3 UPLOAD ISSUES:
- Check AWS creds: `aws sts get-caller-identity`
- Bucket: paper-dynasty (us-east-1)
- Required IAM permissions: s3:PutObject, s3:GetObject
"""
import os
import sys
import asyncio
from datetime import datetime, timedelta
from pathlib import Path
from typing import List, Dict, Optional, Tuple
import aiohttp
import boto3
from botocore.exceptions import ClientError
# Add parent directory for imports
sys.path.insert(0, '/home/cal/.claude/skills/paper-dynasty')
from api_client import PaperDynastyAPI
# AWS Configuration
AWS_BUCKET = "paper-dynasty"
AWS_REGION = "us-east-1"
def get_cache_bust_date(days_ahead: int = 1) -> str:
    """Build a cache-busting date string for card regeneration.

    The API caches card images by the ``d=YYYY-M-D`` query parameter, so a
    future date forces a fresh render.

    Args:
        days_ahead: How many days into the future (default 1 = tomorrow).

    Returns:
        Date string in the form "YYYY-M-D" (month/day are NOT zero-padded).
    """
    target = datetime.now() + timedelta(days=days_ahead)
    return "-".join(str(part) for part in (target.year, target.month, target.day))
async def fetch_card_image(
    session: aiohttp.ClientSession,
    player_id: int,
    card_type: str,
    cache_bust_date: str,
    output_dir: Path
) -> Tuple[int, str, Optional[str]]:
    """Download a single card image from the API and save it to disk.

    Args:
        session: Shared aiohttp session.
        player_id: Player ID.
        card_type: 'batting' or 'pitching'.
        cache_bust_date: Date string appended as ``?d=`` to defeat the API cache.
        output_dir: Directory the PNG is written into.

    Returns:
        Tuple of (player_id, status, file_path or error_message), where
        status is 'success' or 'error'.
    """
    card_url = (
        f"https://pd.manticorum.com/api/v2/players/"
        f"{player_id}/{card_type}card?d={cache_bust_date}"
    )
    try:
        # 30s total budget covers connect + server-side card rendering
        request_timeout = aiohttp.ClientTimeout(total=30)
        async with session.get(card_url, timeout=request_timeout) as response:
            if response.status != 200:
                return (player_id, 'error', f"HTTP {response.status}")
            destination = output_dir / f"player-{player_id}-{card_type}card.png"
            destination.write_bytes(await response.read())
            return (player_id, 'success', str(destination))
    except asyncio.TimeoutError:
        return (player_id, 'error', 'Request timeout')
    except Exception as exc:
        # Any other network / filesystem failure is reported, not raised
        return (player_id, 'error', str(exc))
async def fetch_cards_batch(
    player_ids: List[int],
    card_type: str,
    cache_bust_date: str,
    output_dir: Path
) -> Dict[int, str]:
    """Fetch multiple card images in parallel.

    Args:
        player_ids: List of player IDs.
        card_type: 'batting' or 'pitching'.
        cache_bust_date: Date string for cache-busting.
        output_dir: Directory to save images.

    Returns:
        Dict mapping player_id to local file path (successful fetches only).
    """
    results: Dict[int, str] = {}
    async with aiohttp.ClientSession() as session:
        tasks = [
            fetch_card_image(session, pid, card_type, cache_bust_date, output_dir)
            for pid in player_ids
        ]
        completed = await asyncio.gather(*tasks, return_exceptions=True)
        # BUG FIX: with return_exceptions=True, gather() yields raw exception
        # objects in place of results. The old code tuple-unpacked every item
        # first ("for player_id, status, result in completed"), which raised
        # TypeError on an exception before the isinstance() check could run.
        # gather() preserves task order, so zip with player_ids to recover the
        # player id for failed tasks.
        for pid, outcome in zip(player_ids, completed):
            if isinstance(outcome, BaseException):
                print(f" ❌ Player {pid}: {outcome}")
                continue
            player_id, status, result = outcome
            if status == 'success':
                results[player_id] = result
                print(f" ✓ Player {player_id}: {Path(result).name}")
            else:
                print(f" ❌ Player {player_id}: {result}")
    return results
def upload_to_s3(
    file_path: str,
    player_id: int,
    cardset_id: int,
    cache_bust_date: str,
    card_type: str = 'batting'
) -> str:
    """Upload a card image to S3 and return its cache-busted public URL.

    Args:
        file_path: Local file path of the rendered PNG.
        player_id: Player ID.
        cardset_id: Cardset ID (zero-padded to 3 digits in the S3 key).
        cache_bust_date: Date string appended as ``?d=`` so clients re-fetch.
        card_type: 'batting' or 'pitching'.

    Returns:
        S3 URL with cache-busting query parameter.

    Raises:
        Exception: If the S3 put_object call fails (chained from the
            underlying botocore ClientError).
    """
    s3_client = boto3.client('s3', region_name=AWS_REGION)
    s3_key = f"cards/cardset-{cardset_id:03d}/player-{player_id}/{card_type}card.png"
    try:
        with open(file_path, 'rb') as f:
            s3_client.put_object(
                Bucket=AWS_BUCKET,
                Key=s3_key,
                Body=f,
                ContentType='image/png',
                # One-year cache: the URL itself is versioned via the ?d= param,
                # so aggressive caching is safe.
                CacheControl='public, max-age=31536000'
            )
        s3_url = f"https://{AWS_BUCKET}.s3.{AWS_REGION}.amazonaws.com/{s3_key}?d={cache_bust_date}"
        return s3_url
    except ClientError as e:
        # FIX: chain the original ClientError ("from e") so the AWS error code
        # and traceback context are preserved instead of being discarded.
        raise Exception(f"S3 upload failed: {e}") from e
def regenerate_cards_for_players(
    player_ids: List[int],
    cardset_id: int,
    cache_bust_date: Optional[str] = None,
    upload_to_s3_flag: bool = True,
    update_player_records: bool = True,
    environment: str = 'prod',
    card_type: str = 'batting',
    batch_size: int = 50
) -> Dict:
    """
    Complete pipeline: fetch cards, upload to S3, update player records

    Args:
        player_ids: List of player IDs to refresh
        cardset_id: Cardset ID
        cache_bust_date: Date for cache-busting (default: tomorrow)
        upload_to_s3_flag: Whether to upload to S3
        update_player_records: Whether to update player.image URLs
            (only happens when upload_to_s3_flag is also True, since the
            new URL comes from the upload)
        environment: 'prod' or 'dev'
        card_type: 'batting' or 'pitching'
        batch_size: Number of players per batch

    Returns:
        Dict with 'success' (count), 'failures' (list of
        {'player_id', 'error'} dicts), 'total', and 's3_urls'
        (player_id -> uploaded URL)
    """
    if cache_bust_date is None:
        cache_bust_date = get_cache_bust_date()
    # Create output directory — one dir per cardset + date so reruns on the
    # same day reuse the same scratch space
    output_dir = Path("/tmp/card_refresh") / f"{cardset_id}_{cache_bust_date.replace('-', '')}"
    output_dir.mkdir(parents=True, exist_ok=True)
    api = PaperDynastyAPI(environment=environment, verbose=False)
    results = {
        'success': 0,
        'failures': [],
        'total': len(player_ids),
        's3_urls': {}
    }
    print(f"Regenerating {len(player_ids)} cards for cardset {cardset_id}")
    print(f"Cache-bust date: {cache_bust_date}")
    print(f"Output dir: {output_dir}")
    print()
    # Process in batches to bound the number of concurrent API requests
    for i in range(0, len(player_ids), batch_size):
        batch = player_ids[i:i + batch_size]
        batch_num = (i // batch_size) + 1
        # Ceiling division for the batch count
        total_batches = (len(player_ids) + batch_size - 1) // batch_size
        print(f"Batch {batch_num}/{total_batches} ({len(batch)} players)")
        # Fetch cards (parallel within the batch); only successful fetches
        # appear in local_files
        local_files = asyncio.run(fetch_cards_batch(
            batch, card_type, cache_bust_date, output_dir
        ))
        # Upload to S3 and update records
        for player_id, file_path in local_files.items():
            try:
                if upload_to_s3_flag:
                    s3_url = upload_to_s3(
                        file_path, player_id, cardset_id, cache_bust_date, card_type
                    )
                    results['s3_urls'][player_id] = s3_url
                    if update_player_records:
                        # Point player.image at the new cache-busted S3 URL
                        api.patch('players', object_id=player_id, params=[('image', s3_url)])
                results['success'] += 1
            except Exception as e:
                print(f" ❌ Failed to process player {player_id}: {e}")
                results['failures'].append({'player_id': player_id, 'error': str(e)})
        print()
    return results
def verify_switch_hitters(cardset_id: int, environment: str = 'prod') -> Dict:
    """
    Verify all switch hitters in a cardset have correct handedness

    Args:
        cardset_id: Cardset ID to check
        environment: 'prod' or 'dev'

    Returns:
        Dict with 'correct', 'incorrect' (lists of player detail dicts)
        and 'total_checked' (count of switch hitters examined)
    """
    api = PaperDynastyAPI(environment=environment, verbose=False)
    # Get all batting cards for the cardset
    players = api.get('players', params=[('cardset_id', cardset_id)])['players']
    results = {
        'correct': [],
        'incorrect': [],
        'total_checked': 0
    }
    print(f"Checking switch hitters in cardset {cardset_id}...")
    for player in players:
        player_id = player['player_id']
        # Check if player has a batting card
        try:
            # Get battingcard record via API v2 (variant 0 = base card)
            bc_response = api.get('battingcards', params=[
                ('player_id', player_id),
                ('variant', 0)
            ])
            if bc_response['count'] == 0:
                continue  # No batting card
            battingcard = bc_response['battingcards'][0]
            hand = battingcard.get('hand')
            if hand == 'S':
                results['total_checked'] += 1
                # Verify image URL has recent date (not cached with old data)
                image_url = player.get('image', '')
                # Check if image is from S3 and relatively recent.
                # NOTE(review): this only checks the URL host, not the ?d=
                # date — "correct" here means "has an S3 URL", not that the
                # rendered image is proven fresh.
                if 's3.amazonaws.com' in image_url:
                    results['correct'].append({
                        'player_id': player_id,
                        'name': player['p_name'],
                        'hand': hand,
                        'image_url': image_url
                    })
                else:
                    results['incorrect'].append({
                        'player_id': player_id,
                        'name': player['p_name'],
                        'hand': hand,
                        'issue': 'No S3 URL',
                        'image_url': image_url
                    })
        except Exception as e:
            # Best-effort: skip players whose card lookup fails
            print(f" Error checking player {player_id}: {e}")
            continue
    print(f"\nResults:")
    print(f" Total switch hitters: {results['total_checked']}")
    print(f" Correct: {len(results['correct'])}")
    print(f" Needs refresh: {len(results['incorrect'])}")
    if results['incorrect']:
        print(f"\nSwitch hitters needing refresh:")
        for player in results['incorrect']:
            print(f" - {player['name']} (ID: {player['player_id']}): {player['issue']}")
    return results
def regenerate_cards_for_cardset(
    cardset_id: int,
    environment: str = 'prod',
    cache_bust_date: Optional[str] = None,
    player_filter: Optional[Dict] = None,
    batch_size: int = 50
) -> Dict:
    """
    Regenerate all cards for an entire cardset

    Args:
        cardset_id: Cardset ID
        environment: 'prod' or 'dev'
        cache_bust_date: Date for cache-busting (default: tomorrow)
        player_filter: Optional dict of filters (e.g., {'pos_include': 'SP'})
            forwarded verbatim as query params to the players endpoint
        batch_size: Players per batch

    Returns:
        Dict with 'batters' and 'pitchers' keys, each holding the result
        dict from regenerate_cards_for_players (empty dict if no players
        of that kind)
    """
    api = PaperDynastyAPI(environment=environment, verbose=False)
    # Get players
    params = [('cardset_id', cardset_id)]
    if player_filter:
        for key, value in player_filter.items():
            params.append((key, value))
    players = api.get('players', params=params)['players']
    # Separate batters and pitchers by position fields
    batters = []
    pitchers = []
    for player in players:
        # Collect pos_1 .. pos_8 — assumes at most 8 position slots per
        # player record; TODO confirm against the players schema
        positions = [player.get(f'pos_{i}') for i in range(1, 9)]
        positions = [p for p in positions if p]
        # Any pitching position classifies the player as a pitcher
        if any(pos in ['SP', 'RP', 'CP'] for pos in positions):
            pitchers.append(player['player_id'])
        else:
            batters.append(player['player_id'])
    print(f"Cardset {cardset_id}: {len(batters)} batters, {len(pitchers)} pitchers")
    results = {'batters': {}, 'pitchers': {}}
    if batters:
        print("\n=== REGENERATING BATTING CARDS ===")
        results['batters'] = regenerate_cards_for_players(
            batters, cardset_id, cache_bust_date,
            upload_to_s3_flag=True,
            update_player_records=True,
            environment=environment,
            card_type='batting',
            batch_size=batch_size
        )
    if pitchers:
        print("\n=== REGENERATING PITCHING CARDS ===")
        results['pitchers'] = regenerate_cards_for_players(
            pitchers, cardset_id, cache_bust_date,
            upload_to_s3_flag=True,
            update_player_records=True,
            environment=environment,
            card_type='pitching',
            batch_size=batch_size
        )
    return results
if __name__ == "__main__":
    # Example usage — this module is primarily imported as a library; running
    # it directly only prints the cache-bust date and these commented recipes.
    print("Card Utilities - Example Usage\n")
    # Get tomorrow's date
    tomorrow = get_cache_bust_date()
    print(f"Tomorrow's date: {tomorrow}\n")
    # Example: Refresh specific players
    # player_ids = [12785, 12788, 12854]
    # results = regenerate_cards_for_players(
    #     player_ids=player_ids,
    #     cardset_id=27,
    #     cache_bust_date=tomorrow,
    #     upload_to_s3_flag=True,
    #     update_player_records=True
    # )
    # print(f"Success: {results['success']}/{results['total']}")
    # Example: Verify switch hitters
    # verify_switch_hitters(cardset_id=27, environment='prod')
    # Example: Refresh entire cardset
    # regenerate_cards_for_cardset(cardset_id=27, batch_size=50)

View File

@ -1,263 +0,0 @@
# Custom Card Creation
## Purpose
Create fictional player cards using baseball archetypes (interactive tool) or manually create custom player database records via API calls.
## Interactive Creator
**Script**: `/mnt/NV2/Development/paper-dynasty/card-creation/custom_cards/interactive_creator.py`
**Supporting**: `archetype_definitions.py`, `archetype_calculator.py`
```bash
cd /mnt/NV2/Development/paper-dynasty/card-creation
source venv/bin/activate
python -m custom_cards.interactive_creator
```
The interactive creator handles: cardset setup, archetype selection, rating calculation, review/tweak, database creation, S3 upload.
### Archetypes
**Batter**: Power Slugger, Contact Hitter, Speedster, Balanced Star, Patient Walker, Slap Hitter, Three True Outcomes, Defensive Specialist
**Pitcher**: Ace, Power Pitcher, Finesse Pitcher, Groundball Specialist, Dominant Closer, Setup Man, Swingman, Lefty Specialist
---
## Manual Creation: Database Submission Template
When creating a custom player without the interactive tool:
```python
import asyncio
from db_calls import db_post, db_put, db_patch, db_get
from creation_helpers import mlbteam_and_franchise
# 1. Create Player (ASK USER for cost and rarity_id - NEVER make up values)
mlb_team_id, franchise_id = mlbteam_and_franchise('FA')
player_payload = {
'p_name': 'Player Name',
'cost': '88', # STRING - user specifies
'image': 'change-me',
'mlbclub': mlb_team_id,
'franchise': franchise_id,
'cardset_id': 29,
'set_num': 99999,
'rarity_id': 3, # INT - user specifies (see rarity table)
'pos_1': '1B',
'description': '2005 Custom',
'bbref_id': 'custom_playerp01',
'fangr_id': 0,
'mlbplayer_id': None # None, not 0
}
player = await db_post('players', payload=player_payload)
player_id = player['player_id']
# 2. Create BattingCard
batting_card = {
'player_id': player_id,
'key_bbref': 'custom_playerp01',
'key_fangraphs': 0,
'key_mlbam': 0,
'key_retro': '',
'name_first': 'Player',
'name_last': 'Name',
'steal_low': 7, # 0-20 scale (2d10)
'steal_high': 11, # 0-20 scale (2d10)
'steal_auto': 0, # 0 or 1
'steal_jump': 0.055, # 0.0-1.0 (fraction of 36)
'hit_and_run': 'A', # A, B, C, or D
'running': 12, # 8-17 scale
'hand': 'R' # R, L, or S
}
await db_put('battingcards', payload={'cards': [batting_card]}, timeout=10)
bc_result = await db_get('battingcards', params=[('player_id', player_id)])
battingcard_id = bc_result['cards'][0]['id']
# 3. Create BattingCardRatings (one per opposing hand — see full field ref below)
rating_vl = {
'battingcard_id': battingcard_id,
'bat_hand': 'R',
'vs_hand': 'L',
# Hits
'homerun': 0.55,
'bp_homerun': 1.00, # MUST be whole number (0, 1, 2, or 3)
'triple': 0.80,
'double_three': 0.00, # Usually 0.00 (reserved)
'double_two': 5.60,
'double_pull': 2.95,
'single_two': 10.35,
'single_one': 4.95,
'single_center': 8.45,
'bp_single': 5.00, # MUST be whole number (usually 5.0)
# On-base
'walk': 8.15,
'hbp': 0.90,
# Outs
'strikeout': 13.05,
'lineout': 11.60,
'popout': 0.00, # Usually 0.00 (added during image gen)
'flyout_a': 0.00, # Only 1.0 for power hitters (HR% > 10%)
'flyout_bq': 5.30,
'flyout_lf_b': 4.55,
'flyout_rf_b': 3.70,
'groundout_a': 9.45, # Double play balls
'groundout_b': 6.55,
'groundout_c': 5.10,
# Percentages
'hard_rate': 0.33,
'med_rate': 0.50,
'soft_rate': 0.17,
'pull_rate': 0.38,
'center_rate': 0.36,
'slap_rate': 0.26
}
# Create matching rating_vr with vs_hand='R' and different values
await db_put('battingcardratings', payload={'ratings': [rating_vl, rating_vr]}, timeout=10)
# 4. Create CardPositions (use db_put, NOT db_post)
await db_put('cardpositions', payload={'positions': [
{'player_id': player_id, 'position': 'LF', 'range': 3, 'error': 7, 'arm': 2},
{'player_id': player_id, 'position': '2B', 'range': 4, 'error': 12}
]})
# 5. Generate card image, upload to S3, update player
await db_patch('players', object_id=player_id, params=[('image', s3_url)])
```
---
## Rating Constraints
- All D20 chances must be multiples of **0.05**
- Total chances must equal exactly **108.00** (D20 x 5.4)
- Apply **+/-0.5 randomization** to avoid mechanical-looking cards
- **bp_homerun** rules:
- hr_count < 0.5: BP-HR = 0, HR = 0
- hr_count <= 1.0: BP-HR = 1, HR = 0
- hr_count < 3: BP-HR = 1, HR = hr_count - 1
- hr_count < 6: BP-HR = 2, HR = hr_count - 2
- hr_count >= 6: BP-HR = 3, HR = hr_count - 3
- **NEVER allow negative regular HR values**
- When removing HRs to lower OPS, redistribute to **singles** (not doubles) for more effective SLG reduction
### Ballpark (BP) Result Calculations
- BP-HR and BP-Single multiply by **0.5** for AVG/OBP
- BP results use **full value** for SLG (BP-HR = 2 bases, BP-Single = 1 base)
```python
# AVG/OBP
total_hits = homerun + (bp_homerun * 0.5) + triple + ... + (bp_single * 0.5)
avg = total_hits / 108
# SLG
total_bases = (homerun * 4) + (bp_homerun * 2) + (triple * 3) + ... + bp_single
slg = total_bases / 108
```
### Total OPS Formula
- **Batters**: `(OPS_vR + OPS_vL + min(OPS_vL, OPS_vR)) / 3` (weaker split double-counted)
- **Pitchers**: `(OPS_vR + OPS_vL + max(OPS_vL, OPS_vR)) / 3` (stronger split double-counted)
---
## Database Field Reference
### Player Table (ALL fields required)
`p_name`, `bbref_id`, `hand`, `mlbclub`, `franchise`, `cardset_id`, `description`, `is_custom`, `image`, `set_num`, `pos_1`, `cost` (STRING), `rarity_id` (INT), `mlbplayer_id` (None, not 0), `fangr_id` (0 for custom)
### Rarity IDs (Batters)
| ID | Rarity | OPS Threshold |
|----|--------|---------------|
| 99 | Hall of Fame | 1.200+ |
| 1 | Diamond | 1.000+ |
| 2 | All-Star | 0.900+ |
| 3 | Starter | 0.800+ |
| 4 | Reserve | 0.700+ |
| 5 | Replacement | remainder |
**NEVER make up rarity IDs or cost values** — always ask the user.
### CardPosition Fields
```python
{
'player_id': int,
'variant': 0,
'position': str, # C, 1B, 2B, 3B, SS, LF, CF, RF, DH, P
'innings': 1,
'range': int, # NOT fielding_rating
'error': int, # NOT fielding_error
'arm': int, # NOT fielding_arm — required for C, LF, CF, RF
'pb': int, # Catchers only (NOT catcher_pb)
'overthrow': int # Catchers only (NOT catcher_throw)
}
```
### MLBPlayer Field Names
Use `first_name`/`last_name` (NOT `name_first`/`name_last`)
### DB Operation Methods
| Endpoint | Method |
|----------|--------|
| `players`, `mlbplayers` | `db_post` |
| `battingcards`, `pitchingcards` | `db_put` |
| `battingcardratings`, `pitchingcardratings` | `db_put` |
| `cardpositions` | `db_put` (NOT db_post) |
---
## Common Mistakes
- **mlbplayer_id = 0**: Use `None`, not `0` — avoids FK constraint issues
- **db_post for cardpositions**: Must use `db_put` — endpoint doesn't support POST
- **Missing required fields**: ALL Player fields listed above are required
- **Wrong field names**: `range`/`error`/`arm` for positions, `first_name`/`last_name` for MLBPlayer
- **Making up rarity/cost**: NEVER assume — always ask the user
---
## Handling Existing Records
```python
# Check for existing MLBPlayer by bbref_id, then fall back to name
mlb_query = await db_get('mlbplayers', params=[('key_bbref', bbref_id)])
if not mlb_query or mlb_query.get('count', 0) == 0:
mlb_query = await db_get('mlbplayers', params=[
('first_name', name_first), ('last_name', name_last)
])
# Check for existing Player before creating
p_query = await db_get('players', params=[('bbref_id', bbref_id), ('cardset_id', cardset_id)])
if p_query and p_query.get('count', 0) > 0:
player_id = p_query['players'][0]['player_id'] # Use existing
```
---
## Custom Player Conventions
- **bbref_id**: `custom_{lastname}{firstinitial}01` (e.g., `custom_smithj01`)
- **Org**: `mlbclub` and `franchise` = "Custom Ballplayers"
- **Default cardset**: 29 | **set_num**: 9999
- **Flags**: `is_custom: True`, `fangraphs_id: 0`, `mlbam_id: 0`
- **Reference impl**: `/mnt/NV2/Development/paper-dynasty/card-creation/create_valerie_theolia.py`
## Verification
- **Dev**: `https://pddev.manticorum.com/api/v2/players/{player_id}/battingcard`
- **Prod**: `https://pd.manticorum.com/api/v2/players/{player_id}/battingcard`
- Add `?html=true` for HTML preview instead of PNG
---
**Last Updated**: 2026-02-12
**Version**: 3.0 (Merged custom-player-database-creation.md; trimmed redundant content)

View File

@ -1,181 +0,0 @@
# Live Series Update Workflow
Used during the MLB regular season to generate cards from current-year FanGraphs split data and Baseball Reference fielding/running stats.
## Pre-Flight
Ask the user before starting:
1. **Which cardset?** (e.g., "2025 Season")
2. **How many games played?** (determines season percentage for min PA thresholds)
3. **Which environment?** (prod or dev — check `alt_database` in `db_calls.py`)
All commands run from `/mnt/NV2/Development/paper-dynasty/card-creation/`.
## Data Sourcing
Live series uses **FanGraphs splits** for batting/pitching and **Baseball Reference** for defense/running.
### FanGraphs Data (Manual Download)
FanGraphs split data must be downloaded manually via `scripts/fangraphs_scrape.py` or the FanGraphs web UI. The scraper uses Selenium to export 8 CSV files:
| File | Content |
|------|---------|
| `Batting_vLHP_Standard.csv` | Batting vs LHP — standard stats |
| `Batting_vLHP_BattedBalls.csv` | Batting vs LHP — batted ball profile |
| `Batting_vRHP_Standard.csv` | Batting vs RHP — standard stats |
| `Batting_vRHP_BattedBalls.csv` | Batting vs RHP — batted ball profile |
| `Pitching_vLHH_Standard.csv` | Pitching vs LHH — standard stats |
| `Pitching_vLHH_BattedBalls.csv` | Pitching vs LHH — batted ball profile |
| `Pitching_vRHH_Standard.csv` | Pitching vs RHH — standard stats |
| `Pitching_vRHH_BattedBalls.csv` | Pitching vs RHH — batted ball profile |
These map to the expected input files in `data-input/{cardset} Cardset/`:
- `vlhp-basic.csv` / `vlhp-rate.csv`
- `vrhp-basic.csv` / `vrhp-rate.csv`
- `vlhh-basic.csv` / `vlhh-rate.csv`
- `vrhh-basic.csv` / `vrhh-rate.csv`
**For PotM**: Adjust the `startDate` and `endDate` in the scraper to cover only the target month.
### Baseball Reference Data
Fielding stats are pulled automatically during card generation when `--pull-fielding` is enabled (default). Running and pitching stats come from CSVs in the data-input directory.
---
## Steps
```bash
# 1. Download FanGraphs splits data
# Run the scraper or manually download from FanGraphs splits leaderboard
# Place CSVs in data-input/{cardset} Cardset/
# 2. Verify config (dry-run)
pd-cards live-series update --cardset "<cardset name>" --games <N> --dry-run
# 3. Generate cards (POSTs player data to API)
pd-cards live-series update --cardset "<cardset name>" --games <N>
# 4. Generate images WITHOUT upload (triggers rendering)
pd-cards upload check -c "<cardset name>"
# 5. CRITICAL: Validate database for negative groundball_b — STOP if errors found
# (see card-generation.md "Bug Prevention" section for validation query)
# 6. Upload to S3 (fast — uses cached images from step 4)
pd-cards upload s3 -c "<cardset name>"
# 7. Generate scouting reports (ALWAYS run for ALL cardsets)
pd-cards scouting all
# 8. Upload scouting CSVs to production server
pd-cards scouting upload
```
**Verify scouting upload**: `ssh akamai "ls -lh container-data/paper-dynasty/storage/ | grep -E 'batting|pitching'"`
---
## Key Differences from Retrosheet Workflow
| Aspect | Live Series | Retrosheet |
|--------|-------------|------------|
| **Data source** | FanGraphs splits + BBRef | Retrosheet play-by-play events |
| **CLI command** | `pd-cards live-series update` | `pd-cards retrosheet process` |
| **Season progress** | `--games N` (1-162) | `--season-pct` + `--start`/`--end` dates |
| **Defense data** | Auto-pulled from BBRef (`--pull-fielding`) | Pre-downloaded defense CSVs |
| **Position validation** | Built-in (skips for promo cardsets) | Separate `pd-cards retrosheet validate` step |
| **Arm ratings** | Not applicable (BBRef has current data) | Generated from Retrosheet events |
| **Recency bias** | Not applicable | `--last-twoweeks-ratio` (auto-enabled after May 30) |
| **Player ID lookup** | FanGraphs/BBRef IDs in CSV | Retrosheet IDs → pybaseball reverse lookup |
---
## Players of the Month (PotM) Variant
During the regular season, PotM cards are generated from the same FanGraphs pipeline but filtered to a single month's stats and posted to a promo cardset.
### Key Differences from Full Update
| Setting | Full Update | PotM |
|---------|------------|------|
| Cardset | Season cardset (e.g., "2025 Season") | Promo cardset (e.g., "2025 Promos") |
| FanGraphs date range | Season start → current date | Month start → month end |
| `--games` | Cumulative games played | Games in that month (~27) |
| `--ignore-limits` | Usually no | Usually yes (short sample) |
| Position updates | Yes | Skipped (cardset name contains "promos") |
### PotM Pre-Flight Checklist
1. **Choose players** — Typically 2 IF, 2 OF, 1 SP, 1 RP per league
2. **Download month-specific FanGraphs data** — Set date range in scraper to the target month only
3. **Confirm promo cardset exists** in the database
4. **Place CSVs** in the promo cardset's data-input directory
### PotM Steps
```bash
# 1. Download FanGraphs splits for the target month only
# Adjust startDate/endDate in fangraphs_scrape.py or manual download
# Place in data-input/{promo cardset} Cardset/
# 2. Dry-run
pd-cards live-series update --cardset "<promo cardset>" --games <month_games> \
--description "<Month> PotM" --ignore-limits --dry-run
# 3. Generate cards
pd-cards live-series update --cardset "<promo cardset>" --games <month_games> \
--description "<Month> PotM" --ignore-limits
# 4-6. Image validation and S3 upload (same pattern)
pd-cards upload check -c "<promo cardset name>"
# Run groundball_b validation
pd-cards upload s3 -c "<promo cardset name>"
# 7-8. Scouting reports — ALWAYS regenerate for ALL cardsets
pd-cards scouting all
pd-cards scouting upload
```
### PotM-Specific Notes
- **Position updates are skipped** when the cardset name contains "promos" (both live_series_update.py and the CLI check for this).
- **Description protection** — PotM descriptions (e.g., "April PotM") are never overwritten by subsequent full-cardset runs. The `should_update_player_description()` helper checks for "potm" in the existing description.
- **`--ignore-limits`** is typically needed because a single month may not produce enough PA/TBF to meet normal thresholds (20 vL / 40 vR).
- **Scouting must cover ALL cardsets** — PotM players appear alongside live players. Always run `pd-cards scouting all` without `--cardset-id` to preserve the unified scouting view.
### Example: June 2025 PotM
```bash
# Download June-only FanGraphs splits (June 1 - June 30)
# Place CSVs in data-input/2025 Promos Cardset/
pd-cards live-series update --cardset "2025 Promos" --games 27 \
--description "June PotM" --ignore-limits --dry-run
pd-cards live-series update --cardset "2025 Promos" --games 27 \
--description "June PotM" --ignore-limits
pd-cards upload check -c "2025 Promos"
pd-cards upload s3 -c "2025 Promos"
pd-cards scouting all
pd-cards scouting upload
```
---
## Common Issues
**"No players found"**: Wrong cardset name or database environment. Verify `alt_database` in `db_calls.py`.
**Missing FanGraphs CSVs**: The scraper requires Chrome/Selenium. If it fails, download manually from FanGraphs splits leaderboard with the correct date range and stat group settings.
**High DH count**: Defense pull failed or BBRef was rate-limited. Re-run with `--pull-fielding` or manually download defense CSVs.
**Early-season runs**: Use `--ignore-limits` when games played is low (< ~40) to avoid filtering out most players.
---
**Last Updated**: 2026-02-14
**Version**: 1.0 (Initial workflow documentation)