Implement Phase 1 foundation: DB schema, queries, and config integration

Wire up the full data pipeline for the Rust TUI rewrite:
- SQL schema creation for all 9 tables with correct types, FKs, and constraints
- 20 async query functions (teams, players, cards, lineups, sync status, cache)
- Config loading via figment integrated into main.rs startup flow
- App struct now holds SqlitePool and Settings for screen access
- Roster aggregate query and Lineup JSON helper methods
- Added csv, sha2, regex crates for upcoming phases

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Cal Corum 2026-02-26 19:17:36 -06:00
parent 6ddbd82f7c
commit 2005307b7a
8 changed files with 1105 additions and 7 deletions

28
rust/Cargo.lock generated
View File

@ -349,6 +349,27 @@ dependencies = [
"phf",
]
[[package]]
name = "csv"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "52cd9d68cf7efc6ddfaaee42e7288d3a99d613d4b50f76ce9827ae0c6e14f938"
dependencies = [
"csv-core",
"itoa",
"ryu",
"serde_core",
]
[[package]]
name = "csv-core"
version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "704a3c26996a80471189265814dbc2c257598b96b8a7feae2d31ace646bb9782"
dependencies = [
"memchr",
]
[[package]]
name = "darling"
version = "0.23.0"
@ -2158,11 +2179,14 @@ dependencies = [
"anyhow",
"chrono",
"crossterm 0.28.1",
"csv",
"figment",
"ratatui",
"regex",
"reqwest",
"serde",
"serde_json",
"sha2",
"sqlx",
"thiserror 2.0.18",
"tokio",
@ -2428,6 +2452,7 @@ checksum = "ee6798b1838b6a0f69c007c133b8df5866302197e404e8b6ee8ed3e3a5e68dc6"
dependencies = [
"base64",
"bytes",
"chrono",
"crc",
"crossbeam-queue",
"either",
@ -2503,6 +2528,7 @@ dependencies = [
"bitflags 2.11.0",
"byteorder",
"bytes",
"chrono",
"crc",
"digest",
"dotenvy",
@ -2544,6 +2570,7 @@ dependencies = [
"base64",
"bitflags 2.11.0",
"byteorder",
"chrono",
"crc",
"dotenvy",
"etcetera",
@ -2578,6 +2605,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2d12fe70b2c1b4401038055f90f151b78208de1f9f89a7dbfd41587a10c3eea"
dependencies = [
"atoi",
"chrono",
"flume",
"futures-channel",
"futures-core",

View File

@ -12,7 +12,7 @@ crossterm = { version = "0.28", features = ["event-stream"] }
tokio = { version = "1", features = ["full"] }
# Database
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] }
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite", "chrono"] }
# HTTP client
reqwest = { version = "0.12", features = ["json"] }
@ -35,3 +35,12 @@ tracing-subscriber = "0.3"
# Date/time
chrono = { version = "0.4", features = ["serde"] }
# CSV parsing
csv = "1"
# Hashing
sha2 = "0.10"
# Regex
regex = "1"

View File

@ -0,0 +1,351 @@
{
"meta": {
"version": "1.0.0",
"created": "2026-02-26",
"lastUpdated": "2026-02-26",
"planType": "migration",
"phase": "Phase 1: Foundation (DB + Config + Schema)",
"description": "Wire up the data pipeline foundation for the SBA Scout Rust TUI rewrite. Database schema creation, full query layer, config integration, and dependency additions.",
"totalEstimatedHours": 18,
"totalTasks": 12,
"completedTasks": 0
},
"categories": {
"critical": "Must complete before any other phase can start",
"high": "Required for data pipeline to function",
"medium": "Query functions needed by screens",
"low": "Dependency prep for later phases"
},
"tasks": [
{
"id": "CRIT-001",
"name": "Add SQL migration file for all 9 tables",
"description": "Create a sqlx migration (or embedded SQL) that defines CREATE TABLE statements for all 9 tables matching the Python SQLAlchemy models exactly. Must include all columns, types, defaults, foreign keys, and unique constraints. sqlx does not have ORM-style create_all — tables must be defined as raw SQL.",
"category": "critical",
"priority": 1,
"completed": false,
"tested": false,
"dependencies": [],
"files": [
{
"path": "rust/src/db/schema.rs",
"lines": [6],
"issue": "Pool init exists but no table creation logic"
},
{
"path": "src/sba_scout/db/models.py",
"lines": [39, 83, 181, 246, 318, 351, 383, 413, 459],
"issue": "Python reference — all 9 model classes with exact column specs"
}
],
"suggestedFix": "1. Add a `create_tables` async fn in schema.rs that runs raw SQL via `sqlx::query()`. 2. Define CREATE TABLE IF NOT EXISTS for: teams (with UNIQUE(abbrev, season)), players (FK teams.id), batter_cards (FK players.id, UNIQUE player_id), pitcher_cards (FK players.id, UNIQUE player_id), transactions (UNIQUE(move_id, player_id)), lineups (FK players.id for starting_pitcher_id), matchup_cache (UNIQUE(batter_id, pitcher_id)), standardized_score_cache (UNIQUE(batter_card_id, split), UNIQUE(pitcher_card_id, split)), sync_status (UNIQUE entity_type). 3. Call create_tables from main.rs after pool init. Column types: INTEGER for ids/ints, REAL for floats, TEXT for strings, BOOLEAN for bools (SQLite stores as 0/1), TEXT for JSON columns (batting_order, positions, details, stat_scores).",
"estimatedHours": 2,
"notes": "SQLite has no native JSON type — use TEXT and serialize/deserialize with serde_json in Rust. SQLite also has no native DATETIME — use TEXT in ISO 8601 format (chrono::NaiveDateTime serializes this way). The Python models use autoincrement for most PKs, but Team and Player use API-provided IDs as PK (not autoincrement)."
},
{
"id": "CRIT-002",
"name": "Implement database session/connection management",
"description": "Port the Python get_session() context manager pattern to Rust. Need a way to acquire a connection from the pool, run queries, and handle commit/rollback. The Python version uses async context manager with auto-commit on success and auto-rollback on exception.",
"category": "critical",
"priority": 2,
"completed": false,
"tested": false,
"dependencies": ["CRIT-001"],
"files": [
{
"path": "rust/src/db/schema.rs",
"lines": [6, 18],
"issue": "Only has init_pool — no session management, no create_tables"
},
{
"path": "src/sba_scout/db/schema.py",
"lines": [46, 63, 65, 76, 85, 97],
"issue": "Python reference — get_session ctx manager, init/drop/reset/close_database"
}
],
"suggestedFix": "sqlx uses the pool directly (no ORM session). For transaction support, use `pool.begin()` which returns a `Transaction` that auto-rolls-back on drop. Add helper functions: 1. `create_tables(pool)` — runs the migration SQL. 2. `reset_database(pool)` — DROP TABLE IF EXISTS for all 9 tables, then create_tables. Most queries will just use `&SqlitePool` directly since sqlx auto-manages connections. For operations that need atomicity (sync upserts), use `pool.begin()` explicitly.",
"estimatedHours": 1,
"notes": "Unlike SQLAlchemy, sqlx doesn't need a session factory pattern. The pool IS the connection manager. Keep it simple — don't over-abstract."
},
{
"id": "CRIT-003",
"name": "Integrate config loading into main.rs and App",
"description": "Wire up the existing config.rs (figment-based Settings) into the application startup flow. Load settings in main(), pass to App, pass db_path to pool init. Currently main.rs ignores config entirely.",
"category": "critical",
"priority": 3,
"completed": false,
"tested": false,
"dependencies": ["CRIT-001", "CRIT-002"],
"files": [
{
"path": "rust/src/main.rs",
"lines": [15, 18, 24],
"issue": "No config loading, no DB pool init, App::new() takes no args"
},
{
"path": "rust/src/app.rs",
"lines": [19, 24],
"issue": "App struct has no fields for settings or db pool"
},
{
"path": "rust/src/config.rs",
"lines": [106],
"issue": "load_settings() exists but is never called"
}
],
"suggestedFix": "1. In main.rs: call `load_settings()`, then `init_pool(&settings.db_path)`, then `create_tables(&pool)`. 2. Add `pool: SqlitePool` and `settings: Settings` fields to App struct. 3. Update App::new(settings, pool) constructor. 4. Pass pool reference to screen render functions (they'll need it for queries in Phase 4). 5. Ensure data/ directory is created if missing (match Python's ensure_db_directory validator). 6. Add TOML settings file support — config.rs already has Toml provider but the Rust config uses settings.toml while Python uses settings.yaml. Decide on TOML (idiomatic Rust) and document the difference.",
"estimatedHours": 1.5,
"notes": "The Python app uses a global lazy singleton for settings. In Rust, prefer passing owned/borrowed Settings through the app rather than using a global. The pool is Clone-able (it's an Arc internally) so passing it around is cheap."
},
{
"id": "HIGH-001",
"name": "Implement team query functions",
"description": "Port all team queries from Python db/queries.py to Rust db/queries.rs: get_all_teams, get_team_by_abbrev, get_team_by_id.",
"category": "high",
"priority": 4,
"completed": false,
"tested": false,
"dependencies": ["CRIT-002"],
"files": [
{
"path": "rust/src/db/queries.rs",
"lines": [1, 2],
"issue": "Empty stub — only has a comment"
},
{
"path": "src/sba_scout/db/queries.py",
"lines": [31, 61, 72],
"issue": "Python reference — 3 team query functions"
}
],
"suggestedFix": "Use `sqlx::query_as::<_, Team>()` with hand-written SQL. Key details: 1. `get_all_teams(pool, season, active_only)` — WHERE season = ? AND (if active_only) abbrev NOT LIKE '%IL' AND abbrev NOT LIKE '%MiL' ORDER BY abbrev. 2. `get_team_by_abbrev(pool, abbrev, season)` — WHERE abbrev = ? AND season = ? returns Option<Team>. 3. `get_team_by_id(pool, team_id)` — WHERE id = ? returns Option<Team>.",
"estimatedHours": 1,
"notes": "sqlx::query_as maps rows directly to structs via FromRow derive (already on models). Use fetch_all for lists, fetch_optional for Option<T>."
},
{
"id": "HIGH-002",
"name": "Implement player query functions",
"description": "Port all player queries: get_players_by_team, get_player_by_id, get_player_by_name, search_players, get_pitchers, get_batters, get_players_missing_cards.",
"category": "high",
"priority": 5,
"completed": false,
"tested": false,
"dependencies": ["CRIT-002"],
"files": [
{
"path": "rust/src/db/queries.rs",
"lines": [1, 2],
"issue": "Empty stub"
},
{
"path": "src/sba_scout/db/queries.py",
"lines": [84, 113, 131, 153, 189, 214, 259],
"issue": "Python reference — 7 player query functions"
}
],
"suggestedFix": "Key differences from Python: 1. No ORM eager loading (selectinload) — sqlx uses raw SQL. For 'include_cards', use LEFT JOIN on batter_cards/pitcher_cards and map to a custom struct (PlayerWithCards) that has Option<BatterCard> and Option<PitcherCard>. Alternatively, do separate queries (simpler, matches Python's selectinload which runs separate SELECTs anyway). 2. `search_players` uses LIKE '%query%' (SQLite is case-insensitive for ASCII by default with LIKE). 3. `get_pitchers` checks pos_1 OR pos_2 IN ('SP','RP','CP'). 4. `get_batters` checks pos_1 IN ('C','1B','2B','3B','SS','LF','CF','RF','DH'). 5. `get_players_missing_cards` uses subquery anti-join: WHERE id NOT IN (SELECT player_id FROM batter_cards).",
"estimatedHours": 2.5,
"notes": "Decide on the 'include_cards' pattern early. Recommend: separate queries approach (fetch players, then batch-fetch cards by player_ids). This avoids complex JOIN mapping and matches how the Python ORM actually executes selectinload. Create a PlayerWithCards struct or add a method to attach cards after loading."
},
{
"id": "HIGH-003",
"name": "Implement card query functions",
"description": "Port card queries: get_batter_card, get_pitcher_card.",
"category": "high",
"priority": 6,
"completed": false,
"tested": false,
"dependencies": ["CRIT-002"],
"files": [
{
"path": "rust/src/db/queries.rs",
"lines": [1, 2],
"issue": "Empty stub"
},
{
"path": "src/sba_scout/db/queries.py",
"lines": [239, 249],
"issue": "Python reference — 2 card query functions"
}
],
"suggestedFix": "Simple single-table queries: 1. `get_batter_card(pool, player_id) -> Option<BatterCard>` — SELECT * FROM batter_cards WHERE player_id = ?. 2. `get_pitcher_card(pool, player_id) -> Option<PitcherCard>` — SELECT * FROM pitcher_cards WHERE player_id = ?.",
"estimatedHours": 0.5,
"notes": "Straightforward — these are the simplest queries in the system."
},
{
"id": "HIGH-004",
"name": "Implement roster query function (get_my_roster)",
"description": "Port the composite roster query that fetches majors, minors, and IL players for the user's team. This is a high-level function that calls team + player queries internally.",
"category": "high",
"priority": 7,
"completed": false,
"tested": false,
"dependencies": ["HIGH-001", "HIGH-002"],
"files": [
{
"path": "src/sba_scout/db/queries.py",
"lines": [309, 336],
"issue": "Python reference — get_my_roster function"
}
],
"suggestedFix": "Create a `Roster` struct with fields `majors: Vec<Player>`, `minors: Vec<Player>`, `il: Vec<Player>`. Implement `get_my_roster(pool, team_abbrev, season) -> Roster` that: 1. Looks up team by abbrev (e.g., 'WV'). 2. Looks up IL team by abbrev + 'IL' (e.g., 'WVIL'). 3. Looks up MiL team by abbrev + 'MiL' (e.g., 'WVMiL'). 4. Fetches players for each (with cards). Returns empty vecs if team not found.",
"estimatedHours": 1,
"notes": "Consider running the 3 player queries concurrently with tokio::join! since they're independent."
},
{
"id": "HIGH-005",
"name": "Implement sync status query functions",
"description": "Port sync status queries: get_sync_status and update_sync_status (upsert pattern).",
"category": "high",
"priority": 8,
"completed": false,
"tested": false,
"dependencies": ["CRIT-002"],
"files": [
{
"path": "src/sba_scout/db/queries.py",
"lines": [344, 354],
"issue": "Python reference — get_sync_status and update_sync_status"
}
],
"suggestedFix": "1. `get_sync_status(pool, entity_type) -> Option<SyncStatus>` — SELECT * FROM sync_status WHERE entity_type = ?. 2. `update_sync_status(pool, entity_type, count, error)` — Use SQLite's INSERT OR REPLACE (or INSERT ... ON CONFLICT(entity_type) DO UPDATE) for clean upsert. The Python version does a select-then-update/insert pattern which is racy; the SQL upsert is better.",
"estimatedHours": 0.5,
"notes": "SQLite ON CONFLICT is the idiomatic way to do upserts. This is simpler than the Python approach."
},
{
"id": "MED-001",
"name": "Implement matchup cache query functions",
"description": "Port matchup cache queries: get_cached_matchup, invalidate_matchup_cache. Note: MatchupCache table exists but is largely unused in practice — the StandardizedScoreCache is the primary cache. Still needed for completeness.",
"category": "medium",
"priority": 9,
"completed": false,
"tested": false,
"dependencies": ["CRIT-002"],
"files": [
{
"path": "src/sba_scout/db/queries.py",
"lines": [382, 398],
"issue": "Python reference — 2 matchup cache functions"
}
],
"suggestedFix": "1. `get_cached_matchup(pool, batter_id, pitcher_id, weights_hash) -> Option<MatchupCache>` — SELECT WHERE batter_id = ? AND pitcher_id = ? AND weights_hash = ?. 2. `invalidate_matchup_cache(pool) -> i64` — DELETE FROM matchup_cache, return rows_affected.",
"estimatedHours": 0.5,
"notes": "Low usage in practice but include for feature parity."
},
{
"id": "MED-002",
"name": "Implement lineup query functions",
"description": "Port lineup CRUD: get_lineups, get_lineup_by_name, save_lineup (upsert), delete_lineup.",
"category": "medium",
"priority": 10,
"completed": false,
"tested": false,
"dependencies": ["CRIT-002"],
"files": [
{
"path": "src/sba_scout/db/queries.py",
"lines": [418, 425, 435, 468],
"issue": "Python reference — 4 lineup CRUD functions"
}
],
"suggestedFix": "1. `get_lineups(pool) -> Vec<Lineup>` — SELECT * ORDER BY name. 2. `get_lineup_by_name(pool, name) -> Option<Lineup>` — WHERE name = ?. 3. `save_lineup(pool, name, batting_order, positions, ...)` — INSERT OR REPLACE. batting_order and positions are JSON TEXT — serialize Vec<i64> and HashMap<String, i64> with serde_json::to_string. 4. `delete_lineup(pool, name) -> bool` — DELETE WHERE name = ?, return rows_affected > 0. Note: The Lineup model in Rust stores batting_order/positions as String (JSON text). Add helper methods or a wrapper to deserialize on read.",
"estimatedHours": 1.5,
"notes": "JSON serialization for batting_order (Vec<i64>) and positions (HashMap<String, i64>) needs serde_json. Consider adding `Lineup::batting_order_vec()` and `Lineup::positions_map()` convenience methods."
},
{
"id": "LOW-001",
"name": "Add missing crate dependencies for later phases",
"description": "Add crates needed by Phase 2+ to Cargo.toml now so they're available: csv (CSV import), sha2 (cache hashing), regex (endurance parsing).",
"category": "low",
"priority": 11,
"completed": false,
"tested": false,
"dependencies": [],
"files": [
{
"path": "rust/Cargo.toml",
"lines": [6, 38],
"issue": "Missing csv, sha2, regex crates"
}
],
"suggestedFix": "Add to [dependencies]: `csv = \"1\"`, `sha2 = \"0.10\"`, `regex = \"1\"`. These are stable, widely-used crates with no breaking changes expected.",
"estimatedHours": 0.25,
"notes": "Quick win — add now to avoid compile delays later. No code changes needed."
},
{
"id": "LOW-002",
"name": "Add Lineup JSON helper methods",
"description": "Add deserialization helpers to the Lineup model so screens can easily work with batting_order and positions as typed Rust values instead of raw JSON strings.",
"category": "low",
"priority": 12,
"completed": false,
"tested": false,
"dependencies": ["MED-002"],
"files": [
{
"path": "rust/src/db/models.rs",
"lines": [209, 219],
"issue": "Lineup stores batting_order/positions as Option<String> (JSON) with no parse helpers"
}
],
"suggestedFix": "Add impl block for Lineup with: 1. `batting_order_vec(&self) -> Vec<i64>` — deserialize JSON string or return empty vec. 2. `positions_map(&self) -> HashMap<String, i64>` — deserialize JSON string or return empty map. 3. `set_batting_order(&mut self, order: &[i64])` — serialize to JSON string. 4. `set_positions(&mut self, positions: &HashMap<String, i64>)` — serialize to JSON string.",
"estimatedHours": 0.5,
"notes": "Quality-of-life improvement that prevents JSON parse errors from spreading across the codebase."
}
],
"quickWins": [
{
"taskId": "LOW-001",
"estimatedMinutes": 15,
"impact": "Prevents compile-time delays when starting Phase 2"
},
{
"taskId": "HIGH-003",
"estimatedMinutes": 20,
"impact": "Simplest queries — good warmup for the query pattern"
}
],
"productionBlockers": [
{
"taskId": "CRIT-001",
"reason": "No tables = no data storage. Everything depends on this."
},
{
"taskId": "CRIT-002",
"reason": "No connection management = can't execute any queries."
},
{
"taskId": "CRIT-003",
"reason": "App can't find the DB or API without config wired in."
}
],
"weeklyRoadmap": {
"session1": {
"theme": "Schema + Connection + Config",
"tasks": ["CRIT-001", "CRIT-002", "CRIT-003", "LOW-001"],
"estimatedHours": 5,
"notes": "Get the app booting with a real DB connection and config loaded. Verify with cargo run."
},
"session2": {
"theme": "Core Queries (Teams + Players + Cards)",
"tasks": ["HIGH-001", "HIGH-002", "HIGH-003", "HIGH-004"],
"estimatedHours": 5,
"notes": "All the read queries that screens will need. Test against the existing Python-created DB file."
},
"session3": {
"theme": "Supporting Queries + Polish",
"tasks": ["HIGH-005", "MED-001", "MED-002", "LOW-002"],
"estimatedHours": 4,
"notes": "Sync status, cache, lineup CRUD, and JSON helpers. Phase 1 complete."
}
},
"architecturalDecisions": {
"no_orm_session_pattern": "sqlx uses pool directly — no session factory needed. Use pool.begin() for transactions.",
"include_cards_strategy": "Separate queries (fetch players, then batch-fetch cards) rather than JOINs. Matches Python's selectinload behavior and keeps models simple.",
"json_columns": "Store as TEXT in SQLite, serialize/deserialize with serde_json. Add helper methods on Lineup for typed access.",
"upsert_pattern": "Use SQLite ON CONFLICT DO UPDATE instead of Python's select-then-update. Cleaner and race-free.",
"config_format": "TOML (not YAML) for Rust config. figment + toml crate already in Cargo.toml. Document the format change from Python version.",
"datetime_storage": "Store as TEXT in ISO 8601 format. chrono::NaiveDateTime with sqlx handles this automatically.",
"pool_passing": "Pass SqlitePool by reference (&SqlitePool) to query functions. Pool is Clone (Arc internally) so App can own it and hand out refs."
}
}

View File

@ -5,6 +5,9 @@ use ratatui::{
widgets::{Block, Borders, Paragraph},
Frame,
};
use sqlx::sqlite::SqlitePool;
use crate::config::Settings;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Screen {
@ -18,12 +21,16 @@ pub enum Screen {
pub struct App {
pub current_screen: Screen,
pub pool: SqlitePool,
pub settings: Settings,
}
impl App {
pub fn new() -> Self {
pub fn new(settings: Settings, pool: SqlitePool) -> Self {
Self {
current_screen: Screen::Dashboard,
pool,
settings,
}
}

View File

@ -201,6 +201,17 @@ pub struct Transaction {
pub synced_at: Option<NaiveDateTime>,
}
// =============================================================================
// Roster (aggregate view, not a DB table)
// =============================================================================
/// Aggregate view of one franchise's full roster: parent club plus its
/// IL and minor-league affiliate teams. Not backed by a DB table — it is
/// assembled by `get_my_roster` in queries.rs from three player queries.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct Roster {
    // Players on the parent (majors) team.
    pub majors: Vec<Player>,
    // Players on the "<abbrev>MiL" affiliate team.
    pub minors: Vec<Player>,
    // Players on the "<abbrev>IL" (injured list) affiliate team.
    pub il: Vec<Player>,
}
// =============================================================================
// User Data (local only)
// =============================================================================
@ -218,6 +229,30 @@ pub struct Lineup {
pub updated_at: Option<NaiveDateTime>,
}
impl Lineup {
    /// Decode the JSON `batting_order` column into player ids.
    /// NULL or unparseable text yields an empty vec rather than an error.
    pub fn batting_order_vec(&self) -> Vec<i64> {
        match self.batting_order.as_deref() {
            Some(raw) => serde_json::from_str(raw).unwrap_or_default(),
            None => Vec::new(),
        }
    }

    /// Decode the JSON `positions` column into a position -> player-id map.
    /// NULL or unparseable text yields an empty map rather than an error.
    pub fn positions_map(&self) -> std::collections::HashMap<String, i64> {
        match self.positions.as_deref() {
            Some(raw) => serde_json::from_str(raw).unwrap_or_default(),
            None => std::collections::HashMap::new(),
        }
    }

    /// Encode a batting order into the JSON TEXT column.
    /// Serializing a plain integer slice cannot fail in practice; `.ok()`
    /// keeps the setter infallible either way.
    pub fn set_batting_order(&mut self, order: &[i64]) {
        self.batting_order = serde_json::to_string(order).ok();
    }

    /// Encode a positions map into the JSON TEXT column.
    pub fn set_positions(&mut self, positions: &std::collections::HashMap<String, i64>) {
        self.positions = serde_json::to_string(positions).ok();
    }
}
#[derive(Debug, FromRow, Serialize, Deserialize)]
pub struct MatchupCache {
pub id: i64,

View File

@ -1,2 +1,393 @@
// Database query functions will be implemented here.
// Each function will use sqlx::query_as! to map results to model structs.
use anyhow::Result;
use sqlx::SqlitePool;
use std::collections::HashMap;
use super::models::{BatterCard, Lineup, MatchupCache, PitcherCard, Player, Roster, SyncStatus, Team};
// =============================================================================
// Team Queries
// =============================================================================
/// List every team for a season, ordered by abbreviation.
///
/// With `active_only`, IL/MiL affiliate rosters (abbrevs ending in "IL"
/// or "MiL") are filtered out, leaving only parent franchises.
pub async fn get_all_teams(pool: &SqlitePool, season: i64, active_only: bool) -> Result<Vec<Team>> {
    let sql = if active_only {
        "SELECT * FROM teams WHERE season = ? AND abbrev NOT LIKE '%IL' AND abbrev NOT LIKE '%MiL' ORDER BY abbrev"
    } else {
        "SELECT * FROM teams WHERE season = ? ORDER BY abbrev"
    };
    let rows = sqlx::query_as::<_, Team>(sql)
        .bind(season)
        .fetch_all(pool)
        .await?;
    Ok(rows)
}
/// Look up a team by its (abbrev, season) pair; `None` when absent.
pub async fn get_team_by_abbrev(
    pool: &SqlitePool,
    abbrev: &str,
    season: i64,
) -> Result<Option<Team>> {
    let query =
        sqlx::query_as::<_, Team>("SELECT * FROM teams WHERE abbrev = ? AND season = ?");
    Ok(query.bind(abbrev).bind(season).fetch_optional(pool).await?)
}
/// Primary-key lookup of a team; `None` when no row matches.
pub async fn get_team_by_id(pool: &SqlitePool, team_id: i64) -> Result<Option<Team>> {
    Ok(
        sqlx::query_as::<_, Team>("SELECT * FROM teams WHERE id = ?")
            .bind(team_id)
            .fetch_optional(pool)
            .await?,
    )
}
// =============================================================================
// Player Queries
// =============================================================================
/// All players on one team, sorted alphabetically by name.
pub async fn get_players_by_team(pool: &SqlitePool, team_id: i64) -> Result<Vec<Player>> {
    let query =
        sqlx::query_as::<_, Player>("SELECT * FROM players WHERE team_id = ? ORDER BY name");
    let roster = query.bind(team_id).fetch_all(pool).await?;
    Ok(roster)
}
/// Primary-key lookup of a player; `None` when no row matches.
pub async fn get_player_by_id(pool: &SqlitePool, player_id: i64) -> Result<Option<Player>> {
    Ok(
        sqlx::query_as::<_, Player>("SELECT * FROM players WHERE id = ?")
            .bind(player_id)
            .fetch_optional(pool)
            .await?,
    )
}
/// Exact (case-insensitive) name lookup within a season.
///
/// NOTE(review): SQLite's LOWER() only folds ASCII letters, so names with
/// non-ASCII characters compare case-sensitively — confirm that's acceptable.
pub async fn get_player_by_name(
    pool: &SqlitePool,
    name: &str,
    season: i64,
) -> Result<Option<Player>> {
    let query = sqlx::query_as::<_, Player>(
        "SELECT * FROM players WHERE LOWER(name) = LOWER(?) AND season = ?",
    );
    Ok(query.bind(name).bind(season).fetch_optional(pool).await?)
}
/// Substring search on player name within a season, capped at `limit` rows.
/// The query text is passed as a bound parameter, so LIKE wildcards in user
/// input behave as wildcards but cannot inject SQL.
pub async fn search_players(
    pool: &SqlitePool,
    query_str: &str,
    season: i64,
    limit: i64,
) -> Result<Vec<Player>> {
    Ok(sqlx::query_as::<_, Player>(
        "SELECT * FROM players WHERE name LIKE ? AND season = ? ORDER BY name LIMIT ?",
    )
    .bind(format!("%{}%", query_str))
    .bind(season)
    .bind(limit)
    .fetch_all(pool)
    .await?)
}
// Position predicates for the two player classes. These are compile-time
// constants (never user input), so interpolating them into SQL is
// injection-safe; all user-supplied values go through bound parameters.
const PITCHER_POS_FILTER: &str = "(pos_1 IN ('SP', 'RP', 'CP') OR pos_2 IN ('SP', 'RP', 'CP'))";
const BATTER_POS_FILTER: &str =
    "pos_1 IN ('C', '1B', '2B', '3B', 'SS', 'LF', 'CF', 'RF', 'DH')";

/// Shared driver for position-filtered player queries.
///
/// Appends optional `team_id`/`season` predicates and binds them in the
/// same order they appear in the SQL, then returns the name-ordered rows.
async fn fetch_players_by_position(
    pool: &SqlitePool,
    position_filter: &str,
    team_id: Option<i64>,
    season: Option<i64>,
) -> Result<Vec<Player>> {
    let mut sql = format!("SELECT * FROM players WHERE {}", position_filter);
    if team_id.is_some() {
        sql.push_str(" AND team_id = ?");
    }
    if season.is_some() {
        sql.push_str(" AND season = ?");
    }
    sql.push_str(" ORDER BY name");
    let mut query = sqlx::query_as::<_, Player>(&sql);
    if let Some(tid) = team_id {
        query = query.bind(tid);
    }
    if let Some(s) = season {
        query = query.bind(s);
    }
    Ok(query.fetch_all(pool).await?)
}

/// Players whose primary or secondary position is a pitching role
/// (SP/RP/CP), optionally filtered by team and/or season.
pub async fn get_pitchers(
    pool: &SqlitePool,
    team_id: Option<i64>,
    season: Option<i64>,
) -> Result<Vec<Player>> {
    fetch_players_by_position(pool, PITCHER_POS_FILTER, team_id, season).await
}

/// Players whose primary position is a fielding/DH slot, optionally
/// filtered by team and/or season.
pub async fn get_batters(
    pool: &SqlitePool,
    team_id: Option<i64>,
    season: Option<i64>,
) -> Result<Vec<Player>> {
    fetch_players_by_position(pool, BATTER_POS_FILTER, team_id, season).await
}
/// Players at card-relevant positions with no row in the matching card
/// table (anti-join via NOT IN on the card table's player_id).
///
/// `card_type == "batter"` selects batters missing batter_cards; any other
/// value falls through to pitchers missing pitcher_cards.
/// NOTE(review): stringly-typed selector — a typo like "batters" silently
/// runs the pitcher branch; consider an enum.
pub async fn get_players_missing_cards(
    pool: &SqlitePool,
    season: i64,
    card_type: &str,
) -> Result<Vec<Player>> {
    let sql = if card_type == "batter" {
        "SELECT * FROM players WHERE season = ? AND pos_1 IN ('C', '1B', '2B', '3B', 'SS', 'LF', 'CF', 'RF', 'DH') AND id NOT IN (SELECT player_id FROM batter_cards) ORDER BY name"
    } else {
        "SELECT * FROM players WHERE season = ? AND pos_1 IN ('SP', 'RP', 'CP') AND id NOT IN (SELECT player_id FROM pitcher_cards) ORDER BY name"
    };
    let players = sqlx::query_as::<_, Player>(sql)
        .bind(season)
        .fetch_all(pool)
        .await?;
    Ok(players)
}
// =============================================================================
// Card Queries
// =============================================================================
/// Fetch the (at most one — player_id is UNIQUE) batter card for a player.
pub async fn get_batter_card(pool: &SqlitePool, player_id: i64) -> Result<Option<BatterCard>> {
    let query =
        sqlx::query_as::<_, BatterCard>("SELECT * FROM batter_cards WHERE player_id = ?");
    Ok(query.bind(player_id).fetch_optional(pool).await?)
}
/// Fetch the (at most one — player_id is UNIQUE) pitcher card for a player.
pub async fn get_pitcher_card(pool: &SqlitePool, player_id: i64) -> Result<Option<PitcherCard>> {
    let query =
        sqlx::query_as::<_, PitcherCard>("SELECT * FROM pitcher_cards WHERE player_id = ?");
    Ok(query.bind(player_id).fetch_optional(pool).await?)
}
// =============================================================================
// Sync Status Queries
// =============================================================================
/// Read the sync bookkeeping row for one entity type; `None` if it has
/// never been synced.
pub async fn get_sync_status(
    pool: &SqlitePool,
    entity_type: &str,
) -> Result<Option<SyncStatus>> {
    let query =
        sqlx::query_as::<_, SyncStatus>("SELECT * FROM sync_status WHERE entity_type = ?");
    Ok(query.bind(entity_type).fetch_optional(pool).await?)
}
/// Upsert the sync bookkeeping row for `entity_type`.
///
/// SQLite's ON CONFLICT clause makes insert-or-update a single race-free
/// statement; `last_sync` is stamped with the database clock.
pub async fn update_sync_status(
    pool: &SqlitePool,
    entity_type: &str,
    count: i64,
    error: Option<&str>,
) -> Result<()> {
    let sql = "INSERT INTO sync_status (entity_type, last_sync, last_sync_count, last_error) \
               VALUES (?, datetime('now'), ?, ?) \
               ON CONFLICT(entity_type) DO UPDATE SET \
               last_sync = excluded.last_sync, \
               last_sync_count = excluded.last_sync_count, \
               last_error = excluded.last_error";
    sqlx::query(sql)
        .bind(entity_type)
        .bind(count)
        .bind(error)
        .execute(pool)
        .await?;
    Ok(())
}
// =============================================================================
// Matchup Cache Queries
// =============================================================================
/// Cache hit lookup for a batter/pitcher pair under a specific weights
/// hash; a weights change produces a different hash and thus a miss.
pub async fn get_cached_matchup(
    pool: &SqlitePool,
    batter_id: i64,
    pitcher_id: i64,
    weights_hash: &str,
) -> Result<Option<MatchupCache>> {
    let query = sqlx::query_as::<_, MatchupCache>(
        "SELECT * FROM matchup_cache WHERE batter_id = ? AND pitcher_id = ? AND weights_hash = ?",
    );
    Ok(query
        .bind(batter_id)
        .bind(pitcher_id)
        .bind(weights_hash)
        .fetch_optional(pool)
        .await?)
}
/// Flush the entire matchup cache, returning how many rows were deleted.
pub async fn invalidate_matchup_cache(pool: &SqlitePool) -> Result<u64> {
    let outcome = sqlx::query("DELETE FROM matchup_cache").execute(pool).await?;
    Ok(outcome.rows_affected())
}
// =============================================================================
// Lineup Queries
// =============================================================================
/// All saved lineups, ordered by name.
pub async fn get_lineups(pool: &SqlitePool) -> Result<Vec<Lineup>> {
    Ok(
        sqlx::query_as::<_, Lineup>("SELECT * FROM lineups ORDER BY name")
            .fetch_all(pool)
            .await?,
    )
}
/// Look up a single saved lineup by its name; `None` when absent.
pub async fn get_lineup_by_name(pool: &SqlitePool, name: &str) -> Result<Option<Lineup>> {
    let query = sqlx::query_as::<_, Lineup>("SELECT * FROM lineups WHERE name = ?");
    Ok(query.bind(name).fetch_optional(pool).await?)
}
/// Create or update a saved lineup by name (upsert semantics).
///
/// `batting_order` and `positions` are JSON-encoded into TEXT columns.
/// An existing row keeps its `created_at` and gets a fresh `updated_at`;
/// a new row gets both timestamps set to now.
pub async fn save_lineup(
    pool: &SqlitePool,
    name: &str,
    batting_order: &[i64],
    positions: &HashMap<String, i64>,
    lineup_type: &str,
    description: Option<&str>,
    starting_pitcher_id: Option<i64>,
) -> Result<()> {
    let batting_order_json = serde_json::to_string(batting_order)?;
    let positions_json = serde_json::to_string(positions)?;
    // Run the UPDATE-miss-then-INSERT pair inside one transaction: against
    // the bare pool a concurrent writer could slip between the two
    // statements and create a duplicate row for the same name. The
    // transaction auto-rolls-back on drop if anything errors before commit.
    let mut tx = pool.begin().await?;
    let rows_updated = sqlx::query(
        "UPDATE lineups \
         SET description = ?, lineup_type = ?, batting_order = ?, positions = ?, \
             starting_pitcher_id = ?, updated_at = datetime('now') \
         WHERE name = ?",
    )
    .bind(description)
    .bind(lineup_type)
    .bind(&batting_order_json)
    .bind(&positions_json)
    .bind(starting_pitcher_id)
    .bind(name)
    .execute(&mut *tx)
    .await?
    .rows_affected();
    if rows_updated == 0 {
        sqlx::query(
            "INSERT INTO lineups \
             (name, description, lineup_type, batting_order, positions, starting_pitcher_id, \
             created_at, updated_at) \
             VALUES (?, ?, ?, ?, ?, ?, datetime('now'), datetime('now'))",
        )
        .bind(name)
        .bind(description)
        .bind(lineup_type)
        .bind(&batting_order_json)
        .bind(&positions_json)
        .bind(starting_pitcher_id)
        .execute(&mut *tx)
        .await?;
    }
    tx.commit().await?;
    Ok(())
}
/// Delete a saved lineup by name; returns true if a row was removed.
pub async fn delete_lineup(pool: &SqlitePool, name: &str) -> Result<bool> {
    let outcome = sqlx::query("DELETE FROM lineups WHERE name = ?")
        .bind(name)
        .execute(pool)
        .await?;
    Ok(outcome.rows_affected() > 0)
}
// =============================================================================
// Roster Queries
// =============================================================================
pub async fn get_my_roster(
pool: &SqlitePool,
team_abbrev: &str,
season: i64,
) -> Result<Roster> {
let majors_team = get_team_by_abbrev(pool, team_abbrev, season).await?;
let majors = match majors_team {
Some(t) => get_players_by_team(pool, t.id).await?,
None => vec![],
};
let il_abbrev = format!("{}IL", team_abbrev);
let il_team = get_team_by_abbrev(pool, &il_abbrev, season).await?;
let il = match il_team {
Some(t) => get_players_by_team(pool, t.id).await?,
None => vec![],
};
let mil_abbrev = format!("{}MiL", team_abbrev);
let mil_team = get_team_by_abbrev(pool, &mil_abbrev, season).await?;
let minors = match mil_team {
Some(t) => get_players_by_team(pool, t.id).await?,
None => vec![],
};
Ok(Roster { majors, minors, il })
}

View File

@ -16,3 +16,263 @@ pub async fn init_pool(db_path: &Path) -> Result<SqlitePool> {
Ok(pool)
}
/// Create every table in the schema; idempotent thanks to `IF NOT EXISTS`.
///
/// `teams` and `players` use API-provided primary keys (no AUTOINCREMENT);
/// every other table generates its own row ids. Statements run in
/// dependency order so `REFERENCES` targets exist before their children.
///
/// NOTE(review): `PRAGMA foreign_keys = ON` is a per-connection setting in
/// SQLite, so issuing it once through the pool only configures whichever
/// pooled connection happens to execute it. Consider enabling it for every
/// connection via `SqliteConnectOptions::foreign_keys(true)` in `init_pool`
/// instead — confirm against the pool setup.
pub async fn create_tables(pool: &SqlitePool) -> Result<()> {
    sqlx::query("PRAGMA foreign_keys = ON")
        .execute(pool)
        .await?;
    // All DDL in dependency order (parents before children). Running these
    // from one table avoids nine copies of the query/execute boilerplate.
    const DDL: &[&str] = &[
        // 1. teams — API-provided PKs (no autoincrement)
        "CREATE TABLE IF NOT EXISTS teams (
            id INTEGER PRIMARY KEY,
            abbrev TEXT NOT NULL,
            short_name TEXT NOT NULL,
            long_name TEXT NOT NULL,
            season INTEGER NOT NULL,
            manager1_name TEXT,
            manager2_name TEXT,
            gm_discord_id TEXT,
            gm2_discord_id TEXT,
            division_id INTEGER,
            division_name TEXT,
            league_abbrev TEXT,
            thumbnail TEXT,
            color TEXT,
            dice_color TEXT,
            stadium TEXT,
            salary_cap REAL,
            synced_at TEXT,
            UNIQUE(abbrev, season)
        )",
        // 2. players — API-provided PKs (no autoincrement)
        "CREATE TABLE IF NOT EXISTS players (
            id INTEGER PRIMARY KEY,
            name TEXT NOT NULL,
            season INTEGER NOT NULL,
            team_id INTEGER REFERENCES teams(id),
            swar REAL DEFAULT 0,
            card_image TEXT,
            card_image_alt TEXT,
            headshot TEXT,
            vanity_card TEXT,
            pos_1 TEXT,
            pos_2 TEXT,
            pos_3 TEXT,
            pos_4 TEXT,
            pos_5 TEXT,
            pos_6 TEXT,
            pos_7 TEXT,
            pos_8 TEXT,
            hand TEXT,
            injury_rating TEXT,
            il_return TEXT,
            demotion_week INTEGER,
            strat_code TEXT,
            bbref_id TEXT,
            sbaplayer_id INTEGER,
            last_game TEXT,
            last_game2 TEXT,
            synced_at TEXT
        )",
        // 3. batter_cards — one card per player (UNIQUE player_id)
        "CREATE TABLE IF NOT EXISTS batter_cards (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            player_id INTEGER NOT NULL UNIQUE REFERENCES players(id),
            so_vlhp REAL DEFAULT 0,
            bb_vlhp REAL DEFAULT 0,
            hit_vlhp REAL DEFAULT 0,
            ob_vlhp REAL DEFAULT 0,
            tb_vlhp REAL DEFAULT 0,
            hr_vlhp REAL DEFAULT 0,
            dp_vlhp REAL DEFAULT 0,
            so_vrhp REAL DEFAULT 0,
            bb_vrhp REAL DEFAULT 0,
            hit_vrhp REAL DEFAULT 0,
            ob_vrhp REAL DEFAULT 0,
            tb_vrhp REAL DEFAULT 0,
            hr_vrhp REAL DEFAULT 0,
            dp_vrhp REAL DEFAULT 0,
            bphr_vlhp REAL DEFAULT 0,
            bphr_vrhp REAL DEFAULT 0,
            bp1b_vlhp REAL DEFAULT 0,
            bp1b_vrhp REAL DEFAULT 0,
            stealing TEXT,
            steal_rating TEXT,
            speed INTEGER DEFAULT 10,
            bunt TEXT,
            hit_run TEXT,
            fielding TEXT,
            catcher_arm INTEGER,
            catcher_pb INTEGER,
            catcher_t INTEGER,
            rating_vl REAL,
            rating_vr REAL,
            rating_overall REAL,
            imported_at TEXT,
            source TEXT
        )",
        // 4. pitcher_cards — one card per player (UNIQUE player_id)
        "CREATE TABLE IF NOT EXISTS pitcher_cards (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            player_id INTEGER NOT NULL UNIQUE REFERENCES players(id),
            so_vlhb REAL DEFAULT 0,
            bb_vlhb REAL DEFAULT 0,
            hit_vlhb REAL DEFAULT 0,
            ob_vlhb REAL DEFAULT 0,
            tb_vlhb REAL DEFAULT 0,
            hr_vlhb REAL DEFAULT 0,
            dp_vlhb REAL DEFAULT 0,
            bphr_vlhb REAL DEFAULT 0,
            bp1b_vlhb REAL DEFAULT 0,
            so_vrhb REAL DEFAULT 0,
            bb_vrhb REAL DEFAULT 0,
            hit_vrhb REAL DEFAULT 0,
            ob_vrhb REAL DEFAULT 0,
            tb_vrhb REAL DEFAULT 0,
            hr_vrhb REAL DEFAULT 0,
            dp_vrhb REAL DEFAULT 0,
            bphr_vrhb REAL DEFAULT 0,
            bp1b_vrhb REAL DEFAULT 0,
            hold_rating INTEGER DEFAULT 0,
            endurance_start INTEGER,
            endurance_relief INTEGER,
            endurance_close INTEGER,
            fielding_range INTEGER,
            fielding_error INTEGER,
            wild_pitch INTEGER DEFAULT 0,
            balk INTEGER DEFAULT 0,
            batting_rating TEXT,
            rating_vlhb REAL,
            rating_vrhb REAL,
            rating_overall REAL,
            imported_at TEXT,
            source TEXT
        )",
        // 5. transactions — (move_id, player_id) dedupes re-synced moves
        "CREATE TABLE IF NOT EXISTS transactions (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            season INTEGER NOT NULL,
            week INTEGER NOT NULL,
            move_id TEXT NOT NULL,
            player_id INTEGER NOT NULL REFERENCES players(id),
            from_team_id INTEGER NOT NULL REFERENCES teams(id),
            to_team_id INTEGER NOT NULL REFERENCES teams(id),
            cancelled INTEGER DEFAULT 0,
            frozen INTEGER DEFAULT 0,
            synced_at TEXT,
            UNIQUE(move_id, player_id)
        )",
        // 6. lineups — batting_order/positions hold JSON text
        "CREATE TABLE IF NOT EXISTS lineups (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            name TEXT NOT NULL UNIQUE,
            description TEXT,
            lineup_type TEXT DEFAULT 'standard',
            batting_order TEXT,
            positions TEXT,
            starting_pitcher_id INTEGER REFERENCES players(id),
            created_at TEXT,
            updated_at TEXT
        )",
        // 7. matchup_cache — one row per (batter, pitcher) pair
        "CREATE TABLE IF NOT EXISTS matchup_cache (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            batter_id INTEGER NOT NULL REFERENCES players(id),
            pitcher_id INTEGER NOT NULL REFERENCES players(id),
            rating REAL NOT NULL,
            tier TEXT,
            details TEXT,
            computed_at TEXT,
            weights_hash TEXT,
            UNIQUE(batter_id, pitcher_id)
        )",
        // 8. standardized_score_cache — either batter_card_id or
        // pitcher_card_id is populated, keyed per split
        "CREATE TABLE IF NOT EXISTS standardized_score_cache (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            batter_card_id INTEGER REFERENCES batter_cards(id),
            pitcher_card_id INTEGER REFERENCES pitcher_cards(id),
            split TEXT NOT NULL,
            total_score REAL NOT NULL,
            stat_scores TEXT NOT NULL,
            computed_at TEXT,
            weights_hash TEXT,
            league_stats_hash TEXT,
            UNIQUE(batter_card_id, split),
            UNIQUE(pitcher_card_id, split)
        )",
        // 9. sync_status — one row per entity type
        "CREATE TABLE IF NOT EXISTS sync_status (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            entity_type TEXT NOT NULL UNIQUE,
            last_sync TEXT,
            last_sync_count INTEGER DEFAULT 0,
            last_error TEXT
        )",
    ];
    for stmt in DDL {
        sqlx::query(stmt).execute(pool).await?;
    }
    Ok(())
}
/// Drop every table and rebuild the schema from scratch.
///
/// Tables are dropped children-first so that, with foreign-key enforcement
/// active, no parent drop is rejected while referencing rows still exist;
/// `create_tables` then recreates the full schema.
pub async fn reset_database(pool: &SqlitePool) -> Result<()> {
    const DROP_ORDER: [&str; 9] = [
        "standardized_score_cache",
        "matchup_cache",
        "lineups",
        "transactions",
        "pitcher_cards",
        "batter_cards",
        "players",
        "teams",
        "sync_status",
    ];
    for table in DROP_ORDER {
        let stmt = format!("DROP TABLE IF EXISTS {table}");
        sqlx::query(&stmt).execute(pool).await?;
    }
    create_tables(pool).await
}

View File

@ -8,21 +8,38 @@ mod screens;
use anyhow::Result;
use crossterm::event::{self, Event, KeyCode};
use ratatui::DefaultTerminal;
use sqlx::sqlite::SqlitePool;
use app::App;
use config::Settings;
#[tokio::main]
async fn main() -> Result<()> {
    tracing_subscriber::fmt::init();
    // Load configuration before touching the terminal so a bad config
    // prints a plain message instead of corrupting the TUI.
    let settings = config::load_settings().unwrap_or_else(|e| {
        eprintln!("Failed to load settings: {}", e);
        std::process::exit(1);
    });
    // Make sure the database's parent directory exists before opening it.
    if let Some(parent) = settings.db_path.parent() {
        std::fs::create_dir_all(parent)?;
    }
    let pool = db::schema::init_pool(&settings.db_path).await?;
    db::schema::create_tables(&pool).await?;
    // Enter the alternate screen only after all fallible setup succeeded;
    // restore it unconditionally before propagating the run result.
    let mut terminal = ratatui::init();
    let result = run(&mut terminal, settings, pool).await;
    ratatui::restore();
    result
}
async fn run(terminal: &mut DefaultTerminal) -> Result<()> {
let mut app = App::new();
async fn run(terminal: &mut DefaultTerminal, settings: Settings, pool: SqlitePool) -> Result<()> {
let mut app = App::new(settings, pool);
loop {
terminal.draw(|frame| app.render(frame))?;