Clean up legacy CI/CD files and one-time scripts

Removed legacy CI/CD infrastructure:
- GitLab CI config (.gitlab-ci.yml, .gitlab/ directory)
- Manual build scripts (build-and-push.sh, BUILD_AND_PUSH.md)
- Unused Dockerfile variant (Dockerfile.versioned)

Removed outdated documentation:
- AGENTS.md (superseded by comprehensive CLAUDE.md files)

Removed one-time recovery scripts:
- scripts/ directory (week 19 transaction recovery - completed)
- test_real_data.py (ad-hoc testing script)

Note: Runtime artifacts (.coverage, htmlcov/, __pycache__/, etc.) are already
properly excluded via .gitignore and were not tracked in git.

All CI/CD is now handled by .gitea/workflows/docker-build.yml

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
Cal Corum 2026-02-05 15:09:19 -06:00
parent c7f55d79e3
commit bfe78fb7ac
14 changed files with 0 additions and 3712 deletions

View File

@ -1,237 +0,0 @@
stages:
- test
- build
- deploy
variables:
DOCKER_IMAGE: yourusername/discord-bot-v2
DOCKER_DRIVER: overlay2
# Semantic versioning - update these for releases
VERSION_MAJOR: "2"
VERSION_MINOR: "1"
# Test on all branches
test:
stage: test
image: python:3.11-slim
before_script:
- cd discord-app-v2
- pip install --cache-dir .cache/pip -r requirements.txt
script:
- python -m pytest --tb=short -q --cov=. --cov-report=term-missing
cache:
key: ${CI_COMMIT_REF_SLUG}
paths:
- .cache/pip
only:
- branches
artifacts:
reports:
coverage_report:
coverage_format: cobertura
path: discord-app-v2/coverage.xml
# Build with versioned tags
build:
stage: build
image: docker:24-dind
services:
- docker:24-dind
before_script:
- docker login -u $DOCKER_USERNAME -p $DOCKER_PASSWORD
script:
- cd discord-app-v2
# Calculate version tags
- export VERSION_PATCH=${CI_PIPELINE_IID}
- export FULL_VERSION="v${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}"
- export SHORT_SHA=${CI_COMMIT_SHORT_SHA}
- export BRANCH_TAG="${CI_COMMIT_REF_SLUG}-${SHORT_SHA}"
# Build once, tag multiple times
- |
docker build \
--build-arg VERSION=${FULL_VERSION} \
--build-arg GIT_COMMIT=${CI_COMMIT_SHA} \
--build-arg BUILD_DATE=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \
-t ${DOCKER_IMAGE}:${FULL_VERSION} \
-t ${DOCKER_IMAGE}:${SHORT_SHA} \
-t ${DOCKER_IMAGE}:${BRANCH_TAG} \
.
# Tag as latest only for main branch
- |
if [ "$CI_COMMIT_BRANCH" == "main" ]; then
docker tag ${DOCKER_IMAGE}:${FULL_VERSION} ${DOCKER_IMAGE}:latest
fi
# Tag as staging for develop branch
- |
if [ "$CI_COMMIT_BRANCH" == "develop" ]; then
docker tag ${DOCKER_IMAGE}:${FULL_VERSION} ${DOCKER_IMAGE}:staging
fi
# Push all tags
- docker push ${DOCKER_IMAGE}:${FULL_VERSION}
- docker push ${DOCKER_IMAGE}:${SHORT_SHA}
- docker push ${DOCKER_IMAGE}:${BRANCH_TAG}
- |
if [ "$CI_COMMIT_BRANCH" == "main" ]; then
docker push ${DOCKER_IMAGE}:latest
fi
- |
if [ "$CI_COMMIT_BRANCH" == "develop" ]; then
docker push ${DOCKER_IMAGE}:staging
fi
# Save version info for deployment
- echo "FULL_VERSION=${FULL_VERSION}" > version.env
- echo "SHORT_SHA=${SHORT_SHA}" >> version.env
- echo "BRANCH_TAG=${BRANCH_TAG}" >> version.env
artifacts:
reports:
dotenv: discord-app-v2/version.env
only:
- main
- develop
- tags
# Deploy to staging (automatic for develop branch)
deploy:staging:
stage: deploy
image: alpine:latest
needs:
- build
before_script:
- apk add --no-cache openssh-client
- mkdir -p ~/.ssh
- echo "$SSH_PRIVATE_KEY" | tr -d '\r' > ~/.ssh/id_rsa
- chmod 600 ~/.ssh/id_rsa
- ssh-keyscan -H $VPS_HOST >> ~/.ssh/known_hosts
script:
- echo "Deploying version ${FULL_VERSION} to staging..."
- |
ssh $VPS_USER@$VPS_HOST << EOF
cd /path/to/discord-bot-staging
# Backup current version
docker inspect discord-bot-staging --format='{{.Image}}' > .last_version || true
# Update docker-compose with specific version
sed -i 's|image: ${DOCKER_IMAGE}:.*|image: ${DOCKER_IMAGE}:staging|' docker-compose.yml
# Pull and deploy
docker-compose pull
docker-compose up -d
# Wait for health check
sleep 10
if docker-compose ps | grep -q "Up (healthy)"; then
echo "✅ Deployment successful!"
docker image prune -f
else
echo "❌ Health check failed!"
exit 1
fi
EOF
environment:
name: staging
url: https://staging-bot.yourdomain.com
only:
- develop
# Deploy to production (manual approval required)
deploy:production:
stage: deploy
image: alpine:latest
needs:
- build
before_script:
- apk add --no-cache openssh-client
- mkdir -p ~/.ssh
- echo "$SSH_PRIVATE_KEY" | tr -d '\r' > ~/.ssh/id_rsa
- chmod 600 ~/.ssh/id_rsa
- ssh-keyscan -H $VPS_HOST >> ~/.ssh/known_hosts
script:
- echo "Deploying version ${FULL_VERSION} to production..."
- |
ssh $VPS_USER@$VPS_HOST << EOF
cd /path/to/discord-bot
# Backup current version for rollback
docker inspect discord-bot --format='{{.Image}}' > .last_version || true
echo "${FULL_VERSION}" > .deployed_version
# Create deployment record
echo "$(date -Iseconds) | ${FULL_VERSION} | ${CI_COMMIT_SHORT_SHA} | ${CI_COMMIT_MESSAGE}" >> deployments.log
# Update docker-compose with specific version tag
sed -i 's|image: ${DOCKER_IMAGE}:.*|image: ${DOCKER_IMAGE}:${FULL_VERSION}|' docker-compose.yml
# Pull and deploy
docker-compose pull
docker-compose up -d
# Wait for health check
sleep 10
if docker-compose ps | grep -q "Up (healthy)"; then
echo "✅ Deployment successful!"
echo "Deployed: ${FULL_VERSION}"
docker image prune -f
else
echo "❌ Health check failed! Rolling back..."
LAST_VERSION=\$(cat .last_version)
sed -i "s|image: ${DOCKER_IMAGE}:.*|image: \${LAST_VERSION}|" docker-compose.yml
docker-compose up -d
exit 1
fi
EOF
environment:
name: production
url: https://bot.yourdomain.com
when: manual # Require manual approval
only:
- main
- tags
# Rollback job (manual trigger)
rollback:production:
stage: deploy
image: alpine:latest
before_script:
- apk add --no-cache openssh-client
- mkdir -p ~/.ssh
- echo "$SSH_PRIVATE_KEY" | tr -d '\r' > ~/.ssh/id_rsa
- chmod 600 ~/.ssh/id_rsa
- ssh-keyscan -H $VPS_HOST >> ~/.ssh/known_hosts
script:
- |
ssh $VPS_USER@$VPS_HOST << 'EOF'
cd /path/to/discord-bot
# Show recent deployments
echo "Recent deployments:"
tail -n 10 deployments.log
# Get last successful version
LAST_VERSION=$(cat .last_version)
echo ""
echo "Rolling back to: ${LAST_VERSION}"
# Rollback
sed -i "s|image: ${DOCKER_IMAGE}:.*|image: ${LAST_VERSION}|" docker-compose.yml
docker-compose up -d
# Record rollback
echo "$(date -Iseconds) | ROLLBACK | ${LAST_VERSION}" >> deployments.log
echo "✅ Rollback complete!"
EOF
environment:
name: production
action: rollback
when: manual
only:
- main

View File

@ -1,536 +0,0 @@
# GitLab CI/CD Deployment Setup Guide
This guide will help you set up the complete CI/CD pipeline for Discord Bot v2.0.
---
## 📋 Prerequisites
- GitLab account (free tier)
- Docker Hub account
- SSH access to your Ubuntu VPS
- Git repository with Discord Bot v2.0 code
---
## 🚀 Step 1: GitLab Setup (5 minutes)
### 1.1 Create GitLab Project
```bash
# Option A: Mirror from existing GitHub repo
git remote add gitlab git@gitlab.com:yourusername/discord-bot.git
git push gitlab main
# Option B: Create new GitLab repo and push
# 1. Go to gitlab.com
# 2. Click "New Project"
# 3. Name it "discord-bot"
# 4. Set visibility to "Private"
# 5. Create project
# 6. Follow instructions to push existing repository
```
### 1.2 Add CI/CD Variables
Go to: **Settings > CI/CD > Variables**
Add the following variables (all marked as "Protected" and "Masked"):
| Variable | Value | Description |
|----------|-------|-------------|
| `DOCKER_USERNAME` | your-docker-hub-username | Docker Hub login |
| `DOCKER_PASSWORD` | your-docker-hub-token | Docker Hub access token (NOT password) |
| `SSH_PRIVATE_KEY` | your-ssh-private-key | SSH key for VPS access (see below) |
| `VPS_HOST` | your.vps.ip.address | VPS IP or hostname |
| `VPS_USER` | your-vps-username | SSH username (usually `ubuntu` or `root`) |
**Important Notes:**
- For `DOCKER_PASSWORD`: Use a Docker Hub access token, not your password
- Go to hub.docker.com > Account Settings > Security > New Access Token
- For `SSH_PRIVATE_KEY`: Copy your entire private key including headers
- `cat ~/.ssh/id_rsa` (or whatever key you use)
- Include `-----BEGIN OPENSSH PRIVATE KEY-----` and `-----END OPENSSH PRIVATE KEY-----`
---
## 🔑 Step 2: SSH Key Setup for VPS
### 2.1 Generate SSH Key (if you don't have one)
```bash
# On your local machine
ssh-keygen -t ed25519 -C "gitlab-ci@discord-bot" -f ~/.ssh/gitlab_ci_bot
# Copy public key to VPS
ssh-copy-id -i ~/.ssh/gitlab_ci_bot.pub your-user@your-vps-host
```
### 2.2 Add Private Key to GitLab
```bash
# Copy private key
cat ~/.ssh/gitlab_ci_bot
# Paste entire output (including headers) into GitLab CI/CD variable SSH_PRIVATE_KEY
```
### 2.3 Test SSH Access
```bash
ssh -i ~/.ssh/gitlab_ci_bot your-user@your-vps-host "echo 'Connection successful!'"
```
---
## 🐳 Step 3: Docker Hub Setup
### 3.1 Create Access Token
1. Go to https://hub.docker.com/settings/security
2. Click "New Access Token"
3. Name: "GitLab CI/CD"
4. Permissions: "Read, Write, Delete"
5. Copy token immediately (you won't see it again!)
### 3.2 Create Repository
1. Go to https://hub.docker.com/repositories
2. Click "Create Repository"
3. Name: "discord-bot-v2"
4. Visibility: Private or Public (your choice)
5. Create
---
## 🖥️ Step 4: VPS Setup
### 4.1 Create Directory Structure
```bash
# SSH into your VPS
ssh your-user@your-vps-host
# Create production directory
sudo mkdir -p /opt/discord-bot
sudo chown $USER:$USER /opt/discord-bot
cd /opt/discord-bot
# Create staging directory (optional)
sudo mkdir -p /opt/discord-bot-staging
sudo chown $USER:$USER /opt/discord-bot-staging
```
### 4.2 Create docker-compose.yml (Production)
```bash
cd /opt/discord-bot
nano docker-compose.yml
```
Paste:
```yaml
version: '3.8'
services:
bot:
image: yourusername/discord-bot-v2:latest
container_name: discord-bot
restart: unless-stopped
env_file:
- .env.production
volumes:
- ./logs:/app/logs
- ./storage:/app/storage
networks:
- bot-network
healthcheck:
test: ["CMD", "python", "-c", "import discord; print('ok')"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
redis:
image: redis:7-alpine
container_name: discord-redis
restart: unless-stopped
volumes:
- redis-data:/data
networks:
- bot-network
volumes:
redis-data:
networks:
bot-network:
```
### 4.3 Create Environment File
```bash
nano .env.production
```
Paste:
```bash
BOT_TOKEN=your_discord_bot_token
API_TOKEN=your_database_api_token
DB_URL=http://your-api-url:8000
GUILD_ID=your_discord_server_id
LOG_LEVEL=INFO
REDIS_URL=redis://redis:6379
REDIS_CACHE_TTL=300
```
### 4.4 Create Rollback Script
```bash
nano rollback.sh
chmod +x rollback.sh
```
Paste:
```bash
#!/bin/bash
set -e
COMPOSE_FILE="docker-compose.yml"
LOG_FILE="deployments.log"
echo "=== Discord Bot Rollback ==="
echo ""
# Show recent deployments
echo "Recent deployments:"
tail -n 10 $LOG_FILE | column -t -s '|'
echo ""
# Show current version
CURRENT=$(grep "image:" $COMPOSE_FILE | awk '{print $2}')
echo "Current version: $CURRENT"
echo ""
# Show last version
if [ -f .last_version ]; then
LAST=$(cat .last_version)
echo "Last version: $LAST"
echo ""
read -p "Rollback to this version? (y/N): " confirm
if [ "$confirm" != "y" ]; then
echo "Rollback cancelled."
exit 0
fi
# Perform rollback
echo "Rolling back..."
sed -i "s|image:.*|image: $LAST|" $COMPOSE_FILE
docker-compose up -d
# Record rollback
echo "$(date -Iseconds) | ROLLBACK | $LAST" >> $LOG_FILE
echo "✅ Rollback complete!"
else
echo "❌ No previous version found!"
exit 1
fi
```
### 4.5 Initialize Deployment Log
```bash
touch deployments.log
echo "$(date -Iseconds) | INIT | Manual Setup" >> deployments.log
```
---
## 📁 Step 5: Update Project Files
### 5.1 Copy GitLab CI Configuration
```bash
# On your local machine, in project root
cp discord-app-v2/.gitlab-ci.yml .gitlab-ci.yml
# Update DOCKER_IMAGE variable with your Docker Hub username
sed -i 's/yourusername/YOUR_ACTUAL_USERNAME/' .gitlab-ci.yml
```
### 5.2 Update Dockerfile
```bash
# Replace existing Dockerfile with versioned one
cd discord-app-v2
mv Dockerfile Dockerfile.old
cp Dockerfile.versioned Dockerfile
```
### 5.3 Add Version Command to Bot
Edit `discord-app-v2/bot.py` and add:
```python
import os
BOT_VERSION = os.getenv('BOT_VERSION', 'dev')
GIT_COMMIT = os.getenv('BOT_GIT_COMMIT', 'unknown')
BUILD_DATE = os.getenv('BOT_BUILD_DATE', 'unknown')
@bot.tree.command(name="version", description="Display bot version info")
async def version_command(interaction: discord.Interaction):
embed = discord.Embed(
title="🤖 Bot Version Information",
color=0x00ff00
)
embed.add_field(name="Version", value=BOT_VERSION, inline=False)
embed.add_field(name="Git Commit", value=GIT_COMMIT[:8], inline=True)
embed.add_field(name="Build Date", value=BUILD_DATE, inline=True)
await interaction.response.send_message(embed=embed, ephemeral=True)
```
---
## 🧪 Step 6: Test the Pipeline
### 6.1 Initial Commit
```bash
git add .
git commit -m "Setup GitLab CI/CD pipeline"
git push gitlab main
```
### 6.2 Watch Pipeline Execute
1. Go to GitLab project page
2. Click "CI/CD > Pipelines"
3. Watch your pipeline run:
- ✅ Test stage should run
- ✅ Build stage should run
- ⏸️ Deploy stage waits for manual trigger
### 6.3 Manual Production Deploy
1. In GitLab pipeline view, find "deploy:production" job
2. Click the "Play" button ▶️
3. Watch deployment execute
4. Verify on VPS:
```bash
ssh your-user@your-vps-host
cd /opt/discord-bot
docker-compose ps
tail -f logs/discord_bot_v2.log
```
---
## ✅ Step 7: Verify Everything Works
### 7.1 Check Bot Status
```bash
# On VPS
docker-compose ps
# Should show:
# NAME STATUS
# discord-bot Up (healthy)
# discord-redis Up
```
### 7.2 Check Version in Discord
In your Discord server:
```
/version
```
Should show something like:
```
Version: v2.1.1
Git Commit: a1b2c3d4
Build Date: 2025-01-19T10:30:00Z
```
### 7.3 Check Deployment Log
```bash
# On VPS
cat /opt/discord-bot/deployments.log
```
---
## 🔄 Step 8: Create Development Workflow
### 8.1 Create Develop Branch
```bash
git checkout -b develop
git push gitlab develop
```
### 8.2 Set Up Branch Protection (Optional)
In GitLab:
1. Settings > Repository > Protected Branches
2. Protect `main`: Require merge requests, maintainers can push
3. Protect `develop`: Developers can push
---
## 🎯 Usage Workflows
### Regular Feature Development
```bash
# Create feature branch
git checkout -b feature/new-feature develop
# Make changes, commit
git add .
git commit -m "Add new feature"
git push gitlab feature/new-feature
# Merge to develop (auto-deploys to staging if configured)
git checkout develop
git merge feature/new-feature
git push gitlab develop
# After testing, merge to main
git checkout main
git merge develop
git push gitlab main
# In GitLab UI, manually trigger production deploy
```
### Hotfix
```bash
# Create from main
git checkout -b hotfix/critical-bug main
# Fix and commit
git add .
git commit -m "Fix critical bug"
git push gitlab hotfix/critical-bug
# Merge to main
git checkout main
git merge hotfix/critical-bug
git push gitlab main
# Manually deploy in GitLab
```
### Rollback
**Option 1 - GitLab UI:**
1. CI/CD > Pipelines
2. Find pipeline with working version
3. Click "Rollback" on deploy:production job
**Option 2 - VPS Script:**
```bash
ssh your-user@your-vps-host
cd /opt/discord-bot
./rollback.sh
```
**Option 3 - Manual Job:**
1. CI/CD > Pipelines > Latest
2. Click "Play" on rollback:production job
---
## 🐛 Troubleshooting
### Pipeline Fails at Build Stage
**Error**: "Cannot connect to Docker daemon"
**Fix**: GitLab runners need Docker-in-Docker enabled (already configured in `.gitlab-ci.yml`)
**Error**: "Permission denied for Docker Hub"
**Fix**: Check `DOCKER_USERNAME` and `DOCKER_PASSWORD` variables are correct
### Pipeline Fails at Deploy Stage
**Error**: "Permission denied (publickey)"
**Fix**:
1. Check `SSH_PRIVATE_KEY` variable includes headers
2. Verify public key is in VPS `~/.ssh/authorized_keys`
3. Test: `ssh -i ~/.ssh/gitlab_ci_bot your-user@your-vps-host`
**Error**: "docker-compose: command not found"
**Fix**: Install docker-compose on VPS:
```bash
sudo apt-get update
sudo apt-get install docker-compose-plugin
```
### Bot Doesn't Start on VPS
**Check logs:**
```bash
cd /opt/discord-bot
docker-compose logs -f bot
```
**Common issues:**
- Missing/wrong `.env.production` values
- Bot token expired
- Database API unreachable
---
## 📊 Version Bumping
Update version in `.gitlab-ci.yml`:
```yaml
variables:
VERSION_MAJOR: "2"
VERSION_MINOR: "1" # ← Change this for new features
```
**Rules:**
- **Patch**: Auto-increments each pipeline
- **Minor**: Manual bump for new features
- **Major**: Manual bump for breaking changes
---
## 🎓 What You Get
**Automated Testing**: Every push runs tests
**Automated Builds**: Docker images built on CI
**Semantic Versioning**: v2.1.X format
**Manual Production Deploys**: Approval required
**Automatic Rollback**: On health check failure
**Quick Manual Rollback**: 3 methods available
**Deployment History**: Full audit trail
**Version Visibility**: `/version` command
---
## 📞 Support
If you get stuck:
1. Check GitLab pipeline logs
2. Check VPS docker logs: `docker-compose logs`
3. Check deployment log: `cat deployments.log`
4. Verify all CI/CD variables are set correctly
---
**Setup Time**: ~30 minutes
**Deployment Time After Setup**: ~2-3 minutes
**Rollback Time**: ~1-2 minutes
**You're all set! 🚀**

View File

@ -1,315 +0,0 @@
# GitLab CI/CD Quick Reference
Quick commands and reminders for daily development.
---
## 🔄 Common Workflows
### Deploy Feature to Production
```bash
# 1. Develop feature
git checkout -b feature/my-feature develop
# ... make changes ...
git commit -m "Add my feature"
git push gitlab feature/my-feature
# 2. Merge to develop for staging test (optional)
git checkout develop
git merge feature/my-feature
git push gitlab develop
# → Auto-deploys to staging
# 3. Merge to main
git checkout main
git merge develop
git push gitlab main
# 4. In GitLab UI: CI/CD > Pipelines > Click ▶️ on deploy:production
```
### Emergency Rollback
```bash
# Option 1: VPS Script (fastest)
ssh user@vps "cd /opt/discord-bot && ./rollback.sh"
# Option 2: GitLab UI
# CI/CD > Pipelines > Click ▶️ on rollback:production
# Option 3: Manual
ssh user@vps
cd /opt/discord-bot
# Edit docker-compose.yml to previous version
docker-compose up -d
```
### Check Deployment Status
```bash
# Check running version on VPS
ssh user@vps "cd /opt/discord-bot && docker inspect discord-bot --format '{{.Config.Labels}}' | grep version"
# Check recent deployments
ssh user@vps "cd /opt/discord-bot && tail -10 deployments.log"
# Check bot health
ssh user@vps "cd /opt/discord-bot && docker-compose ps"
```
---
## 🏷️ Version Management
### Current Version Strategy
| Format | Example | Auto/Manual | When |
|--------|---------|-------------|------|
| Major | `v2.x.x` | Manual | Breaking changes |
| Minor | `v2.1.x` | Manual | New features |
| Patch | `v2.1.123` | Auto | Every build |
### Bump Version
Edit `.gitlab-ci.yml`:
```yaml
variables:
VERSION_MAJOR: "2"
VERSION_MINOR: "2" # ← Change this
```
Then:
```bash
git add .gitlab-ci.yml
git commit -m "Bump version to v2.2.x"
git push gitlab main
```
---
## 🐳 Docker Tags Generated
Every build creates:
- `v2.1.123` - Full semantic version
- `a1b2c3d` - Git commit SHA
- `main-a1b2c3d` - Branch + SHA
- `latest` - Latest main branch (production)
- `staging` - Latest develop branch (staging)
---
## 🔍 Useful Commands
### Check Pipeline Status
```bash
# From CLI (requires gitlab-ci-lint or gitlab CLI)
gitlab-ci-lint .gitlab-ci.yml
# Or visit:
# https://gitlab.com/yourusername/discord-bot/-/pipelines
```
### View Logs
```bash
# Bot logs
ssh user@vps "cd /opt/discord-bot && docker-compose logs -f bot"
# Redis logs
ssh user@vps "cd /opt/discord-bot && docker-compose logs -f redis"
# Deployment history
ssh user@vps "cd /opt/discord-bot && cat deployments.log | column -t -s '|'"
```
### Test Locally Before Push
```bash
cd discord-app-v2
python -m pytest --tb=short -q
```
### Build Docker Image Locally
```bash
cd discord-app-v2
docker build \
--build-arg VERSION="dev" \
--build-arg GIT_COMMIT=$(git rev-parse --short HEAD) \
--build-arg BUILD_DATE=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \
-t discord-bot-v2:local .
```
---
## 🎯 GitLab CI/CD Variables
**Required Variables** (Settings > CI/CD > Variables):
| Variable | Type | Example |
|----------|------|---------|
| `DOCKER_USERNAME` | Masked | `youruser` |
| `DOCKER_PASSWORD` | Masked | `dckr_pat_abc123...` |
| `SSH_PRIVATE_KEY` | Masked | `-----BEGIN OPENSSH...` |
| `VPS_HOST` | Plain | `203.0.113.10` |
| `VPS_USER` | Plain | `ubuntu` |
---
## 🚨 Emergency Procedures
### Build Failing
1. Check GitLab pipeline logs
2. Run tests locally: `pytest`
3. Check Docker build: `docker build ...`
4. Fix issues
5. Push again
### Deploy Failing
1. Check SSH access: `ssh user@vps`
2. Check docker-compose.yml exists
3. Check .env.production has all vars
4. Check VPS disk space: `df -h`
5. Check Docker is running: `docker ps`
### Bot Not Starting After Deploy
```bash
# SSH to VPS
ssh user@vps
cd /opt/discord-bot
# Check logs
docker-compose logs bot | tail -50
# Check health
docker-compose ps
# Restart
docker-compose restart bot
# Nuclear option: full restart
docker-compose down
docker-compose up -d
```
### Rollback Needed Immediately
```bash
# Fastest: VPS script
ssh user@vps "cd /opt/discord-bot && ./rollback.sh"
# Confirm version
ssh user@vps "cd /opt/discord-bot && docker-compose ps"
```
---
## 📊 Health Checks
### Bot Health
```bash
# Check if bot is healthy
ssh user@vps "docker inspect discord-bot --format '{{.State.Health.Status}}'"
# Should show: healthy
# Check Discord connection (in Discord)
/version
```
### Redis Health
```bash
ssh user@vps "docker exec discord-redis redis-cli ping"
# Should show: PONG
```
### Full System Check
```bash
ssh user@vps << 'EOF'
cd /opt/discord-bot
echo "=== Container Status ==="
docker-compose ps
echo ""
echo "=== Recent Logs ==="
docker-compose logs --tail=10 bot
echo ""
echo "=== Deployment History ==="
tail -5 deployments.log
EOF
```
---
## 🔐 Security Reminders
- ✅ Never commit `.env` files
- ✅ Use GitLab CI/CD variables for secrets
- ✅ Mark all secrets as "Masked" in GitLab
- ✅ Rotate SSH keys periodically
- ✅ Use Docker Hub access tokens, not passwords
- ✅ Keep VPS firewall enabled
---
## 📈 Monitoring
### Check Metrics
```bash
# If Prometheus is set up
curl http://vps-ip:8000/metrics
# Check bot uptime
ssh user@vps "docker inspect discord-bot --format '{{.State.StartedAt}}'"
```
### Watch Live Logs
```bash
ssh user@vps "cd /opt/discord-bot && docker-compose logs -f --tail=100"
```
---
## 🎓 Tips & Tricks
### Skip CI for Minor Changes
```bash
git commit -m "Update README [skip ci]"
```
### Test in Staging First
```bash
# Push to develop → auto-deploys to staging
git push gitlab develop
# Test thoroughly, then merge to main
```
### View All Available Versions
```bash
# On Docker Hub
docker search yourusername/discord-bot-v2
# On VPS
ssh user@vps "docker images yourusername/discord-bot-v2"
```
### Clean Up Old Images
```bash
# On VPS (run monthly)
ssh user@vps "docker image prune -a -f"
```
---
## 📞 Getting Help
1. **Check Logs**: Always start with logs
2. **GitLab Pipeline**: Look at failed job output
3. **Docker Logs**: `docker-compose logs`
4. **Deployment Log**: `cat deployments.log`
---
**Last Updated**: January 2025
**Bot Version**: v2.1.x
**CI/CD Platform**: GitLab CI/CD

View File

@ -1,517 +0,0 @@
# VPS Helper Scripts
Collection of useful scripts for managing the Discord bot on your VPS.
---
## 📍 Script Locations
All scripts should be placed in `/opt/discord-bot/` on your VPS.
```bash
/opt/discord-bot/
├── docker-compose.yml
├── .env.production
├── rollback.sh # Rollback to previous version
├── deploy-manual.sh # Manual deployment script
├── health-check.sh # Check bot health
├── logs-view.sh # View logs easily
├── cleanup.sh # Clean up old Docker images
└── deployments.log # Auto-generated deployment history
```
---
## 🔄 rollback.sh
Already created during setup. For reference:
```bash
#!/bin/bash
set -e
COMPOSE_FILE="docker-compose.yml"
LOG_FILE="deployments.log"
echo "=== Discord Bot Rollback ==="
echo ""
# Show recent deployments
echo "Recent deployments:"
tail -n 10 $LOG_FILE | column -t -s '|'
echo ""
# Show current version
CURRENT=$(grep "image:" $COMPOSE_FILE | awk '{print $2}')
echo "Current version: $CURRENT"
echo ""
# Show last version
if [ -f .last_version ]; then
LAST=$(cat .last_version)
echo "Last version: $LAST"
echo ""
read -p "Rollback to this version? (y/N): " confirm
if [ "$confirm" != "y" ]; then
echo "Rollback cancelled."
exit 0
fi
# Perform rollback
echo "Rolling back..."
sed -i "s|image:.*|image: $LAST|" $COMPOSE_FILE
docker-compose up -d
# Record rollback
echo "$(date -Iseconds) | ROLLBACK | $LAST" >> $LOG_FILE
echo "✅ Rollback complete!"
else
echo "❌ No previous version found!"
exit 1
fi
```
---
## 🚀 deploy-manual.sh
For manual deployments (bypassing GitLab):
```bash
#!/bin/bash
set -e
COMPOSE_FILE="docker-compose.yml"
LOG_FILE="deployments.log"
IMAGE="yourusername/discord-bot-v2"
echo "=== Manual Discord Bot Deployment ==="
echo ""
# Show available versions
echo "Available versions on Docker Hub:"
echo "(Showing last 10 tags)"
curl -s "https://hub.docker.com/v2/repositories/${IMAGE}/tags?page_size=10" | \
grep -o '"name":"[^"]*' | \
grep -o '[^"]*$'
echo ""
# Prompt for version
read -p "Enter version to deploy (or 'latest'): " VERSION
if [ -z "$VERSION" ]; then
echo "❌ No version specified!"
exit 1
fi
# Backup current version
docker inspect discord-bot --format='{{.Image}}' > .last_version || true
# Update docker-compose
sed -i "s|image: ${IMAGE}:.*|image: ${IMAGE}:${VERSION}|" $COMPOSE_FILE
# Pull and deploy
echo "Pulling ${IMAGE}:${VERSION}..."
docker-compose pull
echo "Deploying..."
docker-compose up -d
# Wait for health check
echo "Waiting for health check..."
sleep 10
if docker-compose ps | grep -q "Up (healthy)"; then
echo "✅ Deployment successful!"
echo "$(date -Iseconds) | MANUAL | ${VERSION} | Manual deployment" >> $LOG_FILE
docker image prune -f
else
echo "❌ Health check failed! Rolling back..."
LAST_VERSION=$(cat .last_version)
sed -i "s|image: ${IMAGE}:.*|image: ${LAST_VERSION}|" $COMPOSE_FILE
docker-compose up -d
exit 1
fi
```
**Usage:**
```bash
cd /opt/discord-bot
./deploy-manual.sh
```
---
## 🏥 health-check.sh
Comprehensive health check:
```bash
#!/bin/bash
echo "=== Discord Bot Health Check ==="
echo ""
# Container status
echo "📦 Container Status:"
docker-compose ps
echo ""
# Bot health
BOT_HEALTH=$(docker inspect discord-bot --format '{{.State.Health.Status}}' 2>/dev/null || echo "unknown")
echo "🤖 Bot Health: $BOT_HEALTH"
# Redis health
REDIS_HEALTH=$(docker exec discord-redis redis-cli ping 2>/dev/null || echo "unreachable")
echo "💾 Redis Health: $REDIS_HEALTH"
echo ""
# Uptime
BOT_STARTED=$(docker inspect discord-bot --format '{{.State.StartedAt}}' 2>/dev/null || echo "unknown")
echo "⏱️ Bot Started: $BOT_STARTED"
echo ""
# Resource usage
echo "💻 Resource Usage:"
docker stats --no-stream discord-bot discord-redis
echo ""
# Recent errors
echo "⚠️ Recent Errors (last 10):"
docker-compose logs --tail=100 bot 2>&1 | grep -i error | tail -10 || echo "No recent errors"
echo ""
# Deployment history
echo "📜 Recent Deployments:"
tail -5 deployments.log | column -t -s '|'
echo ""
# Summary
echo "=== Summary ==="
if [ "$BOT_HEALTH" = "healthy" ] && [ "$REDIS_HEALTH" = "PONG" ]; then
echo "✅ All systems operational"
exit 0
else
echo "❌ Issues detected"
exit 1
fi
```
**Usage:**
```bash
cd /opt/discord-bot
./health-check.sh
```
**Cron for daily checks:**
```bash
# Run health check daily at 6 AM
0 6 * * * /opt/discord-bot/health-check.sh | mail -s "Bot Health Report" you@email.com
```
---
## 📋 logs-view.sh
Easy log viewing:
```bash
#!/bin/bash
echo "Discord Bot Logs Viewer"
echo ""
echo "Select option:"
echo "1) Live bot logs (follow)"
echo "2) Last 100 bot logs"
echo "3) Last 50 error logs"
echo "4) All logs (bot + redis)"
echo "5) Deployment history"
echo "6) Search logs"
echo ""
read -p "Choice [1-6]: " choice
case $choice in
1)
echo "Following live logs (Ctrl+C to exit)..."
docker-compose logs -f --tail=50 bot
;;
2)
docker-compose logs --tail=100 bot
;;
3)
docker-compose logs --tail=500 bot | grep -i error | tail -50
;;
4)
docker-compose logs --tail=100
;;
5)
cat deployments.log | column -t -s '|'
;;
6)
read -p "Search term: " term
docker-compose logs bot | grep -i "$term" | tail -50
;;
*)
echo "Invalid option"
exit 1
;;
esac
```
**Usage:**
```bash
cd /opt/discord-bot
./logs-view.sh
```
---
## 🧹 cleanup.sh
Clean up old Docker images and data:
```bash
#!/bin/bash
set -e
echo "=== Discord Bot Cleanup ==="
echo ""
# Show current disk usage
echo "💾 Current Disk Usage:"
df -h /var/lib/docker
echo ""
# Show Docker disk usage
echo "🐳 Docker Disk Usage:"
docker system df
echo ""
read -p "Proceed with cleanup? (y/N): " confirm
if [ "$confirm" != "y" ]; then
echo "Cleanup cancelled."
exit 0
fi
# Stop containers temporarily
echo "Stopping containers..."
docker-compose down
# Prune images (keep recent ones)
echo "Pruning old images..."
docker image prune -a -f --filter "until=720h" # Keep images from last 30 days
# Prune volumes (be careful!)
# Uncomment if you want to clean volumes
# echo "Pruning unused volumes..."
# docker volume prune -f
# Prune build cache
echo "Pruning build cache..."
docker builder prune -f
# Restart containers
echo "Restarting containers..."
docker-compose up -d
# Show new disk usage
echo ""
echo "✅ Cleanup complete!"
echo ""
echo "💾 New Disk Usage:"
df -h /var/lib/docker
echo ""
docker system df
```
**Usage:**
```bash
cd /opt/discord-bot
./cleanup.sh
```
**Cron for monthly cleanup:**
```bash
# Run cleanup on the first Sunday of the month at 3 AM
# (cron ORs day-of-month and day-of-week when both are restricted,
# so guard with a day-of-week check instead of using the 5th field)
0 3 1-7 * * [ "$(date +\%w)" -eq 0 ] && /opt/discord-bot/cleanup.sh
```
---
## 🔍 version-info.sh
Show detailed version information:
```bash
#!/bin/bash
echo "=== Version Information ==="
echo ""
# Docker image version
echo "🐳 Docker Image:"
docker inspect discord-bot --format '{{.Config.Image}}'
echo ""
# Image labels
echo "🏷️ Build Metadata:"
docker inspect discord-bot --format '{{json .Config.Labels}}' | jq '.'
echo ""
# Environment variables (version info only)
echo "🔧 Environment:"
docker inspect discord-bot --format '{{range .Config.Env}}{{println .}}{{end}}' | grep BOT_
echo ""
# Currently deployed
echo "📦 Currently Deployed:"
cat .deployed_version 2>/dev/null || echo "Unknown"
echo ""
# Last deployment
echo "📅 Last Deployment:"
tail -1 deployments.log | column -t -s '|'
echo ""
# Available for rollback
echo "⏮️ Available for Rollback:"
cat .last_version 2>/dev/null || echo "None"
```
**Usage:**
```bash
cd /opt/discord-bot
./version-info.sh
```
---
## 📊 status-dashboard.sh
Combined status dashboard:
```bash
#!/bin/bash
clear
echo "╔════════════════════════════════════════════╗"
echo "║ Discord Bot Status Dashboard ║"
echo "╚════════════════════════════════════════════╝"
echo ""
# Version
echo "📦 Version: $(cat .deployed_version 2>/dev/null || echo 'Unknown')"
echo ""
# Health
BOT_HEALTH=$(docker inspect discord-bot --format '{{.State.Health.Status}}' 2>/dev/null || echo "down")
REDIS_HEALTH=$(docker exec discord-redis redis-cli ping 2>/dev/null || echo "DOWN")
if [ "$BOT_HEALTH" = "healthy" ]; then
echo "✅ Bot: $BOT_HEALTH"
else
echo "❌ Bot: $BOT_HEALTH"
fi
if [ "$REDIS_HEALTH" = "PONG" ]; then
echo "✅ Redis: UP"
else
echo "❌ Redis: $REDIS_HEALTH"
fi
echo ""
# Uptime
STARTED=$(docker inspect discord-bot --format '{{.State.StartedAt}}' 2>/dev/null || echo "unknown")
echo "⏱️ Uptime: $STARTED"
echo ""
# Resource usage
echo "💻 Resources:"
docker stats --no-stream --format "table {{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}" discord-bot discord-redis
echo ""
# Recent deployments
echo "📜 Recent Deployments:"
tail -3 deployments.log | column -t -s '|'
echo ""
# Errors
ERROR_COUNT=$(docker-compose logs --tail=1000 bot 2>&1 | grep -ic error || echo 0)
echo "⚠️ Errors (last 1000 lines): $ERROR_COUNT"
echo ""
echo "╚════════════════════════════════════════════╝"
echo "Press Ctrl+C to exit, or run with 'watch' for live updates"
```
**Usage:**
```bash
# One-time view
cd /opt/discord-bot
./status-dashboard.sh
# Live updating (every 2 seconds)
watch -n 2 /opt/discord-bot/status-dashboard.sh
```
---
## 🚀 Quick Setup
Install all scripts at once:
```bash
ssh user@vps << 'EOF'
cd /opt/discord-bot
# Make scripts executable
chmod +x rollback.sh
chmod +x deploy-manual.sh
chmod +x health-check.sh
chmod +x logs-view.sh
chmod +x cleanup.sh
chmod +x version-info.sh
chmod +x status-dashboard.sh
echo "✅ All scripts are ready!"
ls -lah *.sh
EOF
```
---
## 🎯 Useful Aliases
Add to `~/.bashrc` on VPS:
```bash
# Discord Bot aliases
alias bot-status='cd /opt/discord-bot && ./status-dashboard.sh'
alias bot-logs='cd /opt/discord-bot && ./logs-view.sh'
alias bot-health='cd /opt/discord-bot && ./health-check.sh'
alias bot-rollback='cd /opt/discord-bot && ./rollback.sh'
alias bot-deploy='cd /opt/discord-bot && ./deploy-manual.sh'
alias bot-restart='cd /opt/discord-bot && docker-compose restart bot'
alias bot-down='cd /opt/discord-bot && docker-compose down'
alias bot-up='cd /opt/discord-bot && docker-compose up -d'
# Quick status
alias bs='bot-status'
alias bl='bot-logs'
```
Then:
```bash
source ~/.bashrc
# Now you can use:
bs # Status dashboard
bl # View logs
bot-health # Health check
```
---
**Tip**: Create a `README.txt` in `/opt/discord-bot/` listing all available scripts and their purposes!

190
AGENTS.md
View File

@ -1,190 +0,0 @@
# AGENTS.md - Discord Bot v2.0
Guidelines for AI coding agents working in this repository.
## Quick Reference
**Start bot**: `python bot.py`
**Run all tests**: `python -m pytest --tb=short -q`
**Run single test file**: `python -m pytest tests/test_models.py -v`
**Run single test**: `python -m pytest tests/test_models.py::TestTeamModel::test_team_creation_minimal -v`
**Run tests matching pattern**: `python -m pytest -k "test_player" -v`
## Project Structure
- `bot.py` - Main entry point
- `commands/` - Discord slash commands (package-based)
- `services/` - API service layer (BaseService pattern)
- `models/` - Pydantic data models
- `views/` - Discord UI components (embeds, modals)
- `utils/` - Logging, decorators, caching
- `tests/` - pytest test suite
## Code Style
### Imports
Order: stdlib, third-party, local. Separate groups with blank lines.
```python
import asyncio
from typing import Optional, List
import discord
from discord.ext import commands
from services.player_service import player_service
from utils.decorators import logged_command
```
### Formatting
- Line length: 100 characters max
- Docstrings: Google style with triple quotes
- Indentation: 4 spaces
- Trailing commas in multi-line structures
### Type Hints
Always use type hints for function signatures:
```python
async def get_player(self, player_id: int) -> Optional[Player]:
async def search_players(self, query: str, limit: int = 10) -> List[Player]:
```
### Naming Conventions
- Classes: `PascalCase` (PlayerService, TeamInfoCommands)
- Functions/methods: `snake_case` (get_player, search_players)
- Constants: `UPPER_SNAKE_CASE` (SBA_CURRENT_SEASON)
- Private: prefix with `_` (_client, _team_service)
### Error Handling
Use custom exceptions from `exceptions.py`. Prefer "raise or return" over Optional:
```python
from exceptions import APIException, PlayerNotFoundError
async def get_player(self, player_id: int) -> Player:
result = await self.get_by_id(player_id)
if result is None:
raise PlayerNotFoundError(f"Player {player_id} not found")
return result
```
## Discord Command Patterns
### Always use @logged_command decorator
Eliminates boilerplate logging. Class must have `self.logger` attribute:
```python
class PlayerInfoCommands(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.logger = get_contextual_logger(f'{__name__}.PlayerInfoCommands')
@discord.app_commands.command(name="player")
@logged_command("/player")
async def player_command(self, interaction, name: str):
# Business logic only - no try/catch boilerplate needed
player = await player_service.get_player_by_name(name)
await interaction.followup.send(embed=create_embed(player))
```
### Autocomplete: Use standalone functions (not methods)
```python
async def player_name_autocomplete(
interaction: discord.Interaction,
current: str,
) -> List[discord.app_commands.Choice[str]]:
if len(current) < 2:
return []
try:
players = await player_service.search_players(current, limit=25)
return [discord.app_commands.Choice(name=p.name, value=p.name) for p in players]
except Exception:
return [] # Never break autocomplete
class MyCommands(commands.Cog):
@discord.app_commands.command()
@discord.app_commands.autocomplete(name=player_name_autocomplete)
async def my_command(self, interaction, name: str): ...
```
### Embed emoji rules
Template methods auto-add emojis. Never double up:
```python
# CORRECT - template adds emoji
embed = EmbedTemplate.success(title="Operation Completed")  # Results in: "✅ Operation Completed"
# WRONG - double emoji (title already contains one)
embed = EmbedTemplate.success(title="✅ Operation Completed")  # Results in: "✅ ✅ Operation Completed"
# For custom emoji, use create_base_embed
embed = EmbedTemplate.create_base_embed(title="Custom Title", color=EmbedColors.SUCCESS)
```
## Service Layer
### Never bypass services for API calls
```python
# CORRECT
player = await player_service.get_player(player_id)
# WRONG - never do this
client = await player_service.get_client()
await client.get(f'players/{player_id}')
```
### Key service methods
- `TeamService.get_team(team_id)` - not `get_team_by_id()`
- `PlayerService.search_players(query, limit, all_seasons=True)` - cross-season search
## Models
### Use from_api_data() classmethod
```python
player = Player.from_api_data(api_response)
```
### Database entities require id field
```python
class Player(SBABaseModel):
id: int = Field(..., description="Player ID from database") # Required, not Optional
```
## Testing
### Use aioresponses for HTTP mocking
```python
from aioresponses import aioresponses
@pytest.mark.asyncio
async def test_get_player():
with aioresponses() as m:
m.get("https://api.example.com/v3/players/1", payload={"id": 1, "name": "Test"})
result = await api_client.get("players", object_id=1)
assert result["name"] == "Test"
```
### Provide complete model data
Pydantic validates all fields. Use helper functions for test data:
```python
def create_player_data(player_id: int, name: str, **kwargs):
return {"id": player_id, "name": name, "wara": 2.5, "season": 13, "pos_1": "CF", **kwargs}
```
## Critical Rules
1. **Git**: Never commit directly to `main`. Create feature branches.
2. **Services**: Always use service layer methods, never direct API client access.
3. **Embeds**: Don't add emojis to titles when using template methods (success/error/warning/info).
4. **Tests**: Include docstrings explaining "what" and "why" for each test.
5. **Commits**: Do not commit without user approval.
## Documentation
Check `CLAUDE.md` files in directories for detailed patterns:
- `commands/CLAUDE.md` - Command architecture
- `services/CLAUDE.md` - Service patterns
- `models/CLAUDE.md` - Model validation
- `tests/CLAUDE.md` - Testing strategies

View File

@ -1,371 +0,0 @@
# Building and Pushing to Docker Hub
This guide covers building the Docker image and pushing it to Docker Hub for production deployment.
## Prerequisites
- Docker installed and running
- Docker Hub account (username: `manticorum67`)
- Write access to `manticorum67/major-domo-discordapp` repository
## Docker Hub Repository
**Repository**: `manticorum67/major-domo-discordapp`
**URL**: https://hub.docker.com/r/manticorum67/major-domo-discordapp
## Login to Docker Hub
```bash
# Login to Docker Hub
docker login
# Enter your username: manticorum67
# Enter your password/token: [your-password-or-token]
```
## Build and Push Workflow
### 1. Tag the Release
```bash
# Determine version number (use semantic versioning)
VERSION="2.0.0"
# Create git tag (optional but recommended)
git tag -a "v${VERSION}" -m "Release v${VERSION}"
git push origin "v${VERSION}"
```
### 2. Build the Image
```bash
# Build for production
docker build -t manticorum67/major-domo-discordapp:latest .
# Build with version tag
docker build -t manticorum67/major-domo-discordapp:${VERSION} .
# Or build both at once
docker build \
-t manticorum67/major-domo-discordapp:latest \
-t manticorum67/major-domo-discordapp:${VERSION} \
.
```
### 3. Test the Image Locally
```bash
# Test with docker run
docker run --rm \
--env-file .env \
-v $(pwd)/data:/data:ro \
-v $(pwd)/logs:/logs:rw \
manticorum67/major-domo-discordapp:latest
# Or test with docker-compose (development)
docker-compose -f docker-compose.dev.yml up
```
### 4. Push to Docker Hub
```bash
# Push latest tag
docker push manticorum67/major-domo-discordapp:latest
# Push version tag
docker push manticorum67/major-domo-discordapp:${VERSION}
# Or push all tags
docker push manticorum67/major-domo-discordapp --all-tags
```
## Complete Build and Push Script
```bash
#!/bin/bash
# build-and-push.sh
set -e # Exit on error
# Configuration
VERSION="${1:-latest}" # Use argument or default to 'latest'
DOCKER_REPO="manticorum67/major-domo-discordapp"
echo "🔨 Building Docker image..."
echo "Version: ${VERSION}"
echo "Repository: ${DOCKER_REPO}"
echo ""
# Build image with both tags
docker build \
-t ${DOCKER_REPO}:latest \
-t ${DOCKER_REPO}:${VERSION} \
.
echo ""
echo "✅ Build complete!"
echo ""
echo "📤 Pushing to Docker Hub..."
# Push both tags
docker push ${DOCKER_REPO}:latest
docker push ${DOCKER_REPO}:${VERSION}
echo ""
echo "✅ Push complete!"
echo ""
echo "🎉 Image available at:"
echo " docker pull ${DOCKER_REPO}:latest"
echo " docker pull ${DOCKER_REPO}:${VERSION}"
```
### Using the Build Script
```bash
# Make script executable
chmod +x build-and-push.sh
# Build and push with version
./build-and-push.sh 2.0.0
# Build and push as latest only
./build-and-push.sh
```
## Multi-Platform Builds (Optional)
To build for multiple architectures (amd64, arm64):
```bash
# Create a builder instance
docker buildx create --name multiarch --use
# Build and push for multiple platforms
docker buildx build \
--platform linux/amd64,linux/arm64 \
-t manticorum67/major-domo-discordapp:latest \
-t manticorum67/major-domo-discordapp:${VERSION} \
--push \
.
```
## Versioning Strategy
### Semantic Versioning
Use semantic versioning (MAJOR.MINOR.PATCH):
- **MAJOR**: Breaking changes
- **MINOR**: New features (backwards compatible)
- **PATCH**: Bug fixes
Examples:
- `2.0.0` - Major release with scorecard submission
- `2.1.0` - Added new command
- `2.1.1` - Fixed bug in existing command
### Tagging Strategy
Always maintain these tags:
1. **`:latest`** - Most recent stable release
2. **`:VERSION`** - Specific version (e.g., `2.0.0`)
3. **`:MAJOR.MINOR`** - Minor version (e.g., `2.0`) - optional
4. **`:MAJOR`** - Major version (e.g., `2`) - optional
### Example Tagging
```bash
VERSION="2.0.0"
# Tag with all versions
docker build \
-t manticorum67/major-domo-discordapp:latest \
-t manticorum67/major-domo-discordapp:2.0.0 \
-t manticorum67/major-domo-discordapp:2.0 \
-t manticorum67/major-domo-discordapp:2 \
.
# Push all tags
docker push manticorum67/major-domo-discordapp --all-tags
```
## GitHub Actions (Optional)
Automate builds with GitHub Actions:
```yaml
# .github/workflows/docker-build.yml
name: Build and Push Docker Image
on:
push:
tags:
- 'v*.*.*'
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract version
id: version
run: echo "VERSION=${GITHUB_REF#refs/tags/v}" >> $GITHUB_OUTPUT
- name: Build and push
uses: docker/build-push-action@v4
with:
context: ./discord-app-v2
push: true
tags: |
manticorum67/major-domo-discordapp:latest
manticorum67/major-domo-discordapp:${{ steps.version.outputs.VERSION }}
```
## Production Deployment
After pushing to Docker Hub, deploy on production:
```bash
# On production server
cd /path/to/discord-app-v2
# Pull latest image
docker-compose pull
# Restart with new image
docker-compose up -d
# Verify it's running
docker-compose logs -f discord-bot
```
## Rollback to Previous Version
If a release has issues:
```bash
# Stop current version
docker-compose down
# Edit docker-compose.yml to use specific version
# Change: image: manticorum67/major-domo-discordapp:latest
# To: image: manticorum67/major-domo-discordapp:2.0.0
# Pull and start old version
docker-compose pull
docker-compose up -d
```
Or use a specific version directly:
```bash
docker-compose down
docker pull manticorum67/major-domo-discordapp:2.0.0
docker run -d \
--name major-domo-discord-bot-v2 \
--env-file .env \
-v $(pwd)/data:/data:ro \
-v $(pwd)/logs:/logs:rw \
manticorum67/major-domo-discordapp:2.0.0
```
## Image Size Optimization
The multi-stage build already optimizes size, but you can verify:
```bash
# Check image size
docker images manticorum67/major-domo-discordapp
# Expected size: ~150-200MB
# Inspect layers
docker history manticorum67/major-domo-discordapp:latest
```
## Troubleshooting
### Build Fails
```bash
# Build with verbose output
docker build --progress=plain -t manticorum67/major-domo-discordapp:latest .
# Check for errors in requirements.txt
docker build --no-cache -t manticorum67/major-domo-discordapp:latest .
```
### Push Fails
```bash
# Check if logged in
docker info | grep Username
# Re-login
docker logout
docker login
# Check repository permissions
docker push manticorum67/major-domo-discordapp:latest
```
### Image Won't Run
```bash
# Test image interactively
docker run -it --rm \
--entrypoint /bin/bash \
manticorum67/major-domo-discordapp:latest
# Inside container, check Python
python --version
pip list
ls -la /app
```
## Security Best Practices
1. **Use Docker Hub Access Tokens** instead of password
2. **Enable 2FA** on Docker Hub account
3. **Scan images** for vulnerabilities:
```bash
docker scan manticorum67/major-domo-discordapp:latest
```
4. **Sign images** (optional):
```bash
docker trust sign manticorum67/major-domo-discordapp:latest
```
## Cleanup
Remove old local images:
```bash
# Remove dangling images
docker image prune
# Remove all unused images
docker image prune -a
# Remove specific version
docker rmi manticorum67/major-domo-discordapp:1.0.0
```
## Additional Resources
- **Docker Hub**: https://hub.docker.com/r/manticorum67/major-domo-discordapp
- **Docker Documentation**: https://docs.docker.com/
- **Semantic Versioning**: https://semver.org/

View File

@ -1,49 +0,0 @@
# Enhanced Dockerfile with Version Metadata
# Rename to Dockerfile when ready to use

# ---- Build stage ----
# gcc is installed only here so native-extension builds never bloat the runtime image.
FROM python:3.11-slim as builder
WORKDIR /app
RUN apt-get update && apt-get install -y --no-install-recommends \
    gcc \
    && rm -rf /var/lib/apt/lists/*
COPY requirements.txt .
# --user installs everything under /root/.local, letting the runtime stage copy one directory.
RUN pip install --user --no-cache-dir -r requirements.txt

# ---- Runtime stage ----
FROM python:3.11-slim
WORKDIR /app
# Copy dependencies from builder
COPY --from=builder /root/.local /root/.local
ENV PATH=/root/.local/bin:$PATH
# Add version metadata as build args
# (supply via: docker build --build-arg VERSION=... --build-arg GIT_COMMIT=... --build-arg BUILD_DATE=...)
ARG VERSION="dev"
ARG GIT_COMMIT="unknown"
ARG BUILD_DATE="unknown"
# Store as labels (visible via `docker inspect`)
LABEL org.opencontainers.image.version="${VERSION}"
LABEL org.opencontainers.image.revision="${GIT_COMMIT}"
LABEL org.opencontainers.image.created="${BUILD_DATE}"
LABEL org.opencontainers.image.title="Discord Bot v2.0"
LABEL org.opencontainers.image.description="SBA Discord Bot - Modernized"
# Store as environment variables (accessible in bot)
ENV BOT_VERSION="${VERSION}"
ENV BOT_GIT_COMMIT="${GIT_COMMIT}"
ENV BOT_BUILD_DATE="${BUILD_DATE}"
# Copy application source last so dependency layers stay cached across code-only changes
COPY . .
# Health check
# NOTE(review): this only proves the discord package imports — it does not verify
# the bot's gateway connection; consider a real liveness probe. TODO confirm intent.
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
    CMD python -c "import discord; print('ok')" || exit 1
CMD ["python", "bot.py"]

View File

@ -1,97 +0,0 @@
#!/bin/bash
# ============================================
# Build and Push Docker Image to Docker Hub
# ============================================
# Usage:
#   ./build-and-push.sh          # Build and push as 'latest' AND the default version '2.0.0'
#   ./build-and-push.sh 2.0.0    # Build and push as 'latest' and '2.0.0'
#   ./build-and-push.sh latest   # Build and push as 'latest' only
set -e  # Exit on error
# Configuration
# NOTE(review): the default here is '2.0.0', not 'latest' — bump this constant per release
# (or always pass the version explicitly). TODO confirm the intended default.
VERSION="${1:-2.0.0}"
DOCKER_REPO="manticorum67/major-domo-discordapp"
# Color output (ANSI escape codes for echo -e)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
echo -e "${BLUE}======================================${NC}"
echo -e "${BLUE}Docker Build and Push${NC}"
echo -e "${BLUE}======================================${NC}"
echo ""
echo -e "${YELLOW}Repository:${NC} ${DOCKER_REPO}"
echo -e "${YELLOW}Version:${NC} ${VERSION}"
echo ""
# Check if Docker is running
if ! docker info > /dev/null 2>&1; then
    echo -e "${RED}❌ Error: Docker is not running${NC}"
    exit 1
fi
# Check if logged in to Docker Hub
# NOTE(review): grepping `docker info` for "Username" misses credential-helper logins,
# so this may re-prompt even when already authenticated — harmless but noisy.
if ! docker info 2>/dev/null | grep -q "Username"; then
    echo -e "${YELLOW}⚠️  Not logged in to Docker Hub${NC}"
    echo -e "${YELLOW}Please log in:${NC}"
    docker login
    echo ""
fi
# Build image
echo -e "${BLUE}🔨 Building Docker image...${NC}"
echo ""
if [ "$VERSION" = "latest" ]; then
    # Only tag as latest
    docker build -t ${DOCKER_REPO}:latest .
else
    # Tag as both latest and version
    docker build \
        -t ${DOCKER_REPO}:latest \
        -t ${DOCKER_REPO}:${VERSION} \
        .
fi
echo ""
echo -e "${GREEN}✅ Build complete!${NC}"
echo ""
# Confirm push (interactive gate — nothing leaves the machine without a 'y')
echo -e "${YELLOW}Ready to push to Docker Hub${NC}"
read -p "Continue? (y/n) " -n 1 -r
echo ""
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
    echo -e "${YELLOW}❌ Push cancelled${NC}"
    exit 0
fi
# Push image
echo ""
echo -e "${BLUE}📤 Pushing to Docker Hub...${NC}"
echo ""
docker push ${DOCKER_REPO}:latest
if [ "$VERSION" != "latest" ]; then
    docker push ${DOCKER_REPO}:${VERSION}
fi
echo ""
echo -e "${GREEN}✅ Push complete!${NC}"
echo ""
echo -e "${GREEN}🎉 Image available at:${NC}"
echo -e "   docker pull ${DOCKER_REPO}:latest"
if [ "$VERSION" != "latest" ]; then
    echo -e "   docker pull ${DOCKER_REPO}:${VERSION}"
fi
echo ""
echo -e "${BLUE}======================================${NC}"
echo -e "${GREEN}Done!${NC}"
echo -e "${BLUE}======================================${NC}"
View File

@ -1,207 +0,0 @@
# Week 19 Transaction Recovery
## Overview
This script recovers the Week 19 transactions that were lost due to the `/dropadd` database persistence bug. These transactions were posted to Discord but never saved to the database.
## The Bug
**Root Cause**: The `/dropadd` command was missing a critical `create_transaction_batch()` call in the scheduled submission handler.
**Impact**: Week 19 transactions were:
- ✅ Created in memory
- ✅ Posted to Discord #transaction-log
- ❌ **NEVER saved to database**
- ❌ Lost when bot restarted
**Result**: The weekly freeze task found 0 transactions to process for Week 19.
## Recovery Process
### 1. Input Data
File: `.claude/week-19-transactions.md`
Contains 3 teams with 10 total moves:
- **Zephyr (DEN)**: 2 moves
- **Cavalry (CAN)**: 4 moves
- **Whale Sharks (WAI)**: 4 moves
### 2. Script Usage
```bash
# Step 1: Dry run to verify parsing and lookups
python scripts/recover_week19_transactions.py --dry-run
# Step 2: Review the preview output
# Verify all players and teams were found correctly
# Step 3: Execute to PRODUCTION (CRITICAL!)
python scripts/recover_week19_transactions.py --prod
# Or skip confirmation (use with extreme caution)
python scripts/recover_week19_transactions.py --prod --yes
```
**⚠️ IMPORTANT**: By default, the script uses whatever database is configured in `.env`. Use the `--prod` flag to explicitly send to production (`api.sba.manticorum.com`).
### 3. What the Script Does
1. **Parse** `.claude/week-19-transactions.md`
2. **Lookup** all players and teams via API services
3. **Validate** that all data is found
4. **Preview** all transactions that will be created
5. **Ask for confirmation** (unless --yes flag)
6. **POST** to database via `transaction_service.create_transaction_batch()`
7. **Report** success or failure for each team
### 4. Transaction Settings
All recovered transactions are created with:
- `week=19` - Correct historical week
- `season=12` - Current season
- `frozen=False` - Already processed (past thaw period)
- `cancelled=False` - Active transactions
- Unique `moveid` per team: `Season-012-Week-19-{timestamp}`
## Command-Line Options
- `--dry-run` - Parse and validate only, no database changes
- `--prod` - **Send to PRODUCTION database** (`api.sba.manticorum.com`) instead of dev
- `--yes` - Auto-confirm without prompting
- `--season N` - Override season (default: 12)
- `--week N` - Override week (default: 19)
**⚠️ DATABASE TARGETING:**
- **Without `--prod`**: Uses database from `.env` file (currently `sbadev.manticorum.com`)
- **With `--prod`**: Overrides to production (`api.sba.manticorum.com`)
## Example Output
### Dry Run Mode
```
======================================================================
TRANSACTION RECOVERY PREVIEW - Season 12, Week 19
======================================================================
Found 3 teams with 10 total moves:
======================================================================
Team: DEN (Zephyr)
Move ID: Season-012-Week-19-1761444914
Week: 19, Frozen: False, Cancelled: False
1. Fernando Cruz (0.22)
From: DENMiL → To: DEN
Player ID: 11782
2. Brandon Pfaadt (0.25)
From: DEN → To: DENMiL
Player ID: 11566
======================================================================
[... more teams ...]
🔍 DRY RUN MODE - No changes made to database
```
### Successful Execution
```
======================================================================
✅ RECOVERY COMPLETE
======================================================================
Team DEN: 2 moves (moveid: Season-012-Week-19-1761444914)
Team CAN: 4 moves (moveid: Season-012-Week-19-1761444915)
Team WAI: 4 moves (moveid: Season-012-Week-19-1761444916)
Total: 10 player moves recovered
These transactions are now in the database with:
- Week: 19
- Frozen: False (already processed)
- Cancelled: False (active)
Teams can view their moves with /mymoves
======================================================================
```
## Verification
After running the script, verify the transactions were created:
1. **Database Check**: Query transactions table for `week=19, season=12`
2. **Discord Commands**: Teams can use `/mymoves` to see their transactions
3. **Log Files**: Check `logs/recover_week19.log` for detailed execution log
## Troubleshooting
### Player Not Found
```
⚠️ Player not found: PlayerName
```
**Solution**: Check the exact player name spelling in `.claude/week-19-transactions.md`. The script uses fuzzy matching but exact matches work best.
### Team Not Found
```
❌ Team not found: ABC
```
**Solution**: Verify the team abbreviation exists in the database for season 12. Check the `TEAM_MAPPING` dictionary in the script.
### API Error
```
❌ Error posting transactions for DEN: [error message]
```
**Solution**:
1. Check API server is running
2. Verify `API_TOKEN` is valid
3. Check network connectivity
4. Review `logs/recover_week19.log` for details
## Safety Features
- ✅ **Dry-run mode** for safe testing
- ✅ **Preview** shows exact transactions before posting
- ✅ **Confirmation prompt** (unless --yes)
- ✅ **Per-team batching** limits damage on errors
- ✅ **Comprehensive logging** to `logs/recover_week19.log`
- ✅ **Validation** of all player/team lookups before posting
## Rollback
If you need to undo the recovery:
1. Check `logs/recover_week19.log` for transaction IDs
2. Use `transaction_service.cancel_transaction(moveid)` for each
3. Or manually update database: `UPDATE transactions SET cancelled=1 WHERE moveid='Season-012-Week-19-{timestamp}'`
## The Fix
The underlying bug has been fixed in `views/transaction_embed.py`:
```python
# NEW CODE (lines 243-248):
# Mark transactions as frozen for weekly processing
for txn in transactions:
txn.frozen = True
# POST transactions to database
created_transactions = await transaction_service.create_transaction_batch(transactions)
```
**This ensures all future `/dropadd` transactions are properly saved to the database.**
## Files
- `scripts/recover_week19_transactions.py` - Main recovery script
- `.claude/week-19-transactions.md` - Input data
- `logs/recover_week19.log` - Execution log
- `scripts/README_recovery.md` - This documentation

View File

@ -1,125 +0,0 @@
#!/usr/bin/env python3
"""
Process Week 19 Transactions

Moves all players to their new teams for week 19 transactions.

One-time recovery script: PATCHes each player's ``team_id`` via the API.
Requires the ``API_TOKEN`` environment variable.
"""
import os
import sys
import asyncio
import logging
from typing import List, Dict, Any
# Add parent directory to path for imports
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.logging import get_contextual_logger
from services.api_client import APIClient
# Configure logging
logger = get_contextual_logger(f'{__name__}')
# API Configuration
# NOTE(review): this targets the PRODUCTION API directly — confirm before rerunning.
API_BASE_URL = "https://api.sba.manticorum.com"
API_TOKEN = os.getenv("API_TOKEN", "")
# Transaction data (fetched from API)
# One entry per Week 19 roster move. Only new_team_id is written by the
# update; old_team_id is informational/logging context only.
TRANSACTIONS = [
    {"player_id": 11782, "player_name": "Fernando Cruz", "old_team_id": 504, "new_team_id": 502},
    {"player_id": 11566, "player_name": "Brandon Pfaadt", "old_team_id": 502, "new_team_id": 504},
    {"player_id": 12127, "player_name": "Masataka Yoshida", "old_team_id": 531, "new_team_id": 529},
    {"player_id": 12317, "player_name": "Sam Hilliard", "old_team_id": 529, "new_team_id": 531},
    {"player_id": 11984, "player_name": "Jose Herrera", "old_team_id": 531, "new_team_id": 529},
    {"player_id": 11723, "player_name": "Dillon Tate", "old_team_id": 529, "new_team_id": 531},
    {"player_id": 11812, "player_name": "Giancarlo Stanton", "old_team_id": 528, "new_team_id": 526},
    {"player_id": 12199, "player_name": "Nicholas Castellanos", "old_team_id": 528, "new_team_id": 526},
    {"player_id": 11832, "player_name": "Hayden Birdsong", "old_team_id": 526, "new_team_id": 528},
    {"player_id": 11890, "player_name": "Andrew McCutchen", "old_team_id": 526, "new_team_id": 528},
]
async def update_player_team(client: APIClient, player_id: int, new_team_id: int, player_name: str) -> bool:
    """Reassign one player's team through a PATCH call.

    Args:
        client: Shared API client used to issue the request.
        player_id: ID of the player being moved.
        new_team_id: Destination team ID.
        player_name: Display name, used only for log output.

    Returns:
        True if the PATCH succeeded, False on any exception (never raises).
    """
    logger.info(f"Updating {player_name} (ID: {player_id}) to team {new_team_id}")
    try:
        # team_id is sent as a query parameter, matching the API's PATCH contract.
        await client.patch(f"/players/{player_id}", params=[("team_id", str(new_team_id))])
    except Exception as e:
        logger.error(f"✗ Failed to update {player_name}: {e}")
        return False
    logger.info(f"✓ Successfully updated {player_name}")
    return True
async def process_all_transactions():
    """Run every Week 19 roster move and report an overall result.

    Returns:
        True when every PATCH succeeded; False if any move failed or
        API_TOKEN is not configured.
    """
    logger.info("=" * 70)
    logger.info("PROCESSING WEEK 19 TRANSACTIONS")
    logger.info("=" * 70)
    if not API_TOKEN:
        logger.error("API_TOKEN environment variable not set!")
        return False
    # One shared client for the whole batch; closed after the loop.
    client = APIClient(base_url=API_BASE_URL, token=API_TOKEN)
    outcomes = []
    for i, transaction in enumerate(TRANSACTIONS, 1):
        logger.info(f"\n[{i}/{len(TRANSACTIONS)}] Processing transaction:")
        logger.info(f" Player: {transaction['player_name']}")
        logger.info(f" Old Team ID: {transaction['old_team_id']}")
        logger.info(f" New Team ID: {transaction['new_team_id']}")
        outcomes.append(await update_player_team(
            client=client,
            player_id=transaction["player_id"],
            new_team_id=transaction["new_team_id"],
            player_name=transaction["player_name"]
        ))
    # Close the client session
    await client.close()
    success_count = sum(1 for ok in outcomes if ok)
    failure_count = len(outcomes) - success_count
    # Print summary
    logger.info("\n" + "=" * 70)
    logger.info("TRANSACTION PROCESSING COMPLETE")
    logger.info("=" * 70)
    logger.info(f"✓ Successful: {success_count}/{len(TRANSACTIONS)}")
    logger.info(f"✗ Failed: {failure_count}/{len(TRANSACTIONS)}")
    logger.info("=" * 70)
    return failure_count == 0
async def main():
    """Entry point: run the batch and map success onto the process exit code."""
    exit_code = 0 if await process_all_transactions() else 1
    sys.exit(exit_code)


if __name__ == "__main__":
    asyncio.run(main())

View File

@ -1,76 +0,0 @@
#!/bin/bash
# Process Week 19 Transactions
# Moves all players to their new teams for week 19 transactions
#
# One-time recovery script: PATCHes each player's team_id against the API.
# Requires the API_TOKEN environment variable.
set -e
API_BASE_URL="https://api.sba.manticorum.com"
API_TOKEN="${API_TOKEN:-}"
if [ -z "$API_TOKEN" ]; then
    echo "ERROR: API_TOKEN environment variable not set!"
    exit 1
fi
echo "======================================================================"
echo "PROCESSING WEEK 19 TRANSACTIONS"
echo "======================================================================"
# Transaction data: player_id:new_team_id:player_name
TRANSACTIONS=(
    "11782:502:Fernando Cruz"
    "11566:504:Brandon Pfaadt"
    "12127:529:Masataka Yoshida"
    "12317:531:Sam Hilliard"
    "11984:529:Jose Herrera"
    "11723:531:Dillon Tate"
    "11812:526:Giancarlo Stanton"
    "12199:526:Nicholas Castellanos"
    "11832:528:Hayden Birdsong"
    "11890:528:Andrew McCutchen"
)
SUCCESS_COUNT=0
FAILURE_COUNT=0
TOTAL=${#TRANSACTIONS[@]}
for i in "${!TRANSACTIONS[@]}"; do
    IFS=':' read -r player_id new_team_id player_name <<< "${TRANSACTIONS[$i]}"
    echo ""
    echo "[$((i+1))/$TOTAL] Processing transaction:"
    echo "  Player: $player_name"
    echo "  Player ID: $player_id"
    echo "  New Team ID: $new_team_id"
    # "|| printf" keeps set -e from aborting the whole batch when curl itself
    # fails (network/DNS error); the sentinel code 000 routes to the failure branch.
    response=$(curl -s -w "\n%{http_code}" -X PATCH \
        "${API_BASE_URL}/players/${player_id}?team_id=${new_team_id}" \
        -H "Authorization: Bearer ${API_TOKEN}" \
        -H "Content-Type: application/json" || printf '\n000')
    http_code=$(echo "$response" | tail -n1)
    body=$(echo "$response" | sed '$d')
    # String comparison: http_code may be empty/non-numeric on transport errors,
    # which would make an integer [ -eq ] test itself fail under set -e.
    if [ "$http_code" = "200" ] || [ "$http_code" = "204" ]; then
        echo "  ✓ Successfully updated $player_name"
        # BUG FIX: ((SUCCESS_COUNT++)) returns the PRE-increment value, so the
        # first increment from 0 yields arithmetic result 0 (exit status 1) and
        # set -e killed the script. Plain assignment has no such trap.
        SUCCESS_COUNT=$((SUCCESS_COUNT + 1))
    else
        echo "  ✗ Failed to update $player_name (HTTP $http_code)"
        echo "  Response: $body"
        FAILURE_COUNT=$((FAILURE_COUNT + 1))
    fi
done
echo ""
echo "======================================================================"
echo "TRANSACTION PROCESSING COMPLETE"
echo "======================================================================"
echo "✓ Successful: $SUCCESS_COUNT/$TOTAL"
echo "✗ Failed: $FAILURE_COUNT/$TOTAL"
echo "======================================================================"
# Exit 0 only when every transaction succeeded.
if [ $FAILURE_COUNT -eq 0 ]; then
    exit 0
else
    exit 1
fi
View File

@ -1,227 +0,0 @@
#!/usr/bin/env python3
"""
Week 19 Transaction Recovery Script - Direct ID Version
Uses pre-known player IDs to bypass search, posting directly to production.
"""
import asyncio
import argparse
import logging
import sys
from datetime import datetime, UTC
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent))
from models.transaction import Transaction
from models.player import Player
from models.team import Team
from services.player_service import player_service
from services.team_service import team_service
from services.transaction_service import transaction_service
from config import get_config
# Setup logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
handlers=[
logging.FileHandler('logs/recover_week19.log'),
logging.StreamHandler()
]
)
logger = logging.getLogger(__name__)
# Week 19 transaction data with known player IDs
WEEK19_TRANSACTIONS = {
"DEN": [
{"player_id": 11782, "player_name": "Fernando Cruz", "swar": 0.22, "from": "DENMiL", "to": "DEN"},
{"player_id": 11566, "player_name": "Brandon Pfaadt", "swar": 0.25, "from": "DEN", "to": "DENMiL"},
],
"CAN": [
{"player_id": 12127, "player_name": "Masataka Yoshida", "swar": 0.96, "from": "CANMiL", "to": "CAN"},
{"player_id": 12317, "player_name": "Sam Hilliard", "swar": 0.92, "from": "CAN", "to": "CANMiL"},
{"player_id": 11984, "player_name": "Jose Herrera", "swar": 0.0, "from": "CANMiL", "to": "CAN"},
{"player_id": 11723, "player_name": "Dillon Tate", "swar": 0.0, "from": "CAN", "to": "CANMiL"},
],
"WAI": [
{"player_id": 11812, "player_name": "Giancarlo Stanton", "swar": 0.44, "from": "WAIMiL", "to": "WAI"},
{"player_id": 12199, "player_name": "Nicholas Castellanos", "swar": 0.35, "from": "WAIMiL", "to": "WAI"},
{"player_id": 11832, "player_name": "Hayden Birdsong", "swar": 0.21, "from": "WAI", "to": "WAIMiL"},
{"player_id": 12067, "player_name": "Kyle Nicolas", "swar": 0.18, "from": "WAI", "to": "WAIMiL"},
]
}
async def main():
    """Main script execution.

    Forces the PRODUCTION database URL, loads every team and player referenced
    in WEEK19_TRANSACTIONS (failing fast on any miss), prints a full preview,
    then — unless --dry-run — posts one transaction batch per team via
    transaction_service.

    Returns:
        int: 0 on success, dry-run, or user cancellation; 1 on any load failure.
    """
    parser = argparse.ArgumentParser(description='Recover Week 19 transactions with direct IDs')
    parser.add_argument('--dry-run', action='store_true', help='Preview only, do not post')
    parser.add_argument('--yes', action='store_true', help='Skip confirmation')
    args = parser.parse_args()
    # Set production database — this script is production-only by design.
    import os
    os.environ['DB_URL'] = 'https://sba.manticorum.com/api'
    # Clear the cached config singleton so get_config() re-reads DB_URL.
    import config as config_module
    config_module._config = None
    config = get_config()
    logger.warning(f"⚠️ PRODUCTION MODE: Using {config.db_url}")
    print(f"\n{'='*70}")
    print(f"⚠️ PRODUCTION DATABASE MODE")
    print(f"Database: {config.db_url}")
    print(f"{'='*70}\n")
    season = 12
    week = 19
    # Base for moveid generation; each team gets timestamp_base + its index,
    # so one shared moveid groups all of a team's moves.
    timestamp_base = int(datetime.now(UTC).timestamp())
    print("Loading team and player data from production...\n")
    # Load all teams and players up front; any failure aborts before posting.
    teams_cache = {}
    players_cache = {}
    for team_abbrev, moves in WEEK19_TRANSACTIONS.items():
        # Load main team
        try:
            team = await team_service.get_team_by_abbrev(team_abbrev, season)
            if not team:
                logger.error(f"❌ Team not found: {team_abbrev}")
                return 1
            teams_cache[team_abbrev] = team
        except Exception as e:
            logger.error(f"❌ Error loading team {team_abbrev}: {e}")
            return 1
        # Load all teams referenced in moves (cached so each abbrev is fetched once)
        for move in moves:
            for team_key in [move["from"], move["to"]]:
                if team_key not in teams_cache:
                    try:
                        team_obj = await team_service.get_team_by_abbrev(team_key, season)
                        if not team_obj:
                            logger.error(f"❌ Team not found: {team_key}")
                            return 1
                        teams_cache[team_key] = team_obj
                    except Exception as e:
                        logger.error(f"❌ Error loading team {team_key}: {e}")
                        return 1
            # Load player by ID (authoritative — names are display-only)
            player_id = move["player_id"]
            if player_id not in players_cache:
                try:
                    player = await player_service.get_player(player_id)
                    if not player:
                        logger.error(f"❌ Player not found: {player_id} ({move['player_name']})")
                        return 1
                    players_cache[player_id] = player
                except Exception as e:
                    logger.error(f"❌ Error loading player {player_id}: {e}")
                    return 1
    # Show preview
    print("="*70)
    print(f"TRANSACTION RECOVERY PREVIEW - Season {season}, Week {week}")
    print("="*70)
    print(f"\nFound {len(WEEK19_TRANSACTIONS)} teams with {sum(len(moves) for moves in WEEK19_TRANSACTIONS.values())} total moves:\n")
    for idx, (team_abbrev, moves) in enumerate(WEEK19_TRANSACTIONS.items()):
        moveid = f"Season-{season:03d}-Week-{week:02d}-{timestamp_base + idx}"
        team = teams_cache[team_abbrev]
        print("="*70)
        print(f"Team: {team_abbrev} ({team.lname})")
        print(f"Move ID: {moveid}")
        print(f"Week: {week}, Frozen: False, Cancelled: False")
        print()
        for i, move in enumerate(moves, 1):
            player = players_cache[move["player_id"]]
            print(f"{i}. {player.name} ({move['swar']})")
            print(f" From: {move['from']} → To: {move['to']}")
            print(f" Player ID: {player.id}")
            print()
    print("="*70)
    print(f"Total: {sum(len(moves) for moves in WEEK19_TRANSACTIONS.values())} moves across {len(WEEK19_TRANSACTIONS)} teams")
    print(f"Status: PROCESSED (frozen=False)")
    print(f"Season: {season}, Week: {week}")
    print("="*70)
    if args.dry_run:
        print("\n🔍 DRY RUN MODE - No changes made to database")
        logger.info("Dry run completed successfully")
        return 0
    # Confirmation — interactive prompt unless --yes was passed.
    if not args.yes:
        print("\n🚨 PRODUCTION DATABASE - This will POST to LIVE DATA!")
        print(f"Database: {config.db_url}")
        response = input("Continue with database POST? [y/N]: ")
        if response.lower() != 'y':
            print("❌ Cancelled by user")
            return 0
    # Create and post transactions (same moveid scheme as the preview above)
    print("\nPosting transactions to production database...")
    results = {}
    for idx, (team_abbrev, moves) in enumerate(WEEK19_TRANSACTIONS.items()):
        moveid = f"Season-{season:03d}-Week-{week:02d}-{timestamp_base + idx}"
        txn_objects = []
        for move in moves:
            player = players_cache[move["player_id"]]
            from_team = teams_cache[move["from"]]
            to_team = teams_cache[move["to"]]
            transaction = Transaction(
                id=0,  # assigned by the API on create
                week=week,
                season=season,
                moveid=moveid,
                player=player,
                oldteam=from_team,
                newteam=to_team,
                cancelled=False,
                frozen=False  # already processed in Discord; recover as done
            )
            txn_objects.append(transaction)
        # A failed POST for one team does not stop the remaining teams.
        try:
            logger.info(f"Posting {len(txn_objects)} moves for {team_abbrev}...")
            created = await transaction_service.create_transaction_batch(txn_objects)
            results[team_abbrev] = created
            logger.info(f"✅ Successfully posted {len(created)} moves for {team_abbrev}")
        except Exception as e:
            logger.error(f"❌ Error posting for {team_abbrev}: {e}")
            continue
    # Show results
    print("\n" + "="*70)
    print("✅ RECOVERY COMPLETE")
    print("="*70)
    total_moves = 0
    for team_abbrev, created_txns in results.items():
        print(f"\nTeam {team_abbrev}: {len(created_txns)} moves (moveid: {created_txns[0].moveid if created_txns else 'N/A'})")
        total_moves += len(created_txns)
    print(f"\nTotal: {total_moves} player moves recovered")
    print("\nThese transactions are now in PRODUCTION database with:")
    print(f" - Week: {week}")
    print(" - Frozen: False (already processed)")
    print(" - Cancelled: False (active)")
    print("\nTeams can view their moves with /mymoves")
    print("="*70)
    logger.info(f"Recovery completed: {total_moves} moves posted to PRODUCTION")
    return 0
if __name__ == '__main__':
    # asyncio.run drives the async main(); its int return becomes the exit code.
    sys.exit(asyncio.run(main()))

View File

@ -1,453 +0,0 @@
#!/usr/bin/env python3
"""
Week 19 Transaction Recovery Script
Recovers lost Week 19 transactions that were posted to Discord but never
saved to the database due to the missing database POST bug in /dropadd.
Usage:
python scripts/recover_week19_transactions.py --dry-run # Test only
python scripts/recover_week19_transactions.py # Execute with confirmation
python scripts/recover_week19_transactions.py --yes # Execute without confirmation
"""
import argparse
import asyncio
import logging
import re
import sys
from datetime import datetime, UTC
from pathlib import Path
from typing import List, Dict, Tuple, Optional
# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent))
from models.transaction import Transaction
from models.player import Player
from models.team import Team
from services.player_service import player_service
from services.team_service import team_service
from services.transaction_service import transaction_service
from config import get_config
# Setup logging.
# Fix: logging.FileHandler raises FileNotFoundError if logs/ is missing on a
# fresh checkout, so ensure the directory exists before configuring handlers.
Path('logs').mkdir(parents=True, exist_ok=True)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('logs/recover_week19.log'),  # persistent audit trail
        logging.StreamHandler()                          # mirror to console
    ]
)
logger = logging.getLogger(__name__)
# Team name to abbreviation mapping.
# The markdown file labels each section with the team's display name; the
# parser only accepts names present here (unknown names are silently skipped).
TEAM_MAPPING = {
    "Zephyr": "DEN",
    "Cavalry": "CAN",
    "Whale Sharks": "WAI"
}
class TransactionMove:
    """One player move parsed from the week-19 recovery markdown file.

    Carries the raw parsed fields (name, sWAR, team abbrevs) plus the
    database objects resolved later by lookup_players_and_teams, which all
    start out as None.
    """
    def __init__(self, player_name: str, swar: float, from_team: str, to_team: str):
        # Raw values exactly as they appeared in the markdown line.
        self.player_name = player_name
        self.swar = swar
        self.from_team = from_team
        self.to_team = to_team
        # Resolved database objects; populated during the lookup phase.
        self.player: Optional[Player] = None
        self.from_team_obj: Optional[Team] = None
        self.to_team_obj: Optional[Team] = None
    def __repr__(self):
        summary = f"{self.player_name} ({self.swar}): {self.from_team} → {self.to_team}"
        return summary
class TeamTransaction:
    """All recovered moves for a single team, grouped for one batch POST."""
    def __init__(self, team_name: str, team_abbrev: str):
        self.team_name = team_name
        self.team_abbrev = team_abbrev
        # Moves accumulate in file order via add_move().
        self.moves: List[TransactionMove] = []
        # Resolved Team object; filled in by lookup_players_and_teams.
        self.team_obj: Optional[Team] = None
    def add_move(self, move: TransactionMove):
        # Append-only; the parser calls this once per matched move line.
        self.moves.append(move)
    def __repr__(self):
        return f"{self.team_abbrev} ({self.team_name}): {len(self.moves)} moves"
def parse_transaction_file(file_path: str) -> List[TeamTransaction]:
    """
    Parse the week-19 recovery markdown file and extract all transactions.

    Expected layout, repeated per section:
        # Week 19 Transaction        (header — resets the team context)
        <Team Name>                  (must be a key in TEAM_MAPPING)
        Player Moves                 (skipped)
        Name (sWAR) from OLD to NEW  (one line per move)

    Args:
        file_path: Path to the markdown file
    Returns:
        List of TeamTransaction objects, one per recognized team section
    """
    logger.info(f"Parsing: {file_path}")
    with open(file_path, 'r') as f:
        content = f.read()
    transactions = []
    current_team = None
    # Pattern to match player moves: "PlayerName (sWAR) from OLDTEAM to NEWTEAM".
    # Matched against one stripped line at a time, so the MULTILINE flag the
    # original carried was redundant and has been dropped. Note the sWAR group
    # requires a decimal point (e.g. "0.0"); a bare integer would not match.
    move_pattern = re.compile(r'^(.+?)\s*\((\d+\.\d+)\)\s+from\s+(\w+)\s+to\s+(\w+)\s*$')
    for raw_line in content.split('\n'):
        line = raw_line.strip()
        # New transaction section — reset so the next known team name starts a team
        if line.startswith('# Week 19 Transaction'):
            current_team = None
            continue
        # Team name line (only recognized while no team is active)
        if line and current_team is None and line in TEAM_MAPPING:
            team_abbrev = TEAM_MAPPING[line]
            current_team = TeamTransaction(line, team_abbrev)
            transactions.append(current_team)
            logger.debug(f"Found team: {line} ({team_abbrev})")
            continue
        # Skip headers
        if line == 'Player Moves':
            continue
        # Parse player move; non-matching lines are ignored silently
        if current_team and line:
            match = move_pattern.match(line)
            if match:
                player_name = match.group(1).strip()
                swar = float(match.group(2))
                from_team = match.group(3)
                to_team = match.group(4)
                move = TransactionMove(player_name, swar, from_team, to_team)
                current_team.add_move(move)
                logger.debug(f" Parsed move: {move}")
    logger.info(f"Parsed {len(transactions)} teams with {sum(len(t.moves) for t in transactions)} total moves")
    return transactions
async def lookup_players_and_teams(transactions: List[TeamTransaction], season: int) -> bool:
    """
    Lookup all players and teams via API services, mutating the parsed objects
    in place (team_obj / player / from_team_obj / to_team_obj).

    Collects ALL failures rather than stopping at the first one, so the
    operator sees every problem in a single run. A failed team lookup skips
    that team's moves entirely; a failed player lookup skips that move's
    team lookups (each `continue` below targets its own loop level).

    Args:
        transactions: List of TeamTransaction objects
        season: Season number
    Returns:
        True if all lookups successful, False if any failures
    """
    logger.info("Looking up players and teams from database...")
    all_success = True
    for team_txn in transactions:
        # Lookup main team
        try:
            team_obj = await team_service.get_team_by_abbrev(team_txn.team_abbrev, season)
            if not team_obj:
                logger.error(f"❌ Team not found: {team_txn.team_abbrev}")
                all_success = False
                continue
            team_txn.team_obj = team_obj
            logger.debug(f"✓ Found team: {team_txn.team_abbrev} (ID: {team_obj.id})")
        except Exception as e:
            logger.error(f"❌ Error looking up team {team_txn.team_abbrev}: {e}")
            all_success = False
            continue
        # Lookup each player and their teams
        for move in team_txn.moves:
            # Lookup player — search by name, then prefer an exact
            # (case-insensitive) match over the first fuzzy result.
            try:
                players = await player_service.search_players(move.player_name, limit=5, season=season)
                if not players:
                    logger.warning(f"⚠️ Player not found: {move.player_name}")
                    all_success = False
                    continue
                # Try exact match first
                player = None
                for p in players:
                    if p.name.lower() == move.player_name.lower():
                        player = p
                        break
                if not player:
                    player = players[0]  # Use first match (fuzzy fallback)
                    logger.warning(f"⚠️ Using fuzzy match for '{move.player_name}': {player.name}")
                move.player = player
                logger.debug(f" ✓ Found player: {player.name} (ID: {player.id})")
            except Exception as e:
                logger.error(f"❌ Error looking up player {move.player_name}: {e}")
                all_success = False
                continue
            # Lookup from team
            try:
                from_team = await team_service.get_team_by_abbrev(move.from_team, season)
                if not from_team:
                    logger.error(f"❌ From team not found: {move.from_team}")
                    all_success = False
                    continue
                move.from_team_obj = from_team
                logger.debug(f" From: {from_team.abbrev} (ID: {from_team.id})")
            except Exception as e:
                logger.error(f"❌ Error looking up from team {move.from_team}: {e}")
                all_success = False
                continue
            # Lookup to team
            try:
                to_team = await team_service.get_team_by_abbrev(move.to_team, season)
                if not to_team:
                    logger.error(f"❌ To team not found: {move.to_team}")
                    all_success = False
                    continue
                move.to_team_obj = to_team
                logger.debug(f" To: {to_team.abbrev} (ID: {to_team.id})")
            except Exception as e:
                logger.error(f"❌ Error looking up to team {move.to_team}: {e}")
                all_success = False
                continue
    return all_success
def show_preview(transactions: List[TeamTransaction], season: int, week: int):
    """
    Print a human-readable preview of every transaction about to be created.

    NOTE(review): the moveid shown here is derived from the timestamp at
    preview time; create_and_post_transactions computes its own timestamp, so
    the ids actually posted may differ slightly — confirm this is acceptable.

    Args:
        transactions: List of TeamTransaction objects
        season: Season number
        week: Week number
    """
    divider = "=" * 70
    move_total = sum(len(t.moves) for t in transactions)
    print("\n" + divider)
    print(f"TRANSACTION RECOVERY PREVIEW - Season {season}, Week {week}")
    print(divider)
    print(f"\nFound {len(transactions)} teams with {move_total} total moves:\n")
    timestamp_base = int(datetime.now(UTC).timestamp())
    for idx, team_txn in enumerate(transactions):
        # One shared moveid per team: base timestamp plus team index.
        moveid = f"Season-{season:03d}-Week-{week:02d}-{timestamp_base + idx}"
        print(divider)
        print(f"Team: {team_txn.team_abbrev} ({team_txn.team_name})")
        print(f"Move ID: {moveid}")
        print(f"Week: {week}, Frozen: False, Cancelled: False")
        print()
        for i, move in enumerate(team_txn.moves, 1):
            print(f"{i}. {move.player_name} ({move.swar})")
            print(f" From: {move.from_team} → To: {move.to_team}")
            if move.player:
                print(f" Player ID: {move.player.id}")
            print()
    print(divider)
    print(f"Total: {move_total} moves across {len(transactions)} teams")
    print(f"Status: PROCESSED (frozen=False)")
    print(f"Season: {season}, Week: {week}")
    print(divider)
async def create_and_post_transactions(
    transactions: List[TeamTransaction],
    season: int,
    week: int
) -> Dict[str, List[Transaction]]:
    """
    Create Transaction objects and POST to database.

    One shared moveid (Season-SSS-Week-WW-<timestamp+idx>) is generated per
    team so all of that team's moves batch together. Moves whose lookup data
    is incomplete are skipped with a warning, and a failed POST for one team
    does not stop the remaining teams.

    Fix: removed the dead locals `config` and `fa_team` — a free-agent Team
    was constructed from config but never referenced anywhere below.

    Args:
        transactions: TeamTransaction objects already resolved by
            lookup_players_and_teams
        season: Season number
        week: Week number
    Returns:
        Dictionary mapping team abbreviation to list of created Transaction objects
    """
    logger.info("Creating and posting transactions to database...")
    results = {}
    timestamp_base = int(datetime.now(UTC).timestamp())
    for idx, team_txn in enumerate(transactions):
        moveid = f"Season-{season:03d}-Week-{week:02d}-{timestamp_base + idx}"
        # Create Transaction objects for this team
        txn_objects = []
        for move in team_txn.moves:
            if not move.player or not move.from_team_obj or not move.to_team_obj:
                logger.warning(f"Skipping move due to missing data: {move}")
                continue
            transaction = Transaction(
                id=0,  # Will be assigned by API
                week=week,
                season=season,
                moveid=moveid,
                player=move.player,
                oldteam=move.from_team_obj,
                newteam=move.to_team_obj,
                cancelled=False,
                frozen=False  # Already processed
            )
            txn_objects.append(transaction)
        if not txn_objects:
            logger.warning(f"No valid transactions for {team_txn.team_abbrev}, skipping")
            continue
        # POST to database
        try:
            logger.info(f"Posting {len(txn_objects)} moves for {team_txn.team_abbrev}...")
            created = await transaction_service.create_transaction_batch(txn_objects)
            results[team_txn.team_abbrev] = created
            logger.info(f"✅ Successfully posted {len(created)} moves for {team_txn.team_abbrev}")
        except Exception as e:
            logger.error(f"❌ Error posting transactions for {team_txn.team_abbrev}: {e}")
            continue
    return results
async def main():
    """Main script execution.

    Pipeline: parse the markdown file → resolve players/teams via the API →
    preview → (optional confirmation) → batch-POST per team. Defaults to the
    currently configured database; --prod swaps in the production URL.

    Returns:
        int: 0 on success, dry-run, or cancellation; 1 on any parse/lookup/post error.
    """
    parser = argparse.ArgumentParser(description='Recover Week 19 transactions')
    parser.add_argument('--dry-run', action='store_true', help='Parse and validate only, do not post to database')
    parser.add_argument('--yes', action='store_true', help='Skip confirmation prompt')
    parser.add_argument('--prod', action='store_true', help='Send to PRODUCTION database (api.sba.manticorum.com)')
    parser.add_argument('--season', type=int, default=12, help='Season number (default: 12)')
    parser.add_argument('--week', type=int, default=19, help='Week number (default: 19)')
    args = parser.parse_args()
    # Get current database configuration
    config = get_config()
    current_db = config.db_url
    if args.prod:
        # Override to production database
        import os
        os.environ['DB_URL'] = 'https://api.sba.manticorum.com/'
        # Clear cached config singleton and reload so the new URL takes effect
        import config as config_module
        config_module._config = None
        config = get_config()
        logger.warning(f"⚠️ PRODUCTION MODE: Using {config.db_url}")
        print(f"\n{'='*70}")
        print(f"⚠️ PRODUCTION DATABASE MODE")
        print(f"Database: {config.db_url}")
        print(f"{'='*70}\n")
    else:
        logger.info(f"Using database: {current_db}")
        print(f"\nDatabase: {current_db}\n")
    # File path — input lives alongside the repo under .claude/
    file_path = Path(__file__).parent.parent / '.claude' / 'week-19-transactions.md'
    if not file_path.exists():
        logger.error(f"❌ Input file not found: {file_path}")
        return 1
    # Parse the file
    try:
        transactions = parse_transaction_file(str(file_path))
    except Exception as e:
        logger.error(f"❌ Error parsing file: {e}")
        return 1
    if not transactions:
        logger.error("❌ No transactions found in file")
        return 1
    # Lookup players and teams (mutates the parsed objects in place)
    try:
        success = await lookup_players_and_teams(transactions, args.season)
        if not success:
            logger.error("❌ Some lookups failed. Review errors above.")
            return 1
    except Exception as e:
        logger.error(f"❌ Error during lookups: {e}")
        return 1
    # Show preview
    show_preview(transactions, args.season, args.week)
    if args.dry_run:
        print("\n🔍 DRY RUN MODE - No changes made to database")
        logger.info("Dry run completed successfully")
        return 0
    # Confirmation — interactive prompt unless --yes was passed
    if not args.yes:
        if args.prod:
            print("\n🚨 PRODUCTION DATABASE - This will POST to LIVE DATA!")
            print(f"Database: {config.db_url}")
        else:
            print(f"\n⚠️ This will POST these transactions to: {config.db_url}")
        response = input("Continue with database POST? [y/N]: ")
        if response.lower() != 'y':
            print("❌ Cancelled by user")
            logger.info("Cancelled by user")
            return 0
    # Create and post transactions
    try:
        results = await create_and_post_transactions(transactions, args.season, args.week)
    except Exception as e:
        logger.error(f"❌ Error posting transactions: {e}")
        return 1
    # Show results
    print("\n" + "=" * 70)
    print("✅ RECOVERY COMPLETE")
    print("=" * 70)
    total_moves = 0
    for team_abbrev, created_txns in results.items():
        print(f"\nTeam {team_abbrev}: {len(created_txns)} moves (moveid: {created_txns[0].moveid if created_txns else 'N/A'})")
        total_moves += len(created_txns)
    print(f"\nTotal: {total_moves} player moves recovered")
    print("\nThese transactions are now in the database with:")
    print(f" - Week: {args.week}")
    print(" - Frozen: False (already processed)")
    print(" - Cancelled: False (active)")
    print("\nTeams can view their moves with /mymoves")
    print("=" * 70)
    logger.info(f"Recovery completed: {total_moves} moves posted to database")
    return 0
if __name__ == '__main__':
    # asyncio.run drives the async main(); its int return becomes the exit code.
    sys.exit(asyncio.run(main()))

View File

@ -1,312 +0,0 @@
#!/usr/bin/env python3
"""
Real Data Testing Script
Safely test services with real cloud database data (READ-ONLY operations only).
Uses structured logging to demonstrate contextual information with real data.
"""
import asyncio
import os
import sys
from pathlib import Path
# Load testing environment (setdefault: real env vars, if already set, win).
os.environ.setdefault('BOT_TOKEN', 'dummy_token')
os.environ.setdefault('GUILD_ID', '123456789')
# SECURITY NOTE(review): hard-coded API token committed to source — it should
# be rotated and read from the environment / a secret store instead.
os.environ.setdefault('API_TOKEN', 'Tp3aO3jhYve5NJF1IqOmJTmk')
# Points at the dev API host, not production.
os.environ.setdefault('DB_URL', 'https://sbadev.manticorum.com/api')
os.environ.setdefault('LOG_LEVEL', 'DEBUG')
os.environ.setdefault('ENVIRONMENT', 'testing')
os.environ.setdefault('TESTING', 'true')
from config import get_config
from services.player_service import player_service
from utils.logging import get_contextual_logger, set_discord_context
from api.client import cleanup_global_client
logger = get_contextual_logger('test_real_data')
class MockInteraction:
    """Mock Discord interaction exposing user/guild/channel for logging context.

    Defaults are arbitrary fake snowflake ids used only by this harness.
    """
    def __init__(self, user_id="999888777", guild_id="111222333"):
        # Compose the three mock sub-objects set_discord_context reads from.
        self.user = MockUser(user_id)
        self.guild = MockGuild(guild_id)
        self.channel = MockChannel()
class MockUser:
    """Minimal stand-in for a Discord user: exposes only an integer id."""
    def __init__(self, user_id):
        # Ids arrive as strings in the harness; normalize to int.
        self.id = int(user_id)
class MockGuild:
    """Minimal stand-in for a Discord guild with an id and a fixed name."""
    def __init__(self, guild_id):
        self.id = int(guild_id)
        self.name = "SBA Test Guild"
class MockChannel:
    """Minimal stand-in for a Discord channel with one fixed fake id."""
    def __init__(self):
        self.id = 444555666  # arbitrary fake snowflake
import pytest
@pytest.mark.asyncio
async def test_player_search():
    """Test player search against the real (dev) database.

    Exercises get_players_by_name (common + exact names), get_player (with
    team data), and get_by_id. Read-only. Returns True on success, False on
    any caught error — results depend on live database contents.
    """
    print("🔍 Testing Player Search...")
    # Set up logging context
    mock_interaction = MockInteraction()
    set_discord_context(
        interaction=mock_interaction,
        command="/player",
        test_type="player_search"
    )
    # NOTE(review): trace_id is never read afterwards — presumably
    # start_operation records it in the logging context itself; confirm.
    trace_id = logger.start_operation("real_data_test_player_search")
    try:
        # Test 1: Search for a common name (should find multiple)
        logger.info("Testing search for common player name")
        players = await player_service.get_players_by_name("Smith", get_config().sba_season)
        logger.info("Common name search completed",
                    search_term="Smith",
                    results_found=len(players))
        if players:
            print(f" ✅ Found {len(players)} players with 'Smith' in name")
            for i, player in enumerate(players[:3]):  # Show first 3
                print(f" {i+1}. {player.name} ({player.primary_position}) - Season {player.season}")
        else:
            print(" ⚠️ No players found with 'Smith' - unusual for baseball!")
        # Test 2: Search for specific player (exact match)
        logger.info("Testing search for specific player")
        players = await player_service.get_players_by_name("Mike Trout", get_config().sba_season)
        logger.info("Specific player search completed",
                    search_term="Mike Trout",
                    results_found=len(players))
        if players:
            player = players[0]
            print(f" ✅ Found Mike Trout: {player.name} (WARA: {player.wara})")
            # Get with team info
            logger.debug("Testing get_player (with team data)", player_id=player.id)
            player_with_team = await player_service.get_player(player.id)
            if player_with_team and hasattr(player_with_team, 'team') and player_with_team.team:
                print(f" Team: {player_with_team.team.abbrev} - {player_with_team.team.sname}")
                logger.info("Player with team retrieved successfully",
                            player_name=player_with_team.name,
                            team_abbrev=player_with_team.team.abbrev)
            else:
                print(" Team: Not found or no team association")
                logger.warning("Player team information not available")
        else:
            print(" ❌ Mike Trout not found - checking if database has current players")
        # Test 3: Get player by ID (reuses the Test-2 result list)
        if players:
            test_player = players[0]
            logger.info("Testing get_by_id", player_id=test_player.id)
            player_by_id = await player_service.get_by_id(test_player.id)
            if player_by_id:
                print(f" ✅ Retrieved by ID: {player_by_id.name} (ID: {player_by_id.id})")
                logger.info("Get by ID successful",
                            player_id=player_by_id.id,
                            player_name=player_by_id.name)
            else:
                print(f" ❌ Failed to retrieve player ID {test_player.id}")
                logger.error("Get by ID failed", player_id=test_player.id)
        return True
    except Exception as e:
        logger.error("Player search test failed", error=e)
        print(f" ❌ Error: {e}")
        return False
@pytest.mark.asyncio
async def test_player_service_methods():
    """Test paginated get_all and position search against live data.

    Read-only. Returns True on success, False on any caught error.
    """
    print("🔧 Testing Player Service Methods...")
    set_discord_context(
        command="/test-service-methods",
        test_type="service_methods"
    )
    # NOTE(review): trace_id unused here as well — see test_player_search.
    trace_id = logger.start_operation("test_service_methods")
    try:
        # Test get_all with limit (season param is required by the endpoint)
        logger.info("Testing get_all with limit")
        players, total_count = await player_service.get_all(params=[
            ('season', str(get_config().sba_season)),
            ('limit', '10')
        ])
        print(f" ✅ Retrieved {len(players)} of {total_count} total players")
        logger.info("Get all players completed",
                    retrieved_count=len(players),
                    total_count=total_count,
                    limit=10,
                    season=get_config().sba_season)
        if players:
            print(" Sample players:")
            for i, player in enumerate(players[:3]):
                print(f" {i+1}. {player.name} ({player.primary_position}) - WARA: {player.wara}")
        # Test search by position, using the first retrieved player's position
        if players:
            test_position = players[0].primary_position
            logger.info("Testing position search", position=test_position)
            position_players = await player_service.get_players_by_position(test_position, get_config().sba_season)
            print(f" ✅ Found {len(position_players)} players at position {test_position}")
            logger.info("Position search completed",
                        position=test_position,
                        players_found=len(position_players))
        return True
    except Exception as e:
        logger.error("Service methods test failed", error=e)
        print(f" ❌ Error: {e}")
        return False
@pytest.mark.asyncio
async def test_api_connectivity():
    """Test basic API connectivity via the lightweight 'current' endpoint.

    Read-only. Returns True if the call succeeds (even with empty data),
    False on any caught error.
    """
    print("🌐 Testing API Connectivity...")
    set_discord_context(
        command="/test-api",
        test_type="connectivity"
    )
    trace_id = logger.start_operation("test_api_connectivity")
    try:
        # Local imports keep the module importable even if api.client is broken.
        from api.client import get_global_client
        from config import get_config
        logger.info("Testing basic API connection")
        client = await get_global_client()
        # Test current endpoint (usually lightweight)
        logger.debug("Making API call to current endpoint")
        current_data = await client.get('current')
        if current_data:
            print(" ✅ API connection successful")
            logger.info("API connectivity test passed",
                        endpoint='current',
                        response_received=True)
            # Show some basic info about the league
            if isinstance(current_data, dict):
                season = current_data.get('season', 'Unknown')
                week = current_data.get('week', 'Unknown')
                print(f" Current season: {season}, Week: {week}")
                logger.info("Current league info retrieved",
                            season=season,
                            week=week)
        else:
            print(" ⚠️ API connected but returned no data")
            logger.warning("API connection successful but no data returned")
        return True
    except Exception as e:
        logger.error("API connectivity test failed", error=e)
        print(f" ❌ API Error: {e}")
        return False
async def main():
    """Run all real data tests sequentially and summarize results.

    Sets up console + rotating-JSON logging, runs each test coroutine (a
    crash in one test is caught and counted, not fatal), then cleans up the
    shared API client.

    Returns:
        bool: True when every test passed.
    """
    print("🧪 Testing Discord Bot v2.0 with Real Cloud Database")
    print("=" * 60)
    print(f"🌐 API URL: https://sbadev.manticorum.com/api")
    print(f"📝 Logging: Check logs/discord_bot_v2.json for structured output")
    print()
    # Initialize logging (local imports: only needed when run as a script)
    import logging
    from logging.handlers import RotatingFileHandler
    from utils.logging import JSONFormatter
    os.makedirs('logs', exist_ok=True)
    # Set up logging on the root logger so all modules inherit handlers
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    # Console handler
    console_handler = logging.StreamHandler()
    console_formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )
    console_handler.setFormatter(console_formatter)
    # JSON file handler for structured logging (2 MB per file, 3 backups)
    json_handler = RotatingFileHandler('logs/discord_bot_v2.json', maxBytes=2*1024*1024, backupCount=3)
    json_handler.setFormatter(JSONFormatter())
    root_logger.addHandler(console_handler)
    root_logger.addHandler(json_handler)
    # Run tests — connectivity first so later failures are easier to interpret
    tests = [
        ("API Connectivity", test_api_connectivity),
        ("Player Search", test_player_search),
        ("Player Service Methods", test_player_service_methods),
    ]
    passed = 0
    failed = 0
    for test_name, test_func in tests:
        try:
            print(f"\n📋 {test_name}")
            print("-" * 40)
            success = await test_func()
            if success:
                passed += 1
                print(f"{test_name} PASSED")
            else:
                failed += 1
                print(f"{test_name} FAILED")
        except Exception as e:
            # A crashing test counts as failed but never aborts the run
            failed += 1
            print(f"{test_name} CRASHED: {e}")
    print("\n" + "=" * 60)
    print(f"📊 Test Results: {passed} passed, {failed} failed")
    if failed == 0:
        print("🎉 All tests passed! Services work with real data!")
    else:
        print("⚠️ Some tests failed. Check logs for details.")
    print(f"\n📁 Structured logs available at: logs/discord_bot_v2.json")
    print(" Use jq to query: jq '.context.test_type' logs/discord_bot_v2.json")
    # Cleanup — close the shared HTTP client so the loop can shut down cleanly
    await cleanup_global_client()
    return failed == 0
if __name__ == "__main__":
    try:
        success = asyncio.run(main())
        sys.exit(0 if success else 1)
    except KeyboardInterrupt:
        # Ctrl-C during the async run: exit non-zero without a traceback
        print("\n🛑 Testing interrupted by user")
        sys.exit(1)