## Tdarr Plugin Stack Research & Configuration - Research optimal H.265/HEVC plugin stacks for quality-focused transcoding - Configure GPU threshold (95%) to prevent self-termination during transcoding - Add Tdarr exception logic to distinguish transcoding from gaming GPU usage - Update gaming detection to preserve active transcoding jobs ## Automated System Maintenance - Add cron job for automatic cleanup of abandoned Tdarr temp directories - Cleanup runs every 6 hours, preserves active jobs (< 6 hours old) - Prevents /tmp filesystem bloat from interrupted transcoding jobs - Safe cleanup only targets Tdarr-specific work directories ## Enhanced Documentation - Add comprehensive Tdarr automation documentation in scripts/tdarr/README.md - Document cleanup system and its relationship to main scheduler - Update CLAUDE.md with Tdarr keyword triggers and context loading - Add troubleshooting section for both scheduler and cleanup cron jobs ## System Architecture Improvements - Organize Tdarr scripts under dedicated scripts/tdarr/ directory - Maintain backwards compatibility with existing cron jobs - Add gaming-aware scheduling with configurable time windows - Implement robust GPU usage detection with Tdarr transcoding awareness 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude <noreply@anthropic.com>
75 lines
2.5 KiB
Bash
Executable File
75 lines
2.5 KiB
Bash
Executable File
#!/bin/bash
#
# Tdarr Unmapped Node with GPU Support - NVMe Cache Optimization
#
# Starts an unmapped Tdarr node container (via Podman) that connects to the
# Tdarr server and uses a local NVMe-backed directory as its transcode cache.

# Fail fast: abort on command errors (-e), unset variables (-u), and
# failures in any stage of a pipeline (-o pipefail).
set -euo pipefail

# --- Configuration -----------------------------------------------------------
readonly CONTAINER_NAME="tdarr-node-gpu-unmapped"
readonly SERVER_IP="10.10.0.43"
readonly SERVER_PORT="8266" # Standard server port
readonly NODE_NAME="nobara-pc-gpu-unmapped"

echo "🚀 Starting UNMAPPED Tdarr Node with GPU support using Podman..."
# Tear down any previous instance of this container so the run below
# always starts from a clean slate.
if podman ps -a --format "{{.Names}}" | grep -q "^${CONTAINER_NAME}$"; then
  echo "🛑 Stopping existing container: ${CONTAINER_NAME}"
  # Best-effort teardown: either step may fail harmlessly (e.g. the
  # container is already stopped), so errors are deliberately ignored.
  for teardown_cmd in stop rm; do
    podman "$teardown_cmd" "${CONTAINER_NAME}" 2>/dev/null || true
  done
fi
# Create the script's local working directories.
# NOTE(review): ./media and ./tmp are not mounted into the container by the
# `podman run` invocation below — confirm they are still needed, or add the
# corresponding -v mappings.
echo "📁 Creating required directories..."
for required_dir in ./media ./tmp; do
  mkdir -p -- "$required_dir"
done
# Launch the Tdarr node container: detached, NVIDIA GPU access enabled,
# and the NVMe cache directory mounted at /cache inside the container.
echo "🎬 Starting Clean Tdarr Node container..."

# Build the argument list as an array so each flag stays a distinct,
# correctly-quoted word when expanded.
podman_args=(
  -d
  --name "${CONTAINER_NAME}"
  --gpus all
  --restart unless-stopped
  -e TZ=America/Chicago
  -e UMASK_SET=002
  -e nodeName="${NODE_NAME}"
  -e serverIP="${SERVER_IP}"
  -e serverPort="${SERVER_PORT}"
  -e nodeType=unmapped
  -e inContainer=true
  -e ffmpegVersion=6
  -e logLevel=DEBUG
  -e NVIDIA_DRIVER_CAPABILITIES=all
  -e NVIDIA_VISIBLE_DEVICES=all
  -v "/mnt/NV2/tdarr-cache:/cache"
)

podman run "${podman_args[@]}" ghcr.io/haveagitgat/tdarr_node:latest
# --- Post-launch verification ------------------------------------------------
# Give the container a few seconds to come up before probing it.
echo "⏳ Waiting for container to initialize..."
sleep 5

# Check container status: success path prints status, a GPU smoke test,
# and usage hints; failure path dumps recent logs and exits non-zero.
if podman ps --format "{{.Names}}" | grep -q "^${CONTAINER_NAME}$"; then
  echo "✅ Unmapped Tdarr Node is running successfully!"
  echo ""
  echo "📊 Container Status:"
  podman ps --filter "name=${CONTAINER_NAME}" --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}"
  echo ""
  echo "🔍 Testing GPU Access:"
  # Non-fatal smoke test: a failed nvidia-smi only produces a warning.
  if podman exec "${CONTAINER_NAME}" nvidia-smi --query-gpu=name --format=csv,noheader,nounits 2>/dev/null; then
    echo "🎉 GPU is accessible in container!"
  else
    echo "⚠️ GPU test failed, but container is running"
  fi
  echo ""
  echo "🌐 Connection Details:"
  echo "   Server: ${SERVER_IP}:${SERVER_PORT}"
  echo "   Node Name: ${NODE_NAME}"
  echo "   Web UI: http://${SERVER_IP}:8265"
  echo ""
  echo "📋 Container Management:"
  echo "   View logs: podman logs ${CONTAINER_NAME}"
  echo "   Stop: podman stop ${CONTAINER_NAME}"
  echo "   Remove: podman rm ${CONTAINER_NAME}"
else
  echo "❌ Failed to start container"
  echo "📋 Checking logs..."
  # The container may never have been created, in which case `podman logs`
  # itself fails; guard it so `set -e` cannot abort before our own exit 1
  # (otherwise the script would exit with podman's status instead).
  podman logs "${CONTAINER_NAME}" --tail 10 2>/dev/null || true
  exit 1
fi