claude-home/examples/docker/tdarr-node-local/start-tdarr-mapped-node.sh

#!/bin/bash
# Tdarr Mapped Node with GPU Support - Example Script
# This script starts a MAPPED Tdarr node container with NVIDIA GPU acceleration using Podman
#
# MAPPED NODES: the node reaches media files directly through volume mounts.
# Use this approach when you want the node to read and write your media library
# locally, so the server does not have to copy files to and from the node.
#
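# PREREQUISITES (assumptions, not stated in the original notes): the host needs a
# working NVIDIA driver plus the nvidia-container-toolkit configured for Podman
# (e.g. CDI device specs generated with `nvidia-ctk cdi generate`) so that
# `--gpus all` can expose the GPU inside the container.
#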

set -e

# Configure these variables for your setup:
CONTAINER_NAME="tdarr-node-gpu-mapped"
SERVER_IP="YOUR_SERVER_IP"       # e.g., "10.10.0.43" or "192.168.1.100"
SERVER_PORT="8266"               # Default Tdarr server port
NODE_NAME="YOUR_NODE_NAME"       # e.g., "workstation-gpu" or "local-gpu-node"
MEDIA_PATH="/path/to/your/media" # e.g., "/mnt/media" or "/home/user/Videos"
CACHE_PATH="/path/to/cache"      # e.g., "/mnt/ssd/tdarr-cache"
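
# Optional sanity check (a minimal sketch, not part of the original script): fail
# fast if the configured paths do not exist rather than mounting empty directories.
for dir in "${MEDIA_PATH}" "${CACHE_PATH}"; do
    if [ ! -d "${dir}" ]; then
        echo "❌ Directory not found: ${dir}" >&2
        exit 1
    fi
done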
echo "🚀 Starting MAPPED Tdarr Node with GPU support using Podman..."
echo " Media Path: ${MEDIA_PATH}"
echo " Cache Path: ${CACHE_PATH}"
# Stop and remove existing container if it exists
if podman ps -a --format "{{.Names}}" | grep -q "^${CONTAINER_NAME}$"; then
    echo "🛑 Stopping existing container: ${CONTAINER_NAME}"
    podman stop "${CONTAINER_NAME}" 2>/dev/null || true
    podman rm "${CONTAINER_NAME}" 2>/dev/null || true
fi

# Start Tdarr node with GPU support
echo "🎬 Starting Tdarr Node container..."
podman run -d --name "${CONTAINER_NAME}" \
    --gpus all \
    --restart unless-stopped \
    -e TZ=America/Chicago \
    -e UMASK_SET=002 \
    -e nodeName="${NODE_NAME}" \
    -e serverIP="${SERVER_IP}" \
    -e serverPort="${SERVER_PORT}" \
    -e inContainer=true \
    -e ffmpegVersion=6 \
    -e logLevel=DEBUG \
    -e NVIDIA_DRIVER_CAPABILITIES=all \
    -e NVIDIA_VISIBLE_DEVICES=all \
    -v "${MEDIA_PATH}:/media" \
    -v "${CACHE_PATH}:/temp" \
    ghcr.io/haveagitgat/tdarr_node:latest
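
# NOTE (assumption about your server setup, not from the original script): for a
# mapped node, the container paths used above (/media and /temp) generally need to
# match the library and transcode-cache paths the Tdarr server expects, or be
# translated via the node's path-translation settings in the Tdarr UI.
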
echo "⏳ Waiting for container to initialize..."
sleep 5
# Check container status
if podman ps --format "{{.Names}}" | grep -q "^${CONTAINER_NAME}$"; then
    echo "✅ Mapped Tdarr Node is running successfully!"
    echo ""
    echo "📊 Container Status:"
    podman ps --filter "name=${CONTAINER_NAME}" --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}"
    echo ""
    echo "🔍 Testing GPU Access:"
    if podman exec "${CONTAINER_NAME}" nvidia-smi --query-gpu=name --format=csv,noheader,nounits 2>/dev/null; then
        echo "🎉 GPU is accessible in container!"
    else
        echo "⚠️ GPU test failed, but container is running"
    fi
    echo ""
    echo "🌐 Connection Details:"
    echo "   Server: ${SERVER_IP}:${SERVER_PORT}"
    echo "   Node Name: ${NODE_NAME}"
    echo ""
    echo "🧪 Test NVENC encoding:"
    echo "   podman exec ${CONTAINER_NAME} /usr/local/bin/tdarr-ffmpeg -f lavfi -i testsrc2=duration=5:size=1920x1080:rate=30 -c:v h264_nvenc -preset fast -t 5 /tmp/test.mp4"
    echo ""
    echo "📋 Container Management:"
    echo "   View logs: podman logs ${CONTAINER_NAME}"
    echo "   Stop: podman stop ${CONTAINER_NAME}"
    echo "   Remove: podman rm ${CONTAINER_NAME}"
else
    echo "❌ Failed to start container"
    echo "📋 Checking logs..."
    podman logs "${CONTAINER_NAME}" --tail 10
    exit 1
fi
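
# Next step (not automated here): open the Tdarr server web UI (typically served
# on port 8265), confirm this node has registered, and set its GPU worker count.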