version: "3.4" services: tdarr-node: container_name: tdarr-node-local-gpu image: ghcr.io/haveagitgat/tdarr_node:latest restart: unless-stopped environment: - TZ=America/Chicago - UMASK_SET=002 - nodeName=local-workstation-gpu - serverIP=192.168.1.100 # Replace with your Tdarr server IP - serverPort=8266 - inContainer=true - ffmpegVersion=6 # NVIDIA environment variables - NVIDIA_DRIVER_CAPABILITIES=all - NVIDIA_VISIBLE_DEVICES=all volumes: # Media access (same as server) - /mnt/media:/media # Replace with your media path # Local transcoding cache - ./temp:/temp devices: - /dev/dri:/dev/dri # Intel/AMD GPU fallback # GPU configuration - choose ONE method: # Method 1: Deploy syntax (recommended) deploy: resources: limits: memory: 16G # GPU transcoding uses less RAM reservations: memory: 8G devices: - driver: nvidia count: all capabilities: [gpu] # Method 2: Runtime (alternative) # runtime: nvidia # Method 3: CDI (future) # devices: # - nvidia.com/gpu=all