Detect hardware acceleration capabilities and provide suggestions.

This commit is contained in:
SergeantPanda 2025-05-09 17:19:46 -05:00
parent f762e1b923
commit aff93591fd
4 changed files with 252 additions and 0 deletions

View file

@ -13,6 +13,22 @@ services:
- DISPATCHARR_ENV=aio
- REDIS_HOST=localhost
- CELERY_BROKER_URL=redis://localhost:6379/0
# Optional for hardware acceleration
#group_add:
# - video
# #- render # Uncomment if your GPU requires it
#devices:
# - /dev/dri:/dev/dri # For Intel/AMD GPU acceleration (VA-API)
# Uncomment the following lines for NVIDIA GPU support
# NVIDIA GPU support (requires NVIDIA Container Toolkit)
#deploy:
# resources:
# reservations:
# devices:
# - driver: nvidia
# count: all
# capabilities: [gpu]
volumes:
dispatcharr_data:

View file

@ -14,6 +14,21 @@ services:
- POSTGRES_PASSWORD=secret
- REDIS_HOST=redis
- CELERY_BROKER_URL=redis://redis:6379/0
# Optional for hardware acceleration
#group_add:
# - video
# #- render # Uncomment if your GPU requires it
#devices:
# - /dev/dri:/dev/dri # For Intel/AMD GPU acceleration (VA-API)
# Uncomment the following lines for NVIDIA GPU support
# NVIDIA GPU support (requires NVIDIA Container Toolkit)
#deploy:
# resources:
# reservations:
# devices:
# - driver: nvidia
# count: all
# capabilities: [gpu]
celery:
image: dispatcharr/dispatcharr:alpha-v1

View file

@ -82,6 +82,7 @@ echo "Starting init process..."
. /app/docker/init/01-user-setup.sh
. /app/docker/init/02-postgres.sh
. /app/docker/init/03-init-dispatcharr.sh
. /app/docker/init/04-check-gpu.sh
# Start PostgreSQL
echo "Starting Postgres..."

220
docker/init/04-check-gpu.sh Normal file
View file

@ -0,0 +1,220 @@
#!/bin/bash

echo "🔍 Checking for GPU acceleration devices..."

# Report whether a device node exists and is usable by the current user.
#   $1 - path to the device node (e.g. /dev/dri/renderD128)
# Prints one status line; always returns 0.
check_dev() {
    local node=$1
    if [ ! -e "$node" ]; then
        echo " Device $node does not exist."
        return 0
    fi
    if [ -r "$node" ] && [ -w "$node" ]; then
        echo "✅ Device $node is accessible."
    else
        echo "⚠️ Device $node exists but is not accessible. Check permissions or container runtime options."
    fi
}
# Probe Intel/AMD DRI nodes (the devices VA-API drives).
echo "🔍 Checking for Intel/AMD (VAAPI) devices..."
for node in /dev/dri/renderD* /dev/dri/card*; do
    # Unmatched globs stay literal, so confirm the node really exists.
    if [ -e "$node" ]; then
        check_dev "$node"
    fi
done

# Probe NVIDIA device nodes; remember whether any were present for
# the later toolkit/group checks.
echo "🔍 Checking for NVIDIA devices..."
NVIDIA_FOUND=false
for node in /dev/nvidia*; do
    if [ -e "$node" ]; then
        NVIDIA_FOUND=true
        check_dev "$node"
    fi
done
if [ "$NVIDIA_FOUND" = false ]; then
    echo " No NVIDIA device nodes found under /dev."
fi
# Check group membership for GPU access - context-aware based on hardware
echo "🔍 Checking user group memberships..."
# GIDs of the groups that typically gate access to GPU device nodes
# (empty string when the group does not exist in this container).
VIDEO_GID=$(getent group video | cut -d: -f3)
RENDER_GID=$(getent group render | cut -d: -f3)

# Returns 0 when the current user's group list includes GID $1.
# An empty/missing argument is treated as "not a member".
in_group() {
    [ -n "$1" ] && id -G | grep -qw "$1"
}

NVIDIA_CONTAINER_TOOLKIT_FOUND=false
# Check if NVIDIA Container Toolkit is present through environment or CLI tool
if command -v nvidia-container-cli >/dev/null 2>&1; then
    NVIDIA_CONTAINER_TOOLKIT_FOUND=true
# Check for environment variables set by NVIDIA Container Runtime
elif [ -n "$NVIDIA_VISIBLE_DEVICES" ] && [ -n "$NVIDIA_DRIVER_CAPABILITIES" ]; then
    NVIDIA_CONTAINER_TOOLKIT_FOUND=true
    echo "✅ NVIDIA Container Toolkit detected (via environment variables)."
    echo " The container is properly configured with Docker Compose's 'driver: nvidia' syntax."
fi

# For NVIDIA GPUs with Container Toolkit, video group is optional
if [ "$NVIDIA_FOUND" = true ] && [ "$NVIDIA_CONTAINER_TOOLKIT_FOUND" = true ]; then
    if in_group "$VIDEO_GID"; then
        echo "✅ User is in the 'video' group (GID $VIDEO_GID)."
        echo " Note: With NVIDIA Container Toolkit properly configured, this is usually not required."
    elif [ -n "$VIDEO_GID" ]; then
        echo " User is not in the 'video' group, but NVIDIA Container Toolkit is present."
        echo " This is typically fine as the Container Toolkit handles device permissions."
    fi
# For other GPU types (or NVIDIA without Toolkit), video/render group is important
else
    # BUGFIX: previously 'render' was only consulted when the 'video' group
    # did not exist at all, so a user in 'render' but not 'video' got a false
    # warning. Membership in EITHER group is sufficient for DRI node access.
    if in_group "$VIDEO_GID"; then
        echo "✅ User is in the 'video' group (GID $VIDEO_GID)."
    elif in_group "$RENDER_GID"; then
        echo "✅ User is in the 'render' group (GID $RENDER_GID)."
    elif [ -n "$VIDEO_GID" ]; then
        echo "⚠️ User is NOT in the 'video' group (GID $VIDEO_GID). Hardware acceleration may not work."
    elif [ -n "$RENDER_GID" ]; then
        echo "⚠️ User is NOT in the 'render' group (GID $RENDER_GID). Hardware acceleration may not work."
    else
        echo "⚠️ Neither 'video' nor 'render' groups found on this system."
    fi
fi
# Check NVIDIA Container Toolkit support
# NOTE(review): the toolkit was already probed above for the group check;
# this section repeats the probe so it can print its own verdict lines.
echo "🔍 Checking NVIDIA container runtime support..."
if command -v nvidia-container-cli >/dev/null 2>&1; then
echo "✅ NVIDIA Container Toolkit detected (nvidia-container-cli found)."
# The CLI existing is not enough; 'info' exercises the runtime itself.
if nvidia-container-cli info >/dev/null 2>&1; then
echo "✅ NVIDIA container runtime is functional."
else
echo "⚠️ nvidia-container-cli found, but 'info' command failed. Runtime may be misconfigured."
fi
# These env vars are typically injected by the NVIDIA container runtime
# when GPUs are reserved via Compose's 'deploy' section — TODO confirm
# against the deployment docs for the runtime version in use.
elif [ -n "$NVIDIA_VISIBLE_DEVICES" ] && [ -n "$NVIDIA_DRIVER_CAPABILITIES" ]; then
echo "✅ NVIDIA Container Toolkit detected through environment variables."
echo " Your Docker Compose configuration with 'driver: nvidia' and 'capabilities: [gpu]' is working correctly."
echo " This is the modern, recommended way to use NVIDIA GPUs with containers."
else
echo " NVIDIA Container Toolkit not detected."
# Only show this message if NVIDIA devices are found but toolkit is missing
if [ "$NVIDIA_FOUND" = true ]; then
echo " You appear to be using direct device passthrough for NVIDIA GPU access."
echo " This method works, but consider using Docker Compose's 'deploy' configuration:"
# Example compose snippet, echoed literally for the operator to copy.
echo " deploy:"
echo " resources:"
echo " reservations:"
echo " devices:"
echo " - driver: nvidia"
echo " count: all"
echo " capabilities: [gpu]"
fi
fi
# Verify GPU visibility with nvidia-smi, when the tool is installed.
if ! command -v nvidia-smi >/dev/null 2>&1; then
    echo " nvidia-smi not installed or not in PATH."
else
    echo "🔍 Running nvidia-smi to verify GPU visibility..."
    # A successful run proves the driver and device are mapped into the container.
    if nvidia-smi >/dev/null 2>&1; then
        echo "✅ nvidia-smi successful - GPU is accessible to container!"
        echo " This confirms hardware acceleration should be available to FFmpeg."
    else
        echo "⚠️ nvidia-smi command failed. GPU may not be properly mapped into container."
    fi
fi
# Show relevant environment variables with contextual suggestions
echo "🔍 Checking GPU-related environment variables..."
# Set flags based on device detection; DRI_DEVICES_FOUND is always assigned
# so the summary section can rely on it.
DRI_DEVICES_FOUND=false
for dev in /dev/dri/renderD* /dev/dri/card*; do
    if [ -e "$dev" ]; then
        DRI_DEVICES_FOUND=true
        break
    fi
done
# Give contextual suggestions based on detected hardware
if [ "$DRI_DEVICES_FOUND" = true ]; then
    if [ -n "$LIBVA_DRIVER_NAME" ]; then
        echo " LIBVA_DRIVER_NAME is set to '$LIBVA_DRIVER_NAME'"
    else
        # 'iHD' is the current intel-media VA-API driver (Broadwell and newer);
        # 'i965' only covers legacy Intel GPUs, so suggest both.
        echo "💡 Consider setting LIBVA_DRIVER_NAME to 'iHD' (modern Intel), 'i965' (legacy Intel) or 'radeonsi' (AMD) for VAAPI acceleration"
    fi
fi
if [ "$NVIDIA_FOUND" = true ]; then
    if [ -n "$NVIDIA_VISIBLE_DEVICES" ]; then
        echo " NVIDIA_VISIBLE_DEVICES is set to '$NVIDIA_VISIBLE_DEVICES'"
    else
        echo "💡 Consider setting NVIDIA_VISIBLE_DEVICES to 'all' or specific indices (e.g., '0,1')"
    fi
    if [ -n "$NVIDIA_DRIVER_CAPABILITIES" ]; then
        echo " NVIDIA_DRIVER_CAPABILITIES is set to '$NVIDIA_DRIVER_CAPABILITIES'"
    else
        echo "💡 Consider setting NVIDIA_DRIVER_CAPABILITIES to 'all' or 'compute,video,utility' for full functionality"
    fi
    if [ -n "$CUDA_VISIBLE_DEVICES" ]; then
        echo " CUDA_VISIBLE_DEVICES is set to '$CUDA_VISIBLE_DEVICES'"
    fi
fi
# Check FFmpeg hardware acceleration support
echo "🔍 Checking FFmpeg hardware acceleration capabilities..."
# BUGFIX: always define HWACCEL. It is consumed by the summary section below,
# and previously stayed unset when ffmpeg was missing — which reads as empty
# here and would hard-fail under 'set -u' in a sourcing init script.
HWACCEL="None found"
if command -v ffmpeg >/dev/null 2>&1; then
    # Strip the banner line; fall back to "None found" when the filtered
    # output is empty (grep -v exits non-zero when nothing passes through).
    HWACCEL=$(ffmpeg -hide_banner -hwaccels 2>/dev/null | grep -v "Hardware acceleration methods:" || echo "None found")
    echo "Available FFmpeg hardware acceleration methods:"
    echo "$HWACCEL"
else
    echo "⚠️ FFmpeg not found in PATH."
fi
# Provide a final summary of the hardware acceleration setup
echo "📋 ===================== SUMMARY ====================="
# Identify which GPU type is active and working. NVIDIA wins when device
# nodes were seen AND either nvidia-smi works or the runtime injected env vars.
if [ "$NVIDIA_FOUND" = true ] && (nvidia-smi >/dev/null 2>&1 || [ -n "$NVIDIA_VISIBLE_DEVICES" ]); then
    echo "🔰 NVIDIA GPU: ACTIVE"
    if [ "$NVIDIA_CONTAINER_TOOLKIT_FOUND" = true ]; then
        echo "✅ NVIDIA Container Toolkit: CONFIGURED CORRECTLY"
    elif [ -n "$NVIDIA_VISIBLE_DEVICES" ] && [ -n "$NVIDIA_DRIVER_CAPABILITIES" ]; then
        echo "✅ NVIDIA Docker configuration: USING MODERN DEPLOYMENT"
    else
        echo "⚠️ NVIDIA setup method: DIRECT DEVICE MAPPING (functional but not optimal)"
    fi
    # Display FFmpeg NVIDIA acceleration methods.
    # BUGFIX: use -E — BRE "\|" alternation is a GNU extension, not POSIX.
    # ${HWACCEL:-} keeps this safe even if the ffmpeg check never assigned it.
    if echo "${HWACCEL:-}" | grep -Eq "cuda|nvenc|cuvid"; then
        echo "✅ FFmpeg NVIDIA acceleration: AVAILABLE"
    else
        echo "⚠️ FFmpeg NVIDIA acceleration: NOT DETECTED"
    fi
elif [ "$DRI_DEVICES_FOUND" = true ]; then
    # Intel/AMD detection
    if [ -n "$LIBVA_DRIVER_NAME" ]; then
        # Echo the configured VA-API driver name in upper case (bash 4+ ^^).
        echo "🔰 ${LIBVA_DRIVER_NAME^^} GPU: ACTIVE"
    else
        echo "🔰 INTEL/AMD GPU: ACTIVE"
    fi
    # Check group membership (either 'video' or 'render' grants node access).
    if [ -n "$VIDEO_GID" ] && id -G | grep -qw "$VIDEO_GID"; then
        echo "✅ Video group membership: CORRECT"
    elif [ -n "$RENDER_GID" ] && id -G | grep -qw "$RENDER_GID"; then
        echo "✅ Render group membership: CORRECT"
    else
        echo "⚠️ Group membership: MISSING (may cause permission issues)"
    fi
    # Display FFmpeg VAAPI acceleration method
    if echo "${HWACCEL:-}" | grep -q "vaapi"; then
        echo "✅ FFmpeg VAAPI acceleration: AVAILABLE"
    else
        echo "⚠️ FFmpeg VAAPI acceleration: NOT DETECTED"
    fi
else
    echo "❌ NO GPU ACCELERATION DETECTED"
    echo "⚠️ Hardware acceleration is unavailable or misconfigured"
fi
echo "📋 =================================================="
echo "✅ GPU detection script complete."