Merge pull request #488 from stlalpha/feature/automated-backups

Enhancement: Add automated configuration backups
SergeantPanda 2025-12-15 16:17:33 -06:00 committed by GitHub
commit 4878e92f44
GPG key ID: B5690EEEBB952194
18 changed files with 3280 additions and 0 deletions


@@ -27,6 +27,7 @@ urlpatterns = [
path('core/', include(('core.api_urls', 'core'), namespace='core')),
path('plugins/', include(('apps.plugins.api_urls', 'plugins'), namespace='plugins')),
path('vod/', include(('apps.vod.api_urls', 'vod'), namespace='vod')),
path('backups/', include(('apps.backups.api_urls', 'backups'), namespace='backups')),
# path('output/', include(('apps.output.api_urls', 'output'), namespace='output')),
#path('player/', include(('apps.player.api_urls', 'player'), namespace='player')),
#path('settings/', include(('apps.settings.api_urls', 'settings'), namespace='settings')),

0 apps/backups/__init__.py Normal file

18 apps/backups/api_urls.py Normal file

@@ -0,0 +1,18 @@
from django.urls import path
from . import api_views
app_name = "backups"
urlpatterns = [
path("", api_views.list_backups, name="backup-list"),
path("create/", api_views.create_backup, name="backup-create"),
path("upload/", api_views.upload_backup, name="backup-upload"),
path("schedule/", api_views.get_schedule, name="backup-schedule-get"),
path("schedule/update/", api_views.update_schedule, name="backup-schedule-update"),
path("status/<str:task_id>/", api_views.backup_status, name="backup-status"),
path("<str:filename>/download-token/", api_views.get_download_token, name="backup-download-token"),
path("<str:filename>/download/", api_views.download_backup, name="backup-download"),
path("<str:filename>/delete/", api_views.delete_backup, name="backup-delete"),
path("<str:filename>/restore/", api_views.restore_backup, name="backup-restore"),
]
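
These routes sit behind the project's API prefix (the path('backups/', ...) entry above). A minimal client sketch, assuming the app is reachable at a local base URL and an admin JWT is available (both hypothetical):

import requests

BASE = "http://localhost:9191/api/backups"  # hypothetical host and prefix
session = requests.Session()
session.headers["Authorization"] = "Bearer <admin-jwt>"  # placeholder credential

# Start a backup, then poll its status using the returned task token.
resp = session.post(f"{BASE}/create/").json()
task_id, token = resp["task_id"], resp["task_token"]
state = session.get(f"{BASE}/status/{task_id}/", params={"token": token}).json()
print(state)  # e.g. {"state": "pending"} until the Celery worker finishes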

364 apps/backups/api_views.py Normal file

@@ -0,0 +1,364 @@
import hashlib
import hmac
import logging
import os
from pathlib import Path
from celery.result import AsyncResult
from django.conf import settings
from django.http import HttpResponse, StreamingHttpResponse, Http404
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes, parser_classes
from rest_framework.permissions import IsAdminUser, AllowAny
from rest_framework.parsers import MultiPartParser, FormParser
from rest_framework.response import Response
from . import services
from .tasks import create_backup_task, restore_backup_task
from .scheduler import get_schedule_settings, update_schedule_settings
logger = logging.getLogger(__name__)
def _generate_task_token(task_id: str) -> str:
"""Generate a signed token for task status access without auth."""
secret = settings.SECRET_KEY.encode()
return hmac.new(secret, task_id.encode(), hashlib.sha256).hexdigest()[:32]
def _verify_task_token(task_id: str, token: str) -> bool:
"""Verify a task token is valid."""
expected = _generate_task_token(task_id)
return hmac.compare_digest(expected, token)
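
A quick round trip of this pair, e.g. from a Django shell (the token value itself depends on SECRET_KEY, so only the behavior is shown):

token = _generate_task_token("some-task-id")
assert _verify_task_token("some-task-id", token)
assert not _verify_task_token("another-task-id", token)  # compare_digest rejects mismatches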
@api_view(["GET"])
@permission_classes([IsAdminUser])
def list_backups(request):
"""List all available backup files."""
try:
backups = services.list_backups()
return Response(backups, status=status.HTTP_200_OK)
except Exception as e:
return Response(
{"detail": f"Failed to list backups: {str(e)}"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
@api_view(["POST"])
@permission_classes([IsAdminUser])
def create_backup(request):
"""Create a new backup (async via Celery)."""
try:
task = create_backup_task.delay()
return Response(
{
"detail": "Backup started",
"task_id": task.id,
"task_token": _generate_task_token(task.id),
},
status=status.HTTP_202_ACCEPTED,
)
except Exception as e:
return Response(
{"detail": f"Failed to start backup: {str(e)}"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
@api_view(["GET"])
@permission_classes([AllowAny])
def backup_status(request, task_id):
"""Check the status of a backup/restore task.
Requires either:
- Valid admin authentication, OR
- Valid task_token query parameter
"""
# Check for token-based auth (for restore when session is invalidated)
token = request.query_params.get("token")
if token:
if not _verify_task_token(task_id, token):
return Response(
{"detail": "Invalid task token"},
status=status.HTTP_403_FORBIDDEN,
)
else:
# Fall back to admin auth check
if not request.user.is_authenticated or not request.user.is_staff:
return Response(
{"detail": "Authentication required"},
status=status.HTTP_401_UNAUTHORIZED,
)
try:
result = AsyncResult(task_id)
if result.ready():
task_result = result.get()
if task_result.get("status") == "completed":
return Response({
"state": "completed",
"result": task_result,
})
else:
return Response({
"state": "failed",
"error": task_result.get("error", "Unknown error"),
})
elif result.failed():
return Response({
"state": "failed",
"error": str(result.result),
})
else:
return Response({
"state": result.state.lower(),
})
except Exception as e:
return Response(
{"detail": f"Failed to get task status: {str(e)}"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
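
A polling client sees one of three response shapes (bodies illustrative):

# Finished and reported success:
{"state": "completed", "result": {"status": "completed", "filename": "dispatcharr-backup-2025.01.01.03.00.00.zip", "size": 1234}}
# Finished with an error, or the task itself raised:
{"state": "failed", "error": "pg_dump failed: connection refused"}
# Still queued or running (the lowercased Celery state):
{"state": "pending"}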
@api_view(["GET"])
@permission_classes([IsAdminUser])
def get_download_token(request, filename):
"""Get a signed token for downloading a backup file."""
try:
# Security: prevent path traversal
if ".." in filename or "/" in filename or "\\" in filename:
raise Http404("Invalid filename")
backup_dir = services.get_backup_dir()
backup_file = backup_dir / filename
if not backup_file.exists():
raise Http404("Backup file not found")
token = _generate_task_token(filename)
return Response({"token": token})
except Http404:
raise
except Exception as e:
return Response(
{"detail": f"Failed to generate token: {str(e)}"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
@api_view(["GET"])
@permission_classes([AllowAny])
def download_backup(request, filename):
"""Download a backup file.
Requires either:
- Valid admin authentication, OR
- Valid download_token query parameter
"""
# Check for token-based auth (avoids CORS preflight issues)
token = request.query_params.get("token")
if token:
if not _verify_task_token(filename, token):
return Response(
{"detail": "Invalid download token"},
status=status.HTTP_403_FORBIDDEN,
)
else:
# Fall back to admin auth check
if not request.user.is_authenticated or not request.user.is_staff:
return Response(
{"detail": "Authentication required"},
status=status.HTTP_401_UNAUTHORIZED,
)
try:
# Security: prevent path traversal by checking for suspicious characters
if ".." in filename or "/" in filename or "\\" in filename:
raise Http404("Invalid filename")
backup_dir = services.get_backup_dir()
backup_file = (backup_dir / filename).resolve()
# Security: ensure the resolved path is still within backup_dir
if not str(backup_file).startswith(str(backup_dir.resolve())):
raise Http404("Invalid filename")
if not backup_file.exists() or not backup_file.is_file():
raise Http404("Backup file not found")
file_size = backup_file.stat().st_size
# Use X-Accel-Redirect for nginx (AIO container) - nginx serves file directly
# Fall back to streaming for non-nginx deployments
use_nginx_accel = os.environ.get("USE_NGINX_ACCEL", "").lower() == "true"
logger.info(f"[DOWNLOAD] File: {filename}, Size: {file_size}, USE_NGINX_ACCEL: {use_nginx_accel}")
if use_nginx_accel:
# X-Accel-Redirect: Django returns immediately, nginx serves file
logger.info(f"[DOWNLOAD] Using X-Accel-Redirect: /protected-backups/{filename}")
response = HttpResponse()
response["X-Accel-Redirect"] = f"/protected-backups/{filename}"
response["Content-Type"] = "application/zip"
response["Content-Length"] = file_size
response["Content-Disposition"] = f'attachment; filename="{filename}"'
return response
else:
# Streaming fallback for non-nginx deployments
logger.info("[DOWNLOAD] Using streaming fallback (no nginx)")
def file_iterator(file_path, chunk_size=2 * 1024 * 1024):
with open(file_path, "rb") as f:
while chunk := f.read(chunk_size):
yield chunk
response = StreamingHttpResponse(
file_iterator(backup_file),
content_type="application/zip",
)
response["Content-Length"] = file_size
response["Content-Disposition"] = f'attachment; filename="{filename}"'
return response
except Http404:
raise
except Exception as e:
return Response(
{"detail": f"Download failed: {str(e)}"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
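
Client-side, the token-then-download flow might look like this (a sketch reusing the hypothetical session and BASE from the earlier example; the filename is illustrative):

name = "dispatcharr-backup-2025.01.01.03.00.00.zip"
token = session.get(f"{BASE}/{name}/download-token/").json()["token"]
with session.get(f"{BASE}/{name}/download/", params={"token": token}, stream=True) as r:
    r.raise_for_status()
    with open(name, "wb") as f:
        for chunk in r.iter_content(chunk_size=2 * 1024 * 1024):
            f.write(chunk)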
@api_view(["DELETE"])
@permission_classes([IsAdminUser])
def delete_backup(request, filename):
"""Delete a backup file."""
try:
# Security: prevent path traversal
if ".." in filename or "/" in filename or "\\" in filename:
raise Http404("Invalid filename")
services.delete_backup(filename)
# A 204 response must not carry a body; return the bare status
return Response(status=status.HTTP_204_NO_CONTENT)
except FileNotFoundError:
raise Http404("Backup file not found")
except Exception as e:
return Response(
{"detail": f"Delete failed: {str(e)}"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
@api_view(["POST"])
@permission_classes([IsAdminUser])
@parser_classes([MultiPartParser, FormParser])
def upload_backup(request):
"""Upload a backup file for restoration."""
uploaded = request.FILES.get("file")
if not uploaded:
return Response(
{"detail": "No file uploaded"},
status=status.HTTP_400_BAD_REQUEST,
)
try:
backup_dir = services.get_backup_dir()
filename = uploaded.name or "uploaded-backup.zip"
# Ensure unique filename
backup_file = backup_dir / filename
counter = 1
while backup_file.exists():
name_parts = filename.rsplit(".", 1)
if len(name_parts) == 2:
backup_file = backup_dir / f"{name_parts[0]}-{counter}.{name_parts[1]}"
else:
backup_file = backup_dir / f"{filename}-{counter}"
counter += 1
# Save uploaded file
with backup_file.open("wb") as f:
for chunk in uploaded.chunks():
f.write(chunk)
return Response(
{
"detail": "Backup uploaded successfully",
"filename": backup_file.name,
},
status=status.HTTP_201_CREATED,
)
except Exception as e:
return Response(
{"detail": f"Upload failed: {str(e)}"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
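
The matching multipart upload, again as a hedged sketch; the server may rename the file via the counter loop above:

with open("dispatcharr-backup-2025.01.01.03.00.00.zip", "rb") as f:
    resp = session.post(f"{BASE}/upload/", files={"file": f})
print(resp.json()["filename"])  # e.g. "dispatcharr-backup-2025.01.01.03.00.00-1.zip" on collision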
@api_view(["POST"])
@permission_classes([IsAdminUser])
def restore_backup(request, filename):
"""Restore from a backup file (async via Celery). WARNING: This will flush the database!"""
try:
# Security: prevent path traversal
if ".." in filename or "/" in filename or "\\" in filename:
raise Http404("Invalid filename")
backup_dir = services.get_backup_dir()
backup_file = backup_dir / filename
if not backup_file.exists():
raise Http404("Backup file not found")
task = restore_backup_task.delay(filename)
return Response(
{
"detail": "Restore started",
"task_id": task.id,
"task_token": _generate_task_token(task.id),
},
status=status.HTTP_202_ACCEPTED,
)
except Http404:
raise
except Exception as e:
return Response(
{"detail": f"Failed to start restore: {str(e)}"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
@api_view(["GET"])
@permission_classes([IsAdminUser])
def get_schedule(request):
"""Get backup schedule settings."""
try:
schedule = get_schedule_settings()  # local name avoids shadowing django.conf.settings
return Response(schedule)
except Exception as e:
return Response(
{"detail": f"Failed to get schedule: {str(e)}"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
@api_view(["PUT"])
@permission_classes([IsAdminUser])
def update_schedule(request):
"""Update backup schedule settings."""
try:
schedule = update_schedule_settings(request.data)  # avoid shadowing django.conf.settings
return Response(schedule)
except ValueError as e:
return Response(
{"detail": str(e)},
status=status.HTTP_400_BAD_REQUEST,
)
except Exception as e:
return Response(
{"detail": f"Failed to update schedule: {str(e)}"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
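
A schedule update is a single PUT; keys absent from the payload are left unchanged (values here are illustrative):

payload = {
    "enabled": True,
    "frequency": "weekly",
    "time": "03:30",
    "day_of_week": 0,      # Sunday
    "retention_count": 7,  # keep the 7 newest backups
}
resp = session.put(f"{BASE}/schedule/update/", json=payload)
print(resp.json())  # echoes the effective settings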

7 apps/backups/apps.py Normal file

@@ -0,0 +1,7 @@
from django.apps import AppConfig
class BackupsConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "apps.backups"
verbose_name = "Backups"


0 apps/backups/models.py Normal file

198 apps/backups/scheduler.py Normal file

@@ -0,0 +1,198 @@
import json
import logging
from django_celery_beat.models import PeriodicTask, CrontabSchedule
from core.models import CoreSettings
logger = logging.getLogger(__name__)
BACKUP_SCHEDULE_TASK_NAME = "backup-scheduled-task"
SETTING_KEYS = {
"enabled": "backup_schedule_enabled",
"frequency": "backup_schedule_frequency",
"time": "backup_schedule_time",
"day_of_week": "backup_schedule_day_of_week",
"retention_count": "backup_retention_count",
"cron_expression": "backup_schedule_cron_expression",
}
DEFAULTS = {
"enabled": False,
"frequency": "daily",
"time": "03:00",
"day_of_week": 0, # Sunday
"retention_count": 0,
"cron_expression": "",
}
def _get_setting(key: str, default=None):
"""Get a backup setting from CoreSettings."""
try:
setting = CoreSettings.objects.get(key=SETTING_KEYS[key])
value = setting.value
if key == "enabled":
return value.lower() == "true"
elif key in ("day_of_week", "retention_count"):
return int(value)
return value
except CoreSettings.DoesNotExist:
return default if default is not None else DEFAULTS.get(key)
def _set_setting(key: str, value) -> None:
"""Set a backup setting in CoreSettings."""
str_value = str(value).lower() if isinstance(value, bool) else str(value)
CoreSettings.objects.update_or_create(
key=SETTING_KEYS[key],
defaults={
"name": f"Backup {key.replace('_', ' ').title()}",
"value": str_value,
},
)
def get_schedule_settings() -> dict:
"""Get all backup schedule settings."""
return {
"enabled": _get_setting("enabled"),
"frequency": _get_setting("frequency"),
"time": _get_setting("time"),
"day_of_week": _get_setting("day_of_week"),
"retention_count": _get_setting("retention_count"),
"cron_expression": _get_setting("cron_expression"),
}
def update_schedule_settings(data: dict) -> dict:
"""Update backup schedule settings and sync the PeriodicTask."""
# Validate
if "frequency" in data and data["frequency"] not in ("daily", "weekly"):
raise ValueError("frequency must be 'daily' or 'weekly'")
if "time" in data:
try:
hour, minute = data["time"].split(":")
if not (0 <= int(hour) <= 23 and 0 <= int(minute) <= 59):
raise ValueError
except (ValueError, AttributeError):
raise ValueError("time must be in HH:MM format")
if "day_of_week" in data:
day = int(data["day_of_week"])
if day < 0 or day > 6:
raise ValueError("day_of_week must be 0-6 (Sunday-Saturday)")
if "retention_count" in data:
count = int(data["retention_count"])
if count < 0:
raise ValueError("retention_count must be >= 0")
# Update settings
for key in ("enabled", "frequency", "time", "day_of_week", "retention_count", "cron_expression"):
if key in data:
_set_setting(key, data[key])
# Sync the periodic task
_sync_periodic_task()
return get_schedule_settings()
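
Called directly, e.g. from python manage.py shell, the same validation and periodic-task sync apply:

from apps.backups.scheduler import update_schedule_settings

update_schedule_settings({"enabled": True, "frequency": "daily", "time": "02:15"})
# Invalid input raises ValueError, e.g. {"frequency": "hourly"} or {"time": "25:00"}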
def _sync_periodic_task() -> None:
"""Create, update, or delete the scheduled backup task based on settings."""
settings = get_schedule_settings()
if not settings["enabled"]:
# Delete the task if it exists
task = PeriodicTask.objects.filter(name=BACKUP_SCHEDULE_TASK_NAME).first()
if task:
old_crontab = task.crontab
task.delete()
_cleanup_orphaned_crontab(old_crontab)
logger.info("Backup schedule disabled, removed periodic task")
return
# Get old crontab before creating new one
old_crontab = None
try:
old_task = PeriodicTask.objects.get(name=BACKUP_SCHEDULE_TASK_NAME)
old_crontab = old_task.crontab
except PeriodicTask.DoesNotExist:
pass
# Check if using cron expression (advanced mode)
if settings["cron_expression"]:
# Parse cron expression: "minute hour day month weekday"
try:
parts = settings["cron_expression"].split()
if len(parts) != 5:
raise ValueError("Cron expression must have 5 parts: minute hour day month weekday")
minute, hour, day_of_month, month_of_year, day_of_week = parts
crontab, _ = CrontabSchedule.objects.get_or_create(
minute=minute,
hour=hour,
day_of_week=day_of_week,
day_of_month=day_of_month,
month_of_year=month_of_year,
timezone=CoreSettings.get_system_time_zone(),
)
except Exception as e:
logger.error(f"Invalid cron expression '{settings['cron_expression']}': {e}")
raise ValueError(f"Invalid cron expression: {e}")
else:
# Use simple frequency-based scheduling
# Parse time
hour, minute = settings["time"].split(":")
# Build crontab based on frequency
system_tz = CoreSettings.get_system_time_zone()
if settings["frequency"] == "daily":
crontab, _ = CrontabSchedule.objects.get_or_create(
minute=minute,
hour=hour,
day_of_week="*",
day_of_month="*",
month_of_year="*",
timezone=system_tz,
)
else: # weekly
crontab, _ = CrontabSchedule.objects.get_or_create(
minute=minute,
hour=hour,
day_of_week=str(settings["day_of_week"]),
day_of_month="*",
month_of_year="*",
timezone=system_tz,
)
# Create or update the periodic task
task, created = PeriodicTask.objects.update_or_create(
name=BACKUP_SCHEDULE_TASK_NAME,
defaults={
"task": "apps.backups.tasks.scheduled_backup_task",
"crontab": crontab,
"enabled": True,
"kwargs": json.dumps({"retention_count": settings["retention_count"]}),
},
)
# Clean up old crontab if it changed and is orphaned
if old_crontab and old_crontab.id != crontab.id:
_cleanup_orphaned_crontab(old_crontab)
action = "Created" if created else "Updated"
logger.info(f"{action} backup schedule: {settings['frequency']} at {settings['time']}")
def _cleanup_orphaned_crontab(crontab_schedule):
"""Delete a CrontabSchedule only if no other PeriodicTask still references it."""
if crontab_schedule is None:
return
if PeriodicTask.objects.filter(crontab=crontab_schedule).exists():
return  # schedule still in use elsewhere; leave it alone
logger.debug(f"Cleaning up orphaned CrontabSchedule: {crontab_schedule.id}")
crontab_schedule.delete()
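
For the advanced mode, the five cron fields map positionally onto CrontabSchedule, for example:

# "minute hour day-of-month month day-of-week"
expr = "30 3 * * 1"  # 03:30 every Monday
minute, hour, day_of_month, month_of_year, day_of_week = expr.split()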

320 apps/backups/services.py Normal file

@@ -0,0 +1,320 @@
import datetime
import json
import os
import shutil
import subprocess
import tempfile
from pathlib import Path
from zipfile import ZipFile, ZIP_DEFLATED
import logging
import pytz
from django.conf import settings
from core.models import CoreSettings
logger = logging.getLogger(__name__)
def get_backup_dir() -> Path:
"""Get the backup directory, creating it if necessary."""
backup_dir = Path(settings.BACKUP_ROOT)
backup_dir.mkdir(parents=True, exist_ok=True)
return backup_dir
def _is_postgresql() -> bool:
"""Check if we're using PostgreSQL."""
return settings.DATABASES["default"]["ENGINE"] == "django.db.backends.postgresql"
def _get_pg_env() -> dict:
"""Get environment variables for PostgreSQL commands."""
db_config = settings.DATABASES["default"]
env = os.environ.copy()
env["PGPASSWORD"] = db_config.get("PASSWORD", "")
return env
def _get_pg_args() -> list[str]:
"""Get common PostgreSQL command arguments."""
db_config = settings.DATABASES["default"]
return [
"-h", db_config.get("HOST", "localhost"),
"-p", str(db_config.get("PORT", 5432)),
"-U", db_config.get("USER", "postgres"),
"-d", db_config.get("NAME", "dispatcharr"),
]
def _dump_postgresql(output_file: Path) -> None:
"""Dump PostgreSQL database using pg_dump."""
logger.info("Dumping PostgreSQL database with pg_dump...")
cmd = [
"pg_dump",
*_get_pg_args(),
"-Fc", # Custom format for pg_restore
"-v", # Verbose
"-f", str(output_file),
]
result = subprocess.run(
cmd,
env=_get_pg_env(),
capture_output=True,
text=True,
)
if result.returncode != 0:
logger.error(f"pg_dump failed: {result.stderr}")
raise RuntimeError(f"pg_dump failed: {result.stderr}")
logger.debug(f"pg_dump output: {result.stderr}")
def _restore_postgresql(dump_file: Path) -> None:
"""Restore PostgreSQL database using pg_restore."""
logger.info("[PG_RESTORE] Starting pg_restore...")
logger.info(f"[PG_RESTORE] Dump file: {dump_file}")
pg_args = _get_pg_args()
logger.info(f"[PG_RESTORE] Connection args: {pg_args}")
cmd = [
"pg_restore",
"--clean", # Clean (drop) database objects before recreating
*pg_args,
"-v", # Verbose
str(dump_file),
]
logger.info(f"[PG_RESTORE] Running command: {' '.join(cmd)}")
result = subprocess.run(
cmd,
env=_get_pg_env(),
capture_output=True,
text=True,
)
logger.info(f"[PG_RESTORE] Return code: {result.returncode}")
# pg_restore may return non-zero even on partial success
# Check for actual errors vs warnings
if result.returncode != 0:
# Some errors during restore are expected (e.g., "does not exist" when cleaning)
# Only fail on critical errors
stderr = result.stderr.lower()
if "fatal" in stderr or "could not connect" in stderr:
logger.error(f"[PG_RESTORE] Failed critically: {result.stderr}")
raise RuntimeError(f"pg_restore failed: {result.stderr}")
else:
logger.warning(f"[PG_RESTORE] Completed with warnings: {result.stderr[:500]}...")
logger.info("[PG_RESTORE] Completed successfully")
def _dump_sqlite(output_file: Path) -> None:
"""Dump SQLite database using sqlite3 .backup command."""
logger.info("Dumping SQLite database with sqlite3 .backup...")
db_path = Path(settings.DATABASES["default"]["NAME"])
if not db_path.exists():
raise FileNotFoundError(f"SQLite database not found: {db_path}")
# Use sqlite3 .backup command via stdin for reliable execution
result = subprocess.run(
["sqlite3", str(db_path)],
input=f".backup '{output_file}'\n",
capture_output=True,
text=True,
)
if result.returncode != 0:
logger.error(f"sqlite3 backup failed: {result.stderr}")
raise RuntimeError(f"sqlite3 backup failed: {result.stderr}")
# Verify the backup file was created
if not output_file.exists():
raise RuntimeError("sqlite3 backup failed: output file not created")
logger.info(f"sqlite3 backup completed successfully: {output_file}")
def _restore_sqlite(dump_file: Path) -> None:
"""Restore SQLite database by replacing the database file."""
logger.info("Restoring SQLite database...")
db_path = Path(settings.DATABASES["default"]["NAME"])
backup_current = None
# Backup current database before overwriting
if db_path.exists():
backup_current = db_path.with_suffix(".db.bak")
shutil.copy2(db_path, backup_current)
logger.info(f"Backed up current database to {backup_current}")
# Ensure parent directory exists
db_path.parent.mkdir(parents=True, exist_ok=True)
# The backup file from _dump_sqlite is a complete SQLite database file
# We can simply copy it over the existing database
shutil.copy2(dump_file, db_path)
# Verify the restore worked by checking if sqlite3 can read it
result = subprocess.run(
["sqlite3", str(db_path)],
input=".tables\n",
capture_output=True,
text=True,
)
if result.returncode != 0:
logger.error(f"sqlite3 verification failed: {result.stderr}")
# Try to restore from backup
if backup_current and backup_current.exists():
shutil.copy2(backup_current, db_path)
logger.info("Restored original database from backup")
raise RuntimeError(f"sqlite3 restore verification failed: {result.stderr}")
logger.info("sqlite3 restore completed successfully")
def create_backup() -> Path:
"""
Create a backup archive containing database dump and data directories.
Returns the path to the created backup file.
"""
backup_dir = get_backup_dir()
# Use system timezone for filename (user-friendly), but keep internal timestamps as UTC
system_tz_name = CoreSettings.get_system_time_zone()
try:
system_tz = pytz.timezone(system_tz_name)
now_local = datetime.datetime.now(datetime.UTC).astimezone(system_tz)
timestamp = now_local.strftime("%Y.%m.%d.%H.%M.%S")
except Exception as e:
logger.warning(f"Failed to use system timezone {system_tz_name}: {e}, falling back to UTC")
timestamp = datetime.datetime.now(datetime.UTC).strftime("%Y.%m.%d.%H.%M.%S")
backup_name = f"dispatcharr-backup-{timestamp}.zip"
backup_file = backup_dir / backup_name
logger.info(f"Creating backup: {backup_name}")
with tempfile.TemporaryDirectory(prefix="dispatcharr-backup-") as temp_dir:
temp_path = Path(temp_dir)
# Determine database type and dump accordingly
if _is_postgresql():
db_dump_file = temp_path / "database.dump"
_dump_postgresql(db_dump_file)
db_type = "postgresql"
else:
db_dump_file = temp_path / "database.sqlite3"
_dump_sqlite(db_dump_file)
db_type = "sqlite"
# Create ZIP archive with compression and ZIP64 support for large files
with ZipFile(backup_file, "w", compression=ZIP_DEFLATED, allowZip64=True) as zip_file:
# Add database dump
zip_file.write(db_dump_file, db_dump_file.name)
# Add metadata
metadata = {
"format": "dispatcharr-backup",
"version": 2,
"database_type": db_type,
"database_file": db_dump_file.name,
"created_at": datetime.datetime.now(datetime.UTC).isoformat(),
}
zip_file.writestr("metadata.json", json.dumps(metadata, indent=2))
logger.info(f"Backup created successfully: {backup_file}")
return backup_file
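
Synchronous usage from a Django shell (the Celery task in tasks.py wraps exactly this call):

from apps.backups import services

path = services.create_backup()  # blocks until the dump and archive are written
print(path.name)  # e.g. dispatcharr-backup-2025.01.01.03.00.00.zip under BACKUP_ROOT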
def restore_backup(backup_file: Path) -> None:
"""
Restore from a backup archive.
WARNING: This will overwrite the database!
"""
if not backup_file.exists():
raise FileNotFoundError(f"Backup file not found: {backup_file}")
logger.info(f"Restoring from backup: {backup_file}")
with tempfile.TemporaryDirectory(prefix="dispatcharr-restore-") as temp_dir:
temp_path = Path(temp_dir)
# Extract backup
logger.debug("Extracting backup archive...")
with ZipFile(backup_file, "r") as zip_file:
zip_file.extractall(temp_path)
# Read metadata
metadata_file = temp_path / "metadata.json"
if not metadata_file.exists():
raise ValueError("Invalid backup: missing metadata.json")
with open(metadata_file) as f:
metadata = json.load(f)
# Restore database
_restore_database(temp_path, metadata)
logger.info("Restore completed successfully")
def _restore_database(temp_path: Path, metadata: dict) -> None:
"""Restore database from backup."""
db_type = metadata.get("database_type", "postgresql")
db_file = metadata.get("database_file", "database.dump")
dump_file = temp_path / db_file
if not dump_file.exists():
raise ValueError(f"Invalid backup: missing {db_file}")
current_db_type = "postgresql" if _is_postgresql() else "sqlite"
if db_type != current_db_type:
raise ValueError(
f"Database type mismatch: backup is {db_type}, "
f"but current database is {current_db_type}"
)
if db_type == "postgresql":
_restore_postgresql(dump_file)
else:
_restore_sqlite(dump_file)
def list_backups() -> list[dict]:
"""List all available backup files with metadata."""
backup_dir = get_backup_dir()
backups = []
for backup_file in sorted(backup_dir.glob("dispatcharr-backup-*.zip"), reverse=True):
# Use UTC timezone so frontend can convert to user's local time
created_time = datetime.datetime.fromtimestamp(backup_file.stat().st_mtime, datetime.UTC)
backups.append({
"name": backup_file.name,
"size": backup_file.stat().st_size,
"created": created_time.isoformat(),
})
return backups
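
An illustrative return value (newest first; "created" is UTC ISO 8601 so the frontend can localize it):

[
    {
        "name": "dispatcharr-backup-2025.01.02.03.00.00.zip",
        "size": 10485760,
        "created": "2025-01-02T09:00:00+00:00",
    },
    {
        "name": "dispatcharr-backup-2025.01.01.03.00.00.zip",
        "size": 10240000,
        "created": "2025-01-01T09:00:00+00:00",
    },
]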
def delete_backup(filename: str) -> None:
"""Delete a backup file."""
backup_dir = get_backup_dir()
backup_file = backup_dir / filename
if not backup_file.exists():
raise FileNotFoundError(f"Backup file not found: {filename}")
if not backup_file.is_file():
raise ValueError(f"Invalid backup file: {filename}")
backup_file.unlink()
logger.info(f"Deleted backup: {filename}")

106 apps/backups/tasks.py Normal file

@@ -0,0 +1,106 @@
import logging
import traceback
from celery import shared_task
from . import services
logger = logging.getLogger(__name__)
def _cleanup_old_backups(retention_count: int) -> int:
"""Delete old backups, keeping only the most recent N. Returns count deleted."""
if retention_count <= 0:
return 0
backups = services.list_backups()
if len(backups) <= retention_count:
return 0
# Backups are sorted newest first, so delete from the end
to_delete = backups[retention_count:]
deleted = 0
for backup in to_delete:
try:
services.delete_backup(backup["name"])
deleted += 1
logger.info(f"[CLEANUP] Deleted old backup: {backup['name']}")
except Exception as e:
logger.error(f"[CLEANUP] Failed to delete {backup['name']}: {e}")
return deleted
@shared_task(bind=True)
def create_backup_task(self):
"""Celery task to create a backup asynchronously."""
try:
logger.info(f"[BACKUP] Starting backup task {self.request.id}")
backup_file = services.create_backup()
logger.info(f"[BACKUP] Task {self.request.id} completed: {backup_file.name}")
return {
"status": "completed",
"filename": backup_file.name,
"size": backup_file.stat().st_size,
}
except Exception as e:
logger.error(f"[BACKUP] Task {self.request.id} failed: {str(e)}")
logger.error(f"[BACKUP] Traceback: {traceback.format_exc()}")
return {
"status": "failed",
"error": str(e),
}
@shared_task(bind=True)
def restore_backup_task(self, filename: str):
"""Celery task to restore a backup asynchronously."""
try:
logger.info(f"[RESTORE] Starting restore task {self.request.id} for {filename}")
backup_dir = services.get_backup_dir()
backup_file = backup_dir / filename
logger.info(f"[RESTORE] Backup file path: {backup_file}")
services.restore_backup(backup_file)
logger.info(f"[RESTORE] Task {self.request.id} completed successfully")
return {
"status": "completed",
"filename": filename,
}
except Exception as e:
logger.error(f"[RESTORE] Task {self.request.id} failed: {str(e)}")
logger.error(f"[RESTORE] Traceback: {traceback.format_exc()}")
return {
"status": "failed",
"error": str(e),
}
@shared_task(bind=True)
def scheduled_backup_task(self, retention_count: int = 0):
"""Celery task for scheduled backups with optional retention cleanup."""
try:
logger.info(f"[SCHEDULED] Starting scheduled backup task {self.request.id}")
# Create backup
backup_file = services.create_backup()
logger.info(f"[SCHEDULED] Backup created: {backup_file.name}")
# Cleanup old backups if retention is set
deleted = 0
if retention_count > 0:
deleted = _cleanup_old_backups(retention_count)
logger.info(f"[SCHEDULED] Cleanup complete, deleted {deleted} old backup(s)")
return {
"status": "completed",
"filename": backup_file.name,
"size": backup_file.stat().st_size,
"deleted_count": deleted,
}
except Exception as e:
logger.error(f"[SCHEDULED] Task {self.request.id} failed: {str(e)}")
logger.error(f"[SCHEDULED] Traceback: {traceback.format_exc()}")
return {
"status": "failed",
"error": str(e),
}
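
From application code the tasks are dispatched through Celery, so a worker must be running; a sketch:

from apps.backups.tasks import create_backup_task, scheduled_backup_task

result = create_backup_task.delay()
print(result.id)  # pair with _generate_task_token(result.id) for tokenized polling

# Celery beat invokes the scheduled task with the configured retention:
scheduled_backup_task.delay(retention_count=7)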

1163 apps/backups/tests.py Normal file

File diff suppressed because it is too large