feat(sync-server): add deployment and monitoring scripts

Add production deployment tooling:
- docker-compose.monitoring.yml: Dozzle + Uptime Kuma stack
- scripts/deploy.sh: Git pull + rebuild with health check
- scripts/backup.sh: PostgreSQL dump with optional rclone upload
This commit is contained in:
Johannes Millan 2025-12-19 14:26:12 +01:00
parent a1d8b34219
commit 57766f29dd
3 changed files with 219 additions and 0 deletions

View file

@@ -0,0 +1,39 @@
# Monitoring stack for SuperSync Server
#
# Usage:
#   docker compose -f docker-compose.yml -f docker-compose.monitoring.yml up -d
#
# Access via SSH tunnel (from local machine):
#   ssh -L 8080:localhost:8080 -L 3001:localhost:3001 yourserver
#
# Then open:
#   - Dozzle (logs): http://localhost:8080
#   - Uptime Kuma (uptime): http://localhost:3001

services:
  # Real-time log viewer (~40MB RAM)
  # Read-only Docker socket access for viewing container logs
  dozzle:
    image: amir20/dozzle:latest
    container_name: dozzle
    restart: unless-stopped
    ports:
      # Bound to loopback only: reachable solely via the SSH tunnel above,
      # never exposed on the public interface
      - '127.0.0.1:8080:8080'
    volumes:
      # :ro — Dozzle only reads logs; it must not be able to control Docker
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      - DOZZLE_NO_ANALYTICS=true

  # Uptime monitoring + alerts (~120MB RAM)
  # Provides health check monitoring and notifications
  uptime-kuma:
    image: louislam/uptime-kuma:1
    container_name: uptime-kuma
    restart: unless-stopped
    ports:
      - '127.0.0.1:3001:3001'
    volumes:
      # Named volume so monitor config and history survive container recreation
      - uptime-kuma-data:/app/data

volumes:
  uptime-kuma-data:

View file

@@ -0,0 +1,96 @@
#!/bin/bash
# SuperSync Server Backup Script
#
# Usage:
#   ./scripts/backup.sh [--upload]
#
# This script:
#   1. Creates a PostgreSQL dump
#   2. Compresses it with gzip
#   3. Optionally uploads to remote storage (requires rclone)
#   4. Cleans up old backups (keeps 14 days)
#
# Options:
#   --upload    Upload to remote storage via rclone
#
# Setup for cron (daily at 3 AM):
#   (crontab -l; echo "0 3 * * * /opt/supersync/packages/super-sync-server/scripts/backup.sh") | crontab -
#
# Rclone setup for offsite backup:
#   1. Install: curl https://rclone.org/install.sh | sudo bash
#   2. Configure: rclone config (follow prompts for B2/S3)
#   3. Set RCLONE_REMOTE below

# -u catches unset/typoed variables; -o pipefail is critical here:
# without it a failing `pg_dump | gzip` still exits 0 and silently
# leaves a truncated, corrupt backup that looks like a success.
set -euo pipefail

# Configuration (all overridable via environment variables)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SERVER_DIR="$(dirname "$SCRIPT_DIR")"
BACKUP_DIR="${BACKUP_DIR:-$SERVER_DIR/backups}"
RETENTION_DAYS="${RETENTION_DAYS:-14}"

# Rclone remote name (e.g., "b2:supersync-backups" or "s3:my-bucket/supersync")
RCLONE_REMOTE="${RCLONE_REMOTE:-}"

# Database container name and credentials
DB_CONTAINER="${DB_CONTAINER:-supersync-postgres}"
DB_USER="${POSTGRES_USER:-supersync}"
DB_NAME="${POSTGRES_DB:-supersync}"

# Parse arguments ("${1:-}" so `set -u` tolerates a missing argument)
UPLOAD=false
if [ "${1:-}" = "--upload" ]; then
  UPLOAD=true
fi

# Create backup directory
mkdir -p "$BACKUP_DIR"

# Generate filename with timestamp
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_FILE="$BACKUP_DIR/supersync_$DATE.sql.gz"

echo "==> SuperSync Backup"
echo " Date: $DATE"
echo " Output: $BACKUP_FILE"
echo ""

# Step 1: Create PostgreSQL dump.
# Dump to a temp file and rename only after an integrity check, so an
# interrupted or failed dump can never be mistaken for a valid backup.
echo "==> Creating database dump..."
TMP_FILE="$BACKUP_FILE.tmp"
trap 'rm -f "$TMP_FILE"' EXIT
docker exec "$DB_CONTAINER" pg_dump -U "$DB_USER" "$DB_NAME" | gzip > "$TMP_FILE"
# Verify the archive is readable before promoting it
gzip -t "$TMP_FILE"
mv "$TMP_FILE" "$BACKUP_FILE"

# Get file size
SIZE=$(du -h "$BACKUP_FILE" | cut -f1)
echo " Backup size: $SIZE"

# Step 2: Upload to remote (if enabled)
if [ "$UPLOAD" = true ]; then
  if [ -z "$RCLONE_REMOTE" ]; then
    echo ""
    echo "Warning: --upload specified but RCLONE_REMOTE not set"
    echo " Set RCLONE_REMOTE environment variable to enable uploads"
  elif command -v rclone &> /dev/null; then
    echo ""
    echo "==> Uploading to $RCLONE_REMOTE..."
    rclone copy "$BACKUP_FILE" "$RCLONE_REMOTE/"
    echo " Upload complete"
  else
    echo ""
    echo "Warning: rclone not installed, skipping upload"
    echo " Install with: curl https://rclone.org/install.sh | sudo bash"
  fi
fi

# Step 3: Clean up old backups
echo ""
echo "==> Cleaning up backups older than $RETENTION_DAYS days..."
DELETED=$(find "$BACKUP_DIR" -name "supersync_*.sql.gz" -mtime +"$RETENTION_DAYS" -delete -print | wc -l)
echo " Deleted $DELETED old backup(s)"

# List current backups; the `|| echo` also absorbs the pipefail status
# when the glob matches nothing and `ls` fails
echo ""
echo "==> Current backups:"
ls -lh "$BACKUP_DIR"/supersync_*.sql.gz 2>/dev/null | tail -5 || echo " (none)"
echo ""
echo "==> Backup complete: $BACKUP_FILE"

View file

@@ -0,0 +1,84 @@
#!/bin/bash
# SuperSync Server Deployment Script
#
# Usage:
#   ./scripts/deploy.sh [--force]
#
# This script:
#   1. Pulls latest changes from git
#   2. Rebuilds and restarts containers
#   3. Verifies health check passes
#
# Options:
#   --force    Force rebuild without cache

# -u catches unset variables; -o pipefail surfaces failures inside pipelines
set -euo pipefail

# Configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SERVER_DIR="$(dirname "$SCRIPT_DIR")"
REPO_DIR="$(dirname "$(dirname "$SERVER_DIR")")"

# Get domain from .env file.
# The `|| true` is required: under `set -e` a non-matching grep (no DOMAIN
# line in .env) exits 1 and would otherwise abort the script silently.
# `-f2-` keeps values that themselves contain '=' intact.
DOMAIN=""
if [ -f "$SERVER_DIR/.env" ]; then
  DOMAIN=$(grep -E '^DOMAIN=' "$SERVER_DIR/.env" | cut -d'=' -f2- || true)
fi

if [ -z "$DOMAIN" ]; then
  echo "Warning: DOMAIN not set in .env, using localhost for health check"
  HEALTH_URL="http://localhost:1900/health"
else
  HEALTH_URL="https://$DOMAIN/health"
fi

# Parse arguments. Array form: when empty it contributes zero words, so no
# stray empty argument reaches docker compose.
FORCE_BUILD=()
if [ "${1:-}" = "--force" ]; then
  FORCE_BUILD=(--no-cache)
fi

echo "==> SuperSync Deployment"
echo " Server dir: $SERVER_DIR"
echo " Health URL: $HEALTH_URL"
echo ""

# Step 1: Pull latest changes (current branch only)
echo "==> Pulling latest changes..."
cd "$REPO_DIR"
git pull origin "$(git rev-parse --abbrev-ref HEAD)"

# Step 2: Build and restart
echo ""
echo "==> Building and restarting containers..."
cd "$SERVER_DIR"

# Check if monitoring compose exists and include it
COMPOSE_FILES=(-f docker-compose.yml)
if [ -f "docker-compose.monitoring.yml" ]; then
  COMPOSE_FILES+=(-f docker-compose.monitoring.yml)
fi

# `${arr[@]+...}` guard keeps empty-array expansion safe under set -u
# on bash < 4.4
docker compose "${COMPOSE_FILES[@]}" up -d --build ${FORCE_BUILD[@]+"${FORCE_BUILD[@]}"}

# Step 3: Wait for health check
echo ""
echo "==> Waiting for service to be healthy..."
sleep 5

# Retry health check up to 6 times (30 seconds total)
for i in {1..6}; do
  if curl -sf "$HEALTH_URL" > /dev/null 2>&1; then
    echo ""
    echo "==> Deployment successful!"
    echo " Service is healthy at $HEALTH_URL"
    exit 0
  fi
  echo " Waiting... (attempt $i/6)"
  sleep 5
done

echo ""
echo "==> Health check failed!"
echo " Recent logs:"
# Use the same compose file set so the service is resolved consistently
docker compose "${COMPOSE_FILES[@]}" logs --tail=30 supersync
exit 1