Huge overhaul of logging. Logging is more standardized, and logs from Celery tasks are now captured and sent to the console.

Also adds a new environment variable, DISPATCHARR_LOG_LEVEL. Available log levels: TRACE, DEBUG, INFO, WARNING, ERROR, CRITICAL.
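For reference, a minimal sketch (not part of the commit) of how the variable maps onto Python logging levels; TRACE sits below DEBUG, so it is the most verbose setting:

import logging, os

# Same name-to-number mapping the settings diff below introduces.
LOG_LEVEL_MAP = {'TRACE': 5, 'DEBUG': 10, 'INFO': 20,
                 'WARNING': 30, 'ERROR': 40, 'CRITICAL': 50}
# e.g. DISPATCHARR_LOG_LEVEL=TRACE set in the container environment
level = LOG_LEVEL_MAP.get(os.environ.get('DISPATCHARR_LOG_LEVEL', 'INFO').upper(),
                          logging.INFO)  # unset or invalid names fall back to INFO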
SergeantPanda 2025-05-10 09:29:06 -05:00
parent 9c9e546f80
commit d3615e1a66
5 changed files with 167 additions and 11 deletions


@@ -2,6 +2,19 @@ from django.apps import AppConfig
from django.conf import settings
import os, logging
# Define TRACE level (5 is below DEBUG which is 10)
TRACE = 5
logging.addLevelName(TRACE, "TRACE")

# Add trace method to the Logger class
def trace(self, message, *args, **kwargs):
    """Log a message with TRACE level (more detailed than DEBUG)"""
    if self.isEnabledFor(TRACE):
        self._log(TRACE, message, args, **kwargs)

# Add the trace method to the Logger class
logging.Logger.trace = trace

class CoreConfig(AppConfig):
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'core'
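A short usage sketch (assuming the patch above has been imported, e.g. via Django app loading; the logger name is taken from the settings diff further down):

import logging

logger = logging.getLogger("core.tasks")
logger.setLevel(5)                        # TRACE; normally driven by DISPATCHARR_LOG_LEVEL
logger.addHandler(logging.StreamHandler())
logger.trace("more detail than DEBUG")    # trace() exists once logging.Logger has been patched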


@@ -33,6 +33,8 @@ last_known_data = {}
_last_log_times = {}
# Don't repeat similar log messages more often than this (in seconds)
LOG_THROTTLE_SECONDS = 300 # 5 minutes
# Track if this is the first scan since startup
_first_scan_completed = False
@shared_task
def beat_periodic_task():
@@ -52,6 +54,7 @@ def throttled_log(logger_method, message, key=None, *args, **kwargs):
@shared_task
def scan_and_process_files():
global _first_scan_completed
redis_client = RedisClient.get_client()
now = time.time()
@@ -84,7 +87,11 @@ def scan_and_process_files():
# Check if this file is already in the database
existing_m3u = M3UAccount.objects.filter(file_path=filepath).exists()
if existing_m3u:
logger.debug(f"Skipping {filename}: Already exists in database")
# Use trace level if not first scan
if _first_scan_completed:
logger.trace(f"Skipping {filename}: Already exists in database")
else:
logger.debug(f"Skipping {filename}: Already exists in database")
redis_client.set(redis_key, mtime, ex=REDIS_TTL)
m3u_skipped += 1
continue
@@ -94,13 +101,21 @@ def scan_and_process_files():
# File too new — probably still being written
if age < MIN_AGE_SECONDS:
logger.debug(f"Skipping {filename}: Too new (age={age}s)")
# Use trace level if not first scan
if _first_scan_completed:
logger.trace(f"Skipping {filename}: Too new (age={age}s)")
else:
logger.debug(f"Skipping {filename}: Too new (age={age}s)")
m3u_skipped += 1
continue
# Skip if we've already processed this mtime
if stored_mtime and float(stored_mtime) >= mtime:
logger.debug(f"Skipping {filename}: Already processed this version")
# Use trace level if not first scan
if _first_scan_completed:
logger.trace(f"Skipping {filename}: Already processed this version")
else:
logger.debug(f"Skipping {filename}: Already processed this version")
m3u_skipped += 1
continue
@@ -112,7 +127,11 @@ def scan_and_process_files():
redis_client.set(redis_key, mtime, ex=REDIS_TTL)
if not m3u_account.is_active:
logger.debug(f"Skipping {filename}: M3U account is inactive")
# Use trace level if not first scan
if _first_scan_completed:
logger.trace(f"Skipping {filename}: M3U account is inactive")
else:
logger.debug(f"Skipping {filename}: M3U account is inactive")
m3u_skipped += 1
continue
@@ -147,12 +166,20 @@ def scan_and_process_files():
filepath = os.path.join(EPG_WATCH_DIR, filename)
if not os.path.isfile(filepath):
logger.debug(f"Skipping {filename}: Not a file")
# Use trace level if not first scan
if _first_scan_completed:
logger.trace(f"Skipping {filename}: Not a file")
else:
logger.debug(f"Skipping {filename}: Not a file")
epg_skipped += 1
continue
if not filename.endswith('.xml') and not filename.endswith('.gz'):
logger.debug(f"Skipping {filename}: Not an XML or GZ file")
# Use trace level if not first scan
if _first_scan_completed:
logger.trace(f"Skipping {filename}: Not an XML or GZ file")
else:
logger.debug(f"Skipping {filename}: Not an XML or GZ file")
epg_skipped += 1
continue
@@ -166,7 +193,11 @@ def scan_and_process_files():
# Check if this file is already in the database
existing_epg = EPGSource.objects.filter(file_path=filepath).exists()
if existing_epg:
logger.debug(f"Skipping {filename}: Already exists in database")
# Use trace level if not first scan
if _first_scan_completed:
logger.trace(f"Skipping {filename}: Already exists in database")
else:
logger.debug(f"Skipping {filename}: Already exists in database")
redis_client.set(redis_key, mtime, ex=REDIS_TTL)
epg_skipped += 1
continue
@@ -176,13 +207,21 @@ def scan_and_process_files():
# File too new — probably still being written
if age < MIN_AGE_SECONDS:
logger.debug(f"Skipping {filename}: Too new, possibly still being written (age={age}s)")
# Use trace level if not first scan
if _first_scan_completed:
logger.trace(f"Skipping {filename}: Too new, possibly still being written (age={age}s)")
else:
logger.debug(f"Skipping {filename}: Too new, possibly still being written (age={age}s)")
epg_skipped += 1
continue
# Skip if we've already processed this mtime
if stored_mtime and float(stored_mtime) >= mtime:
logger.debug(f"Skipping {filename}: Already processed this version")
# Use trace level if not first scan
if _first_scan_completed:
logger.trace(f"Skipping {filename}: Already processed this version")
else:
logger.debug(f"Skipping {filename}: Already processed this version")
epg_skipped += 1
continue
@@ -200,7 +239,11 @@ def scan_and_process_files():
redis_client.set(redis_key, mtime, ex=REDIS_TTL)
if not epg_source.is_active:
logger.debug(f"Skipping {filename}: EPG source is marked as inactive")
# Use trace level if not first scan
if _first_scan_completed:
logger.trace(f"Skipping {filename}: EPG source is marked as inactive")
else:
logger.debug(f"Skipping {filename}: EPG source is marked as inactive")
epg_skipped += 1
continue
@@ -215,6 +258,9 @@ def scan_and_process_files():
logger.debug(f"EPG processing complete: {epg_processed} processed, {epg_skipped} skipped, {epg_errors} errors")
# Mark that the first scan is complete
_first_scan_completed = True
def fetch_channel_stats():
redis_client = RedisClient.get_client()
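The repeated branches above all follow the same pattern: log skipped files at DEBUG on the first scan after startup so the initial state is visible, then drop to TRACE on later scans to keep the console quiet. A hypothetical helper (not part of this commit) that captures the idea:

import logging

TRACE = 5  # registered by the core app config above

def log_skip(logger, message, first_scan_completed):
    """DEBUG on the first scan after startup, TRACE on every scan after that."""
    logger.log(TRACE if first_scan_completed else logging.DEBUG, message)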


@@ -1,8 +1,28 @@
# dispatcharr/celery.py
import os
from celery import Celery
import logging
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dispatcharr.settings')
app = Celery("dispatcharr")
app.config_from_object("django.conf:settings", namespace="CELERY")
app.autodiscover_tasks()
# Configure Celery logging
app.conf.update(
    worker_log_level='DEBUG',
    worker_log_format='%(asctime)s %(levelname)s %(name)s: %(message)s',
    beat_log_level='DEBUG',
    worker_hijack_root_logger=False,
    worker_task_log_format='%(asctime)s %(levelname)s %(task_name)s: %(message)s',
)

# Set only specific log messages to DEBUG level
# This maintains user configurability for all other loggers
@app.on_after_configure.connect
def setup_celery_logging(**kwargs):
    # Only set specific loggers to DEBUG that handle the routine messages
    # we want to suppress from INFO level
    logging.getLogger('celery.beat').getChild('Scheduler').setLevel(logging.DEBUG)
    logging.getLogger('celery.worker.strategy').setLevel(logging.DEBUG)
    logging.getLogger('celery.app.trace').setLevel(logging.DEBUG)
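With worker_hijack_root_logger=False, Celery leaves Django's LOGGING configuration in charge, so task output flows through the 'celery' and 'celery.task' loggers defined in settings. A small sketch (the task itself is hypothetical) of how a task module would typically obtain its logger:

from celery import shared_task
from celery.utils.log import get_task_logger

logger = get_task_logger(__name__)  # parented under 'celery.task', which settings routes to the console

@shared_task
def example_task():
    logger.info("captured by the console handler from LOGGING")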


@@ -232,3 +232,80 @@ PROXY_SETTINGS = {
        'REDIS_CHUNK_TTL': 60,  # How long to keep chunks in Redis (seconds)
    }
}

# Map log level names to their numeric values
LOG_LEVEL_MAP = {
    'TRACE': 5,
    'DEBUG': 10,
    'INFO': 20,
    'WARNING': 30,
    'ERROR': 40,
    'CRITICAL': 50
}

# Get log level from environment variable, default to INFO if not set
LOG_LEVEL_NAME = os.environ.get('DISPATCHARR_LOG_LEVEL', 'INFO').upper()
LOG_LEVEL = LOG_LEVEL_MAP.get(LOG_LEVEL_NAME, 20)  # Default to INFO (20) if invalid

# Add this to your existing LOGGING configuration or create one if it doesn't exist
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '{asctime} {levelname} {name} {message}',
            'style': '{',
        },
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'verbose',
            'level': 5,  # Always allow TRACE level messages through the handler
        },
    },
    'loggers': {
        'core.tasks': {
            'handlers': ['console'],
            'level': LOG_LEVEL,  # Use environment-configured level
            'propagate': False,  # Don't propagate to root logger to avoid duplicate logs
        },
        'apps.proxy': {
            'handlers': ['console'],
            'level': LOG_LEVEL,  # Use environment-configured level
            'propagate': False,  # Don't propagate to root logger
        },
        # Add parent logger for all app modules
        'apps': {
            'handlers': ['console'],
            'level': LOG_LEVEL,
            'propagate': False,
        },
        # Celery loggers to capture task execution messages
        'celery': {
            'handlers': ['console'],
            'level': LOG_LEVEL,  # Use configured log level for Celery logs
            'propagate': False,
        },
        'celery.task': {
            'handlers': ['console'],
            'level': LOG_LEVEL,  # Use configured log level for task-specific logs
            'propagate': False,
        },
        'celery.worker': {
            'handlers': ['console'],
            'level': LOG_LEVEL,  # Use configured log level for worker logs
            'propagate': False,
        },
        'celery.beat': {
            'handlers': ['console'],
            'level': LOG_LEVEL,  # Use configured log level for scheduler logs
            'propagate': False,
        },
        # Add any other loggers you need to capture TRACE logs from
    },
    'root': {
        'handlers': ['console'],
        'level': LOG_LEVEL,  # Use user-configured level instead of hardcoded 'INFO'
    },
}
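A quick sanity check (a sketch, assuming Django has been initialized so this settings module and the core app config have loaded):

import logging

apps_logger = logging.getLogger("apps")
print(logging.getLevelName(apps_logger.getEffectiveLevel()))
# DISPATCHARR_LOG_LEVEL=TRACE -> "TRACE"; unset or invalid -> "INFO"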


@@ -9,7 +9,7 @@ exec-before = python /app/scripts/wait_for_redis.py
attach-daemon = redis-server
; Then start other services
attach-daemon = celery -A dispatcharr worker -l debug
attach-daemon = celery -A dispatcharr beat -l info
attach-daemon = celery -A dispatcharr beat -l debug
attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application
attach-daemon = cd /app/frontend && npm run dev