Mirror of https://github.com/Dispatcharr/Dispatcharr.git (synced 2026-01-22 18:28:00 +00:00)
Enhancement: Add system event logging and viewer with M3U/EPG endpoint caching
System Event Logging:
- Add SystemEvent model with 15 event types tracking channel operations, client connections, M3U/EPG activities, and buffering events
- Log detailed metrics for M3U/EPG refresh operations (streams/programs created/updated/deleted)
- Track M3U/EPG downloads with client information (IP address, user agent, profile, channel count)
- Record channel lifecycle events (start, stop, reconnect) with stream and client details
- Monitor client connections/disconnections and buffering events with stream metadata

Event Viewer UI:
- Add SystemEvents component with real-time updates via WebSocket
- Implement pagination, filtering by event type, and configurable auto-refresh
- Display events with color-coded badges and type-specific icons
- Integrate event viewer into Stats page with modal display
- Add event management settings (retention period, refresh rate)

M3U/EPG Endpoint Optimizations:
- Implement content caching with 5-minute TTL to reduce duplicate processing
- Add client-based event deduplication (2-second window) using IP and user agent hashing
- Support HEAD requests for efficient preflight checks
- Cache streamed EPG responses while maintaining streaming behavior for first request
Parent: 204a5a0c76 · Commit: 89a23164ff
18 changed files with 1022 additions and 67 deletions
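The pattern repeated throughout the diff below is a single fire-and-forget helper, core.utils.log_system_event, usually wrapped in a try/except so event logging can never break the calling code path. A minimal sketch of the calling convention (the stream_name value here is illustrative; the event types and keyword arguments are taken from the hunks below):

    from core.utils import log_system_event

    try:
        log_system_event(
            'channel_start',             # one of the 15 SystemEvent.EVENT_TYPES
            channel_id=channel.uuid,     # optional, indexed for filtering
            channel_name=channel.name,
            stream_name='Example Feed',  # extra kwargs land in the JSON details field
        )
    except Exception as e:
        logger.error(f"Could not log event: {e}")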
@@ -1434,6 +1434,18 @@ def run_recording(recording_id, channel_id, start_time_str, end_time_str):
     logger.info(f"Starting recording for channel {channel.name}")

+    # Log system event for recording start
+    try:
+        from core.utils import log_system_event
+        log_system_event(
+            'recording_start',
+            channel_id=channel.uuid,
+            channel_name=channel.name,
+            recording_id=recording_id
+        )
+    except Exception as e:
+        logger.error(f"Could not log recording start event: {e}")
+
     # Try to resolve the Recording row up front
     recording_obj = None
     try:
@@ -1827,6 +1839,20 @@ def run_recording(recording_id, channel_id, start_time_str, end_time_str):
     # After the loop, the file and response are closed automatically.
     logger.info(f"Finished recording for channel {channel.name}")
+
+    # Log system event for recording end
+    try:
+        from core.utils import log_system_event
+        log_system_event(
+            'recording_end',
+            channel_id=channel.uuid,
+            channel_name=channel.name,
+            recording_id=recording_id,
+            interrupted=interrupted,
+            bytes_written=bytes_written
+        )
+    except Exception as e:
+        logger.error(f"Could not log recording end event: {e}")

     # Remux TS to MKV container
     remux_success = False
     try:

@@ -24,7 +24,7 @@ from asgiref.sync import async_to_sync
 from channels.layers import get_channel_layer

 from .models import EPGSource, EPGData, ProgramData
-from core.utils import acquire_task_lock, release_task_lock, send_websocket_update, cleanup_memory
+from core.utils import acquire_task_lock, release_task_lock, send_websocket_update, cleanup_memory, log_system_event

 logger = logging.getLogger(__name__)

@@ -1496,6 +1496,15 @@ def parse_programs_for_source(epg_source, tvg_id=None):
     epg_source.updated_at = timezone.now()
     epg_source.save(update_fields=['status', 'last_message', 'updated_at'])

+    # Log system event for EPG refresh
+    log_system_event(
+        event_type='epg_refresh',
+        source_name=epg_source.name,
+        programs=program_count,
+        channels=channel_count,
+        updated=updated_count,
+    )
+
     # Send completion notification with status
     send_epg_update(epg_source.id, "parsing_programs", 100,
                     status="success",

@@ -24,6 +24,7 @@ from core.utils import (
     acquire_task_lock,
     release_task_lock,
     natural_sort_key,
+    log_system_event,
 )
 from core.models import CoreSettings, UserAgent
 from asgiref.sync import async_to_sync

@@ -2840,6 +2841,17 @@ def refresh_single_m3u_account(account_id):
     account.updated_at = timezone.now()
     account.save(update_fields=["status", "last_message", "updated_at"])

+    # Log system event for M3U refresh
+    log_system_event(
+        event_type='m3u_refresh',
+        account_name=account.name,
+        elapsed_time=round(elapsed_time, 2),
+        streams_created=streams_created,
+        streams_updated=streams_updated,
+        streams_deleted=streams_deleted,
+        total_processed=streams_processed,
+    )
+
     # Send final update with complete metrics and explicitly include success status
     send_m3u_update(
         account_id,

@@ -23,23 +23,64 @@ from django.db.models.functions import Lower
 import os
 from apps.m3u.utils import calculate_tuner_count
 import regex
+from core.utils import log_system_event
+import hashlib

 logger = logging.getLogger(__name__)


+def get_client_identifier(request):
+    """Get client information including IP, user agent, and a unique hash identifier
+
+    Returns:
+        tuple: (client_id_hash, client_ip, user_agent)
+    """
+    # Get client IP (handle proxies)
+    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
+    if x_forwarded_for:
+        client_ip = x_forwarded_for.split(',')[0].strip()
+    else:
+        client_ip = request.META.get('REMOTE_ADDR', 'unknown')
+
+    # Get user agent
+    user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')
+
+    # Create a hash for a shorter cache key
+    client_str = f"{client_ip}:{user_agent}"
+    client_id_hash = hashlib.md5(client_str.encode()).hexdigest()[:12]
+
+    return client_id_hash, client_ip, user_agent
+
+
 def m3u_endpoint(request, profile_name=None, user=None):
     logger.debug("m3u_endpoint called: method=%s, profile=%s", request.method, profile_name)
     if not network_access_allowed(request, "M3U_EPG"):
         return JsonResponse({"error": "Forbidden"}, status=403)
+
+    # Handle HEAD requests efficiently without generating content
+    if request.method == "HEAD":
+        logger.debug("Handling HEAD request for M3U")
+        response = HttpResponse(content_type="audio/x-mpegurl")
+        response["Content-Disposition"] = 'attachment; filename="channels.m3u"'
+        return response
+
     return generate_m3u(request, profile_name, user)


 def epg_endpoint(request, profile_name=None, user=None):
     logger.debug("epg_endpoint called: method=%s, profile=%s", request.method, profile_name)
     if not network_access_allowed(request, "M3U_EPG"):
         return JsonResponse({"error": "Forbidden"}, status=403)
+
+    # Handle HEAD requests efficiently without generating content
+    if request.method == "HEAD":
+        logger.debug("Handling HEAD request for EPG")
+        response = HttpResponse(content_type="application/xml")
+        response["Content-Disposition"] = 'attachment; filename="Dispatcharr.xml"'
+        response["Cache-Control"] = "no-cache"
+        return response
+
     return generate_epg(request, profile_name, user)


 @csrf_exempt
-@require_http_methods(["GET", "POST"])
+@require_http_methods(["GET", "POST", "HEAD"])
 def generate_m3u(request, profile_name=None, user=None):
     """
     Dynamically generate an M3U file from channels.
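The get_client_identifier helper above is what makes the 2-second deduplication window cheap: IP and user agent are collapsed into a 12-character MD5 prefix that becomes part of a short-lived cache key. A small sketch with assumed example values (the real IP and user agent come from request.META):

    import hashlib

    client_ip, user_agent = '203.0.113.7', 'VLC/3.0.20'  # assumed examples
    client_id = hashlib.md5(f"{client_ip}:{user_agent}".encode()).hexdigest()[:12]

    # Key layout used by the download-event dedup checks below (2-second TTL):
    event_cache_key = f"m3u_download:admin:all:{client_id}"

MD5 serves here only as a compact, non-cryptographic fingerprint for cache keys, so its collision weaknesses are not a concern.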
@@ -47,7 +88,19 @@ def generate_m3u(request, profile_name=None, user=None):
     Supports both GET and POST methods for compatibility with IPTVSmarters.
     """
-    # Check if this is a POST request and the body is not empty (which we don't want to allow)
-    logger.debug("Generating M3U for profile: %s, user: %s", profile_name, user.username if user else "Anonymous")
+    logger.debug("Generating M3U for profile: %s, user: %s, method: %s", profile_name, user.username if user else "Anonymous", request.method)
+
+    # Check cache for recent identical request (helps with double-GET from browsers)
+    from django.core.cache import cache
+    cache_params = f"{profile_name or 'all'}:{user.username if user else 'anonymous'}:{request.GET.urlencode()}"
+    content_cache_key = f"m3u_content:{cache_params}"
+
+    cached_content = cache.get(content_cache_key)
+    if cached_content:
+        logger.debug("Serving M3U from cache")
+        response = HttpResponse(cached_content, content_type="audio/x-mpegurl")
+        response["Content-Disposition"] = 'attachment; filename="channels.m3u"'
+        return response
+
+    # Check if this is a POST request with data (which we don't want to allow)
     if request.method == "POST" and request.body:
         if request.body.decode() != '{}':
@@ -184,6 +237,23 @@ def generate_m3u(request, profile_name=None, user=None):

         m3u_content += extinf_line + stream_url + "\n"

+    # Cache the generated content for 2 seconds to handle double-GET requests
+    cache.set(content_cache_key, m3u_content, 2)
+
+    # Log system event for M3U download (with deduplication based on client)
+    client_id, client_ip, user_agent = get_client_identifier(request)
+    event_cache_key = f"m3u_download:{user.username if user else 'anonymous'}:{profile_name or 'all'}:{client_id}"
+    if not cache.get(event_cache_key):
+        log_system_event(
+            event_type='m3u_download',
+            profile=profile_name or 'all',
+            user=user.username if user else 'anonymous',
+            channels=channels.count(),
+            client_ip=client_ip,
+            user_agent=user_agent,
+        )
+        cache.set(event_cache_key, True, 2)  # Prevent duplicate events for 2 seconds
+
     response = HttpResponse(m3u_content, content_type="audio/x-mpegurl")
     response["Content-Disposition"] = 'attachment; filename="channels.m3u"'
     return response
@@ -1126,8 +1196,22 @@ def generate_epg(request, profile_name=None, user=None):
     by their associated EPGData record.
    This version filters data based on the 'days' parameter and sends keep-alives during processing.
     """
+    # Check cache for recent identical request (helps with double-GET from browsers)
+    from django.core.cache import cache
+    cache_params = f"{profile_name or 'all'}:{user.username if user else 'anonymous'}:{request.GET.urlencode()}"
+    content_cache_key = f"epg_content:{cache_params}"
+
+    cached_content = cache.get(content_cache_key)
+    if cached_content:
+        logger.debug("Serving EPG from cache")
+        response = HttpResponse(cached_content, content_type="application/xml")
+        response["Content-Disposition"] = 'attachment; filename="Dispatcharr.xml"'
+        response["Cache-Control"] = "no-cache"
+        return response
+
     def epg_generator():
-        """Generator function that yields EPG data with keep-alives during processing"""  # Send initial HTTP headers as comments (these will be ignored by XML parsers but keep connection alive)
+        """Generator function that yields EPG data with keep-alives during processing"""
+        # Send initial HTTP headers as comments (these will be ignored by XML parsers but keep connection alive)

         xml_lines = []
         xml_lines.append('<?xml version="1.0" encoding="UTF-8"?>')
@@ -1286,7 +1370,8 @@
             xml_lines.append("  </channel>")

         # Send all channel definitions
-        yield '\n'.join(xml_lines) + '\n'
+        channel_xml = '\n'.join(xml_lines) + '\n'
+        yield channel_xml
         xml_lines = []  # Clear to save memory

         # Process programs for each channel
@@ -1676,7 +1761,8 @@

                 # Send batch when full or send keep-alive
                 if len(program_batch) >= batch_size:
-                    yield '\n'.join(program_batch) + '\n'
+                    batch_xml = '\n'.join(program_batch) + '\n'
+                    yield batch_xml
                     program_batch = []

                 # Move to next chunk
@@ -1684,12 +1770,40 @@

         # Send remaining programs in batch
         if program_batch:
-            yield '\n'.join(program_batch) + '\n'
+            batch_xml = '\n'.join(program_batch) + '\n'
+            yield batch_xml

         # Send final closing tag and completion message
-        yield "</tv>\n"  # Return streaming response
+        yield "</tv>\n"
+
+        # Log system event for EPG download after streaming completes (with deduplication based on client)
+        client_id, client_ip, user_agent = get_client_identifier(request)
+        event_cache_key = f"epg_download:{user.username if user else 'anonymous'}:{profile_name or 'all'}:{client_id}"
+        if not cache.get(event_cache_key):
+            log_system_event(
+                event_type='epg_download',
+                profile=profile_name or 'all',
+                user=user.username if user else 'anonymous',
+                channels=channels.count(),
+                client_ip=client_ip,
+                user_agent=user_agent,
+            )
+            cache.set(event_cache_key, True, 2)  # Prevent duplicate events for 2 seconds
+
+    # Wrapper generator that collects content for caching
+    def caching_generator():
+        collected_content = []
+        for chunk in epg_generator():
+            collected_content.append(chunk)
+            yield chunk
+        # After streaming completes, cache the full content
+        full_content = ''.join(collected_content)
+        cache.set(content_cache_key, full_content, 300)
+        logger.debug("Cached EPG content (%d bytes)", len(full_content))

     # Return streaming response
     response = StreamingHttpResponse(
-        streaming_content=epg_generator(),
+        streaming_content=caching_generator(),
         content_type="application/xml"
     )
     response["Content-Disposition"] = 'attachment; filename="Dispatcharr.xml"'
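The caching_generator wrapper is what satisfies both goals in the commit message: the first request still streams (keep-alives and all), while identical follow-up requests within five minutes are served whole from cache. The general shape of the pattern, reduced to its essentials (a generic sketch, not the exact Dispatcharr code):

    def caching_stream(source_gen, cache, key, ttl=300):
        collected = []
        for chunk in source_gen:
            collected.append(chunk)
            yield chunk                    # stream to the client immediately
        # Reached only if the client consumed the whole stream:
        cache.set(key, ''.join(collected), ttl)

One consequence worth noting: if a client disconnects mid-stream, the generator is closed early, the cache.set never runs, and the next request simply regenerates the EPG.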
@@ -34,6 +34,10 @@ class ClientManager:
         self.heartbeat_interval = ConfigHelper.get('CLIENT_HEARTBEAT_INTERVAL', 10)
         self.last_heartbeat_time = {}

+        # Get ProxyServer instance for ownership checks
+        from .server import ProxyServer
+        self.proxy_server = ProxyServer.get_instance()
+
         # Start heartbeat thread for local clients
         self._start_heartbeat_thread()
         self._registered_clients = set()  # Track already registered client IDs

@@ -337,16 +341,30 @@

         self._notify_owner_of_activity()

-        # Publish client disconnected event
-        event_data = json.dumps({
-            "event": EventType.CLIENT_DISCONNECTED,  # Use constant instead of string
-            "channel_id": self.channel_id,
-            "client_id": client_id,
-            "worker_id": self.worker_id or "unknown",
-            "timestamp": time.time(),
-            "remaining_clients": remaining
-        })
-        self.redis_client.publish(RedisKeys.events_channel(self.channel_id), event_data)
+        # Check if we're the owner - if so, handle locally; if not, publish event
+        am_i_owner = self.proxy_server and self.proxy_server.am_i_owner(self.channel_id)
+
+        if am_i_owner:
+            # We're the owner - handle the disconnect directly
+            logger.debug(f"Owner handling CLIENT_DISCONNECTED for client {client_id} locally (not publishing)")
+            if remaining == 0:
+                # Trigger shutdown check directly via ProxyServer method
+                logger.debug(f"No clients left - triggering immediate shutdown check")
+                # Spawn greenlet to avoid blocking
+                import gevent
+                gevent.spawn(self.proxy_server.handle_client_disconnect, self.channel_id)
+        else:
+            # We're not the owner - publish event so owner can handle it
+            logger.debug(f"Non-owner publishing CLIENT_DISCONNECTED event for client {client_id} on channel {self.channel_id} from worker {self.worker_id}")
+            event_data = json.dumps({
+                "event": EventType.CLIENT_DISCONNECTED,
+                "channel_id": self.channel_id,
+                "client_id": client_id,
+                "worker_id": self.worker_id or "unknown",
+                "timestamp": time.time(),
+                "remaining_clients": remaining
+            })
+            self.redis_client.publish(RedisKeys.events_channel(self.channel_id), event_data)

         # Trigger channel stats update via WebSocket
         self._trigger_stats_update()

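The net effect of this ClientManager change: a worker that owns the channel no longer publishes a CLIENT_DISCONNECTED event to itself over Redis; it calls straight into the ProxyServer. Reduced to its decision logic (a simplified sketch of the hunk above):

    if self.proxy_server and self.proxy_server.am_i_owner(self.channel_id):
        if remaining == 0:
            # gevent.spawn keeps the disconnect path non-blocking
            gevent.spawn(self.proxy_server.handle_client_disconnect, self.channel_id)
    else:
        self.redis_client.publish(events_channel, event_data)  # owner reacts via PubSub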
@@ -19,7 +19,7 @@ import gevent  # Add gevent import
 from typing import Dict, Optional, Set
 from apps.proxy.config import TSConfig as Config
 from apps.channels.models import Channel, Stream
-from core.utils import RedisClient
+from core.utils import RedisClient, log_system_event
 from redis.exceptions import ConnectionError, TimeoutError
 from .stream_manager import StreamManager
 from .stream_buffer import StreamBuffer
@@ -194,35 +194,11 @@
                         self.redis_client.delete(disconnect_key)

                 elif event_type == EventType.CLIENT_DISCONNECTED:
-                    logger.debug(f"Owner received {EventType.CLIENT_DISCONNECTED} event for channel {channel_id}")
-                    # Check if any clients remain
-                    if channel_id in self.client_managers:
-                        # VERIFY REDIS CLIENT COUNT DIRECTLY
-                        client_set_key = RedisKeys.clients(channel_id)
-                        total = self.redis_client.scard(client_set_key) or 0
-
-                        if total == 0:
-                            logger.debug(f"No clients left after disconnect event - stopping channel {channel_id}")
-                            # Set the disconnect timer for other workers to see
-                            disconnect_key = RedisKeys.last_client_disconnect(channel_id)
-                            self.redis_client.setex(disconnect_key, 60, str(time.time()))
-
-                            # Get configured shutdown delay or default
-                            shutdown_delay = ConfigHelper.channel_shutdown_delay()
-
-                            if shutdown_delay > 0:
-                                logger.info(f"Waiting {shutdown_delay}s before stopping channel...")
-                                gevent.sleep(shutdown_delay)  # REPLACE: time.sleep(shutdown_delay)
-
-                            # Re-check client count before stopping
-                            total = self.redis_client.scard(client_set_key) or 0
-                            if total > 0:
-                                logger.info(f"New clients connected during shutdown delay - aborting shutdown")
-                                self.redis_client.delete(disconnect_key)
-                                return
-
-                            # Stop the channel directly
-                            self.stop_channel(channel_id)
+                    client_id = data.get("client_id")
+                    worker_id = data.get("worker_id")
+                    logger.debug(f"Owner received {EventType.CLIENT_DISCONNECTED} event for channel {channel_id}, client {client_id} from worker {worker_id}")
+                    # Delegate to dedicated method
+                    self.handle_client_disconnect(channel_id)

                 elif event_type == EventType.STREAM_SWITCH:
@@ -646,6 +622,29 @@
             logger.info(f"Created StreamManager for channel {channel_id} with stream ID {channel_stream_id}")
             self.stream_managers[channel_id] = stream_manager

+            # Log channel start event
+            try:
+                channel_obj = Channel.objects.get(uuid=channel_id)
+
+                # Get stream name if stream_id is available
+                stream_name = None
+                if channel_stream_id:
+                    try:
+                        stream_obj = Stream.objects.get(id=channel_stream_id)
+                        stream_name = stream_obj.name
+                    except Exception:
+                        pass
+
+                log_system_event(
+                    'channel_start',
+                    channel_id=channel_id,
+                    channel_name=channel_obj.name,
+                    stream_name=stream_name,
+                    stream_id=channel_stream_id
+                )
+            except Exception as e:
+                logger.error(f"Could not log channel start event: {e}")
+
         # Create client manager with channel_id, redis_client AND worker_id (only if not already exists)
         if channel_id not in self.client_managers:
             client_manager = ClientManager(
@@ -800,6 +799,44 @@
             logger.error(f"Error cleaning zombie channel {channel_id}: {e}", exc_info=True)
             return False

+    def handle_client_disconnect(self, channel_id):
+        """
+        Handle client disconnect event - check if channel should shut down.
+        Can be called directly by owner or via PubSub from non-owner workers.
+        """
+        if channel_id not in self.client_managers:
+            return
+
+        try:
+            # VERIFY REDIS CLIENT COUNT DIRECTLY
+            client_set_key = RedisKeys.clients(channel_id)
+            total = self.redis_client.scard(client_set_key) or 0
+
+            if total == 0:
+                logger.debug(f"No clients left after disconnect event - stopping channel {channel_id}")
+                # Set the disconnect timer for other workers to see
+                disconnect_key = RedisKeys.last_client_disconnect(channel_id)
+                self.redis_client.setex(disconnect_key, 60, str(time.time()))
+
+                # Get configured shutdown delay or default
+                shutdown_delay = ConfigHelper.channel_shutdown_delay()
+
+                if shutdown_delay > 0:
+                    logger.info(f"Waiting {shutdown_delay}s before stopping channel...")
+                    gevent.sleep(shutdown_delay)
+
+                # Re-check client count before stopping
+                total = self.redis_client.scard(client_set_key) or 0
+                if total > 0:
+                    logger.info(f"New clients connected during shutdown delay - aborting shutdown")
+                    self.redis_client.delete(disconnect_key)
+                    return
+
+                # Stop the channel directly
+                self.stop_channel(channel_id)
+        except Exception as e:
+            logger.error(f"Error handling client disconnect for channel {channel_id}: {e}")
+
     def stop_channel(self, channel_id):
         """Stop a channel with proper ownership handling"""
         try:
@@ -847,6 +884,41 @@
                 self.release_ownership(channel_id)
                 logger.info(f"Released ownership of channel {channel_id}")

+            # Log channel stop event (after cleanup, before releasing ownership section ends)
+            try:
+                channel_obj = Channel.objects.get(uuid=channel_id)
+
+                # Calculate runtime and get total bytes from metadata
+                runtime = None
+                total_bytes = None
+                if self.redis_client:
+                    metadata_key = RedisKeys.channel_metadata(channel_id)
+                    metadata = self.redis_client.hgetall(metadata_key)
+                    if metadata:
+                        # Calculate runtime from init_time
+                        if b'init_time' in metadata:
+                            try:
+                                init_time = float(metadata[b'init_time'].decode('utf-8'))
+                                runtime = round(time.time() - init_time, 2)
+                            except Exception:
+                                pass
+                        # Get total bytes transferred
+                        if b'total_bytes' in metadata:
+                            try:
+                                total_bytes = int(metadata[b'total_bytes'].decode('utf-8'))
+                            except Exception:
+                                pass
+
+                log_system_event(
+                    'channel_stop',
+                    channel_id=channel_id,
+                    channel_name=channel_obj.name,
+                    runtime=runtime,
+                    total_bytes=total_bytes
+                )
+            except Exception as e:
+                logger.error(f"Could not log channel stop event: {e}")
+
             # Always clean up local resources - WITH SAFE CHECKS
             if channel_id in self.stream_managers:
                 del self.stream_managers[channel_id]
@@ -968,6 +1040,13 @@

                     # If in connecting or waiting_for_clients state, check grace period
                     if channel_state in [ChannelState.CONNECTING, ChannelState.WAITING_FOR_CLIENTS]:
+                        # Check if channel is already stopping
+                        if self.redis_client:
+                            stop_key = RedisKeys.channel_stopping(channel_id)
+                            if self.redis_client.exists(stop_key):
+                                logger.debug(f"Channel {channel_id} is already stopping - skipping monitor shutdown")
+                                continue
+
                         # Get connection_ready_time from metadata (indicates if channel reached ready state)
                         connection_ready_time = None
                         if metadata and b'connection_ready_time' in metadata:
@@ -1048,6 +1127,13 @@
                             logger.info(f"Channel {channel_id} activated with {total_clients} clients after grace period")
                     # If active and no clients, start normal shutdown procedure
                     elif channel_state not in [ChannelState.CONNECTING, ChannelState.WAITING_FOR_CLIENTS] and total_clients == 0:
+                        # Check if channel is already stopping
+                        if self.redis_client:
+                            stop_key = RedisKeys.channel_stopping(channel_id)
+                            if self.redis_client.exists(stop_key):
+                                logger.debug(f"Channel {channel_id} is already stopping - skipping monitor shutdown")
+                                continue
+
                         # Check if there's a pending no-clients timeout
                         disconnect_key = RedisKeys.last_client_disconnect(channel_id)
                         disconnect_time = None

@@ -14,6 +14,7 @@ from ..server import ProxyServer
 from ..redis_keys import RedisKeys
 from ..constants import EventType, ChannelState, ChannelMetadataField
 from ..url_utils import get_stream_info_for_switch
+from core.utils import log_system_event

 logger = logging.getLogger("ts_proxy")

@@ -598,7 +599,7 @@
     def _update_stream_stats_in_db(stream_id, **stats):
         """Update stream stats in database"""
         from django.db import connection
-
+
         try:
             from apps.channels.models import Stream
             from django.utils import timezone

@@ -624,7 +625,7 @@
         except Exception as e:
             logger.error(f"Error updating stream stats in database for stream {stream_id}: {e}")
             return False
-
+
         finally:
             # Always close database connection after update
             try:

@@ -700,6 +701,7 @@
                 RedisKeys.events_channel(channel_id),
                 json.dumps(switch_request)
             )
+
             return True

     @staticmethod

@@ -8,6 +8,8 @@ import logging
 import threading
 import gevent  # Add this import at the top of your file
 from apps.proxy.config import TSConfig as Config
+from apps.channels.models import Channel
+from core.utils import log_system_event
 from .server import ProxyServer
 from .utils import create_ts_packet, get_logger
 from .redis_keys import RedisKeys

@@ -88,6 +90,20 @@
         if not self._setup_streaming():
             return

+        # Log client connect event
+        try:
+            channel_obj = Channel.objects.get(uuid=self.channel_id)
+            log_system_event(
+                'client_connect',
+                channel_id=self.channel_id,
+                channel_name=channel_obj.name,
+                client_ip=self.client_ip,
+                client_id=self.client_id,
+                user_agent=self.client_user_agent[:100] if self.client_user_agent else None
+            )
+        except Exception as e:
+            logger.error(f"Could not log client connect event: {e}")
+
         # Main streaming loop
         for chunk in self._stream_data_generator():
             yield chunk

@@ -439,6 +455,22 @@
             total_clients = client_manager.get_total_client_count()
             logger.info(f"[{self.client_id}] Disconnected after {elapsed:.2f}s (local: {local_clients}, total: {total_clients})")

+            # Log client disconnect event
+            try:
+                channel_obj = Channel.objects.get(uuid=self.channel_id)
+                log_system_event(
+                    'client_disconnect',
+                    channel_id=self.channel_id,
+                    channel_name=channel_obj.name,
+                    client_ip=self.client_ip,
+                    client_id=self.client_id,
+                    user_agent=self.client_user_agent[:100] if self.client_user_agent else None,
+                    duration=round(elapsed, 2),
+                    bytes_sent=self.bytes_sent
+                )
+            except Exception as e:
+                logger.error(f"Could not log client disconnect event: {e}")
+
             # Schedule channel shutdown if no clients left
             if not stream_released:  # Only if we haven't already released the stream
                 self._schedule_channel_shutdown_if_needed(local_clients)

@@ -16,6 +16,7 @@ from apps.proxy.config import TSConfig as Config
 from apps.channels.models import Channel, Stream
 from apps.m3u.models import M3UAccount, M3UAccountProfile
 from core.models import UserAgent, CoreSettings
+from core.utils import log_system_event
 from .stream_buffer import StreamBuffer
 from .utils import detect_stream_type, get_logger
 from .redis_keys import RedisKeys
@@ -260,6 +261,20 @@
                     # Store connection start time to measure success duration
                     connection_start_time = time.time()

+                    # Log reconnection event if this is a retry (not first attempt)
+                    if self.retry_count > 0:
+                        try:
+                            channel_obj = Channel.objects.get(uuid=self.channel_id)
+                            log_system_event(
+                                'channel_reconnect',
+                                channel_id=self.channel_id,
+                                channel_name=channel_obj.name,
+                                attempt=self.retry_count + 1,
+                                max_attempts=self.max_retries
+                            )
+                        except Exception as e:
+                            logger.error(f"Could not log reconnection event: {e}")
+
                     # Successfully connected - read stream data until disconnect/error
                     self._process_stream_data()
                     # If we get here, the connection was closed/failed
@@ -289,6 +304,20 @@
                     if self.retry_count >= self.max_retries:
                         url_failed = True
                         logger.warning(f"Maximum retry attempts ({self.max_retries}) reached for URL: {self.url} for channel: {self.channel_id}")
+
+                        # Log connection error event
+                        try:
+                            channel_obj = Channel.objects.get(uuid=self.channel_id)
+                            log_system_event(
+                                'channel_error',
+                                channel_id=self.channel_id,
+                                channel_name=channel_obj.name,
+                                error_type='connection_failed',
+                                url=self.url[:100] if self.url else None,
+                                attempts=self.max_retries
+                            )
+                        except Exception as e:
+                            logger.error(f"Could not log connection error event: {e}")
                     else:
                         # Wait with exponential backoff before retrying
                         timeout = min(.25 * self.retry_count, 3)  # Cap at 3 seconds
@@ -302,6 +331,21 @@

                 if self.retry_count >= self.max_retries:
                     url_failed = True
+
+                    # Log connection error event with exception details
+                    try:
+                        channel_obj = Channel.objects.get(uuid=self.channel_id)
+                        log_system_event(
+                            'channel_error',
+                            channel_id=self.channel_id,
+                            channel_name=channel_obj.name,
+                            error_type='connection_exception',
+                            error_message=str(e)[:200],
+                            url=self.url[:100] if self.url else None,
+                            attempts=self.max_retries
+                        )
+                    except Exception as log_error:
+                        logger.error(f"Could not log connection error event: {log_error}")
                 else:
                     # Wait with exponential backoff before retrying
                     timeout = min(.25 * self.retry_count, 3)  # Cap at 3 seconds
@@ -702,6 +746,19 @@
                         # Reset buffering state
                         self.buffering = False
                         self.buffering_start_time = None
+
+                        # Log failover event
+                        try:
+                            channel_obj = Channel.objects.get(uuid=self.channel_id)
+                            log_system_event(
+                                'channel_failover',
+                                channel_id=self.channel_id,
+                                channel_name=channel_obj.name,
+                                reason='buffering_timeout',
+                                duration=buffering_duration
+                            )
+                        except Exception as e:
+                            logger.error(f"Could not log failover event: {e}")
                     else:
                         logger.error(f"Failed to switch to next stream for channel {self.channel_id} after buffering timeout")
                 else:
@@ -709,6 +766,19 @@
                     self.buffering = True
                     self.buffering_start_time = time.time()
                     logger.warning(f"Buffering started for channel {self.channel_id} - speed: {ffmpeg_speed}x")
+
+                    # Log system event for buffering
+                    try:
+                        channel_obj = Channel.objects.get(uuid=self.channel_id)
+                        log_system_event(
+                            'channel_buffering',
+                            channel_id=self.channel_id,
+                            channel_name=channel_obj.name,
+                            speed=ffmpeg_speed
+                        )
+                    except Exception as e:
+                        logger.error(f"Could not log buffering event: {e}")

                 # Log buffering warning
                 logger.debug(f"FFmpeg speed on channel {self.channel_id} is below {self.buffering_speed} ({ffmpeg_speed}x) - buffering detected")
                 # Set channel state to buffering
@@ -1004,6 +1074,19 @@
                 except Exception as e:
                     logger.warning(f"Failed to reset buffer position: {e}")

+            # Log stream switch event
+            try:
+                channel_obj = Channel.objects.get(uuid=self.channel_id)
+                log_system_event(
+                    'stream_switch',
+                    channel_id=self.channel_id,
+                    channel_name=channel_obj.name,
+                    new_url=new_url[:100] if new_url else None,
+                    stream_id=stream_id
+                )
+            except Exception as e:
+                logger.error(f"Could not log stream switch event: {e}")
+
             return True
         except Exception as e:
             logger.error(f"Error during URL update for channel {self.channel_id}: {e}", exc_info=True)
@@ -1122,6 +1205,19 @@
             if connection_result:
                 self.connection_start_time = time.time()
                 logger.info(f"Reconnect successful for channel {self.channel_id}")
+
+                # Log reconnection event
+                try:
+                    channel_obj = Channel.objects.get(uuid=self.channel_id)
+                    log_system_event(
+                        'channel_reconnect',
+                        channel_id=self.channel_id,
+                        channel_name=channel_obj.name,
+                        reason='health_monitor'
+                    )
+                except Exception as e:
+                    logger.error(f"Could not log reconnection event: {e}")
+
                 return True
             else:
                 logger.warning(f"Reconnect failed for channel {self.channel_id}")
@@ -1199,25 +1295,17 @@
                 logger.debug(f"Error closing socket for channel {self.channel_id}: {e}")
                 pass

-        # Enhanced transcode process cleanup with more aggressive termination
+        # Enhanced transcode process cleanup with immediate termination
         if self.transcode_process:
             try:
-                # First try polite termination
-                logger.debug(f"Terminating transcode process for channel {self.channel_id}")
-                self.transcode_process.terminate()
+                logger.debug(f"Killing transcode process for channel {self.channel_id}")
+                self.transcode_process.kill()

-                # Give it a short time to terminate gracefully
+                # Give it a very short time to die
                 try:
-                    self.transcode_process.wait(timeout=1.0)
+                    self.transcode_process.wait(timeout=0.5)
                 except subprocess.TimeoutExpired:
-                    # If it doesn't terminate quickly, kill it
-                    logger.warning(f"Transcode process didn't terminate within timeout, killing forcefully for channel {self.channel_id}")
-                    self.transcode_process.kill()
-
-                    try:
-                        self.transcode_process.wait(timeout=1.0)
-                    except subprocess.TimeoutExpired:
-                        logger.error(f"Failed to kill transcode process even with force for channel {self.channel_id}")
+                    logger.error(f"Failed to kill transcode process even with force for channel {self.channel_id}")
             except Exception as e:
                 logger.debug(f"Error terminating transcode process for channel {self.channel_id}: {e}")

@@ -2,7 +2,16 @@

 from django.urls import path, include
 from rest_framework.routers import DefaultRouter
-from .api_views import UserAgentViewSet, StreamProfileViewSet, CoreSettingsViewSet, environment, version, rehash_streams_endpoint, TimezoneListView
+from .api_views import (
+    UserAgentViewSet,
+    StreamProfileViewSet,
+    CoreSettingsViewSet,
+    environment,
+    version,
+    rehash_streams_endpoint,
+    TimezoneListView,
+    get_system_events
+)

 router = DefaultRouter()
 router.register(r'useragents', UserAgentViewSet, basename='useragent')

@@ -13,5 +22,6 @@ urlpatterns = [
     path('version/', version, name='version'),
     path('rehash-streams/', rehash_streams_endpoint, name='rehash_streams'),
    path('timezones/', TimezoneListView.as_view(), name='timezones'),
+    path('system-events/', get_system_events, name='system_events'),
     path('', include(router.urls)),
 ]

@@ -396,3 +396,64 @@ class TimezoneListView(APIView):
             'grouped': grouped,
             'count': len(all_timezones)
         })
+
+
+# ─────────────────────────────
+# System Events API
+# ─────────────────────────────
+@api_view(['GET'])
+@permission_classes([IsAuthenticated])
+def get_system_events(request):
+    """
+    Get recent system events (channel start/stop, buffering, client connections, etc.)
+
+    Query Parameters:
+        limit: Number of events to return per page (default: 100, max: 1000)
+        offset: Number of events to skip (for pagination, default: 0)
+        event_type: Filter by specific event type (optional)
+    """
+    from core.models import SystemEvent
+
+    try:
+        # Get pagination params
+        limit = min(int(request.GET.get('limit', 100)), 1000)
+        offset = int(request.GET.get('offset', 0))
+
+        # Start with all events
+        events = SystemEvent.objects.all()
+
+        # Filter by event_type if provided
+        event_type = request.GET.get('event_type')
+        if event_type:
+            events = events.filter(event_type=event_type)
+
+        # Get total count before applying pagination
+        total_count = events.count()
+
+        # Apply offset and limit for pagination
+        events = events[offset:offset + limit]
+
+        # Serialize the data
+        events_data = [{
+            'id': event.id,
+            'event_type': event.event_type,
+            'event_type_display': event.get_event_type_display(),
+            'timestamp': event.timestamp.isoformat(),
+            'channel_id': str(event.channel_id) if event.channel_id else None,
+            'channel_name': event.channel_name,
+            'details': event.details
+        } for event in events]
+
+        return Response({
+            'events': events_data,
+            'count': len(events_data),
+            'total': total_count,
+            'offset': offset,
+            'limit': limit
+        })
+
+    except Exception as e:
+        logger.error(f"Error fetching system events: {e}")
+        return Response({
+            'error': 'Failed to fetch system events'
+        }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)

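For reference, a hypothetical client-side call against the new endpoint (host and auth header are placeholders; the exact auth scheme depends on the deployment):

    import requests

    resp = requests.get(
        'http://dispatcharr.local/api/core/system-events/',
        params={'limit': 50, 'offset': 0, 'event_type': 'channel_start'},
        headers={'Authorization': 'Bearer <token>'},  # view requires an authenticated user
    )
    payload = resp.json()
    # -> {'events': [...], 'count': ..., 'total': ..., 'offset': 0, 'limit': 50}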
core/migrations/0017_systemevent.py (new file, 28 lines)
@@ -0,0 +1,28 @@
# Generated by Django 5.2.4 on 2025-11-20 20:47

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0016_update_dvr_template_paths'),
    ]

    operations = [
        migrations.CreateModel(
            name='SystemEvent',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('event_type', models.CharField(choices=[('channel_start', 'Channel Started'), ('channel_stop', 'Channel Stopped'), ('channel_buffering', 'Channel Buffering'), ('channel_failover', 'Channel Failover'), ('channel_reconnect', 'Channel Reconnected'), ('channel_error', 'Channel Error'), ('client_connect', 'Client Connected'), ('client_disconnect', 'Client Disconnected'), ('recording_start', 'Recording Started'), ('recording_end', 'Recording Ended'), ('stream_switch', 'Stream Switched'), ('m3u_refresh', 'M3U Refreshed'), ('m3u_download', 'M3U Downloaded'), ('epg_refresh', 'EPG Refreshed'), ('epg_download', 'EPG Downloaded')], db_index=True, max_length=50)),
                ('timestamp', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('channel_id', models.UUIDField(blank=True, db_index=True, null=True)),
                ('channel_name', models.CharField(blank=True, max_length=255, null=True)),
                ('details', models.JSONField(blank=True, default=dict)),
            ],
            options={
                'ordering': ['-timestamp'],
                'indexes': [models.Index(fields=['-timestamp'], name='core_system_timesta_c6c3d1_idx'), models.Index(fields=['event_type', '-timestamp'], name='core_system_event_t_4267d9_idx')],
            },
        ),
    ]

@@ -375,3 +375,43 @@ class CoreSettings(models.Model):
             return rules
         except Exception:
             return rules
+
+
+class SystemEvent(models.Model):
+    """
+    Tracks system events like channel start/stop, buffering, failover, client connections.
+    Maintains a rolling history based on max_system_events setting.
+    """
+    EVENT_TYPES = [
+        ('channel_start', 'Channel Started'),
+        ('channel_stop', 'Channel Stopped'),
+        ('channel_buffering', 'Channel Buffering'),
+        ('channel_failover', 'Channel Failover'),
+        ('channel_reconnect', 'Channel Reconnected'),
+        ('channel_error', 'Channel Error'),
+        ('client_connect', 'Client Connected'),
+        ('client_disconnect', 'Client Disconnected'),
+        ('recording_start', 'Recording Started'),
+        ('recording_end', 'Recording Ended'),
+        ('stream_switch', 'Stream Switched'),
+        ('m3u_refresh', 'M3U Refreshed'),
+        ('m3u_download', 'M3U Downloaded'),
+        ('epg_refresh', 'EPG Refreshed'),
+        ('epg_download', 'EPG Downloaded'),
+    ]
+
+    event_type = models.CharField(max_length=50, choices=EVENT_TYPES, db_index=True)
+    timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
+    channel_id = models.UUIDField(null=True, blank=True, db_index=True)
+    channel_name = models.CharField(max_length=255, null=True, blank=True)
+    details = models.JSONField(default=dict, blank=True)
+
+    class Meta:
+        ordering = ['-timestamp']
+        indexes = [
+            models.Index(fields=['-timestamp']),
+            models.Index(fields=['event_type', '-timestamp']),
+        ]
+
+    def __str__(self):
+        return f"{self.event_type} - {self.channel_name or 'N/A'} @ {self.timestamp}"

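Since Meta.ordering is ['-timestamp'], plain queries come back newest-first with no extra order_by. A couple of shell-style examples against the new model (the UUID is a placeholder):

    from core.models import SystemEvent

    recent = SystemEvent.objects.all()[:10]  # ten newest events

    buffering = SystemEvent.objects.filter(
        event_type='channel_buffering',
        channel_id='6f1c4d2e-0000-0000-0000-000000000000',  # placeholder
    )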
@@ -388,3 +388,48 @@ def validate_flexible_url(value):

     # If it doesn't match our flexible patterns, raise the original error
     raise ValidationError("Enter a valid URL.")
+
+
+def log_system_event(event_type, channel_id=None, channel_name=None, **details):
+    """
+    Log a system event and maintain the configured max history.
+
+    Args:
+        event_type: Type of event (e.g., 'channel_start', 'client_connect')
+        channel_id: Optional UUID of the channel
+        channel_name: Optional name of the channel
+        **details: Additional details to store in the event (stored as JSON)
+
+    Example:
+        log_system_event('channel_start', channel_id=uuid, channel_name='CNN',
+                         stream_url='http://...', user='admin')
+    """
+    from core.models import SystemEvent, CoreSettings
+
+    try:
+        # Create the event
+        SystemEvent.objects.create(
+            event_type=event_type,
+            channel_id=channel_id,
+            channel_name=channel_name,
+            details=details
+        )
+
+        # Get max events from settings (default 100)
+        try:
+            max_events_setting = CoreSettings.objects.filter(key='max-system-events').first()
+            max_events = int(max_events_setting.value) if max_events_setting else 100
+        except Exception:
+            max_events = 100
+
+        # Delete old events beyond the limit (keep it efficient with a single query)
+        total_count = SystemEvent.objects.count()
+        if total_count > max_events:
+            # Get the ID of the event at the cutoff point
+            cutoff_event = SystemEvent.objects.values_list('id', flat=True)[max_events]
+            # Delete all events with ID less than cutoff (older events)
+            SystemEvent.objects.filter(id__lt=cutoff_event).delete()
+
+    except Exception as e:
+        # Don't let event logging break the main application
+        logger.error(f"Failed to log system event {event_type}: {e}")

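The trim step leans on two properties: Meta.ordering = ['-timestamp'] makes index max_events the (max_events + 1)-th newest row, and the auto-increment primary key grows with insertion time, so id__lt=cutoff deletes exactly the strictly older rows (the cutoff event itself survives). Worked through with small numbers:

    # Assume max_events = 100 and 103 stored events.
    # values_list('id', flat=True)[100] -> id of the 101st-newest event.
    # filter(id__lt=cutoff).delete()    -> removes the 2 rows older than it,
    #                                      leaving the 100 newest plus the cutoff row.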
@@ -2481,4 +2481,21 @@ export default class API {
       errorNotification('Failed to update playback position', e);
     }
   }
+
+  static async getSystemEvents(limit = 100, offset = 0, eventType = null) {
+    try {
+      const params = new URLSearchParams();
+      params.append('limit', limit);
+      params.append('offset', offset);
+      if (eventType) {
+        params.append('event_type', eventType);
+      }
+      const response = await request(
+        `${host}/api/core/system-events/?${params.toString()}`
+      );
+      return response;
+    } catch (e) {
+      errorNotification('Failed to retrieve system events', e);
+    }
+  }
 }

frontend/src/components/SystemEvents.jsx (new file, 304 lines)
@@ -0,0 +1,304 @@
import React, { useState, useEffect, useCallback } from 'react';
import {
  ActionIcon,
  Box,
  Button,
  Card,
  Group,
  NumberInput,
  Pagination,
  Select,
  Stack,
  Text,
  Title,
} from '@mantine/core';
import {
  ChevronDown,
  CirclePlay,
  Download,
  Gauge,
  HardDriveDownload,
  List,
  RefreshCw,
  SquareX,
  Timer,
  Users,
  Video,
} from 'lucide-react';
import dayjs from 'dayjs';
import API from '../api';
import useLocalStorage from '../hooks/useLocalStorage';

const SystemEvents = () => {
  const [events, setEvents] = useState([]);
  const [totalEvents, setTotalEvents] = useState(0);
  const [isExpanded, setIsExpanded] = useState(false);
  const [isLoading, setIsLoading] = useState(false);
  const [dateFormatSetting] = useLocalStorage('date-format', 'mdy');
  const dateFormat = dateFormatSetting === 'mdy' ? 'MM/DD' : 'DD/MM';
  const [eventsRefreshInterval, setEventsRefreshInterval] = useLocalStorage(
    'events-refresh-interval',
    0
  );
  const [eventsLimit, setEventsLimit] = useLocalStorage('events-limit', 100);
  const [currentPage, setCurrentPage] = useState(1);

  // Calculate offset based on current page and limit
  const offset = (currentPage - 1) * eventsLimit;
  const totalPages = Math.ceil(totalEvents / eventsLimit);

  const fetchEvents = useCallback(async () => {
    try {
      setIsLoading(true);
      const response = await API.getSystemEvents(eventsLimit, offset);
      if (response && response.events) {
        setEvents(response.events);
        setTotalEvents(response.total || 0);
      }
    } catch (error) {
      console.error('Error fetching system events:', error);
    } finally {
      setIsLoading(false);
    }
  }, [eventsLimit, offset]);

  // Fetch events on mount and when eventsRefreshInterval changes
  useEffect(() => {
    fetchEvents();

    // Set up polling if interval is set and events section is expanded
    if (eventsRefreshInterval > 0 && isExpanded) {
      const interval = setInterval(fetchEvents, eventsRefreshInterval * 1000);
      return () => clearInterval(interval);
    }
  }, [fetchEvents, eventsRefreshInterval, isExpanded]);

  // Reset to first page when limit changes
  useEffect(() => {
    setCurrentPage(1);
  }, [eventsLimit]);

  const getEventIcon = (eventType) => {
    switch (eventType) {
      case 'channel_start':
        return <CirclePlay size={16} />;
      case 'channel_stop':
        return <SquareX size={16} />;
      case 'channel_reconnect':
        return <RefreshCw size={16} />;
      case 'channel_buffering':
        return <Timer size={16} />;
      case 'channel_failover':
        return <HardDriveDownload size={16} />;
      case 'client_connect':
        return <Users size={16} />;
      case 'client_disconnect':
        return <Users size={16} />;
      case 'recording_start':
        return <Video size={16} />;
      case 'recording_end':
        return <Video size={16} />;
      case 'stream_switch':
        return <HardDriveDownload size={16} />;
      case 'm3u_refresh':
        return <RefreshCw size={16} />;
      case 'm3u_download':
        return <Download size={16} />;
      case 'epg_refresh':
        return <RefreshCw size={16} />;
      case 'epg_download':
        return <Download size={16} />;
      default:
        return <Gauge size={16} />;
    }
  };

  const getEventColor = (eventType) => {
    switch (eventType) {
      case 'channel_start':
      case 'client_connect':
      case 'recording_start':
        return 'green';
      case 'channel_reconnect':
        return 'yellow';
      case 'channel_stop':
      case 'client_disconnect':
      case 'recording_end':
        return 'gray';
      case 'channel_buffering':
        return 'yellow';
      case 'channel_failover':
      case 'channel_error':
        return 'orange';
      case 'stream_switch':
        return 'blue';
      case 'm3u_refresh':
      case 'epg_refresh':
        return 'cyan';
      case 'm3u_download':
      case 'epg_download':
        return 'teal';
      default:
        return 'gray';
    }
  };

  return (
    <Card
      shadow="sm"
      padding="sm"
      radius="md"
      withBorder
      style={{
        color: '#fff',
        backgroundColor: '#27272A',
        width: '100%',
        maxWidth: isExpanded ? '100%' : '800px',
        marginLeft: 'auto',
        marginRight: 'auto',
        transition: 'max-width 0.3s ease',
      }}
    >
      <Group justify="space-between" mb={isExpanded ? 'sm' : 0}>
        <Group gap="xs">
          <Gauge size={20} />
          <Title order={4}>System Events</Title>
        </Group>
        <Group gap="xs">
          <NumberInput
            size="xs"
            label="Events Per Page"
            value={eventsLimit}
            onChange={(value) => setEventsLimit(value || 10)}
            min={10}
            max={1000}
            step={10}
            style={{ width: 130 }}
          />
          <Select
            size="xs"
            label="Auto Refresh"
            value={eventsRefreshInterval.toString()}
            onChange={(value) => setEventsRefreshInterval(parseInt(value))}
            data={[
              { value: '0', label: 'Manual' },
              { value: '5', label: '5s' },
              { value: '10', label: '10s' },
              { value: '30', label: '30s' },
              { value: '60', label: '1m' },
            ]}
            style={{ width: 120 }}
          />
          <Button
            size="xs"
            variant="subtle"
            onClick={fetchEvents}
            loading={isLoading}
            style={{ marginTop: 'auto' }}
          >
            Refresh
          </Button>
          <ActionIcon
            variant="subtle"
            onClick={() => setIsExpanded(!isExpanded)}
          >
            <ChevronDown
              size={18}
              style={{
                transform: isExpanded ? 'rotate(180deg)' : 'rotate(0deg)',
                transition: 'transform 0.2s',
              }}
            />
          </ActionIcon>
        </Group>
      </Group>

      {isExpanded && (
        <>
          {totalEvents > eventsLimit && (
            <Group justify="space-between" align="center" mt="sm" mb="xs">
              <Text size="xs" c="dimmed">
                Showing {offset + 1}-
                {Math.min(offset + eventsLimit, totalEvents)} of {totalEvents}
              </Text>
              <Pagination
                total={totalPages}
                value={currentPage}
                onChange={setCurrentPage}
                size="sm"
              />
            </Group>
          )}
          <Stack
            gap="xs"
            mt="sm"
            style={{
              maxHeight: '60vh',
              overflowY: 'auto',
            }}
          >
            {events.length === 0 ? (
              <Text size="sm" c="dimmed" ta="center" py="xl">
                No events recorded yet
              </Text>
            ) : (
              events.map((event) => (
                <Box
                  key={event.id}
                  p="xs"
                  style={{
                    backgroundColor: '#1A1B1E',
                    borderRadius: '4px',
                    borderLeft: `3px solid var(--mantine-color-${getEventColor(event.event_type)}-6)`,
                  }}
                >
                  <Group justify="space-between" wrap="nowrap">
                    <Group gap="xs" style={{ flex: 1, minWidth: 0 }}>
                      <Box c={`${getEventColor(event.event_type)}.6`}>
                        {getEventIcon(event.event_type)}
                      </Box>
                      <Stack gap={2} style={{ flex: 1, minWidth: 0 }}>
                        <Group gap="xs" wrap="nowrap">
                          <Text size="sm" fw={500}>
                            {event.event_type_display || event.event_type}
                          </Text>
                          {event.channel_name && (
                            <Text
                              size="sm"
                              c="dimmed"
                              truncate
                              style={{ maxWidth: '300px' }}
                            >
                              {event.channel_name}
                            </Text>
                          )}
                        </Group>
                        {event.details &&
                          Object.keys(event.details).length > 0 && (
                            <Text size="xs" c="dimmed">
                              {Object.entries(event.details)
                                .filter(
                                  ([key]) =>
                                    !['stream_url', 'new_url'].includes(key)
                                )
                                .map(([key, value]) => `${key}: ${value}`)
                                .join(', ')}
                            </Text>
                          )}
                      </Stack>
                    </Group>
                    <Text size="xs" c="dimmed" style={{ whiteSpace: 'nowrap' }}>
                      {dayjs(event.timestamp).format(`${dateFormat} HH:mm:ss`)}
                    </Text>
                  </Group>
                </Box>
              ))
            )}
          </Stack>
        </>
      )}
    </Card>
  );
};

export default SystemEvents;

@@ -285,7 +285,8 @@ const SettingsPage = () => {
        acc[key] = (value) => {
          const cidrs = value.split(',');
          const ipv4CidrRegex = /^([0-9]{1,3}\.){3}[0-9]{1,3}\/\d+$/;
-         const ipv6CidrRegex = /(?:(?:(?:[A-F0-9]{1,4}:){6}|(?=(?:[A-F0-9]{0,4}:){0,6}(?:[0-9]{1,3}\.){3}[0-9]{1,3}(?![:.\w]))(([0-9A-F]{1,4}:){0,5}|:)((:[0-9A-F]{1,4}){1,5}:|:)|::(?:[A-F0-9]{1,4}:){5})(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)|(?:[A-F0-9]{1,4}:){7}[A-F0-9]{1,4}|(?=(?:[A-F0-9]{0,4}:){0,7}[A-F0-9]{0,4}(?![:.\w]))(([0-9A-F]{1,4}:){1,7}|:)((:[0-9A-F]{1,4}){1,7}|:)|(?:[A-F0-9]{1,4}:){7}:|:(:[A-F0-9]{1,4}){7})(?![:.\w])\/(?:12[0-8]|1[01][0-9]|[1-9]?[0-9])/;
+         const ipv6CidrRegex =
+           /(?:(?:(?:[A-F0-9]{1,4}:){6}|(?=(?:[A-F0-9]{0,4}:){0,6}(?:[0-9]{1,3}\.){3}[0-9]{1,3}(?![:.\w]))(([0-9A-F]{1,4}:){0,5}|:)((:[0-9A-F]{1,4}){1,5}:|:)|::(?:[A-F0-9]{1,4}:){5})(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)|(?:[A-F0-9]{1,4}:){7}[A-F0-9]{1,4}|(?=(?:[A-F0-9]{0,4}:){0,7}[A-F0-9]{0,4}(?![:.\w]))(([0-9A-F]{1,4}:){1,7}|:)((:[0-9A-F]{1,4}){1,7}|:)|(?:[A-F0-9]{1,4}:){7}:|:(:[A-F0-9]{1,4}){7})(?![:.\w])\/(?:12[0-8]|1[01][0-9]|[1-9]?[0-9])/;
          for (const cidr of cidrs) {
            if (cidr.match(ipv4CidrRegex) || cidr.match(ipv6CidrRegex)) {
              continue;

@@ -1093,6 +1094,46 @@ const SettingsPage = () => {
              </Accordion.Panel>
            </Accordion.Item>

+            <Accordion.Item value="system-settings">
+              <Accordion.Control>System Settings</Accordion.Control>
+              <Accordion.Panel>
+                <Stack gap="md">
+                  {generalSettingsSaved && (
+                    <Alert
+                      variant="light"
+                      color="green"
+                      title="Saved Successfully"
+                    />
+                  )}
+                  <Text size="sm" c="dimmed">
+                    Configure how many system events (channel start/stop,
+                    buffering, etc.) to keep in the database. Events are
+                    displayed on the Stats page.
+                  </Text>
+                  <NumberInput
+                    label="Maximum System Events"
+                    description="Number of events to retain (minimum: 10, maximum: 1000)"
+                    value={form.values['max-system-events'] || 100}
+                    onChange={(value) => {
+                      form.setFieldValue('max-system-events', value);
+                    }}
+                    min={10}
+                    max={1000}
+                    step={10}
+                  />
+                  <Flex mih={50} gap="xs" justify="flex-end" align="flex-end">
+                    <Button
+                      onClick={form.onSubmit(onSubmit)}
+                      disabled={form.submitting}
+                      variant="default"
+                    >
+                      Save
+                    </Button>
+                  </Flex>
+                </Stack>
+              </Accordion.Panel>
+            </Accordion.Item>
+
            <Accordion.Item value="user-agents">
              <Accordion.Control>User-Agents</Accordion.Control>
              <Accordion.Panel>

@@ -8,6 +8,7 @@ import {
   Container,
   Flex,
   Group,
+  Pagination,
   Progress,
   SimpleGrid,
   Stack,

@@ -25,9 +26,11 @@ import useLogosStore from '../store/logos';
 import logo from '../images/logo.png';
 import {
   ChevronDown,
+  CirclePlay,
   Gauge,
   HardDriveDownload,
   HardDriveUpload,
+  RefreshCw,
   SquareX,
   Timer,
   Users,

@@ -44,6 +47,7 @@ import { useLocation } from 'react-router-dom';
 import { notifications } from '@mantine/notifications';
 import { CustomTable, useTable } from '../components/tables/CustomTable';
 import useLocalStorage from '../hooks/useLocalStorage';
+import SystemEvents from '../components/SystemEvents';

 dayjs.extend(duration);
 dayjs.extend(relativeTime);

@@ -1545,6 +1549,7 @@ const ChannelsPage = () => {
             display: 'grid',
             gap: '1rem',
             padding: '10px',
+            paddingBottom: '120px',
             gridTemplateColumns: 'repeat(auto-fill, minmax(500px, 1fr))',
           }}
         >

@@ -1583,6 +1588,23 @@ const ChannelsPage = () => {
           })
         )}
       </div>

+      {/* System Events Section - Fixed at bottom */}
+      <Box
+        style={{
+          position: 'fixed',
+          bottom: 0,
+          left: 'var(--app-shell-navbar-width, 0)',
+          right: 0,
+          zIndex: 100,
+          padding: '0 1rem 1rem 1rem',
+          pointerEvents: 'none',
+        }}
+      >
+        <Box style={{ pointerEvents: 'auto' }}>
+          <SystemEvents />
+        </Box>
+      </Box>
     </Box>
   );
 };