From 4c79af1f30274d71c0c9b6f8d068e1081ed2df82 Mon Sep 17 00:00:00 2001
From: SergeantPanda
Date: Wed, 28 May 2025 15:26:42 -0500
Subject: [PATCH 01/25] Log ffmpeg stats

---
 apps/proxy/ts_proxy/stream_manager.py | 78 ++++++++++++++++++++-------
 1 file changed, 59 insertions(+), 19 deletions(-)

diff --git a/apps/proxy/ts_proxy/stream_manager.py b/apps/proxy/ts_proxy/stream_manager.py
index 054de05b..973f86b9 100644
--- a/apps/proxy/ts_proxy/stream_manager.py
+++ b/apps/proxy/ts_proxy/stream_manager.py
@@ -376,26 +376,48 @@ class StreamManager:
             logger.debug(f"Started stderr reader thread for channel {self.channel_id}")
 
     def _read_stderr(self):
-        """Read and log ffmpeg stderr output"""
+        """Read and log ffmpeg stderr output with stats lines combined"""
         try:
-            for error_line in iter(self.transcode_process.stderr.readline, b''):
-                if error_line:
-                    error_line = error_line.decode('utf-8', errors='replace').strip()
-                    try:
-                        # Wrap the logging call in a try-except to prevent crashes due to logging errors
-                        logger.debug(f"Transcode stderr [{self.channel_id}]: {error_line}")
-                    except OSError as e:
-                        # If logging fails, try a simplified log message
-                        if e.errno == 105: # No buffer space available
-                            try:
-                                # Try a much shorter message without the error content
-                                logger.warning(f"Logging error (buffer full) in channel {self.channel_id}")
-                            except:
-                                # If even that fails, we have to silently continue
-                                pass
-                    except Exception:
-                        # Ignore other logging errors to prevent thread crashes
-                        pass
+            buffer = b""
+            in_stats_line = False
+            # Common FFmpeg stats line prefixes for detection
+            stats_prefixes = [b"frame=", b"size=", b"time=", b"bitrate=", b"speed="]
+
+            # Read in small chunks
+            while self.transcode_process and self.transcode_process.stderr:
+                chunk = self.transcode_process.stderr.read(1)
+                if not chunk:  # EOF
+                    break
+
+                buffer += chunk
+
+                # Check if we have a complete line
+                if chunk == b'\n' or chunk == b'\r':
+                    # We have a complete line
+                    if buffer.strip():
+                        line = buffer.decode('utf-8', errors='replace').strip()
+                        # Check if this is a stats line
+                        if any(stat_prefix in line for stat_prefix in [p.decode() for p in stats_prefixes]):
+                            self._log_stderr_content(f"FFmpeg stats: {line}")
+                        else:
+                            self._log_stderr_content(line)
+                    buffer = b""
+                    in_stats_line = False
+                    continue
+
+                # Check if this is the start of a new non-stats line after we were in a stats line
+                if in_stats_line:
+                    # If we see two consecutive newlines or a non-stats-related line,
+                    # consider the stats block complete
+                    if chunk == b'\n' or chunk == b'\r':
+                        in_stats_line = False
+                        if buffer.strip():
+                            self._log_stderr_content(buffer.decode('utf-8', errors='replace').strip())
+                        buffer = b""
+                # Check if this is the start of a new stats line
+                elif any(prefix in buffer for prefix in stats_prefixes):
+                    # We're now in a stats line
+                    in_stats_line = True
         except Exception as e:
             # Catch any other exceptions in the thread to prevent crashes
             try:
@@ -404,6 +426,24 @@ class StreamManager:
                 # Again, if logging fails, continue silently
                 pass
 
+    def _log_stderr_content(self, content):
+        """Helper method to log stderr content with error handling"""
+        try:
+            # Wrap the logging call in a try-except to prevent crashes due to logging errors
+            logger.debug(f"Transcode stderr [{self.channel_id}]: {content}")
+        except OSError as e:
+            # If logging fails, try a simplified log message
+            if e.errno == 105: # No buffer space available
+                try:
+                    # Try a much shorter message without the error content
+                    logger.warning(f"Logging error (buffer full) in channel {self.channel_id}")
+                except:
+                    # If even that fails, we have to silently continue
+                    pass
+        except Exception:
+            # Ignore other logging errors to prevent thread crashes
+            pass
+
     def _establish_http_connection(self):
         """Establish a direct HTTP connection to the stream"""
         try:

From 669120e35acee4fe183a6dca5b3e0fa80813c76d Mon Sep 17 00:00:00 2001
From: Marlon Alkan
Date: Sat, 31 May 2025 01:25:28 +0200
Subject: [PATCH 02/25] apps: output: views.py: exclude M3U endpoint from CSRF
 protection

Excludes the M3U generation view from CSRF protection. This allows IPTV
Smarters apps (I only tested on Android) to access the playlist. Otherwise
a 403 error is returned, because instead of a GET request these apps send
a POST request with an empty body to the endpoint.
---
 apps/output/views.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/apps/output/views.py b/apps/output/views.py
index d550ec8d..680dd288 100644
--- a/apps/output/views.py
+++ b/apps/output/views.py
@@ -3,10 +3,12 @@ from django.urls import reverse
 from apps.channels.models import Channel, ChannelProfile
 from apps.epg.models import ProgramData
 from django.utils import timezone
+from django.views.decorators.csrf import csrf_exempt
 from datetime import datetime, timedelta
 import re
 import html # Add this import for XML escaping
 
+@csrf_exempt
 def generate_m3u(request, profile_name=None):
     """
     Dynamically generate an M3U file from channels.

From e6c30f178f112f2671750c280f07a29d3ade40b4 Mon Sep 17 00:00:00 2001
From: GitHub Actions
Date: Tue, 3 Jun 2025 21:35:26 +0000
Subject: [PATCH 03/25] Release v0.5.2

---
 version.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/version.py b/version.py
index 62c02ebc..193a1ea5 100644
--- a/version.py
+++ b/version.py
@@ -1,5 +1,5 @@
 """
 Dispatcharr version information.
""" -__version__ = '0.5.1' # Follow semantic versioning (MAJOR.MINOR.PATCH) +__version__ = '0.5.2' # Follow semantic versioning (MAJOR.MINOR.PATCH) __timestamp__ = None # Set during CI/CD build process From 1f0e643954b801bae8a6e90a4a165c9f0662206c Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Tue, 3 Jun 2025 19:10:14 -0500 Subject: [PATCH 04/25] Add support for dynamic tvg-id source selection in M3U generation `tvg_id_source` accepts `tvg_id`, `gracenote` or the default if nothing is selected `channel_number` --- apps/output/views.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/apps/output/views.py b/apps/output/views.py index d550ec8d..bbcc9269 100644 --- a/apps/output/views.py +++ b/apps/output/views.py @@ -24,6 +24,10 @@ def generate_m3u(request, profile_name=None): # Check if the request wants to use direct logo URLs instead of cache use_cached_logos = request.GET.get('cachedlogos', 'true').lower() != 'false' + # Get the source to use for tvg-id value + # Options: 'channel_number' (default), 'tvg_id', 'gracenote' + tvg_id_source = request.GET.get('tvg_id_source', 'channel_number').lower() + m3u_content = "#EXTM3U\n" for channel in channels: group_title = channel.channel_group.name if channel.channel_group else "Default" @@ -37,8 +41,15 @@ def generate_m3u(request, profile_name=None): else: formatted_channel_number = "" - # Use formatted channel number for tvg_id to ensure proper matching with EPG - tvg_id = str(formatted_channel_number) if formatted_channel_number != "" else str(channel.id) + # Determine the tvg-id based on the selected source + if tvg_id_source == 'tvg_id' and channel.tvg_id: + tvg_id = channel.tvg_id + elif tvg_id_source == 'gracenote' and channel.tvc_guide_stationid: + tvg_id = channel.tvc_guide_stationid + else: + # Default to channel number (original behavior) + tvg_id = str(formatted_channel_number) if formatted_channel_number != "" else str(channel.id) + tvg_name = channel.name tvg_logo = "" From 5cb2be7c9334af7cc28ed7dbbbd0b3371c302dab Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Tue, 3 Jun 2025 19:37:57 -0500 Subject: [PATCH 05/25] Add support for dynamic tvg-id source selection in EPG generation tvg_id_source accepts tvg_id, gracenote or the default if nothing is selected channel_number --- apps/output/views.py | 53 +++++++++++++++++++++++++++++++------------- 1 file changed, 38 insertions(+), 15 deletions(-) diff --git a/apps/output/views.py b/apps/output/views.py index bbcc9269..eb00270d 100644 --- a/apps/output/views.py +++ b/apps/output/views.py @@ -199,18 +199,33 @@ def generate_epg(request, profile_name=None): else: channels = Channel.objects.all() + # Check if the request wants to use direct logo URLs instead of cache + use_cached_logos = request.GET.get('cachedlogos', 'true').lower() != 'false' + + # Get the source to use for tvg-id value + # Options: 'channel_number' (default), 'tvg_id', 'gracenote' + tvg_id_source = request.GET.get('tvg_id_source', 'channel_number').lower() + # Retrieve all active channels for channel in channels: # Format channel number as integer if it has no decimal component - same as M3U generation if channel.channel_number is not None: if channel.channel_number == int(channel.channel_number): - formatted_channel_number = str(int(channel.channel_number)) + formatted_channel_number = int(channel.channel_number) else: - formatted_channel_number = str(channel.channel_number) + formatted_channel_number = channel.channel_number else: - formatted_channel_number = str(channel.id) - # 
Check if the request wants to use direct logo URLs instead of cache - use_cached_logos = request.GET.get('cachedlogos', 'true').lower() != 'false' + formatted_channel_number = "" + + # Determine the channel ID based on the selected source + if tvg_id_source == 'tvg_id' and channel.tvg_id: + channel_id = channel.tvg_id + elif tvg_id_source == 'gracenote' and channel.tvc_guide_stationid: + channel_id = channel.tvc_guide_stationid + else: + # Default to channel number (original behavior) + channel_id = str(formatted_channel_number) if formatted_channel_number != "" else str(channel.id) + # Add channel logo if available tvg_logo = "" if channel.logo: @@ -226,21 +241,29 @@ def generate_epg(request, profile_name=None): else: tvg_logo = request.build_absolute_uri(reverse('api:channels:logo-cache', args=[channel.logo.id])) display_name = channel.epg_data.name if channel.epg_data else channel.name - xml_lines.append(f' ') + xml_lines.append(f' ') xml_lines.append(f' {html.escape(display_name)}') xml_lines.append(f' ') xml_lines.append(' ') for channel in channels: - # Use the same formatting for channel ID in program entries - if channel.channel_number is not None: - if channel.channel_number == int(channel.channel_number): - formatted_channel_number = str(int(channel.channel_number)) - else: - formatted_channel_number = str(channel.channel_number) + # Use the same channel ID determination for program entries + if tvg_id_source == 'tvg_id' and channel.tvg_id: + channel_id = channel.tvg_id + elif tvg_id_source == 'gracenote' and channel.tvc_guide_stationid: + channel_id = channel.tvc_guide_stationid else: - formatted_channel_number = str(channel.id) + # Get formatted channel number + if channel.channel_number is not None: + if channel.channel_number == int(channel.channel_number): + formatted_channel_number = int(channel.channel_number) + else: + formatted_channel_number = channel.channel_number + else: + formatted_channel_number = "" + # Default to channel number + channel_id = str(formatted_channel_number) if formatted_channel_number != "" else str(channel.id) display_name = channel.epg_data.name if channel.epg_data else channel.name if not channel.epg_data: @@ -249,7 +272,7 @@ def generate_epg(request, profile_name=None): num_days = 1 # Default to 1 days of dummy EPG data program_length_hours = 4 # Default to 4-hour program blocks generate_dummy_epg( - formatted_channel_number, + channel_id, display_name, xml_lines, num_days=num_days, @@ -260,7 +283,7 @@ def generate_epg(request, profile_name=None): for prog in programs: start_str = prog.start_time.strftime("%Y%m%d%H%M%S %z") stop_str = prog.end_time.strftime("%Y%m%d%H%M%S %z") - xml_lines.append(f' ') + xml_lines.append(f' ') xml_lines.append(f' {html.escape(prog.title)}') # Add subtitle if available From 722965b987c498ba66ced2480b36bfb3bc51b539 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Tue, 3 Jun 2025 21:32:24 -0500 Subject: [PATCH 06/25] Replaced old images with ghcr images. 
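
Existing deployments that still reference the old dispatcharr/dispatcharr
tags will need to re-pull the image; with the stock compose file this is
typically (exact workflow may vary per setup):

    docker compose pull
    docker compose up -d
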
--- docker/docker-compose.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 59e21010..d195fbdc 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -1,6 +1,6 @@ services: web: - image: dispatcharr/dispatcharr:alpha-v1 + image: ghcr.io/dispatcharr/dispatcharr:latest container_name: dispatcharr_web ports: - 9191:9191 @@ -32,7 +32,7 @@ services: # capabilities: [gpu] celery: - image: dispatcharr/dispatcharr:alpha-v1 + image: ghcr.io/dispatcharr/dispatcharr:latest container_name: dispatcharr_celery depends_on: - db From e7bf8cbedebb4d72c7edf867e1b198ea3cae653e Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Tue, 3 Jun 2025 22:00:17 -0500 Subject: [PATCH 07/25] Added support for LIVE tag and dd_progrid numbering systems for epg. --- apps/epg/tasks.py | 5 ++++- apps/output/views.py | 7 +++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/apps/epg/tasks.py b/apps/epg/tasks.py index 3c2df895..d3062171 100644 --- a/apps/epg/tasks.py +++ b/apps/epg/tasks.py @@ -1634,6 +1634,9 @@ def extract_custom_properties(prog): elif system == 'onscreen' and ep_num.text: # Just store the raw onscreen format custom_props['onscreen_episode'] = ep_num.text.strip() + elif system == 'dd_progid' and ep_num.text: + # Store the dd_progid format + custom_props['dd_progid'] = ep_num.text.strip() # Extract ratings more efficiently rating_elem = prog.find('rating') @@ -1669,7 +1672,7 @@ def extract_custom_properties(prog): custom_props['icon'] = icon_elem.get('src') # Simpler approach for boolean flags - for kw in ['previously-shown', 'premiere', 'new']: + for kw in ['previously-shown', 'premiere', 'new', 'live']: if prog.find(kw) is not None: custom_props[kw.replace('-', '_')] = True diff --git a/apps/output/views.py b/apps/output/views.py index eb00270d..afe3927a 100644 --- a/apps/output/views.py +++ b/apps/output/views.py @@ -314,6 +314,10 @@ def generate_epg(request, profile_name=None): if 'onscreen_episode' in custom_data: xml_lines.append(f' {html.escape(custom_data["onscreen_episode"])}') + # Handle dd_progid format + if 'dd_progid' in custom_data: + xml_lines.append(f' {html.escape(custom_data["dd_progid"])}') + # Add season and episode numbers in xmltv_ns format if available if 'season' in custom_data and 'episode' in custom_data: season = int(custom_data['season']) - 1 if str(custom_data['season']).isdigit() else 0 @@ -360,6 +364,9 @@ def generate_epg(request, profile_name=None): if custom_data.get('new', False): xml_lines.append(f' ') + if custom_data.get('live', False): + xml_lines.append(f' ') + except Exception as e: xml_lines.append(f' ') From a767f28eb66745e0c713c360314d5e3cbb7bed20 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Wed, 4 Jun 2025 17:06:00 -0500 Subject: [PATCH 08/25] Allow stale stream days to be set to 0 to disable retention completely. --- frontend/src/components/forms/M3U.jsx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/frontend/src/components/forms/M3U.jsx b/frontend/src/components/forms/M3U.jsx index 9affa984..6a2a161a 100644 --- a/frontend/src/components/forms/M3U.jsx +++ b/frontend/src/components/forms/M3U.jsx @@ -225,7 +225,7 @@ const M3U = ({ id="account_type" name="account_type" label="Account Type" - description="Standard for direct M3U URLs, Xtream Codes for panel-based services" + description={<>Standard for direct M3U URLs,
Xtream Codes for panel-based services} data={[ { value: 'STD', @@ -233,7 +233,7 @@ const M3U = ({ }, { value: 'XC', - label: 'XTream Codes', + label: 'Xtream Codes', }, ]} key={form.key('account_type')} @@ -324,7 +324,7 @@ const M3U = ({ /> Date: Wed, 4 Jun 2025 17:16:59 -0500 Subject: [PATCH 09/25] Don't auto populate stale field to 7 days if 0 is set. Closes #123 --- frontend/src/components/forms/M3U.jsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/src/components/forms/M3U.jsx b/frontend/src/components/forms/M3U.jsx index 6a2a161a..24ddd377 100644 --- a/frontend/src/components/forms/M3U.jsx +++ b/frontend/src/components/forms/M3U.jsx @@ -85,7 +85,7 @@ const M3U = ({ account_type: m3uAccount.account_type, username: m3uAccount.username ?? '', password: '', - stale_stream_days: m3uAccount.stale_stream_days || 7, + stale_stream_days: m3uAccount.stale_stream_days !== undefined && m3uAccount.stale_stream_days !== null ? m3uAccount.stale_stream_days : 7, }); if (m3uAccount.account_type == 'XC') { From 5e2757f578156346f9e9d1384393a9f5c106e8ce Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Wed, 4 Jun 2025 17:42:11 -0500 Subject: [PATCH 10/25] Set back to a minimum of 1. Task needs to be updated first or all streams get removed. --- frontend/src/components/forms/M3U.jsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/src/components/forms/M3U.jsx b/frontend/src/components/forms/M3U.jsx index 24ddd377..caa7ae31 100644 --- a/frontend/src/components/forms/M3U.jsx +++ b/frontend/src/components/forms/M3U.jsx @@ -324,7 +324,7 @@ const M3U = ({ /> Date: Thu, 5 Jun 2025 21:26:11 -0500 Subject: [PATCH 11/25] Calculate stale time from start of scan not start of stale cleanup function. --- apps/m3u/tasks.py | 9 +++++---- frontend/src/components/forms/M3U.jsx | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/apps/m3u/tasks.py b/apps/m3u/tasks.py index 9756a1f2..e5159605 100644 --- a/apps/m3u/tasks.py +++ b/apps/m3u/tasks.py @@ -496,7 +496,7 @@ def process_m3u_batch(account_id, batch, groups, hash_keys): return retval -def cleanup_streams(account_id): +def cleanup_streams(account_id, scan_start_time=timezone.now): account = M3UAccount.objects.get(id=account_id, is_active=True) existing_groups = ChannelGroup.objects.filter( m3u_account__m3u_account=account, @@ -505,7 +505,7 @@ def cleanup_streams(account_id): logger.info(f"Found {len(existing_groups)} active groups for M3U account {account_id}") # Calculate cutoff date for stale streams - stale_cutoff = timezone.now() - timezone.timedelta(days=account.stale_stream_days) + stale_cutoff = scan_start_time - timezone.timedelta(days=account.stale_stream_days) logger.info(f"Removing streams not seen since {stale_cutoff} for M3U account {account_id}") # Delete streams that are not in active groups @@ -833,7 +833,8 @@ def refresh_single_m3u_account(account_id): return f"Task already running for account_id={account_id}." 
# Record start time - start_time = time.time() + refresh_start_timestamp = timezone.now() # For the cleanup function + start_time = time.time() # For tracking elapsed time as float streams_created = 0 streams_updated = 0 streams_deleted = 0 @@ -1077,7 +1078,7 @@ def refresh_single_m3u_account(account_id): Stream.objects.filter(id=-1).exists() # This will never find anything but ensures DB sync # Now run cleanup - cleanup_streams(account_id) + cleanup_streams(account_id, refresh_start_timestamp) # Calculate elapsed time elapsed_time = time.time() - start_time diff --git a/frontend/src/components/forms/M3U.jsx b/frontend/src/components/forms/M3U.jsx index caa7ae31..24ddd377 100644 --- a/frontend/src/components/forms/M3U.jsx +++ b/frontend/src/components/forms/M3U.jsx @@ -324,7 +324,7 @@ const M3U = ({ /> Date: Thu, 5 Jun 2025 21:34:14 -0500 Subject: [PATCH 12/25] Properly keep track of removed streams. --- apps/m3u/tasks.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/apps/m3u/tasks.py b/apps/m3u/tasks.py index e5159605..ce46a2ec 100644 --- a/apps/m3u/tasks.py +++ b/apps/m3u/tasks.py @@ -527,8 +527,12 @@ def cleanup_streams(account_id, scan_start_time=timezone.now): streams_to_delete.delete() stale_streams.delete() + total_deleted = deleted_count + stale_count logger.info(f"Cleanup for M3U account {account_id} complete: {deleted_count} streams removed due to group filter, {stale_count} removed as stale") + # Return the total count of deleted streams + return total_deleted + @shared_task def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False): if not acquire_task_lock('refresh_m3u_account_groups', account_id): @@ -1078,7 +1082,7 @@ def refresh_single_m3u_account(account_id): Stream.objects.filter(id=-1).exists() # This will never find anything but ensures DB sync # Now run cleanup - cleanup_streams(account_id, refresh_start_timestamp) + streams_deleted = cleanup_streams(account_id, refresh_start_timestamp) # Calculate elapsed time elapsed_time = time.time() - start_time From 0ec5ffff33a4f732737bea885f865e4a3beffe6e Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Fri, 6 Jun 2025 08:00:19 -0500 Subject: [PATCH 13/25] Fix status message not updating the frontend during M3U refresh. --- frontend/src/WebSocket.jsx | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/frontend/src/WebSocket.jsx b/frontend/src/WebSocket.jsx index 260ec468..aeed8826 100644 --- a/frontend/src/WebSocket.jsx +++ b/frontend/src/WebSocket.jsx @@ -192,9 +192,10 @@ export const WebsocketProvider = ({ children }) => { // Update the playlist status whenever we receive a status update // Not just when progress is 100% or status is pending_setup if (parsedEvent.data.status && parsedEvent.data.account) { - const playlist = playlists.find( - (p) => p.id === parsedEvent.data.account - ); + // Check if playlists is an object with IDs as keys or an array + const playlist = Array.isArray(playlists) + ? 
playlists.find((p) => p.id === parsedEvent.data.account) + : playlists[parsedEvent.data.account]; if (playlist) { // When we receive a "success" status with 100% progress, this is a completed refresh @@ -212,9 +213,18 @@ export const WebsocketProvider = ({ children }) => { parsedEvent.data.progress === 100 ) { updateData.updated_at = new Date().toISOString(); + // Log successful completion for debugging + console.log('M3U refresh completed successfully:', updateData); } updatePlaylist(updateData); + } else { + // Log when playlist can't be found for debugging purposes + console.warn( + `Received update for unknown playlist ID: ${parsedEvent.data.account}`, + Array.isArray(playlists) ? 'playlists is array' : 'playlists is object', + Object.keys(playlists).length + ); } } break; From 343ecfbca602fa255a4d95ed7c7d5972e0cd4bca Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Fri, 6 Jun 2025 12:00:08 -0500 Subject: [PATCH 14/25] Allow for 'days' to be set when using the EPG url which will limit the number of days to output epg data for. --- apps/output/views.py | 34 +++++++++++++++++++++++++++++----- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/apps/output/views.py b/apps/output/views.py index afe3927a..f3237232 100644 --- a/apps/output/views.py +++ b/apps/output/views.py @@ -184,7 +184,7 @@ def generate_epg(request, profile_name=None): Dynamically generate an XMLTV (EPG) file using the new EPGData/ProgramData models. Since the EPG data is stored independently of Channels, we group programmes by their associated EPGData record. - This version does not filter by time, so it includes the entire EPG saved in the DB. + This version filters data based on the 'days' parameter. """ xml_lines = [] xml_lines.append('') @@ -206,6 +206,23 @@ def generate_epg(request, profile_name=None): # Options: 'channel_number' (default), 'tvg_id', 'gracenote' tvg_id_source = request.GET.get('tvg_id_source', 'channel_number').lower() + # Get the number of days for EPG data + try: + # Default to 0 days (everything) for real EPG if not specified + days_param = request.GET.get('days', '0') + num_days = int(days_param) + # Set reasonable limits + num_days = max(0, min(num_days, 365)) # Between 0 and 365 days + except ValueError: + num_days = 0 # Default to all data if invalid value + + # For dummy EPG, use either the specified value or default to 3 days + dummy_days = num_days if num_days > 0 else 3 + + # Calculate cutoff date for EPG data filtering (only if days > 0) + now = timezone.now() + cutoff_date = now + timedelta(days=num_days) if num_days > 0 else None + # Retrieve all active channels for channel in channels: # Format channel number as integer if it has no decimal component - same as M3U generation @@ -268,18 +285,25 @@ def generate_epg(request, profile_name=None): display_name = channel.epg_data.name if channel.epg_data else channel.name if not channel.epg_data: # Use the enhanced dummy EPG generation function with defaults - # These values could be made configurable via settings or request parameters - num_days = 1 # Default to 1 days of dummy EPG data program_length_hours = 4 # Default to 4-hour program blocks generate_dummy_epg( channel_id, display_name, xml_lines, - num_days=num_days, + num_days=dummy_days, # Use dummy_days (3 days by default) program_length_hours=program_length_hours ) else: - programs = channel.epg_data.programs.all() + # For real EPG data - filter only if days parameter was specified + if num_days > 0: + programs = channel.epg_data.programs.filter( + start_time__gte=now, 
+                    start_time__lt=cutoff_date
+                )
+            else:
+                # Return all programs if days=0 or not specified
+                programs = channel.epg_data.programs.all()
+
             for prog in programs:
                 start_str = prog.start_time.strftime("%Y%m%d%H%M%S %z")
                 stop_str = prog.end_time.strftime("%Y%m%d%H%M%S %z")

From 708a269ae58c2615844039b79a6dbe2da05d86a5 Mon Sep 17 00:00:00 2001
From: SergeantPanda
Date: Fri, 6 Jun 2025 12:21:08 -0500
Subject: [PATCH 15/25] Add the ability to pass 'direct=true' to the M3U
 output URL

Doing so uses the stream URL from the provider instead of the Dispatcharr
proxy URL.
---
 apps/output/views.py | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/apps/output/views.py b/apps/output/views.py
index f3237232..cb254d9a 100644
--- a/apps/output/views.py
+++ b/apps/output/views.py
@@ -24,6 +24,9 @@ def generate_m3u(request, profile_name=None):
     # Check if the request wants to use direct logo URLs instead of cache
     use_cached_logos = request.GET.get('cachedlogos', 'true').lower() != 'false'
 
+    # Check if direct stream URLs should be used instead of proxy
+    use_direct_urls = request.GET.get('direct', 'false').lower() == 'true'
+
     # Get the source to use for tvg-id value
     # Options: 'channel_number' (default), 'tvg_id', 'gracenote'
     tvg_id_source = request.GET.get('tvg_id_source', 'channel_number').lower()
@@ -76,10 +79,22 @@ def generate_m3u(request, profile_name=None):
             f'tvg-chno="{formatted_channel_number}" {tvc_guide_stationid}group-title="{group_title}",{channel.name}\n'
         )
 
-        base_url = request.build_absolute_uri('/')[:-1]
-        stream_url = f"{base_url}/proxy/ts/stream/{channel.uuid}"
+        # Determine the stream URL based on the direct parameter
+        if use_direct_urls:
+            # Try to get the first stream's direct URL
+            first_stream = channel.streams.first()
+            if first_stream and first_stream.url:
+                # Use the direct stream URL
+                stream_url = first_stream.url
+            else:
+                # Fall back to proxy URL if no direct URL available
+                base_url = request.build_absolute_uri('/')[:-1]
+                stream_url = f"{base_url}/proxy/ts/stream/{channel.uuid}"
+        else:
+            # Standard behavior - use proxy URL
+            base_url = request.build_absolute_uri('/')[:-1]
+            stream_url = f"{base_url}/proxy/ts/stream/{channel.uuid}"
 
-        #stream_url = request.build_absolute_uri(reverse('output:stream', args=[channel.id]))
         m3u_content += extinf_line + stream_url + "\n"
 
     response = HttpResponse(m3u_content, content_type="audio/x-mpegurl")

From 8ee68a2349830c425b3054de6177bd1b0cb6b32b Mon Sep 17 00:00:00 2001
From: SergeantPanda
Date: Fri, 6 Jun 2025 19:04:19 -0500
Subject: [PATCH 16/25] Deny POST if the body isn't empty

---
 apps/output/views.py | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/apps/output/views.py b/apps/output/views.py
index cb254d9a..68b32a7f 100644
--- a/apps/output/views.py
+++ b/apps/output/views.py
@@ -1,5 +1,7 @@
-from django.http import HttpResponse
+from django.http import HttpResponse, HttpResponseForbidden
 from django.urls import reverse
+from django.views.decorators.csrf import csrf_exempt
+from django.views.decorators.http import require_http_methods
 from apps.channels.models import Channel, ChannelProfile
 from apps.epg.models import ProgramData
 from django.utils import timezone
@@ -7,11 +9,18 @@ from datetime import datetime, timedelta
 import re
 import html # Add this import for XML escaping
 
+@csrf_exempt
+@require_http_methods(["GET", "POST"])
 def generate_m3u(request, profile_name=None):
     """
     Dynamically generate an M3U file from channels.
The stream URL now points to the new stream_view that uses StreamProfile. + Supports both GET and POST methods for compatibility with IPTVSmarters. """ + # Check if this is a POST request with data (which we don't want to allow) + if request.method == "POST" and request.body: + return HttpResponseForbidden("POST requests with content are not allowed") + if profile_name is not None: channel_profile = ChannelProfile.objects.get(name=profile_name) channels = Channel.objects.filter( From 7acc31ec979240b16e31306474cd225b7f6febd0 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Sat, 7 Jun 2025 19:27:59 -0500 Subject: [PATCH 17/25] Allow for tuners as low as 1 instead of 2. Closes #149 --- apps/hdhr/api_views.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/apps/hdhr/api_views.py b/apps/hdhr/api_views.py index 278efc36..0d7b77e0 100644 --- a/apps/hdhr/api_views.py +++ b/apps/hdhr/api_views.py @@ -79,8 +79,8 @@ class DiscoverAPIView(APIView): # Otherwise use the limited profile sum plus custom streams tuner_count = limited_tuners + custom_stream_count - # 5. Ensure minimum of 2 tuners - tuner_count = max(2, tuner_count) + # 5. Ensure minimum of 1 tuners + tuner_count = max(1, tuner_count) logger.debug(f"Calculated tuner count: {tuner_count} (limited profiles: {limited_tuners}, custom streams: {custom_stream_count}, unlimited: {has_unlimited})") From 9d8e011e2c9b61f6e75a61a7c50af3d5cfecdf4e Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Mon, 9 Jun 2025 17:33:59 -0500 Subject: [PATCH 18/25] Store FFmpeg in redis. --- apps/proxy/ts_proxy/constants.py | 7 + apps/proxy/ts_proxy/stream_manager.py | 212 ++++++++++++++++++++------ 2 files changed, 169 insertions(+), 50 deletions(-) diff --git a/apps/proxy/ts_proxy/constants.py b/apps/proxy/ts_proxy/constants.py index 4827b24b..91357bbe 100644 --- a/apps/proxy/ts_proxy/constants.py +++ b/apps/proxy/ts_proxy/constants.py @@ -63,6 +63,13 @@ class ChannelMetadataField: STREAM_SWITCH_TIME = "stream_switch_time" STREAM_SWITCH_REASON = "stream_switch_reason" + # FFmpeg performance metrics + FFMPEG_SPEED = "ffmpeg_speed" + FFMPEG_FPS = "ffmpeg_fps" + ACTUAL_FPS = "actual_fps" + FFMPEG_BITRATE = "ffmpeg_bitrate" + FFMPEG_STATS_UPDATED = "ffmpeg_stats_updated" + # Client metadata fields CONNECTED_AT = "connected_at" LAST_ACTIVE = "last_active" diff --git a/apps/proxy/ts_proxy/stream_manager.py b/apps/proxy/ts_proxy/stream_manager.py index 973f86b9..7ba5f9f8 100644 --- a/apps/proxy/ts_proxy/stream_manager.py +++ b/apps/proxy/ts_proxy/stream_manager.py @@ -7,6 +7,7 @@ import socket import requests import subprocess import gevent # Add this import +import re # Add this import at the top from typing import Optional, List from django.shortcuts import get_object_or_404 from apps.proxy.config import TSConfig as Config @@ -376,73 +377,184 @@ class StreamManager: logger.debug(f"Started stderr reader thread for channel {self.channel_id}") def _read_stderr(self): - """Read and log ffmpeg stderr output with stats lines combined""" + """Read and log ffmpeg stderr output with real-time stats parsing""" try: buffer = b"" - in_stats_line = False + stats_buffer = b"" # Common FFmpeg stats line prefixes for detection stats_prefixes = [b"frame=", b"size=", b"time=", b"bitrate=", b"speed="] # Read in small chunks while self.transcode_process and self.transcode_process.stderr: - chunk = self.transcode_process.stderr.read(1) - if not chunk: # EOF + try: + chunk = self.transcode_process.stderr.read(256) # Smaller chunks for real-time processing + if not 
chunk: + break + + buffer += chunk + + # Check for stats data in the buffer (stats usually start with "frame=") + if b"frame=" in buffer: + # Split buffer at frame= to isolate potential stats + parts = buffer.split(b"frame=") + + # Process any complete lines before the stats + if parts[0]: + line_buffer = parts[0] + while b'\n' in line_buffer or b'\r' in line_buffer: + if b'\n' in line_buffer: + line, line_buffer = line_buffer.split(b'\n', 1) + else: + line, line_buffer = line_buffer.split(b'\r', 1) + + if line.strip(): + self._log_stderr_content(line.decode('utf-8', errors='ignore')) + + # Handle stats data - combine with previous stats buffer + if len(parts) > 1: + stats_buffer = b"frame=" + parts[-1] + + # Look for common stats patterns to determine if we have a complete stats line + # Stats typically contain: frame=X fps=Y q=Z size=A time=B bitrate=C speed=D + if b"speed=" in stats_buffer: + # We likely have a complete or near-complete stats line + try: + stats_text = stats_buffer.decode('utf-8', errors='ignore') + self._parse_ffmpeg_stats(stats_text) + self._log_stderr_content(stats_text) + stats_buffer = b"" # Clear stats buffer after processing + except Exception as e: + logger.debug(f"Error parsing stats: {e}") + + # Keep any remaining parts for next iteration + buffer = b"" + else: + # No stats data, process as regular lines + while b'\n' in buffer or b'\r' in buffer: + if b'\n' in buffer: + line, buffer = buffer.split(b'\n', 1) + else: + line, buffer = buffer.split(b'\r', 1) + + if line.strip(): + self._log_stderr_content(line.decode('utf-8', errors='ignore')) + + # Prevent buffer from growing too large + if len(buffer) > 4096: + # Keep only the last 1024 bytes to preserve potential incomplete data + buffer = buffer[-1024:] + + if len(stats_buffer) > 2048: # Stats lines shouldn't be this long + stats_buffer = b"" + + except Exception as e: + logger.error(f"Error reading stderr: {e}") break - buffer += chunk - - # Check if we have a complete line - if chunk == b'\n' or chunk == b'\r': - # We have a complete line - if buffer.strip(): - line = buffer.decode('utf-8', errors='replace').strip() - # Check if this is a stats line - if any(stat_prefix in line for stat_prefix in [p.decode() for p in stats_prefixes]): - self._log_stderr_content(f"FFmpeg stats: {line}") - else: - self._log_stderr_content(line) - buffer = b"" - in_stats_line = False - continue - - # Check if this is the start of a new non-stats line after we were in a stats line - if in_stats_line: - # If we see two consecutive newlines or a non-stats-related line, - # consider the stats block complete - if chunk == b'\n' or chunk == b'\r': - in_stats_line = False - if buffer.strip(): - self._log_stderr_content(buffer.decode('utf-8', errors='replace').strip()) - buffer = b"" - # Check if this is the start of a new stats line - elif any(prefix in buffer for prefix in stats_prefixes): - # We're now in a stats line - in_stats_line = True except Exception as e: # Catch any other exceptions in the thread to prevent crashes try: - logger.error(f"Error in stderr reader thread: {e}") + logger.error(f"Error in stderr reader thread for channel {self.channel_id}: {e}") except: - # Again, if logging fails, continue silently pass def _log_stderr_content(self, content): - """Helper method to log stderr content with error handling""" + """Log stderr content from FFmpeg with appropriate log levels""" try: - # Wrap the logging call in a try-except to prevent crashes due to logging errors - logger.debug(f"Transcode stderr [{self.channel_id}]: 
{content}") - except OSError as e: - # If logging fails, try a simplified log message - if e.errno == 105: # No buffer space available - try: - # Try a much shorter message without the error content - logger.warning(f"Logging error (buffer full) in channel {self.channel_id}") - except: - # If even that fails, we have to silently continue - pass - except Exception: - # Ignore other logging errors to prevent thread crashes - pass + content = content.strip() + if not content: + return + + # Convert to lowercase for easier matching + content_lower = content.lower() + + # Determine log level based on content + if any(keyword in content_lower for keyword in ['error', 'failed', 'cannot', 'invalid', 'corrupt']): + logger.error(f"FFmpeg stderr: {content}") + elif any(keyword in content_lower for keyword in ['warning', 'deprecated', 'ignoring']): + logger.warning(f"FFmpeg stderr: {content}") + elif content.startswith('frame=') or 'fps=' in content or 'speed=' in content: + # Stats lines - log at debug level to avoid spam + logger.debug(f"FFmpeg stats: {content}") + elif any(keyword in content_lower for keyword in ['input', 'output', 'stream', 'video', 'audio']): + # Stream info - log at info level + logger.info(f"FFmpeg info: {content}") + else: + # Everything else at debug level + logger.debug(f"FFmpeg stderr: {content}") + + except Exception as e: + logger.error(f"Error logging stderr content: {e}") + + def _parse_ffmpeg_stats(self, stats_line): + """Parse FFmpeg stats line and extract speed, fps, and bitrate""" + try: + # Example FFmpeg stats line: + # frame= 1234 fps= 30 q=28.0 size= 2048kB time=00:00:41.33 bitrate= 406.1kbits/s speed=1.02x + + # Extract speed (e.g., "speed=1.02x") + speed_match = re.search(r'speed=\s*([0-9.]+)x?', stats_line) + ffmpeg_speed = float(speed_match.group(1)) if speed_match else None + + # Extract fps (e.g., "fps= 30") + fps_match = re.search(r'fps=\s*([0-9.]+)', stats_line) + ffmpeg_fps = float(fps_match.group(1)) if fps_match else None + + # Extract bitrate (e.g., "bitrate= 406.1kbits/s") + bitrate_match = re.search(r'bitrate=\s*([0-9.]+(?:\.[0-9]+)?)\s*([kmg]?)bits/s', stats_line, re.IGNORECASE) + ffmpeg_bitrate = None + if bitrate_match: + bitrate_value = float(bitrate_match.group(1)) + unit = bitrate_match.group(2).lower() + # Convert to kbps + if unit == 'm': + bitrate_value *= 1000 + elif unit == 'g': + bitrate_value *= 1000000 + # If no unit or 'k', it's already in kbps + ffmpeg_bitrate = bitrate_value + + # Calculate actual FPS + actual_fps = None + if ffmpeg_fps is not None and ffmpeg_speed is not None and ffmpeg_speed > 0: + actual_fps = ffmpeg_fps / ffmpeg_speed + + # Store in Redis if we have valid data + if any(x is not None for x in [ffmpeg_speed, ffmpeg_fps, actual_fps, ffmpeg_bitrate]): + self._update_ffmpeg_stats_in_redis(ffmpeg_speed, ffmpeg_fps, actual_fps, ffmpeg_bitrate) + + logger.debug(f"FFmpeg stats - Speed: {ffmpeg_speed}x, FFmpeg FPS: {ffmpeg_fps}, " + f"Actual FPS: {actual_fps:.1f if actual_fps else 'N/A'}, " + f"Bitrate: {ffmpeg_bitrate:.1f if ffmpeg_bitrate else 'N/A'} kbps") + + except Exception as e: + logger.debug(f"Error parsing FFmpeg stats: {e}") + + def _update_ffmpeg_stats_in_redis(self, speed, fps, actual_fps, bitrate): + """Update FFmpeg performance stats in Redis metadata""" + try: + if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client: + metadata_key = RedisKeys.channel_metadata(self.channel_id) + update_data = { + ChannelMetadataField.FFMPEG_STATS_UPDATED: str(time.time()) + } + + if speed is not None: + 
update_data[ChannelMetadataField.FFMPEG_SPEED] = str(round(speed, 3)) + + if fps is not None: + update_data[ChannelMetadataField.FFMPEG_FPS] = str(round(fps, 1)) + + if actual_fps is not None: + update_data[ChannelMetadataField.ACTUAL_FPS] = str(round(actual_fps, 1)) + + if bitrate is not None: + update_data[ChannelMetadataField.FFMPEG_BITRATE] = str(round(bitrate, 1)) + + self.buffer.redis_client.hset(metadata_key, mapping=update_data) + + except Exception as e: + logger.error(f"Error updating FFmpeg stats in Redis: {e}") def _establish_http_connection(self): """Establish a direct HTTP connection to the stream""" From 7d0c32ef3fd0ab0e8417a8a9f80491f737059651 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Mon, 9 Jun 2025 17:45:00 -0500 Subject: [PATCH 19/25] Break line breaking for stats. --- apps/proxy/ts_proxy/stream_manager.py | 109 +++++++++++++++----------- 1 file changed, 62 insertions(+), 47 deletions(-) diff --git a/apps/proxy/ts_proxy/stream_manager.py b/apps/proxy/ts_proxy/stream_manager.py index 7ba5f9f8..87eb2251 100644 --- a/apps/proxy/ts_proxy/stream_manager.py +++ b/apps/proxy/ts_proxy/stream_manager.py @@ -380,9 +380,7 @@ class StreamManager: """Read and log ffmpeg stderr output with real-time stats parsing""" try: buffer = b"" - stats_buffer = b"" - # Common FFmpeg stats line prefixes for detection - stats_prefixes = [b"frame=", b"size=", b"time=", b"bitrate=", b"speed="] + last_stats_line = b"" # Read in small chunks while self.transcode_process and self.transcode_process.stderr: @@ -393,59 +391,76 @@ class StreamManager: buffer += chunk - # Check for stats data in the buffer (stats usually start with "frame=") - if b"frame=" in buffer: - # Split buffer at frame= to isolate potential stats - parts = buffer.split(b"frame=") + # Look for stats updates (overwrite previous stats with \r) + if b'\r' in buffer and b"frame=" in buffer: + # Split on \r to handle overwriting stats + parts = buffer.split(b'\r') - # Process any complete lines before the stats - if parts[0]: - line_buffer = parts[0] - while b'\n' in line_buffer or b'\r' in line_buffer: - if b'\n' in line_buffer: - line, line_buffer = line_buffer.split(b'\n', 1) + # Process all parts except the last (which might be incomplete) + for i, part in enumerate(parts[:-1]): + if part.strip(): + if part.startswith(b"frame=") or b"frame=" in part: + # This is a stats line - keep it intact + try: + stats_text = part.decode('utf-8', errors='ignore').strip() + if stats_text and "frame=" in stats_text: + # Extract just the stats portion if there's other content + if "frame=" in stats_text: + frame_start = stats_text.find("frame=") + stats_text = stats_text[frame_start:] + + self._parse_ffmpeg_stats(stats_text) + self._log_stderr_content(stats_text) + last_stats_line = part + except Exception as e: + logger.debug(f"Error parsing stats line: {e}") else: - line, line_buffer = line_buffer.split(b'\r', 1) + # Regular content - process line by line + line_content = part + while b'\n' in line_content: + line, line_content = line_content.split(b'\n', 1) + if line.strip(): + self._log_stderr_content(line.decode('utf-8', errors='ignore')) - if line.strip(): - self._log_stderr_content(line.decode('utf-8', errors='ignore')) + # Handle remaining content without newline + if line_content.strip(): + self._log_stderr_content(line_content.decode('utf-8', errors='ignore')) - # Handle stats data - combine with previous stats buffer - if len(parts) > 1: - stats_buffer = b"frame=" + parts[-1] - - # Look for common stats patterns to determine if we 
have a complete stats line - # Stats typically contain: frame=X fps=Y q=Z size=A time=B bitrate=C speed=D - if b"speed=" in stats_buffer: - # We likely have a complete or near-complete stats line - try: - stats_text = stats_buffer.decode('utf-8', errors='ignore') - self._parse_ffmpeg_stats(stats_text) - self._log_stderr_content(stats_text) - stats_buffer = b"" # Clear stats buffer after processing - except Exception as e: - logger.debug(f"Error parsing stats: {e}") - - # Keep any remaining parts for next iteration - buffer = b"" - else: - # No stats data, process as regular lines - while b'\n' in buffer or b'\r' in buffer: - if b'\n' in buffer: - line, buffer = buffer.split(b'\n', 1) - else: - line, buffer = buffer.split(b'\r', 1) + # Keep the last part as it might be incomplete + buffer = parts[-1] + # Handle regular line breaks for non-stats content + elif b'\n' in buffer: + while b'\n' in buffer: + line, buffer = buffer.split(b'\n', 1) if line.strip(): - self._log_stderr_content(line.decode('utf-8', errors='ignore')) + line_text = line.decode('utf-8', errors='ignore').strip() + if line_text and not line_text.startswith("frame="): + self._log_stderr_content(line_text) + + # If we have a potential stats line in buffer without line breaks + elif b"frame=" in buffer and (b"speed=" in buffer or len(buffer) > 200): + # We likely have a complete or substantial stats line + try: + stats_text = buffer.decode('utf-8', errors='ignore').strip() + if "frame=" in stats_text: + # Extract just the stats portion + frame_start = stats_text.find("frame=") + stats_text = stats_text[frame_start:] + + self._parse_ffmpeg_stats(stats_text) + self._log_stderr_content(stats_text) + buffer = b"" # Clear buffer after processing + except Exception as e: + logger.debug(f"Error parsing buffered stats: {e}") # Prevent buffer from growing too large if len(buffer) > 4096: - # Keep only the last 1024 bytes to preserve potential incomplete data - buffer = buffer[-1024:] - - if len(stats_buffer) > 2048: # Stats lines shouldn't be this long - stats_buffer = b"" + # Try to preserve any potential stats line at the end + if b"frame=" in buffer[-1024:]: + buffer = buffer[-1024:] + else: + buffer = buffer[-512:] except Exception as e: logger.error(f"Error reading stderr: {e}") From 7e25be0717fae7459238744dd2957e79f2357dc1 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Mon, 9 Jun 2025 18:57:36 -0500 Subject: [PATCH 20/25] Store video and audio information in redis. 
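
The parsed values land in the existing per-channel metadata hash alongside
the FFmpeg performance stats, so they can be read back with the same redis-py
client the proxy already uses. A minimal sketch (the exact key string comes
from RedisKeys.channel_metadata and is assumed here; sample values are
illustrative):

    import redis

    r = redis.Redis(host="localhost", port=6379, db=0)
    channel_id = "some-channel-uuid"  # hypothetical channel ID

    # Field names match the new ChannelMetadataField constants
    info = r.hgetall(f"ts_proxy:channel:{channel_id}:metadata")  # key format assumed
    # e.g. {b'video_codec': b'h264', b'resolution': b'1280x720',
    #       b'source_fps': b'29.97', b'audio_codec': b'aac',
    #       b'sample_rate': b'48000', b'audio_channels': b'stereo', ...}
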
--- apps/proxy/ts_proxy/constants.py | 18 ++++ apps/proxy/ts_proxy/stream_manager.py | 149 +++++++++++++++++++++++++- 2 files changed, 163 insertions(+), 4 deletions(-) diff --git a/apps/proxy/ts_proxy/constants.py b/apps/proxy/ts_proxy/constants.py index 91357bbe..daaf7bb3 100644 --- a/apps/proxy/ts_proxy/constants.py +++ b/apps/proxy/ts_proxy/constants.py @@ -70,6 +70,24 @@ class ChannelMetadataField: FFMPEG_BITRATE = "ffmpeg_bitrate" FFMPEG_STATS_UPDATED = "ffmpeg_stats_updated" + # Video stream info + VIDEO_CODEC = "video_codec" + RESOLUTION = "resolution" + WIDTH = "width" + HEIGHT = "height" + SOURCE_FPS = "source_fps" + PIXEL_FORMAT = "pixel_format" + VIDEO_BITRATE = "video_bitrate" + + # Audio stream info + AUDIO_CODEC = "audio_codec" + SAMPLE_RATE = "sample_rate" + AUDIO_CHANNELS = "audio_channels" + AUDIO_BITRATE = "audio_bitrate" + + # Stream info timestamp + STREAM_INFO_UPDATED = "stream_info_updated" + # Client metadata fields CONNECTED_AT = "connected_at" LAST_ACTIVE = "last_active" diff --git a/apps/proxy/ts_proxy/stream_manager.py b/apps/proxy/ts_proxy/stream_manager.py index 87eb2251..883d312e 100644 --- a/apps/proxy/ts_proxy/stream_manager.py +++ b/apps/proxy/ts_proxy/stream_manager.py @@ -483,14 +483,21 @@ class StreamManager: # Convert to lowercase for easier matching content_lower = content.lower() + # Check for stream info lines first + if "stream #" in content_lower and ("video:" in content_lower or "audio:" in content_lower): + if "video:" in content_lower: + self._parse_ffmpeg_stream_info(content, stream_type="video") + elif "audio:" in content_lower: + self._parse_ffmpeg_stream_info(content, stream_type="audio") + # Determine log level based on content if any(keyword in content_lower for keyword in ['error', 'failed', 'cannot', 'invalid', 'corrupt']): logger.error(f"FFmpeg stderr: {content}") elif any(keyword in content_lower for keyword in ['warning', 'deprecated', 'ignoring']): logger.warning(f"FFmpeg stderr: {content}") elif content.startswith('frame=') or 'fps=' in content or 'speed=' in content: - # Stats lines - log at debug level to avoid spam - logger.debug(f"FFmpeg stats: {content}") + # Stats lines - log at trace level to avoid spam + logger.trace(f"FFmpeg stats: {content}") elif any(keyword in content_lower for keyword in ['input', 'output', 'stream', 'video', 'audio']): # Stream info - log at info level logger.info(f"FFmpeg info: {content}") @@ -501,6 +508,136 @@ class StreamManager: except Exception as e: logger.error(f"Error logging stderr content: {e}") + def _parse_ffmpeg_stream_info(self, stream_info_line, stream_type="video"): + """Parse FFmpeg stream info line to extract video/audio codec, resolution, and FPS""" + try: + if stream_type == "video": + # Example line: + # Stream #0:0: Video: h264 (Main), yuv420p(tv, progressive), 1280x720 [SAR 1:1 DAR 16:9], q=2-31, 2000 kb/s, 29.97 fps, 90k tbn + + # Extract video codec (e.g., "h264", "mpeg2video", etc.) 
+ codec_match = re.search(r'Video:\s*([a-zA-Z0-9_]+)', stream_info_line) + video_codec = codec_match.group(1) if codec_match else None + + # Extract resolution (e.g., "1280x720") + resolution_match = re.search(r'(\d+)x(\d+)', stream_info_line) + if resolution_match: + width = int(resolution_match.group(1)) + height = int(resolution_match.group(2)) + resolution = f"{width}x{height}" + else: + width = height = resolution = None + + # Extract source FPS (e.g., "29.97 fps") + fps_match = re.search(r'(\d+(?:\.\d+)?)\s*fps', stream_info_line) + source_fps = float(fps_match.group(1)) if fps_match else None + + # Extract pixel format (e.g., "yuv420p") + pixel_format_match = re.search(r'Video:\s*[^,]+,\s*([^,(]+)', stream_info_line) + pixel_format = None + if pixel_format_match: + pf = pixel_format_match.group(1).strip() + # Clean up pixel format (remove extra info in parentheses) + if '(' in pf: + pf = pf.split('(')[0].strip() + pixel_format = pf + + # Extract bitrate if present (e.g., "2000 kb/s") + video_bitrate = None + bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', stream_info_line) + if bitrate_match: + video_bitrate = float(bitrate_match.group(1)) + + # Store in Redis if we have valid data + if any(x is not None for x in [video_codec, resolution, source_fps, pixel_format, video_bitrate]): + self._update_stream_info_in_redis(video_codec, resolution, width, height, source_fps, pixel_format, video_bitrate, None, None, None) + + logger.info(f"Video stream info - Codec: {video_codec}, Resolution: {resolution}, " + f"Source FPS: {source_fps}, Pixel Format: {pixel_format}, " + f"Video Bitrate: {video_bitrate} kb/s") + + elif stream_type == "audio": + # Example line: + # Stream #0:1[0x101]: Audio: aac (LC) ([15][0][0][0] / 0x000F), 48000 Hz, stereo, fltp, 64 kb/s + + # Extract audio codec (e.g., "aac", "mp3", etc.) 
+ codec_match = re.search(r'Audio:\s*([a-zA-Z0-9_]+)', stream_info_line) + audio_codec = codec_match.group(1) if codec_match else None + + # Extract sample rate (e.g., "48000 Hz") + sample_rate_match = re.search(r'(\d+)\s*Hz', stream_info_line) + sample_rate = int(sample_rate_match.group(1)) if sample_rate_match else None + + # Extract channel layout (e.g., "stereo", "5.1", "mono") + # Look for common channel layouts + channel_match = re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', stream_info_line, re.IGNORECASE) + channels = channel_match.group(1) if channel_match else None + + # Extract audio bitrate if present (e.g., "64 kb/s") + audio_bitrate = None + bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', stream_info_line) + if bitrate_match: + audio_bitrate = float(bitrate_match.group(1)) + + # Store in Redis if we have valid data + if any(x is not None for x in [audio_codec, sample_rate, channels, audio_bitrate]): + self._update_stream_info_in_redis(None, None, None, None, None, None, None, audio_codec, sample_rate, channels, audio_bitrate) + + logger.info(f"Audio stream info - Codec: {audio_codec}, Sample Rate: {sample_rate} Hz, " + f"Channels: {channels}, Audio Bitrate: {audio_bitrate} kb/s") + + except Exception as e: + logger.debug(f"Error parsing FFmpeg {stream_type} stream info: {e}") + + def _update_stream_info_in_redis(self, codec, resolution, width, height, fps, pixel_format, video_bitrate, audio_codec=None, sample_rate=None, channels=None, audio_bitrate=None): + """Update stream info in Redis metadata""" + try: + if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client: + metadata_key = RedisKeys.channel_metadata(self.channel_id) + update_data = { + ChannelMetadataField.STREAM_INFO_UPDATED: str(time.time()) + } + + # Video info + if codec is not None: + update_data[ChannelMetadataField.VIDEO_CODEC] = str(codec) + + if resolution is not None: + update_data[ChannelMetadataField.RESOLUTION] = str(resolution) + + if width is not None: + update_data[ChannelMetadataField.WIDTH] = str(width) + + if height is not None: + update_data[ChannelMetadataField.HEIGHT] = str(height) + + if fps is not None: + update_data[ChannelMetadataField.SOURCE_FPS] = str(round(fps, 2)) + + if pixel_format is not None: + update_data[ChannelMetadataField.PIXEL_FORMAT] = str(pixel_format) + + if video_bitrate is not None: + update_data[ChannelMetadataField.VIDEO_BITRATE] = str(round(video_bitrate, 1)) + + # Audio info + if audio_codec is not None: + update_data[ChannelMetadataField.AUDIO_CODEC] = str(audio_codec) + + if sample_rate is not None: + update_data[ChannelMetadataField.SAMPLE_RATE] = str(sample_rate) + + if channels is not None: + update_data[ChannelMetadataField.AUDIO_CHANNELS] = str(channels) + + if audio_bitrate is not None: + update_data[ChannelMetadataField.AUDIO_BITRATE] = str(round(audio_bitrate, 1)) + + self.buffer.redis_client.hset(metadata_key, mapping=update_data) + + except Exception as e: + logger.error(f"Error updating stream info in Redis: {e}") + def _parse_ffmpeg_stats(self, stats_line): """Parse FFmpeg stats line and extract speed, fps, and bitrate""" try: @@ -538,9 +675,13 @@ class StreamManager: if any(x is not None for x in [ffmpeg_speed, ffmpeg_fps, actual_fps, ffmpeg_bitrate]): self._update_ffmpeg_stats_in_redis(ffmpeg_speed, ffmpeg_fps, actual_fps, ffmpeg_bitrate) + # Fix the f-string formatting + actual_fps_str = f"{actual_fps:.1f}" if actual_fps is not None else "N/A" + ffmpeg_bitrate_str = f"{ffmpeg_bitrate:.1f}" if ffmpeg_bitrate is not None else "N/A" 
+ logger.debug(f"FFmpeg stats - Speed: {ffmpeg_speed}x, FFmpeg FPS: {ffmpeg_fps}, " - f"Actual FPS: {actual_fps:.1f if actual_fps else 'N/A'}, " - f"Bitrate: {ffmpeg_bitrate:.1f if ffmpeg_bitrate else 'N/A'} kbps") + f"Actual FPS: {actual_fps_str}, " + f"Bitrate: {ffmpeg_bitrate_str} kbps") except Exception as e: logger.debug(f"Error parsing FFmpeg stats: {e}") From 47500daafaf4e1d90a3afd32b5e6e1dd1eae421d Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Mon, 9 Jun 2025 19:10:52 -0500 Subject: [PATCH 21/25] Moved some functions to channel_service --- .../ts_proxy/services/channel_service.py | 138 ++++++++++++++++++ apps/proxy/ts_proxy/stream_manager.py | 138 +----------------- 2 files changed, 143 insertions(+), 133 deletions(-) diff --git a/apps/proxy/ts_proxy/services/channel_service.py b/apps/proxy/ts_proxy/services/channel_service.py index bd1f2f81..761d56ac 100644 --- a/apps/proxy/ts_proxy/services/channel_service.py +++ b/apps/proxy/ts_proxy/services/channel_service.py @@ -6,6 +6,7 @@ This separates business logic from HTTP handling in views. import logging import time import json +import re from django.shortcuts import get_object_or_404 from apps.channels.models import Channel, Stream from apps.proxy.config import TSConfig as Config @@ -415,6 +416,143 @@ class ChannelService: logger.error(f"Error validating channel state: {e}", exc_info=True) return False, None, None, {"error": f"Exception: {str(e)}"} + @staticmethod + def parse_and_store_stream_info(channel_id, stream_info_line, stream_type="video"): + """Parse FFmpeg stream info line and store in Redis metadata""" + try: + if stream_type == "video": + # Example line: + # Stream #0:0: Video: h264 (Main), yuv420p(tv, progressive), 1280x720 [SAR 1:1 DAR 16:9], q=2-31, 2000 kb/s, 29.97 fps, 90k tbn + + # Extract video codec (e.g., "h264", "mpeg2video", etc.) 
+ codec_match = re.search(r'Video:\s*([a-zA-Z0-9_]+)', stream_info_line) + video_codec = codec_match.group(1) if codec_match else None + + # Extract resolution (e.g., "1280x720") + resolution_match = re.search(r'(\d+)x(\d+)', stream_info_line) + if resolution_match: + width = int(resolution_match.group(1)) + height = int(resolution_match.group(2)) + resolution = f"{width}x{height}" + else: + width = height = resolution = None + + # Extract source FPS (e.g., "29.97 fps") + fps_match = re.search(r'(\d+(?:\.\d+)?)\s*fps', stream_info_line) + source_fps = float(fps_match.group(1)) if fps_match else None + + # Extract pixel format (e.g., "yuv420p") + pixel_format_match = re.search(r'Video:\s*[^,]+,\s*([^,(]+)', stream_info_line) + pixel_format = None + if pixel_format_match: + pf = pixel_format_match.group(1).strip() + # Clean up pixel format (remove extra info in parentheses) + if '(' in pf: + pf = pf.split('(')[0].strip() + pixel_format = pf + + # Extract bitrate if present (e.g., "2000 kb/s") + video_bitrate = None + bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', stream_info_line) + if bitrate_match: + video_bitrate = float(bitrate_match.group(1)) + + # Store in Redis if we have valid data + if any(x is not None for x in [video_codec, resolution, source_fps, pixel_format, video_bitrate]): + ChannelService._update_stream_info_in_redis(channel_id, video_codec, resolution, width, height, source_fps, pixel_format, video_bitrate, None, None, None, None) + + logger.info(f"Video stream info - Codec: {video_codec}, Resolution: {resolution}, " + f"Source FPS: {source_fps}, Pixel Format: {pixel_format}, " + f"Video Bitrate: {video_bitrate} kb/s") + + elif stream_type == "audio": + # Example line: + # Stream #0:1[0x101]: Audio: aac (LC) ([15][0][0][0] / 0x000F), 48000 Hz, stereo, fltp, 64 kb/s + + # Extract audio codec (e.g., "aac", "mp3", etc.) 
+            codec_match = re.search(r'Audio:\s*([a-zA-Z0-9_]+)', stream_info_line)
+            audio_codec = codec_match.group(1) if codec_match else None
+
+            # Extract sample rate (e.g., "48000 Hz")
+            sample_rate_match = re.search(r'(\d+)\s*Hz', stream_info_line)
+            sample_rate = int(sample_rate_match.group(1)) if sample_rate_match else None
+
+            # Extract channel layout (e.g., "stereo", "5.1", "mono")
+            # Look for common channel layouts
+            channel_match = re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', stream_info_line, re.IGNORECASE)
+            channels = channel_match.group(1) if channel_match else None
+
+            # Extract audio bitrate if present (e.g., "64 kb/s")
+            audio_bitrate = None
+            bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', stream_info_line)
+            if bitrate_match:
+                audio_bitrate = float(bitrate_match.group(1))
+
+            # Store in Redis if we have valid data
+            if any(x is not None for x in [audio_codec, sample_rate, channels, audio_bitrate]):
+                ChannelService._update_stream_info_in_redis(channel_id, None, None, None, None, None, None, None, audio_codec, sample_rate, channels, audio_bitrate)
+
+            logger.info(f"Audio stream info - Codec: {audio_codec}, Sample Rate: {sample_rate} Hz, "
+                       f"Channels: {channels}, Audio Bitrate: {audio_bitrate} kb/s")
+
+        except Exception as e:
+            logger.debug(f"Error parsing FFmpeg {stream_type} stream info: {e}")
+
+    @staticmethod
+    def _update_stream_info_in_redis(channel_id, codec, resolution, width, height, fps, pixel_format, video_bitrate, audio_codec=None, sample_rate=None, channels=None, audio_bitrate=None):
+        """Update stream info in Redis metadata"""
+        try:
+            proxy_server = ProxyServer.get_instance()
+            if not proxy_server.redis_client:
+                return False
+
+            metadata_key = RedisKeys.channel_metadata(channel_id)
+            update_data = {
+                ChannelMetadataField.STREAM_INFO_UPDATED: str(time.time())
+            }
+
+            # Video info
+            if codec is not None:
+                update_data[ChannelMetadataField.VIDEO_CODEC] = str(codec)
+
+            if resolution is not None:
+                update_data[ChannelMetadataField.RESOLUTION] = str(resolution)
+
+            if width is not None:
+                update_data[ChannelMetadataField.WIDTH] = str(width)
+
+            if height is not None:
+                update_data[ChannelMetadataField.HEIGHT] = str(height)
+
+            if fps is not None:
+                update_data[ChannelMetadataField.SOURCE_FPS] = str(round(fps, 2))
+
+            if pixel_format is not None:
+                update_data[ChannelMetadataField.PIXEL_FORMAT] = str(pixel_format)
+
+            if video_bitrate is not None:
+                update_data[ChannelMetadataField.VIDEO_BITRATE] = str(round(video_bitrate, 1))
+
+            # Audio info
+            if audio_codec is not None:
+                update_data[ChannelMetadataField.AUDIO_CODEC] = str(audio_codec)
+
+            if sample_rate is not None:
+                update_data[ChannelMetadataField.SAMPLE_RATE] = str(sample_rate)
+
+            if channels is not None:
+                update_data[ChannelMetadataField.AUDIO_CHANNELS] = str(channels)
+
+            if audio_bitrate is not None:
+                update_data[ChannelMetadataField.AUDIO_BITRATE] = str(round(audio_bitrate, 1))
+
+            proxy_server.redis_client.hset(metadata_key, mapping=update_data)
+            return True
+
+        except Exception as e:
+            logger.error(f"Error updating stream info in Redis: {e}")
+            return False
+
     # Helper methods for Redis operations
 
     @staticmethod
diff --git a/apps/proxy/ts_proxy/stream_manager.py b/apps/proxy/ts_proxy/stream_manager.py
index 883d312e..7f81e29e 100644
--- a/apps/proxy/ts_proxy/stream_manager.py
+++ b/apps/proxy/ts_proxy/stream_manager.py
@@ -483,12 +483,13 @@ class StreamManager:
             # Convert to lowercase for easier matching
             content_lower = content.lower()
 
-            # Check for stream info lines first
+            # Check for stream info lines first and delegate to ChannelService
             if "stream #" in content_lower and ("video:" in content_lower or "audio:" in content_lower):
+                from .services.channel_service import ChannelService
                 if "video:" in content_lower:
-                    self._parse_ffmpeg_stream_info(content, stream_type="video")
+                    ChannelService.parse_and_store_stream_info(self.channel_id, content, "video")
                 elif "audio:" in content_lower:
-                    self._parse_ffmpeg_stream_info(content, stream_type="audio")
+                    ChannelService.parse_and_store_stream_info(self.channel_id, content, "audio")
 
             # Determine log level based on content
             if any(keyword in content_lower for keyword in ['error', 'failed', 'cannot', 'invalid', 'corrupt']):
@@ -508,136 +509,6 @@ class StreamManager:
         except Exception as e:
             logger.error(f"Error logging stderr content: {e}")
 
-    def _parse_ffmpeg_stream_info(self, stream_info_line, stream_type="video"):
-        """Parse FFmpeg stream info line to extract video/audio codec, resolution, and FPS"""
-        try:
-            if stream_type == "video":
-                # Example line:
-                # Stream #0:0: Video: h264 (Main), yuv420p(tv, progressive), 1280x720 [SAR 1:1 DAR 16:9], q=2-31, 2000 kb/s, 29.97 fps, 90k tbn
-
-                # Extract video codec (e.g., "h264", "mpeg2video", etc.)
-                codec_match = re.search(r'Video:\s*([a-zA-Z0-9_]+)', stream_info_line)
-                video_codec = codec_match.group(1) if codec_match else None
-
-                # Extract resolution (e.g., "1280x720")
-                resolution_match = re.search(r'(\d+)x(\d+)', stream_info_line)
-                if resolution_match:
-                    width = int(resolution_match.group(1))
-                    height = int(resolution_match.group(2))
-                    resolution = f"{width}x{height}"
-                else:
-                    width = height = resolution = None
-
-                # Extract source FPS (e.g., "29.97 fps")
-                fps_match = re.search(r'(\d+(?:\.\d+)?)\s*fps', stream_info_line)
-                source_fps = float(fps_match.group(1)) if fps_match else None
-
-                # Extract pixel format (e.g., "yuv420p")
-                pixel_format_match = re.search(r'Video:\s*[^,]+,\s*([^,(]+)', stream_info_line)
-                pixel_format = None
-                if pixel_format_match:
-                    pf = pixel_format_match.group(1).strip()
-                    # Clean up pixel format (remove extra info in parentheses)
-                    if '(' in pf:
-                        pf = pf.split('(')[0].strip()
-                    pixel_format = pf
-
-                # Extract bitrate if present (e.g., "2000 kb/s")
-                video_bitrate = None
-                bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', stream_info_line)
-                if bitrate_match:
-                    video_bitrate = float(bitrate_match.group(1))
-
-                # Store in Redis if we have valid data
-                if any(x is not None for x in [video_codec, resolution, source_fps, pixel_format, video_bitrate]):
-                    self._update_stream_info_in_redis(video_codec, resolution, width, height, source_fps, pixel_format, video_bitrate, None, None, None)
-
-                    logger.info(f"Video stream info - Codec: {video_codec}, Resolution: {resolution}, "
-                               f"Source FPS: {source_fps}, Pixel Format: {pixel_format}, "
-                               f"Video Bitrate: {video_bitrate} kb/s")
-
-            elif stream_type == "audio":
-                # Example line:
-                # Stream #0:1[0x101]: Audio: aac (LC) ([15][0][0][0] / 0x000F), 48000 Hz, stereo, fltp, 64 kb/s
-
-                # Extract audio codec (e.g., "aac", "mp3", etc.)
- codec_match = re.search(r'Audio:\s*([a-zA-Z0-9_]+)', stream_info_line) - audio_codec = codec_match.group(1) if codec_match else None - - # Extract sample rate (e.g., "48000 Hz") - sample_rate_match = re.search(r'(\d+)\s*Hz', stream_info_line) - sample_rate = int(sample_rate_match.group(1)) if sample_rate_match else None - - # Extract channel layout (e.g., "stereo", "5.1", "mono") - # Look for common channel layouts - channel_match = re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', stream_info_line, re.IGNORECASE) - channels = channel_match.group(1) if channel_match else None - - # Extract audio bitrate if present (e.g., "64 kb/s") - audio_bitrate = None - bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', stream_info_line) - if bitrate_match: - audio_bitrate = float(bitrate_match.group(1)) - - # Store in Redis if we have valid data - if any(x is not None for x in [audio_codec, sample_rate, channels, audio_bitrate]): - self._update_stream_info_in_redis(None, None, None, None, None, None, None, audio_codec, sample_rate, channels, audio_bitrate) - - logger.info(f"Audio stream info - Codec: {audio_codec}, Sample Rate: {sample_rate} Hz, " - f"Channels: {channels}, Audio Bitrate: {audio_bitrate} kb/s") - - except Exception as e: - logger.debug(f"Error parsing FFmpeg {stream_type} stream info: {e}") - - def _update_stream_info_in_redis(self, codec, resolution, width, height, fps, pixel_format, video_bitrate, audio_codec=None, sample_rate=None, channels=None, audio_bitrate=None): - """Update stream info in Redis metadata""" - try: - if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client: - metadata_key = RedisKeys.channel_metadata(self.channel_id) - update_data = { - ChannelMetadataField.STREAM_INFO_UPDATED: str(time.time()) - } - - # Video info - if codec is not None: - update_data[ChannelMetadataField.VIDEO_CODEC] = str(codec) - - if resolution is not None: - update_data[ChannelMetadataField.RESOLUTION] = str(resolution) - - if width is not None: - update_data[ChannelMetadataField.WIDTH] = str(width) - - if height is not None: - update_data[ChannelMetadataField.HEIGHT] = str(height) - - if fps is not None: - update_data[ChannelMetadataField.SOURCE_FPS] = str(round(fps, 2)) - - if pixel_format is not None: - update_data[ChannelMetadataField.PIXEL_FORMAT] = str(pixel_format) - - if video_bitrate is not None: - update_data[ChannelMetadataField.VIDEO_BITRATE] = str(round(video_bitrate, 1)) - - # Audio info - if audio_codec is not None: - update_data[ChannelMetadataField.AUDIO_CODEC] = str(audio_codec) - - if sample_rate is not None: - update_data[ChannelMetadataField.SAMPLE_RATE] = str(sample_rate) - - if channels is not None: - update_data[ChannelMetadataField.AUDIO_CHANNELS] = str(channels) - - if audio_bitrate is not None: - update_data[ChannelMetadataField.AUDIO_BITRATE] = str(round(audio_bitrate, 1)) - - self.buffer.redis_client.hset(metadata_key, mapping=update_data) - - except Exception as e: - logger.error(f"Error updating stream info in Redis: {e}") - def _parse_ffmpeg_stats(self, stats_line): """Parse FFmpeg stats line and extract speed, fps, and bitrate""" try: @@ -712,6 +583,7 @@ class StreamManager: except Exception as e: logger.error(f"Error updating FFmpeg stats in Redis: {e}") + def _establish_http_connection(self): """Establish a direct HTTP connection to the stream""" try: From 71079aead3f37a2a4e17379d3c93125576d97086 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Mon, 9 Jun 2025 19:36:27 -0500 Subject: [PATCH 22/25] Add ffmpeg stats to channel status api. 
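
Expose the stream info and FFmpeg performance fields that the proxy
writes into each channel's Redis metadata hash (codec, resolution,
source FPS, pixel format, bitrates, speed) through the channel status
API. Every field is optional: it is only included in the response when
the corresponding hash key exists, and each value is decoded from bytes
and cast on the way out.

A possible follow-up, not part of this patch: the repeated
get/decode/cast stanzas could be collapsed into a small helper. A
minimal sketch, assuming metadata is the raw hash returned by
redis_client.hgetall (bytes keys and values); _decode_field is a
hypothetical name, not an existing function:

    # Hypothetical helper, not in the codebase yet:
    def _decode_field(metadata, field, cast=str):
        """Return the decoded, cast value for an optional hash field, or None."""
        raw = metadata.get(field.encode('utf-8'))
        return cast(raw.decode('utf-8')) if raw else None

    # e.g. info['source_fps'] = _decode_field(metadata, ChannelMetadataField.SOURCE_FPS, float)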
--- apps/proxy/ts_proxy/channel_status.py | 67 +++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/apps/proxy/ts_proxy/channel_status.py b/apps/proxy/ts_proxy/channel_status.py index 50e84eec..edb1bf5a 100644 --- a/apps/proxy/ts_proxy/channel_status.py +++ b/apps/proxy/ts_proxy/channel_status.py @@ -264,6 +264,60 @@ class ChannelStatus: 'last_data_age': time.time() - manager.last_data_time } + # Add FFmpeg stream information + video_codec = metadata.get(ChannelMetadataField.VIDEO_CODEC.encode('utf-8')) + if video_codec: + info['video_codec'] = video_codec.decode('utf-8') + + resolution = metadata.get(ChannelMetadataField.RESOLUTION.encode('utf-8')) + if resolution: + info['resolution'] = resolution.decode('utf-8') + + source_fps = metadata.get(ChannelMetadataField.SOURCE_FPS.encode('utf-8')) + if source_fps: + info['source_fps'] = float(source_fps.decode('utf-8')) + + pixel_format = metadata.get(ChannelMetadataField.PIXEL_FORMAT.encode('utf-8')) + if pixel_format: + info['pixel_format'] = pixel_format.decode('utf-8') + + source_bitrate = metadata.get(ChannelMetadataField.SOURCE_BITRATE.encode('utf-8')) + if source_bitrate: + info['source_bitrate'] = float(source_bitrate.decode('utf-8')) + + audio_codec = metadata.get(ChannelMetadataField.AUDIO_CODEC.encode('utf-8')) + if audio_codec: + info['audio_codec'] = audio_codec.decode('utf-8') + + sample_rate = metadata.get(ChannelMetadataField.SAMPLE_RATE.encode('utf-8')) + if sample_rate: + info['sample_rate'] = int(sample_rate.decode('utf-8')) + + audio_channels = metadata.get(ChannelMetadataField.AUDIO_CHANNELS.encode('utf-8')) + if audio_channels: + info['audio_channels'] = audio_channels.decode('utf-8') + + audio_bitrate = metadata.get(ChannelMetadataField.AUDIO_BITRATE.encode('utf-8')) + if audio_bitrate: + info['audio_bitrate'] = float(audio_bitrate.decode('utf-8')) + + # Add FFmpeg performance stats + ffmpeg_speed = metadata.get(ChannelMetadataField.FFMPEG_SPEED.encode('utf-8')) + if ffmpeg_speed: + info['ffmpeg_speed'] = float(ffmpeg_speed.decode('utf-8')) + + ffmpeg_fps = metadata.get(ChannelMetadataField.FFMPEG_FPS.encode('utf-8')) + if ffmpeg_fps: + info['ffmpeg_fps'] = float(ffmpeg_fps.decode('utf-8')) + + actual_fps = metadata.get(ChannelMetadataField.ACTUAL_FPS.encode('utf-8')) + if actual_fps: + info['actual_fps'] = float(actual_fps.decode('utf-8')) + + ffmpeg_bitrate = metadata.get(ChannelMetadataField.FFMPEG_BITRATE.encode('utf-8')) + if ffmpeg_bitrate: + info['ffmpeg_bitrate'] = float(ffmpeg_bitrate.decode('utf-8')) + return info @staticmethod @@ -422,6 +476,19 @@ class ChannelStatus: except ValueError: logger.warning(f"Invalid m3u_profile_id format in Redis: {m3u_profile_id_bytes}") + # Add stream info to basic info as well + video_codec = metadata.get(ChannelMetadataField.VIDEO_CODEC.encode('utf-8')) + if video_codec: + info['video_codec'] = video_codec.decode('utf-8') + + resolution = metadata.get(ChannelMetadataField.RESOLUTION.encode('utf-8')) + if resolution: + info['resolution'] = resolution.decode('utf-8') + + source_fps = metadata.get(ChannelMetadataField.SOURCE_FPS.encode('utf-8')) + if source_fps: + info['source_fps'] = float(source_fps.decode('utf-8')) + return info except Exception as e: logger.error(f"Error getting channel info: {e}", exc_info=True) # Added exc_info for better debugging From 677fbba1ac9dcef940c709f9275ff9f8125705d3 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Mon, 9 Jun 2025 19:42:58 -0500 Subject: [PATCH 23/25] FFmpeg stats added to channel card. 
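
Show the parsed stream information on each channel card in the Stats
page as Mantine Badge components: video codec, resolution, source FPS,
and audio codec. Each badge sits behind a truthiness guard
(channel.video_codec && ...), so it only renders once the backend has
reported that field. One caveat with this pattern: a numeric field that
is 0 would render a literal "0" rather than nothing, so a
value != null guard would be safer for source_fps if a zero value can
ever be reported.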
---
 frontend/src/pages/Stats.jsx | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/frontend/src/pages/Stats.jsx b/frontend/src/pages/Stats.jsx
index fa3250ef..09163a5c 100644
--- a/frontend/src/pages/Stats.jsx
+++ b/frontend/src/pages/Stats.jsx
@@ -14,6 +14,7 @@ import {
   Tooltip,
   useMantineTheme,
   Select,
+  Badge,
 } from '@mantine/core';
 import { MantineReactTable, useMantineReactTable } from 'mantine-react-table';
 import { TableHelper } from '../helpers';
@@ -474,6 +475,30 @@ const ChannelCard = ({ channel, clients, stopClient, stopChannel, logos, channel
         )}
 
+              {/* Add stream information badges */}
+              <Group>
+                {channel.video_codec && (
+                  <Badge>
+                    {channel.video_codec.toUpperCase()}
+                  </Badge>
+                )}
+                {channel.resolution && (
+                  <Badge>
+                    {channel.resolution}
+                  </Badge>
+                )}
+                {channel.source_fps && (
+                  <Badge>
+                    {channel.source_fps} FPS
+                  </Badge>
+                )}
+                {channel.audio_codec && (
+                  <Badge>
+                    {channel.audio_codec.toUpperCase()}
+                  </Badge>
+                )}
+              </Group>
+
 
 
From 0fed65a4787b4ddf403d4ecf16d6eb0f577fb162 Mon Sep 17 00:00:00 2001
From: SergeantPanda
Date: Mon, 9 Jun 2025 19:55:10 -0500
Subject: [PATCH 24/25] Add FFmpeg speed and audio codec information to channel details

---
 apps/proxy/ts_proxy/channel_status.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/apps/proxy/ts_proxy/channel_status.py b/apps/proxy/ts_proxy/channel_status.py
index edb1bf5a..864ddac8 100644
--- a/apps/proxy/ts_proxy/channel_status.py
+++ b/apps/proxy/ts_proxy/channel_status.py
@@ -488,6 +488,12 @@ class ChannelStatus:
             source_fps = metadata.get(ChannelMetadataField.SOURCE_FPS.encode('utf-8'))
             if source_fps:
                 info['source_fps'] = float(source_fps.decode('utf-8'))
+            ffmpeg_speed = metadata.get(ChannelMetadataField.FFMPEG_SPEED.encode('utf-8'))
+            if ffmpeg_speed:
+                info['ffmpeg_speed'] = float(ffmpeg_speed.decode('utf-8'))
+            audio_codec = metadata.get(ChannelMetadataField.AUDIO_CODEC.encode('utf-8'))
+            if audio_codec:
+                info['audio_codec'] = audio_codec.decode('utf-8')
 
             return info
         except Exception as e:

From cd47e762451a41d9605d1293802499464bbdd79c Mon Sep 17 00:00:00 2001
From: SergeantPanda
Date: Mon, 9 Jun 2025 19:56:37 -0500
Subject: [PATCH 25/25] Add FFmpeg speed display and audio codec to channel card

---
 frontend/src/pages/Stats.jsx | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/frontend/src/pages/Stats.jsx b/frontend/src/pages/Stats.jsx
index 09163a5c..ab13c1e7 100644
--- a/frontend/src/pages/Stats.jsx
+++ b/frontend/src/pages/Stats.jsx
@@ -497,6 +497,17 @@ const ChannelCard = ({ channel, clients, stopClient, stopChannel, logos, channel
                     {channel.audio_codec.toUpperCase()}
                   </Badge>
                 )}
+                {channel.ffmpeg_speed && (
+                  <Tooltip label="FFmpeg speed">
+                    <Badge
+                      color={channel.ffmpeg_speed >= 1.0 ? "green" : "red"}
+                    >
+                      {channel.ffmpeg_speed}x
+                    </Badge>
+                  </Tooltip>
+                )}