merged in dev

dekzter 2025-06-10 08:55:14 -04:00
commit a1576bd493
13 changed files with 635 additions and 72 deletions


@@ -1634,6 +1634,9 @@ def extract_custom_properties(prog):
elif system == 'onscreen' and ep_num.text:
# Just store the raw onscreen format
custom_props['onscreen_episode'] = ep_num.text.strip()
elif system == 'dd_progid' and ep_num.text:
# Store the dd_progid format
custom_props['dd_progid'] = ep_num.text.strip()
# Extract ratings more efficiently
rating_elem = prog.find('rating')
@@ -1669,7 +1672,7 @@ def extract_custom_properties(prog):
custom_props['icon'] = icon_elem.get('src')
# Simpler approach for boolean flags
for kw in ['previously-shown', 'premiere', 'new']:
for kw in ['previously-shown', 'premiere', 'new', 'live']:
if prog.find(kw) is not None:
custom_props[kw.replace('-', '_')] = True
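A minimal sketch of the XMLTV input these new branches consume (sample values assumed; dd_progid follows the Schedules Direct program-ID convention, and generate_epg re-emits both fields later in this commit):

import xml.etree.ElementTree as ET

prog = ET.fromstring(
    '<programme>'
    '<episode-num system="dd_progid">EP01234567.0001</episode-num>'
    '<live />'
    '</programme>'
)
ep_num = prog.find('episode-num')
if ep_num is not None and ep_num.get('system') == 'dd_progid' and ep_num.text:
    print(ep_num.text.strip())        # EP01234567.0001 -> custom_props['dd_progid']
print(prog.find('live') is not None)  # True -> custom_props['live'] = True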


@@ -91,8 +91,8 @@ class DiscoverAPIView(APIView):
# Otherwise use the limited profile sum plus custom streams
tuner_count = limited_tuners + custom_stream_count
# 5. Ensure minimum of 2 tuners
tuner_count = max(2, tuner_count)
# 5. Ensure minimum of 1 tuner
tuner_count = max(1, tuner_count)
logger.debug(
f"Calculated tuner count: {tuner_count} (limited profiles: {limited_tuners}, custom streams: {custom_stream_count}, unlimited: {has_unlimited})"


@@ -496,7 +496,7 @@ def process_m3u_batch(account_id, batch, groups, hash_keys):
return retval
def cleanup_streams(account_id):
def cleanup_streams(account_id, scan_start_time=None):  # None sentinel; resolved to timezone.now() at call time
account = M3UAccount.objects.get(id=account_id, is_active=True)
existing_groups = ChannelGroup.objects.filter(
m3u_account__m3u_account=account,
@@ -505,7 +505,7 @@ def cleanup_streams(account_id):
logger.info(f"Found {len(existing_groups)} active groups for M3U account {account_id}")
# Calculate cutoff date for stale streams
stale_cutoff = timezone.now() - timezone.timedelta(days=account.stale_stream_days)
stale_cutoff = (scan_start_time or timezone.now()) - timezone.timedelta(days=account.stale_stream_days)
logger.info(f"Removing streams not seen since {stale_cutoff} for M3U account {account_id}")
# Delete streams that are not in active groups
@@ -527,8 +527,12 @@ def cleanup_streams(account_id):
streams_to_delete.delete()
stale_streams.delete()
total_deleted = deleted_count + stale_count
logger.info(f"Cleanup for M3U account {account_id} complete: {deleted_count} streams removed due to group filter, {stale_count} removed as stale")
# Return the total count of deleted streams
return total_deleted
@shared_task
def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False):
if not acquire_task_lock('refresh_m3u_account_groups', account_id):
@@ -833,7 +837,8 @@ def refresh_single_m3u_account(account_id):
return f"Task already running for account_id={account_id}."
# Record start time
start_time = time.time()
refresh_start_timestamp = timezone.now() # For the cleanup function
start_time = time.time() # For tracking elapsed time as float
streams_created = 0
streams_updated = 0
streams_deleted = 0
@@ -1077,7 +1082,7 @@ def refresh_single_m3u_account(account_id):
Stream.objects.filter(id=-1).exists() # This will never find anything but ensures DB sync
# Now run cleanup
cleanup_streams(account_id)
streams_deleted = cleanup_streams(account_id, refresh_start_timestamp)
# Calculate elapsed time
elapsed_time = time.time() - start_time
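Why cleanup now receives the scan-start timestamp, as a worked sketch (dates and retention illustrative): a cutoff computed at cleanup time drifts forward on long refreshes and can overtake last_seen values stamped early in the scan.

from datetime import datetime, timedelta
T = datetime(2025, 6, 10, 8, 0)                       # scan starts
retention = timedelta(days=7)
old_cutoff = (T + timedelta(minutes=30)) - retention  # computed when cleanup runs
new_cutoff = T - retention                            # fixed at scan start
print(old_cutoff > new_cutoff)  # True: the old window could swallow freshly-seen streams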


@@ -1,8 +1,10 @@
import ipaddress
from django.http import HttpResponse, JsonResponse, Http404
from django.http import HttpResponse, JsonResponse, Http404, HttpResponseForbidden
from rest_framework.response import Response
from django.urls import reverse
from apps.channels.models import Channel, ChannelProfile, ChannelGroup
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from apps.epg.models import ProgramData
from apps.accounts.models import User
from core.models import CoreSettings, NETWORK_ACCESS
@@ -30,11 +32,20 @@ def epg_endpoint(request, profile_name=None, user=None):
return generate_epg(request, profile_name, user)
def generate_m3u(request, profile_name=None, user=None):
@csrf_exempt
@require_http_methods(["GET", "POST"])
def generate_m3u(request, profile_name=None, user=None):
"""
Dynamically generate an M3U file from channels.
The stream URL now points to the new stream_view that uses StreamProfile.
Supports both GET and POST methods for compatibility with IPTVSmarters.
"""
# Check if this is a POST request with data (which we don't want to allow)
if request.method == "POST" and request.body:
return HttpResponseForbidden("POST requests with content are not allowed")
if user is not None:
if user.user_level == 0:
filters = {
@@ -53,6 +64,14 @@ def generate_m3u(request, profile_name=None, user=None):
channels = Channel.objects.filter(user_level__lte=user.user_level).order_by(
"channel_number"
)
if profile_name is not None:
channel_profile = ChannelProfile.objects.get(name=profile_name)
channels = Channel.objects.filter(
channelprofilemembership__channel_profile=channel_profile,
channelprofilemembership__enabled=True
).order_by('channel_number')
else:
if profile_name is not None:
channel_profile = ChannelProfile.objects.get(name=profile_name)
@@ -66,6 +85,13 @@ def generate_m3u(request, profile_name=None, user=None):
# Check if the request wants to use direct logo URLs instead of cache
use_cached_logos = request.GET.get('cachedlogos', 'true').lower() != 'false'
# Check if direct stream URLs should be used instead of proxy
use_direct_urls = request.GET.get('direct', 'false').lower() == 'true'
# Get the source to use for tvg-id value
# Options: 'channel_number' (default), 'tvg_id', 'gracenote'
tvg_id_source = request.GET.get('tvg_id_source', 'channel_number').lower()
m3u_content = "#EXTM3U\n"
for channel in channels:
group_title = channel.channel_group.name if channel.channel_group else "Default"
@@ -79,12 +105,15 @@ def generate_m3u(request, profile_name=None, user=None):
else:
formatted_channel_number = ""
# Use formatted channel number for tvg_id to ensure proper matching with EPG
tvg_id = (
str(formatted_channel_number)
if formatted_channel_number != ""
else str(channel.id)
)
# Determine the tvg-id based on the selected source
if tvg_id_source == 'tvg_id' and channel.tvg_id:
tvg_id = channel.tvg_id
elif tvg_id_source == 'gracenote' and channel.tvc_guide_stationid:
tvg_id = channel.tvc_guide_stationid
else:
# Default to channel number (original behavior)
tvg_id = str(formatted_channel_number) if formatted_channel_number != "" else str(channel.id)
tvg_name = channel.name
tvg_logo = ""
@@ -113,10 +142,22 @@ def generate_m3u(request, profile_name=None, user=None):
f'tvg-chno="{formatted_channel_number}" {tvc_guide_stationid}group-title="{group_title}",{channel.name}\n'
)
base_url = request.build_absolute_uri("/")[:-1]
stream_url = f"{base_url}/proxy/ts/stream/{channel.uuid}"
# Determine the stream URL based on the direct parameter
if use_direct_urls:
# Try to get the first stream's direct URL
first_stream = channel.streams.first()
if first_stream and first_stream.url:
# Use the direct stream URL
stream_url = first_stream.url
else:
# Fall back to proxy URL if no direct URL available
base_url = request.build_absolute_uri('/')[:-1]
stream_url = f"{base_url}/proxy/ts/stream/{channel.uuid}"
else:
# Standard behavior - use proxy URL
base_url = request.build_absolute_uri('/')[:-1]
stream_url = f"{base_url}/proxy/ts/stream/{channel.uuid}"
# stream_url = request.build_absolute_uri(reverse('output:stream', args=[channel.id]))
m3u_content += extinf_line + stream_url + "\n"
response = HttpResponse(m3u_content, content_type="audio/x-mpegurl")
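Hypothetical client usage of the new query parameters (host and endpoint path assumed, not taken from this diff):

import requests

resp = requests.get(
    "http://localhost:9191/output/m3u",
    params={"direct": "true", "tvg_id_source": "gracenote", "cachedlogos": "false"},
)
print(resp.text.splitlines()[0])  # '#EXTM3U'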
@@ -242,7 +283,7 @@ def generate_epg(request, profile_name=None, user=None):
Dynamically generate an XMLTV (EPG) file using the new EPGData/ProgramData models.
Since the EPG data is stored independently of Channels, we group programmes
by their associated EPGData record.
This version does not filter by time, so it includes the entire EPG saved in the DB.
This version filters data based on the 'days' parameter.
"""
xml_lines = []
xml_lines.append('<?xml version="1.0" encoding="UTF-8"?>')
@@ -278,18 +319,50 @@ def generate_epg(request, profile_name=None, user=None):
else:
channels = Channel.objects.all()
# Check if the request wants to use direct logo URLs instead of cache
use_cached_logos = request.GET.get('cachedlogos', 'true').lower() != 'false'
# Get the source to use for tvg-id value
# Options: 'channel_number' (default), 'tvg_id', 'gracenote'
tvg_id_source = request.GET.get('tvg_id_source', 'channel_number').lower()
# Get the number of days for EPG data
try:
# Default to 0 days (everything) for real EPG if not specified
days_param = request.GET.get('days', '0')
num_days = int(days_param)
# Set reasonable limits
num_days = max(0, min(num_days, 365)) # Between 0 and 365 days
except ValueError:
num_days = 0 # Default to all data if invalid value
# For dummy EPG, use either the specified value or default to 3 days
dummy_days = num_days if num_days > 0 else 3
# Calculate cutoff date for EPG data filtering (only if days > 0)
now = timezone.now()
cutoff_date = now + timedelta(days=num_days) if num_days > 0 else None
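# Worked examples of the clamping above: days=400 -> 365 (capped), days=-5 -> 0
# (treated as "all data"), days='abc' -> ValueError -> 0. With days=0 the real
# EPG is unfiltered, while channels without EPG data still get dummy_days=3.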
# Retrieve all active channels
for channel in channels:
# Format channel number as integer if it has no decimal component - same as M3U generation
if channel.channel_number is not None:
if channel.channel_number == int(channel.channel_number):
formatted_channel_number = str(int(channel.channel_number))
formatted_channel_number = int(channel.channel_number)
else:
formatted_channel_number = str(channel.channel_number)
formatted_channel_number = channel.channel_number
else:
formatted_channel_number = str(channel.id)
# Check if the request wants to use direct logo URLs instead of cache
use_cached_logos = request.GET.get('cachedlogos', 'true').lower() != 'false'
formatted_channel_number = ""
# Determine the channel ID based on the selected source
if tvg_id_source == 'tvg_id' and channel.tvg_id:
channel_id = channel.tvg_id
elif tvg_id_source == 'gracenote' and channel.tvc_guide_stationid:
channel_id = channel.tvc_guide_stationid
else:
# Default to channel number (original behavior)
channel_id = str(formatted_channel_number) if formatted_channel_number != "" else str(channel.id)
# Add channel logo if available
tvg_logo = ""
if channel.logo:
@@ -305,44 +378,57 @@ def generate_epg(request, profile_name=None, user=None):
else:
tvg_logo = request.build_absolute_uri(reverse('api:channels:logo-cache', args=[channel.logo.id]))
display_name = channel.epg_data.name if channel.epg_data else channel.name
xml_lines.append(f' <channel id="{formatted_channel_number}">')
xml_lines.append(f' <channel id="{channel_id}">')
xml_lines.append(f' <display-name>{html.escape(display_name)}</display-name>')
xml_lines.append(f' <icon src="{html.escape(tvg_logo)}" />')
xml_lines.append(" </channel>")
for channel in channels:
# Use the same formatting for channel ID in program entries
if channel.channel_number is not None:
if channel.channel_number == int(channel.channel_number):
formatted_channel_number = str(int(channel.channel_number))
else:
formatted_channel_number = str(channel.channel_number)
# Use the same channel ID determination for program entries
if tvg_id_source == 'tvg_id' and channel.tvg_id:
channel_id = channel.tvg_id
elif tvg_id_source == 'gracenote' and channel.tvc_guide_stationid:
channel_id = channel.tvc_guide_stationid
else:
formatted_channel_number = str(channel.id)
# Get formatted channel number
if channel.channel_number is not None:
if channel.channel_number == int(channel.channel_number):
formatted_channel_number = int(channel.channel_number)
else:
formatted_channel_number = channel.channel_number
else:
formatted_channel_number = ""
# Default to channel number
channel_id = str(formatted_channel_number) if formatted_channel_number != "" else str(channel.id)
display_name = channel.epg_data.name if channel.epg_data else channel.name
if not channel.epg_data:
# Use the enhanced dummy EPG generation function with defaults
# These values could be made configurable via settings or request parameters
num_days = 1 # Default to 1 day of dummy EPG data
program_length_hours = 4 # Default to 4-hour program blocks
generate_dummy_epg(
formatted_channel_number,
channel_id,
display_name,
xml_lines,
num_days=num_days,
program_length_hours=program_length_hours,
num_days=dummy_days, # Use dummy_days (3 days by default)
program_length_hours=program_length_hours
)
else:
programs = channel.epg_data.programs.all()
# For real EPG data - filter only if days parameter was specified
if num_days > 0:
programs = channel.epg_data.programs.filter(
start_time__gte=now,
start_time__lt=cutoff_date
)
else:
# Return all programs if days=0 or not specified
programs = channel.epg_data.programs.all()
for prog in programs:
start_str = prog.start_time.strftime("%Y%m%d%H%M%S %z")
stop_str = prog.end_time.strftime("%Y%m%d%H%M%S %z")
xml_lines.append(
f' <programme start="{start_str}" stop="{stop_str}" channel="{formatted_channel_number}">'
)
xml_lines.append(f" <title>{html.escape(prog.title)}</title>")
xml_lines.append(f' <programme start="{start_str}" stop="{stop_str}" channel="{channel_id}">')
xml_lines.append(f' <title>{html.escape(prog.title)}</title>')
# Add subtitle if available
if prog.sub_title:
@@ -383,6 +469,10 @@ def generate_epg(request, profile_name=None, user=None):
f' <episode-num system="onscreen">{html.escape(custom_data["onscreen_episode"])}</episode-num>'
)
# Handle dd_progid format
if 'dd_progid' in custom_data:
xml_lines.append(f' <episode-num system="dd_progid">{html.escape(custom_data["dd_progid"])}</episode-num>')
# Add season and episode numbers in xmltv_ns format if available
if "season" in custom_data and "episode" in custom_data:
season = (
@@ -455,6 +545,9 @@ def generate_epg(request, profile_name=None, user=None):
if custom_data.get("new", False):
xml_lines.append(f" <new />")
if custom_data.get('live', False):
xml_lines.append(f' <live />')
except Exception as e:
xml_lines.append(
f" <!-- Error parsing custom properties: {html.escape(str(e))} -->"


@@ -264,6 +264,60 @@ class ChannelStatus:
'last_data_age': time.time() - manager.last_data_time
}
# Add FFmpeg stream information
video_codec = metadata.get(ChannelMetadataField.VIDEO_CODEC.encode('utf-8'))
if video_codec:
info['video_codec'] = video_codec.decode('utf-8')
resolution = metadata.get(ChannelMetadataField.RESOLUTION.encode('utf-8'))
if resolution:
info['resolution'] = resolution.decode('utf-8')
source_fps = metadata.get(ChannelMetadataField.SOURCE_FPS.encode('utf-8'))
if source_fps:
info['source_fps'] = float(source_fps.decode('utf-8'))
pixel_format = metadata.get(ChannelMetadataField.PIXEL_FORMAT.encode('utf-8'))
if pixel_format:
info['pixel_format'] = pixel_format.decode('utf-8')
source_bitrate = metadata.get(ChannelMetadataField.SOURCE_BITRATE.encode('utf-8'))
if source_bitrate:
info['source_bitrate'] = float(source_bitrate.decode('utf-8'))
audio_codec = metadata.get(ChannelMetadataField.AUDIO_CODEC.encode('utf-8'))
if audio_codec:
info['audio_codec'] = audio_codec.decode('utf-8')
sample_rate = metadata.get(ChannelMetadataField.SAMPLE_RATE.encode('utf-8'))
if sample_rate:
info['sample_rate'] = int(sample_rate.decode('utf-8'))
audio_channels = metadata.get(ChannelMetadataField.AUDIO_CHANNELS.encode('utf-8'))
if audio_channels:
info['audio_channels'] = audio_channels.decode('utf-8')
audio_bitrate = metadata.get(ChannelMetadataField.AUDIO_BITRATE.encode('utf-8'))
if audio_bitrate:
info['audio_bitrate'] = float(audio_bitrate.decode('utf-8'))
# Add FFmpeg performance stats
ffmpeg_speed = metadata.get(ChannelMetadataField.FFMPEG_SPEED.encode('utf-8'))
if ffmpeg_speed:
info['ffmpeg_speed'] = float(ffmpeg_speed.decode('utf-8'))
ffmpeg_fps = metadata.get(ChannelMetadataField.FFMPEG_FPS.encode('utf-8'))
if ffmpeg_fps:
info['ffmpeg_fps'] = float(ffmpeg_fps.decode('utf-8'))
actual_fps = metadata.get(ChannelMetadataField.ACTUAL_FPS.encode('utf-8'))
if actual_fps:
info['actual_fps'] = float(actual_fps.decode('utf-8'))
ffmpeg_bitrate = metadata.get(ChannelMetadataField.FFMPEG_BITRATE.encode('utf-8'))
if ffmpeg_bitrate:
info['ffmpeg_bitrate'] = float(ffmpeg_bitrate.decode('utf-8'))
return info
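# (Illustrative sketch, not part of this commit) The repeated decode/convert
# pattern above could be collapsed into a small helper:
#     def read_meta(metadata, field, cast=str):
#         raw = metadata.get(field.encode('utf-8'))
#         return cast(raw.decode('utf-8')) if raw else None
#     info['source_fps'] = read_meta(metadata, ChannelMetadataField.SOURCE_FPS, float)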
@staticmethod
@@ -422,6 +476,25 @@ class ChannelStatus:
except ValueError:
logger.warning(f"Invalid m3u_profile_id format in Redis: {m3u_profile_id_bytes}")
# Add stream info to basic info as well
video_codec = metadata.get(ChannelMetadataField.VIDEO_CODEC.encode('utf-8'))
if video_codec:
info['video_codec'] = video_codec.decode('utf-8')
resolution = metadata.get(ChannelMetadataField.RESOLUTION.encode('utf-8'))
if resolution:
info['resolution'] = resolution.decode('utf-8')
source_fps = metadata.get(ChannelMetadataField.SOURCE_FPS.encode('utf-8'))
if source_fps:
info['source_fps'] = float(source_fps.decode('utf-8'))
ffmpeg_speed = metadata.get(ChannelMetadataField.FFMPEG_SPEED.encode('utf-8'))
if ffmpeg_speed:
info['ffmpeg_speed'] = float(ffmpeg_speed.decode('utf-8'))
audio_codec = metadata.get(ChannelMetadataField.AUDIO_CODEC.encode('utf-8'))
if audio_codec:
info['audio_codec'] = audio_codec.decode('utf-8')
return info
except Exception as e:
logger.error(f"Error getting channel info: {e}", exc_info=True) # Added exc_info for better debugging


@@ -63,6 +63,31 @@ class ChannelMetadataField:
STREAM_SWITCH_TIME = "stream_switch_time"
STREAM_SWITCH_REASON = "stream_switch_reason"
# FFmpeg performance metrics
FFMPEG_SPEED = "ffmpeg_speed"
FFMPEG_FPS = "ffmpeg_fps"
ACTUAL_FPS = "actual_fps"
FFMPEG_BITRATE = "ffmpeg_bitrate"
FFMPEG_STATS_UPDATED = "ffmpeg_stats_updated"
# Video stream info
VIDEO_CODEC = "video_codec"
RESOLUTION = "resolution"
WIDTH = "width"
HEIGHT = "height"
SOURCE_FPS = "source_fps"
PIXEL_FORMAT = "pixel_format"
VIDEO_BITRATE = "video_bitrate"
# Audio stream info
AUDIO_CODEC = "audio_codec"
SAMPLE_RATE = "sample_rate"
AUDIO_CHANNELS = "audio_channels"
AUDIO_BITRATE = "audio_bitrate"
# Stream info timestamp
STREAM_INFO_UPDATED = "stream_info_updated"
# Client metadata fields
CONNECTED_AT = "connected_at"
LAST_ACTIVE = "last_active"
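# (Illustrative; helper names assumed) These constants are hash fields on a
# channel's Redis metadata key; redis-py returns bytes for both keys and
# values, hence the encode/decode dance in the readers above:
#     meta = redis_client.hgetall(metadata_key)  # {b'ffmpeg_speed': b'1.02', ...}
#     raw = meta.get(ChannelMetadataField.FFMPEG_SPEED.encode('utf-8'))
#     speed = float(raw.decode('utf-8')) if raw else None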


@@ -6,6 +6,7 @@ This separates business logic from HTTP handling in views.
import logging
import time
import json
import re
from django.shortcuts import get_object_or_404
from apps.channels.models import Channel, Stream
from apps.proxy.config import TSConfig as Config
@@ -415,6 +416,143 @@ class ChannelService:
logger.error(f"Error validating channel state: {e}", exc_info=True)
return False, None, None, {"error": f"Exception: {str(e)}"}
@staticmethod
def parse_and_store_stream_info(channel_id, stream_info_line, stream_type="video"):
"""Parse FFmpeg stream info line and store in Redis metadata"""
try:
if stream_type == "video":
# Example line:
# Stream #0:0: Video: h264 (Main), yuv420p(tv, progressive), 1280x720 [SAR 1:1 DAR 16:9], q=2-31, 2000 kb/s, 29.97 fps, 90k tbn
# Extract video codec (e.g., "h264", "mpeg2video", etc.)
codec_match = re.search(r'Video:\s*([a-zA-Z0-9_]+)', stream_info_line)
video_codec = codec_match.group(1) if codec_match else None
# Extract resolution (e.g., "1280x720")
resolution_match = re.search(r'(\d+)x(\d+)', stream_info_line)
if resolution_match:
width = int(resolution_match.group(1))
height = int(resolution_match.group(2))
resolution = f"{width}x{height}"
else:
width = height = resolution = None
# Extract source FPS (e.g., "29.97 fps")
fps_match = re.search(r'(\d+(?:\.\d+)?)\s*fps', stream_info_line)
source_fps = float(fps_match.group(1)) if fps_match else None
# Extract pixel format (e.g., "yuv420p")
pixel_format_match = re.search(r'Video:\s*[^,]+,\s*([^,(]+)', stream_info_line)
pixel_format = None
if pixel_format_match:
pf = pixel_format_match.group(1).strip()
# Clean up pixel format (remove extra info in parentheses)
if '(' in pf:
pf = pf.split('(')[0].strip()
pixel_format = pf
# Extract bitrate if present (e.g., "2000 kb/s")
video_bitrate = None
bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', stream_info_line)
if bitrate_match:
video_bitrate = float(bitrate_match.group(1))
# Store in Redis if we have valid data
if any(x is not None for x in [video_codec, resolution, source_fps, pixel_format, video_bitrate]):
ChannelService._update_stream_info_in_redis(channel_id, video_codec, resolution, width, height, source_fps, pixel_format, video_bitrate, None, None, None, None)
logger.info(f"Video stream info - Codec: {video_codec}, Resolution: {resolution}, "
f"Source FPS: {source_fps}, Pixel Format: {pixel_format}, "
f"Video Bitrate: {video_bitrate} kb/s")
elif stream_type == "audio":
# Example line:
# Stream #0:1[0x101]: Audio: aac (LC) ([15][0][0][0] / 0x000F), 48000 Hz, stereo, fltp, 64 kb/s
# Extract audio codec (e.g., "aac", "mp3", etc.)
codec_match = re.search(r'Audio:\s*([a-zA-Z0-9_]+)', stream_info_line)
audio_codec = codec_match.group(1) if codec_match else None
# Extract sample rate (e.g., "48000 Hz")
sample_rate_match = re.search(r'(\d+)\s*Hz', stream_info_line)
sample_rate = int(sample_rate_match.group(1)) if sample_rate_match else None
# Extract channel layout (e.g., "stereo", "5.1", "mono")
# Look for common channel layouts
channel_match = re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', stream_info_line, re.IGNORECASE)
channels = channel_match.group(1) if channel_match else None
# Extract audio bitrate if present (e.g., "64 kb/s")
audio_bitrate = None
bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', stream_info_line)
if bitrate_match:
audio_bitrate = float(bitrate_match.group(1))
# Store in Redis if we have valid data
if any(x is not None for x in [audio_codec, sample_rate, channels, audio_bitrate]):
ChannelService._update_stream_info_in_redis(channel_id, None, None, None, None, None, None, None, audio_codec, sample_rate, channels, audio_bitrate)
logger.info(f"Audio stream info - Codec: {audio_codec}, Sample Rate: {sample_rate} Hz, "
f"Channels: {channels}, Audio Bitrate: {audio_bitrate} kb/s")
except Exception as e:
logger.debug(f"Error parsing FFmpeg {stream_type} stream info: {e}")
@staticmethod
def _update_stream_info_in_redis(channel_id, codec, resolution, width, height, fps, pixel_format, video_bitrate, audio_codec=None, sample_rate=None, channels=None, audio_bitrate=None):
"""Update stream info in Redis metadata"""
try:
proxy_server = ProxyServer.get_instance()
if not proxy_server.redis_client:
return False
metadata_key = RedisKeys.channel_metadata(channel_id)
update_data = {
ChannelMetadataField.STREAM_INFO_UPDATED: str(time.time())
}
# Video info
if codec is not None:
update_data[ChannelMetadataField.VIDEO_CODEC] = str(codec)
if resolution is not None:
update_data[ChannelMetadataField.RESOLUTION] = str(resolution)
if width is not None:
update_data[ChannelMetadataField.WIDTH] = str(width)
if height is not None:
update_data[ChannelMetadataField.HEIGHT] = str(height)
if fps is not None:
update_data[ChannelMetadataField.SOURCE_FPS] = str(round(fps, 2))
if pixel_format is not None:
update_data[ChannelMetadataField.PIXEL_FORMAT] = str(pixel_format)
if video_bitrate is not None:
update_data[ChannelMetadataField.VIDEO_BITRATE] = str(round(video_bitrate, 1))
# Audio info
if audio_codec is not None:
update_data[ChannelMetadataField.AUDIO_CODEC] = str(audio_codec)
if sample_rate is not None:
update_data[ChannelMetadataField.SAMPLE_RATE] = str(sample_rate)
if channels is not None:
update_data[ChannelMetadataField.AUDIO_CHANNELS] = str(channels)
if audio_bitrate is not None:
update_data[ChannelMetadataField.AUDIO_BITRATE] = str(round(audio_bitrate, 1))
proxy_server.redis_client.hset(metadata_key, mapping=update_data)
return True
except Exception as e:
logger.error(f"Error updating stream info in Redis: {e}")
return False
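# (Illustrative, not part of the commit) The video regexes applied to the
# sample line from the docstring above:
#     line = 'Stream #0:0: Video: h264 (Main), yuv420p(tv, progressive), 1280x720 [SAR 1:1 DAR 16:9], q=2-31, 2000 kb/s, 29.97 fps, 90k tbn'
#     re.search(r'Video:\s*([a-zA-Z0-9_]+)', line).group(1)  -> 'h264'
#     re.search(r'(\d+)x(\d+)', line).groups()               -> ('1280', '720')
#     re.search(r'(\d+(?:\.\d+)?)\s*fps', line).group(1)     -> '29.97'
#     re.search(r'(\d+(?:\.\d+)?)\s*kb/s', line).group(1)    -> '2000'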
# Helper methods for Redis operations
@staticmethod


@@ -7,6 +7,7 @@ import socket
import requests
import subprocess
import gevent # Add this import
import re # Add this import at the top
from typing import Optional, List
from django.shortcuts import get_object_or_404
from apps.proxy.config import TSConfig as Config
@@ -376,34 +377,213 @@ class StreamManager:
logger.debug(f"Started stderr reader thread for channel {self.channel_id}")
def _read_stderr(self):
"""Read and log ffmpeg stderr output"""
"""Read and log ffmpeg stderr output with real-time stats parsing"""
try:
for error_line in iter(self.transcode_process.stderr.readline, b''):
if error_line:
error_line = error_line.decode('utf-8', errors='replace').strip()
try:
# Wrap the logging call in a try-except to prevent crashes due to logging errors
logger.debug(f"Transcode stderr [{self.channel_id}]: {error_line}")
except OSError as e:
# If logging fails, try a simplified log message
if e.errno == 105: # No buffer space available
try:
# Try a much shorter message without the error content
logger.warning(f"Logging error (buffer full) in channel {self.channel_id}")
except:
# If even that fails, we have to silently continue
pass
except Exception:
# Ignore other logging errors to prevent thread crashes
pass
buffer = b""
last_stats_line = b""
# Read in small chunks
while self.transcode_process and self.transcode_process.stderr:
try:
chunk = self.transcode_process.stderr.read(256) # Smaller chunks for real-time processing
if not chunk:
break
buffer += chunk
# Look for stats updates (overwrite previous stats with \r)
if b'\r' in buffer and b"frame=" in buffer:
# Split on \r to handle overwriting stats
parts = buffer.split(b'\r')
# Process all parts except the last (which might be incomplete)
for i, part in enumerate(parts[:-1]):
if part.strip():
if part.startswith(b"frame=") or b"frame=" in part:
# This is a stats line - keep it intact
try:
stats_text = part.decode('utf-8', errors='ignore').strip()
if stats_text and "frame=" in stats_text:
# Extract just the stats portion if there's other content
if "frame=" in stats_text:
frame_start = stats_text.find("frame=")
stats_text = stats_text[frame_start:]
self._parse_ffmpeg_stats(stats_text)
self._log_stderr_content(stats_text)
last_stats_line = part
except Exception as e:
logger.debug(f"Error parsing stats line: {e}")
else:
# Regular content - process line by line
line_content = part
while b'\n' in line_content:
line, line_content = line_content.split(b'\n', 1)
if line.strip():
self._log_stderr_content(line.decode('utf-8', errors='ignore'))
# Handle remaining content without newline
if line_content.strip():
self._log_stderr_content(line_content.decode('utf-8', errors='ignore'))
# Keep the last part as it might be incomplete
buffer = parts[-1]
# Handle regular line breaks for non-stats content
elif b'\n' in buffer:
while b'\n' in buffer:
line, buffer = buffer.split(b'\n', 1)
if line.strip():
line_text = line.decode('utf-8', errors='ignore').strip()
if line_text and not line_text.startswith("frame="):
self._log_stderr_content(line_text)
# If we have a potential stats line in buffer without line breaks
elif b"frame=" in buffer and (b"speed=" in buffer or len(buffer) > 200):
# We likely have a complete or substantial stats line
try:
stats_text = buffer.decode('utf-8', errors='ignore').strip()
if "frame=" in stats_text:
# Extract just the stats portion
frame_start = stats_text.find("frame=")
stats_text = stats_text[frame_start:]
self._parse_ffmpeg_stats(stats_text)
self._log_stderr_content(stats_text)
buffer = b"" # Clear buffer after processing
except Exception as e:
logger.debug(f"Error parsing buffered stats: {e}")
# Prevent buffer from growing too large
if len(buffer) > 4096:
# Try to preserve any potential stats line at the end
if b"frame=" in buffer[-1024:]:
buffer = buffer[-1024:]
else:
buffer = buffer[-512:]
except Exception as e:
logger.error(f"Error reading stderr: {e}")
break
except Exception as e:
# Catch any other exceptions in the thread to prevent crashes
try:
logger.error(f"Error in stderr reader thread: {e}")
logger.error(f"Error in stderr reader thread for channel {self.channel_id}: {e}")
except:
# Again, if logging fails, continue silently
pass
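# (Illustrative) FFmpeg redraws its progress line in place with '\r' instead of
# emitting newlines, so a raw stderr chunk typically looks like:
#     b'frame=  100 fps= 25 ... speed=1.0x\rframe=  200 fps= 25 ... speed=1.01x\r'
# which is why the reader splits on b'\r' and treats each complete part
# containing b'frame=' as one stats snapshot.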
def _log_stderr_content(self, content):
"""Log stderr content from FFmpeg with appropriate log levels"""
try:
content = content.strip()
if not content:
return
# Convert to lowercase for easier matching
content_lower = content.lower()
# Check for stream info lines first and delegate to ChannelService
if "stream #" in content_lower and ("video:" in content_lower or "audio:" in content_lower):
from .services.channel_service import ChannelService
if "video:" in content_lower:
ChannelService.parse_and_store_stream_info(self.channel_id, content, "video")
elif "audio:" in content_lower:
ChannelService.parse_and_store_stream_info(self.channel_id, content, "audio")
# Determine log level based on content
if any(keyword in content_lower for keyword in ['error', 'failed', 'cannot', 'invalid', 'corrupt']):
logger.error(f"FFmpeg stderr: {content}")
elif any(keyword in content_lower for keyword in ['warning', 'deprecated', 'ignoring']):
logger.warning(f"FFmpeg stderr: {content}")
elif content.startswith('frame=') or 'fps=' in content or 'speed=' in content:
# Stats lines - log at trace level to avoid spam
logger.trace(f"FFmpeg stats: {content}")
elif any(keyword in content_lower for keyword in ['input', 'output', 'stream', 'video', 'audio']):
# Stream info - log at info level
logger.info(f"FFmpeg info: {content}")
else:
# Everything else at debug level
logger.debug(f"FFmpeg stderr: {content}")
except Exception as e:
logger.error(f"Error logging stderr content: {e}")
def _parse_ffmpeg_stats(self, stats_line):
"""Parse FFmpeg stats line and extract speed, fps, and bitrate"""
try:
# Example FFmpeg stats line:
# frame= 1234 fps= 30 q=28.0 size= 2048kB time=00:00:41.33 bitrate= 406.1kbits/s speed=1.02x
# Extract speed (e.g., "speed=1.02x")
speed_match = re.search(r'speed=\s*([0-9.]+)x?', stats_line)
ffmpeg_speed = float(speed_match.group(1)) if speed_match else None
# Extract fps (e.g., "fps= 30")
fps_match = re.search(r'fps=\s*([0-9.]+)', stats_line)
ffmpeg_fps = float(fps_match.group(1)) if fps_match else None
# Extract bitrate (e.g., "bitrate= 406.1kbits/s")
bitrate_match = re.search(r'bitrate=\s*([0-9.]+(?:\.[0-9]+)?)\s*([kmg]?)bits/s', stats_line, re.IGNORECASE)
ffmpeg_bitrate = None
if bitrate_match:
bitrate_value = float(bitrate_match.group(1))
unit = bitrate_match.group(2).lower()
# Convert to kbps
if unit == 'm':
bitrate_value *= 1000
elif unit == 'g':
bitrate_value *= 1000000
# If no unit or 'k', it's already in kbps
ffmpeg_bitrate = bitrate_value
# Calculate actual FPS
actual_fps = None
if ffmpeg_fps is not None and ffmpeg_speed is not None and ffmpeg_speed > 0:
actual_fps = ffmpeg_fps / ffmpeg_speed
# Store in Redis if we have valid data
if any(x is not None for x in [ffmpeg_speed, ffmpeg_fps, actual_fps, ffmpeg_bitrate]):
self._update_ffmpeg_stats_in_redis(ffmpeg_speed, ffmpeg_fps, actual_fps, ffmpeg_bitrate)
# Fix the f-string formatting
actual_fps_str = f"{actual_fps:.1f}" if actual_fps is not None else "N/A"
ffmpeg_bitrate_str = f"{ffmpeg_bitrate:.1f}" if ffmpeg_bitrate is not None else "N/A"
logger.debug(f"FFmpeg stats - Speed: {ffmpeg_speed}x, FFmpeg FPS: {ffmpeg_fps}, "
f"Actual FPS: {actual_fps_str}, "
f"Bitrate: {ffmpeg_bitrate_str} kbps")
except Exception as e:
logger.debug(f"Error parsing FFmpeg stats: {e}")
def _update_ffmpeg_stats_in_redis(self, speed, fps, actual_fps, bitrate):
"""Update FFmpeg performance stats in Redis metadata"""
try:
if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client:
metadata_key = RedisKeys.channel_metadata(self.channel_id)
update_data = {
ChannelMetadataField.FFMPEG_STATS_UPDATED: str(time.time())
}
if speed is not None:
update_data[ChannelMetadataField.FFMPEG_SPEED] = str(round(speed, 3))
if fps is not None:
update_data[ChannelMetadataField.FFMPEG_FPS] = str(round(fps, 1))
if actual_fps is not None:
update_data[ChannelMetadataField.ACTUAL_FPS] = str(round(actual_fps, 1))
if bitrate is not None:
update_data[ChannelMetadataField.FFMPEG_BITRATE] = str(round(bitrate, 1))
self.buffer.redis_client.hset(metadata_key, mapping=update_data)
except Exception as e:
logger.error(f"Error updating FFmpeg stats in Redis: {e}")
def _establish_http_connection(self):
"""Establish a direct HTTP connection to the stream"""
try:


@@ -1,6 +1,6 @@
services:
web:
image: dispatcharr/dispatcharr:alpha-v1
image: ghcr.io/dispatcharr/dispatcharr:latest
container_name: dispatcharr_web
ports:
- 9191:9191
@@ -32,7 +32,7 @@ services:
# capabilities: [gpu]
celery:
image: dispatcharr/dispatcharr:alpha-v1
image: ghcr.io/dispatcharr/dispatcharr:latest
container_name: dispatcharr_celery
depends_on:
- db
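Note on the image change: both services move from the Docker Hub alpha-v1 tag to ghcr.io/dispatcharr/dispatcharr:latest, so existing deployments typically need a docker compose pull followed by docker compose up -d to switch registries (standard Compose workflow, not shown in this diff).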


@@ -192,9 +192,10 @@ export const WebsocketProvider = ({ children }) => {
// Update the playlist status whenever we receive a status update
// Not just when progress is 100% or status is pending_setup
if (parsedEvent.data.status && parsedEvent.data.account) {
const playlist = playlists.find(
(p) => p.id === parsedEvent.data.account
);
// Check if playlists is an object with IDs as keys or an array
const playlist = Array.isArray(playlists)
? playlists.find((p) => p.id === parsedEvent.data.account)
: playlists[parsedEvent.data.account];
if (playlist) {
// When we receive a "success" status with 100% progress, this is a completed refresh
@@ -212,9 +213,18 @@ export const WebsocketProvider = ({ children }) => {
parsedEvent.data.progress === 100
) {
updateData.updated_at = new Date().toISOString();
// Log successful completion for debugging
console.log('M3U refresh completed successfully:', updateData);
}
updatePlaylist(updateData);
} else {
// Log when playlist can't be found for debugging purposes
console.warn(
`Received update for unknown playlist ID: ${parsedEvent.data.account}`,
Array.isArray(playlists) ? 'playlists is array' : 'playlists is object',
Object.keys(playlists).length
);
}
}
break;


@@ -85,7 +85,7 @@ const M3U = ({
account_type: m3uAccount.account_type,
username: m3uAccount.username ?? '',
password: '',
stale_stream_days: m3uAccount.stale_stream_days || 7,
stale_stream_days: m3uAccount.stale_stream_days !== undefined && m3uAccount.stale_stream_days !== null ? m3uAccount.stale_stream_days : 7,
});
if (m3uAccount.account_type == 'XC') {
@@ -225,7 +225,7 @@ const M3U = ({
id="account_type"
name="account_type"
label="Account Type"
description="Standard for direct M3U URLs, Xtream Codes for panel-based services"
description={<>Standard for direct M3U URLs, <br />Xtream Codes for panel-based services</>}
data={[
{
value: 'STD',
@@ -233,7 +233,7 @@ const M3U = ({
},
{
value: 'XC',
label: 'XTream Codes',
label: 'Xtream Codes',
},
]}
key={form.key('account_type')}
@@ -324,7 +324,7 @@ const M3U = ({
/>
<NumberInput
min={1}
min={0}
max={365}
label="Stale Stream Retention (days)"
description="Streams not seen for this many days will be removed"


@@ -14,6 +14,7 @@ import {
Tooltip,
useMantineTheme,
Select,
Badge,
} from '@mantine/core';
import { TableHelper } from '../helpers';
import API from '../api';
@@ -567,6 +568,41 @@ const ChannelCard = ({
</Tooltip>
)}
{/* Add stream information badges */}
<Group gap="xs" mt="xs">
{channel.video_codec && (
<Badge size="sm" variant="light" color="blue">
{channel.video_codec.toUpperCase()}
</Badge>
)}
{channel.resolution && (
<Badge size="sm" variant="light" color="green">
{channel.resolution}
</Badge>
)}
{channel.source_fps && (
<Badge size="sm" variant="light" color="orange">
{channel.source_fps} FPS
</Badge>
)}
{channel.audio_codec && (
<Badge size="sm" variant="light" color="purple">
{channel.audio_codec.toUpperCase()}
</Badge>
)}
{channel.ffmpeg_speed && (
<Tooltip label={`Speed: ${channel.ffmpeg_speed}x realtime`}>
<Badge
size="sm"
variant="light"
color={parseFloat(channel.ffmpeg_speed) >= 1.0 ? "green" : "red"}
>
{channel.ffmpeg_speed}x
</Badge>
</Tooltip>
)}
</Group>
<Group justify="space-between">
<Group gap={4}>
<Tooltip


@@ -1,5 +1,5 @@
"""
Dispatcharr version information.
"""
__version__ = '0.5.1' # Follow semantic versioning (MAJOR.MINOR.PATCH)
__version__ = '0.5.2' # Follow semantic versioning (MAJOR.MINOR.PATCH)
__timestamp__ = None # Set during CI/CD build process