diff --git a/apps/channels/migrations/0029_backfill_custom_stream_hashes.py b/apps/channels/migrations/0029_backfill_custom_stream_hashes.py new file mode 100644 index 00000000..3e270be2 --- /dev/null +++ b/apps/channels/migrations/0029_backfill_custom_stream_hashes.py @@ -0,0 +1,54 @@ +# Generated migration to backfill stream_hash for existing custom streams + +from django.db import migrations +import hashlib + + +def backfill_custom_stream_hashes(apps, schema_editor): + """ + Generate stream_hash for all custom streams that don't have one. + Uses stream ID to create a stable hash that won't change when name/url is edited. + """ + Stream = apps.get_model('dispatcharr_channels', 'Stream') + + custom_streams_without_hash = Stream.objects.filter( + is_custom=True, + stream_hash__isnull=True + ) + + updated_count = 0 + for stream in custom_streams_without_hash: + # Generate a stable hash using the stream's ID + # This ensures the hash never changes even if name/url is edited + unique_string = f"custom_stream_{stream.id}" + stream.stream_hash = hashlib.sha256(unique_string.encode()).hexdigest() + stream.save(update_fields=['stream_hash']) + updated_count += 1 + + if updated_count > 0: + print(f"Backfilled stream_hash for {updated_count} custom streams") + else: + print("No custom streams needed stream_hash backfill") + + +def reverse_backfill(apps, schema_editor): + """ + Reverse migration - clear stream_hash for custom streams. + Note: This will break preview functionality for custom streams. + """ + Stream = apps.get_model('dispatcharr_channels', 'Stream') + + custom_streams = Stream.objects.filter(is_custom=True) + count = custom_streams.update(stream_hash=None) + print(f"Cleared stream_hash for {count} custom streams") + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0028_channel_created_at_channel_updated_at'), + ] + + operations = [ + migrations.RunPython(backfill_custom_stream_hashes, reverse_backfill), + ] diff --git a/apps/channels/models.py b/apps/channels/models.py index 238bdb33..3b7ed6e3 100644 --- a/apps/channels/models.py +++ b/apps/channels/models.py @@ -152,8 +152,14 @@ class Stream(models.Model): stream = cls.objects.create(**fields_to_update) return stream, True # True means it was created - # @TODO: honor stream's stream profile def get_stream_profile(self): + """ + Get the stream profile for this stream. + Uses the stream's own profile if set, otherwise returns the default. + """ + if self.stream_profile: + return self.stream_profile + stream_profile = StreamProfile.objects.get( id=CoreSettings.get_default_stream_profile_id() ) diff --git a/apps/channels/signals.py b/apps/channels/signals.py index d7a7414d..27b361ba 100644 --- a/apps/channels/signals.py +++ b/apps/channels/signals.py @@ -45,6 +45,20 @@ def set_default_m3u_account(sender, instance, **kwargs): else: raise ValueError("No default M3UAccount found.") +@receiver(post_save, sender=Stream) +def generate_custom_stream_hash(sender, instance, created, **kwargs): + """ + Generate a stable stream_hash for custom streams after creation. + Uses the stream's ID to ensure the hash never changes even if name/url is edited. 
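Both the backfill migration above and the post_save signal below derive the hash from the immutable primary key; a minimal standalone sketch of that derivation (hashlib is standard library, the helper name is illustrative):

    import hashlib

    def stable_custom_stream_hash(stream_id: int) -> str:
        # Hash only the stream's primary key, so editing the name or URL
        # later never changes the resulting stream_hash.
        return hashlib.sha256(f"custom_stream_{stream_id}".encode()).hexdigest()

    # stable_custom_stream_hash(42) always yields the same 64-character hex digest.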
+ """ + if instance.is_custom and not instance.stream_hash and created: + import hashlib + # Use stream ID for a stable, unique hash that never changes + unique_string = f"custom_stream_{instance.id}" + instance.stream_hash = hashlib.sha256(unique_string.encode()).hexdigest() + # Use update to avoid triggering signals again + Stream.objects.filter(id=instance.id).update(stream_hash=instance.stream_hash) + @receiver(post_save, sender=Channel) def refresh_epg_programs(sender, instance, created, **kwargs): """ diff --git a/apps/epg/api_views.py b/apps/epg/api_views.py index f3248677..2fc5a743 100644 --- a/apps/epg/api_views.py +++ b/apps/epg/api_views.py @@ -147,23 +147,37 @@ class EPGGridAPIView(APIView): f"EPGGridAPIView: Found {count} program(s), including recently ended, currently running, and upcoming shows." ) - # Generate dummy programs for channels that have no EPG data + # Generate dummy programs for channels that have no EPG data OR dummy EPG sources from apps.channels.models import Channel + from apps.epg.models import EPGSource from django.db.models import Q - # Get channels with no EPG data + # Get channels with no EPG data at all (standard dummy) channels_without_epg = Channel.objects.filter(Q(epg_data__isnull=True)) - channels_count = channels_without_epg.count() - # Log more detailed information about channels missing EPG data - if channels_count > 0: + # Get channels with custom dummy EPG sources (generate on-demand with patterns) + channels_with_custom_dummy = Channel.objects.filter( + epg_data__epg_source__source_type='dummy' + ).distinct() + + # Log what we found + without_count = channels_without_epg.count() + custom_count = channels_with_custom_dummy.count() + + if without_count > 0: channel_names = [f"{ch.name} (ID: {ch.id})" for ch in channels_without_epg] - logger.warning( - f"EPGGridAPIView: Missing EPG data for these channels: {', '.join(channel_names)}" + logger.debug( + f"EPGGridAPIView: Channels needing standard dummy EPG: {', '.join(channel_names)}" + ) + + if custom_count > 0: + channel_names = [f"{ch.name} (ID: {ch.id})" for ch in channels_with_custom_dummy] + logger.debug( + f"EPGGridAPIView: Channels needing custom dummy EPG: {', '.join(channel_names)}" ) logger.debug( - f"EPGGridAPIView: Found {channels_count} channels with no EPG data." + f"EPGGridAPIView: Found {without_count} channels needing standard dummy, {custom_count} needing custom dummy EPG." 
) # Serialize the regular programs @@ -205,12 +219,91 @@ class EPGGridAPIView(APIView): # Generate and append dummy programs dummy_programs = [] - for channel in channels_without_epg: - # Use the channel UUID as tvg_id for dummy programs to match in the guide + + # Import the function from output.views + from apps.output.views import generate_dummy_programs as gen_dummy_progs + + # Handle channels with CUSTOM dummy EPG sources (with patterns) + for channel in channels_with_custom_dummy: + # For dummy EPGs, ALWAYS use channel UUID to ensure unique programs per channel + # This prevents multiple channels assigned to the same dummy EPG from showing identical data + # Each channel gets its own unique program data even if they share the same EPG source dummy_tvg_id = str(channel.uuid) try: - # Create programs every 4 hours for the next 24 hours + # Get the custom dummy EPG source + epg_source = channel.epg_data.epg_source if channel.epg_data else None + + logger.debug(f"Generating custom dummy programs for channel: {channel.name} (ID: {channel.id})") + + # Determine which name to parse based on custom properties + name_to_parse = channel.name + if epg_source and epg_source.custom_properties: + custom_props = epg_source.custom_properties + name_source = custom_props.get('name_source') + + if name_source == 'stream': + # Get the stream index (1-based from user, convert to 0-based) + stream_index = custom_props.get('stream_index', 1) - 1 + + # Get streams ordered by channelstream order + channel_streams = channel.streams.all().order_by('channelstream__order') + + if channel_streams.exists() and 0 <= stream_index < channel_streams.count(): + stream = list(channel_streams)[stream_index] + name_to_parse = stream.name + logger.debug(f"Using stream name for parsing: {name_to_parse} (stream index: {stream_index})") + else: + logger.warning(f"Stream index {stream_index} not found for channel {channel.name}, falling back to channel name") + elif name_source == 'channel': + logger.debug(f"Using channel name for parsing: {name_to_parse}") + + # Generate programs using custom patterns from the dummy EPG source + # Use the same tvg_id that will be set in the program data + generated = gen_dummy_progs( + channel_id=dummy_tvg_id, + channel_name=name_to_parse, + num_days=1, + program_length_hours=4, + epg_source=epg_source + ) + + # Custom dummy should always return data (either from patterns or fallback) + if generated: + logger.debug(f"Generated {len(generated)} custom dummy programs for {channel.name}") + # Convert generated programs to API format + for program in generated: + dummy_program = { + "id": f"dummy-custom-{channel.id}-{program['start_time'].hour}", + "epg": {"tvg_id": dummy_tvg_id, "name": channel.name}, + "start_time": program['start_time'].isoformat(), + "end_time": program['end_time'].isoformat(), + "title": program['title'], + "description": program['description'], + "tvg_id": dummy_tvg_id, + "sub_title": None, + "custom_properties": None, + } + dummy_programs.append(dummy_program) + else: + logger.warning(f"No programs generated for custom dummy EPG channel: {channel.name}") + + except Exception as e: + logger.error( + f"Error creating custom dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}" + ) + + # Handle channels with NO EPG data (standard dummy with humorous descriptions) + for channel in channels_without_epg: + # For channels with no EPG, use UUID to ensure uniqueness (matches frontend logic) + # The frontend uses: tvgRecord?.tvg_id ?? 
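The name_source handling above (and its twin in generate_epg later in this diff) reduces to a small lookup; a hypothetical helper sketching it, assuming the same 1-based stream_index convention used by the UI:

    def resolve_name_to_parse(channel, custom_props):
        # Hypothetical helper: use the channel name unless the dummy EPG source
        # asks to parse the name of a specific assigned stream instead.
        if custom_props.get('name_source') == 'stream':
            index = custom_props.get('stream_index', 1) - 1  # user-facing value is 1-based
            streams = list(channel.streams.all().order_by('channelstream__order'))
            if 0 <= index < len(streams):
                return streams[index].name
        return channel.name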
channel.uuid + # Since there's no EPG data, it will fall back to UUID + dummy_tvg_id = str(channel.uuid) + + try: + logger.debug(f"Generating standard dummy programs for channel: {channel.name} (ID: {channel.id})") + + # Create programs every 4 hours for the next 24 hours with humorous descriptions for hour_offset in range(0, 24, 4): # Use timedelta for time arithmetic instead of replace() to avoid hour overflow start_time = now + timedelta(hours=hour_offset) @@ -238,7 +331,7 @@ class EPGGridAPIView(APIView): # Create a dummy program in the same format as regular programs dummy_program = { - "id": f"dummy-{channel.id}-{hour_offset}", # Create a unique ID + "id": f"dummy-standard-{channel.id}-{hour_offset}", "epg": {"tvg_id": dummy_tvg_id, "name": channel.name}, "start_time": start_time.isoformat(), "end_time": end_time.isoformat(), @@ -252,7 +345,7 @@ class EPGGridAPIView(APIView): except Exception as e: logger.error( - f"Error creating dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}" + f"Error creating standard dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}" ) # Combine regular and dummy programs @@ -284,7 +377,22 @@ class EPGImportAPIView(APIView): ) def post(self, request, format=None): logger.info("EPGImportAPIView: Received request to import EPG data.") - refresh_epg_data.delay(request.data.get("id", None)) # Trigger Celery task + epg_id = request.data.get("id", None) + + # Check if this is a dummy EPG source + try: + from .models import EPGSource + epg_source = EPGSource.objects.get(id=epg_id) + if epg_source.source_type == 'dummy': + logger.info(f"EPGImportAPIView: Skipping refresh for dummy EPG source {epg_id}") + return Response( + {"success": False, "message": "Dummy EPG sources do not require refreshing."}, + status=status.HTTP_400_BAD_REQUEST, + ) + except EPGSource.DoesNotExist: + pass # Let the task handle the missing source + + refresh_epg_data.delay(epg_id) # Trigger Celery task logger.info("EPGImportAPIView: Task dispatched to refresh EPG data.") return Response( {"success": True, "message": "EPG data import initiated."}, @@ -308,3 +416,4 @@ class EPGDataViewSet(viewsets.ReadOnlyModelViewSet): return [perm() for perm in permission_classes_by_action[self.action]] except KeyError: return [Authenticated()] + diff --git a/apps/epg/migrations/0018_epgsource_custom_properties_and_more.py b/apps/epg/migrations/0018_epgsource_custom_properties_and_more.py new file mode 100644 index 00000000..70ebb214 --- /dev/null +++ b/apps/epg/migrations/0018_epgsource_custom_properties_and_more.py @@ -0,0 +1,23 @@ +# Generated by Django 5.2.4 on 2025-10-17 17:02 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0017_alter_epgsource_url'), + ] + + operations = [ + migrations.AddField( + model_name='epgsource', + name='custom_properties', + field=models.JSONField(blank=True, default=dict, help_text='Custom properties for dummy EPG configuration (regex patterns, timezone, duration, etc.)', null=True), + ), + migrations.AlterField( + model_name='epgsource', + name='source_type', + field=models.CharField(choices=[('xmltv', 'XMLTV URL'), ('schedules_direct', 'Schedules Direct API'), ('dummy', 'Custom Dummy EPG')], max_length=20), + ), + ] diff --git a/apps/epg/models.py b/apps/epg/models.py index da6ac8e6..6c70add2 100644 --- a/apps/epg/models.py +++ b/apps/epg/models.py @@ -8,6 +8,7 @@ class EPGSource(models.Model): SOURCE_TYPE_CHOICES = [ ('xmltv', 'XMLTV URL'), ('schedules_direct', 
'Schedules Direct API'), + ('dummy', 'Custom Dummy EPG'), ] STATUS_IDLE = 'idle' @@ -38,6 +39,12 @@ class EPGSource(models.Model): refresh_task = models.ForeignKey( PeriodicTask, on_delete=models.SET_NULL, null=True, blank=True ) + custom_properties = models.JSONField( + default=dict, + blank=True, + null=True, + help_text="Custom properties for dummy EPG configuration (regex patterns, timezone, duration, etc.)" + ) status = models.CharField( max_length=20, choices=STATUS_CHOICES, diff --git a/apps/epg/serializers.py b/apps/epg/serializers.py index 85186cae..3404cca9 100644 --- a/apps/epg/serializers.py +++ b/apps/epg/serializers.py @@ -28,6 +28,7 @@ class EPGSourceSerializer(serializers.ModelSerializer): 'last_message', 'created_at', 'updated_at', + 'custom_properties', 'epg_data_ids' ] diff --git a/apps/epg/signals.py b/apps/epg/signals.py index e8a004cb..e41d3aaf 100644 --- a/apps/epg/signals.py +++ b/apps/epg/signals.py @@ -1,9 +1,9 @@ from django.db.models.signals import post_save, post_delete, pre_save from django.dispatch import receiver -from .models import EPGSource +from .models import EPGSource, EPGData from .tasks import refresh_epg_data, delete_epg_refresh_task_by_id from django_celery_beat.models import PeriodicTask, IntervalSchedule -from core.utils import is_protected_path +from core.utils import is_protected_path, send_websocket_update import json import logging import os @@ -12,15 +12,77 @@ logger = logging.getLogger(__name__) @receiver(post_save, sender=EPGSource) def trigger_refresh_on_new_epg_source(sender, instance, created, **kwargs): - # Trigger refresh only if the source is newly created and active - if created and instance.is_active: + # Trigger refresh only if the source is newly created, active, and not a dummy EPG + if created and instance.is_active and instance.source_type != 'dummy': refresh_epg_data.delay(instance.id) +@receiver(post_save, sender=EPGSource) +def create_dummy_epg_data(sender, instance, created, **kwargs): + """ + Automatically create EPGData for dummy EPG sources when they are created. + This allows channels to be assigned to dummy EPGs immediately without + requiring a refresh first. 
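For orientation, an illustrative custom_properties payload for a dummy EPG source, using keys that are consumed later in generate_custom_dummy_programs; the regex values are made-up examples, not defaults:

    example_dummy_epg_properties = {
        "title_pattern": r"(?P<league>[A-Z]+) \d+: (?P<team1>.+) VS (?P<team2>.+?) @",   # example only
        "time_pattern": r"@ (?P<hour>\d{1,2})(?::(?P<minute>\d{2}))? ?(?P<ampm>[AP]M)?",  # example only
        "timezone": "US/Eastern",       # timezone of the times found in channel titles
        "program_duration": 180,        # minutes per generated program
        "name_source": "channel",       # or "stream", together with "stream_index"
        "category": "Sports",           # comma-separated values are split into a list
        "include_date": True,
        "include_live": False,
    }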
+ """ + if instance.source_type == 'dummy': + # Ensure dummy EPGs always have idle status and no status message + if instance.status != EPGSource.STATUS_IDLE or instance.last_message: + instance.status = EPGSource.STATUS_IDLE + instance.last_message = None + instance.save(update_fields=['status', 'last_message']) + + # Create a URL-friendly tvg_id from the dummy EPG name + # Replace spaces and special characters with underscores + friendly_tvg_id = instance.name.replace(' ', '_').replace('-', '_') + # Remove any characters that aren't alphanumeric or underscores + friendly_tvg_id = ''.join(c for c in friendly_tvg_id if c.isalnum() or c == '_') + # Convert to lowercase for consistency + friendly_tvg_id = friendly_tvg_id.lower() + # Prefix with 'dummy_' to make it clear this is a dummy EPG + friendly_tvg_id = f"dummy_{friendly_tvg_id}" + + # Create or update the EPGData record + epg_data, data_created = EPGData.objects.get_or_create( + tvg_id=friendly_tvg_id, + epg_source=instance, + defaults={ + 'name': instance.name, + 'icon_url': None + } + ) + + # Update name if it changed and record already existed + if not data_created and epg_data.name != instance.name: + epg_data.name = instance.name + epg_data.save(update_fields=['name']) + + if data_created: + logger.info(f"Auto-created EPGData for dummy EPG source: {instance.name} (ID: {instance.id})") + + # Send websocket update to notify frontend that EPG data has been created + # This allows the channel form to immediately show the new dummy EPG without refreshing + send_websocket_update('updates', 'update', { + 'type': 'epg_data_created', + 'source_id': instance.id, + 'source_name': instance.name, + 'epg_data_id': epg_data.id + }) + else: + logger.debug(f"EPGData already exists for dummy EPG source: {instance.name} (ID: {instance.id})") + @receiver(post_save, sender=EPGSource) def create_or_update_refresh_task(sender, instance, **kwargs): """ Create or update a Celery Beat periodic task when an EPGSource is created/updated. + Skip creating tasks for dummy EPG sources as they don't need refreshing. """ + # Skip task creation for dummy EPGs + if instance.source_type == 'dummy': + # If there's an existing task, disable it + if instance.refresh_task: + instance.refresh_task.enabled = False + instance.refresh_task.save(update_fields=['enabled']) + return + task_name = f"epg_source-refresh-{instance.id}" interval, _ = IntervalSchedule.objects.get_or_create( every=int(instance.refresh_interval), @@ -80,7 +142,14 @@ def delete_refresh_task(sender, instance, **kwargs): def update_status_on_active_change(sender, instance, **kwargs): """ When an EPGSource's is_active field changes, update the status accordingly. + For dummy EPGs, always ensure status is idle and no status message. 
""" + # Dummy EPGs should always be idle with no status message + if instance.source_type == 'dummy': + instance.status = EPGSource.STATUS_IDLE + instance.last_message = None + return + if instance.pk: # Only for existing records, not new ones try: # Get the current record from the database diff --git a/apps/epg/tasks.py b/apps/epg/tasks.py index d9ae5a5d..2028cd98 100644 --- a/apps/epg/tasks.py +++ b/apps/epg/tasks.py @@ -133,8 +133,9 @@ def delete_epg_refresh_task_by_id(epg_id): @shared_task def refresh_all_epg_data(): logger.info("Starting refresh_epg_data task.") - active_sources = EPGSource.objects.filter(is_active=True) - logger.debug(f"Found {active_sources.count()} active EPGSource(s).") + # Exclude dummy EPG sources from refresh - they don't need refreshing + active_sources = EPGSource.objects.filter(is_active=True).exclude(source_type='dummy') + logger.debug(f"Found {active_sources.count()} active EPGSource(s) (excluding dummy EPGs).") for source in active_sources: refresh_epg_data(source.id) @@ -180,6 +181,13 @@ def refresh_epg_data(source_id): gc.collect() return + # Skip refresh for dummy EPG sources - they don't need refreshing + if source.source_type == 'dummy': + logger.info(f"Skipping refresh for dummy EPG source {source.name} (ID: {source_id})") + release_task_lock('refresh_epg_data', source_id) + gc.collect() + return + # Continue with the normal processing... logger.info(f"Processing EPGSource: {source.name} (type: {source.source_type})") if source.source_type == 'xmltv': @@ -1943,3 +1951,20 @@ def detect_file_format(file_path=None, content=None): # If we reach here, we couldn't reliably determine the format return format_type, is_compressed, file_extension + + +def generate_dummy_epg(source): + """ + DEPRECATED: This function is no longer used. + + Dummy EPG programs are now generated on-demand when they are requested + (during XMLTV export or EPG grid display), rather than being pre-generated + and stored in the database. + + See: apps/output/views.py - generate_custom_dummy_programs() + + This function remains for backward compatibility but should not be called. + """ + logger.warning(f"generate_dummy_epg() called for {source.name} but this function is deprecated. 
" + f"Dummy EPG programs are now generated on-demand.") + return True diff --git a/apps/m3u/serializers.py b/apps/m3u/serializers.py index 05462d0f..a607dc07 100644 --- a/apps/m3u/serializers.py +++ b/apps/m3u/serializers.py @@ -136,6 +136,9 @@ class M3UAccountSerializer(serializers.ModelSerializer): validators=[validate_flexible_url], ) enable_vod = serializers.BooleanField(required=False, write_only=True) + auto_enable_new_groups_live = serializers.BooleanField(required=False, write_only=True) + auto_enable_new_groups_vod = serializers.BooleanField(required=False, write_only=True) + auto_enable_new_groups_series = serializers.BooleanField(required=False, write_only=True) class Meta: model = M3UAccount @@ -164,6 +167,9 @@ class M3UAccountSerializer(serializers.ModelSerializer): "status", "last_message", "enable_vod", + "auto_enable_new_groups_live", + "auto_enable_new_groups_vod", + "auto_enable_new_groups_series", ] extra_kwargs = { "password": { @@ -175,23 +181,36 @@ class M3UAccountSerializer(serializers.ModelSerializer): def to_representation(self, instance): data = super().to_representation(instance) - # Parse custom_properties to get VOD preference + # Parse custom_properties to get VOD preference and auto_enable_new_groups settings custom_props = instance.custom_properties or {} data["enable_vod"] = custom_props.get("enable_vod", False) + data["auto_enable_new_groups_live"] = custom_props.get("auto_enable_new_groups_live", True) + data["auto_enable_new_groups_vod"] = custom_props.get("auto_enable_new_groups_vod", True) + data["auto_enable_new_groups_series"] = custom_props.get("auto_enable_new_groups_series", True) return data def update(self, instance, validated_data): - # Handle enable_vod preference + # Handle enable_vod preference and auto_enable_new_groups settings enable_vod = validated_data.pop("enable_vod", None) + auto_enable_new_groups_live = validated_data.pop("auto_enable_new_groups_live", None) + auto_enable_new_groups_vod = validated_data.pop("auto_enable_new_groups_vod", None) + auto_enable_new_groups_series = validated_data.pop("auto_enable_new_groups_series", None) + # Get existing custom_properties + custom_props = instance.custom_properties or {} + + # Update preferences if enable_vod is not None: - # Get existing custom_properties - custom_props = instance.custom_properties or {} - - # Update VOD preference custom_props["enable_vod"] = enable_vod - validated_data["custom_properties"] = custom_props + if auto_enable_new_groups_live is not None: + custom_props["auto_enable_new_groups_live"] = auto_enable_new_groups_live + if auto_enable_new_groups_vod is not None: + custom_props["auto_enable_new_groups_vod"] = auto_enable_new_groups_vod + if auto_enable_new_groups_series is not None: + custom_props["auto_enable_new_groups_series"] = auto_enable_new_groups_series + + validated_data["custom_properties"] = custom_props # Pop out channel group memberships so we can handle them manually channel_group_data = validated_data.pop("channel_group", []) @@ -225,14 +244,20 @@ class M3UAccountSerializer(serializers.ModelSerializer): return instance def create(self, validated_data): - # Handle enable_vod preference during creation + # Handle enable_vod preference and auto_enable_new_groups settings during creation enable_vod = validated_data.pop("enable_vod", False) + auto_enable_new_groups_live = validated_data.pop("auto_enable_new_groups_live", True) + auto_enable_new_groups_vod = validated_data.pop("auto_enable_new_groups_vod", True) + auto_enable_new_groups_series = 
validated_data.pop("auto_enable_new_groups_series", True) # Parse existing custom_properties or create new custom_props = validated_data.get("custom_properties", {}) - # Set VOD preference + # Set preferences (default to True for auto_enable_new_groups) custom_props["enable_vod"] = enable_vod + custom_props["auto_enable_new_groups_live"] = auto_enable_new_groups_live + custom_props["auto_enable_new_groups_vod"] = auto_enable_new_groups_vod + custom_props["auto_enable_new_groups_series"] = auto_enable_new_groups_series validated_data["custom_properties"] = custom_props return super().create(validated_data) diff --git a/apps/m3u/tasks.py b/apps/m3u/tasks.py index 0ba595c5..d29c294b 100644 --- a/apps/m3u/tasks.py +++ b/apps/m3u/tasks.py @@ -488,25 +488,29 @@ def process_groups(account, groups): } logger.info(f"Currently {len(existing_groups)} existing groups") - group_objs = [] + # Check if we should auto-enable new groups based on account settings + account_custom_props = account.custom_properties or {} + auto_enable_new_groups_live = account_custom_props.get("auto_enable_new_groups_live", True) + + # Separate existing groups from groups that need to be created + existing_group_objs = [] groups_to_create = [] + for group_name, custom_props in groups.items(): - logger.debug(f"Handling group for M3U account {account.id}: {group_name}") - - if group_name not in existing_groups: - groups_to_create.append( - ChannelGroup( - name=group_name, - ) - ) + if group_name in existing_groups: + existing_group_objs.append(existing_groups[group_name]) else: - group_objs.append(existing_groups[group_name]) + groups_to_create.append(ChannelGroup(name=group_name)) + # Create new groups and fetch them back with IDs + newly_created_group_objs = [] if groups_to_create: - logger.debug(f"Creating {len(groups_to_create)} groups") - created = ChannelGroup.bulk_create_and_fetch(groups_to_create) - logger.debug(f"Created {len(created)} groups") - group_objs.extend(created) + logger.info(f"Creating {len(groups_to_create)} new groups for account {account.id}") + newly_created_group_objs = list(ChannelGroup.bulk_create_and_fetch(groups_to_create)) + logger.debug(f"Successfully created {len(newly_created_group_objs)} new groups") + + # Combine all groups + all_group_objs = existing_group_objs + newly_created_group_objs # Get existing relationships for this account existing_relationships = { @@ -536,7 +540,7 @@ def process_groups(account, groups): relations_to_delete.append(rel) logger.debug(f"Marking relationship for deletion: group '{group_name}' no longer exists in source for account {account.id}") - for group in group_objs: + for group in all_group_objs: custom_props = groups.get(group.name, {}) if group.name in existing_relationships: @@ -566,35 +570,17 @@ def process_groups(account, groups): else: logger.debug(f"xc_id unchanged for group '{group.name}' - account {account.id}") else: - # Create new relationship - but check if there's an existing relationship that might have user settings - # This can happen if the group was temporarily removed and is now back - try: - potential_existing = ChannelGroupM3UAccount.objects.filter( - m3u_account=account, - channel_group=group - ).first() + # Create new relationship - this group is new to this M3U account + # Use the auto_enable setting to determine if it should start enabled + if not auto_enable_new_groups_live: + logger.info(f"Group '{group.name}' is new to account {account.id} - creating relationship but DISABLED (auto_enable_new_groups_live=False)") - if 
potential_existing: - # Merge with existing custom properties to preserve user settings - existing_custom_props = potential_existing.custom_properties or {} - - # Merge new properties with existing ones - merged_custom_props = existing_custom_props.copy() - merged_custom_props.update(custom_props) - custom_props = merged_custom_props - logger.debug(f"Merged custom properties for existing relationship: group '{group.name}' - account {account.id}") - except Exception as e: - logger.debug(f"Could not check for existing relationship: {str(e)}") - # Fall back to using just the new custom properties - pass - - # Create new relationship relations_to_create.append( ChannelGroupM3UAccount( channel_group=group, m3u_account=account, custom_properties=custom_props, - enabled=True, # Default to enabled + enabled=auto_enable_new_groups_live, ) ) @@ -1562,7 +1548,7 @@ def sync_auto_channels(account_id, scan_start_time=None): # Get force_dummy_epg, group_override, and regex patterns from group custom_properties group_custom_props = {} - force_dummy_epg = False + force_dummy_epg = False # Backward compatibility: legacy option to disable EPG override_group_id = None name_regex_pattern = None name_replace_pattern = None @@ -1571,6 +1557,8 @@ def sync_auto_channels(account_id, scan_start_time=None): channel_sort_order = None channel_sort_reverse = False stream_profile_id = None + custom_logo_id = None + custom_epg_id = None # New option: select specific EPG source (takes priority over force_dummy_epg) if group_relation.custom_properties: group_custom_props = group_relation.custom_properties force_dummy_epg = group_custom_props.get("force_dummy_epg", False) @@ -1581,11 +1569,13 @@ def sync_auto_channels(account_id, scan_start_time=None): ) name_match_regex = group_custom_props.get("name_match_regex") channel_profile_ids = group_custom_props.get("channel_profile_ids") + custom_epg_id = group_custom_props.get("custom_epg_id") channel_sort_order = group_custom_props.get("channel_sort_order") channel_sort_reverse = group_custom_props.get( "channel_sort_reverse", False ) stream_profile_id = group_custom_props.get("stream_profile_id") + custom_logo_id = group_custom_props.get("custom_logo_id") # Determine which group to use for created channels target_group = channel_group @@ -1840,7 +1830,25 @@ def sync_auto_channels(account_id, scan_start_time=None): # Handle logo updates current_logo = None - if stream.logo_url: + if custom_logo_id: + # Use the custom logo specified in group settings + from apps.channels.models import Logo + try: + current_logo = Logo.objects.get(id=custom_logo_id) + except Logo.DoesNotExist: + logger.warning( + f"Custom logo with ID {custom_logo_id} not found for existing channel, falling back to stream logo" + ) + # Fall back to stream logo if custom logo not found + if stream.logo_url: + current_logo, _ = Logo.objects.get_or_create( + url=stream.logo_url, + defaults={ + "name": stream.name or stream.tvg_id or "Unknown" + }, + ) + elif stream.logo_url: + # No custom logo configured, use stream logo from apps.channels.models import Logo current_logo, _ = Logo.objects.get_or_create( @@ -1856,10 +1864,42 @@ def sync_auto_channels(account_id, scan_start_time=None): # Handle EPG data updates current_epg_data = None - if stream.tvg_id and not force_dummy_epg: + if custom_epg_id: + # Use the custom EPG specified in group settings (e.g., a dummy EPG) + from apps.epg.models import EPGSource + try: + epg_source = EPGSource.objects.get(id=custom_epg_id) + # For dummy EPGs, select the first (and typically 
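The custom_epg_id branches in this hunk (existing channels here, newly created channels further below) share one lookup rule; a hypothetical helper condensing it — note the task itself additionally falls back to plain tvg_id auto-matching when the source is missing:

    from apps.epg.models import EPGData, EPGSource

    def resolve_custom_epg_data(custom_epg_id, tvg_id=None):
        # Hypothetical condensation of the sync_auto_channels lookup: dummy
        # sources expose a single EPGData row, other sources are matched by
        # the stream's tvg_id; an unknown source id yields no match here.
        try:
            source = EPGSource.objects.get(id=custom_epg_id)
        except EPGSource.DoesNotExist:
            return None
        if source.source_type == 'dummy':
            return EPGData.objects.filter(epg_source=source).first()
        if tvg_id:
            return EPGData.objects.filter(tvg_id=tvg_id, epg_source=source).first()
        return None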
only) EPGData entry from this source + if epg_source.source_type == 'dummy': + current_epg_data = EPGData.objects.filter( + epg_source=epg_source + ).first() + if not current_epg_data: + logger.warning( + f"No EPGData found for dummy EPG source {epg_source.name} (ID: {custom_epg_id})" + ) + else: + # For non-dummy sources, try to find existing EPGData by tvg_id + if stream.tvg_id: + current_epg_data = EPGData.objects.filter( + tvg_id=stream.tvg_id, + epg_source=epg_source + ).first() + except EPGSource.DoesNotExist: + logger.warning( + f"Custom EPG source with ID {custom_epg_id} not found for existing channel, falling back to auto-match" + ) + # Fall back to auto-match by tvg_id + if stream.tvg_id and not force_dummy_epg: + current_epg_data = EPGData.objects.filter( + tvg_id=stream.tvg_id + ).first() + elif stream.tvg_id and not force_dummy_epg: + # Auto-match EPG by tvg_id (original behavior) current_epg_data = EPGData.objects.filter( tvg_id=stream.tvg_id ).first() + # If force_dummy_epg is True and no custom_epg_id, current_epg_data stays None if existing_channel.epg_data != current_epg_data: existing_channel.epg_data = current_epg_data @@ -1949,19 +1989,81 @@ def sync_auto_channels(account_id, scan_start_time=None): ChannelProfileMembership.objects.bulk_create(memberships) # Try to match EPG data - if stream.tvg_id and not force_dummy_epg: + if custom_epg_id: + # Use the custom EPG specified in group settings (e.g., a dummy EPG) + from apps.epg.models import EPGSource + try: + epg_source = EPGSource.objects.get(id=custom_epg_id) + # For dummy EPGs, select the first (and typically only) EPGData entry from this source + if epg_source.source_type == 'dummy': + epg_data = EPGData.objects.filter( + epg_source=epg_source + ).first() + if epg_data: + channel.epg_data = epg_data + channel.save(update_fields=["epg_data"]) + else: + logger.warning( + f"No EPGData found for dummy EPG source {epg_source.name} (ID: {custom_epg_id})" + ) + else: + # For non-dummy sources, try to find existing EPGData by tvg_id + if stream.tvg_id: + epg_data = EPGData.objects.filter( + tvg_id=stream.tvg_id, + epg_source=epg_source + ).first() + if epg_data: + channel.epg_data = epg_data + channel.save(update_fields=["epg_data"]) + except EPGSource.DoesNotExist: + logger.warning( + f"Custom EPG source with ID {custom_epg_id} not found, falling back to auto-match" + ) + # Fall back to auto-match by tvg_id + if stream.tvg_id and not force_dummy_epg: + epg_data = EPGData.objects.filter( + tvg_id=stream.tvg_id + ).first() + if epg_data: + channel.epg_data = epg_data + channel.save(update_fields=["epg_data"]) + elif stream.tvg_id and not force_dummy_epg: + # Auto-match EPG by tvg_id (original behavior) epg_data = EPGData.objects.filter( tvg_id=stream.tvg_id ).first() if epg_data: channel.epg_data = epg_data channel.save(update_fields=["epg_data"]) - elif stream.tvg_id and force_dummy_epg: + elif force_dummy_epg: + # Force dummy EPG with no custom EPG selected (set to None) channel.epg_data = None channel.save(update_fields=["epg_data"]) # Handle logo - if stream.logo_url: + if custom_logo_id: + # Use the custom logo specified in group settings + from apps.channels.models import Logo + try: + custom_logo = Logo.objects.get(id=custom_logo_id) + channel.logo = custom_logo + channel.save(update_fields=["logo"]) + except Logo.DoesNotExist: + logger.warning( + f"Custom logo with ID {custom_logo_id} not found, falling back to stream logo" + ) + # Fall back to stream logo if custom logo not found + if stream.logo_url: + logo, _ = 
Logo.objects.get_or_create( + url=stream.logo_url, + defaults={ + "name": stream.name or stream.tvg_id or "Unknown" + }, + ) + channel.logo = logo + channel.save(update_fields=["logo"]) + elif stream.logo_url: from apps.channels.models import Logo logo, _ = Logo.objects.get_or_create( diff --git a/apps/output/views.py b/apps/output/views.py index 6eee7ccc..a695d05f 100644 --- a/apps/output/views.py +++ b/apps/output/views.py @@ -9,7 +9,7 @@ from apps.epg.models import ProgramData from apps.accounts.models import User from core.models import CoreSettings, NETWORK_ACCESS from dispatcharr.utils import network_access_allowed -from django.utils import timezone +from django.utils import timezone as django_timezone from django.shortcuts import get_object_or_404 from datetime import datetime, timedelta import html # Add this import for XML escaping @@ -22,6 +22,7 @@ import logging from django.db.models.functions import Lower import os from apps.m3u.utils import calculate_tuner_count +import regex logger = logging.getLogger(__name__) @@ -186,12 +187,42 @@ def generate_m3u(request, profile_name=None, user=None): return response -def generate_dummy_programs(channel_id, channel_name, num_days=1, program_length_hours=4): +def generate_dummy_programs(channel_id, channel_name, num_days=1, program_length_hours=4, epg_source=None): + """ + Generate dummy EPG programs for channels. + + If epg_source is provided and it's a custom dummy EPG with patterns, + use those patterns to generate programs from the channel title. + Otherwise, generate default dummy programs. + + Args: + channel_id: Channel ID for the programs + channel_name: Channel title/name + num_days: Number of days to generate programs for + program_length_hours: Length of each program in hours + epg_source: Optional EPGSource for custom dummy EPG with patterns + + Returns: + List of program dictionaries + """ # Get current time rounded to hour - now = timezone.now() + now = django_timezone.now() now = now.replace(minute=0, second=0, microsecond=0) - # Humorous program descriptions based on time of day + # Check if this is a custom dummy EPG with regex patterns + if epg_source and epg_source.source_type == 'dummy' and epg_source.custom_properties: + custom_programs = generate_custom_dummy_programs( + channel_id, channel_name, now, num_days, + epg_source.custom_properties + ) + # If custom generation succeeded, return those programs + # If it returned empty (pattern didn't match), fall through to default + if custom_programs: + return custom_programs + else: + logger.info(f"Custom pattern didn't match for '{channel_name}', using default dummy EPG") + + # Default humorous program descriptions based on time of day time_descriptions = { (0, 4): [ f"Late Night with {channel_name} - Where insomniacs unite!", @@ -263,6 +294,579 @@ def generate_dummy_programs(channel_id, channel_name, num_days=1, program_length return programs +def generate_custom_dummy_programs(channel_id, channel_name, now, num_days, custom_properties): + """ + Generate programs using custom dummy EPG regex patterns. + + Extracts information from channel title using regex patterns and generates + programs based on the extracted data. + + TIMEZONE HANDLING: + ------------------ + The timezone parameter specifies the timezone of the event times in your channel + titles using standard timezone names (e.g., 'US/Eastern', 'US/Pacific', 'Europe/London'). + DST (Daylight Saving Time) is handled automatically by pytz. 
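The timezone notes in this docstring boil down to localize-then-convert; a minimal sketch of the DST-aware conversion performed further down in generate_custom_dummy_programs (helper name and example dates are illustrative):

    import pytz
    from datetime import datetime, date

    def event_start_utc(event_date: date, hour: int, minute: int, tz_name: str = "US/Eastern"):
        # localize() attaches the source timezone with the correct DST offset
        # for that date; the aware datetime is then converted to UTC for storage.
        source_tz = pytz.timezone(tz_name)
        naive = datetime(event_date.year, event_date.month, event_date.day, hour, minute)
        return source_tz.localize(naive).astimezone(pytz.utc)

    # event_start_utc(date(2025, 10, 17), 20, 0)  ->  2025-10-18 00:00 UTC (EDT, UTC-4)
    # event_start_utc(date(2025, 1, 17), 20, 0)   ->  2025-01-18 01:00 UTC (EST, UTC-5)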
+ + Examples: + - Channel: "NHL 01: Bruins VS Maple Leafs @ 8:00PM ET" + - Set timezone = "US/Eastern" + - In October (DST): 8:00PM EDT → 12:00AM UTC (automatically uses UTC-4) + - In January (no DST): 8:00PM EST → 1:00AM UTC (automatically uses UTC-5) + + Args: + channel_id: Channel ID for the programs + channel_name: Channel title to parse + now: Current datetime (in UTC) + num_days: Number of days to generate programs for + custom_properties: Dict with title_pattern, time_pattern, templates, etc. + - timezone: Timezone name (e.g., 'US/Eastern') + + Returns: + List of program dictionaries with start_time/end_time in UTC + """ + import pytz + + logger.info(f"Generating custom dummy programs for channel: {channel_name}") + + # Extract patterns from custom properties + title_pattern = custom_properties.get('title_pattern', '') + time_pattern = custom_properties.get('time_pattern', '') + date_pattern = custom_properties.get('date_pattern', '') + + # Get timezone name (e.g., 'US/Eastern', 'US/Pacific', 'Europe/London') + timezone_value = custom_properties.get('timezone', 'UTC') + output_timezone_value = custom_properties.get('output_timezone', '') # Optional: display times in different timezone + program_duration = custom_properties.get('program_duration', 180) # Minutes + title_template = custom_properties.get('title_template', '') + description_template = custom_properties.get('description_template', '') + + # Templates for upcoming/ended programs + upcoming_title_template = custom_properties.get('upcoming_title_template', '') + upcoming_description_template = custom_properties.get('upcoming_description_template', '') + ended_title_template = custom_properties.get('ended_title_template', '') + ended_description_template = custom_properties.get('ended_description_template', '') + + # EPG metadata options + category_string = custom_properties.get('category', '') + # Split comma-separated categories and strip whitespace, filter out empty strings + categories = [cat.strip() for cat in category_string.split(',') if cat.strip()] if category_string else [] + include_date = custom_properties.get('include_date', True) + include_live = custom_properties.get('include_live', False) + + # Parse timezone name + try: + source_tz = pytz.timezone(timezone_value) + logger.debug(f"Using timezone: {timezone_value} (DST will be handled automatically)") + except pytz.exceptions.UnknownTimeZoneError: + logger.warning(f"Unknown timezone: {timezone_value}, defaulting to UTC") + source_tz = pytz.utc + + # Parse output timezone if provided (for display purposes) + output_tz = None + if output_timezone_value: + try: + output_tz = pytz.timezone(output_timezone_value) + logger.debug(f"Using output timezone for display: {output_timezone_value}") + except pytz.exceptions.UnknownTimeZoneError: + logger.warning(f"Unknown output timezone: {output_timezone_value}, will use source timezone") + output_tz = None + + if not title_pattern: + logger.warning(f"No title_pattern in custom_properties, falling back to default") + return [] # Return empty, will use default + + logger.debug(f"Title pattern from DB: {repr(title_pattern)}") + + # Convert PCRE/JavaScript named groups (?) 
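The named-group conversion applied below can be expressed as a single substitution; a sketch, assuming JavaScript-style (?<name>...) groups are rewritten to Python's (?P<name>...) while lookbehind assertions (?<=...) and (?<!...) are left untouched:

    import re

    def js_named_groups_to_python(pattern: str) -> str:
        # (?<name>...)  ->  (?P<name>...); the negative lookahead (?![=!]) keeps
        # lookbehind assertions (?<=...) and (?<!...) from being rewritten.
        return re.sub(r'\(\?<(?![=!])([^>]+)>', r'(?P<\1>', pattern)

    # js_named_groups_to_python(r'(?<team1>.+) VS (?<team2>.+)')
    #   ->  '(?P<team1>.+) VS (?P<team2>.+)'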
to Python format (?P) + # This handles patterns created with JavaScript regex syntax + # Use negative lookahead to avoid matching lookbehind (?<=) and negative lookbehind (?]+)>', r'(?P<\1>', title_pattern) + logger.debug(f"Converted title pattern: {repr(title_pattern)}") + + # Compile regex patterns using the enhanced regex module + # (supports variable-width lookbehinds like JavaScript) + try: + title_regex = regex.compile(title_pattern) + except Exception as e: + logger.error(f"Invalid title regex pattern after conversion: {e}") + logger.error(f"Pattern was: {repr(title_pattern)}") + return [] + + time_regex = None + if time_pattern: + # Convert PCRE/JavaScript named groups to Python format + # Use negative lookahead to avoid matching lookbehind (?<=) and negative lookbehind (?]+)>', r'(?P<\1>', time_pattern) + logger.debug(f"Converted time pattern: {repr(time_pattern)}") + try: + time_regex = regex.compile(time_pattern) + except Exception as e: + logger.warning(f"Invalid time regex pattern after conversion: {e}") + logger.warning(f"Pattern was: {repr(time_pattern)}") + + # Compile date regex if provided + date_regex = None + if date_pattern: + # Convert PCRE/JavaScript named groups to Python format + # Use negative lookahead to avoid matching lookbehind (?<=) and negative lookbehind (?]+)>', r'(?P<\1>', date_pattern) + logger.debug(f"Converted date pattern: {repr(date_pattern)}") + try: + date_regex = regex.compile(date_pattern) + except Exception as e: + logger.warning(f"Invalid date regex pattern after conversion: {e}") + logger.warning(f"Pattern was: {repr(date_pattern)}") + + # Try to match the channel name with the title pattern + # Use search() instead of match() to match JavaScript behavior where .match() searches anywhere in the string + title_match = title_regex.search(channel_name) + if not title_match: + logger.debug(f"Channel name '{channel_name}' doesn't match title pattern") + return [] # Return empty, will use default + + groups = title_match.groupdict() + logger.debug(f"Title pattern matched. Groups: {groups}") + + # Helper function to format template with matched groups + def format_template(template, groups): + """Replace {groupname} placeholders with matched group values""" + if not template: + return '' + result = template + for key, value in groups.items(): + result = result.replace(f'{{{key}}}', str(value) if value else '') + return result + + # Extract time from title if time pattern exists + time_info = None + time_groups = {} + if time_regex: + time_match = time_regex.search(channel_name) + if time_match: + time_groups = time_match.groupdict() + try: + hour = int(time_groups.get('hour')) + # Handle optional minute group - could be None if not captured + minute_value = time_groups.get('minute') + minute = int(minute_value) if minute_value is not None else 0 + ampm = time_groups.get('ampm') + ampm = ampm.lower() if ampm else None + + # Determine if this is 12-hour or 24-hour format + if ampm in ('am', 'pm'): + # 12-hour format: convert to 24-hour + if ampm == 'pm' and hour != 12: + hour += 12 + elif ampm == 'am' and hour == 12: + hour = 0 + logger.debug(f"Extracted time (12-hour): {hour}:{minute:02d} {ampm}") + else: + # 24-hour format: hour is already in 24-hour format + # Validate that it's actually a 24-hour time (0-23) + if hour > 23: + logger.warning(f"Invalid 24-hour time: {hour}. 
Must be 0-23.") + hour = hour % 24 # Wrap around just in case + logger.debug(f"Extracted time (24-hour): {hour}:{minute:02d}") + + time_info = {'hour': hour, 'minute': minute} + except (ValueError, TypeError) as e: + logger.warning(f"Error parsing time: {e}") + + # Extract date from title if date pattern exists + date_info = None + date_groups = {} + if date_regex: + date_match = date_regex.search(channel_name) + if date_match: + date_groups = date_match.groupdict() + try: + # Support various date group names: month, day, year + month_str = date_groups.get('month', '') + day = int(date_groups.get('day', 1)) + year = int(date_groups.get('year', now.year)) # Default to current year if not provided + + # Parse month - can be numeric (1-12) or text (Jan, January, etc.) + month = None + if month_str.isdigit(): + month = int(month_str) + else: + # Try to parse text month names + import calendar + month_str_lower = month_str.lower() + # Check full month names + for i, month_name in enumerate(calendar.month_name): + if month_name.lower() == month_str_lower: + month = i + break + # Check abbreviated month names if not found + if month is None: + for i, month_abbr in enumerate(calendar.month_abbr): + if month_abbr.lower() == month_str_lower: + month = i + break + + if month and 1 <= month <= 12 and 1 <= day <= 31: + date_info = {'year': year, 'month': month, 'day': day} + logger.debug(f"Extracted date: {year}-{month:02d}-{day:02d}") + else: + logger.warning(f"Invalid date values: month={month}, day={day}, year={year}") + except (ValueError, TypeError) as e: + logger.warning(f"Error parsing date: {e}") + + # Merge title groups, time groups, and date groups for template formatting + all_groups = {**groups, **time_groups, **date_groups} + + # Add formatted time strings for better display (handles minutes intelligently) + if time_info: + hour_24 = time_info['hour'] + minute = time_info['minute'] + + # If output_timezone is specified, convert the display time to that timezone + if output_tz: + # Create a datetime in the source timezone + temp_date = datetime.now(source_tz).replace(hour=hour_24, minute=minute, second=0, microsecond=0) + # Convert to output timezone + temp_date_output = temp_date.astimezone(output_tz) + # Extract converted hour and minute for display + hour_24 = temp_date_output.hour + minute = temp_date_output.minute + logger.debug(f"Converted display time from {source_tz} to {output_tz}: {hour_24}:{minute:02d}") + + # Format 24-hour time string - only include minutes if non-zero + if minute > 0: + all_groups['time24'] = f"{hour_24}:{minute:02d}" + else: + all_groups['time24'] = f"{hour_24:02d}:00" + + # Convert 24-hour to 12-hour format for {time} placeholder + # Note: hour_24 is ALWAYS in 24-hour format at this point (converted earlier if needed) + ampm = 'AM' if hour_24 < 12 else 'PM' + hour_12 = hour_24 + if hour_24 == 0: + hour_12 = 12 + elif hour_24 > 12: + hour_12 = hour_24 - 12 + + # Format 12-hour time string - only include minutes if non-zero + if minute > 0: + all_groups['time'] = f"{hour_12}:{minute:02d} {ampm}" + else: + all_groups['time'] = f"{hour_12} {ampm}" + + # Generate programs + programs = [] + + # If we have extracted time AND date, the event happens on a SPECIFIC date + # If we have time but NO date, generate for multiple days (existing behavior) + # All other days and times show "Upcoming" before or "Ended" after + event_happened = False + + # Determine how many iterations we need + if date_info and time_info: + # Specific date extracted - only generate for that 
one date + iterations = 1 + logger.debug(f"Date extracted, generating single event for specific date") + else: + # No specific date - use num_days (existing behavior) + iterations = num_days + + for day in range(iterations): + # Start from current time (like standard dummy) instead of midnight + # This ensures programs appear in the guide's current viewing window + day_start = now + timedelta(days=day) + day_end = day_start + timedelta(days=1) + + if time_info: + # We have an extracted event time - this is when the MAIN event starts + # The extracted time is in the SOURCE timezone (e.g., 8PM ET) + # We need to convert it to UTC for storage + + # Determine which date to use + if date_info: + # Use the extracted date from the channel title + current_date = datetime( + date_info['year'], + date_info['month'], + date_info['day'] + ).date() + logger.debug(f"Using extracted date: {current_date}") + else: + # No date extracted, use day offset from current time in SOURCE timezone + # This ensures we calculate "today" in the event's timezone, not UTC + # For example: 8:30 PM Central (1:30 AM UTC next day) for a 10 PM ET event + # should use today's date in ET, not tomorrow's date in UTC + now_in_source_tz = now.astimezone(source_tz) + current_date = (now_in_source_tz + timedelta(days=day)).date() + logger.debug(f"No date extracted, using day offset in {source_tz}: {current_date}") + + # Create a naive datetime (no timezone info) representing the event in source timezone + event_start_naive = datetime.combine( + current_date, + datetime.min.time().replace( + hour=time_info['hour'], + minute=time_info['minute'] + ) + ) + + # Use pytz to localize the naive datetime to the source timezone + # This automatically handles DST! + try: + event_start_local = source_tz.localize(event_start_naive) + # Convert to UTC + event_start_utc = event_start_local.astimezone(pytz.utc) + logger.debug(f"Converted {event_start_local} to UTC: {event_start_utc}") + except Exception as e: + logger.error(f"Error localizing time to {source_tz}: {e}") + # Fallback: treat as UTC + event_start_utc = django_timezone.make_aware(event_start_naive, pytz.utc) + + event_end_utc = event_start_utc + timedelta(minutes=program_duration) + + # Pre-generate the main event title and description for reuse + if title_template: + main_event_title = format_template(title_template, all_groups) + else: + title_parts = [] + if 'league' in all_groups and all_groups['league']: + title_parts.append(all_groups['league']) + if 'team1' in all_groups and 'team2' in all_groups: + title_parts.append(f"{all_groups['team1']} vs {all_groups['team2']}") + elif 'title' in all_groups and all_groups['title']: + title_parts.append(all_groups['title']) + main_event_title = ' - '.join(title_parts) if title_parts else channel_name + + if description_template: + main_event_description = format_template(description_template, all_groups) + else: + main_event_description = main_event_title + + + + # Determine if this day is before, during, or after the event + # Event only happens on day 0 (first day) + is_event_day = (day == 0) + + if is_event_day and not event_happened: + # This is THE day the event happens + # Fill programs BEFORE the event + current_time = day_start + + while current_time < event_start_utc: + program_start_utc = current_time + program_end_utc = min(current_time + timedelta(minutes=program_duration), event_start_utc) + + # Use custom upcoming templates if provided, otherwise use defaults + if upcoming_title_template: + upcoming_title = 
format_template(upcoming_title_template, all_groups) + else: + upcoming_title = main_event_title + + if upcoming_description_template: + upcoming_description = format_template(upcoming_description_template, all_groups) + else: + upcoming_description = f"Upcoming: {main_event_description}" + + # Build custom_properties for upcoming programs (only date, no category/live) + program_custom_properties = {} + + # Add date if requested (YYYY-MM-DD format from start time in event timezone) + if include_date: + # Convert UTC time to event timezone for date calculation + local_time = program_start_utc.astimezone(source_tz) + date_str = local_time.strftime('%Y-%m-%d') + program_custom_properties['date'] = date_str + + programs.append({ + "channel_id": channel_id, + "start_time": program_start_utc, + "end_time": program_end_utc, + "title": upcoming_title, + "description": upcoming_description, + "custom_properties": program_custom_properties, + }) + + current_time += timedelta(minutes=program_duration) + + # Add the MAIN EVENT at the extracted time + # Build custom_properties for main event (includes category and live) + main_event_custom_properties = {} + + # Add categories if provided + if categories: + main_event_custom_properties['categories'] = categories + + # Add date if requested (YYYY-MM-DD format from start time in event timezone) + if include_date: + # Convert UTC time to event timezone for date calculation + local_time = event_start_utc.astimezone(source_tz) + date_str = local_time.strftime('%Y-%m-%d') + main_event_custom_properties['date'] = date_str + + # Add live flag if requested + if include_live: + main_event_custom_properties['live'] = True + + programs.append({ + "channel_id": channel_id, + "start_time": event_start_utc, + "end_time": event_end_utc, + "title": main_event_title, + "description": main_event_description, + "custom_properties": main_event_custom_properties, + }) + + event_happened = True + + # Fill programs AFTER the event until end of day + current_time = event_end_utc + + while current_time < day_end: + program_start_utc = current_time + program_end_utc = min(current_time + timedelta(minutes=program_duration), day_end) + + # Use custom ended templates if provided, otherwise use defaults + if ended_title_template: + ended_title = format_template(ended_title_template, all_groups) + else: + ended_title = main_event_title + + if ended_description_template: + ended_description = format_template(ended_description_template, all_groups) + else: + ended_description = f"Ended: {main_event_description}" + + # Build custom_properties for ended programs (only date, no category/live) + program_custom_properties = {} + + # Add date if requested (YYYY-MM-DD format from start time in event timezone) + if include_date: + # Convert UTC time to event timezone for date calculation + local_time = program_start_utc.astimezone(source_tz) + date_str = local_time.strftime('%Y-%m-%d') + program_custom_properties['date'] = date_str + + programs.append({ + "channel_id": channel_id, + "start_time": program_start_utc, + "end_time": program_end_utc, + "title": ended_title, + "description": ended_description, + "custom_properties": program_custom_properties, + }) + + current_time += timedelta(minutes=program_duration) + else: + # This day is either before the event (future days) or after the event happened + # Fill entire day with appropriate message + current_time = day_start + + # If event already happened, all programs show "Ended" + # If event hasn't happened yet (shouldn't occur with day 0 
logic), show "Upcoming" + is_ended = event_happened + + while current_time < day_end: + program_start_utc = current_time + program_end_utc = min(current_time + timedelta(minutes=program_duration), day_end) + + # Use custom templates based on whether event has ended or is upcoming + if is_ended: + if ended_title_template: + program_title = format_template(ended_title_template, all_groups) + else: + program_title = main_event_title + + if ended_description_template: + program_description = format_template(ended_description_template, all_groups) + else: + program_description = f"Ended: {main_event_description}" + else: + if upcoming_title_template: + program_title = format_template(upcoming_title_template, all_groups) + else: + program_title = main_event_title + + if upcoming_description_template: + program_description = format_template(upcoming_description_template, all_groups) + else: + program_description = f"Upcoming: {main_event_description}" + + # Build custom_properties (only date for upcoming/ended filler programs) + program_custom_properties = {} + + # Add date if requested (YYYY-MM-DD format from start time in event timezone) + if include_date: + # Convert UTC time to event timezone for date calculation + local_time = program_start_utc.astimezone(source_tz) + date_str = local_time.strftime('%Y-%m-%d') + program_custom_properties['date'] = date_str + + programs.append({ + "channel_id": channel_id, + "start_time": program_start_utc, + "end_time": program_end_utc, + "title": program_title, + "description": program_description, + "custom_properties": program_custom_properties, + }) + + current_time += timedelta(minutes=program_duration) + else: + # No extracted time - fill entire day with regular intervals + # day_start and day_end are already in UTC, so no conversion needed + programs_per_day = max(1, int(24 / (program_duration / 60))) + + for program_num in range(programs_per_day): + program_start_utc = day_start + timedelta(minutes=program_num * program_duration) + program_end_utc = program_start_utc + timedelta(minutes=program_duration) + + if title_template: + title = format_template(title_template, all_groups) + else: + title_parts = [] + if 'league' in all_groups and all_groups['league']: + title_parts.append(all_groups['league']) + if 'team1' in all_groups and 'team2' in all_groups: + title_parts.append(f"{all_groups['team1']} vs {all_groups['team2']}") + elif 'title' in all_groups and all_groups['title']: + title_parts.append(all_groups['title']) + title = ' - '.join(title_parts) if title_parts else channel_name + + if description_template: + description = format_template(description_template, all_groups) + else: + description = title + + # Build custom_properties for this program + program_custom_properties = {} + + # Add categories if provided + if categories: + program_custom_properties['categories'] = categories + + # Add date if requested (YYYY-MM-DD format from start time in event timezone) + if include_date: + # Convert UTC time to event timezone for date calculation + local_time = program_start_utc.astimezone(source_tz) + date_str = local_time.strftime('%Y-%m-%d') + program_custom_properties['date'] = date_str + + # Add live flag if requested + if include_live: + program_custom_properties['live'] = True + + programs.append({ + "channel_id": channel_id, + "start_time": program_start_utc, + "end_time": program_end_utc, + "title": title, + "description": description, + "custom_properties": program_custom_properties, + }) + + logger.info(f"Generated {len(programs)} custom 
dummy programs for {channel_name}") + return programs + + def generate_dummy_epg( channel_id, channel_name, xml_lines=None, num_days=1, program_length_hours=4 ): @@ -294,6 +898,23 @@ def generate_dummy_epg( ) xml_lines.append(f" {html.escape(program['title'])}") xml_lines.append(f" {html.escape(program['description'])}") + + # Add custom_properties if present + custom_data = program.get('custom_properties', {}) + + # Categories + if 'categories' in custom_data: + for cat in custom_data['categories']: + xml_lines.append(f" {html.escape(cat)}") + + # Date tag + if 'date' in custom_data: + xml_lines.append(f" {html.escape(custom_data['date'])}") + + # Live tag + if custom_data.get('live', False): + xml_lines.append(f" ") + xml_lines.append(f" ") return xml_lines @@ -342,9 +963,9 @@ def generate_epg(request, profile_name=None, user=None): channels = Channel.objects.filter( channelprofilemembership__channel_profile=channel_profile, channelprofilemembership__enabled=True, - ) + ).order_by("channel_number") else: - channels = Channel.objects.all() + channels = Channel.objects.all().order_by("channel_number") # Check if the request wants to use direct logo URLs instead of cache use_cached_logos = request.GET.get('cachedlogos', 'true').lower() != 'false' @@ -367,7 +988,7 @@ def generate_epg(request, profile_name=None, user=None): dummy_days = num_days if num_days > 0 else 3 # Calculate cutoff date for EPG data filtering (only if days > 0) - now = timezone.now() + now = django_timezone.now() cutoff_date = now + timedelta(days=num_days) if num_days > 0 else None # Process channels for the section @@ -434,12 +1055,38 @@ def generate_epg(request, profile_name=None, user=None): # Default to channel number channel_id = str(formatted_channel_number) if formatted_channel_number != "" else str(channel.id) + # Use EPG data name for display, but channel name for pattern matching display_name = channel.epg_data.name if channel.epg_data else channel.name + # For dummy EPG pattern matching, determine which name to use + pattern_match_name = channel.name + + # Check if we should use stream name instead of channel name + if channel.epg_data and channel.epg_data.epg_source: + epg_source = channel.epg_data.epg_source + if epg_source.custom_properties: + custom_props = epg_source.custom_properties + name_source = custom_props.get('name_source') + + if name_source == 'stream': + stream_index = custom_props.get('stream_index', 1) - 1 + channel_streams = channel.streams.all().order_by('channelstream__order') + + if channel_streams.exists() and 0 <= stream_index < channel_streams.count(): + stream = list(channel_streams)[stream_index] + pattern_match_name = stream.name + logger.debug(f"Using stream name for parsing: {pattern_match_name} (stream index: {stream_index})") + else: + logger.warning(f"Stream index {stream_index} not found for channel {channel.name}, falling back to channel name") if not channel.epg_data: # Use the enhanced dummy EPG generation function with defaults program_length_hours = 4 # Default to 4-hour program blocks - dummy_programs = generate_dummy_programs(channel_id, display_name, num_days=dummy_days, program_length_hours=program_length_hours) + dummy_programs = generate_dummy_programs( + channel_id, pattern_match_name, + num_days=dummy_days, + program_length_hours=program_length_hours, + epg_source=None + ) for program in dummy_programs: # Format times in XMLTV format @@ -450,246 +1097,320 @@ def generate_epg(request, profile_name=None, user=None): yield f' \n' yield f" 
{html.escape(program['title'])}\n" yield f" {html.escape(program['description'])}\n" + + # Add custom_properties if present + custom_data = program.get('custom_properties', {}) + + # Categories + if 'categories' in custom_data: + for cat in custom_data['categories']: + yield f" {html.escape(cat)}\n" + + # Date tag + if 'date' in custom_data: + yield f" {html.escape(custom_data['date'])}\n" + + # Live tag + if custom_data.get('live', False): + yield f" \n" + yield f" \n" else: + # Check if this is a dummy EPG with no programs (generate on-demand) + if channel.epg_data.epg_source and channel.epg_data.epg_source.source_type == 'dummy': + # This is a custom dummy EPG - check if it has programs + if not channel.epg_data.programs.exists(): + # No programs stored, generate on-demand using custom patterns + # Use actual channel name for pattern matching + program_length_hours = 4 + dummy_programs = generate_dummy_programs( + channel_id, pattern_match_name, + num_days=dummy_days, + program_length_hours=program_length_hours, + epg_source=channel.epg_data.epg_source + ) + + for program in dummy_programs: + start_str = program['start_time'].strftime("%Y%m%d%H%M%S %z") + stop_str = program['end_time'].strftime("%Y%m%d%H%M%S %z") + + yield f' \n' + yield f" {html.escape(program['title'])}\n" + yield f" {html.escape(program['description'])}\n" + + # Add custom_properties if present + custom_data = program.get('custom_properties', {}) + + # Categories + if 'categories' in custom_data: + for cat in custom_data['categories']: + yield f" {html.escape(cat)}\n" + + # Date tag + if 'date' in custom_data: + yield f" {html.escape(custom_data['date'])}\n" + + # Live tag + if custom_data.get('live', False): + yield f" \n" + + yield f" \n" + + continue # Skip to next channel + # For real EPG data - filter only if days parameter was specified if num_days > 0: - programs = channel.epg_data.programs.filter( + programs_qs = channel.epg_data.programs.filter( start_time__gte=now, start_time__lt=cutoff_date - ) + ).order_by('id') # Explicit ordering for consistent chunking else: # Return all programs if days=0 or not specified - programs = channel.epg_data.programs.all() + programs_qs = channel.epg_data.programs.all().order_by('id') - # Process programs in chunks to avoid memory issues + # Process programs in chunks to avoid cursor timeout issues program_batch = [] - batch_size = 100 + batch_size = 250 + chunk_size = 1000 # Fetch 1000 programs at a time from DB - for prog in programs.iterator(): # Use iterator to avoid loading all at once - start_str = prog.start_time.strftime("%Y%m%d%H%M%S %z") - stop_str = prog.end_time.strftime("%Y%m%d%H%M%S %z") + # Fetch chunks until no more results (avoids count() query) + offset = 0 + while True: + # Fetch a chunk of programs - this closes the cursor after fetching + program_chunk = list(programs_qs[offset:offset + chunk_size]) - program_xml = [f' '] - program_xml.append(f' {html.escape(prog.title)}') + # Break if no more programs + if not program_chunk: + break - # Add subtitle if available - if prog.sub_title: - program_xml.append(f" {html.escape(prog.sub_title)}") + # Process each program in the chunk + for prog in program_chunk: + start_str = prog.start_time.strftime("%Y%m%d%H%M%S %z") + stop_str = prog.end_time.strftime("%Y%m%d%H%M%S %z") - # Add description if available - if prog.description: - program_xml.append(f" {html.escape(prog.description)}") + program_xml = [f' '] + program_xml.append(f' {html.escape(prog.title)}') - # Process custom properties if available - if 
prog.custom_properties: - custom_data = prog.custom_properties or {} + # Add subtitle if available + if prog.sub_title: + program_xml.append(f" {html.escape(prog.sub_title)}") - # Add categories if available - if "categories" in custom_data and custom_data["categories"]: - for category in custom_data["categories"]: - program_xml.append(f" {html.escape(category)}") + # Add description if available + if prog.description: + program_xml.append(f" {html.escape(prog.description)}") - # Add keywords if available - if "keywords" in custom_data and custom_data["keywords"]: - for keyword in custom_data["keywords"]: - program_xml.append(f" {html.escape(keyword)}") + # Process custom properties if available + if prog.custom_properties: + custom_data = prog.custom_properties or {} - # Handle episode numbering - multiple formats supported - # Prioritize onscreen_episode over standalone episode for onscreen system - if "onscreen_episode" in custom_data: - program_xml.append(f' {html.escape(custom_data["onscreen_episode"])}') - elif "episode" in custom_data: - program_xml.append(f' E{custom_data["episode"]}') + # Add categories if available + if "categories" in custom_data and custom_data["categories"]: + for category in custom_data["categories"]: + program_xml.append(f" {html.escape(category)}") - # Handle dd_progid format - if 'dd_progid' in custom_data: - program_xml.append(f' {html.escape(custom_data["dd_progid"])}') + # Add keywords if available + if "keywords" in custom_data and custom_data["keywords"]: + for keyword in custom_data["keywords"]: + program_xml.append(f" {html.escape(keyword)}") - # Handle external database IDs - for system in ['thetvdb.com', 'themoviedb.org', 'imdb.com']: - if f'{system}_id' in custom_data: - program_xml.append(f' {html.escape(custom_data[f"{system}_id"])}') + # Handle episode numbering - multiple formats supported + # Prioritize onscreen_episode over standalone episode for onscreen system + if "onscreen_episode" in custom_data: + program_xml.append(f' {html.escape(custom_data["onscreen_episode"])}') + elif "episode" in custom_data: + program_xml.append(f' E{custom_data["episode"]}') - # Add season and episode numbers in xmltv_ns format if available - if "season" in custom_data and "episode" in custom_data: - season = ( - int(custom_data["season"]) - 1 - if str(custom_data["season"]).isdigit() - else 0 - ) - episode = ( - int(custom_data["episode"]) - 1 - if str(custom_data["episode"]).isdigit() - else 0 - ) - program_xml.append(f' {season}.{episode}.') + # Handle dd_progid format + if 'dd_progid' in custom_data: + program_xml.append(f' {html.escape(custom_data["dd_progid"])}') - # Add language information - if "language" in custom_data: - program_xml.append(f' {html.escape(custom_data["language"])}') + # Handle external database IDs + for system in ['thetvdb.com', 'themoviedb.org', 'imdb.com']: + if f'{system}_id' in custom_data: + program_xml.append(f' {html.escape(custom_data[f"{system}_id"])}') - if "original_language" in custom_data: - program_xml.append(f' {html.escape(custom_data["original_language"])}') + # Add season and episode numbers in xmltv_ns format if available + if "season" in custom_data and "episode" in custom_data: + season = ( + int(custom_data["season"]) - 1 + if str(custom_data["season"]).isdigit() + else 0 + ) + episode = ( + int(custom_data["episode"]) - 1 + if str(custom_data["episode"]).isdigit() + else 0 + ) + program_xml.append(f' {season}.{episode}.') - # Add length information - if "length" in custom_data and 
isinstance(custom_data["length"], dict): - length_value = custom_data["length"].get("value", "") - length_units = custom_data["length"].get("units", "minutes") - program_xml.append(f' {html.escape(str(length_value))}') + # Add language information + if "language" in custom_data: + program_xml.append(f' {html.escape(custom_data["language"])}') - # Add video information - if "video" in custom_data and isinstance(custom_data["video"], dict): - program_xml.append(" ") + if "original_language" in custom_data: + program_xml.append(f' {html.escape(custom_data["original_language"])}') - # Add audio information - if "audio" in custom_data and isinstance(custom_data["audio"], dict): - program_xml.append(" ") + # Add length information + if "length" in custom_data and isinstance(custom_data["length"], dict): + length_value = custom_data["length"].get("value", "") + length_units = custom_data["length"].get("units", "minutes") + program_xml.append(f' {html.escape(str(length_value))}') - # Add subtitles information - if "subtitles" in custom_data and isinstance(custom_data["subtitles"], list): - for subtitle in custom_data["subtitles"]: - if isinstance(subtitle, dict): - subtitle_type = subtitle.get("type", "") - type_attr = f' type="{html.escape(subtitle_type)}"' if subtitle_type else "" - program_xml.append(f" ") - if "language" in subtitle: - program_xml.append(f" {html.escape(subtitle['language'])}") - program_xml.append(" ") + # Add video information + if "video" in custom_data and isinstance(custom_data["video"], dict): + program_xml.append(" ") - # Add rating if available - if "rating" in custom_data: - rating_system = custom_data.get("rating_system", "TV Parental Guidelines") - program_xml.append(f' ') - program_xml.append(f' {html.escape(custom_data["rating"])}') - program_xml.append(f" ") + # Add audio information + if "audio" in custom_data and isinstance(custom_data["audio"], dict): + program_xml.append(" ") - # Add star ratings - if "star_ratings" in custom_data and isinstance(custom_data["star_ratings"], list): - for star_rating in custom_data["star_ratings"]: - if isinstance(star_rating, dict) and "value" in star_rating: - system_attr = f' system="{html.escape(star_rating["system"])}"' if "system" in star_rating else "" - program_xml.append(f" ") - program_xml.append(f" {html.escape(star_rating['value'])}") - program_xml.append(" ") + # Add subtitles information + if "subtitles" in custom_data and isinstance(custom_data["subtitles"], list): + for subtitle in custom_data["subtitles"]: + if isinstance(subtitle, dict): + subtitle_type = subtitle.get("type", "") + type_attr = f' type="{html.escape(subtitle_type)}"' if subtitle_type else "" + program_xml.append(f" ") + if "language" in subtitle: + program_xml.append(f" {html.escape(subtitle['language'])}") + program_xml.append(" ") - # Add reviews - if "reviews" in custom_data and isinstance(custom_data["reviews"], list): - for review in custom_data["reviews"]: - if isinstance(review, dict) and "content" in review: - review_type = review.get("type", "text") - attrs = [f'type="{html.escape(review_type)}"'] - if "source" in review: - attrs.append(f'source="{html.escape(review["source"])}"') - if "reviewer" in review: - attrs.append(f'reviewer="{html.escape(review["reviewer"])}"') - attr_str = " ".join(attrs) - program_xml.append(f' {html.escape(review["content"])}') + # Add rating if available + if "rating" in custom_data: + rating_system = custom_data.get("rating_system", "TV Parental Guidelines") + program_xml.append(f' ') + 
program_xml.append(f' {html.escape(custom_data["rating"])}') + program_xml.append(f" ") - # Add images - if "images" in custom_data and isinstance(custom_data["images"], list): - for image in custom_data["images"]: - if isinstance(image, dict) and "url" in image: - attrs = [] - for attr in ['type', 'size', 'orient', 'system']: - if attr in image: - attrs.append(f'{attr}="{html.escape(image[attr])}"') - attr_str = " " + " ".join(attrs) if attrs else "" - program_xml.append(f' {html.escape(image["url"])}') + # Add star ratings + if "star_ratings" in custom_data and isinstance(custom_data["star_ratings"], list): + for star_rating in custom_data["star_ratings"]: + if isinstance(star_rating, dict) and "value" in star_rating: + system_attr = f' system="{html.escape(star_rating["system"])}"' if "system" in star_rating else "" + program_xml.append(f" ") + program_xml.append(f" {html.escape(star_rating['value'])}") + program_xml.append(" ") - # Add enhanced credits handling - if "credits" in custom_data: - program_xml.append(" ") - credits = custom_data["credits"] + # Add reviews + if "reviews" in custom_data and isinstance(custom_data["reviews"], list): + for review in custom_data["reviews"]: + if isinstance(review, dict) and "content" in review: + review_type = review.get("type", "text") + attrs = [f'type="{html.escape(review_type)}"'] + if "source" in review: + attrs.append(f'source="{html.escape(review["source"])}"') + if "reviewer" in review: + attrs.append(f'reviewer="{html.escape(review["reviewer"])}"') + attr_str = " ".join(attrs) + program_xml.append(f' {html.escape(review["content"])}') - # Handle different credit types - for role in ['director', 'writer', 'adapter', 'producer', 'composer', 'editor', 'presenter', 'commentator', 'guest']: - if role in credits: - people = credits[role] - if isinstance(people, list): - for person in people: - program_xml.append(f" <{role}>{html.escape(person)}") - else: - program_xml.append(f" <{role}>{html.escape(people)}") + # Add images + if "images" in custom_data and isinstance(custom_data["images"], list): + for image in custom_data["images"]: + if isinstance(image, dict) and "url" in image: + attrs = [] + for attr in ['type', 'size', 'orient', 'system']: + if attr in image: + attrs.append(f'{attr}="{html.escape(image[attr])}"') + attr_str = " " + " ".join(attrs) if attrs else "" + program_xml.append(f' {html.escape(image["url"])}') - # Handle actors separately to include role and guest attributes - if "actor" in credits: - actors = credits["actor"] - if isinstance(actors, list): - for actor in actors: - if isinstance(actor, dict): - name = actor.get("name", "") - role_attr = f' role="{html.escape(actor["role"])}"' if "role" in actor else "" - guest_attr = ' guest="yes"' if actor.get("guest") else "" - program_xml.append(f" {html.escape(name)}") + # Add enhanced credits handling + if "credits" in custom_data: + program_xml.append(" ") + credits = custom_data["credits"] + + # Handle different credit types + for role in ['director', 'writer', 'adapter', 'producer', 'composer', 'editor', 'presenter', 'commentator', 'guest']: + if role in credits: + people = credits[role] + if isinstance(people, list): + for person in people: + program_xml.append(f" <{role}>{html.escape(person)}") else: - program_xml.append(f" {html.escape(actor)}") + program_xml.append(f" <{role}>{html.escape(people)}") + + # Handle actors separately to include role and guest attributes + if "actor" in credits: + actors = credits["actor"] + if isinstance(actors, list): + for actor in 
actors: + if isinstance(actor, dict): + name = actor.get("name", "") + role_attr = f' role="{html.escape(actor["role"])}"' if "role" in actor else "" + guest_attr = ' guest="yes"' if actor.get("guest") else "" + program_xml.append(f" {html.escape(name)}") + else: + program_xml.append(f" {html.escape(actor)}") + else: + program_xml.append(f" {html.escape(actors)}") + + program_xml.append(" ") + + # Add program date if available (full date, not just year) + if "date" in custom_data: + program_xml.append(f' {html.escape(custom_data["date"])}') + + # Add country if available + if "country" in custom_data: + program_xml.append(f' {html.escape(custom_data["country"])}') + + # Add icon if available + if "icon" in custom_data: + program_xml.append(f' ') + + # Add special flags as proper tags with enhanced handling + if custom_data.get("previously_shown", False): + prev_shown_details = custom_data.get("previously_shown_details", {}) + attrs = [] + if "start" in prev_shown_details: + attrs.append(f'start="{html.escape(prev_shown_details["start"])}"') + if "channel" in prev_shown_details: + attrs.append(f'channel="{html.escape(prev_shown_details["channel"])}"') + attr_str = " " + " ".join(attrs) if attrs else "" + program_xml.append(f" ") + + if custom_data.get("premiere", False): + premiere_text = custom_data.get("premiere_text", "") + if premiere_text: + program_xml.append(f" {html.escape(premiere_text)}") else: - program_xml.append(f" {html.escape(actors)}") + program_xml.append(" ") - program_xml.append(" ") + if custom_data.get("last_chance", False): + last_chance_text = custom_data.get("last_chance_text", "") + if last_chance_text: + program_xml.append(f" {html.escape(last_chance_text)}") + else: + program_xml.append(" ") - # Add program date if available (full date, not just year) - if "date" in custom_data: - program_xml.append(f' {html.escape(custom_data["date"])}') + if custom_data.get("new", False): + program_xml.append(" ") - # Add country if available - if "country" in custom_data: - program_xml.append(f' {html.escape(custom_data["country"])}') + if custom_data.get('live', False): + program_xml.append(' ') - # Add icon if available - if "icon" in custom_data: - program_xml.append(f' ') + program_xml.append(" ") - # Add special flags as proper tags with enhanced handling - if custom_data.get("previously_shown", False): - prev_shown_details = custom_data.get("previously_shown_details", {}) - attrs = [] - if "start" in prev_shown_details: - attrs.append(f'start="{html.escape(prev_shown_details["start"])}"') - if "channel" in prev_shown_details: - attrs.append(f'channel="{html.escape(prev_shown_details["channel"])}"') - attr_str = " " + " ".join(attrs) if attrs else "" - program_xml.append(f" ") + # Add to batch + program_batch.extend(program_xml) - if custom_data.get("premiere", False): - premiere_text = custom_data.get("premiere_text", "") - if premiere_text: - program_xml.append(f" {html.escape(premiere_text)}") - else: - program_xml.append(" ") + # Send batch when full or send keep-alive + if len(program_batch) >= batch_size: + yield '\n'.join(program_batch) + '\n' + program_batch = [] - if custom_data.get("last_chance", False): - last_chance_text = custom_data.get("last_chance_text", "") - if last_chance_text: - program_xml.append(f" {html.escape(last_chance_text)}") - else: - program_xml.append(" ") - - if custom_data.get("new", False): - program_xml.append(" ") - - if custom_data.get('live', False): - program_xml.append(' ') - - program_xml.append(" ") - - # Add to batch - 
program_batch.extend(program_xml) - - # Send batch when full or send keep-alive - if len(program_batch) >= batch_size: - yield '\n'.join(program_batch) + '\n' - program_batch = [] # Send keep-alive every batch + # Move to next chunk + offset += chunk_size # Send remaining programs in batch if program_batch: @@ -998,14 +1719,34 @@ def xc_get_epg(request, user, short=False): limit = request.GET.get('limit', 4) if channel.epg_data: - if short == False: - programs = channel.epg_data.programs.filter( - start_time__gte=timezone.now() - ).order_by('start_time') + # Check if this is a dummy EPG that generates on-demand + if channel.epg_data.epg_source and channel.epg_data.epg_source.source_type == 'dummy': + if not channel.epg_data.programs.exists(): + # Generate on-demand using custom patterns + programs = generate_dummy_programs( + channel_id=channel_id, + channel_name=channel.name, + epg_source=channel.epg_data.epg_source + ) + else: + # Has stored programs, use them + if short == False: + programs = channel.epg_data.programs.filter( + start_time__gte=django_timezone.now() + ).order_by('start_time') + else: + programs = channel.epg_data.programs.all().order_by('start_time')[:limit] else: - programs = channel.epg_data.programs.all().order_by('start_time')[:limit] + # Regular EPG with stored programs + if short == False: + programs = channel.epg_data.programs.filter( + start_time__gte=django_timezone.now() + ).order_by('start_time') + else: + programs = channel.epg_data.programs.all().order_by('start_time')[:limit] else: - programs = generate_dummy_programs(channel_id=channel_id, channel_name=channel.name) + # No EPG data assigned, generate default dummy + programs = generate_dummy_programs(channel_id=channel_id, channel_name=channel.name, epg_source=None) output = {"epg_listings": []} for program in programs: @@ -1032,7 +1773,7 @@ def xc_get_epg(request, user, short=False): } if short == False: - program_output["now_playing"] = 1 if start <= timezone.now() <= end else 0 + program_output["now_playing"] = 1 if start <= django_timezone.now() <= end else 0 program_output["has_archive"] = "0" output['epg_listings'].append(program_output) @@ -1217,7 +1958,7 @@ def xc_get_series_info(request, user, series_id): try: should_refresh = ( not series_relation.last_episode_refresh or - series_relation.last_episode_refresh < timezone.now() - timedelta(hours=24) + series_relation.last_episode_refresh < django_timezone.now() - timedelta(hours=24) ) # Check if detailed data has been fetched diff --git a/apps/proxy/config.py b/apps/proxy/config.py index 9ce5b66c..74bfc61f 100644 --- a/apps/proxy/config.py +++ b/apps/proxy/config.py @@ -1,4 +1,6 @@ """Shared configuration between proxy types""" +import time +from django.db import connection class BaseConfig: DEFAULT_USER_AGENT = 'VLC/3.0.20 LibVLC/3.0.20' # Will only be used if connection to settings fail @@ -12,13 +14,29 @@ class BaseConfig: BUFFERING_TIMEOUT = 15 # Seconds to wait for buffering before switching streams BUFFER_SPEED = 1 # What speed to condsider the stream buffering, 1x is normal speed, 2x is double speed, etc. 
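The xc_get_epg changes above reduce to a three-way choice: generate a default dummy schedule when a channel has no EPG data at all, generate on demand from the source's patterns when the assigned source is a dummy type with no stored programs, and otherwise serve the stored programs (optionally truncated for the short listing). A condensed sketch of that selection, reusing the names from the hunk above (generate_dummy_programs, epg_data, epg_source.source_type); the standalone helper and its signature are illustrative only:

# Sketch of the xc_get_epg program selection; pick_programs is a hypothetical helper.
def pick_programs(channel, channel_id, now, short=False, limit=4):
    epg = channel.epg_data
    if epg is None:
        # No EPG assigned: fall back to the default dummy schedule.
        return generate_dummy_programs(channel_id=channel_id,
                                       channel_name=channel.name,
                                       epg_source=None)
    source = epg.epg_source
    if source and source.source_type == 'dummy' and not epg.programs.exists():
        # Custom dummy source with nothing stored: build its schedule on demand.
        return generate_dummy_programs(channel_id=channel_id,
                                       channel_name=channel.name,
                                       epg_source=source)
    if short:
        return epg.programs.all().order_by('start_time')[:limit]
    return epg.programs.filter(start_time__gte=now).order_by('start_time')

The same three-way split drives the XMLTV generator earlier in this file, so both endpoints agree on when dummy data is produced.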
+ # Cache for proxy settings (class-level, shared across all instances) + _proxy_settings_cache = None + _proxy_settings_cache_time = 0 + _proxy_settings_cache_ttl = 10 # Cache for 10 seconds + @classmethod def get_proxy_settings(cls): - """Get proxy settings from CoreSettings JSON data with fallback to defaults""" + """Get proxy settings from CoreSettings JSON data with fallback to defaults (cached)""" + # Check if cache is still valid + now = time.time() + if cls._proxy_settings_cache is not None and (now - cls._proxy_settings_cache_time) < cls._proxy_settings_cache_ttl: + return cls._proxy_settings_cache + + # Cache miss or expired - fetch from database try: from core.models import CoreSettings - return CoreSettings.get_proxy_settings() + settings = CoreSettings.get_proxy_settings() + cls._proxy_settings_cache = settings + cls._proxy_settings_cache_time = now + return settings + except Exception: + # Return defaults if database query fails return { "buffering_timeout": 15, "buffering_speed": 1.0, @@ -26,6 +44,13 @@ class BaseConfig: "channel_shutdown_delay": 0, "channel_init_grace_period": 5, } + + finally: + # Always close the connection after reading settings + try: + connection.close() + except Exception: + pass @classmethod def get_redis_chunk_ttl(cls): diff --git a/apps/proxy/ts_proxy/client_manager.py b/apps/proxy/ts_proxy/client_manager.py index d4b83d3a..3d89b3b8 100644 --- a/apps/proxy/ts_proxy/client_manager.py +++ b/apps/proxy/ts_proxy/client_manager.py @@ -8,7 +8,7 @@ import gevent from typing import Set, Optional from apps.proxy.config import TSConfig as Config from redis.exceptions import ConnectionError, TimeoutError -from .constants import EventType +from .constants import EventType, ChannelState, ChannelMetadataField from .config_helper import ConfigHelper from .redis_keys import RedisKeys from .utils import get_logger @@ -26,6 +26,7 @@ class ClientManager: self.lock = threading.Lock() self.last_active_time = time.time() self.worker_id = worker_id # Store worker ID as instance variable + self._heartbeat_running = True # Flag to control heartbeat thread # STANDARDIZED KEYS: Move client set under channel namespace self.client_set_key = RedisKeys.clients(channel_id) @@ -77,56 +78,28 @@ class ClientManager: logger.debug(f"Failed to trigger stats update: {e}") def _start_heartbeat_thread(self): - """Start thread to regularly refresh client presence in Redis""" + """Start thread to regularly refresh client presence in Redis for local clients""" def heartbeat_task(): - no_clients_count = 0 # Track consecutive empty cycles - max_empty_cycles = 3 # Exit after this many consecutive empty checks - logger.debug(f"Started heartbeat thread for channel {self.channel_id} (interval: {self.heartbeat_interval}s)") - while True: + while self._heartbeat_running: try: - # Wait for the interval - gevent.sleep(self.heartbeat_interval) + # Wait for the interval, but check stop flag frequently for quick shutdown + # Sleep in 1-second increments to allow faster response to stop signal + for _ in range(int(self.heartbeat_interval)): + if not self._heartbeat_running: + break + time.sleep(1) + + # Final check before doing work + if not self._heartbeat_running: + break # Send heartbeat for all local clients with self.lock: - if not self.clients or not self.redis_client: - # No clients left, increment our counter - no_clients_count += 1 - - # Check if we're in a shutdown delay period before exiting - in_shutdown_delay = False - if self.redis_client: - try: - disconnect_key = 
RedisKeys.last_client_disconnect(self.channel_id) - disconnect_time_bytes = self.redis_client.get(disconnect_key) - if disconnect_time_bytes: - disconnect_time = float(disconnect_time_bytes.decode('utf-8')) - elapsed = time.time() - disconnect_time - shutdown_delay = ConfigHelper.channel_shutdown_delay() - - if elapsed < shutdown_delay: - in_shutdown_delay = True - logger.debug(f"Channel {self.channel_id} in shutdown delay: {elapsed:.1f}s of {shutdown_delay}s elapsed") - except Exception as e: - logger.debug(f"Error checking shutdown delay: {e}") - - # Only exit if we've seen no clients for several consecutive checks AND we're not in shutdown delay - if no_clients_count >= max_empty_cycles and not in_shutdown_delay: - logger.info(f"No clients for channel {self.channel_id} after {no_clients_count} consecutive checks and not in shutdown delay, exiting heartbeat thread") - return # This exits the thread - - # Skip this cycle if we have no clients but continue if in shutdown delay - if not in_shutdown_delay: - continue - else: - # Reset counter during shutdown delay to prevent premature exit - no_clients_count = 0 - continue - else: - # Reset counter when we see clients - no_clients_count = 0 + # Skip this cycle if we have no local clients + if not self.clients: + continue # IMPROVED GHOST DETECTION: Check for stale clients before sending heartbeats current_time = time.time() @@ -197,11 +170,20 @@ class ClientManager: except Exception as e: logger.error(f"Error in client heartbeat thread: {e}") + logger.debug(f"Heartbeat thread exiting for channel {self.channel_id}") + thread = threading.Thread(target=heartbeat_task, daemon=True) thread.name = f"client-heartbeat-{self.channel_id}" thread.start() logger.debug(f"Started client heartbeat thread for channel {self.channel_id} (interval: {self.heartbeat_interval}s)") + def stop(self): + """Stop the heartbeat thread and cleanup""" + logger.debug(f"Stopping ClientManager for channel {self.channel_id}") + self._heartbeat_running = False + # Give the thread a moment to exit gracefully + # Note: We don't join() here because it's a daemon thread and will exit on its own + def _execute_redis_command(self, command_func): """Execute Redis command with error handling""" if not self.redis_client: diff --git a/apps/proxy/ts_proxy/config_helper.py b/apps/proxy/ts_proxy/config_helper.py index d59fa1f9..d7d33558 100644 --- a/apps/proxy/ts_proxy/config_helper.py +++ b/apps/proxy/ts_proxy/config_helper.py @@ -100,3 +100,12 @@ class ConfigHelper: def channel_init_grace_period(): """Get channel initialization grace period in seconds""" return Config.get_channel_init_grace_period() + + @staticmethod + def chunk_timeout(): + """ + Get chunk timeout in seconds (used for both socket and HTTP read timeouts). + This controls how long we wait for each chunk before timing out. + Set this higher (e.g., 30s) for slow providers that may have intermittent delays. + """ + return ConfigHelper.get('CHUNK_TIMEOUT', 5) # Default 5 seconds diff --git a/apps/proxy/ts_proxy/http_streamer.py b/apps/proxy/ts_proxy/http_streamer.py new file mode 100644 index 00000000..147d2c93 --- /dev/null +++ b/apps/proxy/ts_proxy/http_streamer.py @@ -0,0 +1,138 @@ +""" +HTTP Stream Reader - Thread-based HTTP stream reader that writes to a pipe. +This allows us to use the same fetch_chunk() path for both transcode and HTTP streams. 
+""" + +import threading +import os +import requests +from requests.adapters import HTTPAdapter +from .utils import get_logger + +logger = get_logger() + + +class HTTPStreamReader: + """Thread-based HTTP stream reader that writes to a pipe""" + + def __init__(self, url, user_agent=None, chunk_size=8192): + self.url = url + self.user_agent = user_agent + self.chunk_size = chunk_size + self.session = None + self.response = None + self.thread = None + self.pipe_read = None + self.pipe_write = None + self.running = False + + def start(self): + """Start the HTTP stream reader thread""" + # Create a pipe (works on Windows and Unix) + self.pipe_read, self.pipe_write = os.pipe() + + # Start the reader thread + self.running = True + self.thread = threading.Thread(target=self._read_stream, daemon=True) + self.thread.start() + + logger.info(f"Started HTTP stream reader thread for {self.url}") + return self.pipe_read + + def _read_stream(self): + """Thread worker that reads HTTP stream and writes to pipe""" + try: + # Build headers + headers = {} + if self.user_agent: + headers['User-Agent'] = self.user_agent + + logger.info(f"HTTP reader connecting to {self.url}") + + # Create session + self.session = requests.Session() + + # Disable retries for faster failure detection + adapter = HTTPAdapter(max_retries=0, pool_connections=1, pool_maxsize=1) + self.session.mount('http://', adapter) + self.session.mount('https://', adapter) + + # Stream the URL + self.response = self.session.get( + self.url, + headers=headers, + stream=True, + timeout=(5, 30) # 5s connect, 30s read + ) + + if self.response.status_code != 200: + logger.error(f"HTTP {self.response.status_code} from {self.url}") + return + + logger.info(f"HTTP reader connected successfully, streaming data...") + + # Stream chunks to pipe + chunk_count = 0 + for chunk in self.response.iter_content(chunk_size=self.chunk_size): + if not self.running: + break + + if chunk: + try: + # Write binary data to pipe + os.write(self.pipe_write, chunk) + chunk_count += 1 + + # Log progress periodically + if chunk_count % 1000 == 0: + logger.debug(f"HTTP reader streamed {chunk_count} chunks") + except OSError as e: + logger.error(f"Pipe write error: {e}") + break + + logger.info("HTTP stream ended") + + except requests.exceptions.RequestException as e: + logger.error(f"HTTP reader request error: {e}") + except Exception as e: + logger.error(f"HTTP reader unexpected error: {e}", exc_info=True) + finally: + self.running = False + # Close write end of pipe to signal EOF + try: + if self.pipe_write is not None: + os.close(self.pipe_write) + self.pipe_write = None + except: + pass + + def stop(self): + """Stop the HTTP stream reader""" + logger.info("Stopping HTTP stream reader") + self.running = False + + # Close response + if self.response: + try: + self.response.close() + except: + pass + + # Close session + if self.session: + try: + self.session.close() + except: + pass + + # Close write end of pipe + if self.pipe_write is not None: + try: + os.close(self.pipe_write) + self.pipe_write = None + except: + pass + + # Wait for thread + if self.thread and self.thread.is_alive(): + self.thread.join(timeout=2.0) diff --git a/apps/proxy/ts_proxy/server.py b/apps/proxy/ts_proxy/server.py index da5daaa7..cca827a9 100644 --- a/apps/proxy/ts_proxy/server.py +++ b/apps/proxy/ts_proxy/server.py @@ -131,6 +131,8 @@ class ProxyServer: max_retries = 10 base_retry_delay = 1 # Start with 1 second delay max_retry_delay = 30 # Cap at 30 seconds + pubsub_client = None + pubsub = None 
while True: try: @@ -339,20 +341,27 @@ class ProxyServer: logger.error(f"Error in event listener: {e}. Retrying in {final_delay:.1f}s (attempt {retry_count})") gevent.sleep(final_delay) # REPLACE: time.sleep(final_delay) - # Try to clean up the old connection - try: - if 'pubsub' in locals(): - pubsub.close() - if 'pubsub_client' in locals(): - pubsub_client.close() - except: - pass - except Exception as e: logger.error(f"Error in event listener: {e}") # Add a short delay to prevent rapid retries on persistent errors gevent.sleep(5) # REPLACE: time.sleep(5) + finally: + # Always clean up PubSub connections in all error paths + try: + if pubsub: + pubsub.close() + pubsub = None + except Exception as e: + logger.debug(f"Error closing pubsub: {e}") + + try: + if pubsub_client: + pubsub_client.close() + pubsub_client = None + except Exception as e: + logger.debug(f"Error closing pubsub_client: {e}") + thread = threading.Thread(target=event_listener, daemon=True) thread.name = "redis-event-listener" thread.start() @@ -486,17 +495,18 @@ class ProxyServer: ) return True - # Create buffer and client manager instances - buffer = StreamBuffer(channel_id, redis_client=self.redis_client) - client_manager = ClientManager( - channel_id, - redis_client=self.redis_client, - worker_id=self.worker_id - ) + # Create buffer and client manager instances (or reuse if they exist) + if channel_id not in self.stream_buffers: + buffer = StreamBuffer(channel_id, redis_client=self.redis_client) + self.stream_buffers[channel_id] = buffer - # Store in local tracking - self.stream_buffers[channel_id] = buffer - self.client_managers[channel_id] = client_manager + if channel_id not in self.client_managers: + client_manager = ClientManager( + channel_id, + redis_client=self.redis_client, + worker_id=self.worker_id + ) + self.client_managers[channel_id] = client_manager # IMPROVED: Set initializing state in Redis BEFORE any other operations if self.redis_client: @@ -550,13 +560,15 @@ class ProxyServer: logger.info(f"Channel {channel_id} already owned by worker {current_owner}") logger.info(f"This worker ({self.worker_id}) will read from Redis buffer only") - # Create buffer but not stream manager - buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client) - self.stream_buffers[channel_id] = buffer + # Create buffer but not stream manager (only if not already exists) + if channel_id not in self.stream_buffers: + buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client) + self.stream_buffers[channel_id] = buffer - # Create client manager with channel_id and redis_client - client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id) - self.client_managers[channel_id] = client_manager + # Create client manager with channel_id and redis_client (only if not already exists) + if channel_id not in self.client_managers: + client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id) + self.client_managers[channel_id] = client_manager return True @@ -571,13 +583,15 @@ class ProxyServer: # Another worker just acquired ownership logger.info(f"Another worker just acquired ownership of channel {channel_id}") - # Create buffer but not stream manager - buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client) - self.stream_buffers[channel_id] = buffer + # Create buffer but not stream manager (only if not already exists) + if channel_id not in self.stream_buffers: + buffer = 
StreamBuffer(channel_id=channel_id, redis_client=self.redis_client) + self.stream_buffers[channel_id] = buffer - # Create client manager with channel_id and redis_client - client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id) - self.client_managers[channel_id] = client_manager + # Create client manager with channel_id and redis_client (only if not already exists) + if channel_id not in self.client_managers: + client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id) + self.client_managers[channel_id] = client_manager return True @@ -596,7 +610,7 @@ class ProxyServer: if channel_user_agent: metadata["user_agent"] = channel_user_agent - # CRITICAL FIX: Make sure stream_id is always set in metadata and properly logged + # Make sure stream_id is always set in metadata and properly logged if channel_stream_id: metadata["stream_id"] = str(channel_stream_id) logger.info(f"Storing stream_id {channel_stream_id} in metadata for channel {channel_id}") @@ -632,13 +646,14 @@ class ProxyServer: logger.info(f"Created StreamManager for channel {channel_id} with stream ID {channel_stream_id}") self.stream_managers[channel_id] = stream_manager - # Create client manager with channel_id, redis_client AND worker_id - client_manager = ClientManager( - channel_id=channel_id, - redis_client=self.redis_client, - worker_id=self.worker_id - ) - self.client_managers[channel_id] = client_manager + # Create client manager with channel_id, redis_client AND worker_id (only if not already exists) + if channel_id not in self.client_managers: + client_manager = ClientManager( + channel_id=channel_id, + redis_client=self.redis_client, + worker_id=self.worker_id + ) + self.client_managers[channel_id] = client_manager # Start stream manager thread only for the owner thread = threading.Thread(target=stream_manager.run, daemon=True) @@ -846,6 +861,10 @@ class ProxyServer: # Clean up client manager - SAFE CHECK HERE TOO if channel_id in self.client_managers: try: + client_manager = self.client_managers[channel_id] + # Stop the heartbeat thread before deleting + if hasattr(client_manager, 'stop'): + client_manager.stop() del self.client_managers[channel_id] logger.info(f"Removed client manager for channel {channel_id}") except KeyError: diff --git a/apps/proxy/ts_proxy/services/channel_service.py b/apps/proxy/ts_proxy/services/channel_service.py index 932479ea..551e2d27 100644 --- a/apps/proxy/ts_proxy/services/channel_service.py +++ b/apps/proxy/ts_proxy/services/channel_service.py @@ -597,31 +597,40 @@ class ChannelService: @staticmethod def _update_stream_stats_in_db(stream_id, **stats): """Update stream stats in database""" + from django.db import connection + try: from apps.channels.models import Stream from django.utils import timezone - + stream = Stream.objects.get(id=stream_id) - + # Get existing stats or create new dict current_stats = stream.stream_stats or {} - + # Update with new stats for key, value in stats.items(): if value is not None: current_stats[key] = value - + # Save updated stats and timestamp stream.stream_stats = current_stats stream.stream_stats_updated_at = timezone.now() stream.save(update_fields=['stream_stats', 'stream_stats_updated_at']) - + logger.debug(f"Updated stream stats in database for stream {stream_id}: {stats}") return True - + except Exception as e: logger.error(f"Error updating stream stats in database for stream {stream_id}: {e}") return False + + finally: + # Always close 
database connection after update + try: + connection.close() + except Exception: + pass # Helper methods for Redis operations @@ -678,7 +687,7 @@ class ChannelService: switch_request = { "event": EventType.STREAM_SWITCH, - "channel_id": channel_id, + "channel_id": str(channel_id), "url": new_url, "user_agent": user_agent, "stream_id": stream_id, @@ -703,7 +712,7 @@ class ChannelService: stop_request = { "event": EventType.CHANNEL_STOP, - "channel_id": channel_id, + "channel_id": str(channel_id), "requester_worker_id": proxy_server.worker_id, "timestamp": time.time() } @@ -726,7 +735,7 @@ class ChannelService: stop_request = { "event": EventType.CLIENT_STOP, - "channel_id": channel_id, + "channel_id": str(channel_id), "client_id": client_id, "requester_worker_id": proxy_server.worker_id, "timestamp": time.time() diff --git a/apps/proxy/ts_proxy/stream_buffer.py b/apps/proxy/ts_proxy/stream_buffer.py index a5169c3a..85feb5dd 100644 --- a/apps/proxy/ts_proxy/stream_buffer.py +++ b/apps/proxy/ts_proxy/stream_buffer.py @@ -303,6 +303,14 @@ class StreamBuffer: # Retrieve chunks chunks = self.get_chunks_exact(client_index, chunk_count) + # Check if we got significantly fewer chunks than expected (likely due to expiration) + # Only check if we expected multiple chunks and got none or very few + if chunk_count > 3 and len(chunks) == 0 and chunks_behind > 10: + # Chunks are missing - likely expired from Redis + # Return empty list to signal client should skip forward + logger.debug(f"Chunks missing for client at index {client_index}, buffer at {self.index} ({chunks_behind} behind)") + return [], client_index + # Check total size total_size = sum(len(c) for c in chunks) @@ -316,7 +324,7 @@ class StreamBuffer: additional_size = sum(len(c) for c in more_chunks) if total_size + additional_size <= MAX_SIZE: chunks.extend(more_chunks) - chunk_count += additional + chunk_count += len(more_chunks) # Fixed: count actual additional chunks retrieved return chunks, client_index + chunk_count diff --git a/apps/proxy/ts_proxy/stream_generator.py b/apps/proxy/ts_proxy/stream_generator.py index 817a7b82..368691b8 100644 --- a/apps/proxy/ts_proxy/stream_generator.py +++ b/apps/proxy/ts_proxy/stream_generator.py @@ -204,6 +204,18 @@ class StreamGenerator: self.empty_reads += 1 self.consecutive_empty += 1 + # Check if we're too far behind (chunks expired from Redis) + chunks_behind = self.buffer.index - self.local_index + if chunks_behind > 50: # If more than 50 chunks behind, jump forward + # Calculate new position: stay a few chunks behind current buffer + initial_behind = ConfigHelper.initial_behind_chunks() + new_index = max(self.local_index, self.buffer.index - initial_behind) + + logger.warning(f"[{self.client_id}] Client too far behind ({chunks_behind} chunks), jumping from {self.local_index} to {new_index}") + self.local_index = new_index + self.consecutive_empty = 0 # Reset since we're repositioning + continue # Try again immediately with new position + if self._should_send_keepalive(self.local_index): keepalive_packet = create_ts_packet('keepalive') logger.debug(f"[{self.client_id}] Sending keepalive packet while waiting at buffer head") diff --git a/apps/proxy/ts_proxy/stream_manager.py b/apps/proxy/ts_proxy/stream_manager.py index e80d4527..99ae8027 100644 --- a/apps/proxy/ts_proxy/stream_manager.py +++ b/apps/proxy/ts_proxy/stream_manager.py @@ -9,7 +9,9 @@ import subprocess import gevent import re from typing import Optional, List +from django.db import connection from django.shortcuts import 
get_object_or_404 +from urllib3.exceptions import ReadTimeoutError from apps.proxy.config import TSConfig as Config from apps.channels.models import Channel, Stream from apps.m3u.models import M3UAccount, M3UAccountProfile @@ -91,11 +93,13 @@ class StreamManager: self.tried_stream_ids.add(self.current_stream_id) logger.info(f"Loaded stream ID {self.current_stream_id} from Redis for channel {buffer.channel_id}") else: - logger.warning(f"No stream_id found in Redis for channel {channel_id}") + logger.warning(f"No stream_id found in Redis for channel {channel_id}. " + f"Stream switching will rely on URL comparison to avoid selecting the same stream.") except Exception as e: logger.warning(f"Error loading stream ID from Redis: {e}") else: - logger.warning(f"Unable to get stream ID for channel {channel_id} - stream switching may not work correctly") + logger.warning(f"Unable to get stream ID for channel {channel_id}. " + f"Stream switching will rely on URL comparison to avoid selecting the same stream.") logger.info(f"Initialized stream manager for channel {buffer.channel_id}") @@ -111,6 +115,9 @@ class StreamManager: self.stderr_reader_thread = None self.ffmpeg_input_phase = True # Track if we're still reading input info + # Add HTTP reader thread property + self.http_reader = None + def _create_session(self): """Create and configure requests session with optimal settings""" session = requests.Session() @@ -378,6 +385,12 @@ class StreamManager: except Exception as e: logger.error(f"Failed to update channel state in Redis: {e} for channel {self.channel_id}", exc_info=True) + # Close database connection for this thread + try: + connection.close() + except Exception: + pass + logger.info(f"Stream manager stopped for channel {self.channel_id}") def _establish_transcode_connection(self): @@ -737,9 +750,9 @@ class StreamManager: def _establish_http_connection(self): - """Establish a direct HTTP connection to the stream""" + """Establish HTTP connection using thread-based reader (same as transcode path)""" try: - logger.debug(f"Using TS Proxy to connect to stream: {self.url}") + logger.debug(f"Using HTTP streamer thread to connect to stream: {self.url}") # Check if we already have active HTTP connections if self.current_response or self.current_session: @@ -756,41 +769,39 @@ class StreamManager: logger.debug(f"Closing existing transcode process before establishing HTTP connection for channel {self.channel_id}") self._close_socket() - # Create new session for each connection attempt - session = self._create_session() - self.current_session = session + # Use HTTPStreamReader to fetch stream and pipe to a readable file descriptor + # This allows us to use the same fetch_chunk() path as transcode + from .http_streamer import HTTPStreamReader - # Stream the URL with proper timeout handling - response = session.get( - self.url, - stream=True, - timeout=(10, 60) # 10s connect timeout, 60s read timeout + # Create and start the HTTP stream reader + self.http_reader = HTTPStreamReader( + url=self.url, + user_agent=self.user_agent, + chunk_size=self.chunk_size ) - self.current_response = response - if response.status_code == 200: - self.connected = True - self.healthy = True - logger.info(f"Successfully connected to stream source for channel {self.channel_id}") + # Start the reader thread and get the read end of the pipe + pipe_fd = self.http_reader.start() - # Store connection start time for stability tracking - self.connection_start_time = time.time() + # Wrap the file descriptor in a file object (same as 
transcode stdout) + import os + self.socket = os.fdopen(pipe_fd, 'rb', buffering=0) + self.connected = True + self.healthy = True - # Set channel state to waiting for clients - self._set_waiting_for_clients() + logger.info(f"Successfully started HTTP streamer thread for channel {self.channel_id}") + + # Store connection start time for stability tracking + self.connection_start_time = time.time() + + # Set channel state to waiting for clients + self._set_waiting_for_clients() + + return True - return True - else: - logger.error(f"Failed to connect to stream for channel {self.channel_id}: HTTP {response.status_code}") - self._close_connection() - return False - except requests.exceptions.RequestException as e: - logger.error(f"HTTP request error: {e}") - self._close_connection() - return False except Exception as e: logger.error(f"Error establishing HTTP connection for channel {self.channel_id}: {e}", exc_info=True) - self._close_connection() + self._close_socket() return False def _update_bytes_processed(self, chunk_size): @@ -818,48 +829,19 @@ class StreamManager: logger.error(f"Error updating bytes processed: {e}") def _process_stream_data(self): - """Process stream data until disconnect or error""" + """Process stream data until disconnect or error - unified path for both transcode and HTTP""" try: - if self.transcode: - # Handle transcoded stream data - while self.running and self.connected and not self.stop_requested and not self.needs_stream_switch: - if self.fetch_chunk(): - self.last_data_time = time.time() - else: - if not self.running: - break - gevent.sleep(0.1) # REPLACE time.sleep(0.1) - else: - # Handle direct HTTP connection - chunk_count = 0 - try: - for chunk in self.current_response.iter_content(chunk_size=self.chunk_size): - # Check if we've been asked to stop - if self.stop_requested or self.url_switching or self.needs_stream_switch: - break - - if chunk: - # Track chunk size before adding to buffer - chunk_size = len(chunk) - self._update_bytes_processed(chunk_size) - - # Add chunk to buffer with TS packet alignment - success = self.buffer.add_chunk(chunk) - - if success: - self.last_data_time = time.time() - chunk_count += 1 - - # Update last data timestamp in Redis - if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client: - last_data_key = RedisKeys.last_data(self.buffer.channel_id) - self.buffer.redis_client.set(last_data_key, str(time.time()), ex=60) - except (AttributeError, ConnectionError) as e: - if self.stop_requested or self.url_switching: - logger.debug(f"Expected connection error during shutdown/URL switch for channel {self.channel_id}: {e}") - else: - logger.error(f"Unexpected stream error for channel {self.channel_id}: {e}") - raise + # Both transcode and HTTP now use the same subprocess/socket approach + # This gives us perfect control: check flags between chunks, timeout just returns False + while self.running and self.connected and not self.stop_requested and not self.needs_stream_switch: + if self.fetch_chunk(): + self.last_data_time = time.time() + else: + # fetch_chunk() returned False - could be timeout, no data, or error + if not self.running: + break + # Brief sleep before retry to avoid tight loop + gevent.sleep(0.1) except Exception as e: logger.error(f"Error processing stream data for channel {self.channel_id}: {e}", exc_info=True) @@ -948,6 +930,7 @@ class StreamManager: # Import both models for proper resource management from apps.channels.models import Stream, Channel + from django.db import connection # Update stream profile 
if we're switching streams if self.current_stream_id and stream_id and self.current_stream_id != stream_id: @@ -965,8 +948,16 @@ class StreamManager: logger.debug(f"Updated m3u profile for channel {self.channel_id} to use profile from stream {stream_id}") else: logger.warning(f"Failed to update stream profile for channel {self.channel_id}") + except Exception as e: logger.error(f"Error updating stream profile for channel {self.channel_id}: {e}") + + finally: + # Always close database connection after profile update + try: + connection.close() + except Exception: + pass # CRITICAL: Set a flag to prevent immediate reconnection with old URL self.url_switching = True @@ -1183,6 +1174,15 @@ class StreamManager: if self.current_response or self.current_session: self._close_connection() + # Stop HTTP reader thread if it exists + if hasattr(self, 'http_reader') and self.http_reader: + try: + logger.debug(f"Stopping HTTP reader thread for channel {self.channel_id}") + self.http_reader.stop() + self.http_reader = None + except Exception as e: + logger.debug(f"Error stopping HTTP reader for channel {self.channel_id}: {e}") + # Otherwise handle socket and transcode resources if self.socket: try: @@ -1219,6 +1219,30 @@ class StreamManager: except Exception as e: logger.error(f"Final kill attempt failed for channel {self.channel_id}: {e}") + # Explicitly close all subprocess pipes to prevent file descriptor leaks + try: + if self.transcode_process.stdin: + self.transcode_process.stdin.close() + if self.transcode_process.stdout: + self.transcode_process.stdout.close() + if self.transcode_process.stderr: + self.transcode_process.stderr.close() + logger.debug(f"Closed all subprocess pipes for channel {self.channel_id}") + except Exception as e: + logger.debug(f"Error closing subprocess pipes for channel {self.channel_id}: {e}") + + # Join stderr reader thread to ensure it's fully terminated + if hasattr(self, 'stderr_reader_thread') and self.stderr_reader_thread and self.stderr_reader_thread.is_alive(): + try: + logger.debug(f"Waiting for stderr reader thread to terminate for channel {self.channel_id}") + self.stderr_reader_thread.join(timeout=2.0) + if self.stderr_reader_thread.is_alive(): + logger.warning(f"Stderr reader thread did not terminate within timeout for channel {self.channel_id}") + except Exception as e: + logger.debug(f"Error joining stderr reader thread for channel {self.channel_id}: {e}") + finally: + self.stderr_reader_thread = None + self.transcode_process = None self.transcode_process_active = False # Reset the flag @@ -1250,7 +1274,7 @@ class StreamManager: try: # Set timeout for chunk reads - chunk_timeout = ConfigHelper.get('CHUNK_TIMEOUT', 10) # Default 10 seconds + chunk_timeout = ConfigHelper.chunk_timeout() # Use centralized timeout configuration try: # Handle different socket types with timeout @@ -1333,7 +1357,17 @@ class StreamManager: # Only update if not already past connecting if not current_state or current_state in [ChannelState.INITIALIZING, ChannelState.CONNECTING]: # NEW CODE: Check if buffer has enough chunks - current_buffer_index = getattr(self.buffer, 'index', 0) + # IMPORTANT: Read from Redis, not local buffer.index, because in multi-worker setup + # each worker has its own StreamBuffer instance with potentially stale local index + buffer_index_key = RedisKeys.buffer_index(channel_id) + current_buffer_index = 0 + try: + redis_index = redis_client.get(buffer_index_key) + if redis_index: + current_buffer_index = int(redis_index) + except Exception as e: + 
logger.error(f"Error reading buffer index from Redis: {e}") + initial_chunks_needed = ConfigHelper.initial_behind_chunks() if current_buffer_index < initial_chunks_needed: @@ -1381,10 +1415,21 @@ class StreamManager: # Clean up completed timers self._buffer_check_timers = [t for t in self._buffer_check_timers if t.is_alive()] - if hasattr(self.buffer, 'index') and hasattr(self.buffer, 'channel_id'): - current_buffer_index = self.buffer.index - initial_chunks_needed = getattr(Config, 'INITIAL_BEHIND_CHUNKS', 10) + if hasattr(self.buffer, 'channel_id') and hasattr(self.buffer, 'redis_client'): channel_id = self.buffer.channel_id + redis_client = self.buffer.redis_client + + # IMPORTANT: Read from Redis, not local buffer.index + buffer_index_key = RedisKeys.buffer_index(channel_id) + current_buffer_index = 0 + try: + redis_index = redis_client.get(buffer_index_key) + if redis_index: + current_buffer_index = int(redis_index) + except Exception as e: + logger.error(f"Error reading buffer index from Redis: {e}") + + initial_chunks_needed = ConfigHelper.initial_behind_chunks() # Use ConfigHelper for consistency if current_buffer_index >= initial_chunks_needed: # We now have enough buffer, call _set_waiting_for_clients again @@ -1409,6 +1454,7 @@ class StreamManager: def _try_next_stream(self): """ Try to switch to the next available stream for this channel. + Will iterate through multiple alternate streams if needed to find one with a different URL. Returns: bool: True if successfully switched to a new stream, False otherwise @@ -1434,60 +1480,71 @@ class StreamManager: logger.warning(f"All {len(alternate_streams)} alternate streams have been tried for channel {self.channel_id}") return False - # Get the next stream to try - next_stream = untried_streams[0] - stream_id = next_stream['stream_id'] - profile_id = next_stream['profile_id'] # This is the M3U profile ID we need + # IMPROVED: Try multiple streams until we find one with a different URL + for next_stream in untried_streams: + stream_id = next_stream['stream_id'] + profile_id = next_stream['profile_id'] # This is the M3U profile ID we need - # Add to tried streams - self.tried_stream_ids.add(stream_id) + # Add to tried streams + self.tried_stream_ids.add(stream_id) - # Get stream info including URL using the profile_id we already have - logger.info(f"Trying next stream ID {stream_id} with profile ID {profile_id} for channel {self.channel_id}") - stream_info = get_stream_info_for_switch(self.channel_id, stream_id) + # Get stream info including URL using the profile_id we already have + logger.info(f"Trying next stream ID {stream_id} with profile ID {profile_id} for channel {self.channel_id}") + stream_info = get_stream_info_for_switch(self.channel_id, stream_id) - if 'error' in stream_info or not stream_info.get('url'): - logger.error(f"Error getting info for stream {stream_id} for channel {self.channel_id}: {stream_info.get('error', 'No URL')}") - return False + if 'error' in stream_info or not stream_info.get('url'): + logger.error(f"Error getting info for stream {stream_id} for channel {self.channel_id}: {stream_info.get('error', 'No URL')}") + continue # Try next stream instead of giving up - # Update URL and user agent - new_url = stream_info['url'] - new_user_agent = stream_info['user_agent'] - new_transcode = stream_info['transcode'] + # Update URL and user agent + new_url = stream_info['url'] + new_user_agent = stream_info['user_agent'] + new_transcode = stream_info['transcode'] - logger.info(f"Switching from URL {self.url} to 
{new_url} for channel {self.channel_id}") + # CRITICAL FIX: Check if the new URL is the same as current URL + # This can happen when current_stream_id is None and we accidentally select the same stream + if new_url == self.url: + logger.warning(f"Stream ID {stream_id} generates the same URL as current stream ({new_url}). " + f"Skipping this stream and trying next alternative.") + continue # Try next stream instead of giving up - # IMPORTANT: Just update the URL, don't stop the channel or release resources - switch_result = self.update_url(new_url, stream_id, profile_id) - if not switch_result: - logger.error(f"Failed to update URL for stream ID {stream_id} for channel {self.channel_id}") - return False + logger.info(f"Switching from URL {self.url} to {new_url} for channel {self.channel_id}") - # Update stream ID tracking - self.current_stream_id = stream_id + # IMPORTANT: Just update the URL, don't stop the channel or release resources + switch_result = self.update_url(new_url, stream_id, profile_id) + if not switch_result: + logger.error(f"Failed to update URL for stream ID {stream_id} for channel {self.channel_id}") + continue # Try next stream - # Store the new user agent and transcode settings - self.user_agent = new_user_agent - self.transcode = new_transcode + # Update stream ID tracking + self.current_stream_id = stream_id - # Update stream metadata in Redis - use the profile_id we got from get_alternate_streams - if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client: - metadata_key = RedisKeys.channel_metadata(self.channel_id) - self.buffer.redis_client.hset(metadata_key, mapping={ - ChannelMetadataField.URL: new_url, - ChannelMetadataField.USER_AGENT: new_user_agent, - ChannelMetadataField.STREAM_PROFILE: stream_info['stream_profile'], - ChannelMetadataField.M3U_PROFILE: str(profile_id), # Use the profile_id from get_alternate_streams - ChannelMetadataField.STREAM_ID: str(stream_id), - ChannelMetadataField.STREAM_SWITCH_TIME: str(time.time()), - ChannelMetadataField.STREAM_SWITCH_REASON: "max_retries_exceeded" - }) + # Store the new user agent and transcode settings + self.user_agent = new_user_agent + self.transcode = new_transcode - # Log the switch - logger.info(f"Stream metadata updated for channel {self.channel_id} to stream ID {stream_id} with M3U profile {profile_id}") + # Update stream metadata in Redis - use the profile_id we got from get_alternate_streams + if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client: + metadata_key = RedisKeys.channel_metadata(self.channel_id) + self.buffer.redis_client.hset(metadata_key, mapping={ + ChannelMetadataField.URL: new_url, + ChannelMetadataField.USER_AGENT: new_user_agent, + ChannelMetadataField.STREAM_PROFILE: stream_info['stream_profile'], + ChannelMetadataField.M3U_PROFILE: str(profile_id), # Use the profile_id from get_alternate_streams + ChannelMetadataField.STREAM_ID: str(stream_id), + ChannelMetadataField.STREAM_SWITCH_TIME: str(time.time()), + ChannelMetadataField.STREAM_SWITCH_REASON: "max_retries_exceeded" + }) - logger.info(f"Successfully switched to stream ID {stream_id} with URL {new_url} for channel {self.channel_id}") - return True + # Log the switch + logger.info(f"Stream metadata updated for channel {self.channel_id} to stream ID {stream_id} with M3U profile {profile_id}") + + logger.info(f"Successfully switched to stream ID {stream_id} with URL {new_url} for channel {self.channel_id}") + return True + + # If we get here, we tried all streams but none worked + logger.error(f"Tried 
{len(untried_streams)} alternate streams but none were suitable for channel {self.channel_id}") + return False except Exception as e: logger.error(f"Error trying next stream for channel {self.channel_id}: {e}", exc_info=True) diff --git a/apps/proxy/ts_proxy/url_utils.py b/apps/proxy/ts_proxy/url_utils.py index 75e7653e..db53cc74 100644 --- a/apps/proxy/ts_proxy/url_utils.py +++ b/apps/proxy/ts_proxy/url_utils.py @@ -8,7 +8,7 @@ from typing import Optional, Tuple, List from django.shortcuts import get_object_or_404 from apps.channels.models import Channel, Stream from apps.m3u.models import M3UAccount, M3UAccountProfile -from core.models import UserAgent, CoreSettings +from core.models import UserAgent, CoreSettings, StreamProfile from .utils import get_logger from uuid import UUID import requests @@ -26,16 +26,67 @@ def get_stream_object(id: str): def generate_stream_url(channel_id: str) -> Tuple[str, str, bool, Optional[int]]: """ - Generate the appropriate stream URL for a channel based on its profile settings. + Generate the appropriate stream URL for a channel or stream based on its profile settings. Args: - channel_id: The UUID of the channel + channel_id: The UUID of the channel or stream hash Returns: Tuple[str, str, bool, Optional[int]]: (stream_url, user_agent, transcode_flag, profile_id) """ try: - channel = get_stream_object(channel_id) + channel_or_stream = get_stream_object(channel_id) + + # Handle direct stream preview (custom streams) + if isinstance(channel_or_stream, Stream): + stream = channel_or_stream + logger.info(f"Previewing stream directly: {stream.id} ({stream.name})") + + # For custom streams, we need to get the M3U account and profile + m3u_account = stream.m3u_account + if not m3u_account: + logger.error(f"Stream {stream.id} has no M3U account") + return None, None, False, None + + # Get the default profile for this M3U account (custom streams use default) + m3u_profiles = m3u_account.profiles.all() + profile = next((obj for obj in m3u_profiles if obj.is_default), None) + + if not profile: + logger.error(f"No default profile found for M3U account {m3u_account.id}") + return None, None, False, None + + # Get the appropriate user agent + stream_user_agent = m3u_account.get_user_agent().user_agent + if stream_user_agent is None: + stream_user_agent = UserAgent.objects.get(id=CoreSettings.get_default_user_agent_id()) + logger.debug(f"No user agent found for account, using default: {stream_user_agent}") + + # Get stream URL (no transformation for custom streams) + stream_url = stream.url + + # Check if the stream has its own stream_profile set, otherwise use default + if stream.stream_profile: + stream_profile = stream.stream_profile + logger.debug(f"Using stream's own stream profile: {stream_profile.name}") + else: + stream_profile = StreamProfile.objects.get( + id=CoreSettings.get_default_stream_profile_id() + ) + logger.debug(f"Using default stream profile: {stream_profile.name}") + + # Check if transcoding is needed + if stream_profile.is_proxy() or stream_profile is None: + transcode = False + else: + transcode = True + + stream_profile_id = stream_profile.id + + return stream_url, stream_user_agent, transcode, stream_profile_id + + # Handle channel preview (existing logic) + channel = channel_or_stream # Get stream and profile for this channel # Note: get_stream now returns 3 values (stream_id, profile_id, error_reason) diff --git a/apps/proxy/ts_proxy/views.py b/apps/proxy/ts_proxy/views.py index e31d0418..109e88cf 100644 --- a/apps/proxy/ts_proxy/views.py 
+++ b/apps/proxy/ts_proxy/views.py @@ -128,7 +128,7 @@ def stream_ts(request, channel_id): ChannelService.stop_channel(channel_id) # Use fixed retry interval and timeout - retry_timeout = 1.5 # 1.5 seconds total timeout + retry_timeout = 3 # 3 seconds total timeout retry_interval = 0.1 # 100ms between attempts wait_start_time = time.time() @@ -138,9 +138,10 @@ def stream_ts(request, channel_id): profile_value = None error_reason = None attempt = 0 + should_retry = True # Try to get a stream with fixed interval retries - while time.time() - wait_start_time < retry_timeout: + while should_retry and time.time() - wait_start_time < retry_timeout: attempt += 1 stream_url, stream_user_agent, transcode, profile_value = ( generate_stream_url(channel_id) @@ -152,35 +153,53 @@ def stream_ts(request, channel_id): ) break - # If we failed because there are no streams assigned, don't retry - _, _, error_reason = channel.get_stream() - if error_reason and "maximum connection limits" not in error_reason: - logger.warning( - f"[{client_id}] Can't retry - error not related to connection limits: {error_reason}" + # On first failure, check if the error is retryable + if attempt == 1: + _, _, error_reason = channel.get_stream() + if error_reason and "maximum connection limits" not in error_reason: + logger.warning( + f"[{client_id}] Can't retry - error not related to connection limits: {error_reason}" + ) + should_retry = False + break + + # Check if we have time remaining for another sleep cycle + elapsed_time = time.time() - wait_start_time + remaining_time = retry_timeout - elapsed_time + + # If we don't have enough time for the next sleep interval, break + # but only after we've already made an attempt (the while condition will try one more time) + if remaining_time <= retry_interval: + logger.info( + f"[{client_id}] Insufficient time ({remaining_time:.1f}s) for another sleep cycle, will make one final attempt" ) break - # Wait 100ms before retrying - elapsed_time = time.time() - wait_start_time - remaining_time = retry_timeout - elapsed_time - if remaining_time > retry_interval: + # Wait before retrying + logger.info( + f"[{client_id}] Waiting {retry_interval*1000:.0f}ms for a connection to become available (attempt {attempt}, {remaining_time:.1f}s remaining)" + ) + gevent.sleep(retry_interval) + retry_interval += 0.025 # Increase wait time by 25ms for next attempt + + # Make one final attempt if we still don't have a stream, should retry, and haven't exceeded timeout + if stream_url is None and should_retry and time.time() - wait_start_time < retry_timeout: + attempt += 1 + logger.info( + f"[{client_id}] Making final attempt {attempt} at timeout boundary" + ) + stream_url, stream_user_agent, transcode, profile_value = ( + generate_stream_url(channel_id) + ) + if stream_url is not None: logger.info( - f"[{client_id}] Waiting {retry_interval*1000:.0f}ms for a connection to become available (attempt {attempt}, {remaining_time:.1f}s remaining)" + f"[{client_id}] Successfully obtained stream on final attempt for channel {channel_id}" ) - gevent.sleep(retry_interval) - retry_interval += 0.025 # Increase wait time by 25ms for next attempt if stream_url is None: - # Make sure to release any stream locks that might have been acquired - if hasattr(channel, "streams") and channel.streams.exists(): - for stream in channel.streams.all(): - try: - stream.release_stream() - logger.info( - f"[{client_id}] Released stream {stream.id} for channel {channel_id}" - ) - except Exception as e: - logger.error(f"[{client_id}] 
Error releasing stream: {e}") + # Release the channel's stream lock if one was acquired + # Note: Only call this if get_stream() actually assigned a stream + # In our case, if stream_url is None, no stream was ever assigned, so don't release # Get the specific error message if available wait_duration = f"{int(time.time() - wait_start_time)}s" @@ -189,6 +208,9 @@ def stream_ts(request, channel_id): if error_reason else "No available streams for this channel" ) + logger.info( + f"[{client_id}] Failed to obtain stream after {attempt} attempts over {wait_duration}: {error_msg}" + ) return JsonResponse( {"error": error_msg, "waited": wait_duration}, status=503 ) # 503 Service Unavailable is appropriate here @@ -474,24 +496,33 @@ def stream_xc(request, username, password, channel_id): print(f"Fetchin channel with ID: {channel_id}") if user.user_level < 10: - filters = { - "id": int(channel_id), - "channelprofilemembership__enabled": True, - "user_level__lte": user.user_level, - } + user_profile_count = user.channel_profiles.count() - if user.channel_profiles.count() > 0: - channel_profiles = user.channel_profiles.all() - filters["channelprofilemembership__channel_profile__in"] = channel_profiles + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channels based on user_level + filters = { + "id": int(channel_id), + "user_level__lte": user.user_level + } + channel = Channel.objects.filter(**filters).first() + else: + # User has specific limited profiles assigned + filters = { + "id": int(channel_id), + "channelprofilemembership__enabled": True, + "user_level__lte": user.user_level, + "channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + channel = Channel.objects.filter(**filters).distinct().first() - channel = Channel.objects.filter(**filters).distinct().first() if not channel: return JsonResponse({"error": "Not found"}, status=404) else: channel = get_object_or_404(Channel, id=channel_id) # @TODO: we've got the file 'type' via extension, support this when we support multiple outputs - return stream_ts(request._request, channel.uuid) + return stream_ts(request._request, str(channel.uuid)) @csrf_exempt diff --git a/apps/vod/tasks.py b/apps/vod/tasks.py index 1a2e51ca..bc8ad80f 100644 --- a/apps/vod/tasks.py +++ b/apps/vod/tasks.py @@ -187,16 +187,28 @@ def batch_create_categories(categories_data, category_type, account): logger.debug(f"Found {len(existing_categories)} existing categories") + # Check if we should auto-enable new categories based on account settings + account_custom_props = account.custom_properties or {} + if category_type == 'movie': + auto_enable_new = account_custom_props.get("auto_enable_new_groups_vod", True) + else: # series + auto_enable_new = account_custom_props.get("auto_enable_new_groups_series", True) + # Create missing categories in batch new_categories = [] + for name in category_names: if name not in existing_categories: + # Always create new categories new_categories.append(VODCategory(name=name, category_type=category_type)) else: + # Existing category - create relationship with enabled based on auto_enable setting + # (category exists globally but is new to this account) relations_to_create.append(M3UVODCategoryRelation( category=existing_categories[name], m3u_account=account, custom_properties={}, + enabled=auto_enable_new, )) logger.debug(f"{len(new_categories)} new categories found") @@ -204,24 +216,68 @@ def 
batch_create_categories(categories_data, category_type, account): if new_categories: logger.debug("Creating new categories...") - created_categories = VODCategory.bulk_create_and_fetch(new_categories, ignore_conflicts=True) + created_categories = list(VODCategory.bulk_create_and_fetch(new_categories, ignore_conflicts=True)) + + # Create relations for newly created categories with enabled based on auto_enable setting + for cat in created_categories: + if not auto_enable_new: + logger.info(f"New {category_type} category '{cat.name}' created but DISABLED - auto_enable_new_groups is disabled for account {account.id}") + + relations_to_create.append( + M3UVODCategoryRelation( + category=cat, + m3u_account=account, + custom_properties={}, + enabled=auto_enable_new, + ) + ) + # Convert to dictionary for easy lookup newly_created = {cat.name: cat for cat in created_categories} - - relations_to_create += [ - M3UVODCategoryRelation( - category=cat, - m3u_account=account, - custom_properties={}, - ) for cat in newly_created.values() - ] - existing_categories.update(newly_created) # Create missing relations logger.debug("Updating category account relations...") M3UVODCategoryRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True) + # Delete orphaned category relationships (categories no longer in the M3U source) + current_category_ids = set(existing_categories[name].id for name in category_names) + existing_relations = M3UVODCategoryRelation.objects.filter( + m3u_account=account, + category__category_type=category_type + ).select_related('category') + + relations_to_delete = [ + rel for rel in existing_relations + if rel.category_id not in current_category_ids + ] + + if relations_to_delete: + M3UVODCategoryRelation.objects.filter( + id__in=[rel.id for rel in relations_to_delete] + ).delete() + logger.info(f"Deleted {len(relations_to_delete)} orphaned {category_type} category relationships for account {account.id}: {[rel.category.name for rel in relations_to_delete]}") + + # Check if any of the deleted relationships left categories with no remaining associations + orphaned_category_ids = [] + for rel in relations_to_delete: + category = rel.category + + # Check if this category has any remaining M3U account relationships + remaining_relationships = M3UVODCategoryRelation.objects.filter( + category=category + ).exists() + + # If no relationships remain, it's safe to delete the category + if not remaining_relationships: + orphaned_category_ids.append(category.id) + logger.debug(f"Category '{category.name}' has no remaining associations and will be deleted") + + # Delete orphaned categories + if orphaned_category_ids: + VODCategory.objects.filter(id__in=orphaned_category_ids).delete() + logger.info(f"Deleted {len(orphaned_category_ids)} orphaned {category_type} categories with no remaining associations") + # 🔑 Fetch all relations for this account, for all categories # relations = { rel.id: rel for rel in M3UVODCategoryRelation.objects # .filter(category__in=existing_categories.values(), m3u_account=account) diff --git a/core/api_urls.py b/core/api_urls.py index 00e20a6e..baa4bbe5 100644 --- a/core/api_urls.py +++ b/core/api_urls.py @@ -2,7 +2,7 @@ from django.urls import path, include from rest_framework.routers import DefaultRouter -from .api_views import UserAgentViewSet, StreamProfileViewSet, CoreSettingsViewSet, environment, version, rehash_streams_endpoint +from .api_views import UserAgentViewSet, StreamProfileViewSet, CoreSettingsViewSet, environment, version, 
rehash_streams_endpoint, TimezoneListView router = DefaultRouter() router.register(r'useragents', UserAgentViewSet, basename='useragent') @@ -12,5 +12,6 @@ urlpatterns = [ path('settings/env/', environment, name='token_refresh'), path('version/', version, name='version'), path('rehash-streams/', rehash_streams_endpoint, name='rehash_streams'), + path('timezones/', TimezoneListView.as_view(), name='timezones'), path('', include(router.urls)), ] diff --git a/core/api_views.py b/core/api_views.py index 9de5aa5a..f475909a 100644 --- a/core/api_views.py +++ b/core/api_views.py @@ -5,10 +5,12 @@ import ipaddress import logging from rest_framework import viewsets, status from rest_framework.response import Response +from rest_framework.views import APIView from django.shortcuts import get_object_or_404 from rest_framework.permissions import IsAuthenticated from rest_framework.decorators import api_view, permission_classes, action from drf_yasg.utils import swagger_auto_schema +from drf_yasg import openapi from .models import ( UserAgent, StreamProfile, @@ -328,25 +330,69 @@ def rehash_streams_endpoint(request): # Get the current hash keys from settings hash_key_setting = CoreSettings.objects.get(key=STREAM_HASH_KEY) hash_keys = hash_key_setting.value.split(",") - + # Queue the rehash task task = rehash_streams.delay(hash_keys) - + return Response({ "success": True, "message": "Stream rehashing task has been queued", "task_id": task.id }, status=status.HTTP_200_OK) - + except CoreSettings.DoesNotExist: return Response({ "success": False, "message": "Hash key settings not found" }, status=status.HTTP_400_BAD_REQUEST) - + except Exception as e: logger.error(f"Error triggering rehash streams: {e}") return Response({ "success": False, "message": "Failed to trigger rehash task" }, status=status.HTTP_500_INTERNAL_SERVER_ERROR) + + +# ───────────────────────────── +# Timezone List API +# ───────────────────────────── +class TimezoneListView(APIView): + """ + API endpoint that returns all available timezones supported by pytz. + Returns a list of timezone names grouped by region for easy selection. + This is a general utility endpoint that can be used throughout the application. + """ + + def get_permissions(self): + return [Authenticated()] + + @swagger_auto_schema( + operation_description="Get list of all supported timezones", + responses={200: openapi.Response('List of timezones with grouping by region')} + ) + def get(self, request): + import pytz + + # Get all common timezones (excludes deprecated ones) + all_timezones = sorted(pytz.common_timezones) + + # Group by region for better UX + grouped = {} + for tz in all_timezones: + if '/' in tz: + region = tz.split('/')[0] + if region not in grouped: + grouped[region] = [] + grouped[region].append(tz) + else: + # Handle special zones like UTC, GMT, etc. 
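+                    # e.g. 'GMT' and 'UTC' have no '/' separator, so the grouped response ends up with
+                    #   {'America': ['America/New_York', ...], 'Other': ['GMT', 'UTC'], ...}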
+ if 'Other' not in grouped: + grouped['Other'] = [] + grouped['Other'].append(tz) + + return Response({ + 'timezones': all_timezones, + 'grouped': grouped, + 'count': len(all_timezones) + }) diff --git a/dispatcharr/celery.py b/dispatcharr/celery.py index 98c6210b..c845dafe 100644 --- a/dispatcharr/celery.py +++ b/dispatcharr/celery.py @@ -50,13 +50,21 @@ app.conf.update( ) # Add memory cleanup after task completion -#@task_postrun.connect # Use the imported signal +@task_postrun.connect # Use the imported signal def cleanup_task_memory(**kwargs): - """Clean up memory after each task completes""" + """Clean up memory and database connections after each task completes""" + from django.db import connection + # Get task name from kwargs task_name = kwargs.get('task').name if kwargs.get('task') else '' - # Only run cleanup for memory-intensive tasks + # Close database connection for this Celery worker process + try: + connection.close() + except Exception: + pass + + # Only run memory cleanup for memory-intensive tasks memory_intensive_tasks = [ 'apps.m3u.tasks.refresh_single_m3u_account', 'apps.m3u.tasks.refresh_m3u_accounts', diff --git a/dispatcharr/settings.py b/dispatcharr/settings.py index 057780de..a0c4fc84 100644 --- a/dispatcharr/settings.py +++ b/dispatcharr/settings.py @@ -134,6 +134,7 @@ else: "PASSWORD": os.environ.get("POSTGRES_PASSWORD", "secret"), "HOST": os.environ.get("POSTGRES_HOST", "localhost"), "PORT": int(os.environ.get("POSTGRES_PORT", 5432)), + "CONN_MAX_AGE": DATABASE_CONN_MAX_AGE, } } diff --git a/docker/docker-compose.aio.yml b/docker/docker-compose.aio.yml index 90cd8654..fe5e1507 100644 --- a/docker/docker-compose.aio.yml +++ b/docker/docker-compose.aio.yml @@ -14,6 +14,15 @@ services: - REDIS_HOST=localhost - CELERY_BROKER_URL=redis://localhost:6379/0 - DISPATCHARR_LOG_LEVEL=info + # Process Priority Configuration (Optional) + # Lower values = higher priority. Range: -20 (highest) to 19 (lowest) + # Negative values require cap_add: SYS_NICE (uncomment below) + #- UWSGI_NICE_LEVEL=-5 # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority) + #- CELERY_NICE_LEVEL=5 # Celery/EPG/Background tasks (default: 5, low priority) + # + # Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0) + #cap_add: + # - SYS_NICE # Optional for hardware acceleration #devices: # - /dev/dri:/dev/dri # For Intel/AMD GPU acceleration (VA-API) diff --git a/docker/docker-compose.debug.yml b/docker/docker-compose.debug.yml index 163ebf6a..d9dbef0e 100644 --- a/docker/docker-compose.debug.yml +++ b/docker/docker-compose.debug.yml @@ -18,3 +18,12 @@ services: - REDIS_HOST=localhost - CELERY_BROKER_URL=redis://localhost:6379/0 - DISPATCHARR_LOG_LEVEL=trace + # Process Priority Configuration (Optional) + # Lower values = higher priority. 
Range: -20 (highest) to 19 (lowest) + # Negative values require cap_add: SYS_NICE (uncomment below) + #- UWSGI_NICE_LEVEL=-5 # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority) + #- CELERY_NICE_LEVEL=5 # Celery/EPG/Background tasks (default: 5, low priority) + # + # Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0) + #cap_add: + # - SYS_NICE diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml index 00394d55..d1bb3680 100644 --- a/docker/docker-compose.dev.yml +++ b/docker/docker-compose.dev.yml @@ -17,6 +17,15 @@ services: - REDIS_HOST=localhost - CELERY_BROKER_URL=redis://localhost:6379/0 - DISPATCHARR_LOG_LEVEL=debug + # Process Priority Configuration (Optional) + # Lower values = higher priority. Range: -20 (highest) to 19 (lowest) + # Negative values require cap_add: SYS_NICE (uncomment below) + #- UWSGI_NICE_LEVEL=-5 # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority) + #- CELERY_NICE_LEVEL=5 # Celery/EPG/Background tasks (default: 5, low priority) + # + # Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0) + #cap_add: + # - SYS_NICE pgadmin: image: dpage/pgadmin4 diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index dd989c81..aaa63990 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -17,6 +17,15 @@ services: - REDIS_HOST=redis - CELERY_BROKER_URL=redis://redis:6379/0 - DISPATCHARR_LOG_LEVEL=info + # Process Priority Configuration (Optional) + # Lower values = higher priority. Range: -20 (highest) to 19 (lowest) + # Negative values require cap_add: SYS_NICE (uncomment below) + #- UWSGI_NICE_LEVEL=-5 # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority) + #- CELERY_NICE_LEVEL=5 # Celery/EPG/Background tasks (default: 5, low priority) + # + # Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0) + #cap_add: + # - SYS_NICE # Optional for hardware acceleration #group_add: # - video diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index fd0a883d..1b41bf29 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -40,6 +40,18 @@ export REDIS_DB=${REDIS_DB:-0} export DISPATCHARR_PORT=${DISPATCHARR_PORT:-9191} export LIBVA_DRIVERS_PATH='/usr/local/lib/x86_64-linux-gnu/dri' export LD_LIBRARY_PATH='/usr/local/lib' + +# Process priority configuration +# UWSGI_NICE_LEVEL: Absolute nice value for uWSGI/streaming (default: 0 = normal priority) +# CELERY_NICE_LEVEL: Absolute nice value for Celery/background tasks (default: 5 = low priority) +# Note: The script will automatically calculate the relative offset for Celery since it's spawned by uWSGI +export UWSGI_NICE_LEVEL=${UWSGI_NICE_LEVEL:-0} +CELERY_NICE_ABSOLUTE=${CELERY_NICE_LEVEL:-5} + +# Calculate relative nice value for Celery (since nice is relative to parent process) +# Celery is spawned by uWSGI, so we need to add the offset to reach the desired absolute value +export CELERY_NICE_LEVEL=$((CELERY_NICE_ABSOLUTE - UWSGI_NICE_LEVEL)) + # Set LIBVA_DRIVER_NAME if user has specified it if [ -v LIBVA_DRIVER_NAME ]; then export LIBVA_DRIVER_NAME @@ -78,6 +90,7 @@ if [[ ! 
-f /etc/profile.d/dispatcharr.sh ]]; then DISPATCHARR_ENV DISPATCHARR_DEBUG DISPATCHARR_LOG_LEVEL REDIS_HOST REDIS_DB POSTGRES_DIR DISPATCHARR_PORT DISPATCHARR_VERSION DISPATCHARR_TIMESTAMP LIBVA_DRIVERS_PATH LIBVA_DRIVER_NAME LD_LIBRARY_PATH + CELERY_NICE_LEVEL UWSGI_NICE_LEVEL ) # Process each variable for both profile.d and environment @@ -96,7 +109,16 @@ fi chmod +x /etc/profile.d/dispatcharr.sh -pip install django-filter +# Ensure root's .bashrc sources the profile.d scripts for interactive non-login shells +if ! grep -q "profile.d/dispatcharr.sh" /root/.bashrc 2>/dev/null; then + cat >> /root/.bashrc << 'EOF' + +# Source Dispatcharr environment variables +if [ -f /etc/profile.d/dispatcharr.sh ]; then + . /etc/profile.d/dispatcharr.sh +fi +EOF +fi # Run init scripts echo "Starting user setup..." @@ -161,10 +183,13 @@ if [ "$DISPATCHARR_DEBUG" != "true" ]; then uwsgi_args+=" --disable-logging" fi -# Launch uwsgi -p passes environment variables to the process -su -p - $POSTGRES_USER -c "cd /app && uwsgi $uwsgi_args &" -uwsgi_pid=$(pgrep uwsgi | sort | head -n1) -echo "✅ uwsgi started with PID $uwsgi_pid" +# Launch uwsgi with configurable nice level (default: 0 = normal priority) +# Users can override via UWSGI_NICE_LEVEL environment variable in docker-compose +# Start with nice as root, then use setpriv to drop privileges to dispatch user +# This preserves both the nice value and environment variables +cd /app && nice -n $UWSGI_NICE_LEVEL setpriv --reuid=$POSTGRES_USER --regid=$POSTGRES_USER --clear-groups -- uwsgi $uwsgi_args & +uwsgi_pid=$! +echo "✅ uwsgi started with PID $uwsgi_pid (nice $UWSGI_NICE_LEVEL)" pids+=("$uwsgi_pid") # sed -i 's/protected-mode yes/protected-mode no/g' /etc/redis/redis.conf @@ -209,7 +234,7 @@ echo "🔍 Running hardware acceleration check..." # Wait for at least one process to exit and log the process that exited first if [ ${#pids[@]} -gt 0 ]; then - echo "⏳ Waiting for processes to exit..." + echo "⏳ Dispatcharr is running. Monitoring processes..."
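+    # Poll once per second; `kill -0` on the PID list fails as soon as any monitored process exits, ending the loop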
while kill -0 "${pids[@]}" 2>/dev/null; do sleep 1 # Wait for a second before checking again done diff --git a/docker/init/03-init-dispatcharr.sh b/docker/init/03-init-dispatcharr.sh index 629c5a51..2c769241 100644 --- a/docker/init/03-init-dispatcharr.sh +++ b/docker/init/03-init-dispatcharr.sh @@ -1,25 +1,60 @@ #!/bin/bash -mkdir -p /data/logos -mkdir -p /data/recordings -mkdir -p /data/uploads/m3us -mkdir -p /data/uploads/epgs -mkdir -p /data/m3us -mkdir -p /data/epgs -mkdir -p /data/plugins -mkdir -p /app/logo_cache -mkdir -p /app/media +# Define directories that need to exist and be owned by PUID:PGID +DATA_DIRS=( + "/data/logos" + "/data/recordings" + "/data/uploads/m3us" + "/data/uploads/epgs" + "/data/m3us" + "/data/epgs" + "/data/plugins" +) + +APP_DIRS=( + "/app/logo_cache" + "/app/media" +) + +# Create all directories +for dir in "${DATA_DIRS[@]}" "${APP_DIRS[@]}"; do + mkdir -p "$dir" +done + +# Ensure /app itself is owned by PUID:PGID (needed for uwsgi socket creation) +if [ "$(id -u)" = "0" ] && [ -d "/app" ]; then + if [ "$(stat -c '%u:%g' /app)" != "$PUID:$PGID" ]; then + echo "Fixing ownership for /app (non-recursive)" + chown $PUID:$PGID /app + fi +fi sed -i "s/NGINX_PORT/${DISPATCHARR_PORT}/g" /etc/nginx/sites-enabled/default # NOTE: mac doesn't run as root, so only manage permissions # if this script is running as root if [ "$(id -u)" = "0" ]; then - # Needs to own ALL of /data except db, we handle that below - chown -R $PUID:$PGID /data - chown -R $PUID:$PGID /app + # Fix data directories (non-recursive to avoid touching user files) + for dir in "${DATA_DIRS[@]}"; do + if [ -d "$dir" ] && [ "$(stat -c '%u:%g' "$dir")" != "$PUID:$PGID" ]; then + echo "Fixing ownership for $dir" + chown $PUID:$PGID "$dir" + fi + done + + # Fix app directories (recursive since they're managed by the app) + for dir in "${APP_DIRS[@]}"; do + if [ -d "$dir" ] && [ "$(stat -c '%u:%g' "$dir")" != "$PUID:$PGID" ]; then + echo "Fixing ownership for $dir (recursive)" + chown -R $PUID:$PGID "$dir" + fi + done + + # Database permissions + if [ -d /data/db ] && [ "$(stat -c '%u' /data/db)" != "$(id -u postgres)" ]; then + echo "Fixing ownership for /data/db" + chown -R postgres:postgres /data/db + fi - # Permissions - chown -R postgres:postgres /data/db chmod +x /data -fi +fi \ No newline at end of file diff --git a/docker/uwsgi.debug.ini b/docker/uwsgi.debug.ini index fa94df92..3de890a5 100644 --- a/docker/uwsgi.debug.ini +++ b/docker/uwsgi.debug.ini @@ -7,9 +7,10 @@ exec-before = python /app/scripts/wait_for_redis.py ; Start Redis first attach-daemon = redis-server -; Then start other services -attach-daemon = nice -n 5 celery -A dispatcharr worker --autoscale=6,1 -attach-daemon = nice -n 5 celery -A dispatcharr beat +; Then start other services with configurable nice level (default: 5 for low priority) +; Users can override via CELERY_NICE_LEVEL environment variable in docker-compose +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr worker --autoscale=6,1 +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr beat attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application attach-daemon = cd /app/frontend && npm run dev diff --git a/docker/uwsgi.dev.ini b/docker/uwsgi.dev.ini index 6eca871d..e476e216 100644 --- a/docker/uwsgi.dev.ini +++ b/docker/uwsgi.dev.ini @@ -9,9 +9,10 @@ exec-pre = python /app/scripts/wait_for_redis.py ; Start Redis first attach-daemon = redis-server -; Then start other services -attach-daemon = nice -n 5 celery -A dispatcharr 
worker --autoscale=6,1 -attach-daemon = nice -n 5 celery -A dispatcharr beat +; Then start other services with configurable nice level (default: 5 for low priority) +; Users can override via CELERY_NICE_LEVEL environment variable in docker-compose +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr worker --autoscale=6,1 +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr beat attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application attach-daemon = cd /app/frontend && npm run dev diff --git a/docker/uwsgi.ini b/docker/uwsgi.ini index f763c3bc..f8fe8ab7 100644 --- a/docker/uwsgi.ini +++ b/docker/uwsgi.ini @@ -9,9 +9,10 @@ exec-pre = python /app/scripts/wait_for_redis.py ; Start Redis first attach-daemon = redis-server -; Then start other services -attach-daemon = nice -n 5 celery -A dispatcharr worker --autoscale=6,1 -attach-daemon = nice -n 5 celery -A dispatcharr beat +; Then start other services with configurable nice level (default: 5 for low priority) +; Users can override via CELERY_NICE_LEVEL environment variable in docker-compose +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr worker --autoscale=6,1 +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr beat attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application # Core settings diff --git a/frontend/src/WebSocket.jsx b/frontend/src/WebSocket.jsx index 23a9a656..0f46b012 100644 --- a/frontend/src/WebSocket.jsx +++ b/frontend/src/WebSocket.jsx @@ -642,6 +642,16 @@ export const WebsocketProvider = ({ children }) => { } break; + case 'epg_data_created': + // A new EPG data entry was created (e.g., for a dummy EPG) + // Fetch EPG data so the channel form can immediately assign it + try { + await fetchEPGData(); + } catch (e) { + console.warn('Failed to refresh EPG data after creation:', e); + } + break; + case 'stream_rehash': // Handle stream rehash progress updates if (parsedEvent.data.action === 'starting') { diff --git a/frontend/src/api.js b/frontend/src/api.js index 4ef5f97e..5b80a3f7 100644 --- a/frontend/src/api.js +++ b/frontend/src/api.js @@ -1118,6 +1118,21 @@ export default class API { } } + static async getTimezones() { + try { + const response = await request(`${host}/api/core/timezones/`); + return response; + } catch (e) { + errorNotification('Failed to retrieve timezones', e); + // Return fallback data instead of throwing + return { + timezones: ['UTC', 'US/Eastern', 'US/Central', 'US/Mountain', 'US/Pacific'], + grouped: {}, + count: 5 + }; + } + } + static async getStreamProfiles() { try { const response = await request(`${host}/api/core/streamprofiles/`); diff --git a/frontend/src/components/forms/DummyEPG.jsx b/frontend/src/components/forms/DummyEPG.jsx new file mode 100644 index 00000000..a449d0c6 --- /dev/null +++ b/frontend/src/components/forms/DummyEPG.jsx @@ -0,0 +1,1005 @@ +import React, { useEffect, useMemo, useState } from 'react'; +import { + Box, + Button, + Checkbox, + Divider, + Group, + Modal, + NumberInput, + Select, + Stack, + Text, + TextInput, + Textarea, +} from '@mantine/core'; +import { useForm } from '@mantine/form'; +import { notifications } from '@mantine/notifications'; +import API from '../../api'; +import dayjs from 'dayjs'; +import utc from 'dayjs/plugin/utc'; +import timezone from 'dayjs/plugin/timezone'; + +// Extend dayjs with timezone support +dayjs.extend(utc); +dayjs.extend(timezone); + +const DummyEPGForm = ({ epg, isOpen, onClose }) => { + // Separate state for each field to prevent focus 
loss + const [titlePattern, setTitlePattern] = useState(''); + const [timePattern, setTimePattern] = useState(''); + const [datePattern, setDatePattern] = useState(''); + const [sampleTitle, setSampleTitle] = useState(''); + const [titleTemplate, setTitleTemplate] = useState(''); + const [descriptionTemplate, setDescriptionTemplate] = useState(''); + const [upcomingTitleTemplate, setUpcomingTitleTemplate] = useState(''); + const [upcomingDescriptionTemplate, setUpcomingDescriptionTemplate] = + useState(''); + const [endedTitleTemplate, setEndedTitleTemplate] = useState(''); + const [endedDescriptionTemplate, setEndedDescriptionTemplate] = useState(''); + const [timezoneOptions, setTimezoneOptions] = useState([]); + const [loadingTimezones, setLoadingTimezones] = useState(true); + + const form = useForm({ + initialValues: { + name: '', + is_active: true, + source_type: 'dummy', + custom_properties: { + title_pattern: '', + time_pattern: '', + date_pattern: '', + timezone: 'US/Eastern', + output_timezone: '', + program_duration: 180, + sample_title: '', + title_template: '', + description_template: '', + upcoming_title_template: '', + upcoming_description_template: '', + ended_title_template: '', + ended_description_template: '', + name_source: 'channel', + stream_index: 1, + category: '', + include_date: true, + include_live: false, + }, + }, + validate: { + name: (value) => (value?.trim() ? null : 'Name is required'), + 'custom_properties.title_pattern': (value) => { + if (!value?.trim()) return 'Title pattern is required'; + try { + new RegExp(value); + return null; + } catch (e) { + return `Invalid regex: ${e.message}`; + } + }, + 'custom_properties.name_source': (value) => { + if (!value) return 'Name source is required'; + return null; + }, + 'custom_properties.stream_index': (value, values) => { + if (values.custom_properties?.name_source === 'stream') { + if (!value || value < 1) { + return 'Stream index must be at least 1'; + } + } + return null; + }, + }, + }); + + // Real-time pattern validation with useMemo to prevent re-renders + const patternValidation = useMemo(() => { + const result = { + titleMatch: false, + timeMatch: false, + dateMatch: false, + titleGroups: {}, + timeGroups: {}, + dateGroups: {}, + formattedTitle: '', + formattedDescription: '', + formattedUpcomingTitle: '', + formattedUpcomingDescription: '', + formattedEndedTitle: '', + formattedEndedDescription: '', + error: null, + }; + + // Validate title pattern + if (titlePattern && sampleTitle) { + try { + const titleRegex = new RegExp(titlePattern); + const titleMatch = sampleTitle.match(titleRegex); + + if (titleMatch) { + result.titleMatch = true; + result.titleGroups = titleMatch.groups || {}; + } + } catch (e) { + result.error = `Title pattern error: ${e.message}`; + } + } + + // Validate time pattern + if (timePattern && sampleTitle) { + try { + const timeRegex = new RegExp(timePattern); + const timeMatch = sampleTitle.match(timeRegex); + + if (timeMatch) { + result.timeMatch = true; + result.timeGroups = timeMatch.groups || {}; + } + } catch (e) { + result.error = result.error + ? `${result.error}; Time pattern error: ${e.message}` + : `Time pattern error: ${e.message}`; + } + } + + // Validate date pattern + if (datePattern && sampleTitle) { + try { + const dateRegex = new RegExp(datePattern); + const dateMatch = sampleTitle.match(dateRegex); + + if (dateMatch) { + result.dateMatch = true; + result.dateGroups = dateMatch.groups || {}; + } + } catch (e) { + result.error = result.error + ? 
`${result.error}; Date pattern error: ${e.message}` + : `Date pattern error: ${e.message}`; + } + } + + // Merge all groups for template formatting + const allGroups = { + ...result.titleGroups, + ...result.timeGroups, + ...result.dateGroups, + }; + + // Calculate formatted time strings if time was extracted + if (result.timeGroups.hour) { + try { + let hour24 = parseInt(result.timeGroups.hour); + const minute = result.timeGroups.minute + ? parseInt(result.timeGroups.minute) + : 0; + const ampm = result.timeGroups.ampm?.toLowerCase(); + + // Convert to 24-hour if AM/PM present + if (ampm === 'pm' && hour24 !== 12) { + hour24 += 12; + } else if (ampm === 'am' && hour24 === 12) { + hour24 = 0; + } + + // Apply timezone conversion if output_timezone is set + const sourceTimezone = form.values.custom_properties?.timezone || 'UTC'; + const outputTimezone = form.values.custom_properties?.output_timezone; + + if (outputTimezone && outputTimezone !== sourceTimezone) { + // Create a date in the source timezone + const sourceDate = dayjs() + .tz(sourceTimezone) + .set('hour', hour24) + .set('minute', minute) + .set('second', 0); + + // Convert to output timezone + const outputDate = sourceDate.tz(outputTimezone); + + // Update hour and minute to the converted values + hour24 = outputDate.hour(); + const convertedMinute = outputDate.minute(); + + // Format 24-hour time string with converted time + if (convertedMinute > 0) { + allGroups.time24 = `${hour24.toString().padStart(2, '0')}:${convertedMinute.toString().padStart(2, '0')}`; + } else { + allGroups.time24 = `${hour24.toString().padStart(2, '0')}:00`; + } + + // Convert to 12-hour format with converted time + const ampmDisplay = hour24 < 12 ? 'AM' : 'PM'; + let hour12 = hour24; + if (hour24 === 0) { + hour12 = 12; + } else if (hour24 > 12) { + hour12 = hour24 - 12; + } + + // Format 12-hour time string with converted time + if (convertedMinute > 0) { + allGroups.time = `${hour12}:${convertedMinute.toString().padStart(2, '0')} ${ampmDisplay}`; + } else { + allGroups.time = `${hour12} ${ampmDisplay}`; + } + } else { + // No timezone conversion - use original logic + // Format 24-hour time string + if (minute > 0) { + allGroups.time24 = `${hour24.toString().padStart(2, '0')}:${minute.toString().padStart(2, '0')}`; + } else { + allGroups.time24 = `${hour24.toString().padStart(2, '0')}:00`; + } + + // Convert to 12-hour format + const ampmDisplay = hour24 < 12 ? 
'AM' : 'PM'; + let hour12 = hour24; + if (hour24 === 0) { + hour12 = 12; + } else if (hour24 > 12) { + hour12 = hour24 - 12; + } + + // Format 12-hour time string + if (minute > 0) { + allGroups.time = `${hour12}:${minute.toString().padStart(2, '0')} ${ampmDisplay}`; + } else { + allGroups.time = `${hour12} ${ampmDisplay}`; + } + } + } catch (e) { + // If parsing fails, leave time/time24 as placeholders + console.error('Error formatting time:', e); + } + } + + // Format title template + if (titleTemplate && (result.titleMatch || result.timeMatch)) { + result.formattedTitle = titleTemplate.replace( + /\{(\w+)\}/g, + (match, key) => allGroups[key] || match + ); + } + + // Format description template + if (descriptionTemplate && (result.titleMatch || result.timeMatch)) { + result.formattedDescription = descriptionTemplate.replace( + /\{(\w+)\}/g, + (match, key) => allGroups[key] || match + ); + } + + // Format upcoming title template + if (upcomingTitleTemplate && (result.titleMatch || result.timeMatch)) { + result.formattedUpcomingTitle = upcomingTitleTemplate.replace( + /\{(\w+)\}/g, + (match, key) => allGroups[key] || match + ); + } + + // Format upcoming description template + if ( + upcomingDescriptionTemplate && + (result.titleMatch || result.timeMatch) + ) { + result.formattedUpcomingDescription = upcomingDescriptionTemplate.replace( + /\{(\w+)\}/g, + (match, key) => allGroups[key] || match + ); + } + + // Format ended title template + if (endedTitleTemplate && (result.titleMatch || result.timeMatch)) { + result.formattedEndedTitle = endedTitleTemplate.replace( + /\{(\w+)\}/g, + (match, key) => allGroups[key] || match + ); + } + + // Format ended description template + if (endedDescriptionTemplate && (result.titleMatch || result.timeMatch)) { + result.formattedEndedDescription = endedDescriptionTemplate.replace( + /\{(\w+)\}/g, + (match, key) => allGroups[key] || match + ); + } + + return result; + }, [ + titlePattern, + timePattern, + datePattern, + sampleTitle, + titleTemplate, + descriptionTemplate, + upcomingTitleTemplate, + upcomingDescriptionTemplate, + endedTitleTemplate, + endedDescriptionTemplate, + form.values.custom_properties?.timezone, + form.values.custom_properties?.output_timezone, + ]); + + useEffect(() => { + if (epg) { + const custom = epg.custom_properties || {}; + + form.setValues({ + name: epg.name || '', + is_active: epg.is_active ?? true, + source_type: 'dummy', + custom_properties: { + title_pattern: custom.title_pattern || '', + time_pattern: custom.time_pattern || '', + date_pattern: custom.date_pattern || '', + timezone: + custom.timezone || + custom.timezone_offset?.toString() || + 'US/Eastern', + output_timezone: custom.output_timezone || '', + program_duration: custom.program_duration || 180, + sample_title: custom.sample_title || '', + title_template: custom.title_template || '', + description_template: custom.description_template || '', + upcoming_title_template: custom.upcoming_title_template || '', + upcoming_description_template: + custom.upcoming_description_template || '', + ended_title_template: custom.ended_title_template || '', + ended_description_template: custom.ended_description_template || '', + name_source: custom.name_source || 'channel', + stream_index: custom.stream_index || 1, + category: custom.category || '', + include_date: custom.include_date ?? true, + include_live: custom.include_live ?? 
false, + }, + }); + + // Set controlled state + setTitlePattern(custom.title_pattern || ''); + setTimePattern(custom.time_pattern || ''); + setDatePattern(custom.date_pattern || ''); + setSampleTitle(custom.sample_title || ''); + setTitleTemplate(custom.title_template || ''); + setDescriptionTemplate(custom.description_template || ''); + setUpcomingTitleTemplate(custom.upcoming_title_template || ''); + setUpcomingDescriptionTemplate( + custom.upcoming_description_template || '' + ); + setEndedTitleTemplate(custom.ended_title_template || ''); + setEndedDescriptionTemplate(custom.ended_description_template || ''); + } else { + form.reset(); + setTitlePattern(''); + setTimePattern(''); + setDatePattern(''); + setSampleTitle(''); + setTitleTemplate(''); + setDescriptionTemplate(''); + setUpcomingTitleTemplate(''); + setUpcomingDescriptionTemplate(''); + setEndedTitleTemplate(''); + setEndedDescriptionTemplate(''); + } + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [epg]); + + // Fetch available timezones from the API + useEffect(() => { + const fetchTimezones = async () => { + try { + setLoadingTimezones(true); + const response = await API.getTimezones(); + + // Convert timezone list to Select options format + const options = response.timezones.map((tz) => ({ + value: tz, + label: tz, + })); + + setTimezoneOptions(options); + } catch (error) { + console.error('Failed to load timezones:', error); + notifications.show({ + title: 'Warning', + message: 'Failed to load timezone list. Using default options.', + color: 'yellow', + }); + // Fallback to a minimal list + setTimezoneOptions([ + { value: 'UTC', label: 'UTC' }, + { value: 'US/Eastern', label: 'US/Eastern' }, + { value: 'US/Central', label: 'US/Central' }, + { value: 'US/Pacific', label: 'US/Pacific' }, + ]); + } finally { + setLoadingTimezones(false); + } + }; + + fetchTimezones(); + }, []); + + const handleSubmit = async (values) => { + try { + if (epg?.id) { + await API.updateEPG({ ...values, id: epg.id }); + notifications.show({ + title: 'Success', + message: 'Dummy EPG source updated successfully', + color: 'green', + }); + } else { + await API.addEPG(values); + notifications.show({ + title: 'Success', + message: 'Dummy EPG source created successfully', + color: 'green', + }); + } + onClose(); + } catch (error) { + notifications.show({ + title: 'Error', + message: error.message || 'Failed to save dummy EPG source', + color: 'red', + }); + } + }; + + return ( + +
+ + {/* Basic Settings */} + + + {/* Pattern Configuration */} + + + + Define regex patterns to extract information from channel titles or + stream names. Use named capture groups like + (?<groupname>pattern). + + +