diff --git a/README.md b/README.md
index 5216663f..9b359e25 100644
--- a/README.md
+++ b/README.md
@@ -22,6 +22,7 @@ Dispatcharr has officially entered **BETA**, bringing powerful new features and
 📊 **Real-Time Stats Dashboard** — Live insights into stream health and client activity\
 🧠 **EPG Auto-Match** — Match program data to channels automatically\
 ⚙️ **Streamlink + FFmpeg Support** — Flexible backend options for streaming and recording\
+🎬 **VOD Management** — Full Video on Demand support with movies and TV series\
 🧼 **UI & UX Enhancements** — Smoother, faster, more responsive interface\
 🛠 **Output Compatibility** — HDHomeRun, M3U, and XMLTV EPG support for Plex, Jellyfin, and more
@@ -31,6 +32,7 @@ Dispatcharr has officially entered **BETA**, bringing powerful new features and
 ✅ **Full IPTV Control** — Import, organize, proxy, and monitor IPTV streams on your own terms\
 ✅ **Smart Playlist Handling** — M3U import, filtering, grouping, and failover support\
+✅ **VOD Content Management** — Organize movies and TV series with metadata and streaming\
 ✅ **Reliable EPG Integration** — Match and manage TV guide data with ease\
 ✅ **Clean & Responsive Interface** — Modern design that gets out of your way\
 ✅ **Fully Self-Hosted** — Total control, zero reliance on third-party services
diff --git a/apps/api/urls.py b/apps/api/urls.py
index 75a80725..3de2e560 100644
--- a/apps/api/urls.py
+++ b/apps/api/urls.py
@@ -25,6 +25,7 @@ urlpatterns = [
     path('hdhr/', include(('apps.hdhr.api_urls', 'hdhr'), namespace='hdhr')),
     path('m3u/', include(('apps.m3u.api_urls', 'm3u'), namespace='m3u')),
     path('core/', include(('core.api_urls', 'core'), namespace='core')),
+    path('vod/', include(('apps.vod.api_urls', 'vod'), namespace='vod')),
     # path('output/', include(('apps.output.api_urls', 'output'), namespace='output')),
     #path('player/', include(('apps.player.api_urls', 'player'), namespace='player')),
     #path('settings/', include(('apps.settings.api_urls', 'settings'), namespace='settings')),
diff --git a/apps/channels/api_views.py b/apps/channels/api_views.py
index ee3ef8b9..e96f4d2a 100644
--- a/apps/channels/api_views.py
+++ b/apps/channels/api_views.py
@@ -44,6 +44,7 @@ import django_filters
 from django_filters.rest_framework import DjangoFilterBackend
 from rest_framework.filters import SearchFilter, OrderingFilter
 from apps.epg.models import EPGData
+from apps.vod.models import Movie, Series
 from django.db.models import Q
 from django.http import StreamingHttpResponse, FileResponse, Http404
 import mimetypes
@@ -195,7 +196,7 @@ class ChannelGroupViewSet(viewsets.ModelViewSet):
         from django.db.models import Count
         return ChannelGroup.objects.annotate(
             channel_count=Count('channels', distinct=True),
-            m3u_account_count=Count('m3u_account', distinct=True)
+            m3u_account_count=Count('m3u_accounts', distinct=True)
         )

     def update(self, request, *args, **kwargs):
@@ -237,7 +238,7 @@ class ChannelGroupViewSet(viewsets.ModelViewSet):
         # Find groups with no channels and no M3U account associations
         unused_groups = ChannelGroup.objects.annotate(
             channel_count=Count('channels', distinct=True),
-            m3u_account_count=Count('m3u_account', distinct=True)
+            m3u_account_count=Count('m3u_accounts', distinct=True)
         ).filter(
             channel_count=0,
             m3u_account_count=0
@@ -1206,7 +1207,7 @@ class CleanupUnusedLogosAPIView(APIView):
         return [Authenticated()]

     @swagger_auto_schema(
-        operation_description="Delete all logos that are not used by any channels",
+        operation_description="Delete all logos that are not used by any channels, movies, or series",
         request_body=openapi.Schema(
             type=openapi.TYPE_OBJECT,
             properties={
@@ -1220,10 +1221,24 @@ class CleanupUnusedLogosAPIView(APIView):
         responses={200: "Cleanup completed"},
     )
     def post(self, request):
-        """Delete all logos with no channel associations"""
+        """Delete all logos with no channel, movie, or series associations"""
         delete_files = request.data.get("delete_files", False)

-        unused_logos = Logo.objects.filter(channels__isnull=True)
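+        # The reverse accessors 'movie' and 'series' used below assume the VOD
+        # models declare their logo ForeignKeys with those related_names.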
+        # Find logos that are not used by channels, movies, or series
+        filter_conditions = Q(channels__isnull=True)
+
+        # Add VOD conditions if models are available
+        try:
+            filter_conditions &= Q(movie__isnull=True)
+        except Exception:
+            pass
+
+        try:
+            filter_conditions &= Q(series__isnull=True)
+        except Exception:
+            pass
+
+        unused_logos = Logo.objects.filter(filter_conditions)
         deleted_count = unused_logos.count()
         logo_names = list(unused_logos.values_list('name', flat=True))
         local_files_deleted = 0
@@ -1259,9 +1274,23 @@ class CleanupUnusedLogosAPIView(APIView):
         })


+class LogoPagination(PageNumberPagination):
+    page_size = 50  # Default page size to match frontend default
+    page_size_query_param = "page_size"  # Allow clients to specify page size
+    max_page_size = 1000  # Prevent excessive page sizes
+
+    def paginate_queryset(self, queryset, request, view=None):
+        # Check if pagination should be disabled for specific requests
+        if request.query_params.get('no_pagination') == 'true':
+            return None  # disables pagination, returns full queryset
+
+        return super().paginate_queryset(queryset, request, view)
+
+
 class LogoViewSet(viewsets.ModelViewSet):
     queryset = Logo.objects.all()
     serializer_class = LogoSerializer
+    pagination_class = LogoPagination
     parser_classes = (MultiPartParser, FormParser, JSONParser)

     def get_permissions(self):
@@ -1278,8 +1307,16 @@ class LogoViewSet(viewsets.ModelViewSet):

     def get_queryset(self):
         """Optimize queryset with prefetch and add filtering"""
+        # Start with basic prefetch for channels
         queryset = Logo.objects.prefetch_related('channels').order_by('name')

+        # Try to prefetch VOD relations if available
+        try:
+            queryset = queryset.prefetch_related('movie', 'series')
+        except Exception:
+            # VOD app might not be available, continue without VOD prefetch
+            pass
+
         # Filter by specific IDs
         ids = self.request.query_params.getlist('ids')
         if ids:
@@ -1292,12 +1329,62 @@ class LogoViewSet(viewsets.ModelViewSet):
                 pass  # Invalid IDs, return empty queryset
                 queryset = Logo.objects.none()

-        # Filter by usage
+        # Filter by usage - now includes VOD content
         used_filter = self.request.query_params.get('used', None)
         if used_filter == 'true':
-            queryset = queryset.filter(channels__isnull=False).distinct()
+            # Logo is used if it has any channels, movies, or series
+            filter_conditions = Q(channels__isnull=False)
+
+            # Add VOD conditions if models are available
+            try:
+                filter_conditions |= Q(movie__isnull=False)
+            except Exception:
+                pass
+
+            try:
+                filter_conditions |= Q(series__isnull=False)
+            except Exception:
+                pass
+
+            queryset = queryset.filter(filter_conditions).distinct()
+
         elif used_filter == 'false':
-            queryset = queryset.filter(channels__isnull=True)
+            # Logo is unused if it has no channels, movies, or series
+            filter_conditions = Q(channels__isnull=True)
+
+            # Add VOD conditions if models are available
+            try:
+                filter_conditions &= Q(movie__isnull=True)
+            except Exception:
+                pass
+
+            try:
+                filter_conditions &= Q(series__isnull=True)
+            except Exception:
+                pass
+
+            queryset = queryset.filter(filter_conditions)
+
+        # Filter for channel assignment (unused + channel-used, exclude VOD-only)
+        channel_assignable = self.request.query_params.get('channel_assignable', None)
+        if channel_assignable == 'true':
+            # Include logos that are either:
+            # 1. Completely unused, OR
+            # 2. Used by channels (but may also be used by VOD)
+            # Exclude logos that are ONLY used by VOD content
+
+            unused_condition = Q(channels__isnull=True)
+            channel_used_condition = Q(channels__isnull=False)
+
+            # Add VOD conditions if models are available
+            try:
+                unused_condition &= Q(movie__isnull=True) & Q(series__isnull=True)
+            except Exception:
+                pass
+
+            # Combine: unused OR used by channels
+            filter_conditions = unused_condition | channel_used_condition
+            queryset = queryset.filter(filter_conditions).distinct()

         # Filter by name
         name_filter = self.request.query_params.get('name', None)
diff --git a/apps/channels/migrations/0024_alter_channelgroupm3uaccount_channel_group.py b/apps/channels/migrations/0024_alter_channelgroupm3uaccount_channel_group.py
new file mode 100644
index 00000000..7ee5544c
--- /dev/null
+++ b/apps/channels/migrations/0024_alter_channelgroupm3uaccount_channel_group.py
@@ -0,0 +1,19 @@
+# Generated by Django 5.2.4 on 2025-08-22 20:14
+
+import django.db.models.deletion
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('dispatcharr_channels', '0023_stream_stream_stats_stream_stream_stats_updated_at'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='channelgroupm3uaccount',
+            name='channel_group',
+            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='m3u_accounts', to='dispatcharr_channels.channelgroup'),
+        ),
+    ]
diff --git a/apps/channels/models.py b/apps/channels/models.py
index d6c3faef..13cf0f54 100644
--- a/apps/channels/models.py
+++ b/apps/channels/models.py
@@ -95,7 +95,7 @@ class Stream(models.Model):
     )
     last_seen = models.DateTimeField(db_index=True, default=datetime.now)
     custom_properties = models.TextField(null=True, blank=True)
-
+
     # Stream statistics fields
     stream_stats = models.JSONField(
         null=True,
@@ -560,7 +560,7 @@ class ChannelStream(models.Model):

 class ChannelGroupM3UAccount(models.Model):
     channel_group = models.ForeignKey(
-        ChannelGroup, on_delete=models.CASCADE, related_name="m3u_account"
+        ChannelGroup, on_delete=models.CASCADE, related_name="m3u_accounts"
     )
     m3u_account = models.ForeignKey(
         M3UAccount, on_delete=models.CASCADE, related_name="channel_group"
diff --git a/apps/channels/serializers.py b/apps/channels/serializers.py
index 7c5ddd54..0b29353e 100644
--- a/apps/channels/serializers.py
+++ b/apps/channels/serializers.py
@@ -1,3 +1,4 @@
+import json
 from rest_framework import serializers
 from .models import (
     Stream,
@@ -60,19 +61,81 @@ class LogoSerializer(serializers.ModelSerializer):
         return reverse("api:channels:logo-cache", args=[obj.id])

     def get_channel_count(self, obj):
-        """Get the number of channels using this logo"""
-        return obj.channels.count()
+        """Get the number of channels, movies, and series using this logo"""
+        channel_count = obj.channels.count()
+
+        # Safely get movie count
+        try:
+            movie_count = obj.movie.count() if hasattr(obj, 'movie') else 0
+        except AttributeError:
+            movie_count = 0
+
+        # Safely get series count
+        try:
+            series_count = obj.series.count() if hasattr(obj, 'series') else 0
+        except AttributeError:
+            series_count = 0
+
+        return channel_count + movie_count + series_count

     def get_is_used(self, obj):
-        """Check if this logo is used by any channels"""
-        return obj.channels.exists()
+        """Check if this logo is used by any channels, movies, or series"""
+        # Check if used by channels
+        if obj.channels.exists():
+            return True
+
+        # Check if used by movies (handle case where VOD app might not be available)
+        try:
+            if hasattr(obj, 'movie') and obj.movie.exists():
+                return True
+        except AttributeError:
+            pass
+
+        # Check if used by series (handle case where VOD app might not be available)
+        try:
+            if hasattr(obj, 'series') and obj.series.exists():
+                return True
+        except AttributeError:
+            pass
+
+        return False

     def get_channel_names(self, obj):
-        """Get the names of channels using this logo (limited to first 5)"""
+        """Get the names of channels, movies, and series using this logo (limited to first 5)"""
+        names = []
+
+        # Get channel names
         channels = obj.channels.all()[:5]
-        names = [channel.name for channel in channels]
-        if obj.channels.count() > 5:
-            names.append(f"...and {obj.channels.count() - 5} more")
+        for channel in channels:
+            names.append(f"Channel: {channel.name}")
+
+        # Get movie names (only if we haven't reached limit)
+        if len(names) < 5:
+            try:
+                if hasattr(obj, 'movie'):
+                    remaining_slots = 5 - len(names)
+                    movies = obj.movie.all()[:remaining_slots]
+                    for movie in movies:
+                        names.append(f"Movie: {movie.name}")
+            except AttributeError:
+                pass
+
+        # Get series names (only if we haven't reached limit)
+        if len(names) < 5:
+            try:
+                if hasattr(obj, 'series'):
+                    remaining_slots = 5 - len(names)
+                    series = obj.series.all()[:remaining_slots]
+                    for series_item in series:
+                        names.append(f"Series: {series_item.name}")
+            except AttributeError:
+                pass
+
+        # Calculate total count for "more" message
+        total_count = self.get_channel_count(obj)
+        if total_count > 5:
+            names.append(f"...and {total_count - 5} more")
+
         return names


@@ -134,16 +197,54 @@ class StreamSerializer(serializers.ModelSerializer):
         return fields


+class ChannelGroupM3UAccountSerializer(serializers.ModelSerializer):
+    m3u_accounts = serializers.IntegerField(source="m3u_account.id", read_only=True)
+    enabled = serializers.BooleanField()
+    auto_channel_sync = serializers.BooleanField(default=False)
+    auto_sync_channel_start = serializers.FloatField(allow_null=True, required=False)
+    custom_properties = serializers.JSONField(required=False)
+
+    class Meta:
+        model = ChannelGroupM3UAccount
+        fields = ["m3u_accounts", "channel_group", "enabled", "auto_channel_sync", "auto_sync_channel_start", "custom_properties"]
+
+    def to_representation(self, instance):
+        data = super().to_representation(instance)
+
+        # Ensure custom_properties is always returned as a dict
+        custom_props = {}
+        if instance.custom_properties:
+            try:
+                custom_props = json.loads(instance.custom_properties)
+            except (json.JSONDecodeError, TypeError):
+                custom_props = {}
+        data["custom_properties"] = custom_props
+
+        return data
+
+    def to_internal_value(self, data):
+        # Accept both dict and JSON string for custom_properties
+        val = data.get("custom_properties")
+        if isinstance(val, str):
+            try:
+                data["custom_properties"] = json.loads(val)
+            except Exception:
+                pass
+
+        return super().to_internal_value(data)
+
+
 #
 # Channel Group
 #
 class ChannelGroupSerializer(serializers.ModelSerializer):
     channel_count = serializers.IntegerField(read_only=True)
     m3u_account_count = serializers.IntegerField(read_only=True)
+    m3u_accounts = ChannelGroupM3UAccountSerializer(
+        many=True,
+        read_only=True
+    )

     class Meta:
         model = ChannelGroup
-        fields = ["id", "name", "channel_count", "m3u_account_count"]
+        fields = ["id", "name", "channel_count", "m3u_account_count", "m3u_accounts"]


 class ChannelProfileSerializer(serializers.ModelSerializer):
@@ -347,40 +448,6 @@ class ChannelSerializer(serializers.ModelSerializer):
         return None


-class ChannelGroupM3UAccountSerializer(serializers.ModelSerializer):
-    enabled = serializers.BooleanField()
-    auto_channel_sync = serializers.BooleanField(default=False)
-    auto_sync_channel_start = serializers.FloatField(allow_null=True, required=False)
-    custom_properties = serializers.JSONField(required=False)
-
-    class Meta:
-        model = ChannelGroupM3UAccount
-        fields = ["id", "channel_group", "enabled", "auto_channel_sync", "auto_sync_channel_start", "custom_properties"]
-
-    def to_representation(self, instance):
-        ret = super().to_representation(instance)
-        # Ensure custom_properties is always a dict or None
-        val = ret.get("custom_properties")
-        if isinstance(val, str):
-            import json
-            try:
-                ret["custom_properties"] = json.loads(val)
-            except Exception:
-                ret["custom_properties"] = None
-        return ret
-
-    def to_internal_value(self, data):
-        # Accept both dict and JSON string for custom_properties
-        val = data.get("custom_properties")
-        if isinstance(val, str):
-            import json
-            try:
-                data["custom_properties"] = json.loads(val)
-            except Exception:
-                pass
-        return super().to_internal_value(data)
-
-
 class RecordingSerializer(serializers.ModelSerializer):
     class Meta:
         model = Recording
diff --git a/apps/m3u/admin.py b/apps/m3u/admin.py
index 29022933..dd5986eb 100644
--- a/apps/m3u/admin.py
+++ b/apps/m3u/admin.py
@@ -1,6 +1,7 @@
 from django.contrib import admin
 from django.utils.html import format_html
 from .models import M3UAccount, M3UFilter, ServerGroup, UserAgent
+import json


 class M3UFilterInline(admin.TabularInline):
@@ -17,6 +18,7 @@ class M3UAccountAdmin(admin.ModelAdmin):
         "server_url",
         "server_group",
         "max_streams",
+        "priority",
         "is_active",
         "user_agent_display",
         "uploaded_file_link",
@@ -38,6 +40,18 @@ class M3UAccountAdmin(admin.ModelAdmin):

     user_agent_display.short_description = "User Agent(s)"

+    def vod_enabled_display(self, obj):
+        """Display whether VOD is enabled for this account"""
+        if obj.custom_properties:
+            try:
+                custom_props = json.loads(obj.custom_properties)
+                # Return a real boolean so Django admin's boolean icon works
+                return bool(custom_props.get('enable_vod', False))
+            except (json.JSONDecodeError, TypeError):
+                pass
+        return False
+    vod_enabled_display.short_description = "VOD Enabled"
+    vod_enabled_display.boolean = True
+
     def uploaded_file_link(self, obj):
         if obj.uploaded_file:
             return format_html(
diff --git a/apps/m3u/api_views.py b/apps/m3u/api_views.py
index 46676e93..65fb1c0a 100644
--- a/apps/m3u/api_views.py
+++ b/apps/m3u/api_views.py
@@ -21,6 +21,7 @@ from .models import M3UAccount, M3UFilter, ServerGroup, M3UAccountProfile
 from core.models import UserAgent
 from apps.channels.models import ChannelGroupM3UAccount
 from core.serializers import UserAgentSerializer
+from apps.vod.models import M3UVODCategoryRelation

 from .serializers import (
     M3UAccountSerializer,
@@ -30,8 +31,7 @@ from .serializers import (
 )
 from .tasks import refresh_single_m3u_account, refresh_m3u_accounts

-from django.core.files.storage import default_storage
-from django.core.files.base import ContentFile
+import json


 class M3UAccountViewSet(viewsets.ModelViewSet):
@@ -78,15 +78,33 @@ class M3UAccountViewSet(viewsets.ModelViewSet):

         # Now call super().create() to create the instance
         response = super().create(request, *args, **kwargs)

-        print(response.data.get("account_type"))
-        if response.data.get("account_type") == M3UAccount.Types.XC:
-            refresh_m3u_groups(response.data.get("id"))
+        account_type = response.data.get("account_type")
+        account_id = response.data.get("id")
+
+        if account_type == M3UAccount.Types.XC:
+            refresh_m3u_groups(account_id)
+
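+        # Note: refresh_categories below is called inline (no .delay), so XC
+        # category discovery blocks this request; the heavier VOD content
+        # refresh elsewhere in this viewset is queued through Celery instead.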
+        # Check if VOD is enabled
+        enable_vod = request.data.get("enable_vod", False)
+        if enable_vod:
+            from apps.vod.tasks import refresh_categories
+
+            refresh_categories(account_id)

         # After the instance is created, return the response
         return response

     def update(self, request, *args, **kwargs):
         instance = self.get_object()
+        old_vod_enabled = False
+
+        # Check current VOD setting
+        if instance.custom_properties:
+            try:
+                custom_props = json.loads(instance.custom_properties)
+                old_vod_enabled = custom_props.get("enable_vod", False)
+            except (json.JSONDecodeError, TypeError):
+                pass

         # Handle file upload first, if any
         file_path = None
@@ -122,6 +140,18 @@ class M3UAccountViewSet(viewsets.ModelViewSet):
         # Now call super().update() to update the instance
         response = super().update(request, *args, **kwargs)

+        # Check if VOD setting changed and trigger refresh if needed
+        new_vod_enabled = request.data.get("enable_vod", old_vod_enabled)
+
+        if (
+            instance.account_type == M3UAccount.Types.XC
+            and not old_vod_enabled
+            and new_vod_enabled
+        ):
+            from apps.vod.tasks import refresh_vod_content
+
+            refresh_vod_content.delay(instance.id)
+
         # After the instance is updated, return the response
         return response

@@ -143,11 +173,52 @@ class M3UAccountViewSet(viewsets.ModelViewSet):
         # Continue with regular partial update
         return super().partial_update(request, *args, **kwargs)

+    @action(detail=True, methods=["post"], url_path="refresh-vod")
+    def refresh_vod(self, request, pk=None):
+        """Trigger VOD content refresh for XtreamCodes accounts"""
+        account = self.get_object()
+
+        if account.account_type != M3UAccount.Types.XC:
+            return Response(
+                {"error": "VOD refresh is only available for XtreamCodes accounts"},
+                status=status.HTTP_400_BAD_REQUEST,
+            )
+
+        # Check if VOD is enabled
+        vod_enabled = False
+        if account.custom_properties:
+            try:
+                custom_props = json.loads(account.custom_properties)
+                vod_enabled = custom_props.get("enable_vod", False)
+            except (json.JSONDecodeError, TypeError):
+                pass
+
+        if not vod_enabled:
+            return Response(
+                {"error": "VOD is not enabled for this account"},
+                status=status.HTTP_400_BAD_REQUEST,
+            )
+
+        try:
+            from apps.vod.tasks import refresh_vod_content
+
+            refresh_vod_content.delay(account.id)
+            return Response(
+                {"message": f"VOD refresh initiated for account {account.name}"},
+                status=status.HTTP_202_ACCEPTED,
+            )
+        except Exception as e:
+            return Response(
+                {"error": f"Failed to initiate VOD refresh: {str(e)}"},
+                status=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            )
+
     @action(detail=True, methods=["patch"], url_path="group-settings")
     def update_group_settings(self, request, pk=None):
         """Update auto channel sync settings for M3U account groups"""
         account = self.get_object()
         group_settings = request.data.get("group_settings", [])
+        category_settings = request.data.get("category_settings", [])

         try:
             for setting in group_settings:
@@ -173,6 +244,25 @@ class M3UAccountViewSet(viewsets.ModelViewSet):
                 },
             )

+            for setting in category_settings:
+                category_id = setting.get("id")
+                enabled = setting.get("enabled", True)
+                custom_properties = setting.get("custom_properties", {})
+
+                if category_id:
+                    M3UVODCategoryRelation.objects.update_or_create(
+                        category_id=category_id,
+                        m3u_account=account,
+                        defaults={
+                            "enabled": enabled,
+                            "custom_properties": (
+                                custom_properties
+                                if isinstance(custom_properties, str)
+                                else json.dumps(custom_properties)
+                            ),
+                        },
+                    )
+
             return Response({"message": "Group settings updated successfully"})

         except Exception as e:
diff --git a/apps/m3u/forms.py b/apps/m3u/forms.py
index f6fc7f91..dc29188a 100644
--- a/apps/m3u/forms.py
+++ b/apps/m3u/forms.py
@@ -4,6 +4,13 @@ from .models import M3UAccount, M3UFilter
 import re

 class M3UAccountForm(forms.ModelForm):
+    enable_vod = forms.BooleanField(
+        required=False,
+        initial=False,
+        label="Enable VOD Content",
+        help_text="Parse and import VOD (movies/series) content for XtreamCodes accounts"
+    )
+
     class Meta:
         model = M3UAccount
         fields = [
@@ -13,8 +20,44 @@ class M3UAccountForm(forms.ModelForm):
             'server_group',
             'max_streams',
             'is_active',
+            'enable_vod',
         ]

+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        # Set initial value for enable_vod from custom_properties
+        if self.instance and self.instance.custom_properties:
+            try:
+                import json
+                custom_props = json.loads(self.instance.custom_properties)
+                self.fields['enable_vod'].initial = custom_props.get('enable_vod', False)
+            except (json.JSONDecodeError, TypeError):
+                pass
+
+    def save(self, commit=True):
+        instance = super().save(commit=False)
+
+        # Handle enable_vod field
+        enable_vod = self.cleaned_data.get('enable_vod', False)
+
+        # Parse existing custom_properties
+        custom_props = {}
+        if instance.custom_properties:
+            try:
+                import json
+                custom_props = json.loads(instance.custom_properties)
+            except (json.JSONDecodeError, TypeError):
+                custom_props = {}
+
+        # Update VOD preference
+        custom_props['enable_vod'] = enable_vod
+        instance.custom_properties = json.dumps(custom_props)
+
+        if commit:
+            instance.save()
+        return instance
+
     def clean_uploaded_file(self):
         uploaded_file = self.cleaned_data.get('uploaded_file')
         if uploaded_file:
diff --git a/apps/m3u/migrations/0016_m3uaccount_priority.py b/apps/m3u/migrations/0016_m3uaccount_priority.py
new file mode 100644
index 00000000..55e0e95b
--- /dev/null
+++ b/apps/m3u/migrations/0016_m3uaccount_priority.py
@@ -0,0 +1,18 @@
+# Generated by Django 5.2.4 on 2025-08-20 22:35
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('m3u', '0015_alter_m3ufilter_options_m3ufilter_custom_properties'),
+    ]
+
+    operations = [
+        migrations.AddField(
+            model_name='m3uaccount',
+            name='priority',
+            field=models.PositiveIntegerField(default=0, help_text='Priority for VOD provider selection (higher numbers = higher priority). Used when multiple providers offer the same content.'),
+        ),
+    ]
diff --git a/apps/m3u/models.py b/apps/m3u/models.py
index 2a7846c6..cfcc3646 100644
--- a/apps/m3u/models.py
+++ b/apps/m3u/models.py
@@ -94,6 +94,10 @@ class M3UAccount(models.Model):
         default=7,
         help_text="Number of days after which a stream will be removed if not seen in the M3U source.",
     )
+    priority = models.PositiveIntegerField(
+        default=0,
+        help_text="Priority for VOD provider selection (higher numbers = higher priority). Used when multiple providers offer the same content.",
+    )

     def __str__(self):
         return self.name
diff --git a/apps/m3u/serializers.py b/apps/m3u/serializers.py
index c0824bb3..1a62080b 100644
--- a/apps/m3u/serializers.py
+++ b/apps/m3u/serializers.py
@@ -1,5 +1,5 @@
 from core.utils import validate_flexible_url
-from rest_framework import serializers
+from rest_framework import serializers, status
 from rest_framework.response import Response
 from .models import M3UAccount, M3UFilter, ServerGroup, M3UAccountProfile
 from core.models import UserAgent
@@ -8,6 +8,7 @@ from apps.channels.serializers import (
     ChannelGroupM3UAccountSerializer,
 )
 import logging
+import json

 logger = logging.getLogger(__name__)

@@ -87,6 +88,7 @@ class M3UAccountSerializer(serializers.ModelSerializer):
         allow_null=True,
         validators=[validate_flexible_url],
     )
+    enable_vod = serializers.BooleanField(required=False, write_only=True)

     class Meta:
         model = M3UAccount
@@ -111,8 +113,10 @@ class M3UAccountSerializer(serializers.ModelSerializer):
             "username",
             "password",
             "stale_stream_days",
+            "priority",
             "status",
             "last_message",
+            "enable_vod",
         ]
         extra_kwargs = {
             "password": {
@@ -121,7 +125,37 @@ class M3UAccountSerializer(serializers.ModelSerializer):
             },
         }

+    def to_representation(self, instance):
+        data = super().to_representation(instance)
+
+        # Parse custom_properties to get VOD preference
+        custom_props = {}
+        if instance.custom_properties:
+            try:
+                custom_props = json.loads(instance.custom_properties)
+            except (json.JSONDecodeError, TypeError):
+                custom_props = {}
+
+        data["enable_vod"] = custom_props.get("enable_vod", False)
+        return data
+
     def update(self, instance, validated_data):
+        # Handle enable_vod preference
+        enable_vod = validated_data.pop("enable_vod", None)
+
+        if enable_vod is not None:
+            # Parse existing custom_properties
+            custom_props = {}
+            if instance.custom_properties:
+                try:
+                    custom_props = json.loads(instance.custom_properties)
+                except (json.JSONDecodeError, TypeError):
+                    custom_props = {}
+
+            # Update VOD preference
+            custom_props["enable_vod"] = enable_vod
+            validated_data["custom_properties"] = json.dumps(custom_props)
+
         # Pop out channel group memberships so we can handle them manually
         channel_group_data = validated_data.pop("channel_group", [])

@@ -153,6 +187,24 @@ class M3UAccountSerializer(serializers.ModelSerializer):

         return instance

+    def create(self, validated_data):
+        # Handle enable_vod preference during creation
+        enable_vod = validated_data.pop("enable_vod", False)
+
+        # Parse existing custom_properties or create new
+        custom_props = {}
+        if validated_data.get("custom_properties"):
+            try:
+                custom_props = json.loads(validated_data["custom_properties"])
+            except (json.JSONDecodeError, TypeError):
+                custom_props = {}
+
+        # Set VOD preference
+        custom_props["enable_vod"] = enable_vod
+        validated_data["custom_properties"] = json.dumps(custom_props)
+
+        return super().create(validated_data)
+
     def get_filters(self, obj):
         filters = obj.filters.order_by("order")
         return M3UFilterSerializer(filters, many=True).data
diff --git a/apps/m3u/tasks.py b/apps/m3u/tasks.py
index 1072bcc6..2617e7ea 100644
--- a/apps/m3u/tasks.py
+++ b/apps/m3u/tasks.py
@@ -663,8 +663,8 @@ def process_m3u_batch(account_id, batch, groups, hash_keys):
 def cleanup_streams(account_id, scan_start_time=timezone.now):
     account = M3UAccount.objects.get(id=account_id, is_active=True)
     existing_groups = ChannelGroup.objects.filter(
-        m3u_account__m3u_account=account,
-        m3u_account__enabled=True,
+        m3u_accounts__m3u_account=account,
+        m3u_accounts__enabled=True,
     ).values_list("id", flat=True)
     logger.info(
         f"Found {len(existing_groups)} active groups for M3U account {account_id}"
     )
@@ -1613,7 +1613,19 @@ def refresh_single_m3u_account(account_id):

         # Set status to fetching
         account.status = M3UAccount.Status.FETCHING
-        account.save(update_fields=["status"])
+        account.save(update_fields=['status'])
+
+        filters = list(account.filters.all())
+
+        # Check if VOD is enabled for this account
+        vod_enabled = False
+        if account.custom_properties:
+            try:
+                custom_props = json.loads(account.custom_properties)
+                vod_enabled = custom_props.get('enable_vod', False)
+            except (json.JSONDecodeError, TypeError):
+                vod_enabled = False
+
     except M3UAccount.DoesNotExist:
         # The M3U account doesn't exist, so delete the periodic task if it exists
         logger.warning(
@@ -1742,8 +1754,8 @@ def refresh_single_m3u_account(account_id):
         existing_groups = {
             group.name: group.id
             for group in ChannelGroup.objects.filter(
-                m3u_account__m3u_account=account,  # Filter by the M3UAccount
-                m3u_account__enabled=True,  # Filter by the enabled flag in the join table
+                m3u_accounts__m3u_account=account,  # Filter by the M3UAccount
+                m3u_accounts__enabled=True,  # Filter by the enabled flag in the join table
             )
         }
@@ -1946,6 +1958,16 @@ def refresh_single_m3u_account(account_id):
                 message=account.last_message,
             )

+        # Trigger VOD refresh if enabled and account is XtreamCodes type
+        if vod_enabled and account.account_type == M3UAccount.Types.XC:
+            logger.info(f"VOD is enabled for account {account_id}, triggering VOD refresh")
+            try:
+                from apps.vod.tasks import refresh_vod_content
+                refresh_vod_content.delay(account_id)
+                logger.info(f"VOD refresh task queued for account {account_id}")
+            except Exception as e:
+                logger.error(f"Failed to queue VOD refresh for account {account_id}: {str(e)}")
+
     except Exception as e:
         logger.error(f"Error processing M3U for account {account_id}: {str(e)}")
         account.status = M3UAccount.Status.ERROR
diff --git a/apps/m3u/views.py b/apps/m3u/views.py
index f69dd6c4..0fab8c10 100644
--- a/apps/m3u/views.py
+++ b/apps/m3u/views.py
@@ -3,6 +3,7 @@ from django.views import View
 from django.utils.decorators import method_decorator
 from django.contrib.auth.decorators import login_required
 from django.views.decorators.csrf import csrf_exempt
+from django.http import JsonResponse
 from apps.m3u.models import M3UAccount
 import json
diff --git a/apps/output/urls.py b/apps/output/urls.py
index 8b9c4f3a..dc023ed7 100644
--- a/apps/output/urls.py
+++ b/apps/output/urls.py
@@ -1,5 +1,5 @@
 from django.urls import path, re_path, include
-from .views import m3u_endpoint, epg_endpoint, xc_get
+from .views import m3u_endpoint, epg_endpoint, xc_get, xc_movie_stream, xc_series_stream
 from core.views import stream_view

 app_name = "output"
diff --git a/apps/output/views.py b/apps/output/views.py
index 3fcd512b..20d3c206 100644
--- a/apps/output/views.py
+++ b/apps/output/views.py
@@ -18,6 +18,10 @@ import time  # Add this import for keep-alive delays
 from tzlocal import get_localzone
 from urllib.parse import urlparse
 import base64
+import logging
+import os
+
+logger = logging.getLogger(__name__)

 def m3u_endpoint(request, profile_name=None, user=None):
     if not network_access_allowed(request, "M3U_EPG"):
@@ -117,7 +121,7 @@ def generate_m3u(request, profile_name=None, user=None):
             if channel.logo:
                 if use_cached_logos:
                     # Use cached logo as before
-                    tvg_logo = request.build_absolute_uri(reverse('api:channels:logo-cache', args=[channel.logo.id]))
+                    tvg_logo = build_absolute_uri_with_port(request, reverse('api:channels:logo-cache', args=[channel.logo.id]))
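+                    # build_absolute_uri_with_port (defined near the end of this
+                    # file) honors X-Forwarded-Host/-Port, so cached logo URLs
+                    # stay reachable behind a reverse proxy.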
                 else:
                     # Try to find direct logo URL from channel's streams
                     direct_logo = channel.logo.url if channel.logo.url.startswith(('http://', 'https://')) else None

                     if direct_logo:
                         tvg_logo = direct_logo
                     else:
-                        tvg_logo = request.build_absolute_uri(reverse('api:channels:logo-cache', args=[channel.logo.id]))
+                        tvg_logo = build_absolute_uri_with_port(request, reverse('api:channels:logo-cache', args=[channel.logo.id]))

             # create possible gracenote id insertion
             tvc_guide_stationid = ""
@@ -369,7 +373,7 @@ def generate_epg(request, profile_name=None, user=None):
         if channel.logo:
             if use_cached_logos:
                 # Use cached logo as before
-                tvg_logo = request.build_absolute_uri(reverse('api:channels:logo-cache', args=[channel.logo.id]))
+                tvg_logo = build_absolute_uri_with_port(request, reverse('api:channels:logo-cache', args=[channel.logo.id]))
             else:
                 # Try to find direct logo URL from channel's streams
                 direct_logo = channel.logo.url if channel.logo.url.startswith(('http://', 'https://')) else None

                 if direct_logo:
                     tvg_logo = direct_logo
                 else:
-                    tvg_logo = request.build_absolute_uri(reverse('api:channels:logo-cache', args=[channel.logo.id]))
+                    tvg_logo = build_absolute_uri_with_port(request, reverse('api:channels:logo-cache', args=[channel.logo.id]))
         display_name = channel.name
         xml_lines.append(f' ')
         xml_lines.append(f'    <display-name>{html.escape(display_name)}</display-name>')
@@ -789,7 +793,20 @@ def xc_player_api(request, full=False):
         "get_series_info",
         "get_vod_info",
     ]:
-        return JsonResponse([], safe=False)
+        if action == "get_vod_categories":
+            return JsonResponse(xc_get_vod_categories(user), safe=False)
+        elif action == "get_vod_streams":
+            return JsonResponse(xc_get_vod_streams(request, user, request.GET.get("category_id")), safe=False)
+        elif action == "get_series_categories":
+            return JsonResponse(xc_get_series_categories(user), safe=False)
+        elif action == "get_series":
+            return JsonResponse(xc_get_series(request, user, request.GET.get("category_id")), safe=False)
+        elif action == "get_series_info":
+            return JsonResponse(xc_get_series_info(request, user, request.GET.get("series_id")), safe=False)
+        elif action == "get_vod_info":
+            return JsonResponse(xc_get_vod_info(request, user, request.GET.get("vod_id")), safe=False)
+        else:
+            return JsonResponse([], safe=False)

     raise Http404()
@@ -901,7 +918,8 @@ def xc_get_live_streams(request, user, category_id=None):
             "stream_icon": (
                 None
                 if not channel.logo
-                else request.build_absolute_uri(
+                else build_absolute_uri_with_port(
+                    request,
                     reverse("api:channels:logo-cache", args=[channel.logo.id])
                 )
             ),
@@ -986,3 +1004,734 @@ def xc_get_epg(request, user, short=False):
                 output['epg_listings'].append(program_output)

     return output
+
+
+def xc_get_vod_categories(user):
+    """Get VOD categories for XtreamCodes API"""
+    from apps.vod.models import VODCategory, M3UMovieRelation
+
+    response = []
+
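+    # Access model used by these VOD helpers: user_level == 0 is a regular
+    # user limited to the M3U accounts reachable through their channel
+    # profiles; other levels see every active account.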
+    # Filter categories based on user's M3U accounts
+    if user.user_level == 0:
+        # For regular users, get categories from their accessible M3U accounts
+        if user.channel_profiles.count() > 0:
+            channel_profiles = user.channel_profiles.all()
+            # Get M3U accounts accessible through user's profiles
+            from apps.m3u.models import M3UAccount
+            m3u_accounts = M3UAccount.objects.filter(
+                is_active=True,
+                profiles__in=channel_profiles
+            ).distinct()
+        else:
+            m3u_accounts = []
+
+        # Get categories that have movie relations with these accounts
+        categories = VODCategory.objects.filter(
+            category_type='movie',
+            m3umovierelation__m3u_account__in=m3u_accounts
+        ).distinct()
+    else:
+        # Admins can see all categories that have active movie relations
+        categories = VODCategory.objects.filter(
+            category_type='movie',
+            m3umovierelation__m3u_account__is_active=True
+        ).distinct()
+
+    for category in categories:
+        response.append({
+            "category_id": str(category.id),
+            "category_name": category.name,
+            "parent_id": 0,
+        })
+
+    return response
+
+
+def xc_get_vod_streams(request, user, category_id=None):
+    """Get VOD streams (movies) for XtreamCodes API"""
+    from apps.vod.models import Movie
+
+    streams = []
+
+    # Build filters for movies based on user access
+    filters = {"m3u_relations__m3u_account__is_active": True}
+
+    if user.user_level == 0:
+        # For regular users, filter by accessible M3U accounts
+        if user.channel_profiles.count() > 0:
+            channel_profiles = user.channel_profiles.all()
+            from apps.m3u.models import M3UAccount
+            m3u_accounts = M3UAccount.objects.filter(
+                is_active=True,
+                profiles__in=channel_profiles
+            ).distinct()
+            filters["m3u_relations__m3u_account__in"] = m3u_accounts
+        else:
+            return []  # No accessible accounts
+
+    if category_id:
+        filters["m3u_relations__category_id"] = category_id
+
+    # Get movies directly with their relations
+    movies = Movie.objects.filter(**filters).select_related('logo').distinct()
+
+    for movie in movies:
+        # Get the highest priority relation for this movie (for metadata like container_extension)
+        relation = movie.m3u_relations.filter(
+            m3u_account__is_active=True
+        ).select_related('m3u_account').order_by('-m3u_account__priority', 'id').first()
+
+        if relation:
+            relation_custom = relation.custom_properties or {}
+            relation_info = relation_custom.get('basic_data', {})
+            streams.append({
+                "num": movie.id,
+                "name": movie.name,
+                "stream_type": "movie",
+                "stream_id": movie.id,
+                "stream_icon": (
+                    None if not movie.logo
+                    else build_absolute_uri_with_port(
+                        request,
+                        reverse("api:channels:logo-cache", args=[movie.logo.id])
+                    )
+                ),
+                #'stream_icon': movie.logo.url if movie.logo else '',
+                "rating": movie.rating or "0",
+                "rating_5based": round(float(movie.rating or 0) / 2, 2) if movie.rating else 0,
+                "added": str(movie.created_at.timestamp()),
+                "is_adult": 0,
+                "tmdb_id": movie.tmdb_id or "",
+                "imdb_id": movie.imdb_id or "",
+                "trailer": (movie.custom_properties or {}).get('youtube_trailer') or relation_info.get('youtube_trailer') or relation_info.get('trailer', ''),
+                "category_id": str(relation.category.id) if relation.category else "0",
+                "category_ids": [int(relation.category.id)] if relation.category else [],
+                "container_extension": relation.container_extension or "mp4",
+                "custom_sid": None,
+                "direct_source": "",
+            })
+
+    return streams
+
+
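+# Provider priority: when several M3U accounts carry the same movie, the
+# relation with the highest m3u_account.priority (ties broken by lowest id)
+# supplies the category and container metadata above.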
+def xc_get_series_categories(user):
+    """Get series categories for XtreamCodes API"""
+    from apps.vod.models import VODCategory, M3USeriesRelation
+
+    response = []
+
+    # Similar filtering as VOD categories but for series
+    if user.user_level == 0:
+        if user.channel_profiles.count() > 0:
+            channel_profiles = user.channel_profiles.all()
+            from apps.m3u.models import M3UAccount
+            m3u_accounts = M3UAccount.objects.filter(
+                is_active=True,
+                profiles__in=channel_profiles
+            ).distinct()
+        else:
+            m3u_accounts = []
+
+        # Get categories that have series relations with these accounts
+        categories = VODCategory.objects.filter(
+            category_type='series',
+            m3useriesrelation__m3u_account__in=m3u_accounts
+        ).distinct()
+    else:
+        categories = VODCategory.objects.filter(
+            category_type='series',
+            m3useriesrelation__m3u_account__is_active=True
+        ).distinct()
+
+    for category in categories:
+        response.append({
+            "category_id": str(category.id),
+            "category_name": category.name,
+            "parent_id": 0,
+        })
+
+    return response
+
+
+def xc_get_series(request, user, category_id=None):
+    """Get series list for XtreamCodes API"""
+    from apps.vod.models import M3USeriesRelation
+
+    series_list = []
+
+    # Build filters based on user access
+    filters = {"m3u_account__is_active": True}
+
+    if user.user_level == 0:
+        if user.channel_profiles.count() > 0:
+            channel_profiles = user.channel_profiles.all()
+            from apps.m3u.models import M3UAccount
+            m3u_accounts = M3UAccount.objects.filter(
+                is_active=True,
+                profiles__in=channel_profiles
+            ).distinct()
+            filters["m3u_account__in"] = m3u_accounts
+        else:
+            return []
+
+    if category_id:
+        filters["category_id"] = category_id
+
+    # Get series relations instead of series directly
+    series_relations = M3USeriesRelation.objects.filter(**filters).select_related(
+        'series', 'series__logo', 'category', 'm3u_account'
+    )
+
+    for relation in series_relations:
+        series = relation.series
+        series_list.append({
+            "num": relation.id,  # Use relation ID
+            "name": series.name,
+            "series_id": relation.id,  # Use relation ID
+            "cover": (
+                None if not series.logo
+                else build_absolute_uri_with_port(
+                    request,
+                    reverse("api:channels:logo-cache", args=[series.logo.id])
+                )
+            ),
+            "plot": series.description or "",
+            "cast": series.custom_properties.get('cast', '') if series.custom_properties else "",
+            "director": series.custom_properties.get('director', '') if series.custom_properties else "",
+            "genre": series.genre or "",
+            "release_date": series.custom_properties.get('release_date', str(series.year) if series.year else "") if series.custom_properties else (str(series.year) if series.year else ""),
+            "releaseDate": series.custom_properties.get('release_date', str(series.year) if series.year else "") if series.custom_properties else (str(series.year) if series.year else ""),
+            "last_modified": str(int(relation.updated_at.timestamp())),
+            "rating": str(series.rating or "0"),
+            "rating_5based": str(round(float(series.rating or 0) / 2, 2)) if series.rating else "0",
+            "backdrop_path": series.custom_properties.get('backdrop_path', []) if series.custom_properties else [],
+            "youtube_trailer": series.custom_properties.get('youtube_trailer', '') if series.custom_properties else "",
+            "episode_run_time": series.custom_properties.get('episode_run_time', '') if series.custom_properties else "",
+            "category_id": str(relation.category.id) if relation.category else "0",
+            "category_ids": [int(relation.category.id)] if relation.category else [],
+        })
+
+    return series_list
+
+
+def xc_get_series_info(request, user, series_id):
+    """Get detailed series information including episodes"""
+    from apps.vod.models import M3USeriesRelation, M3UEpisodeRelation
+    # Imported locally (as in xc_get_vod_info below) since timezone/timedelta
+    # are used in the refresh check and may not be module-level imports here.
+    from django.utils import timezone
+    from datetime import timedelta
+
+    if not series_id:
+        raise Http404()
+
+    # Get series relation with user access filtering
+    filters = {"id": series_id, "m3u_account__is_active": True}
+
+    if user.user_level == 0:
+        if user.channel_profiles.count() > 0:
+            channel_profiles = user.channel_profiles.all()
+            from apps.m3u.models import M3UAccount
+            m3u_accounts = M3UAccount.objects.filter(
+                is_active=True,
+                profiles__in=channel_profiles
+            ).distinct()
+            filters["m3u_account__in"] = m3u_accounts
+        else:
+            raise Http404()
+
+    try:
+        series_relation = M3USeriesRelation.objects.select_related('series', 'series__logo').get(**filters)
+        series = series_relation.series
+    except M3USeriesRelation.DoesNotExist:
+        raise Http404()
+
+    # Check if we need to refresh detailed info (similar to vod api_views pattern)
+    try:
+        should_refresh = (
+            not series_relation.last_episode_refresh or
+            series_relation.last_episode_refresh < timezone.now() - timedelta(hours=24)
+        )
+
+        # Check if detailed data has been fetched
+        custom_props = series_relation.custom_properties or {}
+        episodes_fetched = custom_props.get('episodes_fetched', False)
+        detailed_fetched = custom_props.get('detailed_fetched', False)
+
+        # Force refresh if episodes/details have never been fetched or time interval exceeded
+        if not episodes_fetched or not detailed_fetched or should_refresh:
+            from apps.vod.tasks import refresh_series_episodes
+            account = series_relation.m3u_account
+            if account and account.is_active:
+                refresh_series_episodes(account, series, series_relation.external_series_id)
+                # Refresh objects from database after task completion
+                series.refresh_from_db()
+                series_relation.refresh_from_db()
+
+    except Exception as e:
+        logger.error(f"Error refreshing series data for relation {series_relation.id}: {str(e)}")
+
+    # Get episodes for this series from the same M3U account
+    episode_relations = M3UEpisodeRelation.objects.filter(
+        episode__series=series,
+        m3u_account=series_relation.m3u_account
+    ).select_related('episode').order_by('episode__season_number', 'episode__episode_number')
+
+    # Group episodes by season
+    seasons = {}
+    for relation in episode_relations:
+        episode = relation.episode
+        season_num = episode.season_number or 1
+        if season_num not in seasons:
+            seasons[season_num] = []
+
+        # Try to get the highest priority related M3UEpisodeRelation for this episode (for video/audio/bitrate)
+        first_relation = M3UEpisodeRelation.objects.filter(
+            episode=episode
+        ).select_related('m3u_account').order_by('-m3u_account__priority', 'id').first()
+        video = audio = bitrate = None
+        if first_relation and first_relation.custom_properties:
+            info = first_relation.custom_properties.get('info')
+            if info and isinstance(info, dict):
+                info_info = info.get('info')
+                if info_info and isinstance(info_info, dict):
+                    video = info_info.get('video', {})
+                    audio = info_info.get('audio', {})
+                    bitrate = info_info.get('bitrate', 0)
+        if video is None:
+            video = episode.custom_properties.get('video', {}) if episode.custom_properties else {}
+        if audio is None:
+            audio = episode.custom_properties.get('audio', {}) if episode.custom_properties else {}
+        if bitrate is None:
+            bitrate = episode.custom_properties.get('bitrate', 0) if episode.custom_properties else 0
+
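+        # The fallback chain above prefers provider-reported stream details
+        # (custom_properties['info']['info'] on the highest-priority relation)
+        # and only then uses values cached on the episode itself.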
+        seasons[season_num].append({
+            "id": episode.id,
+            "season": season_num,
+            "episode_num": episode.episode_number or 0,
+            "title": episode.name,
+            "container_extension": relation.container_extension or "mp4",
+            "added": str(int(relation.created_at.timestamp())),
+            "custom_sid": None,
+            "direct_source": "",
+            "info": {
+                "id": int(episode.id),
+                "name": episode.name,
+                "overview": episode.description or "",
+                "crew": str(episode.custom_properties.get('crew', "") if episode.custom_properties else ""),
+                "directed_by": episode.custom_properties.get('director', '') if episode.custom_properties else "",
+                "imdb_id": episode.imdb_id or "",
+                "air_date": f"{episode.air_date}" if episode.air_date else "",
+                "backdrop_path": episode.custom_properties.get('backdrop_path', []) if episode.custom_properties else [],
+                "movie_image": episode.custom_properties.get('movie_image', '') if episode.custom_properties else "",
+                "rating": float(episode.rating or 0),
+                "release_date": f"{episode.air_date}" if episode.air_date else "",
+                "duration_secs": (episode.duration_secs or 0),
+                "duration": format_duration_hms(episode.duration_secs),
+                "video": video,
+                "audio": audio,
+                "bitrate": bitrate,
+            }
+        })
+
+    # Build response using potentially refreshed data
+    series_data = {
+        'name': series.name,
+        'description': series.description or '',
+        'year': series.year,
+        'genre': series.genre or '',
+        'rating': series.rating or '0',
+        'cast': '',
+        'director': '',
+        'youtube_trailer': '',
+        'episode_run_time': '',
+        'backdrop_path': [],
+    }
+
+    # Add detailed info from custom_properties if available
+    try:
+        if series.custom_properties:
+            custom_data = series.custom_properties
+            series_data.update({
+                'cast': custom_data.get('cast', ''),
+                'director': custom_data.get('director', ''),
+                'youtube_trailer': custom_data.get('youtube_trailer', ''),
+                'episode_run_time': custom_data.get('episode_run_time', ''),
+                'backdrop_path': custom_data.get('backdrop_path', []),
+            })
+
+        # Check relation custom_properties for detailed_info
+        if series_relation.custom_properties and 'detailed_info' in series_relation.custom_properties:
+            detailed_info = series_relation.custom_properties['detailed_info']
+
+            # Override with detailed_info values where available
+            for key in ['name', 'description', 'year', 'genre', 'rating']:
+                if detailed_info.get(key):
+                    series_data[key] = detailed_info[key]
+
+            # Handle plot vs description
+            if detailed_info.get('plot'):
+                series_data['description'] = detailed_info['plot']
+            elif detailed_info.get('description'):
+                series_data['description'] = detailed_info['description']
+
+            # Update additional fields from detailed info
+            series_data.update({
+                'cast': detailed_info.get('cast', series_data['cast']),
+                'director': detailed_info.get('director', series_data['director']),
+                'youtube_trailer': detailed_info.get('youtube_trailer', series_data['youtube_trailer']),
+                'episode_run_time': detailed_info.get('episode_run_time', series_data['episode_run_time']),
+                'backdrop_path': detailed_info.get('backdrop_path', series_data['backdrop_path']),
+            })
+
+    except Exception as e:
+        logger.error(f"Error parsing series custom_properties: {str(e)}")
+
+    seasons_list = [
+        {"season_number": int(season_num), "name": f"Season {season_num}"}
+        for season_num in sorted(seasons.keys(), key=lambda x: int(x))
+    ]
+
+    info = {
+        'seasons': seasons_list,
+        "info": {
+            "name": series_data['name'],
+            "cover": (
+                None if not series.logo
+                else build_absolute_uri_with_port(
+                    request,
+                    reverse("api:channels:logo-cache", args=[series.logo.id])
+                )
+            ),
+            "plot": series_data['description'],
+            "cast": series_data['cast'],
+            "director": series_data['director'],
+            "genre": series_data['genre'],
+            "release_date": series.custom_properties.get('release_date', str(series.year) if series.year else "") if series.custom_properties else (str(series.year) if series.year else ""),
+            "releaseDate": series.custom_properties.get('release_date', str(series.year) if series.year else "") if series.custom_properties else (str(series.year) if series.year else ""),
+            "added": str(int(series_relation.created_at.timestamp())),
+            "last_modified": str(int(series_relation.updated_at.timestamp())),
+            "rating": str(series_data['rating']),
+            "rating_5based": str(round(float(series_data['rating'] or 0) / 2, 2)) if series_data['rating'] else "0",
+            "backdrop_path": series_data['backdrop_path'],
+            "youtube_trailer": series_data['youtube_trailer'],
+            "imdb": str(series.imdb_id) if series.imdb_id else "",
+            "tmdb": str(series.tmdb_id) if series.tmdb_id else "",
+            "episode_run_time": str(series_data['episode_run_time']),
+            "category_id": str(series_relation.category.id) if series_relation.category else "0",
+            "category_ids": [int(series_relation.category.id)] if series_relation.category else [],
+        },
+        "episodes": dict(seasons)
+    }
+
+    return info
+
+
+def xc_get_vod_info(request, user, vod_id):
+    """Get detailed VOD (movie) information"""
+    from apps.vod.models import M3UMovieRelation
+    from django.utils import timezone
+    from datetime import timedelta
+
+    if not vod_id:
+        raise Http404()
+
+    # Get movie relation with user access filtering - use movie ID instead of relation ID
+    filters = {"movie_id": vod_id, "m3u_account__is_active": True}
+
+    if user.user_level == 0:
+        if user.channel_profiles.count() > 0:
+            channel_profiles = user.channel_profiles.all()
+            from apps.m3u.models import M3UAccount
+            m3u_accounts = M3UAccount.objects.filter(
+                is_active=True,
+                profiles__in=channel_profiles
+            ).distinct()
+            filters["m3u_account__in"] = m3u_accounts
+        else:
+            raise Http404()
+
+    try:
+        movie_relation = M3UMovieRelation.objects.select_related('movie', 'movie__logo').get(**filters)
+        movie = movie_relation.movie
+    except M3UMovieRelation.DoesNotExist:
+        raise Http404()
+
+    # Initialize basic movie data first
+    movie_data = {
+        'name': movie.name,
+        'description': movie.description or '',
+        'year': movie.year,
+        'genre': movie.genre or '',
+        'rating': movie.rating or 0,
+        'tmdb_id': movie.tmdb_id or '',
+        'imdb_id': movie.imdb_id or '',
+        'director': '',
+        'actors': '',
+        'country': '',
+        'release_date': '',
+        'youtube_trailer': '',
+        'backdrop_path': [],
+        'cover_big': '',
+        'bitrate': 0,
+        'video': {},
+        'audio': {},
+    }
+
+    # Duplicate the provider_info logic for detailed information
+    try:
+        # Check if we need to refresh detailed info (same logic as provider_info)
+        should_refresh = (
+            not movie_relation.last_advanced_refresh or
+            movie_relation.last_advanced_refresh < timezone.now() - timedelta(hours=24)
+        )
+
+        if should_refresh:
+            # Trigger refresh of detailed info
+            from apps.vod.tasks import refresh_movie_advanced_data
+            refresh_movie_advanced_data(movie_relation.id)
+            # Refresh objects from database after task completion
+            movie.refresh_from_db()
+            movie_relation.refresh_from_db()
+
+        # Add detailed info from custom_properties if available
+        if movie.custom_properties:
+            try:
+                if isinstance(movie.custom_properties, dict):
+                    custom_data = movie.custom_properties
+                else:
+                    custom_data = json.loads(movie.custom_properties)
+
+                # Extract detailed info
+                #detailed_info = custom_data.get('detailed_info', {})
+                detailed_info = movie_relation.custom_properties.get('detailed_info', {})
+                # Update movie_data with detailed info
+                movie_data.update({
+                    'director': custom_data.get('director') or detailed_info.get('director', ''),
+                    'actors': custom_data.get('actors') or detailed_info.get('actors', ''),
+                    'country': custom_data.get('country') or detailed_info.get('country', ''),
+                    'release_date': custom_data.get('release_date') or detailed_info.get('release_date') or detailed_info.get('releasedate', ''),
+                    'youtube_trailer': custom_data.get('youtube_trailer') or detailed_info.get('youtube_trailer') or detailed_info.get('trailer', ''),
+                    'backdrop_path': custom_data.get('backdrop_path') or detailed_info.get('backdrop_path', []),
+                    'cover_big': detailed_info.get('cover_big', ''),
+                    'bitrate': detailed_info.get('bitrate', 0),
+                    'video': detailed_info.get('video', {}),
+                    'audio': detailed_info.get('audio', {}),
+                })
+
+                # Override with detailed_info values where available
+                for key in ['name', 'description', 'year', 'genre', 'rating', 'tmdb_id', 'imdb_id']:
+                    if detailed_info.get(key):
+                        movie_data[key] = detailed_info[key]
+
+                # Handle plot vs description
+                if detailed_info.get('plot'):
+                    movie_data['description'] = detailed_info['plot']
+                elif detailed_info.get('description'):
+                    movie_data['description'] = detailed_info['description']
+
+            except (json.JSONDecodeError, AttributeError, TypeError) as e:
+                logger.warning(f"Error parsing custom_properties for movie {movie.id}: {e}")
+
+    except Exception as e:
+        logger.error(f"Failed to process movie data: {e}")
+
+    # Transform API response to XtreamCodes format
+    info = {
+        "info": {
+            "name": movie_data.get('name', movie.name),
+            "o_name": movie_data.get('name', movie.name),
+            "cover_big": (
+                None if not movie.logo
+                else build_absolute_uri_with_port(
+                    request,
+                    reverse("api:channels:logo-cache", args=[movie.logo.id])
+                )
+            ),
+            "movie_image": (
+                None if not movie.logo
+                else build_absolute_uri_with_port(
+                    request,
+                    reverse("api:channels:logo-cache", args=[movie.logo.id])
+                )
+            ),
+            'description': movie_data.get('description', ''),
+            'plot': movie_data.get('description', ''),
+            'year': movie_data.get('year', ''),
+            'release_date': movie_data.get('release_date', ''),
+            'genre': movie_data.get('genre', ''),
+            'director': movie_data.get('director', ''),
+            'actors': movie_data.get('actors', ''),
+            'cast': movie_data.get('actors', ''),
+            'country': movie_data.get('country', ''),
+            'rating': movie_data.get('rating', 0),
+            'imdb_id': movie_data.get('imdb_id', ''),
+            "tmdb_id": movie_data.get('tmdb_id', ''),
+            'youtube_trailer': movie_data.get('youtube_trailer', ''),
+            'backdrop_path': movie_data.get('backdrop_path', []),
+            'cover': movie_data.get('cover_big', ''),
+            'bitrate': movie_data.get('bitrate', 0),
+            'video': movie_data.get('video', {}),
+            'audio': movie_data.get('audio', {}),
+        },
+        "movie_data": {
+            "stream_id": movie.id,
+            "name": movie.name,
+            "added": int(movie_relation.created_at.timestamp()),
+            "category_id": str(movie_relation.category.id) if movie_relation.category else "0",
+            "category_ids": [int(movie_relation.category.id)] if movie_relation.category else [],
+            "container_extension": movie_relation.container_extension or "mp4",
+            "custom_sid": None,
+            "direct_source": "",
+        }
+    }
+
+    return info
+
+
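+# XtreamCodes-style stream URLs carry the username and password in the URL
+# path; the two stream views below validate them against the 'xc_password'
+# stored in the requesting user's custom_properties, then redirect to the
+# internal VOD proxy.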
+def xc_movie_stream(request, username, password, stream_id, extension):
+    """Handle XtreamCodes movie streaming requests"""
+    from apps.vod.models import M3UMovieRelation
+
+    user = get_object_or_404(User, username=username)
+
+    custom_properties = (
+        json.loads(user.custom_properties) if user.custom_properties else {}
+    )
+
+    if "xc_password" not in custom_properties:
+        return JsonResponse({"error": "Invalid credentials"}, status=401)
+
+    if custom_properties["xc_password"] != password:
+        return JsonResponse({"error": "Invalid credentials"}, status=401)
+
+    # Get movie relation based on user access level - use movie ID instead of relation ID
+    filters = {"movie_id": stream_id, "m3u_account__is_active": True}
+
+    if user.user_level < 10:
+        # For regular users, filter by accessible M3U accounts
+        if user.channel_profiles.count() > 0:
+            channel_profiles = user.channel_profiles.all()
+            from apps.m3u.models import M3UAccount
+            m3u_accounts = M3UAccount.objects.filter(
+                is_active=True,
+                profiles__in=channel_profiles
+            ).distinct()
+            filters["m3u_account__in"] = m3u_accounts
+        else:
+            return JsonResponse({"error": "No accessible content"}, status=403)
+
+    try:
+        movie_relation = M3UMovieRelation.objects.select_related('movie').get(**filters)
+    except M3UMovieRelation.DoesNotExist:
+        return JsonResponse({"error": "Movie not found"}, status=404)
+
+    # Redirect to the VOD proxy endpoint
+    from django.http import HttpResponseRedirect
+    from django.urls import reverse
+
+    vod_url = reverse('proxy:vod_proxy:vod_stream', kwargs={
+        'content_type': 'movie',
+        'content_id': movie_relation.movie.uuid
+    })
+
+    return HttpResponseRedirect(vod_url)
+
+
+def xc_series_stream(request, username, password, stream_id, extension):
+    """Handle XtreamCodes series/episode streaming requests"""
+    from apps.vod.models import M3UEpisodeRelation
+
+    user = get_object_or_404(User, username=username)
+
+    custom_properties = (
+        json.loads(user.custom_properties) if user.custom_properties else {}
+    )
+
+    if "xc_password" not in custom_properties:
+        return JsonResponse({"error": "Invalid credentials"}, status=401)
+
+    if custom_properties["xc_password"] != password:
+        return JsonResponse({"error": "Invalid credentials"}, status=401)
+
+    # Get episode relation based on user access level - use episode ID instead of stream_id
+    filters = {"episode_id": stream_id, "m3u_account__is_active": True}
+
+    if user.user_level < 10:
+        # For regular users, filter by accessible M3U accounts
+        if user.channel_profiles.count() > 0:
+            channel_profiles = user.channel_profiles.all()
+            from apps.m3u.models import M3UAccount
+            m3u_accounts = M3UAccount.objects.filter(
+                is_active=True,
+                profiles__in=channel_profiles
+            ).distinct()
+            filters["m3u_account__in"] = m3u_accounts
+        else:
+            return JsonResponse({"error": "No accessible content"}, status=403)
+
+    try:
+        episode_relation = M3UEpisodeRelation.objects.select_related('episode').get(**filters)
+    except M3UEpisodeRelation.DoesNotExist:
+        return JsonResponse({"error": "Episode not found"}, status=404)
+
+    # Redirect to the VOD proxy endpoint
+    from django.http import HttpResponseRedirect
+    from django.urls import reverse
+
+    vod_url = reverse('proxy:vod_proxy:vod_stream', kwargs={
+        'content_type': 'episode',
+        'content_id': episode_relation.episode.uuid
+    })
+
+    return HttpResponseRedirect(vod_url)
+
+
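+# Example with hypothetical values: X-Forwarded-Host "tv.example.com" and
+# X-Forwarded-Port "8443" make build_absolute_uri_with_port() return
+# "https://tv.example.com:8443/<path>" (the scheme comes from request.scheme).
+# With no forwarding headers, no port in the Host header, and a non-dev,
+# insecure request, the fallback port below is 9191.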
Fallback to scheme default + port = "443" if request.is_secure() else "9191" + return host, port + +def build_absolute_uri_with_port(request, path): + host, port = get_host_and_port(request) + scheme = request.scheme + return f"{scheme}://{host}:{port}{path}" + +def format_duration_hms(seconds): + """ + Format a duration in seconds as HH:MM:SS zero-padded string. + """ + seconds = int(seconds or 0) + return f"{seconds//3600:02}:{(seconds%3600)//60:02}:{seconds%60:02}" diff --git a/apps/proxy/tasks.py b/apps/proxy/tasks.py index 00e3e039..68843712 100644 --- a/apps/proxy/tasks.py +++ b/apps/proxy/tasks.py @@ -10,6 +10,7 @@ import gc # Add import for garbage collection from core.utils import RedisClient from apps.proxy.ts_proxy.channel_status import ChannelStatus from core.utils import send_websocket_update +from apps.proxy.vod_proxy.connection_manager import get_connection_manager logger = logging.getLogger(__name__) @@ -59,3 +60,13 @@ def fetch_channel_stats(): # Explicitly clean up large data structures all_channels = None gc.collect() + +@shared_task +def cleanup_vod_connections(): + """Clean up stale VOD connections""" + try: + connection_manager = get_connection_manager() + connection_manager.cleanup_stale_connections(max_age_seconds=3600) # 1 hour + logger.info("VOD connection cleanup completed") + except Exception as e: + logger.error(f"Error in VOD connection cleanup: {e}", exc_info=True) diff --git a/apps/proxy/urls.py b/apps/proxy/urls.py index 98303990..34c026a9 100644 --- a/apps/proxy/urls.py +++ b/apps/proxy/urls.py @@ -5,4 +5,5 @@ app_name = 'proxy' urlpatterns = [ path('ts/', include('apps.proxy.ts_proxy.urls')), path('hls/', include('apps.proxy.hls_proxy.urls')), + path('vod/', include('apps.proxy.vod_proxy.urls')), ] \ No newline at end of file diff --git a/apps/proxy/vod_proxy/__init__.py b/apps/proxy/vod_proxy/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/apps/proxy/vod_proxy/connection_manager.py b/apps/proxy/vod_proxy/connection_manager.py new file mode 100644 index 00000000..b12996cf --- /dev/null +++ b/apps/proxy/vod_proxy/connection_manager.py @@ -0,0 +1,1444 @@ +""" +VOD Connection Manager - Redis-based connection tracking for VOD streams +""" + +import time +import json +import logging +import threading +import random +import re +import requests +from typing import Optional, Dict, Any +from django.http import StreamingHttpResponse, HttpResponse +from core.utils import RedisClient +from apps.vod.models import Movie, Episode +from apps.m3u.models import M3UAccountProfile + +logger = logging.getLogger("vod_proxy") + + +class PersistentVODConnection: + """Handles a single persistent connection to a VOD provider for a session""" + + def __init__(self, session_id: str, stream_url: str, headers: dict): + self.session_id = session_id + self.stream_url = stream_url + self.base_headers = headers + self.session = None + self.current_response = None + self.content_length = None + self.content_type = 'video/mp4' + self.final_url = None + self.lock = threading.Lock() + self.request_count = 0 # Track number of requests on this connection + self.last_activity = time.time() # Track last activity for cleanup + self.cleanup_timer = None # Timer for delayed cleanup + self.active_streams = 0 # Count of active stream generators + + def _establish_connection(self, range_header=None): + """Establish or re-establish connection to provider""" + try: + if not self.session: + self.session = requests.Session() + + headers = self.base_headers.copy() + + # Validate range 
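For reference, `format_duration_hms(3725)` yields `"01:02:05"`. The `cleanup_vod_connections` task above also needs a periodic trigger to be useful; a plausible Celery beat entry is sketched below (the entry name and 15-minute interval are assumptions — Dispatcharr may register its periodic tasks elsewhere):

```python
# Sketch: wiring cleanup_vod_connections into Celery beat. The schedule
# name and interval are illustrative assumptions, not part of this PR.
from celery.schedules import crontab

CELERY_BEAT_SCHEDULE = {
    "cleanup-vod-connections": {
        "task": "apps.proxy.tasks.cleanup_vod_connections",
        "schedule": crontab(minute="*/15"),  # every 15 minutes
    },
}
```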
header against content length + if range_header and self.content_length: + logger.info(f"[{self.session_id}] Validating range {range_header} against content length {self.content_length}") + validated_range = self._validate_range_header(range_header, int(self.content_length)) + if validated_range is None: + # Range is not satisfiable, but don't raise error - return empty response + logger.warning(f"[{self.session_id}] Range not satisfiable: {range_header} for content length {self.content_length}") + return None + elif validated_range != range_header: + range_header = validated_range + logger.info(f"[{self.session_id}] Adjusted range header: {range_header}") + else: + logger.info(f"[{self.session_id}] Range header validated successfully: {range_header}") + elif range_header: + logger.info(f"[{self.session_id}] Range header provided but no content length available yet: {range_header}") + + if range_header: + headers['Range'] = range_header + logger.info(f"[{self.session_id}] Setting Range header: {range_header}") + + # Track request count for better logging + self.request_count += 1 + if self.request_count == 1: + logger.info(f"[{self.session_id}] Making initial request to provider") + target_url = self.stream_url + allow_redirects = True + else: + logger.info(f"[{self.session_id}] Making range request #{self.request_count} on SAME session (using final URL)") + # Use the final URL from first request to avoid redirect chain + target_url = self.final_url if self.final_url else self.stream_url + allow_redirects = False # No need to follow redirects again + logger.info(f"[{self.session_id}] Using cached final URL: {target_url}") + + response = self.session.get( + target_url, + headers=headers, + stream=True, + timeout=(10, 30), + allow_redirects=allow_redirects + ) + response.raise_for_status() + + # Log successful response + if self.request_count == 1: + logger.info(f"[{self.session_id}] Request #{self.request_count} successful: {response.status_code} (followed redirects)") + else: + logger.info(f"[{self.session_id}] Request #{self.request_count} successful: {response.status_code} (direct to final URL)") + + # Capture headers from final URL + if not self.content_length: + # First check if we have a pre-stored content length from HEAD request + try: + import redis + r = redis.StrictRedis(host='localhost', port=6379, db=0, decode_responses=True) + content_length_key = f"vod_content_length:{self.session_id}" + stored_length = r.get(content_length_key) + if stored_length: + self.content_length = stored_length + logger.info(f"[{self.session_id}] *** USING PRE-STORED CONTENT LENGTH: {self.content_length} ***") + else: + # Fallback to response headers + self.content_length = response.headers.get('content-length') + logger.info(f"[{self.session_id}] *** USING RESPONSE CONTENT LENGTH: {self.content_length} ***") + except Exception as e: + logger.error(f"[{self.session_id}] Error checking Redis for content length: {e}") + # Fallback to response headers + self.content_length = response.headers.get('content-length') + + self.content_type = response.headers.get('content-type', 'video/mp4') + self.final_url = response.url + logger.info(f"[{self.session_id}] *** PERSISTENT CONNECTION - Final URL: {self.final_url} ***") + logger.info(f"[{self.session_id}] *** PERSISTENT CONNECTION - Content-Length: {self.content_length} ***") + + self.current_response = response + return response + + except Exception as e: + logger.error(f"[{self.session_id}] Error establishing connection: {e}") + self.cleanup() + raise + + def 
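Note that `_establish_connection()` only *reads* `vod_content_length:{session_id}`; the writer is not in this hunk. A plausible counterpart, assuming the view probes the provider with a HEAD request at session setup — the helper name is hypothetical, and it goes through `RedisClient` rather than the hard-coded localhost connection used above:

```python
# Hypothetical producer side of the "pre-stored content length" lookup.
# Illustrative only: caches the provider-reported size for this session so
# range validation can work before the first streaming response arrives.
import requests
from core.utils import RedisClient

def prestore_content_length(session_id: str, stream_url: str, headers: dict):
    """Probe the provider with HEAD and cache Content-Length for this session."""
    r = RedisClient.get_client()
    head = requests.head(stream_url, headers=headers,
                         timeout=(10, 30), allow_redirects=True)
    length = head.headers.get("content-length")
    if r and length:
        r.set(f"vod_content_length:{session_id}", length, ex=3600)
    return length
```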
_validate_range_header(self, range_header, content_length): + """Validate and potentially adjust range header against content length""" + try: + if not range_header or not range_header.startswith('bytes='): + return range_header + + range_part = range_header.replace('bytes=', '') + if '-' not in range_part: + return range_header + + start_str, end_str = range_part.split('-', 1) + + # Parse start byte + if start_str: + start_byte = int(start_str) + if start_byte >= content_length: + # Start is beyond file end - not satisfiable + logger.warning(f"[{self.session_id}] Range start {start_byte} >= content length {content_length} - not satisfiable") + return None + else: + start_byte = 0 + + # Parse end byte + if end_str: + end_byte = int(end_str) + if end_byte >= content_length: + # Adjust end to file end + end_byte = content_length - 1 + logger.info(f"[{self.session_id}] Adjusted range end to {end_byte}") + else: + end_byte = content_length - 1 + + # Ensure start <= end + if start_byte > end_byte: + logger.warning(f"[{self.session_id}] Range start {start_byte} > end {end_byte} - not satisfiable") + return None + + validated_range = f"bytes={start_byte}-{end_byte}" + return validated_range + + except (ValueError, IndexError) as e: + logger.warning(f"[{self.session_id}] Could not validate range header {range_header}: {e}") + return range_header + + def get_stream(self, range_header=None): + """Get stream with optional range header - reuses connection for range requests""" + with self.lock: + # Update activity timestamp + self.last_activity = time.time() + + # Cancel any pending cleanup since connection is being reused + self.cancel_cleanup() + + # For range requests, we don't need to close the connection + # We can make a new request on the same session + if range_header: + logger.info(f"[{self.session_id}] Range request on existing connection: {range_header}") + # Close only the response stream, keep the session alive + if self.current_response: + logger.info(f"[{self.session_id}] Closing previous response stream (keeping connection alive)") + self.current_response.close() + self.current_response = None + + # Make new request (reuses connection if session exists) + response = self._establish_connection(range_header) + if response is None: + # Range not satisfiable - return None to indicate this + return None + + return self.current_response + + def cancel_cleanup(self): + """Cancel any pending cleanup - called when connection is reused""" + if self.cleanup_timer: + self.cleanup_timer.cancel() + self.cleanup_timer = None + logger.info(f"[{self.session_id}] Cancelled pending cleanup - connection being reused for new request") + + def increment_active_streams(self): + """Increment the count of active streams""" + with self.lock: + self.active_streams += 1 + logger.debug(f"[{self.session_id}] Active streams incremented to {self.active_streams}") + + def decrement_active_streams(self): + """Decrement the count of active streams""" + with self.lock: + if self.active_streams > 0: + self.active_streams -= 1 + logger.debug(f"[{self.session_id}] Active streams decremented to {self.active_streams}") + else: + logger.warning(f"[{self.session_id}] Attempted to decrement active streams when already at 0") + + def has_active_streams(self) -> bool: + """Check if connection has any active streams""" + with self.lock: + return self.active_streams > 0 + + def schedule_cleanup_if_not_streaming(self, delay_seconds: int = 10): + """Schedule cleanup only if no active streams""" + with self.lock: + if 
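The clamping rules in `_validate_range_header()` are easy to sanity-check in isolation. The stub below exists only so the method can be exercised without a live connection (run from a Django shell, since the module imports models):

```python
# Sanity checks for the range clamping above (illustrative, not shipped).
from apps.proxy.vod_proxy.connection_manager import PersistentVODConnection

class _Stub:
    session_id = "test"
    _validate_range_header = PersistentVODConnection._validate_range_header

s = _Stub()
assert s._validate_range_header("bytes=0-", 100) == "bytes=0-99"
assert s._validate_range_header("bytes=50-200", 100) == "bytes=50-99"  # end clamped to EOF
assert s._validate_range_header("bytes=150-", 100) is None             # start past EOF
assert s._validate_range_header("not-a-range", 100) == "not-a-range"   # passed through
```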
self.active_streams > 0: + logger.info(f"[{self.session_id}] Connection has {self.active_streams} active streams - NOT scheduling cleanup") + return False + + # No active streams, proceed with delayed cleanup + if self.cleanup_timer: + self.cleanup_timer.cancel() + + def delayed_cleanup(): + logger.info(f"[{self.session_id}] Delayed cleanup triggered - checking if connection is still needed") + # Use the singleton VODConnectionManager instance + manager = VODConnectionManager.get_instance() + manager.cleanup_persistent_connection(self.session_id) + + self.cleanup_timer = threading.Timer(delay_seconds, delayed_cleanup) + self.cleanup_timer.start() + logger.info(f"[{self.session_id}] Scheduled cleanup in {delay_seconds} seconds (connection not actively streaming)") + return True + + def get_headers(self): + """Get headers for response""" + return { + 'content_length': self.content_length, + 'content_type': self.content_type, + 'final_url': self.final_url + } + + def cleanup(self): + """Clean up connection resources""" + with self.lock: + # Cancel any pending cleanup timer + if self.cleanup_timer: + self.cleanup_timer.cancel() + self.cleanup_timer = None + logger.debug(f"[{self.session_id}] Cancelled cleanup timer during manual cleanup") + + # Clear active streams count + self.active_streams = 0 + + if self.current_response: + self.current_response.close() + self.current_response = None + if self.session: + self.session.close() + self.session = None + logger.info(f"[{self.session_id}] Persistent connection cleaned up") + + +class VODConnectionManager: + """Manages VOD connections using Redis for tracking""" + + _instance = None + _persistent_connections = {} # session_id -> PersistentVODConnection + + @classmethod + def get_instance(cls): + """Get the singleton instance of VODConnectionManager""" + if cls._instance is None: + cls._instance = cls() + return cls._instance + + def __init__(self): + self.redis_client = RedisClient.get_client() + self.connection_ttl = 3600 # 1 hour TTL for connections + self.session_ttl = 1800 # 30 minutes TTL for sessions + + def find_matching_idle_session(self, content_type: str, content_uuid: str, + client_ip: str, user_agent: str, + utc_start=None, utc_end=None, offset=None) -> Optional[str]: + """ + Find an existing session that matches content and client criteria with no active streams + + Args: + content_type: Type of content (movie, episode, series) + content_uuid: UUID of the content + client_ip: Client IP address + user_agent: Client user agent + utc_start: UTC start time for timeshift + utc_end: UTC end time for timeshift + offset: Offset in seconds + + Returns: + Session ID if matching idle session found, None otherwise + """ + if not self.redis_client: + return None + + try: + # Search for sessions with matching content + pattern = "vod_session:*" + cursor = 0 + matching_sessions = [] + + while True: + cursor, keys = self.redis_client.scan(cursor, match=pattern, count=100) + + for key in keys: + try: + session_data = self.redis_client.hgetall(key) + if not session_data: + continue + + # Extract session info + stored_content_type = session_data.get(b'content_type', b'').decode('utf-8') + stored_content_uuid = session_data.get(b'content_uuid', b'').decode('utf-8') + + # Check if content matches + if stored_content_type != content_type or stored_content_uuid != content_uuid: + continue + + # Extract session ID from key + session_id = key.decode('utf-8').replace('vod_session:', '') + + # Check if session has an active persistent connection + 
persistent_conn = self._persistent_connections.get(session_id) + if not persistent_conn: + # No persistent connection exists, skip + continue + + # Check if connection has no active streams + if persistent_conn.has_active_streams(): + logger.debug(f"[{session_id}] Session has active streams - skipping") + continue + + # Get stored client info for comparison + stored_client_ip = session_data.get(b'client_ip', b'').decode('utf-8') + stored_user_agent = session_data.get(b'user_agent', b'').decode('utf-8') + + # Check timeshift parameters match + stored_utc_start = session_data.get(b'utc_start', b'').decode('utf-8') + stored_utc_end = session_data.get(b'utc_end', b'').decode('utf-8') + stored_offset = session_data.get(b'offset', b'').decode('utf-8') + + current_utc_start = utc_start or "" + current_utc_end = utc_end or "" + current_offset = str(offset) if offset else "" + + # Calculate match score + score = 0 + match_reasons = [] + + # Content already matches (required) + score += 10 + match_reasons.append("content") + + # IP match (high priority) + if stored_client_ip and stored_client_ip == client_ip: + score += 5 + match_reasons.append("ip") + + # User-Agent match (medium priority) + if stored_user_agent and stored_user_agent == user_agent: + score += 3 + match_reasons.append("user-agent") + + # Timeshift parameters match (high priority for seeking) + if (stored_utc_start == current_utc_start and + stored_utc_end == current_utc_end and + stored_offset == current_offset): + score += 7 + match_reasons.append("timeshift") + + # Consider it a good match if we have at least content + one other criteria + if score >= 13: # content(10) + ip(5) or content(10) + user-agent(3) + something else + matching_sessions.append({ + 'session_id': session_id, + 'score': score, + 'reasons': match_reasons, + 'last_activity': float(session_data.get(b'last_activity', b'0').decode('utf-8')) + }) + + except Exception as e: + logger.debug(f"Error processing session key {key}: {e}") + continue + + if cursor == 0: + break + + # Sort by score (highest first), then by last activity (most recent first) + matching_sessions.sort(key=lambda x: (x['score'], x['last_activity']), reverse=True) + + if matching_sessions: + best_match = matching_sessions[0] + logger.info(f"Found matching idle session: {best_match['session_id']} " + f"(score: {best_match['score']}, reasons: {', '.join(best_match['reasons'])})") + return best_match['session_id'] + else: + logger.debug(f"No matching idle sessions found for {content_type} {content_uuid}") + return None + + except Exception as e: + logger.error(f"Error finding matching idle session: {e}") + return None + + def _get_connection_key(self, content_type: str, content_uuid: str, client_id: str) -> str: + """Get Redis key for a specific connection""" + return f"vod_proxy:connection:{content_type}:{content_uuid}:{client_id}" + + def _get_profile_connections_key(self, profile_id: int) -> str: + """Get Redis key for tracking connections per profile - STANDARDIZED with TS proxy""" + return f"profile_connections:{profile_id}" + + def _get_content_connections_key(self, content_type: str, content_uuid: str) -> str: + """Get Redis key for tracking connections per content""" + return f"vod_proxy:content:{content_type}:{content_uuid}:connections" + + def create_connection(self, content_type: str, content_uuid: str, content_name: str, + client_id: str, client_ip: str, user_agent: str, + m3u_profile: M3UAccountProfile) -> bool: + """ + Create a new VOD connection with profile limit checking + + Returns: 
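The acceptance threshold above (score >= 13) means a bare content match (10 points) never qualifies on its own: at least the User-Agent (3), the client IP (5), or the timeshift parameters (7) must also line up. Spelled out:

```python
# Illustration of the idle-session match scoring used above. Content match
# is mandatory (10 points) and the cutoff is 13, so content alone loses.
def match_score(ip_match: bool, ua_match: bool, timeshift_match: bool) -> int:
    score = 10                        # content match (required to get here)
    score += 5 if ip_match else 0     # same client IP
    score += 3 if ua_match else 0     # same User-Agent
    score += 7 if timeshift_match else 0
    return score

assert match_score(False, False, False) == 10   # content only: rejected
assert match_score(True, False, False) == 15    # content + IP: accepted
assert match_score(False, True, False) == 13    # content + UA: accepted
```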
+ bool: True if connection was created, False if profile limit exceeded + """ + if not self.redis_client: + logger.error("Redis client not available for VOD connection tracking") + return False + + try: + # Check profile connection limits using standardized key + if not self._check_profile_limits(m3u_profile): + logger.warning(f"Profile {m3u_profile.name} connection limit exceeded") + return False + + connection_key = self._get_connection_key(content_type, content_uuid, client_id) + profile_connections_key = self._get_profile_connections_key(m3u_profile.id) + content_connections_key = self._get_content_connections_key(content_type, content_uuid) + + # Check if connection already exists to prevent duplicate counting + if self.redis_client.exists(connection_key): + logger.info(f"Connection already exists for {client_id} - {content_type} {content_name}") + # Update activity but don't increment profile counter + self.redis_client.hset(connection_key, "last_activity", str(time.time())) + return True + + # Connection data + connection_data = { + "content_type": content_type, + "content_uuid": content_uuid, + "content_name": content_name, + "client_id": client_id, + "client_ip": client_ip, + "user_agent": user_agent, + "m3u_profile_id": m3u_profile.id, + "m3u_profile_name": m3u_profile.name, + "connected_at": str(time.time()), + "last_activity": str(time.time()), + "bytes_sent": "0", + "position_seconds": "0" + } + + # Use pipeline for atomic operations + pipe = self.redis_client.pipeline() + + # Store connection data + pipe.hset(connection_key, mapping=connection_data) + pipe.expire(connection_key, self.connection_ttl) + + # Increment profile connections using standardized method + pipe.incr(profile_connections_key) + + # Add to content connections set + pipe.sadd(content_connections_key, client_id) + pipe.expire(content_connections_key, self.connection_ttl) + + # Execute all operations + pipe.execute() + + logger.info(f"Created VOD connection: {client_id} for {content_type} {content_name}") + return True + + except Exception as e: + logger.error(f"Error creating VOD connection: {e}") + return False + + def _check_profile_limits(self, m3u_profile: M3UAccountProfile) -> bool: + """Check if profile has available connection slots""" + if m3u_profile.max_streams == 0: # Unlimited + return True + + try: + profile_connections_key = self._get_profile_connections_key(m3u_profile.id) + current_connections = int(self.redis_client.get(profile_connections_key) or 0) + + return current_connections < m3u_profile.max_streams + + except Exception as e: + logger.error(f"Error checking profile limits: {e}") + return False + + def update_connection_activity(self, content_type: str, content_uuid: str, + client_id: str, bytes_sent: int = 0, + position_seconds: int = 0) -> bool: + """Update connection activity""" + if not self.redis_client: + return False + + try: + connection_key = self._get_connection_key(content_type, content_uuid, client_id) + + update_data = { + "last_activity": str(time.time()) + } + + if bytes_sent > 0: + # Get current bytes and add to it + current_bytes = self.redis_client.hget(connection_key, "bytes_sent") + if current_bytes: + total_bytes = int(current_bytes.decode('utf-8')) + bytes_sent + else: + total_bytes = bytes_sent + update_data["bytes_sent"] = str(total_bytes) + + if position_seconds > 0: + update_data["position_seconds"] = str(position_seconds) + + # Update connection data + self.redis_client.hset(connection_key, mapping=update_data) + self.redis_client.expire(connection_key, 
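Everything this manager tracks lives in a handful of Redis keys, and `profile_connections:{id}` is deliberately the same key the TS proxy uses, so both proxies count against one per-profile stream limit. A small inspection helper, assuming the same `RedisClient` (illustrative only):

```python
# Sketch: peek at the VOD tracker's Redis keyspace. Key names mirror the
# _get_*_key() helpers above; not part of this PR.
from core.utils import RedisClient

def dump_vod_keys(profile_id: int, content_type: str, content_uuid: str):
    r = RedisClient.get_client()
    profile_key = f"profile_connections:{profile_id}"       # shared with TS proxy
    content_key = f"vod_proxy:content:{content_type}:{content_uuid}:connections"
    return {
        profile_key: r.get(profile_key),        # plain counter (incr/decr)
        content_key: r.smembers(content_key),   # set of attached client_ids
    }
```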
self.connection_ttl) + + return True + + except Exception as e: + logger.error(f"Error updating connection activity: {e}") + return False + + def remove_connection(self, content_type: str, content_uuid: str, client_id: str) -> bool: + """Remove a VOD connection""" + if not self.redis_client: + return False + + try: + connection_key = self._get_connection_key(content_type, content_uuid, client_id) + + # Get connection data before removing + connection_data = self.redis_client.hgetall(connection_key) + if not connection_data: + return True # Already removed + + # Get profile ID for cleanup + profile_id = None + if b"m3u_profile_id" in connection_data: + try: + profile_id = int(connection_data[b"m3u_profile_id"].decode('utf-8')) + except ValueError: + pass + + # Use pipeline for atomic cleanup + pipe = self.redis_client.pipeline() + + # Remove connection data + pipe.delete(connection_key) + + # Decrement profile connections using standardized key + if profile_id: + profile_connections_key = self._get_profile_connections_key(profile_id) + current_count = int(self.redis_client.get(profile_connections_key) or 0) + if current_count > 0: + pipe.decr(profile_connections_key) + + # Remove from content connections set + content_connections_key = self._get_content_connections_key(content_type, content_uuid) + pipe.srem(content_connections_key, client_id) + + # Execute cleanup + pipe.execute() + + logger.info(f"Removed VOD connection: {client_id}") + return True + + except Exception as e: + logger.error(f"Error removing connection: {e}") + return False + + def get_connection_info(self, content_type: str, content_uuid: str, client_id: str) -> Optional[Dict[str, Any]]: + """Get connection information""" + if not self.redis_client: + return None + + try: + connection_key = self._get_connection_key(content_type, content_uuid, client_id) + connection_data = self.redis_client.hgetall(connection_key) + + if not connection_data: + return None + + # Convert bytes to strings and parse numbers + info = {} + for key, value in connection_data.items(): + key_str = key.decode('utf-8') + value_str = value.decode('utf-8') + + # Parse numeric fields + if key_str in ['connected_at', 'last_activity']: + info[key_str] = float(value_str) + elif key_str in ['bytes_sent', 'position_seconds', 'm3u_profile_id']: + info[key_str] = int(value_str) + else: + info[key_str] = value_str + + return info + + except Exception as e: + logger.error(f"Error getting connection info: {e}") + return None + + def get_profile_connections(self, profile_id: int) -> int: + """Get current connection count for a profile using standardized key""" + if not self.redis_client: + return 0 + + try: + profile_connections_key = self._get_profile_connections_key(profile_id) + return int(self.redis_client.get(profile_connections_key) or 0) + + except Exception as e: + logger.error(f"Error getting profile connections: {e}") + return 0 + + def get_content_connections(self, content_type: str, content_uuid: str) -> int: + """Get current connection count for content""" + if not self.redis_client: + return 0 + + try: + content_connections_key = self._get_content_connections_key(content_type, content_uuid) + return self.redis_client.scard(content_connections_key) or 0 + + except Exception as e: + logger.error(f"Error getting content connections: {e}") + return 0 + + def cleanup_stale_connections(self, max_age_seconds: int = 3600): + """Clean up stale connections that haven't been active recently""" + if not self.redis_client: + return + + try: + pattern = 
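The getters above make it cheap to surface per-client state elsewhere, e.g. in the stats dashboard. A hypothetical read-side helper, not part of this PR:

```python
# Hypothetical status helper built on the getters above (names illustrative).
import time
from apps.proxy.vod_proxy.connection_manager import get_connection_manager

def vod_connection_summary(content_type: str, content_uuid: str, client_id: str) -> dict:
    mgr = get_connection_manager()
    info = mgr.get_connection_info(content_type, content_uuid, client_id) or {}
    return {
        "active": bool(info),
        "bytes_sent": info.get("bytes_sent", 0),
        "connected_for_s": (time.time() - info["connected_at"]) if info else 0,
        "content_viewers": mgr.get_content_connections(content_type, content_uuid),
    }
```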
"vod_proxy:connection:*" + cursor = 0 + cleaned = 0 + current_time = time.time() + + while True: + cursor, keys = self.redis_client.scan(cursor, match=pattern, count=100) + + for key in keys: + try: + key_str = key.decode('utf-8') + last_activity = self.redis_client.hget(key, "last_activity") + + if last_activity: + last_activity_time = float(last_activity.decode('utf-8')) + if current_time - last_activity_time > max_age_seconds: + # Extract info for cleanup + parts = key_str.split(':') + if len(parts) >= 5: + content_type = parts[2] + content_uuid = parts[3] + client_id = parts[4] + self.remove_connection(content_type, content_uuid, client_id) + cleaned += 1 + except Exception as e: + logger.error(f"Error processing key {key}: {e}") + + if cursor == 0: + break + + if cleaned > 0: + logger.info(f"Cleaned up {cleaned} stale VOD connections") + + except Exception as e: + logger.error(f"Error during connection cleanup: {e}") + + def stream_content(self, content_obj, stream_url, m3u_profile, client_ip, user_agent, request, + utc_start=None, utc_end=None, offset=None, range_header=None): + """ + Stream VOD content with connection tracking and timeshift support + + Args: + content_obj: Movie or Episode object + stream_url: Final stream URL to proxy + m3u_profile: M3UAccountProfile instance + client_ip: Client IP address + user_agent: Client user agent + request: Django request object + utc_start: UTC start time for timeshift (e.g., '2023-01-01T12:00:00') + utc_end: UTC end time for timeshift + offset: Offset in seconds for seeking + range_header: HTTP Range header for partial content requests + + Returns: + StreamingHttpResponse or HttpResponse with error + """ + + try: + # Generate unique client ID + client_id = f"vod_{int(time.time() * 1000)}_{random.randint(1000, 9999)}" + + # Determine content type and get content info + if hasattr(content_obj, 'episodes'): # Series + content_type = 'series' + elif hasattr(content_obj, 'series'): # Episode + content_type = 'episode' + else: # Movie + content_type = 'movie' + + content_uuid = str(content_obj.uuid) + content_name = getattr(content_obj, 'name', getattr(content_obj, 'title', 'Unknown')) + + # Create connection tracking + connection_created = self.create_connection( + content_type=content_type, + content_uuid=content_uuid, + content_name=content_name, + client_id=client_id, + client_ip=client_ip, + user_agent=user_agent, + m3u_profile=m3u_profile + ) + + if not connection_created: + logger.error(f"Failed to create connection tracking for {content_type} {content_uuid}") + return HttpResponse("Connection limit exceeded", status=503) + + # Modify stream URL for timeshift functionality + modified_stream_url = self._apply_timeshift_parameters( + stream_url, utc_start, utc_end, offset + ) + + logger.info(f"[{client_id}] Modified stream URL for timeshift: {modified_stream_url}") + + # Create streaming generator with simplified header handling + upstream_response = None + + def stream_generator(): + nonlocal upstream_response + try: + logger.info(f"[{client_id}] Starting VOD stream for {content_type} {content_name}") + + # Prepare request headers + headers = {} + if user_agent: + headers['User-Agent'] = user_agent + + # Forward important headers + important_headers = [ + 'authorization', 'x-forwarded-for', 'x-real-ip', + 'referer', 'origin', 'accept' + ] + + for header_name in important_headers: + django_header = f'HTTP_{header_name.upper().replace("-", "_")}' + if hasattr(request, 'META') and django_header in request.META: + headers[header_name] = 
request.META[django_header]
+                            logger.debug(f"[{client_id}] Forwarded header {header_name}")
+
+                    # Add client IP
+                    if client_ip:
+                        headers['X-Forwarded-For'] = client_ip
+                        headers['X-Real-IP'] = client_ip
+
+                    # Add Range header if provided for seeking support
+                    if range_header:
+                        headers['Range'] = range_header
+                        logger.info(f"[{client_id}] Added Range header: {range_header}")
+
+                    # Make single request to upstream server with automatic redirect following
+                    upstream_response = requests.get(modified_stream_url, headers=headers, stream=True, timeout=(10, 30), allow_redirects=True)
+                    upstream_response.raise_for_status()
+
+                    # Log upstream response info
+                    logger.info(f"[{client_id}] Upstream response status: {upstream_response.status_code}")
+                    logger.info(f"[{client_id}] Final URL after redirects: {upstream_response.url}")
+                    logger.info(f"[{client_id}] Upstream content-type: {upstream_response.headers.get('content-type', 'unknown')}")
+                    if 'content-length' in upstream_response.headers:
+                        logger.info(f"[{client_id}] Upstream content-length: {upstream_response.headers['content-length']}")
+                    if 'content-range' in upstream_response.headers:
+                        logger.info(f"[{client_id}] Upstream content-range: {upstream_response.headers['content-range']}")
+
+                    bytes_sent = 0
+                    chunk_count = 0
+
+                    for chunk in upstream_response.iter_content(chunk_size=8192):
+                        if chunk:
+                            yield chunk
+                            bytes_sent += len(chunk)
+                            chunk_count += 1
+
+                            # Update connection activity every 100 chunks
+                            if chunk_count % 100 == 0:
+                                self.update_connection_activity(
+                                    content_type=content_type,
+                                    content_uuid=content_uuid,
+                                    client_id=client_id,
+                                    bytes_sent=len(chunk)
+                                )
+
+                    logger.info(f"[{client_id}] VOD stream completed: {bytes_sent} bytes sent")
+
+                except requests.RequestException as e:
+                    logger.error(f"[{client_id}] Error streaming from source: {e}")
+                    yield b"Error: Unable to stream content"
+                except Exception as e:
+                    logger.error(f"[{client_id}] Error in stream generator: {e}")
+                finally:
+                    # Clean up connection tracking
+                    self.remove_connection(content_type, content_uuid, client_id)
+                    if upstream_response:
+                        upstream_response.close()
+
+            # Create streaming response with sensible defaults
+            response = StreamingHttpResponse(
+                streaming_content=stream_generator(),
+                content_type='video/mp4'
+            )
+
+            # Set status code based on request type
+            if range_header:
+                response.status_code = 206
+                logger.info(f"[{client_id}] Set response status to 206 for range request")
+            else:
+                response.status_code = 200
+                logger.info(f"[{client_id}] Set response status to 200 for full request")
+
+            # Set headers that VLC and other players expect
+            response['Cache-Control'] = 'no-cache'
+            response['Pragma'] = 'no-cache'
+            response['X-Content-Type-Options'] = 'nosniff'
+            response['Connection'] = 'keep-alive'
+            response['Accept-Ranges'] = 'bytes'
+
+            # Log the critical headers we're sending to the client
+            logger.info(f"[{client_id}] Response headers to client - Status: {response.status_code}, Accept-Ranges: {response.get('Accept-Ranges', 'MISSING')}")
+            if 'Content-Length' in response:
+                logger.info(f"[{client_id}] Content-Length: {response['Content-Length']}")
+            if 'Content-Range' in response:
+                logger.info(f"[{client_id}] Content-Range: {response['Content-Range']}")
+            if 'Content-Type' in response:
+                logger.info(f"[{client_id}] Content-Type: {response['Content-Type']}")
+
+            # Critical: Log what VLC needs to see for seeking to work
+            if response.status_code == 200:
+                logger.info(f"[{client_id}] VLC SEEKING INFO: Full content response (200). VLC should see Accept-Ranges and Content-Length to enable seeking.")
+            elif response.status_code == 206:
+                logger.info(f"[{client_id}] VLC SEEKING INFO: Partial content response (206). This confirms seeking is working if VLC requested a range.")
+
+            return response
+
+        except Exception as e:
+            logger.error(f"Error in stream_content: {e}", exc_info=True)
+            return HttpResponse(f"Streaming error: {str(e)}", status=500)
+
+    def stream_content_with_session(self, session_id, content_obj, stream_url, m3u_profile, client_ip, user_agent, request,
+                                    utc_start=None, utc_end=None, offset=None, range_header=None):
+        """
+        Stream VOD content with a persistent connection per session
+
+        Maintains one open connection to the provider per session and serves all
+        client Range requests on it, enabling seeking.
+ """ + + try: + # Use session_id as client_id for connection tracking + client_id = session_id + + # Determine content type and get content info + if hasattr(content_obj, 'episodes'): # Series + content_type = 'series' + elif hasattr(content_obj, 'series'): # Episode + content_type = 'episode' + else: # Movie + content_type = 'movie' + + content_uuid = str(content_obj.uuid) + content_name = getattr(content_obj, 'name', getattr(content_obj, 'title', 'Unknown')) + + # Check for existing connection or create new one + persistent_conn = self._persistent_connections.get(session_id) + + # Cancel any pending cleanup timer for this session regardless of new/existing + if persistent_conn: + persistent_conn.cancel_cleanup() + + # If no existing connection, try to find a matching idle session first + if not persistent_conn: + # Look for existing idle sessions that match content and client criteria + matching_session_id = self.find_matching_idle_session( + content_type, content_uuid, client_ip, user_agent, + utc_start, utc_end, offset + ) + + if matching_session_id: + logger.info(f"[{client_id}] Found matching idle session {matching_session_id} - redirecting client") + + # Update the session activity and client info + session_key = f"vod_session:{matching_session_id}" + if self.redis_client: + update_data = { + "last_activity": str(time.time()), + "client_ip": client_ip, # Update in case IP changed + "user_agent": user_agent # Update in case user agent changed + } + self.redis_client.hset(session_key, mapping=update_data) + self.redis_client.expire(session_key, self.session_ttl) + + # Get the existing persistent connection + persistent_conn = self._persistent_connections.get(matching_session_id) + if persistent_conn: + # Update the session_id to use the matching one + client_id = matching_session_id + session_id = matching_session_id + logger.info(f"[{client_id}] Successfully redirected to existing idle session") + else: + logger.warning(f"[{client_id}] Matching session found but no persistent connection - will create new") + + if not persistent_conn: + logger.info(f"[{client_id}] Creating NEW persistent connection for {content_type} {content_name}") + + # Create session in Redis for tracking + session_info = { + "content_type": content_type, + "content_uuid": content_uuid, + "content_name": content_name, + "created_at": str(time.time()), + "last_activity": str(time.time()), + "profile_id": str(m3u_profile.id), + "connection_counted": "True", + "client_ip": client_ip, + "user_agent": user_agent, + "utc_start": utc_start or "", + "utc_end": utc_end or "", + "offset": str(offset) if offset else "" + } + + session_key = f"vod_session:{session_id}" + if self.redis_client: + self.redis_client.hset(session_key, mapping=session_info) + self.redis_client.expire(session_key, self.session_ttl) + + logger.info(f"[{client_id}] Created new session: {session_info}") + + # Apply timeshift parameters to URL + modified_stream_url = self._apply_timeshift_parameters(stream_url, utc_start, utc_end, offset) + logger.info(f"[{client_id}] Modified stream URL for timeshift: {modified_stream_url}") + + # Prepare headers + headers = { + 'User-Agent': user_agent or 'VLC/3.0.21 LibVLC/3.0.21', + 'Accept': '*/*', + 'Connection': 'keep-alive' + } + + # Add any authentication headers from profile + if hasattr(m3u_profile, 'auth_headers') and m3u_profile.auth_headers: + headers.update(m3u_profile.auth_headers) + + # Create persistent connection + persistent_conn = PersistentVODConnection(session_id, modified_stream_url, headers) + 
self._persistent_connections[session_id] = persistent_conn + + # Track connection in profile + self.create_connection(content_type, content_uuid, content_name, client_id, client_ip, user_agent, m3u_profile) + else: + logger.info(f"[{client_id}] Using EXISTING persistent connection for {content_type} {content_name}") + # Update session activity + session_key = f"vod_session:{session_id}" + if self.redis_client: + self.redis_client.hset(session_key, "last_activity", str(time.time())) + self.redis_client.expire(session_key, self.session_ttl) + + logger.info(f"[{client_id}] Reusing existing session - no new connection created") + + # Log the incoming Range header for debugging + if range_header: + logger.info(f"[{client_id}] *** CLIENT RANGE REQUEST: {range_header} ***") + + # Parse range for logging + try: + if 'bytes=' in range_header: + range_part = range_header.replace('bytes=', '') + if '-' in range_part: + start_byte, end_byte = range_part.split('-', 1) + if start_byte and int(start_byte) > 0: + start_pos_mb = int(start_byte) / (1024 * 1024) + logger.info(f"[{client_id}] *** VLC SEEKING TO: {start_pos_mb:.1f} MB ***") + else: + logger.info(f"[{client_id}] Range request from start") + except Exception as e: + logger.warning(f"[{client_id}] Could not parse range header: {e}") + else: + logger.info(f"[{client_id}] Full content request (no Range header)") + + # Get stream from persistent connection with current range + upstream_response = persistent_conn.get_stream(range_header) + + # Handle range not satisfiable + if upstream_response is None: + logger.warning(f"[{client_id}] Range not satisfiable - returning 416 error") + return HttpResponse( + "Requested Range Not Satisfiable", + status=416, + headers={ + 'Content-Range': f'bytes */{persistent_conn.content_length}' if persistent_conn.content_length else 'bytes */*' + } + ) + + connection_headers = persistent_conn.get_headers() + + # Ensure any pending cleanup is cancelled before starting stream + persistent_conn.cancel_cleanup() + + # Create streaming generator + def stream_generator(): + decremented = False # Track if we've already decremented the counter + + try: + logger.info(f"[{client_id}] Starting stream from persistent connection") + + # Increment active streams counter + persistent_conn.increment_active_streams() + + bytes_sent = 0 + chunk_count = 0 + + for chunk in upstream_response.iter_content(chunk_size=8192): + if chunk: + yield chunk + bytes_sent += len(chunk) + chunk_count += 1 + + # Update connection activity every 100 chunks + if chunk_count % 100 == 0: + self.update_connection_activity( + content_type=content_type, + content_uuid=content_uuid, + client_id=client_id, + bytes_sent=len(chunk) + ) + + logger.info(f"[{client_id}] Persistent stream completed normally: {bytes_sent} bytes sent") + # Stream completed normally - decrement counter + persistent_conn.decrement_active_streams() + decremented = True + + except GeneratorExit: + # Client disconnected - decrement counter and schedule cleanup only if no active streams + logger.info(f"[{client_id}] Client disconnected - checking if cleanup should be scheduled") + persistent_conn.decrement_active_streams() + decremented = True + scheduled = persistent_conn.schedule_cleanup_if_not_streaming(delay_seconds=10) + if not scheduled: + logger.info(f"[{client_id}] Cleanup not scheduled - connection still has active streams") + + except Exception as e: + logger.error(f"[{client_id}] Error in persistent stream: {e}") + # On error, decrement counter and cleanup the connection as it may 
be corrupted + persistent_conn.decrement_active_streams() + decremented = True + logger.info(f"[{client_id}] Cleaning up persistent connection due to error") + self.cleanup_persistent_connection(session_id) + yield b"Error: Stream interrupted" + + finally: + # Safety net: only decrement if we haven't already + if not decremented: + logger.warning(f"[{client_id}] Stream generator exited without decrement - applying safety net") + persistent_conn.decrement_active_streams() + # This runs regardless of how the generator exits + logger.debug(f"[{client_id}] Stream generator finished") + + # Create streaming response + response = StreamingHttpResponse( + streaming_content=stream_generator(), + content_type=connection_headers['content_type'] + ) + + # Set status code based on range request + if range_header: + response.status_code = 206 + logger.info(f"[{client_id}] Set response status to 206 for range request") + else: + response.status_code = 200 + logger.info(f"[{client_id}] Set response status to 200 for full request") + + # Set headers that VLC expects + response['Cache-Control'] = 'no-cache' + response['Pragma'] = 'no-cache' + response['X-Content-Type-Options'] = 'nosniff' + response['Connection'] = 'keep-alive' + response['Accept-Ranges'] = 'bytes' + + # CRITICAL: Forward Content-Length from persistent connection + if connection_headers['content_length']: + response['Content-Length'] = connection_headers['content_length'] + logger.info(f"[{client_id}] *** FORWARDED Content-Length: {connection_headers['content_length']} *** (VLC seeking enabled)") + else: + logger.warning(f"[{client_id}] *** NO Content-Length available *** (VLC seeking may not work)") + + # Handle range requests - set Content-Range for partial responses + if range_header and connection_headers['content_length']: + try: + if 'bytes=' in range_header: + range_part = range_header.replace('bytes=', '') + if '-' in range_part: + start_byte, end_byte = range_part.split('-', 1) + start = int(start_byte) if start_byte else 0 + end = int(end_byte) if end_byte else int(connection_headers['content_length']) - 1 + total_size = int(connection_headers['content_length']) + + content_range = f"bytes {start}-{end}/{total_size}" + response['Content-Range'] = content_range + logger.info(f"[{client_id}] Set Content-Range: {content_range}") + except Exception as e: + logger.warning(f"[{client_id}] Could not set Content-Range: {e}") + + # Log response headers + logger.info(f"[{client_id}] PERSISTENT Response - Status: {response.status_code}, Content-Length: {response.get('Content-Length', 'MISSING')}") + if 'Content-Range' in response: + logger.info(f"[{client_id}] PERSISTENT Content-Range: {response['Content-Range']}") + + # Log VLC seeking status + if response.status_code == 200: + if connection_headers['content_length']: + logger.info(f"[{client_id}] โœ… PERSISTENT VLC SEEKING: Full response with Content-Length - seeking should work!") + else: + logger.info(f"[{client_id}] โŒ PERSISTENT VLC SEEKING: Full response but no Content-Length - seeking won't work!") + elif response.status_code == 206: + logger.info(f"[{client_id}] โœ… PERSISTENT VLC SEEKING: Partial response - seeking is working!") + + return response + + except Exception as e: + logger.error(f"Error in persistent stream_content_with_session: {e}", exc_info=True) + # Cleanup persistent connection on error + if session_id in self._persistent_connections: + self._persistent_connections[session_id].cleanup() + del self._persistent_connections[session_id] + return 
HttpResponse(f"Streaming error: {str(e)}", status=500) + + def _apply_timeshift_parameters(self, original_url, utc_start=None, utc_end=None, offset=None): + """ + Apply timeshift parameters to the stream URL + + Args: + original_url: Original stream URL + utc_start: UTC start time (ISO format string) + utc_end: UTC end time (ISO format string) + offset: Offset in seconds + + Returns: + Modified URL with timeshift parameters + """ + try: + from urllib.parse import urlparse, parse_qs, urlencode, urlunparse + + parsed_url = urlparse(original_url) + query_params = parse_qs(parsed_url.query) + + logger.debug(f"Original URL: {original_url}") + logger.debug(f"Original query params: {query_params}") + + # Add timeshift parameters if provided + if utc_start: + # Support both utc_start and start parameter names + query_params['utc_start'] = [utc_start] + query_params['start'] = [utc_start] # Some providers use 'start' + logger.info(f"Added utc_start/start parameter: {utc_start}") + + if utc_end: + # Support both utc_end and end parameter names + query_params['utc_end'] = [utc_end] + query_params['end'] = [utc_end] # Some providers use 'end' + logger.info(f"Added utc_end/end parameter: {utc_end}") + + if offset: + try: + # Ensure offset is a valid number + offset_seconds = int(offset) + # Support multiple offset parameter names + query_params['offset'] = [str(offset_seconds)] + query_params['seek'] = [str(offset_seconds)] # Some providers use 'seek' + query_params['t'] = [str(offset_seconds)] # Some providers use 't' + logger.info(f"Added offset/seek/t parameter: {offset_seconds} seconds") + except (ValueError, TypeError): + logger.warning(f"Invalid offset value: {offset}, skipping") + + # Handle special URL patterns for VOD providers + # Some providers embed timeshift info in the path rather than query params + path = parsed_url.path + + # Check if this looks like an IPTV catchup URL pattern + catchup_pattern = r'/(\d{4}-\d{2}-\d{2})/(\d{2}-\d{2}-\d{2})' + if utc_start and re.search(catchup_pattern, path): + # Convert ISO format to provider-specific format if needed + try: + from datetime import datetime + start_dt = datetime.fromisoformat(utc_start.replace('Z', '+00:00')) + date_part = start_dt.strftime('%Y-%m-%d') + time_part = start_dt.strftime('%H-%M-%S') + + # Replace existing date/time in path + path = re.sub(catchup_pattern, f'/{date_part}/{time_part}', path) + logger.info(f"Modified path for catchup: {path}") + except Exception as e: + logger.warning(f"Could not parse timeshift date: {e}") + + # Reconstruct URL with new parameters + new_query = urlencode(query_params, doseq=True) + modified_url = urlunparse(( + parsed_url.scheme, + parsed_url.netloc, + path, # Use potentially modified path + parsed_url.params, + new_query, + parsed_url.fragment + )) + + logger.info(f"Modified URL: {modified_url}") + return modified_url + + except Exception as e: + logger.error(f"Error applying timeshift parameters: {e}") + return original_url + + def cleanup_persistent_connection(self, session_id: str): + """Clean up a specific persistent connection""" + if session_id in self._persistent_connections: + logger.info(f"[{session_id}] Cleaning up persistent connection") + self._persistent_connections[session_id].cleanup() + del self._persistent_connections[session_id] + + # Clean up ALL Redis keys associated with this session + session_key = f"vod_session:{session_id}" + if self.redis_client: + try: + session_data = self.redis_client.hgetall(session_key) + if session_data: + # Get session details for connection 
cleanup + content_type = session_data.get(b'content_type', b'').decode('utf-8') + content_uuid = session_data.get(b'content_uuid', b'').decode('utf-8') + profile_id = session_data.get(b'profile_id') + + # Generate client_id from session_id (matches what's used during streaming) + client_id = session_id + + # Remove individual connection tracking keys created during streaming + if content_type and content_uuid: + logger.info(f"[{session_id}] Cleaning up connection tracking keys") + self.remove_connection(content_type, content_uuid, client_id) + + # Remove from profile connections if counted (additional safety check) + if session_data.get(b'connection_counted') == b'True' and profile_id: + profile_key = self._get_profile_connections_key(int(profile_id.decode('utf-8'))) + current_count = int(self.redis_client.get(profile_key) or 0) + if current_count > 0: + self.redis_client.decr(profile_key) + logger.info(f"[{session_id}] Decremented profile {profile_id.decode('utf-8')} connections") + + # Remove session tracking key + self.redis_client.delete(session_key) + logger.info(f"[{session_id}] Removed session tracking") + + # Clean up any additional session-related keys (pattern cleanup) + try: + # Look for any other keys that might be related to this session + pattern = f"*{session_id}*" + cursor = 0 + session_related_keys = [] + while True: + cursor, keys = self.redis_client.scan(cursor, match=pattern, count=100) + session_related_keys.extend(keys) + if cursor == 0: + break + + if session_related_keys: + # Filter out keys we already deleted + remaining_keys = [k for k in session_related_keys if k.decode('utf-8') != session_key] + if remaining_keys: + self.redis_client.delete(*remaining_keys) + logger.info(f"[{session_id}] Cleaned up {len(remaining_keys)} additional session-related keys") + except Exception as scan_error: + logger.warning(f"[{session_id}] Error during pattern cleanup: {scan_error}") + + except Exception as e: + logger.error(f"[{session_id}] Error cleaning up session: {e}") + + def cleanup_stale_persistent_connections(self, max_age_seconds: int = 1800): + """Clean up stale persistent connections that haven't been used recently""" + current_time = time.time() + stale_sessions = [] + + for session_id, conn in self._persistent_connections.items(): + try: + # Check connection's last activity time first + if hasattr(conn, 'last_activity'): + time_since_last_activity = current_time - conn.last_activity + if time_since_last_activity > max_age_seconds: + logger.info(f"[{session_id}] Connection inactive for {time_since_last_activity:.1f}s (max: {max_age_seconds}s)") + stale_sessions.append(session_id) + continue + + # Fallback to Redis session data if connection doesn't have last_activity + session_key = f"vod_session:{session_id}" + if self.redis_client: + session_data = self.redis_client.hgetall(session_key) + if session_data: + created_at = float(session_data.get(b'created_at', b'0').decode('utf-8')) + if current_time - created_at > max_age_seconds: + logger.info(f"[{session_id}] Session older than {max_age_seconds}s") + stale_sessions.append(session_id) + else: + # Session data missing, connection is stale + logger.info(f"[{session_id}] Session data missing from Redis") + stale_sessions.append(session_id) + + except Exception as e: + logger.error(f"[{session_id}] Error checking session age: {e}") + stale_sessions.append(session_id) + + # Clean up stale connections + for session_id in stale_sessions: + logger.info(f"[{session_id}] Cleaning up stale persistent connection") + 
self.cleanup_persistent_connection(session_id) + + if stale_sessions: + logger.info(f"Cleaned up {len(stale_sessions)} stale persistent connections") + else: + logger.debug(f"No stale persistent connections found (checked {len(self._persistent_connections)} connections)") + + +# Global instance +_connection_manager = None + +def get_connection_manager() -> VODConnectionManager: + """Get the global VOD connection manager instance""" + global _connection_manager + if _connection_manager is None: + _connection_manager = VODConnectionManager() + return _connection_manager diff --git a/apps/proxy/vod_proxy/multi_worker_connection_manager.py b/apps/proxy/vod_proxy/multi_worker_connection_manager.py new file mode 100644 index 00000000..10905e60 --- /dev/null +++ b/apps/proxy/vod_proxy/multi_worker_connection_manager.py @@ -0,0 +1,1025 @@ +""" +Enhanced VOD Connection Manager with Redis-based connection sharing for multi-worker environments +""" + +import time +import json +import logging +import threading +import random +import re +import requests +import pickle +import base64 +from typing import Optional, Dict, Any +from django.http import StreamingHttpResponse, HttpResponse +from core.utils import RedisClient +from apps.vod.models import Movie, Episode +from apps.m3u.models import M3UAccountProfile + +logger = logging.getLogger("vod_proxy") + + +class SerializableConnectionState: + """Serializable connection state that can be stored in Redis""" + + def __init__(self, session_id: str, stream_url: str, headers: dict, + content_length: str = None, content_type: str = 'video/mp4', + final_url: str = None, m3u_profile_id: int = None): + self.session_id = session_id + self.stream_url = stream_url + self.headers = headers + self.content_length = content_length + self.content_type = content_type + self.final_url = final_url + self.m3u_profile_id = m3u_profile_id # Store M3U profile ID for connection counting + self.last_activity = time.time() + self.request_count = 0 + self.active_streams = 0 + + def to_dict(self): + """Convert to dictionary for Redis storage""" + return { + 'session_id': self.session_id or '', + 'stream_url': self.stream_url or '', + 'headers': json.dumps(self.headers or {}), + 'content_length': str(self.content_length) if self.content_length is not None else '', + 'content_type': self.content_type or 'video/mp4', + 'final_url': self.final_url or '', + 'm3u_profile_id': str(self.m3u_profile_id) if self.m3u_profile_id is not None else '', + 'last_activity': str(self.last_activity), + 'request_count': str(self.request_count), + 'active_streams': str(self.active_streams) + } + + @classmethod + def from_dict(cls, data: dict): + """Create from dictionary loaded from Redis""" + obj = cls( + session_id=data['session_id'], + stream_url=data['stream_url'], + headers=json.loads(data['headers']) if data['headers'] else {}, + content_length=data.get('content_length') if data.get('content_length') else None, + content_type=data.get('content_type', 'video/mp4'), + final_url=data.get('final_url') if data.get('final_url') else None, + m3u_profile_id=int(data.get('m3u_profile_id')) if data.get('m3u_profile_id') else None + ) + obj.last_activity = float(data.get('last_activity', time.time())) + obj.request_count = int(data.get('request_count', 0)) + obj.active_streams = int(data.get('active_streams', 0)) + return obj + + +class RedisBackedVODConnection: + """Redis-backed VOD connection that can be accessed from any worker""" + + def __init__(self, session_id: str, redis_client=None): + self.session_id 
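Since every field goes into a Redis hash as a string, `to_dict()`/`from_dict()` must round-trip cleanly, including mapping empty strings back to `None` for unset fields. A quick check:

```python
# Round-trip check for SerializableConnectionState (illustrative).
from apps.proxy.vod_proxy.multi_worker_connection_manager import SerializableConnectionState

state = SerializableConnectionState(
    session_id="abc123",
    stream_url="http://provider.example/movie.mp4",
    headers={"User-Agent": "VLC/3.0.21 LibVLC/3.0.21"},
    m3u_profile_id=7,
)
state.request_count = 3

restored = SerializableConnectionState.from_dict(state.to_dict())
assert restored.headers == state.headers
assert restored.m3u_profile_id == 7
assert restored.request_count == 3
assert restored.content_length is None   # empty string maps back to None
```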
= session_id + self.redis_client = redis_client or RedisClient.get_client() + self.connection_key = f"vod_persistent_connection:{session_id}" + self.lock_key = f"vod_connection_lock:{session_id}" + self.local_session = None # Local requests session + self.local_response = None # Local current response + + def _get_connection_state(self) -> Optional[SerializableConnectionState]: + """Get connection state from Redis""" + if not self.redis_client: + return None + + try: + data = self.redis_client.hgetall(self.connection_key) + if not data: + return None + + # Convert bytes keys/values to strings if needed + if isinstance(list(data.keys())[0], bytes): + data = {k.decode('utf-8'): v.decode('utf-8') for k, v in data.items()} + + return SerializableConnectionState.from_dict(data) + except Exception as e: + logger.error(f"[{self.session_id}] Error getting connection state from Redis: {e}") + return None + + def _save_connection_state(self, state: SerializableConnectionState): + """Save connection state to Redis""" + if not self.redis_client: + return False + + try: + data = state.to_dict() + # Log the data being saved for debugging + logger.debug(f"[{self.session_id}] Saving connection state: {data}") + + # Verify all values are valid for Redis + for key, value in data.items(): + if value is None: + logger.error(f"[{self.session_id}] None value found for key '{key}' - this should not happen") + return False + + self.redis_client.hset(self.connection_key, mapping=data) + self.redis_client.expire(self.connection_key, 3600) # 1 hour TTL + return True + except Exception as e: + logger.error(f"[{self.session_id}] Error saving connection state to Redis: {e}") + return False + + def _acquire_lock(self, timeout: int = 10) -> bool: + """Acquire distributed lock for connection operations""" + if not self.redis_client: + return False + + try: + return self.redis_client.set(self.lock_key, "locked", nx=True, ex=timeout) + except Exception as e: + logger.error(f"[{self.session_id}] Error acquiring lock: {e}") + return False + + def _release_lock(self): + """Release distributed lock""" + if not self.redis_client: + return + + try: + self.redis_client.delete(self.lock_key) + except Exception as e: + logger.error(f"[{self.session_id}] Error releasing lock: {e}") + + def create_connection(self, stream_url: str, headers: dict, m3u_profile_id: int = None) -> bool: + """Create a new connection state in Redis""" + if not self._acquire_lock(): + logger.warning(f"[{self.session_id}] Could not acquire lock for connection creation") + return False + + try: + # Check if connection already exists + existing_state = self._get_connection_state() + if existing_state: + logger.info(f"[{self.session_id}] Connection already exists in Redis") + return True + + # Create new connection state + state = SerializableConnectionState(self.session_id, stream_url, headers, m3u_profile_id=m3u_profile_id) + success = self._save_connection_state(state) + + if success: + logger.info(f"[{self.session_id}] Created new connection state in Redis") + + return success + finally: + self._release_lock() + + def get_stream(self, range_header: str = None): + """Get stream with optional range header - works across workers""" + # Get connection state from Redis + state = self._get_connection_state() + if not state: + logger.error(f"[{self.session_id}] No connection state found in Redis") + return None + + # Update activity and increment request count + state.last_activity = time.time() + state.request_count += 1 + + try: + # Create local session if needed 
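One caveat on the `_acquire_lock()`/`_release_lock()` pair above: the lock value is a constant, so a worker whose lock already expired can delete a lock that another worker now holds. A token-based variant avoids that race; this is a generic sketch, not part of the PR:

```python
# Sketch of a token-based distributed lock. The version above stores a
# fixed "locked" value; comparing a per-holder token before deleting keeps
# one worker from releasing another worker's lock. Illustrative only.
import uuid

def acquire_lock(redis_client, lock_key: str, timeout: int = 10):
    token = uuid.uuid4().hex
    if redis_client.set(lock_key, token, nx=True, ex=timeout):
        return token
    return None

RELEASE_IF_OWNER = """
if redis.call('get', KEYS[1]) == ARGV[1] then
    return redis.call('del', KEYS[1])
end
return 0
"""

def release_lock(redis_client, lock_key: str, token: str) -> bool:
    # Lua keeps the get+del compare-and-delete atomic on the server.
    return bool(redis_client.eval(RELEASE_IF_OWNER, 1, lock_key, token))
```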
+ if not self.local_session: + self.local_session = requests.Session() + + # Prepare headers + headers = state.headers.copy() + if range_header: + # Validate range against content length if available + if state.content_length: + validated_range = self._validate_range_header(range_header, int(state.content_length)) + if validated_range is None: + logger.warning(f"[{self.session_id}] Range not satisfiable: {range_header}") + return None + range_header = validated_range + + headers['Range'] = range_header + logger.info(f"[{self.session_id}] Setting Range header: {range_header}") + + # Use final URL if available, otherwise original URL + target_url = state.final_url if state.final_url else state.stream_url + allow_redirects = not state.final_url # Only follow redirects if we don't have final URL + + logger.info(f"[{self.session_id}] Making request #{state.request_count} to {'final' if state.final_url else 'original'} URL") + + # Make request + response = self.local_session.get( + target_url, + headers=headers, + stream=True, + timeout=(10, 30), + allow_redirects=allow_redirects + ) + response.raise_for_status() + + # Update state with response info on first request + if state.request_count == 1: + if not state.content_length: + state.content_length = response.headers.get('content-length') + if not state.content_type: + state.content_type = response.headers.get('content-type', 'video/mp4') + if not state.final_url: + state.final_url = response.url + + logger.info(f"[{self.session_id}] Updated connection state: length={state.content_length}, type={state.content_type}") + + # Save updated state + self._save_connection_state(state) + + self.local_response = response + return response + + except Exception as e: + logger.error(f"[{self.session_id}] Error establishing connection: {e}") + self.cleanup() + raise + + def _validate_range_header(self, range_header: str, content_length: int): + """Validate range header against content length""" + try: + if not range_header or not range_header.startswith('bytes='): + return range_header + + range_part = range_header.replace('bytes=', '') + if '-' not in range_part: + return range_header + + start_str, end_str = range_part.split('-', 1) + + # Parse start byte + if start_str: + start_byte = int(start_str) + if start_byte >= content_length: + return None # Not satisfiable + else: + start_byte = 0 + + # Parse end byte + if end_str: + end_byte = int(end_str) + if end_byte >= content_length: + end_byte = content_length - 1 + else: + end_byte = content_length - 1 + + # Ensure start <= end + if start_byte > end_byte: + return None + + return f"bytes={start_byte}-{end_byte}" + + except (ValueError, IndexError) as e: + logger.warning(f"[{self.session_id}] Could not validate range header {range_header}: {e}") + return range_header + + def increment_active_streams(self): + """Increment active streams count in Redis""" + if not self._acquire_lock(): + return False + + try: + state = self._get_connection_state() + if state: + state.active_streams += 1 + state.last_activity = time.time() + self._save_connection_state(state) + logger.debug(f"[{self.session_id}] Active streams incremented to {state.active_streams}") + return True + return False + finally: + self._release_lock() + + def decrement_active_streams(self): + """Decrement active streams count in Redis""" + if not self._acquire_lock(): + return False + + try: + state = self._get_connection_state() + if state and state.active_streams > 0: + state.active_streams -= 1 + state.last_activity = time.time() + 
self._save_connection_state(state) + logger.debug(f"[{self.session_id}] Active streams decremented to {state.active_streams}") + return True + return False + finally: + self._release_lock() + + def has_active_streams(self) -> bool: + """Check if connection has any active streams""" + state = self._get_connection_state() + return state.active_streams > 0 if state else False + + def get_headers(self): + """Get headers for response""" + state = self._get_connection_state() + if state: + return { + 'content_length': state.content_length, + 'content_type': state.content_type, + 'final_url': state.final_url + } + return {} + + def cleanup(self, connection_manager=None): + """Clean up local resources and Redis state""" + # Get connection state before cleanup to handle profile decrementing + state = self._get_connection_state() + + if self.local_response: + self.local_response.close() + self.local_response = None + if self.local_session: + self.local_session.close() + self.local_session = None + + # Remove from Redis + if self.redis_client: + try: + # Get session information for cleanup + session_key = f"vod_session:{self.session_id}" + session_data = self.redis_client.hgetall(session_key) + + # Convert bytes to strings if needed + if session_data and isinstance(list(session_data.keys())[0], bytes): + session_data = {k.decode('utf-8'): v.decode('utf-8') for k, v in session_data.items()} + + # Use pipeline for atomic cleanup operations + pipe = self.redis_client.pipeline() + + # 1. Remove main connection state + pipe.delete(self.connection_key) + + # 2. Remove distributed lock + pipe.delete(self.lock_key) + + # 3. Remove session tracking + pipe.delete(session_key) + + # 4. Clean up legacy vod_proxy connection keys if session data exists + if session_data: + content_type = session_data.get('content_type') + content_uuid = session_data.get('content_uuid') + + if content_type and content_uuid: + # Remove from vod_proxy connection tracking + vod_proxy_connection_key = f"vod_proxy:connection:{content_type}:{content_uuid}:{self.session_id}" + pipe.delete(vod_proxy_connection_key) + + # Remove from content connections set + content_connections_key = f"vod_proxy:content:{content_type}:{content_uuid}:connections" + pipe.srem(content_connections_key, self.session_id) + + # Execute all cleanup operations + pipe.execute() + + logger.info(f"[{self.session_id}] Cleaned up all Redis keys (connection, session, locks)") + + # Decrement profile connections if we have the state and connection manager + if state and state.m3u_profile_id and connection_manager: + connection_manager._decrement_profile_connections(state.m3u_profile_id) + + except Exception as e: + logger.error(f"[{self.session_id}] Error cleaning up Redis state: {e}") + + +# Modify the VODConnectionManager to use Redis-backed connections +class MultiWorkerVODConnectionManager: + """Enhanced VOD Connection Manager that works across multiple uwsgi workers""" + + _instance = None + + @classmethod + def get_instance(cls): + """Get the singleton instance""" + if cls._instance is None: + cls._instance = cls() + return cls._instance + + def __init__(self): + self.redis_client = RedisClient.get_client() + self.connection_ttl = 3600 # 1 hour TTL for connections + self.session_ttl = 1800 # 30 minutes TTL for sessions + self.worker_id = self._get_worker_id() + logger.info(f"MultiWorkerVODConnectionManager initialized for worker {self.worker_id}") + + def _get_worker_id(self): + """Get unique worker ID for this process""" + import os + import socket + try: + # Use 
combination of hostname and PID for unique worker ID + return f"{socket.gethostname()}-{os.getpid()}" + except: + import random + return f"worker-{random.randint(1000, 9999)}" + + def _get_profile_connections_key(self, profile_id: int) -> str: + """Get Redis key for tracking connections per profile - STANDARDIZED with TS proxy""" + return f"profile_connections:{profile_id}" + + def _check_profile_limits(self, m3u_profile) -> bool: + """Check if profile has available connection slots""" + if m3u_profile.max_streams == 0: # Unlimited + return True + + try: + profile_connections_key = self._get_profile_connections_key(m3u_profile.id) + current_connections = int(self.redis_client.get(profile_connections_key) or 0) + + logger.info(f"[PROFILE-CHECK] Profile {m3u_profile.id} has {current_connections}/{m3u_profile.max_streams} connections") + return current_connections < m3u_profile.max_streams + + except Exception as e: + logger.error(f"Error checking profile limits: {e}") + return False + + def _increment_profile_connections(self, m3u_profile): + """Increment profile connection count""" + try: + profile_connections_key = self._get_profile_connections_key(m3u_profile.id) + new_count = self.redis_client.incr(profile_connections_key) + logger.info(f"[PROFILE-INCR] Profile {m3u_profile.id} connections: {new_count}") + return new_count + except Exception as e: + logger.error(f"Error incrementing profile connections: {e}") + return None + + def _decrement_profile_connections(self, m3u_profile_id: int): + """Decrement profile connection count""" + try: + profile_connections_key = self._get_profile_connections_key(m3u_profile_id) + current_count = int(self.redis_client.get(profile_connections_key) or 0) + if current_count > 0: + new_count = self.redis_client.decr(profile_connections_key) + logger.info(f"[PROFILE-DECR] Profile {m3u_profile_id} connections: {new_count}") + return new_count + else: + logger.warning(f"[PROFILE-DECR] Profile {m3u_profile_id} already at 0 connections") + return 0 + except Exception as e: + logger.error(f"Error decrementing profile connections: {e}") + return None + + def stream_content_with_session(self, session_id, content_obj, stream_url, m3u_profile, + client_ip, user_agent, request, + utc_start=None, utc_end=None, offset=None, range_header=None): + """Stream content with Redis-backed persistent connection""" + + # Generate client ID + content_type = "movie" if isinstance(content_obj, Movie) else "episode" + content_uuid = str(content_obj.uuid) + content_name = content_obj.name if hasattr(content_obj, 'name') else str(content_obj) + client_id = session_id + + logger.info(f"[{client_id}] Worker {self.worker_id} - Redis-backed streaming request for {content_type} {content_name}") + + try: + # Create Redis-backed connection + redis_connection = RedisBackedVODConnection(session_id, self.redis_client) + + # Check if connection exists, create if not + existing_state = redis_connection._get_connection_state() + if not existing_state: + logger.info(f"[{client_id}] Worker {self.worker_id} - Creating new Redis-backed connection") + + # Check profile limits before creating new connection + if not self._check_profile_limits(m3u_profile): + logger.warning(f"[{client_id}] Profile {m3u_profile.name} connection limit exceeded") + return HttpResponse("Connection limit exceeded for profile", status=429) + + # Apply timeshift parameters + modified_stream_url = self._apply_timeshift_parameters(stream_url, utc_start, utc_end, offset) + + # Prepare headers + headers = {} + if user_agent: + 
headers['User-Agent'] = user_agent + + # Forward important headers from request + important_headers = ['authorization', 'x-forwarded-for', 'x-real-ip', 'referer', 'origin', 'accept'] + for header_name in important_headers: + django_header = f'HTTP_{header_name.upper().replace("-", "_")}' + if hasattr(request, 'META') and django_header in request.META: + headers[header_name] = request.META[django_header] + + # Add client IP + if client_ip: + headers['X-Forwarded-For'] = client_ip + headers['X-Real-IP'] = client_ip + + # Add worker identification + headers['X-Worker-ID'] = self.worker_id + + # Create connection state in Redis + if not redis_connection.create_connection(modified_stream_url, headers, m3u_profile.id): + logger.error(f"[{client_id}] Worker {self.worker_id} - Failed to create Redis connection") + return HttpResponse("Failed to create connection", status=500) + + # Increment profile connections after successful connection creation + self._increment_profile_connections(m3u_profile) + + # Create session tracking + session_info = { + "content_type": content_type, + "content_uuid": content_uuid, + "content_name": content_name, + "created_at": str(time.time()), + "last_activity": str(time.time()), + "profile_id": str(m3u_profile.id), + "client_ip": client_ip, + "user_agent": user_agent, + "utc_start": utc_start or "", + "utc_end": utc_end or "", + "offset": str(offset) if offset else "", + "worker_id": self.worker_id, # Track which worker created this + "connection_type": "redis_backed" + } + + session_key = f"vod_session:{session_id}" + if self.redis_client: + self.redis_client.hset(session_key, mapping=session_info) + self.redis_client.expire(session_key, self.session_ttl) + + logger.info(f"[{client_id}] Worker {self.worker_id} - Created session: {session_info}") + else: + logger.info(f"[{client_id}] Worker {self.worker_id} - Using existing Redis-backed connection") + + # Update session activity + session_key = f"vod_session:{session_id}" + if self.redis_client: + self.redis_client.hset(session_key, mapping={ + "last_activity": str(time.time()), + "last_worker_id": self.worker_id # Track which worker last accessed this + }) + self.redis_client.expire(session_key, self.session_ttl) + + # Get stream from Redis-backed connection + upstream_response = redis_connection.get_stream(range_header) + + if upstream_response is None: + logger.warning(f"[{client_id}] Worker {self.worker_id} - Range not satisfiable") + return HttpResponse("Requested Range Not Satisfiable", status=416) + + # Get connection headers + connection_headers = redis_connection.get_headers() + + # Create streaming generator + def stream_generator(): + decremented = False + try: + logger.info(f"[{client_id}] Worker {self.worker_id} - Starting Redis-backed stream") + + # Increment active streams + redis_connection.increment_active_streams() + + bytes_sent = 0 + chunk_count = 0 + + for chunk in upstream_response.iter_content(chunk_size=8192): + if chunk: + yield chunk + bytes_sent += len(chunk) + chunk_count += 1 + + # Update activity every 100 chunks + if chunk_count % 100 == 0: + self.update_connection_activity( + content_type=content_type, + content_uuid=content_uuid, + client_id=client_id, + bytes_sent=len(chunk) + ) + + logger.info(f"[{client_id}] Worker {self.worker_id} - Redis-backed stream completed: {bytes_sent} bytes sent") + redis_connection.decrement_active_streams() + decremented = True + + except GeneratorExit: + logger.info(f"[{client_id}] Worker {self.worker_id} - Client disconnected from Redis-backed stream") 
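
Both `_validate_range_header` above and the `Content-Range` assembly below perform the same byte arithmetic, so a small pure-Python worked example may help; it mirrors the code's behaviour, including treating an empty start as byte 0 rather than as an RFC 7233 suffix range:

```python
def clamp_range(range_header: str, content_length: int):
    """Return (start, end) for a 'bytes=START-END' header, or None for HTTP 416."""
    spec = range_header.replace("bytes=", "", 1)
    start_str, _, end_str = spec.partition("-")
    start = int(start_str) if start_str else 0            # empty start -> 0, as above
    end = int(end_str) if end_str else content_length - 1
    end = min(end, content_length - 1)                    # clamp ends past EOF
    if start >= content_length or start > end:
        return None                                       # -> 416 Range Not Satisfiable
    return start, end

assert clamp_range("bytes=0-", 1_000_000) == (0, 999_999)
assert clamp_range("bytes=500-9999999", 1_000_000) == (500, 999_999)
assert clamp_range("bytes=2000000-", 1_000_000) is None
```

A satisfiable result maps onto the 206 response as `Content-Range: bytes {start}-{end}/{content_length}`.
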
+ if not decremented: + redis_connection.decrement_active_streams() + decremented = True + + # Schedule cleanup if no active streams + if not redis_connection.has_active_streams(): + def delayed_cleanup(): + time.sleep(10) # Wait 10 seconds + if not redis_connection.has_active_streams(): + logger.info(f"[{client_id}] Worker {self.worker_id} - Cleaning up idle Redis connection") + redis_connection.cleanup(connection_manager=self) + + import threading + cleanup_thread = threading.Thread(target=delayed_cleanup) + cleanup_thread.daemon = True + cleanup_thread.start() + + except Exception as e: + logger.error(f"[{client_id}] Worker {self.worker_id} - Error in Redis-backed stream: {e}") + if not decremented: + redis_connection.decrement_active_streams() + decremented = True + redis_connection.cleanup(connection_manager=self) + yield b"Error: Stream interrupted" + + finally: + if not decremented: + redis_connection.decrement_active_streams() + + # Create streaming response + response = StreamingHttpResponse( + streaming_content=stream_generator(), + content_type=connection_headers.get('content_type', 'video/mp4') + ) + + # Set appropriate status code + response.status_code = 206 if range_header else 200 + + # Set required headers + response['Cache-Control'] = 'no-cache' + response['Pragma'] = 'no-cache' + response['X-Content-Type-Options'] = 'nosniff' + response['Connection'] = 'keep-alive' + response['X-Worker-ID'] = self.worker_id # Identify which worker served this + + if connection_headers.get('content_length'): + response['Accept-Ranges'] = 'bytes' + response['Content-Length'] = connection_headers['content_length'] + + # Set Content-Range for partial requests + if range_header and 'bytes=' in range_header: + try: + range_part = range_header.replace('bytes=', '') + if '-' in range_part: + start_byte, end_byte = range_part.split('-', 1) + start = int(start_byte) if start_byte else 0 + end = int(end_byte) if end_byte else int(connection_headers['content_length']) - 1 + total_size = int(connection_headers['content_length']) + + content_range = f"bytes {start}-{end}/{total_size}" + response['Content-Range'] = content_range + logger.info(f"[{client_id}] Worker {self.worker_id} - Set Content-Range: {content_range}") + except Exception as e: + logger.warning(f"[{client_id}] Worker {self.worker_id} - Could not set Content-Range: {e}") + + logger.info(f"[{client_id}] Worker {self.worker_id} - Redis-backed response ready (status: {response.status_code})") + return response + + except Exception as e: + logger.error(f"[{client_id}] Worker {self.worker_id} - Error in Redis-backed stream_content_with_session: {e}", exc_info=True) + return HttpResponse(f"Streaming error: {str(e)}", status=500) + + def _apply_timeshift_parameters(self, original_url, utc_start=None, utc_end=None, offset=None): + """Apply timeshift parameters to URL""" + if not any([utc_start, utc_end, offset]): + return original_url + + try: + from urllib.parse import urlparse, urlunparse, parse_qs, urlencode + + parsed_url = urlparse(original_url) + query_params = parse_qs(parsed_url.query) + path = parsed_url.path + + logger.info(f"Applying timeshift parameters: utc_start={utc_start}, utc_end={utc_end}, offset={offset}") + + # Add timeshift parameters + if utc_start: + query_params['utc_start'] = [utc_start] + query_params['start'] = [utc_start] + logger.info(f"Added utc_start/start parameter: {utc_start}") + + if utc_end: + query_params['utc_end'] = [utc_end] + query_params['end'] = [utc_end] + logger.info(f"Added utc_end/end parameter: 
{utc_end}") + + if offset: + try: + offset_seconds = int(offset) + query_params['offset'] = [str(offset_seconds)] + query_params['seek'] = [str(offset_seconds)] + query_params['t'] = [str(offset_seconds)] + logger.info(f"Added offset/seek/t parameter: {offset_seconds}") + except ValueError: + logger.warning(f"Invalid offset value: {offset}") + + # Handle special catchup URL patterns + if utc_start: + try: + from datetime import datetime + import re + + # Parse the UTC start time + start_dt = datetime.fromisoformat(utc_start.replace('Z', '+00:00')) + + # Check for catchup URL patterns like /catchup/YYYY-MM-DD/HH-MM-SS/ + catchup_pattern = r'/catchup/\d{4}-\d{2}-\d{2}/\d{2}-\d{2}-\d{2}/' + if re.search(catchup_pattern, path): + # Replace the date/time in the path + date_part = start_dt.strftime('%Y-%m-%d') + time_part = start_dt.strftime('%H-%M-%S') + + path = re.sub(catchup_pattern, f'/catchup/{date_part}/{time_part}/', path) + logger.info(f"Modified catchup path: {path}") + except Exception as e: + logger.warning(f"Could not parse timeshift date: {e}") + + # Reconstruct URL + new_query = urlencode(query_params, doseq=True) + modified_url = urlunparse(( + parsed_url.scheme, + parsed_url.netloc, + path, + parsed_url.params, + new_query, + parsed_url.fragment + )) + + logger.info(f"Modified URL: {modified_url}") + return modified_url + + except Exception as e: + logger.error(f"Error applying timeshift parameters: {e}") + return original_url + + def cleanup_persistent_connection(self, session_id: str): + """Clean up a specific Redis-backed persistent connection""" + logger.info(f"[{session_id}] Cleaning up Redis-backed persistent connection") + + redis_connection = RedisBackedVODConnection(session_id, self.redis_client) + redis_connection.cleanup(connection_manager=self) + + # The cleanup method now handles all Redis keys including session data + + def cleanup_stale_persistent_connections(self, max_age_seconds: int = 1800): + """Clean up stale Redis-backed persistent connections""" + if not self.redis_client: + return + + try: + logger.info(f"Cleaning up Redis-backed connections older than {max_age_seconds} seconds") + + # Find all persistent connection keys + pattern = "vod_persistent_connection:*" + cursor = 0 + cleanup_count = 0 + current_time = time.time() + + while True: + cursor, keys = self.redis_client.scan(cursor, match=pattern, count=100) + + for key in keys: + try: + # Get connection state + data = self.redis_client.hgetall(key) + if not data: + continue + + # Convert bytes to strings if needed + if isinstance(list(data.keys())[0], bytes): + data = {k.decode('utf-8'): v.decode('utf-8') for k, v in data.items()} + + last_activity = float(data.get('last_activity', 0)) + active_streams = int(data.get('active_streams', 0)) + + # Clean up if stale and no active streams + if (current_time - last_activity > max_age_seconds) and active_streams == 0: + session_id = key.decode('utf-8').replace('vod_persistent_connection:', '') + logger.info(f"Cleaning up stale connection: {session_id}") + + # Clean up connection and related keys + redis_connection = RedisBackedVODConnection(session_id, self.redis_client) + redis_connection.cleanup(connection_manager=self) + cleanup_count += 1 + + except Exception as e: + logger.error(f"Error processing connection key {key}: {e}") + continue + + if cursor == 0: + break + + if cleanup_count > 0: + logger.info(f"Cleaned up {cleanup_count} stale Redis-backed connections") + else: + logger.debug("No stale Redis-backed connections found") + + except Exception as e: 
+ logger.error(f"Error during Redis-backed connection cleanup: {e}") + + def create_connection(self, content_type: str, content_uuid: str, content_name: str, + client_id: str, client_ip: str, user_agent: str, + m3u_profile: M3UAccountProfile) -> bool: + """Create connection tracking in Redis (same as original but for Redis-backed connections)""" + if not self.redis_client: + logger.error("Redis client not available for VOD connection tracking") + return False + + try: + # Check profile connection limits + profile_connections_key = f"profile_connections:{m3u_profile.id}" + current_connections = self.redis_client.get(profile_connections_key) + max_connections = getattr(m3u_profile, 'max_connections', 3) # Default to 3 + + if current_connections and int(current_connections) >= max_connections: + logger.warning(f"Profile {m3u_profile.name} connection limit exceeded ({current_connections}/{max_connections})") + return False + + # Create connection tracking + connection_key = f"vod_proxy:connection:{content_type}:{content_uuid}:{client_id}" + content_connections_key = f"vod_proxy:content:{content_type}:{content_uuid}:connections" + + # Check if connection already exists + if self.redis_client.exists(connection_key): + logger.info(f"Connection already exists for {client_id} - {content_type} {content_name}") + self.redis_client.hset(connection_key, "last_activity", str(time.time())) + return True + + # Connection data + connection_data = { + "content_type": content_type, + "content_uuid": content_uuid, + "content_name": content_name, + "client_id": client_id, + "client_ip": client_ip, + "user_agent": user_agent, + "m3u_profile_id": m3u_profile.id, + "m3u_profile_name": m3u_profile.name, + "connected_at": str(time.time()), + "last_activity": str(time.time()), + "bytes_sent": "0", + "position_seconds": "0" + } + + # Use pipeline for atomic operations + pipe = self.redis_client.pipeline() + pipe.hset(connection_key, mapping=connection_data) + pipe.expire(connection_key, self.connection_ttl) + pipe.incr(profile_connections_key) + pipe.sadd(content_connections_key, client_id) + pipe.expire(content_connections_key, self.connection_ttl) + pipe.execute() + + logger.info(f"Created Redis-backed VOD connection: {client_id} for {content_type} {content_name}") + return True + + except Exception as e: + logger.error(f"Error creating Redis-backed connection: {e}") + return False + + def remove_connection(self, content_type: str, content_uuid: str, client_id: str): + """Remove connection tracking from Redis""" + if not self.redis_client: + return + + try: + connection_key = f"vod_proxy:connection:{content_type}:{content_uuid}:{client_id}" + content_connections_key = f"vod_proxy:content:{content_type}:{content_uuid}:connections" + + # Get connection data to find profile + connection_data = self.redis_client.hgetall(connection_key) + if connection_data: + # Convert bytes to strings if needed + if isinstance(list(connection_data.keys())[0], bytes): + connection_data = {k.decode('utf-8'): v.decode('utf-8') for k, v in connection_data.items()} + + profile_id = connection_data.get('m3u_profile_id') + if profile_id: + profile_connections_key = f"profile_connections:{profile_id}" + + # Use pipeline for atomic operations + pipe = self.redis_client.pipeline() + pipe.delete(connection_key) + pipe.srem(content_connections_key, client_id) + pipe.decr(profile_connections_key) + pipe.execute() + + logger.info(f"Removed Redis-backed connection: {client_id}") + + except Exception as e: + logger.error(f"Error removing Redis-backed 
connection: {e}") + + def update_connection_activity(self, content_type: str, content_uuid: str, + client_id: str, bytes_sent: int): + """Update connection activity in Redis""" + if not self.redis_client: + return + + try: + connection_key = f"vod_proxy:connection:{content_type}:{content_uuid}:{client_id}" + pipe = self.redis_client.pipeline() + pipe.hset(connection_key, mapping={ + "last_activity": str(time.time()), + "bytes_sent": str(bytes_sent) + }) + pipe.expire(connection_key, self.connection_ttl) + pipe.execute() + except Exception as e: + logger.error(f"Error updating connection activity: {e}") + + def find_matching_idle_session(self, content_type: str, content_uuid: str, + client_ip: str, user_agent: str, + utc_start=None, utc_end=None, offset=None) -> Optional[str]: + """Find existing Redis-backed session that matches criteria""" + if not self.redis_client: + return None + + try: + # Search for sessions with matching content + pattern = "vod_session:*" + cursor = 0 + matching_sessions = [] + + while True: + cursor, keys = self.redis_client.scan(cursor, match=pattern, count=100) + + for key in keys: + try: + session_data = self.redis_client.hgetall(key) + if not session_data: + continue + + # Convert bytes keys/values to strings if needed + if isinstance(list(session_data.keys())[0], bytes): + session_data = {k.decode('utf-8'): v.decode('utf-8') for k, v in session_data.items()} + + # Check if content matches + stored_content_type = session_data.get('content_type', '') + stored_content_uuid = session_data.get('content_uuid', '') + + if stored_content_type != content_type or stored_content_uuid != content_uuid: + continue + + # Extract session ID + session_id = key.decode('utf-8').replace('vod_session:', '') + + # Check if Redis-backed connection exists and has no active streams + redis_connection = RedisBackedVODConnection(session_id, self.redis_client) + if redis_connection.has_active_streams(): + continue + + # Calculate match score + score = 10 # Content match + match_reasons = ["content"] + + # Check other criteria + stored_client_ip = session_data.get('client_ip', '') + stored_user_agent = session_data.get('user_agent', '') + + if stored_client_ip and stored_client_ip == client_ip: + score += 5 + match_reasons.append("ip") + + if stored_user_agent and stored_user_agent == user_agent: + score += 3 + match_reasons.append("user-agent") + + # Check timeshift parameters + stored_utc_start = session_data.get('utc_start', '') + stored_utc_end = session_data.get('utc_end', '') + stored_offset = session_data.get('offset', '') + + current_utc_start = utc_start or "" + current_utc_end = utc_end or "" + current_offset = str(offset) if offset else "" + + if (stored_utc_start == current_utc_start and + stored_utc_end == current_utc_end and + stored_offset == current_offset): + score += 7 + match_reasons.append("timeshift") + + if score >= 13: # Good match threshold + matching_sessions.append({ + 'session_id': session_id, + 'score': score, + 'reasons': match_reasons, + 'last_activity': float(session_data.get('last_activity', '0')) + }) + + except Exception as e: + logger.debug(f"Error processing session key {key}: {e}") + continue + + if cursor == 0: + break + + # Sort by score and last activity + matching_sessions.sort(key=lambda x: (x['score'], x['last_activity']), reverse=True) + + if matching_sessions: + best_match = matching_sessions[0] + logger.info(f"Found matching Redis-backed idle session: {best_match['session_id']} " + f"(score: {best_match['score']}, reasons: {', 
'.join(best_match['reasons'])})") + return best_match['session_id'] + + return None + + except Exception as e: + logger.error(f"Error finding matching idle session: {e}") + return None diff --git a/apps/proxy/vod_proxy/urls.py b/apps/proxy/vod_proxy/urls.py new file mode 100644 index 00000000..0d2b306e --- /dev/null +++ b/apps/proxy/vod_proxy/urls.py @@ -0,0 +1,21 @@ +from django.urls import path +from . import views + +app_name = 'vod_proxy' + +urlpatterns = [ + # Generic VOD streaming with session ID in path (for compatibility) + path('//', views.VODStreamView.as_view(), name='vod_stream_with_session'), + path('////', views.VODStreamView.as_view(), name='vod_stream_with_session_and_profile'), + + # Generic VOD streaming (supports movies, episodes, series) - legacy patterns + path('/', views.VODStreamView.as_view(), name='vod_stream'), + path('///', views.VODStreamView.as_view(), name='vod_stream_with_profile'), + + # VOD playlist generation + path('playlist/', views.VODPlaylistView.as_view(), name='vod_playlist'), + path('playlist//', views.VODPlaylistView.as_view(), name='vod_playlist_with_profile'), + + # Position tracking + path('position//', views.VODPositionView.as_view(), name='vod_position'), +] diff --git a/apps/proxy/vod_proxy/utils.py b/apps/proxy/vod_proxy/utils.py new file mode 100644 index 00000000..7ccf08b4 --- /dev/null +++ b/apps/proxy/vod_proxy/utils.py @@ -0,0 +1,58 @@ +""" +Utility functions for VOD proxy operations. +""" + +import logging +from django.http import HttpResponse + +logger = logging.getLogger(__name__) + + +def get_client_info(request): + """ + Extract client IP and User-Agent from request. + + Args: + request: Django HttpRequest object + + Returns: + tuple: (client_ip, user_agent) + """ + # Get client IP, checking for proxy headers + client_ip = request.META.get('HTTP_X_FORWARDED_FOR') + if client_ip: + # Take the first IP if there are multiple (comma-separated) + client_ip = client_ip.split(',')[0].strip() + else: + client_ip = request.META.get('HTTP_X_REAL_IP') or request.META.get('REMOTE_ADDR', 'unknown') + + # Get User-Agent + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + + return client_ip, user_agent + + +def create_vod_response(content, content_type='video/mp4', filename=None): + """ + Create a streaming HTTP response for VOD content. + + Args: + content: Content to stream (file-like object or bytes) + content_type: MIME type of the content + filename: Optional filename for Content-Disposition header + + Returns: + HttpResponse: Configured HTTP response for streaming + """ + response = HttpResponse(content, content_type=content_type) + + if filename: + response['Content-Disposition'] = f'attachment; filename="{filename}"' + + # Add headers for streaming + response['Accept-Ranges'] = 'bytes' + response['Cache-Control'] = 'no-cache, no-store, must-revalidate' + response['Pragma'] = 'no-cache' + response['Expires'] = '0' + + return response diff --git a/apps/proxy/vod_proxy/views.py b/apps/proxy/vod_proxy/views.py new file mode 100644 index 00000000..60472403 --- /dev/null +++ b/apps/proxy/vod_proxy/views.py @@ -0,0 +1,666 @@ +""" +VOD (Video on Demand) proxy views for handling movie and series streaming. +Supports M3U profiles for authentication and URL transformation. 
+""" + +import time +import random +import logging +import requests +from django.http import StreamingHttpResponse, JsonResponse, Http404, HttpResponse +from django.shortcuts import get_object_or_404 +from django.views.decorators.csrf import csrf_exempt +from django.utils.decorators import method_decorator +from django.views import View +from apps.vod.models import Movie, Series, Episode +from apps.m3u.models import M3UAccount, M3UAccountProfile +from apps.proxy.vod_proxy.connection_manager import VODConnectionManager +from apps.proxy.vod_proxy.multi_worker_connection_manager import MultiWorkerVODConnectionManager +from .utils import get_client_info, create_vod_response + +logger = logging.getLogger(__name__) + + +@method_decorator(csrf_exempt, name='dispatch') +class VODStreamView(View): + """Handle VOD streaming requests with M3U profile support""" + + def get(self, request, content_type, content_id, session_id=None, profile_id=None): + """ + Stream VOD content (movies or series episodes) with session-based connection reuse + + Args: + content_type: 'movie', 'series', or 'episode' + content_id: ID of the content + session_id: Optional session ID from URL path (for persistent connections) + profile_id: Optional M3U profile ID for authentication + """ + logger.info(f"[VOD-REQUEST] Starting VOD stream request: {content_type}/{content_id}, session: {session_id}, profile: {profile_id}") + logger.info(f"[VOD-REQUEST] Full request path: {request.get_full_path()}") + logger.info(f"[VOD-REQUEST] Request method: {request.method}") + logger.info(f"[VOD-REQUEST] Request headers: {dict(request.headers)}") + + try: + client_ip, user_agent = get_client_info(request) + + # Extract timeshift parameters from query string + # Support multiple timeshift parameter formats + utc_start = request.GET.get('utc_start') or request.GET.get('start') or request.GET.get('playliststart') + utc_end = request.GET.get('utc_end') or request.GET.get('end') or request.GET.get('playlistend') + offset = request.GET.get('offset') or request.GET.get('seek') or request.GET.get('t') + + # VLC specific timeshift parameters + if not utc_start and not offset: + # Check for VLC-style timestamp parameters + if 'timestamp' in request.GET: + offset = request.GET.get('timestamp') + elif 'time' in request.GET: + offset = request.GET.get('time') + + # Session ID now comes from URL path parameter + # Remove legacy query parameter extraction since we're using path-based routing + + # Extract Range header for seeking support + range_header = request.META.get('HTTP_RANGE') + + logger.info(f"[VOD-TIMESHIFT] Timeshift params - utc_start: {utc_start}, utc_end: {utc_end}, offset: {offset}") + logger.info(f"[VOD-SESSION] Session ID: {session_id}") + + # Log all query parameters for debugging + if request.GET: + logger.debug(f"[VOD-PARAMS] All query params: {dict(request.GET)}") + + if range_header: + logger.info(f"[VOD-RANGE] Range header: {range_header}") + + # Parse the range to understand what position VLC is seeking to + try: + if 'bytes=' in range_header: + range_part = range_header.replace('bytes=', '') + if '-' in range_part: + start_byte, end_byte = range_part.split('-', 1) + if start_byte: + start_pos_mb = int(start_byte) / (1024 * 1024) + logger.info(f"[VOD-SEEK] Seeking to byte position: {start_byte} (~{start_pos_mb:.1f} MB)") + if int(start_byte) > 0: + logger.info(f"[VOD-SEEK] *** ACTUAL SEEK DETECTED *** Position: {start_pos_mb:.1f} MB") + else: + logger.info(f"[VOD-SEEK] Open-ended range request (from start)") + if end_byte: + 
end_pos_mb = int(end_byte) / (1024 * 1024) + logger.info(f"[VOD-SEEK] End position: {end_byte} bytes (~{end_pos_mb:.1f} MB)") + except Exception as e: + logger.warning(f"[VOD-SEEK] Could not parse range header: {e}") + + # Simple seek detection - track rapid requests + current_time = time.time() + request_key = f"{client_ip}:{content_type}:{content_id}" + + if not hasattr(self.__class__, '_request_times'): + self.__class__._request_times = {} + + if request_key in self.__class__._request_times: + time_diff = current_time - self.__class__._request_times[request_key] + if time_diff < 5.0: + logger.info(f"[VOD-SEEK] Rapid request detected ({time_diff:.1f}s) - likely seeking") + + self.__class__._request_times[request_key] = current_time + else: + logger.info(f"[VOD-RANGE] No Range header - full content request") + + logger.info(f"[VOD-CLIENT] Client info - IP: {client_ip}, User-Agent: {user_agent[:50]}...") + + # If no session ID, create one and redirect to path-based URL + if not session_id: + new_session_id = f"vod_{int(time.time() * 1000)}_{random.randint(1000, 9999)}" + logger.info(f"[VOD-SESSION] Creating new session: {new_session_id}") + + # Build redirect URL with session ID in path, preserve query parameters + path_parts = request.path.rstrip('/').split('/') + + # Construct new path: /vod/movie/UUID/SESSION_ID or /vod/movie/UUID/SESSION_ID/PROFILE_ID/ + if profile_id: + new_path = f"{'/'.join(path_parts)}/{new_session_id}/{profile_id}/" + else: + new_path = f"{'/'.join(path_parts)}/{new_session_id}" + + # Preserve any query parameters (except session_id) + query_params = dict(request.GET) + query_params.pop('session_id', None) # Remove if present + + if query_params: + from urllib.parse import urlencode + query_string = urlencode(query_params, doseq=True) + redirect_url = f"{new_path}?{query_string}" + else: + redirect_url = new_path + + logger.info(f"[VOD-SESSION] Redirecting to path-based URL: {redirect_url}") + + return HttpResponse( + status=301, + headers={'Location': redirect_url} + ) + + # Extract preferred M3U account ID and stream ID from query parameters + preferred_m3u_account_id = request.GET.get('m3u_account_id') + preferred_stream_id = request.GET.get('stream_id') + + if preferred_m3u_account_id: + try: + preferred_m3u_account_id = int(preferred_m3u_account_id) + except (ValueError, TypeError): + logger.warning(f"[VOD-PARAM] Invalid m3u_account_id parameter: {preferred_m3u_account_id}") + preferred_m3u_account_id = None + + if preferred_stream_id: + logger.info(f"[VOD-PARAM] Preferred stream ID: {preferred_stream_id}") + + # Get the content object and its relation + content_obj, relation = self._get_content_and_relation(content_type, content_id, preferred_m3u_account_id, preferred_stream_id) + if not content_obj or not relation: + logger.error(f"[VOD-ERROR] Content or relation not found: {content_type} {content_id}") + raise Http404(f"Content not found: {content_type} {content_id}") + + logger.info(f"[VOD-CONTENT] Found content: {getattr(content_obj, 'name', 'Unknown')}") + + # Get M3U account from relation + m3u_account = relation.m3u_account + logger.info(f"[VOD-ACCOUNT] Using M3U account: {m3u_account.name}") + + # Get stream URL from relation + stream_url = self._get_stream_url_from_relation(relation) + logger.info(f"[VOD-CONTENT] Content URL: {stream_url or 'No URL found'}") + + if not stream_url: + logger.error(f"[VOD-ERROR] No stream URL available for {content_type} {content_id}") + return HttpResponse("No stream URL available", status=503) + + # Get M3U profile + 
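
To make the redirect logic above concrete, this is the first-contact flow it produces, sketched as comments (UUIDs and session ids are invented for illustration):

```python
# GET /proxy/vod/movie/2f9c4a9e-.../                 # no session id in the path
#   -> 301, Location: /proxy/vod/movie/2f9c4a9e-.../vod_1718000000000_4242
#
# GET /proxy/vod/movie/2f9c4a9e-.../vod_1718000000000_4242
#   -> 200/206, served through the Redis connection keyed by
#      vod_persistent_connection:vod_1718000000000_4242
#
# Query parameters (utc_start, offset, m3u_account_id, ...) survive because
# they are re-encoded onto the Location URL before redirecting.
```
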
m3u_profile = self._get_m3u_profile(m3u_account, profile_id, user_agent) + + if not m3u_profile: + logger.error(f"[VOD-ERROR] No suitable M3U profile found for {content_type} {content_id}") + return HttpResponse("No available stream", status=503) + + logger.info(f"[VOD-PROFILE] Using M3U profile: {m3u_profile.id} (max_streams: {m3u_profile.max_streams}, current: {m3u_profile.current_viewers})") + + # Connection tracking is handled by the connection manager + # Transform URL based on profile + final_stream_url = self._transform_url(stream_url, m3u_profile) + logger.info(f"[VOD-URL] Final stream URL: {final_stream_url}") + + # Validate stream URL + if not final_stream_url or not final_stream_url.startswith(('http://', 'https://')): + logger.error(f"[VOD-ERROR] Invalid stream URL: {final_stream_url}") + return HttpResponse("Invalid stream URL", status=500) + + # Get connection manager (Redis-backed for multi-worker support) + connection_manager = MultiWorkerVODConnectionManager.get_instance() + + + # Stream the content with session-based connection reuse + logger.info("[VOD-STREAM] Calling connection manager to stream content") + response = connection_manager.stream_content_with_session( + session_id=session_id, + content_obj=content_obj, + stream_url=final_stream_url, + m3u_profile=m3u_profile, + client_ip=client_ip, + user_agent=user_agent, + request=request, + utc_start=utc_start, + utc_end=utc_end, + offset=offset, + range_header=range_header + ) + + logger.info(f"[VOD-SUCCESS] Stream response created successfully, type: {type(response)}") + return response + + except Exception as e: + logger.error(f"[VOD-EXCEPTION] Error streaming {content_type} {content_id}: {e}", exc_info=True) + return HttpResponse(f"Streaming error: {str(e)}", status=500) + + def head(self, request, content_type, content_id, session_id=None, profile_id=None): + """ + Handle HEAD requests for FUSE filesystem integration + + Returns content length and session URL header for subsequent GET requests + """ + logger.info(f"[VOD-HEAD] HEAD request: {content_type}/{content_id}, session: {session_id}, profile: {profile_id}") + + try: + # Get client info for M3U profile selection + client_ip, user_agent = get_client_info(request) + logger.info(f"[VOD-HEAD] Client info - IP: {client_ip}, User-Agent: {user_agent[:50] if user_agent else 'None'}...") + + # If no session ID, create one (same logic as GET) + if not session_id: + new_session_id = f"vod_{int(time.time() * 1000)}_{random.randint(1000, 9999)}" + logger.info(f"[VOD-HEAD] Creating new session for HEAD: {new_session_id}") + + # Build session URL for response header + path_parts = request.path.rstrip('/').split('/') + if profile_id: + session_url = f"{'/'.join(path_parts)}/{new_session_id}/{profile_id}/" + else: + session_url = f"{'/'.join(path_parts)}/{new_session_id}" + + session_id = new_session_id + else: + # Session already in URL, construct the current session URL + session_url = request.path + logger.info(f"[VOD-HEAD] Using existing session: {session_id}") + + # Extract preferred M3U account ID and stream ID from query parameters + preferred_m3u_account_id = request.GET.get('m3u_account_id') + preferred_stream_id = request.GET.get('stream_id') + + if preferred_m3u_account_id: + try: + preferred_m3u_account_id = int(preferred_m3u_account_id) + except (ValueError, TypeError): + logger.warning(f"[VOD-HEAD] Invalid m3u_account_id parameter: {preferred_m3u_account_id}") + preferred_m3u_account_id = None + + if preferred_stream_id: + logger.info(f"[VOD-HEAD] Preferred 
stream ID: {preferred_stream_id}") + + # Get content and relation (same as GET) + content_obj, relation = self._get_content_and_relation(content_type, content_id, preferred_m3u_account_id, preferred_stream_id) + if not content_obj or not relation: + logger.error(f"[VOD-HEAD] Content or relation not found: {content_type} {content_id}") + return HttpResponse("Content not found", status=404) + + # Get M3U account and stream URL + m3u_account = relation.m3u_account + stream_url = self._get_stream_url_from_relation(relation) + if not stream_url: + logger.error(f"[VOD-HEAD] No stream URL available for {content_type} {content_id}") + return HttpResponse("No stream URL available", status=503) + + # Get M3U profile + m3u_profile = self._get_m3u_profile(m3u_account, profile_id, user_agent) + if not m3u_profile: + logger.error(f"[VOD-HEAD] No M3U profile found") + return HttpResponse("Profile not found", status=404) + + # Transform URL if needed + final_stream_url = self._transform_url(stream_url, m3u_profile) + + # Make a small range GET request to get content length since providers don't support HEAD + # We'll use a tiny range to minimize data transfer but get the headers we need + headers = { + 'User-Agent': user_agent or 'Dispatcharr/1.0', + 'Accept': '*/*', + 'Range': 'bytes=0-1' # Request only first 2 bytes + } + + logger.info(f"[VOD-HEAD] Making small range GET request to provider: {final_stream_url}") + response = requests.get(final_stream_url, headers=headers, timeout=30, allow_redirects=True, stream=True) + + # Check for range support - should be 206 for partial content + if response.status_code == 206: + # Parse Content-Range header to get total file size + content_range = response.headers.get('Content-Range', '') + if content_range: + # Content-Range: bytes 0-1/1234567890 + total_size = content_range.split('/')[-1] + logger.info(f"[VOD-HEAD] Got file size from Content-Range: {total_size}") + else: + logger.warning(f"[VOD-HEAD] No Content-Range header in 206 response") + total_size = response.headers.get('Content-Length', '0') + elif response.status_code == 200: + # Server doesn't support range requests, use Content-Length from full response + total_size = response.headers.get('Content-Length', '0') + logger.info(f"[VOD-HEAD] Server doesn't support ranges, got Content-Length: {total_size}") + else: + logger.error(f"[VOD-HEAD] Provider GET request failed: {response.status_code}") + return HttpResponse("Provider error", status=response.status_code) + + # Close the small range request - we don't need to keep this connection + response.close() + + # Store the total content length in Redis for the persistent connection to use + try: + import redis + r = redis.StrictRedis(host='localhost', port=6379, db=0, decode_responses=True) + content_length_key = f"vod_content_length:{session_id}" + r.set(content_length_key, total_size, ex=1800) # Store for 30 minutes + logger.info(f"[VOD-HEAD] Stored total content length {total_size} for session {session_id}") + except Exception as e: + logger.error(f"[VOD-HEAD] Failed to store content length in Redis: {e}") + + # Now create a persistent connection for the session (if one doesn't exist) + # This ensures the FUSE GET requests will reuse the same connection + + connection_manager = MultiWorkerVODConnectionManager.get_instance() + + logger.info(f"[VOD-HEAD] Pre-creating persistent connection for session: {session_id}") + + # We don't actually stream content here, just ensure connection is ready + # The actual GET requests from FUSE will use the persistent 
connection + + # Use the total_size we extracted from the range response + content_type_header = response.headers.get('Content-Type', 'video/mp4') + + logger.info(f"[VOD-HEAD] Provider response - Total Size: {total_size}, Type: {content_type_header}") + + # Create response with content length and session URL header + head_response = HttpResponse() + head_response['Content-Length'] = total_size + head_response['Content-Type'] = content_type_header + head_response['Accept-Ranges'] = 'bytes' + + # Custom header with session URL for FUSE + head_response['X-Session-URL'] = session_url + head_response['X-Dispatcharr-Session'] = session_id + + logger.info(f"[VOD-HEAD] Returning HEAD response with session URL: {session_url}") + return head_response + + except Exception as e: + logger.error(f"[VOD-HEAD] Error in HEAD request: {e}", exc_info=True) + return HttpResponse(f"HEAD error: {str(e)}", status=500) + + def _get_content_and_relation(self, content_type, content_id, preferred_m3u_account_id=None, preferred_stream_id=None): + """Get the content object and its M3U relation""" + try: + logger.info(f"[CONTENT-LOOKUP] Looking up {content_type} with UUID {content_id}") + if preferred_m3u_account_id: + logger.info(f"[CONTENT-LOOKUP] Preferred M3U account ID: {preferred_m3u_account_id}") + if preferred_stream_id: + logger.info(f"[CONTENT-LOOKUP] Preferred stream ID: {preferred_stream_id}") + + if content_type == 'movie': + content_obj = get_object_or_404(Movie, uuid=content_id) + logger.info(f"[CONTENT-FOUND] Movie: {content_obj.name} (ID: {content_obj.id})") + + # Filter by preferred stream ID first (most specific) + relations_query = content_obj.m3u_relations.filter(m3u_account__is_active=True) + if preferred_stream_id: + specific_relation = relations_query.filter(stream_id=preferred_stream_id).first() + if specific_relation: + logger.info(f"[STREAM-SELECTED] Using specific stream: {specific_relation.stream_id} from provider: {specific_relation.m3u_account.name}") + return content_obj, specific_relation + else: + logger.warning(f"[STREAM-FALLBACK] Preferred stream ID {preferred_stream_id} not found, falling back to account/priority selection") + + # Filter by preferred M3U account if specified + if preferred_m3u_account_id: + specific_relation = relations_query.filter(m3u_account__id=preferred_m3u_account_id).first() + if specific_relation: + logger.info(f"[PROVIDER-SELECTED] Using preferred provider: {specific_relation.m3u_account.name}") + return content_obj, specific_relation + else: + logger.warning(f"[PROVIDER-FALLBACK] Preferred M3U account {preferred_m3u_account_id} not found, using highest priority") + + # Get the highest priority active relation (fallback or default) + relation = relations_query.select_related('m3u_account').order_by('-m3u_account__priority', 'id').first() + + if relation: + logger.info(f"[PROVIDER-SELECTED] Using provider: {relation.m3u_account.name} (priority: {relation.m3u_account.priority})") + + return content_obj, relation + + elif content_type == 'episode': + content_obj = get_object_or_404(Episode, uuid=content_id) + logger.info(f"[CONTENT-FOUND] Episode: {content_obj.name} (ID: {content_obj.id}, Series: {content_obj.series.name})") + + # Filter by preferred stream ID first (most specific) + relations_query = content_obj.m3u_relations.filter(m3u_account__is_active=True) + if preferred_stream_id: + specific_relation = relations_query.filter(stream_id=preferred_stream_id).first() + if specific_relation: + logger.info(f"[STREAM-SELECTED] Using specific stream: 
{specific_relation.stream_id} from provider: {specific_relation.m3u_account.name}") + return content_obj, specific_relation + else: + logger.warning(f"[STREAM-FALLBACK] Preferred stream ID {preferred_stream_id} not found, falling back to account/priority selection") + + # Filter by preferred M3U account if specified + if preferred_m3u_account_id: + specific_relation = relations_query.filter(m3u_account__id=preferred_m3u_account_id).first() + if specific_relation: + logger.info(f"[PROVIDER-SELECTED] Using preferred provider: {specific_relation.m3u_account.name}") + return content_obj, specific_relation + else: + logger.warning(f"[PROVIDER-FALLBACK] Preferred M3U account {preferred_m3u_account_id} not found, using highest priority") + + # Get the highest priority active relation (fallback or default) + relation = relations_query.select_related('m3u_account').order_by('-m3u_account__priority', 'id').first() + + if relation: + logger.info(f"[PROVIDER-SELECTED] Using provider: {relation.m3u_account.name} (priority: {relation.m3u_account.priority})") + + return content_obj, relation + + elif content_type == 'series': + # For series, get the first episode + series = get_object_or_404(Series, uuid=content_id) + logger.info(f"[CONTENT-FOUND] Series: {series.name} (ID: {series.id})") + episode = series.episodes.first() + if not episode: + logger.error(f"[CONTENT-ERROR] No episodes found for series {series.name}") + return None, None + + logger.info(f"[CONTENT-FOUND] First episode: {episode.name} (ID: {episode.id})") + + # Filter by preferred stream ID first (most specific) + relations_query = episode.m3u_relations.filter(m3u_account__is_active=True) + if preferred_stream_id: + specific_relation = relations_query.filter(stream_id=preferred_stream_id).first() + if specific_relation: + logger.info(f"[STREAM-SELECTED] Using specific stream: {specific_relation.stream_id} from provider: {specific_relation.m3u_account.name}") + return episode, specific_relation + else: + logger.warning(f"[STREAM-FALLBACK] Preferred stream ID {preferred_stream_id} not found, falling back to account/priority selection") + + # Filter by preferred M3U account if specified + if preferred_m3u_account_id: + specific_relation = relations_query.filter(m3u_account__id=preferred_m3u_account_id).first() + if specific_relation: + logger.info(f"[PROVIDER-SELECTED] Using preferred provider: {specific_relation.m3u_account.name}") + return episode, specific_relation + else: + logger.warning(f"[PROVIDER-FALLBACK] Preferred M3U account {preferred_m3u_account_id} not found, using highest priority") + + # Get the highest priority active relation (fallback or default) + relation = relations_query.select_related('m3u_account').order_by('-m3u_account__priority', 'id').first() + + if relation: + logger.info(f"[PROVIDER-SELECTED] Using provider: {relation.m3u_account.name} (priority: {relation.m3u_account.priority})") + + return episode, relation + else: + logger.error(f"[CONTENT-ERROR] Invalid content type: {content_type}") + return None, None + + except Exception as e: + logger.error(f"Error getting content object: {e}") + return None, None + + def _get_stream_url_from_relation(self, relation): + """Get stream URL from the M3U relation""" + try: + # Log the relation type and available attributes + logger.info(f"[VOD-URL] Relation type: {type(relation).__name__}") + logger.info(f"[VOD-URL] Account type: {relation.m3u_account.account_type}") + logger.info(f"[VOD-URL] Stream ID: {getattr(relation, 'stream_id', 'N/A')}") + + # First try the 
get_stream_url method (this should build URLs dynamically) + if hasattr(relation, 'get_stream_url'): + url = relation.get_stream_url() + if url: + logger.info(f"[VOD-URL] Built URL from get_stream_url(): {url}") + return url + else: + logger.warning(f"[VOD-URL] get_stream_url() returned None") + + logger.error(f"[VOD-URL] Relation has no get_stream_url method or it failed") + return None + except Exception as e: + logger.error(f"[VOD-URL] Error getting stream URL from relation: {e}", exc_info=True) + return None + + def _get_m3u_profile(self, m3u_account, profile_id, user_agent): + """Get appropriate M3U profile for streaming""" + try: + # If specific profile requested, try to use it + if profile_id: + try: + profile = M3UAccountProfile.objects.get( + id=profile_id, + m3u_account=m3u_account, + is_active=True + ) + if profile.current_viewers < profile.max_streams or profile.max_streams == 0: + return profile + except M3UAccountProfile.DoesNotExist: + pass + + # Find available profile based on user agent matching + profiles = M3UAccountProfile.objects.filter( + m3u_account=m3u_account, + is_active=True + ).order_by('current_viewers') + + for profile in profiles: + # Check if profile matches user agent pattern + if self._matches_user_agent_pattern(profile, user_agent): + if profile.current_viewers < profile.max_streams or profile.max_streams == 0: + return profile + + # Fallback to default profile + return profiles.filter(is_default=True).first() + + except Exception as e: + logger.error(f"Error getting M3U profile: {e}") + return None + + def _matches_user_agent_pattern(self, profile, user_agent): + """Check if user agent matches profile pattern""" + try: + import re + pattern = profile.search_pattern + if pattern and user_agent: + return bool(re.search(pattern, user_agent, re.IGNORECASE)) + return True # If no pattern, match all + except Exception: + return True + + def _transform_url(self, original_url, m3u_profile): + """Transform URL based on M3U profile settings""" + try: + import re + + if not original_url: + return None + + search_pattern = m3u_profile.search_pattern + replace_pattern = m3u_profile.replace_pattern + safe_replace_pattern = re.sub(r'\$(\d+)', r'\\\1', replace_pattern) + + if search_pattern and replace_pattern: + transformed_url = re.sub(search_pattern, safe_replace_pattern, original_url) + return transformed_url + + return original_url + + except Exception as e: + logger.error(f"Error transforming URL: {e}") + return original_url + +@method_decorator(csrf_exempt, name='dispatch') +class VODPlaylistView(View): + """Generate M3U playlists for VOD content""" + + def get(self, request, profile_id=None): + """Generate VOD playlist""" + try: + # Get profile if specified + m3u_profile = None + if profile_id: + try: + m3u_profile = M3UAccountProfile.objects.get( + id=profile_id, + is_active=True + ) + except M3UAccountProfile.DoesNotExist: + return HttpResponse("Profile not found", status=404) + + # Generate playlist content + playlist_content = self._generate_playlist(m3u_profile) + + response = HttpResponse(playlist_content, content_type='application/vnd.apple.mpegurl') + response['Content-Disposition'] = 'attachment; filename="vod_playlist.m3u8"' + return response + + except Exception as e: + logger.error(f"Error generating VOD playlist: {e}") + return HttpResponse("Playlist generation error", status=500) + + def _generate_playlist(self, m3u_profile=None): + """Generate M3U playlist content for VOD""" + lines = ["#EXTM3U"] + + # Add movies + movies = 
Movie.objects.filter(m3u_relations__m3u_account__is_active=True).distinct() + if m3u_profile: + movies = movies.filter(m3u_relations__m3u_account=m3u_profile.m3u_account) + + for movie in movies: + profile_param = f"?profile={m3u_profile.id}" if m3u_profile else "" + lines.append(f'#EXTINF:-1 tvg-id="{movie.tmdb_id}" group-title="Movies",{movie.name}') + lines.append(f'/proxy/vod/movie/{movie.uuid}/{profile_param}') + + # Add series + series_list = Series.objects.filter(m3u_relations__m3u_account__is_active=True).distinct() + if m3u_profile: + series_list = series_list.filter(m3u_relations__m3u_account=m3u_profile.m3u_account) + + for series in series_list: + for episode in series.episodes.all(): + profile_param = f"?profile={m3u_profile.id}" if m3u_profile else "" + episode_title = f"{series.name} - S{episode.season_number:02d}E{episode.episode_number:02d}" + lines.append(f'#EXTINF:-1 tvg-id="{series.tmdb_id}" group-title="Series",{episode_title}') + lines.append(f'/proxy/vod/episode/{episode.uuid}/{profile_param}') + + return '\n'.join(lines) + + +@method_decorator(csrf_exempt, name='dispatch') +class VODPositionView(View): + """Handle VOD position updates""" + + def post(self, request, content_id): + """Update playback position for VOD content""" + try: + import json + data = json.loads(request.body) + client_id = data.get('client_id') + position = data.get('position', 0) + + # Find the content object + content_obj = None + try: + content_obj = Movie.objects.get(uuid=content_id) + except Movie.DoesNotExist: + try: + content_obj = Episode.objects.get(uuid=content_id) + except Episode.DoesNotExist: + return JsonResponse({'error': 'Content not found'}, status=404) + + # Here you could store the position in a model or cache + # For now, just return success + logger.info(f"Position update for {content_obj.__class__.__name__} {content_id}: {position}s") + + return JsonResponse({ + 'success': True, + 'content_id': str(content_id), + 'position': position + }) + + except Exception as e: + logger.error(f"Error updating VOD position: {e}") + return JsonResponse({'error': str(e)}, status=500) diff --git a/apps/vod/__init__.py b/apps/vod/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/apps/vod/admin.py b/apps/vod/admin.py new file mode 100644 index 00000000..c660f310 --- /dev/null +++ b/apps/vod/admin.py @@ -0,0 +1,67 @@ +from django.contrib import admin +from .models import ( + Series, VODCategory, Movie, Episode, + M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation +) + + +@admin.register(VODCategory) +class VODCategoryAdmin(admin.ModelAdmin): + list_display = ['name', 'category_type', 'created_at'] + list_filter = ['category_type', 'created_at'] + search_fields = ['name'] + + +@admin.register(Series) +class SeriesAdmin(admin.ModelAdmin): + list_display = ['name', 'year', 'genre', 'created_at'] + list_filter = ['year', 'created_at'] + search_fields = ['name', 'description', 'tmdb_id', 'imdb_id'] + readonly_fields = ['uuid', 'created_at', 'updated_at'] + + +@admin.register(Movie) +class MovieAdmin(admin.ModelAdmin): + list_display = ['name', 'year', 'genre', 'duration_secs', 'created_at'] + list_filter = ['year', 'created_at'] + search_fields = ['name', 'description', 'tmdb_id', 'imdb_id'] + readonly_fields = ['uuid', 'created_at', 'updated_at'] + + def get_queryset(self, request): + return super().get_queryset(request).select_related('logo') + + +@admin.register(Episode) +class EpisodeAdmin(admin.ModelAdmin): + list_display = ['name', 'series', 'season_number', 'episode_number', 'duration_secs', 'created_at'] + list_filter = ['series', 'season_number', 'created_at'] + 
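
For reference, the `_generate_playlist` helper above emits plain M3U text of this shape (titles, tvg ids and UUIDs invented):

```python
# #EXTM3U
# #EXTINF:-1 tvg-id="603" group-title="Movies",Example Movie
# /proxy/vod/movie/2f9c4a9e-0000-0000-0000-000000000000/?profile=1
# #EXTINF:-1 tvg-id="1396" group-title="Series",Example Show - S01E01
# /proxy/vod/episode/77aa01bc-0000-0000-0000-000000000000/?profile=1
```
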
search_fields = ['name', 'description', 'series__name'] + readonly_fields = ['uuid', 'created_at', 'updated_at'] + + def get_queryset(self, request): + return super().get_queryset(request).select_related('series') + + +@admin.register(M3UMovieRelation) +class M3UMovieRelationAdmin(admin.ModelAdmin): + list_display = ['movie', 'm3u_account', 'category', 'stream_id', 'created_at'] + list_filter = ['m3u_account', 'category', 'created_at'] + search_fields = ['movie__name', 'm3u_account__name', 'stream_id'] + readonly_fields = ['created_at', 'updated_at'] + + +@admin.register(M3USeriesRelation) +class M3USeriesRelationAdmin(admin.ModelAdmin): + list_display = ['series', 'm3u_account', 'category', 'external_series_id', 'created_at'] + list_filter = ['m3u_account', 'category', 'created_at'] + search_fields = ['series__name', 'm3u_account__name', 'external_series_id'] + readonly_fields = ['created_at', 'updated_at'] + + +@admin.register(M3UEpisodeRelation) +class M3UEpisodeRelationAdmin(admin.ModelAdmin): + list_display = ['episode', 'm3u_account', 'stream_id', 'created_at'] + list_filter = ['m3u_account', 'created_at'] + search_fields = ['episode__name', 'episode__series__name', 'm3u_account__name', 'stream_id'] + readonly_fields = ['created_at', 'updated_at'] + diff --git a/apps/vod/api_urls.py b/apps/vod/api_urls.py new file mode 100644 index 00000000..b49e79e3 --- /dev/null +++ b/apps/vod/api_urls.py @@ -0,0 +1,18 @@ +from django.urls import path, include +from rest_framework.routers import DefaultRouter +from .api_views import ( + MovieViewSet, + EpisodeViewSet, + SeriesViewSet, + VODCategoryViewSet, +) + +app_name = 'vod' + +router = DefaultRouter() +router.register(r'movies', MovieViewSet, basename='movie') +router.register(r'episodes', EpisodeViewSet, basename='episode') +router.register(r'series', SeriesViewSet, basename='series') +router.register(r'categories', VODCategoryViewSet, basename='vodcategory') + +urlpatterns = router.urls diff --git a/apps/vod/api_views.py b/apps/vod/api_views.py new file mode 100644 index 00000000..b72ae035 --- /dev/null +++ b/apps/vod/api_views.py @@ -0,0 +1,471 @@ +from rest_framework import viewsets, status +from rest_framework.response import Response +from rest_framework.decorators import action +from rest_framework.filters import SearchFilter, OrderingFilter +from rest_framework.pagination import PageNumberPagination +from django_filters.rest_framework import DjangoFilterBackend +from django.shortcuts import get_object_or_404 +import django_filters +import logging +from apps.accounts.permissions import ( + Authenticated, + permission_classes_by_action, +) +from .models import ( + Series, VODCategory, Movie, Episode, + M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation +) +from .serializers import ( + MovieSerializer, + EpisodeSerializer, + SeriesSerializer, + VODCategorySerializer, + M3UMovieRelationSerializer, + M3USeriesRelationSerializer, + M3UEpisodeRelationSerializer +) +from .tasks import refresh_series_episodes, refresh_movie_advanced_data +from django.utils import timezone +from datetime import timedelta + +logger = logging.getLogger(__name__) + + +class VODPagination(PageNumberPagination): + page_size = 20 # Default page size to match frontend default + page_size_query_param = "page_size" # Allow clients to specify page size + max_page_size = 100 # Prevent excessive page sizes for VOD content + + +class MovieFilter(django_filters.FilterSet): + name = django_filters.CharFilter(lookup_expr="icontains") + m3u_account = 
django_filters.NumberFilter(field_name="m3u_relations__m3u_account__id") + category = django_filters.CharFilter(method='filter_category') + year = django_filters.NumberFilter() + year_gte = django_filters.NumberFilter(field_name="year", lookup_expr="gte") + year_lte = django_filters.NumberFilter(field_name="year", lookup_expr="lte") + + class Meta: + model = Movie + fields = ['name', 'm3u_account', 'category', 'year'] + + def filter_category(self, queryset, name, value): + """Custom category filter that handles 'name|type' format""" + if not value: + return queryset + + # Handle the format 'category_name|category_type' + if '|' in value: + category_name, category_type = value.split('|', 1) + return queryset.filter( + m3u_relations__category__name=category_name, + m3u_relations__category__category_type=category_type + ) + else: + # Fallback: treat as category name only + return queryset.filter(m3u_relations__category__name=value) + + +class MovieViewSet(viewsets.ReadOnlyModelViewSet): + """ViewSet for Movie content""" + queryset = Movie.objects.all() + serializer_class = MovieSerializer + pagination_class = VODPagination + + filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter] + filterset_class = MovieFilter + search_fields = ['name', 'description', 'genre'] + ordering_fields = ['name', 'year', 'created_at'] + ordering = ['name'] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + + def get_queryset(self): + # Only return movies that have active M3U relations + return Movie.objects.filter( + m3u_relations__m3u_account__is_active=True + ).distinct().select_related('logo').prefetch_related('m3u_relations__m3u_account') + + @action(detail=True, methods=['get'], url_path='providers') + def get_providers(self, request, pk=None): + """Get all providers (M3U accounts) that have this movie""" + movie = self.get_object() + relations = M3UMovieRelation.objects.filter( + movie=movie, + m3u_account__is_active=True + ).select_related('m3u_account', 'category') + + serializer = M3UMovieRelationSerializer(relations, many=True) + return Response(serializer.data) + + + @action(detail=True, methods=['get'], url_path='provider-info') + def provider_info(self, request, pk=None): + """Get detailed movie information from the original provider, throttled to 24h.""" + movie = self.get_object() + + # Get the highest priority active relation + relation = M3UMovieRelation.objects.filter( + movie=movie, + m3u_account__is_active=True + ).select_related('m3u_account').order_by('-m3u_account__priority', 'id').first() + + if not relation: + return Response( + {'error': 'No active M3U account associated with this movie'}, + status=status.HTTP_400_BAD_REQUEST + ) + + force_refresh = request.query_params.get('force_refresh', 'false').lower() == 'true' + now = timezone.now() + needs_refresh = ( + force_refresh or + not relation.last_advanced_refresh or + (now - relation.last_advanced_refresh).total_seconds() > 86400 + ) + + if needs_refresh: + # Trigger advanced data refresh + logger.debug(f"Refreshing advanced data for movie {movie.id} (relation ID: {relation.id})") + refresh_movie_advanced_data(relation.id, force_refresh=force_refresh) + + # Refresh objects from database after task completion + movie.refresh_from_db() + relation.refresh_from_db() + + # Use refreshed data from database + custom_props = relation.custom_properties or {} + info = custom_props.get('detailed_info', {}) + movie_data = 
custom_props.get('movie_data', {})
+
+        # Build response with available data
+        response_data = {
+            'id': movie.id,
+            'uuid': movie.uuid,
+            'stream_id': relation.stream_id,
+            'name': info.get('name', movie.name),
+            'o_name': info.get('o_name', ''),
+            'description': info.get('description', info.get('plot', movie.description)),
+            'plot': info.get('plot', info.get('description', movie.description)),
+            'year': movie.year or info.get('year'),
+            'release_date': (movie.custom_properties or {}).get('release_date') or info.get('release_date') or info.get('releasedate', ''),
+            'genre': movie.genre or info.get('genre', ''),
+            'director': (movie.custom_properties or {}).get('director') or info.get('director', ''),
+            'actors': (movie.custom_properties or {}).get('actors') or info.get('actors', ''),
+            'country': (movie.custom_properties or {}).get('country') or info.get('country', ''),
+            'rating': movie.rating or info.get('rating', 0),
+            'tmdb_id': movie.tmdb_id or info.get('tmdb_id', ''),
+            'imdb_id': movie.imdb_id or info.get('imdb_id', ''),
+            'youtube_trailer': (movie.custom_properties or {}).get('youtube_trailer') or info.get('youtube_trailer') or info.get('trailer', ''),
+            'duration_secs': movie.duration_secs or info.get('duration_secs'),
+            'age': info.get('age', ''),
+            'backdrop_path': (movie.custom_properties or {}).get('backdrop_path') or info.get('backdrop_path', []),
+            'cover': info.get('cover_big', ''),
+            'cover_big': info.get('cover_big', ''),
+            'movie_image': movie.logo.url if movie.logo else info.get('movie_image', ''),
+            'bitrate': info.get('bitrate', 0),
+            'video': info.get('video', {}),
+            'audio': info.get('audio', {}),
+            'container_extension': movie_data.get('container_extension', 'mp4'),
+            'direct_source': movie_data.get('direct_source', ''),
+            'category_id': movie_data.get('category_id', ''),
+            'added': movie_data.get('added', ''),
+            'm3u_account': {
+                'id': relation.m3u_account.id,
+                'name': relation.m3u_account.name,
+                'account_type': relation.m3u_account.account_type
+            }
+        }
+        return Response(response_data)
+
+
+class EpisodeFilter(django_filters.FilterSet):
+    name = django_filters.CharFilter(lookup_expr="icontains")
+    series = django_filters.NumberFilter(field_name="series__id")
+    # Episodes link to providers through M3UEpisodeRelation, not a direct FK
+    m3u_account = django_filters.NumberFilter(field_name="m3u_relations__m3u_account__id")
+    season_number = django_filters.NumberFilter()
+    episode_number = django_filters.NumberFilter()
+
+    class Meta:
+        model = Episode
+        fields = ['name', 'series', 'm3u_account', 'season_number', 'episode_number']
+
+
+class SeriesFilter(django_filters.FilterSet):
+    name = django_filters.CharFilter(lookup_expr="icontains")
+    m3u_account = django_filters.NumberFilter(field_name="m3u_relations__m3u_account__id")
+    category = django_filters.CharFilter(method='filter_category')
+    year = django_filters.NumberFilter()
+    year_gte = django_filters.NumberFilter(field_name="year", lookup_expr="gte")
+    year_lte = django_filters.NumberFilter(field_name="year", lookup_expr="lte")
+
+    class Meta:
+        model = Series
+        fields = ['name', 'm3u_account', 'category', 'year']
+
+    def filter_category(self, queryset, name, value):
+        """Custom category filter that handles 'name|type' format"""
+        if not value:
+            return queryset
+
+        # Handle the format 'category_name|category_type'
+        if '|' in value:
+            category_name, category_type = value.split('|', 1)
+            return queryset.filter(
+                m3u_relations__category__name=category_name,
+                m3u_relations__category__category_type=category_type
+            )
+        else:
+            # Fallback: treat as category name only
+            return queryset.filter(m3u_relations__category__name=value)
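+
+# The category filters above accept either a bare name or a 'name|type' pair.
+# Illustrative requests (hypothetical values; path prefix may differ by deployment):
+#   GET /api/vod/movies/?category=Action|movie
+#   GET /api/vod/series/?category=Drama        (name-only fallback)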
+
+
+class EpisodeViewSet(viewsets.ReadOnlyModelViewSet):
+    """ViewSet for Episode content"""
+    queryset = Episode.objects.all()
+    serializer_class = EpisodeSerializer
+    pagination_class = VODPagination
+
+    filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter]
+    filterset_class = EpisodeFilter
+    search_fields = ['name', 'description']
+    ordering_fields = ['name', 'season_number', 'episode_number', 'created_at']
+    ordering = ['series__name', 'season_number', 'episode_number']
+
+    def get_permissions(self):
+        try:
+            return [perm() for perm in permission_classes_by_action[self.action]]
+        except KeyError:
+            return [Authenticated()]
+
+    def get_queryset(self):
+        # Only return episodes that have active M3U relations
+        return Episode.objects.select_related('series').filter(
+            m3u_relations__m3u_account__is_active=True
+        ).distinct().prefetch_related('m3u_relations__m3u_account')
+
+
+class SeriesViewSet(viewsets.ReadOnlyModelViewSet):
+    """ViewSet for Series management"""
+    queryset = Series.objects.all()
+    serializer_class = SeriesSerializer
+    pagination_class = VODPagination
+
+    filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter]
+    filterset_class = SeriesFilter
+    search_fields = ['name', 'description', 'genre']
+    ordering_fields = ['name', 'year', 'created_at']
+    ordering = ['name']
+
+    def get_permissions(self):
+        try:
+            return [perm() for perm in permission_classes_by_action[self.action]]
+        except KeyError:
+            return [Authenticated()]
+
+    def get_queryset(self):
+        # Only return series that have active M3U relations
+        return Series.objects.filter(
+            m3u_relations__m3u_account__is_active=True
+        ).distinct().select_related('logo').prefetch_related('episodes', 'm3u_relations__m3u_account')
+
+    @action(detail=True, methods=['get'], url_path='providers')
+    def get_providers(self, request, pk=None):
+        """Get all providers (M3U accounts) that have this series"""
+        series = self.get_object()
+        relations = M3USeriesRelation.objects.filter(
+            series=series,
+            m3u_account__is_active=True
+        ).select_related('m3u_account', 'category')
+
+        serializer = M3USeriesRelationSerializer(relations, many=True)
+        return Response(serializer.data)
+
+    @action(detail=True, methods=['get'], url_path='episodes')
+    def get_episodes(self, request, pk=None):
+        """Get episodes for this series with provider information"""
+        series = self.get_object()
+        episodes = Episode.objects.filter(series=series).prefetch_related(
+            'm3u_relations__m3u_account'
+        ).order_by('season_number', 'episode_number')
+
+        episodes_data = []
+        for episode in episodes:
+            episode_serializer = EpisodeSerializer(episode)
+            episode_data = episode_serializer.data
+
+            # Add provider information
+            relations = M3UEpisodeRelation.objects.filter(
+                episode=episode,
+                m3u_account__is_active=True
+            ).select_related('m3u_account')
+
+            episode_data['providers'] = M3UEpisodeRelationSerializer(relations, many=True).data
+            episodes_data.append(episode_data)
+
+        return Response(episodes_data)
+
+    @action(detail=True, methods=['get'], url_path='provider-info')
+    def series_info(self, request, pk=None):
+        """Get detailed series information, refreshing from provider if needed"""
+        logger.debug(f"SeriesViewSet.series_info called for series ID: {pk}")
+        series = self.get_object()
+        logger.debug(f"Retrieved series: {series.name} (ID: {series.id})")
+
+        # Get the highest priority active relation
+        relation = M3USeriesRelation.objects.filter(
+            series=series,
+            m3u_account__is_active=True
+        ).select_related('m3u_account').order_by('-m3u_account__priority', 'id').first()
+
+        if not relation:
+            return
Response( + {'error': 'No active M3U account associated with this series'}, + status=status.HTTP_400_BAD_REQUEST + ) + + try: + # Check if we should refresh data (optional force refresh parameter) + force_refresh = request.query_params.get('force_refresh', 'false').lower() == 'true' + refresh_interval_hours = int(request.query_params.get("refresh_interval", 24)) # Default to 24 hours + + now = timezone.now() + last_refreshed = relation.last_episode_refresh + + # Check if detailed data has been fetched + custom_props = relation.custom_properties or {} + episodes_fetched = custom_props.get('episodes_fetched', False) + detailed_fetched = custom_props.get('detailed_fetched', False) + + # Force refresh if episodes have never been fetched or if forced + if not episodes_fetched or not detailed_fetched or force_refresh: + force_refresh = True + logger.debug(f"Series {series.id} needs detailed/episode refresh, forcing refresh") + elif last_refreshed is None or (now - last_refreshed) > timedelta(hours=refresh_interval_hours): + force_refresh = True + logger.debug(f"Series {series.id} refresh interval exceeded or never refreshed, forcing refresh") + + if force_refresh: + logger.debug(f"Refreshing series {series.id} data from provider") + # Use existing refresh logic with external_series_id + from .tasks import refresh_series_episodes + account = relation.m3u_account + if account and account.is_active: + refresh_series_episodes(account, series, relation.external_series_id) + series.refresh_from_db() # Reload from database after refresh + relation.refresh_from_db() # Reload relation too + + # Return the database data (which should now be fresh) + custom_props = relation.custom_properties or {} + response_data = { + 'id': series.id, + 'series_id': relation.external_series_id, + 'name': series.name, + 'description': series.description, + 'year': series.year, + 'genre': series.genre, + 'rating': series.rating, + 'tmdb_id': series.tmdb_id, + 'imdb_id': series.imdb_id, + 'category_id': relation.category.id if relation.category else None, + 'category_name': relation.category.name if relation.category else None, + 'cover': { + 'id': series.logo.id, + 'url': series.logo.url, + 'name': series.logo.name, + } if series.logo else None, + 'last_refreshed': series.updated_at, + 'custom_properties': series.custom_properties, + 'm3u_account': { + 'id': relation.m3u_account.id, + 'name': relation.m3u_account.name, + 'account_type': relation.m3u_account.account_type + }, + 'episodes_fetched': custom_props.get('episodes_fetched', False), + 'detailed_fetched': custom_props.get('detailed_fetched', False) + } + + # Always include episodes for series info if they've been fetched + include_episodes = request.query_params.get('include_episodes', 'true').lower() == 'true' + if include_episodes and custom_props.get('episodes_fetched', False): + logger.debug(f"Including episodes for series {series.id}") + episodes_by_season = {} + for episode in series.episodes.all().order_by('season_number', 'episode_number'): + season_key = str(episode.season_number or 0) + if season_key not in episodes_by_season: + episodes_by_season[season_key] = [] + + # Get episode relation for additional data + episode_relation = M3UEpisodeRelation.objects.filter( + episode=episode, + m3u_account=relation.m3u_account + ).first() + + episode_data = { + 'id': episode.id, + 'uuid': episode.uuid, + 'name': episode.name, + 'title': episode.name, + 'episode_number': episode.episode_number, + 'season_number': episode.season_number, + 'description': 
episode.description,
+                        'air_date': episode.air_date,
+                        'plot': episode.description,
+                        'duration_secs': episode.duration_secs,
+                        'rating': episode.rating,
+                        'tmdb_id': episode.tmdb_id,
+                        'imdb_id': episode.imdb_id,
+                        'movie_image': episode.custom_properties.get('movie_image', '') if episode.custom_properties else '',
+                        'container_extension': episode_relation.container_extension if episode_relation else 'mp4',
+                        'type': 'episode',
+                        'series': {
+                            'id': series.id,
+                            'name': series.name
+                        }
+                    }
+                    episodes_by_season[season_key].append(episode_data)
+
+                response_data['episodes'] = episodes_by_season
+                logger.debug(f"Added {len(episodes_by_season)} seasons of episodes to response")
+            elif include_episodes:
+                # Episodes not yet fetched, include empty episodes list
+                response_data['episodes'] = {}
+
+            logger.debug(f"Returning series info response for series {series.id}")
+            return Response(response_data)
+
+        except Exception as e:
+            logger.error(f"Error fetching series info for series {pk}: {str(e)}")
+            return Response(
+                {'error': f'Failed to fetch series information: {str(e)}'},
+                status=status.HTTP_500_INTERNAL_SERVER_ERROR
+            )
+
+
+class VODCategoryFilter(django_filters.FilterSet):
+    name = django_filters.CharFilter(lookup_expr="icontains")
+    category_type = django_filters.ChoiceFilter(choices=VODCategory.CATEGORY_TYPE_CHOICES)
+    # Categories link to accounts through M3UVODCategoryRelation, not a direct FK
+    m3u_account = django_filters.NumberFilter(field_name="m3u_relations__m3u_account__id")
+
+    class Meta:
+        model = VODCategory
+        fields = ['name', 'category_type', 'm3u_account']
+
+
+class VODCategoryViewSet(viewsets.ReadOnlyModelViewSet):
+    """ViewSet for VOD Categories"""
+    queryset = VODCategory.objects.all()
+    serializer_class = VODCategorySerializer
+
+    filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter]
+    filterset_class = VODCategoryFilter
+    search_fields = ['name']
+    ordering = ['name']
+
+    def get_permissions(self):
+        try:
+            return [perm() for perm in permission_classes_by_action[self.action]]
+        except KeyError:
+            return [Authenticated()]
diff --git a/apps/vod/apps.py b/apps/vod/apps.py
new file mode 100644
index 00000000..0e2af56d
--- /dev/null
+++ b/apps/vod/apps.py
@@ -0,0 +1,12 @@
+from django.apps import AppConfig
+
+
+class VODConfig(AppConfig):
+    default_auto_field = 'django.db.models.BigAutoField'
+    name = 'apps.vod'
+    verbose_name = 'Video on Demand'
+
+    def ready(self):
+        """Initialize VOD app when Django is ready"""
+        # Import models to ensure they're registered
+        from .
import models diff --git a/apps/vod/migrations/0001_initial.py b/apps/vod/migrations/0001_initial.py new file mode 100644 index 00000000..02c6ae2a --- /dev/null +++ b/apps/vod/migrations/0001_initial.py @@ -0,0 +1,201 @@ +# Generated by Django 5.2.4 on 2025-08-28 18:16 + +import django.db.models.deletion +import uuid +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ('dispatcharr_channels', '0024_alter_channelgroupm3uaccount_channel_group'), + ('m3u', '0016_m3uaccount_priority'), + ] + + operations = [ + migrations.CreateModel( + name='Movie', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)), + ('name', models.CharField(max_length=255)), + ('description', models.TextField(blank=True, null=True)), + ('year', models.IntegerField(blank=True, null=True)), + ('rating', models.CharField(blank=True, max_length=10, null=True)), + ('genre', models.CharField(blank=True, max_length=255, null=True)), + ('duration_secs', models.IntegerField(blank=True, help_text='Duration in seconds', null=True)), + ('tmdb_id', models.CharField(blank=True, help_text='TMDB ID for metadata', max_length=50, null=True, unique=True)), + ('imdb_id', models.CharField(blank=True, help_text='IMDB ID for metadata', max_length=50, null=True, unique=True)), + ('custom_properties', models.JSONField(blank=True, help_text='Additional metadata and properties for the movie', null=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('logo', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='movie', to='dispatcharr_channels.logo')), + ], + options={ + 'verbose_name': 'Movie', + 'verbose_name_plural': 'Movies', + 'ordering': ['name'], + }, + ), + migrations.CreateModel( + name='Series', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)), + ('name', models.CharField(max_length=255)), + ('description', models.TextField(blank=True, null=True)), + ('year', models.IntegerField(blank=True, null=True)), + ('rating', models.CharField(blank=True, max_length=10, null=True)), + ('genre', models.CharField(blank=True, max_length=255, null=True)), + ('tmdb_id', models.CharField(blank=True, help_text='TMDB ID for metadata', max_length=50, null=True, unique=True)), + ('imdb_id', models.CharField(blank=True, help_text='IMDB ID for metadata', max_length=50, null=True, unique=True)), + ('custom_properties', models.JSONField(blank=True, help_text='Additional metadata and properties for the series', null=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('logo', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='series', to='dispatcharr_channels.logo')), + ], + options={ + 'verbose_name': 'Series', + 'verbose_name_plural': 'Series', + 'ordering': ['name'], + }, + ), + migrations.CreateModel( + name='Episode', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)), + ('name', models.CharField(max_length=255)), + 
('description', models.TextField(blank=True, null=True)), + ('air_date', models.DateField(blank=True, null=True)), + ('rating', models.CharField(blank=True, max_length=10, null=True)), + ('duration_secs', models.IntegerField(blank=True, help_text='Duration in seconds', null=True)), + ('season_number', models.IntegerField(blank=True, null=True)), + ('episode_number', models.IntegerField(blank=True, null=True)), + ('tmdb_id', models.CharField(blank=True, db_index=True, help_text='TMDB ID for metadata', max_length=50, null=True)), + ('imdb_id', models.CharField(blank=True, db_index=True, help_text='IMDB ID for metadata', max_length=50, null=True)), + ('custom_properties', models.JSONField(blank=True, help_text='Custom properties for this episode', null=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('series', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='episodes', to='vod.series')), + ], + options={ + 'verbose_name': 'Episode', + 'verbose_name_plural': 'Episodes', + 'ordering': ['series__name', 'season_number', 'episode_number'], + }, + ), + migrations.CreateModel( + name='VODCategory', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('name', models.CharField(max_length=255)), + ('category_type', models.CharField(choices=[('movie', 'Movie'), ('series', 'Series')], default='movie', help_text='Type of content this category contains', max_length=10)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ], + options={ + 'verbose_name': 'VOD Category', + 'verbose_name_plural': 'VOD Categories', + 'ordering': ['name'], + 'unique_together': {('name', 'category_type')}, + }, + ), + migrations.CreateModel( + name='M3UVODCategoryRelation', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('enabled', models.BooleanField(default=False, help_text='Set to false to deactivate this category for the M3U account')), + ('custom_properties', models.JSONField(blank=True, help_text='Provider-specific data like quality, language, etc.', null=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('m3u_account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_relations', to='m3u.m3uaccount')), + ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='m3u_relations', to='vod.vodcategory')), + ], + options={ + 'verbose_name': 'M3U VOD Category Relation', + 'verbose_name_plural': 'M3U VOD Category Relations', + }, + ), + migrations.CreateModel( + name='M3USeriesRelation', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('external_series_id', models.CharField(help_text='External series ID from M3U provider', max_length=255)), + ('custom_properties', models.JSONField(blank=True, help_text='Provider-specific data', null=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('last_episode_refresh', models.DateTimeField(blank=True, help_text='Last time episodes were refreshed', null=True)), + ('m3u_account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='series_relations', to='m3u.m3uaccount')), + ('series', 
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='m3u_relations', to='vod.series')), + ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='vod.vodcategory')), + ], + options={ + 'verbose_name': 'M3U Series Relation', + 'verbose_name_plural': 'M3U Series Relations', + }, + ), + migrations.CreateModel( + name='M3UMovieRelation', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('stream_id', models.CharField(help_text='External stream ID from M3U provider', max_length=255)), + ('container_extension', models.CharField(blank=True, max_length=10, null=True)), + ('custom_properties', models.JSONField(blank=True, help_text='Provider-specific data like quality, language, etc.', null=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('last_advanced_refresh', models.DateTimeField(blank=True, help_text='Last time advanced data was fetched from provider', null=True)), + ('m3u_account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='movie_relations', to='m3u.m3uaccount')), + ('movie', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='m3u_relations', to='vod.movie')), + ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='vod.vodcategory')), + ], + options={ + 'verbose_name': 'M3U Movie Relation', + 'verbose_name_plural': 'M3U Movie Relations', + }, + ), + migrations.CreateModel( + name='M3UEpisodeRelation', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('stream_id', models.CharField(help_text='External stream ID from M3U provider', max_length=255)), + ('container_extension', models.CharField(blank=True, max_length=10, null=True)), + ('custom_properties', models.JSONField(blank=True, help_text='Provider-specific data like quality, language, etc.', null=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('episode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='m3u_relations', to='vod.episode')), + ('m3u_account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='episode_relations', to='m3u.m3uaccount')), + ], + options={ + 'verbose_name': 'M3U Episode Relation', + 'verbose_name_plural': 'M3U Episode Relations', + 'unique_together': {('m3u_account', 'stream_id')}, + }, + ), + migrations.AddConstraint( + model_name='movie', + constraint=models.UniqueConstraint(condition=models.Q(('tmdb_id__isnull', True), ('imdb_id__isnull', True)), fields=('name', 'year'), name='unique_movie_name_year_no_external_id'), + ), + migrations.AddConstraint( + model_name='series', + constraint=models.UniqueConstraint(condition=models.Q(('tmdb_id__isnull', True), ('imdb_id__isnull', True)), fields=('name', 'year'), name='unique_series_name_year_no_external_id'), + ), + migrations.AlterUniqueTogether( + name='episode', + unique_together={('series', 'season_number', 'episode_number')}, + ), + migrations.AlterUniqueTogether( + name='m3uvodcategoryrelation', + unique_together={('m3u_account', 'category')}, + ), + migrations.AlterUniqueTogether( + name='m3useriesrelation', + unique_together={('m3u_account', 'external_series_id')}, + ), + migrations.AlterUniqueTogether( + name='m3umovierelation', + 
unique_together={('m3u_account', 'stream_id')}, + ), + ] diff --git a/apps/vod/migrations/__init__.py b/apps/vod/migrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/apps/vod/models.py b/apps/vod/models.py new file mode 100644 index 00000000..5e73bc5d --- /dev/null +++ b/apps/vod/models.py @@ -0,0 +1,303 @@ +from django.db import models +from django.db.models import Q +from django.utils import timezone +from django.contrib.contenttypes.fields import GenericForeignKey +from django.contrib.contenttypes.models import ContentType +from apps.m3u.models import M3UAccount +from apps.channels.models import Logo +import uuid + + +class VODCategory(models.Model): + """Categories for organizing VODs (e.g., Action, Comedy, Drama)""" + + CATEGORY_TYPE_CHOICES = [ + ('movie', 'Movie'), + ('series', 'Series'), + ] + + name = models.CharField(max_length=255) + category_type = models.CharField( + max_length=10, + choices=CATEGORY_TYPE_CHOICES, + default='movie', + help_text="Type of content this category contains" + ) + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + + class Meta: + verbose_name = 'VOD Category' + verbose_name_plural = 'VOD Categories' + ordering = ['name'] + unique_together = [('name', 'category_type')] + + @classmethod + def bulk_create_and_fetch(cls, objects, ignore_conflicts=False): + # Perform the bulk create operation + cls.objects.bulk_create(objects, ignore_conflicts=ignore_conflicts) + + # Use the unique fields to fetch the created objects + # Since we have unique_together on ('name', 'category_type'), we need both fields + filter_conditions = [] + for obj in objects: + filter_conditions.append( + Q(name=obj.name, category_type=obj.category_type) + ) + + if filter_conditions: + # Combine all conditions with OR + combined_condition = filter_conditions[0] + for condition in filter_conditions[1:]: + combined_condition |= condition + + created_objects = cls.objects.filter(combined_condition) + else: + created_objects = cls.objects.none() + + return created_objects + + def __str__(self): + return f"{self.name} ({self.get_category_type_display()})" + + +class Series(models.Model): + """Series information for TV shows""" + uuid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True) + name = models.CharField(max_length=255) + description = models.TextField(blank=True, null=True) + year = models.IntegerField(blank=True, null=True) + rating = models.CharField(max_length=10, blank=True, null=True) + genre = models.CharField(max_length=255, blank=True, null=True) + logo = models.ForeignKey(Logo, on_delete=models.SET_NULL, null=True, blank=True, related_name='series') + + # Metadata IDs for deduplication - these should be globally unique when present + tmdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="TMDB ID for metadata") + imdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="IMDB ID for metadata") + + # Additional metadata and properties + custom_properties = models.JSONField(blank=True, null=True, help_text='Additional metadata and properties for the series') + + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + + class Meta: + verbose_name = 'Series' + verbose_name_plural = 'Series' + ordering = ['name'] + # Only enforce name+year uniqueness when no external IDs are present + constraints = [ + models.UniqueConstraint( + fields=['name', 'year'], + 
condition=models.Q(tmdb_id__isnull=True) & models.Q(imdb_id__isnull=True), + name='unique_series_name_year_no_external_id' + ), + ] + + def __str__(self): + year_str = f" ({self.year})" if self.year else "" + return f"{self.name}{year_str}" + + +class Movie(models.Model): + """Movie content""" + uuid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True) + name = models.CharField(max_length=255) + description = models.TextField(blank=True, null=True) + year = models.IntegerField(blank=True, null=True) + rating = models.CharField(max_length=10, blank=True, null=True) + genre = models.CharField(max_length=255, blank=True, null=True) + duration_secs = models.IntegerField(blank=True, null=True, help_text="Duration in seconds") + logo = models.ForeignKey(Logo, on_delete=models.SET_NULL, null=True, blank=True, related_name='movie') + + # Metadata IDs for deduplication - these should be globally unique when present + tmdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="TMDB ID for metadata") + imdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="IMDB ID for metadata") + + # Additional metadata and properties + custom_properties = models.JSONField(blank=True, null=True, help_text='Additional metadata and properties for the movie') + + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + + class Meta: + verbose_name = 'Movie' + verbose_name_plural = 'Movies' + ordering = ['name'] + # Only enforce name+year uniqueness when no external IDs are present + constraints = [ + models.UniqueConstraint( + fields=['name', 'year'], + condition=models.Q(tmdb_id__isnull=True) & models.Q(imdb_id__isnull=True), + name='unique_movie_name_year_no_external_id' + ), + ] + + def __str__(self): + year_str = f" ({self.year})" if self.year else "" + return f"{self.name}{year_str}" + + +class Episode(models.Model): + """Episode content for TV series""" + uuid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True) + name = models.CharField(max_length=255) + description = models.TextField(blank=True, null=True) + air_date = models.DateField(blank=True, null=True) + rating = models.CharField(max_length=10, blank=True, null=True) + duration_secs = models.IntegerField(blank=True, null=True, help_text="Duration in seconds") + + # Episode specific fields + series = models.ForeignKey(Series, on_delete=models.CASCADE, related_name='episodes') + season_number = models.IntegerField(blank=True, null=True) + episode_number = models.IntegerField(blank=True, null=True) + + # Metadata IDs + tmdb_id = models.CharField(max_length=50, blank=True, null=True, help_text="TMDB ID for metadata", db_index=True) + imdb_id = models.CharField(max_length=50, blank=True, null=True, help_text="IMDB ID for metadata", db_index=True) + + # Custom properties for episode + custom_properties = models.JSONField(blank=True, null=True, help_text="Custom properties for this episode") + + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + + class Meta: + verbose_name = 'Episode' + verbose_name_plural = 'Episodes' + ordering = ['series__name', 'season_number', 'episode_number'] + unique_together = [ + ('series', 'season_number', 'episode_number'), + ] + + def __str__(self): + season_ep = f"S{self.season_number or 0:02d}E{self.episode_number or 0:02d}" + return f"{self.series.name} - {season_ep} - {self.name}" + + +# New relation models to link M3U accounts with 
VOD content + +class M3USeriesRelation(models.Model): + """Links M3U accounts to Series with provider-specific information""" + m3u_account = models.ForeignKey(M3UAccount, on_delete=models.CASCADE, related_name='series_relations') + series = models.ForeignKey(Series, on_delete=models.CASCADE, related_name='m3u_relations') + category = models.ForeignKey(VODCategory, on_delete=models.SET_NULL, null=True, blank=True) + + # Provider-specific fields - renamed to avoid clash with series ForeignKey + external_series_id = models.CharField(max_length=255, help_text="External series ID from M3U provider") + custom_properties = models.JSONField(blank=True, null=True, help_text="Provider-specific data") + + # Timestamps + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + last_episode_refresh = models.DateTimeField(blank=True, null=True, help_text="Last time episodes were refreshed") + + class Meta: + verbose_name = 'M3U Series Relation' + verbose_name_plural = 'M3U Series Relations' + unique_together = [('m3u_account', 'external_series_id')] + + def __str__(self): + return f"{self.m3u_account.name} - {self.series.name}" + + +class M3UMovieRelation(models.Model): + """Links M3U accounts to Movies with provider-specific information""" + m3u_account = models.ForeignKey(M3UAccount, on_delete=models.CASCADE, related_name='movie_relations') + movie = models.ForeignKey(Movie, on_delete=models.CASCADE, related_name='m3u_relations') + category = models.ForeignKey(VODCategory, on_delete=models.SET_NULL, null=True, blank=True) + + # Streaming information (provider-specific) + stream_id = models.CharField(max_length=255, help_text="External stream ID from M3U provider") + container_extension = models.CharField(max_length=10, blank=True, null=True) + + # Provider-specific data + custom_properties = models.JSONField(blank=True, null=True, help_text="Provider-specific data like quality, language, etc.") + + # Timestamps + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + last_advanced_refresh = models.DateTimeField(blank=True, null=True, help_text="Last time advanced data was fetched from provider") + + class Meta: + verbose_name = 'M3U Movie Relation' + verbose_name_plural = 'M3U Movie Relations' + unique_together = [('m3u_account', 'stream_id')] + + def __str__(self): + return f"{self.m3u_account.name} - {self.movie.name}" + + def get_stream_url(self): + """Get the full stream URL for this movie from this provider""" + # Build URL dynamically for XtreamCodes accounts + if self.m3u_account.account_type == 'XC': + server_url = self.m3u_account.server_url.rstrip('/') + username = self.m3u_account.username + password = self.m3u_account.password + return f"{server_url}/movie/{username}/{password}/{self.stream_id}.{self.container_extension or 'mp4'}" + else: + # For other account types, we would need another way to build URLs + return None + + +class M3UEpisodeRelation(models.Model): + """Links M3U accounts to Episodes with provider-specific information""" + m3u_account = models.ForeignKey(M3UAccount, on_delete=models.CASCADE, related_name='episode_relations') + episode = models.ForeignKey(Episode, on_delete=models.CASCADE, related_name='m3u_relations') + + # Streaming information (provider-specific) + stream_id = models.CharField(max_length=255, help_text="External stream ID from M3U provider") + container_extension = models.CharField(max_length=10, blank=True, null=True) + + # Provider-specific data + 
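+    # Illustrative custom_properties payload (hypothetical; actual keys vary by provider):
+    #   {"quality": "1080p", "language": "en"}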
custom_properties = models.JSONField(blank=True, null=True, help_text="Provider-specific data like quality, language, etc.")
+
+    # Timestamps
+    created_at = models.DateTimeField(auto_now_add=True)
+    updated_at = models.DateTimeField(auto_now=True)
+
+    class Meta:
+        verbose_name = 'M3U Episode Relation'
+        verbose_name_plural = 'M3U Episode Relations'
+        unique_together = [('m3u_account', 'stream_id')]
+
+    def __str__(self):
+        return f"{self.m3u_account.name} - {self.episode}"
+
+    def get_stream_url(self):
+        """Get the full stream URL for this episode from this provider"""
+        if self.m3u_account.account_type == 'XC':
+            # For XtreamCodes accounts, build the URL dynamically
+            server_url = self.m3u_account.server_url.rstrip('/')
+            username = self.m3u_account.username
+            password = self.m3u_account.password
+            return f"{server_url}/series/{username}/{password}/{self.stream_id}.{self.container_extension or 'mp4'}"
+        else:
+            # Non-XC account types may be supported in the future; for now, no URL can be built
+            return None
+
+
+class M3UVODCategoryRelation(models.Model):
+    """Links M3U accounts to categories with provider-specific information"""
+    m3u_account = models.ForeignKey(M3UAccount, on_delete=models.CASCADE, related_name='category_relations')
+    category = models.ForeignKey(VODCategory, on_delete=models.CASCADE, related_name='m3u_relations')
+
+    enabled = models.BooleanField(
+        default=False, help_text="Set to false to deactivate this category for the M3U account"
+    )
+
+    custom_properties = models.JSONField(blank=True, null=True, help_text="Provider-specific data like quality, language, etc.")
+
+    # Timestamps
+    created_at = models.DateTimeField(auto_now_add=True)
+    updated_at = models.DateTimeField(auto_now=True)
+
+    class Meta:
+        verbose_name = 'M3U VOD Category Relation'
+        verbose_name_plural = 'M3U VOD Category Relations'
+        unique_together = [('m3u_account', 'category')]
+
+    def __str__(self):
+        return f"{self.m3u_account.name} - {self.category.name}"
diff --git a/apps/vod/serializers.py b/apps/vod/serializers.py
new file mode 100644
index 00000000..5a672b33
--- /dev/null
+++ b/apps/vod/serializers.py
@@ -0,0 +1,237 @@
+from rest_framework import serializers
+from .models import (
+    Series, VODCategory, Movie, Episode,
+    M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation, M3UVODCategoryRelation
+)
+from apps.channels.serializers import LogoSerializer
+from apps.m3u.serializers import M3UAccountSerializer
+
+
+class M3UVODCategoryRelationSerializer(serializers.ModelSerializer):
+    category = serializers.IntegerField(source="category.id")
+    m3u_account = serializers.IntegerField(source="m3u_account.id")
+
+    class Meta:
+        model = M3UVODCategoryRelation
+        fields = ["category", "m3u_account", "enabled"]
+
+
+class VODCategorySerializer(serializers.ModelSerializer):
+    category_type_display = serializers.CharField(source='get_category_type_display', read_only=True)
+    m3u_accounts = M3UVODCategoryRelationSerializer(many=True, source="m3u_relations", read_only=True)
+
+    class Meta:
+        model = VODCategory
+        fields = [
+            "id",
+            "name",
+            "category_type",
+            "category_type_display",
+            "m3u_accounts",
+        ]
+
+
+class SeriesSerializer(serializers.ModelSerializer):
+    logo = LogoSerializer(read_only=True)
+    episode_count = serializers.SerializerMethodField()
+
+    class Meta:
+        model = Series
+        fields = '__all__'
+
+    def get_episode_count(self, obj):
+        return obj.episodes.count()
+
+
+class MovieSerializer(serializers.ModelSerializer):
+    logo =
LogoSerializer(read_only=True) + + class Meta: + model = Movie + fields = '__all__' + + +class EpisodeSerializer(serializers.ModelSerializer): + series = SeriesSerializer(read_only=True) + + class Meta: + model = Episode + fields = '__all__' + + +class M3USeriesRelationSerializer(serializers.ModelSerializer): + series = SeriesSerializer(read_only=True) + category = VODCategorySerializer(read_only=True) + m3u_account = M3UAccountSerializer(read_only=True) + + class Meta: + model = M3USeriesRelation + fields = '__all__' + + +class M3UMovieRelationSerializer(serializers.ModelSerializer): + movie = MovieSerializer(read_only=True) + category = VODCategorySerializer(read_only=True) + m3u_account = M3UAccountSerializer(read_only=True) + quality_info = serializers.SerializerMethodField() + + class Meta: + model = M3UMovieRelation + fields = '__all__' + + def get_quality_info(self, obj): + """Extract quality information from various sources""" + quality_info = {} + + # 1. Check custom_properties first + if obj.custom_properties: + if obj.custom_properties.get('quality'): + quality_info['quality'] = obj.custom_properties['quality'] + return quality_info + elif obj.custom_properties.get('resolution'): + quality_info['resolution'] = obj.custom_properties['resolution'] + return quality_info + + # 2. Try to get detailed info from the movie if available + movie = obj.movie + if hasattr(movie, 'video') and movie.video: + video_data = movie.video + if isinstance(video_data, dict) and 'width' in video_data and 'height' in video_data: + width = video_data['width'] + height = video_data['height'] + quality_info['resolution'] = f"{width}x{height}" + + # Convert to common quality names (prioritize width for ultrawide/cinematic content) + if width >= 3840: + quality_info['quality'] = '4K' + elif width >= 1920: + quality_info['quality'] = '1080p' + elif width >= 1280: + quality_info['quality'] = '720p' + elif width >= 854: + quality_info['quality'] = '480p' + else: + quality_info['quality'] = f"{width}x{height}" + return quality_info + + # 3. Extract from movie name/title + if movie and movie.name: + name = movie.name + if '4K' in name or '2160p' in name: + quality_info['quality'] = '4K' + return quality_info + elif '1080p' in name or 'FHD' in name: + quality_info['quality'] = '1080p' + return quality_info + elif '720p' in name or 'HD' in name: + quality_info['quality'] = '720p' + return quality_info + elif '480p' in name: + quality_info['quality'] = '480p' + return quality_info + + # 4. Try bitrate as last resort + if hasattr(movie, 'bitrate') and movie.bitrate and movie.bitrate > 0: + bitrate = movie.bitrate + if bitrate >= 6000: + quality_info['quality'] = '4K' + elif bitrate >= 3000: + quality_info['quality'] = '1080p' + elif bitrate >= 1500: + quality_info['quality'] = '720p' + else: + quality_info['bitrate'] = f"{round(bitrate/1000)}Mbps" + return quality_info + + # 5. Fallback - no quality info available + return None + + +class M3UEpisodeRelationSerializer(serializers.ModelSerializer): + episode = EpisodeSerializer(read_only=True) + m3u_account = M3UAccountSerializer(read_only=True) + quality_info = serializers.SerializerMethodField() + + class Meta: + model = M3UEpisodeRelation + fields = '__all__' + + def get_quality_info(self, obj): + """Extract quality information from various sources""" + quality_info = {} + + # 1. 
Check custom_properties first + if obj.custom_properties: + if obj.custom_properties.get('quality'): + quality_info['quality'] = obj.custom_properties['quality'] + return quality_info + elif obj.custom_properties.get('resolution'): + quality_info['resolution'] = obj.custom_properties['resolution'] + return quality_info + + # 2. Try to get detailed info from the episode if available + episode = obj.episode + if hasattr(episode, 'video') and episode.video: + video_data = episode.video + if isinstance(video_data, dict) and 'width' in video_data and 'height' in video_data: + width = video_data['width'] + height = video_data['height'] + quality_info['resolution'] = f"{width}x{height}" + + # Convert to common quality names (prioritize width for ultrawide/cinematic content) + if width >= 3840: + quality_info['quality'] = '4K' + elif width >= 1920: + quality_info['quality'] = '1080p' + elif width >= 1280: + quality_info['quality'] = '720p' + elif width >= 854: + quality_info['quality'] = '480p' + else: + quality_info['quality'] = f"{width}x{height}" + return quality_info + + # 3. Extract from episode name/title + if episode and episode.name: + name = episode.name + if '4K' in name or '2160p' in name: + quality_info['quality'] = '4K' + return quality_info + elif '1080p' in name or 'FHD' in name: + quality_info['quality'] = '1080p' + return quality_info + elif '720p' in name or 'HD' in name: + quality_info['quality'] = '720p' + return quality_info + elif '480p' in name: + quality_info['quality'] = '480p' + return quality_info + + # 4. Try bitrate as last resort + if hasattr(episode, 'bitrate') and episode.bitrate and episode.bitrate > 0: + bitrate = episode.bitrate + if bitrate >= 6000: + quality_info['quality'] = '4K' + elif bitrate >= 3000: + quality_info['quality'] = '1080p' + elif bitrate >= 1500: + quality_info['quality'] = '720p' + else: + quality_info['bitrate'] = f"{round(bitrate/1000)}Mbps" + return quality_info + + # 5. 
Fallback - no quality info available
+        return None
+
+
+class EnhancedSeriesSerializer(serializers.ModelSerializer):
+    """Enhanced serializer for series with provider information"""
+    logo = LogoSerializer(read_only=True)
+    providers = M3USeriesRelationSerializer(source='m3u_relations', many=True, read_only=True)
+    episode_count = serializers.SerializerMethodField()
+
+    class Meta:
+        model = Series
+        fields = '__all__'
+
+    def get_episode_count(self, obj):
+        return obj.episodes.count()
diff --git a/apps/vod/tasks.py b/apps/vod/tasks.py
new file mode 100644
index 00000000..1484294b
--- /dev/null
+++ b/apps/vod/tasks.py
@@ -0,0 +1,1873 @@
+from celery import shared_task, current_app, group
+from django.utils import timezone
+from django.db import transaction, IntegrityError
+from django.db.models import Q
+from apps.m3u.models import M3UAccount
+from core.xtream_codes import Client as XtreamCodesClient
+from .models import (
+    VODCategory, Series, Movie, Episode,
+    M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation, M3UVODCategoryRelation
+)
+from apps.channels.models import Logo
+from datetime import datetime
+import logging
+import json
+import re
+
+logger = logging.getLogger(__name__)
+
+
+@shared_task
+def refresh_vod_content(account_id):
+    """Refresh VOD content for an M3U account with batch processing for improved performance"""
+    # Import here to avoid circular import
+    from apps.m3u.tasks import send_m3u_update
+
+    try:
+        account = M3UAccount.objects.get(id=account_id, is_active=True)
+
+        if account.account_type != M3UAccount.Types.XC:
+            logger.warning(f"VOD refresh called for non-XC account {account_id}")
+            return "VOD refresh only available for XtreamCodes accounts"
+
+        logger.info(f"Starting batch VOD refresh for account {account.name}")
+        start_time = timezone.now()
+
+        # Send start notification
+        send_m3u_update(account_id, "vod_refresh", 0, status="processing")
+
+        with XtreamCodesClient(
+            account.server_url,
+            account.username,
+            account.password,
+            account.get_user_agent().user_agent
+        ) as client:
+
+            movie_categories, series_categories = refresh_categories(account.id, client)
+
+            logger.debug("Fetching category relations for filtering")
+            relations = {
+                rel.category_id: rel
+                for rel in M3UVODCategoryRelation.objects
+                    .filter(m3u_account=account)
+                    .select_related("category", "m3u_account")
+            }
+
+            # Refresh movies with batch processing
+            refresh_movies(client, account, movie_categories, relations)
+
+            # Refresh series with batch processing
+            refresh_series(client, account, series_categories, relations)
+
+        end_time = timezone.now()
+        duration = (end_time - start_time).total_seconds()
+
+        logger.info(f"Batch VOD refresh completed for account {account.name} in {duration:.2f} seconds")
+
+        # Send completion notification
+        send_m3u_update(account_id, "vod_refresh", 100, status="success",
+                        message=f"VOD refresh completed in {duration:.2f} seconds")
+
+        return f"Batch VOD refresh completed for account {account.name} in {duration:.2f} seconds"
+
+    except Exception as e:
+        logger.error(f"Error refreshing VOD for account {account_id}: {str(e)}")
+
+        # Send error notification
+        send_m3u_update(account_id, "vod_refresh", 100, status="error",
+                        message=f"VOD refresh failed: {str(e)}")
+
+        return f"VOD refresh failed: {str(e)}"
+
+
+def refresh_categories(account_id, client=None):
+    account = M3UAccount.objects.get(id=account_id, is_active=True)
+
+    if not client:
+        client = XtreamCodesClient(
+            account.server_url,
+            account.username,
+            account.password,
account.get_user_agent().user_agent + ) + logger.info(f"Refreshing movie categories for account {account.name}") + + # First, get the category list to properly map category IDs and names + logger.info("Fetching movie categories from provider...") + categories_data = client.get_vod_categories() + category_map = batch_create_categories(categories_data, 'movie', account) + + # Create a mapping from provider category IDs to our category objects + movies_category_id_map = {} + for cat_data in categories_data: + cat_name = cat_data.get('category_name', 'Unknown') + provider_cat_id = cat_data.get('category_id') + our_category = category_map.get(cat_name) + if provider_cat_id and our_category: + movies_category_id_map[str(provider_cat_id)] = our_category + + # Get the category list to properly map category IDs and names + logger.info("Fetching series categories from provider...") + categories_data = client.get_series_categories() + category_map = batch_create_categories(categories_data, 'series', account) + + # Create a mapping from provider category IDs to our category objects + series_category_id_map = {} + for cat_data in categories_data: + cat_name = cat_data.get('category_name', 'Unknown') + provider_cat_id = cat_data.get('category_id') + our_category = category_map.get(cat_name) + if provider_cat_id and our_category: + series_category_id_map[str(provider_cat_id)] = our_category + + return movies_category_id_map, series_category_id_map + +def refresh_movies(client, account, categories_by_provider, relations): + """Refresh movie content using single API call for all movies""" + logger.info(f"Refreshing movies for account {account.name}") + + # Get all movies in a single API call + logger.info("Fetching all movies from provider...") + all_movies_data = client.get_vod_streams() # No category_id = get all movies + + # Process movies in chunks using the simple approach + chunk_size = 1000 + total_movies = len(all_movies_data) + + for i in range(0, total_movies, chunk_size): + chunk = all_movies_data[i:i + chunk_size] + chunk_num = (i // chunk_size) + 1 + total_chunks = (total_movies + chunk_size - 1) // chunk_size + + logger.info(f"Processing movie chunk {chunk_num}/{total_chunks} ({len(chunk)} movies)") + process_movie_batch(account, chunk, categories_by_provider, relations) + + logger.info(f"Completed processing all {total_movies} movies in {total_chunks} chunks") + + +def refresh_series(client, account, categories_by_provider, relations): + """Refresh series content using single API call for all series""" + logger.info(f"Refreshing series for account {account.name}") + + # Get all series in a single API call + logger.info("Fetching all series from provider...") + all_series_data = client.get_series() # No category_id = get all series + + # Process series in chunks using the simple approach + chunk_size = 1000 + total_series = len(all_series_data) + + for i in range(0, total_series, chunk_size): + chunk = all_series_data[i:i + chunk_size] + chunk_num = (i // chunk_size) + 1 + total_chunks = (total_series + chunk_size - 1) // chunk_size + + logger.info(f"Processing series chunk {chunk_num}/{total_chunks} ({len(chunk)} series)") + process_series_batch(account, chunk, categories_by_provider, relations) + + logger.info(f"Completed processing all {total_series} series in {total_chunks} chunks") + + +def batch_create_categories(categories_data, category_type, account): + """Create categories in batch and return a mapping""" + category_names = [cat.get('category_name', 'Unknown') for cat in 
categories_data]
+
+    relations_to_create = []
+
+    # Get existing categories
+    logger.debug(f"Starting VOD {category_type} category refresh")
+    existing_categories = {
+        cat.name: cat for cat in VODCategory.objects.filter(
+            name__in=category_names,
+            category_type=category_type
+        )
+    }
+
+    logger.debug(f"Found {len(existing_categories)} existing categories")
+
+    # Create missing categories in batch
+    new_categories = []
+    for name in category_names:
+        if name not in existing_categories:
+            new_categories.append(VODCategory(name=name, category_type=category_type))
+        else:
+            relations_to_create.append(M3UVODCategoryRelation(
+                category=existing_categories[name],
+                m3u_account=account,
+                custom_properties={},
+            ))
+
+    logger.debug(f"{len(new_categories)} new categories found")
+    logger.debug(f"{len(relations_to_create)} existing categories found for account")
+
+    if new_categories:
+        logger.debug("Creating new categories...")
+        created_categories = VODCategory.bulk_create_and_fetch(new_categories, ignore_conflicts=True)
+        # Convert to dictionary for easy lookup
+        newly_created = {cat.name: cat for cat in created_categories}
+
+        relations_to_create += [
+            M3UVODCategoryRelation(
+                category=cat,
+                m3u_account=account,
+                custom_properties={},
+            ) for cat in newly_created.values()
+        ]
+
+        existing_categories.update(newly_created)
+
+    # Create missing relations
+    logger.debug("Updating category account relations...")
+    M3UVODCategoryRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True)
+
+    return existing_categories
+
+
+@shared_task
+def process_movie_batch(account, batch, categories, relations):
+    """Process a batch of movies using simple bulk operations like M3U processing"""
+    logger.info(f"Processing movie batch of {len(batch)} movies for account {account.name}")
+
+    movies_to_create = []
+    movies_to_update = []
+    relations_to_create = []
+    relations_to_update = []
+    movie_keys = {}  # For deduplication like M3U stream_hashes
+
+    # Process each movie in the batch
+    for movie_data in batch:
+        try:
+            stream_id = str(movie_data.get('stream_id'))
+            name = movie_data.get('name', 'Unknown')
+
+            # Get category with proper error handling
+            category = None
+
+            provider_cat_id = str(movie_data.get('category_id', '')) if movie_data.get('category_id') else None
+            movie_data['_provider_category_id'] = provider_cat_id
+            movie_data['_category_id'] = None
+
+            logger.debug(f"Checking for existing provider category ID {provider_cat_id}")
+            if provider_cat_id in categories:
+                category = categories[provider_cat_id]
+                movie_data['_category_id'] = category.id
+                logger.debug(f"Found category {category.name} (ID: {category.id}) for movie {name}")
+
+                relation = relations.get(category.id, None)
+                if relation and not relation.enabled:
+                    logger.debug("Skipping disabled category")
+                    continue
+            else:
+                logger.warning(f"No matching category for movie {name} (provider category ID: {provider_cat_id})")
+
+            # Extract metadata
+            year = extract_year_from_data(movie_data, 'name')
+            tmdb_id = movie_data.get('tmdb_id') or movie_data.get('tmdb')
+
imdb_id = movie_data.get('imdb_id') or movie_data.get('imdb') + + # Clean empty string IDs + if tmdb_id == '': + tmdb_id = None + if imdb_id == '': + imdb_id = None + + # Create a unique key for this movie (priority: TMDB > IMDB > name+year) + if tmdb_id: + movie_key = f"tmdb_{tmdb_id}" + elif imdb_id: + movie_key = f"imdb_{imdb_id}" + else: + movie_key = f"name_{name}_{year or 'None'}" + + # Skip duplicates in this batch + if movie_key in movie_keys: + continue + + # Prepare movie properties + description = movie_data.get('description') or movie_data.get('plot') or '' + rating = movie_data.get('rating') or movie_data.get('vote_average') or '' + genre = movie_data.get('genre') or movie_data.get('category_name') or '' + duration_secs = extract_duration_from_data(movie_data) + trailer_raw = movie_data.get('trailer') or movie_data.get('youtube_trailer') or '' + trailer = extract_string_from_array_or_string(trailer_raw) if trailer_raw else None + logo_url = movie_data.get('stream_icon') or '' + + movie_props = { + 'name': name, + 'year': year, + 'tmdb_id': tmdb_id, + 'imdb_id': imdb_id, + 'description': description, + 'rating': rating, + 'genre': genre, + 'duration_secs': duration_secs, + 'custom_properties': {'trailer': trailer} if trailer else None, + } + + movie_keys[movie_key] = { + 'props': movie_props, + 'stream_id': stream_id, + 'category': category, + 'movie_data': movie_data, + 'logo_url': logo_url # Keep logo URL for later processing + } + + except Exception as e: + logger.error(f"Error preparing movie {movie_data.get('name', 'Unknown')}: {str(e)}") + + # Collect all logo URLs and create logos in batch + logo_urls = set() + logo_url_to_name = {} # Map logo URLs to movie names + for data in movie_keys.values(): + logo_url = data.get('logo_url') + if logo_url and len(logo_url) <= 500: # Ignore overly long URLs (likely embedded image data) + logo_urls.add(logo_url) + # Map this logo URL to the movie name (use first occurrence if multiple movies share same logo) + if logo_url not in logo_url_to_name: + movie_name = data['props'].get('name', 'Unknown Movie') + logo_url_to_name[logo_url] = movie_name + + # Get existing logos + existing_logos = { + logo.url: logo for logo in Logo.objects.filter(url__in=logo_urls) + } if logo_urls else {} + + # Create missing logos + logos_to_create = [] + for logo_url in logo_urls: + if logo_url not in existing_logos: + movie_name = logo_url_to_name.get(logo_url, 'Unknown Movie') + logos_to_create.append(Logo(url=logo_url, name=movie_name)) + + if logos_to_create: + try: + Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True) + # Refresh existing_logos with newly created ones + new_logo_urls = [logo.url for logo in logos_to_create] + newly_created = { + logo.url: logo for logo in Logo.objects.filter(url__in=new_logo_urls) + } + existing_logos.update(newly_created) + logger.info(f"Created {len(newly_created)} new logos for movies") + except Exception as e: + logger.warning(f"Failed to create logos: {e}") + + # Get existing movies based on our keys + existing_movies = {} + + # Query by TMDB IDs + tmdb_keys = [k for k in movie_keys.keys() if k.startswith('tmdb_')] + tmdb_ids = [k.replace('tmdb_', '') for k in tmdb_keys] + if tmdb_ids: + for movie in Movie.objects.filter(tmdb_id__in=tmdb_ids): + existing_movies[f"tmdb_{movie.tmdb_id}"] = movie + + # Query by IMDB IDs + imdb_keys = [k for k in movie_keys.keys() if k.startswith('imdb_')] + imdb_ids = [k.replace('imdb_', '') for k in imdb_keys] + if imdb_ids: + for movie in 
Movie.objects.filter(imdb_id__in=imdb_ids): + existing_movies[f"imdb_{movie.imdb_id}"] = movie + + # Query by name+year for movies without external IDs + name_year_keys = [k for k in movie_keys.keys() if k.startswith('name_')] + if name_year_keys: + for movie in Movie.objects.filter(tmdb_id__isnull=True, imdb_id__isnull=True): + key = f"name_{movie.name}_{movie.year or 'None'}" + if key in name_year_keys: + existing_movies[key] = movie + + # Get existing relations + stream_ids = [data['stream_id'] for data in movie_keys.values()] + existing_relations = { + rel.stream_id: rel for rel in M3UMovieRelation.objects.filter( + m3u_account=account, + stream_id__in=stream_ids + ).select_related('movie') + } + + # Process each movie + for movie_key, data in movie_keys.items(): + movie_props = data['props'] + stream_id = data['stream_id'] + category = data['category'] + movie_data = data['movie_data'] + logo_url = data.get('logo_url') + + if movie_key in existing_movies: + # Update existing movie + movie = existing_movies[movie_key] + updated = False + + for field, value in movie_props.items(): + if field == 'custom_properties': + if value != movie.custom_properties: + movie.custom_properties = value + updated = True + elif getattr(movie, field) != value: + setattr(movie, field, value) + updated = True + + # Handle logo assignment for existing movies + if logo_url and len(logo_url) <= 500 and logo_url in existing_logos: + new_logo = existing_logos[logo_url] + if movie.logo != new_logo: + movie.logo = new_logo + updated = True + elif (not logo_url or len(logo_url) > 500) and movie.logo: + # Clear logo if no logo URL provided or URL is too long + movie.logo = None + updated = True + + if updated: + movies_to_update.append(movie) + else: + # Create new movie + movie = Movie(**movie_props) + + # Assign logo if available + if logo_url and len(logo_url) <= 500 and logo_url in existing_logos: + movie.logo = existing_logos[logo_url] + + movies_to_create.append(movie) + + # Handle relation + if stream_id in existing_relations: + # Update existing relation + relation = existing_relations[stream_id] + relation.movie = movie + relation.category = category + relation.container_extension = movie_data.get('container_extension', 'mp4') + relation.custom_properties = { + 'basic_data': movie_data, + 'detailed_fetched': False + } + relations_to_update.append(relation) + else: + # Create new relation + relation = M3UMovieRelation( + m3u_account=account, + movie=movie, + category=category, + stream_id=stream_id, + container_extension=movie_data.get('container_extension', 'mp4'), + custom_properties={ + 'basic_data': movie_data, + 'detailed_fetched': False + } + ) + relations_to_create.append(relation) + + # Execute batch operations + logger.info(f"Executing batch operations: {len(movies_to_create)} movies to create, {len(movies_to_update)} to update") + + try: + with transaction.atomic(): + # First, create new movies and get their IDs + created_movies = {} + if movies_to_create: + Movie.objects.bulk_create(movies_to_create, ignore_conflicts=True) + + # Get the newly created movies with their IDs + # We need to re-fetch them to get the primary keys + for movie in movies_to_create: + # Find the movie by its unique identifiers + if movie.tmdb_id: + db_movie = Movie.objects.filter(tmdb_id=movie.tmdb_id).first() + elif movie.imdb_id: + db_movie = Movie.objects.filter(imdb_id=movie.imdb_id).first() + else: + db_movie = Movie.objects.filter( + name=movie.name, + year=movie.year, + tmdb_id__isnull=True, + imdb_id__isnull=True + 
).first() + + if db_movie: + created_movies[id(movie)] = db_movie + + # Update existing movies + if movies_to_update: + Movie.objects.bulk_update(movies_to_update, [ + 'description', 'rating', 'genre', 'year', 'tmdb_id', 'imdb_id', + 'duration_secs', 'custom_properties', 'logo' + ]) + + # Update relations to reference the correct movie objects + for relation in relations_to_create: + if id(relation.movie) in created_movies: + relation.movie = created_movies[id(relation.movie)] + + # Handle relations + if relations_to_create: + M3UMovieRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True) + + if relations_to_update: + M3UMovieRelation.objects.bulk_update(relations_to_update, [ + 'movie', 'category', 'container_extension', 'custom_properties' + ]) + + logger.info("Movie batch processing completed successfully!") + return f"Movie batch processed: {len(movies_to_create)} created, {len(movies_to_update)} updated" + + except Exception as e: + logger.error(f"Movie batch processing failed: {str(e)}") + return f"Movie batch processing failed: {str(e)}" + + +@shared_task +def process_series_batch(account, batch, categories, relations): + """Process a batch of series using simple bulk operations like M3U processing""" + logger.info(f"Processing series batch of {len(batch)} series for account {account.name}") + + series_to_create = [] + series_to_update = [] + relations_to_create = [] + relations_to_update = [] + series_keys = {} # For deduplication like M3U stream_hashes + + # Process each series in the batch + for series_data in batch: + try: + series_id = str(series_data.get('series_id')) + name = series_data.get('name', 'Unknown') + + # Get category with proper error handling + category = None + + provider_cat_id = str(series_data.get('category_id', '')) if series_data.get('category_id') else None + series_data['_provider_category_id'] = provider_cat_id + series_data['_category_id'] = None + + if provider_cat_id in categories: + category = categories[provider_cat_id] + series_data['_category_id'] = category.id + logger.debug(f"Found category {category.name} (ID: {category.id}) for series {name}") + relation = relations.get(category.id, None) + + if relation and not relation.enabled: + logger.debug("Skipping disabled category") + continue + else: + logger.warning(f"No category ID provided for series {name}") + + # Extract metadata + year = extract_year(series_data.get('releaseDate', '')) + if not year and series_data.get('release_date'): + year = extract_year(series_data.get('release_date')) + + tmdb_id = series_data.get('tmdb') or series_data.get('tmdb_id') + imdb_id = series_data.get('imdb') or series_data.get('imdb_id') + + # Clean empty string IDs + if tmdb_id == '': + tmdb_id = None + if imdb_id == '': + imdb_id = None + + # Create a unique key for this series (priority: TMDB > IMDB > name+year) + if tmdb_id: + series_key = f"tmdb_{tmdb_id}" + elif imdb_id: + series_key = f"imdb_{imdb_id}" + else: + series_key = f"name_{name}_{year or 'None'}" + + # Skip duplicates in this batch + if series_key in series_keys: + continue + + # Prepare series properties + description = series_data.get('plot', '') + rating = series_data.get('rating', '') + genre = series_data.get('genre', '') + logo_url = series_data.get('cover') or '' + + # Extract additional metadata for custom_properties + additional_metadata = {} + for key in ['backdrop_path', 'poster_path', 'original_name', 'first_air_date', 'last_air_date', + 'episode_run_time', 'status', 'type', 'cast', 'director', 'country', 
'language', + 'releaseDate', 'youtube_trailer', 'category_id', 'age', 'seasons']: + value = series_data.get(key) + if value: + # For string-like fields that might be arrays, extract clean strings + if key in ['poster_path', 'youtube_trailer', 'cast', 'director']: + clean_value = extract_string_from_array_or_string(value) + if clean_value: + additional_metadata[key] = clean_value + elif key == 'backdrop_path': + clean_value = extract_string_from_array_or_string(value) + if clean_value: + additional_metadata[key] = [clean_value] + else: + # For other fields, keep as-is if not null/empty + if value is not None and value != '' and value != []: + additional_metadata[key] = value + + series_props = { + 'name': name, + 'year': year, + 'tmdb_id': tmdb_id, + 'imdb_id': imdb_id, + 'description': description, + 'rating': rating, + 'genre': genre, + 'custom_properties': additional_metadata if additional_metadata else None, + } + + series_keys[series_key] = { + 'props': series_props, + 'series_id': series_id, + 'category': category, + 'series_data': series_data, + 'logo_url': logo_url # Keep logo URL for later processing + } + + except Exception as e: + logger.error(f"Error preparing series {series_data.get('name', 'Unknown')}: {str(e)}") + + # Collect all logo URLs and create logos in batch + logo_urls = set() + logo_url_to_name = {} # Map logo URLs to series names + for data in series_keys.values(): + logo_url = data.get('logo_url') + if logo_url and len(logo_url) <= 500: # Ignore overly long URLs (likely embedded image data) + logo_urls.add(logo_url) + # Map this logo URL to the series name (use first occurrence if multiple series share same logo) + if logo_url not in logo_url_to_name: + series_name = data['props'].get('name', 'Unknown Series') + logo_url_to_name[logo_url] = series_name + + # Get existing logos + existing_logos = { + logo.url: logo for logo in Logo.objects.filter(url__in=logo_urls) + } if logo_urls else {} + + # Create missing logos + logos_to_create = [] + for logo_url in logo_urls: + if logo_url not in existing_logos: + series_name = logo_url_to_name.get(logo_url, 'Unknown Series') + logos_to_create.append(Logo(url=logo_url, name=series_name)) + + if logos_to_create: + try: + Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True) + # Refresh existing_logos with newly created ones + new_logo_urls = [logo.url for logo in logos_to_create] + newly_created = { + logo.url: logo for logo in Logo.objects.filter(url__in=new_logo_urls) + } + existing_logos.update(newly_created) + logger.info(f"Created {len(newly_created)} new logos for series") + except Exception as e: + logger.warning(f"Failed to create logos: {e}") + + # Get existing series based on our keys - same pattern as movies + existing_series = {} + + # Query by TMDB IDs + tmdb_keys = [k for k in series_keys.keys() if k.startswith('tmdb_')] + tmdb_ids = [k.replace('tmdb_', '') for k in tmdb_keys] + if tmdb_ids: + for series in Series.objects.filter(tmdb_id__in=tmdb_ids): + existing_series[f"tmdb_{series.tmdb_id}"] = series + + # Query by IMDB IDs + imdb_keys = [k for k in series_keys.keys() if k.startswith('imdb_')] + imdb_ids = [k.replace('imdb_', '') for k in imdb_keys] + if imdb_ids: + for series in Series.objects.filter(imdb_id__in=imdb_ids): + existing_series[f"imdb_{series.imdb_id}"] = series + + # Query by name+year for series without external IDs + name_year_keys = [k for k in series_keys.keys() if k.startswith('name_')] + if name_year_keys: + for series in Series.objects.filter(tmdb_id__isnull=True, 
imdb_id__isnull=True): + key = f"name_{series.name}_{series.year or 'None'}" + if key in name_year_keys: + existing_series[key] = series + + # Get existing relations + series_ids = [data['series_id'] for data in series_keys.values()] + existing_relations = { + rel.external_series_id: rel for rel in M3USeriesRelation.objects.filter( + m3u_account=account, + external_series_id__in=series_ids + ).select_related('series') + } + + # Process each series + for series_key, data in series_keys.items(): + series_props = data['props'] + series_id = data['series_id'] + category = data['category'] + series_data = data['series_data'] + logo_url = data.get('logo_url') + + if series_key in existing_series: + # Update existing series + series = existing_series[series_key] + updated = False + + for field, value in series_props.items(): + if field == 'custom_properties': + if value != series.custom_properties: + series.custom_properties = value + updated = True + elif getattr(series, field) != value: + setattr(series, field, value) + updated = True + + # Handle logo assignment for existing series + if logo_url and len(logo_url) <= 500 and logo_url in existing_logos: + new_logo = existing_logos[logo_url] + if series.logo != new_logo: + series.logo = new_logo + updated = True + elif (not logo_url or len(logo_url) > 500) and series.logo: + # Clear logo if no logo URL provided or URL is too long + series.logo = None + updated = True + + if updated: + series_to_update.append(series) + else: + # Create new series + series = Series(**series_props) + + # Assign logo if available + if logo_url and len(logo_url) <= 500 and logo_url in existing_logos: + series.logo = existing_logos[logo_url] + + series_to_create.append(series) + + # Handle relation + if series_id in existing_relations: + # Update existing relation + relation = existing_relations[series_id] + relation.series = series + relation.category = category + relation.custom_properties = { + 'basic_data': series_data, + 'detailed_fetched': False, + 'episodes_fetched': False + } + relations_to_update.append(relation) + else: + # Create new relation + relation = M3USeriesRelation( + m3u_account=account, + series=series, + category=category, + external_series_id=series_id, + custom_properties={ + 'basic_data': series_data, + 'detailed_fetched': False, + 'episodes_fetched': False + } + ) + relations_to_create.append(relation) + + # Execute batch operations + logger.info(f"Executing batch operations: {len(series_to_create)} series to create, {len(series_to_update)} to update") + + try: + with transaction.atomic(): + # First, create new series and get their IDs + created_series = {} + if series_to_create: + Series.objects.bulk_create(series_to_create, ignore_conflicts=True) + + # Get the newly created series with their IDs + # We need to re-fetch them to get the primary keys + for series in series_to_create: + # Find the series by its unique identifiers + if series.tmdb_id: + db_series = Series.objects.filter(tmdb_id=series.tmdb_id).first() + elif series.imdb_id: + db_series = Series.objects.filter(imdb_id=series.imdb_id).first() + else: + db_series = Series.objects.filter( + name=series.name, + year=series.year, + tmdb_id__isnull=True, + imdb_id__isnull=True + ).first() + + if db_series: + created_series[id(series)] = db_series + + # Update existing series + if series_to_update: + Series.objects.bulk_update(series_to_update, [ + 'description', 'rating', 'genre', 'year', 'tmdb_id', 'imdb_id', + 'custom_properties', 'logo' + ]) + + # Update relations to reference the 
correct series objects
+            for relation in relations_to_create:
+                if id(relation.series) in created_series:
+                    relation.series = created_series[id(relation.series)]
+
+            # Handle relations
+            if relations_to_create:
+                M3USeriesRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True)
+
+            if relations_to_update:
+                M3USeriesRelation.objects.bulk_update(relations_to_update, [
+                    'series', 'category', 'custom_properties'
+                ])
+
+        logger.info("Series batch processing completed successfully!")
+        return f"Series batch processed: {len(series_to_create)} created, {len(series_to_update)} updated"
+
+    except Exception as e:
+        logger.error(f"Series batch processing failed: {str(e)}")
+        return f"Series batch processing failed: {str(e)}"
+
+
+# Helper functions for year and date extraction
+
+def extract_duration_from_data(movie_data):
+    """Extract duration in seconds from movie data"""
+    duration_secs = None
+
+    # Try to extract duration from various possible fields
+    if movie_data.get('duration_secs'):
+        duration_secs = int(movie_data.get('duration_secs'))
+    elif movie_data.get('duration'):
+        # Handle duration that might be in different formats
+        duration_str = str(movie_data.get('duration'))
+        if duration_str.isdigit():
+            duration_secs = int(duration_str) * 60  # Assume minutes if just a number
+        else:
+            # Try to parse time format like "01:30:00"
+            try:
+                time_parts = duration_str.split(':')
+                if len(time_parts) == 3:
+                    hours, minutes, seconds = map(int, time_parts)
+                    duration_secs = (hours * 3600) + (minutes * 60) + seconds
+                elif len(time_parts) == 2:
+                    minutes, seconds = map(int, time_parts)
+                    duration_secs = minutes * 60 + seconds
+            except (ValueError, AttributeError):
+                pass
+
+    return duration_secs
+
+
+def extract_year(date_string):
+    """Extract year from date string"""
+    if not date_string:
+        return None
+    try:
+        return int(date_string.split('-')[0])
+    except (ValueError, IndexError):
+        return None
+
+
+def extract_year_from_title(title):
+    """Extract year from movie title if present"""
+    if not title:
+        return None
+
+    # Pattern for (YYYY) format
+    pattern1 = r'\((\d{4})\)'
+    # Pattern for - YYYY format
+    pattern2 = r'\s-\s(\d{4})'
+    # Pattern for YYYY at the end
+    pattern3 = r'\s(\d{4})$'
+
+    for pattern in [pattern1, pattern2, pattern3]:
+        match = re.search(pattern, title)
+        if match:
+            year = int(match.group(1))
+            # Validate year is reasonable (between 1900 and 2030)
+            if 1900 <= year <= 2030:
+                return year
+
+    return None
+
+
+def extract_year_from_data(data, title_key='name'):
+    """Extract year from various data sources with fallback options"""
+    try:
+        # First try the year field
+        year = data.get('year')
+        if year and str(year).strip():
+            try:
+                year_int = int(year)
+                if 1900 <= year_int <= 2030:
+                    return year_int
+            except (ValueError, TypeError):
+                pass
+
+        # Try releaseDate or release_date fields
+        for date_field in ['releaseDate', 'release_date']:
+            date_value = data.get(date_field)
+            if date_value and isinstance(date_value, str) and date_value.strip():
+                # Extract year from date format like "2011-09-19"
+                try:
+                    year_str = date_value.split('-')[0].strip()
+                    if year_str:
+                        year = int(year_str)
+                        if 1900 <= year <= 2030:
+                            return year
+                except (ValueError, IndexError):
+                    continue
+
+        # Finally try extracting from title
+        title = data.get(title_key, '')
+        if title and title.strip():
+            return extract_year_from_title(title)
+
+    except Exception:
+        # Don't fail processing if year extraction fails
+        pass
+
+    return None
+
+
+def 
extract_date_from_data(data): + """Extract date from various data sources with fallback options""" + try: + for date_field in ['air_date', 'releasedate', 'release_date']: + date_value = data.get(date_field) + if date_value and isinstance(date_value, str) and date_value.strip(): + parsed = parse_date(date_value) + if parsed: + return parsed + except Exception: + # Don't fail processing if date extraction fails + pass + return None + + +def parse_date(date_string): + """Parse date string into a datetime object""" + if not date_string: + return None + try: + # Try to parse ISO format first + return datetime.fromisoformat(date_string) + except ValueError: + # Fallback to parsing with strptime for common formats + try: + return datetime.strptime(date_string, '%Y-%m-%d') + except ValueError: + return None # Return None if parsing fails + + +# Episode processing and other advanced features + +def refresh_series_episodes(account, series, external_series_id, episodes_data=None): + """Refresh episodes for a series - only called on-demand""" + try: + if not episodes_data: + # Fetch detailed series info including episodes + with XtreamCodesClient( + account.server_url, + account.username, + account.password, + account.get_user_agent().user_agent + ) as client: + series_info = client.get_series_info(external_series_id) + if series_info: + # Update series with detailed info + info = series_info.get('info', {}) + if info: + # Only update fields if new value is non-empty and either no existing value or existing value is empty + updated = False + if should_update_field(series.description, info.get('plot')): + series.description = extract_string_from_array_or_string(info.get('plot')) + updated = True + if (info.get('rating') and str(info.get('rating')).strip() and + (not series.rating or not str(series.rating).strip())): + series.rating = info.get('rating') + updated = True + if should_update_field(series.genre, info.get('genre')): + series.genre = extract_string_from_array_or_string(info.get('genre')) + updated = True + + year = extract_year_from_data(info) + if year and not series.year: + series.year = year + updated = True + + if updated: + series.save() + + episodes_data = series_info.get('episodes', {}) + else: + episodes_data = {} + + # Clear existing episodes for this account to handle deletions + Episode.objects.filter( + series=series, + m3u_relations__m3u_account=account + ).delete() + + # Process all episodes in batch + batch_process_episodes(account, series, episodes_data) + + # Update the series relation to mark episodes as fetched + series_relation = M3USeriesRelation.objects.filter( + series=series, + m3u_account=account + ).first() + + if series_relation: + custom_props = series_relation.custom_properties or {} + custom_props['episodes_fetched'] = True + custom_props['detailed_fetched'] = True + series_relation.custom_properties = custom_props + series_relation.last_episode_refresh = timezone.now() + series_relation.save() + + except Exception as e: + logger.error(f"Error refreshing episodes for series {series.name}: {str(e)}") + + +def batch_process_episodes(account, series, episodes_data): + """Process episodes in batches for better performance""" + if not episodes_data: + return + + # Flatten episodes data + all_episodes_data = [] + for season_num, season_episodes in episodes_data.items(): + for episode_data in season_episodes: + episode_data['_season_number'] = int(season_num) + all_episodes_data.append(episode_data) + + if not all_episodes_data: + return + + logger.info(f"Batch 
processing {len(all_episodes_data)} episodes for series {series.name}") + + # Extract episode identifiers + episode_keys = [] + episode_ids = [] + for episode_data in all_episodes_data: + season_num = episode_data['_season_number'] + episode_num = episode_data.get('episode_num', 0) + episode_keys.append((series.id, season_num, episode_num)) + episode_ids.append(str(episode_data.get('id'))) + + # Pre-fetch existing episodes + existing_episodes = {} + for episode in Episode.objects.filter(series=series): + key = (episode.series_id, episode.season_number, episode.episode_number) + existing_episodes[key] = episode + + # Pre-fetch existing episode relations + existing_relations = { + rel.stream_id: rel for rel in M3UEpisodeRelation.objects.filter( + m3u_account=account, + stream_id__in=episode_ids + ).select_related('episode') + } + + # Prepare batch operations + episodes_to_create = [] + episodes_to_update = [] + relations_to_create = [] + relations_to_update = [] + + for episode_data in all_episodes_data: + try: + episode_id = str(episode_data.get('id')) + episode_name = episode_data.get('title', 'Unknown Episode') + season_number = episode_data['_season_number'] + episode_number = episode_data.get('episode_num', 0) + info = episode_data.get('info', {}) + + # Extract episode metadata + description = info.get('plot') or info.get('overview', '') if info else '' + rating = info.get('rating', '') if info else '' + air_date = extract_date_from_data(info) if info else None + duration_secs = info.get('duration_secs') if info else None + tmdb_id = info.get('tmdb_id') if info else None + imdb_id = info.get('imdb_id') if info else None + + # Prepare custom properties + custom_props = {} + if info: + if info.get('crew'): + custom_props['crew'] = info.get('crew') + if info.get('movie_image'): + movie_image = extract_string_from_array_or_string(info.get('movie_image')) + if movie_image: + custom_props['movie_image'] = movie_image + backdrop = extract_string_from_array_or_string(info.get('backdrop_path')) + if backdrop: + custom_props['backdrop_path'] = [backdrop] + + # Find existing episode + episode_key = (series.id, season_number, episode_number) + episode = existing_episodes.get(episode_key) + + if episode: + # Update existing episode + updated = False + if episode_name != episode.name: + episode.name = episode_name + updated = True + if description != episode.description: + episode.description = description + updated = True + if rating != episode.rating: + episode.rating = rating + updated = True + if air_date != episode.air_date: + episode.air_date = air_date + updated = True + if duration_secs != episode.duration_secs: + episode.duration_secs = duration_secs + updated = True + if tmdb_id != episode.tmdb_id: + episode.tmdb_id = tmdb_id + updated = True + if imdb_id != episode.imdb_id: + episode.imdb_id = imdb_id + updated = True + if custom_props != episode.custom_properties: + episode.custom_properties = custom_props if custom_props else None + updated = True + + if updated: + episodes_to_update.append(episode) + else: + # Create new episode + episode = Episode( + series=series, + name=episode_name, + description=description, + air_date=air_date, + rating=rating, + duration_secs=duration_secs, + season_number=season_number, + episode_number=episode_number, + tmdb_id=tmdb_id, + imdb_id=imdb_id, + custom_properties=custom_props if custom_props else None + ) + episodes_to_create.append(episode) + + # Handle episode relation + if episode_id in existing_relations: + # Update existing relation + relation 
= existing_relations[episode_id] + relation.episode = episode + relation.container_extension = episode_data.get('container_extension', 'mp4') + relation.custom_properties = { + 'info': episode_data, + 'season_number': season_number + } + relations_to_update.append(relation) + else: + # Create new relation + relation = M3UEpisodeRelation( + m3u_account=account, + episode=episode, + stream_id=episode_id, + container_extension=episode_data.get('container_extension', 'mp4'), + custom_properties={ + 'info': episode_data, + 'season_number': season_number + } + ) + relations_to_create.append(relation) + + except Exception as e: + logger.error(f"Error preparing episode {episode_data.get('title', 'Unknown')}: {str(e)}") + + # Execute batch operations + with transaction.atomic(): + # Create new episodes + if episodes_to_create: + Episode.objects.bulk_create(episodes_to_create) + + # Update existing episodes + if episodes_to_update: + Episode.objects.bulk_update(episodes_to_update, [ + 'name', 'description', 'air_date', 'rating', 'duration_secs', + 'tmdb_id', 'imdb_id', 'custom_properties' + ]) + + # Create new episode relations + if relations_to_create: + M3UEpisodeRelation.objects.bulk_create(relations_to_create) + + # Update existing episode relations + if relations_to_update: + M3UEpisodeRelation.objects.bulk_update(relations_to_update, [ + 'episode', 'container_extension', 'custom_properties' + ]) + + logger.info(f"Batch processed episodes: {len(episodes_to_create)} new, {len(episodes_to_update)} updated, " + f"{len(relations_to_create)} new relations, {len(relations_to_update)} updated relations") + + +@shared_task +def batch_refresh_series_episodes(account_id, series_ids=None): + """ + Batch refresh episodes for multiple series. + If series_ids is None, refresh all series that haven't been refreshed recently. 
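+
+    Illustrative invocation (hypothetical IDs; assumes a running Celery worker):
+
+        batch_refresh_series_episodes.delay(3)                       # stale series only
+        batch_refresh_series_episodes.delay(3, series_ids=[55, 56])  # explicit series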
+ """ + try: + account = M3UAccount.objects.get(id=account_id, is_active=True) + + if account.account_type != M3UAccount.Types.XC: + logger.warning(f"Episode refresh called for non-XC account {account_id}") + return "Episode refresh only available for XtreamCodes accounts" + + # Determine which series to refresh + if series_ids: + series_relations = M3USeriesRelation.objects.filter( + m3u_account=account, + series__id__in=series_ids + ).select_related('series') + else: + # Refresh series that haven't been refreshed in the last 24 hours + cutoff_time = timezone.now() - timezone.timedelta(hours=24) + series_relations = M3USeriesRelation.objects.filter( + m3u_account=account, + last_episode_refresh__lt=cutoff_time + ).select_related('series') + + logger.info(f"Batch refreshing episodes for {series_relations.count()} series") + + with XtreamCodesClient( + account.server_url, + account.username, + account.password, + account.get_user_agent().user_agent + ) as client: + + refreshed_count = 0 + for relation in series_relations: + try: + refresh_series_episodes( + account, + relation.series, + relation.external_series_id + ) + refreshed_count += 1 + except Exception as e: + logger.error(f"Error refreshing episodes for series {relation.series.name}: {str(e)}") + + logger.info(f"Batch episode refresh completed for {refreshed_count} series") + return f"Batch episode refresh completed for {refreshed_count} series" + + except Exception as e: + logger.error(f"Error in batch episode refresh for account {account_id}: {str(e)}") + return f"Batch episode refresh failed: {str(e)}" + + +@shared_task +def cleanup_orphaned_vod_content(): + """Clean up VOD content that has no M3U relations""" + # Clean up movies with no relations + orphaned_movies = Movie.objects.filter(m3u_relations__isnull=True) + movie_count = orphaned_movies.count() + orphaned_movies.delete() + + # Clean up series with no relations + orphaned_series = Series.objects.filter(m3u_relations__isnull=True) + series_count = orphaned_series.count() + orphaned_series.delete() + + # Episodes will be cleaned up via CASCADE when series are deleted + + logger.info(f"Cleaned up {movie_count} orphaned movies and {series_count} orphaned series") + return f"Cleaned up {movie_count} movies and {series_count} series" + + +def handle_movie_id_conflicts(current_movie, relation, tmdb_id_to_set, imdb_id_to_set): + """ + Handle potential duplicate key conflicts when setting tmdb_id or imdb_id. + + Since this is called when a user is actively accessing movie details, we always + preserve the current movie (user's selection) and merge the existing one into it. + This prevents breaking the user's current viewing experience. 
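+
+    Illustrative scenario (hypothetical IDs): the user opens movie 42 and the
+    provider now reports tmdb_id "603", but movie 17 already holds that tmdb_id.
+    The conflicting ID is first cleared from movie 17, its data and M3U relations
+    are merged into movie 42, movie 17 is deleted, and movie 42 receives the ID.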
+ + Returns: + tuple: (movie_to_use, relation_was_updated) + """ + from django.db import IntegrityError + + existing_movie_with_tmdb = None + existing_movie_with_imdb = None + + # Check for existing movies with these IDs + if tmdb_id_to_set: + try: + existing_movie_with_tmdb = Movie.objects.get(tmdb_id=tmdb_id_to_set) + except Movie.DoesNotExist: + pass + + if imdb_id_to_set: + try: + existing_movie_with_imdb = Movie.objects.get(imdb_id=imdb_id_to_set) + except Movie.DoesNotExist: + pass + + # If no conflicts, proceed normally + if not existing_movie_with_tmdb and not existing_movie_with_imdb: + return current_movie, False + + # Determine which existing movie has the conflicting ID (prefer TMDB match) + existing_movie = existing_movie_with_tmdb or existing_movie_with_imdb + + # CRITICAL: Check if the existing movie is actually the same as the current movie + # This can happen if the current movie already has the ID we're trying to set + if existing_movie.id == current_movie.id: + logger.debug(f"Current movie {current_movie.id} already has the target ID, no conflict resolution needed") + return current_movie, False + + logger.info(f"ID conflict detected: Merging existing movie '{existing_movie.name}' (ID: {existing_movie.id}) into current movie '{current_movie.name}' (ID: {current_movie.id}) to preserve user selection") + + # FIRST: Clear the conflicting ID from the existing movie before any merging + if existing_movie_with_tmdb and tmdb_id_to_set: + logger.info(f"Clearing tmdb_id from existing movie {existing_movie.id} to avoid constraint violation") + existing_movie.tmdb_id = None + existing_movie.save(update_fields=['tmdb_id']) + + if existing_movie_with_imdb and imdb_id_to_set: + logger.info(f"Clearing imdb_id from existing movie {existing_movie.id} to avoid constraint violation") + existing_movie.imdb_id = None + existing_movie.save(update_fields=['imdb_id']) + + # THEN: Merge data from existing movie into current movie (now safe to set IDs) + merge_movie_data(source_movie=existing_movie, target_movie=current_movie, + tmdb_id_to_set=tmdb_id_to_set, imdb_id_to_set=imdb_id_to_set) + + # Transfer all relations from existing movie to current movie + existing_relations = existing_movie.m3u_relations.all() + if existing_relations.exists(): + logger.info(f"Transferring {existing_relations.count()} relations from existing movie {existing_movie.id} to current movie {current_movie.id}") + existing_relations.update(movie=current_movie) + + # Now safe to delete the existing movie since all its relations have been transferred + logger.info(f"Deleting existing movie {existing_movie.id} '{existing_movie.name}' after merging data and transferring relations") + existing_movie.delete() + + return current_movie, False # No relation update needed since we kept current movie + + +def merge_movie_data(source_movie, target_movie, tmdb_id_to_set=None, imdb_id_to_set=None): + """ + Merge valuable data from source_movie into target_movie. + Only overwrites target fields that are empty/None with non-empty source values. 
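+
+    Illustrative example (hypothetical values): if the target has description=''
+    and year=2004 while the source has description='Heist thriller' and year=1999,
+    only the description is copied; the target's existing year is preserved.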
+ + Args: + source_movie: Movie to copy data from + target_movie: Movie to copy data to + tmdb_id_to_set: TMDB ID to set on target (overrides source tmdb_id) + imdb_id_to_set: IMDB ID to set on target (overrides source imdb_id) + """ + updated = False + + # Basic fields - only fill if target is empty + if not target_movie.description and source_movie.description: + target_movie.description = source_movie.description + updated = True + logger.debug(f"Merged description from movie {source_movie.id} to {target_movie.id}") + + if not target_movie.year and source_movie.year: + target_movie.year = source_movie.year + updated = True + logger.debug(f"Merged year from movie {source_movie.id} to {target_movie.id}") + + if not target_movie.rating and source_movie.rating: + target_movie.rating = source_movie.rating + updated = True + logger.debug(f"Merged rating from movie {source_movie.id} to {target_movie.id}") + + if not target_movie.genre and source_movie.genre: + target_movie.genre = source_movie.genre + updated = True + logger.debug(f"Merged genre from movie {source_movie.id} to {target_movie.id}") + + if not target_movie.duration_secs and source_movie.duration_secs: + target_movie.duration_secs = source_movie.duration_secs + updated = True + logger.debug(f"Merged duration_secs from movie {source_movie.id} to {target_movie.id}") + + if not target_movie.logo and source_movie.logo: + target_movie.logo = source_movie.logo + updated = True + logger.debug(f"Merged logo from movie {source_movie.id} to {target_movie.id}") + + # Handle external IDs - use the specific IDs we want to set, or fall back to source + if not target_movie.tmdb_id: + if tmdb_id_to_set: + target_movie.tmdb_id = tmdb_id_to_set + updated = True + logger.debug(f"Set tmdb_id {tmdb_id_to_set} on movie {target_movie.id}") + elif source_movie.tmdb_id: + target_movie.tmdb_id = source_movie.tmdb_id + updated = True + logger.debug(f"Merged tmdb_id from movie {source_movie.id} to {target_movie.id}") + + if not target_movie.imdb_id: + if imdb_id_to_set: + target_movie.imdb_id = imdb_id_to_set + updated = True + logger.debug(f"Set imdb_id {imdb_id_to_set} on movie {target_movie.id}") + elif source_movie.imdb_id: + target_movie.imdb_id = source_movie.imdb_id + updated = True + logger.debug(f"Merged imdb_id from movie {source_movie.id} to {target_movie.id}") + + # Merge custom properties + target_props = target_movie.custom_properties or {} + source_props = source_movie.custom_properties or {} + + for key, value in source_props.items(): + if value and not target_props.get(key): + target_props[key] = value + updated = True + logger.debug(f"Merged custom property '{key}' from movie {source_movie.id} to {target_movie.id}") + + if updated: + target_movie.custom_properties = target_props + target_movie.save() + logger.info(f"Successfully merged data from movie {source_movie.id} into {target_movie.id}") + + +def handle_series_id_conflicts(current_series, relation, tmdb_id_to_set, imdb_id_to_set): + """ + Handle potential duplicate key conflicts when setting tmdb_id or imdb_id for series. + + Since this is called when a user is actively accessing series details, we always + preserve the current series (user's selection) and merge the existing one into it. + This prevents breaking the user's current viewing experience. 
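+
+    The resolution steps mirror handle_movie_id_conflicts: clear the conflicting
+    ID on the existing series, merge its data and M3U relations into the current
+    series, then delete the now-duplicate record.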
+ + Returns: + tuple: (series_to_use, relation_was_updated) + """ + from django.db import IntegrityError + + existing_series_with_tmdb = None + existing_series_with_imdb = None + + # Check for existing series with these IDs + if tmdb_id_to_set: + try: + existing_series_with_tmdb = Series.objects.get(tmdb_id=tmdb_id_to_set) + except Series.DoesNotExist: + pass + + if imdb_id_to_set: + try: + existing_series_with_imdb = Series.objects.get(imdb_id=imdb_id_to_set) + except Series.DoesNotExist: + pass + + # If no conflicts, proceed normally + if not existing_series_with_tmdb and not existing_series_with_imdb: + return current_series, False + + # Determine which existing series has the conflicting ID (prefer TMDB match) + existing_series = existing_series_with_tmdb or existing_series_with_imdb + + # CRITICAL: Check if the existing series is actually the same as the current series + # This can happen if the current series already has the ID we're trying to set + if existing_series.id == current_series.id: + logger.debug(f"Current series {current_series.id} already has the target ID, no conflict resolution needed") + return current_series, False + + logger.info(f"ID conflict detected: Merging existing series '{existing_series.name}' (ID: {existing_series.id}) into current series '{current_series.name}' (ID: {current_series.id}) to preserve user selection") + + # FIRST: Clear the conflicting ID from the existing series before any merging + if existing_series_with_tmdb and tmdb_id_to_set: + logger.info(f"Clearing tmdb_id from existing series {existing_series.id} to avoid constraint violation") + existing_series.tmdb_id = None + existing_series.save(update_fields=['tmdb_id']) + + if existing_series_with_imdb and imdb_id_to_set: + logger.info(f"Clearing imdb_id from existing series {existing_series.id} to avoid constraint violation") + existing_series.imdb_id = None + existing_series.save(update_fields=['imdb_id']) + + # THEN: Merge data from existing series into current series (now safe to set IDs) + merge_series_data(source_series=existing_series, target_series=current_series, + tmdb_id_to_set=tmdb_id_to_set, imdb_id_to_set=imdb_id_to_set) + + # Transfer all relations from existing series to current series + existing_relations = existing_series.m3u_relations.all() + if existing_relations.exists(): + logger.info(f"Transferring {existing_relations.count()} relations from existing series {existing_series.id} to current series {current_series.id}") + existing_relations.update(series=current_series) + + # Now safe to delete the existing series since all its relations have been transferred + logger.info(f"Deleting existing series {existing_series.id} '{existing_series.name}' after merging data and transferring relations") + existing_series.delete() + + return current_series, False # No relation update needed since we kept current series + + +def merge_series_data(source_series, target_series, tmdb_id_to_set=None, imdb_id_to_set=None): + """ + Merge valuable data from source_series into target_series. + Only overwrites target fields that are empty/None with non-empty source values. 
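+
+    Illustrative example (hypothetical values): with source custom_properties
+    {'cast': 'A. Actor', 'status': ''} and target {'cast': ''}, only 'cast' is
+    copied ('status' is empty and skipped), leaving {'cast': 'A. Actor'}.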
+ + Args: + source_series: Series to copy data from + target_series: Series to copy data to + tmdb_id_to_set: TMDB ID to set on target (overrides source tmdb_id) + imdb_id_to_set: IMDB ID to set on target (overrides source imdb_id) + """ + updated = False + + # Basic fields - only fill if target is empty + if not target_series.description and source_series.description: + target_series.description = source_series.description + updated = True + logger.debug(f"Merged description from series {source_series.id} to {target_series.id}") + + if not target_series.year and source_series.year: + target_series.year = source_series.year + updated = True + logger.debug(f"Merged year from series {source_series.id} to {target_series.id}") + + if not target_series.rating and source_series.rating: + target_series.rating = source_series.rating + updated = True + logger.debug(f"Merged rating from series {source_series.id} to {target_series.id}") + + if not target_series.genre and source_series.genre: + target_series.genre = source_series.genre + updated = True + logger.debug(f"Merged genre from series {source_series.id} to {target_series.id}") + + if not target_series.logo and source_series.logo: + target_series.logo = source_series.logo + updated = True + logger.debug(f"Merged logo from series {source_series.id} to {target_series.id}") + + # Handle external IDs - use the specific IDs we want to set, or fall back to source + if not target_series.tmdb_id: + if tmdb_id_to_set: + target_series.tmdb_id = tmdb_id_to_set + updated = True + logger.debug(f"Set tmdb_id {tmdb_id_to_set} on series {target_series.id}") + elif source_series.tmdb_id: + target_series.tmdb_id = source_series.tmdb_id + updated = True + logger.debug(f"Merged tmdb_id from series {source_series.id} to {target_series.id}") + + if not target_series.imdb_id: + if imdb_id_to_set: + target_series.imdb_id = imdb_id_to_set + updated = True + logger.debug(f"Set imdb_id {imdb_id_to_set} on series {target_series.id}") + elif source_series.imdb_id: + target_series.imdb_id = source_series.imdb_id + updated = True + logger.debug(f"Merged imdb_id from series {source_series.id} to {target_series.id}") + + # Merge custom properties + target_props = target_series.custom_properties or {} + source_props = source_series.custom_properties or {} + + for key, value in source_props.items(): + if value and not target_props.get(key): + target_props[key] = value + updated = True + logger.debug(f"Merged custom property '{key}' from series {source_series.id} to {target_series.id}") + + if updated: + target_series.custom_properties = target_props + target_series.save() + logger.info(f"Successfully merged data from series {source_series.id} into {target_series.id}") + + +def is_non_empty_string(value): + """ + Helper function to safely check if a value is a non-empty string. + Returns True only if value is a string and has non-whitespace content. + """ + return isinstance(value, str) and value.strip() + + +def extract_string_from_array_or_string(value): + """ + Helper function to extract a string value from either a string or array. + Returns the first non-null string from an array, or the string itself. + Returns None if no valid string is found. 
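+
+    Illustrative examples:
+        extract_string_from_array_or_string('  Drama ')        -> 'Drama'
+        extract_string_from_array_or_string([None, ' Action']) -> 'Action'
+        extract_string_from_array_or_string(['', None])        -> None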
+ """ + if isinstance(value, str): + return value.strip() if value.strip() else None + elif isinstance(value, list) and value: + # Find first non-null, non-empty string in the array + for item in value: + if isinstance(item, str) and item.strip(): + return item.strip() + elif item is not None and str(item).strip(): + return str(item).strip() + return None + + +def clean_custom_properties(custom_props): + """ + Remove null, empty, or invalid values from custom_properties dict. + Only keeps properties that have meaningful values. + """ + if not custom_props: + return None + + cleaned = {} + for key, value in custom_props.items(): + # Handle fields that should extract clean strings + if key in ['youtube_trailer', 'actors', 'director', 'cast']: + clean_value = extract_string_from_array_or_string(value) + if clean_value: + cleaned[key] = clean_value + # Handle backdrop_path which should remain as array format + elif key == 'backdrop_path': + clean_value = extract_string_from_array_or_string(value) + if clean_value: + cleaned[key] = [clean_value] + else: + # For other properties, keep them if they're not None and not empty + if value is not None and value != '' and value != []: + # If it's a list with only null values, skip it + if isinstance(value, list) and all(item is None for item in value): + continue + cleaned[key] = value + + return cleaned if cleaned else None + + +def should_update_field(existing_value, new_value): + """ + Helper function to determine if we should update a field. + Returns True if: + - new_value is a non-empty string (or contains one if it's an array) AND + - existing_value is None, empty string, array with null/empty values, or non-string + """ + # Extract actual string values from arrays if needed + new_string = extract_string_from_array_or_string(new_value) + existing_string = extract_string_from_array_or_string(existing_value) + + return new_string is not None and (existing_string is None or not existing_string) + + +@shared_task +def refresh_movie_advanced_data(m3u_movie_relation_id, force_refresh=False): + """ + Fetch advanced movie data from provider and update Movie and M3UMovieRelation. + Only fetch if last_advanced_refresh > 24h ago, unless force_refresh is True. + """ + try: + relation = M3UMovieRelation.objects.select_related('movie', 'm3u_account').get(id=m3u_movie_relation_id) + now = timezone.now() + if not force_refresh and relation.last_advanced_refresh and (now - relation.last_advanced_refresh).total_seconds() < 86400: + return "Advanced data recently fetched, skipping." 
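+
+        # From here the provider is queried for detailed VOD info and any
+        # non-empty fields are merged into the canonical Movie record.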
+ + account = relation.m3u_account + movie = relation.movie + + from core.xtream_codes import Client as XtreamCodesClient + + with XtreamCodesClient( + server_url=account.server_url, + username=account.username, + password=account.password, + user_agent=account.get_user_agent().user_agent + ) as client: + vod_info = client.get_vod_info(relation.stream_id) + if vod_info and 'info' in vod_info: + info_raw = vod_info.get('info', {}) + + # Handle case where 'info' might be a list instead of dict + if isinstance(info_raw, list): + # If it's a list, try to use the first item or create empty dict + info = info_raw[0] if info_raw and isinstance(info_raw[0], dict) else {} + logger.warning(f"VOD info for stream {relation.stream_id} returned list instead of dict, using first item") + elif isinstance(info_raw, dict): + info = info_raw + else: + info = {} + logger.warning(f"VOD info for stream {relation.stream_id} returned unexpected type: {type(info_raw)}") + + movie_data_raw = vod_info.get('movie_data', {}) + + # Handle case where 'movie_data' might be a list instead of dict + if isinstance(movie_data_raw, list): + movie_data = movie_data_raw[0] if movie_data_raw and isinstance(movie_data_raw[0], dict) else {} + logger.warning(f"VOD movie_data for stream {relation.stream_id} returned list instead of dict, using first item") + elif isinstance(movie_data_raw, dict): + movie_data = movie_data_raw + else: + movie_data = {} + logger.warning(f"VOD movie_data for stream {relation.stream_id} returned unexpected type: {type(movie_data_raw)}") + + # Update Movie fields if changed + updated = False + custom_props = movie.custom_properties or {} + if info.get('plot') and info.get('plot') != movie.description: + movie.description = info.get('plot') + updated = True + if info.get('rating') and info.get('rating') != movie.rating: + movie.rating = info.get('rating') + updated = True + if info.get('genre') and info.get('genre') != movie.genre: + movie.genre = info.get('genre') + updated = True + if info.get('duration_secs'): + duration_secs = int(info.get('duration_secs')) + if duration_secs != movie.duration_secs: + movie.duration_secs = duration_secs + updated = True + # Check for releasedate or release_date + release_date_value = info.get('releasedate') or info.get('release_date') + if release_date_value: + try: + year = int(str(release_date_value).split('-')[0]) + if year != movie.year: + movie.year = year + updated = True + except Exception: + pass + # Handle TMDB/IMDB ID updates with duplicate key protection + tmdb_id_to_set = info.get('tmdb_id') if info.get('tmdb_id') and info.get('tmdb_id') != movie.tmdb_id else None + imdb_id_to_set = info.get('imdb_id') if info.get('imdb_id') and info.get('imdb_id') != movie.imdb_id else None + + logger.debug(f"Movie {movie.id} current IDs: tmdb_id={movie.tmdb_id}, imdb_id={movie.imdb_id}") + logger.debug(f"IDs to set: tmdb_id={tmdb_id_to_set}, imdb_id={imdb_id_to_set}") + + if tmdb_id_to_set or imdb_id_to_set: + # Check for existing movies with these IDs and handle duplicates + updated_movie, relation_updated = handle_movie_id_conflicts( + movie, relation, tmdb_id_to_set, imdb_id_to_set + ) + if relation_updated: + # If the relation was updated to point to a different movie, + # we need to update our reference and continue with that movie + movie = updated_movie + logger.info(f"Relation updated, now working with movie {movie.id}") + else: + # No relation update, safe to set the IDs + if tmdb_id_to_set: + movie.tmdb_id = tmdb_id_to_set + updated = True + logger.debug(f"Set 
tmdb_id {tmdb_id_to_set} on movie {movie.id}") + if imdb_id_to_set: + movie.imdb_id = imdb_id_to_set + updated = True + logger.debug(f"Set imdb_id {imdb_id_to_set} on movie {movie.id}") + # Only update trailer if we have a non-empty value and either no existing value or existing value is empty + if should_update_field(custom_props.get('youtube_trailer'), info.get('trailer')): + custom_props['youtube_trailer'] = extract_string_from_array_or_string(info.get('trailer')) + updated = True + if should_update_field(custom_props.get('youtube_trailer'), info.get('youtube_trailer')): + custom_props['youtube_trailer'] = extract_string_from_array_or_string(info.get('youtube_trailer')) + updated = True + # Only update backdrop_path if we have a non-empty value and either no existing value or existing value is empty + if should_update_field(custom_props.get('backdrop_path'), info.get('backdrop_path')): + backdrop_url = extract_string_from_array_or_string(info.get('backdrop_path')) + custom_props['backdrop_path'] = [backdrop_url] if backdrop_url else None + updated = True + # Only update actors if we have a non-empty value and either no existing value or existing value is empty + if should_update_field(custom_props.get('actors'), info.get('actors')): + custom_props['actors'] = extract_string_from_array_or_string(info.get('actors')) + updated = True + if should_update_field(custom_props.get('actors'), info.get('cast')): + custom_props['actors'] = extract_string_from_array_or_string(info.get('cast')) + updated = True + # Only update director if we have a non-empty value and either no existing value or existing value is empty + if should_update_field(custom_props.get('director'), info.get('director')): + custom_props['director'] = extract_string_from_array_or_string(info.get('director')) + updated = True + if updated: + # Clean custom_properties before saving to remove null/empty values + movie.custom_properties = clean_custom_properties(custom_props) + try: + movie.save() + except Exception as save_error: + # If we still get an integrity error after our conflict resolution, + # log it and try to save without the problematic IDs + logger.error(f"Failed to save movie {movie.id} after conflict resolution: {str(save_error)}") + if 'tmdb_id' in str(save_error) and movie.tmdb_id: + logger.warning(f"Clearing tmdb_id {movie.tmdb_id} from movie {movie.id} due to save error") + movie.tmdb_id = None + if 'imdb_id' in str(save_error) and movie.imdb_id: + logger.warning(f"Clearing imdb_id {movie.imdb_id} from movie {movie.id} due to save error") + movie.imdb_id = None + try: + movie.save() + logger.info(f"Successfully saved movie {movie.id} after clearing problematic IDs") + except Exception as final_error: + logger.error(f"Final save attempt failed for movie {movie.id}: {str(final_error)}") + raise + + # Update relation custom_properties and last_advanced_refresh + relation_custom_props = relation.custom_properties or {} + + # Clean the detailed_info before saving to avoid storing null/empty arrays + cleaned_info = clean_custom_properties(info) if info else None + cleaned_movie_data = clean_custom_properties(movie_data) if movie_data else None + + if cleaned_info: + relation_custom_props['detailed_info'] = cleaned_info + if cleaned_movie_data: + relation_custom_props['movie_data'] = cleaned_movie_data + relation_custom_props['detailed_fetched'] = True + + relation.custom_properties = relation_custom_props + relation.last_advanced_refresh = now + relation.save(update_fields=['custom_properties', 
'last_advanced_refresh']) + + return "Advanced data refreshed." + except Exception as e: + logger.error(f"Error refreshing advanced movie data for relation {m3u_movie_relation_id}: {str(e)}") + return f"Error: {str(e)}" + + +def validate_logo_reference(obj, obj_type="object"): + """ + Validate that a logo reference exists in the database. + If not, set it to None to prevent foreign key constraint violations. + + Args: + obj: Object with a logo attribute + obj_type: String description of the object type for logging + + Returns: + bool: True if logo was valid or None, False if logo was invalid and cleared + """ + if not hasattr(obj, 'logo') or not obj.logo: + return True + + if not obj.logo.pk: + # Logo doesn't have a primary key, so it's not saved + obj.logo = None + return False + + try: + # Verify the logo exists in the database + Logo.objects.get(pk=obj.logo.pk) + return True + except Logo.DoesNotExist: + logger.warning(f"Logo with ID {obj.logo.pk} does not exist in database for {obj_type} '{getattr(obj, 'name', 'Unknown')}', setting to None") + obj.logo = None + return False diff --git a/apps/vod/urls.py b/apps/vod/urls.py new file mode 100644 index 00000000..f90e3fb6 --- /dev/null +++ b/apps/vod/urls.py @@ -0,0 +1,16 @@ +from django.urls import path, include +from rest_framework.routers import DefaultRouter +from .api_views import MovieViewSet, EpisodeViewSet, SeriesViewSet, VODCategoryViewSet, VODConnectionViewSet + +app_name = 'vod' + +router = DefaultRouter() +router.register(r'movies', MovieViewSet) +router.register(r'episodes', EpisodeViewSet) +router.register(r'series', SeriesViewSet) +router.register(r'categories', VODCategoryViewSet) +router.register(r'connections', VODConnectionViewSet) + +urlpatterns = [ + path('api/', include(router.urls)), +] diff --git a/core/api_urls.py b/core/api_urls.py index 30714d44..00e20a6e 100644 --- a/core/api_urls.py +++ b/core/api_urls.py @@ -8,7 +8,6 @@ router = DefaultRouter() router.register(r'useragents', UserAgentViewSet, basename='useragent') router.register(r'streamprofiles', StreamProfileViewSet, basename='streamprofile') router.register(r'settings', CoreSettingsViewSet, basename='coresettings') -router.register(r'settings', CoreSettingsViewSet, basename='settings') urlpatterns = [ path('settings/env/', environment, name='token_refresh'), path('version/', version, name='version'), diff --git a/core/tasks.py b/core/tasks.py index 47bc8cf0..8e9dfb66 100644 --- a/core/tasks.py +++ b/core/tasks.py @@ -1,4 +1,3 @@ -# yourapp/tasks.py from celery import shared_task from channels.layers import get_channel_layer from asgiref.sync import async_to_sync @@ -633,3 +632,17 @@ def rehash_streams(keys): for account_id in acquired_locks: release_task_lock('refresh_single_m3u_account', account_id) logger.info(f"Released M3U task locks for {len(acquired_locks)} accounts") + + +@shared_task +def cleanup_vod_persistent_connections(): + """Clean up stale VOD persistent connections""" + try: + from apps.proxy.vod_proxy.connection_manager import VODConnectionManager + + # Clean up connections older than 30 minutes + VODConnectionManager.cleanup_stale_persistent_connections(max_age_seconds=1800) + logger.info("VOD persistent connection cleanup completed") + + except Exception as e: + logger.error(f"Error during VOD persistent connection cleanup: {e}") diff --git a/core/views.py b/core/views.py index 397783fb..d10df027 100644 --- a/core/views.py +++ b/core/views.py @@ -73,7 +73,6 @@ def stream_view(request, channel_uuid): default_profile = next((obj for obj in 
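`cleanup_vod_persistent_connections` is added as a plain `@shared_task`; the diff does not show how it is scheduled. A minimal Celery beat entry might look like the sketch below — the schedule name and five-minute interval are assumptions, not part of the patch:

```python
# dispatcharr/settings.py (sketch -- the diff adds the task but not a schedule)
from datetime import timedelta

CELERY_BEAT_SCHEDULE = {
    "cleanup-vod-persistent-connections": {
        # Task path matches the new task in core/tasks.py above
        "task": "core.tasks.cleanup_vod_persistent_connections",
        # Interval is an assumption; the task itself prunes connections
        # older than 30 minutes (max_age_seconds=1800)
        "schedule": timedelta(minutes=5),
    },
}
```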
m3u_profiles if obj.is_default), None) profiles = [obj for obj in m3u_profiles if not obj.is_default] - # -- Loop through profiles and pick the first active one -- for profile in [default_profile] + profiles: logger.debug(f'Checking profile {profile.name}...') @@ -174,7 +173,7 @@ def stream_view(request, channel_uuid): persistent_lock.release() logger.debug("Persistent lock released for channel ID=%s", channel.id) - return StreamingHttpResponse( - stream_generator(process, stream, persistent_lock), - content_type="video/MP2T" - ) + return StreamingHttpResponse( + stream_generator(process, stream, persistent_lock), + content_type="video/MP2T" + ) diff --git a/core/xtream_codes.py b/core/xtream_codes.py index d068bacb..469f3a9c 100644 --- a/core/xtream_codes.py +++ b/core/xtream_codes.py @@ -196,6 +196,184 @@ class Client: """Get the playback URL for a stream""" return f"{self.server_url}/live/{self.username}/{self.password}/{stream_id}.ts" + def get_episode_stream_url(self, stream_id, container_extension='mp4'): + """Get the playback URL for an episode stream""" + return f"{self.server_url}/series/{self.username}/{self.password}/{stream_id}.{container_extension}" + + def get_vod_stream_url(self, stream_id, container_extension='mp4'): + """Get the playback URL for a VOD stream""" + return f"{self.server_url}/movie/{self.username}/{self.password}/{stream_id}.{container_extension}" + + def get_vod_categories(self): + """Get VOD categories""" + try: + if not self.server_info: + self.authenticate() + + endpoint = "player_api.php" + params = { + 'username': self.username, + 'password': self.password, + 'action': 'get_vod_categories' + } + + categories = self._make_request(endpoint, params) + + if not isinstance(categories, list): + error_msg = f"Invalid VOD categories response: {categories}" + logger.error(error_msg) + raise ValueError(error_msg) + + logger.info(f"Successfully retrieved {len(categories)} VOD categories") + return categories + except Exception as e: + logger.error(f"Failed to get VOD categories: {str(e)}") + logger.error(traceback.format_exc()) + raise + + def get_vod_streams(self, category_id=None): + """Get VOD streams for a specific category""" + try: + if not self.server_info: + self.authenticate() + + endpoint = "player_api.php" + params = { + 'username': self.username, + 'password': self.password, + 'action': 'get_vod_streams' + } + + if category_id: + params['category_id'] = category_id + + streams = self._make_request(endpoint, params) + + if not isinstance(streams, list): + error_msg = f"Invalid VOD streams response for category {category_id}: {streams}" + logger.error(error_msg) + raise ValueError(error_msg) + + logger.info(f"Successfully retrieved {len(streams)} VOD streams for category {category_id}") + return streams + except Exception as e: + logger.error(f"Failed to get VOD streams for category {category_id}: {str(e)}") + logger.error(traceback.format_exc()) + raise + + def get_vod_info(self, vod_id): + """Get detailed information for a specific VOD""" + try: + if not self.server_info: + self.authenticate() + + endpoint = "player_api.php" + params = { + 'username': self.username, + 'password': self.password, + 'action': 'get_vod_info', + 'vod_id': vod_id + } + + vod_info = self._make_request(endpoint, params) + + if not isinstance(vod_info, dict): + error_msg = f"Invalid VOD info response for vod_id {vod_id}: {vod_info}" + logger.error(error_msg) + raise ValueError(error_msg) + + logger.info(f"Successfully retrieved VOD info for vod_id {vod_id}") + return vod_info + 
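Taken together, `get_vod_categories`, `get_vod_streams`, `get_vod_info`, and `get_vod_stream_url` support a category-by-category VOD sync. A sketch of how a caller might drive them, with placeholder credentials (in Dispatcharr the real values come from the M3U account model, as in the refresh task earlier in this patch); the `category_id`/`stream_id` field names follow the usual Xtream Codes `player_api` payload:

```python
from core.xtream_codes import Client as XtreamCodesClient

with XtreamCodesClient(
    server_url="http://provider.example:8080",  # placeholder
    username="user",                            # placeholder
    password="secret",                          # placeholder
    user_agent="Dispatcharr",
) as client:
    for category in client.get_vod_categories():
        streams = client.get_vod_streams(category_id=category.get("category_id"))
        for stream in streams:
            # Per-title detail; heavy for large catalogs, fine for a sketch
            detail = client.get_vod_info(stream["stream_id"])
            url = client.get_vod_stream_url(
                stream["stream_id"],
                container_extension=stream.get("container_extension", "mp4"),
            )
            print(stream.get("name"), url)
```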
except Exception as e:
+            logger.error(f"Failed to get VOD info for vod_id {vod_id}: {str(e)}")
+            logger.error(traceback.format_exc())
+            raise
+
+    def get_series_categories(self):
+        """Get series categories"""
+        try:
+            if not self.server_info:
+                self.authenticate()
+
+            endpoint = "player_api.php"
+            params = {
+                'username': self.username,
+                'password': self.password,
+                'action': 'get_series_categories'
+            }
+
+            categories = self._make_request(endpoint, params)
+
+            if not isinstance(categories, list):
+                error_msg = f"Invalid series categories response: {categories}"
+                logger.error(error_msg)
+                raise ValueError(error_msg)
+
+            logger.info(f"Successfully retrieved {len(categories)} series categories")
+            return categories
+        except Exception as e:
+            logger.error(f"Failed to get series categories: {str(e)}")
+            logger.error(traceback.format_exc())
+            raise
+
+    def get_series(self, category_id=None):
+        """Get series for a specific category"""
+        try:
+            if not self.server_info:
+                self.authenticate()
+
+            endpoint = "player_api.php"
+            params = {
+                'username': self.username,
+                'password': self.password,
+                'action': 'get_series'
+            }
+
+            if category_id:
+                params['category_id'] = category_id
+
+            series = self._make_request(endpoint, params)
+
+            if not isinstance(series, list):
+                error_msg = f"Invalid series response for category {category_id}: {series}"
+                logger.error(error_msg)
+                raise ValueError(error_msg)
+
+            logger.info(f"Successfully retrieved {len(series)} series for category {category_id}")
+            return series
+        except Exception as e:
+            logger.error(f"Failed to get series for category {category_id}: {str(e)}")
+            logger.error(traceback.format_exc())
+            raise
+
+    def get_series_info(self, series_id):
+        """Get detailed information for a specific series including episodes"""
+        try:
+            if not self.server_info:
+                self.authenticate()
+
+            endpoint = "player_api.php"
+            params = {
+                'username': self.username,
+                'password': self.password,
+                'action': 'get_series_info',
+                'series_id': series_id
+            }
+
+            series_info = self._make_request(endpoint, params)
+
+            if not isinstance(series_info, dict):
+                error_msg = f"Invalid series info response for series_id {series_id}: {series_info}"
+                logger.error(error_msg)
+                raise ValueError(error_msg)
+
+            logger.info(f"Successfully retrieved series info for series_id {series_id}")
+            return series_info
+        except Exception as e:
+            logger.error(f"Failed to get series info for series_id {series_id}: {str(e)}")
+            logger.error(traceback.format_exc())
+            raise
+
     def close(self):
         """Close the session and cleanup resources"""
         if hasattr(self, 'session') and self.session:
diff --git a/dispatcharr/settings.py b/dispatcharr/settings.py
index acac4c1a..040e9156 100644
--- a/dispatcharr/settings.py
+++ b/dispatcharr/settings.py
@@ -28,6 +28,7 @@ INSTALLED_APPS = [
     "apps.output",
     "apps.proxy.apps.ProxyConfig",
     "apps.proxy.ts_proxy",
+    "apps.vod.apps.VODConfig",
     "core",
     "daphne",
     "drf_yasg",
diff --git a/dispatcharr/urls.py b/dispatcharr/urls.py
index 6335f656..890d0c2d 100644
--- a/dispatcharr/urls.py
+++ b/dispatcharr/urls.py
@@ -9,6 +9,7 @@ from drf_yasg import openapi
 from .routing import websocket_urlpatterns
 from apps.output.views import xc_player_api, xc_panel_api, xc_get, xc_xmltv
 from apps.proxy.ts_proxy.views import stream_xc
+from apps.output.views import xc_movie_stream, xc_series_stream
 
 # Define schema_view for Swagger
 schema_view = get_schema_view(
@@ -55,11 +56,25 @@ urlpatterns = [
         stream_xc,
         name="xc_stream_endpoint",
     ),
+    # XC VOD endpoints
+    path(
+        "movie/<str:username>/<str:password>/<int:stream_id>.<str:extension>",
+        xc_movie_stream,
+        name="xc_movie_stream",
+    ),
+    path(
+        "series/<str:username>/<str:password>/<int:stream_id>.<str:extension>",
+        xc_series_stream,
+        name="xc_series_stream",
+    ),
+
     re_path(r"^swagger/?$", schema_view.with_ui("swagger", cache_timeout=0), name="schema-swagger-ui"),
     # ReDoc UI
     path("redoc/", schema_view.with_ui("redoc", cache_timeout=0), name="schema-redoc"),
     # Optionally, serve the raw Swagger JSON
     path("swagger.json", schema_view.without_ui(cache_timeout=0), name="schema-json"),
+
+    # VOD proxy is now handled by the main proxy URLs above
     # Catch-all routes should always be last
     path("", TemplateView.as_view(template_name="index.html")),  # React entry point
     path("", TemplateView.as_view(template_name="index.html")),
diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh
index 8d204a5b..fd0a883d 100755
--- a/docker/entrypoint.sh
+++ b/docker/entrypoint.sh
@@ -118,6 +118,10 @@ postgres_pid=$(su - postgres -c "$PG_BINDIR/pg_ctl -D ${POSTGRES_DIR} status" |
 echo "✅ Postgres started with PID $postgres_pid"
 pids+=("$postgres_pid")
 
+# Ensure database encoding is UTF8
+. /app/docker/init/02-postgres.sh
+ensure_utf8_encoding
+
 if [[ "$DISPATCHARR_ENV" = "dev" ]]; then
     . /app/docker/init/99-init-dev.sh
     echo "Starting frontend dev environment"
diff --git a/docker/init/02-postgres.sh b/docker/init/02-postgres.sh
index 4deb921d..e36dd744 100644
--- a/docker/init/02-postgres.sh
+++ b/docker/init/02-postgres.sh
@@ -1,5 +1,4 @@
 #!/bin/bash
-
 # Temporary migration from postgres in /data to $POSTGRES_DIR. Can likely remove
 # some time in the future.
 if [ -e "/data/postgresql.conf" ]; then
@@ -115,9 +114,8 @@ if [ -z "$(ls -A $POSTGRES_DIR)" ]; then
     if ! su - postgres -c "psql -p ${POSTGRES_PORT} -tAc \"SELECT 1 FROM pg_database WHERE datname = '$POSTGRES_DB';\"" | grep -q 1; then
         # Create PostgreSQL database
         echo "Creating PostgreSQL database..."
-        su - postgres -c "createdb -p ${POSTGRES_PORT} ${POSTGRES_DB}"
-
-        # Create user, set ownership, and grant privileges
+        su - postgres -c "createdb -p ${POSTGRES_PORT} --encoding=UTF8 ${POSTGRES_DB}"
+        # Create user, set ownership, and grant privileges
         echo "Creating PostgreSQL user..."
         su - postgres -c "psql -p ${POSTGRES_PORT} -d ${POSTGRES_DB}" < $DUMP_FILE"
+        # Drop and recreate database with UTF8 encoding using template0
+        su - postgres -c "dropdb -p ${POSTGRES_PORT} $POSTGRES_DB"
+        # Recreate database with UTF8 encoding
+        su - postgres -c "createdb -p ${POSTGRES_PORT} --encoding=UTF8 --template=template0 ${POSTGRES_DB}"
+
+        # Restore data
+        su - postgres -c "psql -p ${POSTGRES_PORT} -d $POSTGRES_DB < $DUMP_FILE"
+        #configure_db
+
+        rm -f "$DUMP_FILE"
+        echo "Database $POSTGRES_DB converted to UTF8 and permissions set."
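After `ensure_utf8_encoding` has run, the conversion can be sanity-checked from Django. A minimal sketch, assuming a configured `python manage.py shell` session on the Dispatcharr container:

```python
from django.db import connection

with connection.cursor() as cursor:
    cursor.execute("SHOW SERVER_ENCODING;")
    (encoding,) = cursor.fetchone()

# createdb --encoding=UTF8 --template=template0 above should yield exactly this
assert encoding.upper() == "UTF8", f"unexpected encoding: {encoding}"
```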
+ fi +} + + diff --git a/docker/nginx.conf b/docker/nginx.conf index db097ede..5e754d20 100644 --- a/docker/nginx.conf +++ b/docker/nginx.conf @@ -7,13 +7,14 @@ server { proxy_connect_timeout 75; proxy_send_timeout 300; proxy_read_timeout 300; - client_max_body_size 0; # Allow file uploads up to 128MB + client_max_body_size 0; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Host $host:$server_port; proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header Host $host; + proxy_set_header X-Forwarded-Port $server_port; # Serve Django via uWSGI location / { diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx index 4467759e..04555488 100644 --- a/frontend/src/App.jsx +++ b/frontend/src/App.jsx @@ -16,7 +16,9 @@ import DVR from './pages/DVR'; import Settings from './pages/Settings'; import Users from './pages/Users'; import LogosPage from './pages/Logos'; +import VODsPage from './pages/VODs'; import useAuthStore from './store/auth'; +import useLogosStore from './store/logos'; import FloatingVideo from './components/FloatingVideo'; import { WebsocketProvider } from './WebSocket'; import { Box, AppShell, MantineProvider } from '@mantine/core'; @@ -37,6 +39,8 @@ const defaultRoute = '/channels'; const App = () => { const [open, setOpen] = useState(true); + const [backgroundLoadingStarted, setBackgroundLoadingStarted] = + useState(false); const isAuthenticated = useAuthStore((s) => s.isAuthenticated); const setIsAuthenticated = useAuthStore((s) => s.setIsAuthenticated); const logout = useAuthStore((s) => s.logout); @@ -76,6 +80,11 @@ const App = () => { const loggedIn = await initializeAuth(); if (loggedIn) { await initData(); + // Start background logo loading after app is fully initialized (only once) + if (!backgroundLoadingStarted) { + setBackgroundLoadingStarted(true); + useLogosStore.getState().startBackgroundLoading(); + } } else { await logout(); } @@ -86,7 +95,7 @@ const App = () => { }; checkAuth(); - }, [initializeAuth, initData, logout]); + }, [initializeAuth, initData, logout, backgroundLoadingStarted]); return ( { } /> } /> } /> + } /> ) : ( } /> diff --git a/frontend/src/WebSocket.jsx b/frontend/src/WebSocket.jsx index 9ba62273..e2b31ba4 100644 --- a/frontend/src/WebSocket.jsx +++ b/frontend/src/WebSocket.jsx @@ -17,7 +17,7 @@ import API from './api'; import useSettingsStore from './store/settings'; import useAuthStore from './store/auth'; -export const WebsocketContext = createContext([false, () => { }, null]); +export const WebsocketContext = createContext([false, () => {}, null]); export const WebsocketProvider = ({ children }) => { const [isReady, setIsReady] = useState(false); @@ -215,7 +215,10 @@ export const WebsocketProvider = ({ children }) => { ) { updateData.updated_at = new Date().toISOString(); // Log successful completion for debugging - console.log('M3U refresh completed successfully:', updateData); + console.log( + 'M3U refresh completed successfully:', + updateData + ); } updatePlaylist(updateData); @@ -225,7 +228,9 @@ export const WebsocketProvider = ({ children }) => { // Log when playlist can't be found for debugging purposes console.warn( `Received update for unknown playlist ID: ${parsedEvent.data.account}`, - Array.isArray(playlists) ? 'playlists is array' : 'playlists is object', + Array.isArray(playlists) + ? 
'playlists is array' + : 'playlists is object', Object.keys(playlists).length ); } @@ -500,7 +505,7 @@ export const WebsocketProvider = ({ children }) => { const setProfilePreview = usePlaylistsStore((s) => s.setProfilePreview); const fetchEPGData = useEPGsStore((s) => s.fetchEPGData); const fetchEPGs = useEPGsStore((s) => s.fetchEPGs); - const fetchLogos = useLogosStore((s) => s.fetchLogos); + const fetchLogos = useLogosStore((s) => s.fetchAllLogos); const fetchChannelProfiles = useChannelsStore((s) => s.fetchChannelProfiles); const ret = useMemo(() => { diff --git a/frontend/src/api.js b/frontend/src/api.js index 94205715..982eae78 100644 --- a/frontend/src/api.js +++ b/frontend/src/api.js @@ -735,13 +735,20 @@ export default class API { } } - static async updateM3UGroupSettings(playlistId, groupSettings) { + static async updateM3UGroupSettings( + playlistId, + groupSettings = [], + categorySettings = [] + ) { try { const response = await request( `${host}/api/m3u/accounts/${playlistId}/group-settings/`, { method: 'PATCH', - body: { group_settings: groupSettings }, + body: { + group_settings: groupSettings, + category_settings: categorySettings, + }, } ); // Fetch the updated playlist and update the store @@ -793,7 +800,6 @@ export default class API { errorNotification('Failed to refresh M3U account', e); } } - static async refreshAllPlaylist() { try { const response = await request(`${host}/api/m3u/refresh/`, { @@ -805,6 +811,19 @@ export default class API { errorNotification('Failed to refresh all M3U accounts', e); } } + static async refreshVODContent(accountId) { + try { + const response = await request( + `${host}/api/m3u/accounts/${accountId}/refresh-vod/`, + { + method: 'POST', + } + ); + return response; + } catch (e) { + errorNotification('Failed to refresh VOD content', e); + } + } static async deletePlaylist(id) { try { @@ -1291,6 +1310,8 @@ export default class API { const params = new URLSearchParams(); logoIds.forEach(id => params.append('ids', id)); + // Disable pagination for ID-based queries to get all matching logos + params.append('no_pagination', 'true'); const response = await request( `${host}/api/channels/logos/?${params.toString()}` @@ -1777,4 +1798,105 @@ export default class API { errorNotification('Failed to retrieve streams by IDs', e); } } + + // VOD Methods + static async getMovies(params = new URLSearchParams()) { + try { + const response = await request( + `${host}/api/vod/movies/?${params.toString()}` + ); + return response; + } catch (e) { + errorNotification('Failed to retrieve movies', e); + } + } + + static async getSeries(params = new URLSearchParams()) { + try { + const response = await request( + `${host}/api/vod/series/?${params.toString()}` + ); + return response; + } catch (e) { + errorNotification('Failed to retrieve series', e); + } + } + + static async getMovieDetails(movieId) { + try { + const response = await request(`${host}/api/vod/movies/${movieId}/`); + return response; + } catch (e) { + errorNotification('Failed to retrieve movie details', e); + } + } + + static async getMovieProviderInfo(movieId) { + try { + const response = await request( + `${host}/api/vod/movies/${movieId}/provider-info/` + ); + return response; + } catch (e) { + errorNotification('Failed to retrieve movie provider info', e); + } + } + + static async getMovieProviders(movieId) { + try { + const response = await request( + `${host}/api/vod/movies/${movieId}/providers/` + ); + return response; + } catch (e) { + errorNotification('Failed to retrieve movie providers', 
e); + } + } + + static async getSeriesProviders(seriesId) { + try { + const response = await request( + `${host}/api/vod/series/${seriesId}/providers/` + ); + return response; + } catch (e) { + errorNotification('Failed to retrieve series providers', e); + } + } + + static async getVODCategories() { + try { + const response = await request(`${host}/api/vod/categories/`); + return response; + } catch (e) { + errorNotification('Failed to retrieve VOD categories', e); + } + } + + static async getSeriesInfo(seriesId) { + try { + // Call the provider-info endpoint that includes episodes + const response = await request( + `${host}/api/vod/series/${seriesId}/provider-info/?include_episodes=true` + ); + return response; + } catch (e) { + errorNotification('Failed to retrieve series info', e); + } + } + + static async updateVODPosition(vodUuid, clientId, position) { + try { + const response = await request( + `${host}/proxy/vod/stream/${vodUuid}/position/`, + { + method: 'POST', + body: { client_id: clientId, position }, + } + ); + return response; + } catch (e) { + errorNotification('Failed to update playback position', e); + } + } } diff --git a/frontend/src/components/ConfirmationDialog.jsx b/frontend/src/components/ConfirmationDialog.jsx index 1cfbe84d..73805513 100644 --- a/frontend/src/components/ConfirmationDialog.jsx +++ b/frontend/src/components/ConfirmationDialog.jsx @@ -30,7 +30,7 @@ const ConfirmationDialog = ({ size = 'md', zIndex = 1000, showDeleteFileOption = false, - deleteFileLabel = "Also delete files from disk", + deleteFileLabel = 'Also delete files from disk', }) => { const suppressWarning = useWarningsStore((s) => s.suppressWarning); const isWarningSuppressed = useWarningsStore((s) => s.isWarningSuppressed); diff --git a/frontend/src/components/FloatingVideo.jsx b/frontend/src/components/FloatingVideo.jsx index 8b131dd3..6aaeecda 100644 --- a/frontend/src/components/FloatingVideo.jsx +++ b/frontend/src/components/FloatingVideo.jsx @@ -8,88 +8,181 @@ import { CloseButton, Flex, Loader, Text, Box } from '@mantine/core'; export default function FloatingVideo() { const isVisible = useVideoStore((s) => s.isVisible); const streamUrl = useVideoStore((s) => s.streamUrl); + const contentType = useVideoStore((s) => s.contentType); + const metadata = useVideoStore((s) => s.metadata); const hideVideo = useVideoStore((s) => s.hideVideo); const videoRef = useRef(null); const playerRef = useRef(null); const videoContainerRef = useRef(null); - // Convert ref to state so we can use it for rendering const [isLoading, setIsLoading] = useState(false); const [loadError, setLoadError] = useState(null); + const [showOverlay, setShowOverlay] = useState(true); + const overlayTimeoutRef = useRef(null); - // Safely destroy the player to prevent errors + // Safely destroy the mpegts player to prevent errors const safeDestroyPlayer = () => { try { if (playerRef.current) { - // Set loading to false when destroying player setIsLoading(false); setLoadError(null); - // First unload the source to stop any in-progress fetches if (videoRef.current) { - // Remove src attribute and force a load to clear any pending requests videoRef.current.removeAttribute('src'); videoRef.current.load(); } - // Pause the player first try { playerRef.current.pause(); } catch (e) { // Ignore pause errors } - // Use a try-catch block specifically for the destroy call try { playerRef.current.destroy(); } catch (error) { - // Ignore expected abort errors - if (error.name !== 'AbortError' && !error.message?.includes('aborted')) { - 
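The new frontend methods are thin wrappers over the VOD REST and proxy endpoints, so they are easy to exercise directly. A hedged sketch using `requests` — the host, bearer-token auth scheme, series ID, VOD UUID, and client ID are placeholders, not values from the patch:

```python
import requests

BASE = "http://localhost:9191"                  # placeholder Dispatcharr host
HEADERS = {"Authorization": "Bearer <token>"}   # auth scheme assumed

# Paginated movie list from the router-registered endpoint
movies = requests.get(f"{BASE}/api/vod/movies/", headers=HEADERS).json()

# Series detail including episodes, mirroring API.getSeriesInfo above
series_info = requests.get(
    f"{BASE}/api/vod/series/42/provider-info/",
    params={"include_episodes": "true"},
    headers=HEADERS,
).json()

# Report playback position, mirroring API.updateVODPosition above
requests.post(
    f"{BASE}/proxy/vod/stream/<vod-uuid>/position/",
    json={"client_id": "<client-id>", "position": 120.5},
    headers=HEADERS,
)
```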
console.log("Error during player destruction:", error.message); + if ( + error.name !== 'AbortError' && + !error.message?.includes('aborted') + ) { + console.log('Error during player destruction:', error.message); } } finally { playerRef.current = null; } } } catch (error) { - console.log("Error during player cleanup:", error); + console.log('Error during player cleanup:', error); playerRef.current = null; } + + // Clear overlay timer + if (overlayTimeoutRef.current) { + clearTimeout(overlayTimeoutRef.current); + overlayTimeoutRef.current = null; + } }; - useEffect(() => { - if (!isVisible || !streamUrl) { - safeDestroyPlayer(); - return; + // Start overlay auto-hide timer + const startOverlayTimer = () => { + if (overlayTimeoutRef.current) { + clearTimeout(overlayTimeoutRef.current); } + overlayTimeoutRef.current = setTimeout(() => { + setShowOverlay(false); + }, 4000); // Hide after 4 seconds + }; - // Check if we have an existing player and clean it up - safeDestroyPlayer(); + // Initialize VOD player (native HTML5 with enhanced controls) + const initializeVODPlayer = () => { + if (!videoRef.current || !streamUrl) return; + + setIsLoading(true); + setLoadError(null); + setShowOverlay(true); // Show overlay initially + + console.log('Initializing VOD player for:', streamUrl); + + const video = videoRef.current; + + // Enhanced video element configuration for VOD + video.preload = 'metadata'; + video.crossOrigin = 'anonymous'; + + // Set up event listeners + const handleLoadStart = () => setIsLoading(true); + const handleLoadedData = () => setIsLoading(false); + const handleCanPlay = () => { + setIsLoading(false); + // Auto-play for VOD content + video.play().catch((e) => { + console.log('Auto-play prevented:', e); + setLoadError('Auto-play was prevented. 
Click play to start.'); + }); + // Start overlay timer when video is ready + startOverlayTimer(); + }; + const handleError = (e) => { + setIsLoading(false); + const error = e.target.error; + let errorMessage = 'Video playback error'; + + if (error) { + switch (error.code) { + case error.MEDIA_ERR_ABORTED: + errorMessage = 'Video playback was aborted'; + break; + case error.MEDIA_ERR_NETWORK: + errorMessage = 'Network error while loading video'; + break; + case error.MEDIA_ERR_DECODE: + errorMessage = 'Video codec not supported by your browser'; + break; + case error.MEDIA_ERR_SRC_NOT_SUPPORTED: + errorMessage = 'Video format not supported by your browser'; + break; + default: + errorMessage = error.message || 'Unknown video error'; + } + } + + setLoadError(errorMessage); + }; + + // Enhanced progress tracking for VOD + const handleProgress = () => { + if (video.buffered.length > 0) { + const bufferedEnd = video.buffered.end(video.buffered.length - 1); + const duration = video.duration; + if (duration > 0) { + const bufferedPercent = (bufferedEnd / duration) * 100; + // You could emit this to a store for UI feedback + } + } + }; + + // Add event listeners + video.addEventListener('loadstart', handleLoadStart); + video.addEventListener('loadeddata', handleLoadedData); + video.addEventListener('canplay', handleCanPlay); + video.addEventListener('error', handleError); + video.addEventListener('progress', handleProgress); + + // Set the source + video.src = streamUrl; + video.load(); + + // Store cleanup function + playerRef.current = { + destroy: () => { + video.removeEventListener('loadstart', handleLoadStart); + video.removeEventListener('loadeddata', handleLoadedData); + video.removeEventListener('canplay', handleCanPlay); + video.removeEventListener('error', handleError); + video.removeEventListener('progress', handleProgress); + video.removeAttribute('src'); + video.load(); + }, + }; + }; + + // Initialize live stream player (mpegts.js) + const initializeLivePlayer = () => { + if (!videoRef.current || !streamUrl) return; - // Set loading state to true when starting a new stream setIsLoading(true); setLoadError(null); - // Debug log to help diagnose stream issues - console.log("Attempting to play stream:", streamUrl); + console.log('Initializing live stream player for:', streamUrl); try { - // Check for MSE support first if (!mpegts.getFeatureList().mseLivePlayback) { setIsLoading(false); - setLoadError("Your browser doesn't support live video streaming. Please try Chrome or Edge."); + setLoadError( + "Your browser doesn't support live video streaming. Please try Chrome or Edge." 
+ ); return; } - // Check for basic codec support - const video = document.createElement('video'); - const h264Support = video.canPlayType('video/mp4; codecs="avc1.42E01E"'); - const aacSupport = video.canPlayType('audio/mp4; codecs="mp4a.40.2"'); - - console.log("Browser codec support - H264:", h264Support, "AAC:", aacSupport); - - // If the browser supports MSE for live playback, initialize mpegts.js - setIsLoading(true); - const player = mpegts.createPlayer({ type: 'mpegts', url: streamUrl, @@ -107,7 +200,6 @@ export default function FloatingVideo() { player.attachMediaElement(videoRef.current); - // Add events to track loading state player.on(mpegts.Events.LOADING_COMPLETE, () => { setIsLoading(false); }); @@ -116,29 +208,37 @@ export default function FloatingVideo() { setIsLoading(false); }); - // Enhanced error event handler with codec-specific messages player.on(mpegts.Events.ERROR, (errorType, errorDetail) => { setIsLoading(false); - // Filter out aborted errors if (errorType !== 'NetworkError' || !errorDetail?.includes('aborted')) { console.error('Player error:', errorType, errorDetail); - // Provide specific error messages based on error type let errorMessage = `Error: ${errorType}`; if (errorType === 'MediaError') { - // Try to determine if it's an audio or video codec issue const errorString = errorDetail?.toLowerCase() || ''; - if (errorString.includes('audio') || errorString.includes('ac3') || errorString.includes('ac-3')) { - errorMessage = "Audio codec not supported by your browser. Try Chrome or Edge for better audio codec support."; - } else if (errorString.includes('video') || errorString.includes('h264') || errorString.includes('h.264')) { - errorMessage = "Video codec not supported by your browser. Try Chrome or Edge for better video codec support."; + if ( + errorString.includes('audio') || + errorString.includes('ac3') || + errorString.includes('ac-3') + ) { + errorMessage = + 'Audio codec not supported by your browser. Try Chrome or Edge for better audio codec support.'; + } else if ( + errorString.includes('video') || + errorString.includes('h264') || + errorString.includes('h.264') + ) { + errorMessage = + 'Video codec not supported by your browser. Try Chrome or Edge for better video codec support.'; } else if (errorString.includes('mse')) { - errorMessage = "Your browser doesn't support the codecs used in this stream. Try Chrome or Edge for better compatibility."; + errorMessage = + "Your browser doesn't support the codecs used in this stream. Try Chrome or Edge for better compatibility."; } else { - errorMessage = "Media codec not supported by your browser. This may be due to unsupported audio (AC3) or video codecs. Try Chrome or Edge."; + errorMessage = + 'Media codec not supported by your browser. This may be due to unsupported audio (AC3) or video codecs. Try Chrome or Edge.'; } } else if (errorDetail) { errorMessage += ` - ${errorDetail}`; @@ -150,49 +250,66 @@ export default function FloatingVideo() { player.load(); - // Don't auto-play until we've loaded properly player.on(mpegts.Events.MEDIA_INFO, () => { setIsLoading(false); try { - player.play().catch(e => { - console.log("Auto-play prevented:", e); - setLoadError("Auto-play was prevented. Click play to start."); + player.play().catch((e) => { + console.log('Auto-play prevented:', e); + setLoadError('Auto-play was prevented. 
Click play to start.'); }); } catch (e) { - console.log("Error during play:", e); + console.log('Error during play:', e); setLoadError(`Playback error: ${e.message}`); } }); - // Store player instance so we can clean up later playerRef.current = player; } catch (error) { setIsLoading(false); - console.error("Error initializing player:", error); + console.error('Error initializing player:', error); - // Provide helpful error message based on the error - if (error.message?.includes('codec') || error.message?.includes('format')) { - setLoadError("Codec not supported by your browser. Please try a different browser (Chrome/Edge recommended)."); + if ( + error.message?.includes('codec') || + error.message?.includes('format') + ) { + setLoadError( + 'Codec not supported by your browser. Please try a different browser (Chrome/Edge recommended).' + ); } else { setLoadError(`Initialization error: ${error.message}`); } } + }; + + useEffect(() => { + if (!isVisible || !streamUrl) { + safeDestroyPlayer(); + return; + } + + // Clean up any existing player + safeDestroyPlayer(); + + // Initialize the appropriate player based on content type + if (contentType === 'vod') { + initializeVODPlayer(); + } else { + initializeLivePlayer(); + } // Cleanup when component unmounts or streamUrl changes return () => { safeDestroyPlayer(); }; - }, [isVisible, streamUrl]); + }, [isVisible, streamUrl, contentType]); // Modified hideVideo handler to clean up player first const handleClose = (e) => { - // Prevent event propagation to avoid triggering drag events if (e) { e.stopPropagation(); e.preventDefault(); } safeDestroyPlayer(); - // Small delay before hiding the video component to ensure cleanup is complete setTimeout(() => { hideVideo(); }, 50); @@ -223,7 +340,7 @@ export default function FloatingVideo() { {/* Video container with relative positioning for the overlay */} - - {/* The