From 7e5be6094f6fb9bc00ff3eee9e4e4b799c98ce3c Mon Sep 17 00:00:00 2001 From: Marlon Alkan Date: Sun, 8 Jun 2025 16:45:34 +0200 Subject: [PATCH 01/41] docker: init: 02-postgres.sh: allow DB user to create new DB (for tests) --- docker/init/02-postgres.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docker/init/02-postgres.sh b/docker/init/02-postgres.sh index 69a81dd4..7bb90671 100644 --- a/docker/init/02-postgres.sh +++ b/docker/init/02-postgres.sh @@ -57,13 +57,14 @@ if [ -z "$(ls -A $POSTGRES_DIR)" ]; then echo "Creating PostgreSQL database..." su - postgres -c "createdb -p ${POSTGRES_PORT} ${POSTGRES_DB}" - # Create user, set ownership, and grant privileges + # Create user, set ownership, and grant privileges, including privileges to create new databases echo "Creating PostgreSQL user..." su - postgres -c "psql -p ${POSTGRES_PORT} -d ${POSTGRES_DB}" < Date: Sun, 8 Jun 2025 16:47:00 +0200 Subject: [PATCH 02/41] apps: output: change body detection logic and add tests --- apps/output/tests.py | 23 +++++++++++++++++++++++ apps/output/views.py | 5 +++-- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/apps/output/tests.py b/apps/output/tests.py index e1e857ee..f87c8340 100644 --- a/apps/output/tests.py +++ b/apps/output/tests.py @@ -14,3 +14,26 @@ class OutputM3UTest(TestCase): self.assertEqual(response.status_code, 200) content = response.content.decode() self.assertIn("#EXTM3U", content) + + def test_generate_m3u_response_post_empty_body(self): + """ + Test that a POST request with an empty body returns 200 OK. + """ + url = reverse('output:generate_m3u') + + response = self.client.post(url, data=None, content_type='application/x-www-form-urlencoded') + content = response.content.decode() + + self.assertEqual(response.status_code, 200, "POST with empty body should return 200 OK") + self.assertIn("#EXTM3U", content) + + def test_generate_m3u_response_post_with_body(self): + """ + Test that a POST request with a non-empty body returns 403 Forbidden. + """ + url = reverse('output:generate_m3u') + + response = self.client.post(url, data={'evilstring': 'muhahaha'}) + + self.assertEqual(response.status_code, 403, "POST with body should return 403 Forbidden") + self.assertIn("POST requests with body are not allowed, body is:", response.content.decode()) diff --git a/apps/output/views.py b/apps/output/views.py index 2b18d185..ff02560c 100644 --- a/apps/output/views.py +++ b/apps/output/views.py @@ -18,9 +18,10 @@ def generate_m3u(request, profile_name=None): The stream URL now points to the new stream_view that uses StreamProfile. Supports both GET and POST methods for compatibility with IPTVSmarters. 
""" - # Check if this is a POST request with data (which we don't want to allow) + # Check if this is a POST request and the body is not empty (which we don't want to allow) if request.method == "POST" and request.body: - return HttpResponseForbidden("POST requests with content are not allowed") + if request.body.decode() != '{}': + return HttpResponseForbidden("POST requests with body are not allowed, body is: {}".format(request.body.decode())) if profile_name is not None: channel_profile = ChannelProfile.objects.get(name=profile_name) From ae8b85a3e2d019234d4b183fd1963a35d0a7c85f Mon Sep 17 00:00:00 2001 From: Ragchuck Date: Wed, 15 Oct 2025 22:06:01 +0200 Subject: [PATCH 03/41] feat: added support for rtsp --- apps/m3u/tasks.py | 6 +++++- core/utils.py | 4 ++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/apps/m3u/tasks.py b/apps/m3u/tasks.py index 0ba595c5..52847e77 100644 --- a/apps/m3u/tasks.py +++ b/apps/m3u/tasks.py @@ -219,6 +219,10 @@ def fetch_m3u_lines(account, use_cache=False): # Has HTTP URLs, might be a simple M3U without headers is_valid_m3u = True logger.info("Content validated as M3U: contains HTTP URLs") + elif any(line.strip().startswith('rtsp') for line in content_lines): + # Has HTTP URLs, might be a simple M3U without headers + is_valid_m3u = True + logger.info("Content validated as M3U: contains RTSP URLs") if not is_valid_m3u: # Log what we actually received for debugging @@ -1381,7 +1385,7 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False): ) problematic_lines.append((line_index + 1, line[:200])) - elif extinf_data and line.startswith("http"): + elif extinf_data and (line.startswith("http") or line.startswith("rtsp")): url_count += 1 # Associate URL with the last EXTINF line extinf_data[-1]["url"] = line diff --git a/core/utils.py b/core/utils.py index 36ac5fef..da40d19c 100644 --- a/core/utils.py +++ b/core/utils.py @@ -377,8 +377,8 @@ def validate_flexible_url(value): import re # More flexible pattern for non-FQDN hostnames with paths - # Matches: http://hostname, http://hostname/, http://hostname:port/path/to/file.xml - non_fqdn_pattern = r'^https?://[a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?(\:[0-9]+)?(/[^\s]*)?$' + # Matches: http://hostname, http://hostname/, http://hostname:port/path/to/file.xml, rtp://192.168.2.1, rtsp://192.168.178.1 + non_fqdn_pattern = r'^(rts?p|https?)://([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])|[0-9.]+)?(\:[0-9]+)?(/[^\s]*)?$' non_fqdn_match = re.match(non_fqdn_pattern, value) if non_fqdn_match: From e6146e5243074e8b55544291a76096623fb9f576 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Tue, 4 Nov 2025 18:23:45 -0600 Subject: [PATCH 04/41] Bug fix: Reduce websocket message size when processing epgs. Also remove unnecessary console logging during epg refresh. 
Fixes [Bug]: Page goes blank if sending too many requests / responses Fixes #327 --- apps/channels/api_views.py | 37 +++++++++++++------ apps/epg/serializers.py | 9 +++-- apps/epg/tasks.py | 6 +++ frontend/src/WebSocket.jsx | 6 +-- frontend/src/api.js | 8 +++- .../src/components/forms/ChannelBatch.jsx | 34 +++++++++++------ frontend/src/components/tables/EPGsTable.jsx | 2 - frontend/src/store/epgs.jsx | 35 ++++++++++++------ 8 files changed, 93 insertions(+), 44 deletions(-) diff --git a/apps/channels/api_views.py b/apps/channels/api_views.py index 862de7f9..c97d8255 100644 --- a/apps/channels/api_views.py +++ b/apps/channels/api_views.py @@ -987,19 +987,27 @@ class ChannelViewSet(viewsets.ModelViewSet): channel.epg_data = epg_data channel.save(update_fields=["epg_data"]) - # Explicitly trigger program refresh for this EPG - from apps.epg.tasks import parse_programs_for_tvg_id + # Only trigger program refresh for non-dummy EPG sources + status_message = None + if epg_data.epg_source.source_type != 'dummy': + # Explicitly trigger program refresh for this EPG + from apps.epg.tasks import parse_programs_for_tvg_id - task_result = parse_programs_for_tvg_id.delay(epg_data.id) + task_result = parse_programs_for_tvg_id.delay(epg_data.id) - # Prepare response with task status info - status_message = "EPG refresh queued" - if task_result.result == "Task already running": - status_message = "EPG refresh already in progress" + # Prepare response with task status info + status_message = "EPG refresh queued" + if task_result.result == "Task already running": + status_message = "EPG refresh already in progress" + + # Build response message + message = f"EPG data set to {epg_data.tvg_id} for channel {channel.name}" + if status_message: + message += f". {status_message}" return Response( { - "message": f"EPG data set to {epg_data.tvg_id} for channel {channel.name}. 
{status_message}.", + "message": message, "channel": self.get_serializer(channel).data, "task_status": status_message, } @@ -1062,12 +1070,19 @@ class ChannelViewSet(viewsets.ModelViewSet): f"Error setting EPG data for channel {channel_id}: {str(e)}" ) - # Trigger program refresh for unique EPG data IDs + # Trigger program refresh for unique EPG data IDs (skip dummy EPGs) from apps.epg.tasks import parse_programs_for_tvg_id + from apps.epg.models import EPGData for epg_id in unique_epg_ids: - parse_programs_for_tvg_id.delay(epg_id) - programs_refreshed += 1 + try: + epg_data = EPGData.objects.select_related('epg_source').get(id=epg_id) + # Only refresh non-dummy EPG sources + if epg_data.epg_source.source_type != 'dummy': + parse_programs_for_tvg_id.delay(epg_id) + programs_refreshed += 1 + except EPGData.DoesNotExist: + logger.error(f"EPGData with ID {epg_id} not found") return Response( { diff --git a/apps/epg/serializers.py b/apps/epg/serializers.py index 3404cca9..bfb750fc 100644 --- a/apps/epg/serializers.py +++ b/apps/epg/serializers.py @@ -4,7 +4,7 @@ from .models import EPGSource, EPGData, ProgramData from apps.channels.models import Channel class EPGSourceSerializer(serializers.ModelSerializer): - epg_data_ids = serializers.SerializerMethodField() + epg_data_count = serializers.SerializerMethodField() read_only_fields = ['created_at', 'updated_at'] url = serializers.CharField( required=False, @@ -29,11 +29,12 @@ class EPGSourceSerializer(serializers.ModelSerializer): 'created_at', 'updated_at', 'custom_properties', - 'epg_data_ids' + 'epg_data_count' ] - def get_epg_data_ids(self, obj): - return list(obj.epgs.values_list('id', flat=True)) + def get_epg_data_count(self, obj): + """Return the count of EPG data entries instead of all IDs to prevent large payloads""" + return obj.epgs.count() class ProgramDataSerializer(serializers.ModelSerializer): class Meta: diff --git a/apps/epg/tasks.py b/apps/epg/tasks.py index 2028cd98..b6350686 100644 --- a/apps/epg/tasks.py +++ b/apps/epg/tasks.py @@ -1157,6 +1157,12 @@ def parse_programs_for_tvg_id(epg_id): epg = EPGData.objects.get(id=epg_id) epg_source = epg.epg_source + # Skip program parsing for dummy EPG sources - they don't have program data files + if epg_source.source_type == 'dummy': + logger.info(f"Skipping program parsing for dummy EPG source {epg_source.name} (ID: {epg_id})") + release_task_lock('parse_epg_programs', epg_id) + return + if not Channel.objects.filter(epg_data=epg).exists(): logger.info(f"No channels matched to EPG {epg.tvg_id}") release_task_lock('parse_epg_programs', epg_id) diff --git a/frontend/src/WebSocket.jsx b/frontend/src/WebSocket.jsx index 0f46b012..1c576d23 100644 --- a/frontend/src/WebSocket.jsx +++ b/frontend/src/WebSocket.jsx @@ -572,10 +572,10 @@ export const WebsocketProvider = ({ children }) => { // Update the store with progress information updateEPGProgress(parsedEvent.data); - // If we have source_id/account info, update the EPG source status - if (parsedEvent.data.source_id || parsedEvent.data.account) { + // If we have source/account info, update the EPG source status + if (parsedEvent.data.source || parsedEvent.data.account) { const sourceId = - parsedEvent.data.source_id || parsedEvent.data.account; + parsedEvent.data.source || parsedEvent.data.account; const epg = epgs[sourceId]; if (epg) { diff --git a/frontend/src/api.js b/frontend/src/api.js index 5b80a3f7..4281a533 100644 --- a/frontend/src/api.js +++ b/frontend/src/api.js @@ -2132,9 +2132,15 @@ export default class API { // If 
successful, requery channels to update UI if (response.success) { + // Build message based on whether EPG sources need refreshing + let message = `Updated ${response.channels_updated} channel${response.channels_updated !== 1 ? 's' : ''}`; + if (response.programs_refreshed > 0) { + message += `, refreshing ${response.programs_refreshed} EPG source${response.programs_refreshed !== 1 ? 's' : ''}`; + } + notifications.show({ title: 'EPG Association', - message: `Updated ${response.channels_updated} channels, refreshing ${response.programs_refreshed} EPG sources.`, + message: message, color: 'blue', }); diff --git a/frontend/src/components/forms/ChannelBatch.jsx b/frontend/src/components/forms/ChannelBatch.jsx index 42184f4d..5b9b705e 100644 --- a/frontend/src/components/forms/ChannelBatch.jsx +++ b/frontend/src/components/forms/ChannelBatch.jsx @@ -55,6 +55,7 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { const streamProfiles = useStreamProfilesStore((s) => s.profiles); const epgs = useEPGsStore((s) => s.epgs); + const tvgs = useEPGsStore((s) => s.tvgs); const fetchEPGs = useEPGsStore((s) => s.fetchEPGs); const [channelGroupModelOpen, setChannelGroupModalOpen] = useState(false); @@ -267,17 +268,28 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { } else { // Assign the selected dummy EPG const selectedEpg = epgs[selectedDummyEpgId]; - if ( - selectedEpg && - selectedEpg.epg_data_ids && - selectedEpg.epg_data_ids.length > 0 - ) { - const epgDataId = selectedEpg.epg_data_ids[0]; - const associations = channelIds.map((id) => ({ - channel_id: id, - epg_data_id: epgDataId, - })); - await API.batchSetEPG(associations); + if (selectedEpg && selectedEpg.epg_data_count > 0) { + // Convert to number for comparison since Select returns string + const epgSourceId = parseInt(selectedDummyEpgId, 10); + + // Check if we already have EPG data loaded in the store + let epgData = tvgs.find((data) => data.epg_source === epgSourceId); + + // If not in store, fetch it + if (!epgData) { + const epgDataList = await API.getEPGData(); + epgData = epgDataList.find( + (data) => data.epg_source === epgSourceId + ); + } + + if (epgData) { + const associations = channelIds.map((id) => ({ + channel_id: id, + epg_data_id: epgData.id, + })); + await API.batchSetEPG(associations); + } } } } diff --git a/frontend/src/components/tables/EPGsTable.jsx b/frontend/src/components/tables/EPGsTable.jsx index 53f9a72c..bb707984 100644 --- a/frontend/src/components/tables/EPGsTable.jsx +++ b/frontend/src/components/tables/EPGsTable.jsx @@ -181,8 +181,6 @@ const EPGsTable = () => { ); }; - console.log(epgs); - const columns = useMemo( //column definitions... () => [ diff --git a/frontend/src/store/epgs.jsx b/frontend/src/store/epgs.jsx index 6b3ffa81..e0576364 100644 --- a/frontend/src/store/epgs.jsx +++ b/frontend/src/store/epgs.jsx @@ -97,18 +97,29 @@ const useEPGsStore = create((set) => ({ ? 'success' // Mark as success when progress is 100% : state.epgs[data.source]?.status || 'idle'; - // Create a new epgs object with the updated source status - const newEpgs = { - ...state.epgs, - [data.source]: { - ...state.epgs[data.source], - status: sourceStatus, - last_message: - data.status === 'error' - ? 
data.error || 'Unknown error' - : state.epgs[data.source]?.last_message, - }, - }; + // Only update epgs object if status or last_message actually changed + // This prevents unnecessary re-renders on every progress update + const currentEpg = state.epgs[data.source]; + const newLastMessage = + data.status === 'error' + ? data.error || 'Unknown error' + : currentEpg?.last_message; + + let newEpgs = state.epgs; + if ( + currentEpg && + (currentEpg.status !== sourceStatus || + currentEpg.last_message !== newLastMessage) + ) { + newEpgs = { + ...state.epgs, + [data.source]: { + ...currentEpg, + status: sourceStatus, + last_message: newLastMessage, + }, + }; + } return { refreshProgress: newRefreshProgress, From 77e98508fbf79361f7e0abaa63dfeaf57130da9e Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Tue, 4 Nov 2025 19:08:31 -0600 Subject: [PATCH 05/41] Enhancement: Refactor get_host_and_port for smarter port selection when using reverse proxies. Fixes #618 --- apps/output/views.py | 57 +++++++++++++++++++++++++++++++------------- 1 file changed, 41 insertions(+), 16 deletions(-) diff --git a/apps/output/views.py b/apps/output/views.py index f36d02db..4ad11c39 100644 --- a/apps/output/views.py +++ b/apps/output/views.py @@ -2626,50 +2626,75 @@ def get_host_and_port(request): Returns (host, port) for building absolute URIs. - Prefers X-Forwarded-Host/X-Forwarded-Port (nginx). - Falls back to Host header. - - In dev, if missing, uses 5656 or 8000 as a guess. + - Returns None for port if using standard ports (80/443) to omit from URLs. + - In dev, uses 5656 as a guess if port cannot be determined. """ - # 1. Try X-Forwarded-Host (may include port) + # Determine the scheme first - needed for standard port detection + scheme = request.META.get("HTTP_X_FORWARDED_PROTO", request.scheme) + standard_port = "443" if scheme == "https" else "80" + + # 1. Try X-Forwarded-Host (may include port) - set by our nginx xfh = request.META.get("HTTP_X_FORWARDED_HOST") if xfh: if ":" in xfh: host, port = xfh.split(":", 1) + # Omit standard ports from URLs + return host, None if port == standard_port else port else: host = xfh + # Check for X-Forwarded-Port header port = request.META.get("HTTP_X_FORWARDED_PORT") - if port: - return host, port + if port: + # Omit standard ports from URLs + return host, None if port == standard_port else port + # No port found, assume standard port for the scheme + return host, None # 2. Try Host header raw_host = request.get_host() if ":" in raw_host: host, port = raw_host.split(":", 1) - return host, port + # Omit standard ports from URLs + return host, None if port == standard_port else port else: host = raw_host - # 3. Try X-Forwarded-Port + # 3. Try X-Forwarded-Port (external reverse proxy might set this) port = request.META.get("HTTP_X_FORWARDED_PORT") if port: - return host, port + # Omit standard ports from URLs + return host, None if port == standard_port else port # 4. Try SERVER_PORT from META port = request.META.get("SERVER_PORT") if port: - return host, port + # Omit standard ports from URLs + return host, None if port == standard_port else port - # 5. Dev fallback: guess port + # 5. Check if we're behind a reverse proxy (X-Forwarded-Proto or X-Forwarded-For present) + # If so, assume standard port for the scheme + if request.META.get("HTTP_X_FORWARDED_PROTO") or request.META.get("HTTP_X_FORWARDED_FOR"): + return host, None + + # 6. 
Dev fallback: guess port 5656 if os.environ.get("DISPATCHARR_ENV") == "dev" or host in ("localhost", "127.0.0.1"): - guess = "5656" - return host, guess + return host, "5656" - # 6. Fallback to scheme default - port = "443" if request.is_secure() else "9191" - return host, port + # 7. Final fallback: assume standard port for scheme (omit from URL) + return host, None def build_absolute_uri_with_port(request, path): + """ + Build an absolute URI with optional port. + Port is omitted from URL if None (standard port for scheme). + """ host, port = get_host_and_port(request) - scheme = request.scheme - return f"{scheme}://{host}:{port}{path}" + scheme = request.META.get("HTTP_X_FORWARDED_PROTO", request.scheme) + + if port: + return f"{scheme}://{host}:{port}{path}" + else: + return f"{scheme}://{host}{path}" def format_duration_hms(seconds): """ From 871f9f953ebae05b4ba87795529c8f2d704a3f28 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Wed, 5 Nov 2025 16:23:13 -0600 Subject: [PATCH 06/41] Another attempt for the get_host_and_port function to better handle port detection behind reverse proxies. --- apps/output/views.py | 45 +++++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/apps/output/views.py b/apps/output/views.py index 4ad11c39..f1557101 100644 --- a/apps/output/views.py +++ b/apps/output/views.py @@ -2638,16 +2638,25 @@ def get_host_and_port(request): if xfh: if ":" in xfh: host, port = xfh.split(":", 1) - # Omit standard ports from URLs - return host, None if port == standard_port else port + # Omit standard ports from URLs, or omit if port doesn't match standard for scheme + # (e.g., HTTPS but port is 9191 = behind external reverse proxy) + if port == standard_port: + return host, None + # If port doesn't match standard and X-Forwarded-Proto is set, likely behind external RP + if request.META.get("HTTP_X_FORWARDED_PROTO"): + host = xfh.split(":")[0] # Strip port, will check for proper port below + else: + return host, port else: host = xfh - # Check for X-Forwarded-Port header - port = request.META.get("HTTP_X_FORWARDED_PORT") - if port: - # Omit standard ports from URLs - return host, None if port == standard_port else port - # No port found, assume standard port for the scheme + + # Check for X-Forwarded-Port header (if we didn't already find a valid port) + port = request.META.get("HTTP_X_FORWARDED_PORT") + if port: + # Omit standard ports from URLs + return host, None if port == standard_port else port + # If X-Forwarded-Proto is set but no valid port, assume standard + if request.META.get("HTTP_X_FORWARDED_PROTO"): return host, None # 2. Try Host header @@ -2659,28 +2668,22 @@ def get_host_and_port(request): else: host = raw_host - # 3. Try X-Forwarded-Port (external reverse proxy might set this) - port = request.META.get("HTTP_X_FORWARDED_PORT") - if port: - # Omit standard ports from URLs - return host, None if port == standard_port else port + # 3. Check if we're behind a reverse proxy (X-Forwarded-Proto or X-Forwarded-For present) + # If so, assume standard port for the scheme (don't trust SERVER_PORT in this case) + if request.META.get("HTTP_X_FORWARDED_PROTO") or request.META.get("HTTP_X_FORWARDED_FOR"): + return host, None - # 4. Try SERVER_PORT from META + # 4. Try SERVER_PORT from META (only if NOT behind reverse proxy) port = request.META.get("SERVER_PORT") if port: # Omit standard ports from URLs return host, None if port == standard_port else port - # 5. 
Check if we're behind a reverse proxy (X-Forwarded-Proto or X-Forwarded-For present) - # If so, assume standard port for the scheme - if request.META.get("HTTP_X_FORWARDED_PROTO") or request.META.get("HTTP_X_FORWARDED_FOR"): - return host, None - - # 6. Dev fallback: guess port 5656 + # 5. Dev fallback: guess port 5656 if os.environ.get("DISPATCHARR_ENV") == "dev" or host in ("localhost", "127.0.0.1"): return host, "5656" - # 7. Final fallback: assume standard port for scheme (omit from URL) + # 6. Final fallback: assume standard port for scheme (omit from URL) return host, None def build_absolute_uri_with_port(request, path): From ed86eb22744e08faf05f55a48fbc6abf3039e773 Mon Sep 17 00:00:00 2001 From: 0x68732f6e69622fff <191755490+0x68732f6e69622fff@users.noreply.github.com> Date: Thu, 6 Nov 2025 14:29:13 +0000 Subject: [PATCH 07/41] Ensures that in the groups section of M3U playlist management, the EPG Source dropdown for the 'Force EPG Source' option displays entries sorted alphabetically by name --- .../src/components/forms/LiveGroupFilter.jsx | 28 ++++++++++--------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/frontend/src/components/forms/LiveGroupFilter.jsx b/frontend/src/components/forms/LiveGroupFilter.jsx index 71b412b4..3497957b 100644 --- a/frontend/src/components/forms/LiveGroupFilter.jsx +++ b/frontend/src/components/forms/LiveGroupFilter.jsx @@ -1172,19 +1172,21 @@ const LiveGroupFilter = ({ }} data={[ { value: '0', label: 'No EPG (Disabled)' }, - ...epgSources.map((source) => ({ - value: source.id.toString(), - label: `${source.name} (${ - source.source_type === 'dummy' - ? 'Dummy' - : source.source_type === 'xmltv' - ? 'XMLTV' - : source.source_type === - 'schedules_direct' - ? 'Schedules Direct' - : source.source_type - })`, - })), + ...[...epgSources] + .sort((a, b) => a.name.localeCompare(b.name)) + .map((source) => ({ + value: source.id.toString(), + label: `${source.name} (${ + source.source_type === 'dummy' + ? 'Dummy' + : source.source_type === 'xmltv' + ? 'XMLTV' + : source.source_type === + 'schedules_direct' + ? 
'Schedules Direct' + : source.source_type + })`, + })), ]} clearable searchable From da628705dfc34bddcbddd24e344ba3795cda5204 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Fri, 7 Nov 2025 13:19:18 -0600 Subject: [PATCH 08/41] Separate VOD and channel logos into distinct tables with dedicated management UI - Created VODLogo model for movies/series, separate from Logo (channels only) - Added database migration to create vodlogo table and migrate existing VOD logos - Implemented VODLogoViewSet with pagination, filtering (used/unused/movies/series), and bulk operations - Built VODLogosTable component with server-side pagination matching channel logos styling - Added VOD logos tab with on-demand loading to Logos page - Fixed orphaned VOD content cleanup to always remove unused entries - Removed redundant channel_assignable filtering from channel logos --- apps/channels/api_views.py | 86 +-- apps/channels/serializers.py | 64 +- apps/output/views.py | 10 +- apps/vod/api_urls.py | 2 + apps/vod/api_views.py | 191 +++++- ...logo_alter_movie_logo_alter_series_logo.py | 264 +++++++++ apps/vod/models.py | 18 +- apps/vod/serializers.py | 77 ++- apps/vod/tasks.py | 59 +- frontend/src/api.js | 71 +++ frontend/src/components/tables/LogosTable.jsx | 19 - .../src/components/tables/VODLogosTable.jsx | 556 ++++++++++++++++++ frontend/src/hooks/useSmartLogos.jsx | 5 +- frontend/src/pages/Logos.jsx | 89 ++- frontend/src/store/logos.jsx | 26 +- frontend/src/store/vodLogos.jsx | 128 ++++ 16 files changed, 1423 insertions(+), 242 deletions(-) create mode 100644 apps/vod/migrations/0003_vodlogo_alter_movie_logo_alter_series_logo.py create mode 100644 frontend/src/components/tables/VODLogosTable.jsx create mode 100644 frontend/src/store/vodLogos.jsx diff --git a/apps/channels/api_views.py b/apps/channels/api_views.py index c97d8255..fc5ea114 100644 --- a/apps/channels/api_views.py +++ b/apps/channels/api_views.py @@ -1247,7 +1247,7 @@ class CleanupUnusedLogosAPIView(APIView): return [Authenticated()] @swagger_auto_schema( - operation_description="Delete all logos that are not used by any channels, movies, or series", + operation_description="Delete all channel logos that are not used by any channels", request_body=openapi.Schema( type=openapi.TYPE_OBJECT, properties={ @@ -1261,24 +1261,11 @@ class CleanupUnusedLogosAPIView(APIView): responses={200: "Cleanup completed"}, ) def post(self, request): - """Delete all logos with no channel, movie, or series associations""" + """Delete all channel logos with no channel associations""" delete_files = request.data.get("delete_files", False) - # Find logos that are not used by channels, movies, or series - filter_conditions = Q(channels__isnull=True) - - # Add VOD conditions if models are available - try: - filter_conditions &= Q(movie__isnull=True) - except: - pass - - try: - filter_conditions &= Q(series__isnull=True) - except: - pass - - unused_logos = Logo.objects.filter(filter_conditions) + # Find logos that are not used by any channels + unused_logos = Logo.objects.filter(channels__isnull=True) deleted_count = unused_logos.count() logo_names = list(unused_logos.values_list('name', flat=True)) local_files_deleted = 0 @@ -1350,13 +1337,6 @@ class LogoViewSet(viewsets.ModelViewSet): # Start with basic prefetch for channels queryset = Logo.objects.prefetch_related('channels').order_by('name') - # Try to prefetch VOD relations if available - try: - queryset = queryset.prefetch_related('movie', 'series') - except: - # VOD app might not be available, continue without VOD 
prefetch - pass - # Filter by specific IDs ids = self.request.query_params.getlist('ids') if ids: @@ -1369,62 +1349,14 @@ class LogoViewSet(viewsets.ModelViewSet): pass # Invalid IDs, return empty queryset queryset = Logo.objects.none() - # Filter by usage - now includes VOD content + # Filter by usage used_filter = self.request.query_params.get('used', None) if used_filter == 'true': - # Logo is used if it has any channels, movies, or series - filter_conditions = Q(channels__isnull=False) - - # Add VOD conditions if models are available - try: - filter_conditions |= Q(movie__isnull=False) - except: - pass - - try: - filter_conditions |= Q(series__isnull=False) - except: - pass - - queryset = queryset.filter(filter_conditions).distinct() - + # Logo is used if it has any channels + queryset = queryset.filter(channels__isnull=False).distinct() elif used_filter == 'false': - # Logo is unused if it has no channels, movies, or series - filter_conditions = Q(channels__isnull=True) - - # Add VOD conditions if models are available - try: - filter_conditions &= Q(movie__isnull=True) - except: - pass - - try: - filter_conditions &= Q(series__isnull=True) - except: - pass - - queryset = queryset.filter(filter_conditions) - - # Filter for channel assignment (unused + channel-used, exclude VOD-only) - channel_assignable = self.request.query_params.get('channel_assignable', None) - if channel_assignable == 'true': - # Include logos that are either: - # 1. Completely unused, OR - # 2. Used by channels (but may also be used by VOD) - # Exclude logos that are ONLY used by VOD content - - unused_condition = Q(channels__isnull=True) - channel_used_condition = Q(channels__isnull=False) - - # Add VOD conditions if models are available - try: - unused_condition &= Q(movie__isnull=True) & Q(series__isnull=True) - except: - pass - - # Combine: unused OR used by channels - filter_conditions = unused_condition | channel_used_condition - queryset = queryset.filter(filter_conditions).distinct() + # Logo is unused if it has no channels + queryset = queryset.filter(channels__isnull=True) # Filter by name name_filter = self.request.query_params.get('name', None) diff --git a/apps/channels/serializers.py b/apps/channels/serializers.py index 7058ced2..62c9650d 100644 --- a/apps/channels/serializers.py +++ b/apps/channels/serializers.py @@ -64,47 +64,15 @@ class LogoSerializer(serializers.ModelSerializer): return reverse("api:channels:logo-cache", args=[obj.id]) def get_channel_count(self, obj): - """Get the number of channels, movies, and series using this logo""" - channel_count = obj.channels.count() - - # Safely get movie count - try: - movie_count = obj.movie.count() if hasattr(obj, 'movie') else 0 - except AttributeError: - movie_count = 0 - - # Safely get series count - try: - series_count = obj.series.count() if hasattr(obj, 'series') else 0 - except AttributeError: - series_count = 0 - - return channel_count + movie_count + series_count + """Get the number of channels using this logo""" + return obj.channels.count() def get_is_used(self, obj): - """Check if this logo is used by any channels, movies, or series""" - # Check if used by channels - if obj.channels.exists(): - return True - - # Check if used by movies (handle case where VOD app might not be available) - try: - if hasattr(obj, 'movie') and obj.movie.exists(): - return True - except AttributeError: - pass - - # Check if used by series (handle case where VOD app might not be available) - try: - if hasattr(obj, 'series') and obj.series.exists(): - return 
True - except AttributeError: - pass - - return False + """Check if this logo is used by any channels""" + return obj.channels.exists() def get_channel_names(self, obj): - """Get the names of channels, movies, and series using this logo (limited to first 5)""" + """Get the names of channels using this logo (limited to first 5)""" names = [] # Get channel names @@ -112,28 +80,6 @@ class LogoSerializer(serializers.ModelSerializer): for channel in channels: names.append(f"Channel: {channel.name}") - # Get movie names (only if we haven't reached limit) - if len(names) < 5: - try: - if hasattr(obj, 'movie'): - remaining_slots = 5 - len(names) - movies = obj.movie.all()[:remaining_slots] - for movie in movies: - names.append(f"Movie: {movie.name}") - except AttributeError: - pass - - # Get series names (only if we haven't reached limit) - if len(names) < 5: - try: - if hasattr(obj, 'series'): - remaining_slots = 5 - len(names) - series = obj.series.all()[:remaining_slots] - for series_item in series: - names.append(f"Series: {series_item.name}") - except AttributeError: - pass - # Calculate total count for "more" message total_count = self.get_channel_count(obj) if total_count > 5: diff --git a/apps/output/views.py b/apps/output/views.py index f1557101..7dc013a1 100644 --- a/apps/output/views.py +++ b/apps/output/views.py @@ -2115,7 +2115,7 @@ def xc_get_vod_streams(request, user, category_id=None): None if not movie.logo else build_absolute_uri_with_port( request, - reverse("api:channels:logo-cache", args=[movie.logo.id]) + reverse("api:vod:vodlogo-cache", args=[movie.logo.id]) ) ), #'stream_icon': movie.logo.url if movie.logo else '', @@ -2185,7 +2185,7 @@ def xc_get_series(request, user, category_id=None): None if not series.logo else build_absolute_uri_with_port( request, - reverse("api:channels:logo-cache", args=[series.logo.id]) + reverse("api:vod:vodlogo-cache", args=[series.logo.id]) ) ), "plot": series.description or "", @@ -2378,7 +2378,7 @@ def xc_get_series_info(request, user, series_id): None if not series.logo else build_absolute_uri_with_port( request, - reverse("api:channels:logo-cache", args=[series.logo.id]) + reverse("api:vod:vodlogo-cache", args=[series.logo.id]) ) ), "plot": series_data['description'], @@ -2506,14 +2506,14 @@ def xc_get_vod_info(request, user, vod_id): None if not movie.logo else build_absolute_uri_with_port( request, - reverse("api:channels:logo-cache", args=[movie.logo.id]) + reverse("api:vod:vodlogo-cache", args=[movie.logo.id]) ) ), "movie_image": ( None if not movie.logo else build_absolute_uri_with_port( request, - reverse("api:channels:logo-cache", args=[movie.logo.id]) + reverse("api:vod:vodlogo-cache", args=[movie.logo.id]) ) ), 'description': movie_data.get('description', ''), diff --git a/apps/vod/api_urls.py b/apps/vod/api_urls.py index ffccc3f5..e897bd28 100644 --- a/apps/vod/api_urls.py +++ b/apps/vod/api_urls.py @@ -6,6 +6,7 @@ from .api_views import ( SeriesViewSet, VODCategoryViewSet, UnifiedContentViewSet, + VODLogoViewSet, ) app_name = 'vod' @@ -16,5 +17,6 @@ router.register(r'episodes', EpisodeViewSet, basename='episode') router.register(r'series', SeriesViewSet, basename='series') router.register(r'categories', VODCategoryViewSet, basename='vodcategory') router.register(r'all', UnifiedContentViewSet, basename='unified-content') +router.register(r'vodlogos', VODLogoViewSet, basename='vodlogo') urlpatterns = router.urls diff --git a/apps/vod/api_views.py b/apps/vod/api_views.py index 517038a6..4ff1f82b 100644 --- a/apps/vod/api_views.py 
+++ b/apps/vod/api_views.py @@ -3,16 +3,21 @@ from rest_framework.response import Response from rest_framework.decorators import action from rest_framework.filters import SearchFilter, OrderingFilter from rest_framework.pagination import PageNumberPagination +from rest_framework.permissions import AllowAny from django_filters.rest_framework import DjangoFilterBackend from django.shortcuts import get_object_or_404 +from django.http import StreamingHttpResponse, HttpResponse, FileResponse +from django.db.models import Q import django_filters import logging +import os +import requests from apps.accounts.permissions import ( Authenticated, permission_classes_by_action, ) from .models import ( - Series, VODCategory, Movie, Episode, + Series, VODCategory, Movie, Episode, VODLogo, M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation ) from .serializers import ( @@ -20,6 +25,7 @@ from .serializers import ( EpisodeSerializer, SeriesSerializer, VODCategorySerializer, + VODLogoSerializer, M3UMovieRelationSerializer, M3USeriesRelationSerializer, M3UEpisodeRelationSerializer @@ -564,7 +570,7 @@ class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet): logo.url as logo_url, 'movie' as content_type FROM vod_movie movies - LEFT JOIN dispatcharr_channels_logo logo ON movies.logo_id = logo.id + LEFT JOIN vod_vodlogo logo ON movies.logo_id = logo.id WHERE {where_conditions[0]} UNION ALL @@ -586,7 +592,7 @@ class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet): logo.url as logo_url, 'series' as content_type FROM vod_series series - LEFT JOIN dispatcharr_channels_logo logo ON series.logo_id = logo.id + LEFT JOIN vod_vodlogo logo ON series.logo_id = logo.id WHERE {where_conditions[1]} ) SELECT * FROM unified_content @@ -613,10 +619,10 @@ class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet): 'id': item_dict['logo_id'], 'name': item_dict['logo_name'], 'url': item_dict['logo_url'], - 'cache_url': f"/media/logo_cache/{item_dict['logo_id']}.png" if item_dict['logo_id'] else None, - 'channel_count': 0, # We don't need this for VOD - 'is_used': True, - 'channel_names': [] # We don't need this for VOD + 'cache_url': f"/api/vod/vodlogos/{item_dict['logo_id']}/cache/", + 'movie_count': 0, # We don't calculate this in raw SQL + 'series_count': 0, # We don't calculate this in raw SQL + 'is_used': True } # Convert to the format expected by frontend @@ -668,4 +674,173 @@ class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet): logger.error(f"Error in UnifiedContentViewSet.list(): {e}") import traceback logger.error(traceback.format_exc()) - return Response({'error': str(e)}, status=500) \ No newline at end of file + return Response({'error': str(e)}, status=500) + + +class VODLogoPagination(PageNumberPagination): + page_size = 100 + page_size_query_param = "page_size" + max_page_size = 1000 + + +class VODLogoViewSet(viewsets.ModelViewSet): + """ViewSet for VOD Logo management""" + queryset = VODLogo.objects.all() + serializer_class = VODLogoSerializer + pagination_class = VODLogoPagination + filter_backends = [SearchFilter, OrderingFilter] + search_fields = ['name', 'url'] + ordering_fields = ['name', 'id'] + ordering = ['name'] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + if self.action == 'cache': + return [AllowAny()] + return [Authenticated()] + + def get_queryset(self): + """Optimize queryset with prefetch and add filtering""" + queryset = VODLogo.objects.prefetch_related('movie', 'series').order_by('name') + + # 
Filter by specific IDs + ids = self.request.query_params.getlist('ids') + if ids: + try: + id_list = [int(id_str) for id_str in ids if id_str.isdigit()] + if id_list: + queryset = queryset.filter(id__in=id_list) + except (ValueError, TypeError): + queryset = VODLogo.objects.none() + + # Filter by usage + used_filter = self.request.query_params.get('used', None) + if used_filter == 'true': + # Return logos that are used by movies OR series + queryset = queryset.filter( + Q(movie__isnull=False) | Q(series__isnull=False) + ).distinct() + elif used_filter == 'false': + # Return logos that are NOT used by either + queryset = queryset.filter( + movie__isnull=True, + series__isnull=True + ) + elif used_filter == 'movies': + # Return logos that are used by movies (may also be used by series) + queryset = queryset.filter(movie__isnull=False).distinct() + elif used_filter == 'series': + # Return logos that are used by series (may also be used by movies) + queryset = queryset.filter(series__isnull=False).distinct() + + + # Filter by name + name_query = self.request.query_params.get('name', None) + if name_query: + queryset = queryset.filter(name__icontains=name_query) + + # No pagination mode + if self.request.query_params.get('no_pagination', 'false').lower() == 'true': + self.pagination_class = None + + return queryset + + @action(detail=True, methods=["get"], permission_classes=[AllowAny]) + def cache(self, request, pk=None): + """Streams the VOD logo file, whether it's local or remote.""" + logo = self.get_object() + + if not logo.url: + return HttpResponse(status=404) + + # Check if this is a local file path + if logo.url.startswith('/data/'): + # It's a local file + file_path = logo.url + if not os.path.exists(file_path): + logger.error(f"VOD logo file not found: {file_path}") + return HttpResponse(status=404) + + try: + return FileResponse(open(file_path, 'rb'), content_type='image/png') + except Exception as e: + logger.error(f"Error serving VOD logo file {file_path}: {str(e)}") + return HttpResponse(status=500) + else: + # It's a remote URL - proxy it + try: + response = requests.get(logo.url, stream=True, timeout=10) + response.raise_for_status() + + content_type = response.headers.get('Content-Type', 'image/png') + + return StreamingHttpResponse( + response.iter_content(chunk_size=8192), + content_type=content_type + ) + except requests.exceptions.RequestException as e: + logger.error(f"Error fetching remote VOD logo {logo.url}: {str(e)}") + return HttpResponse(status=404) + + @action(detail=False, methods=["delete"], url_path="bulk-delete") + def bulk_delete(self, request): + """Delete multiple VOD logos at once""" + logo_ids = request.data.get('logo_ids', []) + + if not logo_ids: + return Response( + {"error": "No logo IDs provided"}, + status=status.HTTP_400_BAD_REQUEST + ) + + try: + # Get logos to delete + logos = VODLogo.objects.filter(id__in=logo_ids) + deleted_count = logos.count() + + # Delete them + logos.delete() + + return Response({ + "deleted_count": deleted_count, + "message": f"Successfully deleted {deleted_count} VOD logo(s)" + }) + except Exception as e: + logger.error(f"Error during bulk VOD logo deletion: {str(e)}") + return Response( + {"error": str(e)}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR + ) + + @action(detail=False, methods=["post"]) + def cleanup(self, request): + """Delete all VOD logos that are not used by any movies or series""" + try: + # Find unused logos + unused_logos = VODLogo.objects.filter( + movie__isnull=True, + series__isnull=True + ) + + 
deleted_count = unused_logos.count() + logo_names = list(unused_logos.values_list('name', flat=True)) + + # Delete them + unused_logos.delete() + + logger.info(f"Cleaned up {deleted_count} unused VOD logos: {logo_names}") + + return Response({ + "deleted_count": deleted_count, + "deleted_logos": logo_names, + "message": f"Successfully deleted {deleted_count} unused VOD logo(s)" + }) + except Exception as e: + logger.error(f"Error during VOD logo cleanup: {str(e)}") + return Response( + {"error": str(e)}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR + ) + diff --git a/apps/vod/migrations/0003_vodlogo_alter_movie_logo_alter_series_logo.py b/apps/vod/migrations/0003_vodlogo_alter_movie_logo_alter_series_logo.py new file mode 100644 index 00000000..1bd2c418 --- /dev/null +++ b/apps/vod/migrations/0003_vodlogo_alter_movie_logo_alter_series_logo.py @@ -0,0 +1,264 @@ +# Generated by Django 5.2.4 on 2025-11-06 23:01 + +import django.db.models.deletion +from django.db import migrations, models + + +def migrate_vod_logos_forward(apps, schema_editor): + """ + Migrate VOD logos from the Logo table to the new VODLogo table. + This copies all logos referenced by movies or series to VODLogo. + Uses pure SQL for maximum performance. + """ + from django.db import connection + + print("\n" + "="*80) + print("Starting VOD logo migration...") + print("="*80) + + with connection.cursor() as cursor: + # Step 1: Copy unique logos from Logo table to VODLogo table + # Only copy logos that are used by movies or series + print("Copying logos to VODLogo table...") + cursor.execute(""" + INSERT INTO vod_vodlogo (name, url) + SELECT DISTINCT l.name, l.url + FROM dispatcharr_channels_logo l + WHERE l.id IN ( + SELECT DISTINCT logo_id FROM vod_movie WHERE logo_id IS NOT NULL + UNION + SELECT DISTINCT logo_id FROM vod_series WHERE logo_id IS NOT NULL + ) + ON CONFLICT (url) DO NOTHING + """) + print(f"Created VODLogo entries") + + # Step 2: Update movies to point to VODLogo IDs using JOIN + print("Updating movie references...") + cursor.execute(""" + UPDATE vod_movie m + SET logo_id = v.id + FROM dispatcharr_channels_logo l + INNER JOIN vod_vodlogo v ON l.url = v.url + WHERE m.logo_id = l.id + AND m.logo_id IS NOT NULL + """) + movie_count = cursor.rowcount + print(f"Updated {movie_count} movies with new VOD logo references") + + # Step 3: Update series to point to VODLogo IDs using JOIN + print("Updating series references...") + cursor.execute(""" + UPDATE vod_series s + SET logo_id = v.id + FROM dispatcharr_channels_logo l + INNER JOIN vod_vodlogo v ON l.url = v.url + WHERE s.logo_id = l.id + AND s.logo_id IS NOT NULL + """) + series_count = cursor.rowcount + print(f"Updated {series_count} series with new VOD logo references") + + print("="*80) + print("VOD logo migration completed successfully!") + print(f"Summary: Updated {movie_count} movies and {series_count} series") + print("="*80 + "\n") + + +def migrate_vod_logos_backward(apps, schema_editor): + """ + Reverse migration - moves VODLogos back to Logo table. + This recreates Logo entries for all VODLogos and updates Movie/Series references. 
+ """ + Logo = apps.get_model('dispatcharr_channels', 'Logo') + VODLogo = apps.get_model('vod', 'VODLogo') + Movie = apps.get_model('vod', 'Movie') + Series = apps.get_model('vod', 'Series') + + print("\n" + "="*80) + print("REVERSE: Moving VOD logos back to Logo table...") + print("="*80) + + # Get all VODLogos + vod_logos = VODLogo.objects.all() + print(f"Found {vod_logos.count()} VOD logos to reverse migrate") + + # Create Logo entries for each VODLogo + logos_to_create = [] + vod_to_logo_mapping = {} # VODLogo ID -> Logo ID + + for vod_logo in vod_logos: + # Check if a Logo with this URL already exists + existing_logo = Logo.objects.filter(url=vod_logo.url).first() + + if existing_logo: + # Logo already exists, just map to it + vod_to_logo_mapping[vod_logo.id] = existing_logo.id + print(f"Logo already exists for URL: {vod_logo.url[:50]}... (using existing)") + else: + # Create new Logo entry + new_logo = Logo(name=vod_logo.name, url=vod_logo.url) + logos_to_create.append(new_logo) + + # Bulk create new Logo entries + if logos_to_create: + print(f"Creating {len(logos_to_create)} new Logo entries...") + Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True) + print("Logo entries created") + + # Get the created Logo instances with their IDs + for vod_logo in vod_logos: + if vod_logo.id not in vod_to_logo_mapping: + try: + logo = Logo.objects.get(url=vod_logo.url) + vod_to_logo_mapping[vod_logo.id] = logo.id + except Logo.DoesNotExist: + print(f"Warning: Could not find Logo for URL: {vod_logo.url[:100]}...") + + print(f"Created mapping for {len(vod_to_logo_mapping)} VOD logos -> Logos") + + # Update movies to point back to Logo table + movie_count = 0 + for movie in Movie.objects.exclude(logo__isnull=True): + if movie.logo_id in vod_to_logo_mapping: + movie.logo_id = vod_to_logo_mapping[movie.logo_id] + movie.save(update_fields=['logo_id']) + movie_count += 1 + print(f"Updated {movie_count} movies to use Logo table") + + # Update series to point back to Logo table + series_count = 0 + for series in Series.objects.exclude(logo__isnull=True): + if series.logo_id in vod_to_logo_mapping: + series.logo_id = vod_to_logo_mapping[series.logo_id] + series.save(update_fields=['logo_id']) + series_count += 1 + print(f"Updated {series_count} series to use Logo table") + + # Delete VODLogos (they're now redundant) + vod_logo_count = vod_logos.count() + vod_logos.delete() + print(f"Deleted {vod_logo_count} VOD logos") + + print("="*80) + print("Reverse migration completed!") + print(f"Summary: Created/reused {len(vod_to_logo_mapping)} logos, updated {movie_count} movies and {series_count} series") + print("="*80 + "\n") + + +def cleanup_migrated_logos(apps, schema_editor): + """ + Delete Logo entries that were successfully migrated to VODLogo. + + Uses efficient JOIN-based approach with LEFT JOIN to exclude channel usage. 
+ """ + from django.db import connection + + print("\n" + "="*80) + print("Cleaning up migrated Logo entries...") + print("="*80) + + with connection.cursor() as cursor: + # Single efficient query using JOINs: + # - JOIN with vod_vodlogo to find migrated logos + # - LEFT JOIN with channels to find which aren't used + cursor.execute(""" + DELETE FROM dispatcharr_channels_logo + WHERE id IN ( + SELECT l.id + FROM dispatcharr_channels_logo l + INNER JOIN vod_vodlogo v ON l.url = v.url + LEFT JOIN dispatcharr_channels_channel c ON c.logo_id = l.id + WHERE c.id IS NULL + ) + """) + deleted_count = cursor.rowcount + + print(f"✓ Deleted {deleted_count} migrated Logo entries (not used by channels)") + print("="*80 + "\n") + + +class Migration(migrations.Migration): + + dependencies = [ + ('vod', '0002_add_last_seen_with_default'), + ('dispatcharr_channels', '0013_alter_logo_url'), # Ensure Logo table exists + ] + + operations = [ + # Step 1: Create the VODLogo model + migrations.CreateModel( + name='VODLogo', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('name', models.CharField(max_length=255)), + ('url', models.TextField(unique=True)), + ], + options={ + 'verbose_name': 'VOD Logo', + 'verbose_name_plural': 'VOD Logos', + }, + ), + + # Step 2: Remove foreign key constraints temporarily (so we can change the IDs) + # We need to find and drop the actual constraint names dynamically + migrations.RunSQL( + sql=[ + # Drop movie logo constraint (find it dynamically) + """ + DO $$ + DECLARE + constraint_name text; + BEGIN + SELECT conname INTO constraint_name + FROM pg_constraint + WHERE conrelid = 'vod_movie'::regclass + AND conname LIKE '%logo_id%fk%'; + + IF constraint_name IS NOT NULL THEN + EXECUTE 'ALTER TABLE vod_movie DROP CONSTRAINT ' || constraint_name; + END IF; + END $$; + """, + # Drop series logo constraint (find it dynamically) + """ + DO $$ + DECLARE + constraint_name text; + BEGIN + SELECT conname INTO constraint_name + FROM pg_constraint + WHERE conrelid = 'vod_series'::regclass + AND conname LIKE '%logo_id%fk%'; + + IF constraint_name IS NOT NULL THEN + EXECUTE 'ALTER TABLE vod_series DROP CONSTRAINT ' || constraint_name; + END IF; + END $$; + """, + ], + reverse_sql=[ + # The AlterField operations will recreate the constraints pointing to VODLogo, + # so we don't need to manually recreate them in reverse + migrations.RunSQL.noop, + ], + ), + + # Step 3: Migrate the data (this copies logos and updates references) + migrations.RunPython(migrate_vod_logos_forward, migrate_vod_logos_backward), + + # Step 4: Now we can safely alter the foreign keys to point to VODLogo + migrations.AlterField( + model_name='movie', + name='logo', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='movie', to='vod.vodlogo'), + ), + migrations.AlterField( + model_name='series', + name='logo', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='series', to='vod.vodlogo'), + ), + + # Step 5: Clean up migrated Logo entries + migrations.RunPython(cleanup_migrated_logos, migrations.RunPython.noop), + ] diff --git a/apps/vod/models.py b/apps/vod/models.py index f0825ba2..69aed808 100644 --- a/apps/vod/models.py +++ b/apps/vod/models.py @@ -4,10 +4,22 @@ from django.utils import timezone from django.contrib.contenttypes.fields import GenericForeignKey from django.contrib.contenttypes.models import ContentType from apps.m3u.models 
import M3UAccount -from apps.channels.models import Logo import uuid +class VODLogo(models.Model): + """Logo model specifically for VOD content (movies and series)""" + name = models.CharField(max_length=255) + url = models.TextField(unique=True) + + def __str__(self): + return self.name + + class Meta: + verbose_name = 'VOD Logo' + verbose_name_plural = 'VOD Logos' + + class VODCategory(models.Model): """Categories for organizing VODs (e.g., Action, Comedy, Drama)""" @@ -69,7 +81,7 @@ class Series(models.Model): year = models.IntegerField(blank=True, null=True) rating = models.CharField(max_length=10, blank=True, null=True) genre = models.CharField(max_length=255, blank=True, null=True) - logo = models.ForeignKey(Logo, on_delete=models.SET_NULL, null=True, blank=True, related_name='series') + logo = models.ForeignKey(VODLogo, on_delete=models.SET_NULL, null=True, blank=True, related_name='series') # Metadata IDs for deduplication - these should be globally unique when present tmdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="TMDB ID for metadata") @@ -108,7 +120,7 @@ class Movie(models.Model): rating = models.CharField(max_length=10, blank=True, null=True) genre = models.CharField(max_length=255, blank=True, null=True) duration_secs = models.IntegerField(blank=True, null=True, help_text="Duration in seconds") - logo = models.ForeignKey(Logo, on_delete=models.SET_NULL, null=True, blank=True, related_name='movie') + logo = models.ForeignKey(VODLogo, on_delete=models.SET_NULL, null=True, blank=True, related_name='movie') # Metadata IDs for deduplication - these should be globally unique when present tmdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="TMDB ID for metadata") diff --git a/apps/vod/serializers.py b/apps/vod/serializers.py index 5a672b33..7747cb88 100644 --- a/apps/vod/serializers.py +++ b/apps/vod/serializers.py @@ -1,12 +1,79 @@ from rest_framework import serializers +from django.urls import reverse from .models import ( - Series, VODCategory, Movie, Episode, + Series, VODCategory, Movie, Episode, VODLogo, M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation, M3UVODCategoryRelation ) -from apps.channels.serializers import LogoSerializer from apps.m3u.serializers import M3UAccountSerializer +class VODLogoSerializer(serializers.ModelSerializer): + cache_url = serializers.SerializerMethodField() + movie_count = serializers.SerializerMethodField() + series_count = serializers.SerializerMethodField() + is_used = serializers.SerializerMethodField() + item_names = serializers.SerializerMethodField() + + class Meta: + model = VODLogo + fields = ["id", "name", "url", "cache_url", "movie_count", "series_count", "is_used", "item_names"] + + def validate_url(self, value): + """Validate that the URL is unique for creation or update""" + if self.instance and self.instance.url == value: + return value + + if VODLogo.objects.filter(url=value).exists(): + raise serializers.ValidationError("A VOD logo with this URL already exists.") + + return value + + def create(self, validated_data): + """Handle logo creation with proper URL validation""" + return VODLogo.objects.create(**validated_data) + + def update(self, instance, validated_data): + """Handle logo updates""" + for attr, value in validated_data.items(): + setattr(instance, attr, value) + instance.save() + return instance + + def get_cache_url(self, obj): + request = self.context.get("request") + if request: + return request.build_absolute_uri( + 
reverse("api:vod:vodlogo-cache", args=[obj.id]) + ) + return reverse("api:vod:vodlogo-cache", args=[obj.id]) + + def get_movie_count(self, obj): + """Get the number of movies using this logo""" + return obj.movie.count() if hasattr(obj, 'movie') else 0 + + def get_series_count(self, obj): + """Get the number of series using this logo""" + return obj.series.count() if hasattr(obj, 'series') else 0 + + def get_is_used(self, obj): + """Check if this logo is used by any movies or series""" + return (hasattr(obj, 'movie') and obj.movie.exists()) or (hasattr(obj, 'series') and obj.series.exists()) + + def get_item_names(self, obj): + """Get the list of movies and series using this logo""" + names = [] + + if hasattr(obj, 'movie'): + for movie in obj.movie.all()[:10]: # Limit to 10 items for performance + names.append(f"Movie: {movie.name}") + + if hasattr(obj, 'series'): + for series in obj.series.all()[:10]: # Limit to 10 items for performance + names.append(f"Series: {series.name}") + + return names + + class M3UVODCategoryRelationSerializer(serializers.ModelSerializer): category = serializers.IntegerField(source="category.id") m3u_account = serializers.IntegerField(source="m3u_account.id") @@ -31,7 +98,7 @@ class VODCategorySerializer(serializers.ModelSerializer): ] class SeriesSerializer(serializers.ModelSerializer): - logo = LogoSerializer(read_only=True) + logo = VODLogoSerializer(read_only=True) episode_count = serializers.SerializerMethodField() class Meta: @@ -43,7 +110,7 @@ class SeriesSerializer(serializers.ModelSerializer): class MovieSerializer(serializers.ModelSerializer): - logo = LogoSerializer(read_only=True) + logo = VODLogoSerializer(read_only=True) class Meta: model = Movie @@ -225,7 +292,7 @@ class M3UEpisodeRelationSerializer(serializers.ModelSerializer): class EnhancedSeriesSerializer(serializers.ModelSerializer): """Enhanced serializer for series with provider information""" - logo = LogoSerializer(read_only=True) + logo = VODLogoSerializer(read_only=True) providers = M3USeriesRelationSerializer(source='m3u_relations', many=True, read_only=True) episode_count = serializers.SerializerMethodField() diff --git a/apps/vod/tasks.py b/apps/vod/tasks.py index bc8ad80f..e34e00e6 100644 --- a/apps/vod/tasks.py +++ b/apps/vod/tasks.py @@ -5,10 +5,9 @@ from django.db.models import Q from apps.m3u.models import M3UAccount from core.xtream_codes import Client as XtreamCodesClient from .models import ( - VODCategory, Series, Movie, Episode, + VODCategory, Series, Movie, Episode, VODLogo, M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation, M3UVODCategoryRelation ) -from apps.channels.models import Logo from datetime import datetime import logging import json @@ -403,7 +402,7 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N # Get existing logos existing_logos = { - logo.url: logo for logo in Logo.objects.filter(url__in=logo_urls) + logo.url: logo for logo in VODLogo.objects.filter(url__in=logo_urls) } if logo_urls else {} # Create missing logos @@ -411,20 +410,20 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N for logo_url in logo_urls: if logo_url not in existing_logos: movie_name = logo_url_to_name.get(logo_url, 'Unknown Movie') - logos_to_create.append(Logo(url=logo_url, name=movie_name)) + logos_to_create.append(VODLogo(url=logo_url, name=movie_name)) if logos_to_create: try: - Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True) + VODLogo.objects.bulk_create(logos_to_create, 
ignore_conflicts=True) # Refresh existing_logos with newly created ones new_logo_urls = [logo.url for logo in logos_to_create] newly_created = { - logo.url: logo for logo in Logo.objects.filter(url__in=new_logo_urls) + logo.url: logo for logo in VODLogo.objects.filter(url__in=new_logo_urls) } existing_logos.update(newly_created) - logger.info(f"Created {len(newly_created)} new logos for movies") + logger.info(f"Created {len(newly_created)} new VOD logos for movies") except Exception as e: - logger.warning(f"Failed to create logos: {e}") + logger.warning(f"Failed to create VOD logos: {e}") # Get existing movies based on our keys existing_movies = {} @@ -725,7 +724,7 @@ def process_series_batch(account, batch, categories, relations, scan_start_time= # Get existing logos existing_logos = { - logo.url: logo for logo in Logo.objects.filter(url__in=logo_urls) + logo.url: logo for logo in VODLogo.objects.filter(url__in=logo_urls) } if logo_urls else {} # Create missing logos @@ -733,20 +732,20 @@ def process_series_batch(account, batch, categories, relations, scan_start_time= for logo_url in logo_urls: if logo_url not in existing_logos: series_name = logo_url_to_name.get(logo_url, 'Unknown Series') - logos_to_create.append(Logo(url=logo_url, name=series_name)) + logos_to_create.append(VODLogo(url=logo_url, name=series_name)) if logos_to_create: try: - Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True) + VODLogo.objects.bulk_create(logos_to_create, ignore_conflicts=True) # Refresh existing_logos with newly created ones new_logo_urls = [logo.url for logo in logos_to_create] newly_created = { - logo.url: logo for logo in Logo.objects.filter(url__in=new_logo_urls) + logo.url: logo for logo in VODLogo.objects.filter(url__in=new_logo_urls) } existing_logos.update(newly_created) - logger.info(f"Created {len(newly_created)} new logos for series") + logger.info(f"Created {len(newly_created)} new VOD logos for series") except Exception as e: - logger.warning(f"Failed to create logos: {e}") + logger.warning(f"Failed to create VOD logos: {e}") # Get existing series based on our keys - same pattern as movies existing_series = {} @@ -1424,21 +1423,21 @@ def cleanup_orphaned_vod_content(stale_days=0, scan_start_time=None, account_id= stale_episode_count = stale_episode_relations.count() stale_episode_relations.delete() - # Clean up movies with no relations (orphaned) - only if no account_id specified (global cleanup) - if not account_id: - orphaned_movies = Movie.objects.filter(m3u_relations__isnull=True) - orphaned_movie_count = orphaned_movies.count() + # Clean up movies with no relations (orphaned) + # Safe to delete even during account-specific cleanup because if ANY account + # has a relation, m3u_relations will not be null + orphaned_movies = Movie.objects.filter(m3u_relations__isnull=True) + orphaned_movie_count = orphaned_movies.count() + if orphaned_movie_count > 0: + logger.info(f"Deleting {orphaned_movie_count} orphaned movies with no M3U relations") orphaned_movies.delete() - # Clean up series with no relations (orphaned) - only if no account_id specified (global cleanup) - orphaned_series = Series.objects.filter(m3u_relations__isnull=True) - orphaned_series_count = orphaned_series.count() + # Clean up series with no relations (orphaned) + orphaned_series = Series.objects.filter(m3u_relations__isnull=True) + orphaned_series_count = orphaned_series.count() + if orphaned_series_count > 0: + logger.info(f"Deleting {orphaned_series_count} orphaned series with no M3U relations") 
orphaned_series.delete() - else: - # When cleaning up for specific account, we don't remove orphaned content - # as other accounts might still reference it - orphaned_movie_count = 0 - orphaned_series_count = 0 # Episodes will be cleaned up via CASCADE when series are deleted @@ -1999,7 +1998,7 @@ def refresh_movie_advanced_data(m3u_movie_relation_id, force_refresh=False): def validate_logo_reference(obj, obj_type="object"): """ - Validate that a logo reference exists in the database. + Validate that a VOD logo reference exists in the database. If not, set it to None to prevent foreign key constraint violations. Args: @@ -2019,9 +2018,9 @@ def validate_logo_reference(obj, obj_type="object"): try: # Verify the logo exists in the database - Logo.objects.get(pk=obj.logo.pk) + VODLogo.objects.get(pk=obj.logo.pk) return True - except Logo.DoesNotExist: - logger.warning(f"Logo with ID {obj.logo.pk} does not exist in database for {obj_type} '{getattr(obj, 'name', 'Unknown')}', setting to None") + except VODLogo.DoesNotExist: + logger.warning(f"VOD Logo with ID {obj.logo.pk} does not exist in database for {obj_type} '{getattr(obj, 'name', 'Unknown')}', setting to None") obj.logo = None return False diff --git a/frontend/src/api.js b/frontend/src/api.js index 4281a533..8f5aeeeb 100644 --- a/frontend/src/api.js +++ b/frontend/src/api.js @@ -1788,6 +1788,77 @@ export default class API { } } + // VOD Logo Methods + static async getVODLogos(params = {}) { + try { + // Transform usage filter to match backend expectations + const apiParams = { ...params }; + if (apiParams.usage === 'used') { + apiParams.used = 'true'; + delete apiParams.usage; + } else if (apiParams.usage === 'unused') { + apiParams.used = 'false'; + delete apiParams.usage; + } else if (apiParams.usage === 'movies') { + apiParams.used = 'movies'; + delete apiParams.usage; + } else if (apiParams.usage === 'series') { + apiParams.used = 'series'; + delete apiParams.usage; + } + + const queryParams = new URLSearchParams(apiParams); + const response = await request( + `${host}/api/vod/vodlogos/?${queryParams.toString()}` + ); + + return response; + } catch (e) { + errorNotification('Failed to retrieve VOD logos', e); + throw e; + } + } + + static async deleteVODLogo(id) { + try { + await request(`${host}/api/vod/vodlogos/${id}/`, { + method: 'DELETE', + }); + + return true; + } catch (e) { + errorNotification('Failed to delete VOD logo', e); + throw e; + } + } + + static async deleteVODLogos(ids) { + try { + await request(`${host}/api/vod/vodlogos/bulk-delete/`, { + method: 'DELETE', + body: { logo_ids: ids }, + }); + + return true; + } catch (e) { + errorNotification('Failed to delete VOD logos', e); + throw e; + } + } + + static async cleanupUnusedVODLogos() { + try { + const response = await request(`${host}/api/vod/vodlogos/cleanup/`, { + method: 'POST', + }); + + return response; + } catch (e) { + errorNotification('Failed to cleanup unused VOD logos', e); + throw e; + } + } + static async getChannelProfiles() { try { const response = await request(`${host}/api/channels/profiles/`); diff --git a/frontend/src/components/tables/LogosTable.jsx b/frontend/src/components/tables/LogosTable.jsx index 0c4f32b4..b8b80506 100644 --- a/frontend/src/components/tables/LogosTable.jsx +++ b/frontend/src/components/tables/LogosTable.jsx @@ -626,25 +626,6 @@ const LogosTable = () => { }} > - - - Logos - - - ({data.length} logo{data.length !== 1 ? 
's' : ''}) - - - { + const [tableSize] = useLocalStorage('table-size', 'default'); + + const onDelete = useCallback(() => { + deleteLogo(row.original.id); + }, [row.original.id, deleteLogo]); + + const iconSize = + tableSize === 'default' ? 'sm' : tableSize === 'compact' ? 'xs' : 'md'; + + return ( + + + + + + + + ); +}; + +export default function VODLogosTable() { + const theme = useMantineTheme(); + + const { + logos, + totalCount, + isLoading, + fetchVODLogos, + deleteVODLogo, + deleteVODLogos, + cleanupUnusedVODLogos, + } = useVODLogosStore(); + + const [currentPage, setCurrentPage] = useState(1); + const [pageSize, setPageSize] = useState(25); + const [nameFilter, setNameFilter] = useState(''); + const [usageFilter, setUsageFilter] = useState('all'); + const [selectedRows, setSelectedRows] = useState(new Set()); + const [confirmDeleteOpen, setConfirmDeleteOpen] = useState(false); + const [deleteTarget, setDeleteTarget] = useState(null); + const [confirmCleanupOpen, setConfirmCleanupOpen] = useState(false); + const [paginationString, setPaginationString] = useState(''); + const [isCleaningUp, setIsCleaningUp] = useState(false); + + // Calculate unused logos count + const unusedLogosCount = useMemo(() => { + return logos.filter( + (logo) => logo.movie_count === 0 && logo.series_count === 0 + ).length; + }, [logos]); + useEffect(() => { + fetchVODLogos({ + page: currentPage, + page_size: pageSize, + name: nameFilter, + usage: usageFilter === 'all' ? undefined : usageFilter, + }); + }, [currentPage, pageSize, nameFilter, usageFilter, fetchVODLogos]); + + const handleSelectAll = useCallback( + (checked) => { + if (checked) { + setSelectedRows(new Set(logos.map((logo) => logo.id))); + } else { + setSelectedRows(new Set()); + } + }, + [logos] + ); + + const handleSelectRow = useCallback((id, checked) => { + setSelectedRows((prev) => { + const newSet = new Set(prev); + if (checked) { + newSet.add(id); + } else { + newSet.delete(id); + } + return newSet; + }); + }, []); + + const deleteLogo = useCallback((id) => { + setDeleteTarget([id]); + setConfirmDeleteOpen(true); + }, []); + + const handleDeleteSelected = useCallback(() => { + setDeleteTarget(Array.from(selectedRows)); + setConfirmDeleteOpen(true); + }, [selectedRows]); + + const handleConfirmDelete = async () => { + try { + if (deleteTarget.length === 1) { + await deleteVODLogo(deleteTarget[0]); + notifications.show({ + title: 'Success', + message: 'VOD logo deleted successfully', + color: 'green', + }); + } else { + await deleteVODLogos(deleteTarget); + notifications.show({ + title: 'Success', + message: `${deleteTarget.length} VOD logos deleted successfully`, + color: 'green', + }); + } + setSelectedRows(new Set()); + setConfirmDeleteOpen(false); + setDeleteTarget(null); + } catch (error) { + notifications.show({ + title: 'Error', + message: error.message || 'Failed to delete VOD logos', + color: 'red', + }); + } + }; + + const handleCleanupUnused = useCallback(() => { + setConfirmCleanupOpen(true); + }, []); + + const handleConfirmCleanup = async () => { + setIsCleaningUp(true); + try { + const result = await cleanupUnusedVODLogos(); + notifications.show({ + title: 'Success', + message: `Cleaned up ${result.deleted_count} unused VOD logos`, + color: 'green', + }); + setConfirmCleanupOpen(false); + } catch (error) { + notifications.show({ + title: 'Error', + message: error.message || 'Failed to cleanup unused VOD logos', + color: 'red', + }); + } finally { + setIsCleaningUp(false); + } + }; + + useEffect(() => { + setSelectedRows(new 
Set()); + }, [logos.length]); + + useEffect(() => { + const startItem = (currentPage - 1) * pageSize + 1; + const endItem = Math.min(currentPage * pageSize, totalCount); + setPaginationString(`${startItem} to ${endItem} of ${totalCount}`); + }, [currentPage, pageSize, totalCount]); + + const pageCount = useMemo(() => { + return Math.ceil(totalCount / pageSize); + }, [totalCount, pageSize]); + + const columns = useMemo( + () => [ + { + id: 'select', + header: ({ table }) => ( + 0 && selectedRows.size === logos.length + } + indeterminate={ + selectedRows.size > 0 && selectedRows.size < logos.length + } + onChange={(event) => handleSelectAll(event.currentTarget.checked)} + size="sm" + /> + ), + cell: ({ row }) => ( + + handleSelectRow(row.original.id, event.currentTarget.checked) + } + size="sm" + /> + ), + size: 50, + enableSorting: false, + }, + { + header: 'Preview', + accessorKey: 'cache_url', + size: 80, + enableSorting: false, + cell: ({ getValue, row }) => ( +
+ {row.original.name} { + e.target.style.transform = 'scale(1.5)'; + }} + onMouseLeave={(e) => { + e.target.style.transform = 'scale(1)'; + }} + /> +
+ ), + }, + { + header: 'Name', + accessorKey: 'name', + size: 250, + cell: ({ getValue }) => ( + + {getValue()} + + ), + }, + { + header: 'Usage', + accessorKey: 'usage', + size: 120, + cell: ({ row }) => { + const { movie_count, series_count, item_names } = row.original; + const totalUsage = movie_count + series_count; + + if (totalUsage === 0) { + return ( + + Unused + + ); + } + + // Build usage description + const usageParts = []; + if (movie_count > 0) { + usageParts.push( + `${movie_count} movie${movie_count !== 1 ? 's' : ''}` + ); + } + if (series_count > 0) { + usageParts.push(`${series_count} series`); + } + + const label = + usageParts.length === 1 + ? usageParts[0] + : `${totalUsage} item${totalUsage !== 1 ? 's' : ''}`; + + return ( + + + Used by {usageParts.join(' & ')}: + + {item_names && + item_names.map((name, index) => ( + + • {name} + + ))} + + } + multiline + width={220} + > + + {label} + + + ); + }, + }, + { + header: 'URL', + accessorKey: 'url', + grow: true, + cell: ({ getValue }) => ( + + + + {getValue()} + + + {getValue()?.startsWith('http') && ( + window.open(getValue(), '_blank')} + > + + + )} + + ), + }, + { + id: 'actions', + size: 80, + header: 'Actions', + enableSorting: false, + cell: ({ row }) => ( + + ), + }, + ], + [theme, deleteLogo, selectedRows, handleSelectAll, handleSelectRow, logos] + ); + + const renderHeaderCell = (header) => { + return ( + + {header.column.columnDef.header} + + ); + }; + + const table = useTable({ + data: logos, + columns, + manualPagination: true, + pageCount: pageCount, + allRowIds: logos.map((logo) => logo.id), + enablePagination: false, + enableRowSelection: true, + enableRowVirtualization: false, + renderTopToolbar: false, + manualSorting: false, + manualFiltering: false, + headerCellRenderFns: { + actions: renderHeaderCell, + cache_url: renderHeaderCell, + name: renderHeaderCell, + url: renderHeaderCell, + usage: renderHeaderCell, + }, + }); + + return ( + + + + {/* Top toolbar */} + + + { + const value = event.target.value; + setNameFilter(value); + }} + size="xs" + style={{ width: 200 }} + /> + { - formik.setFieldValue('channel_group_id', value); // Update Formik's state with the new value - }} - error={ - formik.errors.channel_group_id - ? formik.touched.channel_group_id - : '' - } - data={Object.values(channelGroups).map((option, index) => ({ - value: `${option.id}`, - label: option.name, - }))} - size="xs" - style={{ flex: 1 }} - /> */} - - setChannelGroupModalOpen(true)} - title="Create new group" - size="small" - variant="transparent" - style={{ marginBottom: 5 }} - > - - - - - - ({ - value: `${epg.id}`, - label: epg.name, - }))} - size="xs" - mb="xs" - /> - - {/* Filter Input */} - - setTvgFilter(event.currentTarget.value) - } - mb="xs" - size="xs" - /> - - - - - {({ index, style }) => ( -
- -
- )} -
-
- - -
- - - - - - - - ); -}; - -export default ChannelsForm; From 827501c9f71df0ccb02b46acaafde918d83ba5ad Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Fri, 14 Nov 2025 18:00:08 -0600 Subject: [PATCH 33/41] Better spacing for version text. --- frontend/src/components/forms/LoginForm.jsx | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/frontend/src/components/forms/LoginForm.jsx b/frontend/src/components/forms/LoginForm.jsx index 9d284319..353cd50e 100644 --- a/frontend/src/components/forms/LoginForm.jsx +++ b/frontend/src/components/forms/LoginForm.jsx @@ -145,12 +145,10 @@ const LoginForm = () => { @@ -258,9 +256,10 @@ const LoginForm = () => { v{version} From 25145283379cba32f0931dfaae41a08250a705b6 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Fri, 14 Nov 2025 19:57:59 -0600 Subject: [PATCH 34/41] Enhancement: Update channel state handling in ProxyServer and views to include 'STOPPING' state, ensuring proper cleanup and preventing reinitialization during shutdown. --- apps/proxy/ts_proxy/server.py | 13 +++++++------ apps/proxy/ts_proxy/views.py | 36 ++++++++++++++++++++++++++++------- 2 files changed, 36 insertions(+), 13 deletions(-) diff --git a/apps/proxy/ts_proxy/server.py b/apps/proxy/ts_proxy/server.py index cca827a9..61dac1e3 100644 --- a/apps/proxy/ts_proxy/server.py +++ b/apps/proxy/ts_proxy/server.py @@ -703,9 +703,10 @@ class ProxyServer: state = metadata.get(b'state', b'unknown').decode('utf-8') owner = metadata.get(b'owner', b'').decode('utf-8') - # States that indicate the channel is running properly + # States that indicate the channel is running properly or shutting down valid_states = [ChannelState.ACTIVE, ChannelState.WAITING_FOR_CLIENTS, - ChannelState.CONNECTING, ChannelState.BUFFERING, ChannelState.INITIALIZING] + ChannelState.CONNECTING, ChannelState.BUFFERING, ChannelState.INITIALIZING, + ChannelState.STOPPING] # If the channel is in a valid state, check if the owner is still active if state in valid_states: @@ -720,10 +721,10 @@ class ProxyServer: logger.warning(f"Detected zombie channel {channel_id} - owner {owner} is no longer active") self._clean_zombie_channel(channel_id, metadata) return False - elif state in [ChannelState.STOPPING, ChannelState.STOPPED, ChannelState.ERROR]: - # These states indicate the channel should be reinitialized - logger.info(f"Channel {channel_id} exists but in terminal state: {state}") - return True + elif state in [ChannelState.STOPPED, ChannelState.ERROR]: + # These terminal states indicate the channel should be cleaned up and reinitialized + logger.info(f"Channel {channel_id} in terminal state {state} - returning False to trigger cleanup") + return False else: # Unknown or initializing state, check how long it's been in this state if b'state_changed_at' in metadata: diff --git a/apps/proxy/ts_proxy/views.py b/apps/proxy/ts_proxy/views.py index c1b803ab..91f254a7 100644 --- a/apps/proxy/ts_proxy/views.py +++ b/apps/proxy/ts_proxy/views.py @@ -84,11 +84,18 @@ def stream_ts(request, channel_id): if state_field in metadata: channel_state = metadata[state_field].decode("utf-8") - if channel_state: - # Channel is being initialized or already active - no need for reinitialization + # Active/running states - channel is operational, don't reinitialize + if channel_state in [ + ChannelState.ACTIVE, + ChannelState.WAITING_FOR_CLIENTS, + ChannelState.BUFFERING, + ChannelState.INITIALIZING, + ChannelState.CONNECTING, + ChannelState.STOPPING, + ]: needs_initialization = False logger.debug( - f"[{client_id}] 
Channel {channel_id} already in state {channel_state}, skipping initialization" + f"[{client_id}] Channel {channel_id} in state {channel_state}, skipping initialization" ) # Special handling for initializing/connecting states @@ -98,19 +105,34 @@ def stream_ts(request, channel_id): ]: channel_initializing = True logger.debug( - f"[{client_id}] Channel {channel_id} is still initializing, client will wait for completion" + f"[{client_id}] Channel {channel_id} is still initializing, client will wait" ) + # Terminal states - channel needs cleanup before reinitialization + elif channel_state in [ + ChannelState.ERROR, + ChannelState.STOPPED, + ]: + needs_initialization = True + logger.info( + f"[{client_id}] Channel {channel_id} in terminal state {channel_state}, will reinitialize" + ) + # Unknown/empty state - check if owner is alive else: - # Only check for owner if channel is in a valid state owner_field = ChannelMetadataField.OWNER.encode("utf-8") if owner_field in metadata: owner = metadata[owner_field].decode("utf-8") owner_heartbeat_key = f"ts_proxy:worker:{owner}:heartbeat" if proxy_server.redis_client.exists(owner_heartbeat_key): - # Owner is still active, so we don't need to reinitialize + # Owner is still active with unknown state - don't reinitialize needs_initialization = False logger.debug( - f"[{client_id}] Channel {channel_id} has active owner {owner}" + f"[{client_id}] Channel {channel_id} has active owner {owner}, skipping init" + ) + else: + # Owner dead - needs reinitialization + needs_initialization = True + logger.warning( + f"[{client_id}] Channel {channel_id} owner {owner} is dead, will reinitialize" ) # Start initialization if needed From 0700cf29eab35e249e30cb053c5812f26eb2e5df Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Fri, 14 Nov 2025 20:13:40 -0600 Subject: [PATCH 35/41] Enhancement: Add copy link functionality to SeriesModal and VODModal, allowing users to easily copy episode and VOD links to clipboard with notifications for success or failure. 
--- frontend/src/components/SeriesModal.jsx | 79 ++++++++++++++++++++----- frontend/src/components/VODModal.jsx | 38 +++++++++++- 2 files changed, 99 insertions(+), 18 deletions(-) diff --git a/frontend/src/components/SeriesModal.jsx b/frontend/src/components/SeriesModal.jsx index dcfebf86..48677646 100644 --- a/frontend/src/components/SeriesModal.jsx +++ b/frontend/src/components/SeriesModal.jsx @@ -17,7 +17,9 @@ import { Table, Divider, } from '@mantine/core'; -import { Play } from 'lucide-react'; +import { Play, Copy } from 'lucide-react'; +import { notifications } from '@mantine/notifications'; +import { copyToClipboard } from '../utils'; import useVODStore from '../store/useVODStore'; import useVideoStore from '../store/useVideoStore'; import useSettingsStore from '../store/settings'; @@ -262,6 +264,39 @@ const SeriesModal = ({ series, opened, onClose }) => { showVideo(streamUrl, 'vod', episode); }; + const getEpisodeStreamUrl = (episode) => { + let streamUrl = `/proxy/vod/episode/${episode.uuid}`; + + // Add selected provider as query parameter if available + if (selectedProvider) { + // Use stream_id for most specific selection, fallback to account_id + if (selectedProvider.stream_id) { + streamUrl += `?stream_id=${encodeURIComponent(selectedProvider.stream_id)}`; + } else { + streamUrl += `?m3u_account_id=${selectedProvider.m3u_account.id}`; + } + } + + if (env_mode === 'dev') { + streamUrl = `${window.location.protocol}//${window.location.hostname}:5656${streamUrl}`; + } else { + streamUrl = `${window.location.origin}${streamUrl}`; + } + return streamUrl; + }; + + const handleCopyEpisodeLink = async (episode) => { + const streamUrl = getEpisodeStreamUrl(episode); + const success = await copyToClipboard(streamUrl); + notifications.show({ + title: success ? 'Link Copied!' : 'Copy Failed', + message: success + ? 'Episode link copied to clipboard' + : 'Failed to copy link to clipboard', + color: success ? 'green' : 'red', + }); + }; + const handleEpisodeRowClick = (episode) => { setExpandedEpisode(expandedEpisode === episode.id ? 
null : episode.id); }; @@ -611,20 +646,34 @@ const SeriesModal = ({ series, opened, onClose }) => { - 0 && !selectedProvider - } - onClick={(e) => { - e.stopPropagation(); - handlePlayEpisode(episode); - }} - > - - + + 0 && + !selectedProvider + } + onClick={(e) => { + e.stopPropagation(); + handlePlayEpisode(episode); + }} + > + + + { + e.stopPropagation(); + handleCopyEpisodeLink(episode); + }} + > + + + {expandedEpisode === episode.id && ( diff --git a/frontend/src/components/VODModal.jsx b/frontend/src/components/VODModal.jsx index 90fd3fad..7b1d34eb 100644 --- a/frontend/src/components/VODModal.jsx +++ b/frontend/src/components/VODModal.jsx @@ -13,7 +13,9 @@ import { Stack, Modal, } from '@mantine/core'; -import { Play } from 'lucide-react'; +import { Play, Copy } from 'lucide-react'; +import { notifications } from '@mantine/notifications'; +import { copyToClipboard } from '../utils'; import useVODStore from '../store/useVODStore'; import useVideoStore from '../store/useVideoStore'; import useSettingsStore from '../store/settings'; @@ -232,9 +234,9 @@ const VODModal = ({ vod, opened, onClose }) => { } }, [opened]); - const handlePlayVOD = () => { + const getStreamUrl = () => { const vodToPlay = detailedVOD || vod; - if (!vodToPlay) return; + if (!vodToPlay) return null; let streamUrl = `/proxy/vod/movie/${vod.uuid}`; @@ -253,9 +255,29 @@ const VODModal = ({ vod, opened, onClose }) => { } else { streamUrl = `${window.location.origin}${streamUrl}`; } + return streamUrl; + }; + + const handlePlayVOD = () => { + const streamUrl = getStreamUrl(); + if (!streamUrl) return; + const vodToPlay = detailedVOD || vod; showVideo(streamUrl, 'vod', vodToPlay); }; + const handleCopyLink = async () => { + const streamUrl = getStreamUrl(); + if (!streamUrl) return; + const success = await copyToClipboard(streamUrl); + notifications.show({ + title: success ? 'Link Copied!' : 'Copy Failed', + message: success + ? 'Stream link copied to clipboard' + : 'Failed to copy link to clipboard', + color: success ? 'green' : 'red', + }); + }; + // Helper to get embeddable YouTube URL const getEmbedUrl = (url) => { if (!url) return ''; @@ -486,6 +508,16 @@ const VODModal = ({ vod, opened, onClose }) => { Watch Trailer )} + From 6bd5958c3c3f1cd27deffbc2ee007e8b9e4f385a Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Sat, 15 Nov 2025 14:22:26 -0600 Subject: [PATCH 36/41] Enhancement: Improve channel shutdown logic in ProxyServer to handle connection timeouts and grace periods more effectively, ensuring proper channel management based on client connections. 
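Note: the new timeout handling effectively applies two different deadlines to a
channel that has zero clients, depending on whether it ever reached the ready
state. A condensed, purely illustrative sketch of that decision (the helper name
and argument list are hypothetical; shutdown_delay and grace_period stand for the
ConfigHelper.channel_shutdown_delay() and ConfigHelper.channel_init_grace_period()
values used in the diff):

    import time

    def should_stop_idle_channel(client_count, ready_time, start_time,
                                 shutdown_delay, grace_period, now=None):
        # ready_time: timestamp when the channel reached ready, or None
        # start_time: connection-attempt (or init) timestamp, or None
        now = time.time() if now is None else now
        if client_count > 0:
            return False                                  # clients present, keep running
        if ready_time is not None:
            return (now - ready_time) > shutdown_delay    # was ready, idle too long
        if start_time is not None:
            return (now - start_time) > grace_period      # never became ready
        return False                                      # no timestamps yet, give it time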
--- apps/proxy/ts_proxy/server.py | 68 ++++++++++++++++++++++++++--------- 1 file changed, 52 insertions(+), 16 deletions(-) diff --git a/apps/proxy/ts_proxy/server.py b/apps/proxy/ts_proxy/server.py index 61dac1e3..f962334d 100644 --- a/apps/proxy/ts_proxy/server.py +++ b/apps/proxy/ts_proxy/server.py @@ -947,7 +947,7 @@ class ProxyServer: # If in connecting or waiting_for_clients state, check grace period if channel_state in [ChannelState.CONNECTING, ChannelState.WAITING_FOR_CLIENTS]: - # Get connection ready time from metadata + # Get connection_ready_time from metadata (indicates if channel reached ready state) connection_ready_time = None if metadata and b'connection_ready_time' in metadata: try: @@ -955,17 +955,60 @@ class ProxyServer: except (ValueError, TypeError): pass - # If still connecting, give it more time - if channel_state == ChannelState.CONNECTING: - logger.debug(f"Channel {channel_id} still connecting - not checking for clients yet") - continue + if total_clients == 0: + # Check if we have a connection_attempt timestamp (set when CONNECTING starts) + connection_attempt_time = None + attempt_key = RedisKeys.connection_attempt(channel_id) + if self.redis_client: + attempt_value = self.redis_client.get(attempt_key) + if attempt_value: + try: + connection_attempt_time = float(attempt_value.decode('utf-8')) + except (ValueError, TypeError): + pass - # If waiting for clients, check grace period - if connection_ready_time: + # Also get init time as a fallback + init_time = None + if metadata and b'init_time' in metadata: + try: + init_time = float(metadata[b'init_time'].decode('utf-8')) + except (ValueError, TypeError): + pass + + # Use whichever timestamp we have (prefer connection_attempt as it's more recent) + start_time = connection_attempt_time or init_time + + if start_time: + # Check which timeout to apply based on channel lifecycle + if connection_ready_time: + # Already reached ready - use shutdown_delay + time_since_ready = time.time() - connection_ready_time + shutdown_delay = ConfigHelper.channel_shutdown_delay() + + if time_since_ready > shutdown_delay: + logger.warning( + f"Channel {channel_id} in {channel_state} state with 0 clients for {time_since_ready:.1f}s " + f"(after reaching ready, shutdown_delay: {shutdown_delay}s) - stopping channel" + ) + self.stop_channel(channel_id) + continue + else: + # Never reached ready - use grace_period timeout + time_since_start = time.time() - start_time + connecting_timeout = ConfigHelper.channel_init_grace_period() + + if time_since_start > connecting_timeout: + logger.warning( + f"Channel {channel_id} stuck in {channel_state} state for {time_since_start:.1f}s " + f"with no clients (timeout: {connecting_timeout}s) - stopping channel due to upstream issues" + ) + self.stop_channel(channel_id) + continue + elif connection_ready_time: + # We have clients now, but check grace period for state transition grace_period = ConfigHelper.channel_init_grace_period() time_since_ready = time.time() - connection_ready_time - # Add this debug log logger.debug(f"GRACE PERIOD CHECK: Channel {channel_id} in {channel_state} state, " f"time_since_ready={time_since_ready:.1f}s, grace_period={grace_period}s, " f"total_clients={total_clients}") @@ -974,16 +1017,9 @@ class ProxyServer: # Still within grace period logger.debug(f"Channel {channel_id} in grace period - {time_since_ready:.1f}s of {grace_period}s elapsed") continue - elif total_clients == 0: - # Grace period expired with no clients - logger.info(f"Grace period expired 
({time_since_ready:.1f}s > {grace_period}s) with no clients - stopping channel {channel_id}") - self.stop_channel(channel_id) else: - # Grace period expired but we have clients - mark channel as active + # Grace period expired with clients - mark channel as active logger.info(f"Grace period expired with {total_clients} clients - marking channel {channel_id} as active") - old_state = "unknown" - if metadata and b'state' in metadata: - old_state = metadata[b'state'].decode('utf-8') if self.update_channel_state(channel_id, ChannelState.ACTIVE, { "grace_period_ended_at": str(time.time()), "clients_at_activation": str(total_clients) From 1560afab97e866598d3aad8db4c83070480704a5 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Mon, 17 Nov 2025 17:33:10 -0600 Subject: [PATCH 37/41] Enhancement: Optimize bulk channel editing in ChannelViewSet by validating updates first and applying them in a single transaction, improving performance by about 50% and error handling. --- apps/channels/api_views.py | 103 ++++++++++++++++++++++++------------- 1 file changed, 67 insertions(+), 36 deletions(-) diff --git a/apps/channels/api_views.py b/apps/channels/api_views.py index fc5ea114..d2769870 100644 --- a/apps/channels/api_views.py +++ b/apps/channels/api_views.py @@ -435,8 +435,8 @@ class ChannelViewSet(viewsets.ModelViewSet): @action(detail=False, methods=["patch"], url_path="edit/bulk") def edit_bulk(self, request): """ - Bulk edit channels. - Expects a list of channels with their updates. + Bulk edit channels efficiently. + Validates all updates first, then applies in a single transaction. """ data = request.data if not isinstance(data, list): @@ -445,63 +445,94 @@ class ChannelViewSet(viewsets.ModelViewSet): status=status.HTTP_400_BAD_REQUEST, ) - updated_channels = [] - errors = [] + # Extract IDs and validate presence + channel_updates = {} + missing_ids = [] - for channel_data in data: + for i, channel_data in enumerate(data): channel_id = channel_data.get("id") if not channel_id: - errors.append({"error": "Channel ID is required"}) - continue + missing_ids.append(f"Item {i}: Channel ID is required") + else: + channel_updates[channel_id] = channel_data - try: - channel = Channel.objects.get(id=channel_id) + if missing_ids: + return Response( + {"errors": missing_ids}, + status=status.HTTP_400_BAD_REQUEST, + ) - # Handle channel_group_id properly - convert string to integer if needed - if 'channel_group_id' in channel_data: - group_id = channel_data['channel_group_id'] - if group_id is not None: - try: - channel_data['channel_group_id'] = int(group_id) - except (ValueError, TypeError): - channel_data['channel_group_id'] = None + # Fetch all channels at once (one query) + channels_dict = { + c.id: c for c in Channel.objects.filter(id__in=channel_updates.keys()) + } - # Use the serializer to validate and update - serializer = ChannelSerializer( - channel, data=channel_data, partial=True - ) + # Validate and prepare updates + validated_updates = [] + errors = [] - if serializer.is_valid(): - updated_channel = serializer.save() - updated_channels.append(updated_channel) - else: - errors.append({ - "channel_id": channel_id, - "errors": serializer.errors - }) + for channel_id, channel_data in channel_updates.items(): + channel = channels_dict.get(channel_id) - except Channel.DoesNotExist: + if not channel: errors.append({ "channel_id": channel_id, "error": "Channel not found" }) - except Exception as e: + continue + + # Handle channel_group_id conversion + if 'channel_group_id' in channel_data: + group_id 
= channel_data['channel_group_id'] + if group_id is not None: + try: + channel_data['channel_group_id'] = int(group_id) + except (ValueError, TypeError): + channel_data['channel_group_id'] = None + + # Validate with serializer + serializer = ChannelSerializer( + channel, data=channel_data, partial=True + ) + + if serializer.is_valid(): + validated_updates.append((channel, serializer.validated_data)) + else: errors.append({ "channel_id": channel_id, - "error": str(e) + "errors": serializer.errors }) if errors: return Response( - {"errors": errors, "updated_count": len(updated_channels)}, + {"errors": errors, "updated_count": len(validated_updates)}, status=status.HTTP_400_BAD_REQUEST, ) - # Serialize the updated channels for response - serialized_channels = ChannelSerializer(updated_channels, many=True).data + # Apply all updates in a transaction + with transaction.atomic(): + for channel, validated_data in validated_updates: + for key, value in validated_data.items(): + setattr(channel, key, value) + + # Single bulk_update query instead of individual saves + channels_to_update = [channel for channel, _ in validated_updates] + if channels_to_update: + Channel.objects.bulk_update( + channels_to_update, + fields=list(validated_updates[0][1].keys()), + batch_size=100 + ) + + # Return the updated objects (already in memory) + serialized_channels = ChannelSerializer( + [channel for channel, _ in validated_updates], + many=True, + context=self.get_serializer_context() + ).data return Response({ - "message": f"Successfully updated {len(updated_channels)} channels", + "message": f"Successfully updated {len(validated_updates)} channels", "channels": serialized_channels }) From 1b16df448284a936de56a5942316f12dcac40e77 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Mon, 17 Nov 2025 17:41:52 -0600 Subject: [PATCH 38/41] Enhancement: Improve batch EPG association in ChannelViewSet by adding validation for associations and implementing bulk updates, enhancing performance and error handling. 
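Note: this follows the same fetch-once / bulk_update pattern as the bulk channel
edit above. A minimal sketch of the core idea, shown in isolation (illustrative
only; the helper name is hypothetical, while the model and field names follow
the diff below):

    from django.db import transaction
    from apps.channels.models import Channel

    def apply_epg_updates(updates):
        # updates maps channel_id -> epg_data_id (or None to clear the mapping)
        channels = {c.id: c for c in Channel.objects.filter(id__in=updates)}  # one SELECT
        to_update = []
        for channel_id, epg_data_id in updates.items():
            channel = channels.get(channel_id)
            if channel is None:
                continue  # skip ids that no longer exist
            channel.epg_data_id = epg_data_id
            to_update.append(channel)
        if to_update:
            with transaction.atomic():
                # one UPDATE statement per batch instead of one save() per channel
                Channel.objects.bulk_update(to_update, fields=["epg_data_id"],
                                            batch_size=100)
        return len(to_update)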
--- apps/channels/api_views.py | 74 +++++++++++++++++++++++++------------- 1 file changed, 50 insertions(+), 24 deletions(-) diff --git a/apps/channels/api_views.py b/apps/channels/api_views.py index d2769870..bc920537 100644 --- a/apps/channels/api_views.py +++ b/apps/channels/api_views.py @@ -1070,8 +1070,15 @@ class ChannelViewSet(viewsets.ModelViewSet): def batch_set_epg(self, request): """Efficiently associate multiple channels with EPG data at once.""" associations = request.data.get("associations", []) - channels_updated = 0 - programs_refreshed = 0 + + if not associations: + return Response( + {"error": "associations list is required"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Extract channel IDs upfront + channel_updates = {} unique_epg_ids = set() for assoc in associations: @@ -1081,39 +1088,58 @@ class ChannelViewSet(viewsets.ModelViewSet): if not channel_id: continue - try: - # Get the channel - channel = Channel.objects.get(id=channel_id) + channel_updates[channel_id] = epg_data_id + if epg_data_id: + unique_epg_ids.add(epg_data_id) - # Set the EPG data - channel.epg_data_id = epg_data_id - channel.save(update_fields=["epg_data"]) - channels_updated += 1 + # Batch fetch all channels (single query) + channels_dict = { + c.id: c for c in Channel.objects.filter(id__in=channel_updates.keys()) + } - # Track unique EPG data IDs - if epg_data_id: - unique_epg_ids.add(epg_data_id) - - except Channel.DoesNotExist: + # Collect channels to update + channels_to_update = [] + for channel_id, epg_data_id in channel_updates.items(): + if channel_id not in channels_dict: logger.error(f"Channel with ID {channel_id} not found") - except Exception as e: - logger.error( - f"Error setting EPG data for channel {channel_id}: {str(e)}" + continue + + channel = channels_dict[channel_id] + channel.epg_data_id = epg_data_id + channels_to_update.append(channel) + + # Bulk update all channels (single query) + if channels_to_update: + with transaction.atomic(): + Channel.objects.bulk_update( + channels_to_update, + fields=["epg_data_id"], + batch_size=100 ) + channels_updated = len(channels_to_update) + # Trigger program refresh for unique EPG data IDs (skip dummy EPGs) from apps.epg.tasks import parse_programs_for_tvg_id from apps.epg.models import EPGData + # Batch fetch EPG data (single query) + epg_data_dict = { + epg.id: epg + for epg in EPGData.objects.filter(id__in=unique_epg_ids).select_related('epg_source') + } + + programs_refreshed = 0 for epg_id in unique_epg_ids: - try: - epg_data = EPGData.objects.select_related('epg_source').get(id=epg_id) - # Only refresh non-dummy EPG sources - if epg_data.epg_source.source_type != 'dummy': - parse_programs_for_tvg_id.delay(epg_id) - programs_refreshed += 1 - except EPGData.DoesNotExist: + epg_data = epg_data_dict.get(epg_id) + if not epg_data: logger.error(f"EPGData with ID {epg_id} not found") + continue + + # Only refresh non-dummy EPG sources + if epg_data.epg_source.source_type != 'dummy': + parse_programs_for_tvg_id.delay(epg_id) + programs_refreshed += 1 return Response( { From d8df8481363c81da61afff23203188c26e41618c Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Mon, 17 Nov 2025 17:57:04 -0600 Subject: [PATCH 39/41] Enhancement: Add success notification for channel updates in API, improving user feedback on successful operations. 
--- frontend/src/api.js | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/frontend/src/api.js b/frontend/src/api.js index 8f5aeeeb..fac95b34 100644 --- a/frontend/src/api.js +++ b/frontend/src/api.js @@ -462,7 +462,16 @@ export default class API { } ); - // Don't automatically update the store here - let the caller handle it + // Show success notification + if (response.message) { + notifications.show({ + title: 'Channels Updated', + message: response.message, + color: 'green', + autoClose: 4000, + }); + } + return response; } catch (e) { errorNotification('Failed to update channels', e); From afedce5cb21477c1be5faf86b539e72176a630f7 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Tue, 18 Nov 2025 09:27:22 -0600 Subject: [PATCH 40/41] Enhancement: Implement background profile refresh task with rate limiting to prevent provider bans during account profile updates. --- apps/m3u/tasks.py | 152 ++++++++++++++++++++++++++++------------ dispatcharr/settings.py | 5 ++ 2 files changed, 112 insertions(+), 45 deletions(-) diff --git a/apps/m3u/tasks.py b/apps/m3u/tasks.py index 15479379..8bd30361 100644 --- a/apps/m3u/tasks.py +++ b/apps/m3u/tasks.py @@ -1217,52 +1217,14 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False): auth_result = xc_client.authenticate() logger.debug(f"Authentication response: {auth_result}") - # Save account information to all active profiles + # Queue async profile refresh task to run in background + # This prevents any delay in the main refresh process try: - from apps.m3u.models import M3UAccountProfile - - profiles = M3UAccountProfile.objects.filter( - m3u_account=account, - is_active=True - ) - - # Update each profile with account information using its own transformed credentials - for profile in profiles: - try: - # Get transformed credentials for this specific profile - profile_url, profile_username, profile_password = get_transformed_credentials(account, profile) - - # Create a separate XC client for this profile's credentials - with XCClient( - profile_url, - profile_username, - profile_password, - user_agent_string - ) as profile_client: - # Authenticate with this profile's credentials - if profile_client.authenticate(): - # Get account information specific to this profile's credentials - profile_account_info = profile_client.get_account_info() - - # Merge with existing custom_properties if they exist - existing_props = profile.custom_properties or {} - existing_props.update(profile_account_info) - profile.custom_properties = existing_props - profile.save(update_fields=['custom_properties']) - - logger.info(f"Updated account information for profile '{profile.name}' with transformed credentials") - else: - logger.warning(f"Failed to authenticate profile '{profile.name}' with transformed credentials") - - except Exception as profile_error: - logger.error(f"Failed to update account information for profile '{profile.name}': {str(profile_error)}") - # Continue with other profiles even if one fails - - logger.info(f"Processed account information for {profiles.count()} profiles for account {account.name}") - - except Exception as save_error: - logger.warning(f"Failed to process profile account information: {str(save_error)}") - # Don't fail the whole process if saving account info fails + logger.info(f"Queueing background profile refresh for account {account.name}") + refresh_account_profiles.delay(account.id) + except Exception as e: + logger.warning(f"Failed to queue profile refresh task: {str(e)}") + # Don't fail 
the main refresh if profile refresh can't be queued except Exception as e: error_msg = f"Failed to authenticate with XC server: {str(e)}" @@ -2269,6 +2231,106 @@ def get_transformed_credentials(account, profile=None): return base_url, base_username, base_password +@shared_task +def refresh_account_profiles(account_id): + """Refresh account information for all active profiles of an XC account. + + This task runs asynchronously in the background after account refresh completes. + It includes rate limiting delays between profile authentications to prevent provider bans. + """ + from django.conf import settings + import time + + try: + account = M3UAccount.objects.get(id=account_id, is_active=True) + + if account.account_type != M3UAccount.Types.XC: + logger.debug(f"Account {account_id} is not XC type, skipping profile refresh") + return f"Account {account_id} is not an XtreamCodes account" + + from apps.m3u.models import M3UAccountProfile + + profiles = M3UAccountProfile.objects.filter( + m3u_account=account, + is_active=True + ) + + if not profiles.exists(): + logger.info(f"No active profiles found for account {account.name}") + return f"No active profiles for account {account_id}" + + # Get user agent for this account + try: + user_agent_string = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" + if account.user_agent_id: + from core.models import UserAgent + ua_obj = UserAgent.objects.get(id=account.user_agent_id) + if ua_obj and hasattr(ua_obj, "user_agent") and ua_obj.user_agent: + user_agent_string = ua_obj.user_agent + except Exception as e: + logger.warning(f"Error getting user agent, using fallback: {str(e)}") + logger.debug(f"Using user agent for profile refresh: {user_agent_string}") + # Get rate limiting delay from settings + profile_delay = getattr(settings, 'XC_PROFILE_REFRESH_DELAY', 2.5) + + profiles_updated = 0 + profiles_failed = 0 + + logger.info(f"Starting background refresh for {profiles.count()} profiles of account {account.name}") + + for idx, profile in enumerate(profiles): + try: + # Add delay between profiles to prevent rate limiting (except for first profile) + if idx > 0: + logger.info(f"Waiting {profile_delay}s before refreshing next profile to avoid rate limiting") + time.sleep(profile_delay) + + # Get transformed credentials for this specific profile + profile_url, profile_username, profile_password = get_transformed_credentials(account, profile) + + # Create a separate XC client for this profile's credentials + with XCClient( + profile_url, + profile_username, + profile_password, + user_agent_string + ) as profile_client: + # Authenticate with this profile's credentials + if profile_client.authenticate(): + # Get account information specific to this profile's credentials + profile_account_info = profile_client.get_account_info() + + # Merge with existing custom_properties if they exist + existing_props = profile.custom_properties or {} + existing_props.update(profile_account_info) + profile.custom_properties = existing_props + profile.save(update_fields=['custom_properties']) + + profiles_updated += 1 + logger.info(f"Updated account information for profile '{profile.name}' ({profiles_updated}/{profiles.count()})") + else: + profiles_failed += 1 + logger.warning(f"Failed to authenticate profile '{profile.name}' with transformed credentials") + + except Exception as profile_error: + profiles_failed += 1 + logger.error(f"Failed to update account information for profile '{profile.name}': {str(profile_error)}") + # Continue with other profiles even if 
one fails + + result_msg = f"Profile refresh complete for account {account.name}: {profiles_updated} updated, {profiles_failed} failed" + logger.info(result_msg) + return result_msg + + except M3UAccount.DoesNotExist: + error_msg = f"Account {account_id} not found" + logger.error(error_msg) + return error_msg + except Exception as e: + error_msg = f"Error refreshing profiles for account {account_id}: {str(e)}" + logger.error(error_msg) + return error_msg + + @shared_task def refresh_account_info(profile_id): """Refresh only the account information for a specific M3U profile.""" diff --git a/dispatcharr/settings.py b/dispatcharr/settings.py index a0c4fc84..d6c29dd9 100644 --- a/dispatcharr/settings.py +++ b/dispatcharr/settings.py @@ -51,6 +51,11 @@ EPG_BATCH_SIZE = 1000 # Number of records to process in a batch EPG_MEMORY_LIMIT = 512 # Memory limit in MB before forcing garbage collection EPG_ENABLE_MEMORY_MONITORING = True # Whether to monitor memory usage during processing +# XtreamCodes Rate Limiting Settings +# Delay between profile authentications when refreshing multiple profiles +# This prevents providers from temporarily banning users with many profiles +XC_PROFILE_REFRESH_DELAY = float(os.environ.get('XC_PROFILE_REFRESH_DELAY', '2.5')) # seconds between profile refreshes + # Database optimization settings DATABASE_STATEMENT_TIMEOUT = 300 # Seconds before timing out long-running queries DATABASE_CONN_MAX_AGE = ( From b6c3234e961d924a1773073546176a74615a51b1 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Tue, 18 Nov 2025 10:02:35 -0600 Subject: [PATCH 41/41] Enhancement: Improve zombie channel handling in ProxyServer by checking client connections and cleaning up orphaned metadata, ensuring better resource management and stability. --- apps/proxy/ts_proxy/server.py | 120 +++++++++++++++++++++++++++++++--- 1 file changed, 112 insertions(+), 8 deletions(-) diff --git a/apps/proxy/ts_proxy/server.py b/apps/proxy/ts_proxy/server.py index f962334d..0b07b4ae 100644 --- a/apps/proxy/ts_proxy/server.py +++ b/apps/proxy/ts_proxy/server.py @@ -719,6 +719,18 @@ class ProxyServer: else: # This is a zombie channel - owner is gone but metadata still exists logger.warning(f"Detected zombie channel {channel_id} - owner {owner} is no longer active") + + # Check if there are any clients connected + client_set_key = RedisKeys.clients(channel_id) + client_count = self.redis_client.scard(client_set_key) or 0 + + if client_count > 0: + logger.warning(f"Zombie channel {channel_id} has {client_count} clients - attempting ownership takeover") + # Could potentially take ownership here in the future + # For now, just clean it up to be safe + else: + logger.warning(f"Zombie channel {channel_id} has no clients - cleaning up") + self._clean_zombie_channel(channel_id, metadata) return False elif state in [ChannelState.STOPPED, ChannelState.ERROR]: @@ -940,6 +952,15 @@ class ProxyServer: if channel_id in self.client_managers: client_manager = self.client_managers[channel_id] total_clients = client_manager.get_total_client_count() + else: + # This can happen during reconnection attempts or crashes + # Check Redis directly for any connected clients + if self.redis_client: + client_set_key = RedisKeys.clients(channel_id) + total_clients = self.redis_client.scard(client_set_key) or 0 + + if total_clients == 0: + logger.warning(f"Channel {channel_id} is missing client_manager but we're the owner with 0 clients - will trigger cleanup") # Log client count periodically if time.time() % 30 < 1: # Every ~30 seconds @@ 
-1086,14 +1107,30 @@ class ProxyServer: continue # Check for local client count - if zero, clean up our local resources - if self.client_managers[channel_id].get_client_count() == 0: - # We're not the owner, and we have no local clients - clean up our resources - logger.debug(f"Non-owner cleanup: Channel {channel_id} has no local clients, cleaning up local resources") + if channel_id in self.client_managers: + if self.client_managers[channel_id].get_client_count() == 0: + # We're not the owner, and we have no local clients - clean up our resources + logger.debug(f"Non-owner cleanup: Channel {channel_id} has no local clients, cleaning up local resources") + self._cleanup_local_resources(channel_id) + else: + # This shouldn't happen, but clean up anyway + logger.warning(f"Non-owner cleanup: Channel {channel_id} has no client_manager entry, cleaning up local resources") self._cleanup_local_resources(channel_id) except Exception as e: logger.error(f"Error in cleanup thread: {e}", exc_info=True) + # Periodically check for orphaned channels (every 30 seconds) + if hasattr(self, '_last_orphan_check'): + if time.time() - self._last_orphan_check > 30: + try: + self._check_orphaned_metadata() + self._last_orphan_check = time.time() + except Exception as orphan_error: + logger.error(f"Error checking orphaned metadata: {orphan_error}", exc_info=True) + else: + self._last_orphan_check = time.time() + gevent.sleep(ConfigHelper.cleanup_check_interval()) # REPLACE: time.sleep(ConfigHelper.cleanup_check_interval()) thread = threading.Thread(target=cleanup_task, daemon=True) @@ -1115,10 +1152,6 @@ class ProxyServer: try: channel_id = key.decode('utf-8').split(':')[2] - # Skip channels we already have locally - if channel_id in self.stream_buffers: - continue - # Check if this channel has an owner owner = self.get_channel_owner(channel_id) @@ -1133,13 +1166,84 @@ class ProxyServer: else: # Orphaned channel with no clients - clean it up logger.info(f"Cleaning up orphaned channel {channel_id}") - self._clean_redis_keys(channel_id) + + # If we have it locally, stop it properly to clean up processes + if channel_id in self.stream_managers or channel_id in self.client_managers: + logger.info(f"Orphaned channel {channel_id} is local - calling stop_channel") + self.stop_channel(channel_id) + else: + # Just clean up Redis keys for remote channels + self._clean_redis_keys(channel_id) except Exception as e: logger.error(f"Error processing channel key {key}: {e}") except Exception as e: logger.error(f"Error checking orphaned channels: {e}") + def _check_orphaned_metadata(self): + """ + Check for metadata entries that have no owner and no clients. + This catches zombie channels that weren't cleaned up properly. 
+ """ + if not self.redis_client: + return + + try: + # Get all channel metadata keys + channel_pattern = "ts_proxy:channel:*:metadata" + channel_keys = self.redis_client.keys(channel_pattern) + + for key in channel_keys: + try: + channel_id = key.decode('utf-8').split(':')[2] + + # Get metadata first + metadata = self.redis_client.hgetall(key) + if not metadata: + # Empty metadata - clean it up + logger.warning(f"Found empty metadata for channel {channel_id} - cleaning up") + # If we have it locally, stop it properly + if channel_id in self.stream_managers or channel_id in self.client_managers: + self.stop_channel(channel_id) + else: + self._clean_redis_keys(channel_id) + continue + + # Get owner + owner = metadata.get(b'owner', b'').decode('utf-8') if b'owner' in metadata else '' + + # Check if owner is still alive + owner_alive = False + if owner: + owner_heartbeat_key = f"ts_proxy:worker:{owner}:heartbeat" + owner_alive = self.redis_client.exists(owner_heartbeat_key) + + # Check client count + client_set_key = RedisKeys.clients(channel_id) + client_count = self.redis_client.scard(client_set_key) or 0 + + # If no owner and no clients, clean it up + if not owner_alive and client_count == 0: + state = metadata.get(b'state', b'unknown').decode('utf-8') if b'state' in metadata else 'unknown' + logger.warning(f"Found orphaned metadata for channel {channel_id} (state: {state}, owner: {owner}, clients: {client_count}) - cleaning up") + + # If we have it locally, stop it properly to clean up transcode/proxy processes + if channel_id in self.stream_managers or channel_id in self.client_managers: + logger.info(f"Channel {channel_id} is local - calling stop_channel to clean up processes") + self.stop_channel(channel_id) + else: + # Just clean up Redis keys for remote channels + self._clean_redis_keys(channel_id) + elif not owner_alive and client_count > 0: + # Owner is gone but clients remain - just log for now + logger.warning(f"Found orphaned channel {channel_id} with {client_count} clients but no owner - may need ownership takeover") + + except Exception as e: + logger.error(f"Error processing metadata key {key}: {e}", exc_info=True) + + except Exception as e: + logger.error(f"Error checking orphaned metadata: {e}", exc_info=True) + def _clean_redis_keys(self, channel_id): """Clean up all Redis keys for a channel more efficiently""" # Release the channel, stream, and profile keys from the channel