From 7e5be6094f6fb9bc00ff3eee9e4e4b799c98ce3c Mon Sep 17 00:00:00 2001
From: Marlon Alkan
Date: Sun, 8 Jun 2025 16:45:34 +0200
Subject: [PATCH 001/288] docker: init: 02-postgres.sh: allow DB user to
 create new DB (for tests)

---
 docker/init/02-postgres.sh | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/docker/init/02-postgres.sh b/docker/init/02-postgres.sh
index 69a81dd4..7bb90671 100644
--- a/docker/init/02-postgres.sh
+++ b/docker/init/02-postgres.sh
@@ -57,13 +57,14 @@ if [ -z "$(ls -A $POSTGRES_DIR)" ]; then
     echo "Creating PostgreSQL database..."
     su - postgres -c "createdb -p ${POSTGRES_PORT} ${POSTGRES_DB}"

-    # Create user, set ownership, and grant privileges
+    # Create user, set ownership, and grant privileges, including privileges to create new databases
     echo "Creating PostgreSQL user..."
     su - postgres -c "psql -p ${POSTGRES_PORT} -d ${POSTGRES_DB}" <<

Date: Sun, 8 Jun 2025 16:47:00 +0200
Subject: [PATCH 002/288] apps: output: change body detection logic and add
 tests

---
 apps/output/tests.py | 23 +++++++++++++++++++++++
 apps/output/views.py |  5 +++--
 2 files changed, 26 insertions(+), 2 deletions(-)

diff --git a/apps/output/tests.py b/apps/output/tests.py
index e1e857ee..f87c8340 100644
--- a/apps/output/tests.py
+++ b/apps/output/tests.py
@@ -14,3 +14,26 @@ class OutputM3UTest(TestCase):
         self.assertEqual(response.status_code, 200)
         content = response.content.decode()
         self.assertIn("#EXTM3U", content)
+
+    def test_generate_m3u_response_post_empty_body(self):
+        """
+        Test that a POST request with an empty body returns 200 OK.
+        """
+        url = reverse('output:generate_m3u')
+
+        response = self.client.post(url, data=None, content_type='application/x-www-form-urlencoded')
+        content = response.content.decode()
+
+        self.assertEqual(response.status_code, 200, "POST with empty body should return 200 OK")
+        self.assertIn("#EXTM3U", content)
+
+    def test_generate_m3u_response_post_with_body(self):
+        """
+        Test that a POST request with a non-empty body returns 403 Forbidden.
+        """
+        url = reverse('output:generate_m3u')
+
+        response = self.client.post(url, data={'evilstring': 'muhahaha'})
+
+        self.assertEqual(response.status_code, 403, "POST with body should return 403 Forbidden")
+        self.assertIn("POST requests with body are not allowed, body is:", response.content.decode())
diff --git a/apps/output/views.py b/apps/output/views.py
index 2b18d185..ff02560c 100644
--- a/apps/output/views.py
+++ b/apps/output/views.py
@@ -18,9 +18,10 @@ def generate_m3u(request, profile_name=None):
     The stream URL now points to the new stream_view that uses StreamProfile.
     Supports both GET and POST methods for compatibility with IPTVSmarters.
    """
-    # Check if this is a POST request with data (which we don't want to allow)
+    # Check if this is a POST request and the body is not empty (which we don't want to allow)
     if request.method == "POST" and request.body:
-        return HttpResponseForbidden("POST requests with content are not allowed")
+        if request.body.decode() != '{}':
+            return HttpResponseForbidden("POST requests with body are not allowed, body is: {}".format(request.body.decode()))

     if profile_name is not None:
         channel_profile = ChannelProfile.objects.get(name=profile_name)
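Note: the check above reduces to a single predicate: a POST counts as
body-less when the payload is empty or is the literal '{}' that some IPTV
clients (such as IPTVSmarters) send. A minimal standalone sketch of that
predicate, outside Django; the helper name is illustrative and not part of
the patch:

    def post_body_allowed(body: bytes) -> bool:
        # Empty payloads and a literal '{}' are treated as body-less POSTs,
        # mirroring the check in generate_m3u above.
        return not body or body.decode() == '{}'

    assert post_body_allowed(b"")
    assert post_body_allowed(b"{}")
    assert not post_body_allowed(b"evilstring=muhahaha")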
From 0dbc5221b2d602323de9fa938f07f1b1a4363126 Mon Sep 17 00:00:00 2001
From: BigPanda
Date: Thu, 18 Sep 2025 21:20:47 +0100
Subject: [PATCH 003/288] Add 'UK' region

I'm not sure if this was intentional, but the UK seems to be missing from
the region list.
---
 frontend/src/constants.js | 1 +
 1 file changed, 1 insertion(+)

diff --git a/frontend/src/constants.js b/frontend/src/constants.js
index 78f374d4..528c5f04 100644
--- a/frontend/src/constants.js
+++ b/frontend/src/constants.js
@@ -303,6 +303,7 @@ export const REGION_CHOICES = [
   { value: 'tz', label: 'TZ' },
   { value: 'ua', label: 'UA' },
   { value: 'ug', label: 'UG' },
+  { value: 'uk', label: 'UK' },
   { value: 'um', label: 'UM' },
   { value: 'us', label: 'US' },
   { value: 'uy', label: 'UY' },

From ae8b85a3e2d019234d4b183fd1963a35d0a7c85f Mon Sep 17 00:00:00 2001
From: Ragchuck
Date: Wed, 15 Oct 2025 22:06:01 +0200
Subject: [PATCH 004/288] feat: added support for rtsp

---
 apps/m3u/tasks.py | 6 +++++-
 core/utils.py     | 4 ++--
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/apps/m3u/tasks.py b/apps/m3u/tasks.py
index 0ba595c5..52847e77 100644
--- a/apps/m3u/tasks.py
+++ b/apps/m3u/tasks.py
@@ -219,6 +219,10 @@ def fetch_m3u_lines(account, use_cache=False):
                 # Has HTTP URLs, might be a simple M3U without headers
                 is_valid_m3u = True
                 logger.info("Content validated as M3U: contains HTTP URLs")
+            elif any(line.strip().startswith('rtsp') for line in content_lines):
+                # Has RTSP URLs, might be a simple M3U without headers
+                is_valid_m3u = True
+                logger.info("Content validated as M3U: contains RTSP URLs")

             if not is_valid_m3u:
                 # Log what we actually received for debugging
@@ -1381,7 +1385,7 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False):
                     )
                     problematic_lines.append((line_index + 1, line[:200]))

-            elif extinf_data and line.startswith("http"):
+            elif extinf_data and (line.startswith("http") or line.startswith("rtsp")):
                 url_count += 1
                 # Associate URL with the last EXTINF line
                 extinf_data[-1]["url"] = line
diff --git a/core/utils.py b/core/utils.py
index 36ac5fef..da40d19c 100644
--- a/core/utils.py
+++ b/core/utils.py
@@ -377,8 +377,8 @@ def validate_flexible_url(value):
     import re

     # More flexible pattern for non-FQDN hostnames with paths
-    # Matches: http://hostname, http://hostname/, http://hostname:port/path/to/file.xml
-    non_fqdn_pattern = r'^https?://[a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?(\:[0-9]+)?(/[^\s]*)?$'
+    # Matches: http://hostname, http://hostname/, http://hostname:port/path/to/file.xml, rtp://192.168.2.1, rtsp://192.168.178.1
+    non_fqdn_pattern = r'^(rts?p|https?)://([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])|[0-9.]+)?(\:[0-9]+)?(/[^\s]*)?$'
     non_fqdn_match = re.match(non_fqdn_pattern, value)

     if non_fqdn_match:
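Note: the updated non_fqdn_pattern can be sanity-checked standalone against
the URL shapes named in the comment. A minimal sketch, with the pattern
copied verbatim from the hunk above:

    import re

    non_fqdn_pattern = r'^(rts?p|https?)://([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])|[0-9.]+)?(\:[0-9]+)?(/[^\s]*)?$'

    for url in (
        'http://hostname',
        'http://hostname:8080/path/to/file.xml',
        'rtp://192.168.2.1',
        'rtsp://192.168.178.1',
    ):
        # 'rts?p' matches both rtp:// and rtsp://; '[0-9.]+' admits bare
        # IPv4 hosts that the old hostname-only pattern rejected.
        assert re.match(non_fqdn_pattern, url), url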
From e6146e5243074e8b55544291a76096623fb9f576 Mon Sep 17 00:00:00 2001
From: SergeantPanda
Date: Tue, 4 Nov 2025 18:23:45 -0600
Subject: [PATCH 005/288] Bug fix: Reduce websocket message size when
 processing epgs. Also remove unnecessary console logging during epg refresh.

Fixes [Bug]: Page goes blank if sending too many requests / responses
Fixes #327
---
 apps/channels/api_views.py                   | 37 +++++++++++++------
 apps/epg/serializers.py                      |  9 +++--
 apps/epg/tasks.py                            |  6 +++
 frontend/src/WebSocket.jsx                   |  6 +--
 frontend/src/api.js                          |  8 +++-
 .../src/components/forms/ChannelBatch.jsx    | 34 +++++++++++------
 frontend/src/components/tables/EPGsTable.jsx |  2 -
 frontend/src/store/epgs.jsx                  | 35 ++++++++++++------
 8 files changed, 93 insertions(+), 44 deletions(-)

diff --git a/apps/channels/api_views.py b/apps/channels/api_views.py
index 862de7f9..c97d8255 100644
--- a/apps/channels/api_views.py
+++ b/apps/channels/api_views.py
@@ -987,19 +987,27 @@ class ChannelViewSet(viewsets.ModelViewSet):
             channel.epg_data = epg_data
             channel.save(update_fields=["epg_data"])

-            # Explicitly trigger program refresh for this EPG
-            from apps.epg.tasks import parse_programs_for_tvg_id
+            # Only trigger program refresh for non-dummy EPG sources
+            status_message = None
+            if epg_data.epg_source.source_type != 'dummy':
+                # Explicitly trigger program refresh for this EPG
+                from apps.epg.tasks import parse_programs_for_tvg_id

-            task_result = parse_programs_for_tvg_id.delay(epg_data.id)
+                task_result = parse_programs_for_tvg_id.delay(epg_data.id)

-            # Prepare response with task status info
-            status_message = "EPG refresh queued"
-            if task_result.result == "Task already running":
-                status_message = "EPG refresh already in progress"
+                # Prepare response with task status info
+                status_message = "EPG refresh queued"
+                if task_result.result == "Task already running":
+                    status_message = "EPG refresh already in progress"
+
+            # Build response message
+            message = f"EPG data set to {epg_data.tvg_id} for channel {channel.name}"
+            if status_message:
+                message += f". {status_message}"

             return Response(
                 {
-                    "message": f"EPG data set to {epg_data.tvg_id} for channel {channel.name}. 
{status_message}.", + "message": message, "channel": self.get_serializer(channel).data, "task_status": status_message, } @@ -1062,12 +1070,19 @@ class ChannelViewSet(viewsets.ModelViewSet): f"Error setting EPG data for channel {channel_id}: {str(e)}" ) - # Trigger program refresh for unique EPG data IDs + # Trigger program refresh for unique EPG data IDs (skip dummy EPGs) from apps.epg.tasks import parse_programs_for_tvg_id + from apps.epg.models import EPGData for epg_id in unique_epg_ids: - parse_programs_for_tvg_id.delay(epg_id) - programs_refreshed += 1 + try: + epg_data = EPGData.objects.select_related('epg_source').get(id=epg_id) + # Only refresh non-dummy EPG sources + if epg_data.epg_source.source_type != 'dummy': + parse_programs_for_tvg_id.delay(epg_id) + programs_refreshed += 1 + except EPGData.DoesNotExist: + logger.error(f"EPGData with ID {epg_id} not found") return Response( { diff --git a/apps/epg/serializers.py b/apps/epg/serializers.py index 3404cca9..bfb750fc 100644 --- a/apps/epg/serializers.py +++ b/apps/epg/serializers.py @@ -4,7 +4,7 @@ from .models import EPGSource, EPGData, ProgramData from apps.channels.models import Channel class EPGSourceSerializer(serializers.ModelSerializer): - epg_data_ids = serializers.SerializerMethodField() + epg_data_count = serializers.SerializerMethodField() read_only_fields = ['created_at', 'updated_at'] url = serializers.CharField( required=False, @@ -29,11 +29,12 @@ class EPGSourceSerializer(serializers.ModelSerializer): 'created_at', 'updated_at', 'custom_properties', - 'epg_data_ids' + 'epg_data_count' ] - def get_epg_data_ids(self, obj): - return list(obj.epgs.values_list('id', flat=True)) + def get_epg_data_count(self, obj): + """Return the count of EPG data entries instead of all IDs to prevent large payloads""" + return obj.epgs.count() class ProgramDataSerializer(serializers.ModelSerializer): class Meta: diff --git a/apps/epg/tasks.py b/apps/epg/tasks.py index 2028cd98..b6350686 100644 --- a/apps/epg/tasks.py +++ b/apps/epg/tasks.py @@ -1157,6 +1157,12 @@ def parse_programs_for_tvg_id(epg_id): epg = EPGData.objects.get(id=epg_id) epg_source = epg.epg_source + # Skip program parsing for dummy EPG sources - they don't have program data files + if epg_source.source_type == 'dummy': + logger.info(f"Skipping program parsing for dummy EPG source {epg_source.name} (ID: {epg_id})") + release_task_lock('parse_epg_programs', epg_id) + return + if not Channel.objects.filter(epg_data=epg).exists(): logger.info(f"No channels matched to EPG {epg.tvg_id}") release_task_lock('parse_epg_programs', epg_id) diff --git a/frontend/src/WebSocket.jsx b/frontend/src/WebSocket.jsx index 0f46b012..1c576d23 100644 --- a/frontend/src/WebSocket.jsx +++ b/frontend/src/WebSocket.jsx @@ -572,10 +572,10 @@ export const WebsocketProvider = ({ children }) => { // Update the store with progress information updateEPGProgress(parsedEvent.data); - // If we have source_id/account info, update the EPG source status - if (parsedEvent.data.source_id || parsedEvent.data.account) { + // If we have source/account info, update the EPG source status + if (parsedEvent.data.source || parsedEvent.data.account) { const sourceId = - parsedEvent.data.source_id || parsedEvent.data.account; + parsedEvent.data.source || parsedEvent.data.account; const epg = epgs[sourceId]; if (epg) { diff --git a/frontend/src/api.js b/frontend/src/api.js index 5b80a3f7..4281a533 100644 --- a/frontend/src/api.js +++ b/frontend/src/api.js @@ -2132,9 +2132,15 @@ export default class API { // If 
successful, requery channels to update UI if (response.success) { + // Build message based on whether EPG sources need refreshing + let message = `Updated ${response.channels_updated} channel${response.channels_updated !== 1 ? 's' : ''}`; + if (response.programs_refreshed > 0) { + message += `, refreshing ${response.programs_refreshed} EPG source${response.programs_refreshed !== 1 ? 's' : ''}`; + } + notifications.show({ title: 'EPG Association', - message: `Updated ${response.channels_updated} channels, refreshing ${response.programs_refreshed} EPG sources.`, + message: message, color: 'blue', }); diff --git a/frontend/src/components/forms/ChannelBatch.jsx b/frontend/src/components/forms/ChannelBatch.jsx index 42184f4d..5b9b705e 100644 --- a/frontend/src/components/forms/ChannelBatch.jsx +++ b/frontend/src/components/forms/ChannelBatch.jsx @@ -55,6 +55,7 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { const streamProfiles = useStreamProfilesStore((s) => s.profiles); const epgs = useEPGsStore((s) => s.epgs); + const tvgs = useEPGsStore((s) => s.tvgs); const fetchEPGs = useEPGsStore((s) => s.fetchEPGs); const [channelGroupModelOpen, setChannelGroupModalOpen] = useState(false); @@ -267,17 +268,28 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { } else { // Assign the selected dummy EPG const selectedEpg = epgs[selectedDummyEpgId]; - if ( - selectedEpg && - selectedEpg.epg_data_ids && - selectedEpg.epg_data_ids.length > 0 - ) { - const epgDataId = selectedEpg.epg_data_ids[0]; - const associations = channelIds.map((id) => ({ - channel_id: id, - epg_data_id: epgDataId, - })); - await API.batchSetEPG(associations); + if (selectedEpg && selectedEpg.epg_data_count > 0) { + // Convert to number for comparison since Select returns string + const epgSourceId = parseInt(selectedDummyEpgId, 10); + + // Check if we already have EPG data loaded in the store + let epgData = tvgs.find((data) => data.epg_source === epgSourceId); + + // If not in store, fetch it + if (!epgData) { + const epgDataList = await API.getEPGData(); + epgData = epgDataList.find( + (data) => data.epg_source === epgSourceId + ); + } + + if (epgData) { + const associations = channelIds.map((id) => ({ + channel_id: id, + epg_data_id: epgData.id, + })); + await API.batchSetEPG(associations); + } } } } diff --git a/frontend/src/components/tables/EPGsTable.jsx b/frontend/src/components/tables/EPGsTable.jsx index 53f9a72c..bb707984 100644 --- a/frontend/src/components/tables/EPGsTable.jsx +++ b/frontend/src/components/tables/EPGsTable.jsx @@ -181,8 +181,6 @@ const EPGsTable = () => { ); }; - console.log(epgs); - const columns = useMemo( //column definitions... () => [ diff --git a/frontend/src/store/epgs.jsx b/frontend/src/store/epgs.jsx index 6b3ffa81..e0576364 100644 --- a/frontend/src/store/epgs.jsx +++ b/frontend/src/store/epgs.jsx @@ -97,18 +97,29 @@ const useEPGsStore = create((set) => ({ ? 'success' // Mark as success when progress is 100% : state.epgs[data.source]?.status || 'idle'; - // Create a new epgs object with the updated source status - const newEpgs = { - ...state.epgs, - [data.source]: { - ...state.epgs[data.source], - status: sourceStatus, - last_message: - data.status === 'error' - ? 
data.error || 'Unknown error' - : state.epgs[data.source]?.last_message, - }, - }; + // Only update epgs object if status or last_message actually changed + // This prevents unnecessary re-renders on every progress update + const currentEpg = state.epgs[data.source]; + const newLastMessage = + data.status === 'error' + ? data.error || 'Unknown error' + : currentEpg?.last_message; + + let newEpgs = state.epgs; + if ( + currentEpg && + (currentEpg.status !== sourceStatus || + currentEpg.last_message !== newLastMessage) + ) { + newEpgs = { + ...state.epgs, + [data.source]: { + ...currentEpg, + status: sourceStatus, + last_message: newLastMessage, + }, + }; + } return { refreshProgress: newRefreshProgress, From 77e98508fbf79361f7e0abaa63dfeaf57130da9e Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Tue, 4 Nov 2025 19:08:31 -0600 Subject: [PATCH 006/288] Enhancement: Refactor get_host_and_port for smarter port selection when using reverse proxies. Fixes #618 --- apps/output/views.py | 57 +++++++++++++++++++++++++++++++------------- 1 file changed, 41 insertions(+), 16 deletions(-) diff --git a/apps/output/views.py b/apps/output/views.py index f36d02db..4ad11c39 100644 --- a/apps/output/views.py +++ b/apps/output/views.py @@ -2626,50 +2626,75 @@ def get_host_and_port(request): Returns (host, port) for building absolute URIs. - Prefers X-Forwarded-Host/X-Forwarded-Port (nginx). - Falls back to Host header. - - In dev, if missing, uses 5656 or 8000 as a guess. + - Returns None for port if using standard ports (80/443) to omit from URLs. + - In dev, uses 5656 as a guess if port cannot be determined. """ - # 1. Try X-Forwarded-Host (may include port) + # Determine the scheme first - needed for standard port detection + scheme = request.META.get("HTTP_X_FORWARDED_PROTO", request.scheme) + standard_port = "443" if scheme == "https" else "80" + + # 1. Try X-Forwarded-Host (may include port) - set by our nginx xfh = request.META.get("HTTP_X_FORWARDED_HOST") if xfh: if ":" in xfh: host, port = xfh.split(":", 1) + # Omit standard ports from URLs + return host, None if port == standard_port else port else: host = xfh + # Check for X-Forwarded-Port header port = request.META.get("HTTP_X_FORWARDED_PORT") - if port: - return host, port + if port: + # Omit standard ports from URLs + return host, None if port == standard_port else port + # No port found, assume standard port for the scheme + return host, None # 2. Try Host header raw_host = request.get_host() if ":" in raw_host: host, port = raw_host.split(":", 1) - return host, port + # Omit standard ports from URLs + return host, None if port == standard_port else port else: host = raw_host - # 3. Try X-Forwarded-Port + # 3. Try X-Forwarded-Port (external reverse proxy might set this) port = request.META.get("HTTP_X_FORWARDED_PORT") if port: - return host, port + # Omit standard ports from URLs + return host, None if port == standard_port else port # 4. Try SERVER_PORT from META port = request.META.get("SERVER_PORT") if port: - return host, port + # Omit standard ports from URLs + return host, None if port == standard_port else port - # 5. Dev fallback: guess port + # 5. Check if we're behind a reverse proxy (X-Forwarded-Proto or X-Forwarded-For present) + # If so, assume standard port for the scheme + if request.META.get("HTTP_X_FORWARDED_PROTO") or request.META.get("HTTP_X_FORWARDED_FOR"): + return host, None + + # 6. 
Dev fallback: guess port 5656 if os.environ.get("DISPATCHARR_ENV") == "dev" or host in ("localhost", "127.0.0.1"): - guess = "5656" - return host, guess + return host, "5656" - # 6. Fallback to scheme default - port = "443" if request.is_secure() else "9191" - return host, port + # 7. Final fallback: assume standard port for scheme (omit from URL) + return host, None def build_absolute_uri_with_port(request, path): + """ + Build an absolute URI with optional port. + Port is omitted from URL if None (standard port for scheme). + """ host, port = get_host_and_port(request) - scheme = request.scheme - return f"{scheme}://{host}:{port}{path}" + scheme = request.META.get("HTTP_X_FORWARDED_PROTO", request.scheme) + + if port: + return f"{scheme}://{host}:{port}{path}" + else: + return f"{scheme}://{host}{path}" def format_duration_hms(seconds): """ From 871f9f953ebae05b4ba87795529c8f2d704a3f28 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Wed, 5 Nov 2025 16:23:13 -0600 Subject: [PATCH 007/288] Another attempt for the get_host_and_port function to better handle port detection behind reverse proxies. --- apps/output/views.py | 45 +++++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/apps/output/views.py b/apps/output/views.py index 4ad11c39..f1557101 100644 --- a/apps/output/views.py +++ b/apps/output/views.py @@ -2638,16 +2638,25 @@ def get_host_and_port(request): if xfh: if ":" in xfh: host, port = xfh.split(":", 1) - # Omit standard ports from URLs - return host, None if port == standard_port else port + # Omit standard ports from URLs, or omit if port doesn't match standard for scheme + # (e.g., HTTPS but port is 9191 = behind external reverse proxy) + if port == standard_port: + return host, None + # If port doesn't match standard and X-Forwarded-Proto is set, likely behind external RP + if request.META.get("HTTP_X_FORWARDED_PROTO"): + host = xfh.split(":")[0] # Strip port, will check for proper port below + else: + return host, port else: host = xfh - # Check for X-Forwarded-Port header - port = request.META.get("HTTP_X_FORWARDED_PORT") - if port: - # Omit standard ports from URLs - return host, None if port == standard_port else port - # No port found, assume standard port for the scheme + + # Check for X-Forwarded-Port header (if we didn't already find a valid port) + port = request.META.get("HTTP_X_FORWARDED_PORT") + if port: + # Omit standard ports from URLs + return host, None if port == standard_port else port + # If X-Forwarded-Proto is set but no valid port, assume standard + if request.META.get("HTTP_X_FORWARDED_PROTO"): return host, None # 2. Try Host header @@ -2659,28 +2668,22 @@ def get_host_and_port(request): else: host = raw_host - # 3. Try X-Forwarded-Port (external reverse proxy might set this) - port = request.META.get("HTTP_X_FORWARDED_PORT") - if port: - # Omit standard ports from URLs - return host, None if port == standard_port else port + # 3. Check if we're behind a reverse proxy (X-Forwarded-Proto or X-Forwarded-For present) + # If so, assume standard port for the scheme (don't trust SERVER_PORT in this case) + if request.META.get("HTTP_X_FORWARDED_PROTO") or request.META.get("HTTP_X_FORWARDED_FOR"): + return host, None - # 4. Try SERVER_PORT from META + # 4. Try SERVER_PORT from META (only if NOT behind reverse proxy) port = request.META.get("SERVER_PORT") if port: # Omit standard ports from URLs return host, None if port == standard_port else port - # 5. 
Check if we're behind a reverse proxy (X-Forwarded-Proto or X-Forwarded-For present) - # If so, assume standard port for the scheme - if request.META.get("HTTP_X_FORWARDED_PROTO") or request.META.get("HTTP_X_FORWARDED_FOR"): - return host, None - - # 6. Dev fallback: guess port 5656 + # 5. Dev fallback: guess port 5656 if os.environ.get("DISPATCHARR_ENV") == "dev" or host in ("localhost", "127.0.0.1"): return host, "5656" - # 7. Final fallback: assume standard port for scheme (omit from URL) + # 6. Final fallback: assume standard port for scheme (omit from URL) return host, None def build_absolute_uri_with_port(request, path): From ed86eb22744e08faf05f55a48fbc6abf3039e773 Mon Sep 17 00:00:00 2001 From: 0x68732f6e69622fff <191755490+0x68732f6e69622fff@users.noreply.github.com> Date: Thu, 6 Nov 2025 14:29:13 +0000 Subject: [PATCH 008/288] Ensures that in the groups section of M3U playlist management, the EPG Source dropdown for the 'Force EPG Source' option displays entries sorted alphabetically by name --- .../src/components/forms/LiveGroupFilter.jsx | 28 ++++++++++--------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/frontend/src/components/forms/LiveGroupFilter.jsx b/frontend/src/components/forms/LiveGroupFilter.jsx index 71b412b4..3497957b 100644 --- a/frontend/src/components/forms/LiveGroupFilter.jsx +++ b/frontend/src/components/forms/LiveGroupFilter.jsx @@ -1172,19 +1172,21 @@ const LiveGroupFilter = ({ }} data={[ { value: '0', label: 'No EPG (Disabled)' }, - ...epgSources.map((source) => ({ - value: source.id.toString(), - label: `${source.name} (${ - source.source_type === 'dummy' - ? 'Dummy' - : source.source_type === 'xmltv' - ? 'XMLTV' - : source.source_type === - 'schedules_direct' - ? 'Schedules Direct' - : source.source_type - })`, - })), + ...[...epgSources] + .sort((a, b) => a.name.localeCompare(b.name)) + .map((source) => ({ + value: source.id.toString(), + label: `${source.name} (${ + source.source_type === 'dummy' + ? 'Dummy' + : source.source_type === 'xmltv' + ? 'XMLTV' + : source.source_type === + 'schedules_direct' + ? 
'Schedules Direct' + : source.source_type + })`, + })), ]} clearable searchable From da628705dfc34bddcbddd24e344ba3795cda5204 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Fri, 7 Nov 2025 13:19:18 -0600 Subject: [PATCH 009/288] Separate VOD and channel logos into distinct tables with dedicated management UI - Created VODLogo model for movies/series, separate from Logo (channels only) - Added database migration to create vodlogo table and migrate existing VOD logos - Implemented VODLogoViewSet with pagination, filtering (used/unused/movies/series), and bulk operations - Built VODLogosTable component with server-side pagination matching channel logos styling - Added VOD logos tab with on-demand loading to Logos page - Fixed orphaned VOD content cleanup to always remove unused entries - Removed redundant channel_assignable filtering from channel logos --- apps/channels/api_views.py | 86 +-- apps/channels/serializers.py | 64 +- apps/output/views.py | 10 +- apps/vod/api_urls.py | 2 + apps/vod/api_views.py | 191 +++++- ...logo_alter_movie_logo_alter_series_logo.py | 264 +++++++++ apps/vod/models.py | 18 +- apps/vod/serializers.py | 77 ++- apps/vod/tasks.py | 59 +- frontend/src/api.js | 71 +++ frontend/src/components/tables/LogosTable.jsx | 19 - .../src/components/tables/VODLogosTable.jsx | 556 ++++++++++++++++++ frontend/src/hooks/useSmartLogos.jsx | 5 +- frontend/src/pages/Logos.jsx | 89 ++- frontend/src/store/logos.jsx | 26 +- frontend/src/store/vodLogos.jsx | 128 ++++ 16 files changed, 1423 insertions(+), 242 deletions(-) create mode 100644 apps/vod/migrations/0003_vodlogo_alter_movie_logo_alter_series_logo.py create mode 100644 frontend/src/components/tables/VODLogosTable.jsx create mode 100644 frontend/src/store/vodLogos.jsx diff --git a/apps/channels/api_views.py b/apps/channels/api_views.py index c97d8255..fc5ea114 100644 --- a/apps/channels/api_views.py +++ b/apps/channels/api_views.py @@ -1247,7 +1247,7 @@ class CleanupUnusedLogosAPIView(APIView): return [Authenticated()] @swagger_auto_schema( - operation_description="Delete all logos that are not used by any channels, movies, or series", + operation_description="Delete all channel logos that are not used by any channels", request_body=openapi.Schema( type=openapi.TYPE_OBJECT, properties={ @@ -1261,24 +1261,11 @@ class CleanupUnusedLogosAPIView(APIView): responses={200: "Cleanup completed"}, ) def post(self, request): - """Delete all logos with no channel, movie, or series associations""" + """Delete all channel logos with no channel associations""" delete_files = request.data.get("delete_files", False) - # Find logos that are not used by channels, movies, or series - filter_conditions = Q(channels__isnull=True) - - # Add VOD conditions if models are available - try: - filter_conditions &= Q(movie__isnull=True) - except: - pass - - try: - filter_conditions &= Q(series__isnull=True) - except: - pass - - unused_logos = Logo.objects.filter(filter_conditions) + # Find logos that are not used by any channels + unused_logos = Logo.objects.filter(channels__isnull=True) deleted_count = unused_logos.count() logo_names = list(unused_logos.values_list('name', flat=True)) local_files_deleted = 0 @@ -1350,13 +1337,6 @@ class LogoViewSet(viewsets.ModelViewSet): # Start with basic prefetch for channels queryset = Logo.objects.prefetch_related('channels').order_by('name') - # Try to prefetch VOD relations if available - try: - queryset = queryset.prefetch_related('movie', 'series') - except: - # VOD app might not be available, continue without VOD 
prefetch - pass - # Filter by specific IDs ids = self.request.query_params.getlist('ids') if ids: @@ -1369,62 +1349,14 @@ class LogoViewSet(viewsets.ModelViewSet): pass # Invalid IDs, return empty queryset queryset = Logo.objects.none() - # Filter by usage - now includes VOD content + # Filter by usage used_filter = self.request.query_params.get('used', None) if used_filter == 'true': - # Logo is used if it has any channels, movies, or series - filter_conditions = Q(channels__isnull=False) - - # Add VOD conditions if models are available - try: - filter_conditions |= Q(movie__isnull=False) - except: - pass - - try: - filter_conditions |= Q(series__isnull=False) - except: - pass - - queryset = queryset.filter(filter_conditions).distinct() - + # Logo is used if it has any channels + queryset = queryset.filter(channels__isnull=False).distinct() elif used_filter == 'false': - # Logo is unused if it has no channels, movies, or series - filter_conditions = Q(channels__isnull=True) - - # Add VOD conditions if models are available - try: - filter_conditions &= Q(movie__isnull=True) - except: - pass - - try: - filter_conditions &= Q(series__isnull=True) - except: - pass - - queryset = queryset.filter(filter_conditions) - - # Filter for channel assignment (unused + channel-used, exclude VOD-only) - channel_assignable = self.request.query_params.get('channel_assignable', None) - if channel_assignable == 'true': - # Include logos that are either: - # 1. Completely unused, OR - # 2. Used by channels (but may also be used by VOD) - # Exclude logos that are ONLY used by VOD content - - unused_condition = Q(channels__isnull=True) - channel_used_condition = Q(channels__isnull=False) - - # Add VOD conditions if models are available - try: - unused_condition &= Q(movie__isnull=True) & Q(series__isnull=True) - except: - pass - - # Combine: unused OR used by channels - filter_conditions = unused_condition | channel_used_condition - queryset = queryset.filter(filter_conditions).distinct() + # Logo is unused if it has no channels + queryset = queryset.filter(channels__isnull=True) # Filter by name name_filter = self.request.query_params.get('name', None) diff --git a/apps/channels/serializers.py b/apps/channels/serializers.py index 7058ced2..62c9650d 100644 --- a/apps/channels/serializers.py +++ b/apps/channels/serializers.py @@ -64,47 +64,15 @@ class LogoSerializer(serializers.ModelSerializer): return reverse("api:channels:logo-cache", args=[obj.id]) def get_channel_count(self, obj): - """Get the number of channels, movies, and series using this logo""" - channel_count = obj.channels.count() - - # Safely get movie count - try: - movie_count = obj.movie.count() if hasattr(obj, 'movie') else 0 - except AttributeError: - movie_count = 0 - - # Safely get series count - try: - series_count = obj.series.count() if hasattr(obj, 'series') else 0 - except AttributeError: - series_count = 0 - - return channel_count + movie_count + series_count + """Get the number of channels using this logo""" + return obj.channels.count() def get_is_used(self, obj): - """Check if this logo is used by any channels, movies, or series""" - # Check if used by channels - if obj.channels.exists(): - return True - - # Check if used by movies (handle case where VOD app might not be available) - try: - if hasattr(obj, 'movie') and obj.movie.exists(): - return True - except AttributeError: - pass - - # Check if used by series (handle case where VOD app might not be available) - try: - if hasattr(obj, 'series') and obj.series.exists(): - return 
True - except AttributeError: - pass - - return False + """Check if this logo is used by any channels""" + return obj.channels.exists() def get_channel_names(self, obj): - """Get the names of channels, movies, and series using this logo (limited to first 5)""" + """Get the names of channels using this logo (limited to first 5)""" names = [] # Get channel names @@ -112,28 +80,6 @@ class LogoSerializer(serializers.ModelSerializer): for channel in channels: names.append(f"Channel: {channel.name}") - # Get movie names (only if we haven't reached limit) - if len(names) < 5: - try: - if hasattr(obj, 'movie'): - remaining_slots = 5 - len(names) - movies = obj.movie.all()[:remaining_slots] - for movie in movies: - names.append(f"Movie: {movie.name}") - except AttributeError: - pass - - # Get series names (only if we haven't reached limit) - if len(names) < 5: - try: - if hasattr(obj, 'series'): - remaining_slots = 5 - len(names) - series = obj.series.all()[:remaining_slots] - for series_item in series: - names.append(f"Series: {series_item.name}") - except AttributeError: - pass - # Calculate total count for "more" message total_count = self.get_channel_count(obj) if total_count > 5: diff --git a/apps/output/views.py b/apps/output/views.py index f1557101..7dc013a1 100644 --- a/apps/output/views.py +++ b/apps/output/views.py @@ -2115,7 +2115,7 @@ def xc_get_vod_streams(request, user, category_id=None): None if not movie.logo else build_absolute_uri_with_port( request, - reverse("api:channels:logo-cache", args=[movie.logo.id]) + reverse("api:vod:vodlogo-cache", args=[movie.logo.id]) ) ), #'stream_icon': movie.logo.url if movie.logo else '', @@ -2185,7 +2185,7 @@ def xc_get_series(request, user, category_id=None): None if not series.logo else build_absolute_uri_with_port( request, - reverse("api:channels:logo-cache", args=[series.logo.id]) + reverse("api:vod:vodlogo-cache", args=[series.logo.id]) ) ), "plot": series.description or "", @@ -2378,7 +2378,7 @@ def xc_get_series_info(request, user, series_id): None if not series.logo else build_absolute_uri_with_port( request, - reverse("api:channels:logo-cache", args=[series.logo.id]) + reverse("api:vod:vodlogo-cache", args=[series.logo.id]) ) ), "plot": series_data['description'], @@ -2506,14 +2506,14 @@ def xc_get_vod_info(request, user, vod_id): None if not movie.logo else build_absolute_uri_with_port( request, - reverse("api:channels:logo-cache", args=[movie.logo.id]) + reverse("api:vod:vodlogo-cache", args=[movie.logo.id]) ) ), "movie_image": ( None if not movie.logo else build_absolute_uri_with_port( request, - reverse("api:channels:logo-cache", args=[movie.logo.id]) + reverse("api:vod:vodlogo-cache", args=[movie.logo.id]) ) ), 'description': movie_data.get('description', ''), diff --git a/apps/vod/api_urls.py b/apps/vod/api_urls.py index ffccc3f5..e897bd28 100644 --- a/apps/vod/api_urls.py +++ b/apps/vod/api_urls.py @@ -6,6 +6,7 @@ from .api_views import ( SeriesViewSet, VODCategoryViewSet, UnifiedContentViewSet, + VODLogoViewSet, ) app_name = 'vod' @@ -16,5 +17,6 @@ router.register(r'episodes', EpisodeViewSet, basename='episode') router.register(r'series', SeriesViewSet, basename='series') router.register(r'categories', VODCategoryViewSet, basename='vodcategory') router.register(r'all', UnifiedContentViewSet, basename='unified-content') +router.register(r'vodlogos', VODLogoViewSet, basename='vodlogo') urlpatterns = router.urls diff --git a/apps/vod/api_views.py b/apps/vod/api_views.py index 517038a6..4ff1f82b 100644 --- a/apps/vod/api_views.py 
+++ b/apps/vod/api_views.py @@ -3,16 +3,21 @@ from rest_framework.response import Response from rest_framework.decorators import action from rest_framework.filters import SearchFilter, OrderingFilter from rest_framework.pagination import PageNumberPagination +from rest_framework.permissions import AllowAny from django_filters.rest_framework import DjangoFilterBackend from django.shortcuts import get_object_or_404 +from django.http import StreamingHttpResponse, HttpResponse, FileResponse +from django.db.models import Q import django_filters import logging +import os +import requests from apps.accounts.permissions import ( Authenticated, permission_classes_by_action, ) from .models import ( - Series, VODCategory, Movie, Episode, + Series, VODCategory, Movie, Episode, VODLogo, M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation ) from .serializers import ( @@ -20,6 +25,7 @@ from .serializers import ( EpisodeSerializer, SeriesSerializer, VODCategorySerializer, + VODLogoSerializer, M3UMovieRelationSerializer, M3USeriesRelationSerializer, M3UEpisodeRelationSerializer @@ -564,7 +570,7 @@ class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet): logo.url as logo_url, 'movie' as content_type FROM vod_movie movies - LEFT JOIN dispatcharr_channels_logo logo ON movies.logo_id = logo.id + LEFT JOIN vod_vodlogo logo ON movies.logo_id = logo.id WHERE {where_conditions[0]} UNION ALL @@ -586,7 +592,7 @@ class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet): logo.url as logo_url, 'series' as content_type FROM vod_series series - LEFT JOIN dispatcharr_channels_logo logo ON series.logo_id = logo.id + LEFT JOIN vod_vodlogo logo ON series.logo_id = logo.id WHERE {where_conditions[1]} ) SELECT * FROM unified_content @@ -613,10 +619,10 @@ class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet): 'id': item_dict['logo_id'], 'name': item_dict['logo_name'], 'url': item_dict['logo_url'], - 'cache_url': f"/media/logo_cache/{item_dict['logo_id']}.png" if item_dict['logo_id'] else None, - 'channel_count': 0, # We don't need this for VOD - 'is_used': True, - 'channel_names': [] # We don't need this for VOD + 'cache_url': f"/api/vod/vodlogos/{item_dict['logo_id']}/cache/", + 'movie_count': 0, # We don't calculate this in raw SQL + 'series_count': 0, # We don't calculate this in raw SQL + 'is_used': True } # Convert to the format expected by frontend @@ -668,4 +674,173 @@ class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet): logger.error(f"Error in UnifiedContentViewSet.list(): {e}") import traceback logger.error(traceback.format_exc()) - return Response({'error': str(e)}, status=500) \ No newline at end of file + return Response({'error': str(e)}, status=500) + + +class VODLogoPagination(PageNumberPagination): + page_size = 100 + page_size_query_param = "page_size" + max_page_size = 1000 + + +class VODLogoViewSet(viewsets.ModelViewSet): + """ViewSet for VOD Logo management""" + queryset = VODLogo.objects.all() + serializer_class = VODLogoSerializer + pagination_class = VODLogoPagination + filter_backends = [SearchFilter, OrderingFilter] + search_fields = ['name', 'url'] + ordering_fields = ['name', 'id'] + ordering = ['name'] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + if self.action == 'cache': + return [AllowAny()] + return [Authenticated()] + + def get_queryset(self): + """Optimize queryset with prefetch and add filtering""" + queryset = VODLogo.objects.prefetch_related('movie', 'series').order_by('name') + + # 
Filter by specific IDs + ids = self.request.query_params.getlist('ids') + if ids: + try: + id_list = [int(id_str) for id_str in ids if id_str.isdigit()] + if id_list: + queryset = queryset.filter(id__in=id_list) + except (ValueError, TypeError): + queryset = VODLogo.objects.none() + + # Filter by usage + used_filter = self.request.query_params.get('used', None) + if used_filter == 'true': + # Return logos that are used by movies OR series + queryset = queryset.filter( + Q(movie__isnull=False) | Q(series__isnull=False) + ).distinct() + elif used_filter == 'false': + # Return logos that are NOT used by either + queryset = queryset.filter( + movie__isnull=True, + series__isnull=True + ) + elif used_filter == 'movies': + # Return logos that are used by movies (may also be used by series) + queryset = queryset.filter(movie__isnull=False).distinct() + elif used_filter == 'series': + # Return logos that are used by series (may also be used by movies) + queryset = queryset.filter(series__isnull=False).distinct() + + + # Filter by name + name_query = self.request.query_params.get('name', None) + if name_query: + queryset = queryset.filter(name__icontains=name_query) + + # No pagination mode + if self.request.query_params.get('no_pagination', 'false').lower() == 'true': + self.pagination_class = None + + return queryset + + @action(detail=True, methods=["get"], permission_classes=[AllowAny]) + def cache(self, request, pk=None): + """Streams the VOD logo file, whether it's local or remote.""" + logo = self.get_object() + + if not logo.url: + return HttpResponse(status=404) + + # Check if this is a local file path + if logo.url.startswith('/data/'): + # It's a local file + file_path = logo.url + if not os.path.exists(file_path): + logger.error(f"VOD logo file not found: {file_path}") + return HttpResponse(status=404) + + try: + return FileResponse(open(file_path, 'rb'), content_type='image/png') + except Exception as e: + logger.error(f"Error serving VOD logo file {file_path}: {str(e)}") + return HttpResponse(status=500) + else: + # It's a remote URL - proxy it + try: + response = requests.get(logo.url, stream=True, timeout=10) + response.raise_for_status() + + content_type = response.headers.get('Content-Type', 'image/png') + + return StreamingHttpResponse( + response.iter_content(chunk_size=8192), + content_type=content_type + ) + except requests.exceptions.RequestException as e: + logger.error(f"Error fetching remote VOD logo {logo.url}: {str(e)}") + return HttpResponse(status=404) + + @action(detail=False, methods=["delete"], url_path="bulk-delete") + def bulk_delete(self, request): + """Delete multiple VOD logos at once""" + logo_ids = request.data.get('logo_ids', []) + + if not logo_ids: + return Response( + {"error": "No logo IDs provided"}, + status=status.HTTP_400_BAD_REQUEST + ) + + try: + # Get logos to delete + logos = VODLogo.objects.filter(id__in=logo_ids) + deleted_count = logos.count() + + # Delete them + logos.delete() + + return Response({ + "deleted_count": deleted_count, + "message": f"Successfully deleted {deleted_count} VOD logo(s)" + }) + except Exception as e: + logger.error(f"Error during bulk VOD logo deletion: {str(e)}") + return Response( + {"error": str(e)}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR + ) + + @action(detail=False, methods=["post"]) + def cleanup(self, request): + """Delete all VOD logos that are not used by any movies or series""" + try: + # Find unused logos + unused_logos = VODLogo.objects.filter( + movie__isnull=True, + series__isnull=True + ) + + 
deleted_count = unused_logos.count() + logo_names = list(unused_logos.values_list('name', flat=True)) + + # Delete them + unused_logos.delete() + + logger.info(f"Cleaned up {deleted_count} unused VOD logos: {logo_names}") + + return Response({ + "deleted_count": deleted_count, + "deleted_logos": logo_names, + "message": f"Successfully deleted {deleted_count} unused VOD logo(s)" + }) + except Exception as e: + logger.error(f"Error during VOD logo cleanup: {str(e)}") + return Response( + {"error": str(e)}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR + ) + diff --git a/apps/vod/migrations/0003_vodlogo_alter_movie_logo_alter_series_logo.py b/apps/vod/migrations/0003_vodlogo_alter_movie_logo_alter_series_logo.py new file mode 100644 index 00000000..1bd2c418 --- /dev/null +++ b/apps/vod/migrations/0003_vodlogo_alter_movie_logo_alter_series_logo.py @@ -0,0 +1,264 @@ +# Generated by Django 5.2.4 on 2025-11-06 23:01 + +import django.db.models.deletion +from django.db import migrations, models + + +def migrate_vod_logos_forward(apps, schema_editor): + """ + Migrate VOD logos from the Logo table to the new VODLogo table. + This copies all logos referenced by movies or series to VODLogo. + Uses pure SQL for maximum performance. + """ + from django.db import connection + + print("\n" + "="*80) + print("Starting VOD logo migration...") + print("="*80) + + with connection.cursor() as cursor: + # Step 1: Copy unique logos from Logo table to VODLogo table + # Only copy logos that are used by movies or series + print("Copying logos to VODLogo table...") + cursor.execute(""" + INSERT INTO vod_vodlogo (name, url) + SELECT DISTINCT l.name, l.url + FROM dispatcharr_channels_logo l + WHERE l.id IN ( + SELECT DISTINCT logo_id FROM vod_movie WHERE logo_id IS NOT NULL + UNION + SELECT DISTINCT logo_id FROM vod_series WHERE logo_id IS NOT NULL + ) + ON CONFLICT (url) DO NOTHING + """) + print(f"Created VODLogo entries") + + # Step 2: Update movies to point to VODLogo IDs using JOIN + print("Updating movie references...") + cursor.execute(""" + UPDATE vod_movie m + SET logo_id = v.id + FROM dispatcharr_channels_logo l + INNER JOIN vod_vodlogo v ON l.url = v.url + WHERE m.logo_id = l.id + AND m.logo_id IS NOT NULL + """) + movie_count = cursor.rowcount + print(f"Updated {movie_count} movies with new VOD logo references") + + # Step 3: Update series to point to VODLogo IDs using JOIN + print("Updating series references...") + cursor.execute(""" + UPDATE vod_series s + SET logo_id = v.id + FROM dispatcharr_channels_logo l + INNER JOIN vod_vodlogo v ON l.url = v.url + WHERE s.logo_id = l.id + AND s.logo_id IS NOT NULL + """) + series_count = cursor.rowcount + print(f"Updated {series_count} series with new VOD logo references") + + print("="*80) + print("VOD logo migration completed successfully!") + print(f"Summary: Updated {movie_count} movies and {series_count} series") + print("="*80 + "\n") + + +def migrate_vod_logos_backward(apps, schema_editor): + """ + Reverse migration - moves VODLogos back to Logo table. + This recreates Logo entries for all VODLogos and updates Movie/Series references. 
+ """ + Logo = apps.get_model('dispatcharr_channels', 'Logo') + VODLogo = apps.get_model('vod', 'VODLogo') + Movie = apps.get_model('vod', 'Movie') + Series = apps.get_model('vod', 'Series') + + print("\n" + "="*80) + print("REVERSE: Moving VOD logos back to Logo table...") + print("="*80) + + # Get all VODLogos + vod_logos = VODLogo.objects.all() + print(f"Found {vod_logos.count()} VOD logos to reverse migrate") + + # Create Logo entries for each VODLogo + logos_to_create = [] + vod_to_logo_mapping = {} # VODLogo ID -> Logo ID + + for vod_logo in vod_logos: + # Check if a Logo with this URL already exists + existing_logo = Logo.objects.filter(url=vod_logo.url).first() + + if existing_logo: + # Logo already exists, just map to it + vod_to_logo_mapping[vod_logo.id] = existing_logo.id + print(f"Logo already exists for URL: {vod_logo.url[:50]}... (using existing)") + else: + # Create new Logo entry + new_logo = Logo(name=vod_logo.name, url=vod_logo.url) + logos_to_create.append(new_logo) + + # Bulk create new Logo entries + if logos_to_create: + print(f"Creating {len(logos_to_create)} new Logo entries...") + Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True) + print("Logo entries created") + + # Get the created Logo instances with their IDs + for vod_logo in vod_logos: + if vod_logo.id not in vod_to_logo_mapping: + try: + logo = Logo.objects.get(url=vod_logo.url) + vod_to_logo_mapping[vod_logo.id] = logo.id + except Logo.DoesNotExist: + print(f"Warning: Could not find Logo for URL: {vod_logo.url[:100]}...") + + print(f"Created mapping for {len(vod_to_logo_mapping)} VOD logos -> Logos") + + # Update movies to point back to Logo table + movie_count = 0 + for movie in Movie.objects.exclude(logo__isnull=True): + if movie.logo_id in vod_to_logo_mapping: + movie.logo_id = vod_to_logo_mapping[movie.logo_id] + movie.save(update_fields=['logo_id']) + movie_count += 1 + print(f"Updated {movie_count} movies to use Logo table") + + # Update series to point back to Logo table + series_count = 0 + for series in Series.objects.exclude(logo__isnull=True): + if series.logo_id in vod_to_logo_mapping: + series.logo_id = vod_to_logo_mapping[series.logo_id] + series.save(update_fields=['logo_id']) + series_count += 1 + print(f"Updated {series_count} series to use Logo table") + + # Delete VODLogos (they're now redundant) + vod_logo_count = vod_logos.count() + vod_logos.delete() + print(f"Deleted {vod_logo_count} VOD logos") + + print("="*80) + print("Reverse migration completed!") + print(f"Summary: Created/reused {len(vod_to_logo_mapping)} logos, updated {movie_count} movies and {series_count} series") + print("="*80 + "\n") + + +def cleanup_migrated_logos(apps, schema_editor): + """ + Delete Logo entries that were successfully migrated to VODLogo. + + Uses efficient JOIN-based approach with LEFT JOIN to exclude channel usage. 
+ """ + from django.db import connection + + print("\n" + "="*80) + print("Cleaning up migrated Logo entries...") + print("="*80) + + with connection.cursor() as cursor: + # Single efficient query using JOINs: + # - JOIN with vod_vodlogo to find migrated logos + # - LEFT JOIN with channels to find which aren't used + cursor.execute(""" + DELETE FROM dispatcharr_channels_logo + WHERE id IN ( + SELECT l.id + FROM dispatcharr_channels_logo l + INNER JOIN vod_vodlogo v ON l.url = v.url + LEFT JOIN dispatcharr_channels_channel c ON c.logo_id = l.id + WHERE c.id IS NULL + ) + """) + deleted_count = cursor.rowcount + + print(f"✓ Deleted {deleted_count} migrated Logo entries (not used by channels)") + print("="*80 + "\n") + + +class Migration(migrations.Migration): + + dependencies = [ + ('vod', '0002_add_last_seen_with_default'), + ('dispatcharr_channels', '0013_alter_logo_url'), # Ensure Logo table exists + ] + + operations = [ + # Step 1: Create the VODLogo model + migrations.CreateModel( + name='VODLogo', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('name', models.CharField(max_length=255)), + ('url', models.TextField(unique=True)), + ], + options={ + 'verbose_name': 'VOD Logo', + 'verbose_name_plural': 'VOD Logos', + }, + ), + + # Step 2: Remove foreign key constraints temporarily (so we can change the IDs) + # We need to find and drop the actual constraint names dynamically + migrations.RunSQL( + sql=[ + # Drop movie logo constraint (find it dynamically) + """ + DO $$ + DECLARE + constraint_name text; + BEGIN + SELECT conname INTO constraint_name + FROM pg_constraint + WHERE conrelid = 'vod_movie'::regclass + AND conname LIKE '%logo_id%fk%'; + + IF constraint_name IS NOT NULL THEN + EXECUTE 'ALTER TABLE vod_movie DROP CONSTRAINT ' || constraint_name; + END IF; + END $$; + """, + # Drop series logo constraint (find it dynamically) + """ + DO $$ + DECLARE + constraint_name text; + BEGIN + SELECT conname INTO constraint_name + FROM pg_constraint + WHERE conrelid = 'vod_series'::regclass + AND conname LIKE '%logo_id%fk%'; + + IF constraint_name IS NOT NULL THEN + EXECUTE 'ALTER TABLE vod_series DROP CONSTRAINT ' || constraint_name; + END IF; + END $$; + """, + ], + reverse_sql=[ + # The AlterField operations will recreate the constraints pointing to VODLogo, + # so we don't need to manually recreate them in reverse + migrations.RunSQL.noop, + ], + ), + + # Step 3: Migrate the data (this copies logos and updates references) + migrations.RunPython(migrate_vod_logos_forward, migrate_vod_logos_backward), + + # Step 4: Now we can safely alter the foreign keys to point to VODLogo + migrations.AlterField( + model_name='movie', + name='logo', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='movie', to='vod.vodlogo'), + ), + migrations.AlterField( + model_name='series', + name='logo', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='series', to='vod.vodlogo'), + ), + + # Step 5: Clean up migrated Logo entries + migrations.RunPython(cleanup_migrated_logos, migrations.RunPython.noop), + ] diff --git a/apps/vod/models.py b/apps/vod/models.py index f0825ba2..69aed808 100644 --- a/apps/vod/models.py +++ b/apps/vod/models.py @@ -4,10 +4,22 @@ from django.utils import timezone from django.contrib.contenttypes.fields import GenericForeignKey from django.contrib.contenttypes.models import ContentType from apps.m3u.models 
import M3UAccount -from apps.channels.models import Logo import uuid +class VODLogo(models.Model): + """Logo model specifically for VOD content (movies and series)""" + name = models.CharField(max_length=255) + url = models.TextField(unique=True) + + def __str__(self): + return self.name + + class Meta: + verbose_name = 'VOD Logo' + verbose_name_plural = 'VOD Logos' + + class VODCategory(models.Model): """Categories for organizing VODs (e.g., Action, Comedy, Drama)""" @@ -69,7 +81,7 @@ class Series(models.Model): year = models.IntegerField(blank=True, null=True) rating = models.CharField(max_length=10, blank=True, null=True) genre = models.CharField(max_length=255, blank=True, null=True) - logo = models.ForeignKey(Logo, on_delete=models.SET_NULL, null=True, blank=True, related_name='series') + logo = models.ForeignKey(VODLogo, on_delete=models.SET_NULL, null=True, blank=True, related_name='series') # Metadata IDs for deduplication - these should be globally unique when present tmdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="TMDB ID for metadata") @@ -108,7 +120,7 @@ class Movie(models.Model): rating = models.CharField(max_length=10, blank=True, null=True) genre = models.CharField(max_length=255, blank=True, null=True) duration_secs = models.IntegerField(blank=True, null=True, help_text="Duration in seconds") - logo = models.ForeignKey(Logo, on_delete=models.SET_NULL, null=True, blank=True, related_name='movie') + logo = models.ForeignKey(VODLogo, on_delete=models.SET_NULL, null=True, blank=True, related_name='movie') # Metadata IDs for deduplication - these should be globally unique when present tmdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="TMDB ID for metadata") diff --git a/apps/vod/serializers.py b/apps/vod/serializers.py index 5a672b33..7747cb88 100644 --- a/apps/vod/serializers.py +++ b/apps/vod/serializers.py @@ -1,12 +1,79 @@ from rest_framework import serializers +from django.urls import reverse from .models import ( - Series, VODCategory, Movie, Episode, + Series, VODCategory, Movie, Episode, VODLogo, M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation, M3UVODCategoryRelation ) -from apps.channels.serializers import LogoSerializer from apps.m3u.serializers import M3UAccountSerializer +class VODLogoSerializer(serializers.ModelSerializer): + cache_url = serializers.SerializerMethodField() + movie_count = serializers.SerializerMethodField() + series_count = serializers.SerializerMethodField() + is_used = serializers.SerializerMethodField() + item_names = serializers.SerializerMethodField() + + class Meta: + model = VODLogo + fields = ["id", "name", "url", "cache_url", "movie_count", "series_count", "is_used", "item_names"] + + def validate_url(self, value): + """Validate that the URL is unique for creation or update""" + if self.instance and self.instance.url == value: + return value + + if VODLogo.objects.filter(url=value).exists(): + raise serializers.ValidationError("A VOD logo with this URL already exists.") + + return value + + def create(self, validated_data): + """Handle logo creation with proper URL validation""" + return VODLogo.objects.create(**validated_data) + + def update(self, instance, validated_data): + """Handle logo updates""" + for attr, value in validated_data.items(): + setattr(instance, attr, value) + instance.save() + return instance + + def get_cache_url(self, obj): + request = self.context.get("request") + if request: + return request.build_absolute_uri( + 
reverse("api:vod:vodlogo-cache", args=[obj.id]) + ) + return reverse("api:vod:vodlogo-cache", args=[obj.id]) + + def get_movie_count(self, obj): + """Get the number of movies using this logo""" + return obj.movie.count() if hasattr(obj, 'movie') else 0 + + def get_series_count(self, obj): + """Get the number of series using this logo""" + return obj.series.count() if hasattr(obj, 'series') else 0 + + def get_is_used(self, obj): + """Check if this logo is used by any movies or series""" + return (hasattr(obj, 'movie') and obj.movie.exists()) or (hasattr(obj, 'series') and obj.series.exists()) + + def get_item_names(self, obj): + """Get the list of movies and series using this logo""" + names = [] + + if hasattr(obj, 'movie'): + for movie in obj.movie.all()[:10]: # Limit to 10 items for performance + names.append(f"Movie: {movie.name}") + + if hasattr(obj, 'series'): + for series in obj.series.all()[:10]: # Limit to 10 items for performance + names.append(f"Series: {series.name}") + + return names + + class M3UVODCategoryRelationSerializer(serializers.ModelSerializer): category = serializers.IntegerField(source="category.id") m3u_account = serializers.IntegerField(source="m3u_account.id") @@ -31,7 +98,7 @@ class VODCategorySerializer(serializers.ModelSerializer): ] class SeriesSerializer(serializers.ModelSerializer): - logo = LogoSerializer(read_only=True) + logo = VODLogoSerializer(read_only=True) episode_count = serializers.SerializerMethodField() class Meta: @@ -43,7 +110,7 @@ class SeriesSerializer(serializers.ModelSerializer): class MovieSerializer(serializers.ModelSerializer): - logo = LogoSerializer(read_only=True) + logo = VODLogoSerializer(read_only=True) class Meta: model = Movie @@ -225,7 +292,7 @@ class M3UEpisodeRelationSerializer(serializers.ModelSerializer): class EnhancedSeriesSerializer(serializers.ModelSerializer): """Enhanced serializer for series with provider information""" - logo = LogoSerializer(read_only=True) + logo = VODLogoSerializer(read_only=True) providers = M3USeriesRelationSerializer(source='m3u_relations', many=True, read_only=True) episode_count = serializers.SerializerMethodField() diff --git a/apps/vod/tasks.py b/apps/vod/tasks.py index bc8ad80f..e34e00e6 100644 --- a/apps/vod/tasks.py +++ b/apps/vod/tasks.py @@ -5,10 +5,9 @@ from django.db.models import Q from apps.m3u.models import M3UAccount from core.xtream_codes import Client as XtreamCodesClient from .models import ( - VODCategory, Series, Movie, Episode, + VODCategory, Series, Movie, Episode, VODLogo, M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation, M3UVODCategoryRelation ) -from apps.channels.models import Logo from datetime import datetime import logging import json @@ -403,7 +402,7 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N # Get existing logos existing_logos = { - logo.url: logo for logo in Logo.objects.filter(url__in=logo_urls) + logo.url: logo for logo in VODLogo.objects.filter(url__in=logo_urls) } if logo_urls else {} # Create missing logos @@ -411,20 +410,20 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N for logo_url in logo_urls: if logo_url not in existing_logos: movie_name = logo_url_to_name.get(logo_url, 'Unknown Movie') - logos_to_create.append(Logo(url=logo_url, name=movie_name)) + logos_to_create.append(VODLogo(url=logo_url, name=movie_name)) if logos_to_create: try: - Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True) + VODLogo.objects.bulk_create(logos_to_create, 
ignore_conflicts=True) # Refresh existing_logos with newly created ones new_logo_urls = [logo.url for logo in logos_to_create] newly_created = { - logo.url: logo for logo in Logo.objects.filter(url__in=new_logo_urls) + logo.url: logo for logo in VODLogo.objects.filter(url__in=new_logo_urls) } existing_logos.update(newly_created) - logger.info(f"Created {len(newly_created)} new logos for movies") + logger.info(f"Created {len(newly_created)} new VOD logos for movies") except Exception as e: - logger.warning(f"Failed to create logos: {e}") + logger.warning(f"Failed to create VOD logos: {e}") # Get existing movies based on our keys existing_movies = {} @@ -725,7 +724,7 @@ def process_series_batch(account, batch, categories, relations, scan_start_time= # Get existing logos existing_logos = { - logo.url: logo for logo in Logo.objects.filter(url__in=logo_urls) + logo.url: logo for logo in VODLogo.objects.filter(url__in=logo_urls) } if logo_urls else {} # Create missing logos @@ -733,20 +732,20 @@ def process_series_batch(account, batch, categories, relations, scan_start_time= for logo_url in logo_urls: if logo_url not in existing_logos: series_name = logo_url_to_name.get(logo_url, 'Unknown Series') - logos_to_create.append(Logo(url=logo_url, name=series_name)) + logos_to_create.append(VODLogo(url=logo_url, name=series_name)) if logos_to_create: try: - Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True) + VODLogo.objects.bulk_create(logos_to_create, ignore_conflicts=True) # Refresh existing_logos with newly created ones new_logo_urls = [logo.url for logo in logos_to_create] newly_created = { - logo.url: logo for logo in Logo.objects.filter(url__in=new_logo_urls) + logo.url: logo for logo in VODLogo.objects.filter(url__in=new_logo_urls) } existing_logos.update(newly_created) - logger.info(f"Created {len(newly_created)} new logos for series") + logger.info(f"Created {len(newly_created)} new VOD logos for series") except Exception as e: - logger.warning(f"Failed to create logos: {e}") + logger.warning(f"Failed to create VOD logos: {e}") # Get existing series based on our keys - same pattern as movies existing_series = {} @@ -1424,21 +1423,21 @@ def cleanup_orphaned_vod_content(stale_days=0, scan_start_time=None, account_id= stale_episode_count = stale_episode_relations.count() stale_episode_relations.delete() - # Clean up movies with no relations (orphaned) - only if no account_id specified (global cleanup) - if not account_id: - orphaned_movies = Movie.objects.filter(m3u_relations__isnull=True) - orphaned_movie_count = orphaned_movies.count() + # Clean up movies with no relations (orphaned) + # Safe to delete even during account-specific cleanup because if ANY account + # has a relation, m3u_relations will not be null + orphaned_movies = Movie.objects.filter(m3u_relations__isnull=True) + orphaned_movie_count = orphaned_movies.count() + if orphaned_movie_count > 0: + logger.info(f"Deleting {orphaned_movie_count} orphaned movies with no M3U relations") orphaned_movies.delete() - # Clean up series with no relations (orphaned) - only if no account_id specified (global cleanup) - orphaned_series = Series.objects.filter(m3u_relations__isnull=True) - orphaned_series_count = orphaned_series.count() + # Clean up series with no relations (orphaned) + orphaned_series = Series.objects.filter(m3u_relations__isnull=True) + orphaned_series_count = orphaned_series.count() + if orphaned_series_count > 0: + logger.info(f"Deleting {orphaned_series_count} orphaned series with no M3U relations") 
orphaned_series.delete() - else: - # When cleaning up for specific account, we don't remove orphaned content - # as other accounts might still reference it - orphaned_movie_count = 0 - orphaned_series_count = 0 # Episodes will be cleaned up via CASCADE when series are deleted @@ -1999,7 +1998,7 @@ def refresh_movie_advanced_data(m3u_movie_relation_id, force_refresh=False): def validate_logo_reference(obj, obj_type="object"): """ - Validate that a logo reference exists in the database. + Validate that a VOD logo reference exists in the database. If not, set it to None to prevent foreign key constraint violations. Args: @@ -2019,9 +2018,9 @@ def validate_logo_reference(obj, obj_type="object"): try: # Verify the logo exists in the database - Logo.objects.get(pk=obj.logo.pk) + VODLogo.objects.get(pk=obj.logo.pk) return True - except Logo.DoesNotExist: - logger.warning(f"Logo with ID {obj.logo.pk} does not exist in database for {obj_type} '{getattr(obj, 'name', 'Unknown')}', setting to None") + except VODLogo.DoesNotExist: + logger.warning(f"VOD Logo with ID {obj.logo.pk} does not exist in database for {obj_type} '{getattr(obj, 'name', 'Unknown')}', setting to None") obj.logo = None return False diff --git a/frontend/src/api.js b/frontend/src/api.js index 4281a533..8f5aeeeb 100644 --- a/frontend/src/api.js +++ b/frontend/src/api.js @@ -1788,6 +1788,77 @@ export default class API { } } + // VOD Logo Methods + static async getVODLogos(params = {}) { + try { + // Transform usage filter to match backend expectations + const apiParams = { ...params }; + if (apiParams.usage === 'used') { + apiParams.used = 'true'; + delete apiParams.usage; + } else if (apiParams.usage === 'unused') { + apiParams.used = 'false'; + delete apiParams.usage; + } else if (apiParams.usage === 'movies') { + apiParams.used = 'movies'; + delete apiParams.usage; + } else if (apiParams.usage === 'series') { + apiParams.used = 'series'; + delete apiParams.usage; + } + + const queryParams = new URLSearchParams(apiParams); + const response = await request( + `${host}/api/vod/vodlogos/?${queryParams.toString()}` + ); + + return response; + } catch (e) { + errorNotification('Failed to retrieve VOD logos', e); + throw e; + } + } + + static async deleteVODLogo(id) { + try { + await request(`${host}/api/vod/vodlogos/${id}/`, { + method: 'DELETE', + }); + + return true; + } catch (e) { + errorNotification('Failed to delete VOD logo', e); + throw e; + } + } + + static async deleteVODLogos(ids) { + try { + await request(`${host}/api/vod/vodlogos/bulk-delete/`, { + method: 'DELETE', + body: { logo_ids: ids }, + }); + + return true; + } catch (e) { + errorNotification('Failed to delete VOD logos', e); + throw e; + } + } + + static async cleanupUnusedVODLogos() { + try { + const response = await request(`${host}/api/vod/vodlogos/cleanup/`, { + method: 'POST', + }); + + return response; + } catch (e) { + errorNotification('Failed to cleanup unused VOD logos', e); + throw e; + } + } + static async getChannelProfiles() { try { const response = await request(`${host}/api/channels/profiles/`); diff --git a/frontend/src/components/tables/LogosTable.jsx b/frontend/src/components/tables/LogosTable.jsx index 0c4f32b4..b8b80506 100644 --- a/frontend/src/components/tables/LogosTable.jsx +++ b/frontend/src/components/tables/LogosTable.jsx @@ -626,25 +626,6 @@ const LogosTable = () => { }} > - - - Logos - - - ({data.length} logo{data.length !== 1 ? 
's' : ''}) - - - { + const [tableSize] = useLocalStorage('table-size', 'default'); + + const onDelete = useCallback(() => { + deleteLogo(row.original.id); + }, [row.original.id, deleteLogo]); + + const iconSize = + tableSize === 'default' ? 'sm' : tableSize === 'compact' ? 'xs' : 'md'; + + return ( + + + + + + + + ); +}; + +export default function VODLogosTable() { + const theme = useMantineTheme(); + + const { + logos, + totalCount, + isLoading, + fetchVODLogos, + deleteVODLogo, + deleteVODLogos, + cleanupUnusedVODLogos, + } = useVODLogosStore(); + + const [currentPage, setCurrentPage] = useState(1); + const [pageSize, setPageSize] = useState(25); + const [nameFilter, setNameFilter] = useState(''); + const [usageFilter, setUsageFilter] = useState('all'); + const [selectedRows, setSelectedRows] = useState(new Set()); + const [confirmDeleteOpen, setConfirmDeleteOpen] = useState(false); + const [deleteTarget, setDeleteTarget] = useState(null); + const [confirmCleanupOpen, setConfirmCleanupOpen] = useState(false); + const [paginationString, setPaginationString] = useState(''); + const [isCleaningUp, setIsCleaningUp] = useState(false); + + // Calculate unused logos count + const unusedLogosCount = useMemo(() => { + return logos.filter( + (logo) => logo.movie_count === 0 && logo.series_count === 0 + ).length; + }, [logos]); + useEffect(() => { + fetchVODLogos({ + page: currentPage, + page_size: pageSize, + name: nameFilter, + usage: usageFilter === 'all' ? undefined : usageFilter, + }); + }, [currentPage, pageSize, nameFilter, usageFilter, fetchVODLogos]); + + const handleSelectAll = useCallback( + (checked) => { + if (checked) { + setSelectedRows(new Set(logos.map((logo) => logo.id))); + } else { + setSelectedRows(new Set()); + } + }, + [logos] + ); + + const handleSelectRow = useCallback((id, checked) => { + setSelectedRows((prev) => { + const newSet = new Set(prev); + if (checked) { + newSet.add(id); + } else { + newSet.delete(id); + } + return newSet; + }); + }, []); + + const deleteLogo = useCallback((id) => { + setDeleteTarget([id]); + setConfirmDeleteOpen(true); + }, []); + + const handleDeleteSelected = useCallback(() => { + setDeleteTarget(Array.from(selectedRows)); + setConfirmDeleteOpen(true); + }, [selectedRows]); + + const handleConfirmDelete = async () => { + try { + if (deleteTarget.length === 1) { + await deleteVODLogo(deleteTarget[0]); + notifications.show({ + title: 'Success', + message: 'VOD logo deleted successfully', + color: 'green', + }); + } else { + await deleteVODLogos(deleteTarget); + notifications.show({ + title: 'Success', + message: `${deleteTarget.length} VOD logos deleted successfully`, + color: 'green', + }); + } + setSelectedRows(new Set()); + setConfirmDeleteOpen(false); + setDeleteTarget(null); + } catch (error) { + notifications.show({ + title: 'Error', + message: error.message || 'Failed to delete VOD logos', + color: 'red', + }); + } + }; + + const handleCleanupUnused = useCallback(() => { + setConfirmCleanupOpen(true); + }, []); + + const handleConfirmCleanup = async () => { + setIsCleaningUp(true); + try { + const result = await cleanupUnusedVODLogos(); + notifications.show({ + title: 'Success', + message: `Cleaned up ${result.deleted_count} unused VOD logos`, + color: 'green', + }); + setConfirmCleanupOpen(false); + } catch (error) { + notifications.show({ + title: 'Error', + message: error.message || 'Failed to cleanup unused VOD logos', + color: 'red', + }); + } finally { + setIsCleaningUp(false); + } + }; + + useEffect(() => { + setSelectedRows(new 
Set()); + }, [logos.length]); + + useEffect(() => { + const startItem = (currentPage - 1) * pageSize + 1; + const endItem = Math.min(currentPage * pageSize, totalCount); + setPaginationString(`${startItem} to ${endItem} of ${totalCount}`); + }, [currentPage, pageSize, totalCount]); + + const pageCount = useMemo(() => { + return Math.ceil(totalCount / pageSize); + }, [totalCount, pageSize]); + + const columns = useMemo( + () => [ + { + id: 'select', + header: ({ table }) => ( + 0 && selectedRows.size === logos.length + } + indeterminate={ + selectedRows.size > 0 && selectedRows.size < logos.length + } + onChange={(event) => handleSelectAll(event.currentTarget.checked)} + size="sm" + /> + ), + cell: ({ row }) => ( + + handleSelectRow(row.original.id, event.currentTarget.checked) + } + size="sm" + /> + ), + size: 50, + enableSorting: false, + }, + { + header: 'Preview', + accessorKey: 'cache_url', + size: 80, + enableSorting: false, + cell: ({ getValue, row }) => ( +
+ {row.original.name} { + e.target.style.transform = 'scale(1.5)'; + }} + onMouseLeave={(e) => { + e.target.style.transform = 'scale(1)'; + }} + /> +
+ ), + }, + { + header: 'Name', + accessorKey: 'name', + size: 250, + cell: ({ getValue }) => ( + + {getValue()} + + ), + }, + { + header: 'Usage', + accessorKey: 'usage', + size: 120, + cell: ({ row }) => { + const { movie_count, series_count, item_names } = row.original; + const totalUsage = movie_count + series_count; + + if (totalUsage === 0) { + return ( + + Unused + + ); + } + + // Build usage description + const usageParts = []; + if (movie_count > 0) { + usageParts.push( + `${movie_count} movie${movie_count !== 1 ? 's' : ''}` + ); + } + if (series_count > 0) { + usageParts.push(`${series_count} series`); + } + + const label = + usageParts.length === 1 + ? usageParts[0] + : `${totalUsage} item${totalUsage !== 1 ? 's' : ''}`; + + return ( + + + Used by {usageParts.join(' & ')}: + + {item_names && + item_names.map((name, index) => ( + + • {name} + + ))} + + } + multiline + width={220} + > + + {label} + + + ); + }, + }, + { + header: 'URL', + accessorKey: 'url', + grow: true, + cell: ({ getValue }) => ( + + + + {getValue()} + + + {getValue()?.startsWith('http') && ( + window.open(getValue(), '_blank')} + > + + + )} + + ), + }, + { + id: 'actions', + size: 80, + header: 'Actions', + enableSorting: false, + cell: ({ row }) => ( + + ), + }, + ], + [theme, deleteLogo, selectedRows, handleSelectAll, handleSelectRow, logos] + ); + + const renderHeaderCell = (header) => { + return ( + + {header.column.columnDef.header} + + ); + }; + + const table = useTable({ + data: logos, + columns, + manualPagination: true, + pageCount: pageCount, + allRowIds: logos.map((logo) => logo.id), + enablePagination: false, + enableRowSelection: true, + enableRowVirtualization: false, + renderTopToolbar: false, + manualSorting: false, + manualFiltering: false, + headerCellRenderFns: { + actions: renderHeaderCell, + cache_url: renderHeaderCell, + name: renderHeaderCell, + url: renderHeaderCell, + usage: renderHeaderCell, + }, + }); + + return ( + + + + {/* Top toolbar */} + + + { + const value = event.target.value; + setNameFilter(value); + }} + size="xs" + style={{ width: 200 }} + /> + { - formik.setFieldValue('channel_group_id', value); // Update Formik's state with the new value - }} - error={ - formik.errors.channel_group_id - ? formik.touched.channel_group_id - : '' - } - data={Object.values(channelGroups).map((option, index) => ({ - value: `${option.id}`, - label: option.name, - }))} - size="xs" - style={{ flex: 1 }} - /> */} - - setChannelGroupModalOpen(true)} - title="Create new group" - size="small" - variant="transparent" - style={{ marginBottom: 5 }} - > - - - - - - ({ - value: `${epg.id}`, - label: epg.name, - }))} - size="xs" - mb="xs" - /> - - {/* Filter Input */} - - setTvgFilter(event.currentTarget.value) - } - mb="xs" - size="xs" - /> - - - - - {({ index, style }) => ( -
- -
- )} -
-
- - -
- - - - - - - - ); -}; - -export default ChannelsForm; From 827501c9f71df0ccb02b46acaafde918d83ba5ad Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Fri, 14 Nov 2025 18:00:08 -0600 Subject: [PATCH 034/288] Better spacing for version text. --- frontend/src/components/forms/LoginForm.jsx | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/frontend/src/components/forms/LoginForm.jsx b/frontend/src/components/forms/LoginForm.jsx index 9d284319..353cd50e 100644 --- a/frontend/src/components/forms/LoginForm.jsx +++ b/frontend/src/components/forms/LoginForm.jsx @@ -145,12 +145,10 @@ const LoginForm = () => { @@ -258,9 +256,10 @@ const LoginForm = () => { v{version} From 25145283379cba32f0931dfaae41a08250a705b6 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Fri, 14 Nov 2025 19:57:59 -0600 Subject: [PATCH 035/288] Enhancement: Update channel state handling in ProxyServer and views to include 'STOPPING' state, ensuring proper cleanup and preventing reinitialization during shutdown. --- apps/proxy/ts_proxy/server.py | 13 +++++++------ apps/proxy/ts_proxy/views.py | 36 ++++++++++++++++++++++++++++------- 2 files changed, 36 insertions(+), 13 deletions(-) diff --git a/apps/proxy/ts_proxy/server.py b/apps/proxy/ts_proxy/server.py index cca827a9..61dac1e3 100644 --- a/apps/proxy/ts_proxy/server.py +++ b/apps/proxy/ts_proxy/server.py @@ -703,9 +703,10 @@ class ProxyServer: state = metadata.get(b'state', b'unknown').decode('utf-8') owner = metadata.get(b'owner', b'').decode('utf-8') - # States that indicate the channel is running properly + # States that indicate the channel is running properly or shutting down valid_states = [ChannelState.ACTIVE, ChannelState.WAITING_FOR_CLIENTS, - ChannelState.CONNECTING, ChannelState.BUFFERING, ChannelState.INITIALIZING] + ChannelState.CONNECTING, ChannelState.BUFFERING, ChannelState.INITIALIZING, + ChannelState.STOPPING] # If the channel is in a valid state, check if the owner is still active if state in valid_states: @@ -720,10 +721,10 @@ class ProxyServer: logger.warning(f"Detected zombie channel {channel_id} - owner {owner} is no longer active") self._clean_zombie_channel(channel_id, metadata) return False - elif state in [ChannelState.STOPPING, ChannelState.STOPPED, ChannelState.ERROR]: - # These states indicate the channel should be reinitialized - logger.info(f"Channel {channel_id} exists but in terminal state: {state}") - return True + elif state in [ChannelState.STOPPED, ChannelState.ERROR]: + # These terminal states indicate the channel should be cleaned up and reinitialized + logger.info(f"Channel {channel_id} in terminal state {state} - returning False to trigger cleanup") + return False else: # Unknown or initializing state, check how long it's been in this state if b'state_changed_at' in metadata: diff --git a/apps/proxy/ts_proxy/views.py b/apps/proxy/ts_proxy/views.py index c1b803ab..91f254a7 100644 --- a/apps/proxy/ts_proxy/views.py +++ b/apps/proxy/ts_proxy/views.py @@ -84,11 +84,18 @@ def stream_ts(request, channel_id): if state_field in metadata: channel_state = metadata[state_field].decode("utf-8") - if channel_state: - # Channel is being initialized or already active - no need for reinitialization + # Active/running states - channel is operational, don't reinitialize + if channel_state in [ + ChannelState.ACTIVE, + ChannelState.WAITING_FOR_CLIENTS, + ChannelState.BUFFERING, + ChannelState.INITIALIZING, + ChannelState.CONNECTING, + ChannelState.STOPPING, + ]: needs_initialization = False logger.debug( - 
f"[{client_id}] Channel {channel_id} already in state {channel_state}, skipping initialization" + f"[{client_id}] Channel {channel_id} in state {channel_state}, skipping initialization" ) # Special handling for initializing/connecting states @@ -98,19 +105,34 @@ def stream_ts(request, channel_id): ]: channel_initializing = True logger.debug( - f"[{client_id}] Channel {channel_id} is still initializing, client will wait for completion" + f"[{client_id}] Channel {channel_id} is still initializing, client will wait" ) + # Terminal states - channel needs cleanup before reinitialization + elif channel_state in [ + ChannelState.ERROR, + ChannelState.STOPPED, + ]: + needs_initialization = True + logger.info( + f"[{client_id}] Channel {channel_id} in terminal state {channel_state}, will reinitialize" + ) + # Unknown/empty state - check if owner is alive else: - # Only check for owner if channel is in a valid state owner_field = ChannelMetadataField.OWNER.encode("utf-8") if owner_field in metadata: owner = metadata[owner_field].decode("utf-8") owner_heartbeat_key = f"ts_proxy:worker:{owner}:heartbeat" if proxy_server.redis_client.exists(owner_heartbeat_key): - # Owner is still active, so we don't need to reinitialize + # Owner is still active with unknown state - don't reinitialize needs_initialization = False logger.debug( - f"[{client_id}] Channel {channel_id} has active owner {owner}" + f"[{client_id}] Channel {channel_id} has active owner {owner}, skipping init" + ) + else: + # Owner dead - needs reinitialization + needs_initialization = True + logger.warning( + f"[{client_id}] Channel {channel_id} owner {owner} is dead, will reinitialize" ) # Start initialization if needed From 0700cf29eab35e249e30cb053c5812f26eb2e5df Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Fri, 14 Nov 2025 20:13:40 -0600 Subject: [PATCH 036/288] Enhancement: Add copy link functionality to SeriesModal and VODModal, allowing users to easily copy episode and VOD links to clipboard with notifications for success or failure. 
--- frontend/src/components/SeriesModal.jsx | 79 ++++++++++++++++++++----- frontend/src/components/VODModal.jsx | 38 +++++++++++- 2 files changed, 99 insertions(+), 18 deletions(-) diff --git a/frontend/src/components/SeriesModal.jsx b/frontend/src/components/SeriesModal.jsx index dcfebf86..48677646 100644 --- a/frontend/src/components/SeriesModal.jsx +++ b/frontend/src/components/SeriesModal.jsx @@ -17,7 +17,9 @@ import { Table, Divider, } from '@mantine/core'; -import { Play } from 'lucide-react'; +import { Play, Copy } from 'lucide-react'; +import { notifications } from '@mantine/notifications'; +import { copyToClipboard } from '../utils'; import useVODStore from '../store/useVODStore'; import useVideoStore from '../store/useVideoStore'; import useSettingsStore from '../store/settings'; @@ -262,6 +264,39 @@ const SeriesModal = ({ series, opened, onClose }) => { showVideo(streamUrl, 'vod', episode); }; + const getEpisodeStreamUrl = (episode) => { + let streamUrl = `/proxy/vod/episode/${episode.uuid}`; + + // Add selected provider as query parameter if available + if (selectedProvider) { + // Use stream_id for most specific selection, fallback to account_id + if (selectedProvider.stream_id) { + streamUrl += `?stream_id=${encodeURIComponent(selectedProvider.stream_id)}`; + } else { + streamUrl += `?m3u_account_id=${selectedProvider.m3u_account.id}`; + } + } + + if (env_mode === 'dev') { + streamUrl = `${window.location.protocol}//${window.location.hostname}:5656${streamUrl}`; + } else { + streamUrl = `${window.location.origin}${streamUrl}`; + } + return streamUrl; + }; + + const handleCopyEpisodeLink = async (episode) => { + const streamUrl = getEpisodeStreamUrl(episode); + const success = await copyToClipboard(streamUrl); + notifications.show({ + title: success ? 'Link Copied!' : 'Copy Failed', + message: success + ? 'Episode link copied to clipboard' + : 'Failed to copy link to clipboard', + color: success ? 'green' : 'red', + }); + }; + const handleEpisodeRowClick = (episode) => { setExpandedEpisode(expandedEpisode === episode.id ? 
null : episode.id); }; @@ -611,20 +646,34 @@ const SeriesModal = ({ series, opened, onClose }) => { - 0 && !selectedProvider - } - onClick={(e) => { - e.stopPropagation(); - handlePlayEpisode(episode); - }} - > - - + + 0 && + !selectedProvider + } + onClick={(e) => { + e.stopPropagation(); + handlePlayEpisode(episode); + }} + > + + + { + e.stopPropagation(); + handleCopyEpisodeLink(episode); + }} + > + + + {expandedEpisode === episode.id && ( diff --git a/frontend/src/components/VODModal.jsx b/frontend/src/components/VODModal.jsx index 90fd3fad..7b1d34eb 100644 --- a/frontend/src/components/VODModal.jsx +++ b/frontend/src/components/VODModal.jsx @@ -13,7 +13,9 @@ import { Stack, Modal, } from '@mantine/core'; -import { Play } from 'lucide-react'; +import { Play, Copy } from 'lucide-react'; +import { notifications } from '@mantine/notifications'; +import { copyToClipboard } from '../utils'; import useVODStore from '../store/useVODStore'; import useVideoStore from '../store/useVideoStore'; import useSettingsStore from '../store/settings'; @@ -232,9 +234,9 @@ const VODModal = ({ vod, opened, onClose }) => { } }, [opened]); - const handlePlayVOD = () => { + const getStreamUrl = () => { const vodToPlay = detailedVOD || vod; - if (!vodToPlay) return; + if (!vodToPlay) return null; let streamUrl = `/proxy/vod/movie/${vod.uuid}`; @@ -253,9 +255,29 @@ const VODModal = ({ vod, opened, onClose }) => { } else { streamUrl = `${window.location.origin}${streamUrl}`; } + return streamUrl; + }; + + const handlePlayVOD = () => { + const streamUrl = getStreamUrl(); + if (!streamUrl) return; + const vodToPlay = detailedVOD || vod; showVideo(streamUrl, 'vod', vodToPlay); }; + const handleCopyLink = async () => { + const streamUrl = getStreamUrl(); + if (!streamUrl) return; + const success = await copyToClipboard(streamUrl); + notifications.show({ + title: success ? 'Link Copied!' : 'Copy Failed', + message: success + ? 'Stream link copied to clipboard' + : 'Failed to copy link to clipboard', + color: success ? 'green' : 'red', + }); + }; + // Helper to get embeddable YouTube URL const getEmbedUrl = (url) => { if (!url) return ''; @@ -486,6 +508,16 @@ const VODModal = ({ vod, opened, onClose }) => { Watch Trailer )} + From 6bd5958c3c3f1cd27deffbc2ee007e8b9e4f385a Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Sat, 15 Nov 2025 14:22:26 -0600 Subject: [PATCH 037/288] Enhancement: Improve channel shutdown logic in ProxyServer to handle connection timeouts and grace periods more effectively, ensuring proper channel management based on client connections. 
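
Two zero-client cases are now told apart: a channel that reached the
ready state at least once gets the normal shutdown delay, while one
that never got there is treated as a stuck upstream connection and only
gets the init grace period. A condensed sketch of that decision as a
hypothetical standalone helper (the real checks live inline in the
monitor loop and read their timestamps from Redis metadata):

    import time

    def should_stop(clients, ready_time, start_time,
                    shutdown_delay, grace_period, now=None):
        now = now or time.time()
        if clients > 0:
            return False  # clients present; channel may be marked ACTIVE
        if ready_time is not None:
            # Reached ready once: allow the configured shutdown delay
            return now - ready_time > shutdown_delay
        if start_time is not None:
            # Never reached ready: assume upstream trouble once the
            # init grace period expires
            return now - start_time > grace_period
        return False  # no timestamps yet; leave the channel alone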
--- apps/proxy/ts_proxy/server.py | 68 ++++++++++++++++++++++++++--------- 1 file changed, 52 insertions(+), 16 deletions(-) diff --git a/apps/proxy/ts_proxy/server.py b/apps/proxy/ts_proxy/server.py index 61dac1e3..f962334d 100644 --- a/apps/proxy/ts_proxy/server.py +++ b/apps/proxy/ts_proxy/server.py @@ -947,7 +947,7 @@ class ProxyServer: # If in connecting or waiting_for_clients state, check grace period if channel_state in [ChannelState.CONNECTING, ChannelState.WAITING_FOR_CLIENTS]: - # Get connection ready time from metadata + # Get connection_ready_time from metadata (indicates if channel reached ready state) connection_ready_time = None if metadata and b'connection_ready_time' in metadata: try: @@ -955,17 +955,60 @@ class ProxyServer: except (ValueError, TypeError): pass - # If still connecting, give it more time - if channel_state == ChannelState.CONNECTING: - logger.debug(f"Channel {channel_id} still connecting - not checking for clients yet") - continue + if total_clients == 0: + # Check if we have a connection_attempt timestamp (set when CONNECTING starts) + connection_attempt_time = None + attempt_key = RedisKeys.connection_attempt(channel_id) + if self.redis_client: + attempt_value = self.redis_client.get(attempt_key) + if attempt_value: + try: + connection_attempt_time = float(attempt_value.decode('utf-8')) + except (ValueError, TypeError): + pass - # If waiting for clients, check grace period - if connection_ready_time: + # Also get init time as a fallback + init_time = None + if metadata and b'init_time' in metadata: + try: + init_time = float(metadata[b'init_time'].decode('utf-8')) + except (ValueError, TypeError): + pass + + # Use whichever timestamp we have (prefer connection_attempt as it's more recent) + start_time = connection_attempt_time or init_time + + if start_time: + # Check which timeout to apply based on channel lifecycle + if connection_ready_time: + # Already reached ready - use shutdown_delay + time_since_ready = time.time() - connection_ready_time + shutdown_delay = ConfigHelper.channel_shutdown_delay() + + if time_since_ready > shutdown_delay: + logger.warning( + f"Channel {channel_id} in {channel_state} state with 0 clients for {time_since_ready:.1f}s " + f"(after reaching ready, shutdown_delay: {shutdown_delay}s) - stopping channel" + ) + self.stop_channel(channel_id) + continue + else: + # Never reached ready - use grace_period timeout + time_since_start = time.time() - start_time + connecting_timeout = ConfigHelper.channel_init_grace_period() + + if time_since_start > connecting_timeout: + logger.warning( + f"Channel {channel_id} stuck in {channel_state} state for {time_since_start:.1f}s " + f"with no clients (timeout: {connecting_timeout}s) - stopping channel due to upstream issues" + ) + self.stop_channel(channel_id) + continue + elif connection_ready_time: + # We have clients now, but check grace period for state transition grace_period = ConfigHelper.channel_init_grace_period() time_since_ready = time.time() - connection_ready_time - # Add this debug log logger.debug(f"GRACE PERIOD CHECK: Channel {channel_id} in {channel_state} state, " f"time_since_ready={time_since_ready:.1f}s, grace_period={grace_period}s, " f"total_clients={total_clients}") @@ -974,16 +1017,9 @@ class ProxyServer: # Still within grace period logger.debug(f"Channel {channel_id} in grace period - {time_since_ready:.1f}s of {grace_period}s elapsed") continue - elif total_clients == 0: - # Grace period expired with no clients - logger.info(f"Grace period expired 
({time_since_ready:.1f}s > {grace_period}s) with no clients - stopping channel {channel_id}") - self.stop_channel(channel_id) else: - # Grace period expired but we have clients - mark channel as active + # Grace period expired with clients - mark channel as active logger.info(f"Grace period expired with {total_clients} clients - marking channel {channel_id} as active") - old_state = "unknown" - if metadata and b'state' in metadata: - old_state = metadata[b'state'].decode('utf-8') if self.update_channel_state(channel_id, ChannelState.ACTIVE, { "grace_period_ended_at": str(time.time()), "clients_at_activation": str(total_clients) From bbe1f6364bc392a453f3886f51f7b84e1d275dfe Mon Sep 17 00:00:00 2001 From: FiveBoroughs Date: Sun, 16 Nov 2025 23:29:17 +0100 Subject: [PATCH 038/288] Fix: Preserve stream order in ChannelSerializer PATCH/PUT responses The ChannelSerializer.to_representation() method was not respecting the ChannelStream.order field when serializing PATCH/PUT responses. This caused streams to be returned in an arbitrary order rather than the order specified in the request. The update() method correctly saves the stream order to the database using the ChannelStream.order field, and GET requests (with include_streams=True) correctly return ordered streams via get_streams(). However, standard PATCH/PUT responses were using PrimaryKeyRelatedField which doesn't respect the ordering. This fix ensures that all representations (GET, PATCH, PUT) return streams ordered by the channelstream__order field. Impact: - PATCH/PUT responses now correctly reflect the stream order saved - Clients can trust the response data without needing a follow-up GET - No breaking changes - only fixes inconsistent behavior Tested with: - PATCH request with ordered stream IDs - Verified response matches request order - Verified GET request confirms order persisted to database --- apps/channels/serializers.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/apps/channels/serializers.py b/apps/channels/serializers.py index 7058ced2..264a45ea 100644 --- a/apps/channels/serializers.py +++ b/apps/channels/serializers.py @@ -348,8 +348,17 @@ class ChannelSerializer(serializers.ModelSerializer): if include_streams: self.fields["streams"] = serializers.SerializerMethodField() - - return super().to_representation(instance) + return super().to_representation(instance) + else: + # Fix: For PATCH/PUT responses, ensure streams are ordered + representation = super().to_representation(instance) + if "streams" in representation: + representation["streams"] = list( + instance.streams.all() + .order_by("channelstream__order") + .values_list("id", flat=True) + ) + return representation def get_logo(self, obj): return LogoSerializer(obj.logo).data From 1560afab97e866598d3aad8db4c83070480704a5 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Mon, 17 Nov 2025 17:33:10 -0600 Subject: [PATCH 039/288] Enhancement: Optimize bulk channel editing in ChannelViewSet by validating updates first and applying them in a single transaction, improving performance by about 50% and error handling. 
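
The shape of the change, sketched with this repo's model and serializer
names (error collection and the response body are trimmed for brevity;
the sketch unions changed fields across items, since bulk_update must
list every field any channel touches):

    from django.db import transaction

    def bulk_edit(updates):
        # updates: {channel_id: partial_payload}
        channels = {c.id: c for c in Channel.objects.filter(id__in=updates)}
        validated = []
        for cid, data in updates.items():
            ser = ChannelSerializer(channels[cid], data=data, partial=True)
            ser.is_valid(raise_exception=True)  # validate everything first
            validated.append((channels[cid], ser.validated_data))
        with transaction.atomic():  # then write once
            for channel, data in validated:
                for key, value in data.items():
                    setattr(channel, key, value)
            if validated:
                Channel.objects.bulk_update(
                    [c for c, _ in validated],
                    fields=sorted({k for _, d in validated for k in d}),
                    batch_size=100,
                )
        return [c for c, _ in validated]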
--- apps/channels/api_views.py | 103 ++++++++++++++++++++++++------------- 1 file changed, 67 insertions(+), 36 deletions(-) diff --git a/apps/channels/api_views.py b/apps/channels/api_views.py index fc5ea114..d2769870 100644 --- a/apps/channels/api_views.py +++ b/apps/channels/api_views.py @@ -435,8 +435,8 @@ class ChannelViewSet(viewsets.ModelViewSet): @action(detail=False, methods=["patch"], url_path="edit/bulk") def edit_bulk(self, request): """ - Bulk edit channels. - Expects a list of channels with their updates. + Bulk edit channels efficiently. + Validates all updates first, then applies in a single transaction. """ data = request.data if not isinstance(data, list): @@ -445,63 +445,94 @@ class ChannelViewSet(viewsets.ModelViewSet): status=status.HTTP_400_BAD_REQUEST, ) - updated_channels = [] - errors = [] + # Extract IDs and validate presence + channel_updates = {} + missing_ids = [] - for channel_data in data: + for i, channel_data in enumerate(data): channel_id = channel_data.get("id") if not channel_id: - errors.append({"error": "Channel ID is required"}) - continue + missing_ids.append(f"Item {i}: Channel ID is required") + else: + channel_updates[channel_id] = channel_data - try: - channel = Channel.objects.get(id=channel_id) + if missing_ids: + return Response( + {"errors": missing_ids}, + status=status.HTTP_400_BAD_REQUEST, + ) - # Handle channel_group_id properly - convert string to integer if needed - if 'channel_group_id' in channel_data: - group_id = channel_data['channel_group_id'] - if group_id is not None: - try: - channel_data['channel_group_id'] = int(group_id) - except (ValueError, TypeError): - channel_data['channel_group_id'] = None + # Fetch all channels at once (one query) + channels_dict = { + c.id: c for c in Channel.objects.filter(id__in=channel_updates.keys()) + } - # Use the serializer to validate and update - serializer = ChannelSerializer( - channel, data=channel_data, partial=True - ) + # Validate and prepare updates + validated_updates = [] + errors = [] - if serializer.is_valid(): - updated_channel = serializer.save() - updated_channels.append(updated_channel) - else: - errors.append({ - "channel_id": channel_id, - "errors": serializer.errors - }) + for channel_id, channel_data in channel_updates.items(): + channel = channels_dict.get(channel_id) - except Channel.DoesNotExist: + if not channel: errors.append({ "channel_id": channel_id, "error": "Channel not found" }) - except Exception as e: + continue + + # Handle channel_group_id conversion + if 'channel_group_id' in channel_data: + group_id = channel_data['channel_group_id'] + if group_id is not None: + try: + channel_data['channel_group_id'] = int(group_id) + except (ValueError, TypeError): + channel_data['channel_group_id'] = None + + # Validate with serializer + serializer = ChannelSerializer( + channel, data=channel_data, partial=True + ) + + if serializer.is_valid(): + validated_updates.append((channel, serializer.validated_data)) + else: errors.append({ "channel_id": channel_id, - "error": str(e) + "errors": serializer.errors }) if errors: return Response( - {"errors": errors, "updated_count": len(updated_channels)}, + {"errors": errors, "updated_count": len(validated_updates)}, status=status.HTTP_400_BAD_REQUEST, ) - # Serialize the updated channels for response - serialized_channels = ChannelSerializer(updated_channels, many=True).data + # Apply all updates in a transaction + with transaction.atomic(): + for channel, validated_data in validated_updates: + for key, value in 
validated_data.items(): + setattr(channel, key, value) + + # Single bulk_update query instead of individual saves + channels_to_update = [channel for channel, _ in validated_updates] + if channels_to_update: + Channel.objects.bulk_update( + channels_to_update, + fields=list(validated_updates[0][1].keys()), + batch_size=100 + ) + + # Return the updated objects (already in memory) + serialized_channels = ChannelSerializer( + [channel for channel, _ in validated_updates], + many=True, + context=self.get_serializer_context() + ).data return Response({ - "message": f"Successfully updated {len(updated_channels)} channels", + "message": f"Successfully updated {len(validated_updates)} channels", "channels": serialized_channels }) From 1b16df448284a936de56a5942316f12dcac40e77 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Mon, 17 Nov 2025 17:41:52 -0600 Subject: [PATCH 040/288] Enhancement: Improve batch EPG association in ChannelViewSet by adding validation for associations and implementing bulk updates, enhancing performance and error handling. --- apps/channels/api_views.py | 74 +++++++++++++++++++++++++------------- 1 file changed, 50 insertions(+), 24 deletions(-) diff --git a/apps/channels/api_views.py b/apps/channels/api_views.py index d2769870..bc920537 100644 --- a/apps/channels/api_views.py +++ b/apps/channels/api_views.py @@ -1070,8 +1070,15 @@ class ChannelViewSet(viewsets.ModelViewSet): def batch_set_epg(self, request): """Efficiently associate multiple channels with EPG data at once.""" associations = request.data.get("associations", []) - channels_updated = 0 - programs_refreshed = 0 + + if not associations: + return Response( + {"error": "associations list is required"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Extract channel IDs upfront + channel_updates = {} unique_epg_ids = set() for assoc in associations: @@ -1081,39 +1088,58 @@ class ChannelViewSet(viewsets.ModelViewSet): if not channel_id: continue - try: - # Get the channel - channel = Channel.objects.get(id=channel_id) + channel_updates[channel_id] = epg_data_id + if epg_data_id: + unique_epg_ids.add(epg_data_id) - # Set the EPG data - channel.epg_data_id = epg_data_id - channel.save(update_fields=["epg_data"]) - channels_updated += 1 + # Batch fetch all channels (single query) + channels_dict = { + c.id: c for c in Channel.objects.filter(id__in=channel_updates.keys()) + } - # Track unique EPG data IDs - if epg_data_id: - unique_epg_ids.add(epg_data_id) - - except Channel.DoesNotExist: + # Collect channels to update + channels_to_update = [] + for channel_id, epg_data_id in channel_updates.items(): + if channel_id not in channels_dict: logger.error(f"Channel with ID {channel_id} not found") - except Exception as e: - logger.error( - f"Error setting EPG data for channel {channel_id}: {str(e)}" + continue + + channel = channels_dict[channel_id] + channel.epg_data_id = epg_data_id + channels_to_update.append(channel) + + # Bulk update all channels (single query) + if channels_to_update: + with transaction.atomic(): + Channel.objects.bulk_update( + channels_to_update, + fields=["epg_data_id"], + batch_size=100 ) + channels_updated = len(channels_to_update) + # Trigger program refresh for unique EPG data IDs (skip dummy EPGs) from apps.epg.tasks import parse_programs_for_tvg_id from apps.epg.models import EPGData + # Batch fetch EPG data (single query) + epg_data_dict = { + epg.id: epg + for epg in EPGData.objects.filter(id__in=unique_epg_ids).select_related('epg_source') + } + + programs_refreshed = 0 for epg_id in 
unique_epg_ids: - try: - epg_data = EPGData.objects.select_related('epg_source').get(id=epg_id) - # Only refresh non-dummy EPG sources - if epg_data.epg_source.source_type != 'dummy': - parse_programs_for_tvg_id.delay(epg_id) - programs_refreshed += 1 - except EPGData.DoesNotExist: + epg_data = epg_data_dict.get(epg_id) + if not epg_data: logger.error(f"EPGData with ID {epg_id} not found") + continue + + # Only refresh non-dummy EPG sources + if epg_data.epg_source.source_type != 'dummy': + parse_programs_for_tvg_id.delay(epg_id) + programs_refreshed += 1 return Response( { From d8df8481363c81da61afff23203188c26e41618c Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Mon, 17 Nov 2025 17:57:04 -0600 Subject: [PATCH 041/288] Enhancement: Add success notification for channel updates in API, improving user feedback on successful operations. --- frontend/src/api.js | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/frontend/src/api.js b/frontend/src/api.js index 8f5aeeeb..fac95b34 100644 --- a/frontend/src/api.js +++ b/frontend/src/api.js @@ -462,7 +462,16 @@ export default class API { } ); - // Don't automatically update the store here - let the caller handle it + // Show success notification + if (response.message) { + notifications.show({ + title: 'Channels Updated', + message: response.message, + color: 'green', + autoClose: 4000, + }); + } + return response; } catch (e) { errorNotification('Failed to update channels', e); From afedce5cb21477c1be5faf86b539e72176a630f7 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Tue, 18 Nov 2025 09:27:22 -0600 Subject: [PATCH 042/288] Enhancement: Implement background profile refresh task with rate limiting to prevent provider bans during account profile updates. --- apps/m3u/tasks.py | 152 ++++++++++++++++++++++++++++------------ dispatcharr/settings.py | 5 ++ 2 files changed, 112 insertions(+), 45 deletions(-) diff --git a/apps/m3u/tasks.py b/apps/m3u/tasks.py index 15479379..8bd30361 100644 --- a/apps/m3u/tasks.py +++ b/apps/m3u/tasks.py @@ -1217,52 +1217,14 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False): auth_result = xc_client.authenticate() logger.debug(f"Authentication response: {auth_result}") - # Save account information to all active profiles + # Queue async profile refresh task to run in background + # This prevents any delay in the main refresh process try: - from apps.m3u.models import M3UAccountProfile - - profiles = M3UAccountProfile.objects.filter( - m3u_account=account, - is_active=True - ) - - # Update each profile with account information using its own transformed credentials - for profile in profiles: - try: - # Get transformed credentials for this specific profile - profile_url, profile_username, profile_password = get_transformed_credentials(account, profile) - - # Create a separate XC client for this profile's credentials - with XCClient( - profile_url, - profile_username, - profile_password, - user_agent_string - ) as profile_client: - # Authenticate with this profile's credentials - if profile_client.authenticate(): - # Get account information specific to this profile's credentials - profile_account_info = profile_client.get_account_info() - - # Merge with existing custom_properties if they exist - existing_props = profile.custom_properties or {} - existing_props.update(profile_account_info) - profile.custom_properties = existing_props - profile.save(update_fields=['custom_properties']) - - logger.info(f"Updated account information for profile '{profile.name}' with 
transformed credentials") - else: - logger.warning(f"Failed to authenticate profile '{profile.name}' with transformed credentials") - - except Exception as profile_error: - logger.error(f"Failed to update account information for profile '{profile.name}': {str(profile_error)}") - # Continue with other profiles even if one fails - - logger.info(f"Processed account information for {profiles.count()} profiles for account {account.name}") - - except Exception as save_error: - logger.warning(f"Failed to process profile account information: {str(save_error)}") - # Don't fail the whole process if saving account info fails + logger.info(f"Queueing background profile refresh for account {account.name}") + refresh_account_profiles.delay(account.id) + except Exception as e: + logger.warning(f"Failed to queue profile refresh task: {str(e)}") + # Don't fail the main refresh if profile refresh can't be queued except Exception as e: error_msg = f"Failed to authenticate with XC server: {str(e)}" @@ -2269,6 +2231,106 @@ def get_transformed_credentials(account, profile=None): return base_url, base_username, base_password +@shared_task +def refresh_account_profiles(account_id): + """Refresh account information for all active profiles of an XC account. + + This task runs asynchronously in the background after account refresh completes. + It includes rate limiting delays between profile authentications to prevent provider bans. + """ + from django.conf import settings + import time + + try: + account = M3UAccount.objects.get(id=account_id, is_active=True) + + if account.account_type != M3UAccount.Types.XC: + logger.debug(f"Account {account_id} is not XC type, skipping profile refresh") + return f"Account {account_id} is not an XtreamCodes account" + + from apps.m3u.models import M3UAccountProfile + + profiles = M3UAccountProfile.objects.filter( + m3u_account=account, + is_active=True + ) + + if not profiles.exists(): + logger.info(f"No active profiles found for account {account.name}") + return f"No active profiles for account {account_id}" + + # Get user agent for this account + try: + user_agent_string = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" + if account.user_agent_id: + from core.models import UserAgent + ua_obj = UserAgent.objects.get(id=account.user_agent_id) + if ua_obj and hasattr(ua_obj, "user_agent") and ua_obj.user_agent: + user_agent_string = ua_obj.user_agent + except Exception as e: + logger.warning(f"Error getting user agent, using fallback: {str(e)}") + logger.debug(f"Using user agent for profile refresh: {user_agent_string}") + # Get rate limiting delay from settings + profile_delay = getattr(settings, 'XC_PROFILE_REFRESH_DELAY', 2.5) + + profiles_updated = 0 + profiles_failed = 0 + + logger.info(f"Starting background refresh for {profiles.count()} profiles of account {account.name}") + + for idx, profile in enumerate(profiles): + try: + # Add delay between profiles to prevent rate limiting (except for first profile) + if idx > 0: + logger.info(f"Waiting {profile_delay}s before refreshing next profile to avoid rate limiting") + time.sleep(profile_delay) + + # Get transformed credentials for this specific profile + profile_url, profile_username, profile_password = get_transformed_credentials(account, profile) + + # Create a separate XC client for this profile's credentials + with XCClient( + profile_url, + profile_username, + profile_password, + user_agent_string + ) as profile_client: + # Authenticate with this profile's credentials + if profile_client.authenticate(): + 
# Get account information specific to this profile's credentials + profile_account_info = profile_client.get_account_info() + + # Merge with existing custom_properties if they exist + existing_props = profile.custom_properties or {} + existing_props.update(profile_account_info) + profile.custom_properties = existing_props + profile.save(update_fields=['custom_properties']) + + profiles_updated += 1 + logger.info(f"Updated account information for profile '{profile.name}' ({profiles_updated}/{profiles.count()})") + else: + profiles_failed += 1 + logger.warning(f"Failed to authenticate profile '{profile.name}' with transformed credentials") + + except Exception as profile_error: + profiles_failed += 1 + logger.error(f"Failed to update account information for profile '{profile.name}': {str(profile_error)}") + # Continue with other profiles even if one fails + + result_msg = f"Profile refresh complete for account {account.name}: {profiles_updated} updated, {profiles_failed} failed" + logger.info(result_msg) + return result_msg + + except M3UAccount.DoesNotExist: + error_msg = f"Account {account_id} not found" + logger.error(error_msg) + return error_msg + except Exception as e: + error_msg = f"Error refreshing profiles for account {account_id}: {str(e)}" + logger.error(error_msg) + return error_msg + + @shared_task def refresh_account_info(profile_id): """Refresh only the account information for a specific M3U profile.""" diff --git a/dispatcharr/settings.py b/dispatcharr/settings.py index a0c4fc84..d6c29dd9 100644 --- a/dispatcharr/settings.py +++ b/dispatcharr/settings.py @@ -51,6 +51,11 @@ EPG_BATCH_SIZE = 1000 # Number of records to process in a batch EPG_MEMORY_LIMIT = 512 # Memory limit in MB before forcing garbage collection EPG_ENABLE_MEMORY_MONITORING = True # Whether to monitor memory usage during processing +# XtreamCodes Rate Limiting Settings +# Delay between profile authentications when refreshing multiple profiles +# This prevents providers from temporarily banning users with many profiles +XC_PROFILE_REFRESH_DELAY = float(os.environ.get('XC_PROFILE_REFRESH_DELAY', '2.5')) # seconds between profile refreshes + # Database optimization settings DATABASE_STATEMENT_TIMEOUT = 300 # Seconds before timing out long-running queries DATABASE_CONN_MAX_AGE = ( From b6c3234e961d924a1773073546176a74615a51b1 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Tue, 18 Nov 2025 10:02:35 -0600 Subject: [PATCH 043/288] Enhancement: Improve zombie channel handling in ProxyServer by checking client connections and cleaning up orphaned metadata, ensuring better resource management and stability. 
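
The liveness test the cleanup pass applies to each channel, sketched as
a standalone function (the worker heartbeat key format appears in this
diff; the clients key is produced by RedisKeys.clients() in the code,
so the literal shown here is an assumption):

    def is_orphaned(redis_client, channel_id, owner):
        owner_alive = bool(owner) and redis_client.exists(
            f"ts_proxy:worker:{owner}:heartbeat"
        )
        clients = redis_client.scard(
            f"ts_proxy:channel:{channel_id}:clients"  # assumed key layout
        ) or 0
        # No live owner and no clients: safe to tear the channel down.
        # No live owner but clients remain: logged as a candidate for
        # ownership takeover rather than cleaned up.
        return not owner_alive and clients == 0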
--- apps/proxy/ts_proxy/server.py | 120 +++++++++++++++++++++++++++++++--- 1 file changed, 112 insertions(+), 8 deletions(-) diff --git a/apps/proxy/ts_proxy/server.py b/apps/proxy/ts_proxy/server.py index f962334d..0b07b4ae 100644 --- a/apps/proxy/ts_proxy/server.py +++ b/apps/proxy/ts_proxy/server.py @@ -719,6 +719,18 @@ class ProxyServer: else: # This is a zombie channel - owner is gone but metadata still exists logger.warning(f"Detected zombie channel {channel_id} - owner {owner} is no longer active") + + # Check if there are any clients connected + client_set_key = RedisKeys.clients(channel_id) + client_count = self.redis_client.scard(client_set_key) or 0 + + if client_count > 0: + logger.warning(f"Zombie channel {channel_id} has {client_count} clients - attempting ownership takeover") + # Could potentially take ownership here in the future + # For now, just clean it up to be safe + else: + logger.warning(f"Zombie channel {channel_id} has no clients - cleaning up") + self._clean_zombie_channel(channel_id, metadata) return False elif state in [ChannelState.STOPPED, ChannelState.ERROR]: @@ -940,6 +952,15 @@ class ProxyServer: if channel_id in self.client_managers: client_manager = self.client_managers[channel_id] total_clients = client_manager.get_total_client_count() + else: + # This can happen during reconnection attempts or crashes + # Check Redis directly for any connected clients + if self.redis_client: + client_set_key = RedisKeys.clients(channel_id) + total_clients = self.redis_client.scard(client_set_key) or 0 + + if total_clients == 0: + logger.warning(f"Channel {channel_id} is missing client_manager but we're the owner with 0 clients - will trigger cleanup") # Log client count periodically if time.time() % 30 < 1: # Every ~30 seconds @@ -1086,14 +1107,30 @@ class ProxyServer: continue # Check for local client count - if zero, clean up our local resources - if self.client_managers[channel_id].get_client_count() == 0: - # We're not the owner, and we have no local clients - clean up our resources - logger.debug(f"Non-owner cleanup: Channel {channel_id} has no local clients, cleaning up local resources") + if channel_id in self.client_managers: + if self.client_managers[channel_id].get_client_count() == 0: + # We're not the owner, and we have no local clients - clean up our resources + logger.debug(f"Non-owner cleanup: Channel {channel_id} has no local clients, cleaning up local resources") + self._cleanup_local_resources(channel_id) + else: + # This shouldn't happen, but clean up anyway + logger.warning(f"Non-owner cleanup: Channel {channel_id} has no client_manager entry, cleaning up local resources") self._cleanup_local_resources(channel_id) except Exception as e: logger.error(f"Error in cleanup thread: {e}", exc_info=True) + # Periodically check for orphaned channels (every 30 seconds) + if hasattr(self, '_last_orphan_check'): + if time.time() - self._last_orphan_check > 30: + try: + self._check_orphaned_metadata() + self._last_orphan_check = time.time() + except Exception as orphan_error: + logger.error(f"Error checking orphaned metadata: {orphan_error}", exc_info=True) + else: + self._last_orphan_check = time.time() + gevent.sleep(ConfigHelper.cleanup_check_interval()) # REPLACE: time.sleep(ConfigHelper.cleanup_check_interval()) thread = threading.Thread(target=cleanup_task, daemon=True) @@ -1115,10 +1152,6 @@ class ProxyServer: try: channel_id = key.decode('utf-8').split(':')[2] - # Skip channels we already have locally - if channel_id in self.stream_buffers: - continue - # 
Check if this channel has an owner owner = self.get_channel_owner(channel_id) @@ -1133,13 +1166,84 @@ class ProxyServer: else: # Orphaned channel with no clients - clean it up logger.info(f"Cleaning up orphaned channel {channel_id}") - self._clean_redis_keys(channel_id) + + # If we have it locally, stop it properly to clean up processes + if channel_id in self.stream_managers or channel_id in self.client_managers: + logger.info(f"Orphaned channel {channel_id} is local - calling stop_channel") + self.stop_channel(channel_id) + else: + # Just clean up Redis keys for remote channels + self._clean_redis_keys(channel_id) except Exception as e: logger.error(f"Error processing channel key {key}: {e}") except Exception as e: logger.error(f"Error checking orphaned channels: {e}") + def _check_orphaned_metadata(self): + """ + Check for metadata entries that have no owner and no clients. + This catches zombie channels that weren't cleaned up properly. + """ + if not self.redis_client: + return + + try: + # Get all channel metadata keys + channel_pattern = "ts_proxy:channel:*:metadata" + channel_keys = self.redis_client.keys(channel_pattern) + + for key in channel_keys: + try: + channel_id = key.decode('utf-8').split(':')[2] + + # Get metadata first + metadata = self.redis_client.hgetall(key) + if not metadata: + # Empty metadata - clean it up + logger.warning(f"Found empty metadata for channel {channel_id} - cleaning up") + # If we have it locally, stop it properly + if channel_id in self.stream_managers or channel_id in self.client_managers: + self.stop_channel(channel_id) + else: + self._clean_redis_keys(channel_id) + continue + + # Get owner + owner = metadata.get(b'owner', b'').decode('utf-8') if b'owner' in metadata else '' + + # Check if owner is still alive + owner_alive = False + if owner: + owner_heartbeat_key = f"ts_proxy:worker:{owner}:heartbeat" + owner_alive = self.redis_client.exists(owner_heartbeat_key) + + # Check client count + client_set_key = RedisKeys.clients(channel_id) + client_count = self.redis_client.scard(client_set_key) or 0 + + # If no owner and no clients, clean it up + if not owner_alive and client_count == 0: + state = metadata.get(b'state', b'unknown').decode('utf-8') if b'state' in metadata else 'unknown' + logger.warning(f"Found orphaned metadata for channel {channel_id} (state: {state}, owner: {owner}, clients: {client_count}) - cleaning up") + + # If we have it locally, stop it properly to clean up transcode/proxy processes + if channel_id in self.stream_managers or channel_id in self.client_managers: + logger.info(f"Channel {channel_id} is local - calling stop_channel to clean up processes") + self.stop_channel(channel_id) + else: + # Just clean up Redis keys for remote channels + self._clean_redis_keys(channel_id) + elif not owner_alive and client_count > 0: + # Owner is gone but clients remain - just log for now + logger.warning(f"Found orphaned channel {channel_id} with {client_count} clients but no owner - may need ownership takeover") + + except Exception as e: + logger.error(f"Error processing metadata key {key}: {e}", exc_info=True) + + except Exception as e: + logger.error(f"Error checking orphaned metadata: {e}", exc_info=True) + def _clean_redis_keys(self, channel_id): """Clean up all Redis keys for a channel more efficiently""" # Release the channel, stream, and profile keys from the channel From fea7c990210f73b61d4dd625affc4d0ab4238f2a Mon Sep 17 00:00:00 2001 From: GitHub Actions Date: Wed, 19 Nov 2025 03:39:13 +0000 Subject: [PATCH 044/288] Release 
v0.12.0 --- version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.py b/version.py index f2ebe1df..504ffa0c 100644 --- a/version.py +++ b/version.py @@ -1,5 +1,5 @@ """ Dispatcharr version information. """ -__version__ = '0.11.2' # Follow semantic versioning (MAJOR.MINOR.PATCH) +__version__ = '0.12.0' # Follow semantic versioning (MAJOR.MINOR.PATCH) __timestamp__ = None # Set during CI/CD build process From 1f0fe00cbf4a254d9e8a6aa9ea35b1a3d07cd7b8 Mon Sep 17 00:00:00 2001 From: Biologisten Date: Thu, 20 Nov 2025 17:34:03 +0100 Subject: [PATCH 045/288] UI now reflects date and time formats chosen by user --- frontend/src/pages/DVR.jsx | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/frontend/src/pages/DVR.jsx b/frontend/src/pages/DVR.jsx index ae2fd4ca..4ed6aca6 100644 --- a/frontend/src/pages/DVR.jsx +++ b/frontend/src/pages/DVR.jsx @@ -102,6 +102,16 @@ const RECURRING_DAY_OPTIONS = [ { value: 5, label: 'Sat' }, ]; +const useDateTimeFormat = () => { + const [timeFormatSetting] = useLocalStorage('time-format', '12h'); + const [dateFormatSetting] = useLocalStorage('date-format', 'mdy'); + // Use user preference for time format + const timeFormat = timeFormatSetting === '12h' ? 'h:mma' : 'HH:mm'; + const dateFormat = dateFormatSetting === 'mdy' ? 'MMM D' : 'D MMM'; + + return [timeFormat, dateFormat] +}; + // Short preview that triggers the details modal when clicked const RecordingSynopsis = ({ description, onOpen }) => { const truncated = description?.length > 140; @@ -139,6 +149,7 @@ const RecordingDetailsModal = ({ const { toUserTime, userNow } = useTimeHelpers(); const [childOpen, setChildOpen] = React.useState(false); const [childRec, setChildRec] = React.useState(null); + const [timeformat, dateformat] = useDateTimeFormat(); const safeRecording = recording || {}; const customProps = safeRecording.custom_properties || {}; @@ -320,7 +331,7 @@ const RecordingDetailsModal = ({ )} - {start.format('MMM D, YYYY h:mma')} – {end.format('h:mma')} + {start.format(`${dateformat}, YYYY ${timeformat}`)} – {end.format(timeformat)}
@@ -498,7 +509,7 @@ const RecordingDetailsModal = ({ - {start.format('MMM D, YYYY h:mma')} – {end.format('h:mma')} + {start.format(`${dateformat}, YYYY ${timeformat}`)} – {end.format(timeformat)} {rating && ( @@ -558,6 +569,7 @@ const RecurringRuleModal = ({ opened, onClose, ruleId, onEditOccurrence }) => { const fetchRecordings = useChannelsStore((s) => s.fetchRecordings); const recordings = useChannelsStore((s) => s.recordings); const { toUserTime, userNow } = useTimeHelpers(); + const [timeformat, dateformat] = useDateTimeFormat(); const [saving, setSaving] = useState(false); const [deleting, setDeleting] = useState(false); @@ -892,10 +904,10 @@ const RecurringRuleModal = ({ opened, onClose, ruleId, onEditOccurrence }) => { - {occStart.format('MMM D, YYYY')} + {occStart.format(`${dateformat}, YYYY`)} - {occStart.format('h:mma')} – {occEnd.format('h:mma')} + {occStart.format(timeformat)} – {occEnd.format(timeformat)} @@ -937,6 +949,7 @@ const RecordingCard = ({ recording, onOpenDetails, onOpenRecurring }) => { const showVideo = useVideoStore((s) => s.showVideo); const fetchRecordings = useChannelsStore((s) => s.fetchRecordings); const { toUserTime, userNow } = useTimeHelpers(); + const [timeformat, dateformat] = useDateTimeFormat(); const channel = channels?.[recording.channel]; @@ -1221,7 +1234,7 @@ const RecordingCard = ({ recording, onOpenDetails, onOpenRecurring }) => { {isSeriesGroup ? 'Next recording' : 'Time'} - {start.format('MMM D, YYYY h:mma')} – {end.format('h:mma')} + {start.format(`${dateformat}, YYYY ${timeformat}`)} – {end.format(timeformat)} @@ -1698,4 +1711,4 @@ const DVRPage = () => { ); }; -export default DVRPage; +export default DVRPage; \ No newline at end of file From 89a23164ff2f033a3871b44609879557d3316321 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Thu, 20 Nov 2025 17:41:06 -0600 Subject: [PATCH 046/288] Enhancement: Add system event logging and viewer with M3U/EPG endpoint caching System Event Logging: - Add SystemEvent model with 15 event types tracking channel operations, client connections, M3U/EPG activities, and buffering events - Log detailed metrics for M3U/EPG refresh operations (streams/programs created/updated/deleted) - Track M3U/EPG downloads with client information (IP address, user agent, profile, channel count) - Record channel lifecycle events (start, stop, reconnect) with stream and client details - Monitor client connections/disconnections and buffering events with stream metadata Event Viewer UI: - Add SystemEvents component with real-time updates via WebSocket - Implement pagination, filtering by event type, and configurable auto-refresh - Display events with color-coded badges and type-specific icons - Integrate event viewer into Stats page with modal display - Add event management settings (retention period, refresh rate) M3U/EPG Endpoint Optimizations: - Implement content caching with 5-minute TTL to reduce duplicate processing - Add client-based event deduplication (2-second window) using IP and user agent hashing - Support HEAD requests for efficient preflight checks - Cache streamed EPG responses while maintaining streaming behavior for first request --- apps/channels/tasks.py | 26 ++ apps/epg/tasks.py | 11 +- apps/m3u/tasks.py | 12 + apps/output/views.py | 130 +++++++- apps/proxy/ts_proxy/client_manager.py | 38 ++- apps/proxy/ts_proxy/server.py | 146 +++++++-- .../ts_proxy/services/channel_service.py | 6 +- apps/proxy/ts_proxy/stream_generator.py | 32 ++ apps/proxy/ts_proxy/stream_manager.py | 116 ++++++- core/api_urls.py | 12 
+- core/api_views.py | 61 ++++ core/migrations/0017_systemevent.py | 28 ++ core/models.py | 40 +++ core/utils.py | 45 +++ frontend/src/api.js | 17 + frontend/src/components/SystemEvents.jsx | 304 ++++++++++++++++++ frontend/src/pages/Settings.jsx | 43 ++- frontend/src/pages/Stats.jsx | 22 ++ 18 files changed, 1022 insertions(+), 67 deletions(-) create mode 100644 core/migrations/0017_systemevent.py create mode 100644 frontend/src/components/SystemEvents.jsx diff --git a/apps/channels/tasks.py b/apps/channels/tasks.py index 3943cf16..5a9528a7 100755 --- a/apps/channels/tasks.py +++ b/apps/channels/tasks.py @@ -1434,6 +1434,18 @@ def run_recording(recording_id, channel_id, start_time_str, end_time_str): logger.info(f"Starting recording for channel {channel.name}") + # Log system event for recording start + try: + from core.utils import log_system_event + log_system_event( + 'recording_start', + channel_id=channel.uuid, + channel_name=channel.name, + recording_id=recording_id + ) + except Exception as e: + logger.error(f"Could not log recording start event: {e}") + # Try to resolve the Recording row up front recording_obj = None try: @@ -1827,6 +1839,20 @@ def run_recording(recording_id, channel_id, start_time_str, end_time_str): # After the loop, the file and response are closed automatically. logger.info(f"Finished recording for channel {channel.name}") + # Log system event for recording end + try: + from core.utils import log_system_event + log_system_event( + 'recording_end', + channel_id=channel.uuid, + channel_name=channel.name, + recording_id=recording_id, + interrupted=interrupted, + bytes_written=bytes_written + ) + except Exception as e: + logger.error(f"Could not log recording end event: {e}") + # Remux TS to MKV container remux_success = False try: diff --git a/apps/epg/tasks.py b/apps/epg/tasks.py index b6350686..59d658b1 100644 --- a/apps/epg/tasks.py +++ b/apps/epg/tasks.py @@ -24,7 +24,7 @@ from asgiref.sync import async_to_sync from channels.layers import get_channel_layer from .models import EPGSource, EPGData, ProgramData -from core.utils import acquire_task_lock, release_task_lock, send_websocket_update, cleanup_memory +from core.utils import acquire_task_lock, release_task_lock, send_websocket_update, cleanup_memory, log_system_event logger = logging.getLogger(__name__) @@ -1496,6 +1496,15 @@ def parse_programs_for_source(epg_source, tvg_id=None): epg_source.updated_at = timezone.now() epg_source.save(update_fields=['status', 'last_message', 'updated_at']) + # Log system event for EPG refresh + log_system_event( + event_type='epg_refresh', + source_name=epg_source.name, + programs=program_count, + channels=channel_count, + updated=updated_count, + ) + # Send completion notification with status send_epg_update(epg_source.id, "parsing_programs", 100, status="success", diff --git a/apps/m3u/tasks.py b/apps/m3u/tasks.py index 8bd30361..cb82402e 100644 --- a/apps/m3u/tasks.py +++ b/apps/m3u/tasks.py @@ -24,6 +24,7 @@ from core.utils import ( acquire_task_lock, release_task_lock, natural_sort_key, + log_system_event, ) from core.models import CoreSettings, UserAgent from asgiref.sync import async_to_sync @@ -2840,6 +2841,17 @@ def refresh_single_m3u_account(account_id): account.updated_at = timezone.now() account.save(update_fields=["status", "last_message", "updated_at"]) + # Log system event for M3U refresh + log_system_event( + event_type='m3u_refresh', + account_name=account.name, + elapsed_time=round(elapsed_time, 2), + streams_created=streams_created, + 
streams_updated=streams_updated, + streams_deleted=streams_deleted, + total_processed=streams_processed, + ) + # Send final update with complete metrics and explicitly include success status send_m3u_update( account_id, diff --git a/apps/output/views.py b/apps/output/views.py index df18b349..327311d8 100644 --- a/apps/output/views.py +++ b/apps/output/views.py @@ -23,23 +23,64 @@ from django.db.models.functions import Lower import os from apps.m3u.utils import calculate_tuner_count import regex +from core.utils import log_system_event +import hashlib logger = logging.getLogger(__name__) +def get_client_identifier(request): + """Get client information including IP, user agent, and a unique hash identifier + + Returns: + tuple: (client_id_hash, client_ip, user_agent) + """ + # Get client IP (handle proxies) + x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR') + if x_forwarded_for: + client_ip = x_forwarded_for.split(',')[0].strip() + else: + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + + # Get user agent + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + + # Create a hash for a shorter cache key + client_str = f"{client_ip}:{user_agent}" + client_id_hash = hashlib.md5(client_str.encode()).hexdigest()[:12] + + return client_id_hash, client_ip, user_agent + def m3u_endpoint(request, profile_name=None, user=None): + logger.debug("m3u_endpoint called: method=%s, profile=%s", request.method, profile_name) if not network_access_allowed(request, "M3U_EPG"): return JsonResponse({"error": "Forbidden"}, status=403) + # Handle HEAD requests efficiently without generating content + if request.method == "HEAD": + logger.debug("Handling HEAD request for M3U") + response = HttpResponse(content_type="audio/x-mpegurl") + response["Content-Disposition"] = 'attachment; filename="channels.m3u"' + return response + return generate_m3u(request, profile_name, user) def epg_endpoint(request, profile_name=None, user=None): + logger.debug("epg_endpoint called: method=%s, profile=%s", request.method, profile_name) if not network_access_allowed(request, "M3U_EPG"): return JsonResponse({"error": "Forbidden"}, status=403) + # Handle HEAD requests efficiently without generating content + if request.method == "HEAD": + logger.debug("Handling HEAD request for EPG") + response = HttpResponse(content_type="application/xml") + response["Content-Disposition"] = 'attachment; filename="Dispatcharr.xml"' + response["Cache-Control"] = "no-cache" + return response + return generate_epg(request, profile_name, user) @csrf_exempt -@require_http_methods(["GET", "POST"]) +@require_http_methods(["GET", "POST", "HEAD"]) def generate_m3u(request, profile_name=None, user=None): """ Dynamically generate an M3U file from channels. @@ -47,7 +88,19 @@ def generate_m3u(request, profile_name=None, user=None): Supports both GET and POST methods for compatibility with IPTVSmarters. 
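
The get_client_identifier helper above reduces client identity to the first 12 hex characters of md5 over "ip:user_agent": stable per client, short enough for a cache key, and harmless on collision since it only gates event logging. A standalone sketch of that scheme, standard library only (names are illustrative, not from the patch):

    import hashlib

    def client_id(ip: str, user_agent: str) -> str:
        """Short, stable per-client id; a collision at worst merges one
        duplicate-suppression window between two clients."""
        return hashlib.md5(f"{ip}:{user_agent}".encode()).hexdigest()[:12]

    assert client_id("10.0.0.1", "VLC/3.0") == client_id("10.0.0.1", "VLC/3.0")
    assert client_id("10.0.0.1", "VLC/3.0") != client_id("10.0.0.2", "VLC/3.0")
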
""" # Check if this is a POST request and the body is not empty (which we don't want to allow) - logger.debug("Generating M3U for profile: %s, user: %s", profile_name, user.username if user else "Anonymous") + logger.debug("Generating M3U for profile: %s, user: %s, method: %s", profile_name, user.username if user else "Anonymous", request.method) + + # Check cache for recent identical request (helps with double-GET from browsers) + from django.core.cache import cache + cache_params = f"{profile_name or 'all'}:{user.username if user else 'anonymous'}:{request.GET.urlencode()}" + content_cache_key = f"m3u_content:{cache_params}" + + cached_content = cache.get(content_cache_key) + if cached_content: + logger.debug("Serving M3U from cache") + response = HttpResponse(cached_content, content_type="audio/x-mpegurl") + response["Content-Disposition"] = 'attachment; filename="channels.m3u"' + return response # Check if this is a POST request with data (which we don't want to allow) if request.method == "POST" and request.body: if request.body.decode() != '{}': @@ -184,6 +237,23 @@ def generate_m3u(request, profile_name=None, user=None): m3u_content += extinf_line + stream_url + "\n" + # Cache the generated content for 2 seconds to handle double-GET requests + cache.set(content_cache_key, m3u_content, 2) + + # Log system event for M3U download (with deduplication based on client) + client_id, client_ip, user_agent = get_client_identifier(request) + event_cache_key = f"m3u_download:{user.username if user else 'anonymous'}:{profile_name or 'all'}:{client_id}" + if not cache.get(event_cache_key): + log_system_event( + event_type='m3u_download', + profile=profile_name or 'all', + user=user.username if user else 'anonymous', + channels=channels.count(), + client_ip=client_ip, + user_agent=user_agent, + ) + cache.set(event_cache_key, True, 2) # Prevent duplicate events for 2 seconds + response = HttpResponse(m3u_content, content_type="audio/x-mpegurl") response["Content-Disposition"] = 'attachment; filename="channels.m3u"' return response @@ -1126,8 +1196,22 @@ def generate_epg(request, profile_name=None, user=None): by their associated EPGData record. This version filters data based on the 'days' parameter and sends keep-alives during processing. 
""" + # Check cache for recent identical request (helps with double-GET from browsers) + from django.core.cache import cache + cache_params = f"{profile_name or 'all'}:{user.username if user else 'anonymous'}:{request.GET.urlencode()}" + content_cache_key = f"epg_content:{cache_params}" + + cached_content = cache.get(content_cache_key) + if cached_content: + logger.debug("Serving EPG from cache") + response = HttpResponse(cached_content, content_type="application/xml") + response["Content-Disposition"] = 'attachment; filename="Dispatcharr.xml"' + response["Cache-Control"] = "no-cache" + return response + def epg_generator(): - """Generator function that yields EPG data with keep-alives during processing""" # Send initial HTTP headers as comments (these will be ignored by XML parsers but keep connection alive) + """Generator function that yields EPG data with keep-alives during processing""" + # Send initial HTTP headers as comments (these will be ignored by XML parsers but keep connection alive) xml_lines = [] xml_lines.append('') @@ -1286,7 +1370,8 @@ def generate_epg(request, profile_name=None, user=None): xml_lines.append(" ") # Send all channel definitions - yield '\n'.join(xml_lines) + '\n' + channel_xml = '\n'.join(xml_lines) + '\n' + yield channel_xml xml_lines = [] # Clear to save memory # Process programs for each channel @@ -1676,7 +1761,8 @@ def generate_epg(request, profile_name=None, user=None): # Send batch when full or send keep-alive if len(program_batch) >= batch_size: - yield '\n'.join(program_batch) + '\n' + batch_xml = '\n'.join(program_batch) + '\n' + yield batch_xml program_batch = [] # Move to next chunk @@ -1684,12 +1770,40 @@ def generate_epg(request, profile_name=None, user=None): # Send remaining programs in batch if program_batch: - yield '\n'.join(program_batch) + '\n' + batch_xml = '\n'.join(program_batch) + '\n' + yield batch_xml # Send final closing tag and completion message - yield "\n" # Return streaming response + yield "\n" + + # Log system event for EPG download after streaming completes (with deduplication based on client) + client_id, client_ip, user_agent = get_client_identifier(request) + event_cache_key = f"epg_download:{user.username if user else 'anonymous'}:{profile_name or 'all'}:{client_id}" + if not cache.get(event_cache_key): + log_system_event( + event_type='epg_download', + profile=profile_name or 'all', + user=user.username if user else 'anonymous', + channels=channels.count(), + client_ip=client_ip, + user_agent=user_agent, + ) + cache.set(event_cache_key, True, 2) # Prevent duplicate events for 2 seconds + + # Wrapper generator that collects content for caching + def caching_generator(): + collected_content = [] + for chunk in epg_generator(): + collected_content.append(chunk) + yield chunk + # After streaming completes, cache the full content + full_content = ''.join(collected_content) + cache.set(content_cache_key, full_content, 300) + logger.debug("Cached EPG content (%d bytes)", len(full_content)) + + # Return streaming response response = StreamingHttpResponse( - streaming_content=epg_generator(), + streaming_content=caching_generator(), content_type="application/xml" ) response["Content-Disposition"] = 'attachment; filename="Dispatcharr.xml"' diff --git a/apps/proxy/ts_proxy/client_manager.py b/apps/proxy/ts_proxy/client_manager.py index 3d89b3b8..bffecdde 100644 --- a/apps/proxy/ts_proxy/client_manager.py +++ b/apps/proxy/ts_proxy/client_manager.py @@ -34,6 +34,10 @@ class ClientManager: self.heartbeat_interval = 
ConfigHelper.get('CLIENT_HEARTBEAT_INTERVAL', 10) self.last_heartbeat_time = {} + # Get ProxyServer instance for ownership checks + from .server import ProxyServer + self.proxy_server = ProxyServer.get_instance() + # Start heartbeat thread for local clients self._start_heartbeat_thread() self._registered_clients = set() # Track already registered client IDs @@ -337,16 +341,30 @@ class ClientManager: self._notify_owner_of_activity() - # Publish client disconnected event - event_data = json.dumps({ - "event": EventType.CLIENT_DISCONNECTED, # Use constant instead of string - "channel_id": self.channel_id, - "client_id": client_id, - "worker_id": self.worker_id or "unknown", - "timestamp": time.time(), - "remaining_clients": remaining - }) - self.redis_client.publish(RedisKeys.events_channel(self.channel_id), event_data) + # Check if we're the owner - if so, handle locally; if not, publish event + am_i_owner = self.proxy_server and self.proxy_server.am_i_owner(self.channel_id) + + if am_i_owner: + # We're the owner - handle the disconnect directly + logger.debug(f"Owner handling CLIENT_DISCONNECTED for client {client_id} locally (not publishing)") + if remaining == 0: + # Trigger shutdown check directly via ProxyServer method + logger.debug(f"No clients left - triggering immediate shutdown check") + # Spawn greenlet to avoid blocking + import gevent + gevent.spawn(self.proxy_server.handle_client_disconnect, self.channel_id) + else: + # We're not the owner - publish event so owner can handle it + logger.debug(f"Non-owner publishing CLIENT_DISCONNECTED event for client {client_id} on channel {self.channel_id} from worker {self.worker_id}") + event_data = json.dumps({ + "event": EventType.CLIENT_DISCONNECTED, + "channel_id": self.channel_id, + "client_id": client_id, + "worker_id": self.worker_id or "unknown", + "timestamp": time.time(), + "remaining_clients": remaining + }) + self.redis_client.publish(RedisKeys.events_channel(self.channel_id), event_data) # Trigger channel stats update via WebSocket self._trigger_stats_update() diff --git a/apps/proxy/ts_proxy/server.py b/apps/proxy/ts_proxy/server.py index 0b07b4ae..db5b3d57 100644 --- a/apps/proxy/ts_proxy/server.py +++ b/apps/proxy/ts_proxy/server.py @@ -19,7 +19,7 @@ import gevent # Add gevent import from typing import Dict, Optional, Set from apps.proxy.config import TSConfig as Config from apps.channels.models import Channel, Stream -from core.utils import RedisClient +from core.utils import RedisClient, log_system_event from redis.exceptions import ConnectionError, TimeoutError from .stream_manager import StreamManager from .stream_buffer import StreamBuffer @@ -194,35 +194,11 @@ class ProxyServer: self.redis_client.delete(disconnect_key) elif event_type == EventType.CLIENT_DISCONNECTED: - logger.debug(f"Owner received {EventType.CLIENT_DISCONNECTED} event for channel {channel_id}") - # Check if any clients remain - if channel_id in self.client_managers: - # VERIFY REDIS CLIENT COUNT DIRECTLY - client_set_key = RedisKeys.clients(channel_id) - total = self.redis_client.scard(client_set_key) or 0 - - if total == 0: - logger.debug(f"No clients left after disconnect event - stopping channel {channel_id}") - # Set the disconnect timer for other workers to see - disconnect_key = RedisKeys.last_client_disconnect(channel_id) - self.redis_client.setex(disconnect_key, 60, str(time.time())) - - # Get configured shutdown delay or default - shutdown_delay = ConfigHelper.channel_shutdown_delay() - - if shutdown_delay > 0: - logger.info(f"Waiting 
{shutdown_delay}s before stopping channel...") - gevent.sleep(shutdown_delay) # REPLACE: time.sleep(shutdown_delay) - - # Re-check client count before stopping - total = self.redis_client.scard(client_set_key) or 0 - if total > 0: - logger.info(f"New clients connected during shutdown delay - aborting shutdown") - self.redis_client.delete(disconnect_key) - return - - # Stop the channel directly - self.stop_channel(channel_id) + client_id = data.get("client_id") + worker_id = data.get("worker_id") + logger.debug(f"Owner received {EventType.CLIENT_DISCONNECTED} event for channel {channel_id}, client {client_id} from worker {worker_id}") + # Delegate to dedicated method + self.handle_client_disconnect(channel_id) elif event_type == EventType.STREAM_SWITCH: @@ -646,6 +622,29 @@ class ProxyServer: logger.info(f"Created StreamManager for channel {channel_id} with stream ID {channel_stream_id}") self.stream_managers[channel_id] = stream_manager + # Log channel start event + try: + channel_obj = Channel.objects.get(uuid=channel_id) + + # Get stream name if stream_id is available + stream_name = None + if channel_stream_id: + try: + stream_obj = Stream.objects.get(id=channel_stream_id) + stream_name = stream_obj.name + except Exception: + pass + + log_system_event( + 'channel_start', + channel_id=channel_id, + channel_name=channel_obj.name, + stream_name=stream_name, + stream_id=channel_stream_id + ) + except Exception as e: + logger.error(f"Could not log channel start event: {e}") + # Create client manager with channel_id, redis_client AND worker_id (only if not already exists) if channel_id not in self.client_managers: client_manager = ClientManager( @@ -800,6 +799,44 @@ class ProxyServer: logger.error(f"Error cleaning zombie channel {channel_id}: {e}", exc_info=True) return False + def handle_client_disconnect(self, channel_id): + """ + Handle client disconnect event - check if channel should shut down. + Can be called directly by owner or via PubSub from non-owner workers. 
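
The method above is essentially a debounce: verify the client count, wait a grace period, verify again, and only then stop. The same shape distilled to plain Python; caller-supplied callables and time.sleep stand in for the Redis reads and gevent.sleep the patch actually uses, and the names are illustrative:

    import time

    def debounced_stop(count_clients, stop, delay=5.0):
        """Stop a shared resource only if it is still unused after a
        grace period, so a fast player reconnect does not tear it down."""
        if count_clients() > 0:
            return False            # still in use - nothing to do
        if delay > 0:
            time.sleep(delay)       # grace period for quick reconnects
            if count_clients() > 0:
                return False        # a client came back - abort shutdown
        stop()
        return True
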
+ """ + if channel_id not in self.client_managers: + return + + try: + # VERIFY REDIS CLIENT COUNT DIRECTLY + client_set_key = RedisKeys.clients(channel_id) + total = self.redis_client.scard(client_set_key) or 0 + + if total == 0: + logger.debug(f"No clients left after disconnect event - stopping channel {channel_id}") + # Set the disconnect timer for other workers to see + disconnect_key = RedisKeys.last_client_disconnect(channel_id) + self.redis_client.setex(disconnect_key, 60, str(time.time())) + + # Get configured shutdown delay or default + shutdown_delay = ConfigHelper.channel_shutdown_delay() + + if shutdown_delay > 0: + logger.info(f"Waiting {shutdown_delay}s before stopping channel...") + gevent.sleep(shutdown_delay) + + # Re-check client count before stopping + total = self.redis_client.scard(client_set_key) or 0 + if total > 0: + logger.info(f"New clients connected during shutdown delay - aborting shutdown") + self.redis_client.delete(disconnect_key) + return + + # Stop the channel directly + self.stop_channel(channel_id) + except Exception as e: + logger.error(f"Error handling client disconnect for channel {channel_id}: {e}") + def stop_channel(self, channel_id): """Stop a channel with proper ownership handling""" try: @@ -847,6 +884,41 @@ class ProxyServer: self.release_ownership(channel_id) logger.info(f"Released ownership of channel {channel_id}") + # Log channel stop event (after cleanup, before releasing ownership section ends) + try: + channel_obj = Channel.objects.get(uuid=channel_id) + + # Calculate runtime and get total bytes from metadata + runtime = None + total_bytes = None + if self.redis_client: + metadata_key = RedisKeys.channel_metadata(channel_id) + metadata = self.redis_client.hgetall(metadata_key) + if metadata: + # Calculate runtime from init_time + if b'init_time' in metadata: + try: + init_time = float(metadata[b'init_time'].decode('utf-8')) + runtime = round(time.time() - init_time, 2) + except Exception: + pass + # Get total bytes transferred + if b'total_bytes' in metadata: + try: + total_bytes = int(metadata[b'total_bytes'].decode('utf-8')) + except Exception: + pass + + log_system_event( + 'channel_stop', + channel_id=channel_id, + channel_name=channel_obj.name, + runtime=runtime, + total_bytes=total_bytes + ) + except Exception as e: + logger.error(f"Could not log channel stop event: {e}") + # Always clean up local resources - WITH SAFE CHECKS if channel_id in self.stream_managers: del self.stream_managers[channel_id] @@ -968,6 +1040,13 @@ class ProxyServer: # If in connecting or waiting_for_clients state, check grace period if channel_state in [ChannelState.CONNECTING, ChannelState.WAITING_FOR_CLIENTS]: + # Check if channel is already stopping + if self.redis_client: + stop_key = RedisKeys.channel_stopping(channel_id) + if self.redis_client.exists(stop_key): + logger.debug(f"Channel {channel_id} is already stopping - skipping monitor shutdown") + continue + # Get connection_ready_time from metadata (indicates if channel reached ready state) connection_ready_time = None if metadata and b'connection_ready_time' in metadata: @@ -1048,6 +1127,13 @@ class ProxyServer: logger.info(f"Channel {channel_id} activated with {total_clients} clients after grace period") # If active and no clients, start normal shutdown procedure elif channel_state not in [ChannelState.CONNECTING, ChannelState.WAITING_FOR_CLIENTS] and total_clients == 0: + # Check if channel is already stopping + if self.redis_client: + stop_key = RedisKeys.channel_stopping(channel_id) + if 
self.redis_client.exists(stop_key): + logger.debug(f"Channel {channel_id} is already stopping - skipping monitor shutdown") + continue + # Check if there's a pending no-clients timeout disconnect_key = RedisKeys.last_client_disconnect(channel_id) disconnect_time = None diff --git a/apps/proxy/ts_proxy/services/channel_service.py b/apps/proxy/ts_proxy/services/channel_service.py index 551e2d27..6484cd3f 100644 --- a/apps/proxy/ts_proxy/services/channel_service.py +++ b/apps/proxy/ts_proxy/services/channel_service.py @@ -14,6 +14,7 @@ from ..server import ProxyServer from ..redis_keys import RedisKeys from ..constants import EventType, ChannelState, ChannelMetadataField from ..url_utils import get_stream_info_for_switch +from core.utils import log_system_event logger = logging.getLogger("ts_proxy") @@ -598,7 +599,7 @@ class ChannelService: def _update_stream_stats_in_db(stream_id, **stats): """Update stream stats in database""" from django.db import connection - + try: from apps.channels.models import Stream from django.utils import timezone @@ -624,7 +625,7 @@ class ChannelService: except Exception as e: logger.error(f"Error updating stream stats in database for stream {stream_id}: {e}") return False - + finally: # Always close database connection after update try: @@ -700,6 +701,7 @@ class ChannelService: RedisKeys.events_channel(channel_id), json.dumps(switch_request) ) + return True @staticmethod diff --git a/apps/proxy/ts_proxy/stream_generator.py b/apps/proxy/ts_proxy/stream_generator.py index 5d4f661f..50404f1d 100644 --- a/apps/proxy/ts_proxy/stream_generator.py +++ b/apps/proxy/ts_proxy/stream_generator.py @@ -8,6 +8,8 @@ import logging import threading import gevent # Add this import at the top of your file from apps.proxy.config import TSConfig as Config +from apps.channels.models import Channel +from core.utils import log_system_event from .server import ProxyServer from .utils import create_ts_packet, get_logger from .redis_keys import RedisKeys @@ -88,6 +90,20 @@ class StreamGenerator: if not self._setup_streaming(): return + # Log client connect event + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'client_connect', + channel_id=self.channel_id, + channel_name=channel_obj.name, + client_ip=self.client_ip, + client_id=self.client_id, + user_agent=self.client_user_agent[:100] if self.client_user_agent else None + ) + except Exception as e: + logger.error(f"Could not log client connect event: {e}") + # Main streaming loop for chunk in self._stream_data_generator(): yield chunk @@ -439,6 +455,22 @@ class StreamGenerator: total_clients = client_manager.get_total_client_count() logger.info(f"[{self.client_id}] Disconnected after {elapsed:.2f}s (local: {local_clients}, total: {total_clients})") + # Log client disconnect event + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'client_disconnect', + channel_id=self.channel_id, + channel_name=channel_obj.name, + client_ip=self.client_ip, + client_id=self.client_id, + user_agent=self.client_user_agent[:100] if self.client_user_agent else None, + duration=round(elapsed, 2), + bytes_sent=self.bytes_sent + ) + except Exception as e: + logger.error(f"Could not log client disconnect event: {e}") + # Schedule channel shutdown if no clients left if not stream_released: # Only if we haven't already released the stream self._schedule_channel_shutdown_if_needed(local_clients) diff --git a/apps/proxy/ts_proxy/stream_manager.py b/apps/proxy/ts_proxy/stream_manager.py index 
c717398c..bbeb4bb7 100644 --- a/apps/proxy/ts_proxy/stream_manager.py +++ b/apps/proxy/ts_proxy/stream_manager.py @@ -16,6 +16,7 @@ from apps.proxy.config import TSConfig as Config from apps.channels.models import Channel, Stream from apps.m3u.models import M3UAccount, M3UAccountProfile from core.models import UserAgent, CoreSettings +from core.utils import log_system_event from .stream_buffer import StreamBuffer from .utils import detect_stream_type, get_logger from .redis_keys import RedisKeys @@ -260,6 +261,20 @@ class StreamManager: # Store connection start time to measure success duration connection_start_time = time.time() + # Log reconnection event if this is a retry (not first attempt) + if self.retry_count > 0: + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_reconnect', + channel_id=self.channel_id, + channel_name=channel_obj.name, + attempt=self.retry_count + 1, + max_attempts=self.max_retries + ) + except Exception as e: + logger.error(f"Could not log reconnection event: {e}") + # Successfully connected - read stream data until disconnect/error self._process_stream_data() # If we get here, the connection was closed/failed @@ -289,6 +304,20 @@ class StreamManager: if self.retry_count >= self.max_retries: url_failed = True logger.warning(f"Maximum retry attempts ({self.max_retries}) reached for URL: {self.url} for channel: {self.channel_id}") + + # Log connection error event + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_error', + channel_id=self.channel_id, + channel_name=channel_obj.name, + error_type='connection_failed', + url=self.url[:100] if self.url else None, + attempts=self.max_retries + ) + except Exception as e: + logger.error(f"Could not log connection error event: {e}") else: # Wait with exponential backoff before retrying timeout = min(.25 * self.retry_count, 3) # Cap at 3 seconds @@ -302,6 +331,21 @@ class StreamManager: if self.retry_count >= self.max_retries: url_failed = True + + # Log connection error event with exception details + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_error', + channel_id=self.channel_id, + channel_name=channel_obj.name, + error_type='connection_exception', + error_message=str(e)[:200], + url=self.url[:100] if self.url else None, + attempts=self.max_retries + ) + except Exception as log_error: + logger.error(f"Could not log connection error event: {log_error}") else: # Wait with exponential backoff before retrying timeout = min(.25 * self.retry_count, 3) # Cap at 3 seconds @@ -702,6 +746,19 @@ class StreamManager: # Reset buffering state self.buffering = False self.buffering_start_time = None + + # Log failover event + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_failover', + channel_id=self.channel_id, + channel_name=channel_obj.name, + reason='buffering_timeout', + duration=buffering_duration + ) + except Exception as e: + logger.error(f"Could not log failover event: {e}") else: logger.error(f"Failed to switch to next stream for channel {self.channel_id} after buffering timeout") else: @@ -709,6 +766,19 @@ class StreamManager: self.buffering = True self.buffering_start_time = time.time() logger.warning(f"Buffering started for channel {self.channel_id} - speed: {ffmpeg_speed}x") + + # Log system event for buffering + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_buffering', + 
channel_id=self.channel_id, + channel_name=channel_obj.name, + speed=ffmpeg_speed + ) + except Exception as e: + logger.error(f"Could not log buffering event: {e}") + # Log buffering warning logger.debug(f"FFmpeg speed on channel {self.channel_id} is below {self.buffering_speed} ({ffmpeg_speed}x) - buffering detected") # Set channel state to buffering @@ -1004,6 +1074,19 @@ class StreamManager: except Exception as e: logger.warning(f"Failed to reset buffer position: {e}") + # Log stream switch event + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'stream_switch', + channel_id=self.channel_id, + channel_name=channel_obj.name, + new_url=new_url[:100] if new_url else None, + stream_id=stream_id + ) + except Exception as e: + logger.error(f"Could not log stream switch event: {e}") + return True except Exception as e: logger.error(f"Error during URL update for channel {self.channel_id}: {e}", exc_info=True) @@ -1122,6 +1205,19 @@ class StreamManager: if connection_result: self.connection_start_time = time.time() logger.info(f"Reconnect successful for channel {self.channel_id}") + + # Log reconnection event + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_reconnect', + channel_id=self.channel_id, + channel_name=channel_obj.name, + reason='health_monitor' + ) + except Exception as e: + logger.error(f"Could not log reconnection event: {e}") + return True else: logger.warning(f"Reconnect failed for channel {self.channel_id}") @@ -1199,25 +1295,17 @@ class StreamManager: logger.debug(f"Error closing socket for channel {self.channel_id}: {e}") pass - # Enhanced transcode process cleanup with more aggressive termination + # Enhanced transcode process cleanup with immediate termination if self.transcode_process: try: - # First try polite termination - logger.debug(f"Terminating transcode process for channel {self.channel_id}") - self.transcode_process.terminate() + logger.debug(f"Killing transcode process for channel {self.channel_id}") + self.transcode_process.kill() - # Give it a short time to terminate gracefully + # Give it a very short time to die try: - self.transcode_process.wait(timeout=1.0) + self.transcode_process.wait(timeout=0.5) except subprocess.TimeoutExpired: - # If it doesn't terminate quickly, kill it - logger.warning(f"Transcode process didn't terminate within timeout, killing forcefully for channel {self.channel_id}") - self.transcode_process.kill() - - try: - self.transcode_process.wait(timeout=1.0) - except subprocess.TimeoutExpired: - logger.error(f"Failed to kill transcode process even with force for channel {self.channel_id}") + logger.error(f"Failed to kill transcode process even with force for channel {self.channel_id}") except Exception as e: logger.debug(f"Error terminating transcode process for channel {self.channel_id}: {e}") diff --git a/core/api_urls.py b/core/api_urls.py index baa4bbe5..75257db1 100644 --- a/core/api_urls.py +++ b/core/api_urls.py @@ -2,7 +2,16 @@ from django.urls import path, include from rest_framework.routers import DefaultRouter -from .api_views import UserAgentViewSet, StreamProfileViewSet, CoreSettingsViewSet, environment, version, rehash_streams_endpoint, TimezoneListView +from .api_views import ( + UserAgentViewSet, + StreamProfileViewSet, + CoreSettingsViewSet, + environment, + version, + rehash_streams_endpoint, + TimezoneListView, + get_system_events +) router = DefaultRouter() router.register(r'useragents', UserAgentViewSet, basename='useragent') @@ -13,5 
+22,6 @@ urlpatterns = [ path('version/', version, name='version'), path('rehash-streams/', rehash_streams_endpoint, name='rehash_streams'), path('timezones/', TimezoneListView.as_view(), name='timezones'), + path('system-events/', get_system_events, name='system_events'), path('', include(router.urls)), ] diff --git a/core/api_views.py b/core/api_views.py index f475909a..c50d7fa6 100644 --- a/core/api_views.py +++ b/core/api_views.py @@ -396,3 +396,64 @@ class TimezoneListView(APIView): 'grouped': grouped, 'count': len(all_timezones) }) + + +# ───────────────────────────── +# System Events API +# ───────────────────────────── +@api_view(['GET']) +@permission_classes([IsAuthenticated]) +def get_system_events(request): + """ + Get recent system events (channel start/stop, buffering, client connections, etc.) + + Query Parameters: + limit: Number of events to return per page (default: 100, max: 1000) + offset: Number of events to skip (for pagination, default: 0) + event_type: Filter by specific event type (optional) + """ + from core.models import SystemEvent + + try: + # Get pagination params + limit = min(int(request.GET.get('limit', 100)), 1000) + offset = int(request.GET.get('offset', 0)) + + # Start with all events + events = SystemEvent.objects.all() + + # Filter by event_type if provided + event_type = request.GET.get('event_type') + if event_type: + events = events.filter(event_type=event_type) + + # Get total count before applying pagination + total_count = events.count() + + # Apply offset and limit for pagination + events = events[offset:offset + limit] + + # Serialize the data + events_data = [{ + 'id': event.id, + 'event_type': event.event_type, + 'event_type_display': event.get_event_type_display(), + 'timestamp': event.timestamp.isoformat(), + 'channel_id': str(event.channel_id) if event.channel_id else None, + 'channel_name': event.channel_name, + 'details': event.details + } for event in events] + + return Response({ + 'events': events_data, + 'count': len(events_data), + 'total': total_count, + 'offset': offset, + 'limit': limit + }) + + except Exception as e: + logger.error(f"Error fetching system events: {e}") + return Response({ + 'error': 'Failed to fetch system events' + }, status=status.HTTP_500_INTERNAL_SERVER_ERROR) diff --git a/core/migrations/0017_systemevent.py b/core/migrations/0017_systemevent.py new file mode 100644 index 00000000..9b97213c --- /dev/null +++ b/core/migrations/0017_systemevent.py @@ -0,0 +1,28 @@ +# Generated by Django 5.2.4 on 2025-11-20 20:47 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0016_update_dvr_template_paths'), + ] + + operations = [ + migrations.CreateModel( + name='SystemEvent', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('event_type', models.CharField(choices=[('channel_start', 'Channel Started'), ('channel_stop', 'Channel Stopped'), ('channel_buffering', 'Channel Buffering'), ('channel_failover', 'Channel Failover'), ('channel_reconnect', 'Channel Reconnected'), ('channel_error', 'Channel Error'), ('client_connect', 'Client Connected'), ('client_disconnect', 'Client Disconnected'), ('recording_start', 'Recording Started'), ('recording_end', 'Recording Ended'), ('stream_switch', 'Stream Switched'), ('m3u_refresh', 'M3U Refreshed'), ('m3u_download', 'M3U Downloaded'), ('epg_refresh', 'EPG Refreshed'), ('epg_download', 'EPG Downloaded')], db_index=True, max_length=50)), + 
('timestamp', models.DateTimeField(auto_now_add=True, db_index=True)), + ('channel_id', models.UUIDField(blank=True, db_index=True, null=True)), + ('channel_name', models.CharField(blank=True, max_length=255, null=True)), + ('details', models.JSONField(blank=True, default=dict)), + ], + options={ + 'ordering': ['-timestamp'], + 'indexes': [models.Index(fields=['-timestamp'], name='core_system_timesta_c6c3d1_idx'), models.Index(fields=['event_type', '-timestamp'], name='core_system_event_t_4267d9_idx')], + }, + ), + ] diff --git a/core/models.py b/core/models.py index 3a5895ba..2a5eb1f3 100644 --- a/core/models.py +++ b/core/models.py @@ -375,3 +375,43 @@ class CoreSettings(models.Model): return rules except Exception: return rules + + +class SystemEvent(models.Model): + """ + Tracks system events like channel start/stop, buffering, failover, client connections. + Maintains a rolling history based on max_system_events setting. + """ + EVENT_TYPES = [ + ('channel_start', 'Channel Started'), + ('channel_stop', 'Channel Stopped'), + ('channel_buffering', 'Channel Buffering'), + ('channel_failover', 'Channel Failover'), + ('channel_reconnect', 'Channel Reconnected'), + ('channel_error', 'Channel Error'), + ('client_connect', 'Client Connected'), + ('client_disconnect', 'Client Disconnected'), + ('recording_start', 'Recording Started'), + ('recording_end', 'Recording Ended'), + ('stream_switch', 'Stream Switched'), + ('m3u_refresh', 'M3U Refreshed'), + ('m3u_download', 'M3U Downloaded'), + ('epg_refresh', 'EPG Refreshed'), + ('epg_download', 'EPG Downloaded'), + ] + + event_type = models.CharField(max_length=50, choices=EVENT_TYPES, db_index=True) + timestamp = models.DateTimeField(auto_now_add=True, db_index=True) + channel_id = models.UUIDField(null=True, blank=True, db_index=True) + channel_name = models.CharField(max_length=255, null=True, blank=True) + details = models.JSONField(default=dict, blank=True) + + class Meta: + ordering = ['-timestamp'] + indexes = [ + models.Index(fields=['-timestamp']), + models.Index(fields=['event_type', '-timestamp']), + ] + + def __str__(self): + return f"{self.event_type} - {self.channel_name or 'N/A'} @ {self.timestamp}" diff --git a/core/utils.py b/core/utils.py index 38b31144..7b6dd9b0 100644 --- a/core/utils.py +++ b/core/utils.py @@ -388,3 +388,48 @@ def validate_flexible_url(value): # If it doesn't match our flexible patterns, raise the original error raise ValidationError("Enter a valid URL.") + + +def log_system_event(event_type, channel_id=None, channel_name=None, **details): + """ + Log a system event and maintain the configured max history. 
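
The SystemEvent table above is kept to a rolling window: the trim in log_system_event (below) indexes into the newest-first ordering to find a cutoff id and deletes strictly older rows, which assumes ids grow with time. The same arithmetic on plain ids, illustration only (names are the editor's):

    def ids_to_delete(ids_newest_first, max_events=100):
        """Mirror of the trim below: take the id at index `max_events`
        in newest-first order and delete strictly-older rows. The strict
        '<' also keeps the cutoff row itself, so the table settles at
        max_events + 1 rows between trims."""
        if len(ids_newest_first) <= max_events:
            return []
        cutoff = ids_newest_first[max_events]
        return [i for i in ids_newest_first if i < cutoff]

    # newest-first ids 10..1, limit 3: cutoff id is 7, ids 6..1 go
    assert ids_to_delete(list(range(10, 0, -1)), 3) == [6, 5, 4, 3, 2, 1]
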
+ + Args: + event_type: Type of event (e.g., 'channel_start', 'client_connect') + channel_id: Optional UUID of the channel + channel_name: Optional name of the channel + **details: Additional details to store in the event (stored as JSON) + + Example: + log_system_event('channel_start', channel_id=uuid, channel_name='CNN', + stream_url='http://...', user='admin') + """ + from core.models import SystemEvent, CoreSettings + + try: + # Create the event + SystemEvent.objects.create( + event_type=event_type, + channel_id=channel_id, + channel_name=channel_name, + details=details + ) + + # Get max events from settings (default 100) + try: + max_events_setting = CoreSettings.objects.filter(key='max-system-events').first() + max_events = int(max_events_setting.value) if max_events_setting else 100 + except Exception: + max_events = 100 + + # Delete old events beyond the limit (keep it efficient with a single query) + total_count = SystemEvent.objects.count() + if total_count > max_events: + # Get the ID of the event at the cutoff point + cutoff_event = SystemEvent.objects.values_list('id', flat=True)[max_events] + # Delete all events with ID less than cutoff (older events) + SystemEvent.objects.filter(id__lt=cutoff_event).delete() + + except Exception as e: + # Don't let event logging break the main application + logger.error(f"Failed to log system event {event_type}: {e}") diff --git a/frontend/src/api.js b/frontend/src/api.js index fac95b34..470373f1 100644 --- a/frontend/src/api.js +++ b/frontend/src/api.js @@ -2481,4 +2481,21 @@ export default class API { errorNotification('Failed to update playback position', e); } } + + static async getSystemEvents(limit = 100, offset = 0, eventType = null) { + try { + const params = new URLSearchParams(); + params.append('limit', limit); + params.append('offset', offset); + if (eventType) { + params.append('event_type', eventType); + } + const response = await request( + `${host}/api/core/system-events/?${params.toString()}` + ); + return response; + } catch (e) { + errorNotification('Failed to retrieve system events', e); + } + } } diff --git a/frontend/src/components/SystemEvents.jsx b/frontend/src/components/SystemEvents.jsx new file mode 100644 index 00000000..4047801c --- /dev/null +++ b/frontend/src/components/SystemEvents.jsx @@ -0,0 +1,304 @@ +import React, { useState, useEffect, useCallback } from 'react'; +import { + ActionIcon, + Box, + Button, + Card, + Group, + NumberInput, + Pagination, + Select, + Stack, + Text, + Title, +} from '@mantine/core'; +import { + ChevronDown, + CirclePlay, + Download, + Gauge, + HardDriveDownload, + List, + RefreshCw, + SquareX, + Timer, + Users, + Video, +} from 'lucide-react'; +import dayjs from 'dayjs'; +import API from '../api'; +import useLocalStorage from '../hooks/useLocalStorage'; + +const SystemEvents = () => { + const [events, setEvents] = useState([]); + const [totalEvents, setTotalEvents] = useState(0); + const [isExpanded, setIsExpanded] = useState(false); + const [isLoading, setIsLoading] = useState(false); + const [dateFormatSetting] = useLocalStorage('date-format', 'mdy'); + const dateFormat = dateFormatSetting === 'mdy' ? 
'MM/DD' : 'DD/MM'; + const [eventsRefreshInterval, setEventsRefreshInterval] = useLocalStorage( + 'events-refresh-interval', + 0 + ); + const [eventsLimit, setEventsLimit] = useLocalStorage('events-limit', 100); + const [currentPage, setCurrentPage] = useState(1); + + // Calculate offset based on current page and limit + const offset = (currentPage - 1) * eventsLimit; + const totalPages = Math.ceil(totalEvents / eventsLimit); + + const fetchEvents = useCallback(async () => { + try { + setIsLoading(true); + const response = await API.getSystemEvents(eventsLimit, offset); + if (response && response.events) { + setEvents(response.events); + setTotalEvents(response.total || 0); + } + } catch (error) { + console.error('Error fetching system events:', error); + } finally { + setIsLoading(false); + } + }, [eventsLimit, offset]); + + // Fetch events on mount and when eventsRefreshInterval changes + useEffect(() => { + fetchEvents(); + + // Set up polling if interval is set and events section is expanded + if (eventsRefreshInterval > 0 && isExpanded) { + const interval = setInterval(fetchEvents, eventsRefreshInterval * 1000); + return () => clearInterval(interval); + } + }, [fetchEvents, eventsRefreshInterval, isExpanded]); + + // Reset to first page when limit changes + useEffect(() => { + setCurrentPage(1); + }, [eventsLimit]); + + const getEventIcon = (eventType) => { + switch (eventType) { + case 'channel_start': + return ; + case 'channel_stop': + return ; + case 'channel_reconnect': + return ; + case 'channel_buffering': + return ; + case 'channel_failover': + return ; + case 'client_connect': + return ; + case 'client_disconnect': + return ; + case 'recording_start': + return