From cb02069fb7dc5861bae9f0b782e042eb60143391 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Thu, 20 Mar 2025 19:03:26 -0500 Subject: [PATCH 1/6] Fixed incorrect logic on stepped retry timers. --- apps/proxy/ts_proxy/stream_manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/apps/proxy/ts_proxy/stream_manager.py b/apps/proxy/ts_proxy/stream_manager.py index 93ea4960..3d81472d 100644 --- a/apps/proxy/ts_proxy/stream_manager.py +++ b/apps/proxy/ts_proxy/stream_manager.py @@ -193,7 +193,7 @@ class StreamManager: logger.warning(f"Maximum retry attempts ({self.max_retries}) reached for URL: {self.url}") else: # Wait with exponential backoff before retrying - timeout = min(.25 ** self.retry_count, 3) # Cap at 3 seconds + timeout = min(.25 * self.retry_count, 3) # Cap at 3 seconds logger.info(f"Reconnecting in {timeout} seconds... (attempt {self.retry_count}/{self.max_retries})") time.sleep(timeout) @@ -206,7 +206,7 @@ class StreamManager: url_failed = True else: # Wait with exponential backoff before retrying - timeout = min(2 ** self.retry_count, 10) + timeout = min(.25 * self.retry_count, 3) # Cap at 3 seconds logger.info(f"Reconnecting in {timeout} seconds after error... (attempt {self.retry_count}/{self.max_retries})") time.sleep(timeout) From 4738d301d1bf6e06ccb11707b7f86475dd2c65a6 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Thu, 20 Mar 2025 21:03:01 -0500 Subject: [PATCH 2/6] Fixed regression with buffer checks when clients should be disconnecting due to failure. 
--- apps/proxy/ts_proxy/redis_keys.py | 5 + apps/proxy/ts_proxy/server.py | 9 +- .../ts_proxy/services/channel_service.py | 10 ++ apps/proxy/ts_proxy/stream_generator.py | 20 ++- apps/proxy/ts_proxy/stream_manager.py | 132 ++++++++++++++++-- 5 files changed, 161 insertions(+), 15 deletions(-) diff --git a/apps/proxy/ts_proxy/redis_keys.py b/apps/proxy/ts_proxy/redis_keys.py index ebbcbc24..1eaa8aa5 100644 --- a/apps/proxy/ts_proxy/redis_keys.py +++ b/apps/proxy/ts_proxy/redis_keys.py @@ -78,3 +78,8 @@ class RedisKeys: def worker_heartbeat(worker_id): """Key for worker heartbeat""" return f"ts_proxy:worker:{worker_id}:heartbeat" + + @staticmethod + def transcode_active(channel_id): + """Key indicating active transcode process""" + return f"ts_proxy:channel:{channel_id}:transcode_active" diff --git a/apps/proxy/ts_proxy/server.py b/apps/proxy/ts_proxy/server.py index aa2e7ffc..cfe5b4ce 100644 --- a/apps/proxy/ts_proxy/server.py +++ b/apps/proxy/ts_proxy/server.py @@ -308,6 +308,12 @@ class ProxyServer: if current and current.decode('utf-8') == self.worker_id: self.redis_client.delete(lock_key) logger.info(f"Released ownership of channel {channel_id}") + + # Also ensure channel stopping key is set to signal clients + stop_key = RedisKeys.channel_stopping(channel_id) + self.redis_client.setex(stop_key, 30, "true") + logger.info(f"Set stopping signal for channel {channel_id} clients") + except Exception as e: logger.error(f"Error releasing channel ownership: {e}") @@ -458,7 +464,8 @@ class ProxyServer: buffer, user_agent=channel_user_agent, transcode=transcode, - stream_id=channel_stream_id # Pass stream ID to the manager + stream_id=channel_stream_id, # Pass stream ID to the manager + worker_id=self.worker_id # Pass worker_id explicitly to eliminate circular dependency ) logger.info(f"Created StreamManager for channel {channel_id} with stream ID {channel_stream_id}") self.stream_managers[channel_id] = stream_manager diff --git 
a/apps/proxy/ts_proxy/services/channel_service.py b/apps/proxy/ts_proxy/services/channel_service.py index e00e680d..210e4b0f 100644 --- a/apps/proxy/ts_proxy/services/channel_service.py +++ b/apps/proxy/ts_proxy/services/channel_service.py @@ -218,9 +218,19 @@ class ChannelService: if metadata and b'state' in metadata: state = metadata[b'state'].decode('utf-8') channel_info = {"state": state} + + # Immediately mark as stopping in metadata so clients detect it faster + proxy_server.redis_client.hset(metadata_key, "state", ChannelState.STOPPING) + proxy_server.redis_client.hset(metadata_key, "state_changed_at", str(time.time())) except Exception as e: logger.error(f"Error fetching channel state: {e}") + # Set stopping flag with higher TTL to ensure it persists + if proxy_server.redis_client: + stop_key = RedisKeys.channel_stopping(channel_id) + proxy_server.redis_client.setex(stop_key, 60, "true") # Higher TTL of 60 seconds + logger.info(f"Set channel stopping flag with 60s TTL for channel {channel_id}") + # Broadcast stop event to all workers via PubSub if proxy_server.redis_client: ChannelService._publish_channel_stop_event(channel_id) diff --git a/apps/proxy/ts_proxy/stream_generator.py b/apps/proxy/ts_proxy/stream_generator.py index dcaae961..8a91f1dc 100644 --- a/apps/proxy/ts_proxy/stream_generator.py +++ b/apps/proxy/ts_proxy/stream_generator.py @@ -103,7 +103,7 @@ class StreamGenerator: if state in ['waiting_for_clients', 'active']: logger.info(f"[{self.client_id}] Channel {self.channel_id} now ready (state={state})") return True - elif state in ['error', 'stopped']: + elif state in ['error', 'stopped', 'stopping']: # Added 'stopping' to error states error_message = metadata.get(b'error_message', b'Unknown error').decode('utf-8') logger.error(f"[{self.client_id}] Channel {self.channel_id} in error state: {state}, message: {error_message}") # Send error packet before giving up @@ -119,6 +119,13 @@ class StreamGenerator: self.bytes_sent += len(keepalive_packet) 
last_keepalive = time.time() + # Also check stopping key directly + stop_key = RedisKeys.channel_stopping(self.channel_id) + if proxy_server.redis_client.exists(stop_key): + logger.error(f"[{self.client_id}] Channel {self.channel_id} stopping flag detected during initialization") + yield create_ts_packet('error', "Error: Channel is stopping") + return False + # Wait a bit before checking again time.sleep(0.1) @@ -221,12 +228,21 @@ class StreamGenerator: # Check if this specific client has been stopped (Redis keys, etc.) if proxy_server.redis_client: - # Channel stop check + # Channel stop check - with extended key set stop_key = RedisKeys.channel_stopping(self.channel_id) if proxy_server.redis_client.exists(stop_key): logger.info(f"[{self.client_id}] Detected channel stop signal, terminating stream") return False + # Also check channel state in metadata + metadata_key = RedisKeys.channel_metadata(self.channel_id) + metadata = proxy_server.redis_client.hgetall(metadata_key) + if metadata and b'state' in metadata: + state = metadata[b'state'].decode('utf-8') + if state in ['error', 'stopped', 'stopping']: + logger.info(f"[{self.client_id}] Channel in {state} state, terminating stream") + return False + # Client stop check client_stop_key = RedisKeys.client_stop(self.channel_id, self.client_id) if proxy_server.redis_client.exists(client_stop_key): diff --git a/apps/proxy/ts_proxy/stream_manager.py b/apps/proxy/ts_proxy/stream_manager.py index 3d81472d..c47766eb 100644 --- a/apps/proxy/ts_proxy/stream_manager.py +++ b/apps/proxy/ts_proxy/stream_manager.py @@ -24,7 +24,7 @@ logger = logging.getLogger("ts_proxy") class StreamManager: """Manages a connection to a TS stream without using raw sockets""" - def __init__(self, channel_id, url, buffer, user_agent=None, transcode=False, stream_id=None): + def __init__(self, channel_id, url, buffer, user_agent=None, transcode=False, stream_id=None, worker_id=None): # Basic properties self.channel_id = channel_id self.url = url @@ 
-36,6 +36,8 @@ class StreamManager: self.current_response = None self.current_session = None self.url_switching = False + # Store worker_id for ownership checks + self.worker_id = worker_id # Sockets used for transcode jobs self.socket = None @@ -89,6 +91,9 @@ class StreamManager: logger.info(f"Initialized stream manager for channel {buffer.channel_id}") + # Add this flag for tracking transcoding process status + self.transcode_process_active = False + def _create_session(self): """Create and configure requests session with optimal settings""" session = requests.Session() @@ -234,9 +239,61 @@ class StreamManager: except Exception as e: logger.error(f"Stream error: {e}", exc_info=True) finally: + # Enhanced cleanup in the finally block self.connected = False + + # Explicitly cancel all timers + for timer in list(self._buffer_check_timers): + try: + if timer and timer.is_alive(): + timer.cancel() + except Exception: + pass + + self._buffer_check_timers.clear() + + # Make sure transcode process is terminated + if self.transcode_process_active: + logger.info("Ensuring transcode process is terminated in finally block") + self._close_socket() + + # Close all connections self._close_all_connections() - logger.info(f"Stream manager stopped") + + # Update channel state in Redis to prevent clients from waiting indefinitely + if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client: + try: + metadata_key = RedisKeys.channel_metadata(self.channel_id) + + # Check if we're the owner before updating state + owner_key = RedisKeys.channel_owner(self.channel_id) + current_owner = self.buffer.redis_client.get(owner_key) + + # Use the worker_id that was passed in during initialization + if current_owner and self.worker_id and current_owner.decode('utf-8') == self.worker_id: + # Determine the appropriate error message based on retry failures + if self.tried_stream_ids and len(self.tried_stream_ids) > 0: + error_message = f"All {len(self.tried_stream_ids)} stream options 
failed" + else: + error_message = f"Connection failed after {self.max_retries} attempts" + + # Update metadata to indicate error state + update_data = { + "state": ChannelState.ERROR, + "state_changed_at": str(time.time()), + "error_message": error_message, + "error_time": str(time.time()) + } + self.buffer.redis_client.hset(metadata_key, mapping=update_data) + logger.info(f"Updated channel {self.channel_id} state to ERROR in Redis after stream failure") + + # Also set stopping key to ensure clients disconnect + stop_key = RedisKeys.channel_stopping(self.channel_id) + self.buffer.redis_client.setex(stop_key, 60, "true") + except Exception as e: + logger.error(f"Failed to update channel state in Redis: {e}") + + logger.info(f"Stream manager stopped for channel {self.channel_id}") def _establish_transcode_connection(self): """Establish a connection using transcoding""" @@ -264,11 +321,14 @@ class StreamManager: self.transcode_process = subprocess.Popen( self.transcode_cmd, stdout=subprocess.PIPE, - stderr=subprocess.DEVNULL, # Suppress FFmpeg logs + stderr=subprocess.DEVNULL, # Suppress error logs bufsize=188 * 64 # Buffer optimized for TS packets ) - self.socket = self.transcode_process.stdout # Read from FFmpeg output + # Set flag that transcoding process is active + self.transcode_process_active = True + + self.socket = self.transcode_process.stdout # Read from std output self.connected = True # Set channel state to waiting for clients @@ -392,6 +452,8 @@ class StreamManager: def stop(self): """Stop the stream manager and cancel all timers""" + logger.info(f"Stopping stream manager for channel {self.channel_id}") + # Add at the beginning of your stop method self.stopping = True @@ -405,7 +467,6 @@ class StreamManager: self._buffer_check_timers.clear() - # Rest of your existing stop method... 
# Set the flag first self.stop_requested = True @@ -423,6 +484,9 @@ class StreamManager: except Exception: pass + # Explicitly close socket/transcode resources + self._close_socket() + # Set running to false to ensure thread exits self.running = False @@ -530,15 +594,56 @@ class StreamManager: self.socket = None self.connected = False + # Enhanced transcode process cleanup with more aggressive termination if self.transcode_process: try: + # First try polite termination + logger.debug(f"Terminating transcode process for channel {self.channel_id}") self.transcode_process.terminate() - self.transcode_process.wait() + + # Give it a short time to terminate gracefully + try: + self.transcode_process.wait(timeout=1.0) + except subprocess.TimeoutExpired: + # If it doesn't terminate quickly, kill it + logger.warning(f"Transcode process didn't terminate within timeout, killing forcefully") + self.transcode_process.kill() + + try: + self.transcode_process.wait(timeout=1.0) + except subprocess.TimeoutExpired: + logger.error(f"Failed to kill transcode process even with force") except Exception as e: logger.debug(f"Error terminating transcode process: {e}") - pass + + # Final attempt: try to kill directly + try: + self.transcode_process.kill() + except Exception as e: + logger.error(f"Final kill attempt failed: {e}") self.transcode_process = None + self.transcode_process_active = False # Reset the flag + + # Clear transcode active key in Redis if available + if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client: + try: + transcode_key = RedisKeys.transcode_active(self.channel_id) + self.buffer.redis_client.delete(transcode_key) + logger.debug(f"Cleared transcode active flag for channel {self.channel_id}") + except Exception as e: + logger.debug(f"Error clearing transcode flag: {e}") + + # Cancel any remaining buffer check timers + for timer in list(self._buffer_check_timers): + try: + if timer and timer.is_alive(): + timer.cancel() + logger.debug(f"Cancelled 
buffer check timer during socket close for channel {self.channel_id}") + except Exception as e: + logger.debug(f"Error canceling timer during socket close: {e}") + + self._buffer_check_timers = [] def fetch_chunk(self): """Fetch data from socket with direct pass-through to buffer""" @@ -649,10 +754,10 @@ class StreamManager: def _check_buffer_and_set_state(self): """Check buffer size and set state to waiting_for_clients when ready""" try: - # First check if we're stopping or reconnecting - if getattr(self, 'stopping', False) or getattr(self, 'reconnecting', False): + # Enhanced stop detection with short-circuit return + if not self.running or getattr(self, 'stopping', False) or getattr(self, 'reconnecting', False): logger.debug(f"Buffer check aborted - channel {self.buffer.channel_id} is stopping or reconnecting") - return + return False # Return value to indicate check was aborted # Clean up completed timers self._buffer_check_timers = [t for t in self._buffer_check_timers if t.is_alive()] @@ -670,14 +775,17 @@ class StreamManager: # Still waiting, log progress and schedule another check logger.debug(f"Buffer filling for channel {channel_id}: {current_buffer_index}/{initial_chunks_needed} chunks") - # Schedule another check - NOW WITH TRACKING - if not getattr(self, 'stopping', False): + # Schedule another check - NOW WITH STOPPING CHECK + if self.running and not getattr(self, 'stopping', False): timer = threading.Timer(0.5, self._check_buffer_and_set_state) timer.daemon = True timer.start() self._buffer_check_timers.append(timer) + + return True # Return value to indicate check was successful except Exception as e: logger.error(f"Error in buffer check: {e}") + return False def _try_next_stream(self): """ From aa6cbf3b6579daaa43dcc19f6718d4a47d9fee35 Mon Sep 17 00:00:00 2001 From: Dispatcharr Date: Fri, 21 Mar 2025 08:22:17 -0500 Subject: [PATCH 3/6] Removed HDHR from root url --- apps/hdhr/api_views.py | 4 ++-- apps/hdhr/views.py | 4 ++-- apps/m3u/tasks.py | 2 +- 
dispatcharr/urls.py | 8 -------- 4 files changed, 5 insertions(+), 13 deletions(-) diff --git a/apps/hdhr/api_views.py b/apps/hdhr/api_views.py index 409ecbfd..4dd9c07d 100644 --- a/apps/hdhr/api_views.py +++ b/apps/hdhr/api_views.py @@ -39,7 +39,7 @@ class DiscoverAPIView(APIView): responses={200: openapi.Response("HDHR Discovery JSON")} ) def get(self, request): - base_url = request.build_absolute_uri('/').rstrip('/') + base_url = request.build_absolute_uri('/hdhr/').rstrip('/') device = HDHRDevice.objects.first() if not device: @@ -115,7 +115,7 @@ class HDHRDeviceXMLAPIView(APIView): responses={200: openapi.Response("HDHR Device XML")} ) def get(self, request): - base_url = request.build_absolute_uri('/').rstrip('/') + base_url = request.build_absolute_uri('/hdhr/').rstrip('/') xml_response = f""" diff --git a/apps/hdhr/views.py b/apps/hdhr/views.py index 409ecbfd..4dd9c07d 100644 --- a/apps/hdhr/views.py +++ b/apps/hdhr/views.py @@ -39,7 +39,7 @@ class DiscoverAPIView(APIView): responses={200: openapi.Response("HDHR Discovery JSON")} ) def get(self, request): - base_url = request.build_absolute_uri('/').rstrip('/') + base_url = request.build_absolute_uri('/hdhr/').rstrip('/') device = HDHRDevice.objects.first() if not device: @@ -115,7 +115,7 @@ class HDHRDeviceXMLAPIView(APIView): responses={200: openapi.Response("HDHR Device XML")} ) def get(self, request): - base_url = request.build_absolute_uri('/').rstrip('/') + base_url = request.build_absolute_uri('/hdhr/').rstrip('/') xml_response = f""" diff --git a/apps/m3u/tasks.py b/apps/m3u/tasks.py index b3de8567..068626b2 100644 --- a/apps/m3u/tasks.py +++ b/apps/m3u/tasks.py @@ -117,7 +117,7 @@ def refresh_single_m3u_account(account_id): return err_msg headers = {"User-Agent": account.user_agent.user_agent} - response = requests.get(account.server_url, timeout=60, headers=headers) + response = requests.get(account.server_url, headers=headers) response.raise_for_status() lines = response.text.splitlines() elif 
account.uploaded_file: diff --git a/dispatcharr/urls.py b/dispatcharr/urls.py index 72361783..f0de138e 100644 --- a/dispatcharr/urls.py +++ b/dispatcharr/urls.py @@ -7,7 +7,6 @@ from rest_framework import permissions from drf_yasg.views import get_schema_view from drf_yasg import openapi from .routing import websocket_urlpatterns -from apps.hdhr.api_views import HDHRDeviceViewSet, DiscoverAPIView, LineupAPIView, LineupStatusAPIView, HDHRDeviceXMLAPIView, hdhr_dashboard_view # Define schema_view for Swagger @@ -45,13 +44,6 @@ urlpatterns = [ path('proxy/', include(('apps.proxy.urls', 'proxy'), namespace='proxy')), path('proxy', RedirectView.as_view(url='/proxy/', permanent=True)), - # HDHR API - path('discover.json', DiscoverAPIView.as_view(), name='discover'), - path('lineup.json', LineupAPIView.as_view(), name='lineup'), - path('lineup_status.json', LineupStatusAPIView.as_view(), name='lineup_status'), - path('device.xml', HDHRDeviceXMLAPIView.as_view(), name='device_xml'), - - # Swagger UI path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'), From fe8d6fd0822b3b279b83f74276848a65391e3283 Mon Sep 17 00:00:00 2001 From: Dispatcharr Date: Fri, 21 Mar 2025 12:23:23 -0500 Subject: [PATCH 4/6] Updated EPGData Added EPG Data endpoint --- apps/epg/api_urls.py | 3 ++- apps/epg/api_views.py | 17 +++++++++++++++-- apps/epg/serializers.py | 16 +++++++++------- 3 files changed, 26 insertions(+), 10 deletions(-) diff --git a/apps/epg/api_urls.py b/apps/epg/api_urls.py index d1ad7dcf..2818e66b 100644 --- a/apps/epg/api_urls.py +++ b/apps/epg/api_urls.py @@ -1,12 +1,13 @@ from django.urls import path, include from rest_framework.routers import DefaultRouter -from .api_views import EPGSourceViewSet, ProgramViewSet, EPGGridAPIView, EPGImportAPIView +from .api_views import EPGSourceViewSet, ProgramViewSet, EPGGridAPIView, EPGImportAPIView, EPGDataViewSet app_name = 'epg' router = DefaultRouter() router.register(r'sources', EPGSourceViewSet, 
basename='epg-source') router.register(r'programs', ProgramViewSet, basename='program') +router.register(r'epgdata', EPGDataViewSet, basename='epgdata') urlpatterns = [ path('grid/', EPGGridAPIView.as_view(), name='epg_grid'), diff --git a/apps/epg/api_views.py b/apps/epg/api_views.py index f59f63e2..571a7165 100644 --- a/apps/epg/api_views.py +++ b/apps/epg/api_views.py @@ -7,8 +7,8 @@ from drf_yasg.utils import swagger_auto_schema from drf_yasg import openapi from django.utils import timezone from datetime import timedelta -from .models import EPGSource, ProgramData # Using ProgramData -from .serializers import ProgramDataSerializer, EPGSourceSerializer # Updated serializer +from .models import EPGSource, ProgramData, EPGData # Added EPGData +from .serializers import ProgramDataSerializer, EPGSourceSerializer, EPGDataSerializer # Updated serializer from .tasks import refresh_epg_data logger = logging.getLogger(__name__) @@ -78,3 +78,16 @@ class EPGImportAPIView(APIView): refresh_epg_data.delay() # Trigger Celery task logger.info("EPGImportAPIView: Task dispatched to refresh EPG data.") return Response({'success': True, 'message': 'EPG data import initiated.'}, status=status.HTTP_202_ACCEPTED) + + +# ───────────────────────────── +# 5) EPG Data View +# ───────────────────────────── +class EPGDataViewSet(viewsets.ReadOnlyModelViewSet): + """ + API endpoint that allows EPGData objects to be viewed.
+ """ + queryset = EPGData.objects.all() + serializer_class = EPGDataSerializer + permission_classes = [IsAuthenticated] + diff --git a/apps/epg/serializers.py b/apps/epg/serializers.py index 9a62e74e..b10e7371 100644 --- a/apps/epg/serializers.py +++ b/apps/epg/serializers.py @@ -13,12 +13,14 @@ class ProgramDataSerializer(serializers.ModelSerializer): fields = ['id', 'start_time', 'end_time', 'title', 'sub_title', 'description', 'tvg_id'] class EPGDataSerializer(serializers.ModelSerializer): - programs = ProgramDataSerializer(many=True, read_only=True) - channel = serializers.SerializerMethodField() - - def get_channel(self, obj): - return {"id": obj.channel.id, "name": obj.channel.name} if obj.channel else None - + """ + Only returns the tvg_id and the 'name' field from EPGData. + We assume 'name' is effectively the channel name. + """ class Meta: model = EPGData - fields = ['id', 'channel', 'name', 'programs'] + fields = [ + 'id', + 'tvg_id', + 'name', + ] \ No newline at end of file From f0c93fa41f25572a3bab1000f67ed6b76a299153 Mon Sep 17 00:00:00 2001 From: Dispatcharr Date: Sat, 22 Mar 2025 07:57:08 -0500 Subject: [PATCH 5/6] Added /hdhr to nginx --- docker/nginx.conf | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docker/nginx.conf b/docker/nginx.conf index 077914f7..0a90633d 100644 --- a/docker/nginx.conf +++ b/docker/nginx.conf @@ -22,6 +22,11 @@ server { return 301 /login; } + # Route HDHR request to Django + location /hdhr { + proxy_pass http://127.0.0.1:5656; + } + # Serve FFmpeg streams efficiently location /output/stream/ { proxy_pass http://127.0.0.1:5656; From 9733fca24278a3fc4bf1fc6c1ce9e247d3e85c1d Mon Sep 17 00:00:00 2001 From: Dispatcharr Date: Sat, 22 Mar 2025 08:32:06 -0500 Subject: [PATCH 6/6] Fixed HDHR copy url --- frontend/src/components/tables/ChannelsTable.jsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frontend/src/components/tables/ChannelsTable.jsx b/frontend/src/components/tables/ChannelsTable.jsx 
index eef3bfb5..6b6b9f1c 100644 --- a/frontend/src/components/tables/ChannelsTable.jsx +++ b/frontend/src/components/tables/ChannelsTable.jsx @@ -175,7 +175,7 @@ const ChannelStreams = ({ channel, isExpanded }) => { const m3uUrl = `${window.location.protocol}//${window.location.host}/output/m3u`; const epgUrl = `${window.location.protocol}//${window.location.host}/output/epg`; -const hdhrUrl = `${window.location.protocol}//${window.location.host}/output/hdhr`; +const hdhrUrl = `${window.location.protocol}//${window.location.host}/hdhr/`; const ChannelsTable = ({}) => { const [channel, setChannel] = useState(null); @@ -441,7 +441,7 @@ const ChannelsTable = ({}) => { }; const copyHDHRUrl = () => { handleCopy( - `${window.location.protocol}//${window.location.host}/output/hdhr`, + `${window.location.protocol}//${window.location.host}/hdhr/`, hdhrUrlRef ); };