From 906fbef9c2bb2c0e85e7d17b509066da9a236f7e Mon Sep 17 00:00:00 2001
From: SergeantPanda
Date: Thu, 29 May 2025 13:56:20 -0500
Subject: [PATCH 1/9] Output errors from parser if found.

---
 apps/epg/tasks.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/apps/epg/tasks.py b/apps/epg/tasks.py
index 1a5f832e..2ba6544c 100644
--- a/apps/epg/tasks.py
+++ b/apps/epg/tasks.py
@@ -1034,6 +1034,11 @@ def parse_channels_only(source):
         if process:
             logger.debug(f"[parse_channels_only] Memory before cleanup: {process.memory_info().rss / 1024 / 1024:.2f} MB")
         try:
+            # Output any errors in the channel_parser error log
+            if 'channel_parser' in locals() and hasattr(channel_parser, 'error_log') and len(channel_parser.error_log) > 0:
+                logger.debug(f"XML parser errors found ({len(channel_parser.error_log)} total):")
+                for i, error in enumerate(channel_parser.error_log):
+                    logger.debug(f"  Error {i+1}: {error}")
             if 'channel_parser' in locals():
                 del channel_parser
             if 'elem' in locals():

From 67663e2946ad4cff52ef195bb114a0dcc70d54e0 Mon Sep 17 00:00:00 2001
From: SergeantPanda
Date: Fri, 30 May 2025 10:25:47 -0500
Subject: [PATCH 2/9] Add remove_blank_text=True to lxml parser. Fixes crashes
 related to poorly formatted xmltv files.

---
 apps/epg/tasks.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/apps/epg/tasks.py b/apps/epg/tasks.py
index 2ba6544c..3c2df895 100644
--- a/apps/epg/tasks.py
+++ b/apps/epg/tasks.py
@@ -843,7 +843,7 @@ def parse_channels_only(source):
 
         # Change iterparse to look for both channel and programme elements
         logger.debug(f"Creating iterparse context for channels and programmes")
-        channel_parser = etree.iterparse(source_file, events=('end',), tag=('channel', 'programme'))
+        channel_parser = etree.iterparse(source_file, events=('end',), tag=('channel', 'programme'), remove_blank_text=True)
 
         if process:
             logger.debug(f"[parse_channels_only] Memory after creating iterparse: {process.memory_info().rss / 1024 / 1024:.2f} MB")
@@ -851,7 +851,6 @@ def parse_channels_only(source):
         total_elements_processed = 0  # Track total elements processed, not just channels
         for _, elem in channel_parser:
             total_elements_processed += 1
-            # Only process channel elements
             if elem.tag == 'channel':
                 channel_count += 1
@@ -967,6 +966,7 @@ def parse_channels_only(source):
                     logger.debug(f"[parse_channels_only] Total elements processed: {total_elements_processed}")
 
             else:
+                logger.trace(f"[parse_channels_only] Skipping non-channel element: {elem.get('channel', 'unknown')} - {elem.get('start', 'unknown')} {elem.tag}")
                 clear_element(elem)
                 continue
 
@@ -1195,7 +1195,7 @@ def parse_programs_for_tvg_id(epg_id):
         source_file = open(file_path, 'rb')
 
         # Stream parse the file using lxml's iterparse
-        program_parser = etree.iterparse(source_file, events=('end',), tag='programme')
+        program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True)
 
         for _, elem in program_parser:
             if elem.get('channel') == epg.tvg_id:

From d339c322ede5fd7504a06246327ce056b86ad923 Mon Sep 17 00:00:00 2001
From: SergeantPanda
Date: Fri, 30 May 2025 14:23:03 -0500
Subject: [PATCH 3/9] Support using direct logos: add '?cachedlogos=false' to
 the end of the URL

---
 apps/output/views.py | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/apps/output/views.py b/apps/output/views.py
index 39b20a41..23f6f0be 100644
--- a/apps/output/views.py
+++ b/apps/output/views.py
@@ -21,6 +21,9 @@ def generate_m3u(request, profile_name=None):
     else:
         channels = Channel.objects.order_by('channel_number')
 
+    # Check if the request wants to use direct logo URLs instead of cache
+    use_cached_logos = request.GET.get('cachedlogos', 'true').lower() != 'false'
+
     m3u_content = "#EXTM3U\n"
     for channel in channels:
         group_title = channel.channel_group.name if channel.channel_group else "Default"
@@ -40,7 +43,17 @@ def generate_m3u(request, profile_name=None):
 
         tvg_logo = ""
         if channel.logo:
-            tvg_logo = request.build_absolute_uri(reverse('api:channels:logo-cache', args=[channel.logo.id]))
+            if use_cached_logos:
+                # Use cached logo as before
+                tvg_logo = request.build_absolute_uri(reverse('api:channels:logo-cache', args=[channel.logo.id]))
+            else:
+                # Use the logo's original URL when it is a direct HTTP(S) link
+                direct_logo = channel.logo.url if channel.logo.url.startswith(('http://', 'https://')) else None
+                # If a direct logo URL was found, use it; otherwise fall back to the cached version
+                if direct_logo:
+                    tvg_logo = direct_logo
+                else:
+                    tvg_logo = request.build_absolute_uri(reverse('api:channels:logo-cache', args=[channel.logo.id]))
 
         # create possible gracenote id insertion
         tvc_guide_stationid = ""

From a9cdc9e37ab79b6ebbcfb2a110e212fb1a7766a2 Mon Sep 17 00:00:00 2001
From: SergeantPanda
Date: Fri, 30 May 2025 14:40:06 -0500
Subject: [PATCH 4/9] Support ?cachedlogos=false for EPG as well.

---
 apps/output/views.py | 23 +++++++++++++++++------
 1 file changed, 17 insertions(+), 6 deletions(-)

diff --git a/apps/output/views.py b/apps/output/views.py
index 23f6f0be..d550ec8d 100644
--- a/apps/output/views.py
+++ b/apps/output/views.py
@@ -198,15 +198,26 @@ def generate_epg(request, profile_name=None):
             formatted_channel_number = str(channel.channel_number)
         else:
             formatted_channel_number = str(channel.id)
-
+        # Check if the request wants to use direct logo URLs instead of cache
+        use_cached_logos = request.GET.get('cachedlogos', 'true').lower() != 'false'
+        # Add channel logo if available
+        tvg_logo = ""
+        if channel.logo:
+            if use_cached_logos:
+                # Use cached logo as before
+                tvg_logo = request.build_absolute_uri(reverse('api:channels:logo-cache', args=[channel.logo.id]))
+            else:
+                # Use the logo's original URL when it is a direct HTTP(S) link
+                direct_logo = channel.logo.url if channel.logo.url.startswith(('http://', 'https://')) else None
+                # If a direct logo URL was found, use it; otherwise fall back to the cached version
+                if direct_logo:
+                    tvg_logo = direct_logo
+                else:
+                    tvg_logo = request.build_absolute_uri(reverse('api:channels:logo-cache', args=[channel.logo.id]))
         display_name = channel.epg_data.name if channel.epg_data else channel.name
         xml_lines.append(f'  <channel id="{formatted_channel_number}">')
         xml_lines.append(f'    <display-name>{html.escape(display_name)}</display-name>')
-
-        # Add channel logo if available
-        if channel.logo:
-            logo_url = request.build_absolute_uri(reverse('api:channels:logo-cache', args=[channel.logo.id]))
-            xml_lines.append(f'    <icon src="{logo_url}" />')
+        xml_lines.append(f'    <icon src="{tvg_logo}" />')
         xml_lines.append('  </channel>')
 

From 70e4e43d88cf8278999f7153d3b5bb3853a9d2ab Mon Sep 17 00:00:00 2001
From: Sam LaManna
Date: Sun, 1 Jun 2025 12:53:17 -0400
Subject: [PATCH 5/9] Add Issue Forms

---
 .github/ISSUE_TEMPLATE/bug_report.yml      | 63 ++++++++++++++++++++++
 .github/ISSUE_TEMPLATE/config.yml          |  1 +
 .github/ISSUE_TEMPLATE/feature_request.yml | 38 +++++++++++++
 3 files changed, 102 insertions(+)
 create mode 100644 .github/ISSUE_TEMPLATE/bug_report.yml
 create mode 100644 .github/ISSUE_TEMPLATE/config.yml
 create mode 100644 .github/ISSUE_TEMPLATE/feature_request.yml

diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 00000000..ed29c346
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,63 @@
+name: Bug Report
+description: I have an issue with Dispatcharr
+title: "[Bug]: "
+labels: ["Bug", "Triage"]
+projects: []
+assignees: []
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Please make sure you search for similar issues before submitting. Thank you for your bug report!
+  - type: textarea
+    id: describe-the-bug
+    attributes:
+      label: Describe the bug
+      description: Make sure to attach screenshots if possible!
+      placeholder: Tell us what you see!
+      value: "A clear and concise description of what the bug is. What did you expect to happen?"
+    validations:
+      required: true
+  - type: textarea
+    id: reproduce
+    attributes:
+      label: How can we recreate this bug?
+      description: Be detailed!
+      placeholder: Tell us what you see!
+      value: "1. Go to '...' 2. Click on '....' 3. Scroll down to '....' 4. See error"
+    validations:
+      required: true
+  - type: input
+    id: dispatcharr-version
+    attributes:
+      label: Dispatcharr Version
+      description: What version of Dispatcharr are you running?
+      placeholder: Located bottom left of main screen
+    validations:
+      required: true
+  - type: input
+    id: docker-version
+    attributes:
+      label: Docker Version
+      description: What version of Docker are you running?
+      placeholder: docker --version
+    validations:
+      required: true
+  - type: textarea
+    id: docker-compose
+    attributes:
+      label: What's in your Docker Compose file?
+      description: Please share your docker-compose.yml file
+      placeholder: Tell us what you see!
+      value: "If not using Docker Compose just put not using."
+    validations:
+      required: true
+  - type: textarea
+    id: client-info
+    attributes:
+      label: Client Information
+      description: What are you using to view the streams from Dispatcharr?
+      placeholder: Tell us what you see!
+      value: "Device, App, Versions for both, etc..."
+    validations:
+      required: true
\ No newline at end of file

diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 00000000..ec4bb386
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1 @@
+blank_issues_enabled: false
\ No newline at end of file

diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
new file mode 100644
index 00000000..2de56f8c
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -0,0 +1,38 @@
+name: Feature request
+description: I want to suggest a new feature for Dispatcharr
+title: "[Feature]: "
+labels: ["Feature Request"]
+projects: []
+assignees: []
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Thank you for helping to make Dispatcharr better!
+  - type: textarea
+    id: describe-problem
+    attributes:
+      label: Is your feature request related to a problem?
+      description: Make sure to attach screenshots if possible!
+      placeholder: Tell us what you see!
+      value: "A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]"
+    validations:
+      required: true
+  - type: textarea
+    id: describe-solution
+    attributes:
+      label: Describe the solution you'd like
+      description: A clear and concise description of what you want to happen.
+      placeholder: Tell us what you see!
+      value: "Describe here."
+    validations:
+      required: true
+  - type: textarea
+    id: extras
+    attributes:
+      label: Additional context
+      description: Anything else you want to add?
+      placeholder: Tell us what you see!
+ value: "Nothing Extra" + validations: + required: true \ No newline at end of file From 39a06f9ba2ee2e6577d7d965342af18f3be52781 Mon Sep 17 00:00:00 2001 From: Sam LaManna Date: Sun, 1 Jun 2025 14:10:50 -0400 Subject: [PATCH 6/9] Add Support for Github Organization Issue Type --- .github/ISSUE_TEMPLATE/bug_report.yml | 1 + .github/ISSUE_TEMPLATE/feature_request.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index ed29c346..d36be10c 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -2,6 +2,7 @@ name: Bug Report description: I have an issue with Dispatcharr title: "[Bug]: " labels: ["Bug", "Triage"] +type: "Bug" projects: [] assignees: [] body: diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index 2de56f8c..bf7db830 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -2,6 +2,7 @@ name: Feature request description: I want to suggest a new feature for Dispatcharr title: "[Feature]: " labels: ["Feature Request"] +type: "Feature" projects: [] assignees: [] body: From 58f5287a530fa51173e4c84a2043f2da1b88c87c Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Mon, 2 Jun 2025 10:44:30 -0500 Subject: [PATCH 7/9] Improved logging for M3U processing. --- apps/m3u/tasks.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/apps/m3u/tasks.py b/apps/m3u/tasks.py index b1b1170d..9756a1f2 100644 --- a/apps/m3u/tasks.py +++ b/apps/m3u/tasks.py @@ -239,7 +239,7 @@ def process_groups(account, groups): group_objs = [] groups_to_create = [] for group_name, custom_props in groups.items(): - logger.debug(f"Handling group: {group_name}") + logger.debug(f"Handling group for M3U account {account.id}: {group_name}") if (group_name not in existing_groups): groups_to_create.append(ChannelGroup( name=group_name, @@ -405,7 +405,7 @@ def process_m3u_batch(account_id, batch, groups, hash_keys): stream_hashes = {} # compiled_filters = [(f.filter_type, re.compile(f.regex_pattern, re.IGNORECASE)) for f in filters] - logger.debug(f"Processing batch of {len(batch)}") + logger.debug(f"Processing batch of {len(batch)} for M3U account {account_id}") for stream_info in batch: try: name, url = stream_info["name"], stream_info["url"] @@ -487,7 +487,7 @@ def process_m3u_batch(account_id, batch, groups, hash_keys): except Exception as e: logger.error(f"Bulk create failed: {str(e)}") - retval = f"Batch processed: {len(streams_to_create)} created, {len(streams_to_update)} updated." + retval = f"M3U account: {account_id}, Batch processed: {len(streams_to_create)} created, {len(streams_to_update)} updated." 
 
     # Aggressive garbage collection
     #del streams_to_create, streams_to_update, stream_hashes, existing_streams
@@ -502,11 +502,11 @@ def cleanup_streams(account_id):
         m3u_account__m3u_account=account,
         m3u_account__enabled=True,
     ).values_list('id', flat=True)
-    logger.info(f"Found {len(existing_groups)} active groups")
+    logger.info(f"Found {len(existing_groups)} active groups for M3U account {account_id}")
 
     # Calculate cutoff date for stale streams
     stale_cutoff = timezone.now() - timezone.timedelta(days=account.stale_stream_days)
-    logger.info(f"Removing streams not seen since {stale_cutoff}")
+    logger.info(f"Removing streams not seen since {stale_cutoff} for M3U account {account_id}")
 
     # Delete streams that are not in active groups
     streams_to_delete = Stream.objects.filter(
@@ -527,7 +527,7 @@ def cleanup_streams(account_id):
     streams_to_delete.delete()
     stale_streams.delete()
 
-    logger.info(f"Cleanup complete: {deleted_count} streams removed due to group filter, {stale_count} removed as stale")
+    logger.info(f"Cleanup for M3U account {account_id} complete: {deleted_count} streams removed due to group filter, {stale_count} removed as stale")
 
 @shared_task
 def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False):
@@ -712,7 +712,7 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False):
                     group_name = parsed["attributes"]["group-title"]
                     # Log new groups as they're discovered
                     if group_name not in groups:
-                        logger.debug(f"Found new group: '{group_name}'")
+                        logger.debug(f"Found new group for M3U account {account_id}: '{group_name}'")
                         groups[group_name] = {}
 
                 extinf_data.append(parsed)
@@ -729,7 +729,7 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False):
 
                 # Periodically log progress for large files
                 if valid_stream_count % 1000 == 0:
-                    logger.debug(f"Processed {valid_stream_count} valid streams so far...")
+                    logger.debug(f"Processed {valid_stream_count} valid streams so far for M3U account: {account_id}")
 
         # Log summary statistics
         logger.info(f"M3U parsing complete - Lines: {line_count}, EXTINF: {extinf_count}, URLs: {url_count}, Valid streams: {valid_stream_count}")
@@ -962,7 +962,7 @@ def refresh_single_m3u_account(account_id):
             account.save(update_fields=['status'])
 
         if account.account_type == M3UAccount.Types.STADNARD:
-            logger.debug(f"Processing Standard account with groups: {existing_groups}")
+            logger.debug(f"Processing Standard account ({account_id}) with groups: {existing_groups}")
             # Break into batches and process in parallel
             batches = [extinf_data[i:i + BATCH_SIZE] for i in range(0, len(extinf_data), BATCH_SIZE)]
             task_group = group(process_m3u_batch.s(account_id, batch, existing_groups, hash_keys) for batch in batches)
@@ -1107,8 +1107,6 @@ def refresh_single_m3u_account(account_id):
             message=account.last_message
         )
 
-        print(f"Function took {elapsed_time} seconds to execute.")
-
     except Exception as e:
         logger.error(f"Error processing M3U for account {account_id}: {str(e)}")
         account.status = M3UAccount.Status.ERROR

From 6ce387b0b07f02da54389abf37b407cee1e830cf Mon Sep 17 00:00:00 2001
From: SergeantPanda
Date: Mon, 2 Jun 2025 18:03:32 -0500
Subject: [PATCH 8/9] Auto-scale Celery based on demand.

Should lower overall memory and CPU usage while still allowing CPU-heavy
tasks to complete quickly.
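
For reference, Celery's --autoscale flag takes max,min process counts, so

    celery -A dispatcharr worker --autoscale=6,1

keeps a single worker process alive when idle and grows the pool to at most
six under load, instead of the four processes the old --concurrency=4
pinned permanently.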
Closes #111
---
 dispatcharr/settings.py | 9 ---------
 docker/uwsgi.debug.ini  | 2 +-
 docker/uwsgi.dev.ini    | 2 +-
 docker/uwsgi.ini        | 2 +-
 4 files changed, 3 insertions(+), 12 deletions(-)

diff --git a/dispatcharr/settings.py b/dispatcharr/settings.py
index 4e1e0d55..06084b49 100644
--- a/dispatcharr/settings.py
+++ b/dispatcharr/settings.py
@@ -199,15 +199,6 @@ CELERY_BROKER_TRANSPORT_OPTIONS = {
 CELERY_ACCEPT_CONTENT = ['json']
 CELERY_TASK_SERIALIZER = 'json'
 
-# Memory management settings
-#CELERY_WORKER_MAX_TASKS_PER_CHILD = 10  # Restart worker after 10 tasks to free memory
-#CELERY_WORKER_PREFETCH_MULTIPLIER = 1  # Don't prefetch tasks - process one at a time
-#CELERY_TASK_ACKS_LATE = True  # Only acknowledge tasks after they're processed
-#CELERY_TASK_TIME_LIMIT = 3600  # 1 hour time limit per task
-#CELERY_TASK_SOFT_TIME_LIMIT = 3540  # Soft limit 60 seconds before hard limit
-#CELERY_WORKER_CANCEL_LONG_RUNNING_TASKS_ON_CONNECTION_LOSS = True  # Cancel tasks if connection lost
-#CELERY_TASK_IGNORE_RESULT = True  # Don't store results unless explicitly needed
-
 CELERY_BEAT_SCHEDULER = "django_celery_beat.schedulers.DatabaseScheduler"
 CELERY_BEAT_SCHEDULE = {
     'fetch-channel-statuses': {
diff --git a/docker/uwsgi.debug.ini b/docker/uwsgi.debug.ini
index 6ca855f3..e049df87 100644
--- a/docker/uwsgi.debug.ini
+++ b/docker/uwsgi.debug.ini
@@ -8,7 +8,7 @@ exec-before = python /app/scripts/wait_for_redis.py
 ; Start Redis first
 attach-daemon = redis-server
 ; Then start other services
-attach-daemon = celery -A dispatcharr worker --concurrency=4
+attach-daemon = celery -A dispatcharr worker --autoscale=6,1
 attach-daemon = celery -A dispatcharr beat
 attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application
 attach-daemon = cd /app/frontend && npm run dev
diff --git a/docker/uwsgi.dev.ini b/docker/uwsgi.dev.ini
index f3e5238e..7e50f2ef 100644
--- a/docker/uwsgi.dev.ini
+++ b/docker/uwsgi.dev.ini
@@ -10,7 +10,7 @@ exec-pre = python /app/scripts/wait_for_redis.py
 ; Start Redis first
 attach-daemon = redis-server
 ; Then start other services
-attach-daemon = celery -A dispatcharr worker --concurrency=4
+attach-daemon = celery -A dispatcharr worker --autoscale=6,1
 attach-daemon = celery -A dispatcharr beat
 attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application
 attach-daemon = cd /app/frontend && npm run dev
diff --git a/docker/uwsgi.ini b/docker/uwsgi.ini
index 32eb6e3c..b35ea5bf 100644
--- a/docker/uwsgi.ini
+++ b/docker/uwsgi.ini
@@ -10,7 +10,7 @@ exec-pre = python /app/scripts/wait_for_redis.py
 ; Start Redis first
 attach-daemon = redis-server
 ; Then start other services
-attach-daemon = celery -A dispatcharr worker --concurrency=4
+attach-daemon = celery -A dispatcharr worker --autoscale=6,1
 attach-daemon = celery -A dispatcharr beat
 attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application
 

From a72eaf118ff1fe204f7e4967835d752004927cf1 Mon Sep 17 00:00:00 2001
From: SergeantPanda
Date: Tue, 3 Jun 2025 10:59:53 -0500
Subject: [PATCH 9/9] Refactor channel info retrieval for safer decoding and
 improved error logging.

Hopefully fixes stats not showing sometimes.
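
redis-py returns None for a missing hash field, and the old code called
.decode('utf-8') on those values unconditionally (e.g. the ip_address
lookup), which raised AttributeError ('NoneType' object has no attribute
'decode') and aborted the whole status lookup. The new safe_decode() helper
substitutes a default instead; illustrative behaviour:

    safe_decode(None)          # -> "unknown" (the default)
    safe_decode(b'buffering')  # -> "buffering"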
---
 apps/proxy/ts_proxy/channel_status.py | 32 ++++++++++++++++----------
 1 file changed, 20 insertions(+), 12 deletions(-)

diff --git a/apps/proxy/ts_proxy/channel_status.py b/apps/proxy/ts_proxy/channel_status.py
index dd18d922..50e84eec 100644
--- a/apps/proxy/ts_proxy/channel_status.py
+++ b/apps/proxy/ts_proxy/channel_status.py
@@ -307,16 +307,23 @@ class ChannelStatus:
             client_count = proxy_server.redis_client.scard(client_set_key) or 0
 
             # Calculate uptime
-            created_at = float(metadata.get(ChannelMetadataField.INIT_TIME.encode('utf-8'), b'0').decode('utf-8'))
+            init_time_bytes = metadata.get(ChannelMetadataField.INIT_TIME.encode('utf-8'), b'0')
+            created_at = float(init_time_bytes.decode('utf-8'))
             uptime = time.time() - created_at if created_at > 0 else 0
 
+            # Safely decode bytes or use defaults
+            def safe_decode(bytes_value, default="unknown"):
+                if bytes_value is None:
+                    return default
+                return bytes_value.decode('utf-8')
+
             # Simplified info
             info = {
                 'channel_id': channel_id,
-                'state': metadata.get(ChannelMetadataField.STATE.encode('utf-8'), b'unknown').decode('utf-8'),
-                'url': metadata.get(ChannelMetadataField.URL.encode('utf-8'), b'').decode('utf-8'),
-                'stream_profile': metadata.get(ChannelMetadataField.STREAM_PROFILE.encode('utf-8'), b'').decode('utf-8'),
-                'owner': metadata.get(ChannelMetadataField.OWNER.encode('utf-8'), b'unknown').decode('utf-8'),
+                'state': safe_decode(metadata.get(ChannelMetadataField.STATE.encode('utf-8'))),
+                'url': safe_decode(metadata.get(ChannelMetadataField.URL.encode('utf-8')), ""),
+                'stream_profile': safe_decode(metadata.get(ChannelMetadataField.STREAM_PROFILE.encode('utf-8')), ""),
+                'owner': safe_decode(metadata.get(ChannelMetadataField.OWNER.encode('utf-8'))),
                 'buffer_index': int(buffer_index_value.decode('utf-8')) if buffer_index_value else 0,
                 'client_count': client_count,
                 'uptime': uptime
 
                 # Efficient way - just retrieve the essentials
                 client_info = {
                     'client_id': client_id_str,
-                    'user_agent': proxy_server.redis_client.hget(client_key, 'user_agent'),
-                    'ip_address': proxy_server.redis_client.hget(client_key, 'ip_address').decode('utf-8'),
                 }
 
-                if client_info['user_agent']:
-                    client_info['user_agent'] = client_info['user_agent'].decode('utf-8')
-                else:
-                    client_info['user_agent'] = 'unknown'
+                # Safely get user_agent and ip_address
+                user_agent_bytes = proxy_server.redis_client.hget(client_key, 'user_agent')
+                client_info['user_agent'] = safe_decode(user_agent_bytes)
+
+                ip_address_bytes = proxy_server.redis_client.hget(client_key, 'ip_address')
+                if ip_address_bytes:
+                    client_info['ip_address'] = safe_decode(ip_address_bytes)
 
                 # Just get connected_at for client age
                 connected_at_bytes = proxy_server.redis_client.hget(client_key, 'connected_at')
 
             return info
         except Exception as e:
-            logger.error(f"Error getting channel info: {e}")
+            logger.error(f"Error getting channel info: {e}", exc_info=True)  # Added exc_info for better debugging
             return None