merged in main

commit 9f96529707
Author: dekzter
Date: 2025-06-03 17:49:24 -04:00
11 changed files with 176 additions and 52 deletions

.github/ISSUE_TEMPLATE/bug_report.yml (new file, 64 lines)

@ -0,0 +1,64 @@
name: Bug Report
description: I have an issue with Dispatcharr
title: "[Bug]: "
labels: ["Bug", "Triage"]
type: "Bug"
projects: []
assignees: []
body:
  - type: markdown
    attributes:
      value: |
        Please make sure you search for similar issues before submitting. Thank you for your bug report!
  - type: textarea
    id: describe-the-bug
    attributes:
      label: Describe the bug
      description: Make sure to attach screenshots if possible!
      placeholder: Tell us what you see!
      value: "A clear and concise description of what the bug is. What did you expect to happen?"
    validations:
      required: true
  - type: textarea
    id: reproduce
    attributes:
      label: How can we recreate this bug?
      description: Be detailed!
      placeholder: Tell us what you see!
      value: "1. Go to '...' 2. Click on '....' 3. Scroll down to '....' 4. See error"
    validations:
      required: true
  - type: input
    id: dispatcharr-version
    attributes:
      label: Dispatcharr Version
      description: What version of Dispatcharr are you running?
      placeholder: Located bottom left of main screen
    validations:
      required: true
  - type: input
    id: docker-version
    attributes:
      label: Docker Version
      description: What version of Docker are you running?
      placeholder: docker --version
    validations:
      required: true
  - type: textarea
    id: docker-compose
    attributes:
      label: What's in your Docker Compose file?
      description: Please share your docker-compose.yml file
      placeholder: Tell us what you see!
      value: "If not using Docker Compose just put not using."
    validations:
      required: true
  - type: textarea
    id: client-info
    attributes:
      label: Client Information
      description: What are you using to view the streams from Dispatcharr?
      placeholder: Tell us what you see!
      value: "Device, App, Versions for both, etc..."
    validations:
      required: true
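
GitHub only validates issue-form YAML when the template is rendered, so a quick local check can catch mistakes earlier. A minimal sketch, assuming PyYAML is available and the template lives at the path shown (both are assumptions, not part of this commit); the same check applies to the feature request template further down.

# Hypothetical local check; the template path and the rules enforced are assumptions.
import yaml  # PyYAML, not a dependency introduced by this commit

with open(".github/ISSUE_TEMPLATE/bug_report.yml") as fh:
    template = yaml.safe_load(fh)

for element in template.get("body", []):
    if element.get("type") == "markdown":
        continue  # markdown blocks carry no id or validations
    assert "id" in element, f"missing id on a {element.get('type')} element"
    assert element.get("validations", {}).get("required") is True, \
        f"element {element.get('id')} is not marked required"

print("issue form template looks structurally sound")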

.github/ISSUE_TEMPLATE/config.yml (new file, 1 line)

@ -0,0 +1 @@
blank_issues_enabled: false

Feature request issue template (new file, 39 lines)

@ -0,0 +1,39 @@
name: Feature request
description: I want to suggest a new feature for Dispatcharr
title: "[Feature]: "
labels: ["Feature Request"]
type: "Feature"
projects: []
assignees: []
body:
  - type: markdown
    attributes:
      value: |
        Thank you for helping to make Dispatcharr better!
  - type: textarea
    id: describe-problem
    attributes:
      label: Is your feature request related to a problem?
      description: Make sure to attach screenshots if possible!
      placeholder: Tell us what you see!
      value: "A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]"
    validations:
      required: true
  - type: textarea
    id: describe-solution
    attributes:
      label: Describe the solution you'd like
      description: A clear and concise description of what you want to happen.
      placeholder: Tell us what you see!
      value: "Describe here."
    validations:
      required: true
  - type: textarea
    id: extras
    attributes:
      label: Additional context
      description: Anything else you want to add?
      placeholder: Tell us what you see!
      value: "Nothing Extra"
    validations:
      required: true

EPG parsing tasks (parse_channels_only, parse_programs_for_tvg_id)

@ -843,7 +843,7 @@ def parse_channels_only(source):
# Change iterparse to look for both channel and programme elements
logger.debug(f"Creating iterparse context for channels and programmes")
channel_parser = etree.iterparse(source_file, events=('end',), tag=('channel', 'programme'))
channel_parser = etree.iterparse(source_file, events=('end',), tag=('channel', 'programme'), remove_blank_text=True)
if process:
logger.debug(f"[parse_channels_only] Memory after creating iterparse: {process.memory_info().rss / 1024 / 1024:.2f} MB")
@ -851,7 +851,6 @@ def parse_channels_only(source):
total_elements_processed = 0 # Track total elements processed, not just channels
for _, elem in channel_parser:
total_elements_processed += 1
# Only process channel elements
if elem.tag == 'channel':
channel_count += 1
@ -967,6 +966,7 @@ def parse_channels_only(source):
logger.debug(f"[parse_channels_only] Total elements processed: {total_elements_processed}")
else:
logger.trace(f"[parse_channels_only] Skipping non-channel element: {elem.get('channel', 'unknown')} - {elem.get('start', 'unknown')} {elem.tag}")
clear_element(elem)
continue
@ -1034,6 +1034,11 @@ def parse_channels_only(source):
if process:
logger.debug(f"[parse_channels_only] Memory before cleanup: {process.memory_info().rss / 1024 / 1024:.2f} MB")
try:
# Output any errors in the channel_parser error log
if 'channel_parser' in locals() and hasattr(channel_parser, 'error_log') and len(channel_parser.error_log) > 0:
logger.debug(f"XML parser errors found ({len(channel_parser.error_log)} total):")
for i, error in enumerate(channel_parser.error_log):
logger.debug(f" Error {i+1}: {error}")
if 'channel_parser' in locals():
del channel_parser
if 'elem' in locals():
@ -1190,7 +1195,7 @@ def parse_programs_for_tvg_id(epg_id):
source_file = open(file_path, 'rb')
# Stream parse the file using lxml's iterparse
program_parser = etree.iterparse(source_file, events=('end',), tag='programme')
program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True)
for _, elem in program_parser:
if elem.get('channel') == epg.tvg_id:
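
Taken together, these hunks follow lxml's streaming-parse recipe: iterparse with remove_blank_text=True, clear each element as soon as it is consumed, and dump parser.error_log during cleanup. A standalone sketch of that pattern (the function, file handling, and cleanup details are simplified assumptions, not Dispatcharr's actual code):

# Simplified sketch of the streaming-parse pattern above; not the project's actual function.
from lxml import etree

def count_channels(path):
    channel_count = 0
    with open(path, "rb") as source_file:
        parser = etree.iterparse(
            source_file,
            events=("end",),
            tag=("channel", "programme"),
            remove_blank_text=True,   # drop whitespace-only text nodes while parsing
        )
        for _, elem in parser:
            if elem.tag == "channel":
                channel_count += 1
            # Release the element (and any already-processed siblings) immediately
            elem.clear()
            while elem.getprevious() is not None:
                del elem.getparent()[0]
        # Mirror the cleanup logging added above: surface recoverable parser errors
        for i, error in enumerate(parser.error_log):
            print(f"Error {i + 1}: {error}")
    return channel_count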

M3U processing tasks (process_groups, process_m3u_batch, cleanup_streams, refresh_m3u_groups, refresh_single_m3u_account)

@ -239,7 +239,7 @@ def process_groups(account, groups):
group_objs = []
groups_to_create = []
for group_name, custom_props in groups.items():
logger.debug(f"Handling group: {group_name}")
logger.debug(f"Handling group for M3U account {account.id}: {group_name}")
if (group_name not in existing_groups):
groups_to_create.append(ChannelGroup(
name=group_name,
@ -405,7 +405,7 @@ def process_m3u_batch(account_id, batch, groups, hash_keys):
stream_hashes = {}
# compiled_filters = [(f.filter_type, re.compile(f.regex_pattern, re.IGNORECASE)) for f in filters]
logger.debug(f"Processing batch of {len(batch)}")
logger.debug(f"Processing batch of {len(batch)} for M3U account {account_id}")
for stream_info in batch:
try:
name, url = stream_info["name"], stream_info["url"]
@ -487,7 +487,7 @@ def process_m3u_batch(account_id, batch, groups, hash_keys):
except Exception as e:
logger.error(f"Bulk create failed: {str(e)}")
retval = f"Batch processed: {len(streams_to_create)} created, {len(streams_to_update)} updated."
retval = f"M3U account: {account_id}, Batch processed: {len(streams_to_create)} created, {len(streams_to_update)} updated."
# Aggressive garbage collection
#del streams_to_create, streams_to_update, stream_hashes, existing_streams
@ -502,11 +502,11 @@ def cleanup_streams(account_id):
m3u_account__m3u_account=account,
m3u_account__enabled=True,
).values_list('id', flat=True)
logger.info(f"Found {len(existing_groups)} active groups")
logger.info(f"Found {len(existing_groups)} active groups for M3U account {account_id}")
# Calculate cutoff date for stale streams
stale_cutoff = timezone.now() - timezone.timedelta(days=account.stale_stream_days)
logger.info(f"Removing streams not seen since {stale_cutoff}")
logger.info(f"Removing streams not seen since {stale_cutoff} for M3U account {account_id}")
# Delete streams that are not in active groups
streams_to_delete = Stream.objects.filter(
@ -527,7 +527,7 @@ def cleanup_streams(account_id):
streams_to_delete.delete()
stale_streams.delete()
logger.info(f"Cleanup complete: {deleted_count} streams removed due to group filter, {stale_count} removed as stale")
logger.info(f"Cleanup for M3U account {account_id} complete: {deleted_count} streams removed due to group filter, {stale_count} removed as stale")
@shared_task
def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False):
@ -712,7 +712,7 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False):
group_name = parsed["attributes"]["group-title"]
# Log new groups as they're discovered
if group_name not in groups:
logger.debug(f"Found new group: '{group_name}'")
logger.debug(f"Found new group for M3U account {account_id}: '{group_name}'")
groups[group_name] = {}
extinf_data.append(parsed)
@ -729,7 +729,7 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False):
# Periodically log progress for large files
if valid_stream_count % 1000 == 0:
logger.debug(f"Processed {valid_stream_count} valid streams so far...")
logger.debug(f"Processed {valid_stream_count} valid streams so far for M3U account: {account_id}")
# Log summary statistics
logger.info(f"M3U parsing complete - Lines: {line_count}, EXTINF: {extinf_count}, URLs: {url_count}, Valid streams: {valid_stream_count}")
@ -962,7 +962,7 @@ def refresh_single_m3u_account(account_id):
account.save(update_fields=['status'])
if account.account_type == M3UAccount.Types.STADNARD:
logger.debug(f"Processing Standard account with groups: {existing_groups}")
logger.debug(f"Processing Standard account ({account_id}) with groups: {existing_groups}")
# Break into batches and process in parallel
batches = [extinf_data[i:i + BATCH_SIZE] for i in range(0, len(extinf_data), BATCH_SIZE)]
task_group = group(process_m3u_batch.s(account_id, batch, existing_groups, hash_keys) for batch in batches)
@ -1107,8 +1107,6 @@ def refresh_single_m3u_account(account_id):
message=account.last_message
)
print(f"Function took {elapsed_time} seconds to execute.")
except Exception as e:
logger.error(f"Error processing M3U for account {account_id}: {str(e)}")
account.status = M3UAccount.Status.ERROR
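
The batch dispatch in refresh_single_m3u_account is the standard Celery fan-out: slice the parsed entries, build a group of process_m3u_batch signatures, and collect the per-batch summaries. A minimal standalone sketch, with the task body, batch size, and result handling reduced to placeholders:

# Illustrative Celery fan-out; the task body and names are placeholders, not Dispatcharr's code.
from celery import group, shared_task

BATCH_SIZE = 1000  # assumed value, for illustration only

@shared_task
def process_batch(account_id, batch):
    # ... create or update Stream rows for this slice ...
    return f"M3U account: {account_id}, batch of {len(batch)} processed"

def dispatch_batches(account_id, extinf_data):
    batches = [extinf_data[i:i + BATCH_SIZE] for i in range(0, len(extinf_data), BATCH_SIZE)]
    job = group(process_batch.s(account_id, batch) for batch in batches)
    # Fire all batch tasks in parallel and wait for their per-batch summaries
    return job.apply_async().get()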

M3U/EPG output views (generate_m3u, generate_epg)

@ -63,6 +63,9 @@ def generate_m3u(request, profile_name=None, user=None):
else:
channels = Channel.objects.order_by("channel_number")
# Check if the request wants to use direct logo URLs instead of cache
use_cached_logos = request.GET.get('cachedlogos', 'true').lower() != 'false'
m3u_content = "#EXTM3U\n"
for channel in channels:
group_title = channel.channel_group.name if channel.channel_group else "Default"
@ -86,9 +89,17 @@ def generate_m3u(request, profile_name=None, user=None):
tvg_logo = ""
if channel.logo:
tvg_logo = request.build_absolute_uri(
reverse("api:channels:logo-cache", args=[channel.logo.id])
)
if use_cached_logos:
# Use cached logo as before
tvg_logo = request.build_absolute_uri(reverse('api:channels:logo-cache', args=[channel.logo.id]))
else:
# Try to find direct logo URL from channel's streams
direct_logo = channel.logo.url if channel.logo.url.startswith(('http://', 'https://')) else None
# If direct logo found, use it; otherwise fall back to cached version
if direct_logo:
tvg_logo = direct_logo
else:
tvg_logo = request.build_absolute_uri(reverse('api:channels:logo-cache', args=[channel.logo.id]))
# create possible gracenote id insertion
tvc_guide_stationid = ""
@ -277,19 +288,26 @@ def generate_epg(request, profile_name=None, user=None):
formatted_channel_number = str(channel.channel_number)
else:
formatted_channel_number = str(channel.id)
# Check if the request wants to use direct logo URLs instead of cache
use_cached_logos = request.GET.get('cachedlogos', 'true').lower() != 'false'
# Add channel logo if available
tvg_logo = ""
if channel.logo:
if use_cached_logos:
# Use cached logo as before
tvg_logo = request.build_absolute_uri(reverse('api:channels:logo-cache', args=[channel.logo.id]))
else:
# Try to find direct logo URL from channel's streams
direct_logo = channel.logo.url if channel.logo.url.startswith(('http://', 'https://')) else None
# If direct logo found, use it; otherwise fall back to cached version
if direct_logo:
tvg_logo = direct_logo
else:
tvg_logo = request.build_absolute_uri(reverse('api:channels:logo-cache', args=[channel.logo.id]))
display_name = channel.epg_data.name if channel.epg_data else channel.name
xml_lines.append(f' <channel id="{formatted_channel_number}">')
xml_lines.append(
f" <display-name>{html.escape(display_name)}</display-name>"
)
# Add channel logo if available
if channel.logo:
logo_url = request.build_absolute_uri(
reverse("api:channels:logo-cache", args=[channel.logo.id])
)
xml_lines.append(f' <icon src="{html.escape(logo_url)}" />')
xml_lines.append(f' <display-name>{html.escape(display_name)}</display-name>')
xml_lines.append(f' <icon src="{html.escape(tvg_logo)}" />')
xml_lines.append(" </channel>")

TS proxy channel status (class ChannelStatus)

@ -307,16 +307,23 @@ class ChannelStatus:
client_count = proxy_server.redis_client.scard(client_set_key) or 0
# Calculate uptime
created_at = float(metadata.get(ChannelMetadataField.INIT_TIME.encode('utf-8'), b'0').decode('utf-8'))
init_time_bytes = metadata.get(ChannelMetadataField.INIT_TIME.encode('utf-8'), b'0')
created_at = float(init_time_bytes.decode('utf-8'))
uptime = time.time() - created_at if created_at > 0 else 0
# Safely decode bytes or use defaults
def safe_decode(bytes_value, default="unknown"):
if bytes_value is None:
return default
return bytes_value.decode('utf-8')
# Simplified info
info = {
'channel_id': channel_id,
'state': metadata.get(ChannelMetadataField.STATE.encode('utf-8'), b'unknown').decode('utf-8'),
'url': metadata.get(ChannelMetadataField.URL.encode('utf-8'), b'').decode('utf-8'),
'stream_profile': metadata.get(ChannelMetadataField.STREAM_PROFILE.encode('utf-8'), b'').decode('utf-8'),
'owner': metadata.get(ChannelMetadataField.OWNER.encode('utf-8'), b'unknown').decode('utf-8'),
'state': safe_decode(metadata.get(ChannelMetadataField.STATE.encode('utf-8'))),
'url': safe_decode(metadata.get(ChannelMetadataField.URL.encode('utf-8')), ""),
'stream_profile': safe_decode(metadata.get(ChannelMetadataField.STREAM_PROFILE.encode('utf-8')), ""),
'owner': safe_decode(metadata.get(ChannelMetadataField.OWNER.encode('utf-8'))),
'buffer_index': int(buffer_index_value.decode('utf-8')) if buffer_index_value else 0,
'client_count': client_count,
'uptime': uptime
@ -376,14 +383,15 @@ class ChannelStatus:
# Efficient way - just retrieve the essentials
client_info = {
'client_id': client_id_str,
'user_agent': proxy_server.redis_client.hget(client_key, 'user_agent'),
'ip_address': proxy_server.redis_client.hget(client_key, 'ip_address').decode('utf-8'),
}
if client_info['user_agent']:
client_info['user_agent'] = client_info['user_agent'].decode('utf-8')
else:
client_info['user_agent'] = 'unknown'
# Safely get user_agent and ip_address
user_agent_bytes = proxy_server.redis_client.hget(client_key, 'user_agent')
client_info['user_agent'] = safe_decode(user_agent_bytes)
ip_address_bytes = proxy_server.redis_client.hget(client_key, 'ip_address')
if ip_address_bytes:
client_info['ip_address'] = safe_decode(ip_address_bytes)
# Just get connected_at for client age
connected_at_bytes = proxy_server.redis_client.hget(client_key, 'connected_at')
@ -416,5 +424,5 @@ class ChannelStatus:
return info
except Exception as e:
logger.error(f"Error getting channel info: {e}")
logger.error(f"Error getting channel info: {e}", exc_info=True) # Added exc_info for better debugging
return None
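
The safe_decode helper added here guards against Redis returning None for missing hash fields instead of raising AttributeError on .decode(). In isolation the pattern looks like this (the key name and fields are illustrative, not the proxy's actual schema):

# Standalone sketch of the decode guard used above; the key and fields are illustrative.
import redis

def safe_decode(bytes_value, default="unknown"):
    # hget/hgetall return None or omit the field entirely when it was never set
    if bytes_value is None:
        return default
    return bytes_value.decode("utf-8")

r = redis.Redis()
metadata = r.hgetall("ts_proxy:channel:example:metadata")  # hypothetical key
state = safe_decode(metadata.get(b"state"))                # "unknown" if the field is absent
url = safe_decode(metadata.get(b"url"), default="")        # empty-string fallback for URLs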

Django settings (Celery configuration)

@ -195,15 +195,6 @@ CELERY_BROKER_TRANSPORT_OPTIONS = {
CELERY_ACCEPT_CONTENT = ["json"]
CELERY_TASK_SERIALIZER = "json"
# Memory management settings
# CELERY_WORKER_MAX_TASKS_PER_CHILD = 10 # Restart worker after 10 tasks to free memory
# CELERY_WORKER_PREFETCH_MULTIPLIER = 1 # Don't prefetch tasks - process one at a time
# CELERY_TASK_ACKS_LATE = True # Only acknowledge tasks after they're processed
# CELERY_TASK_TIME_LIMIT = 3600 # 1 hour time limit per task
# CELERY_TASK_SOFT_TIME_LIMIT = 3540 # Soft limit 60 seconds before hard limit
# CELERY_WORKER_CANCEL_LONG_RUNNING_TASKS_ON_CONNECTION_LOSS = True # Cancel tasks if connection lost
# CELERY_TASK_IGNORE_RESULT = True # Don't store results unless explicitly needed
CELERY_BEAT_SCHEDULER = "django_celery_beat.schedulers.DatabaseScheduler"
CELERY_BEAT_SCHEDULE = {
"fetch-channel-statuses": {

uWSGI configuration

@ -8,7 +8,7 @@ exec-before = python /app/scripts/wait_for_redis.py
; Start Redis first
attach-daemon = redis-server
; Then start other services
attach-daemon = celery -A dispatcharr worker --concurrency=4
attach-daemon = celery -A dispatcharr worker --autoscale=6,1
attach-daemon = celery -A dispatcharr beat
attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application
attach-daemon = cd /app/frontend && npm run dev
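
For reference, the flag swap trades a fixed worker pool for an autoscaling one; assuming Celery's usual --autoscale=max,min semantics, the before and after lines compare as follows:

; previous: fixed pool of 4 worker processes
attach-daemon = celery -A dispatcharr worker --concurrency=4
; new: scale up to 6 processes under load and down to 1 when idle
attach-daemon = celery -A dispatcharr worker --autoscale=6,1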

uWSGI configuration

@ -10,7 +10,7 @@ exec-pre = python /app/scripts/wait_for_redis.py
; Start Redis first
attach-daemon = redis-server
; Then start other services
attach-daemon = celery -A dispatcharr worker --concurrency=4
attach-daemon = celery -A dispatcharr worker --autoscale=6,1
attach-daemon = celery -A dispatcharr beat
attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application
attach-daemon = cd /app/frontend && npm run dev

uWSGI configuration

@ -10,7 +10,7 @@ exec-pre = python /app/scripts/wait_for_redis.py
; Start Redis first
attach-daemon = redis-server
; Then start other services
attach-daemon = celery -A dispatcharr worker --concurrency=4
attach-daemon = celery -A dispatcharr worker --autoscale=6,1
attach-daemon = celery -A dispatcharr beat
attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application