commit a6ace3095a by SergeantPanda, 2025-03-23 17:11:52 -05:00
61 changed files with 2507 additions and 715 deletions

.github/workflows/docker-build.yml (new file, 55 lines)

@@ -0,0 +1,55 @@
name: Build and Push Multi-Arch Docker Image

on:
  push:
    branches:
      - main
      - dev

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Determine image tag and branch
        id: set-tag-branch
        run: |
          if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
            echo "TAG=latest" >> $GITHUB_ENV
            echo "BRANCH=main" >> $GITHUB_ENV
          elif [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
            echo "TAG=dev" >> $GITHUB_ENV
            echo "BRANCH=dev" >> $GITHUB_ENV
          fi

      - name: Convert repository name to lowercase
        run: echo "REPO_NAME=$(echo '${{ github.repository }}' | tr '[:upper:]' '[:lower:]')" >> $GITHUB_ENV

      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: docker
          file: docker/Dockerfile
          push: true
          platforms: linux/amd64,linux/arm64
          build-args: |
            BRANCH=${{ env.BRANCH }}
          tags: |
            ghcr.io/${{ env.REPO_NAME }}:${{ env.TAG }}
            ghcr.io/${{ env.REPO_NAME }}:${{ github.sha }}


@@ -6,13 +6,16 @@ class StreamAdmin(admin.ModelAdmin):
    list_display = (
        'id',  # Primary Key
        'name',
-        'group_name',
+        'channel_group',
        'url',
        'current_viewers',
        'updated_at',
    )
-    list_filter = ('group_name',)
-    search_fields = ('id', 'name', 'url', 'group_name')  # Added 'id' for searching by ID
+    list_filter = ('channel_group',)  # Filter by 'channel_group' (foreign key)
+    search_fields = ('id', 'name', 'url', 'channel_group__name')  # Search by 'ChannelGroup' name
    ordering = ('-updated_at',)

@admin.register(Channel)


@@ -23,14 +23,14 @@ class StreamPagination(PageNumberPagination):
class StreamFilter(django_filters.FilterSet):
    name = django_filters.CharFilter(lookup_expr='icontains')
-    group_name = django_filters.CharFilter(lookup_expr='icontains')
+    channel_group_name = django_filters.CharFilter(field_name="channel_group__name", lookup_expr="icontains")
    m3u_account = django_filters.NumberFilter(field_name="m3u_account__id")
    m3u_account_name = django_filters.CharFilter(field_name="m3u_account__name", lookup_expr="icontains")
    m3u_account_is_active = django_filters.BooleanFilter(field_name="m3u_account__is_active")

    class Meta:
        model = Stream
-        fields = ['name', 'group_name', 'm3u_account', 'm3u_account_name', 'm3u_account_is_active']
+        fields = ['name', 'channel_group_name', 'm3u_account', 'm3u_account_name', 'm3u_account_is_active']

# ─────────────────────────────────────────────────────────
# 1) Stream API (CRUD)
@@ -43,20 +43,27 @@ class StreamViewSet(viewsets.ModelViewSet):
    filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter]
    filterset_class = StreamFilter
-    search_fields = ['name', 'group_name']
-    ordering_fields = ['name', 'group_name']
+    search_fields = ['name', 'channel_group__name']
+    ordering_fields = ['name', 'channel_group__name']
    ordering = ['-name']

    def get_queryset(self):
        qs = super().get_queryset()

+        # Exclude streams from inactive M3U accounts
+        qs = qs.exclude(m3u_account__is_active=False)

        assigned = self.request.query_params.get('assigned')
        if assigned is not None:
            qs = qs.filter(channels__id=assigned)

        unassigned = self.request.query_params.get('unassigned')
        if unassigned == '1':
            qs = qs.filter(channels__isnull=True)

+        channel_group = self.request.query_params.get('channel_group')
+        if channel_group:
+            qs = qs.filter(channel_group__name=channel_group)

        return qs

    @action(detail=False, methods=['get'], url_path='ids')
@@ -75,7 +82,8 @@ class StreamViewSet(viewsets.ModelViewSet):
    @action(detail=False, methods=['get'], url_path='groups')
    def get_groups(self, request, *args, **kwargs):
-        group_names = Stream.objects.exclude(group_name__isnull=True).exclude(group_name="").order_by().values_list('group_name', flat=True).distinct()
+        # Get unique ChannelGroup names that are linked to streams
+        group_names = ChannelGroup.objects.filter(streams__isnull=False).order_by('name').values_list('name', flat=True).distinct()

        # Return the response with the list of unique group names
        return Response(list(group_names))
@@ -158,7 +166,7 @@ class ChannelViewSet(viewsets.ModelViewSet):
        if not stream_id:
            return Response({"error": "Missing stream_id"}, status=status.HTTP_400_BAD_REQUEST)
        stream = get_object_or_404(Stream, pk=stream_id)
-        channel_group, _ = ChannelGroup.objects.get_or_create(name=stream.group_name)
+        channel_group = stream.channel_group

        # Check if client provided a channel_number; if not, auto-assign one.
        provided_number = request.data.get('channel_number')
@@ -254,7 +262,7 @@ class ChannelViewSet(viewsets.ModelViewSet):
                errors.append({"item": item, "error": str(e)})
                continue

-            channel_group, _ = ChannelGroup.objects.get_or_create(name=stream.group_name)
+            channel_group = stream.channel_group

            # Determine channel number: if provided, use it (if free); else auto assign.
            provided_number = item.get('channel_number')


@@ -42,5 +42,5 @@ class StreamForm(forms.ModelForm):
            'logo_url',
            'tvg_id',
            'local_file',
-            'group_name',
+            'channel_group',
        ]


@@ -0,0 +1,44 @@
# Generated by Django 5.1.6 on 2025-03-19 16:33

import datetime
import django.db.models.deletion
import uuid
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('dispatcharr_channels', '0004_stream_is_custom'),
        ('m3u', '0003_create_custom_account'),
    ]

    operations = [
        migrations.AddField(
            model_name='stream',
            name='channel_group',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='streams', to='dispatcharr_channels.channelgroup'),
        ),
        migrations.AddField(
            model_name='stream',
            name='last_seen',
            field=models.DateTimeField(db_index=True, default=datetime.datetime.now),
        ),
        migrations.AlterField(
            model_name='channel',
            name='uuid',
            field=models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, unique=True),
        ),
        migrations.CreateModel(
            name='ChannelGroupM3UAccount',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('enabled', models.BooleanField(default=True)),
                ('channel_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='m3u_account', to='dispatcharr_channels.channelgroup')),
                ('m3u_account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='channel_group', to='m3u.m3uaccount')),
            ],
            options={
                'unique_together': {('channel_group', 'm3u_account')},
            },
        ),
    ]


@ -0,0 +1,51 @@
# In your app's migrations folder, create a new migration file
# e.g., migrations/000X_migrate_channel_group_to_foreign_key.py
from django.db import migrations
def migrate_channel_group(apps, schema_editor):
Stream = apps.get_model('dispatcharr_channels', 'Stream')
ChannelGroup = apps.get_model('dispatcharr_channels', 'ChannelGroup')
ChannelGroupM3UAccount = apps.get_model('dispatcharr_channels', 'ChannelGroup')
M3UAccount = apps.get_model('m3u', 'M3UAccount')
streams_to_update = []
for stream in Stream.objects.all():
# If the stream has a 'channel_group' string, try to find or create the ChannelGroup
if stream.group_name: # group_name holds the channel group string
channel_group_name = stream.group_name.strip()
# Try to find the ChannelGroup by name
channel_group, created = ChannelGroup.objects.get_or_create(name=channel_group_name)
# Set the foreign key to the found or newly created ChannelGroup
stream.channel_group = channel_group
streams_to_update.append(stream)
# If the stream has an M3U account, ensure the M3U account is linked
if stream.m3u_account:
ChannelGroupM3UAccount.objects.get_or_create(
channel_group=channel_group,
m3u_account=stream.m3u_account,
enabled=True # Or set it to whatever the default logic is
)
Stream.objects.bulk_update(streams_to_update, ['channel_group'])
def reverse_migration(apps, schema_editor):
# This reverse migration would undo the changes, setting `channel_group` to `None` and clearing any relationships.
Stream = apps.get_model('yourapp', 'Stream')
for stream in Stream.objects.all():
stream.channel_group = None
stream.save()
class Migration(migrations.Migration):
dependencies = [
('dispatcharr_channels', '0005_stream_channel_group_stream_last_seen_and_more'),
]
operations = [
migrations.RunPython(migrate_channel_group, reverse_code=reverse_migration),
]
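
A quick way to sanity-check this data migration after it runs is to compare counts in a Django shell; a minimal sketch, assuming the dispatcharr_channels app label used above:

# Run inside `python manage.py shell` after migrating (illustrative check only).
from apps.channels.models import Stream, ChannelGroup

total = Stream.objects.count()
linked = Stream.objects.filter(channel_group__isnull=False).count()
print(f"{linked}/{total} streams linked to a ChannelGroup")
print(f"{ChannelGroup.objects.count()} distinct groups exist")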


@ -0,0 +1,17 @@
# Generated by Django 5.1.6 on 2025-03-19 16:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dispatcharr_channels', '0006_migrate_stream_groups'),
]
operations = [
migrations.RemoveField(
model_name='stream',
name='group_name',
),
]


@ -0,0 +1,23 @@
# Generated by Django 5.1.6 on 2025-03-19 18:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dispatcharr_channels', '0007_remove_stream_group_name'),
]
operations = [
migrations.AddField(
model_name='stream',
name='stream_hash',
field=models.CharField(db_index=True, help_text='Unique hash for this stream from the M3U account', max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='stream',
name='logo_url',
field=models.TextField(blank=True, null=True),
),
]


@@ -3,15 +3,49 @@ from django.core.exceptions import ValidationError
-from core.models import StreamProfile
from django.conf import settings
+from core.models import StreamProfile, CoreSettings
-from core.utils import redis_client
+from core.utils import redis_client, execute_redis_command
import logging
import uuid
+from datetime import datetime
+import hashlib
+import json

logger = logging.getLogger(__name__)

# If you have an M3UAccount model in apps.m3u, you can still import it:
from apps.m3u.models import M3UAccount

+# Add fallback functions if Redis isn't available
+def get_total_viewers(channel_id):
+    """Get viewer count from Redis or return 0 if Redis isn't available"""
+    if redis_client is None:
+        return 0
+    try:
+        return int(redis_client.get(f"channel:{channel_id}:viewers") or 0)
+    except Exception:
+        return 0
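
get_total_viewers degrades gracefully when Redis is unavailable; a usage sketch (the channel id is illustrative):

viewers = get_total_viewers(channel_id=42)
print(f"channel 42 has {viewers} viewer(s)")  # 0 whenever Redis is down or unset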
+class ChannelGroup(models.Model):
+    name = models.CharField(max_length=100, unique=True)
+
+    def related_channels(self):
+        # Local import if needed to avoid circular imports; usually fine in a single file though.
+        return Channel.objects.filter(channel_group=self)
+
+    def __str__(self):
+        return self.name
+
+    @classmethod
+    def bulk_create_and_fetch(cls, objects):
+        # Perform the bulk create operation
+        cls.objects.bulk_create(objects)
+
+        # Use a unique field to fetch the created objects ('name' is unique)
+        created_objects = cls.objects.filter(name__in=[obj.name for obj in objects])
+        return created_objects
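
bulk_create_and_fetch works around bulk_create not returning primary keys on every database backend by re-querying on the unique name column; a usage sketch with illustrative group names:

groups = ChannelGroup.bulk_create_and_fetch([
    ChannelGroup(name="News"),
    ChannelGroup(name="Sports"),
])
by_name = {g.name: g for g in groups}  # Every object now carries a primary key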
class Stream(models.Model):
    """
    Represents a single stream (e.g. from an M3U source or custom URL).
@@ -25,12 +59,18 @@ class Stream(models.Model):
        blank=True,
        related_name="streams",
    )
-    logo_url = models.URLField(max_length=2000, blank=True, null=True)
+    logo_url = models.TextField(blank=True, null=True)
    tvg_id = models.CharField(max_length=255, blank=True, null=True)
    local_file = models.FileField(upload_to='uploads/', blank=True, null=True)
    current_viewers = models.PositiveIntegerField(default=0)
    updated_at = models.DateTimeField(auto_now=True)
-    group_name = models.CharField(max_length=255, blank=True, null=True)
+    channel_group = models.ForeignKey(
+        ChannelGroup,
+        on_delete=models.SET_NULL,
+        null=True,
+        blank=True,
+        related_name='streams'
+    )
    stream_profile = models.ForeignKey(
        StreamProfile,
        null=True,
@@ -42,6 +82,14 @@ class Stream(models.Model):
        default=False,
        help_text="Whether this is a user-created stream or from an M3U account"
    )
+    stream_hash = models.CharField(
+        max_length=255,
+        null=True,
+        unique=True,
+        help_text="Unique hash for this stream from the M3U account",
+        db_index=True,
+    )
+    last_seen = models.DateTimeField(db_index=True, default=datetime.now)

    class Meta:
        # If you use m3u_account, you might do unique_together = ('name','url','m3u_account')
@@ -52,6 +100,37 @@ class Stream(models.Model):
    def __str__(self):
        return self.name or self.url or f"Stream ID {self.id}"

+    @classmethod
+    def generate_hash_key(cls, name, url, tvg_id, keys=None):
+        if keys is None:
+            keys = CoreSettings.get_m3u_hash_key().split(",")
+
+        stream_parts = {
+            "name": name, "url": url, "tvg_id": tvg_id
+        }
+        hash_parts = {key: stream_parts[key] for key in keys if key in stream_parts}
+
+        # Serialize and hash the dictionary
+        serialized_obj = json.dumps(hash_parts, sort_keys=True)  # sort_keys ensures consistent ordering
+        hash_object = hashlib.sha256(serialized_obj.encode())
+
+        return hash_object.hexdigest()
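
The digest is stable because sort_keys fixes the JSON key order; a standalone sketch of the same computation, assuming the default key set of name, url, and tvg_id (the values are illustrative):

import hashlib
import json

parts = {"name": "CNN", "url": "http://example.com/cnn.ts", "tvg_id": "cnn.us"}
digest = hashlib.sha256(json.dumps(parts, sort_keys=True).encode()).hexdigest()
# Identical inputs always produce the same digest, so it serves as a natural key.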
+    @classmethod
+    def update_or_create_by_hash(cls, hash_value, **fields_to_update):
+        try:
+            # Try to find the Stream object with the given hash
+            stream = cls.objects.get(stream_hash=hash_value)
+
+            # If it exists, update the fields
+            for field, value in fields_to_update.items():
+                setattr(stream, field, value)
+            stream.save()  # Save the updated object
+            return stream, False  # False means it was updated, not created
+        except cls.DoesNotExist:
+            # If it doesn't exist, create a new object with the given hash
+            fields_to_update['stream_hash'] = hash_value  # Make sure the hash field is set
+            stream = cls.objects.create(**fields_to_update)
+            return stream, True  # True means it was created
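
A usage sketch with illustrative values; note that, unlike Django's built-in update_or_create, this lookup-then-save is not atomic, so concurrent batches rely on the Redis m3u_refresh:* dedupe keys used by the tasks further below:

hash_value = Stream.generate_hash_key("CNN", "http://example.com/cnn.ts", "cnn.us")
stream, created = Stream.update_or_create_by_hash(
    hash_value,
    name="CNN",
    url="http://example.com/cnn.ts",
    tvg_id="cnn.us",
)
print("created" if created else "updated", stream.id)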
class ChannelManager(models.Manager):
    def active(self):
@@ -95,7 +174,7 @@ class Channel(models.Model):
        related_name='channels'
    )

-    uuid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)
+    uuid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True, db_index=True)

    def clean(self):
        # Enforce unique channel_number within a given group
@@ -198,16 +277,6 @@ class Channel(models.Model):
            if current_count > 0:
                redis_client.decr(profile_connections_key)

-class ChannelGroup(models.Model):
-    name = models.CharField(max_length=100, unique=True)
-
-    def related_channels(self):
-        # local import if needed to avoid cyc. Usually fine in a single file though
-        return Channel.objects.filter(channel_group=self)
-
-    def __str__(self):
-        return self.name

class ChannelStream(models.Model):
    channel = models.ForeignKey(Channel, on_delete=models.CASCADE)
    stream = models.ForeignKey(Stream, on_delete=models.CASCADE)
@@ -215,3 +284,22 @@ class ChannelStream(models.Model):

    class Meta:
        ordering = ['order']  # Ensure streams are retrieved in order

+class ChannelGroupM3UAccount(models.Model):
+    channel_group = models.ForeignKey(
+        ChannelGroup,
+        on_delete=models.CASCADE,
+        related_name='m3u_account'
+    )
+    m3u_account = models.ForeignKey(
+        M3UAccount,
+        on_delete=models.CASCADE,
+        related_name='channel_group'
+    )
+    enabled = models.BooleanField(default=True)
+
+    class Meta:
+        unique_together = ('channel_group', 'm3u_account')
+
+    def __str__(self):
+        return f"{self.channel_group.name} - {self.m3u_account.name} (Enabled: {self.enabled})"


@@ -1,5 +1,5 @@
from rest_framework import serializers
-from .models import Stream, Channel, ChannelGroup, ChannelStream
+from .models import Stream, Channel, ChannelGroup, ChannelStream, ChannelGroupM3UAccount
from core.models import StreamProfile

#
@@ -26,9 +26,9 @@ class StreamSerializer(serializers.ModelSerializer):
            'local_file',
            'current_viewers',
            'updated_at',
-            'group_name',
            'stream_profile_id',
            'is_custom',
+            'channel_group',
        ]

    def get_fields(self):
@@ -41,7 +41,7 @@ class StreamSerializer(serializers.ModelSerializer):
            fields['url'].read_only = True
            fields['m3u_account'].read_only = True
            fields['tvg_id'].read_only = True
-            fields['group_name'].read_only = True
+            fields['channel_group'].read_only = True

        return fields
@@ -146,3 +146,13 @@ class ChannelSerializer(serializers.ModelSerializer):
            ChannelStream.objects.create(channel=instance, stream_id=stream.id, order=index)

        return instance

+class ChannelGroupM3UAccountSerializer(serializers.ModelSerializer):
+    enabled = serializers.BooleanField()
+
+    class Meta:
+        model = ChannelGroupM3UAccount
+        fields = ['id', 'channel_group', 'enabled']
+
+    # Optionally, if you only need the id of the ChannelGroup, you can customize it like this:
+    # channel_group = serializers.PrimaryKeyRelatedField(queryset=ChannelGroup.objects.all())
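
A sketch of the serialized shape, assuming link is a saved ChannelGroupM3UAccount instance (values illustrative):

data = ChannelGroupM3UAccountSerializer(link).data
# -> {'id': 7, 'channel_group': 3, 'enabled': True}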


@@ -16,7 +16,7 @@ class StreamDashboardView(View):
    def get(self, request, *args, **kwargs):
        streams = Stream.objects.values(
            'id', 'name', 'url',
-            'group_name', 'current_viewers'
+            'channel_group', 'current_viewers'
        )
        return JsonResponse({'data': list(streams)}, safe=False)


@@ -19,7 +19,7 @@ class EPGSource(models.Model):
class EPGData(models.Model):
    # Removed the Channel foreign key. We now just store the original tvg_id
    # and a name (which might simply be the tvg_id if no real channel exists).
-    tvg_id = models.CharField(max_length=255, null=True, blank=True)
+    tvg_id = models.CharField(max_length=255, null=True, blank=True, unique=True)
    name = models.CharField(max_length=255)

    def __str__(self):


@@ -110,6 +110,8 @@ def parse_channels_only(file_path):
        epg_obj.save()
        logger.debug(f"Channel <{tvg_id}> => EPGData.id={epg_obj.id}, created={created}")

+        parse_programs_for_tvg_id(file_path, tvg_id)

    logger.info("Finished parsing channel info.")


@@ -11,6 +11,7 @@ from django.core.cache import cache
# Import all models, including UserAgent.
from .models import M3UAccount, M3UFilter, ServerGroup, M3UAccountProfile
from core.models import UserAgent
+from apps.channels.models import ChannelGroupM3UAccount
from core.serializers import UserAgentSerializer

# Import all serializers, including the UserAgentSerializer.
from .serializers import (
@@ -24,10 +25,43 @@ from .tasks import refresh_single_m3u_account, refresh_m3u_accounts

class M3UAccountViewSet(viewsets.ModelViewSet):
    """Handles CRUD operations for M3U accounts"""
-    queryset = M3UAccount.objects.all()
+    queryset = M3UAccount.objects.prefetch_related('channel_group')
    serializer_class = M3UAccountSerializer
    permission_classes = [IsAuthenticated]

+    def update(self, request, *args, **kwargs):
+        # Get the M3UAccount instance we're updating
+        instance = self.get_object()
+
+        # Handle updates to the 'enabled' flag of the related ChannelGroupM3UAccount instances
+        updates = request.data.get('channel_groups', [])
+        for update_data in updates:
+            channel_group_id = update_data.get('channel_group')
+            enabled = update_data.get('enabled')
+
+            try:
+                # Get the specific relationship to update
+                relationship = ChannelGroupM3UAccount.objects.get(
+                    m3u_account=instance, channel_group_id=channel_group_id
+                )
+                relationship.enabled = enabled
+                relationship.save()
+            except ChannelGroupM3UAccount.DoesNotExist:
+                return Response(
+                    {"error": "ChannelGroupM3UAccount not found for the given M3UAccount and ChannelGroup."},
+                    status=status.HTTP_400_BAD_REQUEST
+                )
+
+        # After updating the ChannelGroupM3UAccount relationships, reload the M3UAccount instance
+        instance.refresh_from_db()
+
+        refresh_single_m3u_account.delay(instance.id)
+
+        # Serialize and return the updated M3UAccount data
+        serializer = self.get_serializer(instance)
+        return Response(serializer.data)
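
The overridden update expects a channel_groups list in the request body; a client-side sketch, assuming the viewset is routed at /api/m3u/accounts/ and token auth is in use (both are assumptions, check the project's urls.py):

import requests

payload = {"channel_groups": [{"channel_group": 3, "enabled": False}]}
resp = requests.put(
    "http://localhost:8000/api/m3u/accounts/1/",  # Assumed route
    json=payload,
    headers={"Authorization": "Bearer <token>"},
)
resp.raise_for_status()  # On success a refresh task is queued server-side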
class M3UFilterViewSet(viewsets.ModelViewSet):
    """Handles CRUD operations for M3U filters"""
    queryset = M3UFilter.objects.all()


@ -0,0 +1,20 @@
# Generated by Django 5.1.6 on 2025-03-19 16:33
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0009_m3u_hash_settings'),
('m3u', '0003_create_custom_account'),
]
operations = [
migrations.AddField(
model_name='m3uaccount',
name='stream_profile',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='m3u_accounts', to='core.streamprofile'),
),
]


@@ -3,6 +3,7 @@ from django.core.exceptions import ValidationError
from core.models import UserAgent
import re
from django.dispatch import receiver
+from apps.channels.models import StreamProfile

CUSTOM_M3U_ACCOUNT_NAME="custom"
@@ -59,6 +60,13 @@ class M3UAccount(models.Model):
        default=False,
        help_text="Protected - can't be deleted or modified"
    )
+    stream_profile = models.ForeignKey(
+        StreamProfile,
+        on_delete=models.SET_NULL,
+        null=True,
+        blank=True,
+        related_name='m3u_accounts'
+    )

    def __str__(self):
        return self.name
@@ -86,6 +94,16 @@ class M3UAccount(models.Model):
    def get_custom_account(cls):
        return cls.objects.get(name=CUSTOM_M3U_ACCOUNT_NAME, locked=True)

+    # def get_channel_groups(self):
+    #     return ChannelGroup.objects.filter(m3u_account__m3u_account=self)

+    # def is_channel_group_enabled(self, channel_group):
+    #     """Check if the specified ChannelGroup is enabled for this M3UAccount."""
+    #     return self.channel_group.filter(channel_group=channel_group, enabled=True).exists()

+    # def get_enabled_streams(self):
+    #     """Return all streams linked to this account with enabled ChannelGroups."""
+    #     return self.streams.filter(channel_group__in=ChannelGroup.objects.filter(m3u_account__enabled=True))

class M3UFilter(models.Model):
    """Defines filters for M3U accounts based on stream name or group title."""


@@ -1,13 +1,19 @@
from rest_framework import serializers
from .models import M3UAccount, M3UFilter, ServerGroup, M3UAccountProfile
from core.models import UserAgent
+from apps.channels.models import ChannelGroup, ChannelGroupM3UAccount
+from apps.channels.serializers import ChannelGroupM3UAccountSerializer, ChannelGroupSerializer
+import logging

+logger = logging.getLogger(__name__)

class M3UFilterSerializer(serializers.ModelSerializer):
    """Serializer for M3U Filters"""
+    channel_groups = ChannelGroupM3UAccountSerializer(source='m3u_account', many=True)

    class Meta:
        model = M3UFilter
-        fields = ['id', 'filter_type', 'regex_pattern', 'exclude']
+        fields = ['id', 'filter_type', 'regex_pattern', 'exclude', 'channel_groups']

from rest_framework import serializers
from .models import M3UAccountProfile
@@ -37,14 +43,74 @@ class M3UAccountSerializer(serializers.ModelSerializer):
    )
    profiles = M3UAccountProfileSerializer(many=True, read_only=True)
    read_only_fields = ['locked']
+    # channel_groups = serializers.SerializerMethodField()
+    channel_groups = ChannelGroupM3UAccountSerializer(source='channel_group.all', many=True, required=False)

    class Meta:
        model = M3UAccount
        fields = [
            'id', 'name', 'server_url', 'uploaded_file', 'server_group',
-            'max_streams', 'is_active', 'created_at', 'updated_at', 'filters', 'user_agent', 'profiles', 'locked'
+            'max_streams', 'is_active', 'created_at', 'updated_at', 'filters', 'user_agent', 'profiles', 'locked',
+            'channel_groups',
        ]

+    # def get_channel_groups(self, obj):
+    #     # Retrieve related ChannelGroupM3UAccount records for this M3UAccount
+    #     relations = ChannelGroupM3UAccount.objects.filter(m3u_account=obj).select_related('channel_group')
+    #     # Serialize the channel groups with their enabled status
+    #     return [
+    #         {
+    #             'channel_group_name': relation.channel_group.name,
+    #             'channel_group_id': relation.channel_group.id,
+    #             'enabled': relation.enabled,
+    #         }
+    #         for relation in relations
+    #     ]

+    # def to_representation(self, instance):
+    #     """Override the default to_representation method to include channel_groups"""
+    #     representation = super().to_representation(instance)
+    #     # Manually add the channel_groups to the representation
+    #     channel_groups = ChannelGroupM3UAccount.objects.filter(m3u_account=instance).select_related('channel_group')
+    #     representation['channel_groups'] = [
+    #         {
+    #             'id': relation.id,
+    #             'channel_group_name': relation.channel_group.name,
+    #             'channel_group_id': relation.channel_group.id,
+    #             'enabled': relation.enabled,
+    #         }
+    #         for relation in channel_groups
+    #     ]
+    #     return representation

+    # def update(self, instance, validated_data):
+    #     logger.info(validated_data)
+    #     channel_groups_data = validated_data.pop('channel_groups', None)
+    #     instance = super().update(instance, validated_data)
+    #     if channel_groups_data is not None:
+    #         logger.info(json.dumps(channel_groups_data))
+    #         # Remove existing relationships not included in the request
+    #         existing_groups = {cg.channel_group_id: cg for cg in instance.channel_group.all()}
+    #         # for group_id in set(existing_groups.keys()) - sent_group_ids:
+    #         #     existing_groups[group_id].delete()
+    #         # Create or update relationships
+    #         for cg_data in channel_groups_data:
+    #             logger.info(json.dumps(cg_data))
+    #             ChannelGroupM3UAccount.objects.update_or_create(
+    #                 channel_group=existing_groups[cg_data['channel_group_id']],
+    #                 m3u_account=instance,
+    #                 defaults={'enabled': cg_data.get('enabled', True)}
+    #             )
+    #     return instance

class ServerGroupSerializer(serializers.ModelSerializer):
    """Serializer for Server Group"""


@@ -2,7 +2,7 @@
from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import M3UAccount
-from .tasks import refresh_single_m3u_account
+from .tasks import refresh_single_m3u_account, refresh_m3u_groups

@receiver(post_save, sender=M3UAccount)
def refresh_account_on_save(sender, instance, created, **kwargs):
@@ -11,5 +11,5 @@ def refresh_account_on_save(sender, instance, created, **kwargs):
    call a Celery task that fetches & parses that single account
    if it is active or newly created.
    """
-    if created or instance.is_active:
-        refresh_single_m3u_account.delay(instance.id)
+    if created:
+        refresh_m3u_groups(instance.id)


@@ -4,17 +4,63 @@ import re
import requests
import os
from celery.app.control import Inspect
from celery.result import AsyncResult
-from celery import shared_task, current_app
+from celery import shared_task, current_app, group
from django.conf import settings
from django.core.cache import cache
from .models import M3UAccount
-from apps.channels.models import Stream
+from apps.channels.models import Stream, ChannelGroup, ChannelGroupM3UAccount
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
+from django.utils import timezone
import time
-from channels.layers import get_channel_layer
+import json
+from core.utils import redis_client
+from core.models import CoreSettings
-from asgiref.sync import async_to_sync

logger = logging.getLogger(__name__)

-LOCK_EXPIRE = 120  # Lock expires after 120 seconds
+LOCK_EXPIRE = 300
+BATCH_SIZE = 1000
+SKIP_EXTS = {}

+m3u_dir = os.path.join(settings.MEDIA_ROOT, "cached_m3u")
+def fetch_m3u_lines(account, use_cache=False):
+    """Fetch M3U file lines efficiently."""
+    os.makedirs(m3u_dir, exist_ok=True)
+    file_path = os.path.join(m3u_dir, f"{account.id}.m3u")
+
+    if account.server_url:
+        if not use_cache or not os.path.exists(file_path):
+            headers = {"User-Agent": account.user_agent.user_agent}
+            logger.info(f"Fetching from URL {account.server_url}")
+            try:
+                response = requests.get(account.server_url, headers=headers, stream=True)
+                response.raise_for_status()  # Raises an HTTPError if the status is not 200
+
+                with open(file_path, 'wb') as file:
+                    # Stream the content in chunks and write to the file
+                    for chunk in response.iter_content(chunk_size=8192):  # Chunk size is adjustable
+                        if chunk:  # Ensure chunk is not empty
+                            file.write(chunk)
+            except requests.exceptions.RequestException as e:
+                logger.error(f"Error fetching M3U from URL {account.server_url}: {e}")
+                return []  # Return an empty list in case of error
+
+        with open(file_path, 'r', encoding='utf-8') as f:
+            return f.readlines()
+    elif account.uploaded_file:
+        try:
+            # Open the file and return its lines
+            with open(account.uploaded_file.path, 'r', encoding='utf-8') as f:
+                return f.readlines()
+        except IOError as e:
+            logger.error(f"Error opening file {account.uploaded_file.path}: {e}")
+            return []  # Return an empty list in case of error
+
+    # Return an empty list if neither server_url nor uploaded_file is available
+    return []
def parse_extinf_line(line: str) -> dict:
    """
@@ -35,7 +81,7 @@ def parse_extinf_line(line: str) -> dict:
    if len(parts) != 2:
        return None
    attributes_part, display_name = parts[0], parts[1].strip()
-    attrs = dict(re.findall(r'(\w+)=["\']([^"\']+)["\']', attributes_part))
+    attrs = dict(re.findall(r'([^\s]+)=["\']([^"\']+)["\']', attributes_part))
    # Use tvg-name attribute if available; otherwise, use the display name.
    name = attrs.get('tvg-name', display_name)
    return {
@@ -44,20 +90,18 @@ def parse_extinf_line(line: str) -> dict:
        'name': name
    }
-def _get_group_title(extinf_line: str) -> str:
-    """Extract group title from EXTINF line."""
-    match = re.search(r'group-title="([^"]*)"', extinf_line)
-    return match.group(1) if match else "Default Group"

-import re
-import logging

-logger = logging.getLogger(__name__)

-def _matches_filters(stream_name: str, group_name: str, filters) -> bool:
-    logger.info("Testing filter")
-    for f in filters:
-        pattern = f.regex_pattern
-        target = group_name if f.filter_type == 'group' else stream_name
-        logger.info(f"Testing {pattern} on: {target}")
-        if re.search(pattern, target or '', re.IGNORECASE):
-            logger.debug(f"Filter matched: {pattern} on {target}. Exclude={f.exclude}")
-            return f.exclude
-    return False

+def _matches_filters(stream_name: str, group_name: str, filters):
+    """Check if a stream or group name matches a precompiled regex filter."""
+    compiled_filters = [(re.compile(f.regex_pattern, re.IGNORECASE), f.filter_type, f.exclude) for f in filters]
+    for pattern, filter_type, exclude in compiled_filters:
+        target = group_name if filter_type == 'group' else stream_name
+        if pattern.search(target or ''):
+            return exclude
+    return False
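
A worked example with a stand-in filter object (any object exposing regex_pattern, filter_type, and exclude attributes works here):

from types import SimpleNamespace

f = SimpleNamespace(regex_pattern=r"sports", filter_type="group", exclude=True)
assert _matches_filters("ESPN HD", "Sports", [f]) is True   # Group matched -> excluded
assert _matches_filters("ESPN HD", "News", [f]) is False    # No match -> kept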
def acquire_lock(task_name, account_id):
@@ -86,198 +130,279 @@ def refresh_m3u_accounts():
    logger.info(msg)
    return msg

+def check_field_lengths(streams_to_create):
+    for stream in streams_to_create:
+        for field, value in stream.__dict__.items():
+            if isinstance(value, str) and len(value) > 255:
+                print(f"{field} --- {value}")
+    print("")
+    print("")
+@shared_task
+def process_groups(account, group_names):
+    existing_groups = {group.name: group for group in ChannelGroup.objects.filter(name__in=group_names)}
+    logger.info(f"Currently {len(existing_groups)} existing groups")
+
+    groups = []
+    groups_to_create = []
+    for group_name in group_names:
+        logger.info(f"Handling group: {group_name}")
+        if group_name in existing_groups:
+            groups.append(existing_groups[group_name])
+        else:
+            groups_to_create.append(ChannelGroup(
+                name=group_name,
+            ))
+
+    if groups_to_create:
+        logger.info(f"Creating {len(groups_to_create)} groups")
+        created = ChannelGroup.bulk_create_and_fetch(groups_to_create)
+        logger.info(f"Created {len(created)} groups")
+        groups.extend(created)
+
+    relations = []
+    for group in groups:
+        relations.append(ChannelGroupM3UAccount(
+            channel_group=group,
+            m3u_account=account,
+        ))
+
+    ChannelGroupM3UAccount.objects.bulk_create(
+        relations,
+        ignore_conflicts=True
+    )
+@shared_task
+def process_m3u_batch(account_id, batch, group_names, hash_keys):
+    """Processes a batch of M3U streams using bulk operations."""
+    account = M3UAccount.objects.get(id=account_id)
+    existing_groups = {group.name: group for group in ChannelGroup.objects.filter(
+        m3u_account__m3u_account=account,  # Filter by the M3UAccount
+        m3u_account__enabled=True  # Filter by the enabled flag in the join table
+    )}
+
+    streams_to_create = []
+    streams_to_update = []
+    stream_hashes = {}
+
+    # compiled_filters = [(f.filter_type, re.compile(f.regex_pattern, re.IGNORECASE)) for f in filters]
+    logger.debug(f"Processing batch of {len(batch)}")
+    for stream_info in batch:
+        name, url = stream_info["name"], stream_info["url"]
+        tvg_id, tvg_logo = stream_info["attributes"].get("tvg-id", ""), stream_info["attributes"].get("tvg-logo", "")
+        group_title = stream_info["attributes"].get("group-title", "Default Group")
+
+        # Filter out disabled groups for this account
+        if group_title not in existing_groups:
+            logger.debug(f"Skipping stream in disabled group: {group_title}")
+            continue
+
+        # if any(url.lower().endswith(ext) for ext in SKIP_EXTS) or len(url) > 2000:
+        #     continue
+
+        # if _matches_filters(name, group_title, account.filters.all()):
+        #     continue
+
+        # if any(compiled_pattern.search(current_info['name']) for ftype, compiled_pattern in compiled_filters if ftype == 'name'):
+        #     excluded_count += 1
+        #     current_info = None
+        #     continue
+
+        try:
+            stream_hash = Stream.generate_hash_key(name, url, tvg_id, hash_keys)
+            if redis_client.exists(f"m3u_refresh:{stream_hash}"):
+                # Duplicate already processed by another batch
+                continue
+
+            redis_client.set(f"m3u_refresh:{stream_hash}", "true")
+
+            stream_props = {
+                "name": name,
+                "url": url,
+                "logo_url": tvg_logo,
+                "tvg_id": tvg_id,
+                "m3u_account": account,
+                "channel_group": existing_groups[group_title],
+                "stream_hash": stream_hash,
+            }
+
+            if stream_hash not in stream_hashes:
+                stream_hashes[stream_hash] = stream_props
+        except Exception as e:
+            logger.error(f"Failed to process stream {name}: {e}")
+            logger.error(json.dumps(stream_info))
+
+    existing_streams = {s.stream_hash: s for s in Stream.objects.filter(stream_hash__in=stream_hashes.keys())}
+    logger.info(f"Hashed {len(stream_hashes.keys())} unique streams")
+    for stream_hash, stream_props in stream_hashes.items():
+        if stream_hash in existing_streams:
+            obj = existing_streams[stream_hash]
+            changed = False
+            for key, value in stream_props.items():
+                if getattr(obj, key) == value:
+                    continue
+                changed = True
+                setattr(obj, key, value)
+
+            obj.last_seen = timezone.now()
+            if changed:
+                streams_to_update.append(obj)
+                del existing_streams[stream_hash]
+            else:
+                existing_streams[stream_hash] = obj
+        else:
+            stream_props["last_seen"] = timezone.now()
+            streams_to_create.append(Stream(**stream_props))
+
+    try:
+        if streams_to_create:
+            Stream.objects.bulk_create(streams_to_create, ignore_conflicts=True)
+        if streams_to_update:
+            # Explicit field list: stream_props is unbound when the batch is empty,
+            # and changed objects also need last_seen persisted.
+            Stream.objects.bulk_update(
+                streams_to_update,
+                ["name", "url", "logo_url", "tvg_id", "m3u_account", "channel_group", "last_seen"],
+            )
+        if len(existing_streams.keys()) > 0:
+            Stream.objects.bulk_update(existing_streams.values(), ["last_seen"])
+    except Exception as e:
+        logger.error(f"Bulk create failed: {str(e)}")
+
+    return f"Batch processed: {len(streams_to_create)} created, {len(streams_to_update)} updated."
+def refresh_m3u_groups(account_id):
+    if not acquire_lock('refresh_m3u_account_groups', account_id):
+        return f"Task already running for account_id={account_id}."
+
+    # Record start time
+    start_time = time.time()
+    send_progress_update(0, account_id)
+
+    try:
+        account = M3UAccount.objects.get(id=account_id, is_active=True)
+    except M3UAccount.DoesNotExist:
+        release_lock('refresh_m3u_account_groups', account_id)
+        return f"M3UAccount with ID={account_id} not found or inactive."
+
+    lines = fetch_m3u_lines(account)
+
+    extinf_data = []
+    groups = set(["Default Group"])
+    for line in lines:
+        line = line.strip()
+        if line.startswith("#EXTINF"):
+            parsed = parse_extinf_line(line)
+            if parsed:
+                if "group-title" in parsed["attributes"]:
+                    groups.add(parsed["attributes"]["group-title"])
+                extinf_data.append(parsed)
+        elif extinf_data and line.startswith("http"):
+            # Associate URL with the last EXTINF line
+            extinf_data[-1]["url"] = line
+
+    groups = list(groups)
+
+    cache_path = os.path.join(m3u_dir, f"{account_id}.json")
+    with open(cache_path, 'w', encoding='utf-8') as f:
+        json.dump({
+            "extinf_data": extinf_data,
+            "groups": groups,
+        }, f)
+
+    process_groups(account, groups)
+
+    release_lock('refresh_m3u_account_groups', account_id)
+
+    return extinf_data, groups
-@shared_task
-def refresh_single_m3u_account(account_id):
-    logger.info(f"Task {refresh_single_m3u_account.request.id}: Starting refresh for account_id={account_id}")
-
-    try:
-        account = M3UAccount.objects.get(id=account_id, is_active=True)
-        filters = list(account.filters.all())
-        logger.info(f"Found active M3UAccount (id={account.id}, name={account.name}).")
-    except M3UAccount.DoesNotExist:
-        msg = f"M3UAccount with ID={account_id} not found or inactive."
-        logger.warning(msg)
-        release_lock('refresh_single_m3u_account', account_id)
-        return msg
-    except Exception as e:
-        logger.error(f"Error fetching M3UAccount {account_id}: {e}")
-        release_lock('refresh_single_m3u_account', account_id)
-        return str(e)
-
-    try:
-        lines = []
-        if account.server_url:
-            if not account.user_agent:
-                err_msg = f"User-Agent not provided for account id {account_id}."
-                logger.error(err_msg)
-                release_lock('refresh_single_m3u_account', account_id)
-                return err_msg
-            # Fetch M3U lines and handle potential issues
-            # lines = fetch_m3u_lines(account)  # Extracted fetch logic into separate function
-            headers = {"User-Agent": account.user_agent.user_agent}
-            response = requests.get(account.server_url, headers=headers)
-            response.raise_for_status()
-            lines = response.text.splitlines()
-        elif account.uploaded_file:
-            file_path = account.uploaded_file.path
-            with open(file_path, 'r', encoding='utf-8') as f:
-                lines = f.read().splitlines()
-        else:
-            err_msg = f"No server_url or uploaded_file provided for account_id={account_id}."
-            logger.error(err_msg)
-            release_lock('refresh_single_m3u_account', account_id)
-            return err_msg
-    except Exception as e:
-        err_msg = f"Failed fetching M3U: {e}"
-        logger.error(err_msg)
-        release_lock('refresh_single_m3u_account', account_id)
-        return err_msg
-
-    logger.info(f"M3U has {len(lines)} lines. Now parsing for Streams.")
-    skip_exts = ('.mkv', '.mp4', '.m4v', '.wav', '.avi', '.flv', '.m4p', '.mpg',
-                 '.mpeg', '.m2v', '.mp2', '.mpe', '.mpv')
-
-    created_count, updated_count, excluded_count = 0, 0, 0
-    current_info = None
-
-    for line in lines:
-        line = line.strip()
-        if line.startswith('#EXTINF'):
-            extinf = parse_extinf_line(line)
-            if not extinf:
-                continue
-            name = extinf['name']
-            tvg_id = extinf['attributes'].get('tvg-id', '')
-            tvg_logo = extinf['attributes'].get('tvg-logo', '')
-            # Prefer group-title from attributes if available.
-            group_title = extinf['attributes'].get('group-title', _get_group_title(line))
-            logger.debug(f"Parsed EXTINF: name={name}, logo_url={tvg_logo}, tvg_id={tvg_id}, group_title={group_title}")
-            current_info = {
-                "name": name,
-                "logo_url": tvg_logo,
-                "group_title": group_title,
-                "tvg_id": tvg_id,
-            }
-        elif current_info and line.startswith('http'):
-            lower_line = line.lower()
-            if any(lower_line.endswith(ext) for ext in skip_exts):
-                logger.debug(f"Skipping file with unsupported extension: {line}")
-                current_info = None
-                continue
-            if len(line) > 2000:
-                logger.warning(f"Stream URL too long, skipping: {line}")
-                excluded_count += 1
-                current_info = None
-                continue
-            if _matches_filters(current_info['name'], current_info['group_title'], filters):
-                logger.info(f"Stream excluded by filter: {current_info['name']} in group {current_info['group_title']}")
-                excluded_count += 1
-                current_info = None
-                continue
-            defaults = {
-                "logo_url": current_info["logo_url"],
-                "tvg_id": current_info["tvg_id"]
-            }
-            try:
-                obj, created = Stream.objects.update_or_create(
-                    name=current_info["name"],
-                    url=line,
-                    m3u_account=account,
-                    group_name=current_info["group_title"],
-                    defaults=defaults
-                )
-                if created:
-                    created_count += 1
-                else:
-                    updated_count += 1
-            except Exception as e:
-                logger.error(f"Failed to update/create stream {current_info['name']}: {e}")
-            finally:
-                current_info = None
-
-    logger.info(f"Completed parsing. Created {created_count} new Streams, updated {updated_count} existing Streams, excluded {excluded_count} Streams.")
-    release_lock('refresh_single_m3u_account', account_id)
-
-    channel_layer = get_channel_layer()
-    async_to_sync(channel_layer.group_send)(
-        "updates",
-        {
-            "type": "update",
-            "data": {"success": True, "type": "m3u_refresh", "message": "M3U refresh completed successfully"}
-        }
-    )
-
-    return f"Account {account_id} => Created {created_count}, updated {updated_count}, excluded {excluded_count} Streams."

+@shared_task
+def refresh_single_m3u_account(account_id, use_cache=False):
+    """Splits M3U processing into chunks and dispatches them as parallel tasks."""
+    if not acquire_lock('refresh_single_m3u_account', account_id):
+        return f"Task already running for account_id={account_id}."
+
+    # Record start time
+    start_time = time.time()
+    send_progress_update(0, account_id)
+
+    try:
+        account = M3UAccount.objects.get(id=account_id, is_active=True)
+    except M3UAccount.DoesNotExist:
+        release_lock('refresh_single_m3u_account', account_id)
+        return f"M3UAccount with ID={account_id} not found or inactive."
+
+    extinf_data = []
+    groups = None
+
+    cache_path = os.path.join(m3u_dir, f"{account_id}.json")
+    if use_cache and os.path.exists(cache_path):
+        with open(cache_path, 'r') as file:
+            data = json.load(file)
+        extinf_data = data['extinf_data']
+        groups = data['groups']
+
+    if not extinf_data:
+        extinf_data, groups = refresh_m3u_groups(account_id)
+
+    hash_keys = CoreSettings.get_m3u_hash_key().split(",")
+
+    # Break into batches and process in parallel
+    batches = [extinf_data[i:i + BATCH_SIZE] for i in range(0, len(extinf_data), BATCH_SIZE)]
+    task_group = group(process_m3u_batch.s(account_id, batch, groups, hash_keys) for batch in batches)
+
+    total_batches = len(batches)
+    completed_batches = 0
+    logger.debug(f"Dispatched {len(batches)} parallel tasks for account_id={account_id}.")
+
+    result = task_group.apply_async()
+    while completed_batches < total_batches:
+        for async_result in result:
+            if async_result.ready():  # If the task has completed
+                task_result = async_result.result  # The result of the task
+                logger.debug(f"Task completed with result: {task_result}")
+                completed_batches += 1
+
+                # Calculate progress
+                progress = int((completed_batches / total_batches) * 100)
+
+                # Send progress update via Channels
+                send_progress_update(progress, account_id)
+
+                # Optionally remove the completed task from the group to prevent processing it again
+                result.remove(async_result)
+            else:
+                logger.debug("Task is still running.")
+
+    end_time = time.time()
+    # Calculate elapsed time
+    elapsed_time = end_time - start_time
+    print(f"Function took {elapsed_time} seconds to execute.")
+
+    release_lock('refresh_single_m3u_account', account_id)
+
+    # Clear the per-stream dedupe keys left behind by the batch tasks
+    cursor = 0
+    while True:
+        cursor, keys = redis_client.scan(cursor, match="m3u_refresh:*", count=BATCH_SIZE)
+        if keys:
+            redis_client.delete(*keys)  # Delete the matching keys
+        if cursor == 0:
+            break
+
+    return f"Dispatched jobs complete."

+def send_progress_update(progress, account_id):
+    channel_layer = get_channel_layer()
+    async_to_sync(channel_layer.group_send)(
+        'updates',
+        {
+            'type': 'update',
+            "data": {"progress": progress, "type": "m3u_refresh", "account": account_id}
+        }
+    )
def process_uploaded_m3u_file(file, account):
    """Save and parse an uploaded M3U file."""
    upload_dir = os.path.join(settings.MEDIA_ROOT, 'm3u_uploads')
    os.makedirs(upload_dir, exist_ok=True)
    file_path = os.path.join(upload_dir, file.name)
    with open(file_path, 'wb+') as destination:
        for chunk in file.chunks():
            destination.write(chunk)
    try:
        parse_m3u_file(file_path, account)
    except Exception as e:
        logger.error(f"Error parsing uploaded M3U file {file_path}: {e}")

def parse_m3u_file(file_path, account):
    """Parse a local M3U file and create or update Streams."""
    skip_exts = ('.mkv', '.mp4', '.ts', '.m4v', '.wav', '.avi', '.flv', '.m4p', '.mpg',
                 '.mpeg', '.m2v', '.mp2', '.mpe', '.mpv')
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            lines = f.read().splitlines()
    except Exception as e:
        logger.error(f"Failed to read M3U file {file_path}: {e}")
        return f"Error reading M3U file {file_path}"

    created_count, updated_count, excluded_count = 0, 0, 0
    current_info = None

    for line in lines:
        line = line.strip()
        if line.startswith('#EXTINF'):
            extinf = parse_extinf_line(line)
            if not extinf:
                continue
            name = extinf['name']
            tvg_id = extinf['attributes'].get('tvg-id', '')
            tvg_logo = extinf['attributes'].get('tvg-logo', '')
            current_info = {"name": name, "logo_url": tvg_logo, "tvg_id": tvg_id}
        elif current_info and line.startswith('http'):
            lower_line = line.lower()
            if any(lower_line.endswith(ext) for ext in skip_exts):
                logger.info(f"Skipping file with unsupported extension: {line}")
                current_info = None
                continue
            defaults = {
                "logo_url": current_info["logo_url"],
                "tvg_id": current_info.get("tvg_id", "")
            }
            try:
                obj, created = Stream.objects.update_or_create(
                    name=current_info["name"],
                    url=line,
                    m3u_account=account,
                    defaults=defaults
                )
                if created:
                    created_count += 1
                else:
                    updated_count += 1
            except Exception as e:
                logger.error(f"Failed to update/create stream {current_info['name']}: {e}")
            finally:
                current_info = None

    return f"Parsed local M3U file {file_path}, created {created_count} Streams, updated {updated_count} Streams, excluded {excluded_count} Streams."


@@ -4,8 +4,10 @@ import re
from . import proxy_server
from .redis_keys import RedisKeys
from .constants import TS_PACKET_SIZE
+from redis.exceptions import ConnectionError, TimeoutError
+from .utils import get_logger

-logger = logging.getLogger("ts_proxy")
+logger = get_logger()

class ChannelStatus:
@@ -172,76 +174,98 @@ class ChannelStatus:
        return info

    # Function for basic channel info (used for all channels summary)
-    def get_basic_channel_info(channel_id):
-        # Get channel metadata
-        metadata_key = RedisKeys.channel_metadata(channel_id)
-        metadata = proxy_server.redis_client.hgetall(metadata_key)
-        if not metadata:
-            return None
-
-        # Basic channel info only - omit diagnostics and details
-        buffer_index_key = RedisKeys.buffer_index(channel_id)
-        buffer_index_value = proxy_server.redis_client.get(buffer_index_key)
-
-        # Count clients (using efficient count method)
-        client_set_key = RedisKeys.clients(channel_id)
-        client_count = proxy_server.redis_client.scard(client_set_key) or 0
-
-        # Calculate uptime
-        created_at = float(metadata.get(b'init_time', b'0').decode('utf-8'))
-        uptime = time.time() - created_at if created_at > 0 else 0
-
-        # Simplified info
-        info = {
-            'channel_id': channel_id,
-            'state': metadata.get(b'state', b'unknown').decode('utf-8'),
-            'url': metadata.get(b'url', b'').decode('utf-8'),
-            'profile': metadata.get(b'profile', b'unknown').decode('utf-8'),
-            'owner': metadata.get(b'owner', b'unknown').decode('utf-8'),
-            'buffer_index': int(buffer_index_value.decode('utf-8')) if buffer_index_value else 0,
-            'client_count': client_count,
-            'uptime': uptime
-        }
-
-        # Quick health check if available locally
-        if channel_id in proxy_server.stream_managers:
-            manager = proxy_server.stream_managers[channel_id]
-            info['healthy'] = manager.healthy
-
-        # Get concise client information
-        clients = []
-        client_ids = proxy_server.redis_client.smembers(client_set_key)
-
-        # Process only if we have clients and keep it limited
-        if client_ids:
-            # Get up to 10 clients for the basic view
-            for client_id in list(client_ids)[:10]:
-                client_id_str = client_id.decode('utf-8')
-                client_key = f"ts_proxy:channel:{channel_id}:clients:{client_id_str}"
-
-                # Efficient way - just retrieve the essentials
-                client_info = {
-                    'client_id': client_id_str,
-                    'user_agent': proxy_server.redis_client.hget(client_key, 'user_agent'),
-                    'ip_address': proxy_server.redis_client.hget(client_key, 'ip_address').decode('utf-8'),
-                }
-
-                if client_info['user_agent']:
-                    client_info['user_agent'] = client_info['user_agent'].decode('utf-8')
-                else:
-                    client_info['user_agent'] = 'unknown'
-
-                # Just get connected_at for client age
-                connected_at_bytes = proxy_server.redis_client.hget(client_key, 'connected_at')
-                if connected_at_bytes:
-                    connected_at = float(connected_at_bytes.decode('utf-8'))
-                    client_info['connected_since'] = time.time() - connected_at
-
-                clients.append(client_info)
-
-        # Add clients to info
-        info['clients'] = clients
-
-        return info

+    @staticmethod
+    def _execute_redis_command(command_func):
+        """Execute Redis command with error handling"""
+        if not proxy_server.redis_client:
+            return None
+        try:
+            return command_func()
+        except (ConnectionError, TimeoutError) as e:
+            logger.warning(f"Redis connection error in ChannelStatus: {e}")
+            return None
+        except Exception as e:
+            logger.error(f"Redis command error in ChannelStatus: {e}")
+            return None

+    @staticmethod
+    def get_basic_channel_info(channel_id):
+        """Get basic channel information with Redis error handling"""
+        try:
+            # Use _execute_redis_command for Redis operations
+            metadata_key = RedisKeys.channel_metadata(channel_id)
+            metadata = ChannelStatus._execute_redis_command(
+                lambda: proxy_server.redis_client.hgetall(metadata_key)
+            )
+
+            if not metadata:
+                return None
+
+            # Basic channel info only - omit diagnostics and details
+            buffer_index_key = RedisKeys.buffer_index(channel_id)
+            buffer_index_value = proxy_server.redis_client.get(buffer_index_key)
+
+            # Count clients (using efficient count method)
+            client_set_key = RedisKeys.clients(channel_id)
+            client_count = proxy_server.redis_client.scard(client_set_key) or 0
+
+            # Calculate uptime
+            created_at = float(metadata.get(b'init_time', b'0').decode('utf-8'))
+            uptime = time.time() - created_at if created_at > 0 else 0
+
+            # Simplified info
+            info = {
+                'channel_id': channel_id,
+                'state': metadata.get(b'state', b'unknown').decode('utf-8'),
+                'url': metadata.get(b'url', b'').decode('utf-8'),
+                'profile': metadata.get(b'profile', b'unknown').decode('utf-8'),
+                'owner': metadata.get(b'owner', b'unknown').decode('utf-8'),
+                'buffer_index': int(buffer_index_value.decode('utf-8')) if buffer_index_value else 0,
+                'client_count': client_count,
+                'uptime': uptime
+            }
+
+            # Quick health check if available locally
+            if channel_id in proxy_server.stream_managers:
+                manager = proxy_server.stream_managers[channel_id]
+                info['healthy'] = manager.healthy
+
+            # Get concise client information
+            clients = []
+            client_ids = proxy_server.redis_client.smembers(client_set_key)
+
+            # Process only if we have clients and keep it limited
+            if client_ids:
+                # Get up to 10 clients for the basic view
+                for client_id in list(client_ids)[:10]:
+                    client_id_str = client_id.decode('utf-8')
+                    client_key = f"ts_proxy:channel:{channel_id}:clients:{client_id_str}"
+
+                    # Efficient way - just retrieve the essentials
+                    client_info = {
+                        'client_id': client_id_str,
+                        'user_agent': proxy_server.redis_client.hget(client_key, 'user_agent'),
+                        'ip_address': proxy_server.redis_client.hget(client_key, 'ip_address').decode('utf-8'),
+                    }
+
+                    if client_info['user_agent']:
+                        client_info['user_agent'] = client_info['user_agent'].decode('utf-8')
+                    else:
+                        client_info['user_agent'] = 'unknown'
+
+                    # Just get connected_at for client age
+                    connected_at_bytes = proxy_server.redis_client.hget(client_key, 'connected_at')
+                    if connected_at_bytes:
+                        connected_at = float(connected_at_bytes.decode('utf-8'))
+                        client_info['connected_since'] = time.time() - connected_at
+
+                    clients.append(client_info)
+
+            # Add clients to info
+            info['clients'] = clients
+
+            return info
+        except Exception as e:
+            logger.error(f"Error getting channel info: {e}")
+            return None


@@ -6,11 +6,13 @@ import time
import json
from typing import Set, Optional
from apps.proxy.config import TSConfig as Config
+from redis.exceptions import ConnectionError, TimeoutError
from .constants import EventType
from .config_helper import ConfigHelper
from .redis_keys import RedisKeys
+from .utils import get_logger

-logger = logging.getLogger("ts_proxy")
+logger = get_logger()

class ClientManager:
    """Manages client connections with no duplicates"""
@@ -120,6 +122,20 @@ class ClientManager:
        thread.start()
        logger.debug(f"Started client heartbeat thread for channel {self.channel_id} (interval: {self.heartbeat_interval}s)")

+    def _execute_redis_command(self, command_func):
+        """Execute Redis command with error handling"""
+        if not self.redis_client:
+            return None
+        try:
+            return command_func()
+        except (ConnectionError, TimeoutError) as e:
+            logger.warning(f"Redis connection error in ClientManager: {e}")
+            return None
+        except Exception as e:
+            logger.error(f"Redis command error in ClientManager: {e}")
+            return None

    def _notify_owner_of_activity(self):
        """Notify channel owner that clients are active on this worker"""
        if not self.redis_client or not self.clients:
@@ -130,11 +146,15 @@ class ClientManager:
            # STANDARDIZED KEY: Worker info under channel namespace
            worker_key = f"ts_proxy:channel:{self.channel_id}:worker:{worker_id}"
-            self.redis_client.setex(worker_key, self.client_ttl, str(len(self.clients)))
+            self._execute_redis_command(
+                lambda: self.redis_client.setex(worker_key, self.client_ttl, str(len(self.clients)))
+            )

            # STANDARDIZED KEY: Activity timestamp under channel namespace
            activity_key = f"ts_proxy:channel:{self.channel_id}:activity"
-            self.redis_client.setex(activity_key, self.client_ttl, str(time.time()))
+            self._execute_redis_command(
+                lambda: self.redis_client.setex(activity_key, self.client_ttl, str(time.time()))
+            )
        except Exception as e:
            logger.error(f"Error notifying owner of client activity: {e}")


@@ -2,11 +2,8 @@
"""
Helper module to access configuration values with proper defaults.
"""
-import logging
from apps.proxy.config import TSConfig as Config

-logger = logging.getLogger("ts_proxy")

class ConfigHelper:
    """
    Helper class for accessing configuration values with sensible defaults.


@@ -83,3 +83,8 @@ class RedisKeys:
    def transcode_active(channel_id):
        """Key indicating active transcode process"""
        return f"ts_proxy:channel:{channel_id}:transcode_active"

+    @staticmethod
+    def client_metadata(channel_id, client_id):
+        """Key for client metadata hash"""
+        return f"ts_proxy:channel:{channel_id}:clients:{client_id}"


@@ -18,14 +18,17 @@ import json
from typing import Dict, Optional, Set
from apps.proxy.config import TSConfig as Config
from apps.channels.models import Channel
+from core.utils import redis_client as global_redis_client, redis_pubsub_client as global_redis_pubsub_client  # Import both global Redis clients
+from redis.exceptions import ConnectionError, TimeoutError
from .stream_manager import StreamManager
from .stream_buffer import StreamBuffer
from .client_manager import ClientManager
from .redis_keys import RedisKeys
from .constants import ChannelState, EventType, StreamType
from .config_helper import ConfigHelper
+from .utils import get_logger

-logger = logging.getLogger("ts_proxy")
+logger = get_logger()

class ProxyServer:
    """Manages TS proxy server instance with worker coordination"""
@@ -43,19 +46,25 @@ class ProxyServer:
        hostname = socket.gethostname()
        self.worker_id = f"{hostname}:{pid}"
-        # Connect to Redis
+        # Connect to Redis - try using global client first
        self.redis_client = None
-        try:
-            import redis
-            from django.conf import settings
-            redis_url = getattr(settings, 'REDIS_URL', 'redis://localhost:6379/0')
-            self.redis_client = redis.from_url(redis_url)
-            logger.info(f"Connected to Redis at {redis_url}")
-            logger.info(f"Worker ID: {self.worker_id}")
-        except Exception as e:
-            logger.error(f"Failed to connect to Redis: {e}")
+        self.redis_connection_attempts = 0
+        self.redis_max_retries = 3
+        self.redis_retry_interval = 5  # seconds
+        try:
+            # First try to use the global client from core.utils
+            if global_redis_client is not None:
+                self.redis_client = global_redis_client
+                logger.info(f"Using global Redis client")
+                logger.info(f"Worker ID: {self.worker_id}")
+            else:
+                # Fall back to direct connection with retry
+                self._setup_redis_connection()
+        except Exception as e:
+            logger.error(f"Failed to initialize Redis: {e}")
+            self.redis_client = None

        # Start cleanup thread
        self.cleanup_interval = getattr(Config, 'CLEANUP_INTERVAL', 60)
@@ -64,179 +73,306 @@ class ProxyServer:
        # Start event listener for Redis pubsub messages
        self._start_event_listener()
+    def _setup_redis_connection(self):
+        """Setup Redis connection with retry logic"""
+        import redis
+        from django.conf import settings
+
+        while self.redis_connection_attempts < self.redis_max_retries:
+            try:
+                logger.info(f"Attempting to connect to Redis ({self.redis_connection_attempts+1}/{self.redis_max_retries})")
+
+                # Get connection parameters from settings or environment
+                redis_host = os.environ.get("REDIS_HOST", getattr(settings, 'REDIS_HOST', 'localhost'))
+                redis_port = int(os.environ.get("REDIS_PORT", getattr(settings, 'REDIS_PORT', 6379)))
+                redis_db = int(os.environ.get("REDIS_DB", getattr(settings, 'REDIS_DB', 0)))
+
+                # Create Redis client with reasonable timeouts
+                self.redis_client = redis.Redis(
+                    host=redis_host,
+                    port=redis_port,
+                    db=redis_db,
+                    socket_timeout=5,
+                    socket_connect_timeout=5,
+                    retry_on_timeout=True,
+                    health_check_interval=30
+                )
+
+                # Test connection
+                self.redis_client.ping()
+                logger.info(f"Successfully connected to Redis at {redis_host}:{redis_port}/{redis_db}")
+                logger.info(f"Worker ID: {self.worker_id}")
+                break
+            except (ConnectionError, TimeoutError) as e:
+                self.redis_connection_attempts += 1
+                if self.redis_connection_attempts >= self.redis_max_retries:
+                    logger.error(f"Failed to connect to Redis after {self.redis_max_retries} attempts: {e}")
+                    self.redis_client = None
+                else:
+                    # Exponential backoff with a maximum of 30 seconds
+                    retry_delay = min(self.redis_retry_interval * (2 ** (self.redis_connection_attempts - 1)), 30)
+                    logger.warning(f"Redis connection failed. Retrying in {retry_delay}s... ({self.redis_connection_attempts}/{self.redis_max_retries})")
+                    time.sleep(retry_delay)
+            except Exception as e:
+                logger.error(f"Unexpected error connecting to Redis: {e}", exc_info=True)
+                self.redis_client = None
+                break

+    def _execute_redis_command(self, command_func, *args, **kwargs):
+        """Execute Redis command with error handling and reconnection logic"""
+        if not self.redis_client:
+            return None
+        try:
+            return command_func(*args, **kwargs)
+        except (ConnectionError, TimeoutError) as e:
+            logger.warning(f"Redis connection lost: {e}. Attempting to reconnect...")
+            try:
+                # Try to reconnect
+                self.redis_connection_attempts = 0
+                self._setup_redis_connection()
+                if self.redis_client:
+                    # Retry the command once
+                    return command_func(*args, **kwargs)
+            except Exception as reconnect_error:
+                logger.error(f"Failed to reconnect to Redis: {reconnect_error}")
+            return None
+        except Exception as e:
+            logger.error(f"Redis command error: {e}")
+            return None
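
With redis_retry_interval = 5 and the 30-second cap, the backoff formula yields delays of 5, 10, and 20 seconds across the three configured retries, clamping at 30 thereafter; a quick check of the expression:

redis_retry_interval = 5
delays = [min(redis_retry_interval * (2 ** (attempt - 1)), 30) for attempt in (1, 2, 3, 4)]
print(delays)  # -> [5, 10, 20, 30]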
def _start_event_listener(self):
    """Listen for events from other workers"""
    if not self.redis_client:
        return

    def event_listener():
        retry_count = 0
        max_retries = 10
        base_retry_delay = 1   # Start with 1 second delay
        max_retry_delay = 30   # Cap at 30 seconds

        while True:
            try:
                # Use the global PubSub client if available
                if global_redis_pubsub_client:
                    pubsub_client = global_redis_pubsub_client
                    logger.info("Using global Redis PubSub client for event listener")
                else:
                    # Fall back to creating a dedicated client if global one is unavailable
                    from django.conf import settings
                    import redis

                    redis_host = os.environ.get("REDIS_HOST", getattr(settings, 'REDIS_HOST', 'localhost'))
                    redis_port = int(os.environ.get("REDIS_PORT", getattr(settings, 'REDIS_PORT', 6379)))
                    redis_db = int(os.environ.get("REDIS_DB", getattr(settings, 'REDIS_DB', 0)))

                    pubsub_client = redis.Redis(
                        host=redis_host,
                        port=redis_port,
                        db=redis_db,
                        socket_timeout=60,
                        socket_connect_timeout=10,
                        socket_keepalive=True,
                        health_check_interval=30
                    )
                    logger.info("Created dedicated Redis PubSub client for event listener")

                # Test connection before subscribing
                pubsub_client.ping()

                # Create a pubsub instance from the client
                pubsub = pubsub_client.pubsub()
                pubsub.psubscribe("ts_proxy:events:*")

                logger.info("Started Redis event listener for client activity")

                # Reset retry count on successful connection
                retry_count = 0

                for message in pubsub.listen():
                    if message["type"] != "pmessage":
                        continue

                    try:
                        channel = message["channel"].decode("utf-8")
                        data = json.loads(message["data"].decode("utf-8"))

                        event_type = data.get("event")
                        channel_id = data.get("channel_id")

                        if channel_id and event_type:
                            # For owner, update client status immediately
                            if self.am_i_owner(channel_id):
                                if event_type == EventType.CLIENT_CONNECTED:
                                    logger.debug(f"Owner received {EventType.CLIENT_CONNECTED} event for channel {channel_id}")
                                    # Reset any disconnect timer
                                    disconnect_key = RedisKeys.last_client_disconnect(channel_id)
                                    self.redis_client.delete(disconnect_key)

                                elif event_type == EventType.CLIENT_DISCONNECTED:
                                    logger.debug(f"Owner received {EventType.CLIENT_DISCONNECTED} event for channel {channel_id}")
                                    # Check if any clients remain
                                    if channel_id in self.client_managers:
                                        # Verify the Redis client count directly
                                        client_set_key = RedisKeys.clients(channel_id)
                                        total = self.redis_client.scard(client_set_key) or 0

                                        if total == 0:
                                            logger.debug(f"No clients left after disconnect event - stopping channel {channel_id}")
                                            # Set the disconnect timer for other workers to see
                                            disconnect_key = RedisKeys.last_client_disconnect(channel_id)
                                            self.redis_client.setex(disconnect_key, 60, str(time.time()))

                                            # Get configured shutdown delay or default
                                            shutdown_delay = getattr(Config, 'CHANNEL_SHUTDOWN_DELAY', 0)
                                            if shutdown_delay > 0:
                                                logger.info(f"Waiting {shutdown_delay}s before stopping channel...")
                                                time.sleep(shutdown_delay)

                                                # Re-check client count before stopping
                                                total = self.redis_client.scard(client_set_key) or 0
                                                if total > 0:
                                                    logger.info("New clients connected during shutdown delay - aborting shutdown")
                                                    self.redis_client.delete(disconnect_key)
                                                    return

                                            # Stop the channel directly
                                            self.stop_channel(channel_id)

                                elif event_type == EventType.STREAM_SWITCH:
                                    logger.info(f"Owner received {EventType.STREAM_SWITCH} request for channel {channel_id}")
                                    # Handle stream switch request
                                    new_url = data.get("url")
                                    user_agent = data.get("user_agent")

                                    if new_url and channel_id in self.stream_managers:
                                        # Update metadata in Redis
                                        if self.redis_client:
                                            metadata_key = RedisKeys.channel_metadata(channel_id)
                                            self.redis_client.hset(metadata_key, "url", new_url)
                                            if user_agent:
                                                self.redis_client.hset(metadata_key, "user_agent", user_agent)

                                            # Set switch status
                                            status_key = RedisKeys.switch_status(channel_id)
                                            self.redis_client.set(status_key, "switching")

                                        # Perform the stream switch
                                        stream_manager = self.stream_managers[channel_id]
                                        success = stream_manager.update_url(new_url)

                                        if success:
                                            logger.info(f"Stream switch initiated for channel {channel_id}")
                                            # Publish confirmation
                                            switch_result = {
                                                "event": EventType.STREAM_SWITCHED,  # Use constant instead of string
                                                "channel_id": channel_id,
                                                "success": True,
                                                "url": new_url,
                                                "timestamp": time.time()
                                            }
                                            self.redis_client.publish(
                                                f"ts_proxy:events:{channel_id}",
                                                json.dumps(switch_result)
                                            )

                                            # Update status
                                            if self.redis_client:
                                                self.redis_client.set(status_key, "switched")
                                        else:
                                            logger.error(f"Failed to switch stream for channel {channel_id}")
                                            # Publish failure
                                            switch_result = {
                                                "event": EventType.STREAM_SWITCHED,
                                                "channel_id": channel_id,
                                                "success": False,
                                                "url": new_url,
                                                "timestamp": time.time()
                                            }
                                            self.redis_client.publish(
                                                f"ts_proxy:events:{channel_id}",
                                                json.dumps(switch_result)
                                            )

                                elif event_type == EventType.CHANNEL_STOP:
                                    logger.info(f"Received {EventType.CHANNEL_STOP} event for channel {channel_id}")

                                    # First mark channel as stopping in Redis
                                    if self.redis_client:
                                        # Set stopping state in metadata
                                        metadata_key = RedisKeys.channel_metadata(channel_id)
                                        if self.redis_client.exists(metadata_key):
                                            self.redis_client.hset(metadata_key, mapping={
                                                "state": ChannelState.STOPPING,
                                                "state_changed_at": str(time.time())
                                            })

                                    # If we have local resources for this channel, clean them up
                                    if channel_id in self.stream_buffers or channel_id in self.client_managers:
                                        # Use existing stop_channel method
                                        logger.info(f"Stopping local resources for channel {channel_id}")
                                        self.stop_channel(channel_id)

                                    # Acknowledge stop by publishing a response
                                    stop_response = {
                                        "event": EventType.CHANNEL_STOPPED,
                                        "channel_id": channel_id,
                                        "worker_id": self.worker_id,
                                        "timestamp": time.time()
                                    }
                                    self.redis_client.publish(
                                        f"ts_proxy:events:{channel_id}",
                                        json.dumps(stop_response)
                                    )

                                elif event_type == EventType.CLIENT_STOP:
                                    client_id = data.get("client_id")
                                    if client_id and channel_id:
                                        logger.info(f"Received request to stop client {client_id} on channel {channel_id}")

                                        # Both remove from client manager AND set a key for the generator to detect
                                        if channel_id in self.client_managers:
                                            client_manager = self.client_managers[channel_id]
                                            if client_id in client_manager.clients:
                                                client_manager.remove_client(client_id)
                                                logger.info(f"Removed client {client_id} from client manager")

                                        # Set a Redis key for the generator to detect
                                        if self.redis_client:
                                            stop_key = RedisKeys.client_stop(channel_id, client_id)
                                            self.redis_client.setex(stop_key, 30, "true")  # 30 second TTL
                                            logger.info(f"Set stop key for client {client_id}")
                    except Exception as e:
                        logger.error(f"Error processing event message: {e}")

            except (ConnectionError, TimeoutError) as e:
                # Calculate exponential backoff with jitter
                retry_count += 1
                delay = min(base_retry_delay * (2 ** (retry_count - 1)), max_retry_delay)
                # Add some randomness to prevent thundering herd
                jitter = random.uniform(0, 0.5 * delay)
                final_delay = delay + jitter

                logger.error(f"Error in event listener: {e}. Retrying in {final_delay:.1f}s (attempt {retry_count})")
                time.sleep(final_delay)

                # Try to clean up the old connection
                try:
                    if 'pubsub' in locals():
                        pubsub.close()
                    if 'pubsub_client' in locals():
                        pubsub_client.close()
                except:
                    pass

            except Exception as e:
                logger.error(f"Error in event listener: {e}")
                # Add a short delay to prevent rapid retries on persistent errors
                time.sleep(5)

    thread = threading.Thread(target=event_listener, daemon=True)
    thread.name = "redis-event-listener"
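The reconnect delays above follow capped exponential backoff with jitter. The same arithmetic as a standalone sketch (illustration only, not part of the commit):

import random

def reconnect_delay(retry_count, base=1, cap=30):
    # Exponential backoff capped at `cap` seconds, plus up to 50% random jitter
    delay = min(base * (2 ** (retry_count - 1)), cap)
    return delay + random.uniform(0, 0.5 * delay)

# Attempts 1..6 yield roughly 1, 2, 4, 8, 16, 30 seconds before jitter is added.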
@ -249,10 +385,9 @@ class ProxyServer:
try:
lock_key = RedisKeys.channel_owner(channel_id)
owner = self.redis_client.get(lock_key)
if owner:
return owner.decode('utf-8')
return None
return self._execute_redis_command(
lambda: self.redis_client.get(lock_key).decode('utf-8') if self.redis_client.get(lock_key) else None
)
except Exception as e:
logger.error(f"Error getting channel owner: {e}")
return None
@ -271,20 +406,32 @@ class ProxyServer:
# Create a lock key with proper namespace
lock_key = RedisKeys.channel_owner(channel_id)
# Use Redis SETNX for atomic locking - only succeeds if the key doesn't exist
acquired = self.redis_client.setnx(lock_key, self.worker_id)
# Use Redis SETNX for atomic locking with error handling
acquired = self._execute_redis_command(
lambda: self.redis_client.setnx(lock_key, self.worker_id)
)
if acquired is None: # Redis command failed
logger.warning(f"Redis command failed during ownership acquisition - assuming ownership")
return True
# If acquired, set expiry to prevent orphaned locks
if acquired:
self.redis_client.expire(lock_key, ttl)
self._execute_redis_command(
lambda: self.redis_client.expire(lock_key, ttl)
)
logger.info(f"Worker {self.worker_id} acquired ownership of channel {channel_id}")
return True
# If not acquired, check if we already own it (might be a retry)
current_owner = self.redis_client.get(lock_key)
current_owner = self._execute_redis_command(
lambda: self.redis_client.get(lock_key)
)
if current_owner and current_owner.decode('utf-8') == self.worker_id:
# Refresh TTL
self.redis_client.expire(lock_key, ttl)
self._execute_redis_command(
lambda: self.redis_client.expire(lock_key, ttl)
)
logger.info(f"Worker {self.worker_id} refreshed ownership of channel {channel_id}")
return True
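The ownership flow reduces to a small SETNX/EXPIRE pattern. A minimal sketch with a plain redis-py client, stripped of the _execute_redis_command error handling above; the key string is illustrative, since the real one comes from RedisKeys.channel_owner:

import redis

r = redis.Redis()

def try_acquire(channel_id, worker_id, ttl=30):
    lock_key = f"ts_proxy:channel:{channel_id}:owner"  # illustrative key shape
    if r.setnx(lock_key, worker_id):   # atomic: only the first worker succeeds
        r.expire(lock_key, ttl)        # TTL prevents orphaned locks
        return True
    owner = r.get(lock_key)
    if owner and owner.decode("utf-8") == worker_id:
        r.expire(lock_key, ttl)        # already ours on a retry - refresh the TTL
        return True
    return False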
@ -689,7 +836,7 @@ class ProxyServer:
return True
except Exception as e:
logger.error(f"Error stopping channel {channel_id}: {e}", exc_info=True)
logger.error(f"Error stopping channel {channel_id}: {e}")
return False
def check_inactive_channels(self):
@ -723,7 +870,9 @@ class ProxyServer:
# Send worker heartbeat first
if self.redis_client:
worker_heartbeat_key = f"ts_proxy:worker:{self.worker_id}:heartbeat"
self.redis_client.setex(worker_heartbeat_key, 30, str(time.time()))
self._execute_redis_command(
lambda: self.redis_client.setex(worker_heartbeat_key, 30, str(time.time()))
)
# Refresh channel registry
self.refresh_channel_registry()

View file

@ -10,8 +10,9 @@ from apps.proxy.config import TSConfig as Config
from .redis_keys import RedisKeys
from .config_helper import ConfigHelper
from .constants import TS_PACKET_SIZE
from .utils import get_logger
logger = logging.getLogger("ts_proxy")
logger = get_logger()
class StreamBuffer:
"""Manages stream data buffering with optimized chunk storage"""

View file

@ -8,10 +8,11 @@ import logging
import threading
from apps.proxy.config import TSConfig as Config
from . import proxy_server
from .utils import create_ts_packet
from .utils import create_ts_packet, get_logger
from .redis_keys import RedisKeys
from .utils import get_logger
logger = logging.getLogger("ts_proxy")
logger = get_logger()
class StreamGenerator:
"""
@ -162,11 +163,6 @@ class StreamGenerator:
def _stream_data_generator(self):
"""Generate stream data chunks based on buffer contents."""
bytes_sent = 0
chunks_sent = 0
stream_start_time = time.time()
local_index = self.local_index
# Main streaming loop
while True:
# Check if resources still exist
@ -174,12 +170,11 @@ class StreamGenerator:
break
# Get chunks at client's position using improved strategy
chunks, next_index = self.buffer.get_optimized_client_data(local_index)
chunks, next_index = self.buffer.get_optimized_client_data(self.local_index)
if chunks:
yield from self._process_chunks(chunks, next_index, bytes_sent, chunks_sent, stream_start_time)
local_index = next_index
self.local_index = local_index
yield from self._process_chunks(chunks, next_index)
self.local_index = next_index
self.last_yield_time = time.time()
self.empty_reads = 0
self.consecutive_empty = 0
@ -188,11 +183,11 @@ class StreamGenerator:
self.empty_reads += 1
self.consecutive_empty += 1
if self._should_send_keepalive(local_index):
if self._should_send_keepalive(self.local_index):
keepalive_packet = create_ts_packet('keepalive')
logger.debug(f"[{self.client_id}] Sending keepalive packet while waiting at buffer head")
yield keepalive_packet
bytes_sent += len(keepalive_packet)
self.bytes_sent += len(keepalive_packet)
self.last_yield_time = time.time()
self.consecutive_empty = 0 # Reset consecutive counter but keep total empty_reads
time.sleep(Config.KEEPALIVE_INTERVAL)
@ -204,11 +199,11 @@ class StreamGenerator:
# Log empty reads periodically
if self.empty_reads % 50 == 0:
stream_status = "healthy" if (self.stream_manager and self.stream_manager.healthy) else "unknown"
logger.debug(f"[{self.client_id}] Waiting for chunks beyond {local_index} (buffer at {self.buffer.index}, stream: {stream_status})")
logger.debug(f"[{self.client_id}] Waiting for chunks beyond {self.local_index} (buffer at {self.buffer.index}, stream: {stream_status})")
# Check for ghost clients
if self._is_ghost_client(local_index):
logger.warning(f"[{self.client_id}] Possible ghost client: buffer has advanced {self.buffer.index - local_index} chunks ahead but client stuck at {local_index}")
if self._is_ghost_client(self.local_index):
logger.warning(f"[{self.client_id}] Possible ghost client: buffer has advanced {self.buffer.index - self.local_index} chunks ahead but client stuck at {self.local_index}")
break
# Check for timeouts
@ -258,7 +253,7 @@ class StreamGenerator:
return True
def _process_chunks(self, chunks, next_index, bytes_sent, chunks_sent, stream_start_time):
def _process_chunks(self, chunks, next_index):
"""Process and yield chunks to the client."""
# Process and send chunks
total_size = sum(len(c) for c in chunks)
@ -268,20 +263,34 @@ class StreamGenerator:
for chunk in chunks:
try:
yield chunk
bytes_sent += len(chunk)
chunks_sent += 1
self.bytes_sent += len(chunk)
self.chunks_sent += 1
logger.debug(f"[{self.client_id}] Sent chunk {self.chunks_sent} ({len(chunk)} bytes) to client")
# Log every 10 chunks and store in redis for visibility
if self.chunks_sent % 10 == 0:
elapsed = time.time() - self.stream_start_time
rate = self.bytes_sent / elapsed / 1024 if elapsed > 0 else 0
logger.debug(f"[{self.client_id}] Stats: {self.chunks_sent} chunks, {self.bytes_sent/1024:.1f} KB, {rate:.1f} KB/s")
# Store stats in Redis client metadata
if proxy_server.redis_client:
try:
client_key = RedisKeys.client_metadata(self.channel_id, self.client_id)
stats = {
"chunks_sent": str(self.chunks_sent),
"bytes_sent": str(self.bytes_sent),
"transfer_rate_KBps": str(round(rate, 1)),
"stats_updated_at": str(time.time())
}
proxy_server.redis_client.hset(client_key, mapping=stats)
# No need to set expiration as client heartbeat will refresh this key
except Exception as e:
logger.warning(f"[{self.client_id}] Failed to store stats in Redis: {e}")
# Log every 100 chunks for visibility
if chunks_sent % 100 == 0:
elapsed = time.time() - stream_start_time
rate = bytes_sent / elapsed / 1024 if elapsed > 0 else 0
logger.info(f"[{self.client_id}] Stats: {chunks_sent} chunks, {bytes_sent/1024:.1f}KB, {rate:.1f}KB/s")
except Exception as e:
logger.error(f"[{self.client_id}] Error sending chunk to client: {e}")
raise # Re-raise to exit the generator
return bytes_sent, chunks_sent
def _should_send_keepalive(self, local_index):
"""Determine if a keepalive packet should be sent."""
# Check if we're caught up to buffer head
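Because _process_chunks now mirrors per-client throughput into the client's metadata hash every 10 chunks, any other process can read it back with a plain HGETALL. A sketch under the assumption that RedisKeys.client_metadata resolves to a key shaped like the one below:

def read_client_stats(redis_client, channel_id, client_id):
    # Assumed key shape; the canonical form comes from RedisKeys.client_metadata()
    key = f"ts_proxy:channel:{channel_id}:client:{client_id}"
    raw = redis_client.hgetall(key) or {}
    return {k.decode("utf-8"): v.decode("utf-8") for k, v in raw.items()}

# e.g. {"chunks_sent": "120", "bytes_sent": "2211840", "transfer_rate_KBps": "185.3", ...}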

View file

@ -13,13 +13,13 @@ from apps.channels.models import Channel, Stream
from apps.m3u.models import M3UAccount, M3UAccountProfile
from core.models import UserAgent, CoreSettings
from .stream_buffer import StreamBuffer
from .utils import detect_stream_type
from .utils import detect_stream_type, get_logger
from .redis_keys import RedisKeys
from .constants import ChannelState, EventType, StreamType, TS_PACKET_SIZE
from .config_helper import ConfigHelper
from .url_utils import get_alternate_streams, get_stream_info_for_switch
logger = logging.getLogger("ts_proxy")
logger = get_logger()
class StreamManager:
"""Manages a connection to a TS stream without using raw sockets"""

View file

@ -9,8 +9,9 @@ from django.shortcuts import get_object_or_404
from apps.channels.models import Channel, Stream
from apps.m3u.models import M3UAccount, M3UAccountProfile
from core.models import UserAgent, CoreSettings
from .utils import get_logger
logger = logging.getLogger("ts_proxy")
logger = get_logger()
def generate_stream_url(channel_id: str) -> Tuple[str, str, bool]:
"""
@ -54,7 +55,7 @@ def generate_stream_url(channel_id: str) -> Tuple[str, str, bool]:
transcode = True
# Get profile name as string
profile_value = str(stream_profile)
profile_value = stream_profile.id
return stream_url, stream_user_agent, transcode, profile_value

View file

@ -1,6 +1,7 @@
import logging
import re
from urllib.parse import urlparse
import inspect
logger = logging.getLogger("ts_proxy")
@ -77,4 +78,31 @@ def create_ts_packet(packet_type='null', message=None):
msg_bytes = message.encode('utf-8')
packet[4:4+min(len(msg_bytes), 180)] = msg_bytes[:180]
return bytes(packet)
return bytes(packet)
def get_logger(component_name=None):
"""
Get a standardized logger with ts_proxy prefix and optional component name.
Args:
component_name (str, optional): Name of the component. If not provided,
will try to detect from the calling module.
Returns:
logging.Logger: A configured logger with standardized naming.
"""
if component_name:
logger_name = f"ts_proxy.{component_name}"
else:
# Try to get the calling module name if not explicitly specified
frame = inspect.currentframe().f_back
module = inspect.getmodule(frame)
if module:
# Extract just the filename without extension
module_name = module.__name__.split('.')[-1]
logger_name = f"ts_proxy.{module_name}"
else:
# Default if detection fails
logger_name = "ts_proxy"
return logging.getLogger(logger_name)
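In practice the helper is used the same way at the top of the other ts_proxy modules touched by this commit; roughly:

# e.g. inside apps/proxy/ts_proxy/stream_buffer.py
from .utils import get_logger

logger = get_logger()          # detected from the module -> "ts_proxy.stream_buffer"
logger = get_logger("buffer")  # explicit component name  -> "ts_proxy.buffer"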

View file

@ -22,9 +22,9 @@ from .constants import ChannelState, EventType, StreamType
from .config_helper import ConfigHelper
from .services.channel_service import ChannelService
from .url_utils import generate_stream_url, transform_url, get_stream_info_for_switch
from .utils import get_logger
# Configure logging properly
logger = logging.getLogger("ts_proxy")
logger = get_logger()
@api_view(['GET'])

34
core/command_utils.py Normal file
View file

@ -0,0 +1,34 @@
import sys
import os
def is_management_command(excluded_commands=None):
"""
Detect if we're running a Django management command like migrate, collectstatic, etc.
Args:
excluded_commands: List of commands that should still use Redis (e.g. runserver)
Returns:
bool: True if we're running a management command
"""
# First check if we're in build mode
if os.environ.get("DISPATCHARR_BUILD") == "1":
return True
if excluded_commands is None:
excluded_commands = ['runserver', 'runworker', 'daphne']
# Check if we're running via manage.py
if not ('manage.py' in sys.argv[0]):
return False
# Check if we have a command argument
if len(sys.argv) > 1:
command = sys.argv[1]
# Return False if command is in excluded list - these commands DO need Redis
if command in excluded_commands:
return False
# Otherwise it's a command that should work without Redis
return True
return False
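A quick sketch of how the detector behaves for common invocations; the outcomes follow directly from the logic above:

from core.command_utils import is_management_command

# python manage.py migrate        -> True  (safe to skip Redis)
# python manage.py collectstatic  -> True
# python manage.py runserver      -> False (excluded command; Redis is required)
# DISPATCHARR_BUILD=1 in the env   -> True regardless of argv
if is_management_command():
    redis_client = None  # skip Redis wiring entirely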

View file

@ -0,0 +1,22 @@
# Generated by Django 5.1.6 on 2025-03-01 14:01
from django.db import migrations
from django.utils.text import slugify
def preload_core_settings(apps, schema_editor):
CoreSettings = apps.get_model("core", "CoreSettings")
CoreSettings.objects.create(
key=slugify("M3U Hash Key"),
name="M3U Hash Key",
value="name,url,tvg_id",
)
class Migration(migrations.Migration):
dependencies = [
('core', '0008_rename_profile_name_streamprofile_name_and_more'),
]
operations = [
migrations.RunPython(preload_core_settings),
]

View file

@ -67,7 +67,7 @@ class StreamProfile(models.Model):
def save(self, *args, **kwargs):
if self.pk: # Only check existing records
orig = StreamProfile.objects.get(pk=self.pk)
if orig.is_protected:
if orig.locked:
allowed_fields = {"user_agent_id"} # Only allow this field to change
for field in self._meta.fields:
field_name = field.name
@ -91,7 +91,7 @@ class StreamProfile(models.Model):
def update(cls, pk, **kwargs):
instance = cls.objects.get(pk=pk)
if instance.is_protected:
if instance.locked:
allowed_fields = {"user_agent_id"} # Only allow updating this field
for field_name, new_value in kwargs.items():
@ -142,9 +142,9 @@ class StreamProfile(models.Model):
DEFAULT_USER_AGENT_KEY = slugify("Default User-Agent")
DEFAULT_STREAM_PROFILE_KEY = slugify("Default Stream Profile")
STREAM_HASH_KEY = slugify("M3U Hash Key")
PREFERRED_REGION_KEY = slugify("Preferred Region")
class CoreSettings(models.Model):
key = models.CharField(
max_length=255,
@ -170,10 +170,12 @@ class CoreSettings(models.Model):
return cls.objects.get(key=DEFAULT_STREAM_PROFILE_KEY).value
@classmethod
def get_m3u_hash_key(cls):
return cls.objects.get(key=STREAM_HASH_KEY).value
@classmethod
def get_preferred_region(cls):
"""Retrieve the preferred region setting (or return None if not found)."""
try:
return cls.objects.get(key=PREFERRED_REGION_KEY).value
except cls.DoesNotExist:
return None
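Callers read these settings straight off the model. A small sketch; the "us" value is purely illustrative, while the default hash fields match the migration above:

from core.models import CoreSettings

hash_fields = CoreSettings.get_m3u_hash_key().split(",")  # ["name", "url", "tvg_id"] by default
region = CoreSettings.get_preferred_region()              # e.g. "us", or None when unset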

267
core/redis_pubsub.py Normal file
View file

@ -0,0 +1,267 @@
"""
Redis PubSub utilities for maintaining long-lived connections.
"""
import threading
import time
import logging
import json
from redis import Redis
from redis.exceptions import ConnectionError, TimeoutError
logger = logging.getLogger(__name__)
class DummyPubSub:
"""Dummy PubSub implementation when Redis isn't available"""
def __init__(self):
pass
def subscribe(self, *args, **kwargs):
pass
def psubscribe(self, *args, **kwargs):
pass
def get_message(self, *args, **kwargs):
return None
def close(self):
pass
class RedisPubSubManager:
"""
A robust Redis PubSub manager that handles disconnections and reconnections.
"""
def __init__(self, redis_client=None, auto_reconnect=True):
"""
Initialize the PubSub manager.
Args:
redis_client: An existing Redis client to use
auto_reconnect: Whether to automatically reconnect on failure
"""
self.redis_client = redis_client
self.pubsub = None
self.subscriptions = set()
self.pattern_subscriptions = set()
self.auto_reconnect = auto_reconnect
self.running = True
self.lock = threading.RLock()
self.message_handlers = {} # Map of channels to handler functions
self.message_thread = None
self.is_dummy = redis_client is None
def subscribe(self, channel, handler=None):
"""
Subscribe to a channel.
Args:
channel: The channel to subscribe to
handler: Optional function to call when messages are received
"""
if self.is_dummy:
return
with self.lock:
self.subscriptions.add(channel)
if handler:
self.message_handlers[channel] = handler
if self.pubsub:
self.pubsub.subscribe(channel)
logger.info(f"Subscribed to channel: {channel}")
def psubscribe(self, pattern, handler=None):
"""
Subscribe to a channel pattern.
Args:
pattern: The pattern to subscribe to
handler: Optional function to call when messages are received
"""
if self.is_dummy:
return
with self.lock:
self.pattern_subscriptions.add(pattern)
if handler:
self.message_handlers[pattern] = handler
if self.pubsub:
self.pubsub.psubscribe(pattern)
logger.info(f"Subscribed to pattern: {pattern}")
def publish(self, channel, message):
"""
Publish a message to a channel.
Args:
channel: The channel to publish to
message: The message to publish (will be JSON-encoded if not a string)
Returns:
Number of clients that received the message
"""
if self.is_dummy:
return 0
try:
if not isinstance(message, str):
message = json.dumps(message)
return self.redis_client.publish(channel, message)
except Exception as e:
logger.error(f"Error publishing to {channel}: {e}")
return 0
def start_listening(self):
"""
Start listening for messages in a background thread.
"""
if self.is_dummy:
logger.debug("Running with dummy Redis client - not starting listener")
return
if not self.message_thread:
self._connect()
self.message_thread = threading.Thread(
target=self._listen_for_messages,
daemon=True,
name="redis-pubsub-listener"
)
self.message_thread.start()
logger.info("Started Redis PubSub listener thread")
def stop(self):
"""
Stop listening and clean up resources.
"""
if self.is_dummy:
return
self.running = False
if self.pubsub:
try:
self.pubsub.close()
except:
pass
self.pubsub = None
def _connect(self):
"""
Establish a new PubSub connection and subscribe to all channels.
"""
if self.is_dummy:
self.pubsub = DummyPubSub()
return
with self.lock:
# Close any existing connection
if self.pubsub:
try:
self.pubsub.close()
except:
pass
# Create a new PubSub instance - critical: no timeout for subscribe operations
# This prevents the connection from timing out while waiting for messages
self.pubsub = self.redis_client.pubsub()
# Resubscribe to all channels
if self.subscriptions:
self.pubsub.subscribe(*self.subscriptions)
logger.info(f"Resubscribed to channels: {self.subscriptions}")
# Resubscribe to all patterns
if self.pattern_subscriptions:
self.pubsub.psubscribe(*self.pattern_subscriptions)
logger.info(f"Resubscribed to patterns: {self.pattern_subscriptions}")
def _listen_for_messages(self):
"""
Background thread that listens for messages and handles reconnections.
"""
if self.is_dummy:
return
consecutive_errors = 0
while self.running:
try:
# Check if we need to connect
if not self.pubsub:
self._connect()
# Listen for messages with NO timeout - this is critical!
message = self.pubsub.get_message(timeout=None)
if message:
# Don't process subscription confirmation messages
if message['type'] in ('subscribe', 'psubscribe'):
continue
channel = message.get('channel')
if channel:
# Decode binary channel name if needed
if isinstance(channel, bytes):
channel = channel.decode('utf-8')
# Find and call the appropriate handler
handler = self.message_handlers.get(channel)
if handler:
try:
handler(message)
except Exception as e:
logger.error(f"Error in message handler for {channel}: {e}")
# Reset error counter on success
consecutive_errors = 0
# Small sleep to prevent excessive CPU usage
time.sleep(0.01)
except (ConnectionError, TimeoutError) as e:
consecutive_errors += 1
if not self.auto_reconnect:
logger.error(f"PubSub connection error and auto_reconnect is disabled: {e}")
break
# Exponential backoff for reconnection attempts
backoff = min(consecutive_errors * 0.5, 5)
logger.warning(f"PubSub connection error, reconnecting in {backoff} seconds: {e}")
time.sleep(backoff)
# Reconnect
self._connect()
except Exception as e:
logger.error(f"Unexpected error in PubSub listener: {e}")
time.sleep(1) # Prevent tight loop in case of persistent errors
logger.info("PubSub listener thread stopping")
# Create a singleton instance
pubsub_manager = None
def get_pubsub_manager(redis_client=None):
"""
Get or create the PubSub manager singleton.
Args:
redis_client: Optional Redis client to use
Returns:
The PubSub manager instance
"""
global pubsub_manager
if pubsub_manager is None:
pubsub_manager = RedisPubSubManager(redis_client)
# Only start if redis_client is not None
if redis_client is not None:
try:
pubsub_manager.start_listening()
except Exception as e:
logger.error(f"Failed to start PubSub listener: {e}")
return pubsub_manager
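Typical usage of the singleton, assuming Redis is reachable; the channel name is illustrative:

from core.utils import redis_client
from core.redis_pubsub import get_pubsub_manager

manager = get_pubsub_manager(redis_client)  # starts the listener thread when Redis is up

def on_event(message):
    # message["data"] carries the payload published below
    print("event received:", message["data"])

manager.subscribe("ts_proxy:events:demo", handler=on_event)  # illustrative channel
manager.publish("ts_proxy:events:demo", {"event": "ping"})   # dicts are JSON-encoded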

View file

@ -1,28 +1,165 @@
import redis
import logging
import time
import os
import threading
from django.conf import settings
from redis.exceptions import ConnectionError, TimeoutError
logger = logging.getLogger(__name__)
def get_redis_client():
"""Get Redis client with connection validation"""
try:
# Create Redis client
client = redis.Redis(
host=settings.REDIS_HOST,
port=getattr(settings, 'REDIS_PORT', 6379),
db=settings.REDIS_DB,
socket_timeout=5,
socket_connect_timeout=5
)
# Validate connection with ping
client.ping()
logger.info(f"Connected to Redis at {settings.REDIS_HOST}:6379/{settings.REDIS_DB}")
return client
except Exception as e:
logger.error(f"Failed to connect to Redis: {e}")
# Initialize the global client
redis_client = get_redis_client()
# Import the command detector
from .command_utils import is_management_command
def get_redis_client(max_retries=5, retry_interval=1):
"""Get Redis client with connection validation and retry logic"""
# Skip Redis connection for management commands like collectstatic
if is_management_command():
logger.info("Running as management command - skipping Redis initialization")
return None
retry_count = 0
while retry_count < max_retries:
try:
# Get connection parameters from settings or environment
redis_host = os.environ.get("REDIS_HOST", getattr(settings, 'REDIS_HOST', 'localhost'))
redis_port = int(os.environ.get("REDIS_PORT", getattr(settings, 'REDIS_PORT', 6379)))
redis_db = int(os.environ.get("REDIS_DB", getattr(settings, 'REDIS_DB', 0)))
# Use standardized settings
socket_timeout = getattr(settings, 'REDIS_SOCKET_TIMEOUT', 5)
socket_connect_timeout = getattr(settings, 'REDIS_SOCKET_CONNECT_TIMEOUT', 5)
health_check_interval = getattr(settings, 'REDIS_HEALTH_CHECK_INTERVAL', 30)
socket_keepalive = getattr(settings, 'REDIS_SOCKET_KEEPALIVE', True)
retry_on_timeout = getattr(settings, 'REDIS_RETRY_ON_TIMEOUT', True)
# Create Redis client with better defaults
client = redis.Redis(
host=redis_host,
port=redis_port,
db=redis_db,
socket_timeout=socket_timeout,
socket_connect_timeout=socket_connect_timeout,
socket_keepalive=socket_keepalive,
health_check_interval=health_check_interval,
retry_on_timeout=retry_on_timeout
)
# Validate connection with ping
client.ping()
logger.info(f"Connected to Redis at {redis_host}:{redis_port}/{redis_db}")
return client
except (ConnectionError, TimeoutError) as e:
retry_count += 1
if retry_count >= max_retries:
logger.error(f"Failed to connect to Redis after {max_retries} attempts: {e}")
return None
else:
# Use exponential backoff for retries
wait_time = retry_interval * (2 ** (retry_count - 1))
logger.warning(f"Redis connection failed. Retrying in {wait_time}s... ({retry_count}/{max_retries})")
time.sleep(wait_time)
except Exception as e:
logger.error(f"Unexpected error connecting to Redis: {e}")
return None
def get_redis_pubsub_client(max_retries=5, retry_interval=1):
"""Get Redis client optimized for PubSub operations"""
# Skip Redis connection for management commands like collectstatic
if is_management_command():
logger.info("Running as management command - skipping Redis PubSub initialization")
return None
retry_count = 0
while retry_count < max_retries:
try:
# Get connection parameters from settings or environment
redis_host = os.environ.get("REDIS_HOST", getattr(settings, 'REDIS_HOST', 'localhost'))
redis_port = int(os.environ.get("REDIS_PORT", getattr(settings, 'REDIS_PORT', 6379)))
redis_db = int(os.environ.get("REDIS_DB", getattr(settings, 'REDIS_DB', 0)))
# Use standardized settings but without socket timeouts for PubSub
# Important: socket_timeout is None for PubSub operations
socket_connect_timeout = getattr(settings, 'REDIS_SOCKET_CONNECT_TIMEOUT', 5)
socket_keepalive = getattr(settings, 'REDIS_SOCKET_KEEPALIVE', True)
health_check_interval = getattr(settings, 'REDIS_HEALTH_CHECK_INTERVAL', 30)
retry_on_timeout = getattr(settings, 'REDIS_RETRY_ON_TIMEOUT', True)
# Create Redis client with PubSub-optimized settings - no timeout
client = redis.Redis(
host=redis_host,
port=redis_port,
db=redis_db,
socket_timeout=None, # Critical: No timeout for PubSub operations
socket_connect_timeout=socket_connect_timeout,
socket_keepalive=socket_keepalive,
health_check_interval=health_check_interval,
retry_on_timeout=retry_on_timeout
)
# Validate connection with ping
client.ping()
logger.info(f"Connected to Redis for PubSub at {redis_host}:{redis_port}/{redis_db}")
# We don't need the keepalive thread anymore since we're using proper PubSub handling
return client
except (ConnectionError, TimeoutError) as e:
retry_count += 1
if retry_count >= max_retries:
logger.error(f"Failed to connect to Redis for PubSub after {max_retries} attempts: {e}")
return None
else:
# Use exponential backoff for retries
wait_time = retry_interval * (2 ** (retry_count - 1))
logger.warning(f"Redis PubSub connection failed. Retrying in {wait_time}s... ({retry_count}/{max_retries})")
time.sleep(wait_time)
except Exception as e:
logger.error(f"Unexpected error connecting to Redis for PubSub: {e}")
return None
def execute_redis_command(redis_client, command_func, default_return=None):
"""
Execute a Redis command with proper error handling
Args:
redis_client: The Redis client instance
command_func: Lambda function containing the Redis command to execute
default_return: Value to return if command fails
Returns:
Command result or default_return on failure
"""
if redis_client is None:
return default_return
try:
return command_func()
except (ConnectionError, TimeoutError) as e:
logger.warning(f"Redis connection error: {e}")
return default_return
except Exception as e:
logger.error(f"Redis command error: {e}")
return default_return
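A typical call site wraps a single command in a no-argument lambda and supplies a fallback; the key name here is illustrative:

from core.utils import redis_client, execute_redis_command

viewer_count = execute_redis_command(
    redis_client,
    lambda: redis_client.scard("ts_proxy:channel:abc123:clients"),  # illustrative key
    default_return=0,
)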
# Initialize the global clients with retry logic
# Skip Redis initialization if running as a management command
if is_management_command():
redis_client = None
redis_pubsub_client = None
logger.info("Running as management command - Redis clients set to None")
else:
redis_client = get_redis_client()
redis_pubsub_client = get_redis_pubsub_client()
# Import and initialize the PubSub manager
# Skip if running as management command or if Redis client is None
if not is_management_command() and redis_client is not None:
from .redis_pubsub import get_pubsub_manager
pubsub_manager = get_pubsub_manager(redis_client)
else:
logger.info("PubSub manager not initialized (running as management command or Redis not available)")
pubsub_manager = None

View file

@ -151,6 +151,19 @@ AUTH_USER_MODEL = 'accounts.User'
CELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL', 'redis://localhost:6379/0')
CELERY_RESULT_BACKEND = CELERY_BROKER_URL
# Configure Redis key prefix
CELERY_RESULT_BACKEND_TRANSPORT_OPTIONS = {
'prefix': 'celery-task:', # Set the Redis key prefix for Celery
}
# Set TTL (Time-to-Live) for task results (in seconds)
CELERY_RESULT_EXPIRES = 3600 # 1 hour TTL for task results
# Optionally, set visibility timeout for task retries (if using Redis)
CELERY_BROKER_TRANSPORT_OPTIONS = {
'visibility_timeout': 3600, # Time in seconds that a task remains invisible during retries
}
CELERY_BEAT_SCHEDULE = {
'fetch-channel-statuses': {
'task': 'apps.proxy.tasks.fetch_channel_stats',
@ -179,8 +192,15 @@ SIMPLE_JWT = {
'BLACKLIST_AFTER_ROTATION': True, # Optional: Whether to blacklist refresh tokens
}
# Redis settings for TS proxy
# Redis connection settings
REDIS_URL = 'redis://localhost:6379/0'
REDIS_SOCKET_TIMEOUT = 60 # Socket timeout in seconds
REDIS_SOCKET_CONNECT_TIMEOUT = 5 # Connection timeout in seconds
REDIS_HEALTH_CHECK_INTERVAL = 15 # Health check every 15 seconds
REDIS_SOCKET_KEEPALIVE = True # Enable socket keepalive
REDIS_RETRY_ON_TIMEOUT = True # Retry on timeout
REDIS_MAX_RETRIES = 10 # Maximum number of retries
REDIS_RETRY_INTERVAL = 1 # Initial retry interval in seconds
# Proxy Settings
PROXY_SETTINGS = {

View file

@ -1,9 +1,13 @@
FROM python:3.13-slim AS builder
# Define build argument with default value of "main"
ARG BRANCH=main
ENV PATH="/dispatcharrpy/bin:$PATH" \
VIRTUAL_ENV=/dispatcharrpy \
DJANGO_SETTINGS_MODULE=dispatcharr.settings \
PYTHONUNBUFFERED=1
PYTHONUNBUFFERED=1 \
DISPATCHARR_BUILD=1
RUN apt-get update && \
apt-get install -y --no-install-recommends \
@ -23,14 +27,14 @@ RUN apt-get update && \
nodejs && \
python -m pip install virtualenv && \
virtualenv /dispatcharrpy && \
git clone https://github.com/Dispatcharr/Dispatcharr /app && \
git clone -b ${BRANCH} https://github.com/Dispatcharr/Dispatcharr /app && \
cd /app && \
rm -rf .git && \
cd /app && \
pip install --no-cache-dir -r requirements.txt && \
python manage.py collectstatic --noinput && \
cd /app/frontend && \
npm install && \
npm install --legacy-peer-deps && \
npm run build && \
find . -maxdepth 1 ! -name '.' ! -name 'dist' -exec rm -rf '{}' \;
@ -45,43 +49,44 @@ ENV PATH="/dispatcharrpy/bin:$PATH" \
COPY --from=builder /dispatcharrpy /dispatcharrpy
COPY --from=builder /app /app
# Install base dependencies with memory optimization
RUN apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
    curl \
    ffmpeg \
    libpcre3 \
    libpq-dev \
    nginx \
    procps \
    streamlink \
    wget \
    gnupg2 \
    lsb-release && \
    cp /app/docker/nginx.conf /etc/nginx/sites-enabled/default && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
# Set up Redis repository in a separate step
RUN curl -fsSL https://packages.redis.io/gpg | gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg && \
    chmod 644 /usr/share/keyrings/redis-archive-keyring.gpg && \
    echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | tee /etc/apt/sources.list.d/redis.list && \
    apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y redis-server && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
# Set up PostgreSQL repository and install in a separate step
RUN echo "=== setting up postgres ====" && \
    sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' && \
    wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor -o /usr/share/keyrings/postgresql-keyring.gpg && \
    echo "deb [signed-by=/usr/share/keyrings/postgresql-keyring.gpg] http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list && \
    apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y postgresql-14 postgresql-contrib-14 && \
    mkdir -p /data && \
    apt-get remove -y gnupg2 lsb-release && \
    apt-get clean && \
    apt-get autoremove -y && \
    rm -rf /tmp/* /var/lib/apt/lists/* /var/tmp/*
WORKDIR /app

View file

@ -1,3 +1,3 @@
#!/bin/bash
docker build -t dispatcharr/dispatcharr:dev -f Dockerfile ..
docker build --build-arg BRANCH=dev -t dispatcharr/dispatcharr:dev -f Dockerfile ..

View file

@ -85,10 +85,6 @@ else
pids+=("$nginx_pid")
fi
cd /app
python manage.py migrate --noinput
python manage.py collectstatic --noinput
uwsgi_file="/app/docker/uwsgi.ini"
if [ "$DISPATCHARR_ENV" = "dev" ]; then
uwsgi_file="/app/docker/uwsgi.dev.ini"
@ -100,6 +96,12 @@ uwsgi_pid=$(pgrep uwsgi | sort | head -n1)
echo "✅ uwsgi started with PID $uwsgi_pid"
pids+=("$uwsgi_pid")
cd /app
python manage.py migrate --noinput
python manage.py collectstatic --noinput
# Wait for at least one process to exit and log the process that exited first
if [ ${#pids[@]} -gt 0 ]; then
echo "⏳ Waiting for processes to exit..."

View file

@ -10,4 +10,10 @@ if [ "$(id -u)" = "0" ]; then
chown -R $PUID:$PGID /app
chown $PUID:www-data /app/uwsgi.sock
chmod 777 /app/uwsgi.sock
# Create and set permissions for the cached_m3u directory
mkdir -p /app/media/cached_m3u
chown -R $PUID:$PGID /app/media/cached_m3u
chmod 777 /app/media/cached_m3u
echo "Created and set permissions for cached_m3u directory"
fi

View file

@ -2,9 +2,14 @@
; exec-before = python manage.py collectstatic --noinput
; exec-before = python manage.py migrate --noinput
; First run Redis availability check script once
exec-pre = python /app/scripts/wait_for_redis.py
; Start Redis first
attach-daemon = redis-server
; Then start other services
attach-daemon = celery -A dispatcharr worker -l info
attach-daemon = celery -A dispatcharr beat -l info
attach-daemon = redis-server
attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application
attach-daemon = cd /app/frontend && npm run dev

View file

@ -2,9 +2,14 @@
; exec-before = python manage.py collectstatic --noinput
; exec-before = python manage.py migrate --noinput
; First run Redis availability check script once
exec-pre = python /app/scripts/wait_for_redis.py
; Start Redis first
attach-daemon = redis-server
; Then start other services
attach-daemon = celery -A dispatcharr worker -l error
attach-daemon = celery -A dispatcharr beat -l error
attach-daemon = redis-server
attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application
# Core settings

View file

@ -26,6 +26,7 @@ import './index.css';
import mantineTheme from './mantineTheme';
import API from './api';
import { Notifications } from '@mantine/notifications';
import M3URefreshNotification from './components/M3URefreshNotification';
const drawerWidth = 240;
const miniDrawerWidth = 60;
@ -143,6 +144,7 @@ const App = () => {
</Box>
</AppShell.Main>
</AppShell>
<M3URefreshNotification />
</Router>
<FloatingVideo />

View file

@ -8,6 +8,8 @@ import React, {
import useStreamsStore from './store/streams';
import { notifications } from '@mantine/notifications';
import useChannelsStore from './store/channels';
import usePlaylistsStore from './store/playlists';
import useEPGsStore from './store/epgs';
export const WebsocketContext = createContext(false, null, () => {});
@ -16,7 +18,9 @@ export const WebsocketProvider = ({ children }) => {
const [val, setVal] = useState(null);
const { fetchStreams } = useStreamsStore();
const { setChannelStats } = useChannelsStore();
const { setChannelStats, fetchChannelGroups } = useChannelsStore();
const { setRefreshProgress } = usePlaylistsStore();
const { fetchEPGData } = useEPGsStore();
const ws = useRef(null);
@ -52,12 +56,20 @@ export const WebsocketProvider = ({ children }) => {
event = JSON.parse(event.data);
switch (event.data.type) {
case 'm3u_refresh':
console.log('inside m3u_refresh event');
if (event.data.success) {
fetchStreams();
notifications.show({
message: event.data.message,
color: 'green.5',
});
} else if (event.data.progress) {
if (event.data.progress == 100) {
fetchStreams();
fetchChannelGroups();
fetchEPGData();
}
setRefreshProgress(event.data.account, event.data.progress);
}
break;

View file

@ -581,6 +581,18 @@ export default class API {
return retval;
}
static async getEPGData() {
const response = await fetch(`${host}/api/epg/epgdata/`, {
headers: {
Authorization: `Bearer ${await API.getAuthToken()}`,
'Content-Type': 'application/json',
},
});
const retval = await response.json();
return retval;
}
// Notice there's a duplicated "refreshPlaylist" method above;
// you might want to rename or remove one if it's not needed.

View file

@ -3,6 +3,8 @@ import React, { useEffect, useRef } from 'react';
import Draggable from 'react-draggable';
import useVideoStore from '../store/useVideoStore';
import mpegts from 'mpegts.js';
import { ActionIcon, Flex } from '@mantine/core';
import { SquareX } from 'lucide-react';
export default function FloatingVideo() {
const { isVisible, streamUrl, hideVideo } = useVideoStore();
@ -65,28 +67,11 @@ export default function FloatingVideo() {
}}
>
{/* Simple header row with a close button */}
<div
style={{
display: 'flex',
justifyContent: 'flex-end',
padding: '4px',
}}
>
<button
onClick={hideVideo}
style={{
background: 'red',
color: '#fff',
border: 'none',
borderRadius: '4px',
cursor: 'pointer',
fontSize: '0.8rem',
padding: '2px 8px',
}}
>
X
</button>
</div>
<Flex justify="flex-end" style={{ padding: 3 }}>
<ActionIcon variant="transparent" onClick={hideVideo}>
<SquareX color="red" size="30" />
</ActionIcon>
</Flex>
{/* The <video> element used by mpegts.js */}
<video

View file

@ -0,0 +1,79 @@
// frontend/src/components/M3URefreshNotification.js
import React, { useState } from 'react';
import usePlaylistsStore from '../store/playlists';
import { notifications } from '@mantine/notifications';
import { IconCheck } from '@tabler/icons-react';
export default function M3URefreshNotification() {
const { playlists, refreshProgress, removeRefreshProgress } =
usePlaylistsStore();
const [progress, setProgress] = useState({});
const clearAccountNotification = (id) => {
removeRefreshProgress(id);
setProgress({
...progress,
[id]: null,
});
};
for (const id in refreshProgress) {
const playlist = playlists.find((pl) => pl.id == id);
if (!progress[id]) {
if (refreshProgress[id] == 100) {
// This situation is if it refreshes so fast we only get the 100% complete notification
const notificationId = notifications.show({
loading: false,
title: `M3U Refresh: ${playlist.name}`,
message: `Refresh complete!`,
icon: <IconCheck />,
});
setProgress({
...progress,
[id]: notificationId,
});
setTimeout(() => clearAccountNotification(id), 2000);
return;
}
console.log('starting progress bar');
const notificationId = notifications.show({
loading: true,
title: `M3U Refresh: ${playlist.name}`,
message: `Starting...`,
autoClose: false,
withCloseButton: false,
});
setProgress({
...progress,
[id]: notificationId,
});
} else {
if (refreshProgress[id] == 0) {
notifications.update({
id: progress[id],
message: `Starting...`,
});
} else if (refreshProgress[id] == 100) {
notifications.update({
id: progress[id],
message: `Refresh complete!`,
loading: false,
autoClose: 2000,
icon: <IconCheck />,
});
setTimeout(() => clearAccountNotification(id), 2000);
} else {
notifications.update({
id: progress[id],
message: `Updating M3U: ${refreshProgress[id]}%`,
});
}
}
}
return <></>;
}

View file

@ -21,14 +21,17 @@ import {
Center,
Grid,
Flex,
Select,
} from '@mantine/core';
import { SquarePlus } from 'lucide-react';
import useEPGsStore from '../../store/epgs';
const Channel = ({ channel = null, isOpen, onClose }) => {
const channelGroups = useChannelsStore((state) => state.channelGroups);
const streams = useStreamsStore((state) => state.streams);
const { profiles: streamProfiles } = useStreamProfilesStore();
const { playlists } = usePlaylistsStore();
const { tvgs } = useEPGsStore();
const [logoFile, setLogoFile] = useState(null);
const [logoPreview, setLogoPreview] = useState(logo);
@ -60,7 +63,7 @@ const Channel = ({ channel = null, isOpen, onClose }) => {
name: '',
channel_number: '',
channel_group_id: '',
stream_profile_id: null,
stream_profile_id: '0',
tvg_id: '',
tvg_name: '',
},
@ -74,7 +77,6 @@ const Channel = ({ channel = null, isOpen, onClose }) => {
values.stream_profile_id = null;
}
console.log(values);
if (channel?.id) {
await API.updateChannel({
id: channel.id,
@ -104,7 +106,7 @@ const Channel = ({ channel = null, isOpen, onClose }) => {
name: channel.name,
channel_number: channel.channel_number,
channel_group_id: channel.channel_group?.id,
stream_profile_id: channel.stream_profile_id,
stream_profile_id: channel.stream_profile_id || '0',
tvg_id: channel.tvg_id,
tvg_name: channel.tvg_name,
});
@ -248,6 +250,8 @@ const Channel = ({ channel = null, isOpen, onClose }) => {
return <></>;
}
console.log(streamProfiles);
return (
<>
<Modal opened={isOpen} onClose={onClose} size={800} title="Channel">
@ -265,7 +269,7 @@ const Channel = ({ channel = null, isOpen, onClose }) => {
<Grid>
<Grid.Col span={11}>
<NativeSelect
<Select
id="channel_group_id"
name="channel_group_id"
label="Channel Group"
@ -276,7 +280,7 @@ const Channel = ({ channel = null, isOpen, onClose }) => {
? formik.touched.channel_group_id
: ''
}
data={channelGroups.map((option, index) => ({
data={Object.values(channelGroups).map((option, index) => ({
value: `${option.id}`,
label: option.name,
}))}
@ -296,18 +300,20 @@ const Channel = ({ channel = null, isOpen, onClose }) => {
</Grid.Col>
</Grid>
<NativeSelect
<Select
id="stream_profile_id"
label="Stream Profile"
name="stream_profile_id"
value={formik.values.stream_profile_id}
onChange={formik.handleChange}
onChange={(value) => {
formik.setFieldValue('stream_profile_id', value); // Update Formik's state with the new value
}}
error={
formik.errors.stream_profile_id
? formik.touched.stream_profile_id
: ''
}
data={[{ value: null, label: '(use default)' }].concat(
data={[{ value: '0', label: '(use default)' }].concat(
streamProfiles.map((option) => ({
value: `${option.id}`,
label: option.name,
@ -339,13 +345,20 @@ const Channel = ({ channel = null, isOpen, onClose }) => {
error={formik.errors.tvg_name ? formik.touched.tvg_name : ''}
/>
<TextInput
<Select
id="tvg_id"
name="tvg_id"
label="TVG ID"
searchable
value={formik.values.tvg_id}
onChange={formik.handleChange}
error={formik.errors.tvg_id ? formik.touched.tvg_id : ''}
onChange={(value) => {
formik.setFieldValue('tvg_id', value); // Update Formik's state with the new value
}}
error={formik.errors.tvg_id}
data={tvgs.map((tvg) => ({
value: tvg.name,
label: tvg.tvg_id,
}))}
/>
<TextInput

View file

@ -17,11 +17,17 @@ import {
Select,
Space,
} from '@mantine/core';
import M3UGroupFilter from './M3UGroupFilter';
import useChannelsStore from '../../store/channels';
const M3U = ({ playlist = null, isOpen, onClose }) => {
const userAgents = useUserAgentsStore((state) => state.userAgents);
const M3U = ({ playlist = null, isOpen, onClose, playlistCreated = false }) => {
const { userAgents } = useUserAgentsStore();
const { fetchChannelGroups } = useChannelsStore();
const [file, setFile] = useState(null);
const [profileModalOpen, setProfileModalOpen] = useState(false);
const [groupFilterModalOpen, setGroupFilterModalOpen] = useState(false);
const [loadingText, setLoadingText] = useState('');
const handleFileChange = (file) => {
if (file) {
@ -43,6 +49,7 @@ const M3U = ({ playlist = null, isOpen, onClose }) => {
max_streams: Yup.string().required('Max streams is required'),
}),
onSubmit: async (values, { setSubmitting, resetForm }) => {
let newPlaylist;
if (playlist?.id) {
await API.updatePlaylist({
id: playlist.id,
@ -50,19 +57,31 @@ const M3U = ({ playlist = null, isOpen, onClose }) => {
uploaded_file: file,
});
} else {
await API.addPlaylist({
setLoadingText('Loading groups...');
newPlaylist = await API.addPlaylist({
...values,
uploaded_file: file,
});
await fetchChannelGroups();
}
resetForm();
setFile(null);
setSubmitting(false);
onClose();
onClose(newPlaylist);
},
});
const closeGroupFilter = () => {
setGroupFilterModalOpen(false);
if (playlistCreated) {
formik.resetForm();
setFile(null);
onClose();
}
};
useEffect(() => {
if (playlist) {
formik.setValues({
@ -77,15 +96,25 @@ const M3U = ({ playlist = null, isOpen, onClose }) => {
}
}, [playlist]);
useEffect(() => {
if (playlistCreated) {
setGroupFilterModalOpen(true);
}
}, [playlist, playlistCreated]);
if (!isOpen) {
return <></>;
}
return (
<Modal opened={isOpen} onClose={onClose} title="M3U Account">
<div style={{ width: 400, position: 'relative' }}>
<LoadingOverlay visible={formik.isSubmitting} overlayBlur={2} />
<LoadingOverlay
visible={formik.isSubmitting}
overlayBlur={2}
loaderProps={loadingText ? { children: loadingText } : {}}
/>
<div style={{ width: 400, position: 'relative' }}>
<form onSubmit={formik.handleSubmit}>
<TextInput
fullWidth
@ -156,14 +185,24 @@ const M3U = ({ playlist = null, isOpen, onClose }) => {
<Flex mih={50} gap="xs" justify="flex-end" align="flex-end">
{playlist && (
<Button
variant="contained"
color="primary"
size="small"
onClick={() => setProfileModalOpen(true)}
>
Profiles
</Button>
<>
<Button
variant="contained"
color="primary"
size="small"
onClick={() => setGroupFilterModalOpen(true)}
>
Groups
</Button>
<Button
variant="contained"
color="primary"
size="small"
onClick={() => setProfileModalOpen(true)}
>
Profiles
</Button>
</>
)}
<Button
type="submit"
@ -176,11 +215,18 @@ const M3U = ({ playlist = null, isOpen, onClose }) => {
</Button>
</Flex>
{playlist && (
<M3UProfiles
playlist={playlist}
isOpen={profileModalOpen}
onClose={() => setProfileModalOpen(false)}
/>
<>
<M3UProfiles
playlist={playlist}
isOpen={profileModalOpen}
onClose={() => setProfileModalOpen(false)}
/>
<M3UGroupFilter
isOpen={groupFilterModalOpen}
playlist={playlist}
onClose={closeGroupFilter}
/>
</>
)}
</form>
</div>

View file

@ -0,0 +1,147 @@
// frontend/src/components/M3UGroupFilter.js
import React, { useState, useEffect } from 'react';
import { useFormik } from 'formik';
import * as Yup from 'yup';
import API from '../../api';
import M3UProfiles from './M3UProfiles';
import {
LoadingOverlay,
TextInput,
Button,
Checkbox,
Modal,
Flex,
NativeSelect,
FileInput,
Select,
Space,
Chip,
Stack,
Group,
Center,
SimpleGrid,
} from '@mantine/core';
import useChannelsStore from '../../store/channels';
import { CircleCheck, CircleX } from 'lucide-react';
const M3UGroupFilter = ({ playlist = null, isOpen, onClose }) => {
const { channelGroups } = useChannelsStore();
const [groupStates, setGroupStates] = useState([]);
const [isLoading, setIsLoading] = useState(false);
const [groupFilter, setGroupFilter] = useState('');
useEffect(() => {
if (Object.keys(channelGroups).length === 0) {
return;
}
setGroupStates(
playlist.channel_groups.map((group) => ({
...group,
name: channelGroups[group.channel_group].name,
}))
);
}, [channelGroups]);
const toggleGroupEnabled = (id) => {
setGroupStates(
groupStates.map((state) => ({
...state,
        enabled: state.channel_group === id ? !state.enabled : state.enabled,
}))
);
};
const submit = async () => {
setIsLoading(true);
await API.updatePlaylist({
...playlist,
channel_groups: groupStates,
});
setIsLoading(false);
onClose();
};
const selectAll = () => {
setGroupStates(
groupStates.map((state) => ({
...state,
enabled: state.name.toLowerCase().includes(groupFilter.toLowerCase())
? true
: state.enabled,
}))
);
};
const deselectAll = () => {
setGroupStates(
groupStates.map((state) => ({
...state,
enabled: state.name.toLowerCase().includes(groupFilter.toLowerCase())
? false
: state.enabled,
}))
);
};
if (!isOpen) {
return <></>;
}
return (
<Modal opened={isOpen} onClose={onClose} title="M3U Group Filter" size="xl">
<LoadingOverlay visible={isLoading} overlayBlur={2} />
<Stack>
<Flex gap="sm">
<TextInput
placeholder="Filter"
value={groupFilter}
onChange={(event) => setGroupFilter(event.currentTarget.value)}
style={{ flex: 1 }}
/>
<Button variant="default" size="sm" onClick={selectAll}>
Select Visible
</Button>
<Button variant="default" size="sm" onClick={deselectAll}>
Deselect Visible
</Button>
</Flex>
<SimpleGrid cols={4}>
{groupStates
.filter((group) =>
group.name.toLowerCase().includes(groupFilter.toLowerCase())
)
.map((group) => (
            <Button
              key={group.channel_group}
              color={group.enabled ? 'green' : 'gray'}
              variant="filled"
onClick={() => toggleGroupEnabled(group.channel_group)}
radius="xl"
leftSection={group.enabled ? <CircleCheck /> : <CircleX />}
justify="left"
>
{group.name}
</Button>
))}
</SimpleGrid>
<Flex mih={50} gap="xs" justify="flex-end" align="flex-end">
<Button
type="submit"
variant="contained"
color="primary"
disabled={isLoading}
size="small"
onClick={submit}
>
Save and Refresh
</Button>
</Flex>
</Stack>
</Modal>
);
};
export default M3UGroupFilter;

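For reference, M3UGroupFilter assumes each entry in playlist.channel_groups pairs a ChannelGroup id with an enabled flag. A minimal sketch of the prop shape it consumes; the values are illustrative, inferred from the component itself rather than from an API schema:

// Hypothetical playlist prop (values illustrative, not from the commit):
const playlist = {
  id: 1,
  name: 'Provider A',
  channel_groups: [
    { channel_group: 10, enabled: true },  // ChannelGroup id + whether its streams are kept
    { channel_group: 11, enabled: false },
  ],
};
// Saving sends the toggled list back via API.updatePlaylist({ ...playlist, channel_groups: groupStates }).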
View file

@ -5,16 +5,18 @@ import * as Yup from 'yup';
import API from '../../api';
import useStreamProfilesStore from '../../store/streamProfiles';
import { Modal, TextInput, Select, Button, Flex } from '@mantine/core';
import useChannelsStore from '../../store/channels';
const Stream = ({ stream = null, isOpen, onClose }) => {
const streamProfiles = useStreamProfilesStore((state) => state.profiles);
const { channelGroups } = useChannelsStore();
const [selectedStreamProfile, setSelectedStreamProfile] = useState('');
const formik = useFormik({
initialValues: {
name: '',
url: '',
group_name: '',
channel_group: null,
stream_profile_id: '',
},
validationSchema: Yup.object({
@ -23,6 +25,7 @@ const Stream = ({ stream = null, isOpen, onClose }) => {
// stream_profile_id: Yup.string().required('Stream profile is required'),
}),
onSubmit: async (values, { setSubmitting, resetForm }) => {
if (stream?.id) {
await API.updateStream({ id: stream.id, ...values });
} else {
@ -40,7 +43,7 @@ const Stream = ({ stream = null, isOpen, onClose }) => {
formik.setValues({
name: stream.name,
url: stream.url,
group_name: stream.group_name,
channel_group: stream.channel_group,
stream_profile_id: stream.stream_profile_id,
});
} else {
@ -73,13 +76,19 @@ const Stream = ({ stream = null, isOpen, onClose }) => {
error={formik.errors.url}
/>
<TextInput
id="group_name"
name="group_name"
<Select
id="channel_group"
name="channel_group"
label="Group"
value={formik.values.group_name}
onChange={formik.handleChange}
error={formik.errors.group_name}
value={formik.values.channel_group}
onChange={(value) => {
formik.setFieldValue('channel_group', value); // Update Formik's state with the new value
}}
error={formik.errors.channel_group}
            data={Object.values(channelGroups).map((group) => ({
label: group.name,
value: `${group.id}`,
}))}
/>
<Select
@ -87,8 +96,10 @@ const Stream = ({ stream = null, isOpen, onClose }) => {
name="stream_profile_id"
label="Stream Profile"
placeholder="Optional"
value={selectedStreamProfile}
onChange={setSelectedStreamProfile}
value={formik.values.stream_profile_id}
onChange={(value) => {
formik.setFieldValue('stream_profile_id', value); // Update Formik's state with the new value
}}
error={formik.errors.stream_profile_id}
data={streamProfiles.map((profile) => ({
label: profile.name,

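The net effect of the form change, sketched as the payload the Stream form now submits. Values are illustrative; the ids arrive as strings because Mantine Select values are strings, and the API is presumably expected to coerce them:

// Illustrative onSubmit values after the switch from free-text group_name:
{
  name: 'Example Stream',
  url: 'http://example.com/stream.m3u8',
  channel_group: '3',      // ChannelGroup id, replacing the old group_name text
  stream_profile_id: '2',  // now bound to formik state instead of a local useState
}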
View file

@ -295,7 +295,7 @@ const ChannelsTable = ({}) => {
alignItems: 'center',
}}
>
<img src={cell.getValue() || logo} width="20" alt="channel logo" />
<img src={cell.getValue() || logo} height="20" alt="channel logo" />
</Grid>
),
meta: {
@ -384,7 +384,6 @@ const ChannelsTable = ({}) => {
}
};
const closeChannelForm = () => {
setChannel(null);
setChannelModalOpen(false);

View file

@ -38,14 +38,17 @@ import {
IconSortAscendingNumbers,
IconSquarePlus,
} from '@tabler/icons-react'; // Import custom icons
import M3UGroupFilter from '../forms/M3UGroupFilter';
const Example = () => {
const [playlist, setPlaylist] = useState(null);
const [playlistModalOpen, setPlaylistModalOpen] = useState(false);
const [groupFilterModalOpen, setGroupFilterModalOpen] = useState(false);
const [rowSelection, setRowSelection] = useState([]);
const [activeFilterValue, setActiveFilterValue] = useState('all');
const [playlistCreated, setPlaylistCreated] = useState(false);
const playlists = usePlaylistsStore((state) => state.playlists);
const { playlists, setRefreshProgress } = usePlaylistsStore();
const theme = useMantineTheme();
@ -109,15 +112,22 @@ const Example = () => {
const refreshPlaylist = async (id) => {
await API.refreshPlaylist(id);
setRefreshProgress(id, 0);
};
const deletePlaylist = async (id) => {
await API.deletePlaylist(id);
};
const closeModal = () => {
setPlaylistModalOpen(false);
setPlaylist(null);
const closeModal = (newPlaylist = null) => {
if (newPlaylist) {
setPlaylistCreated(true);
setPlaylist(newPlaylist);
} else {
setPlaylistModalOpen(false);
setPlaylist(null);
setPlaylistCreated(false);
}
};
const deletePlaylists = async (ids) => {
@ -266,6 +276,7 @@ const Example = () => {
playlist={playlist}
isOpen={playlistModalOpen}
onClose={closeModal}
playlistCreated={playlistCreated}
/>
</Box>
);

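Putting the two modals together, creation now hands off from the M3U form to the group filter. A condensed, hypothetical driver for the sequence, assembled from this diff and the M3U form above (not part of the commit):

async function createPlaylistFlow(values, file) {
  const newPlaylist = await API.addPlaylist({ ...values, uploaded_file: file });
  onClose(newPlaylist);          // closeModal stores the playlist and sets playlistCreated
  // the M3U form's effect sees playlistCreated and opens M3UGroupFilter;
  // closeGroupFilter() then calls onClose() with no argument, which clears
  // playlist and playlistCreated and finally closes the modal
}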
View file

@ -15,15 +15,18 @@ import {
Flex,
Button,
useMantineTheme,
Center,
Switch,
} from '@mantine/core';
import { IconSquarePlus } from '@tabler/icons-react';
import { SquareMinus, SquarePen, Check, X } from 'lucide-react';
import { SquareMinus, SquarePen, Check, X, Eye, EyeOff } from 'lucide-react';
const StreamProfiles = () => {
const [profile, setProfile] = useState(null);
const [profileModalOpen, setProfileModalOpen] = useState(false);
const [rowSelection, setRowSelection] = useState([]);
const [activeFilterValue, setActiveFilterValue] = useState('all');
const [hideInactive, setHideInactive] = useState(false);
const streamProfiles = useStreamProfilesStore((state) => state.profiles);
const { settings } = useSettingsStore();
@ -36,27 +39,37 @@ const StreamProfiles = () => {
{
header: 'Name',
accessorKey: 'name',
size: 50,
},
{
header: 'Command',
accessorKey: 'command',
size: 100,
},
{
header: 'Parameters',
accessorKey: 'parameters',
mantineTableBodyCellProps: {
style: {
whiteSpace: 'nowrap',
// maxWidth: 400,
paddingLeft: 10,
paddingRight: 10,
},
},
},
{
header: 'Active',
accessorKey: 'is_active',
size: 100,
sortingFn: 'basic',
muiTableBodyCellProps: {
align: 'left',
},
Cell: ({ cell }) => (
<Box sx={{ display: 'flex', justifyContent: 'center' }}>
{cell.getValue() ? <Check color="green" /> : <X color="red" />}
</Box>
size: 50,
Cell: ({ row, cell }) => (
<Center>
<Switch
size="xs"
checked={cell.getValue()}
onChange={() => toggleProfileIsActive(row.original)}
/>
</Center>
),
Filter: ({ column }) => (
<Box>
@ -124,10 +137,26 @@ const StreamProfiles = () => {
}
}, [sorting]);
const toggleHideInactive = () => {
setHideInactive(!hideInactive);
};
const toggleProfileIsActive = async (profile) => {
await API.updateStreamProfile({
id: profile.id,
...profile,
is_active: !profile.is_active,
});
};
const filteredData = streamProfiles.filter((profile) =>
hideInactive && !profile.is_active ? false : true
);
const table = useMantineReactTable({
...TableHelper.defaultProperties,
columns,
data: streamProfiles,
data: filteredData,
enablePagination: false,
enableRowVirtualization: true,
// enableRowSelection: true,
@ -144,6 +173,11 @@ const StreamProfiles = () => {
initialState: {
density: 'compact',
},
displayColumnDefOptions: {
'mrt-row-actions': {
size: 10,
},
},
enableRowActions: true,
renderRowActions: ({ row }) => (
<>
@ -151,6 +185,7 @@ const StreamProfiles = () => {
variant="transparent"
color="yellow.5"
size="sm"
disabled={row.original.locked}
onClick={() => editStreamProfile(row.original)}
>
<SquarePen size="18" /> {/* Small icon size */}
@ -159,6 +194,7 @@ const StreamProfiles = () => {
variant="transparent"
size="sm"
color="red.9"
disabled={row.original.locked}
onClick={() => deleteStreamProfile(row.original.id)}
>
<SquareMinus fontSize="small" /> {/* Small icon size */}
@ -217,6 +253,21 @@ const StreamProfiles = () => {
}}
>
<Flex gap={6}>
<Tooltip label={hideInactive ? 'Show All' : 'Hide Inactive'}>
<Center>
<ActionIcon
onClick={toggleHideInactive}
variant="filled"
color="gray"
style={{
borderWidth: '1px',
borderColor: 'white',
}}
>
{hideInactive ? <EyeOff size={18} /> : <Eye size={18} />}
</ActionIcon>
</Center>
</Tooltip>
<Tooltip label="Assign">
<Button
leftSection={<IconSquarePlus size={18} />}

View file

@ -32,6 +32,7 @@ import {
Group,
NumberInput,
NativeSelect,
MultiSelect,
} from '@mantine/core';
import {
IconArrowDown,
@ -71,7 +72,7 @@ const StreamsTable = ({}) => {
});
const [filters, setFilters] = useState({
name: '',
group_name: '',
channel_group: '',
m3u_account: '',
});
const debouncedFilters = useDebounce(filters, 500);
@ -83,7 +84,7 @@ const StreamsTable = ({}) => {
* Stores
*/
const { playlists } = usePlaylistsStore();
const { channelsPageSelection } = useChannelsStore();
const { channelGroups, channelsPageSelection } = useChannelsStore();
const channelSelectionStreams = useChannelsStore(
(state) => state.channels[state.channelsPageSelection[0]?.id]?.streams
);
@ -133,11 +134,11 @@ const StreamsTable = ({}) => {
},
{
header: 'Group',
accessorKey: 'group_name',
      accessorFn: (row) => channelGroups[row.channel_group]?.name,
size: 100,
Header: ({ column }) => (
<Box onClick={handleSelectClick}>
<Select
<MultiSelect
placeholder="Group"
searchable
size="xs"
@ -200,7 +201,7 @@ const StreamsTable = ({}) => {
const handleGroupChange = (value) => {
setFilters((prev) => ({
...prev,
group_name: value ? value : '',
channel_group: value ? value : '',
}));
};

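Since the Group header now renders a MultiSelect, the filter value is an array of group names rather than a single string. A hedged sketch of how the debounced filters might translate into request parameters; the parameter names and the comma-joining are assumptions, as this diff does not show the fetch itself:

// Hypothetical translation of debouncedFilters into query params:
const params = {};
if (debouncedFilters.name) params.name = debouncedFilters.name;
if (debouncedFilters.channel_group?.length) {
  params.channel_group = debouncedFilters.channel_group.join(',');
}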
View file

@ -1,5 +1,18 @@
import React, { useMemo, useState, useEffect, useCallback } from 'react';
import { ActionIcon, Box, Center, Grid, Text } from '@mantine/core';
import {
ActionIcon,
Box,
Card,
Center,
Flex,
Grid,
Group,
SimpleGrid,
Stack,
Text,
Title,
Tooltip,
} from '@mantine/core';
import { MantineReactTable, useMantineReactTable } from 'mantine-react-table';
import { TableHelper } from '../helpers';
import API from '../api';
@ -15,7 +28,32 @@ import {
Binary,
ArrowDown01,
SquareX,
Timer,
} from 'lucide-react';
import dayjs from 'dayjs';
import duration from 'dayjs/plugin/duration';
import relativeTime from 'dayjs/plugin/relativeTime';
dayjs.extend(duration);
dayjs.extend(relativeTime);
const getStartDate = (uptime) => {
// Get the current date and time
const currentDate = new Date();
// Calculate the start date by subtracting uptime (in milliseconds)
const startDate = new Date(currentDate.getTime() - uptime * 1000);
  // Format as a locale string; toLocaleString's options object is its *second*
  // argument, so pass undefined for locales to use the runtime default
  return startDate.toLocaleString(undefined, {
weekday: 'short', // optional, adds day of the week
year: 'numeric',
month: '2-digit',
day: '2-digit',
hour: '2-digit',
minute: '2-digit',
second: '2-digit',
hour12: true, // 12-hour format with AM/PM
});
};
const ChannelsPage = () => {
const { channels, channelsByUUID, stats: channelStats } = useChannelsStore();
@ -146,32 +184,6 @@ const ChannelsPage = () => {
data: clients,
columns: useMemo(
() => [
{
id: 'logo',
header: 'Logo',
accessorKey: 'channel.logo_url',
size: 50,
Cell: ({ cell }) => (
<Center>
<img
src={cell.getValue() || logo}
width="20"
alt="channel logo"
/>
</Center>
),
},
{
header: 'Channel',
accessorKey: 'channel.name',
size: 100,
mantineTableBodyCellProps: {
style: {
whiteSpace: 'nowrap',
maxWidth: 100,
},
},
},
{
header: 'User-Agent',
accessorKey: 'user_agent',
@ -180,6 +192,8 @@ const ChannelsPage = () => {
style: {
whiteSpace: 'nowrap',
maxWidth: 400,
paddingLeft: 10,
paddingRight: 10,
},
},
},
@ -243,48 +257,54 @@ const ChannelsPage = () => {
}, [channelStats]);
return (
<Grid style={{ padding: 18 }}>
<Grid.Col span={6}>
<Text
w={88}
h={24}
style={{
fontFamily: 'Inter, sans-serif',
fontWeight: 500,
fontSize: '20px',
lineHeight: 1,
letterSpacing: '-0.3px',
color: 'gray.6', // Adjust this to match MUI's theme.palette.text.secondary
marginBottom: 0,
}}
>
Channels
</Text>
<Box style={{ paddingTop: 10 }}>
<MantineReactTable table={channelsTable} />
</Box>
</Grid.Col>
<Grid.Col span={6}>
<Text
w={88}
h={24}
style={{
fontFamily: 'Inter, sans-serif',
fontWeight: 500,
fontSize: '20px',
lineHeight: 1,
letterSpacing: '-0.3px',
color: 'gray.6', // Adjust this to match MUI's theme.palette.text.secondary
marginBottom: 0,
}}
>
Clients
</Text>
<Box style={{ paddingTop: 10 }}>
<MantineReactTable table={clientsTable} />
</Box>
</Grid.Col>
</Grid>
<SimpleGrid cols={2} spacing="md" style={{ padding: 10 }}>
{activeChannels.map((channel) => (
        // key assumes channel names are unique among the active channels
        <Card key={channel.name} shadow="sm" padding="lg" radius="md" withBorder>
<Stack>
<Flex justify="space-between" align="center">
<Group>
<Title order={5}>{channel.name}</Title>
<img
src={channel.logo_url || logo}
width="20"
alt="channel logo"
/>
</Group>
<Group>
<Box>
<Tooltip label={getStartDate(channel.uptime)}>
<Center>
<Timer style={{ paddingRight: 5 }} />
{dayjs.duration(channel.uptime, 'seconds').humanize()}
</Center>
</Tooltip>
</Box>
<Center>
<Tooltip label="Stop Channel">
<ActionIcon variant="transparent" color="red.9">
<SquareX size="24" />
</ActionIcon>
</Tooltip>
</Center>
</Group>
</Flex>
<Box>
<Flex
justify="space-between"
align="center"
style={{ paddingRight: 10, paddingLeft: 10 }}
>
<Text>Clients</Text>
<Text>{channel.client_count}</Text>
</Flex>
<MantineReactTable table={clientsTable} />
</Box>
</Stack>
</Card>
))}
</SimpleGrid>
);
};

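A quick check on the new uptime display, assuming dayjs with the duration and relativeTime plugins registered as above:

// Two hours of uptime:
dayjs.duration(7200, 'seconds').humanize(); // -> "2 hours"
getStartDate(7200); // -> locale string for "now minus two hours", shown in the Tooltip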
View file

@ -37,6 +37,7 @@ const useAuthStore = create((set, get) => ({
useUserAgentsStore.getState().fetchUserAgents(),
usePlaylistsStore.getState().fetchPlaylists(),
useEPGsStore.getState().fetchEPGs(),
useEPGsStore.getState().fetchEPGData(),
useStreamProfilesStore.getState().fetchProfiles(),
useSettingsStore.getState().fetchSettings(),
]);
@ -119,7 +120,7 @@ const useAuthStore = create((set, get) => ({
const refreshToken = localStorage.getItem('refreshToken') || null;
if (refreshToken) {
const loggedIn = await get().refreshToken();
const loggedIn = await get().getRefreshToken();
if (loggedIn) {
return true;
}

View file

@ -5,7 +5,7 @@ import { notifications } from '@mantine/notifications';
const useChannelsStore = create((set, get) => ({
channels: [],
channelsByUUID: {},
channelGroups: [],
channelGroups: {},
channelsPageSelection: [],
stats: {},
activeChannels: {},
@ -38,7 +38,13 @@ const useChannelsStore = create((set, get) => ({
set({ isLoading: true, error: null });
try {
const channelGroups = await api.getChannelGroups();
set({ channelGroups: channelGroups, isLoading: false });
set({
channelGroups: channelGroups.reduce((acc, group) => {
acc[group.id] = group;
return acc;
}, {}),
isLoading: false,
});
} catch (error) {
console.error('Failed to fetch channel groups:', error);
set({ error: 'Failed to load channel groups.', isLoading: false });
@ -108,14 +114,16 @@ const useChannelsStore = create((set, get) => ({
addChannelGroup: (newChannelGroup) =>
set((state) => ({
channelGroups: [...state.channelGroups, newChannelGroup],
channelGroups: {
...state.channelGroups,
[newChannelGroup.id]: newChannelGroup,
},
})),
updateChannelGroup: (channelGroup) =>
set((state) => ({
channelGroups: state.channelGroups.map((group) =>
group.id === channelGroup.id ? channelGroup : group
),
      channelGroups: {
        ...state.channelGroups,
        [channelGroup.id]: channelGroup,
      },
})),
setChannelsPageSelection: (channelsPageSelection) =>

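The payoff of keying channelGroups by id is constant-time lookups when rendering rows, with Object.values() recovering an array where one is still needed. A small sketch:

// O(1) id -> group lookup, as used by StreamsTable's accessorFn:
const groupName = channelGroups[stream.channel_group]?.name;
// Array shape on demand, e.g. for Select options:
const options = Object.values(channelGroups).map((g) => ({
  label: g.name,
  value: `${g.id}`,
}));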
View file

@ -1,8 +1,9 @@
import { create } from "zustand";
import api from "../api";
import { create } from 'zustand';
import api from '../api';
const useEPGsStore = create((set) => ({
epgs: [],
tvgs: [],
isLoading: false,
error: null,
@ -12,8 +13,19 @@ const useEPGsStore = create((set) => ({
const epgs = await api.getEPGs();
set({ epgs: epgs, isLoading: false });
} catch (error) {
console.error("Failed to fetch epgs:", error);
set({ error: "Failed to load epgs.", isLoading: false });
console.error('Failed to fetch epgs:', error);
set({ error: 'Failed to load epgs.', isLoading: false });
}
},
fetchEPGData: async () => {
set({ isLoading: true, error: null });
try {
const tvgs = await api.getEPGData();
set({ tvgs: tvgs, isLoading: false });
} catch (error) {
console.error('Failed to fetch tvgs:', error);
set({ error: 'Failed to load tvgs.', isLoading: false });
}
},

View file

@ -4,6 +4,7 @@ import api from '../api';
const usePlaylistsStore = create((set) => ({
playlists: [],
profiles: {},
refreshProgress: {},
isLoading: false,
error: null,
@ -60,6 +61,24 @@ const usePlaylistsStore = create((set) => ({
),
// @TODO: remove playlist profiles here
})),
setRefreshProgress: (id, progress) =>
set((state) => ({
refreshProgress: {
...state.refreshProgress,
[id]: progress,
},
})),
removeRefreshProgress: (id) =>
set((state) => {
const updatedProgress = { ...state.refreshProgress };
delete updatedProgress[id];
return {
refreshProgress: updatedProgress,
};
}),
}));
export default usePlaylistsStore;

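refreshProgress keeps one entry per playlist that is currently refreshing. A usage sketch; the event source reporting intermediate progress is an assumption, since this diff only shows the 0% kick-off in refreshPlaylist:

const { setRefreshProgress, removeRefreshProgress } = usePlaylistsStore.getState();
setRefreshProgress(42, 0);   // refresh started for playlist 42, as in refreshPlaylist
setRefreshProgress(42, 75);  // later updates, e.g. from a websocket event (assumption)
removeRefreshProgress(42);   // refresh finished; drop the entry entirely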
60
scripts/wait_for_redis.py Normal file
View file

@ -0,0 +1,60 @@
#!/usr/bin/env python
"""
Helper script to wait for Redis to be available before starting the application.
"""
import redis
import time
import os
import sys
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def wait_for_redis(host='localhost', port=6379, db=0, max_retries=30, retry_interval=2):
"""Wait for Redis to become available"""
redis_client = None
retry_count = 0
logger.info(f"Waiting for Redis at {host}:{port}/{db}...")
while retry_count < max_retries:
try:
redis_client = redis.Redis(
host=host,
port=port,
db=db,
socket_timeout=2,
socket_connect_timeout=2
)
redis_client.ping()
logger.info(f"✅ Redis at {host}:{port}/{db} is now available!")
return True
except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError) as e:
retry_count += 1
if retry_count >= max_retries:
logger.error(f"❌ Failed to connect to Redis after {max_retries} attempts: {e}")
return False
logger.info(f"⏳ Redis not available yet, retrying in {retry_interval}s... ({retry_count}/{max_retries})")
time.sleep(retry_interval)
except Exception as e:
logger.error(f"❌ Unexpected error connecting to Redis: {e}")
return False
return False
if __name__ == "__main__":
host = os.environ.get('REDIS_HOST', 'localhost')
port = int(os.environ.get('REDIS_PORT', 6379))
db = int(os.environ.get('REDIS_DB', 0))
max_retries = int(os.environ.get('REDIS_WAIT_RETRIES', 30))
retry_interval = int(os.environ.get('REDIS_WAIT_INTERVAL', 2))
logger.info(f"Starting Redis availability check at {host}:{port}/{db}")
if wait_for_redis(host, port, db, max_retries, retry_interval):
sys.exit(0)
else:
sys.exit(1)