diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index a1cb27bb..9186541d 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -184,13 +184,13 @@ jobs:
echo "Creating multi-arch manifest for ${OWNER}/${REPO}"
# GitHub Container Registry manifests
- # latest tag
+ # Create one manifest with both latest and version tags
docker buildx imagetools create \
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
- --annotation "index:org.opencontainers.image.version=latest" \
+ --annotation "index:org.opencontainers.image.version=${VERSION}" \
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
--annotation "index:org.opencontainers.image.licenses=See repository" \
@@ -200,9 +200,11 @@ jobs:
--annotation "index:maintainer=${{ github.actor }}" \
--annotation "index:build_version=Dispatcharr version: ${VERSION} Build date: ${TIMESTAMP}" \
--tag ghcr.io/${OWNER}/${REPO}:latest \
- ghcr.io/${OWNER}/${REPO}:latest-amd64 ghcr.io/${OWNER}/${REPO}:latest-arm64
+ --tag ghcr.io/${OWNER}/${REPO}:${VERSION} \
+ ghcr.io/${OWNER}/${REPO}:${VERSION}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-arm64
- # version tag
+ # Docker Hub manifests
+ # Create one manifest with both latest and version tags
docker buildx imagetools create \
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
@@ -217,43 +219,7 @@ jobs:
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
--annotation "index:maintainer=${{ github.actor }}" \
--annotation "index:build_version=Dispatcharr version: ${VERSION} Build date: ${TIMESTAMP}" \
- --tag ghcr.io/${OWNER}/${REPO}:${VERSION} \
- ghcr.io/${OWNER}/${REPO}:${VERSION}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-arm64
-
- # Docker Hub manifests
- # latest tag
- docker buildx imagetools create \
- --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
- --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
- --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
- --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
- --annotation "index:org.opencontainers.image.version=latest" \
- --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
- --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
- --annotation "index:org.opencontainers.image.licenses=See repository" \
- --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
- --annotation "index:org.opencontainers.image.vendor=${OWNER}" \
- --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
- --annotation "index:maintainer=${{ github.actor }}" \
- --annotation "index:build_version=Dispatcharr version: ${VERSION} Build date: ${TIMESTAMP}" \
--tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest \
- docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest-arm64
-
- # version tag
- docker buildx imagetools create \
- --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
- --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
- --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
- --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
- --annotation "index:org.opencontainers.image.version=${VERSION}" \
- --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
- --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
- --annotation "index:org.opencontainers.image.licenses=See repository" \
- --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
- --annotation "index:org.opencontainers.image.vendor=${OWNER}" \
- --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
- --annotation "index:maintainer=${{ github.actor }}" \
- --annotation "index:build_version=Dispatcharr version: ${VERSION} Build date: ${TIMESTAMP}" \
--tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION} \
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-arm64
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 114d42ce..4758b146 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,31 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
+### Added
+
+- Group retention policy for M3U accounts: Groups now follow the same stale retention logic as streams, using the account's `stale_stream_days` setting. Groups that temporarily disappear from an M3U source are retained for the configured retention period instead of being immediately deleted, preserving user settings and preventing data loss when providers temporarily remove/re-add groups. (Closes #809)
+- Visual stale indicators for streams and groups: Added `is_stale` field to Stream and both `is_stale` and `last_seen` fields to ChannelGroupM3UAccount models to track items in their retention grace period. Stale groups display with orange buttons and a warning tooltip, while stale streams show with a red background color matching the visual treatment of empty channels.
+
+### Changed
+
+- Docker setup enhanced for legacy CPU support: Added `USE_LEGACY_NUMPY` environment variable to enable custom-built NumPy with no CPU baseline, allowing Dispatcharr to run on older CPUs (circa 2009) that lack support for newer baseline CPU features. When set to `true`, the entrypoint script will install the legacy NumPy build instead of the standard distribution.
+- VOD upstream read timeout reduced from 30 seconds to 10 seconds to minimize lock hold time when clients disconnect during the connection phase.
+- Form management refactored across the application: Migrated the Channel, Stream, M3U Profile, Stream Profile, Logo, and User Agent forms from Formik to React Hook Form (RHF) with Yup validation for improved form handling, better validation feedback, and easier maintenance.
+- Stats and VOD pages refactored for clearer separation of concerns: extracted Stream/VOD connection cards (StreamConnectionCard, VodConnectionCard, VODCard, SeriesCard), moved page logic into dedicated utils, and lazy-loaded heavy components with ErrorBoundary fallbacks to improve readability and maintainability - Thanks [@nick4810](https://github.com/nick4810)
+
+### Fixed
+
+- Fixed the Channel Profile filter incorrectly applying profile membership filtering even when "Show Disabled" was enabled, which prevented all channels from being displayed. The profile filter now only applies when disabled channels are hidden. (Fixes #825)
+- Fixed manual channel creation not adding channels to channel profiles. Manually created channels are now added to the selected profile if one is active, or to all profiles if "All" is selected, matching the behavior of channels created from streams.
+- Fixed VOD streams disappearing from the stats page during playback by adding `socket-timeout = 600` to the production uWSGI config. The missing directive caused uWSGI to fall back to its default 4-second timeout, triggering premature cleanup when clients buffered content. The new value matches the existing `http-timeout = 600` setting and prevents timeout errors during normal client buffering - Thanks [@patchy8736](https://github.com/patchy8736)
+- Fixed Channels table EPG column showing "Not Assigned" on initial load for users with large EPG datasets. Added `tvgsLoaded` flag to EPG store to track when EPG data has finished loading, ensuring the table waits for EPG data before displaying. EPG cells now show animated skeleton placeholders while loading instead of incorrectly showing "Not Assigned". (Fixes #810)
+- Fixed VOD profile connection count not being decremented when a stream connection fails (timeout, 404, etc.), which previously caused profiles to hit their capacity limits and reject valid stream requests.
+- Fixed a React warning in the Channel form by removing the invalid `removeTrailingZeros` prop from the NumberInput component.
+- Release workflow Docker tagging: Fixed an issue where the `latest` and version tags (e.g., `0.16.0`) were published as separate manifests instead of pointing to the same image digest, which caused old `latest` tags to become orphaned/untagged after new releases. The workflow now creates a single multi-arch manifest carrying both tags, maintaining proper tag relationships and download statistics visibility on GitHub.
+- Fixed the onboarding message appearing in the Channels Table when filtered results are empty. The onboarding message now only displays when no channels have been created at all, not when channels exist but are hidden by the current filters.
+- Fixed `M3UMovieRelation.get_stream_url()` and `M3UEpisodeRelation.get_stream_url()` to use XC client's `_normalize_url()` method instead of simple `rstrip('/')`. This properly handles malformed M3U account URLs (e.g., containing `/player_api.php` or query parameters) before constructing VOD stream endpoints, matching behavior of live channel URL building. (Closes #722)
+- Fixed bulk_create and bulk_update errors during VOD content refresh by pre-checking object existence with optimized bulk queries (3 queries total instead of N per batch) before creating new objects. This ensures all movie/series objects have primary keys before relation operations, preventing "prohibited to prevent data loss due to unsaved related object" errors. Additionally fixed duplicate key constraint violations by treating TMDB/IMDB ID values of `0` or `'0'` as invalid (some providers use this to indicate "no ID"), converting them to NULL to prevent multiple items from incorrectly sharing the same ID. (Fixes #813)
+
## [0.16.0] - 2026-01-04
### Added
diff --git a/apps/channels/api_views.py b/apps/channels/api_views.py
index aebb74a3..6d10b942 100644
--- a/apps/channels/api_views.py
+++ b/apps/channels/api_views.py
@@ -236,12 +236,8 @@ class ChannelGroupViewSet(viewsets.ModelViewSet):
return [Authenticated()]
def get_queryset(self):
- """Add annotation for association counts"""
- from django.db.models import Count
- return ChannelGroup.objects.annotate(
- channel_count=Count('channels', distinct=True),
- m3u_account_count=Count('m3u_accounts', distinct=True)
- )
+ """Return channel groups with prefetched relations for efficient counting"""
+ return ChannelGroup.objects.prefetch_related('channels', 'm3u_accounts').all()
def update(self, request, *args, **kwargs):
"""Override update to check M3U associations"""
@@ -277,15 +273,20 @@ class ChannelGroupViewSet(viewsets.ModelViewSet):
@action(detail=False, methods=["post"], url_path="cleanup")
def cleanup_unused_groups(self, request):
"""Delete all channel groups with no channels or M3U account associations"""
- from django.db.models import Count
+ from django.db.models import Exists, OuterRef
+
+ # Find groups with no channels and no M3U account associations using Exists subqueries
+ from .models import Channel, ChannelGroupM3UAccount
+
+ has_channels = Channel.objects.filter(channel_group_id=OuterRef('pk'))
+ has_accounts = ChannelGroupM3UAccount.objects.filter(channel_group_id=OuterRef('pk'))
- # Find groups with no channels and no M3U account associations
unused_groups = ChannelGroup.objects.annotate(
- channel_count=Count('channels', distinct=True),
- m3u_account_count=Count('m3u_accounts', distinct=True)
+ has_channels=Exists(has_channels),
+ has_accounts=Exists(has_accounts)
).filter(
- channel_count=0,
- m3u_account_count=0
+ has_channels=False,
+ has_accounts=False
)
deleted_count = unused_groups.count()
@@ -386,6 +387,56 @@ class ChannelViewSet(viewsets.ModelViewSet):
ordering_fields = ["channel_number", "name", "channel_group__name"]
ordering = ["-channel_number"]
+ def create(self, request, *args, **kwargs):
+ """Override create to handle channel profile membership"""
+ serializer = self.get_serializer(data=request.data)
+ serializer.is_valid(raise_exception=True)
+
+ with transaction.atomic():
+ channel = serializer.save()
+
+ # Handle channel profile membership
+ channel_profile_ids = request.data.get("channel_profile_ids")
+ if channel_profile_ids is not None:
+ # Normalize single ID to array
+ if not isinstance(channel_profile_ids, list):
+ channel_profile_ids = [channel_profile_ids]
+
+ if channel_profile_ids:
+ # Add channel only to the specified profiles
+ try:
+ channel_profiles = ChannelProfile.objects.filter(id__in=channel_profile_ids)
+ if len(channel_profiles) != len(channel_profile_ids):
+ missing_ids = set(channel_profile_ids) - set(channel_profiles.values_list('id', flat=True))
+ return Response(
+ {"error": f"Channel profiles with IDs {list(missing_ids)} not found"},
+ status=status.HTTP_400_BAD_REQUEST,
+ )
+
+ ChannelProfileMembership.objects.bulk_create([
+ ChannelProfileMembership(
+ channel_profile=profile,
+ channel=channel,
+ enabled=True
+ )
+ for profile in channel_profiles
+ ])
+ except Exception as e:
+ return Response(
+ {"error": f"Error creating profile memberships: {str(e)}"},
+ status=status.HTTP_400_BAD_REQUEST,
+ )
+ else:
+ # Default behavior: add to all profiles
+ profiles = ChannelProfile.objects.all()
+ ChannelProfileMembership.objects.bulk_create([
+ ChannelProfileMembership(channel_profile=profile, channel=channel, enabled=True)
+ for profile in profiles
+ ])
+
+ headers = self.get_success_headers(serializer.data)
+ return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
+
def get_permissions(self):
if self.action in [
"edit_bulk",
@@ -431,10 +482,15 @@ class ChannelViewSet(viewsets.ModelViewSet):
if channel_profile_id:
try:
profile_id_int = int(channel_profile_id)
- filters["channelprofilemembership__channel_profile_id"] = profile_id_int
if show_disabled_param is None:
+ # Show only enabled channels: those with a membership
+ # record for this profile where enabled=True
+ # (channels without a membership record default to disabled and are hidden)
+ filters["channelprofilemembership__channel_profile_id"] = profile_id_int
filters["channelprofilemembership__enabled"] = True
+ # If show_disabled is True, show all channels (no filtering needed)
+
except (ValueError, TypeError):
# Ignore invalid profile id values
pass
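
A note on the query change above: `Exists` subqueries let the database stop at the first matching row, while the previous `Count(..., distinct=True)` annotations forced joins and a GROUP BY over both relations. A minimal standalone sketch of the same pattern, assuming the app's models import path (illustrative only):

```python
from django.db.models import Exists, OuterRef

from apps.channels.models import Channel, ChannelGroup, ChannelGroupM3UAccount


def unused_channel_groups():
    """Groups with no channels and no M3U account associations (illustrative sketch)."""
    has_channels = Channel.objects.filter(channel_group_id=OuterRef("pk"))
    has_accounts = ChannelGroupM3UAccount.objects.filter(channel_group_id=OuterRef("pk"))
    return (
        ChannelGroup.objects
        .annotate(has_channels=Exists(has_channels), has_accounts=Exists(has_accounts))
        .filter(has_channels=False, has_accounts=False)
    )
```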
diff --git a/apps/channels/migrations/0031_channelgroupm3uaccount_is_stale_and_more.py b/apps/channels/migrations/0031_channelgroupm3uaccount_is_stale_and_more.py
new file mode 100644
index 00000000..2428a97b
--- /dev/null
+++ b/apps/channels/migrations/0031_channelgroupm3uaccount_is_stale_and_more.py
@@ -0,0 +1,29 @@
+# Generated by Django 5.2.9 on 2026-01-09 18:19
+
+import datetime
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('dispatcharr_channels', '0030_alter_stream_url'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='channelgroupm3uaccount',
+ name='is_stale',
+ field=models.BooleanField(db_index=True, default=False, help_text='Whether this group relationship is stale (not seen in recent refresh, pending deletion)'),
+ ),
+ migrations.AddField(
+ model_name='channelgroupm3uaccount',
+ name='last_seen',
+ field=models.DateTimeField(db_index=True, default=datetime.datetime.now, help_text='Last time this group was seen in the M3U source during a refresh'),
+ ),
+ migrations.AddField(
+ model_name='stream',
+ name='is_stale',
+ field=models.BooleanField(db_index=True, default=False, help_text='Whether this stream is stale (not seen in recent refresh, pending deletion)'),
+ ),
+ ]
diff --git a/apps/channels/models.py b/apps/channels/models.py
index 88df3661..6d199520 100644
--- a/apps/channels/models.py
+++ b/apps/channels/models.py
@@ -94,6 +94,11 @@ class Stream(models.Model):
db_index=True,
)
last_seen = models.DateTimeField(db_index=True, default=datetime.now)
+ is_stale = models.BooleanField(
+ default=False,
+ db_index=True,
+ help_text="Whether this stream is stale (not seen in recent refresh, pending deletion)"
+ )
custom_properties = models.JSONField(default=dict, blank=True, null=True)
# Stream statistics fields
@@ -589,6 +594,16 @@ class ChannelGroupM3UAccount(models.Model):
blank=True,
help_text='Starting channel number for auto-created channels in this group'
)
+ last_seen = models.DateTimeField(
+ default=datetime.now,
+ db_index=True,
+ help_text='Last time this group was seen in the M3U source during a refresh'
+ )
+ is_stale = models.BooleanField(
+ default=False,
+ db_index=True,
+ help_text='Whether this group relationship is stale (not seen in recent refresh, pending deletion)'
+ )
class Meta:
unique_together = ("channel_group", "m3u_account")
diff --git a/apps/channels/serializers.py b/apps/channels/serializers.py
index 635281d5..c1919e24 100644
--- a/apps/channels/serializers.py
+++ b/apps/channels/serializers.py
@@ -119,6 +119,7 @@ class StreamSerializer(serializers.ModelSerializer):
"current_viewers",
"updated_at",
"last_seen",
+ "is_stale",
"stream_profile_id",
"is_custom",
"channel_group",
@@ -155,7 +156,7 @@ class ChannelGroupM3UAccountSerializer(serializers.ModelSerializer):
class Meta:
model = ChannelGroupM3UAccount
- fields = ["m3u_accounts", "channel_group", "enabled", "auto_channel_sync", "auto_sync_channel_start", "custom_properties"]
+ fields = ["m3u_accounts", "channel_group", "enabled", "auto_channel_sync", "auto_sync_channel_start", "custom_properties", "is_stale", "last_seen"]
def to_representation(self, instance):
data = super().to_representation(instance)
@@ -179,8 +180,8 @@ class ChannelGroupM3UAccountSerializer(serializers.ModelSerializer):
# Channel Group
#
class ChannelGroupSerializer(serializers.ModelSerializer):
- channel_count = serializers.IntegerField(read_only=True)
- m3u_account_count = serializers.IntegerField(read_only=True)
+ channel_count = serializers.SerializerMethodField()
+ m3u_account_count = serializers.SerializerMethodField()
m3u_accounts = ChannelGroupM3UAccountSerializer(
many=True,
read_only=True
@@ -190,6 +191,14 @@ class ChannelGroupSerializer(serializers.ModelSerializer):
model = ChannelGroup
fields = ["id", "name", "channel_count", "m3u_account_count", "m3u_accounts"]
+ def get_channel_count(self, obj):
+ """Get count of channels in this group"""
+ return obj.channels.count()
+
+ def get_m3u_account_count(self, obj):
+ """Get count of M3U accounts associated with this group"""
+ return obj.m3u_accounts.count()
+
class ChannelProfileSerializer(serializers.ModelSerializer):
channels = serializers.SerializerMethodField()
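
One hedged note on the serializer change above: the method-field counts now lean on the viewset's `prefetch_related` to stay efficient. `obj.channels.all()` is guaranteed to read the prefetch cache, whereas `.count()` on a related manager may bypass that cache depending on the Django version, so a cache-safe variant of the method bodies could look like this (sketch, not the project's code):

```python
# Drop-in bodies for ChannelGroupSerializer's count methods: len() over the
# prefetched queryset never issues an extra query.
def get_channel_count(self, obj):
    return len(obj.channels.all())


def get_m3u_account_count(self, obj):
    return len(obj.m3u_accounts.all())
```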
diff --git a/apps/m3u/tasks.py b/apps/m3u/tasks.py
index 87759ab9..ed9eb465 100644
--- a/apps/m3u/tasks.py
+++ b/apps/m3u/tasks.py
@@ -513,7 +513,19 @@ def check_field_lengths(streams_to_create):
@shared_task
-def process_groups(account, groups):
+def process_groups(account, groups, scan_start_time=None):
+ """Process groups and update their relationships with the M3U account.
+
+ Args:
+ account: M3UAccount instance
+ groups: Dict of group names to custom properties
+ scan_start_time: Timestamp when the scan started (for consistent last_seen marking)
+ """
+ # Use scan_start_time if provided, otherwise current time
+ # This ensures consistency with stream processing and cleanup logic
+ if scan_start_time is None:
+ scan_start_time = timezone.now()
+
existing_groups = {
group.name: group
for group in ChannelGroup.objects.filter(name__in=groups.keys())
@@ -553,24 +565,8 @@ def process_groups(account, groups):
).select_related('channel_group')
}
- # Get ALL existing relationships for this account to identify orphaned ones
- all_existing_relationships = {
- rel.channel_group.name: rel
- for rel in ChannelGroupM3UAccount.objects.filter(
- m3u_account=account
- ).select_related('channel_group')
- }
-
relations_to_create = []
relations_to_update = []
- relations_to_delete = []
-
- # Find orphaned relationships (groups that no longer exist in the source)
- current_group_names = set(groups.keys())
- for group_name, rel in all_existing_relationships.items():
- if group_name not in current_group_names:
- relations_to_delete.append(rel)
- logger.debug(f"Marking relationship for deletion: group '{group_name}' no longer exists in source for account {account.id}")
for group in all_group_objs:
custom_props = groups.get(group.name, {})
@@ -597,9 +593,15 @@ def process_groups(account, groups):
del updated_custom_props["xc_id"]
existing_rel.custom_properties = updated_custom_props
+ existing_rel.last_seen = scan_start_time
+ existing_rel.is_stale = False
relations_to_update.append(existing_rel)
logger.debug(f"Updated xc_id for group '{group.name}' from '{existing_xc_id}' to '{new_xc_id}' - account {account.id}")
else:
+ # Update last_seen even if xc_id hasn't changed
+ existing_rel.last_seen = scan_start_time
+ existing_rel.is_stale = False
+ relations_to_update.append(existing_rel)
logger.debug(f"xc_id unchanged for group '{group.name}' - account {account.id}")
else:
# Create new relationship - this group is new to this M3U account
@@ -613,6 +615,8 @@ def process_groups(account, groups):
m3u_account=account,
custom_properties=custom_props,
enabled=auto_enable_new_groups_live,
+ last_seen=scan_start_time,
+ is_stale=False,
)
)
@@ -623,15 +627,38 @@ def process_groups(account, groups):
# Bulk update existing relationships
if relations_to_update:
- ChannelGroupM3UAccount.objects.bulk_update(relations_to_update, ['custom_properties'])
- logger.info(f"Updated {len(relations_to_update)} existing group relationships with new xc_id values for account {account.id}")
+ ChannelGroupM3UAccount.objects.bulk_update(relations_to_update, ['custom_properties', 'last_seen', 'is_stale'])
+ logger.info(f"Updated {len(relations_to_update)} existing group relationships for account {account.id}")
- # Delete orphaned relationships
- if relations_to_delete:
- ChannelGroupM3UAccount.objects.filter(
- id__in=[rel.id for rel in relations_to_delete]
- ).delete()
- logger.info(f"Deleted {len(relations_to_delete)} orphaned group relationships for account {account.id}: {[rel.channel_group.name for rel in relations_to_delete]}")
+
+def cleanup_stale_group_relationships(account, scan_start_time):
+ """
+ Remove group relationships that haven't been seen within the stale retention period.
+ This follows the same retention logic as stream cleanup, for consistency.
+ """
+ # Calculate cutoff date for stale group relationships
+ stale_cutoff = scan_start_time - timezone.timedelta(days=account.stale_stream_days)
+ logger.info(
+ f"Removing group relationships not seen since {stale_cutoff} for M3U account {account.id}"
+ )
+
+ # Find stale relationships
+ stale_relationships = ChannelGroupM3UAccount.objects.filter(
+ m3u_account=account,
+ last_seen__lt=stale_cutoff
+ ).select_related('channel_group')
+
+ relations_to_delete = list(stale_relationships)
+ deleted_count = len(relations_to_delete)
+
+ if deleted_count > 0:
+ logger.info(
+ f"Found {deleted_count} stale group relationships for account {account.id}: "
+ f"{[rel.channel_group.name for rel in relations_to_delete]}"
+ )
+
+ # Delete the stale relationships
+ stale_relationships.delete()
# Check if any of the deleted relationships left groups with no remaining associations
orphaned_group_ids = []
@@ -656,6 +683,10 @@ def process_groups(account, groups):
deleted_groups = list(ChannelGroup.objects.filter(id__in=orphaned_group_ids).values_list('name', flat=True))
ChannelGroup.objects.filter(id__in=orphaned_group_ids).delete()
logger.info(f"Deleted {len(orphaned_group_ids)} orphaned groups that had no remaining associations: {deleted_groups}")
+ else:
+ logger.debug(f"No stale group relationships found for account {account.id}")
+
+ return deleted_count
def collect_xc_streams(account_id, enabled_groups):
@@ -803,6 +834,7 @@ def process_xc_category_direct(account_id, batch, groups, hash_keys):
"channel_group_id": int(group_id),
"stream_hash": stream_hash,
"custom_properties": stream,
+ "is_stale": False,
}
if stream_hash not in stream_hashes:
@@ -838,10 +870,12 @@ def process_xc_category_direct(account_id, batch, groups, hash_keys):
setattr(obj, key, value)
obj.last_seen = timezone.now()
obj.updated_at = timezone.now() # Update timestamp only for changed streams
+ obj.is_stale = False
streams_to_update.append(obj)
else:
# Always update last_seen, even if nothing else changed
obj.last_seen = timezone.now()
+ obj.is_stale = False
# Don't update updated_at for unchanged streams
streams_to_update.append(obj)
@@ -852,6 +886,7 @@ def process_xc_category_direct(account_id, batch, groups, hash_keys):
stream_props["updated_at"] = (
timezone.now()
) # Set initial updated_at for new streams
+ stream_props["is_stale"] = False
streams_to_create.append(Stream(**stream_props))
try:
@@ -863,7 +898,7 @@ def process_xc_category_direct(account_id, batch, groups, hash_keys):
# Simplified bulk update for better performance
Stream.objects.bulk_update(
streams_to_update,
- ['name', 'url', 'logo_url', 'tvg_id', 'custom_properties', 'last_seen', 'updated_at'],
+ ['name', 'url', 'logo_url', 'tvg_id', 'custom_properties', 'last_seen', 'updated_at', 'is_stale'],
batch_size=150 # Smaller batch size for XC processing
)
@@ -976,6 +1011,7 @@ def process_m3u_batch_direct(account_id, batch, groups, hash_keys):
"channel_group_id": int(groups.get(group_title)),
"stream_hash": stream_hash,
"custom_properties": stream_info["attributes"],
+ "is_stale": False,
}
if stream_hash not in stream_hashes:
@@ -1015,11 +1051,15 @@ def process_m3u_batch_direct(account_id, batch, groups, hash_keys):
obj.custom_properties = stream_props["custom_properties"]
obj.updated_at = timezone.now()
+ # Always mark as not stale since we saw it in this refresh
+ obj.is_stale = False
+
streams_to_update.append(obj)
else:
# New stream
stream_props["last_seen"] = timezone.now()
stream_props["updated_at"] = timezone.now()
+ stream_props["is_stale"] = False
streams_to_create.append(Stream(**stream_props))
try:
@@ -1031,7 +1071,7 @@ def process_m3u_batch_direct(account_id, batch, groups, hash_keys):
# Update all streams in a single bulk operation
Stream.objects.bulk_update(
streams_to_update,
- ['name', 'url', 'logo_url', 'tvg_id', 'custom_properties', 'last_seen', 'updated_at'],
+ ['name', 'url', 'logo_url', 'tvg_id', 'custom_properties', 'last_seen', 'updated_at', 'is_stale'],
batch_size=200
)
except Exception as e:
@@ -1092,7 +1132,15 @@ def cleanup_streams(account_id, scan_start_time=timezone.now):
@shared_task
-def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False):
+def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False, scan_start_time=None):
+ """Refresh M3U groups for an account.
+
+ Args:
+ account_id: ID of the M3U account
+ use_cache: Whether to use cached M3U file
+ full_refresh: Whether this is part of a full refresh
+ scan_start_time: Timestamp when the scan started (for consistent last_seen marking)
+ """
if not acquire_task_lock("refresh_m3u_account_groups", account_id):
return f"Task already running for account_id={account_id}.", None
@@ -1419,7 +1467,7 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False):
send_m3u_update(account_id, "processing_groups", 0)
- process_groups(account, groups)
+ process_groups(account, groups, scan_start_time)
release_task_lock("refresh_m3u_account_groups", account_id)
@@ -2526,7 +2574,7 @@ def refresh_single_m3u_account(account_id):
if not extinf_data:
try:
logger.info(f"Calling refresh_m3u_groups for account {account_id}")
- result = refresh_m3u_groups(account_id, full_refresh=True)
+ result = refresh_m3u_groups(account_id, full_refresh=True, scan_start_time=refresh_start_timestamp)
logger.trace(f"refresh_m3u_groups result: {result}")
# Check for completely empty result or missing groups
@@ -2806,9 +2854,26 @@ def refresh_single_m3u_account(account_id):
id=-1
).exists() # This will never find anything but ensures DB sync
+ # Mark streams that weren't seen in this refresh as stale (pending deletion)
+ stale_stream_count = Stream.objects.filter(
+ m3u_account=account,
+ last_seen__lt=refresh_start_timestamp
+ ).update(is_stale=True)
+ logger.info(f"Marked {stale_stream_count} streams as stale for account {account_id}")
+
+ # Mark group relationships that weren't seen in this refresh as stale (pending deletion)
+ stale_group_count = ChannelGroupM3UAccount.objects.filter(
+ m3u_account=account,
+ last_seen__lt=refresh_start_timestamp
+ ).update(is_stale=True)
+ logger.info(f"Marked {stale_group_count} group relationships as stale for account {account_id}")
+
# Now run cleanup
streams_deleted = cleanup_streams(account_id, refresh_start_timestamp)
+ # Cleanup stale group relationships (follows same retention policy as streams)
+ cleanup_stale_group_relationships(account, refresh_start_timestamp)
+
# Run auto channel sync after successful refresh
auto_sync_message = ""
try:
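
To summarize the retention flow introduced above, here is a minimal, self-contained sketch of the timeline logic (illustrative only; the field and setting names follow the diff, everything else is simplified):

```python
from datetime import datetime, timedelta


def classify(last_seen: datetime, refresh_start: datetime, stale_stream_days: int) -> str:
    """Illustrative retention timeline for streams and group relationships."""
    if last_seen >= refresh_start:
        return "fresh"    # seen in this refresh -> is_stale reset to False
    if last_seen >= refresh_start - timedelta(days=stale_stream_days):
        return "stale"    # missing from the source -> is_stale=True, kept for now
    return "deleted"      # missing longer than stale_stream_days -> cleaned up


# Example: with stale_stream_days=7, an item last seen 3 days before this refresh
# is kept (marked stale); one last seen 10 days before it is removed.
```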
diff --git a/apps/proxy/vod_proxy/multi_worker_connection_manager.py b/apps/proxy/vod_proxy/multi_worker_connection_manager.py
index 251721c5..1534f761 100644
--- a/apps/proxy/vod_proxy/multi_worker_connection_manager.py
+++ b/apps/proxy/vod_proxy/multi_worker_connection_manager.py
@@ -357,12 +357,12 @@ class RedisBackedVODConnection:
logger.info(f"[{self.session_id}] Making request #{state.request_count} to {'final' if state.final_url else 'original'} URL")
- # Make request
+ # Make request (10s connect, 10s read timeout - keeps lock time reasonable if client disconnects)
response = self.local_session.get(
target_url,
headers=headers,
stream=True,
- timeout=(10, 30),
+ timeout=(10, 10),
allow_redirects=allow_redirects
)
response.raise_for_status()
@@ -712,6 +712,10 @@ class MultiWorkerVODConnectionManager:
content_name = content_obj.name if hasattr(content_obj, 'name') else str(content_obj)
client_id = session_id
+ # Track whether we incremented profile connections (for cleanup on error)
+ profile_connections_incremented = False
+ redis_connection = None
+
logger.info(f"[{client_id}] Worker {self.worker_id} - Redis-backed streaming request for {content_type} {content_name}")
try:
@@ -802,6 +806,7 @@ class MultiWorkerVODConnectionManager:
# Increment profile connections after successful connection creation
self._increment_profile_connections(m3u_profile)
+ profile_connections_incremented = True
logger.info(f"[{client_id}] Worker {self.worker_id} - Created consolidated connection with session metadata")
else:
@@ -1024,6 +1029,19 @@ class MultiWorkerVODConnectionManager:
except Exception as e:
logger.error(f"[{client_id}] Worker {self.worker_id} - Error in Redis-backed stream_content_with_session: {e}", exc_info=True)
+
+ # Decrement profile connections if we incremented them but failed before streaming started
+ if profile_connections_incremented:
+ logger.info(f"[{client_id}] Connection error occurred after profile increment - decrementing profile connections")
+ self._decrement_profile_connections(m3u_profile.id)
+
+ # Also clean up the Redis connection state since we won't be using it
+ if redis_connection:
+ try:
+ redis_connection.cleanup(connection_manager=self, current_worker_id=self.worker_id)
+ except Exception as cleanup_error:
+ logger.error(f"[{client_id}] Error during cleanup after connection failure: {cleanup_error}")
+
return HttpResponse(f"Streaming error: {str(e)}", status=500)
def _apply_timeshift_parameters(self, original_url, utc_start=None, utc_end=None, offset=None):
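
The error path added above hinges on a simple "did we increment?" flag so a failed connection never leaks a profile slot. A generic sketch of the pattern (only the increment/decrement helpers appear in the diff; the streaming call is a hypothetical stand-in):

```python
def stream_with_guarded_counter(manager, m3u_profile):
    """Increment a shared connection counter and roll it back if streaming never starts."""
    incremented = False
    try:
        manager._increment_profile_connections(m3u_profile)
        incremented = True
        return manager.start_streaming(m3u_profile)  # hypothetical call that may raise
    except Exception:
        if incremented:
            # Roll back so the failed attempt does not consume a profile connection slot.
            manager._decrement_profile_connections(m3u_profile.id)
        raise
```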
diff --git a/apps/vod/models.py b/apps/vod/models.py
index 69aed808..7067856e 100644
--- a/apps/vod/models.py
+++ b/apps/vod/models.py
@@ -245,10 +245,13 @@ class M3UMovieRelation(models.Model):
"""Get the full stream URL for this movie from this provider"""
# Build URL dynamically for XtreamCodes accounts
if self.m3u_account.account_type == 'XC':
- server_url = self.m3u_account.server_url.rstrip('/')
+ from core.xtream_codes import Client as XCClient
+ # Use XC client's URL normalization to handle malformed URLs
+ # (e.g., URLs with /player_api.php or query parameters)
+ normalized_url = XCClient(self.m3u_account.server_url, '', '')._normalize_url(self.m3u_account.server_url)
username = self.m3u_account.username
password = self.m3u_account.password
- return f"{server_url}/movie/{username}/{password}/{self.stream_id}.{self.container_extension or 'mp4'}"
+ return f"{normalized_url}/movie/{username}/{password}/{self.stream_id}.{self.container_extension or 'mp4'}"
else:
# For other account types, we would need another way to build URLs
return None
@@ -285,10 +288,12 @@ class M3UEpisodeRelation(models.Model):
if self.m3u_account.account_type == 'XC':
# For XtreamCodes accounts, build the URL dynamically
- server_url = self.m3u_account.server_url.rstrip('/')
+ # Use XC client's URL normalization to handle malformed URLs
+ # (e.g., URLs with /player_api.php or query parameters)
+ normalized_url = XtreamCodesClient(self.m3u_account.server_url, '', '')._normalize_url(self.m3u_account.server_url)
username = self.m3u_account.username
password = self.m3u_account.password
- return f"{server_url}/series/{username}/{password}/{self.stream_id}.{self.container_extension or 'mp4'}"
+ return f"{normalized_url}/series/{username}/{password}/{self.stream_id}.{self.container_extension or 'mp4'}"
else:
# We might support non XC accounts in the future
# For now, return None
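
For context on why normalization matters here: a hypothetical helper (not the project's `_normalize_url`, whose exact behavior lives in `core.xtream_codes`) that reduces a malformed XC base URL to its scheme and host before the VOD path is appended might look like:

```python
from urllib.parse import urlsplit


def normalize_base_url(server_url: str) -> str:
    """Illustrative only: reduce a possibly malformed XC URL to scheme://host[:port]."""
    parts = urlsplit(server_url.strip())
    return f"{parts.scheme}://{parts.netloc}"


# e.g. "http://provider.example:8080/player_api.php?user=a&pass=b"
# becomes "http://provider.example:8080", so "/movie/<user>/<pass>/<id>.mp4"
# can be appended safely.
```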
diff --git a/apps/vod/tasks.py b/apps/vod/tasks.py
index 4eb9fadc..0dcd9cfd 100644
--- a/apps/vod/tasks.py
+++ b/apps/vod/tasks.py
@@ -410,10 +410,10 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N
tmdb_id = movie_data.get('tmdb_id') or movie_data.get('tmdb')
imdb_id = movie_data.get('imdb_id') or movie_data.get('imdb')
- # Clean empty string IDs
- if tmdb_id == '':
+ # Clean empty string IDs and zero values (some providers use 0 to indicate no ID)
+ if tmdb_id == '' or tmdb_id == 0 or tmdb_id == '0':
tmdb_id = None
- if imdb_id == '':
+ if imdb_id == '' or imdb_id == 0 or imdb_id == '0':
imdb_id = None
# Create a unique key for this movie (priority: TMDB > IMDB > name+year)
@@ -614,26 +614,41 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N
# First, create new movies and get their IDs
created_movies = {}
if movies_to_create:
- Movie.objects.bulk_create(movies_to_create, ignore_conflicts=True)
+ # Bulk query to check which movies already exist
+ tmdb_ids = [m.tmdb_id for m in movies_to_create if m.tmdb_id]
+ imdb_ids = [m.imdb_id for m in movies_to_create if m.imdb_id]
+ name_year_pairs = [(m.name, m.year) for m in movies_to_create if not m.tmdb_id and not m.imdb_id]
- # Get the newly created movies with their IDs
- # We need to re-fetch them to get the primary keys
+ existing_by_tmdb = {m.tmdb_id: m for m in Movie.objects.filter(tmdb_id__in=tmdb_ids)} if tmdb_ids else {}
+ existing_by_imdb = {m.imdb_id: m for m in Movie.objects.filter(imdb_id__in=imdb_ids)} if imdb_ids else {}
+
+ existing_by_name_year = {}
+ if name_year_pairs:
+ for movie in Movie.objects.filter(tmdb_id__isnull=True, imdb_id__isnull=True):
+ key = (movie.name, movie.year)
+ if key in name_year_pairs:
+ existing_by_name_year[key] = movie
+
+ # Check each movie against the bulk query results
+ movies_actually_created = []
for movie in movies_to_create:
- # Find the movie by its unique identifiers
- if movie.tmdb_id:
- db_movie = Movie.objects.filter(tmdb_id=movie.tmdb_id).first()
- elif movie.imdb_id:
- db_movie = Movie.objects.filter(imdb_id=movie.imdb_id).first()
- else:
- db_movie = Movie.objects.filter(
- name=movie.name,
- year=movie.year,
- tmdb_id__isnull=True,
- imdb_id__isnull=True
- ).first()
+ existing = None
+ if movie.tmdb_id and movie.tmdb_id in existing_by_tmdb:
+ existing = existing_by_tmdb[movie.tmdb_id]
+ elif movie.imdb_id and movie.imdb_id in existing_by_imdb:
+ existing = existing_by_imdb[movie.imdb_id]
+ elif not movie.tmdb_id and not movie.imdb_id:
+ existing = existing_by_name_year.get((movie.name, movie.year))
- if db_movie:
- created_movies[id(movie)] = db_movie
+ if existing:
+ created_movies[id(movie)] = existing
+ else:
+ movies_actually_created.append(movie)
+ created_movies[id(movie)] = movie
+
+ # Bulk create only movies that don't exist
+ if movies_actually_created:
+ Movie.objects.bulk_create(movies_actually_created)
# Update existing movies
if movies_to_update:
@@ -649,12 +664,16 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N
movie.logo = movie._logo_to_update
movie.save(update_fields=['logo'])
- # Update relations to reference the correct movie objects
+ # Update relations to reference the correct movie objects (with PKs)
for relation in relations_to_create:
if id(relation.movie) in created_movies:
relation.movie = created_movies[id(relation.movie)]
- # Handle relations
+ for relation in relations_to_update:
+ if id(relation.movie) in created_movies:
+ relation.movie = created_movies[id(relation.movie)]
+
+ # All movies now have PKs, safe to bulk create/update relations
if relations_to_create:
M3UMovieRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True)
@@ -724,10 +743,10 @@ def process_series_batch(account, batch, categories, relations, scan_start_time=
tmdb_id = series_data.get('tmdb') or series_data.get('tmdb_id')
imdb_id = series_data.get('imdb') or series_data.get('imdb_id')
- # Clean empty string IDs
- if tmdb_id == '':
+ # Clean empty string IDs and zero values (some providers use 0 to indicate no ID)
+ if tmdb_id == '' or tmdb_id == 0 or tmdb_id == '0':
tmdb_id = None
- if imdb_id == '':
+ if imdb_id == '' or imdb_id == 0 or imdb_id == '0':
imdb_id = None
# Create a unique key for this series (priority: TMDB > IMDB > name+year)
@@ -945,26 +964,41 @@ def process_series_batch(account, batch, categories, relations, scan_start_time=
# First, create new series and get their IDs
created_series = {}
if series_to_create:
- Series.objects.bulk_create(series_to_create, ignore_conflicts=True)
+ # Bulk query to check which series already exist
+ tmdb_ids = [s.tmdb_id for s in series_to_create if s.tmdb_id]
+ imdb_ids = [s.imdb_id for s in series_to_create if s.imdb_id]
+ name_year_pairs = [(s.name, s.year) for s in series_to_create if not s.tmdb_id and not s.imdb_id]
- # Get the newly created series with their IDs
- # We need to re-fetch them to get the primary keys
+ existing_by_tmdb = {s.tmdb_id: s for s in Series.objects.filter(tmdb_id__in=tmdb_ids)} if tmdb_ids else {}
+ existing_by_imdb = {s.imdb_id: s for s in Series.objects.filter(imdb_id__in=imdb_ids)} if imdb_ids else {}
+
+ existing_by_name_year = {}
+ if name_year_pairs:
+ for series in Series.objects.filter(tmdb_id__isnull=True, imdb_id__isnull=True):
+ key = (series.name, series.year)
+ if key in name_year_pairs:
+ existing_by_name_year[key] = series
+
+ # Check each series against the bulk query results
+ series_actually_created = []
for series in series_to_create:
- # Find the series by its unique identifiers
- if series.tmdb_id:
- db_series = Series.objects.filter(tmdb_id=series.tmdb_id).first()
- elif series.imdb_id:
- db_series = Series.objects.filter(imdb_id=series.imdb_id).first()
- else:
- db_series = Series.objects.filter(
- name=series.name,
- year=series.year,
- tmdb_id__isnull=True,
- imdb_id__isnull=True
- ).first()
+ existing = None
+ if series.tmdb_id and series.tmdb_id in existing_by_tmdb:
+ existing = existing_by_tmdb[series.tmdb_id]
+ elif series.imdb_id and series.imdb_id in existing_by_imdb:
+ existing = existing_by_imdb[series.imdb_id]
+ elif not series.tmdb_id and not series.imdb_id:
+ existing = existing_by_name_year.get((series.name, series.year))
- if db_series:
- created_series[id(series)] = db_series
+ if existing:
+ created_series[id(series)] = existing
+ else:
+ series_actually_created.append(series)
+ created_series[id(series)] = series
+
+ # Bulk create only series that don't exist
+ if series_actually_created:
+ Series.objects.bulk_create(series_actually_created)
# Update existing series
if series_to_update:
@@ -980,12 +1014,16 @@ def process_series_batch(account, batch, categories, relations, scan_start_time=
series.logo = series._logo_to_update
series.save(update_fields=['logo'])
- # Update relations to reference the correct series objects
+ # Update relations to reference the correct series objects (with PKs)
for relation in relations_to_create:
if id(relation.series) in created_series:
relation.series = created_series[id(relation.series)]
- # Handle relations
+ for relation in relations_to_update:
+ if id(relation.series) in created_series:
+ relation.series = created_series[id(relation.series)]
+
+ # All series now have PKs, safe to bulk create/update relations
if relations_to_create:
M3USeriesRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True)
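
The repeated ID-cleaning checks above could be collapsed into a tiny helper; a sketch (hypothetical function, not part of the diff):

```python
def clean_external_id(value):
    """Treat '', 0, and '0' as missing so unrelated items never share a bogus TMDB/IMDB key."""
    return None if value in ('', 0, '0') else value


# Usage, mirroring the batch processors above:
# tmdb_id = clean_external_id(movie_data.get('tmdb_id') or movie_data.get('tmdb'))
# imdb_id = clean_external_id(movie_data.get('imdb_id') or movie_data.get('imdb'))
```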
diff --git a/docker/docker-compose.aio.yml b/docker/docker-compose.aio.yml
index fe5e1507..2b1fd2ae 100644
--- a/docker/docker-compose.aio.yml
+++ b/docker/docker-compose.aio.yml
@@ -14,6 +14,10 @@ services:
- REDIS_HOST=localhost
- CELERY_BROKER_URL=redis://localhost:6379/0
- DISPATCHARR_LOG_LEVEL=info
+ # Legacy CPU Support (Optional)
+ # Uncomment to enable legacy NumPy build for older CPUs (circa 2009)
+ # that lack support for newer baseline CPU features
+ #- USE_LEGACY_NUMPY=true
# Process Priority Configuration (Optional)
# Lower values = higher priority. Range: -20 (highest) to 19 (lowest)
# Negative values require cap_add: SYS_NICE (uncomment below)
diff --git a/docker/docker-compose.debug.yml b/docker/docker-compose.debug.yml
index d9dbef0e..c576cfd1 100644
--- a/docker/docker-compose.debug.yml
+++ b/docker/docker-compose.debug.yml
@@ -18,6 +18,10 @@ services:
- REDIS_HOST=localhost
- CELERY_BROKER_URL=redis://localhost:6379/0
- DISPATCHARR_LOG_LEVEL=trace
+ # Legacy CPU Support (Optional)
+ # Uncomment to enable legacy NumPy build for older CPUs (circa 2009)
+ # that lack support for newer baseline CPU features
+ #- USE_LEGACY_NUMPY=true
# Process Priority Configuration (Optional)
# Lower values = higher priority. Range: -20 (highest) to 19 (lowest)
# Negative values require cap_add: SYS_NICE (uncomment below)
diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml
index d1bb3680..b20c3296 100644
--- a/docker/docker-compose.dev.yml
+++ b/docker/docker-compose.dev.yml
@@ -17,6 +17,10 @@ services:
- REDIS_HOST=localhost
- CELERY_BROKER_URL=redis://localhost:6379/0
- DISPATCHARR_LOG_LEVEL=debug
+ # Legacy CPU Support (Optional)
+ # Uncomment to enable legacy NumPy build for older CPUs (circa 2009)
+ # that lack support for newer baseline CPU features
+ #- USE_LEGACY_NUMPY=true
# Process Priority Configuration (Optional)
# Lower values = higher priority. Range: -20 (highest) to 19 (lowest)
# Negative values require cap_add: SYS_NICE (uncomment below)
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index aaa63990..e4093e4b 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -17,6 +17,10 @@ services:
- REDIS_HOST=redis
- CELERY_BROKER_URL=redis://redis:6379/0
- DISPATCHARR_LOG_LEVEL=info
+ # Legacy CPU Support (Optional)
+ # Uncomment to enable legacy NumPy build for older CPUs (circa 2009)
+ # that lack support for newer baseline CPU features
+ #- USE_LEGACY_NUMPY=true
# Process Priority Configuration (Optional)
# Lower values = higher priority. Range: -20 (highest) to 19 (lowest)
# Negative values require cap_add: SYS_NICE (uncomment below)
diff --git a/docker/uwsgi.ini b/docker/uwsgi.ini
index d831adfc..920bac48 100644
--- a/docker/uwsgi.ini
+++ b/docker/uwsgi.ini
@@ -37,6 +37,7 @@ http-keepalive = 1
buffer-size = 65536 # Increase buffer for large payloads
post-buffering = 4096 # Reduce buffering for real-time streaming
http-timeout = 600 # Prevent disconnects from long streams
+socket-timeout = 600 # Prevent write timeouts when client buffers
lazy-apps = true # Improve memory efficiency
# Async mode (use gevent for high concurrency)
@@ -58,4 +59,4 @@ logformat-strftime = true
log-date = %%Y-%%m-%%d %%H:%%M:%%S,000
# Use formatted time with environment variable for log level
log-format = %(ftime) $(DISPATCHARR_LOG_LEVEL) uwsgi.requests Worker ID: %(wid) %(method) %(status) %(uri) %(msecs)ms
-log-buffering = 1024 # Add buffer size limit for logging
\ No newline at end of file
+log-buffering = 1024 # Add buffer size limit for logging
diff --git a/frontend/package-lock.json b/frontend/package-lock.json
index 84d18989..ed9e6010 100644
--- a/frontend/package-lock.json
+++ b/frontend/package-lock.json
@@ -12,6 +12,7 @@
"@dnd-kit/modifiers": "^9.0.0",
"@dnd-kit/sortable": "^10.0.0",
"@dnd-kit/utilities": "^3.2.2",
+ "@hookform/resolvers": "^5.2.2",
"@mantine/charts": "~8.0.1",
"@mantine/core": "~8.0.1",
"@mantine/dates": "~8.0.1",
@@ -22,13 +23,13 @@
"@tanstack/react-table": "^8.21.2",
"allotment": "^1.20.4",
"dayjs": "^1.11.13",
- "formik": "^2.4.6",
"hls.js": "^1.5.20",
"lucide-react": "^0.511.0",
"mpegts.js": "^1.8.0",
"react": "^19.1.0",
"react-dom": "^19.1.0",
"react-draggable": "^4.4.6",
+ "react-hook-form": "^7.70.0",
"react-pro-sidebar": "^1.1.0",
"react-router-dom": "^7.3.0",
"react-virtualized": "^9.22.6",
@@ -1248,6 +1249,18 @@
"integrity": "sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ==",
"license": "MIT"
},
+ "node_modules/@hookform/resolvers": {
+ "version": "5.2.2",
+ "resolved": "https://registry.npmjs.org/@hookform/resolvers/-/resolvers-5.2.2.tgz",
+ "integrity": "sha512-A/IxlMLShx3KjV/HeTcTfaMxdwy690+L/ZADoeaTltLx+CVuzkeVIPuybK3jrRfw7YZnmdKsVVHAlEPIAEUNlA==",
+ "license": "MIT",
+ "dependencies": {
+ "@standard-schema/utils": "^0.3.0"
+ },
+ "peerDependencies": {
+ "react-hook-form": "^7.55.0"
+ }
+ },
"node_modules/@humanfs/core": {
"version": "0.19.1",
"resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz",
@@ -1776,6 +1789,12 @@
"win32"
]
},
+ "node_modules/@standard-schema/utils": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/@standard-schema/utils/-/utils-0.3.0.tgz",
+ "integrity": "sha512-e7Mew686owMaPJVNNLs55PUvgz371nKgwsc4vxE49zsODpJEnxgxRo2y/OKrqueavXgZNMDVj3DdHFlaSAeU8g==",
+ "license": "MIT"
+ },
"node_modules/@swc/core": {
"name": "@swc/wasm",
"version": "1.13.20",
@@ -2008,18 +2027,6 @@
"dev": true,
"license": "MIT"
},
- "node_modules/@types/hoist-non-react-statics": {
- "version": "3.3.7",
- "resolved": "https://registry.npmjs.org/@types/hoist-non-react-statics/-/hoist-non-react-statics-3.3.7.tgz",
- "integrity": "sha512-PQTyIulDkIDro8P+IHbKCsw7U2xxBYflVzW/FgWdCAePD9xGSidgA76/GeJ6lBKoblyhf9pBY763gbrN+1dI8g==",
- "license": "MIT",
- "dependencies": {
- "hoist-non-react-statics": "^3.3.0"
- },
- "peerDependencies": {
- "@types/react": "*"
- }
- },
"node_modules/@types/json-schema": {
"version": "7.0.15",
"resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz",
@@ -2037,6 +2044,7 @@
"version": "19.2.7",
"resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.7.tgz",
"integrity": "sha512-MWtvHrGZLFttgeEj28VXHxpmwYbor/ATPYbBfSFZEIRK0ecCFLl2Qo55z52Hss+UV9CRN7trSeq1zbgx7YDWWg==",
+ "devOptional": true,
"license": "MIT",
"dependencies": {
"csstype": "^3.2.2"
@@ -2833,15 +2841,6 @@
"dev": true,
"license": "MIT"
},
- "node_modules/deepmerge": {
- "version": "2.2.1",
- "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-2.2.1.tgz",
- "integrity": "sha512-R9hc1Xa/NOBi9WRVUWg19rl1UB7Tt4kuPd+thNJgFZoxXsTz7ncaPaeIm+40oSGuP33DfMb4sZt1QIGiJzC4EA==",
- "license": "MIT",
- "engines": {
- "node": ">=0.10.0"
- }
- },
"node_modules/dequal": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz",
@@ -3288,31 +3287,6 @@
"dev": true,
"license": "ISC"
},
- "node_modules/formik": {
- "version": "2.4.9",
- "resolved": "https://registry.npmjs.org/formik/-/formik-2.4.9.tgz",
- "integrity": "sha512-5nI94BMnlFDdQRBY4Sz39WkhxajZJ57Fzs8wVbtsQlm5ScKIR1QLYqv/ultBnobObtlUyxpxoLodpixrsf36Og==",
- "funding": [
- {
- "type": "individual",
- "url": "https://opencollective.com/formik"
- }
- ],
- "license": "Apache-2.0",
- "dependencies": {
- "@types/hoist-non-react-statics": "^3.3.1",
- "deepmerge": "^2.1.1",
- "hoist-non-react-statics": "^3.3.0",
- "lodash": "^4.17.21",
- "lodash-es": "^4.17.21",
- "react-fast-compare": "^2.0.1",
- "tiny-warning": "^1.0.2",
- "tslib": "^2.0.0"
- },
- "peerDependencies": {
- "react": ">=16.8.0"
- }
- },
"node_modules/fsevents": {
"version": "2.3.3",
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
@@ -3751,12 +3725,6 @@
"integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==",
"license": "MIT"
},
- "node_modules/lodash-es": {
- "version": "4.17.22",
- "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.22.tgz",
- "integrity": "sha512-XEawp1t0gxSi9x01glktRZ5HDy0HXqrM0x5pXQM98EaI0NxO6jVM7omDOxsuEo5UIASAnm2bRp1Jt/e0a2XU8Q==",
- "license": "MIT"
- },
"node_modules/lodash.clamp": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/lodash.clamp/-/lodash.clamp-4.0.3.tgz",
@@ -4334,11 +4302,21 @@
"react": ">= 16.8 || 18.0.0"
}
},
- "node_modules/react-fast-compare": {
- "version": "2.0.4",
- "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-2.0.4.tgz",
- "integrity": "sha512-suNP+J1VU1MWFKcyt7RtjiSWUjvidmQSlqu+eHslq+342xCbGTYmC0mEhPCOHxlW0CywylOC1u2DFAT+bv4dBw==",
- "license": "MIT"
+ "node_modules/react-hook-form": {
+ "version": "7.70.0",
+ "resolved": "https://registry.npmjs.org/react-hook-form/-/react-hook-form-7.70.0.tgz",
+ "integrity": "sha512-COOMajS4FI3Wuwrs3GPpi/Jeef/5W1DRR84Yl5/ShlT3dKVFUfoGiEZ/QE6Uw8P4T2/CLJdcTVYKvWBMQTEpvw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=18.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/react-hook-form"
+ },
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17 || ^18 || ^19"
+ }
},
"node_modules/react-is": {
"version": "16.13.1",
@@ -4923,12 +4901,6 @@
"integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==",
"license": "MIT"
},
- "node_modules/tiny-warning": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz",
- "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==",
- "license": "MIT"
- },
"node_modules/tinybench": {
"version": "2.9.0",
"resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz",
diff --git a/frontend/package.json b/frontend/package.json
index ff5be72d..7b2d5927 100644
--- a/frontend/package.json
+++ b/frontend/package.json
@@ -23,11 +23,12 @@
"@mantine/form": "~8.0.1",
"@mantine/hooks": "~8.0.1",
"@mantine/notifications": "~8.0.1",
+ "@hookform/resolvers": "^5.2.2",
"@tanstack/react-table": "^8.21.2",
"allotment": "^1.20.4",
"dayjs": "^1.11.13",
- "formik": "^2.4.6",
"hls.js": "^1.5.20",
+ "react-hook-form": "^7.70.0",
"lucide-react": "^0.511.0",
"mpegts.js": "^1.8.0",
"react": "^19.1.0",
diff --git a/frontend/src/api.js b/frontend/src/api.js
index 64ce4d77..c96b3cb8 100644
--- a/frontend/src/api.js
+++ b/frontend/src/api.js
@@ -336,6 +336,15 @@ export default class API {
delete channelData.channel_number;
}
+ // Add channel profile IDs based on current selection
+ const selectedProfileId = useChannelsStore.getState().selectedProfileId;
+ if (selectedProfileId && selectedProfileId !== '0') {
+ // Specific profile selected - add only to that profile
+ channelData.channel_profile_ids = [parseInt(selectedProfileId)];
+ }
+ // If selectedProfileId is '0' or not set, don't include channel_profile_ids
+ // which will trigger the backend's default behavior of adding to all profiles
+
if (channel.logo_file) {
// Must send FormData for file upload
body = new FormData();
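
From an API consumer's perspective, the new behavior is just an optional `channel_profile_ids` list in the channel-create payload. A hedged sketch (the endpoint path and auth header are assumptions; only the field name and its semantics come from this diff):

```python
import requests

# Hypothetical base URL and token; adjust to the actual deployment.
resp = requests.post(
    "http://dispatcharr.local/api/channels/channels/",
    headers={"Authorization": "Bearer <token>"},
    json={
        "name": "Example Channel",
        "channel_profile_ids": [3],  # add only to profile 3; omit the key to add to all profiles
    },
)
resp.raise_for_status()
print(resp.json())
```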
diff --git a/frontend/src/components/cards/SeriesCard.jsx b/frontend/src/components/cards/SeriesCard.jsx
new file mode 100644
index 00000000..f010cb44
--- /dev/null
+++ b/frontend/src/components/cards/SeriesCard.jsx
@@ -0,0 +1,85 @@
+import {
+ Badge,
+ Box,
+ Card,
+ CardSection,
+ Group,
+ Image,
+ Stack,
+ Text,
+} from '@mantine/core';
+import {Calendar, Play, Star} from "lucide-react";
+import React from "react";
+
+const SeriesCard = ({ series, onClick }) => {
+ return (
+
+
+