Merge branch 'dev' into Media-Server

This commit is contained in:
Dispatcharr 2025-12-27 10:16:45 -06:00
commit 62eb56f7f2
41 changed files with 2727 additions and 1923 deletions

View file

@ -17,10 +17,45 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Added
- VOD proxy supports local file streaming and optional inclusion of inactive accounts for library playback
- Advanced filtering for Channels table: Filter menu now allows toggling disabled channels visibility (when a profile is selected) and filtering to show only empty channels without streams (Closes #182)
- Network Access warning modal now displays the client's IP address for better transparency when network restrictions are being enforced - Thanks [@damien-alt-sudo](https://github.com/damien-alt-sudo) (Closes #778)
- VLC streaming support - Thanks [@sethwv](https://github.com/sethwv)
- Added `cvlc` as an alternative streaming backend alongside FFmpeg and Streamlink
- Log parser refactoring: Introduced `LogParserFactory` and stream-specific parsers (`FFmpegLogParser`, `VLCLogParser`, `StreamlinkLogParser`) to enable codec and resolution detection from multiple streaming tools
- VLC log parsing for stream information: Detects video/audio codecs from TS demux output, supports both stream-copy and transcode modes with resolution/FPS extraction from transcode output
- Locked, read-only VLC stream profile configured for headless operation with intelligent audio/video codec detection
- VLC and required plugins installed in Docker environment with headless configuration
- ErrorBoundary component for handling frontend errors gracefully with generic error message - Thanks [@nick4810](https://github.com/nick4810)
### Changed
- Fixed event viewer arrow direction (previously inverted) — UI behavior corrected. - Thanks [@drnikcuk](https://github.com/drnikcuk) (Closes #772)
- Channel number inputs in stream-to-channel creation modals no longer have a maximum value restriction, allowing users to enter any valid channel number supported by the database
- Stream log parsing refactored to use factory pattern: Simplified `ChannelService.parse_and_store_stream_info()` to route parsing through specialized log parsers instead of inline program-specific logic (~150 lines of code removed)
- Stream profile names in fixtures updated to use proper capitalization (ffmpeg → FFmpeg, streamlink → Streamlink)
- Frontend component refactoring for improved code organization and maintainability - Thanks [@nick4810](https://github.com/nick4810)
- Extracted large nested components into separate files (RecordingCard, RecordingDetailsModal, RecurringRuleModal, RecordingSynopsis)
- Moved business logic from components into dedicated utility files (dateTimeUtils, RecordingCardUtils, RecordingDetailsModalUtils, RecurringRuleModalUtils, DVRUtils)
- Lazy loaded heavy components (SuperuserForm, RecordingDetailsModal) with loading fallbacks
- Removed unused Dashboard and Home pages
- Logo loading optimization: Logos now load only after both Channels and Streams tables complete loading to prevent blocking initial page render, with rendering gated by table readiness to ensure data loads before visual elements
- M3U stream URLs now use `build_absolute_uri_with_port()` for consistency with EPG and logo URLs, ensuring uniform port handling across all M3U file URLs
### Fixed
- M3U and EPG URLs now correctly preserve non-standard HTTPS ports (e.g., `:8443`) when accessed behind reverse proxies that forward the port in headers — `get_host_and_port()` now properly checks `X-Forwarded-Port` header before falling back to other detection methods (Fixes #704)
- M3U and EPG manager page no longer crashes when a playlist references a deleted channel group (Fixes screen blank on navigation)
- Stream validation now returns original URL instead of redirected URL to prevent issues with temporary redirect URLs that expire before clients can connect
- XtreamCodes EPG limit parameter now properly converted to integer to prevent type errors when accessing EPG listings (Fixes #781)
- Stream validation now continues with GET request if HEAD request fails due to connection issues - Thanks [@kvnnap](https://github.com/kvnnap) (Fixes #782)
- XtreamCodes M3U files now correctly set `x-tvg-url` and `url-tvg` headers to reference XC EPG URL (`xmltv.php`) instead of standard EPG endpoint when downloaded via XC API (Fixes #629)
## [0.15.1] - 2025-12-22
### Fixed
- XtreamCodes EPG `has_archive` field now returns integer `0` instead of string `"0"` for proper JSON type consistency
- nginx now gracefully handles hosts without IPv6 support by automatically disabling IPv6 binding at startup (Fixes #744)
## [0.15.0] - 2025-12-20

View file

@ -8,6 +8,7 @@ from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
from django.shortcuts import get_object_or_404, get_list_or_404
from django.db import transaction
from django.db.models import Q
import os, json, requests, logging
from urllib.parse import unquote
from apps.accounts.permissions import (
@ -420,10 +421,36 @@ class ChannelViewSet(viewsets.ModelViewSet):
group_names = channel_group.split(",")
qs = qs.filter(channel_group__name__in=group_names)
if self.request.user.user_level < 10:
qs = qs.filter(user_level__lte=self.request.user.user_level)
filters = {}
q_filters = Q()
return qs
channel_profile_id = self.request.query_params.get("channel_profile_id")
show_disabled_param = self.request.query_params.get("show_disabled", None)
only_streamless = self.request.query_params.get("only_streamless", None)
if channel_profile_id:
try:
profile_id_int = int(channel_profile_id)
filters["channelprofilemembership__channel_profile_id"] = profile_id_int
if show_disabled_param is None:
filters["channelprofilemembership__enabled"] = True
except (ValueError, TypeError):
# Ignore invalid profile id values
pass
if only_streamless:
q_filters &= Q(streams__isnull=True)
if self.request.user.user_level < 10:
filters["user_level__lte"] = self.request.user.user_level
if filters:
qs = qs.filter(**filters)
if q_filters:
qs = qs.filter(q_filters)
return qs.distinct()
def get_serializer_context(self):
context = super().get_serializer_context()

View file

@ -174,16 +174,26 @@ def generate_m3u(request, profile_name=None, user=None):
tvg_id_source = request.GET.get('tvg_id_source', 'channel_number').lower()
# Build EPG URL with query parameters if needed
epg_base_url = build_absolute_uri_with_port(request, reverse('output:epg_endpoint', args=[profile_name]) if profile_name else reverse('output:epg_endpoint'))
# Check if this is an XC API request (has username/password in GET params and user is authenticated)
xc_username = request.GET.get('username')
xc_password = request.GET.get('password')
# Optionally preserve certain query parameters
preserved_params = ['tvg_id_source', 'cachedlogos', 'days']
query_params = {k: v for k, v in request.GET.items() if k in preserved_params}
if query_params:
from urllib.parse import urlencode
epg_url = f"{epg_base_url}?{urlencode(query_params)}"
if user is not None and xc_username and xc_password:
# This is an XC API request - use XC-style EPG URL
base_url = build_absolute_uri_with_port(request, '')
epg_url = f"{base_url}/xmltv.php?username={xc_username}&password={xc_password}"
else:
epg_url = epg_base_url
# Regular request - use standard EPG endpoint
epg_base_url = build_absolute_uri_with_port(request, reverse('output:epg_endpoint', args=[profile_name]) if profile_name else reverse('output:epg_endpoint'))
# Optionally preserve certain query parameters
preserved_params = ['tvg_id_source', 'cachedlogos', 'days']
query_params = {k: v for k, v in request.GET.items() if k in preserved_params}
if query_params:
from urllib.parse import urlencode
epg_url = f"{epg_base_url}?{urlencode(query_params)}"
else:
epg_url = epg_base_url
# Add x-tvg-url and url-tvg attribute for EPG URL
m3u_content = f'#EXTM3U x-tvg-url="{epg_url}" url-tvg="{epg_url}"\n'
@ -247,12 +257,10 @@ def generate_m3u(request, profile_name=None, user=None):
stream_url = first_stream.url
else:
# Fall back to proxy URL if no direct URL available
base_url = request.build_absolute_uri('/')[:-1]
stream_url = f"{base_url}/proxy/ts/stream/{channel.uuid}"
stream_url = build_absolute_uri_with_port(request, f"/proxy/ts/stream/{channel.uuid}")
else:
# Standard behavior - use proxy URL
base_url = request.build_absolute_uri('/')[:-1]
stream_url = f"{base_url}/proxy/ts/stream/{channel.uuid}"
stream_url = build_absolute_uri_with_port(request, f"/proxy/ts/stream/{channel.uuid}")
m3u_content += extinf_line + stream_url + "\n"
@ -2258,7 +2266,7 @@ def xc_get_epg(request, user, short=False):
# Get the mapped integer for this specific channel
channel_num_int = channel_num_map.get(channel.id, int(channel.channel_number))
limit = request.GET.get('limit', 4)
limit = int(request.GET.get('limit', 4))
if channel.epg_data:
# Check if this is a dummy EPG that generates on-demand
if channel.epg_data.epg_source and channel.epg_data.epg_source.source_type == 'dummy':
@ -2932,19 +2940,16 @@ def get_host_and_port(request):
if xfh:
if ":" in xfh:
host, port = xfh.split(":", 1)
# Omit standard ports from URLs, or omit if port doesn't match standard for scheme
# (e.g., HTTPS but port is 9191 = behind external reverse proxy)
# Omit standard ports from URLs
if port == standard_port:
return host, None
# If port doesn't match standard and X-Forwarded-Proto is set, likely behind external RP
if request.META.get("HTTP_X_FORWARDED_PROTO"):
host = xfh.split(":")[0] # Strip port, will check for proper port below
else:
return host, port
# Non-standard port in X-Forwarded-Host - return it
# This handles reverse proxies on non-standard ports (e.g., https://example.com:8443)
return host, port
else:
host = xfh
# Check for X-Forwarded-Port header (if we didn't already find a valid port)
# Check for X-Forwarded-Port header (if we didn't find a port in X-Forwarded-Host)
port = request.META.get("HTTP_X_FORWARDED_PORT")
if port:
# Omit standard ports from URLs
@ -2962,22 +2967,28 @@ def get_host_and_port(request):
else:
host = raw_host
# 3. Check if we're behind a reverse proxy (X-Forwarded-Proto or X-Forwarded-For present)
# 3. Check for X-Forwarded-Port (when Host header has no port but we're behind a reverse proxy)
port = request.META.get("HTTP_X_FORWARDED_PORT")
if port:
# Omit standard ports from URLs
return host, None if port == standard_port else port
# 4. Check if we're behind a reverse proxy (X-Forwarded-Proto or X-Forwarded-For present)
# If so, assume standard port for the scheme (don't trust SERVER_PORT in this case)
if request.META.get("HTTP_X_FORWARDED_PROTO") or request.META.get("HTTP_X_FORWARDED_FOR"):
return host, None
# 4. Try SERVER_PORT from META (only if NOT behind reverse proxy)
# 5. Try SERVER_PORT from META (only if NOT behind reverse proxy)
port = request.META.get("SERVER_PORT")
if port:
# Omit standard ports from URLs
return host, None if port == standard_port else port
# 5. Dev fallback: guess port 5656
# 6. Dev fallback: guess port 5656
if os.environ.get("DISPATCHARR_ENV") == "dev" or host in ("localhost", "127.0.0.1"):
return host, "5656"
# 6. Final fallback: assume standard port for scheme (omit from URL)
# 7. Final fallback: assume standard port for scheme (omit from URL)
return host, None
def build_absolute_uri_with_port(request, path):

View file

@ -15,6 +15,7 @@ from ..redis_keys import RedisKeys
from ..constants import EventType, ChannelState, ChannelMetadataField
from ..url_utils import get_stream_info_for_switch
from core.utils import log_system_event
from .log_parsers import LogParserFactory
logger = logging.getLogger("ts_proxy")
@ -419,124 +420,51 @@ class ChannelService:
@staticmethod
def parse_and_store_stream_info(channel_id, stream_info_line, stream_type="video", stream_id=None):
"""Parse FFmpeg stream info line and store in Redis metadata and database"""
"""
Parse stream info from FFmpeg/VLC/Streamlink logs and store in Redis/DB.
Uses specialized parsers for each streaming tool.
"""
try:
if stream_type == "input":
# Example lines:
# Input #0, mpegts, from 'http://example.com/stream.ts':
# Input #0, hls, from 'http://example.com/stream.m3u8':
# Use factory to parse the line based on stream type
parsed_data = LogParserFactory.parse(stream_type, stream_info_line)
if not parsed_data:
return
# Extract input format (e.g., "mpegts", "hls", "flv", etc.)
input_match = re.search(r'Input #\d+,\s*([^,]+)', stream_info_line)
input_format = input_match.group(1).strip() if input_match else None
# Update Redis and database with parsed data
ChannelService._update_stream_info_in_redis(
channel_id,
parsed_data.get('video_codec'),
parsed_data.get('resolution'),
parsed_data.get('width'),
parsed_data.get('height'),
parsed_data.get('source_fps'),
parsed_data.get('pixel_format'),
parsed_data.get('video_bitrate'),
parsed_data.get('audio_codec'),
parsed_data.get('sample_rate'),
parsed_data.get('audio_channels'),
parsed_data.get('audio_bitrate'),
parsed_data.get('stream_type')
)
# Store in Redis if we have valid data
if input_format:
ChannelService._update_stream_info_in_redis(channel_id, None, None, None, None, None, None, None, None, None, None, None, input_format)
# Save to database if stream_id is provided
if stream_id:
ChannelService._update_stream_stats_in_db(stream_id, stream_type=input_format)
logger.debug(f"Input format info - Format: {input_format} for channel {channel_id}")
elif stream_type == "video":
# Example line:
# Stream #0:0: Video: h264 (Main), yuv420p(tv, progressive), 1280x720 [SAR 1:1 DAR 16:9], q=2-31, 2000 kb/s, 29.97 fps, 90k tbn
# Extract video codec (e.g., "h264", "mpeg2video", etc.)
codec_match = re.search(r'Video:\s*([a-zA-Z0-9_]+)', stream_info_line)
video_codec = codec_match.group(1) if codec_match else None
# Extract resolution (e.g., "1280x720") - be more specific to avoid hex values
# Look for resolution patterns that are realistic video dimensions
resolution_match = re.search(r'\b(\d{3,5})x(\d{3,5})\b', stream_info_line)
if resolution_match:
width = int(resolution_match.group(1))
height = int(resolution_match.group(2))
# Validate that these look like reasonable video dimensions
if 100 <= width <= 10000 and 100 <= height <= 10000:
resolution = f"{width}x{height}"
else:
width = height = resolution = None
else:
width = height = resolution = None
# Extract source FPS (e.g., "29.97 fps")
fps_match = re.search(r'(\d+(?:\.\d+)?)\s*fps', stream_info_line)
source_fps = float(fps_match.group(1)) if fps_match else None
# Extract pixel format (e.g., "yuv420p")
pixel_format_match = re.search(r'Video:\s*[^,]+,\s*([^,(]+)', stream_info_line)
pixel_format = None
if pixel_format_match:
pf = pixel_format_match.group(1).strip()
# Clean up pixel format (remove extra info in parentheses)
if '(' in pf:
pf = pf.split('(')[0].strip()
pixel_format = pf
# Extract bitrate if present (e.g., "2000 kb/s")
video_bitrate = None
bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', stream_info_line)
if bitrate_match:
video_bitrate = float(bitrate_match.group(1))
# Store in Redis if we have valid data
if any(x is not None for x in [video_codec, resolution, source_fps, pixel_format, video_bitrate]):
ChannelService._update_stream_info_in_redis(channel_id, video_codec, resolution, width, height, source_fps, pixel_format, video_bitrate, None, None, None, None, None)
# Save to database if stream_id is provided
if stream_id:
ChannelService._update_stream_stats_in_db(
stream_id,
video_codec=video_codec,
resolution=resolution,
source_fps=source_fps,
pixel_format=pixel_format,
video_bitrate=video_bitrate
)
logger.info(f"Video stream info - Codec: {video_codec}, Resolution: {resolution}, "
f"Source FPS: {source_fps}, Pixel Format: {pixel_format}, "
f"Video Bitrate: {video_bitrate} kb/s")
elif stream_type == "audio":
# Example line:
# Stream #0:1[0x101]: Audio: aac (LC) ([15][0][0][0] / 0x000F), 48000 Hz, stereo, fltp, 64 kb/s
# Extract audio codec (e.g., "aac", "mp3", etc.)
codec_match = re.search(r'Audio:\s*([a-zA-Z0-9_]+)', stream_info_line)
audio_codec = codec_match.group(1) if codec_match else None
# Extract sample rate (e.g., "48000 Hz")
sample_rate_match = re.search(r'(\d+)\s*Hz', stream_info_line)
sample_rate = int(sample_rate_match.group(1)) if sample_rate_match else None
# Extract channel layout (e.g., "stereo", "5.1", "mono")
# Look for common channel layouts
channel_match = re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', stream_info_line, re.IGNORECASE)
channels = channel_match.group(1) if channel_match else None
# Extract audio bitrate if present (e.g., "64 kb/s")
audio_bitrate = None
bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', stream_info_line)
if bitrate_match:
audio_bitrate = float(bitrate_match.group(1))
# Store in Redis if we have valid data
if any(x is not None for x in [audio_codec, sample_rate, channels, audio_bitrate]):
ChannelService._update_stream_info_in_redis(channel_id, None, None, None, None, None, None, None, audio_codec, sample_rate, channels, audio_bitrate, None)
# Save to database if stream_id is provided
if stream_id:
ChannelService._update_stream_stats_in_db(
stream_id,
audio_codec=audio_codec,
sample_rate=sample_rate,
audio_channels=channels,
audio_bitrate=audio_bitrate
)
if stream_id:
ChannelService._update_stream_stats_in_db(
stream_id,
video_codec=parsed_data.get('video_codec'),
resolution=parsed_data.get('resolution'),
source_fps=parsed_data.get('source_fps'),
pixel_format=parsed_data.get('pixel_format'),
video_bitrate=parsed_data.get('video_bitrate'),
audio_codec=parsed_data.get('audio_codec'),
sample_rate=parsed_data.get('sample_rate'),
audio_channels=parsed_data.get('audio_channels'),
audio_bitrate=parsed_data.get('audio_bitrate'),
stream_type=parsed_data.get('stream_type')
)
except Exception as e:
logger.debug(f"Error parsing FFmpeg {stream_type} stream info: {e}")
logger.debug(f"Error parsing {stream_type} stream info: {e}")
@staticmethod
def _update_stream_info_in_redis(channel_id, codec, resolution, width, height, fps, pixel_format, video_bitrate, audio_codec=None, sample_rate=None, channels=None, audio_bitrate=None, input_format=None):

View file

@ -0,0 +1,410 @@
"""Log parsers for FFmpeg, Streamlink, and VLC output."""
import re
import logging
from abc import ABC, abstractmethod
from typing import Optional, Dict, Any
logger = logging.getLogger(__name__)
class BaseLogParser(ABC):
    """Abstract interface shared by all streaming-tool log parsers.

    A concrete parser declares which ``stream_type`` keys it services via
    ``STREAM_TYPE_METHODS`` (mapping a stream_type string to the name of
    the bound method that parses it) and implements the hooks below.
    """

    # stream_type -> method name handling that type; empty on the base class.
    STREAM_TYPE_METHODS: Dict[str, str] = {}

    @abstractmethod
    def can_parse(self, line: str) -> Optional[str]:
        """Return the stream_type this parser would use for ``line``.

        Returns None when the line is not recognized. Possible values
        include 'video', 'audio', 'input', 'vlc_video', 'vlc_audio',
        and 'streamlink'.
        """
        ...

    @abstractmethod
    def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]:
        """Parse container/input-format info from ``line``, or None."""
        ...

    @abstractmethod
    def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]:
        """Parse video stream details from ``line``, or None."""
        ...

    @abstractmethod
    def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]:
        """Parse audio stream details from ``line``, or None."""
        ...
class FFmpegLogParser(BaseLogParser):
    """Parser for FFmpeg log output.

    Recognizes 'Input #N' container lines and 'Stream #N' video/audio
    descriptor lines emitted on FFmpeg's stderr.
    """

    STREAM_TYPE_METHODS = {
        'input': 'parse_input_format',
        'video': 'parse_video_stream',
        'audio': 'parse_audio_stream'
    }

    def can_parse(self, line: str) -> Optional[str]:
        """Classify a line as 'input', 'video', 'audio', or None."""
        folded = line.lower()
        # Container/input line, e.g. "Input #0, mpegts, from '...'"
        if folded.startswith('input #'):
            return 'input'
        # Stream descriptors; input-vs-output phase is tracked by the caller.
        if 'stream #' in folded:
            if 'video:' in folded:
                return 'video'
            if 'audio:' in folded:
                return 'audio'
        return None

    def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]:
        """Extract the input container format (e.g., mpegts, hls)."""
        try:
            match = re.search(r'Input #\d+,\s*([^,]+)', line)
            container = match.group(1).strip() if match else None
            if container:
                logger.debug(f"Input format info - Format: {container}")
                return {'stream_type': container}
        except Exception as e:
            logger.debug(f"Error parsing FFmpeg input format: {e}")
        return None

    def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]:
        """Extract codec, resolution, FPS, pixel format and bitrate."""
        try:
            info: Dict[str, Any] = {}
            codec = re.search(r'Video:\s*([a-zA-Z0-9_]+)', line)
            if codec:
                info['video_codec'] = codec.group(1)
            dims = re.search(r'\b(\d{3,5})x(\d{3,5})\b', line)
            if dims:
                w = int(dims.group(1))
                h = int(dims.group(2))
                # Sanity-check bounds so hex-ish values aren't mistaken for dimensions.
                if 100 <= w <= 10000 and 100 <= h <= 10000:
                    info['resolution'] = f"{w}x{h}"
                    info['width'] = w
                    info['height'] = h
            fps = re.search(r'(\d+(?:\.\d+)?)\s*fps', line)
            if fps:
                info['source_fps'] = float(fps.group(1))
            pix = re.search(r'Video:\s*[^,]+,\s*([^,(]+)', line)
            if pix:
                fmt = pix.group(1).strip()
                # Drop any trailing parenthesized qualifiers.
                if '(' in fmt:
                    fmt = fmt.split('(')[0].strip()
                info['pixel_format'] = fmt
            kbps = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', line)
            if kbps:
                info['video_bitrate'] = float(kbps.group(1))
            if info:
                logger.info(f"Video stream info - Codec: {info.get('video_codec')}, "
                            f"Resolution: {info.get('resolution')}, "
                            f"Source FPS: {info.get('source_fps')}, "
                            f"Pixel Format: {info.get('pixel_format')}, "
                            f"Video Bitrate: {info.get('video_bitrate')} kb/s")
                return info
        except Exception as e:
            logger.debug(f"Error parsing FFmpeg video stream info: {e}")
        return None

    def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]:
        """Extract codec, sample rate, channel layout and bitrate."""
        try:
            info: Dict[str, Any] = {}
            codec = re.search(r'Audio:\s*([a-zA-Z0-9_]+)', line)
            if codec:
                info['audio_codec'] = codec.group(1)
            rate_hz = re.search(r'(\d+)\s*Hz', line)
            if rate_hz:
                info['sample_rate'] = int(rate_hz.group(1))
            layout = re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', line, re.IGNORECASE)
            if layout:
                info['audio_channels'] = layout.group(1)
            kbps = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', line)
            if kbps:
                info['audio_bitrate'] = float(kbps.group(1))
            if info:
                return info
        except Exception as e:
            logger.debug(f"Error parsing FFmpeg audio stream info: {e}")
        return None
class VLCLogParser(BaseLogParser):
    """Parser for VLC log output.

    Extracts codec identifiers from TS demux debug lines, audio details
    (channels/samplerate) from decoder output, and resolution/FPS from
    ``stream_out_transcode`` output.
    """

    # stream_type keys routed to this parser by LogParserFactory.
    STREAM_TYPE_METHODS = {
        'vlc_video': 'parse_video_stream',
        'vlc_audio': 'parse_audio_stream'
    }

    def can_parse(self, line: str) -> Optional[str]:
        """Check if this is a VLC line we can parse"""
        lower = line.lower()
        # VLC TS demux codec detection
        if 'ts demux debug' in lower and 'type=' in lower:
            if 'video' in lower:
                return 'vlc_video'
            elif 'audio' in lower:
                return 'vlc_audio'
        # VLC decoder output
        # NOTE(review): the bare `'x' in line` check is very broad and may
        # route unrelated decoder lines here — confirm against real VLC logs.
        if 'decoder' in lower and ('channels:' in lower or 'samplerate:' in lower or 'x' in line or 'fps' in lower):
            if 'audio' in lower or 'channels:' in lower or 'samplerate:' in lower:
                return 'vlc_audio'
            else:
                return 'vlc_video'
        # VLC transcode output for resolution/FPS
        if 'stream_out_transcode' in lower and ('source fps' in lower or ('source ' in lower and 'x' in line)):
            return 'vlc_video'
        return None

    def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]:
        # VLC logs do not expose a container-format line we can parse.
        return None

    def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]:
        """Parse VLC TS demux output and decoder info for video"""
        try:
            lower = line.lower()
            result = {}
            # Codec detection from TS demux; first matching pattern set wins.
            video_codec_map = {
                ('avc', 'h.264', 'type=0x1b'): "h264",
                ('hevc', 'h.265', 'type=0x24'): "hevc",
                ('mpeg-2', 'type=0x02'): "mpeg2video",
                ('mpeg-4', 'type=0x10'): "mpeg4"
            }
            for patterns, codec in video_codec_map.items():
                if any(p in lower for p in patterns):
                    result['video_codec'] = codec
                    break
            # Extract FPS from transcode output: "source fps 30/1"
            fps_fraction_match = re.search(r'source fps\s+(\d+)/(\d+)', lower)
            if fps_fraction_match:
                numerator = int(fps_fraction_match.group(1))
                denominator = int(fps_fraction_match.group(2))
                if denominator > 0:
                    result['source_fps'] = numerator / denominator
            # Extract resolution from transcode output: "source 1280x720"
            source_res_match = re.search(r'source\s+(\d{3,4})x(\d{3,4})', lower)
            if source_res_match:
                width = int(source_res_match.group(1))
                height = int(source_res_match.group(2))
                if 100 <= width <= 10000 and 100 <= height <= 10000:
                    result['resolution'] = f"{width}x{height}"
                    result['width'] = width
                    result['height'] = height
            else:
                # Fallback: generic resolution pattern anywhere in the line.
                resolution_match = re.search(r'(\d{3,4})x(\d{3,4})', line)
                if resolution_match:
                    width = int(resolution_match.group(1))
                    height = int(resolution_match.group(2))
                    if 100 <= width <= 10000 and 100 <= height <= 10000:
                        result['resolution'] = f"{width}x{height}"
                        result['width'] = width
                        result['height'] = height
            # Fallback: try to extract FPS from generic "N fps" format
            if 'source_fps' not in result:
                fps_match = re.search(r'(\d+\.?\d*)\s*fps', lower)
                if fps_match:
                    result['source_fps'] = float(fps_match.group(1))
            return result if result else None
        except Exception as e:
            logger.debug(f"Error parsing VLC video stream info: {e}")
            return None

    def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]:
        """Parse VLC TS demux output and decoder info for audio"""
        try:
            lower = line.lower()
            result = {}
            # Codec detection from TS demux stream-type markers.
            audio_codec_map = {
                ('type=0xf', 'adts'): "aac",
                ('type=0x03', 'type=0x04'): "mp3",
                ('type=0x06', 'type=0x81'): "ac3",
                ('type=0x0b', 'lpcm'): "pcm"
            }
            for patterns, codec in audio_codec_map.items():
                if any(p in lower for p in patterns):
                    result['audio_codec'] = codec
                    break
            # VLC decoder format: "AAC channels: 2 samplerate: 48000"
            if 'channels:' in lower:
                channels_match = re.search(r'channels:\s*(\d+)', lower)
                if channels_match:
                    num_channels = int(channels_match.group(1))
                    # Convert channel count to a conventional layout name.
                    channel_names = {1: 'mono', 2: 'stereo', 6: '5.1', 8: '7.1'}
                    result['audio_channels'] = channel_names.get(num_channels, str(num_channels))
            if 'samplerate:' in lower:
                samplerate_match = re.search(r'samplerate:\s*(\d+)', lower)
                if samplerate_match:
                    result['sample_rate'] = int(samplerate_match.group(1))
            # Try to extract sample rate (generic "NNNNN Hz" format)
            sample_rate_match = re.search(r'(\d+)\s*hz', lower)
            if sample_rate_match and 'sample_rate' not in result:
                result['sample_rate'] = int(sample_rate_match.group(1))
            # Try to extract channels (word format)
            if 'audio_channels' not in result:
                channel_match = re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', lower)
                if channel_match:
                    result['audio_channels'] = channel_match.group(1)
            return result if result else None
        except Exception as e:
            # Log at debug like the other parsers — parse failures are
            # expected noise, not errors (was logger.error with a leftover
            # "[VLC AUDIO PARSER]" debugging prefix).
            logger.debug(f"Error parsing VLC audio stream info: {e}")
            return None
class StreamlinkLogParser(BaseLogParser):
    """Parser for Streamlink log output.

    Streamlink only reports a quality token (e.g. '720p' or '1280x720'),
    so video details are derived from that token rather than probed.
    """

    STREAM_TYPE_METHODS = {
        'streamlink': 'parse_video_stream'
    }

    def can_parse(self, line: str) -> Optional[str]:
        """Return 'streamlink' for stream-selection lines, else None."""
        folded = line.lower()
        is_stream_line = 'opening stream:' in folded or 'available streams:' in folded
        return 'streamlink' if is_stream_line else None

    def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]:
        # Streamlink output carries no container-format information.
        return None

    def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]:
        """Derive resolution info from a Streamlink quality token."""
        try:
            token_match = re.search(r'(\d+p|\d+x\d+)', line)
            if not token_match:
                return None
            token = token_match.group(1)
            if 'x' in token:
                # Explicit WIDTHxHEIGHT token.
                resolution = token
                width, height = (int(part) for part in token.split('x'))
            else:
                # Named quality label; unknown labels fall back to 1080p.
                by_label = {
                    '2160p': ('3840x2160', 3840, 2160),
                    '1080p': ('1920x1080', 1920, 1080),
                    '720p': ('1280x720', 1280, 720),
                    '480p': ('854x480', 854, 480),
                    '360p': ('640x360', 640, 360)
                }
                resolution, width, height = by_label.get(token, ('1920x1080', 1920, 1080))
            # Streamlink does not expose codec details; assume common defaults.
            return {
                'video_codec': 'h264',
                'resolution': resolution,
                'width': width,
                'height': height,
                'pixel_format': 'yuv420p'
            }
        except Exception as e:
            logger.debug(f"Error parsing Streamlink video info: {e}")
            return None

    def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]:
        # Streamlink logs contain no audio stream details.
        return None
class LogParserFactory:
    """Routes log lines to the parser that understands them.

    Holds one shared (stateless) instance of each parser, keyed by the
    tool name used by callers for direct routing.
    """

    _parsers = {
        'ffmpeg': FFmpegLogParser(),
        'vlc': VLCLogParser(),
        'streamlink': StreamlinkLogParser()
    }

    @classmethod
    def _get_parser_and_method(cls, stream_type: str) -> Optional[tuple[BaseLogParser, str]]:
        """Find the (parser, method-name) pair registered for stream_type."""
        for candidate in cls._parsers.values():
            handler_name = candidate.STREAM_TYPE_METHODS.get(stream_type)
            if handler_name:
                return (candidate, handler_name)
        return None

    @classmethod
    def parse(cls, stream_type: str, line: str) -> Optional[Dict[str, Any]]:
        """Parse ``line`` according to ``stream_type``.

        Returns the parsed data dict, or None when no parser handles the
        given stream_type or parsing yields nothing.
        """
        lookup = cls._get_parser_and_method(stream_type)
        if lookup is None:
            return None
        parser, handler_name = lookup
        handler = getattr(parser, handler_name, None)
        return handler(line) if handler else None

    @classmethod
    def auto_parse(cls, line: str) -> Optional[tuple[str, Dict[str, Any]]]:
        """Detect which parser accepts ``line`` and parse it.

        Returns (stream_type, parsed_data), or None when no parser both
        recognizes the line and extracts data from it.
        """
        for candidate in cls._parsers.values():
            detected = candidate.can_parse(line)
            if not detected:
                continue
            payload = cls.parse(detected, line)
            if payload:
                return (detected, payload)
        return None

View file

@ -107,6 +107,10 @@ class StreamManager:
# Add this flag for tracking transcoding process status
self.transcode_process_active = False
# Track stream command for efficient log parser routing
self.stream_command = None
self.parser_type = None # Will be set when transcode process starts
# Add tracking for data throughput
self.bytes_processed = 0
self.last_bytes_update = time.time()
@ -476,6 +480,21 @@ class StreamManager:
# Build and start transcode command
self.transcode_cmd = stream_profile.build_command(self.url, self.user_agent)
# Store stream command for efficient log parser routing
self.stream_command = stream_profile.command
# Map actual commands to parser types for direct routing
command_to_parser = {
'ffmpeg': 'ffmpeg',
'cvlc': 'vlc',
'vlc': 'vlc',
'streamlink': 'streamlink'
}
self.parser_type = command_to_parser.get(self.stream_command.lower())
if self.parser_type:
logger.debug(f"Using {self.parser_type} parser for log parsing (command: {self.stream_command})")
else:
logger.debug(f"Unknown stream command '{self.stream_command}', will use auto-detection for log parsing")
# For UDP streams, remove any user_agent parameters from the command
if hasattr(self, 'stream_type') and self.stream_type == StreamType.UDP:
# Filter out any arguments that contain the user_agent value or related headers
@ -645,35 +664,51 @@ class StreamManager:
if content_lower.startswith('output #') or 'encoder' in content_lower:
self.ffmpeg_input_phase = False
# Only parse stream info if we're still in the input phase
if ("stream #" in content_lower and
("video:" in content_lower or "audio:" in content_lower) and
self.ffmpeg_input_phase):
# Route to appropriate parser based on known command type
from .services.log_parsers import LogParserFactory
from .services.channel_service import ChannelService
from .services.channel_service import ChannelService
if "video:" in content_lower:
ChannelService.parse_and_store_stream_info(self.channel_id, content, "video", self.current_stream_id)
elif "audio:" in content_lower:
ChannelService.parse_and_store_stream_info(self.channel_id, content, "audio", self.current_stream_id)
parse_result = None
# If we know the parser type, use direct routing for efficiency
if self.parser_type:
# Get the appropriate parser and check what it can parse
parser = LogParserFactory._parsers.get(self.parser_type)
if parser:
stream_type = parser.can_parse(content)
if stream_type:
# Parser can handle this line, parse it directly
parsed_data = LogParserFactory.parse(stream_type, content)
if parsed_data:
parse_result = (stream_type, parsed_data)
else:
# Unknown command type - use auto-detection as fallback
parse_result = LogParserFactory.auto_parse(content)
if parse_result:
stream_type, parsed_data = parse_result
# For FFmpeg, only parse during input phase
if stream_type in ['video', 'audio', 'input']:
if self.ffmpeg_input_phase:
ChannelService.parse_and_store_stream_info(self.channel_id, content, stream_type, self.current_stream_id)
else:
# VLC and Streamlink can be parsed anytime
ChannelService.parse_and_store_stream_info(self.channel_id, content, stream_type, self.current_stream_id)
# Determine log level based on content
if any(keyword in content_lower for keyword in ['error', 'failed', 'cannot', 'invalid', 'corrupt']):
logger.error(f"FFmpeg stderr for channel {self.channel_id}: {content}")
logger.error(f"Stream process error for channel {self.channel_id}: {content}")
elif any(keyword in content_lower for keyword in ['warning', 'deprecated', 'ignoring']):
logger.warning(f"FFmpeg stderr for channel {self.channel_id}: {content}")
logger.warning(f"Stream process warning for channel {self.channel_id}: {content}")
elif content.startswith('frame=') or 'fps=' in content or 'speed=' in content:
# Stats lines - log at trace level to avoid spam
logger.trace(f"FFmpeg stats for channel {self.channel_id}: {content}")
logger.trace(f"Stream stats for channel {self.channel_id}: {content}")
elif any(keyword in content_lower for keyword in ['input', 'output', 'stream', 'video', 'audio']):
# Stream info - log at info level
logger.info(f"FFmpeg info for channel {self.channel_id}: {content}")
if content.startswith('Input #0'):
# If it's input 0, parse stream info
from .services.channel_service import ChannelService
ChannelService.parse_and_store_stream_info(self.channel_id, content, "input", self.current_stream_id)
logger.info(f"Stream info for channel {self.channel_id}: {content}")
else:
# Everything else at debug level
logger.debug(f"FFmpeg stderr for channel {self.channel_id}: {content}")
logger.debug(f"Stream process output for channel {self.channel_id}: {content}")
except Exception as e:
logger.error(f"Error logging stderr content for channel {self.channel_id}: {e}")

View file

@ -462,16 +462,21 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
session.headers.update(headers)
# Make HEAD request first as it's faster and doesn't download content
head_response = session.head(
url,
timeout=timeout,
allow_redirects=True
)
head_request_success = True
try:
head_response = session.head(
url,
timeout=timeout,
allow_redirects=True
)
except requests.exceptions.RequestException as e:
head_request_success = False
logger.warning(f"Request error (HEAD), assuming HEAD not supported: {str(e)}")
# If HEAD not supported, server will return 405 or other error
if 200 <= head_response.status_code < 300:
if head_request_success and (200 <= head_response.status_code < 300):
# HEAD request successful
return True, head_response.url, head_response.status_code, "Valid (HEAD request)"
return True, url, head_response.status_code, "Valid (HEAD request)"
# Try a GET request with stream=True to avoid downloading all content
get_response = session.get(
@ -484,7 +489,7 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
# IMPORTANT: Check status code first before checking content
if not (200 <= get_response.status_code < 300):
logger.warning(f"Stream validation failed with HTTP status {get_response.status_code}")
return False, get_response.url, get_response.status_code, f"Invalid HTTP status: {get_response.status_code}"
return False, url, get_response.status_code, f"Invalid HTTP status: {get_response.status_code}"
# Only check content if status code is valid
try:
@ -538,7 +543,7 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
get_response.close()
# If we have content, consider it valid even with unrecognized content type
return is_valid, get_response.url, get_response.status_code, message
return is_valid, url, get_response.status_code, message
except requests.exceptions.Timeout:
return False, url, 0, "Timeout connecting to stream"

View file

@ -142,8 +142,12 @@ class CoreSettingsViewSet(viewsets.ModelViewSet):
},
status=status.HTTP_200_OK,
)
return Response(in_network, status=status.HTTP_200_OK)
response_data = {
**in_network,
"client_ip": str(client_ip)
}
return Response(response_data, status=status.HTTP_200_OK)
return Response({}, status=status.HTTP_200_OK)

View file

@ -23,7 +23,7 @@
"model": "core.streamprofile",
"pk": 1,
"fields": {
"name": "ffmpeg",
"name": "FFmpeg",
"command": "ffmpeg",
"parameters": "-i {streamUrl} -c:v copy -c:a copy -f mpegts pipe:1",
"is_active": true,
@ -34,11 +34,22 @@
"model": "core.streamprofile",
"pk": 2,
"fields": {
"name": "streamlink",
"name": "Streamlink",
"command": "streamlink",
"parameters": "{streamUrl} best --stdout",
"is_active": true,
"user_agent": "1"
}
},
{
"model": "core.streamprofile",
"pk": 3,
"fields": {
"name": "VLC",
"command": "cvlc",
"parameters": "-vv -I dummy --no-video-title-show --http-user-agent {userAgent} {streamUrl} --sout #standard{access=file,mux=ts,dst=-}",
"is_active": true,
"user_agent": "1"
}
}
]

View file

@ -0,0 +1,42 @@
# Generated migration to add VLC stream profile
from django.db import migrations
def add_vlc_profile(apps, schema_editor):
    """Seed a locked, read-only VLC stream profile (forward migration).

    Mirrors the fixture entry: headless cvlc muxing MPEG-TS to stdout.
    Idempotent — does nothing if a profile named "VLC" already exists.
    """
    stream_profile_model = apps.get_model("core", "StreamProfile")
    user_agent_model = apps.get_model("core", "UserAgent")

    # Already present: nothing to do (keeps re-runs safe).
    if stream_profile_model.objects.filter(name="VLC").exists():
        return

    # Prefer the TiviMate user agent (expected at pk=1); otherwise fall
    # back to any available user agent.
    try:
        agent = user_agent_model.objects.get(pk=1)
    except user_agent_model.DoesNotExist:
        agent = user_agent_model.objects.first()
        if not agent:
            # No user agents exist at all — the profile cannot be created.
            return

    stream_profile_model.objects.create(
        name="VLC",
        command="cvlc",
        parameters="-vv -I dummy --no-video-title-show --http-user-agent {userAgent} {streamUrl} --sout #standard{access=file,mux=ts,dst=-}",
        is_active=True,
        user_agent=agent,
        locked=True,  # Make it read-only like ffmpeg/streamlink
    )
def remove_vlc_profile(apps, schema_editor):
    """Reverse migration: delete any stream profile named "VLC"."""
    apps.get_model("core", "StreamProfile").objects.filter(name="VLC").delete()
class Migration(migrations.Migration):
    # Runs after the latest core migration so both StreamProfile and
    # UserAgent are available in their current historical state.
    dependencies = [
        ('core', '0018_alter_systemevent_event_type'),
    ]

    # Data-only migration: RunPython pairs the forward seed function with
    # its reverse so the migration can be unapplied cleanly.
    operations = [
        migrations.RunPython(add_vlc_profile, remove_vlc_profile),
    ]

View file

@ -15,7 +15,8 @@ RUN apt-get update && apt-get install --no-install-recommends -y \
python-is-python3 python3-pip \
libpcre3 libpcre3-dev libpq-dev procps \
build-essential gcc pciutils \
nginx streamlink comskip\
nginx streamlink comskip \
vlc-bin vlc-plugin-base \
&& apt-get clean && rm -rf /var/lib/apt/lists/*
# --- Create Python virtual environment ---

View file

@ -36,6 +36,14 @@ if ! [[ "$DISPATCHARR_PORT" =~ ^[0-9]+$ ]]; then
fi
sed -i "s/NGINX_PORT/${DISPATCHARR_PORT}/g" /etc/nginx/sites-enabled/default
# Configure nginx based on IPv6 availability
if ip -6 addr show | grep -q "inet6"; then
echo "✅ IPv6 is available, enabling IPv6 in nginx"
else
echo "⚠️ IPv6 not available, disabling IPv6 in nginx"
sed -i '/listen \[::\]:/d' /etc/nginx/sites-enabled/default
fi
# NOTE: mac doesn't run as root, so only manage permissions
# if this script is running as root
if [ "$(id -u)" = "0" ]; then

View file

@ -36,7 +36,7 @@
"model": "core.streamprofile",
"pk": 1,
"fields": {
"profile_name": "ffmpeg",
"profile_name": "FFmpeg",
"command": "ffmpeg",
"parameters": "-i {streamUrl} -c:a copy -c:v copy -f mpegts pipe:1",
"is_active": true,
@ -46,13 +46,23 @@
{
"model": "core.streamprofile",
"fields": {
"profile_name": "streamlink",
"profile_name": "Streamlink",
"command": "streamlink",
"parameters": "{streamUrl} best --stdout",
"is_active": true,
"user_agent": "1"
}
},
{
"model": "core.streamprofile",
"fields": {
"profile_name": "VLC",
"command": "cvlc",
"parameters": "-vv -I dummy --no-video-title-show --http-user-agent {userAgent} {streamUrl} --sout #standard{access=file,mux=ts,dst=-}",
"is_active": true,
"user_agent": "1"
}
},
{
"model": "core.coresettings",
"fields": {

View file

@ -20,7 +20,6 @@ import LogosPage from './pages/Logos';
import VODsPage from './pages/VODs';
import LibraryPage from './pages/Library';
import useAuthStore from './store/auth';
import useLogosStore from './store/logos';
import FloatingVideo from './components/FloatingVideo';
import { WebsocketProvider } from './WebSocket';
import { Box, AppShell, MantineProvider } from '@mantine/core';
@ -41,8 +40,6 @@ const defaultRoute = '/channels';
const App = () => {
const [open, setOpen] = useState(true);
const [backgroundLoadingStarted, setBackgroundLoadingStarted] =
useState(false);
const isAuthenticated = useAuthStore((s) => s.isAuthenticated);
const setIsAuthenticated = useAuthStore((s) => s.setIsAuthenticated);
const logout = useAuthStore((s) => s.logout);
@ -82,11 +79,7 @@ const App = () => {
const loggedIn = await initializeAuth();
if (loggedIn) {
await initData();
// Start background logo loading after app is fully initialized (only once)
if (!backgroundLoadingStarted) {
setBackgroundLoadingStarted(true);
useLogosStore.getState().startBackgroundLoading();
}
// Logos are now loaded at the end of initData, no need for background loading
} else {
await logout();
}
@ -97,7 +90,7 @@ const App = () => {
};
checkAuth();
}, [initializeAuth, initData, logout, backgroundLoadingStarted]);
}, [initializeAuth, initData, logout]);
return (
<MantineProvider

View file

@ -0,0 +1,18 @@
import React from 'react';
class ErrorBoundary extends React.Component {
state = { hasError: false };
static getDerivedStateFromError(error) {
return { hasError: true };
}
render() {
if (this.state.hasError) {
return <div>Something went wrong</div>;
}
return this.props.children;
}
}
export default ErrorBoundary;

View file

@ -1,4 +1,4 @@
import React, { useState, useEffect, useRef } from 'react';
import React, { useState, useEffect, useRef } from 'react';
import { Skeleton } from '@mantine/core';
import useLogosStore from '../store/logos';
import logo from '../images/logo.png'; // Default logo
@ -16,15 +16,16 @@ const LazyLogo = ({
}) => {
const [isLoading, setIsLoading] = useState(false);
const [hasError, setHasError] = useState(false);
const fetchAttempted = useRef(new Set()); // Track which IDs we've already tried to fetch
const fetchAttempted = useRef(new Set());
const isMountedRef = useRef(true);
const logos = useLogosStore((s) => s.logos);
const fetchLogosByIds = useLogosStore((s) => s.fetchLogosByIds);
const allowLogoRendering = useLogosStore((s) => s.allowLogoRendering);
// Determine the logo source
const logoData = logoId && logos[logoId];
const logoSrc = logoData?.cache_url || fallbackSrc; // Only use cache URL if we have logo data
const logoSrc = logoData?.cache_url || fallbackSrc;
// Cleanup on unmount
useEffect(() => {
@ -34,6 +35,9 @@ const LazyLogo = ({
}, []);
useEffect(() => {
// Don't start fetching until logo rendering is allowed
if (!allowLogoRendering) return;
// If we have a logoId but no logo data, add it to the batch request queue
if (
logoId &&
@ -44,7 +48,7 @@ const LazyLogo = ({
isMountedRef.current
) {
setIsLoading(true);
fetchAttempted.current.add(logoId); // Mark this ID as attempted
fetchAttempted.current.add(logoId);
logoRequestQueue.add(logoId);
// Clear existing timer and set new one to batch requests
@ -82,7 +86,7 @@ const LazyLogo = ({
setIsLoading(false);
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [logoId, fetchLogosByIds, logoData]); // Include logoData to detect when it becomes available
}, [logoId, fetchLogosByIds, logoData, allowLogoRendering]);
// Reset error state when logoId changes
useEffect(() => {
@ -91,8 +95,10 @@ const LazyLogo = ({
}
}, [logoId]);
// Show skeleton while loading
if (isLoading && !logoData) {
// Show skeleton if:
// 1. Logo rendering is not allowed yet, OR
// 2. We don't have logo data yet (regardless of loading state)
if (logoId && (!allowLogoRendering || !logoData)) {
return (
<Skeleton
height={style.maxHeight || 18}

View file

@ -0,0 +1,26 @@
import { Text, } from '@mantine/core';
// Short preview that triggers the details modal when clicked
const RecordingSynopsis = ({ description, onOpen }) => {
const truncated = description?.length > 140;
const preview = truncated
? `${description.slice(0, 140).trim()}...`
: description;
if (!description) return null;
return (
<Text
size="xs"
c="dimmed"
lineClamp={2}
title={description}
onClick={() => onOpen?.()}
style={{ cursor: 'pointer' }}
>
{preview}
</Text>
);
};
export default RecordingSynopsis;

View file

@ -0,0 +1,422 @@
import useChannelsStore from '../../store/channels.jsx';
import useSettingsStore from '../../store/settings.jsx';
import useVideoStore from '../../store/useVideoStore.jsx';
import { useDateTimeFormat, useTimeHelpers } from '../../utils/dateTimeUtils.js';
import { notifications } from '@mantine/notifications';
import React from 'react';
import {
ActionIcon,
Badge,
Box,
Button,
Card,
Center,
Flex,
Group,
Image,
Modal,
Stack,
Text,
Tooltip,
} from '@mantine/core';
import { AlertTriangle, SquareX } from 'lucide-react';
import RecordingSynopsis from '../RecordingSynopsis';
import {
deleteRecordingById,
deleteSeriesAndRule,
getPosterUrl,
getRecordingUrl,
getSeasonLabel,
getSeriesInfo,
getShowVideoUrl,
removeRecording,
runComSkip,
} from './../../utils/cards/RecordingCardUtils.js';
// Card for a single recording (or a grouped series of upcoming recordings).
// Shows status, poster/logo, channel and time info, and exposes watch /
// cancel / comskip actions. Clicking the card opens the details modal (or
// the recurring-rule modal when the recording belongs to a recurring rule).
const RecordingCard = ({ recording, onOpenDetails, onOpenRecurring }) => {
  const channels = useChannelsStore((s) => s.channels);
  const env_mode = useSettingsStore((s) => s.environment.env_mode);
  const showVideo = useVideoStore((s) => s.showVideo);
  const fetchRecordings = useChannelsStore((s) => s.fetchRecordings);
  const { toUserTime, userNow } = useTimeHelpers();
  const [timeformat, dateformat] = useDateTimeFormat();
  const channel = channels?.[recording.channel];
  const customProps = recording.custom_properties || {};
  const program = customProps.program || {};
  const recordingName = program.title || 'Custom Recording';
  const subTitle = program.sub_title || '';
  const description = program.description || customProps.description || '';
  const isRecurringRule = customProps?.rule?.type === 'recurring';
  // Poster or channel logo
  const posterUrl = getPosterUrl(
    customProps.poster_logo_id, customProps, channel?.logo?.cache_url, env_mode);
  // Times converted to the user's timezone for display and status checks.
  const start = toUserTime(recording.start_time);
  const end = toUserTime(recording.end_time);
  const now = userNow();
  const status = customProps.status;
  const isTimeActive = now.isAfter(start) && now.isBefore(end);
  const isInterrupted = status === 'interrupted';
  const isInProgress = isTimeActive; // Show as recording by time, regardless of status glitches
  const isUpcoming = now.isBefore(start);
  // A "series group" card represents several grouped upcoming recordings.
  const isSeriesGroup = Boolean(
    recording._group_count && recording._group_count > 1
  );
  // Season/Episode display if present
  const season = customProps.season ?? program?.custom_properties?.season;
  const episode = customProps.episode ?? program?.custom_properties?.episode;
  const onscreen =
    customProps.onscreen_episode ??
    program?.custom_properties?.onscreen_episode;
  const seLabel = getSeasonLabel(season, episode, onscreen);

  const handleWatchLive = () => {
    if (!channel) return;
    showVideo(getShowVideoUrl(channel, env_mode), 'live');
  };

  const handleWatchRecording = () => {
    // Only enable if backend provides a playable file URL in custom properties
    const fileUrl = getRecordingUrl(customProps, env_mode);
    if (!fileUrl) return;
    showVideo(fileUrl, 'vod', {
      name: recordingName,
      logo: { url: posterUrl },
    });
  };

  // Queue a comskip (commercial-removal) job for this recording; errors are
  // logged but not surfaced beyond the console.
  const handleRunComskip = async (e) => {
    e?.stopPropagation?.();
    try {
      await runComSkip(recording);
      notifications.show({
        title: 'Removing commercials',
        message: 'Queued comskip for this recording',
        color: 'blue.5',
        autoClose: 2000,
      });
    } catch (error) {
      console.error('Failed to queue comskip for recording', error);
    }
  };

  // Cancel handling for series groups
  const [cancelOpen, setCancelOpen] = React.useState(false);
  const [busy, setBusy] = React.useState(false);

  // Recurring rules open the recurring modal, series groups open the
  // cancel-choice modal, and single recordings are removed directly.
  const handleCancelClick = (e) => {
    e.stopPropagation();
    if (isRecurringRule) {
      onOpenRecurring?.(recording, true);
      return;
    }
    if (isSeriesGroup) {
      setCancelOpen(true);
    } else {
      removeRecording(recording.id);
    }
  };

  const seriesInfo = getSeriesInfo(customProps);

  // Delete only this upcoming occurrence, then refresh the list.
  const removeUpcomingOnly = async () => {
    try {
      setBusy(true);
      await deleteRecordingById(recording.id);
    } finally {
      setBusy(false);
      setCancelOpen(false);
      try {
        await fetchRecordings();
      } catch (error) {
        console.error('Failed to refresh recordings', error);
      }
    }
  };

  // Delete the whole series together with its rule, then refresh.
  const removeSeriesAndRule = async () => {
    try {
      setBusy(true);
      await deleteSeriesAndRule(seriesInfo);
    } finally {
      setBusy(false);
      setCancelOpen(false);
      try {
        await fetchRecordings();
      } catch (error) {
        console.error(
          'Failed to refresh recordings after series removal',
          error
        );
      }
    }
  };

  const handleOnMainCardClick = () => {
    if (isRecurringRule) {
      onOpenRecurring?.(recording, false);
    } else {
      onOpenDetails?.(recording);
    }
  }

  const WatchLive = () => {
    return <Button
      size="xs"
      variant="light"
      onClick={(e) => {
        e.stopPropagation();
        handleWatchLive();
      }}
    >
      Watch Live
    </Button>;
  }

  // Disabled while the recording is still in progress or no file URL exists.
  const WatchRecording = () => {
    return <Tooltip
      label={
        customProps.file_url || customProps.output_file_url
          ? 'Watch recording'
          : 'Recording playback not available yet'
      }
    >
      <Button
        size="xs"
        variant="default"
        onClick={(e) => {
          e.stopPropagation();
          handleWatchRecording();
        }}
        disabled={
          customProps.status === 'recording' || !(customProps.file_url || customProps.output_file_url)
        }
      >
        Watch
      </Button>
    </Tooltip>;
  }

  const MainCard = (
    <Card
      shadow="sm"
      padding="md"
      radius="md"
      withBorder
      style={{
        color: '#fff',
        backgroundColor: isInterrupted ? '#2b1f20' : '#27272A',
        borderColor: isInterrupted ? '#a33' : undefined,
        height: '100%',
        cursor: 'pointer',
      }}
      onClick={handleOnMainCardClick}
    >
      <Flex justify="space-between" align="center" pb={8}>
        <Group gap={8} flex={1} miw={0}>
          <Badge
            color={
              isInterrupted
                ? 'red.7'
                : isInProgress
                  ? 'red.6'
                  : isUpcoming
                    ? 'yellow.6'
                    : 'gray.6'
            }
          >
            {isInterrupted
              ? 'Interrupted'
              : isInProgress
                ? 'Recording'
                : isUpcoming
                  ? 'Scheduled'
                  : 'Completed'}
          </Badge>
          {isInterrupted && <AlertTriangle size={16} color="#ffa94d" />}
          <Stack gap={2} flex={1} miw={0}>
            <Group gap={8} wrap="nowrap">
              <Text fw={600} lineClamp={1} title={recordingName}>
                {recordingName}
              </Text>
              {isSeriesGroup && (
                <Badge color="teal" variant="filled">
                  Series
                </Badge>
              )}
              {isRecurringRule && (
                <Badge color="blue" variant="light">
                  Recurring
                </Badge>
              )}
              {seLabel && !isSeriesGroup && (
                <Badge color="gray" variant="light">
                  {seLabel}
                </Badge>
              )}
            </Group>
          </Stack>
        </Group>
        <Center>
          <Tooltip label={isUpcoming || isInProgress ? 'Cancel' : 'Delete'}>
            <ActionIcon
              variant="transparent"
              color="red.9"
              onMouseDown={(e) => e.stopPropagation()}
              onClick={handleCancelClick}
            >
              <SquareX size="20" />
            </ActionIcon>
          </Tooltip>
        </Center>
      </Flex>
      <Flex gap="sm" align="center">
        <Image
          src={posterUrl}
          w={64}
          h={64}
          fit="contain"
          radius="sm"
          alt={recordingName}
          fallbackSrc="/logo.png"
        />
        <Stack gap={6} flex={1}>
          {!isSeriesGroup && subTitle && (
            <Group justify="space-between">
              <Text size="sm" c="dimmed">
                Episode
              </Text>
              <Text size="sm" fw={700} title={subTitle}>
                {subTitle}
              </Text>
            </Group>
          )}
          <Group justify="space-between">
            <Text size="sm" c="dimmed">
              Channel
            </Text>
            <Text size="sm">
              {channel ? `${channel.channel_number}${channel.name}` : '—'}
            </Text>
          </Group>
          <Group justify="space-between">
            <Text size="sm" c="dimmed">
              {isSeriesGroup ? 'Next recording' : 'Time'}
            </Text>
            <Text size="sm">
              {start.format(`${dateformat}, YYYY ${timeformat}`)} {end.format(timeformat)}
            </Text>
          </Group>
          {!isSeriesGroup && description && (
            <RecordingSynopsis
              description={description}
              onOpen={() => onOpenDetails?.(recording)}
            />
          )}
          {isInterrupted && customProps.interrupted_reason && (
            <Text size="xs" c="red.4">
              {customProps.interrupted_reason}
            </Text>
          )}
          <Group justify="flex-end" gap="xs" pt={4}>
            {isInProgress && <WatchLive />}
            {!isUpcoming && <WatchRecording />}
            {!isUpcoming &&
              customProps?.status === 'completed' &&
              (!customProps?.comskip ||
                customProps?.comskip?.status !== 'completed') && (
                <Button
                  size="xs"
                  variant="light"
                  color="teal"
                  onClick={handleRunComskip}
                >
                  Remove commercials
                </Button>
              )}
          </Group>
        </Stack>
      </Flex>
      {/* If this card is a grouped upcoming series, show count */}
      {recording._group_count > 1 && (
        <Text
          size="xs"
          c="dimmed"
          style={{ position: 'absolute', bottom: 6, right: 12 }}
        >
          Next of {recording._group_count}
        </Text>
      )}
    </Card>
  );

  if (!isSeriesGroup) return MainCard;

  // Stacked look for series groups: render two shadow layers behind the main card
  return (
    <Box style={{ position: 'relative' }}>
      <Modal
        opened={cancelOpen}
        onClose={() => setCancelOpen(false)}
        title="Cancel Series"
        centered
        size="md"
        zIndex={9999}
      >
        <Stack gap="sm">
          <Text>This is a series rule. What would you like to cancel?</Text>
          <Group justify="flex-end">
            <Button
              variant="default"
              loading={busy}
              onClick={removeUpcomingOnly}
            >
              Only this upcoming
            </Button>
            <Button color="red" loading={busy} onClick={removeSeriesAndRule}>
              Entire series + rule
            </Button>
          </Group>
        </Stack>
      </Modal>
      <Box
        style={{
          position: 'absolute',
          inset: 0,
          transform: 'translate(10px, 10px) rotate(-1deg)',
          borderRadius: 12,
          backgroundColor: '#1f1f23',
          border: '1px solid #2f2f34',
          boxShadow: '0 6px 18px rgba(0,0,0,0.35)',
          pointerEvents: 'none',
          zIndex: 0,
        }}
      />
      <Box
        style={{
          position: 'absolute',
          inset: 0,
          transform: 'translate(5px, 5px) rotate(1deg)',
          borderRadius: 12,
          backgroundColor: '#232327',
          border: '1px solid #333',
          boxShadow: '0 4px 12px rgba(0,0,0,0.30)',
          pointerEvents: 'none',
          zIndex: 1,
        }}
      />
      <Box style={{ position: 'relative', zIndex: 2 }}>{MainCard}</Box>
    </Box>
  );
};
export default RecordingCard;

View file

@ -96,28 +96,30 @@ const LiveGroupFilter = ({
}
setGroupStates(
playlist.channel_groups.map((group) => {
// Parse custom_properties if present
let customProps = {};
if (group.custom_properties) {
try {
customProps =
typeof group.custom_properties === 'string'
? JSON.parse(group.custom_properties)
: group.custom_properties;
} catch {
customProps = {};
playlist.channel_groups
.filter((group) => channelGroups[group.channel_group]) // Filter out groups that don't exist
.map((group) => {
// Parse custom_properties if present
let customProps = {};
if (group.custom_properties) {
try {
customProps =
typeof group.custom_properties === 'string'
? JSON.parse(group.custom_properties)
: group.custom_properties;
} catch {
customProps = {};
}
}
}
return {
...group,
name: channelGroups[group.channel_group].name,
auto_channel_sync: group.auto_channel_sync || false,
auto_sync_channel_start: group.auto_sync_channel_start || 1.0,
custom_properties: customProps,
original_enabled: group.enabled,
};
})
return {
...group,
name: channelGroups[group.channel_group].name,
auto_channel_sync: group.auto_channel_sync || false,
auto_sync_channel_start: group.auto_sync_channel_start || 1.0,
custom_properties: customProps,
original_enabled: group.enabled,
};
})
);
}, [playlist, channelGroups]);

View file

@ -0,0 +1,362 @@
import useChannelsStore from '../../store/channels.jsx';
import { useDateTimeFormat, useTimeHelpers } from '../../utils/dateTimeUtils.js';
import React from 'react';
import { Badge, Button, Card, Flex, Group, Image, Modal, Stack, Text, } from '@mantine/core';
import useVideoStore from '../../store/useVideoStore.jsx';
import { notifications } from '@mantine/notifications';
import {
deleteRecordingById,
getPosterUrl,
getRecordingUrl,
getSeasonLabel,
getShowVideoUrl,
runComSkip,
} from '../../utils/cards/RecordingCardUtils.js';
import {
getRating,
getStatRows,
getUpcomingEpisodes,
} from '../../utils/forms/RecordingDetailsModalUtils.js';
// Modal showing the details of a recording. For a grouped series it lists
// upcoming episodes (each opening a nested instance of this modal); for a
// single recording it shows a movie-style layout with poster, rating,
// synopsis, stream stats, and watch/edit/comskip actions.
const RecordingDetailsModal = ({
  opened,
  onClose,
  recording,
  channel,
  posterUrl,
  onWatchLive,
  onWatchRecording,
  env_mode,
  onEdit,
}) => {
  const allRecordings = useChannelsStore((s) => s.recordings);
  const channelMap = useChannelsStore((s) => s.channels);
  const { toUserTime, userNow } = useTimeHelpers();
  // Nested-modal state for an episode selected from a series list.
  const [childOpen, setChildOpen] = React.useState(false);
  const [childRec, setChildRec] = React.useState(null);
  const [timeformat, dateformat] = useDateTimeFormat();
  // Hooks must run unconditionally, so derive from a safe fallback object;
  // the `recording` null-guard happens further below, after all hooks.
  const safeRecording = recording || {};
  const customProps = safeRecording.custom_properties || {};
  const program = customProps.program || {};
  const recordingName = program.title || 'Custom Recording';
  const description = program.description || customProps.description || '';
  const start = toUserTime(safeRecording.start_time);
  const end = toUserTime(safeRecording.end_time);
  const stats = customProps.stream_info || {};
  const statRows = getStatRows(stats);
  // Rating (if available)
  const rating = getRating(customProps, program);
  const ratingSystem = customProps.rating_system || 'MPAA';
  const fileUrl = customProps.file_url || customProps.output_file_url;
  // Playback needs a finished (or interrupted) recording plus a file URL.
  const canWatchRecording =
    (customProps.status === 'completed' ||
      customProps.status === 'interrupted') &&
    Boolean(fileUrl);
  const isSeriesGroup = Boolean(
    safeRecording._group_count && safeRecording._group_count > 1
  );
  const upcomingEpisodes = React.useMemo(() => {
    return getUpcomingEpisodes(isSeriesGroup, allRecordings, program, toUserTime, userNow);
  }, [
    allRecordings,
    isSeriesGroup,
    program.tvg_id,
    program.title,
    toUserTime,
    userNow,
  ]);

  // Watch-live handler for the currently selected child episode; only acts
  // while the episode is airing and its channel is known.
  const handleOnWatchLive = () => {
    const rec = childRec;
    const now = userNow();
    const s = toUserTime(rec.start_time);
    const e = toUserTime(rec.end_time);
    if (now.isAfter(s) && now.isBefore(e)) {
      if (!channelMap[rec.channel]) return;
      useVideoStore.getState().showVideo(getShowVideoUrl(channelMap[rec.channel], env_mode), 'live');
    }
  }

  // VOD playback for the selected child episode, using its recording file.
  const handleOnWatchRecording = () => {
    let fileUrl = getRecordingUrl(childRec.custom_properties, env_mode)
    if (!fileUrl) return;
    useVideoStore.getState().showVideo(fileUrl, 'vod', {
      name:
        childRec.custom_properties?.program?.title || 'Recording',
      logo: {
        url: getPosterUrl(
          childRec.custom_properties?.poster_logo_id,
          undefined,
          channelMap[childRec.channel]?.logo?.cache_url
        )
      },
    });
  }

  // Queue a comskip (commercial-removal) job; errors only hit the console.
  const handleRunComskip = async (e) => {
    e.stopPropagation?.();
    try {
      await runComSkip(recording)
      notifications.show({
        title: 'Removing commercials',
        message: 'Queued comskip for this recording',
        color: 'blue.5',
        autoClose: 2000,
      });
    } catch (error) {
      console.error('Failed to run comskip', error);
    }
  }

  // Null-guard after all hooks so the hook order stays stable.
  if (!recording) return null;

  // One row in the series view: compact card for an upcoming episode with
  // a Remove button; clicking the card opens it in a nested details modal.
  const EpisodeRow = ({ rec }) => {
    const cp = rec.custom_properties || {};
    const pr = cp.program || {};
    const start = toUserTime(rec.start_time);
    const end = toUserTime(rec.end_time);
    const season = cp.season ?? pr?.custom_properties?.season;
    const episode = cp.episode ?? pr?.custom_properties?.episode;
    const onscreen =
      cp.onscreen_episode ?? pr?.custom_properties?.onscreen_episode;
    const se = getSeasonLabel(season, episode, onscreen);
    const posterLogoId = cp.poster_logo_id;
    const purl = getPosterUrl(posterLogoId, cp, posterUrl);
    // Delete the episode, then refresh; each step logs its own failure.
    const onRemove = async (e) => {
      e?.stopPropagation?.();
      try {
        await deleteRecordingById(rec.id);
      } catch (error) {
        console.error('Failed to delete upcoming recording', error);
      }
      try {
        await useChannelsStore.getState().fetchRecordings();
      } catch (error) {
        console.error('Failed to refresh recordings after delete', error);
      }
    };
    const handleOnMainCardClick = () => {
      setChildRec(rec);
      setChildOpen(true);
    }
    return (
      <Card
        withBorder
        radius="md"
        padding="sm"
        style={{ backgroundColor: '#27272A', cursor: 'pointer' }}
        onClick={handleOnMainCardClick}
      >
        <Flex gap="sm" align="center">
          <Image
            src={purl}
            w={64}
            h={64}
            fit="contain"
            radius="sm"
            alt={pr.title || recordingName}
            fallbackSrc="/logo.png"
          />
          <Stack gap={4} flex={1}>
            <Group justify="space-between">
              <Text
                fw={600}
                size="sm"
                lineClamp={1}
                title={pr.sub_title || pr.title}
              >
                {pr.sub_title || pr.title}
              </Text>
              {se && (
                <Badge color="gray" variant="light">
                  {se}
                </Badge>
              )}
            </Group>
            <Text size="xs">
              {start.format(`${dateformat}, YYYY ${timeformat}`)} {end.format(timeformat)}
            </Text>
          </Stack>
          <Group gap={6}>
            <Button size="xs" color="red" variant="light" onClick={onRemove}>
              Remove
            </Button>
          </Group>
        </Flex>
      </Card>
    );
  };

  const WatchLive = () => {
    return <Button
      size="xs"
      variant="light"
      onClick={(e) => {
        e.stopPropagation?.();
        onWatchLive();
      }}
    >
      Watch Live
    </Button>;
  }

  const WatchRecording = () => {
    return <Button
      size="xs"
      variant="default"
      onClick={(e) => {
        e.stopPropagation?.();
        onWatchRecording();
      }}
      disabled={!canWatchRecording}
    >
      Watch
    </Button>;
  }

  const Edit = () => {
    return <Button
      size="xs"
      variant="light"
      color="blue"
      onClick={(e) => {
        e.stopPropagation?.();
        onEdit(recording);
      }}
    >
      Edit
    </Button>;
  }

  // Series layout: list of upcoming episodes plus the nested child modal.
  const Series = () => {
    return <Stack gap={10}>
      {upcomingEpisodes.length === 0 && (
        <Text size="sm" c="dimmed">
          No upcoming episodes found
        </Text>
      )}
      {upcomingEpisodes.map((ep) => (
        <EpisodeRow key={`ep-${ep.id}`} rec={ep} />
      ))}
      {childOpen && childRec && (
        <RecordingDetailsModal
          opened={childOpen}
          onClose={() => setChildOpen(false)}
          recording={childRec}
          channel={channelMap[childRec.channel]}
          posterUrl={getPosterUrl(
            childRec.custom_properties?.poster_logo_id,
            childRec.custom_properties,
            channelMap[childRec.channel]?.logo?.cache_url
          )}
          env_mode={env_mode}
          onWatchLive={handleOnWatchLive}
          onWatchRecording={handleOnWatchRecording}
        />
      )}
    </Stack>;
  }

  // Single-recording layout: poster, actions, times, rating, synopsis,
  // and any available stream stats.
  const Movie = () => {
    return <Flex gap="lg" align="flex-start">
      <Image
        src={posterUrl}
        w={180}
        h={240}
        fit="contain"
        radius="sm"
        alt={recordingName}
        fallbackSrc="/logo.png"
      />
      <Stack gap={8} style={{ flex: 1 }}>
        <Group justify="space-between" align="center">
          <Text c="dimmed" size="sm">
            {channel ? `${channel.channel_number}${channel.name}` : '—'}
          </Text>
          <Group gap={8}>
            {onWatchLive && <WatchLive />}
            {onWatchRecording && <WatchRecording />}
            {onEdit && start.isAfter(userNow()) && <Edit />}
            {customProps.status === 'completed' &&
              (!customProps?.comskip ||
                customProps?.comskip?.status !== 'completed') && (
                <Button
                  size="xs"
                  variant="light"
                  color="teal"
                  onClick={handleRunComskip}
                >
                  Remove commercials
                </Button>
              )}
          </Group>
        </Group>
        <Text size="sm">
          {start.format(`${dateformat}, YYYY ${timeformat}`)} {end.format(timeformat)}
        </Text>
        {rating && (
          <Group gap={8}>
            <Badge color="yellow" title={ratingSystem}>
              {rating}
            </Badge>
          </Group>
        )}
        {description && (
          <Text size="sm" style={{ whiteSpace: 'pre-wrap' }}>
            {description}
          </Text>
        )}
        {statRows.length > 0 && (
          <Stack gap={4} pt={6}>
            <Text fw={600} size="sm">
              Stream Stats
            </Text>
            {statRows.map(([k, v]) => (
              <Group key={k} justify="space-between">
                <Text size="xs" c="dimmed">
                  {k}
                </Text>
                <Text size="xs">{v}</Text>
              </Group>
            ))}
          </Stack>
        )}
      </Stack>
    </Flex>;
  }

  return (
    <Modal
      opened={opened}
      onClose={onClose}
      title={
        isSeriesGroup
          ? `Series: ${recordingName}`
          : `${recordingName}${program.sub_title ? ` - ${program.sub_title}` : ''}`
      }
      size="lg"
      centered
      radius="md"
      zIndex={9999}
      overlayProps={{ color: '#000', backgroundOpacity: 0.55, blur: 0 }}
      styles={{
        content: { backgroundColor: '#18181B', color: 'white' },
        header: { backgroundColor: '#18181B', color: 'white' },
        title: { color: 'white' },
      }}
    >
      {isSeriesGroup ? <Series /> : <Movie />}
    </Modal>
  );
};
export default RecordingDetailsModal;

View file

@ -0,0 +1,381 @@
import useChannelsStore from '../../store/channels.jsx';
import {
parseDate,
RECURRING_DAY_OPTIONS,
toTimeString,
useDateTimeFormat,
useTimeHelpers,
} from '../../utils/dateTimeUtils.js';
import React, { useEffect, useMemo, useState } from 'react';
import { useForm } from '@mantine/form';
import dayjs from 'dayjs';
import { notifications } from '@mantine/notifications';
import { Badge, Button, Card, Group, Modal, MultiSelect, Select, Stack, Switch, Text, TextInput } from '@mantine/core';
import { DatePickerInput, TimeInput } from '@mantine/dates';
import { deleteRecordingById } from '../../utils/cards/RecordingCardUtils.js';
import {
deleteRecurringRuleById,
getChannelOptions,
getUpcomingOccurrences,
updateRecurringRule,
updateRecurringRuleEnabled,
} from '../../utils/forms/RecurringRuleModalUtils.js';
/**
 * Modal for viewing and editing a single recurring recording rule.
 *
 * Shows an enable/pause switch, a schedule form (channel, rule name, days of
 * week, date range, start/end times), delete action, and the list of upcoming
 * occurrences — each of which can be edited individually (delegated to the
 * parent via onEditOccurrence) or cancelled.
 *
 * Props:
 * - opened: whether the modal is visible
 * - onClose: callback to close the modal
 * - ruleId: id of the recurring rule, looked up in the channels store
 * - onEditOccurrence: optional callback invoked with a single occurrence when
 *   the user chooses to edit that airing
 */
const RecurringRuleModal = ({ opened, onClose, ruleId, onEditOccurrence }) => {
  // Store selectors: channel map, the recurring rules, recordings, and their
  // fetchers (used to refresh after mutations).
  const channels = useChannelsStore((s) => s.channels);
  const recurringRules = useChannelsStore((s) => s.recurringRules);
  const fetchRecurringRules = useChannelsStore((s) => s.fetchRecurringRules);
  const fetchRecordings = useChannelsStore((s) => s.fetchRecordings);
  const recordings = useChannelsStore((s) => s.recordings);

  const { toUserTime, userNow } = useTimeHelpers();
  const [timeformat, dateformat] = useDateTimeFormat();

  // Busy flags: whole-rule save/toggle, rule deletion, and the id of the one
  // occurrence currently being cancelled (to show a per-row spinner).
  const [saving, setSaving] = useState(false);
  const [deleting, setDeleting] = useState(false);
  const [busyOccurrence, setBusyOccurrence] = useState(null);

  // The rule being edited; undefined when it no longer exists in the store.
  const rule = recurringRules.find((r) => r.id === ruleId);

  const channelOptions = useMemo(() => {
    return getChannelOptions(channels);
  }, [channels]);

  // Controlled Mantine form mirroring the rule's schedule fields.
  const form = useForm({
    mode: 'controlled',
    initialValues: {
      channel_id: '',
      days_of_week: [],
      rule_name: '',
      start_time: dayjs().startOf('hour').format('HH:mm'),
      end_time: dayjs().startOf('hour').add(1, 'hour').format('HH:mm'),
      start_date: dayjs().toDate(),
      end_date: dayjs().toDate(),
      enabled: true,
    },
    validate: {
      channel_id: (value) => (value ? null : 'Select a channel'),
      days_of_week: (value) =>
        value && value.length ? null : 'Pick at least one day',
      end_time: (value, values) => {
        if (!value) return 'Select an end time';
        // Accept both 24h and 12h renderings when comparing the two times.
        const startValue = dayjs(
          values.start_time,
          ['HH:mm', 'hh:mm A', 'h:mm A'],
          true
        );
        const endValue = dayjs(value, ['HH:mm', 'hh:mm A', 'h:mm A'], true);
        // A zero-minute difference would produce an empty recording window.
        if (
          startValue.isValid() &&
          endValue.isValid() &&
          endValue.diff(startValue, 'minute') === 0
        ) {
          return 'End time must differ from start time';
        }
        return null;
      },
      end_date: (value, values) => {
        const endDate = dayjs(value);
        const startDate = dayjs(values.start_date);
        if (!value) return 'Select an end date';
        if (startDate.isValid() && endDate.isBefore(startDate, 'day')) {
          return 'End date cannot be before start date';
        }
        return null;
      },
    },
  });

  // Populate the form from the rule each time the modal opens; reset when it
  // closes or the rule is missing.
  useEffect(() => {
    if (opened && rule) {
      form.setValues({
        channel_id: `${rule.channel}`,
        days_of_week: (rule.days_of_week || []).map((d) => String(d)),
        rule_name: rule.name || '',
        start_time: toTimeString(rule.start_time),
        end_time: toTimeString(rule.end_time),
        start_date: parseDate(rule.start_date) || dayjs().toDate(),
        end_date: parseDate(rule.end_date),
        enabled: Boolean(rule.enabled),
      });
    } else {
      form.reset();
    }
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [opened, ruleId, rule]);

  // Future airings belonging to this rule, in the user's time zone.
  const upcomingOccurrences = useMemo(() => {
    return getUpcomingOccurrences(recordings, userNow, ruleId, toUserTime);
  }, [recordings, ruleId, toUserTime, userNow]);

  // Persist the edited schedule, then refresh rules and recordings together.
  const handleSave = async (values) => {
    if (!rule) return;
    setSaving(true);
    try {
      await updateRecurringRule(ruleId, values);
      await Promise.all([fetchRecurringRules(), fetchRecordings()]);
      notifications.show({
        title: 'Recurring rule updated',
        message: 'Schedule adjustments saved',
        color: 'green',
        autoClose: 2500,
      });
      onClose();
    } catch (error) {
      console.error('Failed to update recurring rule', error);
    } finally {
      setSaving(false);
    }
  };

  // Delete the rule (and thereby its future occurrences), then refresh.
  const handleDelete = async () => {
    if (!rule) return;
    setDeleting(true);
    try {
      await deleteRecurringRuleById(ruleId);
      await Promise.all([fetchRecurringRules(), fetchRecordings()]);
      notifications.show({
        title: 'Recurring rule removed',
        message: 'All future occurrences were cancelled',
        color: 'red',
        autoClose: 2500,
      });
      onClose();
    } catch (error) {
      console.error('Failed to delete recurring rule', error);
    } finally {
      setDeleting(false);
    }
  };

  // Enable/pause the rule server-side; revert the switch if the call fails.
  const handleToggleEnabled = async (checked) => {
    if (!rule) return;
    setSaving(true);
    try {
      await updateRecurringRuleEnabled(ruleId, checked);
      await Promise.all([fetchRecurringRules(), fetchRecordings()]);
      notifications.show({
        title: checked ? 'Recurring rule enabled' : 'Recurring rule paused',
        message: checked
          ? 'Future occurrences will resume'
          : 'Upcoming occurrences were removed',
        color: checked ? 'green' : 'yellow',
        autoClose: 2500,
      });
    } catch (error) {
      console.error('Failed to toggle recurring rule', error);
      // Roll back the optimistic switch state on failure.
      form.setFieldValue('enabled', !checked);
    } finally {
      setSaving(false);
    }
  };

  // Cancel one upcoming airing without touching the rule itself.
  const handleCancelOccurrence = async (occurrence) => {
    setBusyOccurrence(occurrence.id);
    try {
      await deleteRecordingById(occurrence.id);
      await fetchRecordings();
      notifications.show({
        title: 'Occurrence cancelled',
        message: 'The selected airing was removed',
        color: 'yellow',
        autoClose: 2000,
      });
    } catch (error) {
      console.error('Failed to cancel occurrence', error);
    } finally {
      setBusyOccurrence(null);
    }
  };

  // The rule may have been deleted while the modal was open; show a fallback.
  // (All hooks above have already run, so this early return is hook-safe.)
  if (!rule) {
    return (
      <Modal opened={opened} onClose={onClose} title="Recurring Rule" centered>
        <Text size="sm">Recurring rule not found.</Text>
      </Modal>
    );
  }

  // Switch is optimistic: update the form value first, then sync server-side.
  const handleEnableChange = (event) => {
    form.setFieldValue('enabled', event.currentTarget.checked);
    handleToggleEnabled(event.currentTarget.checked);
  }
  // Clearing the start date falls back to today.
  const handleStartDateChange = (value) => {
    form.setFieldValue('start_date', value || dayjs().toDate());
  }
  const handleEndDateChange = (value) => {
    form.setFieldValue('end_date', value);
  }
  // Time inputs are normalized to 'HH:mm' before being stored in the form.
  const handleStartTimeChange = (value) => {
    form.setFieldValue('start_time', toTimeString(value));
  }
  const handleEndTimeChange = (value) => {
    form.setFieldValue('end_time', toTimeString(value));
  }

  // Card list of future airings, each with Edit (delegates to the parent via
  // onEditOccurrence) and Cancel actions.
  const UpcomingList = () => {
    return <Stack gap="xs">
      {upcomingOccurrences.map((occ) => {
        const occStart = toUserTime(occ.start_time);
        const occEnd = toUserTime(occ.end_time);
        return (
          <Card
            key={`occ-${occ.id}`}
            withBorder
            padding="sm"
            radius="md"
          >
            <Group justify="space-between" align="center">
              <Stack gap={2} flex={1}>
                <Text fw={600} size="sm">
                  {occStart.format(`${dateformat}, YYYY`)}
                </Text>
                <Text size="xs" c="dimmed">
                  {occStart.format(timeformat)} {occEnd.format(timeformat)}
                </Text>
              </Stack>
              <Group gap={6}>
                <Button
                  size="xs"
                  variant="subtle"
                  onClick={() => {
                    onClose();
                    onEditOccurrence?.(occ);
                  }}
                >
                  Edit
                </Button>
                <Button
                  size="xs"
                  color="red"
                  variant="light"
                  loading={busyOccurrence === occ.id}
                  onClick={() => handleCancelOccurrence(occ)}
                >
                  Cancel
                </Button>
              </Group>
            </Group>
          </Card>
        );
      })}
    </Stack>;
  }

  return (
    <Modal
      opened={opened}
      onClose={onClose}
      title={rule.name || 'Recurring Rule'}
      size="lg"
      centered
    >
      <Stack gap="md">
        {/* Header: channel name (falls back to the raw channel id) + switch */}
        <Group justify="space-between" align="center">
          <Text fw={600}>
            {channels?.[rule.channel]?.name || `Channel ${rule.channel}`}
          </Text>
          <Switch
            size="sm"
            checked={form.values.enabled}
            onChange={handleEnableChange}
            label={form.values.enabled ? 'Enabled' : 'Paused'}
            disabled={saving}
          />
        </Group>
        <form onSubmit={form.onSubmit(handleSave)}>
          <Stack gap="md">
            <Select
              {...form.getInputProps('channel_id')}
              label="Channel"
              data={channelOptions}
              searchable
            />
            <TextInput
              {...form.getInputProps('rule_name')}
              label="Rule name"
              placeholder="Morning News, Football Sundays, ..."
            />
            <MultiSelect
              {...form.getInputProps('days_of_week')}
              label="Every"
              data={RECURRING_DAY_OPTIONS.map((opt) => ({
                value: String(opt.value),
                label: opt.label,
              }))}
              searchable
              clearable
            />
            <Group grow>
              <DatePickerInput
                label="Start date"
                value={form.values.start_date}
                onChange={handleStartDateChange}
                valueFormat="MMM D, YYYY"
              />
              <DatePickerInput
                label="End date"
                value={form.values.end_date}
                onChange={handleEndDateChange}
                valueFormat="MMM D, YYYY"
                minDate={form.values.start_date || undefined}
              />
            </Group>
            <Group grow>
              <TimeInput
                label="Start time"
                value={form.values.start_time}
                onChange={handleStartTimeChange}
                withSeconds={false}
                format="12"
                amLabel="AM"
                pmLabel="PM"
              />
              <TimeInput
                label="End time"
                value={form.values.end_time}
                onChange={handleEndTimeChange}
                withSeconds={false}
                format="12"
                amLabel="AM"
                pmLabel="PM"
              />
            </Group>
            <Group justify="space-between">
              <Button type="submit" loading={saving}>
                Save changes
              </Button>
              <Button
                color="red"
                variant="light"
                loading={deleting}
                onClick={handleDelete}
              >
                Delete rule
              </Button>
            </Group>
          </Stack>
        </form>
        {/* Upcoming occurrences list with a count badge */}
        <Stack gap="sm">
          <Group justify="space-between" align="center">
            <Text fw={600} size="sm">
              Upcoming occurrences
            </Text>
            <Badge color="blue.6">{upcomingOccurrences.length}</Badge>
          </Group>
          {upcomingOccurrences.length === 0 ? (
            <Text size="sm" c="dimmed">
              No future airings currently scheduled.
            </Text>
          ) : <UpcomingList />}
        </Stack>
      </Stack>
    </Modal>
  );
};

export default RecurringRuleModal;

View file

@ -1,6 +1,11 @@
import React, { useEffect, useMemo, useState, useCallback } from 'react';
import React, {
useEffect,
useMemo,
useState,
useCallback,
useRef,
} from 'react';
import useChannelsStore from '../../store/channels';
import useLogosStore from '../../store/logos';
import { notifications } from '@mantine/notifications';
import API from '../../api';
import ChannelForm from '../forms/Channel';
@ -219,7 +224,7 @@ const ChannelRowActions = React.memo(
}
);
const ChannelsTable = ({}) => {
const ChannelsTable = ({ onReady }) => {
// EPG data lookup
const tvgsById = useEPGsStore((s) => s.tvgsById);
const epgs = useEPGsStore((s) => s.epgs);
@ -229,6 +234,7 @@ const ChannelsTable = ({}) => {
const canDeleteChannelGroup = useChannelsStore(
(s) => s.canDeleteChannelGroup
);
const hasSignaledReady = useRef(false);
/**
* STORES
@ -254,7 +260,6 @@ const ChannelsTable = ({}) => {
const channels = useChannelsStore((s) => s.channels);
const profiles = useChannelsStore((s) => s.profiles);
const selectedProfileId = useChannelsStore((s) => s.selectedProfileId);
const logos = useLogosStore((s) => s.logos);
const [tablePrefs, setTablePrefs] = useLocalStorage('channel-table-prefs', {
pageSize: 50,
});
@ -289,6 +294,9 @@ const ChannelsTable = ({}) => {
const [selectedProfile, setSelectedProfile] = useState(
profiles[selectedProfileId]
);
const [showDisabled, setShowDisabled] = useState(true);
const [showOnlyStreamlessChannels, setShowOnlyStreamlessChannels] =
useState(false);
const [paginationString, setPaginationString] = useState('');
const [filters, setFilters] = useState({
@ -307,6 +315,8 @@ const ChannelsTable = ({}) => {
const [isBulkDelete, setIsBulkDelete] = useState(false);
const [channelToDelete, setChannelToDelete] = useState(null);
const hasFetchedData = useRef(false);
// Column sizing state for resizable columns
// Store in localStorage but with empty object as default
const [columnSizing, setColumnSizing] = useLocalStorage(
@ -361,14 +371,30 @@ const ChannelsTable = ({}) => {
});
});
const channelsTableLength =
Object.keys(data).length > 0 || hasFetchedData.current
? Object.keys(data).length
: undefined;
/**
* Functions
*/
const fetchData = useCallback(async () => {
setIsLoading(true);
const params = new URLSearchParams();
params.append('page', pagination.pageIndex + 1);
params.append('page_size', pagination.pageSize);
params.append('include_streams', 'true');
if (selectedProfileId !== '0') {
params.append('channel_profile_id', selectedProfileId);
}
if (showDisabled === true) {
params.append('show_disabled', true);
}
if (showOnlyStreamlessChannels === true) {
params.append('only_streamless', true);
}
// Apply sorting
if (sorting.length > 0) {
@ -397,11 +423,29 @@ const ChannelsTable = ({}) => {
await API.getAllChannelIds(params),
]);
setIsLoading(false);
hasFetchedData.current = true;
setTablePrefs({
pageSize: pagination.pageSize,
});
setAllRowIds(ids);
}, [pagination, sorting, debouncedFilters]);
// Signal ready after first successful data fetch
// EPG data is already loaded in initData before this component mounts
if (!hasSignaledReady.current && onReady) {
hasSignaledReady.current = true;
onReady();
}
}, [
pagination,
sorting,
debouncedFilters,
onReady,
showDisabled,
selectedProfileId,
showOnlyStreamlessChannels,
]);
const stopPropagation = useCallback((e) => {
e.stopPropagation();
@ -888,8 +932,10 @@ const ChannelsTable = ({}) => {
// columns from being recreated during drag operations (which causes infinite loops).
// The column.size values are only used for INITIAL sizing - TanStack Table manages
// the actual sizes through its own state after initialization.
// Note: logos is intentionally excluded - LazyLogo components handle their own logo data
// from the store, so we don't need to recreate columns when logos load.
// eslint-disable-next-line react-hooks/exhaustive-deps
[selectedProfileId, channelGroups, logos, theme]
[selectedProfileId, channelGroups, theme]
);
const renderHeaderCell = (header) => {
@ -1326,16 +1372,20 @@ const ChannelsTable = ({}) => {
deleteChannels={deleteChannels}
selectedTableIds={table.selectedTableIds}
table={table}
showDisabled={showDisabled}
setShowDisabled={setShowDisabled}
showOnlyStreamlessChannels={showOnlyStreamlessChannels}
setShowOnlyStreamlessChannels={setShowOnlyStreamlessChannels}
/>
{/* Table or ghost empty state inside Paper */}
<Box>
{Object.keys(channels).length === 0 && (
{channelsTableLength === 0 && (
<ChannelsTableOnboarding editChannel={editChannel} />
)}
</Box>
{Object.keys(channels).length > 0 && (
{channelsTableLength > 0 && (
<Box
style={{
display: 'flex',

View file

@ -12,20 +12,22 @@ import {
Text,
TextInput,
Tooltip,
UnstyledButton,
useMantineTheme,
} from '@mantine/core';
import {
ArrowDown01,
Binary,
Check,
CircleCheck,
Ellipsis,
EllipsisVertical,
SquareMinus,
SquarePen,
SquarePlus,
Settings,
Eye,
EyeOff,
Filter,
Square,
SquareCheck,
} from 'lucide-react';
import API from '../../../api';
import { notifications } from '@mantine/notifications';
@ -102,6 +104,10 @@ const ChannelTableHeader = ({
editChannel,
deleteChannels,
selectedTableIds,
showDisabled,
setShowDisabled,
showOnlyStreamlessChannels,
setShowOnlyStreamlessChannels,
}) => {
const theme = useMantineTheme();
@ -208,6 +214,14 @@ const ChannelTableHeader = ({
);
};
const toggleShowDisabled = () => {
setShowDisabled(!showDisabled);
};
const toggleShowOnlyStreamlessChannels = () => {
setShowOnlyStreamlessChannels(!showOnlyStreamlessChannels);
};
return (
<Group justify="space-between">
<Group gap={5} style={{ paddingLeft: 10 }}>
@ -236,6 +250,41 @@ const ChannelTableHeader = ({
}}
>
<Flex gap={6}>
<Menu shadow="md" width={200}>
<Menu.Target>
<Button size="xs" variant="default" onClick={() => {}}>
<Filter size={18} />
</Button>
</Menu.Target>
<Menu.Dropdown>
<Menu.Item
onClick={toggleShowDisabled}
leftSection={
showDisabled ? <Eye size={18} /> : <EyeOff size={18} />
}
disabled={selectedProfileId === '0'}
>
<Text size="xs">
{showDisabled ? 'Hide Disabled' : 'Show Disabled'}
</Text>
</Menu.Item>
<Menu.Item
onClick={toggleShowOnlyStreamlessChannels}
leftSection={
showOnlyStreamlessChannels ? (
<SquareCheck size={18} />
) : (
<Square size={18} />
)
}
>
<Text size="xs">Only Empty Channels</Text>
</Menu.Item>
</Menu.Dropdown>
</Menu>
<Button
leftSection={<SquarePen size={18} />}
variant="default"

View file

@ -1,4 +1,10 @@
import React, { useEffect, useMemo, useCallback, useState } from 'react';
import React, {
useEffect,
useMemo,
useCallback,
useState,
useRef,
} from 'react';
import API from '../../api';
import StreamForm from '../forms/Stream';
import usePlaylistsStore from '../../store/playlists';
@ -167,8 +173,9 @@ const StreamRowActions = ({
);
};
const StreamsTable = () => {
const StreamsTable = ({ onReady }) => {
const theme = useMantineTheme();
const hasSignaledReady = useRef(false);
/**
* useState
@ -430,6 +437,12 @@ const StreamsTable = () => {
// Generate the string
setPaginationString(`${startItem} to ${endItem} of ${result.count}`);
// Signal that initial data load is complete
if (!hasSignaledReady.current && onReady) {
hasSignaledReady.current = true;
onReady();
}
} catch (error) {
console.error('Error fetching data:', error);
}
@ -442,6 +455,7 @@ const StreamsTable = () => {
groupsLoaded,
channelGroups,
fetchChannelGroups,
onReady,
]);
// Bulk creation: create channels from selected streams asynchronously
@ -1157,7 +1171,6 @@ const StreamsTable = () => {
value={customStartNumber}
onChange={setCustomStartNumber}
min={1}
max={9999}
placeholder="Enter starting number..."
/>
)}
@ -1227,7 +1240,6 @@ const StreamsTable = () => {
value={specificChannelNumber}
onChange={setSpecificChannelNumber}
min={1}
max={9999}
placeholder="Enter channel number..."
/>
)}

View file

@ -1,19 +1,59 @@
import React from 'react';
import React, { useCallback, useRef } from 'react';
import ChannelsTable from '../components/tables/ChannelsTable';
import StreamsTable from '../components/tables/StreamsTable';
import { Box } from '@mantine/core';
import { Allotment } from 'allotment';
import { USER_LEVELS } from '../constants';
import useAuthStore from '../store/auth';
import useLogosStore from '../store/logos';
import useLocalStorage from '../hooks/useLocalStorage';
import ErrorBoundary from '../components/ErrorBoundary';
const ChannelsPage = () => {
const PageContent = () => {
const authUser = useAuthStore((s) => s.user);
const fetchChannelAssignableLogos = useLogosStore(
(s) => s.fetchChannelAssignableLogos
);
const enableLogoRendering = useLogosStore((s) => s.enableLogoRendering);
const channelsReady = useRef(false);
const streamsReady = useRef(false);
const logosTriggered = useRef(false);
const [allotmentSizes, setAllotmentSizes] = useLocalStorage(
'channels-splitter-sizes',
[50, 50]
);
// Only load logos when BOTH tables are ready
const tryLoadLogos = useCallback(() => {
if (
channelsReady.current &&
streamsReady.current &&
!logosTriggered.current
) {
logosTriggered.current = true;
// Use requestAnimationFrame to defer logo loading until after browser paint
// This ensures EPG column is fully rendered before logos start loading
requestAnimationFrame(() => {
requestAnimationFrame(() => {
enableLogoRendering();
fetchChannelAssignableLogos();
});
});
}
}, [fetchChannelAssignableLogos, enableLogoRendering]);
const handleChannelsReady = useCallback(() => {
channelsReady.current = true;
tryLoadLogos();
}, [tryLoadLogos]);
const handleStreamsReady = useCallback(() => {
streamsReady.current = true;
tryLoadLogos();
}, [tryLoadLogos]);
const handleSplitChange = (sizes) => {
setAllotmentSizes(sizes);
};
@ -22,46 +62,48 @@ const ChannelsPage = () => {
setAllotmentSizes(sizes);
};
if (!authUser.id) {
return <></>;
}
if (!authUser.id) return <></>;
if (authUser.user_level <= USER_LEVELS.STANDARD) {
return (
<Box style={{ padding: 10 }}>
<ChannelsTable />
<ChannelsTable onReady={handleChannelsReady} />
</Box>
);
}
return (
<div
style={{
height: '100vh',
width: '100%',
display: 'flex',
overflowX: 'auto',
}}
>
<Box h={'100vh'} w={'100%'} display={'flex'} style={{ overflowX: 'auto' }}>
<Allotment
defaultSizes={allotmentSizes}
style={{ height: '100%', width: '100%', minWidth: '600px' }}
h={'100%'}
w={'100%'}
miw={'600px'}
className="custom-allotment"
minSize={100}
onChange={handleSplitChange}
onResize={handleResize}
>
<div style={{ padding: 10, overflowX: 'auto', minWidth: '100px' }}>
<div style={{ minWidth: '600px' }}>
<ChannelsTable />
</div>
</div>
<div style={{ padding: 10, overflowX: 'auto', minWidth: '100px' }}>
<div style={{ minWidth: '600px' }}>
<StreamsTable />
</div>
</div>
<Box p={10} miw={'100px'} style={{ overflowX: 'auto' }}>
<Box miw={'600px'}>
<ChannelsTable onReady={handleChannelsReady} />
</Box>
</Box>
<Box p={10} miw={'100px'} style={{ overflowX: 'auto' }}>
<Box miw={'600px'}>
<StreamsTable onReady={handleStreamsReady} />
</Box>
</Box>
</Allotment>
</div>
</Box>
);
};
const ChannelsPage = () => {
return (
<ErrorBoundary>
<PageContent />
</ErrorBoundary>
);
};

View file

@ -2,16 +2,18 @@ import useUserAgentsStore from '../store/userAgents';
import M3UsTable from '../components/tables/M3UsTable';
import EPGsTable from '../components/tables/EPGsTable';
import { Box, Stack } from '@mantine/core';
import ErrorBoundary from '../components/ErrorBoundary'
const M3UPage = () => {
const PageContent = () => {
const error = useUserAgentsStore((state) => state.error);
if (error) return <div>Error: {error}</div>;
if (error) throw new Error(error);
return (
<Stack
p="10"
h="100%" // Set a specific height to ensure proper display
miw="1100px" // Prevent tables from becoming too cramped
style={{
padding: 10,
height: '100%', // Set a specific height to ensure proper display
minWidth: '1100px', // Prevent tables from becoming too cramped
overflowX: 'auto', // Enable horizontal scrolling when needed
overflowY: 'auto', // Enable vertical scrolling on the container
}}
@ -26,6 +28,14 @@ const M3UPage = () => {
</Box>
</Stack>
);
};
}
const M3UPage = () => {
return (
<ErrorBoundary>
<PageContent/>
</ErrorBoundary>
);
}
export default M3UPage;

File diff suppressed because it is too large Load diff

View file

@ -1,27 +0,0 @@
// src/components/Dashboard.js
import React, { useState } from 'react';
const Dashboard = () => {
const [newStream, setNewStream] = useState('');
return (
<div>
<h1>Dashboard Page</h1>
<input
type="text"
value={newStream}
onChange={(e) => setNewStream(e.target.value)}
placeholder="Enter Stream"
/>
<h3>Streams:</h3>
<ul>
{state.streams.map((stream, index) => (
<li key={index}>{stream}</li>
))}
</ul>
</div>
);
};
export default Dashboard;

View file

@ -1,14 +0,0 @@
// src/components/Home.js
import React, { useState } from 'react';
const Home = () => {
const [newChannel, setNewChannel] = useState('');
return (
<div>
<h1>Home Page</h1>
</div>
);
};
export default Home;

View file

@ -1,13 +1,21 @@
import React from 'react';
import React, { lazy, Suspense } from 'react';
import LoginForm from '../components/forms/LoginForm';
import SuperuserForm from '../components/forms/SuperuserForm';
const SuperuserForm = lazy(() => import('../components/forms/SuperuserForm'));
import useAuthStore from '../store/auth';
import ErrorBoundary from '../components/ErrorBoundary.jsx';
import { Text } from '@mantine/core';
const Login = ({}) => {
const superuserExists = useAuthStore((s) => s.superuserExists);
if (!superuserExists) {
return <SuperuserForm />;
return (
<ErrorBoundary>
<Suspense fallback={<Text>Loading...</Text>}>
<SuperuserForm />
</Suspense>
</ErrorBoundary>
);
}
return <LoginForm />;

View file

@ -217,6 +217,8 @@ const SettingsPage = () => {
useState(false);
const [netNetworkAccessConfirmCIDRs, setNetNetworkAccessConfirmCIDRs] =
useState([]);
const [clientIpAddress, setClientIpAddress] = useState(null);
const [proxySettingsSaved, setProxySettingsSaved] = useState(false);
const [generalSettingsSaved, setGeneralSettingsSaved] = useState(false);
@ -560,6 +562,9 @@ const SettingsPage = () => {
return;
}
// Store the client IP
setClientIpAddress(check.client_ip);
// For now, only warn if we're blocking the UI
const blockedAccess = check.UI;
if (blockedAccess.length == 0) {
@ -1752,7 +1757,7 @@ Please ensure you have time to let this complete before proceeding.`}
message={
<>
<Text>
Your client is not included in the allowed networks for the web
Your client {clientIpAddress && `(${clientIpAddress}) `}is not included in the allowed networks for the web
UI. Are you sure you want to proceed?
</Text>

View file

@ -481,8 +481,8 @@ const VODCard = ({ vodContent, stopVODClient }) => {
size={16}
style={{
transform: isClientExpanded
? 'rotate(180deg)'
: 'rotate(0deg)',
? 'rotate(0deg)'
: 'rotate(180deg)',
transition: 'transform 0.2s',
}}
/>

View file

@ -1,55 +1,25 @@
import React, { useState } from 'react';
import UsersTable from '../components/tables/UsersTable';
import { Box } from '@mantine/core';
import useAuthStore from '../store/auth';
import { USER_LEVELS } from '../constants';
import ErrorBoundary from '../components/ErrorBoundary';
const UsersPage = () => {
const PageContent = () => {
const authUser = useAuthStore((s) => s.user);
const [selectedUser, setSelectedUser] = useState(null);
const [userModalOpen, setUserModalOpen] = useState(false);
const [confirmDeleteOpen, setConfirmDeleteOpen] = useState(false);
const [deleteTarget, setDeleteTarget] = useState(null);
const [userToDelete, setUserToDelete] = useState(null);
if (!authUser.id) {
return <></>;
}
const closeUserModal = () => {
setSelectedUser(null);
setUserModalOpen(false);
};
const editUser = (user) => {
setSelectedUser(user);
setUserModalOpen(true);
};
const deleteUser = (id) => {
// Get user details for the confirmation dialog
const user = users.find((u) => u.id === id);
setUserToDelete(user);
setDeleteTarget(id);
// Skip warning if it's been suppressed
if (isWarningSuppressed('delete-user')) {
return executeDeleteUser(id);
}
setConfirmDeleteOpen(true);
};
const executeDeleteUser = async (id) => {
await API.deleteUser(id);
setConfirmDeleteOpen(false);
};
if (!authUser.id) throw new Error();
return (
<Box style={{ padding: 10 }}>
<Box p={10}>
<UsersTable />
</Box>
);
}
const UsersPage = () => {
return (
<ErrorBoundary>
<PageContent/>
</ErrorBoundary>
);
};
export default UsersPage;

View file

@ -7,7 +7,6 @@ import useEPGsStore from './epgs';
import useStreamProfilesStore from './streamProfiles';
import useUserAgentsStore from './userAgents';
import useUsersStore from './users';
import useLogosStore from './logos';
import API from '../api';
import { USER_LEVELS } from '../constants';
@ -43,6 +42,8 @@ const useAuthStore = create((set, get) => ({
throw new Error('Unauthorized');
}
set({ user, isAuthenticated: true });
// Ensure settings are loaded first
await useSettingsStore.getState().fetchSettings();
@ -63,7 +64,8 @@ const useAuthStore = create((set, get) => ({
await Promise.all([useUsersStore.getState().fetchUsers()]);
}
set({ user, isAuthenticated: true });
// Note: Logos are loaded after the Channels page tables finish loading
// This is handled by the tables themselves signaling completion
} catch (error) {
console.error('Error initializing data:', error);
}

View file

@ -9,16 +9,10 @@ const useLogosStore = create((set, get) => ({
hasLoadedAll: false, // Track if we've loaded all logos
hasLoadedChannelLogos: false, // Track if we've loaded channel logos
error: null,
allowLogoRendering: false, // Gate to prevent logo rendering until tables are ready
// Basic CRUD operations
setLogos: (logos) => {
set({
logos: logos.reduce((acc, logo) => {
acc[logo.id] = { ...logo };
return acc;
}, {}),
});
},
// Enable logo rendering (call this after tables have loaded and painted)
enableLogoRendering: () => set({ allowLogoRendering: true }),
addLogo: (newLogo) =>
set((state) => {
@ -73,6 +67,9 @@ const useLogosStore = create((set, get) => ({
// Smart loading methods
fetchLogos: async (pageSize = 100) => {
// Don't fetch if logo fetching is not allowed yet
if (!get().allowLogoFetching) return [];
set({ isLoading: true, error: null });
try {
const response = await api.getLogos({ page_size: pageSize });
@ -163,59 +160,28 @@ const useLogosStore = create((set, get) => ({
},
fetchChannelAssignableLogos: async () => {
const { backgroundLoading, hasLoadedChannelLogos, channelLogos } = get();
const { hasLoadedChannelLogos, channelLogos } = get();
// Prevent concurrent calls
if (
backgroundLoading ||
(hasLoadedChannelLogos && Object.keys(channelLogos).length > 0)
) {
// Return cached if already loaded
if (hasLoadedChannelLogos && Object.keys(channelLogos).length > 0) {
return Object.values(channelLogos);
}
set({ backgroundLoading: true, error: null });
try {
// Load all channel logos (no special filtering needed - all Logo entries are for channels)
const response = await api.getLogos({
no_pagination: 'true', // Get all channel logos
});
// Fetch all logos and cache them as channel logos
const logos = await get().fetchAllLogos();
// Handle both paginated and non-paginated responses
const logos = Array.isArray(response) ? response : response.results || [];
set({
channelLogos: logos.reduce((acc, logo) => {
acc[logo.id] = { ...logo };
return acc;
}, {}),
hasLoadedChannelLogos: true,
});
console.log(`Fetched ${logos.length} channel logos`);
// Store in both places, but this is intentional and only when specifically requested
set({
logos: {
...get().logos, // Keep existing logos
...logos.reduce((acc, logo) => {
acc[logo.id] = { ...logo };
return acc;
}, {}),
},
channelLogos: logos.reduce((acc, logo) => {
acc[logo.id] = { ...logo };
return acc;
}, {}),
hasLoadedChannelLogos: true,
backgroundLoading: false,
});
return logos;
} catch (error) {
console.error('Failed to fetch channel logos:', error);
set({
error: 'Failed to load channel logos.',
backgroundLoading: false,
});
throw error;
}
return logos;
},
fetchLogosByIds: async (logoIds) => {
if (!logoIds || logoIds.length === 0) return [];
try {
// Filter out logos we already have
const missingIds = logoIds.filter((id) => !get().logos[id]);

View file

@ -0,0 +1,92 @@
import API from '../../api.js';
import useChannelsStore from '../../store/channels.jsx';
/**
 * Deletes a recording with an optimistic UI update.
 *
 * The recording is removed from the local store immediately, then deleted on
 * the server in the background. If the server call fails, the recordings list
 * is refetched so the store reflects reality again; websocket events keep
 * other clients in sync.
 *
 * @param {number|string} id - id of the recording to delete
 */
export const removeRecording = (id) => {
  // Drop it from local state right away so the UI responds instantly.
  try {
    useChannelsStore.getState().removeRecording(id);
  } catch (error) {
    console.error('Failed to optimistically remove recording', error);
  }

  // Undo the optimistic removal by refetching if the server delete fails.
  const restoreOnFailure = () => {
    try {
      useChannelsStore.getState().fetchRecordings();
    } catch (error) {
      console.error('Failed to refresh recordings after delete', error);
    }
  };

  // Fire-and-forget server delete; websocket keeps other clients in sync.
  API.deleteRecording(id).catch(restoreOnFailure);
};
/**
 * Resolves the poster image URL for a recording card.
 *
 * Preference order: cached logo endpoint (when a logo id exists), then the
 * custom-properties poster URL, then the plain poster URL, then the bundled
 * '/logo.png' placeholder. In Vite dev mode, root-relative URLs are rewritten
 * to the backend on port 5656.
 *
 * @param {number|string|null} posterLogoId - logo id for the cache endpoint
 * @param {object|null} customProperties - may carry poster_url
 * @param {string|null} posterUrl - direct poster URL fallback
 * @returns {string} resolved poster URL
 */
export const getPosterUrl = (posterLogoId, customProperties, posterUrl) => {
  const fallback = customProperties?.poster_url || posterUrl || '/logo.png';
  let resolved = posterLogoId
    ? `/api/channels/logos/${posterLogoId}/cache/`
    : fallback;

  // Vite dev server runs the UI separately; point relative URLs at port 5656.
  const isDev =
    typeof import.meta !== 'undefined' &&
    import.meta.env &&
    import.meta.env.DEV;
  if (isDev && resolved && resolved.startsWith('/')) {
    resolved = `${window.location.protocol}//${window.location.hostname}:5656${resolved}`;
  }
  return resolved;
};
/**
 * Builds the live-stream proxy URL for a channel.
 *
 * @param {{uuid: string}} channel - channel whose uuid keys the proxy path
 * @param {string} env_mode - when 'dev', the path is made absolute against the
 *   backend on port 5656 (the UI dev server runs elsewhere)
 * @returns {string} stream URL
 */
export const getShowVideoUrl = (channel, env_mode) => {
  const path = `/proxy/ts/stream/${channel.uuid}`;
  if (env_mode !== 'dev') {
    return path;
  }
  return `${window.location.protocol}//${window.location.hostname}:5656${path}`;
};
/**
 * Starts commercial detection (comskip) for a recording.
 * Thin wrapper around the API call; errors propagate to the caller.
 *
 * @param {{id: number|string}} recording - recording to process
 */
export const runComSkip = async (recording) => {
  await API.runComskip(recording.id);
};
/**
 * Deletes a single recording by id via the API.
 * Errors propagate to the caller; no store refresh is performed here.
 *
 * @param {number|string} recordingId - id of the recording to delete
 */
export const deleteRecordingById = async (recordingId) => {
  await API.deleteRecording(recordingId);
};
/**
 * Removes all recordings for a series and deletes its series rule.
 *
 * Best-effort: each of the two API calls is attempted independently and
 * failures are only logged, so a failed bulk-remove does not prevent the rule
 * deletion (and vice versa). No-op when the series has no tvg_id.
 *
 * @param {{tvg_id?: string, title?: string}} seriesInfo - series identity
 */
export const deleteSeriesAndRule = async (seriesInfo) => {
  const { tvg_id, title } = seriesInfo;
  if (!tvg_id) {
    return;
  }

  try {
    await API.bulkRemoveSeriesRecordings({ tvg_id, title, scope: 'title' });
  } catch (error) {
    console.error('Failed to remove series recordings', error);
  }

  try {
    await API.deleteSeriesRule(tvg_id);
  } catch (error) {
    console.error('Failed to delete series rule', error);
  }
};
/**
 * Resolves the playable file URL for a finished recording.
 *
 * Prefers file_url over output_file_url from the recording's custom
 * properties. In dev mode, root-relative URLs are made absolute against the
 * backend on port 5656.
 *
 * @param {object|null} customProps - recording custom properties
 * @param {string} env_mode - 'dev' enables the dev-host rewrite
 * @returns {string|undefined} resolved URL, or undefined when none is set
 */
export const getRecordingUrl = (customProps, env_mode) => {
  const fileUrl = customProps?.file_url || customProps?.output_file_url;
  if (!fileUrl) {
    return fileUrl;
  }
  if (env_mode !== 'dev' || !fileUrl.startsWith('/')) {
    return fileUrl;
  }
  return `${window.location.protocol}//${window.location.hostname}:5656${fileUrl}`;
};
/**
 * Formats a season/episode pair as 'SxxEyy' (zero-padded to two digits).
 * When either part is missing, falls back to the on-screen episode label,
 * or null when that is absent too.
 *
 * @param {number|string|null} season
 * @param {number|string|null} episode
 * @param {string|null} onscreen - broadcaster-supplied label fallback
 * @returns {string|null}
 */
export const getSeasonLabel = (season, episode, onscreen) => {
  if (season && episode) {
    const ss = String(season).padStart(2, '0');
    const ee = String(episode).padStart(2, '0');
    return `S${ss}E${ee}`;
  }
  return onscreen || null;
};
/**
 * Extracts the series identity (tvg_id and title) from a recording's custom
 * properties. Missing levels yield undefined fields rather than throwing.
 *
 * @param {object|null} customProps - may contain a nested program object
 * @returns {{tvg_id: string|undefined, title: string|undefined}}
 */
export const getSeriesInfo = (customProps) => {
  const program = (customProps || {}).program || {};
  return { tvg_id: program.tvg_id, title: program.title };
};

View file

@ -0,0 +1,89 @@
import { useEffect, useCallback } from 'react';
import dayjs from 'dayjs';
import duration from 'dayjs/plugin/duration';
import relativeTime from 'dayjs/plugin/relativeTime';
import utc from 'dayjs/plugin/utc';
import timezone from 'dayjs/plugin/timezone';
import useSettingsStore from '../store/settings';
import useLocalStorage from '../hooks/useLocalStorage';
dayjs.extend(duration);
dayjs.extend(relativeTime);
dayjs.extend(utc);
dayjs.extend(timezone);
/**
 * Hook returning the user's effective IANA time zone string.
 *
 * The value starts from the 'time-zone' localStorage entry, defaulting to a
 * guessed zone, and is overridden by the server-side 'system-time-zone'
 * setting whenever that setting is present and differs.
 */
export const useUserTimeZone = () => {
  const settings = useSettingsStore((s) => s.settings);
  const [timeZone, setTimeZone] = useLocalStorage(
    'time-zone',
    // dayjs.tz.guess exists only when the timezone plugin is loaded (it is
    // extended above); the Intl API is the fallback.
    dayjs.tz?.guess
      ? dayjs.tz.guess()
      : Intl.DateTimeFormat().resolvedOptions().timeZone
  );

  // Server-configured time zone takes precedence over the stored value.
  useEffect(() => {
    const tz = settings?.['system-time-zone']?.value;
    if (tz && tz !== timeZone) {
      setTimeZone(tz);
    }
  }, [settings, timeZone, setTimeZone]);

  return timeZone;
};
/**
 * React hook exposing time-zone-aware dayjs helpers.
 *
 * Returns:
 *  - timeZone: the user's effective IANA zone (see useUserTimeZone)
 *  - toUserTime(value): dayjs instance of `value` converted to that zone;
 *      an invalid dayjs instance for empty input, and the unconverted parse
 *      if the zone conversion throws
 *  - userNow(): the current time in the user's zone
 */
export const useTimeHelpers = () => {
  const timeZone = useUserTimeZone();
  const toUserTime = useCallback(
    (value) => {
      // Fix: dayjs has no `dayjs.invalid()` static (that is a Moment.js
      // API), so the original threw a TypeError for falsy input.
      // dayjs(null) yields an invalid instance without throwing.
      if (!value) return dayjs(null);
      try {
        return dayjs(value).tz(timeZone);
      } catch (error) {
        return dayjs(value);
      }
    },
    [timeZone]
  );
  const userNow = useCallback(() => dayjs().tz(timeZone), [timeZone]);
  return { timeZone, toUserTime, userNow };
};
// Weekday picker options. Note the numbering: Monday = 0 … Sunday = 6
// (ISO/Python-style weekday indexes, NOT JavaScript Date.getDay() where
// Sunday = 0). Presumably this mirrors the backend `days_of_week`
// encoding — confirm against the recurring-rule API.
export const RECURRING_DAY_OPTIONS = [
  { value: 6, label: 'Sun' },
  { value: 0, label: 'Mon' },
  { value: 1, label: 'Tue' },
  { value: 2, label: 'Wed' },
  { value: 3, label: 'Thu' },
  { value: 4, label: 'Fri' },
  { value: 5, label: 'Sat' },
];
/**
 * Hook returning the user's preferred [timeFormat, dateFormat] dayjs format
 * tokens, driven by the 'time-format' ('12h' or anything else = 24h) and
 * 'date-format' ('mdy' or anything else = day-first) localStorage keys.
 */
export const useDateTimeFormat = () => {
  const [timeFormatSetting] = useLocalStorage('time-format', '12h');
  const [dateFormatSetting] = useLocalStorage('date-format', 'mdy');
  // Map stored preference keys onto dayjs format tokens.
  const timeFormat = timeFormatSetting === '12h' ? 'h:mma' : 'HH:mm';
  const dateFormat = dateFormatSetting === 'mdy' ? 'MMM D' : 'D MMM';
  // Fix: terminate the return statement explicitly instead of relying on ASI.
  return [timeFormat, dateFormat];
};
/**
 * Normalize a time value to an 'HH:mm' string.
 *
 * Strings are strict-parsed against 'HH:mm', 'HH:mm:ss' and 'h:mm A' and
 * reformatted; unparseable strings are returned unchanged. Non-string
 * values go through the default dayjs parser, with '00:00' as the fallback
 * for empty or invalid input.
 *
 * NOTE(review): strict multi-format parsing (`dayjs(value, formats, true)`)
 * only works when the customParseFormat plugin has been extended at module
 * load; without it the format arguments are silently ignored — verify the
 * plugin is registered.
 */
export const toTimeString = (value) => {
  if (!value) return '00:00';
  if (typeof value === 'string') {
    const parsed = dayjs(value, ['HH:mm', 'HH:mm:ss', 'h:mm A'], true);
    if (parsed.isValid()) return parsed.format('HH:mm');
    return value; // leave unrecognized strings untouched
  }
  const parsed = dayjs(value);
  return parsed.isValid() ? parsed.format('HH:mm') : '00:00';
};
/**
 * Parse a date-ish value into a native Date, or null when empty/invalid.
 *
 * Fix: the original passed `dayjs.ISO_8601` as a parse format — that is a
 * Moment.js constant and evaluates to `undefined` in dayjs. We now try a
 * strict 'YYYY-MM-DD' parse first (requires the customParseFormat plugin)
 * and fall back to dayjs' native parser, which already understands
 * ISO-8601 strings.
 */
export const parseDate = (value) => {
  if (!value) return null;
  const strict = dayjs(value, 'YYYY-MM-DD', true);
  if (strict.isValid()) return strict.toDate();
  const loose = dayjs(value);
  return loose.isValid() ? loose.toDate() : null;
};

View file

@ -0,0 +1,87 @@
/**
 * Build [label, value] display rows from a stream-stats object, omitting
 * rows whose value is missing.
 *
 * Fix: the bitrate/sample-rate rows used `x && \`... kb/s\``, so a value of
 * 0 slipped past the null/undefined/'' filter as a bare, unit-less 0. A
 * zero rate carries no information, so those rows are now dropped too.
 */
export const getStatRows = (stats) => {
  const resolution =
    stats.resolution ||
    (stats.width && stats.height ? `${stats.width}x${stats.height}` : null);
  const rows = [
    ['Video Codec', stats.video_codec],
    ['Resolution', resolution],
    ['FPS', stats.source_fps],
    ['Video Bitrate', stats.video_bitrate ? `${stats.video_bitrate} kb/s` : null],
    ['Audio Codec', stats.audio_codec],
    ['Audio Channels', stats.audio_channels],
    ['Sample Rate', stats.sample_rate ? `${stats.sample_rate} Hz` : null],
    ['Audio Bitrate', stats.audio_bitrate ? `${stats.audio_bitrate} kb/s` : null],
  ];
  return rows.filter(([, v]) => v !== null && v !== undefined && v !== '');
};
// Resolve a content rating, preferring the recording's own custom props
// (`rating`, then `rating_value`) over the nested program's rating.
export const getRating = (customProps, program) => {
  if (customProps.rating) return customProps.rating;
  if (customProps.rating_value) return customProps.rating_value;
  return program && program.custom_properties && program.custom_properties.rating;
};
// Keep only recordings belonging to the given series (tvg_id plus
// case-insensitive title) whose start time is still in the future.
const filterByUpcoming = (arr, tvid, titleKey, toUserTime, userNow) => {
  const matchesSeries = (program) =>
    (program.tvg_id || '') === tvid &&
    (program.title || '').toLowerCase() === titleKey;
  return arr.filter((rec) => {
    const program = (rec.custom_properties || {}).program || {};
    if (!matchesSeries(program)) return false;
    return toUserTime(rec.start_time).isAfter(userNow());
  });
};
/**
 * Drop duplicate episodes from a recordings list, keeping first occurrence.
 * Dedupe key priority: season+episode numbers, on-screen episode code,
 * sub_title, program id, then the channel/time-slot signature.
 */
const dedupeByProgram = (filtered) => {
  const keyFor = (rec) => {
    const cp = rec.custom_properties || {};
    const pr = cp.program || {};
    const season = cp.season ?? pr?.custom_properties?.season;
    const episode = cp.episode ?? pr?.custom_properties?.episode;
    const onscreen =
      cp.onscreen_episode ?? pr?.custom_properties?.onscreen_episode;
    if (season != null && episode != null) return `se:${season}:${episode}`;
    if (onscreen) return `onscreen:${String(onscreen).toLowerCase()}`;
    if (pr.sub_title) return `sub:${(pr.sub_title || '').toLowerCase()}`;
    if (pr.id != null) return `id:${pr.id}`;
    return `slot:${rec.channel}|${rec.start_time}|${rec.end_time}|${pr.title || ''}`;
  };
  const seen = new Set();
  const deduped = [];
  for (const rec of filtered) {
    const key = keyFor(rec);
    if (!seen.has(key)) {
      seen.add(key);
      deduped.push(rec);
    }
  }
  return deduped;
};
/**
 * For a series group, return its future episodes sorted by start time
 * ascending, de-duplicated by episode identity. Returns [] for
 * non-series groups.
 */
export const getUpcomingEpisodes = (
  isSeriesGroup,
  allRecordings,
  program,
  toUserTime,
  userNow
) => {
  if (!isSeriesGroup) return [];
  const list = Array.isArray(allRecordings)
    ? allRecordings
    : Object.values(allRecordings || {});
  const future = filterByUpcoming(
    list,
    program.tvg_id || '',
    (program.title || '').toLowerCase(),
    toUserTime,
    userNow
  );
  const byStartAsc = (a, b) =>
    toUserTime(a.start_time) - toUserTime(b.start_time);
  return dedupeByProgram(future).sort(byStartAsc);
};

View file

@ -0,0 +1,66 @@
import API from '../../api.js';
import { toTimeString } from '../dateTimeUtils.js';
import dayjs from 'dayjs';
/**
 * Build select options from the channels map, sorted by channel number
 * (missing/non-numeric numbers treated as 0) and then by name, with the
 * channel id stringified as the option value.
 */
export const getChannelOptions = (channels) => {
  const byNumberThenName = (a, b) => {
    const numA = Number(a.channel_number) || 0;
    const numB = Number(b.channel_number) || 0;
    if (numA !== numB) return numA - numB;
    return (a.name || '').localeCompare(b.name || '');
  };
  const toOption = (channel) => ({
    value: `${channel.id}`,
    label: channel.name || `Channel ${channel.id}`,
  });
  return Object.values(channels || {}).sort(byNumberThenName).map(toOption);
};
/**
 * Future recordings generated by a specific recurring rule, sorted by
 * start time ascending. Accepts either an array or a keyed map of
 * recordings.
 */
export const getUpcomingOccurrences = (
  recordings,
  userNow,
  ruleId,
  toUserTime
) => {
  const all = Array.isArray(recordings)
    ? recordings
    : Object.values(recordings || {});
  const now = userNow();
  const belongsAndFuture = (rec) =>
    rec?.custom_properties?.rule?.id === ruleId &&
    toUserTime(rec.start_time).isAfter(now);
  const byStartAsc = (a, b) =>
    toUserTime(a.start_time).valueOf() - toUserTime(b.start_time).valueOf();
  return all.filter(belongsAndFuture).sort(byStartAsc);
};
/**
 * Persist edits to a recurring recording rule. Normalizes form values:
 * day-of-week entries coerced to Number, times to 'HH:mm', dates to
 * 'YYYY-MM-DD' (null when unset), trimmed rule name, boolean enabled flag.
 */
export const updateRecurringRule = async (ruleId, values) => {
  const toDateString = (d) => (d ? dayjs(d).format('YYYY-MM-DD') : null);
  const payload = {
    channel: values.channel_id,
    days_of_week: (values.days_of_week || []).map((d) => Number(d)),
    start_time: toTimeString(values.start_time),
    end_time: toTimeString(values.end_time),
    start_date: toDateString(values.start_date),
    end_date: toDateString(values.end_date),
    name: values.rule_name?.trim() || '',
    enabled: Boolean(values.enabled),
  };
  await API.updateRecurringRule(ruleId, payload);
};
// Delete a recurring recording rule on the backend by id.
export const deleteRecurringRuleById = async (ruleId) => {
  await API.deleteRecurringRule(ruleId);
};
// Toggle a recurring rule on/off without touching its other fields.
export const updateRecurringRuleEnabled = async (ruleId, checked) => {
  const patch = { enabled: checked };
  await API.updateRecurringRule(ruleId, patch);
};

View file

@ -0,0 +1,90 @@
/**
 * Remove duplicate recordings, keeping the first occurrence. The signature
 * is the program id when present, otherwise the channel/time-slot/title
 * combination.
 */
const dedupeByProgramOrSlot = (arr) => {
  const signatureOf = (rec) => {
    const cp = rec.custom_properties || {};
    const pr = cp.program || {};
    if (pr?.id != null) return `id:${pr.id}`;
    return `slot:${rec.channel}|${rec.start_time}|${rec.end_time}|${pr.title || ''}`;
  };
  const seen = new Set();
  const unique = [];
  for (const rec of arr) {
    const sig = signatureOf(rec);
    if (seen.has(sig)) continue;
    seen.add(sig);
    unique.push(rec);
  }
  return unique;
};
/**
 * Single pass over `list`: skips records whose id was already seen, then
 * pushes each recording into one of the caller-provided buckets —
 * `completed` for 'interrupted'/'completed' status, `inProgress` when `now`
 * falls inside its start/end window, `upcoming` when it has not started,
 * and `completed` otherwise. Mutates the bucket arrays in place.
 */
const dedupeById = (list, toUserTime, completed, now, inProgress, upcoming) => {
  const seenIds = new Set();
  for (const rec of list) {
    if (rec && rec.id != null) {
      const idKey = String(rec.id);
      if (seenIds.has(idKey)) continue;
      seenIds.add(idKey);
    }
    const start = toUserTime(rec.start_time);
    const end = toUserTime(rec.end_time);
    const status = rec.custom_properties?.status;
    const isFinished = status === 'interrupted' || status === 'completed';
    if (isFinished) {
      completed.push(rec);
    } else if (now.isAfter(start) && now.isBefore(end)) {
      inProgress.push(rec);
    } else if (now.isBefore(start)) {
      upcoming.push(rec);
    } else {
      completed.push(rec);
    }
  }
};
/**
 * Split recordings into { inProgress, upcoming, completed }:
 *  - inProgress: de-duplicated, newest start first
 *  - upcoming:   de-duplicated, collapsed per series (tvg_id + title) to the
 *                next episode, carrying `_group_count` = episodes in group
 *  - completed:  newest end first
 */
export const categorizeRecordings = (recordings, toUserTime, now) => {
  const list = Array.isArray(recordings)
    ? recordings
    : Object.values(recordings || {});

  const inProgress = [];
  const upcoming = [];
  const completed = [];
  dedupeById(list, toUserTime, completed, now, inProgress, upcoming);

  const startAsc = (a, b) => toUserTime(a.start_time) - toUserTime(b.start_time);
  const startDesc = (a, b) => toUserTime(b.start_time) - toUserTime(a.start_time);

  const inProgressDedup = dedupeByProgramOrSlot(inProgress).sort(startDesc);
  const upcomingDedup = dedupeByProgramOrSlot(upcoming).sort(startAsc);

  // Collapse future episodes of the same series down to the earliest one,
  // remembering how many were collapsed (Map preserves insertion order).
  const grouped = new Map();
  for (const rec of upcomingDedup) {
    const prog = (rec.custom_properties || {}).program || {};
    const key = `${prog.tvg_id || ''}|${(prog.title || '').toLowerCase()}`;
    const entry = grouped.get(key);
    if (entry) {
      entry.count += 1;
    } else {
      grouped.set(key, { rec, count: 1 });
    }
  }
  const upcomingGrouped = [...grouped.values()].map(({ rec, count }) => {
    const item = { ...rec };
    item._group_count = count;
    return item;
  });

  completed.sort((a, b) => toUserTime(b.end_time) - toUserTime(a.end_time));

  return {
    inProgress: inProgressDedup,
    upcoming: upcomingGrouped,
    completed,
  };
};

View file

@ -1,5 +1,5 @@
"""
Dispatcharr version information.
"""
__version__ = '0.15.0' # Follow semantic versioning (MAJOR.MINOR.PATCH)
__version__ = '0.15.1' # Follow semantic versioning (MAJOR.MINOR.PATCH)
__timestamp__ = None # Set during CI/CD build process