Merge branch 'dev' of https://github.com/Dispatcharr/Dispatcharr into pr/nick4810/761

SergeantPanda 2025-12-26 12:37:57 -06:00
commit 874e981449
18 changed files with 732 additions and 153 deletions

View file

@ -7,6 +7,34 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
### Added
- Advanced filtering for the Channels table: the filter menu now allows toggling the visibility of disabled channels (when a profile is selected) and showing only empty channels that have no streams (Closes #182)
- Network Access warning modal now displays the client's IP address for better transparency when network restrictions are being enforced - Thanks [@damien-alt-sudo](https://github.com/damien-alt-sudo) (Closes #778)
- VLC streaming support - Thanks [@sethwv](https://github.com/sethwv)
  - Added `cvlc` as an alternative streaming backend alongside FFmpeg and Streamlink
  - Log parser refactoring: Introduced `LogParserFactory` and stream-specific parsers (`FFmpegLogParser`, `VLCLogParser`, `StreamlinkLogParser`) to enable codec and resolution detection from multiple streaming tools
  - VLC log parsing for stream information: Detects video/audio codecs from TS demux output, supports both stream-copy and transcode modes with resolution/FPS extraction from transcode output
  - Locked, read-only VLC stream profile configured for headless operation with intelligent audio/video codec detection
  - VLC and required plugins installed in the Docker environment with headless configuration
### Changed
- Fixed event viewer arrow direction (previously inverted) - Thanks [@drnikcuk](https://github.com/drnikcuk) (Closes #772)
- Stream log parsing refactored to use factory pattern: Simplified `ChannelService.parse_and_store_stream_info()` to route parsing through specialized log parsers instead of inline program-specific logic (~150 lines of code removed)
- Stream profile names in fixtures updated to use proper capitalization (ffmpeg → FFmpeg, streamlink → Streamlink)
### Fixed
- Stream validation now returns original URL instead of redirected URL to prevent issues with temporary redirect URLs that expire before clients can connect
## [0.15.1] - 2025-12-22
### Fixed
- XtreamCodes EPG `has_archive` field now returns integer `0` instead of string `"0"` for proper JSON type consistency
- nginx now gracefully handles hosts without IPv6 support by automatically disabling IPv6 binding at startup (Fixes #744)
## [0.15.0] - 2025-12-20
### Added

View file

@ -8,6 +8,7 @@ from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
from django.shortcuts import get_object_or_404, get_list_or_404
from django.db import transaction
from django.db.models import Q
import os, json, requests, logging
from urllib.parse import unquote
from apps.accounts.permissions import (
@ -420,10 +421,36 @@ class ChannelViewSet(viewsets.ModelViewSet):
group_names = channel_group.split(",")
qs = qs.filter(channel_group__name__in=group_names)
if self.request.user.user_level < 10:
qs = qs.filter(user_level__lte=self.request.user.user_level)
filters = {}
q_filters = Q()
return qs
channel_profile_id = self.request.query_params.get("channel_profile_id")
show_disabled_param = self.request.query_params.get("show_disabled", None)
only_streamless = self.request.query_params.get("only_streamless", None)
if channel_profile_id:
try:
profile_id_int = int(channel_profile_id)
filters["channelprofilemembership__channel_profile_id"] = profile_id_int
if show_disabled_param is None:
filters["channelprofilemembership__enabled"] = True
except (ValueError, TypeError):
# Ignore invalid profile id values
pass
if only_streamless:
q_filters &= Q(streams__isnull=True)
if self.request.user.user_level < 10:
filters["user_level__lte"] = self.request.user.user_level
if filters:
qs = qs.filter(**filters)
if q_filters:
qs = qs.filter(q_filters)
return qs.distinct()
def get_serializer_context(self):
context = super().get_serializer_context()
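
The new query parameters above can be combined on the existing channel list endpoint. A minimal sketch of a client request follows; the host, endpoint path, and auth header are illustrative assumptions, while the parameter names (`channel_profile_id`, `show_disabled`, `only_streamless`) come from the diff above.

import requests

# Hypothetical host/path and token; only the parameter names are taken from the diff.
BASE_URL = "http://localhost:9191/api/channels/channels/"
params = {
    "channel_profile_id": 2,   # restrict to channels with a membership in this profile
    "show_disabled": "true",   # also include channels disabled in that profile
    "only_streamless": "true", # keep only channels that have no streams attached
}
resp = requests.get(BASE_URL, params=params,
                    headers={"Authorization": "Bearer <token>"}, timeout=10)
resp.raise_for_status()
data = resp.json()
print(data.get("count"), "matching channels")  # paginated DRF-style response assumed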

View file

@ -2326,7 +2326,7 @@ def xc_get_epg(request, user, short=False):
if short == False:
program_output["now_playing"] = 1 if start <= django_timezone.now() <= end else 0
program_output["has_archive"] = "0"
program_output["has_archive"] = 0
output['epg_listings'].append(program_output)

View file

@ -15,6 +15,7 @@ from ..redis_keys import RedisKeys
from ..constants import EventType, ChannelState, ChannelMetadataField
from ..url_utils import get_stream_info_for_switch
from core.utils import log_system_event
from .log_parsers import LogParserFactory
logger = logging.getLogger("ts_proxy")
@ -419,124 +420,51 @@ class ChannelService:
@staticmethod
def parse_and_store_stream_info(channel_id, stream_info_line, stream_type="video", stream_id=None):
"""Parse FFmpeg stream info line and store in Redis metadata and database"""
"""
Parse stream info from FFmpeg/VLC/Streamlink logs and store in Redis/DB.
Uses specialized parsers for each streaming tool.
"""
try:
if stream_type == "input":
# Example lines:
# Input #0, mpegts, from 'http://example.com/stream.ts':
# Input #0, hls, from 'http://example.com/stream.m3u8':
# Use factory to parse the line based on stream type
parsed_data = LogParserFactory.parse(stream_type, stream_info_line)
if not parsed_data:
return
# Extract input format (e.g., "mpegts", "hls", "flv", etc.)
input_match = re.search(r'Input #\d+,\s*([^,]+)', stream_info_line)
input_format = input_match.group(1).strip() if input_match else None
# Update Redis and database with parsed data
ChannelService._update_stream_info_in_redis(
channel_id,
parsed_data.get('video_codec'),
parsed_data.get('resolution'),
parsed_data.get('width'),
parsed_data.get('height'),
parsed_data.get('source_fps'),
parsed_data.get('pixel_format'),
parsed_data.get('video_bitrate'),
parsed_data.get('audio_codec'),
parsed_data.get('sample_rate'),
parsed_data.get('audio_channels'),
parsed_data.get('audio_bitrate'),
parsed_data.get('stream_type')
)
# Store in Redis if we have valid data
if input_format:
ChannelService._update_stream_info_in_redis(channel_id, None, None, None, None, None, None, None, None, None, None, None, input_format)
# Save to database if stream_id is provided
if stream_id:
ChannelService._update_stream_stats_in_db(stream_id, stream_type=input_format)
logger.debug(f"Input format info - Format: {input_format} for channel {channel_id}")
elif stream_type == "video":
# Example line:
# Stream #0:0: Video: h264 (Main), yuv420p(tv, progressive), 1280x720 [SAR 1:1 DAR 16:9], q=2-31, 2000 kb/s, 29.97 fps, 90k tbn
# Extract video codec (e.g., "h264", "mpeg2video", etc.)
codec_match = re.search(r'Video:\s*([a-zA-Z0-9_]+)', stream_info_line)
video_codec = codec_match.group(1) if codec_match else None
# Extract resolution (e.g., "1280x720") - be more specific to avoid hex values
# Look for resolution patterns that are realistic video dimensions
resolution_match = re.search(r'\b(\d{3,5})x(\d{3,5})\b', stream_info_line)
if resolution_match:
width = int(resolution_match.group(1))
height = int(resolution_match.group(2))
# Validate that these look like reasonable video dimensions
if 100 <= width <= 10000 and 100 <= height <= 10000:
resolution = f"{width}x{height}"
else:
width = height = resolution = None
else:
width = height = resolution = None
# Extract source FPS (e.g., "29.97 fps")
fps_match = re.search(r'(\d+(?:\.\d+)?)\s*fps', stream_info_line)
source_fps = float(fps_match.group(1)) if fps_match else None
# Extract pixel format (e.g., "yuv420p")
pixel_format_match = re.search(r'Video:\s*[^,]+,\s*([^,(]+)', stream_info_line)
pixel_format = None
if pixel_format_match:
pf = pixel_format_match.group(1).strip()
# Clean up pixel format (remove extra info in parentheses)
if '(' in pf:
pf = pf.split('(')[0].strip()
pixel_format = pf
# Extract bitrate if present (e.g., "2000 kb/s")
video_bitrate = None
bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', stream_info_line)
if bitrate_match:
video_bitrate = float(bitrate_match.group(1))
# Store in Redis if we have valid data
if any(x is not None for x in [video_codec, resolution, source_fps, pixel_format, video_bitrate]):
ChannelService._update_stream_info_in_redis(channel_id, video_codec, resolution, width, height, source_fps, pixel_format, video_bitrate, None, None, None, None, None)
# Save to database if stream_id is provided
if stream_id:
ChannelService._update_stream_stats_in_db(
stream_id,
video_codec=video_codec,
resolution=resolution,
source_fps=source_fps,
pixel_format=pixel_format,
video_bitrate=video_bitrate
)
logger.info(f"Video stream info - Codec: {video_codec}, Resolution: {resolution}, "
f"Source FPS: {source_fps}, Pixel Format: {pixel_format}, "
f"Video Bitrate: {video_bitrate} kb/s")
elif stream_type == "audio":
# Example line:
# Stream #0:1[0x101]: Audio: aac (LC) ([15][0][0][0] / 0x000F), 48000 Hz, stereo, fltp, 64 kb/s
# Extract audio codec (e.g., "aac", "mp3", etc.)
codec_match = re.search(r'Audio:\s*([a-zA-Z0-9_]+)', stream_info_line)
audio_codec = codec_match.group(1) if codec_match else None
# Extract sample rate (e.g., "48000 Hz")
sample_rate_match = re.search(r'(\d+)\s*Hz', stream_info_line)
sample_rate = int(sample_rate_match.group(1)) if sample_rate_match else None
# Extract channel layout (e.g., "stereo", "5.1", "mono")
# Look for common channel layouts
channel_match = re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', stream_info_line, re.IGNORECASE)
channels = channel_match.group(1) if channel_match else None
# Extract audio bitrate if present (e.g., "64 kb/s")
audio_bitrate = None
bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', stream_info_line)
if bitrate_match:
audio_bitrate = float(bitrate_match.group(1))
# Store in Redis if we have valid data
if any(x is not None for x in [audio_codec, sample_rate, channels, audio_bitrate]):
ChannelService._update_stream_info_in_redis(channel_id, None, None, None, None, None, None, None, audio_codec, sample_rate, channels, audio_bitrate, None)
# Save to database if stream_id is provided
if stream_id:
ChannelService._update_stream_stats_in_db(
stream_id,
audio_codec=audio_codec,
sample_rate=sample_rate,
audio_channels=channels,
audio_bitrate=audio_bitrate
)
if stream_id:
ChannelService._update_stream_stats_in_db(
stream_id,
video_codec=parsed_data.get('video_codec'),
resolution=parsed_data.get('resolution'),
source_fps=parsed_data.get('source_fps'),
pixel_format=parsed_data.get('pixel_format'),
video_bitrate=parsed_data.get('video_bitrate'),
audio_codec=parsed_data.get('audio_codec'),
sample_rate=parsed_data.get('sample_rate'),
audio_channels=parsed_data.get('audio_channels'),
audio_bitrate=parsed_data.get('audio_bitrate'),
stream_type=parsed_data.get('stream_type')
)
except Exception as e:
logger.debug(f"Error parsing FFmpeg {stream_type} stream info: {e}")
logger.debug(f"Error parsing {stream_type} stream info: {e}")
@staticmethod
def _update_stream_info_in_redis(channel_id, codec, resolution, width, height, fps, pixel_format, video_bitrate, audio_codec=None, sample_rate=None, channels=None, audio_bitrate=None, input_format=None):

View file

@ -0,0 +1,410 @@
"""Log parsers for FFmpeg, Streamlink, and VLC output."""
import re
import logging
from abc import ABC, abstractmethod
from typing import Optional, Dict, Any
logger = logging.getLogger(__name__)
class BaseLogParser(ABC):
"""Base class for log parsers"""
# Map of stream_type -> method_name that this parser handles
STREAM_TYPE_METHODS: Dict[str, str] = {}
@abstractmethod
def can_parse(self, line: str) -> Optional[str]:
"""
Check if this parser can handle the line.
Returns the stream_type if it can parse, None otherwise.
e.g., 'video', 'audio', 'vlc_video', 'vlc_audio', 'streamlink'
"""
pass
@abstractmethod
def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]:
pass
@abstractmethod
def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]:
pass
@abstractmethod
def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]:
pass
class FFmpegLogParser(BaseLogParser):
"""Parser for FFmpeg log output"""
STREAM_TYPE_METHODS = {
'input': 'parse_input_format',
'video': 'parse_video_stream',
'audio': 'parse_audio_stream'
}
def can_parse(self, line: str) -> Optional[str]:
"""Check if this is an FFmpeg line we can parse"""
lower = line.lower()
# Input format detection
if lower.startswith('input #'):
return 'input'
# Stream info (only during input phase, but we'll let stream_manager handle phase tracking)
if 'stream #' in lower:
if 'video:' in lower:
return 'video'
elif 'audio:' in lower:
return 'audio'
return None
def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]:
"""Parse FFmpeg input format (e.g., mpegts, hls)"""
try:
input_match = re.search(r'Input #\d+,\s*([^,]+)', line)
input_format = input_match.group(1).strip() if input_match else None
if input_format:
logger.debug(f"Input format info - Format: {input_format}")
return {'stream_type': input_format}
except Exception as e:
logger.debug(f"Error parsing FFmpeg input format: {e}")
return None
def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]:
"""Parse FFmpeg video stream info"""
try:
result = {}
# Extract codec, resolution, fps, pixel format, bitrate
codec_match = re.search(r'Video:\s*([a-zA-Z0-9_]+)', line)
if codec_match:
result['video_codec'] = codec_match.group(1)
resolution_match = re.search(r'\b(\d{3,5})x(\d{3,5})\b', line)
if resolution_match:
width = int(resolution_match.group(1))
height = int(resolution_match.group(2))
if 100 <= width <= 10000 and 100 <= height <= 10000:
result['resolution'] = f"{width}x{height}"
result['width'] = width
result['height'] = height
fps_match = re.search(r'(\d+(?:\.\d+)?)\s*fps', line)
if fps_match:
result['source_fps'] = float(fps_match.group(1))
pixel_format_match = re.search(r'Video:\s*[^,]+,\s*([^,(]+)', line)
if pixel_format_match:
pf = pixel_format_match.group(1).strip()
if '(' in pf:
pf = pf.split('(')[0].strip()
result['pixel_format'] = pf
bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', line)
if bitrate_match:
result['video_bitrate'] = float(bitrate_match.group(1))
if result:
logger.info(f"Video stream info - Codec: {result.get('video_codec')}, "
f"Resolution: {result.get('resolution')}, "
f"Source FPS: {result.get('source_fps')}, "
f"Pixel Format: {result.get('pixel_format')}, "
f"Video Bitrate: {result.get('video_bitrate')} kb/s")
return result
except Exception as e:
logger.debug(f"Error parsing FFmpeg video stream info: {e}")
return None
def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]:
"""Parse FFmpeg audio stream info"""
try:
result = {}
codec_match = re.search(r'Audio:\s*([a-zA-Z0-9_]+)', line)
if codec_match:
result['audio_codec'] = codec_match.group(1)
sample_rate_match = re.search(r'(\d+)\s*Hz', line)
if sample_rate_match:
result['sample_rate'] = int(sample_rate_match.group(1))
channel_match = re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', line, re.IGNORECASE)
if channel_match:
result['audio_channels'] = channel_match.group(1)
bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', line)
if bitrate_match:
result['audio_bitrate'] = float(bitrate_match.group(1))
if result:
return result
except Exception as e:
logger.debug(f"Error parsing FFmpeg audio stream info: {e}")
return None
class VLCLogParser(BaseLogParser):
"""Parser for VLC log output"""
STREAM_TYPE_METHODS = {
'vlc_video': 'parse_video_stream',
'vlc_audio': 'parse_audio_stream'
}
def can_parse(self, line: str) -> Optional[str]:
"""Check if this is a VLC line we can parse"""
lower = line.lower()
# VLC TS demux codec detection
if 'ts demux debug' in lower and 'type=' in lower:
if 'video' in lower:
return 'vlc_video'
elif 'audio' in lower:
return 'vlc_audio'
# VLC decoder output
if 'decoder' in lower and ('channels:' in lower or 'samplerate:' in lower or 'x' in line or 'fps' in lower):
if 'audio' in lower or 'channels:' in lower or 'samplerate:' in lower:
return 'vlc_audio'
else:
return 'vlc_video'
# VLC transcode output for resolution/FPS
if 'stream_out_transcode' in lower and ('source fps' in lower or ('source ' in lower and 'x' in line)):
return 'vlc_video'
return None
def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]:
return None
def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]:
"""Parse VLC TS demux output and decoder info for video"""
try:
lower = line.lower()
result = {}
# Codec detection from TS demux
video_codec_map = {
('avc', 'h.264', 'type=0x1b'): "h264",
('hevc', 'h.265', 'type=0x24'): "hevc",
('mpeg-2', 'type=0x02'): "mpeg2video",
('mpeg-4', 'type=0x10'): "mpeg4"
}
for patterns, codec in video_codec_map.items():
if any(p in lower for p in patterns):
result['video_codec'] = codec
break
# Extract FPS from transcode output: "source fps 30/1"
fps_fraction_match = re.search(r'source fps\s+(\d+)/(\d+)', lower)
if fps_fraction_match:
numerator = int(fps_fraction_match.group(1))
denominator = int(fps_fraction_match.group(2))
if denominator > 0:
result['source_fps'] = numerator / denominator
# Extract resolution from transcode output: "source 1280x720"
source_res_match = re.search(r'source\s+(\d{3,4})x(\d{3,4})', lower)
if source_res_match:
width = int(source_res_match.group(1))
height = int(source_res_match.group(2))
if 100 <= width <= 10000 and 100 <= height <= 10000:
result['resolution'] = f"{width}x{height}"
result['width'] = width
result['height'] = height
else:
# Fallback: generic resolution pattern
resolution_match = re.search(r'(\d{3,4})x(\d{3,4})', line)
if resolution_match:
width = int(resolution_match.group(1))
height = int(resolution_match.group(2))
if 100 <= width <= 10000 and 100 <= height <= 10000:
result['resolution'] = f"{width}x{height}"
result['width'] = width
result['height'] = height
# Fallback: try to extract FPS from generic format
if 'source_fps' not in result:
fps_match = re.search(r'(\d+\.?\d*)\s*fps', lower)
if fps_match:
result['source_fps'] = float(fps_match.group(1))
return result if result else None
except Exception as e:
logger.debug(f"Error parsing VLC video stream info: {e}")
return None
def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]:
"""Parse VLC TS demux output and decoder info for audio"""
try:
lower = line.lower()
result = {}
# Codec detection from TS demux
audio_codec_map = {
('type=0xf', 'adts'): "aac",
('type=0x03', 'type=0x04'): "mp3",
('type=0x06', 'type=0x81'): "ac3",
('type=0x0b', 'lpcm'): "pcm"
}
for patterns, codec in audio_codec_map.items():
if any(p in lower for p in patterns):
result['audio_codec'] = codec
break
# VLC decoder format: "AAC channels: 2 samplerate: 48000"
if 'channels:' in lower:
channels_match = re.search(r'channels:\s*(\d+)', lower)
if channels_match:
num_channels = int(channels_match.group(1))
# Convert number to name
channel_names = {1: 'mono', 2: 'stereo', 6: '5.1', 8: '7.1'}
result['audio_channels'] = channel_names.get(num_channels, str(num_channels))
if 'samplerate:' in lower:
samplerate_match = re.search(r'samplerate:\s*(\d+)', lower)
if samplerate_match:
result['sample_rate'] = int(samplerate_match.group(1))
# Try to extract sample rate (Hz format)
sample_rate_match = re.search(r'(\d+)\s*hz', lower)
if sample_rate_match and 'sample_rate' not in result:
result['sample_rate'] = int(sample_rate_match.group(1))
# Try to extract channels (word format)
if 'audio_channels' not in result:
channel_match = re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', lower)
if channel_match:
result['audio_channels'] = channel_match.group(1)
return result if result else None
except Exception as e:
logger.error(f"[VLC AUDIO PARSER] Error parsing VLC audio stream info: {e}")
return None
class StreamlinkLogParser(BaseLogParser):
"""Parser for Streamlink log output"""
STREAM_TYPE_METHODS = {
'streamlink': 'parse_video_stream'
}
def can_parse(self, line: str) -> Optional[str]:
"""Check if this is a Streamlink line we can parse"""
lower = line.lower()
if 'opening stream:' in lower or 'available streams:' in lower:
return 'streamlink'
return None
def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]:
return None
def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]:
"""Parse Streamlink quality/resolution"""
try:
quality_match = re.search(r'(\d+p|\d+x\d+)', line)
if quality_match:
quality = quality_match.group(1)
if 'x' in quality:
resolution = quality
width, height = map(int, quality.split('x'))
else:
resolutions = {
'2160p': ('3840x2160', 3840, 2160),
'1080p': ('1920x1080', 1920, 1080),
'720p': ('1280x720', 1280, 720),
'480p': ('854x480', 854, 480),
'360p': ('640x360', 640, 360)
}
resolution, width, height = resolutions.get(quality, ('1920x1080', 1920, 1080))
return {
'video_codec': 'h264',
'resolution': resolution,
'width': width,
'height': height,
'pixel_format': 'yuv420p'
}
except Exception as e:
logger.debug(f"Error parsing Streamlink video info: {e}")
return None
def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]:
return None
class LogParserFactory:
"""Factory to get the appropriate log parser"""
_parsers = {
'ffmpeg': FFmpegLogParser(),
'vlc': VLCLogParser(),
'streamlink': StreamlinkLogParser()
}
@classmethod
def _get_parser_and_method(cls, stream_type: str) -> Optional[tuple[BaseLogParser, str]]:
"""Determine parser and method from stream_type"""
# Check each parser to see if it handles this stream_type
for parser in cls._parsers.values():
method_name = parser.STREAM_TYPE_METHODS.get(stream_type)
if method_name:
return (parser, method_name)
return None
@classmethod
def parse(cls, stream_type: str, line: str) -> Optional[Dict[str, Any]]:
"""
Parse a log line based on stream type.
Returns parsed data or None if parsing fails.
"""
result = cls._get_parser_and_method(stream_type)
if not result:
return None
parser, method_name = result
method = getattr(parser, method_name, None)
if method:
return method(line)
return None
@classmethod
def auto_parse(cls, line: str) -> Optional[tuple[str, Dict[str, Any]]]:
"""
Automatically detect which parser can handle this line and parse it.
Returns (stream_type, parsed_data) or None if no parser can handle it.
"""
# Try each parser to see if it can handle this line
for parser in cls._parsers.values():
stream_type = parser.can_parse(line)
if stream_type:
# Parser can handle this line, now parse it
parsed_data = cls.parse(stream_type, line)
if parsed_data:
return (stream_type, parsed_data)
return None
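
A quick sketch of how the factory can be exercised with representative log lines; the FFmpeg line is the example quoted in the previous channel_service implementation, the VLC line is synthetic, and the import is shown as the relative form used by channel_service.py.

from .log_parsers import LogParserFactory  # relative import as used by channel_service.py

# Example FFmpeg stream line (taken from the comments in the previous implementation)
ffmpeg_line = ("Stream #0:0: Video: h264 (Main), yuv420p(tv, progressive), "
               "1280x720 [SAR 1:1 DAR 16:9], q=2-31, 2000 kb/s, 29.97 fps, 90k tbn")
print(LogParserFactory.parse("video", ffmpeg_line))
# -> {'video_codec': 'h264', 'resolution': '1280x720', 'width': 1280, 'height': 720,
#     'source_fps': 29.97, 'pixel_format': 'yuv420p', 'video_bitrate': 2000.0}

# Synthetic VLC TS demux line; real VLC output varies by build and stream
vlc_line = "ts demux debug: pid 256 has type=0x1b (video)"
print(LogParserFactory.auto_parse(vlc_line))
# -> ('vlc_video', {'video_codec': 'h264'})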

View file

@ -107,6 +107,10 @@ class StreamManager:
# Add this flag for tracking transcoding process status
self.transcode_process_active = False
# Track stream command for efficient log parser routing
self.stream_command = None
self.parser_type = None # Will be set when transcode process starts
# Add tracking for data throughput
self.bytes_processed = 0
self.last_bytes_update = time.time()
@ -476,6 +480,21 @@ class StreamManager:
# Build and start transcode command
self.transcode_cmd = stream_profile.build_command(self.url, self.user_agent)
# Store stream command for efficient log parser routing
self.stream_command = stream_profile.command
# Map actual commands to parser types for direct routing
command_to_parser = {
'ffmpeg': 'ffmpeg',
'cvlc': 'vlc',
'vlc': 'vlc',
'streamlink': 'streamlink'
}
self.parser_type = command_to_parser.get(self.stream_command.lower())
if self.parser_type:
logger.debug(f"Using {self.parser_type} parser for log parsing (command: {self.stream_command})")
else:
logger.debug(f"Unknown stream command '{self.stream_command}', will use auto-detection for log parsing")
# For UDP streams, remove any user_agent parameters from the command
if hasattr(self, 'stream_type') and self.stream_type == StreamType.UDP:
# Filter out any arguments that contain the user_agent value or related headers
@ -645,35 +664,51 @@ class StreamManager:
if content_lower.startswith('output #') or 'encoder' in content_lower:
self.ffmpeg_input_phase = False
# Only parse stream info if we're still in the input phase
if ("stream #" in content_lower and
("video:" in content_lower or "audio:" in content_lower) and
self.ffmpeg_input_phase):
# Route to appropriate parser based on known command type
from .services.log_parsers import LogParserFactory
from .services.channel_service import ChannelService
from .services.channel_service import ChannelService
if "video:" in content_lower:
ChannelService.parse_and_store_stream_info(self.channel_id, content, "video", self.current_stream_id)
elif "audio:" in content_lower:
ChannelService.parse_and_store_stream_info(self.channel_id, content, "audio", self.current_stream_id)
parse_result = None
# If we know the parser type, use direct routing for efficiency
if self.parser_type:
# Get the appropriate parser and check what it can parse
parser = LogParserFactory._parsers.get(self.parser_type)
if parser:
stream_type = parser.can_parse(content)
if stream_type:
# Parser can handle this line, parse it directly
parsed_data = LogParserFactory.parse(stream_type, content)
if parsed_data:
parse_result = (stream_type, parsed_data)
else:
# Unknown command type - use auto-detection as fallback
parse_result = LogParserFactory.auto_parse(content)
if parse_result:
stream_type, parsed_data = parse_result
# For FFmpeg, only parse during input phase
if stream_type in ['video', 'audio', 'input']:
if self.ffmpeg_input_phase:
ChannelService.parse_and_store_stream_info(self.channel_id, content, stream_type, self.current_stream_id)
else:
# VLC and Streamlink can be parsed anytime
ChannelService.parse_and_store_stream_info(self.channel_id, content, stream_type, self.current_stream_id)
# Determine log level based on content
if any(keyword in content_lower for keyword in ['error', 'failed', 'cannot', 'invalid', 'corrupt']):
logger.error(f"FFmpeg stderr for channel {self.channel_id}: {content}")
logger.error(f"Stream process error for channel {self.channel_id}: {content}")
elif any(keyword in content_lower for keyword in ['warning', 'deprecated', 'ignoring']):
logger.warning(f"FFmpeg stderr for channel {self.channel_id}: {content}")
logger.warning(f"Stream process warning for channel {self.channel_id}: {content}")
elif content.startswith('frame=') or 'fps=' in content or 'speed=' in content:
# Stats lines - log at trace level to avoid spam
logger.trace(f"FFmpeg stats for channel {self.channel_id}: {content}")
logger.trace(f"Stream stats for channel {self.channel_id}: {content}")
elif any(keyword in content_lower for keyword in ['input', 'output', 'stream', 'video', 'audio']):
# Stream info - log at info level
logger.info(f"FFmpeg info for channel {self.channel_id}: {content}")
if content.startswith('Input #0'):
# If it's input 0, parse stream info
from .services.channel_service import ChannelService
ChannelService.parse_and_store_stream_info(self.channel_id, content, "input", self.current_stream_id)
logger.info(f"Stream info for channel {self.channel_id}: {content}")
else:
# Everything else at debug level
logger.debug(f"FFmpeg stderr for channel {self.channel_id}: {content}")
logger.debug(f"Stream process output for channel {self.channel_id}: {content}")
except Exception as e:
logger.error(f"Error logging stderr content for channel {self.channel_id}: {e}")

View file

@ -471,7 +471,7 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
# If HEAD not supported, server will return 405 or other error
if 200 <= head_response.status_code < 300:
# HEAD request successful
return True, head_response.url, head_response.status_code, "Valid (HEAD request)"
return True, url, head_response.status_code, "Valid (HEAD request)"
# Try a GET request with stream=True to avoid downloading all content
get_response = session.get(
@ -484,7 +484,7 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
# IMPORTANT: Check status code first before checking content
if not (200 <= get_response.status_code < 300):
logger.warning(f"Stream validation failed with HTTP status {get_response.status_code}")
return False, get_response.url, get_response.status_code, f"Invalid HTTP status: {get_response.status_code}"
return False, url, get_response.status_code, f"Invalid HTTP status: {get_response.status_code}"
# Only check content if status code is valid
try:
@ -538,7 +538,7 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
get_response.close()
# If we have content, consider it valid even with unrecognized content type
return is_valid, get_response.url, get_response.status_code, message
return is_valid, url, get_response.status_code, message
except requests.exceptions.Timeout:
return False, url, 0, "Timeout connecting to stream"
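
The three changes above make validate_stream_url report back the URL it was given rather than `response.url`, so callers never persist a tokenized redirect target that may expire before a client connects. A small illustration of the difference, with a hypothetical URL:

import requests

original = "http://provider.example/live/stream.ts"  # hypothetical provider URL
resp = requests.get(original, stream=True, allow_redirects=True, timeout=(5, 5))
print(resp.url)    # possibly a short-lived, tokenized CDN URL after redirects
print(original)    # what validate_stream_url now returns in its second tuple element
resp.close()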

View file

@ -142,8 +142,12 @@ class CoreSettingsViewSet(viewsets.ModelViewSet):
},
status=status.HTTP_200_OK,
)
return Response(in_network, status=status.HTTP_200_OK)
response_data = {
**in_network,
"client_ip": str(client_ip)
}
return Response(response_data, status=status.HTTP_200_OK)
return Response({}, status=status.HTTP_200_OK)
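
With this change the network-access check response gains a `client_ip` field alongside the existing keys; the settings UI (see the SettingsPage diff further down) reads it as `check.client_ip`. A hedged sketch of the payload shape, where every value is illustrative and only the `UI` and `client_ip` keys are taken from the diffs:

# Illustrative response body; keys other than "UI" and "client_ip" depend on the
# configured network-access settings and are not shown in this diff.
example_response = {
    "UI": ["192.168.0.0/16"],   # non-empty when the client is outside the allowed web UI networks
    "client_ip": "203.0.113.7", # the requesting client's IP, newly included in the payload
}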

View file

@ -23,7 +23,7 @@
"model": "core.streamprofile",
"pk": 1,
"fields": {
"name": "ffmpeg",
"name": "FFmpeg",
"command": "ffmpeg",
"parameters": "-i {streamUrl} -c:v copy -c:a copy -f mpegts pipe:1",
"is_active": true,
@ -34,11 +34,22 @@
"model": "core.streamprofile",
"pk": 2,
"fields": {
"name": "streamlink",
"name": "Streamlink",
"command": "streamlink",
"parameters": "{streamUrl} best --stdout",
"is_active": true,
"user_agent": "1"
}
},
{
"model": "core.streamprofile",
"pk": 3,
"fields": {
"name": "VLC",
"command": "cvlc",
"parameters": "-vv -I dummy --no-video-title-show --http-user-agent {userAgent} {streamUrl} --sout #standard{access=file,mux=ts,dst=-}",
"is_active": true,
"user_agent": "1"
}
}
]
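
For context on the new profile, the `parameters` template is expanded into an argv list when a stream starts (stream_manager.py calls `stream_profile.build_command(...)`). The actual build_command implementation is not part of this diff, so the following only sketches the substitution the placeholders imply:

import shlex

# Template string copied from the fixture above; the URL, user agent, and the
# substitution/splitting logic are illustrative assumptions.
template = ("-vv -I dummy --no-video-title-show --http-user-agent {userAgent} "
            "{streamUrl} --sout #standard{access=file,mux=ts,dst=-}")
stream_url = "http://provider.example/live/123.ts"
user_agent = "TiviMate/5.1.6"

argv = ["cvlc"] + shlex.split(
    template.replace("{userAgent}", user_agent).replace("{streamUrl}", stream_url)
)
# -> ['cvlc', '-vv', '-I', 'dummy', '--no-video-title-show',
#     '--http-user-agent', 'TiviMate/5.1.6',
#     'http://provider.example/live/123.ts',
#     '--sout', '#standard{access=file,mux=ts,dst=-}']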

View file

@ -0,0 +1,42 @@
# Generated migration to add VLC stream profile
from django.db import migrations
def add_vlc_profile(apps, schema_editor):
StreamProfile = apps.get_model("core", "StreamProfile")
UserAgent = apps.get_model("core", "UserAgent")
# Check if VLC profile already exists
if not StreamProfile.objects.filter(name="VLC").exists():
# Get the TiviMate user agent (should be pk=1)
try:
tivimate_ua = UserAgent.objects.get(pk=1)
except UserAgent.DoesNotExist:
# Fallback: get first available user agent
tivimate_ua = UserAgent.objects.first()
if not tivimate_ua:
# No user agents exist, skip creating profile
return
StreamProfile.objects.create(
name="VLC",
command="cvlc",
parameters="-vv -I dummy --no-video-title-show --http-user-agent {userAgent} {streamUrl} --sout #standard{access=file,mux=ts,dst=-}",
is_active=True,
user_agent=tivimate_ua,
locked=True, # Make it read-only like ffmpeg/streamlink
)
def remove_vlc_profile(apps, schema_editor):
StreamProfile = apps.get_model("core", "StreamProfile")
StreamProfile.objects.filter(name="VLC").delete()
class Migration(migrations.Migration):
dependencies = [
('core', '0018_alter_systemevent_event_type'),
]
operations = [
migrations.RunPython(add_vlc_profile, remove_vlc_profile),
]

View file

@ -15,7 +15,8 @@ RUN apt-get update && apt-get install --no-install-recommends -y \
python-is-python3 python3-pip \
libpcre3 libpcre3-dev libpq-dev procps \
build-essential gcc pciutils \
nginx streamlink comskip\
nginx streamlink comskip \
vlc-bin vlc-plugin-base \
&& apt-get clean && rm -rf /var/lib/apt/lists/*
# --- Create Python virtual environment ---

View file

@ -36,6 +36,14 @@ if ! [[ "$DISPATCHARR_PORT" =~ ^[0-9]+$ ]]; then
fi
sed -i "s/NGINX_PORT/${DISPATCHARR_PORT}/g" /etc/nginx/sites-enabled/default
# Configure nginx based on IPv6 availability
if ip -6 addr show | grep -q "inet6"; then
echo "✅ IPv6 is available, enabling IPv6 in nginx"
else
echo "⚠️ IPv6 not available, disabling IPv6 in nginx"
sed -i '/listen \[::\]:/d' /etc/nginx/sites-enabled/default
fi
# NOTE: mac doesn't run as root, so only manage permissions
# if this script is running as root
if [ "$(id -u)" = "0" ]; then

View file

@ -36,7 +36,7 @@
"model": "core.streamprofile",
"pk": 1,
"fields": {
"profile_name": "ffmpeg",
"profile_name": "FFmpeg",
"command": "ffmpeg",
"parameters": "-i {streamUrl} -c:a copy -c:v copy -f mpegts pipe:1",
"is_active": true,
@ -46,13 +46,23 @@
{
"model": "core.streamprofile",
"fields": {
"profile_name": "streamlink",
"profile_name": "Streamlink",
"command": "streamlink",
"parameters": "{streamUrl} best --stdout",
"is_active": true,
"user_agent": "1"
}
},
{
"model": "core.streamprofile",
"fields": {
"profile_name": "VLC",
"command": "cvlc",
"parameters": "-vv -I dummy --no-video-title-show --http-user-agent {userAgent} {streamUrl} --sout #standard{access=file,mux=ts,dst=-}",
"is_active": true,
"user_agent": "1"
}
},
{
"model": "core.coresettings",
"fields": {

View file

@ -386,6 +386,15 @@ const ChannelsTable = ({ onReady }) => {
params.append('page', pagination.pageIndex + 1);
params.append('page_size', pagination.pageSize);
params.append('include_streams', 'true');
if (selectedProfileId !== '0') {
params.append('channel_profile_id', selectedProfileId);
}
if (showDisabled === true) {
params.append('show_disabled', true);
}
if (showOnlyStreamlessChannels === true) {
params.append('only_streamless', true);
}
// Apply sorting
if (sorting.length > 0) {
@ -428,7 +437,15 @@ const ChannelsTable = ({ onReady }) => {
hasSignaledReady.current = true;
onReady();
}
}, [pagination, sorting, debouncedFilters, onReady]);
}, [
pagination,
sorting,
debouncedFilters,
onReady,
showDisabled,
selectedProfileId,
showOnlyStreamlessChannels,
]);
const stopPropagation = useCallback((e) => {
e.stopPropagation();
@ -1355,6 +1372,10 @@ const ChannelsTable = ({ onReady }) => {
deleteChannels={deleteChannels}
selectedTableIds={table.selectedTableIds}
table={table}
showDisabled={showDisabled}
setShowDisabled={setShowDisabled}
showOnlyStreamlessChannels={showOnlyStreamlessChannels}
setShowOnlyStreamlessChannels={setShowOnlyStreamlessChannels}
/>
{/* Table or ghost empty state inside Paper */}

View file

@ -12,20 +12,22 @@ import {
Text,
TextInput,
Tooltip,
UnstyledButton,
useMantineTheme,
} from '@mantine/core';
import {
ArrowDown01,
Binary,
Check,
CircleCheck,
Ellipsis,
EllipsisVertical,
SquareMinus,
SquarePen,
SquarePlus,
Settings,
Eye,
EyeOff,
Filter,
Square,
SquareCheck,
} from 'lucide-react';
import API from '../../../api';
import { notifications } from '@mantine/notifications';
@ -102,6 +104,10 @@ const ChannelTableHeader = ({
editChannel,
deleteChannels,
selectedTableIds,
showDisabled,
setShowDisabled,
showOnlyStreamlessChannels,
setShowOnlyStreamlessChannels,
}) => {
const theme = useMantineTheme();
@ -208,6 +214,14 @@ const ChannelTableHeader = ({
);
};
const toggleShowDisabled = () => {
setShowDisabled(!showDisabled);
};
const toggleShowOnlyStreamlessChannels = () => {
setShowOnlyStreamlessChannels(!showOnlyStreamlessChannels);
};
return (
<Group justify="space-between">
<Group gap={5} style={{ paddingLeft: 10 }}>
@ -236,6 +250,41 @@ const ChannelTableHeader = ({
}}
>
<Flex gap={6}>
<Menu shadow="md" width={200}>
<Menu.Target>
<Button size="xs" variant="default" onClick={() => {}}>
<Filter size={18} />
</Button>
</Menu.Target>
<Menu.Dropdown>
<Menu.Item
onClick={toggleShowDisabled}
leftSection={
showDisabled ? <Eye size={18} /> : <EyeOff size={18} />
}
disabled={selectedProfileId === '0'}
>
<Text size="xs">
{showDisabled ? 'Hide Disabled' : 'Show Disabled'}
</Text>
</Menu.Item>
<Menu.Item
onClick={toggleShowOnlyStreamlessChannels}
leftSection={
showOnlyStreamlessChannels ? (
<SquareCheck size={18} />
) : (
<Square size={18} />
)
}
>
<Text size="xs">Only Empty Channels</Text>
</Menu.Item>
</Menu.Dropdown>
</Menu>
<Button
leftSection={<SquarePen size={18} />}
variant="default"

View file

@ -191,6 +191,8 @@ const SettingsPage = () => {
useState(false);
const [netNetworkAccessConfirmCIDRs, setNetNetworkAccessConfirmCIDRs] =
useState([]);
const [clientIpAddress, setClientIpAddress] = useState(null);
const [proxySettingsSaved, setProxySettingsSaved] = useState(false);
const [generalSettingsSaved, setGeneralSettingsSaved] = useState(false);
@ -508,6 +510,9 @@ const SettingsPage = () => {
return;
}
// Store the client IP
setClientIpAddress(check.client_ip);
// For now, only warn if we're blocking the UI
const blockedAccess = check.UI;
if (blockedAccess.length == 0) {
@ -1365,7 +1370,7 @@ Please ensure you have time to let this complete before proceeding.`}
message={
<>
<Text>
Your client is not included in the allowed networks for the web
Your client {clientIpAddress && `(${clientIpAddress}) `}is not included in the allowed networks for the web
UI. Are you sure you want to proceed?
</Text>

View file

@ -481,8 +481,8 @@ const VODCard = ({ vodContent, stopVODClient }) => {
size={16}
style={{
transform: isClientExpanded
? 'rotate(180deg)'
: 'rotate(0deg)',
? 'rotate(0deg)'
: 'rotate(180deg)',
transition: 'transform 0.2s',
}}
/>

View file

@ -1,5 +1,5 @@
"""
Dispatcharr version information.
"""
__version__ = '0.15.0' # Follow semantic versioning (MAJOR.MINOR.PATCH)
__version__ = '0.15.1' # Follow semantic versioning (MAJOR.MINOR.PATCH)
__timestamp__ = None # Set during CI/CD build process