diff --git a/apps/accounts/migrations/0001_initial.py b/apps/accounts/migrations/0001_initial.py index 4ce00e01..bc92ebe6 100644 --- a/apps/accounts/migrations/0001_initial.py +++ b/apps/accounts/migrations/0001_initial.py @@ -1,4 +1,4 @@ -# Generated by Django 5.1.6 on 2025-03-02 00:01 +# Generated by Django 5.1.6 on 2025-03-02 13:52 import django.contrib.auth.models import django.contrib.auth.validators diff --git a/apps/channels/api_views.py b/apps/channels/api_views.py index ea55e3e6..d1858165 100644 --- a/apps/channels/api_views.py +++ b/apps/channels/api_views.py @@ -9,6 +9,8 @@ from django.shortcuts import get_object_or_404 from .models import Stream, Channel, ChannelGroup from .serializers import StreamSerializer, ChannelSerializer, ChannelGroupSerializer +from .tasks import match_epg_channels + # ───────────────────────────────────────────────────────── # 1) Stream API (CRUD) @@ -30,6 +32,7 @@ class StreamViewSet(viewsets.ModelViewSet): qs = qs.filter(channels__isnull=True) return qs + # ───────────────────────────────────────────────────────── # 2) Channel Group Management (CRUD) # ───────────────────────────────────────────────────────── @@ -38,6 +41,7 @@ class ChannelGroupViewSet(viewsets.ModelViewSet): serializer_class = ChannelGroupSerializer permission_classes = [IsAuthenticated] + # ───────────────────────────────────────────────────────── # 3) Channel Management (CRUD) # ───────────────────────────────────────────────────────── @@ -131,6 +135,7 @@ class ChannelViewSet(viewsets.ModelViewSet): 'tvg_id': stream.tvg_id, 'channel_group_id': channel_group.id, 'logo_url': stream.logo_url, + 'streams': [stream_id] } serializer = self.get_serializer(data=channel_data) serializer.is_valid(raise_exception=True) @@ -178,6 +183,7 @@ class ChannelViewSet(viewsets.ModelViewSet): # Gather current used numbers once. 
used_numbers = set(Channel.objects.all().values_list('channel_number', flat=True)) next_number = 1 + def get_auto_number(): nonlocal next_number while next_number in used_numbers: @@ -221,6 +227,7 @@ class ChannelViewSet(viewsets.ModelViewSet): "tvg_id": stream.tvg_id, "channel_group_id": channel_group.id, "logo_url": stream.logo_url, + "streams": [stream_id], } serializer = self.get_serializer(data=channel_data) if serializer.is_valid(): @@ -236,6 +243,20 @@ class ChannelViewSet(viewsets.ModelViewSet): return Response(response_data, status=status.HTTP_201_CREATED) + # ───────────────────────────────────────────────────────── + # 6) EPG Fuzzy Matching + # ───────────────────────────────────────────────────────── + @swagger_auto_schema( + method='post', + operation_description="Kick off a Celery task that tries to fuzzy-match channels with EPG data.", + responses={202: "EPG matching task initiated"} + ) + @action(detail=False, methods=['post'], url_path='match-epg') + def match_epg(self, request): + match_epg_channels.delay() + return Response({"message": "EPG matching task initiated."}, status=status.HTTP_202_ACCEPTED) + + # ───────────────────────────────────────────────────────── # 4) Bulk Delete Streams # ───────────────────────────────────────────────────────── @@ -262,6 +283,7 @@ class BulkDeleteStreamsAPIView(APIView): Stream.objects.filter(id__in=stream_ids).delete() return Response({"message": "Streams deleted successfully!"}, status=status.HTTP_204_NO_CONTENT) + # ───────────────────────────────────────────────────────── # 5) Bulk Delete Channels # ───────────────────────────────────────────────────────── diff --git a/apps/channels/migrations/0001_initial.py b/apps/channels/migrations/0001_initial.py index 2e3990e2..8401450e 100644 --- a/apps/channels/migrations/0001_initial.py +++ b/apps/channels/migrations/0001_initial.py @@ -1,4 +1,4 @@ -# Generated by Django 5.1.6 on 2025-03-02 00:01 +# Generated by Django 5.1.6 on 2025-03-02 13:52 import 
django.db.models.deletion from django.db import migrations, models @@ -21,6 +21,20 @@ class Migration(migrations.Migration): ('name', models.CharField(max_length=100, unique=True)), ], ), + migrations.CreateModel( + name='Channel', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('channel_number', models.IntegerField()), + ('channel_name', models.CharField(max_length=255)), + ('logo_url', models.URLField(blank=True, max_length=2000, null=True)), + ('logo_file', models.ImageField(blank=True, null=True, upload_to='logos/')), + ('tvg_id', models.CharField(blank=True, max_length=255, null=True)), + ('tvg_name', models.CharField(blank=True, max_length=255, null=True)), + ('stream_profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='channels', to='core.streamprofile')), + ('channel_group', models.ForeignKey(blank=True, help_text='Channel group this channel belongs to.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='channels', to='channels.channelgroup')), + ], + ), migrations.CreateModel( name='Stream', fields=[ @@ -44,18 +58,20 @@ class Migration(migrations.Migration): }, ), migrations.CreateModel( - name='Channel', + name='ChannelStream', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('channel_number', models.IntegerField()), - ('channel_name', models.CharField(max_length=255)), - ('logo_url', models.URLField(blank=True, max_length=2000, null=True)), - ('logo_file', models.ImageField(blank=True, null=True, upload_to='logos/')), - ('tvg_id', models.CharField(blank=True, max_length=255, null=True)), - ('tvg_name', models.CharField(blank=True, max_length=255, null=True)), - ('stream_profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='channels', to='core.streamprofile')), - ('channel_group', 
models.ForeignKey(blank=True, help_text='Channel group this channel belongs to.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='channels', to='channels.channelgroup')), - ('streams', models.ManyToManyField(blank=True, related_name='channels', to='channels.stream')), + ('order', models.PositiveIntegerField(default=0)), + ('channel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='channels.channel')), + ('stream', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='channels.stream')), ], + options={ + 'ordering': ['order'], + }, + ), + migrations.AddField( + model_name='channel', + name='streams', + field=models.ManyToManyField(blank=True, related_name='channels', through='channels.ChannelStream', to='channels.stream'), ), ] diff --git a/apps/channels/models.py b/apps/channels/models.py index b965e32d..bd4045ba 100644 --- a/apps/channels/models.py +++ b/apps/channels/models.py @@ -61,6 +61,7 @@ class Channel(models.Model): streams = models.ManyToManyField( Stream, blank=True, + through='ChannelStream', related_name='channels' ) @@ -84,7 +85,7 @@ class Channel(models.Model): related_name='channels' ) - + def clean(self): # Enforce unique channel_number within a given group existing = Channel.objects.filter( @@ -109,3 +110,11 @@ class ChannelGroup(models.Model): def __str__(self): return self.name + +class ChannelStream(models.Model): + channel = models.ForeignKey(Channel, on_delete=models.CASCADE) + stream = models.ForeignKey(Stream, on_delete=models.CASCADE) + order = models.PositiveIntegerField(default=0) # Ordering field + + class Meta: + ordering = ['order'] # Ensure streams are retrieved in order diff --git a/apps/channels/serializers.py b/apps/channels/serializers.py index 0e4fba76..a0dcb6a6 100644 --- a/apps/channels/serializers.py +++ b/apps/channels/serializers.py @@ -1,5 +1,5 @@ from rest_framework import serializers -from .models import Stream, Channel, ChannelGroup +from .models import Stream, 
Channel, ChannelGroup, ChannelStream from core.models import StreamProfile # @@ -73,8 +73,10 @@ class ChannelSerializer(serializers.ModelSerializer): required=False ) - # Possibly show streams inline, or just by ID - # streams = StreamSerializer(many=True, read_only=True) + streams = serializers.ListField( + child=serializers.IntegerField(), write_only=True + ) + stream_ids = serializers.SerializerMethodField() class Meta: model = Channel @@ -89,5 +91,39 @@ class ChannelSerializer(serializers.ModelSerializer): 'tvg_id', 'tvg_name', 'streams', + 'stream_ids', 'stream_profile_id', ] + + def get_stream_ids(self, obj): + """Retrieve ordered stream IDs for GET requests.""" + return list(obj.streams.all().order_by('channelstream__order').values_list('id', flat=True)) + + def create(self, validated_data): + stream_ids = validated_data.pop('streams', []) + channel = Channel.objects.create(**validated_data) + + # Add streams in the specified order + for index, stream_id in enumerate(stream_ids): + ChannelStream.objects.create(channel=channel, stream_id=stream_id, order=index) + + return channel + + def update(self, instance, validated_data): + print("Validated Data:", validated_data) + stream_ids = validated_data.get('streams', None) + print(f'stream ids: {stream_ids}') + + # Update basic fields + instance.channel_name = validated_data.get('channel_name', instance.channel_name) + instance.save() + + if stream_ids is not None: + # Clear existing relationships + instance.channelstream_set.all().delete() + + # Add new streams in order + for index, stream_id in enumerate(stream_ids): + ChannelStream.objects.create(channel=instance, stream_id=stream_id, order=index) + + return instance diff --git a/apps/channels/tasks.py b/apps/channels/tasks.py new file mode 100644 index 00000000..c4bf8177 --- /dev/null +++ b/apps/channels/tasks.py @@ -0,0 +1,207 @@ +# apps/channels/tasks.py + +import logging +import re + +from celery import shared_task +from rapidfuzz import fuzz +from 
sentence_transformers import SentenceTransformer, util +from django.db import transaction + +from apps.channels.models import Channel +from apps.epg.models import EPGData +from core.models import CoreSettings # to retrieve "preferred-region" setting + +logger = logging.getLogger(__name__) + +# Load the model once at module level +SENTENCE_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2" +st_model = SentenceTransformer(SENTENCE_MODEL_NAME) + +# Threshold constants +BEST_FUZZY_THRESHOLD = 70 +LOWER_FUZZY_THRESHOLD = 40 +EMBED_SIM_THRESHOLD = 0.65 + +# Common extraneous words +COMMON_EXTRANEOUS_WORDS = [ + "tv", "channel", "network", "television", + "east", "west", "hd", "uhd", "us", "usa", "not", "24/7", + "1080p", "720p", "540p", "480p", + "arabic", "latino", "film", "movie", "movies" +] + +def normalize_channel_name(name: str) -> str: + """ + A more aggressive normalization that: + - Lowercases + - Removes bracketed/parenthesized text + - Removes punctuation + - Strips extraneous words + - Collapses extra spaces + """ + if not name: + return "" + + # Lowercase + norm = name.lower() + + # Remove bracketed text + norm = re.sub(r"\[.*?\]", "", norm) + norm = re.sub(r"\(.*?\)", "", norm) + + # Remove punctuation except word chars/spaces + norm = re.sub(r"[^\w\s]", "", norm) + + # Remove extraneous tokens + tokens = norm.split() + tokens = [t for t in tokens if t not in COMMON_EXTRANEOUS_WORDS] + + # Rejoin + norm = " ".join(tokens).strip() + return norm + +@shared_task +def match_epg_channels(): + """ + Goes through all Channels and tries to find a matching EPGData row by: + 1) If channel.tvg_id is valid in EPGData, skip + 2) If channel has a tvg_id but not found in EPGData, attempt direct EPGData lookup + 3) Otherwise do name-based fuzzy ratio pass: + - add region-based bonus if region code is found in the EPG row + - if fuzzy >= BEST_FUZZY_THRESHOLD => accept + - if fuzzy in [LOWER_FUZZY_THRESHOLD..BEST_FUZZY_THRESHOLD) => do embedding check + - else skip + 4) 
Log summary + """ + logger.info("Starting EPG matching logic...") + + # Try to get user's preferred region from CoreSettings + try: + region_obj = CoreSettings.objects.get(key="preferred-region") + region_code = region_obj.value.strip().lower() # e.g. "us" + except CoreSettings.DoesNotExist: + region_code = None + + # 1) Gather EPG rows + all_epg = list(EPGData.objects.all()) + epg_rows = [] + for e in all_epg: + epg_rows.append({ + "epg_id": e.id, + "tvg_id": e.tvg_id or "", # e.g. "Fox News.us" + "raw_name": e.channel_name, + "norm_name": normalize_channel_name(e.channel_name), + }) + + # 2) Pre-encode embeddings if possible + epg_embeddings = None + if any(row["norm_name"] for row in epg_rows): + epg_embeddings = st_model.encode( + [row["norm_name"] for row in epg_rows], + convert_to_tensor=True + ) + + matched_channels = [] + + with transaction.atomic(): + for chan in Channel.objects.all(): + # A) Skip if channel.tvg_id is valid + if chan.tvg_id and EPGData.objects.filter(tvg_id=chan.tvg_id).exists(): + continue + + # B) If channel has a tvg_id but not in EPG, do direct lookup + if chan.tvg_id: + epg_match = EPGData.objects.filter(tvg_id=chan.tvg_id).first() + if epg_match: + logger.info( + f"Channel {chan.id} '{chan.channel_name}' => found EPG by tvg_id={chan.tvg_id}" + ) + continue + + # C) No valid tvg_id => name-based matching + fallback_name = chan.tvg_name.strip() if chan.tvg_name else chan.channel_name + norm_chan = normalize_channel_name(fallback_name) + if not norm_chan: + logger.info( + f"Channel {chan.id} '{chan.channel_name}' => empty after normalization, skipping" + ) + continue + + best_score = 0 + best_epg = None + + for row in epg_rows: + if not row["norm_name"]: + continue + # Base fuzzy ratio + base_score = fuzz.ratio(norm_chan, row["norm_name"]) + + # If we have a region_code, add a small bonus if the epg row has that region + # e.g. 
tvg_id or raw_name might contain ".us" or "us" + bonus = 0 + if region_code: + # example: if region_code is "us" and row["tvg_id"] ends with ".us" + # or row["raw_name"] has "us" in it, etc. + # We'll do a naive check: + combined_text = row["tvg_id"].lower() + " " + row["raw_name"].lower() + if region_code in combined_text: + bonus = 15 # pick a small bonus + + score = base_score + bonus + + if score > best_score: + best_score = score + best_epg = row + + if not best_epg: + logger.info(f"Channel {chan.id} '{fallback_name}' => no EPG match at all.") + continue + + # E) Decide acceptance + if best_score >= BEST_FUZZY_THRESHOLD: + # Accept + chan.tvg_id = best_epg["tvg_id"] + chan.save() + matched_channels.append((chan.id, fallback_name, best_epg["tvg_id"])) + logger.info( + f"Channel {chan.id} '{fallback_name}' => matched tvg_id={best_epg['tvg_id']} (score={best_score})" + ) + elif best_score >= LOWER_FUZZY_THRESHOLD and epg_embeddings is not None: + # borderline => do embedding + chan_embedding = st_model.encode(norm_chan, convert_to_tensor=True) + sim_scores = util.cos_sim(chan_embedding, epg_embeddings)[0] + top_index = int(sim_scores.argmax()) + top_value = float(sim_scores[top_index]) + + if top_value >= EMBED_SIM_THRESHOLD: + matched_epg = epg_rows[top_index] + chan.tvg_id = matched_epg["tvg_id"] + chan.save() + matched_channels.append((chan.id, fallback_name, matched_epg["tvg_id"])) + logger.info( + f"Channel {chan.id} '{fallback_name}' => matched EPG tvg_id={matched_epg['tvg_id']} " + f"(fuzzy={best_score}, cos-sim={top_value:.2f})" + ) + else: + logger.info( + f"Channel {chan.id} '{fallback_name}' => fuzzy={best_score}, " + f"cos-sim={top_value:.2f} < {EMBED_SIM_THRESHOLD}, skipping" + ) + else: + # no match + logger.info( + f"Channel {chan.id} '{fallback_name}' => fuzzy={best_score} < {LOWER_FUZZY_THRESHOLD}, skipping" + ) + + # Final summary + total_matched = len(matched_channels) + if total_matched: + logger.info(f"Match Summary: {total_matched} 
channel(s) matched.") + for (cid, cname, tvg) in matched_channels: + logger.info(f" - Channel ID={cid}, Name='{cname}' => tvg_id='{tvg}'") + else: + logger.info("No new channels were matched.") + + logger.info("Finished EPG matching logic.") + return f"Done. Matched {total_matched} channel(s)." diff --git a/apps/dashboard/migrations/0001_initial.py b/apps/dashboard/migrations/0001_initial.py index 1ce7b141..9c39a3b7 100644 --- a/apps/dashboard/migrations/0001_initial.py +++ b/apps/dashboard/migrations/0001_initial.py @@ -1,4 +1,4 @@ -# Generated by Django 5.1.6 on 2025-03-02 00:01 +# Generated by Django 5.1.6 on 2025-03-02 13:52 from django.db import migrations, models diff --git a/apps/epg/migrations/0001_initial.py b/apps/epg/migrations/0001_initial.py index dfb9c0f0..9454d514 100644 --- a/apps/epg/migrations/0001_initial.py +++ b/apps/epg/migrations/0001_initial.py @@ -1,4 +1,4 @@ -# Generated by Django 5.1.6 on 2025-03-02 00:01 +# Generated by Django 5.1.6 on 2025-03-02 13:52 import django.db.models.deletion from django.db import migrations, models diff --git a/apps/hdhr/migrations/0001_initial.py b/apps/hdhr/migrations/0001_initial.py index 826c036d..54ad7c8c 100644 --- a/apps/hdhr/migrations/0001_initial.py +++ b/apps/hdhr/migrations/0001_initial.py @@ -1,4 +1,4 @@ -# Generated by Django 5.1.6 on 2025-03-02 00:01 +# Generated by Django 5.1.6 on 2025-03-02 13:52 from django.db import migrations, models diff --git a/apps/m3u/migrations/0001_initial.py b/apps/m3u/migrations/0001_initial.py index c78afaa1..eb92f063 100644 --- a/apps/m3u/migrations/0001_initial.py +++ b/apps/m3u/migrations/0001_initial.py @@ -1,4 +1,4 @@ -# Generated by Django 5.1.6 on 2025-03-02 00:01 +# Generated by Django 5.1.6 on 2025-03-02 13:52 import django.db.models.deletion from django.db import migrations, models diff --git a/apps/proxy/hls_proxy b/apps/proxy/hls_proxy new file mode 100644 index 00000000..98ba38a5 --- /dev/null +++ b/apps/proxy/hls_proxy @@ -0,0 +1,1227 @@ +""" +HLS 
Proxy Server with Advanced Stream Switching Support +This proxy handles HLS live streams with support for: +- Stream switching with proper discontinuity handling +- Buffer management +- Segment validation +- Connection pooling and reuse +""" + +from flask import Flask, Response, request, jsonify +import requests +import threading +import logging +import m3u8 +import time +from urllib.parse import urlparse, urljoin +import argparse +from typing import Optional, Dict, List, Set, Deque +import sys +import os + +# Initialize Flask app +app = Flask(__name__) + +# Global state management +manifest_buffer = None # Stores current manifest content +segment_buffers = {} # Maps sequence numbers to segment data +buffer_lock = threading.Lock() # Synchronizes access to buffers + +class Config: + """Configuration settings for stream handling and buffering""" + # Buffer size settings + MIN_SEGMENTS = 12 # Minimum segments to maintain + MAX_SEGMENTS = 16 # Maximum segments to store + WINDOW_SIZE = 12 # Number of segments in manifest window + INITIAL_SEGMENTS = 3 # Initial segments to buffer before playback + DEFAULT_USER_AGENT = 'VLC/3.0.20 LibVLC/3.0.20' + INITIAL_CONNECTION_WINDOW = 10 # Seconds to wait for first client + CLIENT_TIMEOUT_FACTOR = 1.5 # Multiplier for target duration to determine client timeout + CLIENT_CLEANUP_INTERVAL = 10 # Seconds between client cleanup checks + FIRST_SEGMENT_TIMEOUT = 5.0 # Seconds to wait for first segment + + # Initial buffering settings + INITIAL_BUFFER_SECONDS = 25.0 # Initial buffer in seconds before allowing clients + MAX_INITIAL_SEGMENTS = 10 # Maximum segments to fetch during initialization + BUFFER_READY_TIMEOUT = 30.0 # Maximum time to wait for initial buffer (seconds) + +class StreamBuffer: + """ + Manages buffering of stream segments with thread-safe access. 
+ + Attributes: + buffer (Dict[int, bytes]): Maps sequence numbers to segment data + lock (threading.Lock): Thread safety for buffer access + + Features: + - Thread-safe segment storage and retrieval + - Automatic cleanup of old segments + - Sequence number based indexing + """ + + def __init__(self): + self.buffer: Dict[int, bytes] = {} # Maps sequence numbers to segment data + self.lock: threading.Lock = threading.Lock() + + def __getitem__(self, key: int) -> Optional[bytes]: + """Get segment data by sequence number""" + return self.buffer.get(key) + + def __setitem__(self, key: int, value: bytes): + """Store segment data by sequence number""" + self.buffer[key] = value + # Cleanup old segments if we exceed MAX_SEGMENTS + if len(self.buffer) > Config.MAX_SEGMENTS: + keys = sorted(self.buffer.keys()) + # Keep the most recent MAX_SEGMENTS + to_remove = keys[:-Config.MAX_SEGMENTS] + for k in to_remove: + del self.buffer[k] + + def __contains__(self, key: int) -> bool: + """Check if sequence number exists in buffer""" + return key in self.buffer + + def keys(self) -> List[int]: + """Get list of available sequence numbers""" + return list(self.buffer.keys()) + + def cleanup(self, keep_sequences: List[int]): + """Remove segments not in keep list""" + for seq in list(self.buffer.keys()): + if seq not in keep_sequences: + del self.buffer[seq] + +class ClientManager: + """Manages client connections and activity tracking""" + + def __init__(self): + self.last_activity = {} # Maps client IPs to last activity timestamp + self.lock = threading.Lock() + + def record_activity(self, client_ip: str): + """Record client activity timestamp""" + with self.lock: + prev_time = self.last_activity.get(client_ip) + current_time = time.time() + self.last_activity[client_ip] = current_time + if not prev_time: + logging.info(f"New client connected: {client_ip}") + else: + logging.debug(f"Client activity: {client_ip}") + + def cleanup_inactive(self, timeout: float) -> bool: + """Remove 
inactive clients""" + now = time.time() + with self.lock: + active_clients = { + ip: last_time + for ip, last_time in self.last_activity.items() + if (now - last_time) < timeout + } + + removed = set(self.last_activity.keys()) - set(active_clients.keys()) + if removed: + for ip in removed: + inactive_time = now - self.last_activity[ip] + logging.warning(f"Client {ip} inactive for {inactive_time:.1f}s, removing") + + self.last_activity = active_clients + if active_clients: + oldest = min(now - t for t in active_clients.values()) + logging.debug(f"Active clients: {len(active_clients)}, oldest activity: {oldest:.1f}s ago") + + return len(active_clients) == 0 + +class StreamManager: + """ + Manages HLS stream state and switching logic. + + Attributes: + current_url (str): Current stream URL + channel_id (str): Unique channel identifier + running (bool): Stream activity flag + next_sequence (int): Next sequence number to assign + buffered_sequences (set): Currently buffered sequence numbers + source_changes (set): Sequences where stream source changed + + Features: + - Stream URL management + - Sequence number assignment + - Discontinuity tracking + - Thread coordination + - Buffer state management + """ + def __init__(self, initial_url: str, channel_id: str, user_agent: Optional[str] = None): + # Stream state + self.current_url = initial_url + self.channel_id = channel_id + self.user_agent = user_agent or Config.DEFAULT_USER_AGENT + self.running = True + self.switching_stream = False + + # Sequence tracking + self.next_sequence = 0 + self.highest_sequence = 0 + self.buffered_sequences = set() + self.downloaded_sources = {} + self.segment_durations = {} + + # Source tracking + self.current_source = None + self.source_changes = set() + self.stream_switch_count = 0 + + # Threading + self.fetcher = None + self.fetch_thread = None + self.url_changed = threading.Event() + + # Add manifest info + self.target_duration = 10.0 # Default, will be updated from manifest + 
self.manifest_version = 3 # Default, will be updated from manifest + + self.cleanup_thread = None + self.cleanup_running = False # New flag to control cleanup thread + self.cleanup_enabled = False # New flag to control when cleanup starts + self.initialization_time = time.time() # Add initialization timestamp + self.first_client_connected = False + self.cleanup_started = False # New flag to track cleanup state + + # Add client manager reference + self.client_manager = None + self.proxy_server = None # Reference to proxy server for cleanup + self.cleanup_thread = None + self.cleanup_interval = Config.CLIENT_CLEANUP_INTERVAL + + logging.info(f"Initialized stream manager for channel {channel_id}") + + # Buffer state tracking + self.buffer_ready = threading.Event() + self.buffered_duration = 0.0 + self.initial_buffering = True + + def update_url(self, new_url: str) -> bool: + """ + Handle stream URL changes with proper discontinuity marking. + + Args: + new_url: New stream URL to switch to + + Returns: + bool: True if URL changed, False if unchanged + + Side effects: + - Sets switching_stream flag + - Updates current_url + - Maintains sequence numbering + - Marks discontinuity point + - Signals fetch thread + """ + if new_url != self.current_url: + with buffer_lock: + self.switching_stream = True + self.current_url = new_url + + # Continue sequence numbering from last sequence + if self.buffered_sequences: + self.next_sequence = max(self.buffered_sequences) + 1 + + # Mark discontinuity at next sequence + self.source_changes.add(self.next_sequence) + + logging.info(f"Stream switch - next sequence will start at {self.next_sequence}") + + # Clear state but maintain sequence numbers + self.downloaded_sources.clear() + self.segment_durations.clear() + self.current_source = None + + # Signal thread to switch URL + self.url_changed.set() + + return True + return False + + def get_next_sequence(self, source_id: str) -> Optional[int]: + """ + Assign sequence numbers to segments 
with source change detection. + + Args: + source_id: Unique identifier for segment source + + Returns: + int: Next available sequence number + None: If segment already downloaded + + Side effects: + - Updates buffered sequences set + - Tracks source changes for discontinuity + - Maintains sequence numbering + """ + if source_id in self.downloaded_sources: + return None + + seq = self.next_sequence + while (seq in self.buffered_sequences): + seq += 1 + + # Track source changes for discontinuity markers + source_prefix = source_id.split('_')[0] + if not self.switching_stream and self.current_source and self.current_source != source_prefix: + self.source_changes.add(seq) + logging.debug(f"Source change detected at sequence {seq}") + self.current_source = source_prefix + + # Update tracking + self.downloaded_sources[source_id] = seq + self.buffered_sequences.add(seq) + self.next_sequence = seq + 1 + self.highest_sequence = max(self.highest_sequence, seq) + + return seq + + def _fetch_loop(self): + """Background thread for continuous stream fetching""" + while self.running: + try: + fetcher = StreamFetcher(self, self.buffer) + fetch_stream(fetcher, self.url_changed, self.next_sequence) + except Exception as e: + logging.error(f"Stream error: {e}") + time.sleep(5) # Wait before retry + + self.url_changed.clear() + + def start(self): + """Start the background fetch thread""" + if not self.fetch_thread or not self.fetch_thread.is_alive(): + self.running = True + self.fetch_thread = threading.Thread( + target=self._fetch_loop, + name="StreamFetcher", + daemon=True # Thread will exit when main program does + ) + self.fetch_thread.start() + logging.info("Stream manager started") + + def stop(self): + """Stop the stream manager and cleanup resources""" + self.running = False + self.cleanup_running = False + if self.fetch_thread and self.fetch_thread.is_alive(): + self.url_changed.set() + self.fetch_thread.join(timeout=5) + logging.info(f"Stream manager stopped for channel 
{self.channel_id}") + + def enable_cleanup(self): + """Enable cleanup after first client connects""" + if not self.first_client_connected: + self.first_client_connected = True + logging.info(f"First client connected to channel {self.channel_id}") + + def start_cleanup_thread(self): + """Start background thread for client activity monitoring""" + def cleanup_loop(): + # Wait for initial connection window + start_time = time.time() + while self.cleanup_running and (time.time() - start_time) < Config.INITIAL_CONNECTION_WINDOW: + if self.first_client_connected: + break + time.sleep(1) + + if not self.first_client_connected: + logging.info(f"Channel {self.channel_id}: No clients connected within {Config.INITIAL_CONNECTION_WINDOW}s window") + self.proxy_server.stop_channel(self.channel_id) + return + + # Normal client activity monitoring + while self.cleanup_running and self.running: + try: + timeout = self.target_duration * Config.CLIENT_TIMEOUT_FACTOR + if self.client_manager.cleanup_inactive(timeout): + logging.info(f"Channel {self.channel_id}: All clients disconnected for {timeout:.1f}s") + self.proxy_server.stop_channel(self.channel_id) + break + except Exception as e: + logging.error(f"Cleanup error: {e}") + if "cannot join current thread" not in str(e): + time.sleep(Config.CLIENT_CLEANUP_INTERVAL) + time.sleep(Config.CLIENT_CLEANUP_INTERVAL) + + if not self.cleanup_started: + self.cleanup_started = True + self.cleanup_running = True + self.cleanup_thread = threading.Thread( + target=cleanup_loop, + name=f"Cleanup-{self.channel_id}", + daemon=True + ) + self.cleanup_thread.start() + logging.info(f"Started cleanup thread for channel {self.channel_id}") + +class StreamFetcher: + """ + Handles HTTP requests for stream segments with connection pooling. 
+ + Attributes: + manager (StreamManager): Associated stream manager instance + buffer (StreamBuffer): Buffer for storing segments + session (requests.Session): Persistent HTTP session + redirect_cache (dict): Cache for redirect responses + + Features: + - Connection pooling and reuse + - Redirect caching + - Rate limiting + - Automatic retries + - Host fallback + """ + def __init__(self, manager: StreamManager, buffer: StreamBuffer): + self.manager = manager + self.buffer = buffer + self.stream_url = manager.current_url + self.session = requests.Session() + + # Configure session headers + self.session.headers.update({ + 'User-Agent': manager.user_agent, + 'Connection': 'keep-alive' + }) + + # Set up connection pooling + adapter = requests.adapters.HTTPAdapter( + pool_connections=2, # Number of connection pools + pool_maxsize=4, # Connections per pool + max_retries=3, # Auto-retry failed requests + pool_block=False # Don't block when pool is full + ) + + # Apply adapter to both HTTP and HTTPS + self.session.mount('http://', adapter) + self.session.mount('https://', adapter) + + # Request optimization + self.last_request_time = 0 + self.min_request_interval = 0.05 # Minimum time between requests + self.last_host = None # Cache last successful host + self.redirect_cache = {} # Cache redirect responses + self.redirect_cache_limit = 1000 + + def cleanup_redirect_cache(self): + """Remove old redirect cache entries""" + if len(self.redirect_cache) > self.redirect_cache_limit: + self.redirect_cache.clear() + + def get_base_host(self, url: str) -> str: + """ + Extract base host from URL. 
+ + Args: + url: Full URL to parse + + Returns: + str: Base host in format 'scheme://hostname' + + Example: + 'http://example.com/path' -> 'http://example.com' + """ + try: + parsed = urlparse(url) + return f"{parsed.scheme}://{parsed.netloc}" + except Exception as e: + logging.error(f"Error extracting base host: {e}") + return url + + def download(self, url: str) -> tuple[bytes, str]: + """ + Download content with connection reuse and redirect handling. + + Args: + url: URL to download from + + Returns: + tuple containing: + bytes: Downloaded content + str: Final URL after any redirects + + Features: + - Connection pooling/reuse + - Redirect caching + - Rate limiting + - Host fallback on failure + - Automatic retries + """ + now = time.time() + wait_time = self.last_request_time + self.min_request_interval - now + if (wait_time > 0): + time.sleep(wait_time) + + try: + # Use cached redirect if available + if url in self.redirect_cache: + logging.debug(f"Using cached redirect for {url}") + final_url = self.redirect_cache[url] + response = self.session.get(final_url, timeout=10) + else: + response = self.session.get(url, allow_redirects=True, timeout=10) + if response.history: # Cache redirects + logging.debug(f"Caching redirect for {url} -> {response.url}") + self.redirect_cache[url] = response.url + + self.last_request_time = time.time() + + if response.status_code == 200: + self.last_host = self.get_base_host(response.url) + + return response.content, response.url + + except Exception as e: + logging.error(f"Download error: {e}") + if self.last_host and not url.startswith(self.last_host): + # Use urljoin to handle path resolution + new_url = urljoin(self.last_host + '/', url.split('://')[-1].split('/', 1)[-1]) + logging.debug(f"Retrying with last host: {new_url}") + return self.download(new_url) + raise + + def fetch_loop(self): + """Main fetch loop for stream data""" + retry_delay = 1 + max_retry_delay = 8 + last_manifest_time = 0 + downloaded_segments = set() # 
Track downloaded segment URIs + + while self.manager.running: + try: + current_time = time.time() + + # Check manifest update timing + if last_manifest_time: + time_since_last = current_time - last_manifest_time + if time_since_last < (self.manager.target_duration * 0.5): + time.sleep(self.manager.target_duration * 0.5 - time_since_last) + continue + + # Get manifest data + manifest_data, final_url = self.download(self.manager.current_url) + manifest = m3u8.loads(manifest_data.decode()) + + # Update manifest info + if manifest.target_duration: + self.manager.target_duration = float(manifest.target_duration) + if manifest.version: + self.manager.manifest_version = manifest.version + + if not manifest.segments: + continue + + if self.manager.initial_buffering: + segments_to_fetch = [] + current_duration = 0.0 + successful_downloads = 0 # Initialize counter here + + # Start from the end of the manifest + for segment in reversed(manifest.segments): + current_duration += float(segment.duration) + segments_to_fetch.append(segment) + + # Stop when we have enough duration or hit max segments + if (current_duration >= Config.INITIAL_BUFFER_SECONDS or + len(segments_to_fetch) >= Config.MAX_INITIAL_SEGMENTS): + break + + # Reverse back to chronological order + segments_to_fetch.reverse() + + # Download initial segments + for segment in segments_to_fetch: + try: + segment_url = urljoin(final_url, segment.uri) + segment_data, _ = self.download(segment_url) + + validation = verify_segment(segment_data) + if validation.get('valid', False): + with self.buffer.lock: + seq = self.manager.next_sequence + self.buffer[seq] = segment_data + duration = float(segment.duration) + self.manager.segment_durations[seq] = duration + self.manager.buffered_duration += duration + self.manager.next_sequence += 1 + successful_downloads += 1 + logging.debug(f"Buffered initial segment {seq} (source: {segment.uri}, duration: {duration}s)") + except Exception as e: + logging.error(f"Initial segment 
download error: {e}") + + # Only mark buffer ready if we got some segments + if successful_downloads > 0: + self.manager.initial_buffering = False + self.manager.buffer_ready.set() + logging.info(f"Initial buffer ready with {successful_downloads} segments " + f"({self.manager.buffered_duration:.1f}s of content)") + continue + + # Normal operation - get latest segment if we haven't already + latest_segment = manifest.segments[-1] + if latest_segment.uri in downloaded_segments: + # Wait for next manifest update + time.sleep(self.manager.target_duration * 0.5) + continue + + try: + segment_url = urljoin(final_url, latest_segment.uri) + segment_data, _ = self.download(segment_url) + + # Try several times if segment validation fails + max_retries = 3 + retry_count = 0 + while retry_count < max_retries: + verification = verify_segment(segment_data) + if verification.get('valid', False): + break + logging.warning(f"Invalid segment, retry {retry_count + 1}/{max_retries}: {verification.get('error')}") + time.sleep(0.5) # Short delay before retry + segment_data, _ = self.download(segment_url) + retry_count += 1 + + if verification.get('valid', False): + with self.buffer.lock: + seq = self.manager.next_sequence + self.buffer[seq] = segment_data + self.manager.segment_durations[seq] = float(latest_segment.duration) + self.manager.next_sequence += 1 + downloaded_segments.add(latest_segment.uri) + logging.debug(f"Stored segment {seq} (source: {latest_segment.uri}, " + f"duration: {latest_segment.duration}s, " + f"size: {len(segment_data)})") + + # Update timing + last_manifest_time = time.time() + retry_delay = 1 # Reset retry delay on success + else: + logging.error(f"Segment validation failed after {max_retries} retries") + + except Exception as e: + logging.error(f"Segment download error: {e}") + continue + + # Cleanup old segment URIs from tracking + if len(downloaded_segments) > 100: + downloaded_segments.clear() + + except Exception as e: + logging.error(f"Fetch error: 
{e}") + time.sleep(retry_delay) + retry_delay = min(retry_delay * 2, max_retry_delay) + +def get_segment_sequence(segment_uri: str) -> Optional[int]: + """ + Extract sequence number from segment URI pattern. + + Args: + segment_uri: Segment filename or path + + Returns: + int: Extracted sequence number if found + None: If no valid sequence number can be extracted + + Handles common patterns like: + - Numerical sequences (e.g., segment_1234.ts) + - Complex patterns with stream IDs (e.g., stream_123_456.ts) + """ + + try: + # Try numerical sequence (e.g., 1038_3693.ts) + if '_' in segment_uri: + return int(segment_uri.split('_')[-1].split('.')[0]) + return None + except ValueError: + return None + +# Update verify_segment with more thorough checks +def verify_segment(data: bytes) -> dict: + """ + Verify MPEG-TS segment integrity and structure. + + Args: + data: Raw segment data bytes + + Returns: + dict containing: + valid (bool): True if segment passes all checks + packets (int): Number of valid packets found + size (int): Total segment size in bytes + error (str): Description if validation fails + + Checks: + - Minimum size requirements + - Packet size alignment + - Sync byte presence + - Transport error indicators + """ + + # Check minimum size + if len(data) < 188: + return {'valid': False, 'error': 'Segment too short'} + + # Verify segment size is multiple of packet size + if len(data) % 188 != 0: + return {'valid': False, 'error': 'Invalid segment size'} + + valid_packets = 0 + total_packets = len(data) // 188 + + # Scan all packets in segment + for i in range(0, len(data), 188): + packet = data[i:i+188] + + # Check packet completeness + if len(packet) != 188: + return {'valid': False, 'error': 'Incomplete packet'} + + # Verify sync byte + if packet[0] != 0x47: + return {'valid': False, 'error': f'Invalid sync byte at offset {i}'} + + # Check transport error indicator + if packet[1] & 0x80: + return {'valid': False, 'error': 'Transport error indicator set'} + + 
valid_packets += 1 + + return { + 'valid': True, + 'packets': valid_packets, + 'size': len(data) + } + +def fetch_stream(fetcher: StreamFetcher, stop_event: threading.Event, start_sequence: int = 0): + """ + Main streaming function that handles manifest updates and segment downloads. + + Args: + fetcher: StreamFetcher instance to handle HTTP requests + stop_event: Threading event to signal when to stop fetching + start_sequence: Initial sequence number to start from + + The function implements the core HLS fetching logic: + - Fetches and parses manifest files + - Downloads new segments when they become available + - Handles stream switches with proper discontinuity marking + - Maintains buffer state and segment sequence numbering + """ + # Remove global stream_manager reference + retry_delay = 1 + max_retry_delay = 8 + last_segment_time = 0 + buffer_initialized = False + manifest_update_needed = True + segment_duration = None + + while not stop_event.is_set(): + try: + now = time.time() + + # Only update manifest when it's time for next segment + should_update = ( + manifest_update_needed or + not segment_duration or + (last_segment_time and (now - last_segment_time) >= segment_duration * 0.8) + ) + + if should_update: + manifest_data, final_url = fetcher.download(fetcher.stream_url) + manifest = m3u8.loads(manifest_data.decode()) + + if not manifest.segments: + continue + + with buffer_lock: + manifest_content = manifest_data.decode() + new_segments = {} + + if fetcher.manager.switching_stream: # Use fetcher.manager instead of stream_manager + # Stream switch - only get latest segment + manifest_segments = [manifest.segments[-1]] + seq_start = fetcher.manager.next_sequence + max_segments = 1 + logging.debug(f"Processing stream switch - getting latest segment at sequence {seq_start}") + elif not buffer_initialized: + # Initial buffer + manifest_segments = manifest.segments[-Config.INITIAL_SEGMENTS:] + seq_start = fetcher.manager.next_sequence + max_segments = 
Config.INITIAL_SEGMENTS + logging.debug(f"Starting initial buffer at sequence {seq_start}") + else: + # Normal operation + manifest_segments = [manifest.segments[-1]] + seq_start = fetcher.manager.next_sequence + max_segments = 1 + + # Map segments + segments_mapped = 0 + for segment in manifest_segments: + if segments_mapped >= max_segments: + break + + source_id = segment.uri.split('/')[-1].split('.')[0] + next_seq = fetcher.manager.get_next_sequence(source_id) + + if next_seq is not None: + duration = float(segment.duration) + new_segments[next_seq] = { + 'uri': segment.uri, + 'duration': duration, + 'source_id': source_id + } + fetcher.manager.segment_durations[next_seq] = duration + segments_mapped += 1 + + manifest_buffer = manifest_content + + # Download segments + for sequence_id, segment_info in new_segments.items(): + try: + segment_url = f"{fetcher.last_host}{segment_info['uri']}" + logging.debug(f"Downloading {segment_info['uri']} as segment {sequence_id}.ts " + f"(source: {segment_info['source_id']}, duration: {segment_info['duration']:.3f}s)") + + segment_data, _ = fetcher.download(segment_url) + validation = verify_segment(segment_data) + + if validation.get('valid', False): + with buffer_lock: + segment_buffers[sequence_id] = segment_data + logging.debug(f"Downloaded and verified segment {sequence_id} (packets: {validation['packets']})") + + if fetcher.manager.switching_stream: + fetcher.manager.switching_stream = False + stop_event.set() # Force fetcher restart with new URL + break + elif not buffer_initialized and len(segment_buffers) >= Config.INITIAL_SEGMENTS: + buffer_initialized = True + manifest_update_needed = True + break + except Exception as e: + logging.error(f"Segment download error: {e}") + continue + + else: + # Short sleep to prevent CPU spinning + threading.Event().wait(0.1) + + except Exception as e: + logging.error(f"Manifest error: {e}") + threading.Event().wait(retry_delay) + retry_delay = min(retry_delay * 2, max_retry_delay) + 
manifest_update_needed = True + + + +@app.before_request +def log_request_info(): + """ + Log client connections and important requests. + + Logs: + INFO: + - First manifest request from new client + - Stream switch requests + DEBUG: + - All other requests + + Format: + {client_ip} - {method} {path} + """ + if request.path == '/stream.m3u8' and not segment_buffers: + # First manifest request from a client + logging.info(f"New client connected from {request.remote_addr}") + elif request.path.startswith('/change_stream'): + # Keep stream switch requests as INFO + logging.info(f"Stream switch requested from {request.remote_addr}") + else: + # Move routine requests to DEBUG + logging.debug(f"{request.remote_addr} - {request.method} {request.path}") + +# Configure Werkzeug logger to DEBUG +logging.getLogger('werkzeug').setLevel(logging.DEBUG) + +class ProxyServer: + """Manages HLS proxy server instance""" + + def __init__(self, user_agent: Optional[str] = None): + self.app = Flask(__name__) + self.stream_managers: Dict[str, StreamManager] = {} + self.stream_buffers: Dict[str, StreamBuffer] = {} + self.client_managers: Dict[str, ClientManager] = {} + self.fetch_threads: Dict[str, threading.Thread] = {} + self.user_agent: str = user_agent or Config.DEFAULT_USER_AGENT + self._setup_routes() + + def _setup_routes(self) -> None: + """Configure Flask routes""" + self.app.add_url_rule( + '/stream/', # Changed from //stream.m3u8 + view_func=self.stream_endpoint + ) + self.app.add_url_rule( + '/stream//segments/', # Updated to match new pattern + view_func=self.get_segment + ) + self.app.add_url_rule( + '/change_stream/', # Changed from //change_stream + view_func=self.change_stream, + methods=['POST'] + ) + + def initialize_channel(self, url: str, channel_id: str) -> None: + """Initialize a new channel stream""" + if channel_id in self.stream_managers: + self.stop_channel(channel_id) + + manager = StreamManager( + url, + channel_id, + user_agent=self.user_agent + ) + buffer = 
StreamBuffer() + client_manager = ClientManager() + + # Set up references + manager.client_manager = client_manager + manager.proxy_server = self + + # Store resources + self.stream_managers[channel_id] = manager + self.stream_buffers[channel_id] = buffer + self.client_managers[channel_id] = client_manager + + # Create and store fetcher + fetcher = StreamFetcher(manager, buffer) + manager.fetcher = fetcher + + # Start fetch thread + self.fetch_threads[channel_id] = threading.Thread( + target=fetcher.fetch_loop, + name=f"StreamFetcher-{channel_id}", + daemon=True + ) + self.fetch_threads[channel_id].start() + + # Start cleanup monitoring immediately + manager.start_cleanup_thread() + + logging.info(f"Initialized channel {channel_id} with URL {url}") + + def stop_channel(self, channel_id: str) -> None: + """Stop and cleanup a channel""" + if channel_id in self.stream_managers: + self.stream_managers[channel_id].stop() + if channel_id in self.fetch_threads: + self.fetch_threads[channel_id].join(timeout=5) + self._cleanup_channel(channel_id) + + def _cleanup_channel(self, channel_id: str) -> None: + """ + Remove all resources associated with a channel. + + Args: + channel_id: Channel to cleanup + + Removes: + - Stream manager instance + - Segment buffer + - Client manager + - Fetch thread reference + + Thread safety: + Should only be called after stream manager is stopped + and fetch thread has completed + """ + + for collection in [self.stream_managers, self.stream_buffers, + self.client_managers, self.fetch_threads]: + collection.pop(channel_id, None) + + def run(self, host: str = '0.0.0.0', port: int = 5000) -> None: + """Start the proxy server""" + try: + self.app.run(host=host, port=port, threaded=True) + except KeyboardInterrupt: + logging.info("Shutting down gracefully...") + self.shutdown() + except Exception as e: + logging.error(f"Server error: {e}") + self.shutdown() + raise + + def shutdown(self) -> None: + """ + Stop all channels and cleanup resources. 
+ + Steps: + 1. Stop all active stream managers + 2. Join fetch threads + 3. Clean up channel resources + 4. Release system resources + + Thread Safety: + Safe to call from signal handlers or during shutdown + """ + for channel_id in list(self.stream_managers.keys()): + self.stop_channel(channel_id) + + def stream_endpoint(self, channel_id: str): + """Flask route handler for serving HLS manifests.""" + if channel_id not in self.stream_managers: + return Response('Channel not found', status=404) + + manager = self.stream_managers[channel_id] + + # Wait for initial buffer + if not manager.buffer_ready.wait(Config.BUFFER_READY_TIMEOUT): + logging.error(f"Timeout waiting for initial buffer for channel {channel_id}") + return Response('Initial buffer not ready', status=503) + + try: + if (channel_id not in self.stream_managers) or (not self.stream_managers[channel_id].running): + return Response('Channel not found', status=404) + + manager = self.stream_managers[channel_id] + buffer = self.stream_buffers[channel_id] + + # Record client activity and enable cleanup + client_ip = request.remote_addr + manager.enable_cleanup() + self.client_managers[channel_id].record_activity(client_ip) + + # Wait for first segment with timeout + start_time = time.time() + while True: + with buffer.lock: + available = sorted(buffer.keys()) + if available: + break + + if time.time() - start_time > Config.FIRST_SEGMENT_TIMEOUT: + logging.warning(f"Timeout waiting for first segment for channel {channel_id}") + return Response('No segments available', status=503) + + time.sleep(0.1) # Short sleep to prevent CPU spinning + + # Rest of manifest generation code... 
+ with buffer.lock: + max_seq = max(available) + # Find the first segment after any discontinuity + discontinuity_start = min(available) + for seq in available: + if seq in manager.source_changes: + discontinuity_start = seq + break + + # Calculate window bounds starting from discontinuity + if len(available) <= Config.INITIAL_SEGMENTS: + min_seq = discontinuity_start + else: + min_seq = max( + discontinuity_start, + max_seq - Config.WINDOW_SIZE + 1 + ) + + # Build manifest with proper tags + new_manifest = ['#EXTM3U'] + new_manifest.append(f'#EXT-X-VERSION:{manager.manifest_version}') + new_manifest.append(f'#EXT-X-MEDIA-SEQUENCE:{min_seq}') + new_manifest.append(f'#EXT-X-TARGETDURATION:{int(manager.target_duration)}') + + # Filter segments within window + window_segments = [s for s in available if min_seq <= s <= max_seq] + + # Add segments with discontinuity handling + for seq in window_segments: + if seq in manager.source_changes: + new_manifest.append('#EXT-X-DISCONTINUITY') + logging.debug(f"Added discontinuity marker before segment {seq}") + + duration = manager.segment_durations.get(seq, 10.0) + new_manifest.append(f'#EXTINF:{duration},') + new_manifest.append(f'/stream/{channel_id}/segments/{seq}.ts') + + manifest_content = '\n'.join(new_manifest) + logging.debug(f"Serving manifest with segments {min_seq}-{max_seq} (window: {len(window_segments)})") + return Response(manifest_content, content_type='application/vnd.apple.mpegurl') + except ConnectionAbortedError: + logging.debug("Client disconnected") + return '', 499 + except Exception as e: + logging.error(f"Stream endpoint error: {e}") + return '', 500 + + def get_segment(self, channel_id: str, segment_name: str): + """ + Serve individual MPEG-TS segments to clients. 
+ + Args: + channel_id: Unique identifier for the channel + segment_name: Segment filename (e.g., '123.ts') + + Returns: + Flask Response: + - MPEG-TS segment data with video/MP2T content type + - 404 if segment or channel not found + + Error Handling: + - Logs warning if segment not found + - Logs error on unexpected exceptions + - Returns 404 on any error + """ + if channel_id not in self.stream_managers: + return Response('Channel not found', status=404) + + try: + # Record client activity + client_ip = request.remote_addr + self.client_managers[channel_id].record_activity(client_ip) + + segment_id = int(segment_name.split('.')[0]) + buffer = self.stream_buffers[channel_id] + + with buffer_lock: + if segment_id in buffer: + return Response(buffer[segment_id], content_type='video/MP2T') + + logging.warning(f"Segment {segment_id} not found for channel {channel_id}") + except Exception as e: + logging.error(f"Error serving segment {segment_name}: {e}") + return '', 404 + + def change_stream(self, channel_id: str): + """ + Handle stream URL changes via POST request. 
+ + Args: + channel_id: Channel to modify + + Expected JSON body: + { + "url": "new_stream_url" + } + + Returns: + JSON response with: + - Success/failure message + - Channel ID + - New/current URL + - HTTP 404 if channel not found + - HTTP 400 if URL missing from request + + Side effects: + - Updates stream manager URL + - Triggers stream switch sequence + - Maintains segment numbering + """ + if channel_id not in self.stream_managers: + return jsonify({'error': 'Channel not found'}), 404 + + new_url = request.json.get('url') + if not new_url: + return jsonify({'error': 'No URL provided'}), 400 + + manager = self.stream_managers[channel_id] + if manager.update_url(new_url): + return jsonify({ + 'message': 'Stream URL updated', + 'channel': channel_id, + 'url': new_url + }) + return jsonify({ + 'message': 'URL unchanged', + 'channel': channel_id, + 'url': new_url + }) + + @app.before_request + def log_request_info(): + """ + Log client connections and important requests. + + Log Levels: + INFO: + - First manifest request from new client + - Stream switch requests + DEBUG: + - Segment requests + - Routine manifest updates + + Format: + "{client_ip} - {method} {path}" + + Side Effects: + - Updates logging configuration based on request type + - Tracks client connections + """ + +# Main Application Setup +if __name__ == '__main__': + # Command line argument parsing + parser = argparse.ArgumentParser(description='HLS Proxy Server with Stream Switching') + parser.add_argument( + '--url', '-u', + required=True, + help='Initial HLS stream URL to proxy' + ) + parser.add_argument( + '--channel', '-c', + required=True, + help='Channel ID for the stream (default: default)' + ) + parser.add_argument( + '--port', '-p', + type=int, + default=5000, + help='Local port to serve proxy on (default: 5000)' + ) + parser.add_argument( + '--host', '-H', + default='0.0.0.0', + help='Interface to bind server to (default: all interfaces)' + ) + parser.add_argument( + '--user-agent', '-ua', 
+ help='Custom User-Agent string' + ) + parser.add_argument( + '--debug', + action='store_true', + help='Enable debug logging' + ) + args = parser.parse_args() + + # Configure logging + logging.basicConfig( + level=logging.DEBUG if args.debug else logging.INFO, + format='%(asctime)s - %(levelname)s - %(message)s', + datefmt='%Y-%m-%d %H:%M:%S' + ) + + try: + # Initialize proxy server + proxy = ProxyServer(user_agent=args.user_agent) + + # Initialize channel with provided URL + proxy.initialize_channel(args.url, args.channel) + + logging.info(f"Starting HLS proxy server on {args.host}:{args.port}") + logging.info(f"Initial stream URL: {args.url}") + logging.info(f"Channel ID: {args.channel}") + + # Run Flask development server + proxy.run(host=args.host, port=args.port) + + except Exception as e: + logging.error(f"Failed to start server: {e}") + sys.exit(1) + finally: + if 'proxy' in locals(): + proxy.shutdown() diff --git a/apps/proxy/ts_proxy b/apps/proxy/ts_proxy new file mode 100644 index 00000000..ec6d62d7 --- /dev/null +++ b/apps/proxy/ts_proxy @@ -0,0 +1,323 @@ +""" +Transport Stream (TS) Proxy Server +Handles live TS stream proxying with support for: +- Stream switching +- Buffer management +- Multiple client connections +- Connection state tracking +""" + +from flask import Flask, Response, request, jsonify +import requests +import threading +import logging +from collections import deque +import time +import os +from typing import Optional, Set, Deque, Dict + +# Configuration +class Config: + CHUNK_SIZE: int = 8192 # Buffer chunk size (bytes) + BUFFER_SIZE: int = 1000 # Number of chunks to keep in memory + RECONNECT_DELAY: int = 5 # Seconds between reconnection attempts + CLIENT_POLL_INTERVAL: float = 0.1 # Seconds between client buffer checks + MAX_RETRIES: int = 3 # Maximum connection retry attempts + DEFAULT_USER_AGENT: str = 'VLC/3.0.20 LibVLC/3.0.20' # Default user agent + +class StreamManager: + """Manages TS stream state and connection handling""" + 
+ def __init__(self, initial_url: str, channel_id: str, user_agent: Optional[str] = None): + self.current_url: str = initial_url + self.channel_id: str = channel_id + self.user_agent: str = user_agent or Config.DEFAULT_USER_AGENT + self.url_changed: threading.Event = threading.Event() + self.running: bool = True + self.session: requests.Session = self._create_session() + self.connected: bool = False + self.retry_count: int = 0 + logging.info(f"Initialized stream manager for channel {channel_id}") + + def _create_session(self) -> requests.Session: + """Create and configure requests session""" + session = requests.Session() + session.headers.update({ + 'User-Agent': self.user_agent, + 'Connection': 'keep-alive' + }) + return session + + def update_url(self, new_url: str) -> bool: + """Update stream URL and signal connection change""" + if new_url != self.current_url: + logging.info(f"Stream switch initiated: {self.current_url} -> {new_url}") + self.current_url = new_url + self.connected = False + self.url_changed.set() + return True + return False + + def should_retry(self) -> bool: + """Check if connection retry is allowed""" + return self.retry_count < Config.MAX_RETRIES + + def stop(self) -> None: + """Clean shutdown of stream manager""" + self.running = False + if self.session: + self.session.close() + +class StreamBuffer: + """Manages stream data buffering""" + + def __init__(self): + self.buffer: Deque[bytes] = deque(maxlen=Config.BUFFER_SIZE) + self.lock: threading.Lock = threading.Lock() + self.index: int = 0 + +class ClientManager: + """Manages active client connections""" + + def __init__(self): + self.active_clients: Set[int] = set() + self.lock: threading.Lock = threading.Lock() + + def add_client(self, client_id: int) -> None: + """Add new client connection""" + with self.lock: + self.active_clients.add(client_id) + logging.info(f"New client connected: {client_id} (total: {len(self.active_clients)})") + + def remove_client(self, client_id: int) -> int: + 
"""Remove client and return remaining count""" + with self.lock: + self.active_clients.remove(client_id) + remaining = len(self.active_clients) + logging.info(f"Client disconnected: {client_id} (remaining: {remaining})") + return remaining + +class StreamFetcher: + """Handles stream data fetching""" + + def __init__(self, manager: StreamManager, buffer: StreamBuffer): + self.manager = manager + self.buffer = buffer + + def fetch_loop(self) -> None: + """Main fetch loop for stream data""" + while self.manager.running: + try: + if not self._handle_connection(): + continue + + with self.manager.session.get(self.manager.current_url, stream=True) as response: + if response.status_code == 200: + self._handle_successful_connection() + self._process_stream(response) + + except requests.exceptions.RequestException as e: + self._handle_connection_error(e) + + def _handle_connection(self) -> bool: + """Handle connection state and retries""" + if not self.manager.connected: + if not self.manager.should_retry(): + logging.error(f"Failed to connect after {Config.MAX_RETRIES} attempts") + return False + + if not self.manager.running: + return False + + self.manager.retry_count += 1 + logging.info(f"Connecting to stream: {self.manager.current_url} " + f"(attempt {self.manager.retry_count}/{Config.MAX_RETRIES})") + return True + + def _handle_successful_connection(self) -> None: + """Handle successful stream connection""" + if not self.manager.connected: + logging.info("Stream connected successfully") + self.manager.connected = True + self.manager.retry_count = 0 + + def _process_stream(self, response: requests.Response) -> None: + """Process incoming stream data""" + for chunk in response.iter_content(chunk_size=Config.CHUNK_SIZE): + if not self.manager.running: + logging.info("Stream fetch stopped - shutting down") + return + + if chunk: + if self.manager.url_changed.is_set(): + logging.info("Stream switch in progress, closing connection") + self.manager.url_changed.clear() + 
break + + with self.buffer.lock: + self.buffer.buffer.append(chunk) + self.buffer.index += 1 + + def _handle_connection_error(self, error: Exception) -> None: + """Handle stream connection errors""" + logging.error(f"Stream connection error: {error}") + self.manager.connected = False + + if not self.manager.running: + return + + logging.info(f"Attempting to reconnect in {Config.RECONNECT_DELAY} seconds...") + if not wait_for_running(self.manager, Config.RECONNECT_DELAY): + return + +def wait_for_running(manager: StreamManager, delay: float) -> bool: + """Wait while checking manager running state""" + start = time.time() + while time.time() - start < delay: + if not manager.running: + return False + threading.Event().wait(0.1) + return True + +class ProxyServer: + """Manages TS proxy server instance""" + + def __init__(self, user_agent: Optional[str] = None): + self.app = Flask(__name__) + self.stream_managers: Dict[str, StreamManager] = {} + self.stream_buffers: Dict[str, StreamBuffer] = {} + self.client_managers: Dict[str, ClientManager] = {} + self.fetch_threads: Dict[str, threading.Thread] = {} + self.user_agent: str = user_agent or Config.DEFAULT_USER_AGENT + self._setup_routes() + + def _setup_routes(self) -> None: + """Configure Flask routes""" + self.app.route('/stream/')(self.stream_endpoint) + self.app.route('/change_stream/', methods=['POST'])(self.change_stream) + + def initialize_channel(self, url: str, channel_id: str) -> None: + """Initialize a new channel stream""" + if channel_id in self.stream_managers: + self.stop_channel(channel_id) + + self.stream_managers[channel_id] = StreamManager( + url, + channel_id, + user_agent=self.user_agent + ) + self.stream_buffers[channel_id] = StreamBuffer() + self.client_managers[channel_id] = ClientManager() + + fetcher = StreamFetcher( + self.stream_managers[channel_id], + self.stream_buffers[channel_id] + ) + + self.fetch_threads[channel_id] = threading.Thread( + target=fetcher.fetch_loop, + 
name=f"StreamFetcher-{channel_id}", + daemon=True + ) + self.fetch_threads[channel_id].start() + logging.info(f"Initialized channel {channel_id} with URL {url}") + + def stop_channel(self, channel_id: str) -> None: + """Stop and cleanup a channel""" + if channel_id in self.stream_managers: + self.stream_managers[channel_id].stop() + if channel_id in self.fetch_threads: + self.fetch_threads[channel_id].join(timeout=5) + self._cleanup_channel(channel_id) + + def _cleanup_channel(self, channel_id: str) -> None: + """Remove channel resources""" + for collection in [self.stream_managers, self.stream_buffers, + self.client_managers, self.fetch_threads]: + collection.pop(channel_id, None) + + def stream_endpoint(self, channel_id: str): + """Stream endpoint that serves TS data to clients""" + if channel_id not in self.stream_managers: + return Response('Channel not found', status=404) + + def generate(): + client_id = threading.get_ident() + buffer = self.stream_buffers[channel_id] + client_manager = self.client_managers[channel_id] + + client_manager.add_client(client_id) + last_index = buffer.index + + try: + while True: + with buffer.lock: + if buffer.index > last_index: + chunks_behind = buffer.index - last_index + start_pos = max(0, len(buffer.buffer) - chunks_behind) + + for i in range(start_pos, len(buffer.buffer)): + yield buffer.buffer[i] + last_index = buffer.index + + threading.Event().wait(Config.CLIENT_POLL_INTERVAL) + except GeneratorExit: + remaining = client_manager.remove_client(client_id) + if remaining == 0: + logging.info(f"No clients remaining for channel {channel_id}") + self.stop_channel(channel_id) + + return Response(generate(), content_type='video/mp2t') + + def change_stream(self, channel_id: str): + """Handle stream URL changes""" + if channel_id not in self.stream_managers: + return jsonify({'error': 'Channel not found'}), 404 + + new_url = request.json.get('url') + if not new_url: + return jsonify({'error': 'No URL provided'}), 400 + + manager 
= self.stream_managers[channel_id] + if manager.update_url(new_url): + return jsonify({ + 'message': 'Stream URL updated', + 'channel': channel_id, + 'url': new_url + }) + return jsonify({ + 'message': 'URL unchanged', + 'channel': channel_id, + 'url': new_url + }) + + def run(self, host: str = '0.0.0.0', port: int = 5000) -> None: + """Start the proxy server""" + self.app.run(host=host, port=port, threaded=True) + + def shutdown(self) -> None: + """Stop all channels and cleanup""" + for channel_id in list(self.stream_managers.keys()): + self.stop_channel(channel_id) + +def main(): + """Initialize and start the proxy server""" + logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(levelname)s - %(message)s', + datefmt='%Y-%m-%d %H:%M:%S' + ) + + logging.getLogger('werkzeug').setLevel(logging.DEBUG) + + proxy_server = ProxyServer() + initial_url = os.getenv('STREAM_URL', 'http://example.com/stream.ts') + proxy_server.initialize_channel(initial_url, "default_channel") + + try: + proxy_server.run() + finally: + proxy_server.shutdown() + +if __name__ == '__main__': + main() diff --git a/core/migrations/0001_initial.py b/core/migrations/0001_initial.py index 5ddb01cb..79757ec4 100644 --- a/core/migrations/0001_initial.py +++ b/core/migrations/0001_initial.py @@ -1,4 +1,4 @@ -# Generated by Django 5.1.6 on 2025-03-02 00:01 +# Generated by Django 5.1.6 on 2025-03-02 13:52 from django.db import migrations, models diff --git a/core/views.py b/core/views.py index 0ec84bde..59d69ee0 100644 --- a/core/views.py +++ b/core/views.py @@ -32,10 +32,16 @@ def settings_view(request): def stream_view(request, stream_id): """ Streams the first available stream for the given channel. - It uses the channel’s assigned StreamProfile. + It uses the channel’s assigned StreamProfile with a fallback to core default A persistent Redis lock is used to prevent concurrent streaming on the same channel. 
+ Priority: + - iterate through all streams + - iterate through each stream's m3u profile """ try: + redis_host = getattr(settings, "REDIS_HOST", "localhost") + redis_client = redis.Redis(host=settings.REDIS_HOST, port=6379, db=0) + # Retrieve the channel by the provided stream_id. channel = Channel.objects.get(channel_number=stream_id) logger.debug("Channel retrieved: ID=%s, Name=%s", channel.id, channel.channel_name) @@ -45,46 +51,78 @@ def stream_view(request, stream_id): logger.error("No streams found for channel ID=%s", channel.id) return HttpResponseServerError("No stream found for this channel.") - # Get the first available stream. - stream = channel.streams.first() - logger.debug("Using stream: ID=%s, Name=%s", stream.id, stream.name) - - # Retrieve the M3U account associated with the stream. - m3u_account = stream.m3u_account - logger.debug("Using M3U account ID=%s, Name=%s", m3u_account.id, m3u_account.name) - - # Use the custom URL if available; otherwise, use the standard URL. - input_url = stream.custom_url or stream.url - logger.debug("Input URL: %s", input_url) - - # Determine which profile we can use. - m3u_profiles = m3u_account.profiles.all() - default_profile = next((obj for obj in m3u_profiles if obj.is_default), None) - profiles = [obj for obj in m3u_profiles if not obj.is_default] - active_profile = None - # -- Loop through profiles and pick the first active one -- - for profile in [default_profile] + profiles: - logger.debug(f'Checking profile {profile.name}...') - if not profile.is_active: - logger.debug('Profile is not active, skipping.') + lock_key = None + persistent_lock = None + + # iterate through channel's streams + for stream in channel.streams.all().order_by('channelstream__order'): + logger.debug(f"Checking stream: ID={stream.id}, Name={stream.name}") + + # Retrieve the M3U account associated with the stream. 
+ m3u_account = stream.m3u_account + logger.debug(f"Using M3U account ID={m3u_account.id}, Name={m3u_account.name}") + + # Use the custom URL if available; otherwise, use the standard URL. + input_url = stream.custom_url or stream.url + logger.debug(f"Input URL: {input_url}") + + # Determine which profile we can use. + m3u_profiles = m3u_account.profiles.all() + default_profile = next((obj for obj in m3u_profiles if obj.is_default), None) + profiles = [obj for obj in m3u_profiles if not obj.is_default] + + # -- Loop through profiles and pick the first active one -- + for profile in [default_profile] + profiles: + logger.debug(f'Checking profile {profile.name}...') + if not profile.is_active: + logger.debug('Profile is not active, skipping.') + continue + + # Acquire the persistent Redis lock, indexed by 0 through max_streams available in the profile + stream_index = 0 + while True: + stream_index += 1 + if stream_index > profile.max_streams: + # @TODO: we are bailing here if no profile was found, but we need to end up supporting looping through + # all available channel streams + logger.debug(f"Profile is using all available streams.") + break + + lock_key = f"lock:{channel.id}:{stream.id}:{profile.id}:{stream_index}" + persistent_lock = PersistentLock(redis_client, lock_key, lock_timeout=120) + + if not persistent_lock.acquire(): + logger.error(f"Could not acquire persistent lock for profile {profile.id} index {stream_index}, currently in use.") + continue + + break + + if persistent_lock.has_lock: + break + + if persistent_lock.has_lock == False: + logger.debug(f'Unable to get lock for profile {profile.id}:{profile.name}. Skipping...') continue - # *** DISABLE FAKE LOCKS: Ignore current_viewers/max_streams check *** - logger.debug(f"Using M3U profile ID={profile.id} (ignoring viewer count limits)") - active_profile = M3UAccountProfile.objects.get(id=profile.id) - # Prepare the pattern replacement. 
- logger.debug("Executing the following pattern replacement:") - logger.debug(f" search: {profile.search_pattern}") - safe_replace_pattern = re.sub(r'\$(\d+)', r'\\\1', profile.replace_pattern) - logger.debug(f" replace: {profile.replace_pattern}") - logger.debug(f" safe replace: {safe_replace_pattern}") - stream_url = re.sub(profile.search_pattern, safe_replace_pattern, input_url) - logger.debug(f"Generated stream url: {stream_url}") + break - if active_profile is None: - logger.exception("No available profiles for the stream") - return HttpResponseServerError("No available profiles for the stream") + if persistent_lock.has_lock == False: + logger.debug(f"Unable to find any available streams or stream profiles.") + return HttpResponseServerError("Resource busy, please try again later.") + + # *** DISABLE FAKE LOCKS: Ignore current_viewers/max_streams check *** + logger.debug(f"Using stream {stream.id}{stream.name}, M3U profile {profile.id}{profile.name}, stream index {stream_index}") + active_profile = M3UAccountProfile.objects.get(id=profile.id) + + # Prepare the pattern replacement. + logger.debug("Executing the following pattern replacement:") + logger.debug(f" search: {active_profile.search_pattern}") + safe_replace_pattern = re.sub(r'\$(\d+)', r'\\\1', active_profile.replace_pattern) + logger.debug(f" replace: {active_profile.replace_pattern}") + logger.debug(f" safe replace: {safe_replace_pattern}") + stream_url = re.sub(active_profile.search_pattern, safe_replace_pattern, input_url) + logger.debug(f"Generated stream url: {stream_url}") # Get the stream profile set on the channel. stream_profile = channel.stream_profile @@ -106,19 +144,9 @@ def stream_view(request, stream_id): cmd = [stream_profile.command] + parameters.split() logger.debug("Executing command: %s", cmd) - # Acquire the persistent Redis lock. 
- redis_host = getattr(settings, "REDIS_HOST", "localhost") - redis_client = redis.Redis(host=settings.REDIS_HOST, port=6379, db=0) - lock_key = f"lock:channel:{channel.id}" - persistent_lock = PersistentLock(redis_client, lock_key, lock_timeout=120) - - if not persistent_lock.acquire(): - logger.error("Could not acquire persistent lock for channel %s", channel.id) - return HttpResponseServerError("Resource busy, please try again later.") - try: # Start the streaming process. - process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=8192) except Exception as e: persistent_lock.release() # Ensure the lock is released on error. logger.exception("Error starting stream for channel ID=%s", stream_id) @@ -137,6 +165,7 @@ def stream_view(request, stream_id): yield chunk finally: try: + proc.terminate() logger.debug("Streaming process terminated for stream ID=%s", s.id) except Exception as e: @@ -144,6 +173,7 @@ def stream_view(request, stream_id): persistent_lock.release() logger.debug("Persistent lock released for channel ID=%s", channel.id) + return StreamingHttpResponse( stream_generator(process, stream, persistent_lock), content_type="video/MP2T" diff --git a/dispatcharr/persistent_lock.py b/dispatcharr/persistent_lock.py index 3df2b650..360c9b5d 100644 --- a/dispatcharr/persistent_lock.py +++ b/dispatcharr/persistent_lock.py @@ -5,7 +5,7 @@ import redis class PersistentLock: """ A persistent, auto-expiring lock that uses Redis. - + Usage: 1. Instantiate with a Redis client, a unique lock key (e.g. "lock:account:123"), and an optional timeout (in seconds). @@ -16,7 +16,7 @@ class PersistentLock: def __init__(self, redis_client: redis.Redis, lock_key: str, lock_timeout: int = 120): """ Initialize the lock. - + :param redis_client: An instance of redis.Redis. :param lock_key: The unique key for the lock. 
:param lock_timeout: Time-to-live for the lock in seconds. @@ -25,6 +25,10 @@ class PersistentLock: self.lock_key = lock_key self.lock_timeout = lock_timeout self.lock_token = None + self.has_lock = False + + def has_lock(self) -> bool: + return self.has_lock def acquire(self) -> bool: """ @@ -33,6 +37,9 @@ class PersistentLock: self.lock_token = str(uuid.uuid4()) # Set the lock with NX (only if not exists) and EX (expire time) result = self.redis_client.set(self.lock_key, self.lock_token, nx=True, ex=self.lock_timeout) + if result is not None: + self.has_lock = True + return result is not None def refresh(self) -> bool: @@ -43,6 +50,7 @@ class PersistentLock: current_value = self.redis_client.get(self.lock_key) if current_value and current_value.decode("utf-8") == self.lock_token: self.redis_client.expire(self.lock_key, self.lock_timeout) + self.has_lock = False return True return False diff --git a/docker/Dockerfile b/docker/Dockerfile index 0b25212d..ef9799e7 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,28 +1,34 @@ -FROM alpine +FROM python:3.13-slim ENV PATH="/dispatcharrpy/bin:$PATH" \ VIRTUAL_ENV=/dispatcharrpy \ DJANGO_SETTINGS_MODULE=dispatcharr.settings \ PYTHONUNBUFFERED=1 -RUN apk add \ - python3 \ - python3-dev \ - gcc \ - musl-dev \ - linux-headers \ - py3-pip \ +RUN apt-get update && \ + apt-get install -y \ + curl \ ffmpeg \ - streamlink \ - vlc \ - libpq-dev \ gcc \ - py3-virtualenv \ - uwsgi \ - uwsgi-python \ - nodejs \ - npm \ git \ + gpg \ + libpq-dev \ + lsb-release \ + python3-virtualenv \ + streamlink + +RUN \ + curl -sL https://deb.nodesource.com/setup_23.x -o /tmp/nodesource_setup.sh && \ + bash /tmp/nodesource_setup.sh && \ + curl -fsSL https://packages.redis.io/gpg | gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg && \ + chmod 644 /usr/share/keyrings/redis-archive-keyring.gpg && \ + echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | 
tee /etc/apt/sources.list.d/redis.list && \ + apt-get update && \ + apt-get install -y redis + +RUN apt-get update && \ + apt-get install -y \ + nodejs \ redis RUN \ @@ -30,24 +36,26 @@ RUN \ virtualenv /dispatcharrpy && \ git clone https://github.com/Dispatcharr/Dispatcharr /app && \ cd /app && \ - /dispatcharrpy/bin/pip install --no-cache-dir -r requirements.txt && \ + pip install --no-cache-dir -r requirements.txt && \ cd /app/frontend && \ npm install && \ npm run build && \ find . -maxdepth 1 ! -name '.' ! -name 'build' -exec rm -rf '{}' \; && \ cd /app && \ - python manage.py collectstatic --noinput || true - -# Cleanup -RUN \ - apk del \ - nodejs \ - npm \ - git \ + python manage.py collectstatic --noinput || true && \ + apt-get remove -y \ gcc \ - musl-dev \ - python3-dev \ - linux-headers + git \ + gpg \ + libpq-dev \ + lsb-release \ + nodejs && \ + apt-get clean && \ + apt-get autoremove -y && \ + rm -rf \ + /tmp/* \ + /var/lib/apt/lists/* \ + /var/tmp/* WORKDIR /app diff --git a/docker/Dockerfile.alpine b/docker/Dockerfile.alpine new file mode 100644 index 00000000..0b25212d --- /dev/null +++ b/docker/Dockerfile.alpine @@ -0,0 +1,54 @@ +FROM alpine + +ENV PATH="/dispatcharrpy/bin:$PATH" \ + VIRTUAL_ENV=/dispatcharrpy \ + DJANGO_SETTINGS_MODULE=dispatcharr.settings \ + PYTHONUNBUFFERED=1 + +RUN apk add \ + python3 \ + python3-dev \ + gcc \ + musl-dev \ + linux-headers \ + py3-pip \ + ffmpeg \ + streamlink \ + vlc \ + libpq-dev \ + gcc \ + py3-virtualenv \ + uwsgi \ + uwsgi-python \ + nodejs \ + npm \ + git \ + redis + +RUN \ + mkdir /data && \ + virtualenv /dispatcharrpy && \ + git clone https://github.com/Dispatcharr/Dispatcharr /app && \ + cd /app && \ + /dispatcharrpy/bin/pip install --no-cache-dir -r requirements.txt && \ + cd /app/frontend && \ + npm install && \ + npm run build && \ + find . -maxdepth 1 ! -name '.' ! 
-name 'build' -exec rm -rf '{}' \; && \ + cd /app && \ + python manage.py collectstatic --noinput || true + +# Cleanup +RUN \ + apk del \ + nodejs \ + npm \ + git \ + gcc \ + musl-dev \ + python3-dev \ + linux-headers + +WORKDIR /app + +CMD ["/app/docker/entrypoint.aio.sh"] diff --git a/docker/entrypoint.aio.sh b/docker/entrypoint.aio.sh index e8441cd4..244df3ab 100755 --- a/docker/entrypoint.aio.sh +++ b/docker/entrypoint.aio.sh @@ -1,20 +1,105 @@ -#!/bin/sh +#!/bin/bash -# Check the value of DISPATCHARR_ENV and run the corresponding program -case "$DISPATCHARR_ENV" in - "dev") - echo "DISPATCHARR_ENV is set to 'dev'. Running Development Program..." - apk add nodejs npm - cd /app/frontend && npm install - cd /app - exec /usr/sbin/uwsgi --ini uwsgi.dev.ini - ;; - "aio") - echo "DISPATCHARR_ENV is set to 'aio'. Running All-in-One Program..." - exec /usr/sbin/uwsgi --ini uwsgi.aio.ini - ;; - *) - echo "DISPATCHARR_ENV is not set or has an unexpected value. Running standalone..." - exec /usr/sbin/uwsgi --ini uwsgi.ini - ;; -esac +# Run Django migrations and collect static files +python manage.py collectstatic --noinput +python manage.py migrate --noinput + +# Function to clean up only running processes +cleanup() { + echo "🔥 Cleanup triggered! Stopping services..." + for pid in "${pids[@]}"; do + if [ -n "$pid" ] && kill -0 "$pid" 2>/dev/null; then + echo "⛔ Stopping process (PID: $pid)..." + kill -TERM "$pid" 2>/dev/null + else + echo "✅ Process (PID: $pid) already stopped." + fi + done + wait +} + +# Catch termination signals (CTRL+C, Docker Stop, etc.) +trap cleanup TERM INT + +# Initialize an array to store PIDs +pids=() + +GUNICORN_PORT=9191 + +# If running in development mode, install and start frontend +if [ "$DISPATCHARR_ENV" = "dev" ]; then + echo "🚀 Development Mode - Setting up Frontend..." 
+ GUNICORN_PORT=5656 + + # Install Node.js + apt-get update && apt-get install -y nodejs + + # Install frontend dependencies + cd /app/frontend && npm install + cd /app + + # Start React development server + echo "🚀 Starting React Dev Server..." + cd /app/frontend + PORT=9191 ./node_modules/pm2/bin/pm2 --name test start npm -- start + ./node_modules/pm2/bin/pm2 logs & + react_pid=$! + echo "✅ React started with PID $react_pid" + pids+=("$react_pid") + cd /app +fi + +# If running in `dev` or `aio`, start Redis and Celery +if [ "$DISPATCHARR_ENV" = "dev" ] || [ "$DISPATCHARR_ENV" = "aio" ]; then + echo "🚀 Running Redis and Celery for '$DISPATCHARR_ENV'..." + + # Start Redis + echo "🚀 Starting Redis..." + redis-server --daemonize no & + sleep 1 # Give Redis time to start + redis_pid=$(pgrep -x redis-server) + if [ -n "$redis_pid" ]; then + echo "✅ Redis started with PID $redis_pid" + pids+=("$redis_pid") + else + echo "❌ Redis failed to start!" + fi + + # Start Celery + echo "🚀 Starting Celery..." + celery -A dispatcharr worker -l info & + celery_pid=$! + echo "✅ Celery started with PID $celery_pid" + pids+=("$celery_pid") +fi + +# Always start Gunicorn +echo "🚀 Starting Gunicorn..." +gunicorn --workers=4 --worker-class=gevent --timeout=300 --bind 0.0.0.0:${GUNICORN_PORT} dispatcharr.wsgi:application & +gunicorn_pid=$! +echo "✅ Gunicorn started with PID $gunicorn_pid" +pids+=("$gunicorn_pid") + +# Log PIDs +echo "📝 Process PIDs: ${pids[*]}" + +# Wait for at least one process to exit and log the process that exited first +if [ ${#pids[@]} -gt 0 ]; then + echo "⏳ Waiting for processes to exit..." + ps -aux | grep -E 'redis-server|celery|gunicorn|npm' + wait -n "${pids[@]}" + echo "🚨 One of the processes exited! Checking which one..." + + for pid in "${pids[@]}"; do + if ! kill -0 "$pid" 2>/dev/null; then + process_name=$(ps -p "$pid" -o comm=) + echo "❌ Process $process_name (PID: $pid) has exited!" + fi + done +else + echo "❌ No processes started. Exiting." 
+ exit 1 +fi + +# Cleanup and stop remaining processes +cleanup diff --git a/frontend/package-lock.json b/frontend/package-lock.json index 816be251..b9b072dc 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -22,6 +22,7 @@ "material-react-table": "^3.2.0", "mpegts.js": "^1.4.2", "planby": "^1.1.7", + "pm2": "^5.4.3", "prettier": "^3.5.2", "react": "18.2.0", "react-dom": "18.2.0", @@ -3384,6 +3385,243 @@ "node": ">=14" } }, + "node_modules/@pm2/agent": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@pm2/agent/-/agent-2.0.4.tgz", + "integrity": "sha512-n7WYvvTJhHLS2oBb1PjOtgLpMhgImOq8sXkPBw6smeg9LJBWZjiEgPKOpR8mn9UJZsB5P3W4V/MyvNnp31LKeA==", + "license": "AGPL-3.0", + "dependencies": { + "async": "~3.2.0", + "chalk": "~3.0.0", + "dayjs": "~1.8.24", + "debug": "~4.3.1", + "eventemitter2": "~5.0.1", + "fast-json-patch": "^3.0.0-1", + "fclone": "~1.0.11", + "nssocket": "0.6.0", + "pm2-axon": "~4.0.1", + "pm2-axon-rpc": "~0.7.0", + "proxy-agent": "~6.3.0", + "semver": "~7.5.0", + "ws": "~7.5.10" + } + }, + "node_modules/@pm2/agent/node_modules/chalk": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", + "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@pm2/agent/node_modules/dayjs": { + "version": "1.8.36", + "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.8.36.tgz", + "integrity": "sha512-3VmRXEtw7RZKAf+4Tv1Ym9AGeo8r8+CjDi26x+7SYQil1UqtqdaokhzoEJohqlzt0m5kacJSDhJQkG/LWhpRBw==", + "license": "MIT" + }, + "node_modules/@pm2/agent/node_modules/debug": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", + "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", + 
"license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@pm2/agent/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@pm2/agent/node_modules/semver": { + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "license": "ISC", + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@pm2/agent/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "license": "ISC" + }, + "node_modules/@pm2/io": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/@pm2/io/-/io-6.0.1.tgz", + "integrity": "sha512-KiA+shC6sULQAr9mGZ1pg+6KVW9MF8NpG99x26Lf/082/Qy8qsTCtnJy+HQReW1A9Rdf0C/404cz0RZGZro+IA==", + "license": "Apache-2", + "dependencies": { + "async": "~2.6.1", + "debug": "~4.3.1", + "eventemitter2": "^6.3.1", + "require-in-the-middle": "^5.0.0", + "semver": "~7.5.4", + "shimmer": "^1.2.0", + "signal-exit": "^3.0.3", + "tslib": "1.9.3" + }, + "engines": { + "node": ">=6.0" + } + }, + "node_modules/@pm2/io/node_modules/async": { + "version": "2.6.4", + "resolved": "https://registry.npmjs.org/async/-/async-2.6.4.tgz", + "integrity": 
"sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==", + "license": "MIT", + "dependencies": { + "lodash": "^4.17.14" + } + }, + "node_modules/@pm2/io/node_modules/debug": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", + "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@pm2/io/node_modules/eventemitter2": { + "version": "6.4.9", + "resolved": "https://registry.npmjs.org/eventemitter2/-/eventemitter2-6.4.9.tgz", + "integrity": "sha512-JEPTiaOt9f04oa6NOkc4aH+nVp5I3wEjpHbIPqfgCdD5v5bUzy7xQqwcVO2aDQgOWhI28da57HksMrzK9HlRxg==", + "license": "MIT" + }, + "node_modules/@pm2/io/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@pm2/io/node_modules/semver": { + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "license": "ISC", + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@pm2/io/node_modules/tslib": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.9.3.tgz", + "integrity": "sha512-4krF8scpejhaOgqzBEcGM7yDIEfi0/8+8zDRZhNZZ2kjmHJ4hv3zCbQWxoJGz1iw5U0Jl0nma13xzHXcncMavQ==", + "license": "Apache-2.0" + }, + 
"node_modules/@pm2/io/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "license": "ISC" + }, + "node_modules/@pm2/js-api": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/@pm2/js-api/-/js-api-0.8.0.tgz", + "integrity": "sha512-nmWzrA/BQZik3VBz+npRcNIu01kdBhWL0mxKmP1ciF/gTcujPTQqt027N9fc1pK9ERM8RipFhymw7RcmCyOEYA==", + "license": "Apache-2", + "dependencies": { + "async": "^2.6.3", + "debug": "~4.3.1", + "eventemitter2": "^6.3.1", + "extrareqp2": "^1.0.0", + "ws": "^7.0.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/@pm2/js-api/node_modules/async": { + "version": "2.6.4", + "resolved": "https://registry.npmjs.org/async/-/async-2.6.4.tgz", + "integrity": "sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==", + "license": "MIT", + "dependencies": { + "lodash": "^4.17.14" + } + }, + "node_modules/@pm2/js-api/node_modules/debug": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", + "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@pm2/js-api/node_modules/eventemitter2": { + "version": "6.4.9", + "resolved": "https://registry.npmjs.org/eventemitter2/-/eventemitter2-6.4.9.tgz", + "integrity": "sha512-JEPTiaOt9f04oa6NOkc4aH+nVp5I3wEjpHbIPqfgCdD5v5bUzy7xQqwcVO2aDQgOWhI28da57HksMrzK9HlRxg==", + "license": "MIT" + }, + "node_modules/@pm2/pm2-version-check": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@pm2/pm2-version-check/-/pm2-version-check-1.0.4.tgz", + "integrity": 
"sha512-SXsM27SGH3yTWKc2fKR4SYNxsmnvuBQ9dd6QHtEWmiZ/VqaOYPAIlS8+vMcn27YLtAEBGvNRSh3TPNvtjZgfqA==", + "license": "MIT", + "dependencies": { + "debug": "^4.3.1" + } + }, "node_modules/@pmmmwh/react-refresh-webpack-plugin": { "version": "0.5.15", "resolved": "https://registry.npmjs.org/@pmmmwh/react-refresh-webpack-plugin/-/react-refresh-webpack-plugin-0.5.15.tgz", @@ -3849,6 +4087,12 @@ "node": ">= 6" } }, + "node_modules/@tootallnate/quickjs-emscripten": { + "version": "0.23.0", + "resolved": "https://registry.npmjs.org/@tootallnate/quickjs-emscripten/-/quickjs-emscripten-0.23.0.tgz", + "integrity": "sha512-C5Mc6rdnsaJDjO3UpGW/CQTHtCKaYlScZTly4JIu97Jxo/odCiH0ITnDXSJPTOrEKk/ycSZ0AOgTmkDtkOsvIA==", + "license": "MIT" + }, "node_modules/@trysound/sax": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/@trysound/sax/-/sax-0.2.0.tgz", @@ -4818,6 +5062,30 @@ "ajv": "^6.9.1" } }, + "node_modules/amp": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/amp/-/amp-0.3.1.tgz", + "integrity": "sha512-OwIuC4yZaRogHKiuU5WlMR5Xk/jAcpPtawWL05Gj8Lvm2F6mwoJt4O/bHI+DHwG79vWd+8OFYM4/BzYqyRd3qw==", + "license": "MIT" + }, + "node_modules/amp-message": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/amp-message/-/amp-message-0.1.2.tgz", + "integrity": "sha512-JqutcFwoU1+jhv7ArgW38bqrE+LQdcRv4NxNw0mp0JHQyB6tXesWRjtYKlDgHRY2o3JE5UTaBGUK8kSWUdxWUg==", + "license": "MIT", + "dependencies": { + "amp": "0.3.1" + } + }, + "node_modules/ansi-colors": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.3.tgz", + "integrity": "sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/ansi-escapes": { "version": "4.3.2", "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", @@ -5100,6 +5368,18 @@ "resolved": "https://registry.npmjs.org/asap/-/asap-2.0.6.tgz", "integrity": 
"sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==" }, + "node_modules/ast-types": { + "version": "0.13.4", + "resolved": "https://registry.npmjs.org/ast-types/-/ast-types-0.13.4.tgz", + "integrity": "sha512-x1FCFnFifvYDDzTaLII71vG5uvDwgtmDTEVWAxrgeiR8VjMONcCXJx7E+USjDtHlwFmt9MysbqgF9b9Vjr6w+w==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.1" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/ast-types-flow": { "version": "0.0.8", "resolved": "https://registry.npmjs.org/ast-types-flow/-/ast-types-flow-0.0.8.tgz", @@ -5467,6 +5747,15 @@ "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" }, + "node_modules/basic-ftp": { + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/basic-ftp/-/basic-ftp-5.0.5.tgz", + "integrity": "sha512-4Bcg1P8xhUuqcii/S0Z9wiHIrQVPMermM1any+MX5GeGD7faD3/msQUDGLol9wOcz4/jbg/WJnGqoJF6LiBdtg==", + "license": "MIT", + "engines": { + "node": ">=10.0.0" + } + }, "node_modules/batch": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz", @@ -5506,11 +5795,29 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/blessed": { + "version": "0.1.81", + "resolved": "https://registry.npmjs.org/blessed/-/blessed-0.1.81.tgz", + "integrity": "sha512-LoF5gae+hlmfORcG1M5+5XZi4LBmvlXTzwJWzUlPryN/SJdSflZvROM2TwkT0GMpq7oqT48NRd4GS7BiVBc5OQ==", + "license": "MIT", + "bin": { + "blessed": "bin/tput.js" + }, + "engines": { + "node": ">= 0.8.0" + } + }, "node_modules/bluebird": { "version": "3.7.2", "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz", "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==" }, + "node_modules/bodec": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/bodec/-/bodec-0.1.0.tgz", 
+ "integrity": "sha512-Ylo+MAo5BDUq1KA3f3R/MFhh+g8cnHmo8bz3YPGhI1znrMaf77ol1sfvYJzsw3nTE+Y2GryfDxBaR+AqpAkEHQ==", + "license": "MIT" + }, "node_modules/body-parser": { "version": "1.20.3", "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", @@ -5801,6 +6108,12 @@ "node": ">=10" } }, + "node_modules/charm": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/charm/-/charm-0.1.2.tgz", + "integrity": "sha512-syedaZ9cPe7r3hoQA9twWYKu5AIyCswN5+szkmPBe9ccdLrj4bYaCnLVPTLd2kgVRc7+zoX4tyPgRnFKCj5YjQ==", + "license": "MIT/X11" + }, "node_modules/check-types": { "version": "11.2.3", "resolved": "https://registry.npmjs.org/check-types/-/check-types-11.2.3.tgz", @@ -5886,6 +6199,30 @@ "node": ">=0.10.0" } }, + "node_modules/cli-tableau": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/cli-tableau/-/cli-tableau-2.0.1.tgz", + "integrity": "sha512-he+WTicka9cl0Fg/y+YyxcN6/bfQ/1O3QmgxRXDhABKqLzvoOSM4fMzp39uMyLBulAFuywD2N7UaoQE7WaADxQ==", + "dependencies": { + "chalk": "3.0.0" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/cli-tableau/node_modules/chalk": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", + "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/cliui": { "version": "7.0.4", "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", @@ -6203,6 +6540,12 @@ "node": ">=10" } }, + "node_modules/croner": { + "version": "4.1.97", + "resolved": "https://registry.npmjs.org/croner/-/croner-4.1.97.tgz", + "integrity": "sha512-/f6gpQuxDaqXu+1kwQYSckUglPaOrHdbIlBAu0YuW8/Cdb45XwXYNUBXg3r/9Mo6n540Kn/smKcZWko5x99KrQ==", + "license": "MIT" + }, "node_modules/cross-spawn": { "version": "7.0.6", "resolved": 
"https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", @@ -6574,12 +6917,27 @@ "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==" }, + "node_modules/culvert": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/culvert/-/culvert-0.1.2.tgz", + "integrity": "sha512-yi1x3EAWKjQTreYWeSd98431AV+IEE0qoDyOoaHJ7KJ21gv6HtBXHVLX74opVSGqcR8/AbjJBHAHpcOy2bj5Gg==", + "license": "MIT" + }, "node_modules/damerau-levenshtein": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/damerau-levenshtein/-/damerau-levenshtein-1.0.8.tgz", "integrity": "sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==", "license": "BSD-2-Clause" }, + "node_modules/data-uri-to-buffer": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-6.0.2.tgz", + "integrity": "sha512-7hvf7/GW8e86rW0ptuwS3OcBGDjIi6SZva7hCyWC0yYry2cOPmLIjXAUHI6DK2HsnwJd9ifmt57i8eV2n4YNpw==", + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, "node_modules/data-urls": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-2.0.0.tgz", @@ -6753,6 +7111,20 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/degenerator": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/degenerator/-/degenerator-5.0.1.tgz", + "integrity": "sha512-TllpMR/t0M5sqCXfj85i4XaAzxmS5tVA16dqvdkMwGmzI+dXLXnw3J+3Vdv7VKw+ThlTMboK6i9rnZ6Nntj5CQ==", + "license": "MIT", + "dependencies": { + "ast-types": "^0.13.4", + "escodegen": "^2.1.0", + "esprima": "^4.0.1" + }, + "engines": { + "node": ">= 14" + } + }, "node_modules/delayed-stream": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", @@ -7079,6 +7451,18 @@ "node": ">=10.13.0" } }, + "node_modules/enquirer": { + "version": "2.3.6", + 
"resolved": "https://registry.npmjs.org/enquirer/-/enquirer-2.3.6.tgz", + "integrity": "sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg==", + "license": "MIT", + "dependencies": { + "ansi-colors": "^4.1.1" + }, + "engines": { + "node": ">=8.6" + } + }, "node_modules/entities": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", @@ -7956,6 +8340,12 @@ "node": ">= 0.6" } }, + "node_modules/eventemitter2": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/eventemitter2/-/eventemitter2-5.0.1.tgz", + "integrity": "sha512-5EM1GHXycJBS6mauYAbVKT1cVs7POKWb2NXD4Vyt8dDqeZa7LaDK1/sjtL+Zb0lzTpSNil4596Dyu97hz37QLg==", + "license": "MIT" + }, "node_modules/eventemitter3": { "version": "4.0.7", "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", @@ -8071,6 +8461,15 @@ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" }, + "node_modules/extrareqp2": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/extrareqp2/-/extrareqp2-1.0.0.tgz", + "integrity": "sha512-Gum0g1QYb6wpPJCVypWP3bbIuaibcFiJcpuPM10YSXp/tzqi84x9PJageob+eN4xVRIOto4wjSGNLyMD54D2xA==", + "license": "MIT", + "dependencies": { + "follow-redirects": "^1.14.0" + } + }, "node_modules/fast-deep-equal": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", @@ -8102,6 +8501,12 @@ "node": ">= 6" } }, + "node_modules/fast-json-patch": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/fast-json-patch/-/fast-json-patch-3.1.1.tgz", + "integrity": "sha512-vf6IHUX2SBcA+5/+4883dsIjpBTqmfBjmYiWK1savxQmFk4JfBMLa7ynTYOs1Rolp/T1betJxHiGD3g1Mn8lUQ==", + "license": "MIT" + }, "node_modules/fast-json-stable-stringify": { "version": "2.1.0", "resolved": 
"https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", @@ -8154,6 +8559,12 @@ "bser": "2.1.1" } }, + "node_modules/fclone": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/fclone/-/fclone-1.0.11.tgz", + "integrity": "sha512-GDqVQezKzRABdeqflsgMr7ktzgF9CyS+p2oe0jJqUY6izSSbhPIQJDpoU4PtGcD7VPM9xh/dVrTu6z1nwgmEGw==", + "license": "MIT" + }, "node_modules/file-entry-cache": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", @@ -8718,6 +9129,32 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/get-uri": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/get-uri/-/get-uri-6.0.4.tgz", + "integrity": "sha512-E1b1lFFLvLgak2whF2xDBcOy6NLVGZBqqjJjsIhvopKfWWEi64pLVTWWehV8KlLerZkfNTA95sTe2OdJKm1OzQ==", + "license": "MIT", + "dependencies": { + "basic-ftp": "^5.0.2", + "data-uri-to-buffer": "^6.0.2", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/git-node-fs": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/git-node-fs/-/git-node-fs-1.0.0.tgz", + "integrity": "sha512-bLQypt14llVXBg0S0u8q8HmU7g9p3ysH+NvVlae5vILuUvs759665HvmR5+wb04KjHyjFcDRxdYb4kyNnluMUQ==", + "license": "MIT" + }, + "node_modules/git-sha1": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/git-sha1/-/git-sha1-0.1.2.tgz", + "integrity": "sha512-2e/nZezdVlyCopOCYHeW0onkbZg7xP1Ad6pndPy1rCygeRykefUS6r7oA5cJRGEFvseiaz5a/qUHFVX1dd6Isg==", + "license": "MIT" + }, "node_modules/glob": { "version": "7.2.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", @@ -9385,6 +9822,25 @@ "node": ">= 0.4" } }, + "node_modules/ip-address": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-9.0.5.tgz", + "integrity": "sha512-zHtQzGojZXTwZTHQqra+ETKd4Sn3vgi7uBmlPoXVWZqYvuKmtI0l/VZTjqGmJY9x88GGOaZ9+G9ES8hC4T4X8g==", + "license": "MIT", + "dependencies": { + "jsbn": 
"1.1.0", + "sprintf-js": "^1.1.3" + }, + "engines": { + "node": ">= 12" + } + }, + "node_modules/ip-address/node_modules/sprintf-js": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.3.tgz", + "integrity": "sha512-Oo+0REFV59/rz3gfJNKQiBlwfHaSESl1pcGyABQsnnIfWOFt6JNj5gCog2U6MLZ//IGYD+nA8nI+mTShREReaA==", + "license": "BSD-3-Clause" + }, "node_modules/ipaddr.js": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.2.0.tgz", @@ -10858,6 +11314,18 @@ "jiti": "bin/jiti.js" } }, + "node_modules/js-git": { + "version": "0.7.8", + "resolved": "https://registry.npmjs.org/js-git/-/js-git-0.7.8.tgz", + "integrity": "sha512-+E5ZH/HeRnoc/LW0AmAyhU+mNcWBzAKE+30+IDMLSLbbK+Tdt02AdkOKq9u15rlJsDEGFqtgckc8ZM59LhhiUA==", + "license": "MIT", + "dependencies": { + "bodec": "^0.1.0", + "culvert": "^0.1.2", + "git-sha1": "^0.1.2", + "pako": "^0.2.5" + } + }, "node_modules/js-tokens": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", @@ -10875,6 +11343,12 @@ "js-yaml": "bin/js-yaml.js" } }, + "node_modules/jsbn": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-1.1.0.tgz", + "integrity": "sha512-4bYVV3aAMtDTTu4+xsDYa6sy9GyJ69/amsu9sYF2zqjiEoZA5xJi3BrfX3uY+/IekIu7MwdObdbDWpoZdBv3/A==", + "license": "MIT" + }, "node_modules/jsdom": { "version": "16.7.0", "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-16.7.0.tgz", @@ -10957,6 +11431,13 @@ "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==" }, + "node_modules/json-stringify-safe": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", + "integrity": 
"sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==", + "license": "ISC", + "optional": true + }, "node_modules/json5": { "version": "2.2.3", "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", @@ -11084,6 +11565,15 @@ "shell-quote": "^1.8.1" } }, + "node_modules/lazy": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/lazy/-/lazy-1.0.11.tgz", + "integrity": "sha512-Y+CjUfLmIpoUCCRl0ub4smrYtGGr5AOa2AKOaWelGHOGz33X/Y/KizefGqbkwfz44+cnq/+9habclf8vOmu2LA==", + "license": "MIT", + "engines": { + "node": ">=0.2.0" + } + }, "node_modules/leven": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", @@ -11475,6 +11965,12 @@ "mkdirp": "bin/cmd.js" } }, + "node_modules/module-details-from-path": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/module-details-from-path/-/module-details-from-path-1.0.3.tgz", + "integrity": "sha512-ySViT69/76t8VhE1xXHK6Ch4NcDd26gx0MzKXLO+F7NOtnqH68d9zF94nT8ZWSxXh8ELOERsnJO/sWt1xZYw5A==", + "license": "MIT" + }, "node_modules/mpd-parser": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/mpd-parser/-/mpd-parser-1.3.1.tgz", @@ -11517,6 +12013,12 @@ "multicast-dns": "cli.js" } }, + "node_modules/mute-stream": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.8.tgz", + "integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==", + "license": "ISC" + }, "node_modules/mux.js": { "version": "7.1.0", "resolved": "https://registry.npmjs.org/mux.js/-/mux.js-7.1.0.tgz", @@ -11572,6 +12074,44 @@ "integrity": "sha512-Tj+HTDSJJKaZnfiuw+iaF9skdPpTo2GtEly5JHnWV/hfv2Qj/9RKsGISQtLh2ox3l5EAGw487hnBee0sIJ6v2g==", "license": "MIT" }, + "node_modules/needle": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/needle/-/needle-2.4.0.tgz", + "integrity": 
"sha512-4Hnwzr3mi5L97hMYeNl8wRW/Onhy4nUKR/lVemJ8gJedxxUyBLm9kkrDColJvoSfwi0jCNhD+xCdOtiGDQiRZg==", + "license": "MIT", + "dependencies": { + "debug": "^3.2.6", + "iconv-lite": "^0.4.4", + "sax": "^1.2.4" + }, + "bin": { + "needle": "bin/needle" + }, + "engines": { + "node": ">= 4.4.x" + } + }, + "node_modules/needle/node_modules/debug": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/needle/node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/negotiator": { "version": "0.6.4", "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.4.tgz", @@ -11585,6 +12125,15 @@ "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" }, + "node_modules/netmask": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/netmask/-/netmask-2.0.2.tgz", + "integrity": "sha512-dBpDMdxv9Irdq66304OLfEmQ9tbNRFnFTuZiLo+bD+r332bBmMJ8GBLXklIXXgxd3+v9+KUnZaUR5PJMa75Gsg==", + "license": "MIT", + "engines": { + "node": ">= 0.4.0" + } + }, "node_modules/no-case": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz", @@ -11650,6 +12199,25 @@ "node": ">=8" } }, + "node_modules/nssocket": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/nssocket/-/nssocket-0.6.0.tgz", + "integrity": 
"sha512-a9GSOIql5IqgWJR3F/JXG4KpJTA3Z53Cj0MeMvGpglytB1nxE4PdFNC0jINe27CS7cGivoynwc054EzCcT3M3w==", + "license": "MIT", + "dependencies": { + "eventemitter2": "~0.4.14", + "lazy": "~1.0.11" + }, + "engines": { + "node": ">= 0.10.x" + } + }, + "node_modules/nssocket/node_modules/eventemitter2": { + "version": "0.4.14", + "resolved": "https://registry.npmjs.org/eventemitter2/-/eventemitter2-0.4.14.tgz", + "integrity": "sha512-K7J4xq5xAD5jHsGM5ReWXRTFa3JRGofHiMcVgQ8PRwgWxzjHpMWCIzsmyf60+mh8KLsqYPcjUMa0AC4hd6lPyQ==", + "license": "MIT" + }, "node_modules/nth-check": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz", @@ -11942,11 +12510,84 @@ "node": ">=6" } }, + "node_modules/pac-proxy-agent": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/pac-proxy-agent/-/pac-proxy-agent-7.2.0.tgz", + "integrity": "sha512-TEB8ESquiLMc0lV8vcd5Ql/JAKAoyzHFXaStwjkzpOpC5Yv+pIzLfHvjTSdf3vpa2bMiUQrg9i6276yn8666aA==", + "license": "MIT", + "dependencies": { + "@tootallnate/quickjs-emscripten": "^0.23.0", + "agent-base": "^7.1.2", + "debug": "^4.3.4", + "get-uri": "^6.0.1", + "http-proxy-agent": "^7.0.0", + "https-proxy-agent": "^7.0.6", + "pac-resolver": "^7.0.1", + "socks-proxy-agent": "^8.0.5" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/pac-proxy-agent/node_modules/agent-base": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.3.tgz", + "integrity": "sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw==", + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, + "node_modules/pac-proxy-agent/node_modules/http-proxy-agent": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", + "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.0", + 
"debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/pac-proxy-agent/node_modules/https-proxy-agent": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/pac-resolver": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/pac-resolver/-/pac-resolver-7.0.1.tgz", + "integrity": "sha512-5NPgf87AT2STgwa2ntRMr45jTKrYBGkVU36yT0ig/n/GMAa3oPqhZfIQ2kMEimReg0+t9kZViDVZ83qfVUlckg==", + "license": "MIT", + "dependencies": { + "degenerator": "^5.0.0", + "netmask": "^2.0.2" + }, + "engines": { + "node": ">= 14" + } + }, "node_modules/package-json-from-dist": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==" }, + "node_modules/pako": { + "version": "0.2.9", + "resolved": "https://registry.npmjs.org/pako/-/pako-0.2.9.tgz", + "integrity": "sha512-NUcwaKxUxWrZLpDG+z/xZaCgQITkA/Dv4V/T6bw7VON6l1Xz/VnrBqrYjZQ12TamKHzITTfOEIYUj48y2KXImA==", + "license": "MIT" + }, "node_modules/param-case": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/param-case/-/param-case-3.0.4.tgz", @@ -12089,6 +12730,18 @@ "url": "https://github.com/sponsors/jonschlinkert" } }, + "node_modules/pidusage": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/pidusage/-/pidusage-3.0.2.tgz", + "integrity": "sha512-g0VU+y08pKw5M8EZ2rIGiEBaB8wrQMjYGFfW2QVIfyT8V+fq8YFLkvlz4bz5ljvFDJYNFCWT3PWqcRr2FKO81w==", + "license": "MIT", + "dependencies": { + "safe-buffer": "^5.2.1" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/pify": { "version": 
"2.3.0", "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", @@ -12203,6 +12856,186 @@ "react": ">=16" } }, + "node_modules/pm2": { + "version": "5.4.3", + "resolved": "https://registry.npmjs.org/pm2/-/pm2-5.4.3.tgz", + "integrity": "sha512-4/I1htIHzZk1Y67UgOCo4F1cJtas1kSds31N8zN0PybO230id1nigyjGuGFzUnGmUFPmrJ0On22fO1ChFlp7VQ==", + "license": "AGPL-3.0", + "dependencies": { + "@pm2/agent": "~2.0.0", + "@pm2/io": "~6.0.1", + "@pm2/js-api": "~0.8.0", + "@pm2/pm2-version-check": "latest", + "async": "~3.2.0", + "blessed": "0.1.81", + "chalk": "3.0.0", + "chokidar": "^3.5.3", + "cli-tableau": "^2.0.0", + "commander": "2.15.1", + "croner": "~4.1.92", + "dayjs": "~1.11.5", + "debug": "^4.3.1", + "enquirer": "2.3.6", + "eventemitter2": "5.0.1", + "fclone": "1.0.11", + "js-yaml": "~4.1.0", + "mkdirp": "1.0.4", + "needle": "2.4.0", + "pidusage": "~3.0", + "pm2-axon": "~4.0.1", + "pm2-axon-rpc": "~0.7.1", + "pm2-deploy": "~1.0.2", + "pm2-multimeter": "^0.1.2", + "promptly": "^2", + "semver": "^7.2", + "source-map-support": "0.5.21", + "sprintf-js": "1.1.2", + "vizion": "~2.2.1" + }, + "bin": { + "pm2": "bin/pm2", + "pm2-dev": "bin/pm2-dev", + "pm2-docker": "bin/pm2-docker", + "pm2-runtime": "bin/pm2-runtime" + }, + "engines": { + "node": ">=12.0.0" + }, + "optionalDependencies": { + "pm2-sysmonit": "^1.2.8" + } + }, + "node_modules/pm2-axon": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/pm2-axon/-/pm2-axon-4.0.1.tgz", + "integrity": "sha512-kES/PeSLS8orT8dR5jMlNl+Yu4Ty3nbvZRmaAtROuVm9nYYGiaoXqqKQqQYzWQzMYWUKHMQTvBlirjE5GIIxqg==", + "license": "MIT", + "dependencies": { + "amp": "~0.3.1", + "amp-message": "~0.1.1", + "debug": "^4.3.1", + "escape-string-regexp": "^4.0.0" + }, + "engines": { + "node": ">=5" + } + }, + "node_modules/pm2-axon-rpc": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/pm2-axon-rpc/-/pm2-axon-rpc-0.7.1.tgz", + "integrity": 
"sha512-FbLvW60w+vEyvMjP/xom2UPhUN/2bVpdtLfKJeYM3gwzYhoTEEChCOICfFzxkxuoEleOlnpjie+n1nue91bDQw==", + "license": "MIT", + "dependencies": { + "debug": "^4.3.1" + }, + "engines": { + "node": ">=5" + } + }, + "node_modules/pm2-deploy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/pm2-deploy/-/pm2-deploy-1.0.2.tgz", + "integrity": "sha512-YJx6RXKrVrWaphEYf++EdOOx9EH18vM8RSZN/P1Y+NokTKqYAca/ejXwVLyiEpNju4HPZEk3Y2uZouwMqUlcgg==", + "license": "MIT", + "dependencies": { + "run-series": "^1.1.8", + "tv4": "^1.3.0" + }, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/pm2-multimeter": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/pm2-multimeter/-/pm2-multimeter-0.1.2.tgz", + "integrity": "sha512-S+wT6XfyKfd7SJIBqRgOctGxaBzUOmVQzTAS+cg04TsEUObJVreha7lvCfX8zzGVr871XwCSnHUU7DQQ5xEsfA==", + "license": "MIT/X11", + "dependencies": { + "charm": "~0.1.1" + } + }, + "node_modules/pm2-sysmonit": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/pm2-sysmonit/-/pm2-sysmonit-1.2.8.tgz", + "integrity": "sha512-ACOhlONEXdCTVwKieBIQLSi2tQZ8eKinhcr9JpZSUAL8Qy0ajIgRtsLxG/lwPOW3JEKqPyw/UaHmTWhUzpP4kA==", + "license": "Apache", + "optional": true, + "dependencies": { + "async": "^3.2.0", + "debug": "^4.3.1", + "pidusage": "^2.0.21", + "systeminformation": "^5.7", + "tx2": "~1.0.4" + } + }, + "node_modules/pm2-sysmonit/node_modules/pidusage": { + "version": "2.0.21", + "resolved": "https://registry.npmjs.org/pidusage/-/pidusage-2.0.21.tgz", + "integrity": "sha512-cv3xAQos+pugVX+BfXpHsbyz/dLzX+lr44zNMsYiGxUw+kV5sgQCIcLd1z+0vq+KyC7dJ+/ts2PsfgWfSC3WXA==", + "license": "MIT", + "optional": true, + "dependencies": { + "safe-buffer": "^5.2.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pm2/node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": 
"sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "license": "Python-2.0" + }, + "node_modules/pm2/node_modules/chalk": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", + "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pm2/node_modules/commander": { + "version": "2.15.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.15.1.tgz", + "integrity": "sha512-VlfT9F3V0v+jr4yxPc5gg9s62/fIVWsd2Bk2iD435um1NlGMYdVCq+MjcXnhYq2icNOizHr1kK+5TI6H0Hy0ag==", + "license": "MIT" + }, + "node_modules/pm2/node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/pm2/node_modules/mkdirp": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", + "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", + "license": "MIT", + "bin": { + "mkdirp": "bin/cmd.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/pm2/node_modules/sprintf-js": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.2.tgz", + "integrity": "sha512-VE0SOVEHCk7Qc8ulkWw3ntAzXuqf7S2lvwQaDLRnUeIEaKNQJzV6BwmLKhOqT61aGhfUMrXeaBk+oDGCzvhcug==", + "license": "BSD-3-Clause" + }, "node_modules/possible-typed-array-names": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz", @@ 
-13529,6 +14362,15 @@ "asap": "~2.0.6" } }, + "node_modules/promptly": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/promptly/-/promptly-2.2.0.tgz", + "integrity": "sha512-aC9j+BZsRSSzEsXBNBwDnAxujdx19HycZoKgRgzWnS8eOHg1asuf9heuLprfbe739zY3IdUQx+Egv6Jn135WHA==", + "license": "MIT", + "dependencies": { + "read": "^1.0.4" + } + }, "node_modules/prompts": { "version": "2.4.2", "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", @@ -13581,6 +14423,69 @@ "node": ">= 0.10" } }, + "node_modules/proxy-agent": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/proxy-agent/-/proxy-agent-6.3.1.tgz", + "integrity": "sha512-Rb5RVBy1iyqOtNl15Cw/llpeLH8bsb37gM1FUfKQ+Wck6xHlbAhWGUFiTRHtkjqGTA5pSHz6+0hrPW/oECihPQ==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.0.2", + "debug": "^4.3.4", + "http-proxy-agent": "^7.0.0", + "https-proxy-agent": "^7.0.2", + "lru-cache": "^7.14.1", + "pac-proxy-agent": "^7.0.1", + "proxy-from-env": "^1.1.0", + "socks-proxy-agent": "^8.0.2" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/proxy-agent/node_modules/agent-base": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.3.tgz", + "integrity": "sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw==", + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, + "node_modules/proxy-agent/node_modules/http-proxy-agent": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", + "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.0", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/proxy-agent/node_modules/https-proxy-agent": { + "version": "7.0.6", + "resolved": 
"https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/proxy-agent/node_modules/lru-cache": { + "version": "7.18.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz", + "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, "node_modules/proxy-from-env": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", @@ -14037,6 +14942,18 @@ "react-dom": "^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, + "node_modules/read": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/read/-/read-1.0.7.tgz", + "integrity": "sha512-rSOKNYUmaxy0om1BNjMN4ezNT6VKK+2xF4GBhc81mkH7L60i6dp8qPYrkndNLT3QPphoII3maL9PVC9XmhHwVQ==", + "license": "ISC", + "dependencies": { + "mute-stream": "~0.0.4" + }, + "engines": { + "node": ">=0.8" + } + }, "node_modules/read-cache": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", @@ -14239,6 +15156,20 @@ "node": ">=0.10.0" } }, + "node_modules/require-in-the-middle": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/require-in-the-middle/-/require-in-the-middle-5.2.0.tgz", + "integrity": "sha512-efCx3b+0Z69/LGJmm9Yvi4cqEdxnoGnxYxGxBghkkTTFeXRtTCmmhO0AnAfHz59k957uTSuy8WaHqOs8wbYUWg==", + "license": "MIT", + "dependencies": { + "debug": "^4.1.1", + "module-details-from-path": "^1.0.3", + "resolve": "^1.22.1" + }, + "engines": { + "node": ">=6" + } + }, "node_modules/requires-port": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", @@ -14455,6 
+15386,26 @@ "queue-microtask": "^1.2.2" } }, + "node_modules/run-series": { + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/run-series/-/run-series-1.1.9.tgz", + "integrity": "sha512-Arc4hUN896vjkqCYrUXquBFtRZdv1PfLbTYP71efP6butxyQ0kWpiNJyAgsxscmQg1cqvHY32/UCBzXedTpU2g==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, "node_modules/safe-array-concat": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.3.tgz", @@ -14892,6 +15843,12 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/shimmer": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/shimmer/-/shimmer-1.2.1.tgz", + "integrity": "sha512-sQTKC1Re/rM6XyFM6fIAGHRPVGvyXfgzIDvzoq608vM+jeyVD0Tu1E6Np0Kc2zAIFWIj963V2800iF/9LPieQw==", + "license": "BSD-2-Clause" + }, "node_modules/side-channel": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", @@ -14978,6 +15935,16 @@ "node": ">=8" } }, + "node_modules/smart-buffer": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.2.0.tgz", + "integrity": "sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==", + "license": "MIT", + "engines": { + "node": ">= 6.0.0", + "npm": ">= 3.0.0" + } + }, "node_modules/sockjs": { "version": "0.3.24", "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz", @@ -14988,6 +15955,43 @@ "websocket-driver": "^0.7.4" } }, + "node_modules/socks": { + "version": "2.8.4", + "resolved": "https://registry.npmjs.org/socks/-/socks-2.8.4.tgz", + "integrity": "sha512-D3YaD0aRxR3mEcqnidIs7ReYJFVzWdd6fXJYUM8ixcQcJRGTka/b3saV0KflYhyVJXKhb947GndU35SxYNResQ==", + "license": "MIT", + "dependencies": { + 
"ip-address": "^9.0.5", + "smart-buffer": "^4.2.0" + }, + "engines": { + "node": ">= 10.0.0", + "npm": ">= 3.0.0" + } + }, + "node_modules/socks-proxy-agent": { + "version": "8.0.5", + "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-8.0.5.tgz", + "integrity": "sha512-HehCEsotFqbPW9sJ8WVYB6UbmIMv7kUUORIF2Nncq4VQvBfNBLibW9YZR5dlYCSUhwcD628pRllm7n+E+YTzJw==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "^4.3.4", + "socks": "^2.8.3" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/socks-proxy-agent/node_modules/agent-base": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.3.tgz", + "integrity": "sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw==", + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, "node_modules/source-list-map": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/source-list-map/-/source-list-map-2.0.1.tgz", @@ -15746,6 +16750,33 @@ "resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz", "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==" }, + "node_modules/systeminformation": { + "version": "5.25.11", + "resolved": "https://registry.npmjs.org/systeminformation/-/systeminformation-5.25.11.tgz", + "integrity": "sha512-jI01fn/t47rrLTQB0FTlMCC+5dYx8o0RRF+R4BPiUNsvg5OdY0s9DKMFmJGrx5SwMZQ4cag0Gl6v8oycso9b/g==", + "license": "MIT", + "optional": true, + "os": [ + "darwin", + "linux", + "win32", + "freebsd", + "openbsd", + "netbsd", + "sunos", + "android" + ], + "bin": { + "systeminformation": "lib/cli.js" + }, + "engines": { + "node": ">=8.0.0" + }, + "funding": { + "type": "Buy me a coffee", + "url": "https://www.buymeacoffee.com/systeminfo" + } + }, "node_modules/tailwindcss": { "version": "3.4.17", "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.17.tgz", @@ -16101,6 
+17132,34 @@ "integrity": "sha512-FHncC10WpBd2eOmGwpmQsWLDoK4cqsA/UT/GqNoaKOQnT8uzhtCbg3EoUDMvqpOSAI0S26mr0rkjzbOO6S3v1g==", "license": "ISC" }, + "node_modules/tv4": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/tv4/-/tv4-1.3.0.tgz", + "integrity": "sha512-afizzfpJgvPr+eDkREK4MxJ/+r8nEEHcmitwgnPUqpaP+FpwQyadnxNoSACbgc/b1LsZYtODGoPiFxQrgJgjvw==", + "license": [ + { + "type": "Public Domain", + "url": "http://geraintluff.github.io/tv4/LICENSE.txt" + }, + { + "type": "MIT", + "url": "http://jsonary.com/LICENSE.txt" + } + ], + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/tx2": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/tx2/-/tx2-1.0.5.tgz", + "integrity": "sha512-sJ24w0y03Md/bxzK4FU8J8JveYYUbSs2FViLJ2D/8bytSiyPRbuE3DyL/9UKYXTZlV3yXq0L8GLlhobTnekCVg==", + "license": "MIT", + "optional": true, + "dependencies": { + "json-stringify-safe": "^5.0.1" + } + }, "node_modules/type-check": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", @@ -16523,6 +17582,30 @@ "global": "^4.3.1" } }, + "node_modules/vizion": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/vizion/-/vizion-2.2.1.tgz", + "integrity": "sha512-sfAcO2yeSU0CSPFI/DmZp3FsFE9T+8913nv1xWBOyzODv13fwkn6Vl7HqxGpkr9F608M+8SuFId3s+BlZqfXww==", + "license": "Apache-2.0", + "dependencies": { + "async": "^2.6.3", + "git-node-fs": "^1.0.0", + "ini": "^1.3.5", + "js-git": "^0.7.8" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/vizion/node_modules/async": { + "version": "2.6.4", + "resolved": "https://registry.npmjs.org/async/-/async-2.6.4.tgz", + "integrity": "sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==", + "license": "MIT", + "dependencies": { + "lodash": "^4.17.14" + } + }, "node_modules/w3c-hr-time": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/w3c-hr-time/-/w3c-hr-time-1.0.2.tgz", diff --git a/frontend/package.json 
b/frontend/package.json index 38affdfe..b35b8885 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -17,6 +17,7 @@ "material-react-table": "^3.2.0", "mpegts.js": "^1.4.2", "planby": "^1.1.7", + "pm2": "^5.4.3", "prettier": "^3.5.2", "react": "18.2.0", "react-dom": "18.2.0", diff --git a/frontend/public/index.html b/frontend/public/index.html index de1fb039..5e0e89a0 100644 --- a/frontend/public/index.html +++ b/frontend/public/index.html @@ -10,7 +10,7 @@