From 557056296085cac63ad4695e9053546b15381999 Mon Sep 17 00:00:00 2001 From: dekzter Date: Mon, 7 Apr 2025 11:57:00 -0400 Subject: [PATCH 1/5] epg match run externally to keep memory usage low --- apps/channels/tasks.py | 174 +++++++++++----------------------------- core/utils.py | 31 ------- dispatcharr/settings.py | 1 - scripts/epg_match.py | 159 ++++++++++++++++++++++++++++++++++++ 4 files changed, 205 insertions(+), 160 deletions(-) create mode 100644 scripts/epg_match.py diff --git a/apps/channels/tasks.py b/apps/channels/tasks.py index 7e271846..2cecbf04 100644 --- a/apps/channels/tasks.py +++ b/apps/channels/tasks.py @@ -4,17 +4,15 @@ import os import re import requests import time -import gc +import json +import subprocess from datetime import datetime from celery import shared_task -from rapidfuzz import fuzz -from django.conf import settings -from django.db import transaction from django.utils.text import slugify from apps.channels.models import Channel -from apps.epg.models import EPGData, EPGSource +from apps.epg.models import EPGData from core.models import CoreSettings from channels.layers import get_channel_layer @@ -22,15 +20,10 @@ from asgiref.sync import async_to_sync from asgiref.sync import async_to_sync from channels.layers import get_channel_layer -from core.utils import SentenceTransformer +import tempfile logger = logging.getLogger(__name__) -# Thresholds -BEST_FUZZY_THRESHOLD = 85 -LOWER_FUZZY_THRESHOLD = 40 -EMBED_SIM_THRESHOLD = 0.65 - # Words we remove to help with fuzzy + embedding matching COMMON_EXTRANEOUS_WORDS = [ "tv", "channel", "network", "television", @@ -70,12 +63,8 @@ def match_epg_channels(): 4) If a match is found, we set channel.tvg_id 5) Summarize and log results. """ - from sentence_transformers import util - logger.info("Starting EPG matching logic...") - st_model = SentenceTransformer.get_model() - # Attempt to retrieve a "preferred-region" if configured try: region_obj = CoreSettings.objects.get(key="preferred-region") @@ -83,130 +72,61 @@ def match_epg_channels(): except CoreSettings.DoesNotExist: region_code = None - # Gather EPGData rows so we can do fuzzy matching in memory - all_epg = {e.id: e for e in EPGData.objects.all()} - - epg_rows = [] - for e in list(all_epg.values()): - epg_rows.append({ - "epg_id": e.id, - "tvg_id": e.tvg_id or "", - "raw_name": e.name, - "norm_name": normalize_name(e.name), - }) - - epg_embeddings = None - if any(row["norm_name"] for row in epg_rows): - epg_embeddings = st_model.encode( - [row["norm_name"] for row in epg_rows], - convert_to_tensor=True - ) - matched_channels = [] channels_to_update = [] - source = EPGSource.objects.filter(is_active=True).first() - epg_file_path = getattr(source, 'file_path', None) if source else None + channels_json = [{ + "id": channel.id, + "name": channel.name, + "tvg_id": channel.tvg_id, + "fallback_name": channel.tvg_id.strip() if channel.tvg_id else channel.name, + "norm_chan": normalize_name(channel.tvg_id.strip() if channel.tvg_id else channel.name) + } for channel in Channel.objects.all() if not channel.epg_data] - with transaction.atomic(): - for chan in Channel.objects.all(): - # skip if channel already assigned an EPG - if chan.epg_data: - continue + epg_json = [{ + 'id': epg.id, + 'tvg_id': epg.tvg_id, + 'name': epg.name, + 'norm_name': normalize_name(epg.name), + 'epg_source_id': epg.epg_source.id, + } for epg in EPGData.objects.all()] - # If channel has a tvg_id that doesn't exist in EPGData, do direct check. 
-            # I don't THINK this should happen now that we assign EPG on channel creation.
-            if chan.tvg_id:
-                epg_match = EPGData.objects.filter(tvg_id=chan.tvg_id).first()
-                if epg_match:
-                    chan.epg_data = epg_match
-                    logger.info(f"Channel {chan.id} '{chan.name}' => EPG found by tvg_id={chan.tvg_id}")
-                    channels_to_update.append(chan)
-                    continue
+    payload = {
+        "channels": channels_json,
+        "epg_data": epg_json,
+        "region_code": region_code,
+    }
 
-            # C) Perform name-based fuzzy matching
-            fallback_name = chan.tvg_id.strip() if chan.tvg_id else chan.name
-            norm_chan = normalize_name(fallback_name)
-            if not norm_chan:
-                logger.info(f"Channel {chan.id} '{chan.name}' => empty after normalization, skipping")
-                continue
+    with tempfile.NamedTemporaryFile(delete=False) as temp_file:
+        temp_file.write(json.dumps(payload).encode('utf-8'))
+        temp_file_path = temp_file.name
 
-            best_score = 0
-            best_epg = None
-            for row in epg_rows:
-                if not row["norm_name"]:
-                    continue
-                base_score = fuzz.ratio(norm_chan, row["norm_name"])
-                bonus = 0
-                # Region-based bonus/penalty
-                combined_text = row["tvg_id"].lower() + " " + row["raw_name"].lower()
-                dot_regions = re.findall(r'\.([a-z]{2})', combined_text)
-                if region_code:
-                    if dot_regions:
-                        if region_code in dot_regions:
-                            bonus = 30  # bigger bonus if .us or .ca matches
-                        else:
-                            bonus = -15
-                    elif region_code in combined_text:
-                        bonus = 15
-                score = base_score + bonus
+    process = subprocess.Popen(
+        ['python', '/app/scripts/epg_match.py', temp_file_path],
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        text=True
+    )
 
-                logger.debug(
-                    f"Channel {chan.id} '{fallback_name}' => EPG row {row['epg_id']}: "
-                    f"raw_name='{row['raw_name']}', norm_name='{row['norm_name']}', "
-                    f"combined_text='{combined_text}', dot_regions={dot_regions}, "
-                    f"base_score={base_score}, bonus={bonus}, total_score={score}"
-                )
+    # Drain both pipes together: reading stderr to EOF on its own can
+    # deadlock once a large JSON result fills the stdout pipe buffer.
+    stdout, stderr = process.communicate()
 
-                if score > best_score:
-                    best_score = score
-                    best_epg = row
+    # Relay the script's stderr logging once it has finished
+    for line in stderr.splitlines():
+        if line:
+            logger.info(line.strip())
 
-            # If no best match was found, skip
-            if not best_epg:
-                logger.info(f"Channel {chan.id} '{fallback_name}' => no EPG match at all.")
-                continue
+    os.remove(temp_file_path)
 
-            # If best_score is above BEST_FUZZY_THRESHOLD => direct accept
-            if best_score >= BEST_FUZZY_THRESHOLD:
-                chan.epg_data = all_epg[best_epg["epg_id"]]
-                chan.save()
+    if process.returncode != 0:
+        return f"Failed to process EPG matching: {stderr}"
 
-                matched_channels.append((chan.id, fallback_name, best_epg["tvg_id"]))
-                logger.info(
-                    f"Channel {chan.id} '{fallback_name}' => matched tvg_id={best_epg['tvg_id']} "
-                    f"(score={best_score})"
-                )
+    result = json.loads(stdout)
+    channels_to_update = result["channels_to_update"]
+    matched_channels = result["matched_channels"]
 
-            # If best_score is in the “middle range,” do embedding check
-            elif best_score >= LOWER_FUZZY_THRESHOLD and epg_embeddings is not None:
-                chan_embedding = st_model.encode(norm_chan, convert_to_tensor=True)
-                sim_scores = util.cos_sim(chan_embedding, epg_embeddings)[0]
-                top_index = int(sim_scores.argmax())
-                top_value = float(sim_scores[top_index])
-                if top_value >= EMBED_SIM_THRESHOLD:
-                    matched_epg = epg_rows[top_index]
-                    chan.epg_data = all_epg[matched_epg["epg_id"]]
-                    chan.save()
-
-                    matched_channels.append((chan.id, fallback_name, matched_epg["tvg_id"]))
-                    logger.info(
-                        f"Channel {chan.id} '{fallback_name}' => matched EPG tvg_id={matched_epg['tvg_id']} "
-                        f"(fuzzy={best_score}, 
cos-sim={top_value:.2f})"
-                    )
-                else:
-                    logger.info(
-                        f"Channel {chan.id} '{fallback_name}' => fuzzy={best_score}, "
-                        f"cos-sim={top_value:.2f} < {EMBED_SIM_THRESHOLD}, skipping"
-                    )
-            else:
-                logger.info(
-                    f"Channel {chan.id} '{fallback_name}' => fuzzy={best_score} < "
-                    f"{LOWER_FUZZY_THRESHOLD}, skipping"
-                )
-
-        if channels_to_update:
-            Channel.objects.bulk_update(channels_to_update, ['epg_data'])
+    if channels_to_update:
+        # The subprocess returns plain dicts; bulk_update needs model
+        # instances, so rebuild Channel objects with their new EPG ids.
+        epg_ids = {c["id"]: c["epg_data_id"] for c in channels_to_update}
+        channel_objs = list(Channel.objects.filter(id__in=epg_ids))
+        for channel_obj in channel_objs:
+            channel_obj.epg_data_id = epg_ids[channel_obj.id]
+        Channel.objects.bulk_update(channel_objs, ['epg_data'])
 
     total_matched = len(matched_channels)
     if total_matched:
@@ -227,8 +147,6 @@ def match_epg_channels():
         }
     )
 
-    SentenceTransformer.clear()
-    gc.collect()
     return f"Done. Matched {total_matched} channel(s)."
 
 @shared_task
diff --git a/core/utils.py b/core/utils.py
index d6f0b446..3a5d84f4 100644
--- a/core/utils.py
+++ b/core/utils.py
@@ -160,34 +160,3 @@ def send_websocket_event(event, success, data):
             "data": {"success": True, "type": "epg_channels"}
         }
     )
-
-class SentenceTransformer:
-    _instance = None
-
-    @classmethod
-    def get_model(cls):
-        if cls._instance is None:
-            from sentence_transformers import SentenceTransformer as st
-
-            # Load the sentence-transformers model once at the module level
-            SENTENCE_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"
-            MODEL_PATH = os.path.join(settings.MEDIA_ROOT, "models", "all-MiniLM-L6-v2")
-            os.makedirs(MODEL_PATH, exist_ok=True)
-
-            # If not present locally, download:
-            if not os.path.exists(os.path.join(MODEL_PATH, "config.json")):
-                logger.info(f"Local model not found in {MODEL_PATH}; downloading from {SENTENCE_MODEL_NAME}...")
-                cls._instance = st(SENTENCE_MODEL_NAME, cache_folder=MODEL_PATH)
-            else:
-                logger.info(f"Loading local model from {MODEL_PATH}")
-                cls._instance = st(MODEL_PATH)
-
-        return cls._instance
-
-    @classmethod
-    def clear(cls):
-        """Clear the model instance and release memory."""
-        if cls._instance is not None:
-            del cls._instance
-            cls._instance = None
-            gc.collect()
diff --git a/dispatcharr/settings.py b/dispatcharr/settings.py
index 8aa2c8ca..96bda89b 100644
--- a/dispatcharr/settings.py
+++ b/dispatcharr/settings.py
@@ -1,7 +1,6 @@
 import os
 from pathlib import Path
 from datetime import timedelta
-from celery.schedules import crontab
 
 BASE_DIR = Path(__file__).resolve().parent.parent
 
diff --git a/scripts/epg_match.py b/scripts/epg_match.py
new file mode 100644
index 00000000..bfeecd16
--- /dev/null
+++ b/scripts/epg_match.py
@@ -0,0 +1,159 @@
+# scripts/epg_match.py
+# Runs the fuzzy + embedding EPG matching in a separate process so the
+# sentence-transformers model's memory is released when the script exits.
+
+import sys
+import json
+import re
+import os
+from rapidfuzz import fuzz
+from sentence_transformers import util
+from sentence_transformers import SentenceTransformer as st
+
+# Load the sentence-transformers model once at the module level
+SENTENCE_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"
+MODEL_PATH = os.path.join("/app/media", "models", "all-MiniLM-L6-v2")
+
+# Thresholds
+BEST_FUZZY_THRESHOLD = 85
+LOWER_FUZZY_THRESHOLD = 40
+EMBED_SIM_THRESHOLD = 0.65
+
+def eprint(*args, **kwargs):
+    print(*args, file=sys.stderr, **kwargs)
+
+def process_data(input_data):
+    os.makedirs(MODEL_PATH, exist_ok=True)
+
+    # If not present locally, download:
+    if not os.path.exists(os.path.join(MODEL_PATH, "config.json")):
+        eprint(f"Local model not found in {MODEL_PATH}; downloading from {SENTENCE_MODEL_NAME}...")
+        st_model = st(SENTENCE_MODEL_NAME, cache_folder=MODEL_PATH)
+    else:
+        eprint(f"Loading local model from {MODEL_PATH}")
+        st_model = st(MODEL_PATH)
+
+    channels = input_data["channels"]
+    epg_data = input_data["epg_data"]
+    region_code = 
input_data["region_code"] + + epg_embeddings = None + if any(row["norm_name"] for row in epg_data): + epg_embeddings = st_model.encode( + [row["norm_name"] for row in epg_data], + convert_to_tensor=True + ) + + channels_to_update = [] + matched_channels = [] + + for chan in channels: + # If channel has a tvg_id that doesn't exist in EPGData, do direct check. + # I don't THINK this should happen now that we assign EPG on channel creation. + if chan["tvg_id"]: + epg_match = [epg["id"] for epg in epg_data if epg["tvg_id"] == chan["tvg_id"]] + if epg_match: + chan["epg_data_id"] = epg_match[0]["id"] + eprint(f"Channel {chan['id']} '{chan['name']}' => EPG found by tvg_id={chan['tvg_id']}") + channels_to_update.append(chan) + continue + + # C) Perform name-based fuzzy matching + fallback_name = chan["tvg_id"].strip() if chan["tvg_id"] else chan["name"] + if not chan["norm_chan"]: + eprint(f"Channel {chan['id']} '{chan['name']}' => empty after normalization, skipping") + continue + + best_score = 0 + best_epg = None + for row in epg_data: + if not row["norm_name"]: + continue + + base_score = fuzz.ratio(chan["norm_chan"], row["norm_name"]) + bonus = 0 + # Region-based bonus/penalty + combined_text = row["tvg_id"].lower() + " " + row["name"].lower() + dot_regions = re.findall(r'\.([a-z]{2})', combined_text) + if region_code: + if dot_regions: + if region_code in dot_regions: + bonus = 30 # bigger bonus if .us or .ca matches + else: + bonus = -15 + elif region_code in combined_text: + bonus = 15 + score = base_score + bonus + + eprint( + f"Channel {chan['id']} '{fallback_name}' => EPG row {row['id']}: " + f"name='{row['name']}', norm_name='{row['norm_name']}', " + f"combined_text='{combined_text}', dot_regions={dot_regions}, " + f"base_score={base_score}, bonus={bonus}, total_score={score}" + ) + + if score > best_score: + best_score = score + best_epg = row + + # If no best match was found, skip + if not best_epg: + eprint(f"Channel {chan['id']} '{fallback_name}' => no EPG match at all.") + continue + + # If best_score is above BEST_FUZZY_THRESHOLD => direct accept + if best_score >= BEST_FUZZY_THRESHOLD: + chan["epg_data_id"] = best_epg["id"] + channels_to_update.append(chan) + + matched_channels.append((chan['id'], fallback_name, best_epg["tvg_id"])) + eprint( + f"Channel {chan['id']} '{fallback_name}' => matched tvg_id={best_epg['tvg_id']} " + f"(score={best_score})" + ) + + # If best_score is in the “middle range,” do embedding check + elif best_score >= LOWER_FUZZY_THRESHOLD and epg_embeddings is not None: + chan_embedding = st_model.encode(chan["norm_chan"], convert_to_tensor=True) + sim_scores = util.cos_sim(chan_embedding, epg_embeddings)[0] + top_index = int(sim_scores.argmax()) + top_value = float(sim_scores[top_index]) + if top_value >= EMBED_SIM_THRESHOLD: + matched_epg = epg_data[top_index] + chan["epg_data_id"] = matched_epg["id"] + channels_to_update.append(chan) + + matched_channels.append((chan['id'], fallback_name, matched_epg["tvg_id"])) + eprint( + f"Channel {chan['id']} '{fallback_name}' => matched EPG tvg_id={matched_epg['tvg_id']} " + f"(fuzzy={best_score}, cos-sim={top_value:.2f})" + ) + else: + eprint( + f"Channel {chan['id']} '{fallback_name}' => fuzzy={best_score}, " + f"cos-sim={top_value:.2f} < {EMBED_SIM_THRESHOLD}, skipping" + ) + else: + eprint( + f"Channel {chan['id']} '{fallback_name}' => fuzzy={best_score} < " + f"{LOWER_FUZZY_THRESHOLD}, skipping" + ) + + return { + "channels_to_update": channels_to_update, + "matched_channels": matched_channels, + } + +def 
main():
+    # Read input data from a file
+    input_file_path = sys.argv[1]
+    with open(input_file_path, 'r') as f:
+        input_data = json.load(f)
+
+    # Run the matching logic
+    result = process_data(input_data)
+
+    # Output result to stdout
+    print(json.dumps(result))
+
+if __name__ == "__main__":
+    main()

From 20c8ff21792941259c3219956e1f92bf439d97b8 Mon Sep 17 00:00:00 2001
From: dekzter
Date: Mon, 7 Apr 2025 12:20:46 -0400
Subject: [PATCH 2/5] file extension check for m3u and epg watcher

---
 core/tasks.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/core/tasks.py b/core/tasks.py
index 62c20b3e..061dd1a5 100644
--- a/core/tasks.py
+++ b/core/tasks.py
@@ -43,6 +43,9 @@ def scan_and_process_files():
         if not os.path.isfile(filepath):
             continue
 
+        if not filename.endswith(('.m3u', '.m3u8')):
+            continue
+
         mtime = os.path.getmtime(filepath)
         age = now - mtime
         redis_key = REDIS_PREFIX + filepath
@@ -85,6 +88,9 @@ def scan_and_process_files():
         if not os.path.isfile(filepath):
             continue
 
+        if not filename.endswith(('.xml', '.gz')):
+            continue
+
         mtime = os.path.getmtime(filepath)
         age = now - mtime
         redis_key = REDIS_PREFIX + filepath

From e507c6f23c35eae45baf58fbfa29799bc4655990 Mon Sep 17 00:00:00 2001
From: dekzter
Date: Mon, 7 Apr 2025 12:46:45 -0400
Subject: [PATCH 3/5] updated timestamp and extension checks for m3u and epg

---
 ...gsource_created_at_epgsource_updated_at.py | 24 ++++++++++
 ...009_alter_epgsource_created_at_and_more.py | 23 ++++++++++
 apps/epg/models.py                            |  8 ++++
 apps/epg/serializers.py                       |  3 +-
 apps/epg/tasks.py                             |  2 +
 apps/m3u/serializers.py                       |  2 +-
 apps/m3u/tasks.py                             |  1 +
 docker/Dockerfile                             |  2 +-
 .../src/components/forms/ChannelGroup.jsx     | 46 ++++++-----------
 frontend/src/components/forms/Recording.jsx   | 28 ++++-------
 .../src/components/tables/ChannelsTable.jsx   |  2 +-
 frontend/src/components/tables/EPGsTable.jsx  |  6 +++
 frontend/src/components/tables/M3UsTable.jsx  |  6 +++
 .../src/components/tables/StreamsTable.jsx    |  6 +--
 14 files changed, 106 insertions(+), 53 deletions(-)
 create mode 100644 apps/epg/migrations/0008_epgsource_created_at_epgsource_updated_at.py
 create mode 100644 apps/epg/migrations/0009_alter_epgsource_created_at_and_more.py

diff --git a/apps/epg/migrations/0008_epgsource_created_at_epgsource_updated_at.py b/apps/epg/migrations/0008_epgsource_created_at_epgsource_updated_at.py
new file mode 100644
index 00000000..1dcfeed0
--- /dev/null
+++ b/apps/epg/migrations/0008_epgsource_created_at_epgsource_updated_at.py
@@ -0,0 +1,24 @@
+# Generated by Django 5.1.6 on 2025-04-07 16:29
+
+import django.utils.timezone
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('epg', '0007_populate_periodic_tasks'),
+    ]
+
+    operations = [
+        migrations.AddField(
+            model_name='epgsource',
+            name='created_at',
+            field=models.DateTimeField(default=django.utils.timezone.now, help_text='Time when this source was created'),
+        ),
+        migrations.AddField(
+            model_name='epgsource',
+            name='updated_at',
+            field=models.DateTimeField(default=django.utils.timezone.now, help_text='Time when this source was last updated'),
+        ),
+    ]
diff --git a/apps/epg/migrations/0009_alter_epgsource_created_at_and_more.py b/apps/epg/migrations/0009_alter_epgsource_created_at_and_more.py
new file mode 100644
index 00000000..cb8088eb
--- /dev/null
+++ b/apps/epg/migrations/0009_alter_epgsource_created_at_and_more.py
@@ -0,0 +1,23 @@
+# Generated by Django 5.1.6 on 2025-04-07 16:29
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('epg', '0008_epgsource_created_at_epgsource_updated_at'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='epgsource',
+            name='created_at',
+            field=models.DateTimeField(auto_now_add=True, help_text='Time when this source was created'),
+        ),
+        migrations.AlterField(
+            model_name='epgsource',
+            name='updated_at',
+            field=models.DateTimeField(auto_now=True, help_text='Time when this source was last updated'),
+        ),
+    ]
diff --git a/apps/epg/models.py b/apps/epg/models.py
index 3f9b018d..09986bfe 100644
--- a/apps/epg/models.py
+++ b/apps/epg/models.py
@@ -17,6 +17,14 @@ class EPGSource(models.Model):
     refresh_task = models.ForeignKey(
         PeriodicTask, on_delete=models.SET_NULL, null=True, blank=True
    )
+    created_at = models.DateTimeField(
+        auto_now_add=True,
+        help_text="Time when this source was created"
+    )
+    updated_at = models.DateTimeField(
+        auto_now=True,
+        help_text="Time when this source was last updated"
+    )
 
     def __str__(self):
         return self.name
diff --git a/apps/epg/serializers.py b/apps/epg/serializers.py
index e4a2a4b3..e4ff932e 100644
--- a/apps/epg/serializers.py
+++ b/apps/epg/serializers.py
@@ -4,10 +4,11 @@ from apps.channels.models import Channel
 
 class EPGSourceSerializer(serializers.ModelSerializer):
     epg_data_ids = serializers.SerializerMethodField()
 
     class Meta:
         model = EPGSource
-        fields = ['id', 'name', 'source_type', 'url', 'api_key', 'is_active', 'epg_data_ids', 'refresh_interval']
+        fields = ['id', 'name', 'source_type', 'url', 'api_key', 'is_active', 'epg_data_ids', 'refresh_interval', 'created_at', 'updated_at']
+        read_only_fields = ['created_at', 'updated_at']
 
     def get_epg_data_ids(self, obj):
         return list(obj.epgs.values_list('id', flat=True))
diff --git a/apps/epg/tasks.py b/apps/epg/tasks.py
index 3b84df6d..33e981a6 100644
--- a/apps/epg/tasks.py
+++ b/apps/epg/tasks.py
@@ -50,6 +50,8 @@ def refresh_epg_data(source_id):
     elif source.source_type == 'schedules_direct':
         fetch_schedules_direct(source)
 
+    source.save(update_fields=['updated_at'])
+
     release_task_lock('refresh_epg_data', source_id)
 
 def fetch_xmltv(source):
diff --git a/apps/m3u/serializers.py b/apps/m3u/serializers.py
index e7dbfcea..d3948145 100644
--- a/apps/m3u/serializers.py
+++ b/apps/m3u/serializers.py
@@ -56,7 +56,7 @@ class M3UAccountSerializer(serializers.ModelSerializer):
         required=True
     )
     profiles = M3UAccountProfileSerializer(many=True, read_only=True)
-    read_only_fields = ['locked']
+    read_only_fields = ['locked', 'created_at', 'updated_at']
 
     # channel_groups = serializers.SerializerMethodField()
     channel_groups = ChannelGroupM3UAccountSerializer(source='channel_group', many=True, required=False)
diff --git a/apps/m3u/tasks.py b/apps/m3u/tasks.py
index e2de2af3..82dd7864 100644
--- a/apps/m3u/tasks.py
+++ b/apps/m3u/tasks.py
@@ -430,6 +430,7 @@ def refresh_single_m3u_account(account_id):
 
     # Calculate elapsed time
     elapsed_time = end_time - start_time
+    account.save(update_fields=['updated_at'])
 
     print(f"Function took {elapsed_time} seconds to execute.")
 
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 9a576eeb..e3f8a165 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -48,7 +48,7 @@ ENV PATH="/dispatcharrpy/bin:$PATH" \
 # Copy the virtual environment and application from the builder stage
 COPY --from=builder /dispatcharrpy /dispatcharrpy
 COPY --from=builder /app /app
-COPY --from=frontend-builder /app/frontend /app/frontend
+COPY --from=frontend-builder 
/app/frontend/dist /app/frontend/dist
 
 # Run collectstatic after frontend assets are copied
 RUN cd /app && python manage.py collectstatic --noinput
diff --git a/frontend/src/components/forms/ChannelGroup.jsx b/frontend/src/components/forms/ChannelGroup.jsx
index 93741ef1..2429d1d9 100644
--- a/frontend/src/components/forms/ChannelGroup.jsx
+++ b/frontend/src/components/forms/ChannelGroup.jsx
@@ -1,40 +1,31 @@
 // Modal.js
-import React, { useEffect } from 'react';
-import { useFormik } from 'formik';
-import * as Yup from 'yup';
+import React from 'react';
 import API from '../../api';
 import { Flex, TextInput, Button, Modal } from '@mantine/core';
+import { isNotEmpty, useForm } from '@mantine/form';
 
 const ChannelGroup = ({ channelGroup = null, isOpen, onClose }) => {
-  const formik = useFormik({
+  const form = useForm({
+    mode: 'uncontrolled',
     initialValues: {
-      name: '',
+      name: channelGroup ? channelGroup.name : '',
     },
-    validationSchema: Yup.object({
-      name: Yup.string().required('Name is required'),
-    }),
-    onSubmit: async (values, { setSubmitting, resetForm }) => {
-      if (channelGroup?.id) {
-        await API.updateChannelGroup({ id: channelGroup.id, ...values });
-      } else {
-        await API.addChannelGroup(values);
-      }
-      resetForm();
-      setSubmitting(false);
-      onClose();
+    validate: {
+      name: isNotEmpty('Specify a name'),
    },
  });
 
-  useEffect(() => {
+  const onSubmit = async () => {
+    const values = form.getValues();
     if (channelGroup) {
-      formik.setValues({
-        name: channelGroup.name,
-      });
+      await API.updateChannelGroup({ id: channelGroup.id, ...values });
     } else {
-      formik.resetForm();
+      await API.addChannelGroup(values);
     }
-  }, [channelGroup]);
+
+    form.reset();
+    onClose();
+  };
 
   if (!isOpen) {
     return <></>;
   }
 
   return (
     
-      
+ @@ -57,7 +47,7 @@ const ChannelGroup = ({ channelGroup = null, isOpen, onClose }) => { type="submit" variant="contained" color="primary" - disabled={formik.isSubmitting} + disabled={form.submitting} size="small" > Submit diff --git a/frontend/src/components/forms/Recording.jsx b/frontend/src/components/forms/Recording.jsx index db19e4a1..a4aaf266 100644 --- a/frontend/src/components/forms/Recording.jsx +++ b/frontend/src/components/forms/Recording.jsx @@ -1,22 +1,7 @@ // Modal.js -import React, { useState, useEffect } from 'react'; -import { useFormik } from 'formik'; -import * as Yup from 'yup'; +import React from 'react'; import API from '../../api'; -import useEPGsStore from '../../store/epgs'; -import { - LoadingOverlay, - TextInput, - Button, - Checkbox, - Modal, - Flex, - NativeSelect, - NumberInput, - Space, - Select, - Alert, -} from '@mantine/core'; +import { Button, Modal, Flex, Select, Alert } from '@mantine/core'; import useChannelsStore from '../../store/channels'; import { DateTimePicker } from '@mantine/dates'; import { CircleAlert } from 'lucide-react'; @@ -61,6 +46,8 @@ const DVR = ({ recording = null, channel = null, isOpen, onClose }) => { ...values, channel: channel_id, }); + + form.reset(); onClose(); }; @@ -110,7 +97,12 @@ const DVR = ({ recording = null, channel = null, isOpen, onClose }) => { /> - diff --git a/frontend/src/components/tables/ChannelsTable.jsx b/frontend/src/components/tables/ChannelsTable.jsx index 599e7c4f..d2520cf7 100644 --- a/frontend/src/components/tables/ChannelsTable.jsx +++ b/frontend/src/components/tables/ChannelsTable.jsx @@ -892,7 +892,7 @@ const ChannelsTable = ({}) => { - + diff --git a/frontend/src/components/tables/EPGsTable.jsx b/frontend/src/components/tables/EPGsTable.jsx index b3ad03f7..12d542ae 100644 --- a/frontend/src/components/tables/EPGsTable.jsx +++ b/frontend/src/components/tables/EPGsTable.jsx @@ -17,6 +17,7 @@ import { import { notifications } from '@mantine/notifications'; import { IconSquarePlus } from '@tabler/icons-react'; import { RefreshCcw, SquareMinus, SquarePen } from 'lucide-react'; +import dayjs from 'dayjs'; const EPGsTable = () => { const [epg, setEPG] = useState(null); @@ -44,6 +45,11 @@ const EPGsTable = () => { accessorKey: 'max_streams', enableSorting: false, }, + { + header: 'Updated', + accessorFn: (row) => dayjs(row.updated_at).format('MMMM D, YYYY h:mma'), + enableSorting: false, + }, ], [] ); diff --git a/frontend/src/components/tables/M3UsTable.jsx b/frontend/src/components/tables/M3UsTable.jsx index f079c98a..95ba9e93 100644 --- a/frontend/src/components/tables/M3UsTable.jsx +++ b/frontend/src/components/tables/M3UsTable.jsx @@ -16,6 +16,7 @@ import { } from '@mantine/core'; import { SquareMinus, SquarePen, RefreshCcw, Check, X } from 'lucide-react'; import { IconSquarePlus } from '@tabler/icons-react'; // Import custom icons +import dayjs from 'dayjs'; const M3UTable = () => { const [playlist, setPlaylist] = useState(null); @@ -70,6 +71,11 @@ const M3UTable = () => { ), }, + { + header: 'Updated', + accessorFn: (row) => dayjs(row.updated_at).format('MMMM D, YYYY h:mma'), + enableSorting: false, + }, ], [] ); diff --git a/frontend/src/components/tables/StreamsTable.jsx b/frontend/src/components/tables/StreamsTable.jsx index 9e431d17..4cfecab0 100644 --- a/frontend/src/components/tables/StreamsTable.jsx +++ b/frontend/src/components/tables/StreamsTable.jsx @@ -503,7 +503,7 @@ const StreamsTable = ({}) => { <> addStreamToChannel(row.original.id)} @@ -522,7 +522,7 @@ const StreamsTable = ({}) => { 
createChannelFromStream(row.original)} @@ -533,7 +533,7 @@ const StreamsTable = ({}) => { - + From e2850441aeb681c540675dbcb88ebc6422e78286 Mon Sep 17 00:00:00 2001 From: dekzter Date: Mon, 7 Apr 2025 15:01:44 -0400 Subject: [PATCH 4/5] basic DVR UI, custom properties for recordings --- apps/channels/models.py | 1 + frontend/src/App.jsx | 2 + frontend/src/api.js | 26 ++++++ frontend/src/components/Sidebar.jsx | 2 + frontend/src/pages/DVR.jsx | 135 ++++++++++++++++++++++++++++ frontend/src/pages/Guide.jsx | 66 +++++++++++--- frontend/src/store/auth.jsx | 1 + frontend/src/store/channels.jsx | 13 +++ 8 files changed, 235 insertions(+), 11 deletions(-) create mode 100644 frontend/src/pages/DVR.jsx diff --git a/apps/channels/models.py b/apps/channels/models.py index abcecb77..9f1b641e 100644 --- a/apps/channels/models.py +++ b/apps/channels/models.py @@ -410,6 +410,7 @@ class Recording(models.Model): start_time = models.DateTimeField() end_time = models.DateTimeField() task_id = models.CharField(max_length=255, null=True, blank=True) + custom_properties = models.TextField(null=True, blank=True) def __str__(self): return f"{self.channel.name} - {self.start_time} to {self.end_time}" diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx index 4fa2b9a9..a641a53b 100644 --- a/frontend/src/App.jsx +++ b/frontend/src/App.jsx @@ -13,6 +13,7 @@ import M3U from './pages/M3U'; import EPG from './pages/EPG'; import Guide from './pages/Guide'; import Stats from './pages/Stats'; +import DVR from './pages/DVR'; import Settings from './pages/Settings'; import StreamProfiles from './pages/StreamProfiles'; import useAuthStore from './store/auth'; @@ -127,6 +128,7 @@ const App = () => { element={} /> } /> + } /> } /> } /> diff --git a/frontend/src/api.js b/frontend/src/api.js index 9e1dfd55..9d8bf746 100644 --- a/frontend/src/api.js +++ b/frontend/src/api.js @@ -1035,6 +1035,19 @@ export default class API { .updateProfileChannels(channelIds, profileId, enabled); } + static async getRecordings() { + const response = await fetch(`${host}/api/channels/recordings/`, { + headers: { + Authorization: `Bearer ${await API.getAuthToken()}`, + 'Content-Type': 'application/json', + }, + }); + + const retval = await response.json(); + + return retval; + } + static async createRecording(values) { const response = await fetch(`${host}/api/channels/recordings/`, { method: 'POST', @@ -1046,7 +1059,20 @@ export default class API { }); const retval = await response.json(); + useChannelsStore.getState().fetchRecordings(); return retval; } + + static async deleteRecording(id) { + const response = await fetch(`${host}/api/channels/recordings/${id}/`, { + method: 'DELETE', + headers: { + Authorization: `Bearer ${await API.getAuthToken()}`, + 'Content-Type': 'application/json', + }, + }); + + useChannelsStore.getState().fetchRecordings(); + } } diff --git a/frontend/src/components/Sidebar.jsx b/frontend/src/components/Sidebar.jsx index 157381f0..00fb4045 100644 --- a/frontend/src/components/Sidebar.jsx +++ b/frontend/src/components/Sidebar.jsx @@ -9,6 +9,7 @@ import { Settings as LucideSettings, Copy, ChartLine, + Video, } from 'lucide-react'; import { Avatar, @@ -80,6 +81,7 @@ const Sidebar = ({ collapsed, toggleDrawer, drawerWidth, miniDrawerWidth }) => { path: '/stream-profiles', }, { label: 'TV Guide', icon: , path: '/guide' }, + { label: 'DVR', icon: