diff --git a/.github/workflows/base-image.yml b/.github/workflows/base-image.yml index 1da33d4f..f926d892 100644 --- a/.github/workflows/base-image.yml +++ b/.github/workflows/base-image.yml @@ -2,42 +2,37 @@ name: Base Image Build on: push: - branches: [ main, dev ] + branches: [main, dev] paths: - 'docker/DispatcharrBase' - '.github/workflows/base-image.yml' - 'requirements.txt' pull_request: - branches: [ main, dev ] + branches: [main, dev] paths: - 'docker/DispatcharrBase' - '.github/workflows/base-image.yml' - 'requirements.txt' - workflow_dispatch: # Allow manual triggering + workflow_dispatch: # Allow manual triggering permissions: - contents: write # For managing releases and pushing tags - packages: write # For publishing to GitHub Container Registry + contents: write # For managing releases and pushing tags + packages: write # For publishing to GitHub Container Registry jobs: - build-base-image: - runs-on: ubuntu-latest + prepare: + runs-on: ubuntu-24.04 + outputs: + repo_owner: ${{ steps.meta.outputs.repo_owner }} + repo_name: ${{ steps.meta.outputs.repo_name }} + branch_tag: ${{ steps.meta.outputs.branch_tag }} + timestamp: ${{ steps.timestamp.outputs.timestamp }} steps: - uses: actions/checkout@v3 with: fetch-depth: 0 token: ${{ secrets.GITHUB_TOKEN }} - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - - name: Login to GitHub Container Registry - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Generate timestamp for build id: timestamp run: | @@ -66,13 +61,111 @@ jobs: echo "branch_tag=base-${BRANCH}" >> $GITHUB_OUTPUT fi + docker: + needs: [prepare] + strategy: + fail-fast: false + matrix: + platform: [amd64, arm64] + include: + - platform: amd64 + runner: ubuntu-24.04 + - platform: arm64 + runner: ubuntu-24.04-arm + runs-on: ${{ matrix.runner }} + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Configure Git + run: | + git config user.name "GitHub Actions" + git config user.email "actions@github.com" + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + registry: docker.io + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Build and push Docker base image uses: docker/build-push-action@v4 with: context: . 
file: ./docker/DispatcharrBase - push: true - platforms: linux/amd64,linux/arm64 + push: ${{ github.event_name != 'pull_request' }} + platforms: linux/${{ matrix.platform }} tags: | - ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:base - ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:base-${{ steps.timestamp.outputs.timestamp }} + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }} + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }} + build-args: | + REPO_OWNER=${{ needs.prepare.outputs.repo_owner }} + REPO_NAME=${{ needs.prepare.outputs.repo_name }} + BRANCH=${{ github.ref_name }} + REPO_URL=https://github.com/${{ github.repository }} + TIMESTAMP=${{ needs.prepare.outputs.timestamp }} + + create-manifest: + needs: [prepare, docker] + runs-on: ubuntu-24.04 + if: ${{ github.event_name != 'pull_request' }} + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + registry: docker.io + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Create multi-arch manifest tags + run: | + set -euo pipefail + OWNER=${{ needs.prepare.outputs.repo_owner }} + REPO=${{ needs.prepare.outputs.repo_name }} + BRANCH_TAG=${{ needs.prepare.outputs.branch_tag }} + TIMESTAMP=${{ needs.prepare.outputs.timestamp }} + + echo "Creating multi-arch manifest for ${OWNER}/${REPO}" + + # GitHub Container Registry manifests + # branch tag (e.g. base or base-dev) + docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \ + ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-arm64 + + # branch + timestamp tag + docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \ + ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-arm64 + + # Docker Hub manifests + # branch tag (e.g. 
base or base-dev) + docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \ + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-arm64 + + # branch + timestamp tag + docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \ + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-arm64 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4a60ac49..5da4118c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,19 +2,84 @@ name: CI Pipeline on: push: - branches: [ dev ] + branches: [dev] pull_request: - branches: [ dev ] - workflow_dispatch: # Allow manual triggering + branches: [dev] + workflow_dispatch: -# Add explicit permissions for the workflow permissions: - contents: write # For managing releases and pushing tags - packages: write # For publishing to GitHub Container Registry + contents: write + packages: write jobs: - build: - runs-on: ubuntu-latest + prepare: + runs-on: ubuntu-24.04 + # compute a single timestamp, version, and repo metadata for the entire workflow + outputs: + repo_owner: ${{ steps.meta.outputs.repo_owner }} + repo_name: ${{ steps.meta.outputs.repo_name }} + branch_tag: ${{ steps.meta.outputs.branch_tag }} + version: ${{ steps.version.outputs.version }} + timestamp: ${{ steps.timestamp.outputs.timestamp }} + + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Generate timestamp for build + id: timestamp + run: | + TIMESTAMP=$(date -u +'%Y%m%d%H%M%S') + echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT + + - name: Extract version info + id: version + run: | + VERSION=$(python -c "import version; print(version.__version__)") + echo "version=${VERSION}" >> $GITHUB_OUTPUT + + - name: Set repository and image metadata + id: meta + run: | + REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]') + echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT + + REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]') + echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT + + if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then + echo "branch_tag=latest" >> $GITHUB_OUTPUT + echo "is_main=true" >> $GITHUB_OUTPUT + elif [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then + echo "branch_tag=dev" >> $GITHUB_OUTPUT + echo "is_main=false" >> $GITHUB_OUTPUT + else + BRANCH=$(echo "${{ github.ref }}" | sed 's/refs\/heads\///' | sed 's/[^a-zA-Z0-9]/-/g') + echo "branch_tag=${BRANCH}" >> $GITHUB_OUTPUT + echo "is_main=false" >> $GITHUB_OUTPUT + fi + + if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then + echo "is_fork=true" >> $GITHUB_OUTPUT + else + echo "is_fork=false" >> $GITHUB_OUTPUT + fi + + docker: + needs: [prepare] + strategy: + fail-fast: false + matrix: + platform: [amd64, arm64] + include: + - platform: amd64 + runner: ubuntu-24.04 + - platform: arm64 + runner: ubuntu-24.04-arm + runs-on: ${{ matrix.runner }} + # no per-job outputs here; shared metadata comes from the `prepare` job steps: - uses: actions/checkout@v3 with: @@ -45,66 +110,85 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Generate timestamp for build - id: timestamp - run: | - 
TIMESTAMP=$(date -u +'%Y%m%d%H%M%S') - echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + registry: docker.io + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Extract version info - id: version - run: | - VERSION=$(python -c "import version; print(version.__version__)") - echo "version=${VERSION}" >> $GITHUB_OUTPUT - echo "sha_short=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT - - - name: Set repository and image metadata - id: meta - run: | - # Get lowercase repository owner - REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]') - echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT - - # Get repository name - REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]') - echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT - - # Determine branch name - if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then - echo "branch_tag=latest" >> $GITHUB_OUTPUT - echo "is_main=true" >> $GITHUB_OUTPUT - elif [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then - echo "branch_tag=dev" >> $GITHUB_OUTPUT - echo "is_main=false" >> $GITHUB_OUTPUT - else - # For other branches, use the branch name - BRANCH=$(echo "${{ github.ref }}" | sed 's/refs\/heads\///' | sed 's/[^a-zA-Z0-9]/-/g') - echo "branch_tag=${BRANCH}" >> $GITHUB_OUTPUT - echo "is_main=false" >> $GITHUB_OUTPUT - fi - - # Determine if this is from a fork - if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then - echo "is_fork=true" >> $GITHUB_OUTPUT - else - echo "is_fork=false" >> $GITHUB_OUTPUT - fi + # use metadata from the prepare job - name: Build and push Docker image uses: docker/build-push-action@v4 with: context: . push: ${{ github.event_name != 'pull_request' }} - platforms: linux/amd64,linux/arm64 + # Build only the platform for this matrix job to avoid running amd64 + # stages under qemu on an arm64 runner (and vice-versa). This makes + # the matrix runner's platform the one built by buildx. 
+ platforms: linux/${{ matrix.platform }} + # push arch-specific tags from each matrix job (they will be combined + # into a multi-arch manifest in a follow-up job) tags: | - ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.meta.outputs.branch_tag }} - ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.version.outputs.version }}-${{ steps.timestamp.outputs.timestamp }} - ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.version.outputs.sha_short }} + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }} + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }} build-args: | - REPO_OWNER=${{ steps.meta.outputs.repo_owner }} - REPO_NAME=${{ steps.meta.outputs.repo_name }} + REPO_OWNER=${{ needs.prepare.outputs.repo_owner }} + REPO_NAME=${{ needs.prepare.outputs.repo_name }} BASE_TAG=base BRANCH=${{ github.ref_name }} REPO_URL=https://github.com/${{ github.repository }} - TIMESTAMP=${{ steps.timestamp.outputs.timestamp }} + TIMESTAMP=${{ needs.prepare.outputs.timestamp }} file: ./docker/Dockerfile + + create-manifest: + # wait for prepare and all matrix builds to finish + needs: [prepare, docker] + runs-on: ubuntu-24.04 + if: ${{ github.event_name != 'pull_request' }} + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + registry: docker.io + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Create multi-arch manifest tags + run: | + set -euo pipefail + OWNER=${{ needs.prepare.outputs.repo_owner }} + REPO=${{ needs.prepare.outputs.repo_name }} + BRANCH_TAG=${{ needs.prepare.outputs.branch_tag }} + VERSION=${{ needs.prepare.outputs.version }} + TIMESTAMP=${{ needs.prepare.outputs.timestamp }} + + echo "Creating multi-arch manifest for ${OWNER}/${REPO}" + + # branch tag (e.g. 
latest or dev) + docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \ + ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-arm64 + + # version + timestamp tag + docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP} \ + ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP}-arm64 + + # also create Docker Hub manifests using the same username + docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \ + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-arm64 + + docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP} \ + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP}-arm64 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 52c2ada2..27356c9a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -15,16 +15,21 @@ on: # Add explicit permissions for the workflow permissions: - contents: write # For managing releases and pushing tags - packages: write # For publishing to GitHub Container Registry + contents: write # For managing releases and pushing tags + packages: write # For publishing to GitHub Container Registry jobs: - release: - runs-on: ubuntu-latest + prepare: + runs-on: ubuntu-24.04 + outputs: + new_version: ${{ steps.update_version.outputs.new_version }} + repo_owner: ${{ steps.meta.outputs.repo_owner }} + repo_name: ${{ steps.meta.outputs.repo_name }} steps: - uses: actions/checkout@v3 with: fetch-depth: 0 + token: ${{ secrets.GITHUB_TOKEN }} - name: Configure Git run: | @@ -38,14 +43,45 @@ jobs: NEW_VERSION=$(python -c "import version; print(f'{version.__version__}')") echo "new_version=${NEW_VERSION}" >> $GITHUB_OUTPUT - - name: Set lowercase repo owner - id: repo_owner + - name: Set repository metadata + id: meta run: | REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]') - echo "lowercase=${REPO_OWNER}" >> $GITHUB_OUTPUT + echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT - - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]') + echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT + + - name: Commit and Tag + run: | + git add version.py + git commit -m "Release v${{ steps.update_version.outputs.new_version }}" + git tag -a "v${{ steps.update_version.outputs.new_version }}" -m "Release v${{ steps.update_version.outputs.new_version }}" + git push origin main --tags + + docker: + needs: [prepare] + strategy: + fail-fast: false + matrix: + platform: [amd64, arm64] + include: + - platform: amd64 + runner: ubuntu-24.04 + - platform: arm64 + runner: ubuntu-24.04-arm + runs-on: ${{ matrix.runner }} + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + token: ${{ secrets.GITHUB_TOKEN }} + ref: main + + - name: Configure Git + run: | + git config user.name "GitHub Actions" + git config user.email "actions@github.com" - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 @@ -57,36 +93,88 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Commit and Tag - run: | - git add version.py - git commit -m 
"Release v${{ steps.update_version.outputs.new_version }}" - git tag -a "v${{ steps.update_version.outputs.new_version }}" -m "Release v${{ steps.update_version.outputs.new_version }}" - git push origin main --tags + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + registry: docker.io + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Build and Push Release Image + - name: Build and push Docker image uses: docker/build-push-action@v4 with: context: . push: true - platforms: linux/amd64,linux/arm64, #linux/arm/v7 # Multi-arch support for releases + platforms: linux/${{ matrix.platform }} tags: | - ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest - ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }} - ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest-amd64 - ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest-arm64 - ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }}-amd64 - ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }}-arm64 + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:latest-${{ matrix.platform }} + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.new_version }}-${{ matrix.platform }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:latest-${{ matrix.platform }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.new_version }}-${{ matrix.platform }} build-args: | + REPO_OWNER=${{ needs.prepare.outputs.repo_owner }} + REPO_NAME=${{ needs.prepare.outputs.repo_name }} BRANCH=${{ github.ref_name }} REPO_URL=https://github.com/${{ github.repository }} file: ./docker/Dockerfile + create-manifest: + needs: [prepare, docker] + runs-on: ubuntu-24.04 + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + registry: docker.io + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Create multi-arch manifest tags + run: | + set -euo pipefail + OWNER=${{ needs.prepare.outputs.repo_owner }} + REPO=${{ needs.prepare.outputs.repo_name }} + VERSION=${{ needs.prepare.outputs.new_version }} + + echo "Creating multi-arch manifest for ${OWNER}/${REPO}" + + # GitHub Container Registry manifests + # latest tag + docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:latest \ + ghcr.io/${OWNER}/${REPO}:latest-amd64 ghcr.io/${OWNER}/${REPO}:latest-arm64 + + # version tag + docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${VERSION} \ + ghcr.io/${OWNER}/${REPO}:${VERSION}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-arm64 + + # Docker Hub manifests + # latest tag + docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest \ + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest-arm64 + + # version tag + docker buildx imagetools create --tag docker.io/${{ 
secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION} \ + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-arm64 + + create-release: + needs: [prepare, create-manifest] + runs-on: ubuntu-24.04 + steps: - name: Create GitHub Release uses: softprops/action-gh-release@v1 with: - tag_name: v${{ steps.update_version.outputs.new_version }} - name: Release v${{ steps.update_version.outputs.new_version }} + tag_name: v${{ needs.prepare.outputs.new_version }} + name: Release v${{ needs.prepare.outputs.new_version }} draft: false prerelease: false token: ${{ secrets.GITHUB_TOKEN }} diff --git a/apps/channels/api_urls.py b/apps/channels/api_urls.py index 7cfdc1b1..7999abd9 100644 --- a/apps/channels/api_urls.py +++ b/apps/channels/api_urls.py @@ -13,12 +13,14 @@ from .api_views import ( UpdateChannelMembershipAPIView, BulkUpdateChannelMembershipAPIView, RecordingViewSet, + RecurringRecordingRuleViewSet, GetChannelStreamsAPIView, SeriesRulesAPIView, DeleteSeriesRuleAPIView, EvaluateSeriesRulesAPIView, BulkRemoveSeriesRecordingsAPIView, BulkDeleteUpcomingRecordingsAPIView, + ComskipConfigAPIView, ) app_name = 'channels' # for DRF routing @@ -30,6 +32,7 @@ router.register(r'channels', ChannelViewSet, basename='channel') router.register(r'logos', LogoViewSet, basename='logo') router.register(r'profiles', ChannelProfileViewSet, basename='profile') router.register(r'recordings', RecordingViewSet, basename='recording') +router.register(r'recurring-rules', RecurringRecordingRuleViewSet, basename='recurring-rule') urlpatterns = [ # Bulk delete is a single APIView, not a ViewSet @@ -46,6 +49,7 @@ urlpatterns = [ path('series-rules/bulk-remove/', BulkRemoveSeriesRecordingsAPIView.as_view(), name='bulk_remove_series_recordings'), path('series-rules//', DeleteSeriesRuleAPIView.as_view(), name='delete_series_rule'), path('recordings/bulk-delete-upcoming/', BulkDeleteUpcomingRecordingsAPIView.as_view(), name='bulk_delete_upcoming_recordings'), + path('dvr/comskip-config/', ComskipConfigAPIView.as_view(), name='comskip_config'), ] urlpatterns += router.urls diff --git a/apps/channels/api_views.py b/apps/channels/api_views.py index 92755252..862de7f9 100644 --- a/apps/channels/api_views.py +++ b/apps/channels/api_views.py @@ -28,6 +28,7 @@ from .models import ( ChannelProfile, ChannelProfileMembership, Recording, + RecurringRecordingRule, ) from .serializers import ( StreamSerializer, @@ -38,8 +39,17 @@ from .serializers import ( BulkChannelProfileMembershipSerializer, ChannelProfileSerializer, RecordingSerializer, + RecurringRecordingRuleSerializer, +) +from .tasks import ( + match_epg_channels, + evaluate_series_rules, + evaluate_series_rules_impl, + match_single_channel_epg, + match_selected_channels_epg, + sync_recurring_rule_impl, + purge_recurring_rule_impl, ) -from .tasks import match_epg_channels, evaluate_series_rules, evaluate_series_rules_impl import django_filters from django_filters.rest_framework import DjangoFilterBackend from rest_framework.filters import SearchFilter, OrderingFilter @@ -49,10 +59,12 @@ from django.db.models import Q from django.http import StreamingHttpResponse, FileResponse, Http404 from django.utils import timezone import mimetypes +from django.conf import settings from rest_framework.pagination import PageNumberPagination + logger = logging.getLogger(__name__) @@ -493,6 +505,99 @@ class ChannelViewSet(viewsets.ModelViewSet): "channels": serialized_channels }) + @action(detail=False, 
methods=["post"], url_path="set-names-from-epg") + def set_names_from_epg(self, request): + """ + Trigger a Celery task to set channel names from EPG data + """ + from .tasks import set_channels_names_from_epg + + data = request.data + channel_ids = data.get("channel_ids", []) + + if not channel_ids: + return Response( + {"error": "channel_ids is required"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + if not isinstance(channel_ids, list): + return Response( + {"error": "channel_ids must be a list"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Start the Celery task + task = set_channels_names_from_epg.delay(channel_ids) + + return Response({ + "message": f"Started EPG name setting task for {len(channel_ids)} channels", + "task_id": task.id, + "channel_count": len(channel_ids) + }) + + @action(detail=False, methods=["post"], url_path="set-logos-from-epg") + def set_logos_from_epg(self, request): + """ + Trigger a Celery task to set channel logos from EPG data + """ + from .tasks import set_channels_logos_from_epg + + data = request.data + channel_ids = data.get("channel_ids", []) + + if not channel_ids: + return Response( + {"error": "channel_ids is required"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + if not isinstance(channel_ids, list): + return Response( + {"error": "channel_ids must be a list"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Start the Celery task + task = set_channels_logos_from_epg.delay(channel_ids) + + return Response({ + "message": f"Started EPG logo setting task for {len(channel_ids)} channels", + "task_id": task.id, + "channel_count": len(channel_ids) + }) + + @action(detail=False, methods=["post"], url_path="set-tvg-ids-from-epg") + def set_tvg_ids_from_epg(self, request): + """ + Trigger a Celery task to set channel TVG-IDs from EPG data + """ + from .tasks import set_channels_tvg_ids_from_epg + + data = request.data + channel_ids = data.get("channel_ids", []) + + if not channel_ids: + return Response( + {"error": "channel_ids is required"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + if not isinstance(channel_ids, list): + return Response( + {"error": "channel_ids must be a list"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Start the Celery task + task = set_channels_tvg_ids_from_epg.delay(channel_ids) + + return Response({ + "message": f"Started EPG TVG-ID setting task for {len(channel_ids)} channels", + "task_id": task.id, + "channel_count": len(channel_ids) + }) + @action(detail=False, methods=["get"], url_path="ids") def get_ids(self, request, *args, **kwargs): # Get the filtered queryset @@ -642,10 +747,14 @@ class ChannelViewSet(viewsets.ModelViewSet): channel_data["channel_group_id"] = channel_group.id if stream.logo_url: - logo, _ = Logo.objects.get_or_create( - url=stream.logo_url, defaults={"name": stream.name or stream.tvg_id} - ) - channel_data["logo_id"] = logo.id + # Import validation function + from apps.channels.tasks import validate_logo_url + validated_logo_url = validate_logo_url(stream.logo_url) + if validated_logo_url: + logo, _ = Logo.objects.get_or_create( + url=validated_logo_url, defaults={"name": stream.name or stream.tvg_id} + ) + channel_data["logo_id"] = logo.id # Attempt to find existing EPGs with the same tvg-id epgs = EPGData.objects.filter(tvg_id=stream.tvg_id) @@ -779,16 +888,65 @@ class ChannelViewSet(viewsets.ModelViewSet): # ───────────────────────────────────────────────────────── @swagger_auto_schema( method="post", - operation_description="Kick off a Celery task that tries to fuzzy-match channels with 
EPG data.", + operation_description="Kick off a Celery task that tries to fuzzy-match channels with EPG data. If channel_ids are provided, only those channels will be processed.", + request_body=openapi.Schema( + type=openapi.TYPE_OBJECT, + properties={ + 'channel_ids': openapi.Schema( + type=openapi.TYPE_ARRAY, + items=openapi.Schema(type=openapi.TYPE_INTEGER), + description='List of channel IDs to process. If empty or not provided, all channels without EPG will be processed.' + ) + } + ), responses={202: "EPG matching task initiated"}, ) @action(detail=False, methods=["post"], url_path="match-epg") def match_epg(self, request): - match_epg_channels.delay() + # Get channel IDs from request body if provided + channel_ids = request.data.get('channel_ids', []) + + if channel_ids: + # Process only selected channels + from .tasks import match_selected_channels_epg + match_selected_channels_epg.delay(channel_ids) + message = f"EPG matching task initiated for {len(channel_ids)} selected channel(s)." + else: + # Process all channels without EPG (original behavior) + match_epg_channels.delay() + message = "EPG matching task initiated for all channels without EPG." + return Response( - {"message": "EPG matching task initiated."}, status=status.HTTP_202_ACCEPTED + {"message": message}, status=status.HTTP_202_ACCEPTED ) + @swagger_auto_schema( + method="post", + operation_description="Try to auto-match this specific channel with EPG data.", + responses={200: "EPG matching completed", 202: "EPG matching task initiated"}, + ) + @action(detail=True, methods=["post"], url_path="match-epg") + def match_channel_epg(self, request, pk=None): + channel = self.get_object() + + # Import the matching logic + from apps.channels.tasks import match_single_channel_epg + + try: + # Try to match this specific channel - call synchronously for immediate response + result = match_single_channel_epg.apply_async(args=[channel.id]).get(timeout=30) + + # Refresh the channel from DB to get any updates + channel.refresh_from_db() + + return Response({ + "message": result.get("message", "Channel matching completed"), + "matched": result.get("matched", False), + "channel": self.get_serializer(channel).data + }) + except Exception as e: + return Response({"error": str(e)}, status=400) + # ───────────────────────────────────────────────────────── # 7) Set EPG and Refresh # ───────────────────────────────────────────────────────── @@ -1542,6 +1700,41 @@ class BulkUpdateChannelMembershipAPIView(APIView): return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) +class RecurringRecordingRuleViewSet(viewsets.ModelViewSet): + queryset = RecurringRecordingRule.objects.all().select_related("channel") + serializer_class = RecurringRecordingRuleSerializer + + def get_permissions(self): + return [IsAdmin()] + + def perform_create(self, serializer): + rule = serializer.save() + try: + sync_recurring_rule_impl(rule.id, drop_existing=True) + except Exception as err: + logger.warning(f"Failed to initialize recurring rule {rule.id}: {err}") + return rule + + def perform_update(self, serializer): + rule = serializer.save() + try: + if rule.enabled: + sync_recurring_rule_impl(rule.id, drop_existing=True) + else: + purge_recurring_rule_impl(rule.id) + except Exception as err: + logger.warning(f"Failed to resync recurring rule {rule.id}: {err}") + return rule + + def perform_destroy(self, instance): + rule_id = instance.id + super().perform_destroy(instance) + try: + purge_recurring_rule_impl(rule_id) + except Exception as err: + 
logger.warning(f"Failed to purge recordings for rule {rule_id}: {err}") + + class RecordingViewSet(viewsets.ModelViewSet): queryset = Recording.objects.all() serializer_class = RecordingSerializer @@ -1721,6 +1914,49 @@ class RecordingViewSet(viewsets.ModelViewSet): return response +class ComskipConfigAPIView(APIView): + """Upload or inspect the custom comskip.ini used by DVR processing.""" + + parser_classes = [MultiPartParser, FormParser] + + def get_permissions(self): + return [IsAdmin()] + + def get(self, request): + path = CoreSettings.get_dvr_comskip_custom_path() + exists = bool(path and os.path.exists(path)) + return Response({"path": path, "exists": exists}) + + def post(self, request): + uploaded = request.FILES.get("file") or request.FILES.get("comskip_ini") + if not uploaded: + return Response({"error": "No file provided"}, status=status.HTTP_400_BAD_REQUEST) + + name = (uploaded.name or "").lower() + if not name.endswith(".ini"): + return Response({"error": "Only .ini files are allowed"}, status=status.HTTP_400_BAD_REQUEST) + + if uploaded.size and uploaded.size > 1024 * 1024: + return Response({"error": "File too large (limit 1MB)"}, status=status.HTTP_400_BAD_REQUEST) + + dest_dir = os.path.join(settings.MEDIA_ROOT, "comskip") + os.makedirs(dest_dir, exist_ok=True) + dest_path = os.path.join(dest_dir, "comskip.ini") + + try: + with open(dest_path, "wb") as dest: + for chunk in uploaded.chunks(): + dest.write(chunk) + except Exception as e: + logger.error(f"Failed to save uploaded comskip.ini: {e}") + return Response({"error": "Unable to save file"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR) + + # Persist path setting so DVR processing picks it up immediately + CoreSettings.set_dvr_comskip_custom_path(dest_path) + + return Response({"success": True, "path": dest_path, "exists": os.path.exists(dest_path)}) + + class BulkDeleteUpcomingRecordingsAPIView(APIView): """Delete all upcoming (future) recordings.""" def get_permissions(self): diff --git a/apps/channels/migrations/0026_recurringrecordingrule.py b/apps/channels/migrations/0026_recurringrecordingrule.py new file mode 100644 index 00000000..1b8cfdb8 --- /dev/null +++ b/apps/channels/migrations/0026_recurringrecordingrule.py @@ -0,0 +1,31 @@ +# Generated by Django 5.0.14 on 2025-09-18 14:56 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0025_alter_channelgroupm3uaccount_custom_properties_and_more'), + ] + + operations = [ + migrations.CreateModel( + name='RecurringRecordingRule', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('days_of_week', models.JSONField(default=list)), + ('start_time', models.TimeField()), + ('end_time', models.TimeField()), + ('enabled', models.BooleanField(default=True)), + ('name', models.CharField(blank=True, max_length=255)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('channel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recurring_rules', to='dispatcharr_channels.channel')), + ], + options={ + 'ordering': ['channel', 'start_time'], + }, + ), + ] diff --git a/apps/channels/migrations/0027_recurringrecordingrule_end_date_and_more.py b/apps/channels/migrations/0027_recurringrecordingrule_end_date_and_more.py new file mode 100644 index 00000000..8cdb9868 --- /dev/null +++ 
b/apps/channels/migrations/0027_recurringrecordingrule_end_date_and_more.py @@ -0,0 +1,23 @@ +# Generated by Django 5.2.4 on 2025-10-05 20:50 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0026_recurringrecordingrule'), + ] + + operations = [ + migrations.AddField( + model_name='recurringrecordingrule', + name='end_date', + field=models.DateField(blank=True, null=True), + ), + migrations.AddField( + model_name='recurringrecordingrule', + name='start_date', + field=models.DateField(blank=True, null=True), + ), + ] diff --git a/apps/channels/migrations/0028_channel_created_at_channel_updated_at.py b/apps/channels/migrations/0028_channel_created_at_channel_updated_at.py new file mode 100644 index 00000000..08c426b1 --- /dev/null +++ b/apps/channels/migrations/0028_channel_created_at_channel_updated_at.py @@ -0,0 +1,25 @@ +# Generated by Django 5.2.4 on 2025-10-06 22:55 + +import django.utils.timezone +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0027_recurringrecordingrule_end_date_and_more'), + ] + + operations = [ + migrations.AddField( + model_name='channel', + name='created_at', + field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, help_text='Timestamp when this channel was created'), + preserve_default=False, + ), + migrations.AddField( + model_name='channel', + name='updated_at', + field=models.DateTimeField(auto_now=True, help_text='Timestamp when this channel was last updated'), + ), + ] diff --git a/apps/channels/models.py b/apps/channels/models.py index af66178d..238bdb33 100644 --- a/apps/channels/models.py +++ b/apps/channels/models.py @@ -119,11 +119,11 @@ class Stream(models.Model): return self.name or self.url or f"Stream ID {self.id}" @classmethod - def generate_hash_key(cls, name, url, tvg_id, keys=None): + def generate_hash_key(cls, name, url, tvg_id, keys=None, m3u_id=None): if keys is None: keys = CoreSettings.get_m3u_hash_key().split(",") - stream_parts = {"name": name, "url": url, "tvg_id": tvg_id} + stream_parts = {"name": name, "url": url, "tvg_id": tvg_id, "m3u_id": m3u_id} hash_parts = {key: stream_parts[key] for key in keys if key in stream_parts} @@ -303,6 +303,15 @@ class Channel(models.Model): help_text="The M3U account that auto-created this channel" ) + created_at = models.DateTimeField( + auto_now_add=True, + help_text="Timestamp when this channel was created" + ) + updated_at = models.DateTimeField( + auto_now=True, + help_text="Timestamp when this channel was last updated" + ) + def clean(self): # Enforce unique channel_number within a given group existing = Channel.objects.filter( @@ -601,3 +610,35 @@ class Recording(models.Model): def __str__(self): return f"{self.channel.name} - {self.start_time} to {self.end_time}" + + +class RecurringRecordingRule(models.Model): + """Rule describing a recurring manual DVR schedule.""" + + channel = models.ForeignKey( + "Channel", + on_delete=models.CASCADE, + related_name="recurring_rules", + ) + days_of_week = models.JSONField(default=list) + start_time = models.TimeField() + end_time = models.TimeField() + enabled = models.BooleanField(default=True) + name = models.CharField(max_length=255, blank=True) + start_date = models.DateField(null=True, blank=True) + end_date = models.DateField(null=True, blank=True) + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + 
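+ # days_of_week stores integers 0-6 (0 = Monday ... 6 = Sunday, the same numbering
+ # enforced by RecurringRecordingRuleSerializer.validate_days_of_week), e.g. [0, 2, 4]
+ # selects Mon/Wed/Fri; start_date/end_date bound the recurrence window, while
+ # start_time/end_time give the wall-clock window of each occurrence.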
+ class Meta: + ordering = ["channel", "start_time"] + + def __str__(self): + channel_name = getattr(self.channel, "name", str(self.channel_id)) + return f"Recurring rule for {channel_name}" + + def cleaned_days(self): + try: + return sorted({int(d) for d in (self.days_of_week or []) if 0 <= int(d) <= 6}) + except Exception: + return [] diff --git a/apps/channels/serializers.py b/apps/channels/serializers.py index 51bfe0a0..7058ced2 100644 --- a/apps/channels/serializers.py +++ b/apps/channels/serializers.py @@ -1,4 +1,6 @@ import json +from datetime import datetime + from rest_framework import serializers from .models import ( Stream, @@ -10,6 +12,7 @@ from .models import ( ChannelProfile, ChannelProfileMembership, Recording, + RecurringRecordingRule, ) from apps.epg.serializers import EPGDataSerializer from core.models import StreamProfile @@ -454,6 +457,13 @@ class RecordingSerializer(serializers.ModelSerializer): start_time = data.get("start_time") end_time = data.get("end_time") + if start_time and timezone.is_naive(start_time): + start_time = timezone.make_aware(start_time, timezone.get_current_timezone()) + data["start_time"] = start_time + if end_time and timezone.is_naive(end_time): + end_time = timezone.make_aware(end_time, timezone.get_current_timezone()) + data["end_time"] = end_time + # If this is an EPG-based recording (program provided), apply global pre/post offsets try: cp = data.get("custom_properties") or {} @@ -497,3 +507,56 @@ class RecordingSerializer(serializers.ModelSerializer): raise serializers.ValidationError("End time must be after start time.") return data + + +class RecurringRecordingRuleSerializer(serializers.ModelSerializer): + class Meta: + model = RecurringRecordingRule + fields = "__all__" + read_only_fields = ["created_at", "updated_at"] + + def validate_days_of_week(self, value): + if not value: + raise serializers.ValidationError("Select at least one day of the week") + cleaned = [] + for entry in value: + try: + iv = int(entry) + except (TypeError, ValueError): + raise serializers.ValidationError("Days of week must be integers 0-6") + if iv < 0 or iv > 6: + raise serializers.ValidationError("Days of week must be between 0 (Monday) and 6 (Sunday)") + cleaned.append(iv) + return sorted(set(cleaned)) + + def validate(self, attrs): + start = attrs.get("start_time") or getattr(self.instance, "start_time", None) + end = attrs.get("end_time") or getattr(self.instance, "end_time", None) + start_date = attrs.get("start_date") if "start_date" in attrs else getattr(self.instance, "start_date", None) + end_date = attrs.get("end_date") if "end_date" in attrs else getattr(self.instance, "end_date", None) + if start_date is None: + existing_start = getattr(self.instance, "start_date", None) + if existing_start is None: + raise serializers.ValidationError("Start date is required") + if start_date and end_date and end_date < start_date: + raise serializers.ValidationError("End date must be on or after start date") + if end_date is None: + existing_end = getattr(self.instance, "end_date", None) + if existing_end is None: + raise serializers.ValidationError("End date is required") + if start and end and start_date and end_date: + start_dt = datetime.combine(start_date, start) + end_dt = datetime.combine(end_date, end) + if end_dt <= start_dt: + raise serializers.ValidationError("End datetime must be after start datetime") + elif start and end and end == start: + raise serializers.ValidationError("End time must be different from start time") + # Normalize empty strings 
to None for dates + if attrs.get("end_date") == "": + attrs["end_date"] = None + if attrs.get("start_date") == "": + attrs["start_date"] = None + return super().validate(attrs) + + def create(self, validated_data): + return super().create(validated_data) diff --git a/apps/channels/tasks.py b/apps/channels/tasks.py index e0954210..3943cf16 100755 --- a/apps/channels/tasks.py +++ b/apps/channels/tasks.py @@ -7,11 +7,14 @@ import requests import time import json import subprocess +import signal +from zoneinfo import ZoneInfo from datetime import datetime, timedelta import gc from celery import shared_task from django.utils.text import slugify +from rapidfuzz import fuzz from apps.channels.models import Channel from apps.epg.models import EPGData @@ -27,6 +30,104 @@ from urllib.parse import quote logger = logging.getLogger(__name__) +# PostgreSQL btree index has a limit of ~2704 bytes (1/3 of 8KB page size) +# We use 2000 as a safe maximum to account for multibyte characters +def validate_logo_url(logo_url, max_length=2000): + """ + Fast validation for logo URLs during bulk creation. + Returns None if URL is too long (would exceed PostgreSQL btree index limit), + original URL otherwise. + + PostgreSQL btree indexes have a maximum size of ~2704 bytes. URLs longer than + this cannot be indexed and would cause database errors. These are typically + base64-encoded images embedded in URLs. + """ + if logo_url and len(logo_url) > max_length: + logger.warning(f"Logo URL too long ({len(logo_url)} > {max_length}), skipping: {logo_url[:100]}...") + return None + return logo_url + +def send_epg_matching_progress(total_channels, matched_channels, current_channel_name="", stage="matching"): + """ + Send EPG matching progress via WebSocket + """ + try: + channel_layer = get_channel_layer() + if channel_layer: + progress_data = { + 'type': 'epg_matching_progress', + 'total': total_channels, + 'matched': len(matched_channels) if isinstance(matched_channels, list) else matched_channels, + 'remaining': total_channels - (len(matched_channels) if isinstance(matched_channels, list) else matched_channels), + 'current_channel': current_channel_name, + 'stage': stage, + 'progress_percent': round((len(matched_channels) if isinstance(matched_channels, list) else matched_channels) / total_channels * 100, 1) if total_channels > 0 else 0 + } + + async_to_sync(channel_layer.group_send)( + "updates", + { + "type": "update", + "data": { + "type": "epg_matching_progress", + **progress_data + } + } + ) + except Exception as e: + logger.warning(f"Failed to send EPG matching progress: {e}") + +# Lazy loading for ML models - only imported/loaded when needed +_ml_model_cache = { + 'sentence_transformer': None +} + +def get_sentence_transformer(): + """Lazy load the sentence transformer model only when needed""" + if _ml_model_cache['sentence_transformer'] is None: + try: + from sentence_transformers import SentenceTransformer + from sentence_transformers import util + + model_name = "sentence-transformers/all-MiniLM-L6-v2" + cache_dir = "/data/models" + + # Check environment variable to disable downloads + disable_downloads = os.environ.get('DISABLE_ML_DOWNLOADS', 'false').lower() == 'true' + + if disable_downloads: + # Check if model exists before attempting to load + hf_model_path = os.path.join(cache_dir, f"models--{model_name.replace('/', '--')}") + if not os.path.exists(hf_model_path): + logger.warning("ML model not found and downloads disabled (DISABLE_ML_DOWNLOADS=true). 
Skipping ML matching.") + return None, None + + # Ensure cache directory exists + os.makedirs(cache_dir, exist_ok=True) + + # Let sentence-transformers handle all cache detection and management + logger.info(f"Loading sentence transformer model (cache: {cache_dir})") + _ml_model_cache['sentence_transformer'] = SentenceTransformer( + model_name, + cache_folder=cache_dir + ) + + return _ml_model_cache['sentence_transformer'], util + except ImportError: + logger.warning("sentence-transformers not available - ML-enhanced matching disabled") + return None, None + except Exception as e: + logger.error(f"Failed to load sentence transformer: {e}") + return None, None + else: + from sentence_transformers import util + return _ml_model_cache['sentence_transformer'], util + +# ML matching thresholds (same as original script) +BEST_FUZZY_THRESHOLD = 85 +LOWER_FUZZY_THRESHOLD = 40 +EMBED_SIM_THRESHOLD = 0.65 + # Words we remove to help with fuzzy + embedding matching COMMON_EXTRANEOUS_WORDS = [ "tv", "channel", "network", "television", @@ -49,155 +150,367 @@ def normalize_name(name: str) -> str: norm = name.lower() norm = re.sub(r"\[.*?\]", "", norm) + + # Extract and preserve important call signs from parentheses before removing them + # This captures call signs like (KVLY), (KING), (KARE), etc. + call_sign_match = re.search(r"\(([A-Z]{3,5})\)", name) + preserved_call_sign = "" + if call_sign_match: + preserved_call_sign = " " + call_sign_match.group(1).lower() + + # Now remove all parentheses content norm = re.sub(r"\(.*?\)", "", norm) + + # Add back the preserved call sign + norm = norm + preserved_call_sign + norm = re.sub(r"[^\w\s]", "", norm) tokens = norm.split() tokens = [t for t in tokens if t not in COMMON_EXTRANEOUS_WORDS] norm = " ".join(tokens).strip() return norm +def match_channels_to_epg(channels_data, epg_data, region_code=None, use_ml=True, send_progress=True): + """ + EPG matching logic that finds the best EPG matches for channels using + multiple matching strategies including fuzzy matching and ML models. + + Automatically uses conservative thresholds for bulk matching (multiple channels) + to avoid bad matches that create user cleanup work, and aggressive thresholds + for single channel matching where users specifically requested a match attempt. 
+ """ + channels_to_update = [] + matched_channels = [] + total_channels = len(channels_data) + + # Send initial progress + if send_progress: + send_epg_matching_progress(total_channels, 0, stage="starting") + + # Try to get ML models if requested (but don't load yet - lazy loading) + st_model, util = None, None + epg_embeddings = None + ml_available = use_ml + + # Automatically determine matching strategy based on number of channels + is_bulk_matching = len(channels_data) > 1 + + # Adjust matching thresholds based on operation type + if is_bulk_matching: + # Conservative thresholds for bulk matching to avoid creating cleanup work + FUZZY_HIGH_CONFIDENCE = 90 # Only very high fuzzy scores + FUZZY_MEDIUM_CONFIDENCE = 70 # Higher threshold for ML enhancement + ML_HIGH_CONFIDENCE = 0.75 # Higher ML confidence required + ML_LAST_RESORT = 0.65 # More conservative last resort + FUZZY_LAST_RESORT_MIN = 50 # Higher fuzzy minimum for last resort + logger.info(f"Using conservative thresholds for bulk matching ({total_channels} channels)") + else: + # More aggressive thresholds for single channel matching (user requested specific match) + FUZZY_HIGH_CONFIDENCE = 85 # Original threshold + FUZZY_MEDIUM_CONFIDENCE = 40 # Original threshold + ML_HIGH_CONFIDENCE = 0.65 # Original threshold + ML_LAST_RESORT = 0.50 # Original desperate threshold + FUZZY_LAST_RESORT_MIN = 20 # Original minimum + logger.info("Using aggressive thresholds for single channel matching") # Process each channel + for index, chan in enumerate(channels_data): + normalized_tvg_id = chan.get("tvg_id", "") + fallback_name = chan["tvg_id"].strip() if chan["tvg_id"] else chan["name"] + + # Send progress update every 5 channels or for the first few + if send_progress and (index < 5 or index % 5 == 0 or index == total_channels - 1): + send_epg_matching_progress( + total_channels, + len(matched_channels), + current_channel_name=chan["name"][:50], # Truncate long names + stage="matching" + ) + normalized_tvg_id = chan.get("tvg_id", "") + fallback_name = chan["tvg_id"].strip() if chan["tvg_id"] else chan["name"] + + # Step 1: Exact TVG ID match + epg_by_tvg_id = next((epg for epg in epg_data if epg["tvg_id"] == normalized_tvg_id), None) + if normalized_tvg_id and epg_by_tvg_id: + chan["epg_data_id"] = epg_by_tvg_id["id"] + channels_to_update.append(chan) + matched_channels.append((chan['id'], fallback_name, epg_by_tvg_id["tvg_id"])) + logger.info(f"Channel {chan['id']} '{fallback_name}' => EPG found by exact tvg_id={epg_by_tvg_id['tvg_id']}") + continue + + # Step 2: Secondary TVG ID check (legacy compatibility) + if chan["tvg_id"]: + epg_match = [epg["id"] for epg in epg_data if epg["tvg_id"] == chan["tvg_id"]] + if epg_match: + chan["epg_data_id"] = epg_match[0] + channels_to_update.append(chan) + matched_channels.append((chan['id'], fallback_name, chan["tvg_id"])) + logger.info(f"Channel {chan['id']} '{chan['name']}' => EPG found by secondary tvg_id={chan['tvg_id']}") + continue + + # Step 2.5: Exact Gracenote ID match + normalized_gracenote_id = chan.get("gracenote_id", "") + if normalized_gracenote_id: + epg_by_gracenote_id = next((epg for epg in epg_data if epg["tvg_id"] == normalized_gracenote_id), None) + if epg_by_gracenote_id: + chan["epg_data_id"] = epg_by_gracenote_id["id"] + channels_to_update.append(chan) + matched_channels.append((chan['id'], fallback_name, f"gracenote:{epg_by_gracenote_id['tvg_id']}")) + logger.info(f"Channel {chan['id']} '{fallback_name}' => EPG found by exact gracenote_id={normalized_gracenote_id}") + 
continue + + # Step 3: Name-based fuzzy matching + if not chan["norm_chan"]: + logger.debug(f"Channel {chan['id']} '{chan['name']}' => empty after normalization, skipping") + continue + + best_score = 0 + best_epg = None + + # Debug: show what we're matching against + logger.debug(f"Fuzzy matching '{chan['norm_chan']}' against EPG entries...") + + # Find best fuzzy match + for row in epg_data: + if not row.get("norm_name"): + continue + + base_score = fuzz.ratio(chan["norm_chan"], row["norm_name"]) + bonus = 0 + + # Apply region-based bonus/penalty + if region_code and row.get("tvg_id"): + combined_text = row["tvg_id"].lower() + " " + row["name"].lower() + dot_regions = re.findall(r'\.([a-z]{2})', combined_text) + + if dot_regions: + if region_code in dot_regions: + bonus = 15 # Bigger bonus for matching region + else: + bonus = -15 # Penalty for different region + elif region_code in combined_text: + bonus = 10 + + score = base_score + bonus + + # Debug the best few matches + if score > 50: # Only show decent matches + logger.debug(f" EPG '{row['name']}' (norm: '{row['norm_name']}') => score: {score} (base: {base_score}, bonus: {bonus})") + + if score > best_score: + best_score = score + best_epg = row + + # Log the best score we found + if best_epg: + logger.info(f"Channel {chan['id']} '{chan['name']}' => best match: '{best_epg['name']}' (score: {best_score})") + else: + logger.debug(f"Channel {chan['id']} '{chan['name']}' => no EPG entries with valid norm_name found") + continue + + # High confidence match - accept immediately + if best_score >= FUZZY_HIGH_CONFIDENCE: + chan["epg_data_id"] = best_epg["id"] + channels_to_update.append(chan) + matched_channels.append((chan['id'], chan['name'], best_epg["tvg_id"])) + logger.info(f"Channel {chan['id']} '{chan['name']}' => matched tvg_id={best_epg['tvg_id']} (score={best_score})") + + # Medium confidence - use ML if available (lazy load models here) + elif best_score >= FUZZY_MEDIUM_CONFIDENCE and ml_available: + # Lazy load ML models only when we actually need them + if st_model is None: + st_model, util = get_sentence_transformer() + + # Lazy generate embeddings only when we actually need them + if epg_embeddings is None and st_model and any(row.get("norm_name") for row in epg_data): + try: + logger.info("Generating embeddings for EPG data using ML model (lazy loading)") + epg_embeddings = st_model.encode( + [row["norm_name"] for row in epg_data if row.get("norm_name")], + convert_to_tensor=True + ) + except Exception as e: + logger.warning(f"Failed to generate embeddings: {e}") + epg_embeddings = None + + if epg_embeddings is not None and st_model: + try: + # Generate embedding for this channel + chan_embedding = st_model.encode(chan["norm_chan"], convert_to_tensor=True) + + # Calculate similarity with all EPG embeddings + sim_scores = util.cos_sim(chan_embedding, epg_embeddings)[0] + top_index = int(sim_scores.argmax()) + top_value = float(sim_scores[top_index]) + + if top_value >= ML_HIGH_CONFIDENCE: + # Find the EPG entry that corresponds to this embedding index + epg_with_names = [epg for epg in epg_data if epg.get("norm_name")] + matched_epg = epg_with_names[top_index] + + chan["epg_data_id"] = matched_epg["id"] + channels_to_update.append(chan) + matched_channels.append((chan['id'], chan['name'], matched_epg["tvg_id"])) + logger.info(f"Channel {chan['id']} '{chan['name']}' => matched EPG tvg_id={matched_epg['tvg_id']} (fuzzy={best_score}, ML-sim={top_value:.2f})") + else: + logger.info(f"Channel {chan['id']} '{chan['name']}' => 
fuzzy={best_score}, ML-sim={top_value:.2f} < {ML_HIGH_CONFIDENCE}, trying last resort...") + + # Last resort: try ML with very low fuzzy threshold + if top_value >= ML_LAST_RESORT: # Dynamic last resort threshold + epg_with_names = [epg for epg in epg_data if epg.get("norm_name")] + matched_epg = epg_with_names[top_index] + + chan["epg_data_id"] = matched_epg["id"] + channels_to_update.append(chan) + matched_channels.append((chan['id'], chan['name'], matched_epg["tvg_id"])) + logger.info(f"Channel {chan['id']} '{chan['name']}' => LAST RESORT match EPG tvg_id={matched_epg['tvg_id']} (fuzzy={best_score}, ML-sim={top_value:.2f})") + else: + logger.info(f"Channel {chan['id']} '{chan['name']}' => even last resort ML-sim {top_value:.2f} < {ML_LAST_RESORT}, skipping") + + except Exception as e: + logger.warning(f"ML matching failed for channel {chan['id']}: {e}") + # Fall back to non-ML decision + logger.info(f"Channel {chan['id']} '{chan['name']}' => fuzzy score {best_score} below threshold, skipping") + + # Last resort: Try ML matching even with very low fuzzy scores + elif best_score >= FUZZY_LAST_RESORT_MIN and ml_available: + # Lazy load ML models for last resort attempts + if st_model is None: + st_model, util = get_sentence_transformer() + + # Lazy generate embeddings for last resort attempts + if epg_embeddings is None and st_model and any(row.get("norm_name") for row in epg_data): + try: + logger.info("Generating embeddings for EPG data using ML model (last resort lazy loading)") + epg_embeddings = st_model.encode( + [row["norm_name"] for row in epg_data if row.get("norm_name")], + convert_to_tensor=True + ) + except Exception as e: + logger.warning(f"Failed to generate embeddings for last resort: {e}") + epg_embeddings = None + + if epg_embeddings is not None and st_model: + try: + logger.info(f"Channel {chan['id']} '{chan['name']}' => trying ML as last resort (fuzzy={best_score})") + # Generate embedding for this channel + chan_embedding = st_model.encode(chan["norm_chan"], convert_to_tensor=True) + + # Calculate similarity with all EPG embeddings + sim_scores = util.cos_sim(chan_embedding, epg_embeddings)[0] + top_index = int(sim_scores.argmax()) + top_value = float(sim_scores[top_index]) + + if top_value >= ML_LAST_RESORT: # Dynamic threshold for desperate attempts + # Find the EPG entry that corresponds to this embedding index + epg_with_names = [epg for epg in epg_data if epg.get("norm_name")] + matched_epg = epg_with_names[top_index] + + chan["epg_data_id"] = matched_epg["id"] + channels_to_update.append(chan) + matched_channels.append((chan['id'], chan['name'], matched_epg["tvg_id"])) + logger.info(f"Channel {chan['id']} '{chan['name']}' => DESPERATE LAST RESORT match EPG tvg_id={matched_epg['tvg_id']} (fuzzy={best_score}, ML-sim={top_value:.2f})") + else: + logger.info(f"Channel {chan['id']} '{chan['name']}' => desperate last resort ML-sim {top_value:.2f} < {ML_LAST_RESORT}, giving up") + except Exception as e: + logger.warning(f"Last resort ML matching failed for channel {chan['id']}: {e}") + logger.info(f"Channel {chan['id']} '{chan['name']}' => best fuzzy score={best_score} < {FUZZY_MEDIUM_CONFIDENCE}, giving up") + else: + # No ML available or very low fuzzy score + logger.info(f"Channel {chan['id']} '{chan['name']}' => best fuzzy score={best_score} < {FUZZY_MEDIUM_CONFIDENCE}, no ML fallback available") + + # Clean up ML models from memory after matching (infrequent operation) + if _ml_model_cache['sentence_transformer'] is not None: + logger.info("Cleaning up ML models 
from memory") + _ml_model_cache['sentence_transformer'] = None + gc.collect() + + # Send final progress update + if send_progress: + send_epg_matching_progress( + total_channels, + len(matched_channels), + stage="completed" + ) + + return { + "channels_to_update": channels_to_update, + "matched_channels": matched_channels + } + @shared_task def match_epg_channels(): """ - Goes through all Channels and tries to find a matching EPGData row by: - 1) If channel.tvg_id is valid in EPGData, skip. - 2) If channel has a tvg_id but not found in EPGData, attempt direct EPGData lookup. - 3) Otherwise, perform name-based fuzzy matching with optional region-based bonus. - 4) If a match is found, we set channel.tvg_id - 5) Summarize and log results. + Uses integrated EPG matching instead of external script. + Provides the same functionality with better performance and maintainability. """ try: - logger.info("Starting EPG matching logic...") + logger.info("Starting integrated EPG matching...") - # Attempt to retrieve a "preferred-region" if configured + # Get region preference try: region_obj = CoreSettings.objects.get(key="preferred-region") region_code = region_obj.value.strip().lower() except CoreSettings.DoesNotExist: region_code = None - matched_channels = [] - channels_to_update = [] - # Get channels that don't have EPG data assigned channels_without_epg = Channel.objects.filter(epg_data__isnull=True) logger.info(f"Found {channels_without_epg.count()} channels without EPG data") - channels_json = [] + channels_data = [] for channel in channels_without_epg: - # Normalize TVG ID - strip whitespace and convert to lowercase normalized_tvg_id = channel.tvg_id.strip().lower() if channel.tvg_id else "" - if normalized_tvg_id: - logger.info(f"Processing channel {channel.id} '{channel.name}' with TVG ID='{normalized_tvg_id}'") - - channels_json.append({ + normalized_gracenote_id = channel.tvc_guide_stationid.strip().lower() if channel.tvc_guide_stationid else "" + channels_data.append({ "id": channel.id, "name": channel.name, - "tvg_id": normalized_tvg_id, # Use normalized TVG ID - "original_tvg_id": channel.tvg_id, # Keep original for reference + "tvg_id": normalized_tvg_id, + "original_tvg_id": channel.tvg_id, + "gracenote_id": normalized_gracenote_id, + "original_gracenote_id": channel.tvc_guide_stationid, "fallback_name": normalized_tvg_id if normalized_tvg_id else channel.name, - "norm_chan": normalize_name(normalized_tvg_id if normalized_tvg_id else channel.name) + "norm_chan": normalize_name(channel.name) # Always use channel name for fuzzy matching! 
}) - # Similarly normalize EPG data TVG IDs - epg_json = [] + # Get all EPG data + epg_data = [] for epg in EPGData.objects.all(): normalized_tvg_id = epg.tvg_id.strip().lower() if epg.tvg_id else "" - epg_json.append({ + epg_data.append({ 'id': epg.id, - 'tvg_id': normalized_tvg_id, # Use normalized TVG ID - 'original_tvg_id': epg.tvg_id, # Keep original for reference + 'tvg_id': normalized_tvg_id, + 'original_tvg_id': epg.tvg_id, 'name': epg.name, 'norm_name': normalize_name(epg.name), 'epg_source_id': epg.epg_source.id if epg.epg_source else None, }) - # Log available EPG data TVG IDs for debugging - unique_epg_tvg_ids = set(e['tvg_id'] for e in epg_json if e['tvg_id']) - logger.info(f"Available EPG TVG IDs: {', '.join(sorted(unique_epg_tvg_ids))}") + logger.info(f"Processing {len(channels_data)} channels against {len(epg_data)} EPG entries") - payload = { - "channels": channels_json, - "epg_data": epg_json, - "region_code": region_code, - } - - with tempfile.NamedTemporaryFile(delete=False) as temp_file: - temp_file.write(json.dumps(payload).encode('utf-8')) - temp_file_path = temp_file.name - - # After writing to the file but before subprocess - # Explicitly delete the large data structures - del payload - gc.collect() - - process = subprocess.Popen( - ['python', '/app/scripts/epg_match.py', temp_file_path], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True - ) - - stdout = '' - block_size = 1024 - - while True: - # Monitor stdout and stderr for readability - readable, _, _ = select.select([process.stdout, process.stderr], [], [], 1) # timeout of 1 second - - if not readable: # timeout expired - if process.poll() is not None: # check if process finished - break - else: # process still running, continue - continue - - for stream in readable: - if stream == process.stdout: - stdout += stream.read(block_size) - elif stream == process.stderr: - error = stream.readline() - if error: - logger.info(error.strip()) - - if process.poll() is not None: - break - - process.wait() - os.remove(temp_file_path) - - if process.returncode != 0: - return f"Failed to process EPG matching" - - result = json.loads(stdout) - # This returns lists of dicts, not model objects + # Run EPG matching with progress updates - automatically uses conservative thresholds for bulk operations + result = match_channels_to_epg(channels_data, epg_data, region_code, use_ml=True, send_progress=True) channels_to_update_dicts = result["channels_to_update"] matched_channels = result["matched_channels"] - # Explicitly clean up large objects - del stdout, result - gc.collect() - - # Convert your dict-based 'channels_to_update' into real Channel objects + # Update channels in database if channels_to_update_dicts: - # Extract IDs of the channels that need updates channel_ids = [d["id"] for d in channels_to_update_dicts] - - # Fetch them from DB channels_qs = Channel.objects.filter(id__in=channel_ids) channels_list = list(channels_qs) - # Build a map from channel_id -> epg_data_id (or whatever fields you need) - epg_mapping = { - d["id"]: d["epg_data_id"] for d in channels_to_update_dicts - } + # Create mapping from channel_id to epg_data_id + epg_mapping = {d["id"]: d["epg_data_id"] for d in channels_to_update_dicts} - # Populate each Channel object with the updated epg_data_id + # Update each channel with matched EPG data for channel_obj in channels_list: - # The script sets 'epg_data_id' in the returned dict - # We either assign directly, or fetch the EPGData instance if needed. 
- channel_obj.epg_data_id = epg_mapping.get(channel_obj.id) + epg_data_id = epg_mapping.get(channel_obj.id) + if epg_data_id: + try: + epg_data_obj = EPGData.objects.get(id=epg_data_id) + channel_obj.epg_data = epg_data_obj + except EPGData.DoesNotExist: + logger.error(f"EPG data {epg_data_id} not found for channel {channel_obj.id}") - # Now we have real model objects, so bulk_update will work + # Bulk update all channels Channel.objects.bulk_update(channels_list, ["epg_data"]) total_matched = len(matched_channels) @@ -208,9 +521,9 @@ def match_epg_channels(): else: logger.info("No new channels were matched.") - logger.info("Finished EPG matching logic.") + logger.info("Finished integrated EPG matching.") - # Send update with additional information for refreshing UI + # Send WebSocket update channel_layer = get_channel_layer() associations = [ {"channel_id": chan["id"], "epg_data_id": chan["epg_data_id"]} @@ -224,23 +537,317 @@ def match_epg_channels(): "data": { "success": True, "type": "epg_match", - "refresh_channels": True, # Flag to tell frontend to refresh channels + "refresh_channels": True, "matches_count": total_matched, "message": f"EPG matching complete: {total_matched} channel(s) matched", - "associations": associations # Add the associations data + "associations": associations } } ) return f"Done. Matched {total_matched} channel(s)." + finally: - # Final cleanup + # Clean up ML models from memory after bulk matching + if _ml_model_cache['sentence_transformer'] is not None: + logger.info("Cleaning up ML models from memory") + _ml_model_cache['sentence_transformer'] = None + + # Memory cleanup gc.collect() - # Use our standardized cleanup function for more thorough memory management from core.utils import cleanup_memory cleanup_memory(log_usage=True, force_collection=True) +@shared_task +def match_selected_channels_epg(channel_ids): + """ + Match EPG data for only the specified selected channels. + Uses the same integrated EPG matching logic but processes only selected channels. + """ + try: + logger.info(f"Starting integrated EPG matching for {len(channel_ids)} selected channels...") + + # Get region preference + try: + region_obj = CoreSettings.objects.get(key="preferred-region") + region_code = region_obj.value.strip().lower() + except CoreSettings.DoesNotExist: + region_code = None + + # Get only the specified channels that don't have EPG data assigned + channels_without_epg = Channel.objects.filter( + id__in=channel_ids, + epg_data__isnull=True + ) + logger.info(f"Found {channels_without_epg.count()} selected channels without EPG data") + + if not channels_without_epg.exists(): + logger.info("No selected channels need EPG matching.") + + # Send WebSocket update + channel_layer = get_channel_layer() + async_to_sync(channel_layer.group_send)( + 'updates', + { + 'type': 'update', + "data": { + "success": True, + "type": "epg_match", + "refresh_channels": True, + "matches_count": 0, + "message": "No selected channels need EPG matching", + "associations": [] + } + } + ) + return "No selected channels needed EPG matching." 
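A minimal usage sketch (editorial aside, not part of the patch): the matching entry points added here are ordinary Celery shared tasks, so a caller such as an API view can dispatch them asynchronously with .delay(). The trigger_epg_matching wrapper below is hypothetical; the task names and the apps.channels.tasks module path are taken from this file.

from apps.channels.tasks import (
    match_epg_channels,
    match_selected_channels_epg,
    match_single_channel_epg,
)

def trigger_epg_matching(selected_ids=None):
    # Hypothetical helper: route to the narrowest task for the selection.
    if not selected_ids:
        return match_epg_channels.delay()                    # full bulk pass
    if len(selected_ids) == 1:
        return match_single_channel_epg.delay(selected_ids[0])  # one channel
    return match_selected_channels_epg.delay(list(selected_ids))
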
+ + channels_data = [] + for channel in channels_without_epg: + normalized_tvg_id = channel.tvg_id.strip().lower() if channel.tvg_id else "" + normalized_gracenote_id = channel.tvc_guide_stationid.strip().lower() if channel.tvc_guide_stationid else "" + channels_data.append({ + "id": channel.id, + "name": channel.name, + "tvg_id": normalized_tvg_id, + "original_tvg_id": channel.tvg_id, + "gracenote_id": normalized_gracenote_id, + "original_gracenote_id": channel.tvc_guide_stationid, + "fallback_name": normalized_tvg_id if normalized_tvg_id else channel.name, + "norm_chan": normalize_name(channel.name) + }) + + # Get all EPG data + epg_data = [] + for epg in EPGData.objects.all(): + normalized_tvg_id = epg.tvg_id.strip().lower() if epg.tvg_id else "" + epg_data.append({ + 'id': epg.id, + 'tvg_id': normalized_tvg_id, + 'original_tvg_id': epg.tvg_id, + 'name': epg.name, + 'norm_name': normalize_name(epg.name), + 'epg_source_id': epg.epg_source.id if epg.epg_source else None, + }) + + logger.info(f"Processing {len(channels_data)} selected channels against {len(epg_data)} EPG entries") + + # Run EPG matching with progress updates - automatically uses appropriate thresholds + result = match_channels_to_epg(channels_data, epg_data, region_code, use_ml=True, send_progress=True) + channels_to_update_dicts = result["channels_to_update"] + matched_channels = result["matched_channels"] + + # Update channels in database + if channels_to_update_dicts: + channel_ids_to_update = [d["id"] for d in channels_to_update_dicts] + channels_qs = Channel.objects.filter(id__in=channel_ids_to_update) + channels_list = list(channels_qs) + + # Create mapping from channel_id to epg_data_id + epg_mapping = {d["id"]: d["epg_data_id"] for d in channels_to_update_dicts} + + # Update each channel with matched EPG data + for channel_obj in channels_list: + epg_data_id = epg_mapping.get(channel_obj.id) + if epg_data_id: + try: + epg_data_obj = EPGData.objects.get(id=epg_data_id) + channel_obj.epg_data = epg_data_obj + except EPGData.DoesNotExist: + logger.error(f"EPG data {epg_data_id} not found for channel {channel_obj.id}") + + # Bulk update all channels + Channel.objects.bulk_update(channels_list, ["epg_data"]) + + total_matched = len(matched_channels) + if total_matched: + logger.info(f"Selected Channel Match Summary: {total_matched} channel(s) matched.") + for (cid, cname, tvg) in matched_channels: + logger.info(f" - Channel ID={cid}, Name='{cname}' => tvg_id='{tvg}'") + else: + logger.info("No selected channels were matched.") + + logger.info("Finished integrated EPG matching for selected channels.") + + # Send WebSocket update + channel_layer = get_channel_layer() + associations = [ + {"channel_id": chan["id"], "epg_data_id": chan["epg_data_id"]} + for chan in channels_to_update_dicts + ] + + async_to_sync(channel_layer.group_send)( + 'updates', + { + 'type': 'update', + "data": { + "success": True, + "type": "epg_match", + "refresh_channels": True, + "matches_count": total_matched, + "message": f"EPG matching complete: {total_matched} selected channel(s) matched", + "associations": associations + } + } + ) + + return f"Done. Matched {total_matched} selected channel(s)." 
+ + finally: + # Clean up ML models from memory after bulk matching + if _ml_model_cache['sentence_transformer'] is not None: + logger.info("Cleaning up ML models from memory") + _ml_model_cache['sentence_transformer'] = None + + # Memory cleanup + gc.collect() + from core.utils import cleanup_memory + cleanup_memory(log_usage=True, force_collection=True) + + +@shared_task +def match_single_channel_epg(channel_id): + """ + Try to match a single channel with EPG data using the integrated matching logic + that includes both fuzzy and ML-enhanced matching. Returns a dict with match status and message. + """ + try: + from apps.channels.models import Channel + from apps.epg.models import EPGData + + logger.info(f"Starting integrated single channel EPG matching for channel ID {channel_id}") + + # Get the channel + try: + channel = Channel.objects.get(id=channel_id) + except Channel.DoesNotExist: + return {"matched": False, "message": "Channel not found"} + + # If channel already has EPG data, skip + if channel.epg_data: + return {"matched": False, "message": f"Channel '{channel.name}' already has EPG data assigned"} + + # Prepare single channel data for matching (same format as bulk matching) + normalized_tvg_id = channel.tvg_id.strip().lower() if channel.tvg_id else "" + normalized_gracenote_id = channel.tvc_guide_stationid.strip().lower() if channel.tvc_guide_stationid else "" + channel_data = { + "id": channel.id, + "name": channel.name, + "tvg_id": normalized_tvg_id, + "original_tvg_id": channel.tvg_id, + "gracenote_id": normalized_gracenote_id, + "original_gracenote_id": channel.tvc_guide_stationid, + "fallback_name": normalized_tvg_id if normalized_tvg_id else channel.name, + "norm_chan": normalize_name(channel.name) # Always use channel name for fuzzy matching! 
+ } + + logger.info(f"Channel data prepared: name='{channel.name}', tvg_id='{normalized_tvg_id}', gracenote_id='{normalized_gracenote_id}', norm_chan='{channel_data['norm_chan']}'") + + # Debug: Test what the normalization does to preserve call signs + test_name = "NBC 11 (KVLY) - Fargo" # Example for testing + test_normalized = normalize_name(test_name) + logger.debug(f"DEBUG normalization example: '{test_name}' → '{test_normalized}' (call sign preserved)") + + # Get all EPG data for matching - must include norm_name field + epg_data_list = [] + for epg in EPGData.objects.filter(name__isnull=False).exclude(name=''): + normalized_epg_tvg_id = epg.tvg_id.strip().lower() if epg.tvg_id else "" + epg_data_list.append({ + 'id': epg.id, + 'tvg_id': normalized_epg_tvg_id, + 'original_tvg_id': epg.tvg_id, + 'name': epg.name, + 'norm_name': normalize_name(epg.name), + 'epg_source_id': epg.epg_source.id if epg.epg_source else None, + }) + + if not epg_data_list: + return {"matched": False, "message": "No EPG data available for matching"} + + logger.info(f"Matching single channel '{channel.name}' against {len(epg_data_list)} EPG entries") + + # Send progress for single channel matching + send_epg_matching_progress(1, 0, current_channel_name=channel.name, stage="matching") + + # Use the EPG matching function - automatically uses aggressive thresholds for single channel + result = match_channels_to_epg([channel_data], epg_data_list, send_progress=False) + channels_to_update = result.get("channels_to_update", []) + matched_channels = result.get("matched_channels", []) + + if channels_to_update: + # Find our channel in the results + channel_match = None + for update in channels_to_update: + if update["id"] == channel.id: + channel_match = update + break + + if channel_match: + # Apply the match to the channel + try: + epg_data = EPGData.objects.get(id=channel_match['epg_data_id']) + channel.epg_data = epg_data + channel.save(update_fields=["epg_data"]) + + # Find match details from matched_channels for better reporting + match_details = None + for match_info in matched_channels: + if match_info[0] == channel.id: # matched_channels format: (channel_id, channel_name, epg_info) + match_details = match_info + break + + success_msg = f"Channel '{channel.name}' matched with EPG '{epg_data.name}'" + if match_details: + success_msg += f" (matched via: {match_details[2]})" + + logger.info(success_msg) + + # Send completion progress for single channel + send_epg_matching_progress(1, 1, current_channel_name=channel.name, stage="completed") + + # Clean up ML models from memory after single channel matching + if _ml_model_cache['sentence_transformer'] is not None: + logger.info("Cleaning up ML models from memory") + _ml_model_cache['sentence_transformer'] = None + gc.collect() + + return { + "matched": True, + "message": success_msg, + "epg_name": epg_data.name, + "epg_id": epg_data.id + } + except EPGData.DoesNotExist: + return {"matched": False, "message": "Matched EPG data not found"} + + # No match found + # Send completion progress for single channel (failed) + send_epg_matching_progress(1, 0, current_channel_name=channel.name, stage="completed") + + # Clean up ML models from memory after single channel matching + if _ml_model_cache['sentence_transformer'] is not None: + logger.info("Cleaning up ML models from memory") + _ml_model_cache['sentence_transformer'] = None + gc.collect() + + return { + "matched": False, + "message": f"No suitable EPG match found for channel '{channel.name}'" + } + + except Exception 
as e: + logger.error(f"Error in integrated single channel EPG matching: {e}", exc_info=True) + + # Clean up ML models from memory even on error + if _ml_model_cache['sentence_transformer'] is not None: + logger.info("Cleaning up ML models from memory after error") + _ml_model_cache['sentence_transformer'] = None + gc.collect() + + return {"matched": False, "message": f"Error during matching: {str(e)}"} + + def evaluate_series_rules_impl(tvg_id: str | None = None): """Synchronous implementation of series rule evaluation; returns details for debugging.""" from django.utils import timezone @@ -527,6 +1134,148 @@ def reschedule_upcoming_recordings_for_offset_change(): return reschedule_upcoming_recordings_for_offset_change_impl() +def _notify_recordings_refresh(): + try: + from core.utils import send_websocket_update + send_websocket_update('updates', 'update', {"success": True, "type": "recordings_refreshed"}) + except Exception: + pass + + +def purge_recurring_rule_impl(rule_id: int) -> int: + """Remove all future recordings created by a recurring rule.""" + from django.utils import timezone + from .models import Recording + + now = timezone.now() + try: + removed, _ = Recording.objects.filter( + start_time__gte=now, + custom_properties__rule__id=rule_id, + ).delete() + except Exception: + removed = 0 + if removed: + _notify_recordings_refresh() + return removed + + +def sync_recurring_rule_impl(rule_id: int, drop_existing: bool = True, horizon_days: int = 14) -> int: + """Ensure recordings exist for a recurring rule within the scheduling horizon.""" + from django.utils import timezone + from .models import RecurringRecordingRule, Recording + + rule = RecurringRecordingRule.objects.filter(pk=rule_id).select_related("channel").first() + now = timezone.now() + removed = 0 + if drop_existing: + removed = purge_recurring_rule_impl(rule_id) + + if not rule or not rule.enabled: + return 0 + + days = rule.cleaned_days() + if not days: + return 0 + + tz_name = CoreSettings.get_system_time_zone() + try: + tz = ZoneInfo(tz_name) + except Exception: + logger.warning("Invalid or unsupported time zone '%s'; falling back to Server default", tz_name) + tz = timezone.get_current_timezone() + start_limit = rule.start_date or now.date() + end_limit = rule.end_date + horizon = now + timedelta(days=horizon_days) + start_window = max(start_limit, now.date()) + if drop_existing and end_limit: + end_window = end_limit + else: + end_window = horizon.date() + if end_limit and end_limit < end_window: + end_window = end_limit + if end_window < start_window: + return 0 + total_created = 0 + + for offset in range((end_window - start_window).days + 1): + target_date = start_window + timedelta(days=offset) + if target_date.weekday() not in days: + continue + if end_limit and target_date > end_limit: + continue + try: + start_dt = timezone.make_aware(datetime.combine(target_date, rule.start_time), tz) + end_dt = timezone.make_aware(datetime.combine(target_date, rule.end_time), tz) + except Exception: + continue + if end_dt <= start_dt: + end_dt = end_dt + timedelta(days=1) + if start_dt <= now: + continue + exists = Recording.objects.filter( + channel=rule.channel, + start_time=start_dt, + custom_properties__rule__id=rule.id, + ).exists() + if exists: + continue + description = rule.name or f"Recurring recording for {rule.channel.name}" + cp = { + "rule": { + "type": "recurring", + "id": rule.id, + "days_of_week": days, + "name": rule.name or "", + }, + "status": "scheduled", + "description": description, + "program": { + 
"title": rule.name or rule.channel.name, + "description": description, + "start_time": start_dt.isoformat(), + "end_time": end_dt.isoformat(), + }, + } + try: + Recording.objects.create( + channel=rule.channel, + start_time=start_dt, + end_time=end_dt, + custom_properties=cp, + ) + total_created += 1 + except Exception as err: + logger.warning(f"Failed to create recurring recording for rule {rule.id}: {err}") + + if removed or total_created: + _notify_recordings_refresh() + + return total_created + + +@shared_task +def rebuild_recurring_rule(rule_id: int, horizon_days: int = 14): + return sync_recurring_rule_impl(rule_id, drop_existing=True, horizon_days=horizon_days) + + +@shared_task +def maintain_recurring_recordings(): + from .models import RecurringRecordingRule + + total = 0 + for rule_id in RecurringRecordingRule.objects.filter(enabled=True).values_list("id", flat=True): + try: + total += sync_recurring_rule_impl(rule_id, drop_existing=False) + except Exception as err: + logger.warning(f"Recurring rule maintenance failed for {rule_id}: {err}") + return total + + +@shared_task +def purge_recurring_rule(rule_id: int): + return purge_recurring_rule_impl(rule_id) + @shared_task def _safe_name(s): try: @@ -1249,6 +1998,7 @@ def comskip_process_recording(recording_id: int): Safe to call even if comskip is not installed; stores status in custom_properties.comskip. """ import shutil + from django.db import DatabaseError from .models import Recording # Helper to broadcast status over websocket def _ws(status: str, extra: dict | None = None): @@ -1266,7 +2016,33 @@ def comskip_process_recording(recording_id: int): except Recording.DoesNotExist: return "not_found" - cp = rec.custom_properties or {} + cp = rec.custom_properties.copy() if isinstance(rec.custom_properties, dict) else {} + + def _persist_custom_properties(): + """Persist updated custom_properties without raising if the row disappeared.""" + try: + updated = Recording.objects.filter(pk=recording_id).update(custom_properties=cp) + if not updated: + logger.warning( + "Recording %s vanished before comskip status could be saved", + recording_id, + ) + return False + except DatabaseError as db_err: + logger.warning( + "Failed to persist comskip status for recording %s: %s", + recording_id, + db_err, + ) + return False + except Exception as unexpected: + logger.warning( + "Unexpected error while saving comskip status for recording %s: %s", + recording_id, + unexpected, + ) + return False + return True file_path = (cp or {}).get("file_path") if not file_path or not os.path.exists(file_path): return "no_file" @@ -1277,8 +2053,7 @@ def comskip_process_recording(recording_id: int): comskip_bin = shutil.which("comskip") if not comskip_bin: cp["comskip"] = {"status": "skipped", "reason": "comskip_not_installed"} - rec.custom_properties = cp - rec.save(update_fields=["custom_properties"]) + _persist_custom_properties() _ws('skipped', {"reason": "comskip_not_installed"}) return "comskip_missing" @@ -1290,24 +2065,59 @@ def comskip_process_recording(recording_id: int): try: cmd = [comskip_bin, "--output", os.path.dirname(file_path)] - # Prefer system ini if present to squelch warning and get sane defaults - for ini_path in ("/etc/comskip/comskip.ini", "/app/docker/comskip.ini"): - if os.path.exists(ini_path): + # Prefer user-specified INI, fall back to known defaults + ini_candidates = [] + try: + custom_ini = CoreSettings.get_dvr_comskip_custom_path() + if custom_ini: + ini_candidates.append(custom_ini) + except Exception as ini_err: + 
logger.debug(f"Unable to load custom comskip.ini path: {ini_err}") + ini_candidates.extend(["/etc/comskip/comskip.ini", "/app/docker/comskip.ini"]) + selected_ini = None + for ini_path in ini_candidates: + if ini_path and os.path.exists(ini_path): + selected_ini = ini_path cmd.extend([f"--ini={ini_path}"]) break cmd.append(file_path) - subprocess.run(cmd, check=True) + subprocess.run( + cmd, + check=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) + except subprocess.CalledProcessError as e: + stderr_tail = (e.stderr or "").strip().splitlines() + stderr_tail = stderr_tail[-5:] if stderr_tail else [] + detail = { + "status": "error", + "reason": "comskip_failed", + "returncode": e.returncode, + } + if e.returncode and e.returncode < 0: + try: + detail["signal"] = signal.Signals(-e.returncode).name + except Exception: + detail["signal"] = f"signal_{-e.returncode}" + if stderr_tail: + detail["stderr"] = "\n".join(stderr_tail) + if selected_ini: + detail["ini_path"] = selected_ini + cp["comskip"] = detail + _persist_custom_properties() + _ws('error', {"reason": "comskip_failed", "returncode": e.returncode}) + return "comskip_failed" except Exception as e: cp["comskip"] = {"status": "error", "reason": f"comskip_failed: {e}"} - rec.custom_properties = cp - rec.save(update_fields=["custom_properties"]) + _persist_custom_properties() _ws('error', {"reason": str(e)}) return "comskip_failed" if not os.path.exists(edl_path): cp["comskip"] = {"status": "error", "reason": "edl_not_found"} - rec.custom_properties = cp - rec.save(update_fields=["custom_properties"]) + _persist_custom_properties() _ws('error', {"reason": "edl_not_found"}) return "no_edl" @@ -1325,8 +2135,7 @@ def comskip_process_recording(recording_id: int): duration = _ffprobe_duration(file_path) if duration is None: cp["comskip"] = {"status": "error", "reason": "duration_unknown"} - rec.custom_properties = cp - rec.save(update_fields=["custom_properties"]) + _persist_custom_properties() _ws('error', {"reason": "duration_unknown"}) return "no_duration" @@ -1355,9 +2164,14 @@ def comskip_process_recording(recording_id: int): keep.append((cur, duration)) if not commercials or sum((e - s) for s, e in commercials) <= 0.5: - cp["comskip"] = {"status": "completed", "skipped": True, "edl": os.path.basename(edl_path)} - rec.custom_properties = cp - rec.save(update_fields=["custom_properties"]) + cp["comskip"] = { + "status": "completed", + "skipped": True, + "edl": os.path.basename(edl_path), + } + if selected_ini: + cp["comskip"]["ini_path"] = selected_ini + _persist_custom_properties() _ws('skipped', {"reason": "no_commercials", "commercials": 0}) return "no_commercials" @@ -1381,7 +2195,8 @@ def comskip_process_recording(recording_id: int): list_path = os.path.join(workdir, "concat_list.txt") with open(list_path, "w") as lf: for pth in parts: - lf.write(f"file '{pth}'\n") + escaped = pth.replace("'", "'\\''") + lf.write(f"file '{escaped}'\n") output_path = os.path.join(workdir, f"{os.path.splitext(os.path.basename(file_path))[0]}.cut.mkv") subprocess.run([ @@ -1407,14 +2222,14 @@ def comskip_process_recording(recording_id: int): "segments_kept": len(parts), "commercials": len(commercials), } - rec.custom_properties = cp - rec.save(update_fields=["custom_properties"]) + if selected_ini: + cp["comskip"]["ini_path"] = selected_ini + _persist_custom_properties() _ws('completed', {"commercials": len(commercials), "segments_kept": len(parts)}) return "ok" except Exception as e: cp["comskip"] = {"status": "error", "reason": 
str(e)} - rec.custom_properties = cp - rec.save(update_fields=["custom_properties"]) + _persist_custom_properties() _ws('error', {"reason": str(e)}) return f"error:{e}" def _resolve_poster_for_program(channel_name, program): @@ -1668,7 +2483,9 @@ def bulk_create_channels_from_streams(self, stream_ids, channel_profile_ids=None for i in range(0, total_streams, batch_size): batch_stream_ids = stream_ids[i:i + batch_size] - batch_streams = Stream.objects.filter(id__in=batch_stream_ids) + # Fetch streams and preserve the order from batch_stream_ids + batch_streams_dict = {stream.id: stream for stream in Stream.objects.filter(id__in=batch_stream_ids)} + batch_streams = [batch_streams_dict[stream_id] for stream_id in batch_stream_ids if stream_id in batch_streams_dict] # Send progress update send_websocket_update('updates', 'update', { @@ -1743,15 +2560,16 @@ def bulk_create_channels_from_streams(self, stream_ids, channel_profile_ids=None # Store profile IDs for this channel profile_map.append(channel_profile_ids) - # Handle logo - if stream.logo_url: + # Handle logo - validate URL length to avoid PostgreSQL btree index errors + validated_logo_url = validate_logo_url(stream.logo_url) if stream.logo_url else None + if validated_logo_url: logos_to_create.append( Logo( - url=stream.logo_url, + url=validated_logo_url, name=stream.name or stream.tvg_id, ) ) - logo_map.append(stream.logo_url) + logo_map.append(validated_logo_url) else: logo_map.append(None) @@ -1897,3 +2715,322 @@ def bulk_create_channels_from_streams(self, stream_ids, channel_profile_ids=None 'error': str(e) }) raise + + +@shared_task(bind=True) +def set_channels_names_from_epg(self, channel_ids): + """ + Celery task to set channel names from EPG data for multiple channels + """ + from core.utils import send_websocket_update + + task_id = self.request.id + total_channels = len(channel_ids) + updated_count = 0 + errors = [] + + try: + logger.info(f"Starting EPG name setting task for {total_channels} channels") + + # Send initial progress + send_websocket_update('updates', 'update', { + 'type': 'epg_name_setting_progress', + 'task_id': task_id, + 'progress': 0, + 'total': total_channels, + 'status': 'running', + 'message': 'Starting EPG name setting...' 
+ }) + + batch_size = 100 + for i in range(0, total_channels, batch_size): + batch_ids = channel_ids[i:i + batch_size] + batch_updates = [] + + # Get channels and their EPG data + channels = Channel.objects.filter(id__in=batch_ids).select_related('epg_data') + + for channel in channels: + try: + if channel.epg_data and channel.epg_data.name: + if channel.name != channel.epg_data.name: + channel.name = channel.epg_data.name + batch_updates.append(channel) + updated_count += 1 + except Exception as e: + errors.append(f"Channel {channel.id}: {str(e)}") + logger.error(f"Error processing channel {channel.id}: {e}") + + # Bulk update the batch + if batch_updates: + Channel.objects.bulk_update(batch_updates, ['name']) + + # Send progress update + progress = min(i + batch_size, total_channels) + send_websocket_update('updates', 'update', { + 'type': 'epg_name_setting_progress', + 'task_id': task_id, + 'progress': progress, + 'total': total_channels, + 'status': 'running', + 'message': f'Updated {updated_count} channel names...', + 'updated_count': updated_count + }) + + # Send completion notification + send_websocket_update('updates', 'update', { + 'type': 'epg_name_setting_progress', + 'task_id': task_id, + 'progress': total_channels, + 'total': total_channels, + 'status': 'completed', + 'message': f'Successfully updated {updated_count} channel names from EPG data', + 'updated_count': updated_count, + 'error_count': len(errors), + 'errors': errors + }) + + logger.info(f"EPG name setting task completed. Updated {updated_count} channels") + return { + 'status': 'completed', + 'updated_count': updated_count, + 'error_count': len(errors), + 'errors': errors + } + + except Exception as e: + logger.error(f"EPG name setting task failed: {e}") + send_websocket_update('updates', 'update', { + 'type': 'epg_name_setting_progress', + 'task_id': task_id, + 'progress': 0, + 'total': total_channels, + 'status': 'failed', + 'message': f'Task failed: {str(e)}', + 'error': str(e) + }) + raise + + +@shared_task(bind=True) +def set_channels_logos_from_epg(self, channel_ids): + """ + Celery task to set channel logos from EPG data for multiple channels + Creates logos from EPG icon URLs if they don't exist + """ + from .models import Logo + from core.utils import send_websocket_update + import requests + from urllib.parse import urlparse + + task_id = self.request.id + total_channels = len(channel_ids) + updated_count = 0 + created_logos_count = 0 + errors = [] + + try: + logger.info(f"Starting EPG logo setting task for {total_channels} channels") + + # Send initial progress + send_websocket_update('updates', 'update', { + 'type': 'epg_logo_setting_progress', + 'task_id': task_id, + 'progress': 0, + 'total': total_channels, + 'status': 'running', + 'message': 'Starting EPG logo setting...' 
+ }) + + batch_size = 50 # Smaller batch for logo processing + for i in range(0, total_channels, batch_size): + batch_ids = channel_ids[i:i + batch_size] + batch_updates = [] + + # Get channels and their EPG data + channels = Channel.objects.filter(id__in=batch_ids).select_related('epg_data', 'logo') + + for channel in channels: + try: + if channel.epg_data and channel.epg_data.icon_url: + icon_url = channel.epg_data.icon_url.strip() + + # Try to find existing logo with this URL + try: + logo = Logo.objects.get(url=icon_url) + except Logo.DoesNotExist: + # Create new logo from EPG icon URL + try: + # Generate a name for the logo + logo_name = channel.epg_data.name or f"Logo for {channel.epg_data.tvg_id}" + + # Create the logo record + logo = Logo.objects.create( + name=logo_name, + url=icon_url + ) + created_logos_count += 1 + logger.info(f"Created new logo from EPG: {logo_name} - {icon_url}") + + except Exception as create_error: + errors.append(f"Channel {channel.id}: Failed to create logo from {icon_url}: {str(create_error)}") + logger.error(f"Failed to create logo for channel {channel.id}: {create_error}") + continue + + # Update channel logo if different + if channel.logo != logo: + channel.logo = logo + batch_updates.append(channel) + updated_count += 1 + + except Exception as e: + errors.append(f"Channel {channel.id}: {str(e)}") + logger.error(f"Error processing channel {channel.id}: {e}") + + # Bulk update the batch + if batch_updates: + Channel.objects.bulk_update(batch_updates, ['logo']) + + # Send progress update + progress = min(i + batch_size, total_channels) + send_websocket_update('updates', 'update', { + 'type': 'epg_logo_setting_progress', + 'task_id': task_id, + 'progress': progress, + 'total': total_channels, + 'status': 'running', + 'message': f'Updated {updated_count} channel logos, created {created_logos_count} new logos...', + 'updated_count': updated_count, + 'created_logos_count': created_logos_count + }) + + # Send completion notification + send_websocket_update('updates', 'update', { + 'type': 'epg_logo_setting_progress', + 'task_id': task_id, + 'progress': total_channels, + 'total': total_channels, + 'status': 'completed', + 'message': f'Successfully updated {updated_count} channel logos and created {created_logos_count} new logos from EPG data', + 'updated_count': updated_count, + 'created_logos_count': created_logos_count, + 'error_count': len(errors), + 'errors': errors + }) + + logger.info(f"EPG logo setting task completed. 
Updated {updated_count} channels, created {created_logos_count} logos") + return { + 'status': 'completed', + 'updated_count': updated_count, + 'created_logos_count': created_logos_count, + 'error_count': len(errors), + 'errors': errors + } + + except Exception as e: + logger.error(f"EPG logo setting task failed: {e}") + send_websocket_update('updates', 'update', { + 'type': 'epg_logo_setting_progress', + 'task_id': task_id, + 'progress': 0, + 'total': total_channels, + 'status': 'failed', + 'message': f'Task failed: {str(e)}', + 'error': str(e) + }) + raise + + +@shared_task(bind=True) +def set_channels_tvg_ids_from_epg(self, channel_ids): + """ + Celery task to set channel TVG-IDs from EPG data for multiple channels + """ + from core.utils import send_websocket_update + + task_id = self.request.id + total_channels = len(channel_ids) + updated_count = 0 + errors = [] + + try: + logger.info(f"Starting EPG TVG-ID setting task for {total_channels} channels") + + # Send initial progress + send_websocket_update('updates', 'update', { + 'type': 'epg_tvg_id_setting_progress', + 'task_id': task_id, + 'progress': 0, + 'total': total_channels, + 'status': 'running', + 'message': 'Starting EPG TVG-ID setting...' + }) + + batch_size = 100 + for i in range(0, total_channels, batch_size): + batch_ids = channel_ids[i:i + batch_size] + batch_updates = [] + + # Get channels and their EPG data + channels = Channel.objects.filter(id__in=batch_ids).select_related('epg_data') + + for channel in channels: + try: + if channel.epg_data and channel.epg_data.tvg_id: + if channel.tvg_id != channel.epg_data.tvg_id: + channel.tvg_id = channel.epg_data.tvg_id + batch_updates.append(channel) + updated_count += 1 + except Exception as e: + errors.append(f"Channel {channel.id}: {str(e)}") + logger.error(f"Error processing channel {channel.id}: {e}") + + # Bulk update the batch + if batch_updates: + Channel.objects.bulk_update(batch_updates, ['tvg_id']) + + # Send progress update + progress = min(i + batch_size, total_channels) + send_websocket_update('updates', 'update', { + 'type': 'epg_tvg_id_setting_progress', + 'task_id': task_id, + 'progress': progress, + 'total': total_channels, + 'status': 'running', + 'message': f'Updated {updated_count} channel TVG-IDs...', + 'updated_count': updated_count + }) + + # Send completion notification + send_websocket_update('updates', 'update', { + 'type': 'epg_tvg_id_setting_progress', + 'task_id': task_id, + 'progress': total_channels, + 'total': total_channels, + 'status': 'completed', + 'message': f'Successfully updated {updated_count} channel TVG-IDs from EPG data', + 'updated_count': updated_count, + 'error_count': len(errors), + 'errors': errors + }) + + logger.info(f"EPG TVG-ID setting task completed. 
Updated {updated_count} channels") + return { + 'status': 'completed', + 'updated_count': updated_count, + 'error_count': len(errors), + 'errors': errors + } + + except Exception as e: + logger.error(f"EPG TVG-ID setting task failed: {e}") + send_websocket_update('updates', 'update', { + 'type': 'epg_tvg_id_setting_progress', + 'task_id': task_id, + 'progress': 0, + 'total': total_channels, + 'status': 'failed', + 'message': f'Task failed: {str(e)}', + 'error': str(e) + }) + raise diff --git a/apps/channels/tests/__init__.py b/apps/channels/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/apps/channels/tests/test_recurring_rules.py b/apps/channels/tests/test_recurring_rules.py new file mode 100644 index 00000000..982ecb93 --- /dev/null +++ b/apps/channels/tests/test_recurring_rules.py @@ -0,0 +1,40 @@ +from datetime import datetime, timedelta +from django.test import TestCase +from django.utils import timezone + +from apps.channels.models import Channel, RecurringRecordingRule, Recording +from apps.channels.tasks import sync_recurring_rule_impl, purge_recurring_rule_impl + + +class RecurringRecordingRuleTasksTests(TestCase): + def test_sync_recurring_rule_creates_and_purges_recordings(self): + now = timezone.now() + channel = Channel.objects.create(channel_number=1, name='Test Channel') + + start_time = (now + timedelta(minutes=15)).time().replace(second=0, microsecond=0) + end_time = (now + timedelta(minutes=75)).time().replace(second=0, microsecond=0) + + rule = RecurringRecordingRule.objects.create( + channel=channel, + days_of_week=[now.weekday()], + start_time=start_time, + end_time=end_time, + ) + + created = sync_recurring_rule_impl(rule.id, drop_existing=True, horizon_days=1) + self.assertEqual(created, 1) + + recording = Recording.objects.filter(custom_properties__rule__id=rule.id).first() + self.assertIsNotNone(recording) + self.assertEqual(recording.channel, channel) + self.assertEqual(recording.custom_properties.get('rule', {}).get('id'), rule.id) + + expected_start = timezone.make_aware( + datetime.combine(recording.start_time.date(), start_time), + timezone.get_current_timezone(), + ) + self.assertLess(abs((recording.start_time - expected_start).total_seconds()), 60) + + removed = purge_recurring_rule_impl(rule.id) + self.assertEqual(removed, 1) + self.assertFalse(Recording.objects.filter(custom_properties__rule__id=rule.id).exists()) diff --git a/apps/epg/migrations/0016_epgdata_icon_url.py b/apps/epg/migrations/0016_epgdata_icon_url.py new file mode 100644 index 00000000..b934b024 --- /dev/null +++ b/apps/epg/migrations/0016_epgdata_icon_url.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-09-16 22:01 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0015_alter_programdata_custom_properties'), + ] + + operations = [ + migrations.AddField( + model_name='epgdata', + name='icon_url', + field=models.URLField(blank=True, max_length=500, null=True), + ), + ] diff --git a/apps/epg/migrations/0017_alter_epgsource_url.py b/apps/epg/migrations/0017_alter_epgsource_url.py new file mode 100644 index 00000000..dcb55e20 --- /dev/null +++ b/apps/epg/migrations/0017_alter_epgsource_url.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-09-24 21:07 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0016_epgdata_icon_url'), + ] + + operations = [ + migrations.AlterField( + model_name='epgsource', + name='url', + 
field=models.URLField(blank=True, max_length=1000, null=True), + ), + ] diff --git a/apps/epg/models.py b/apps/epg/models.py index 22f2bd28..da6ac8e6 100644 --- a/apps/epg/models.py +++ b/apps/epg/models.py @@ -28,7 +28,7 @@ class EPGSource(models.Model): name = models.CharField(max_length=255, unique=True) source_type = models.CharField(max_length=20, choices=SOURCE_TYPE_CHOICES) - url = models.URLField(blank=True, null=True) # For XMLTV + url = models.URLField(max_length=1000, blank=True, null=True) # For XMLTV api_key = models.CharField(max_length=255, blank=True, null=True) # For Schedules Direct is_active = models.BooleanField(default=True) file_path = models.CharField(max_length=1024, blank=True, null=True) @@ -127,6 +127,7 @@ class EPGData(models.Model): # and a name (which might simply be the tvg_id if no real channel exists). tvg_id = models.CharField(max_length=255, null=True, blank=True, db_index=True) name = models.CharField(max_length=255) + icon_url = models.URLField(max_length=500, null=True, blank=True) epg_source = models.ForeignKey( EPGSource, on_delete=models.CASCADE, diff --git a/apps/epg/serializers.py b/apps/epg/serializers.py index 2f97cebf..85186cae 100644 --- a/apps/epg/serializers.py +++ b/apps/epg/serializers.py @@ -52,5 +52,6 @@ class EPGDataSerializer(serializers.ModelSerializer): 'id', 'tvg_id', 'name', + 'icon_url', 'epg_source', ] diff --git a/apps/epg/tasks.py b/apps/epg/tasks.py index 0d0ebbb3..d9ae5a5d 100644 --- a/apps/epg/tasks.py +++ b/apps/epg/tasks.py @@ -28,6 +28,23 @@ from core.utils import acquire_task_lock, release_task_lock, send_websocket_upda logger = logging.getLogger(__name__) + +def validate_icon_url_fast(icon_url, max_length=None): + """ + Fast validation for icon URLs during parsing. + Returns None if URL is too long, original URL otherwise. + If max_length is None, gets it dynamically from the EPGData model field. 
+ """ + if max_length is None: + # Get max_length dynamically from the model field + max_length = EPGData._meta.get_field('icon_url').max_length + + if icon_url and len(icon_url) > max_length: + logger.warning(f"Icon URL too long ({len(icon_url)} > {max_length}), skipping: {icon_url[:100]}...") + return None + return icon_url + + MAX_EXTRACT_CHUNK_SIZE = 65536 # 64kb (base2) @@ -831,6 +848,7 @@ def parse_channels_only(source): processed_channels = 0 batch_size = 500 # Process in batches to limit memory usage progress = 0 # Initialize progress variable here + icon_url_max_length = EPGData._meta.get_field('icon_url').max_length # Get max length for icon_url field # Track memory at key points if process: @@ -859,7 +877,7 @@ def parse_channels_only(source): # Change iterparse to look for both channel and programme elements logger.debug(f"Creating iterparse context for channels and programmes") - channel_parser = etree.iterparse(source_file, events=('end',), tag=('channel', 'programme'), remove_blank_text=True) + channel_parser = etree.iterparse(source_file, events=('end',), tag=('channel', 'programme'), remove_blank_text=True, recover=True) if process: logger.debug(f"[parse_channels_only] Memory after creating iterparse: {process.memory_info().rss / 1024 / 1024:.2f} MB") @@ -873,10 +891,15 @@ def parse_channels_only(source): tvg_id = elem.get('id', '').strip() if tvg_id: display_name = None + icon_url = None for child in elem: - if child.tag == 'display-name' and child.text: + if display_name is None and child.tag == 'display-name' and child.text: display_name = child.text.strip() - break + elif child.tag == 'icon': + raw_icon_url = child.get('src', '').strip() + icon_url = validate_icon_url_fast(raw_icon_url, icon_url_max_length) + if display_name and icon_url: + break # No need to continue if we have both if not display_name: display_name = tvg_id @@ -894,17 +917,24 @@ def parse_channels_only(source): epgs_to_create.append(EPGData( tvg_id=tvg_id, name=display_name, + icon_url=icon_url, epg_source=source, )) logger.debug(f"[parse_channels_only] Added new channel to epgs_to_create 1: {tvg_id} - {display_name}") processed_channels += 1 continue - # We use the cached object to check if the name has changed + # We use the cached object to check if the name or icon_url has changed epg_obj = existing_epgs[tvg_id] + needs_update = False if epg_obj.name != display_name: - # Only update if the name actually changed epg_obj.name = display_name + needs_update = True + if epg_obj.icon_url != icon_url: + epg_obj.icon_url = icon_url + needs_update = True + + if needs_update: epgs_to_update.append(epg_obj) logger.debug(f"[parse_channels_only] Added channel to update to epgs_to_update: {tvg_id} - {display_name}") else: @@ -915,6 +945,7 @@ def parse_channels_only(source): epgs_to_create.append(EPGData( tvg_id=tvg_id, name=display_name, + icon_url=icon_url, epg_source=source, )) logger.debug(f"[parse_channels_only] Added new channel to epgs_to_create 2: {tvg_id} - {display_name}") @@ -937,7 +968,7 @@ def parse_channels_only(source): logger.info(f"[parse_channels_only] Bulk updating {len(epgs_to_update)} EPG entries") if process: logger.info(f"[parse_channels_only] Memory before bulk_update: {process.memory_info().rss / 1024 / 1024:.2f} MB") - EPGData.objects.bulk_update(epgs_to_update, ["name"]) + EPGData.objects.bulk_update(epgs_to_update, ["name", "icon_url"]) if process: logger.info(f"[parse_channels_only] Memory after bulk_update: {process.memory_info().rss / 1024 / 1024:.2f} MB") epgs_to_update = [] @@ 
-1004,7 +1035,7 @@ def parse_channels_only(source): logger.debug(f"[parse_channels_only] Created final batch of {len(epgs_to_create)} EPG entries") if epgs_to_update: - EPGData.objects.bulk_update(epgs_to_update, ["name"]) + EPGData.objects.bulk_update(epgs_to_update, ["name", "icon_url"]) logger.debug(f"[parse_channels_only] Updated final batch of {len(epgs_to_update)} EPG entries") if process: logger.debug(f"[parse_channels_only] Memory after final batch creation: {process.memory_info().rss / 1024 / 1024:.2f} MB") @@ -1211,7 +1242,7 @@ def parse_programs_for_tvg_id(epg_id): source_file = open(file_path, 'rb') # Stream parse the file using lxml's iterparse - program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True) + program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True, recover=True) for _, elem in program_parser: if elem.get('channel') == epg.tvg_id: diff --git a/apps/m3u/api_views.py b/apps/m3u/api_views.py index 9c5d5c14..878ae7c6 100644 --- a/apps/m3u/api_views.py +++ b/apps/m3u/api_views.py @@ -81,6 +81,13 @@ class M3UAccountViewSet(viewsets.ModelViewSet): account_type = response.data.get("account_type") account_id = response.data.get("id") + # Notify frontend that a new playlist was created + from core.utils import send_websocket_update + send_websocket_update('updates', 'update', { + 'type': 'playlist_created', + 'playlist_id': account_id + }) + if account_type == M3UAccount.Types.XC: refresh_m3u_groups(account_id) diff --git a/apps/m3u/tasks.py b/apps/m3u/tasks.py index 2caeb519..0ba595c5 100644 --- a/apps/m3u/tasks.py +++ b/apps/m3u/tasks.py @@ -774,7 +774,7 @@ def process_xc_category_direct(account_id, batch, groups, hash_keys): group_title = group_name stream_hash = Stream.generate_hash_key( - name, url, tvg_id, hash_keys + name, url, tvg_id, hash_keys, m3u_id=account_id ) stream_props = { "name": name, @@ -903,6 +903,8 @@ def process_m3u_batch_direct(account_id, batch, groups, hash_keys): stream_hashes = {} logger.debug(f"Processing batch of {len(batch)} for M3U account {account_id}") + if compiled_filters: + logger.debug(f"Using compiled filters: {[f[1].regex_pattern for f in compiled_filters]}") for stream_info in batch: try: name, url = stream_info["name"], stream_info["url"] @@ -912,10 +914,10 @@ def process_m3u_batch_direct(account_id, batch, groups, hash_keys): group_title = get_case_insensitive_attr( stream_info["attributes"], "group-title", "Default Group" ) - + logger.debug(f"Processing stream: {name} - {url} in group {group_title}") include = True for pattern, filter in compiled_filters: - logger.debug(f"Checking filter patterh {pattern}") + logger.trace(f"Checking filter pattern {pattern}") target = name if filter.filter_type == "url": target = url @@ -940,7 +942,7 @@ def process_m3u_batch_direct(account_id, batch, groups, hash_keys): ) continue - stream_hash = Stream.generate_hash_key(name, url, tvg_id, hash_keys) + stream_hash = Stream.generate_hash_key(name, url, tvg_id, hash_keys, m3u_id=account_id) stream_props = { "name": name, "url": url, @@ -2071,13 +2073,13 @@ def get_transformed_credentials(account, profile=None): base_url = account.server_url base_username = account.username base_password = account.password # Build a complete URL with credentials (similar to how IPTV URLs are structured) - # Format: http://server.com:port/username/password/rest_of_path + # Format: http://server.com:port/live/username/password/1234.ts if base_url and base_username and 
base_password: # Remove trailing slash from server URL if present clean_server_url = base_url.rstrip('/') # Build the complete URL with embedded credentials - complete_url = f"{clean_server_url}/{base_username}/{base_password}/" + complete_url = f"{clean_server_url}/live/{base_username}/{base_password}/1234.ts" logger.debug(f"Built complete URL: {complete_url}") # Apply profile-specific transformations if profile is provided @@ -2091,14 +2093,14 @@ def get_transformed_credentials(account, profile=None): logger.info(f"Transformed complete URL: {complete_url} -> {transformed_complete_url}") # Extract components from the transformed URL - # Pattern: http://server.com:port/username/password/ + # Pattern: http://server.com:port/live/username/password/1234.ts parsed_url = urllib.parse.urlparse(transformed_complete_url) path_parts = [part for part in parsed_url.path.split('/') if part] if len(path_parts) >= 2: # Extract username and password from path - transformed_username = path_parts[0] - transformed_password = path_parts[1] + transformed_username = path_parts[1] + transformed_password = path_parts[2] # Rebuild server URL without the username/password path transformed_url = f"{parsed_url.scheme}://{parsed_url.netloc}" @@ -2521,76 +2523,75 @@ def refresh_single_m3u_account(account_id): if not all_xc_streams: logger.warning("No streams collected from XC groups") - return f"No streams found for XC account {account_id}", None + else: + # Now batch by stream count (like standard M3U processing) + batches = [ + all_xc_streams[i : i + BATCH_SIZE] + for i in range(0, len(all_xc_streams), BATCH_SIZE) + ] - # Now batch by stream count (like standard M3U processing) - batches = [ - all_xc_streams[i : i + BATCH_SIZE] - for i in range(0, len(all_xc_streams), BATCH_SIZE) - ] + logger.info(f"Processing {len(all_xc_streams)} XC streams in {len(batches)} batches") - logger.info(f"Processing {len(all_xc_streams)} XC streams in {len(batches)} batches") + # Use threading for XC stream processing - now with consistent batch sizes + max_workers = min(4, len(batches)) + logger.debug(f"Using {max_workers} threads for XC stream processing") - # Use threading for XC stream processing - now with consistent batch sizes - max_workers = min(4, len(batches)) - logger.debug(f"Using {max_workers} threads for XC stream processing") + with ThreadPoolExecutor(max_workers=max_workers) as executor: + # Submit stream batch processing tasks (reuse standard M3U processing) + future_to_batch = { + executor.submit(process_m3u_batch_direct, account_id, batch, existing_groups, hash_keys): i + for i, batch in enumerate(batches) + } - with ThreadPoolExecutor(max_workers=max_workers) as executor: - # Submit stream batch processing tasks (reuse standard M3U processing) - future_to_batch = { - executor.submit(process_m3u_batch_direct, account_id, batch, existing_groups, hash_keys): i - for i, batch in enumerate(batches) - } + completed_batches = 0 + total_batches = len(batches) - completed_batches = 0 - total_batches = len(batches) + # Process completed batches as they finish + for future in as_completed(future_to_batch): + batch_idx = future_to_batch[future] + try: + result = future.result() + completed_batches += 1 - # Process completed batches as they finish - for future in as_completed(future_to_batch): - batch_idx = future_to_batch[future] - try: - result = future.result() - completed_batches += 1 + # Extract stream counts from result + if isinstance(result, str): + try: + created_match = re.search(r"(\d+) created", result) + 
updated_match = re.search(r"(\d+) updated", result) + if created_match and updated_match: + created_count = int(created_match.group(1)) + updated_count = int(updated_match.group(1)) + streams_created += created_count + streams_updated += updated_count + except (AttributeError, ValueError): + pass - # Extract stream counts from result - if isinstance(result, str): - try: - created_match = re.search(r"(\d+) created", result) - updated_match = re.search(r"(\d+) updated", result) - if created_match and updated_match: - created_count = int(created_match.group(1)) - updated_count = int(updated_match.group(1)) - streams_created += created_count - streams_updated += updated_count - except (AttributeError, ValueError): - pass + # Send progress update + progress = int((completed_batches / total_batches) * 100) + current_elapsed = time.time() - start_time - # Send progress update - progress = int((completed_batches / total_batches) * 100) - current_elapsed = time.time() - start_time + if progress > 0: + estimated_total = (current_elapsed / progress) * 100 + time_remaining = max(0, estimated_total - current_elapsed) + else: + time_remaining = 0 - if progress > 0: - estimated_total = (current_elapsed / progress) * 100 - time_remaining = max(0, estimated_total - current_elapsed) - else: - time_remaining = 0 + send_m3u_update( + account_id, + "parsing", + progress, + elapsed_time=current_elapsed, + time_remaining=time_remaining, + streams_processed=streams_created + streams_updated, + ) - send_m3u_update( - account_id, - "parsing", - progress, - elapsed_time=current_elapsed, - time_remaining=time_remaining, - streams_processed=streams_created + streams_updated, - ) + logger.debug(f"XC thread batch {completed_batches}/{total_batches} completed") - logger.debug(f"XC thread batch {completed_batches}/{total_batches} completed") + except Exception as e: + logger.error(f"Error in XC thread batch {batch_idx}: {str(e)}") + completed_batches += 1 # Still count it to avoid hanging - except Exception as e: - logger.error(f"Error in XC thread batch {batch_idx}: {str(e)}") - completed_batches += 1 # Still count it to avoid hanging - - logger.info(f"XC thread-based processing completed for account {account_id}") + logger.info(f"XC thread-based processing completed for account {account_id}") # Ensure all database transactions are committed before cleanup logger.info( @@ -2671,7 +2672,16 @@ def refresh_single_m3u_account(account_id): release_task_lock("refresh_single_m3u_account", account_id) # Aggressive garbage collection - del existing_groups, extinf_data, groups, batches + # Only delete variables if they exist + if 'existing_groups' in locals(): + del existing_groups + if 'extinf_data' in locals(): + del extinf_data + if 'groups' in locals(): + del groups + if 'batches' in locals(): + del batches + from core.utils import cleanup_memory cleanup_memory(log_usage=True, force_collection=True) diff --git a/apps/output/views.py b/apps/output/views.py index 6c18629c..6eee7ccc 100644 --- a/apps/output/views.py +++ b/apps/output/views.py @@ -45,45 +45,48 @@ def generate_m3u(request, profile_name=None, user=None): The stream URL now points to the new stream_view that uses StreamProfile. Supports both GET and POST methods for compatibility with IPTVSmarters. 
""" + logger.debug("Generating M3U for profile: %s, user: %s", profile_name, user.username if user else "Anonymous") # Check if this is a POST request with data (which we don't want to allow) if request.method == "POST" and request.body: return HttpResponseForbidden("POST requests with content are not allowed") if user is not None: if user.user_level == 0: - filters = { - "channelprofilemembership__enabled": True, - "user_level__lte": user.user_level, - } + user_profile_count = user.channel_profiles.count() - if user.channel_profiles.count() != 0: - channel_profiles = user.channel_profiles.all() - filters["channelprofilemembership__channel_profile__in"] = ( - channel_profiles - ) - - channels = Channel.objects.filter(**filters).order_by("channel_number") + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channels based on user_level + channels = Channel.objects.filter(user_level__lte=user.user_level).order_by("channel_number") + else: + # User has specific limited profiles assigned + filters = { + "channelprofilemembership__enabled": True, + "user_level__lte": user.user_level, + "channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + channels = Channel.objects.filter(**filters).distinct().order_by("channel_number") else: channels = Channel.objects.filter(user_level__lte=user.user_level).order_by( "channel_number" ) - - if profile_name is not None: - channel_profile = ChannelProfile.objects.get(name=profile_name) - channels = Channel.objects.filter( - channelprofilemembership__channel_profile=channel_profile, - channelprofilemembership__enabled=True - ).order_by('channel_number') else: if profile_name is not None: channel_profile = ChannelProfile.objects.get(name=profile_name) channels = Channel.objects.filter( channelprofilemembership__channel_profile=channel_profile, - channelprofilemembership__enabled=True, - ).order_by("channel_number") + channelprofilemembership__enabled=True + ).order_by('channel_number') else: - channels = Channel.objects.order_by("channel_number") + if profile_name is not None: + channel_profile = ChannelProfile.objects.get(name=profile_name) + channels = Channel.objects.filter( + channelprofilemembership__channel_profile=channel_profile, + channelprofilemembership__enabled=True, + ).order_by("channel_number") + else: + channels = Channel.objects.order_by("channel_number") # Check if the request wants to use direct logo URLs instead of cache use_cached_logos = request.GET.get('cachedlogos', 'true').lower() != 'false' @@ -95,7 +98,22 @@ def generate_m3u(request, profile_name=None, user=None): # Options: 'channel_number' (default), 'tvg_id', 'gracenote' tvg_id_source = request.GET.get('tvg_id_source', 'channel_number').lower() - m3u_content = "#EXTM3U\n" + # Build EPG URL with query parameters if needed + epg_base_url = build_absolute_uri_with_port(request, reverse('output:epg_endpoint', args=[profile_name]) if profile_name else reverse('output:epg_endpoint')) + + # Optionally preserve certain query parameters + preserved_params = ['tvg_id_source', 'cachedlogos', 'days'] + query_params = {k: v for k, v in request.GET.items() if k in preserved_params} + if query_params: + from urllib.parse import urlencode + epg_url = f"{epg_base_url}?{urlencode(query_params)}" + else: + epg_url = epg_base_url + + # Add x-tvg-url and url-tvg attribute for EPG URL + m3u_content = f'#EXTM3U x-tvg-url="{epg_url}" url-tvg="{epg_url}"\n' + + # Start building M3U content for 
channel in channels: group_title = channel.channel_group.name if channel.channel_group else "Default" @@ -148,7 +166,7 @@ def generate_m3u(request, profile_name=None, user=None): # Determine the stream URL based on the direct parameter if use_direct_urls: # Try to get the first stream's direct URL - first_stream = channel.streams.first() + first_stream = channel.streams.order_by('channelstream__order').first() if first_stream and first_stream.url: # Use the direct stream URL stream_url = first_stream.url @@ -300,18 +318,20 @@ def generate_epg(request, profile_name=None, user=None): # Get channels based on user/profile if user is not None: if user.user_level == 0: - filters = { - "channelprofilemembership__enabled": True, - "user_level__lte": user.user_level, - } + user_profile_count = user.channel_profiles.count() - if user.channel_profiles.count() != 0: - channel_profiles = user.channel_profiles.all() - filters["channelprofilemembership__channel_profile__in"] = ( - channel_profiles - ) - - channels = Channel.objects.filter(**filters).order_by("channel_number") + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channels based on user_level + channels = Channel.objects.filter(user_level__lte=user.user_level).order_by("channel_number") + else: + # User has specific limited profiles assigned + filters = { + "channelprofilemembership__enabled": True, + "user_level__lte": user.user_level, + "channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + channels = Channel.objects.filter(**filters).distinct().order_by("channel_number") else: channels = Channel.objects.filter(user_level__lte=user.user_level).order_by( "channel_number" @@ -848,19 +868,22 @@ def xc_get_live_categories(user): response = [] if user.user_level == 0: - filters = { - "channels__channelprofilemembership__enabled": True, - "channels__user_level": 0, - } + user_profile_count = user.channel_profiles.count() - if user.channel_profiles.count() != 0: - # Only get data from active profile - channel_profiles = user.channel_profiles.all() - filters["channels__channelprofilemembership__channel_profile__in"] = ( - channel_profiles - ) - - channel_groups = ChannelGroup.objects.filter(**filters).distinct().order_by(Lower("name")) + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channel groups + channel_groups = ChannelGroup.objects.filter( + channels__isnull=False, channels__user_level__lte=user.user_level + ).distinct().order_by(Lower("name")) + else: + # User has specific limited profiles assigned + filters = { + "channels__channelprofilemembership__enabled": True, + "channels__user_level": 0, + "channels__channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + channel_groups = ChannelGroup.objects.filter(**filters).distinct().order_by(Lower("name")) else: channel_groups = ChannelGroup.objects.filter( channels__isnull=False, channels__user_level__lte=user.user_level @@ -882,20 +905,25 @@ def xc_get_live_streams(request, user, category_id=None): streams = [] if user.user_level == 0: - filters = { - "channelprofilemembership__enabled": True, - "user_level__lte": user.user_level, - } + user_profile_count = user.channel_profiles.count() - if user.channel_profiles.count() > 0: - # Only get data from active profile - channel_profiles = user.channel_profiles.all() - 
filters["channelprofilemembership__channel_profile__in"] = channel_profiles - - if category_id is not None: - filters["channel_group__id"] = category_id - - channels = Channel.objects.filter(**filters).order_by("channel_number") + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channels based on user_level + filters = {"user_level__lte": user.user_level} + if category_id is not None: + filters["channel_group__id"] = category_id + channels = Channel.objects.filter(**filters).order_by("channel_number") + else: + # User has specific limited profiles assigned + filters = { + "channelprofilemembership__enabled": True, + "user_level__lte": user.user_level, + "channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + if category_id is not None: + filters["channel_group__id"] = category_id + channels = Channel.objects.filter(**filters).distinct().order_by("channel_number") else: if not category_id: channels = Channel.objects.filter(user_level__lte=user.user_level).order_by("channel_number") @@ -920,7 +948,7 @@ def xc_get_live_streams(request, user, category_id=None): ) ), "epg_channel_id": str(int(channel.channel_number)) if channel.channel_number.is_integer() else str(channel.channel_number), - "added": int(time.time()), # @TODO: make this the actual created date + "added": int(channel.created_at.timestamp()), "is_adult": 0, "category_id": str(channel.channel_group.id), "category_ids": [channel.channel_group.id], @@ -941,17 +969,27 @@ def xc_get_epg(request, user, short=False): channel = None if user.user_level < 10: - filters = { - "id": channel_id, - "channelprofilemembership__enabled": True, - "user_level__lte": user.user_level, - } + user_profile_count = user.channel_profiles.count() - if user.channel_profiles.count() > 0: - channel_profiles = user.channel_profiles.all() - filters["channelprofilemembership__channel_profile__in"] = channel_profiles + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channels based on user_level + channel = Channel.objects.filter( + id=channel_id, + user_level__lte=user.user_level + ).first() + else: + # User has specific limited profiles assigned + filters = { + "id": channel_id, + "channelprofilemembership__enabled": True, + "user_level__lte": user.user_level, + "channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + channel = Channel.objects.filter(**filters).distinct().first() - channel = get_object_or_404(Channel, **filters) + if not channel: + raise Http404() else: channel = get_object_or_404(Channel, id=channel_id) @@ -1008,31 +1046,11 @@ def xc_get_vod_categories(user): response = [] - # Filter categories based on user's M3U accounts - if user.user_level == 0: - # For regular users, get categories from their accessible M3U accounts - if user.channel_profiles.count() > 0: - channel_profiles = user.channel_profiles.all() - # Get M3U accounts accessible through user's profiles - from apps.m3u.models import M3UAccount - m3u_accounts = M3UAccount.objects.filter( - is_active=True, - profiles__in=channel_profiles - ).distinct() - else: - m3u_accounts = [] - - # Get categories that have movie relations with these accounts - categories = VODCategory.objects.filter( - category_type='movie', - m3umovierelation__m3u_account__in=m3u_accounts - ).distinct().order_by(Lower("name")) - else: - # Admins can see all categories that have active 
movie relations - categories = VODCategory.objects.filter( - category_type='movie', - m3umovierelation__m3u_account__is_active=True - ).distinct().order_by(Lower("name")) + # All authenticated users get access to VOD from all active M3U accounts + categories = VODCategory.objects.filter( + category_type='movie', + m3umovierelation__m3u_account__is_active=True + ).distinct().order_by(Lower("name")) for category in categories: response.append({ @@ -1051,22 +1069,9 @@ def xc_get_vod_streams(request, user, category_id=None): streams = [] - # Build filters for movies based on user access + # All authenticated users get access to VOD from all active M3U accounts filters = {"m3u_relations__m3u_account__is_active": True} - if user.user_level == 0: - # For regular users, filter by accessible M3U accounts - if user.channel_profiles.count() > 0: - channel_profiles = user.channel_profiles.all() - from apps.m3u.models import M3UAccount - m3u_accounts = M3UAccount.objects.filter( - is_active=True, - profiles__in=channel_profiles - ).distinct() - filters["m3u_relations__m3u_account__in"] = m3u_accounts - else: - return [] # No accessible accounts - if category_id: filters["m3u_relations__category_id"] = category_id @@ -1127,28 +1132,11 @@ def xc_get_series_categories(user): response = [] - # Similar filtering as VOD categories but for series - if user.user_level == 0: - if user.channel_profiles.count() > 0: - channel_profiles = user.channel_profiles.all() - from apps.m3u.models import M3UAccount - m3u_accounts = M3UAccount.objects.filter( - is_active=True, - profiles__in=channel_profiles - ).distinct() - else: - m3u_accounts = [] - - # Get categories that have series relations with these accounts - categories = VODCategory.objects.filter( - category_type='series', - m3useriesrelation__m3u_account__in=m3u_accounts - ).distinct().order_by(Lower("name")) - else: - categories = VODCategory.objects.filter( - category_type='series', - m3useriesrelation__m3u_account__is_active=True - ).distinct().order_by(Lower("name")) + # All authenticated users get access to series from all active M3U accounts + categories = VODCategory.objects.filter( + category_type='series', + m3useriesrelation__m3u_account__is_active=True + ).distinct().order_by(Lower("name")) for category in categories: response.append({ @@ -1166,21 +1154,9 @@ def xc_get_series(request, user, category_id=None): series_list = [] - # Build filters based on user access + # All authenticated users get access to series from all active M3U accounts filters = {"m3u_account__is_active": True} - if user.user_level == 0: - if user.channel_profiles.count() > 0: - channel_profiles = user.channel_profiles.all() - from apps.m3u.models import M3UAccount - m3u_accounts = M3UAccount.objects.filter( - is_active=True, - profiles__in=channel_profiles - ).distinct() - filters["m3u_account__in"] = m3u_accounts - else: - return [] - if category_id: filters["category_id"] = category_id @@ -1228,21 +1204,9 @@ def xc_get_series_info(request, user, series_id): if not series_id: raise Http404() - # Get series relation with user access filtering + # All authenticated users get access to series from all active M3U accounts filters = {"id": series_id, "m3u_account__is_active": True} - if user.user_level == 0: - if user.channel_profiles.count() > 0: - channel_profiles = user.channel_profiles.all() - from apps.m3u.models import M3UAccount - m3u_accounts = M3UAccount.objects.filter( - is_active=True, - profiles__in=channel_profiles - ).distinct() - filters["m3u_account__in"] = 
m3u_accounts - else: - raise Http404() - try: series_relation = M3USeriesRelation.objects.select_related('series', 'series__logo').get(**filters) series = series_relation.series @@ -1439,21 +1403,9 @@ def xc_get_vod_info(request, user, vod_id): if not vod_id: raise Http404() - # Get movie relation with user access filtering - use movie ID instead of relation ID + # All authenticated users get access to VOD from all active M3U accounts filters = {"movie_id": vod_id, "m3u_account__is_active": True} - if user.user_level == 0: - if user.channel_profiles.count() > 0: - channel_profiles = user.channel_profiles.all() - from apps.m3u.models import M3UAccount - m3u_accounts = M3UAccount.objects.filter( - is_active=True, - profiles__in=channel_profiles - ).distinct() - filters["m3u_account__in"] = m3u_accounts - else: - raise Http404() - try: # Order by account priority to get the best relation when multiple exist movie_relation = M3UMovieRelation.objects.select_related('movie', 'movie__logo').filter(**filters).order_by('-m3u_account__priority', 'id').first() @@ -1602,22 +1554,9 @@ def xc_movie_stream(request, username, password, stream_id, extension): if custom_properties["xc_password"] != password: return JsonResponse({"error": "Invalid credentials"}, status=401) - # Get movie relation based on user access level - use movie ID instead of relation ID + # All authenticated users get access to VOD from all active M3U accounts filters = {"movie_id": stream_id, "m3u_account__is_active": True} - if user.user_level < 10: - # For regular users, filter by accessible M3U accounts - if user.channel_profiles.count() > 0: - channel_profiles = user.channel_profiles.all() - from apps.m3u.models import M3UAccount - m3u_accounts = M3UAccount.objects.filter( - is_active=True, - profiles__in=channel_profiles - ).distinct() - filters["m3u_account__in"] = m3u_accounts - else: - return JsonResponse({"error": "No accessible content"}, status=403) - try: # Order by account priority to get the best relation when multiple exist movie_relation = M3UMovieRelation.objects.select_related('movie').filter(**filters).order_by('-m3u_account__priority', 'id').first() @@ -1652,22 +1591,9 @@ def xc_series_stream(request, username, password, stream_id, extension): if custom_properties["xc_password"] != password: return JsonResponse({"error": "Invalid credentials"}, status=401) - # Get episode relation based on user access level - use episode ID instead of stream_id + # All authenticated users get access to series/episodes from all active M3U accounts filters = {"episode_id": stream_id, "m3u_account__is_active": True} - if user.user_level < 10: - # For regular users, filter by accessible M3U accounts - if user.channel_profiles.count() > 0: - channel_profiles = user.channel_profiles.all() - from apps.m3u.models import M3UAccount - m3u_accounts = M3UAccount.objects.filter( - is_active=True, - profiles__in=channel_profiles - ).distinct() - filters["m3u_account__in"] = m3u_accounts - else: - return JsonResponse({"error": "No accessible content"}, status=403) - try: episode_relation = M3UEpisodeRelation.objects.select_related('episode').get(**filters) except M3UEpisodeRelation.DoesNotExist: diff --git a/apps/proxy/ts_proxy/views.py b/apps/proxy/ts_proxy/views.py index 582bcc49..e31d0418 100644 --- a/apps/proxy/ts_proxy/views.py +++ b/apps/proxy/ts_proxy/views.py @@ -127,9 +127,9 @@ def stream_ts(request, channel_id): ) ChannelService.stop_channel(channel_id) - # Use max retry attempts and connection timeout from config - max_retries = 
ConfigHelper.max_retries() - retry_timeout = ConfigHelper.connection_timeout() + # Use fixed retry interval and timeout + retry_timeout = 1.5 # 1.5 seconds total timeout + retry_interval = 0.1 # 100ms between attempts wait_start_time = time.time() stream_url = None @@ -137,16 +137,18 @@ def stream_ts(request, channel_id): transcode = False profile_value = None error_reason = None + attempt = 0 - # Try to get a stream with configured retries - for attempt in range(max_retries): + # Try to get a stream with fixed interval retries + while time.time() - wait_start_time < retry_timeout: + attempt += 1 stream_url, stream_user_agent, transcode, profile_value = ( generate_stream_url(channel_id) ) if stream_url is not None: logger.info( - f"[{client_id}] Successfully obtained stream for channel {channel_id}" + f"[{client_id}] Successfully obtained stream for channel {channel_id} after {attempt} attempts" ) break @@ -158,21 +160,15 @@ def stream_ts(request, channel_id): ) break - # Don't exceed the overall connection timeout - if time.time() - wait_start_time > retry_timeout: - logger.warning( - f"[{client_id}] Connection wait timeout exceeded ({retry_timeout}s)" + # Wait 100ms before retrying + elapsed_time = time.time() - wait_start_time + remaining_time = retry_timeout - elapsed_time + if remaining_time > retry_interval: + logger.info( + f"[{client_id}] Waiting {retry_interval*1000:.0f}ms for a connection to become available (attempt {attempt}, {remaining_time:.1f}s remaining)" ) - break - - # Wait before retrying (using exponential backoff with a cap) - wait_time = min(0.5 * (2**attempt), 2.0) # Caps at 2 seconds - logger.info( - f"[{client_id}] Waiting {wait_time:.1f}s for a connection to become available (attempt {attempt+1}/{max_retries})" - ) - gevent.sleep( - wait_time - ) # FIXED: Using gevent.sleep instead of time.sleep + gevent.sleep(retry_interval) + retry_interval += 0.025 # Increase wait time by 25ms for next attempt if stream_url is None: # Make sure to release any stream locks that might have been acquired diff --git a/apps/proxy/vod_proxy/multi_worker_connection_manager.py b/apps/proxy/vod_proxy/multi_worker_connection_manager.py index 7577d2af..fefc8739 100644 --- a/apps/proxy/vod_proxy/multi_worker_connection_manager.py +++ b/apps/proxy/vod_proxy/multi_worker_connection_manager.py @@ -540,11 +540,9 @@ class RedisBackedVODConnection: } return {} - def cleanup(self, connection_manager=None): - """Clean up local resources and Redis state""" - # Get connection state before cleanup to handle profile decrementing - state = self._get_connection_state() - + def cleanup(self, connection_manager=None, current_worker_id=None): + """Smart cleanup based on worker ownership and active streams""" + # Always clean up local resources first if self.local_response: self.local_response.close() self.local_response = None @@ -552,38 +550,72 @@ class RedisBackedVODConnection: self.local_session.close() self.local_session = None - # Remove from Redis - if self.redis_client: - try: - # Use pipeline for atomic cleanup operations - pipe = self.redis_client.pipeline() + # Get current connection state to check ownership and active streams + state = self._get_connection_state() - # 1. Remove main connection state (now contains consolidated data) - pipe.delete(self.connection_key) + if not state: + logger.info(f"[{self.session_id}] No connection state found - local cleanup only") + return - # 2. 
Remove distributed lock - pipe.delete(self.lock_key) + # Check if there are active streams + if state.active_streams > 0: + # There are active streams - check ownership + if current_worker_id and state.worker_id == current_worker_id: + logger.info(f"[{self.session_id}] Active streams present ({state.active_streams}) and we own them - local cleanup only") + else: + logger.info(f"[{self.session_id}] Active streams present ({state.active_streams}) but owned by worker {state.worker_id} - local cleanup only") + return - # Execute all cleanup operations - pipe.execute() + # No active streams - we can clean up Redis state + if not self.redis_client: + logger.info(f"[{self.session_id}] No Redis client - local cleanup only") + return - logger.info(f"[{self.session_id}] Cleaned up all Redis keys (consolidated connection state, locks)") + # Acquire lock and do final check before cleanup to prevent race conditions + if not self._acquire_lock(): + logger.warning(f"[{self.session_id}] Could not acquire lock for cleanup - skipping") + return - # Decrement profile connections if we have the state and connection manager - if state and state.m3u_profile_id and connection_manager: - logger.info(f"[{self.session_id}] Decrementing profile connection count for profile {state.m3u_profile_id}") - connection_manager._decrement_profile_connections(state.m3u_profile_id) - logger.info(f"[{self.session_id}] Profile connection count decremented for profile {state.m3u_profile_id}") - else: - if not state: - logger.warning(f"[{self.session_id}] No connection state found during cleanup - cannot decrement profile connections") - elif not state.m3u_profile_id: - logger.warning(f"[{self.session_id}] No profile ID in connection state - cannot decrement profile connections") - elif not connection_manager: - logger.warning(f"[{self.session_id}] No connection manager provided - cannot decrement profile connections") + try: + # Re-check active streams with lock held to prevent race conditions + current_state = self._get_connection_state() + if not current_state: + logger.info(f"[{self.session_id}] Connection state no longer exists - cleanup already done") + return - except Exception as e: - logger.error(f"[{self.session_id}] Error cleaning up Redis state: {e}") + if current_state.active_streams > 0: + logger.info(f"[{self.session_id}] Active streams now present ({current_state.active_streams}) - skipping cleanup") + return + + # Use pipeline for atomic cleanup operations + pipe = self.redis_client.pipeline() + + # 1. Remove main connection state (contains consolidated data) + pipe.delete(self.connection_key) + + # 2. 
Remove distributed lock (will be released below anyway) + pipe.delete(self.lock_key) + + # Execute all cleanup operations + pipe.execute() + + logger.info(f"[{self.session_id}] Cleaned up Redis keys (verified no active streams)") + + # Decrement profile connections if we have the state and connection manager + if state.m3u_profile_id and connection_manager: + connection_manager._decrement_profile_connections(state.m3u_profile_id) + logger.info(f"[{self.session_id}] Profile connection count decremented for profile {state.m3u_profile_id}") + else: + if not state.m3u_profile_id: + logger.warning(f"[{self.session_id}] No profile ID in connection state - cannot decrement profile connections") + elif not connection_manager: + logger.warning(f"[{self.session_id}] No connection manager provided - cannot decrement profile connections") + + except Exception as e: + logger.error(f"[{self.session_id}] Error cleaning up Redis state: {e}") + finally: + # Always release the lock + self._release_lock() # Modify the VODConnectionManager to use Redis-backed connections @@ -694,6 +726,15 @@ class MultiWorkerVODConnectionManager: logger.info(f"[{client_id}] Worker {self.worker_id} - Found matching idle session: {matching_session_id}") effective_session_id = matching_session_id client_id = matching_session_id # Update client_id for logging consistency + + # IMMEDIATELY reserve this session by incrementing active streams to prevent cleanup + temp_connection = RedisBackedVODConnection(effective_session_id, self.redis_client) + if temp_connection.increment_active_streams(): + logger.info(f"[{client_id}] Reserved idle session - incremented active streams") + else: + logger.warning(f"[{client_id}] Failed to reserve idle session - falling back to new session") + effective_session_id = session_id + matching_session_id = None # Clear the match so we create a new connection else: logger.info(f"[{client_id}] Worker {self.worker_id} - No matching idle session found, using new session") effective_session_id = session_id @@ -761,14 +802,20 @@ class MultiWorkerVODConnectionManager: else: logger.info(f"[{client_id}] Worker {self.worker_id} - Using existing Redis-backed connection") - # Update session activity in consolidated connection state + # Transfer ownership to current worker and update session activity if redis_connection._acquire_lock(): try: state = redis_connection._get_connection_state() if state: + old_worker = state.worker_id state.last_activity = time.time() - state.worker_id = self.worker_id # Track which worker last accessed this + state.worker_id = self.worker_id # Transfer ownership to current worker redis_connection._save_connection_state(state) + + if old_worker != self.worker_id: + logger.info(f"[{client_id}] Ownership transferred from worker {old_worker} to {self.worker_id}") + else: + logger.debug(f"[{client_id}] Worker {self.worker_id} retaining ownership") finally: redis_connection._release_lock() @@ -788,8 +835,13 @@ class MultiWorkerVODConnectionManager: try: logger.info(f"[{client_id}] Worker {self.worker_id} - Starting Redis-backed stream") - # Increment active streams - redis_connection.increment_active_streams() + # Increment active streams (unless we already did it for session reuse) + if not matching_session_id: + # New session - increment active streams + redis_connection.increment_active_streams() + else: + # Reused session - we already incremented when reserving the session + logger.debug(f"[{client_id}] Using pre-reserved session - active streams already incremented") bytes_sent = 0 
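Note on the smart-cleanup logic introduced above: shared Redis state for a session is only deleted when no streams remain active, and that check is repeated under the distributed lock; every other situation falls back to cleaning up local resources only. A minimal sketch of that decision, with a hypothetical ConnectionState holder standing in for the consolidated Redis hash (names are illustrative, not the project's actual classes):

from dataclasses import dataclass
from typing import Optional

@dataclass
class ConnectionState:
    worker_id: str        # worker that currently owns the connection
    active_streams: int   # number of generators still reading from it

def should_delete_shared_state(state: Optional[ConnectionState], current_worker_id: Optional[str]) -> bool:
    """Return True only when it is safe to remove the Redis keys for this session."""
    if state is None:
        # Nothing recorded in Redis; another worker already cleaned up.
        return False
    if state.active_streams > 0:
        # Streams still running: leave shared state alone whether or not we own it.
        return False
    # No active streams; the real code re-acquires the distributed lock and
    # re-reads the state at this point to avoid racing a session reservation.
    return True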
chunk_count = 0 @@ -819,13 +871,13 @@ class MultiWorkerVODConnectionManager: redis_connection.decrement_active_streams() decremented = True - # Schedule cleanup if no active streams after normal completion + # Schedule smart cleanup if no active streams after normal completion if not redis_connection.has_active_streams(): def delayed_cleanup(): time.sleep(1) # Wait 1 second - if not redis_connection.has_active_streams(): - logger.info(f"[{client_id}] Worker {self.worker_id} - Cleaning up idle Redis connection after normal completion") - redis_connection.cleanup(connection_manager=self) + # Smart cleanup: check active streams and ownership + logger.info(f"[{client_id}] Worker {self.worker_id} - Checking for smart cleanup after normal completion") + redis_connection.cleanup(connection_manager=self, current_worker_id=self.worker_id) import threading cleanup_thread = threading.Thread(target=delayed_cleanup) @@ -838,13 +890,13 @@ class MultiWorkerVODConnectionManager: redis_connection.decrement_active_streams() decremented = True - # Schedule cleanup if no active streams + # Schedule smart cleanup if no active streams if not redis_connection.has_active_streams(): def delayed_cleanup(): time.sleep(1) # Wait 1 second - if not redis_connection.has_active_streams(): - logger.info(f"[{client_id}] Worker {self.worker_id} - Cleaning up idle Redis connection") - redis_connection.cleanup(connection_manager=self) + # Smart cleanup: check active streams and ownership + logger.info(f"[{client_id}] Worker {self.worker_id} - Checking for smart cleanup after client disconnect") + redis_connection.cleanup(connection_manager=self, current_worker_id=self.worker_id) import threading cleanup_thread = threading.Thread(target=delayed_cleanup) @@ -856,7 +908,8 @@ class MultiWorkerVODConnectionManager: if not decremented: redis_connection.decrement_active_streams() decremented = True - redis_connection.cleanup(connection_manager=self) + # Smart cleanup on error - immediate cleanup since we're in error state + redis_connection.cleanup(connection_manager=self, current_worker_id=self.worker_id) yield b"Error: Stream interrupted" finally: diff --git a/apps/proxy/vod_proxy/views.py b/apps/proxy/vod_proxy/views.py index f24dc3d8..00ed8a10 100644 --- a/apps/proxy/vod_proxy/views.py +++ b/apps/proxy/vod_proxy/views.py @@ -176,14 +176,15 @@ class VODStreamView(View): logger.error(f"[VOD-ERROR] No stream URL available for {content_type} {content_id}") return HttpResponse("No stream URL available", status=503) - # Get M3U profile - m3u_profile = self._get_m3u_profile(m3u_account, profile_id) + # Get M3U profile (returns profile and current connection count) + profile_result = self._get_m3u_profile(m3u_account, profile_id, session_id) - if not m3u_profile: + if not profile_result or not profile_result[0]: logger.error(f"[VOD-ERROR] No suitable M3U profile found for {content_type} {content_id}") return HttpResponse("No available stream", status=503) - logger.info(f"[VOD-PROFILE] Using M3U profile: {m3u_profile.id} (max_streams: {m3u_profile.max_streams}, current: {m3u_profile.current_viewers})") + m3u_profile, current_connections = profile_result + logger.info(f"[VOD-PROFILE] Using M3U profile: {m3u_profile.id} (max_streams: {m3u_profile.max_streams}, current: {current_connections})") # Connection tracking is handled by the connection manager # Transform URL based on profile @@ -279,11 +280,13 @@ class VODStreamView(View): logger.error(f"[VOD-HEAD] No stream URL available for {content_type} {content_id}") return HttpResponse("No 
stream URL available", status=503) - # Get M3U profile - m3u_profile = self._get_m3u_profile(m3u_account, profile_id) - if not m3u_profile: - logger.error(f"[VOD-HEAD] No M3U profile found") - return HttpResponse("Profile not found", status=404) + # Get M3U profile (returns profile and current connection count) + profile_result = self._get_m3u_profile(m3u_account, profile_id, session_id) + if not profile_result or not profile_result[0]: + logger.error(f"[VOD-HEAD] No M3U profile found or all profiles at capacity") + return HttpResponse("No available stream", status=503) + + m3u_profile, current_connections = profile_result # Transform URL if needed final_stream_url = self._transform_url(stream_url, m3u_profile) @@ -517,10 +520,63 @@ class VODStreamView(View): logger.error(f"[VOD-URL] Error getting stream URL from relation: {e}", exc_info=True) return None - def _get_m3u_profile(self, m3u_account, profile_id): - """Get appropriate M3U profile for streaming""" + def _get_m3u_profile(self, m3u_account, profile_id, session_id=None): + """Get appropriate M3U profile for streaming using Redis-based viewer counts + + Args: + m3u_account: M3UAccount instance + profile_id: Optional specific profile ID requested + session_id: Optional session ID to check for existing connections + + Returns: + tuple: (M3UAccountProfile, current_connections) or None if no profile found + """ try: - # If specific profile requested, try to use it + from core.utils import RedisClient + redis_client = RedisClient.get_client() + + if not redis_client: + logger.warning("Redis not available, falling back to default profile") + default_profile = M3UAccountProfile.objects.filter( + m3u_account=m3u_account, + is_active=True, + is_default=True + ).first() + return (default_profile, 0) if default_profile else None + + # Check if this session already has an active connection + if session_id: + persistent_connection_key = f"vod_persistent_connection:{session_id}" + connection_data = redis_client.hgetall(persistent_connection_key) + + if connection_data: + # Decode Redis hash data + decoded_data = {} + for k, v in connection_data.items(): + k_str = k.decode('utf-8') if isinstance(k, bytes) else k + v_str = v.decode('utf-8') if isinstance(v, bytes) else v + decoded_data[k_str] = v_str + + existing_profile_id = decoded_data.get('m3u_profile_id') + if existing_profile_id: + try: + existing_profile = M3UAccountProfile.objects.get( + id=int(existing_profile_id), + m3u_account=m3u_account, + is_active=True + ) + # Get current connections for logging + profile_connections_key = f"profile_connections:{existing_profile.id}" + current_connections = int(redis_client.get(profile_connections_key) or 0) + + logger.info(f"[PROFILE-SELECTION] Session {session_id} reusing existing profile {existing_profile.id}: {current_connections}/{existing_profile.max_streams} connections") + return (existing_profile, current_connections) + except (M3UAccountProfile.DoesNotExist, ValueError): + logger.warning(f"[PROFILE-SELECTION] Session {session_id} has invalid profile ID {existing_profile_id}, selecting new profile") + except Exception as e: + logger.warning(f"[PROFILE-SELECTION] Error checking existing profile for session {session_id}: {e}") + else: + logger.debug(f"[PROFILE-SELECTION] Session {session_id} exists but has no profile ID stored") # If specific profile requested, try to use it if profile_id: try: profile = M3UAccountProfile.objects.get( @@ -528,24 +584,46 @@ class VODStreamView(View): m3u_account=m3u_account, is_active=True ) - if 
profile.current_viewers < profile.max_streams or profile.max_streams == 0: - return profile - except M3UAccountProfile.DoesNotExist: - pass + # Check Redis-based current connections + profile_connections_key = f"profile_connections:{profile.id}" + current_connections = int(redis_client.get(profile_connections_key) or 0) - # Find available profile ordered by current usage (least loaded first) - profiles = M3UAccountProfile.objects.filter( + if profile.max_streams == 0 or current_connections < profile.max_streams: + logger.info(f"[PROFILE-SELECTION] Using requested profile {profile.id}: {current_connections}/{profile.max_streams} connections") + return (profile, current_connections) + else: + logger.warning(f"[PROFILE-SELECTION] Requested profile {profile.id} is at capacity: {current_connections}/{profile.max_streams}") + except M3UAccountProfile.DoesNotExist: + logger.warning(f"[PROFILE-SELECTION] Requested profile {profile_id} not found") + + # Get active profiles ordered by priority (default first) + m3u_profiles = M3UAccountProfile.objects.filter( m3u_account=m3u_account, is_active=True - ).order_by('current_viewers') + ) + + default_profile = m3u_profiles.filter(is_default=True).first() + if not default_profile: + logger.error(f"[PROFILE-SELECTION] No default profile found for M3U account {m3u_account.id}") + return None + + # Check profiles in order: default first, then others + profiles = [default_profile] + list(m3u_profiles.filter(is_default=False)) for profile in profiles: - # Check if profile has available connection slots - if profile.current_viewers < profile.max_streams or profile.max_streams == 0: - return profile + profile_connections_key = f"profile_connections:{profile.id}" + current_connections = int(redis_client.get(profile_connections_key) or 0) - # Fallback to default profile even if over limit - return profiles.filter(is_default=True).first() + # Check if profile has available connection slots + if profile.max_streams == 0 or current_connections < profile.max_streams: + logger.info(f"[PROFILE-SELECTION] Selected profile {profile.id} ({profile.name}): {current_connections}/{profile.max_streams} connections") + return (profile, current_connections) + else: + logger.debug(f"[PROFILE-SELECTION] Profile {profile.id} at capacity: {current_connections}/{profile.max_streams}") + + # All profiles are at capacity - return None to trigger error response + logger.error(f"[PROFILE-SELECTION] All profiles at capacity for M3U account {m3u_account.id}, rejecting request") + return None except Exception as e: logger.error(f"Error getting M3U profile: {e}") diff --git a/apps/vod/tasks.py b/apps/vod/tasks.py index 3c429e15..1a2e51ca 100644 --- a/apps/vod/tasks.py +++ b/apps/vod/tasks.py @@ -62,9 +62,9 @@ def refresh_vod_content(account_id): logger.info(f"Batch VOD refresh completed for account {account.name} in {duration:.2f} seconds") - # Cleanup orphaned VOD content after refresh - logger.info("Starting cleanup of orphaned VOD content") - cleanup_result = cleanup_orphaned_vod_content(scan_start_time=start_time) + # Cleanup orphaned VOD content after refresh (scoped to this account only) + logger.info(f"Starting cleanup of orphaned VOD content for account {account.name}") + cleanup_result = cleanup_orphaned_vod_content(account_id=account_id, scan_start_time=start_time) logger.info(f"VOD cleanup completed: {cleanup_result}") # Send completion notification @@ -303,7 +303,7 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N # Prepare movie properties 
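Note on the Redis-backed profile selection above: instead of the persisted current_viewers column, capacity is judged from counters stored under profile_connections:<profile_id>, with max_streams == 0 treated as unlimited. A hedged sketch of the capacity check and the matching increment/decrement a connection manager would perform (the key naming follows the diff; the client setup and helper names are illustrative):

import redis

r = redis.Redis()  # illustrative; the project obtains its client through its own RedisClient helper

def has_capacity(profile_id: int, max_streams: int) -> tuple[bool, int]:
    """Return (slot available, current connection count) for one M3U profile."""
    current = int(r.get(f"profile_connections:{profile_id}") or 0)
    return (max_streams == 0 or current < max_streams), current

def increment_profile_connections(profile_id: int) -> int:
    return r.incr(f"profile_connections:{profile_id}")

def decrement_profile_connections(profile_id: int) -> int:
    value = r.decr(f"profile_connections:{profile_id}")
    if value < 0:
        # Clamp if a stray decrement races a cleanup elsewhere.
        r.set(f"profile_connections:{profile_id}", 0)
        value = 0
    return value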
description = movie_data.get('description') or movie_data.get('plot') or '' - rating = movie_data.get('rating') or movie_data.get('vote_average') or '' + rating = normalize_rating(movie_data.get('rating') or movie_data.get('vote_average')) genre = movie_data.get('genre') or movie_data.get('category_name') or '' duration_secs = extract_duration_from_data(movie_data) trailer_raw = movie_data.get('trailer') or movie_data.get('youtube_trailer') or '' @@ -608,7 +608,7 @@ def process_series_batch(account, batch, categories, relations, scan_start_time= # Prepare series properties description = series_data.get('plot', '') - rating = series_data.get('rating', '') + rating = normalize_rating(series_data.get('rating')) genre = series_data.get('genre', '') logo_url = series_data.get('cover') or '' @@ -896,6 +896,33 @@ def extract_duration_from_data(movie_data): return duration_secs +def normalize_rating(rating_value): + """Normalize rating value by converting commas to decimals and validating as float""" + if not rating_value: + return None + + try: + # Convert to string for processing + rating_str = str(rating_value).strip() + + if not rating_str or rating_str == '': + return None + + # Replace comma with decimal point (European format) + rating_str = rating_str.replace(',', '.') + + # Try to convert to float + rating_float = float(rating_str) + + # Return as string to maintain compatibility with existing code + # but ensure it's a valid numeric format + return str(rating_float) + except (ValueError, TypeError, AttributeError): + # If conversion fails, discard the rating + logger.debug(f"Invalid rating value discarded: {rating_value}") + return None + + def extract_year(date_string): """Extract year from date string""" if not date_string: @@ -1021,9 +1048,9 @@ def refresh_series_episodes(account, series, external_series_id, episodes_data=N if should_update_field(series.description, info.get('plot')): series.description = extract_string_from_array_or_string(info.get('plot')) updated = True - if (info.get('rating') and str(info.get('rating')).strip() and - (not series.rating or not str(series.rating).strip())): - series.rating = info.get('rating') + normalized_rating = normalize_rating(info.get('rating')) + if normalized_rating and (not series.rating or not str(series.rating).strip()): + series.rating = normalized_rating updated = True if should_update_field(series.genre, info.get('genre')): series.genre = extract_string_from_array_or_string(info.get('genre')) @@ -1124,7 +1151,7 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None) # Extract episode metadata description = info.get('plot') or info.get('overview', '') if info else '' - rating = info.get('rating', '') if info else '' + rating = normalize_rating(info.get('rating')) if info else None air_date = extract_date_from_data(info) if info else None duration_secs = info.get('duration_secs') if info else None tmdb_id = info.get('tmdb_id') if info else None @@ -1308,7 +1335,7 @@ def batch_refresh_series_episodes(account_id, series_ids=None): @shared_task -def cleanup_orphaned_vod_content(stale_days=0, scan_start_time=None): +def cleanup_orphaned_vod_content(stale_days=0, scan_start_time=None, account_id=None): """Clean up VOD content that has no M3U relations or has stale relations""" from datetime import timedelta @@ -1318,30 +1345,44 @@ def cleanup_orphaned_vod_content(stale_days=0, scan_start_time=None): # Calculate cutoff date for stale relations cutoff_date = reference_time - timedelta(days=stale_days) + # Build base 
query filters + base_filters = {'last_seen__lt': cutoff_date} + if account_id: + base_filters['m3u_account_id'] = account_id + logger.info(f"Cleaning up stale VOD content for account {account_id}") + else: + logger.info("Cleaning up stale VOD content across all accounts") + # Clean up stale movie relations (haven't been seen in the specified days) - stale_movie_relations = M3UMovieRelation.objects.filter(last_seen__lt=cutoff_date) + stale_movie_relations = M3UMovieRelation.objects.filter(**base_filters) stale_movie_count = stale_movie_relations.count() stale_movie_relations.delete() # Clean up stale series relations - stale_series_relations = M3USeriesRelation.objects.filter(last_seen__lt=cutoff_date) + stale_series_relations = M3USeriesRelation.objects.filter(**base_filters) stale_series_count = stale_series_relations.count() stale_series_relations.delete() # Clean up stale episode relations - stale_episode_relations = M3UEpisodeRelation.objects.filter(last_seen__lt=cutoff_date) + stale_episode_relations = M3UEpisodeRelation.objects.filter(**base_filters) stale_episode_count = stale_episode_relations.count() stale_episode_relations.delete() - # Clean up movies with no relations (orphaned) - orphaned_movies = Movie.objects.filter(m3u_relations__isnull=True) - orphaned_movie_count = orphaned_movies.count() - orphaned_movies.delete() + # Clean up movies with no relations (orphaned) - only if no account_id specified (global cleanup) + if not account_id: + orphaned_movies = Movie.objects.filter(m3u_relations__isnull=True) + orphaned_movie_count = orphaned_movies.count() + orphaned_movies.delete() - # Clean up series with no relations (orphaned) - orphaned_series = Series.objects.filter(m3u_relations__isnull=True) - orphaned_series_count = orphaned_series.count() - orphaned_series.delete() + # Clean up series with no relations (orphaned) - only if no account_id specified (global cleanup) + orphaned_series = Series.objects.filter(m3u_relations__isnull=True) + orphaned_series_count = orphaned_series.count() + orphaned_series.delete() + else: + # When cleaning up for specific account, we don't remove orphaned content + # as other accounts might still reference it + orphaned_movie_count = 0 + orphaned_series_count = 0 # Episodes will be cleaned up via CASCADE when series are deleted @@ -1783,8 +1824,9 @@ def refresh_movie_advanced_data(m3u_movie_relation_id, force_refresh=False): if info.get('plot') and info.get('plot') != movie.description: movie.description = info.get('plot') updated = True - if info.get('rating') and info.get('rating') != movie.rating: - movie.rating = info.get('rating') + normalized_rating = normalize_rating(info.get('rating')) + if normalized_rating and normalized_rating != movie.rating: + movie.rating = normalized_rating updated = True if info.get('genre') and info.get('genre') != movie.genre: movie.genre = info.get('genre') diff --git a/core/migrations/0009_m3u_hash_settings.py b/core/migrations/0009_m3u_hash_settings.py index eab5f141..3c6283fa 100644 --- a/core/migrations/0009_m3u_hash_settings.py +++ b/core/migrations/0009_m3u_hash_settings.py @@ -8,7 +8,7 @@ def preload_core_settings(apps, schema_editor): CoreSettings.objects.create( key=slugify("M3U Hash Key"), name="M3U Hash Key", - value="name,url,tvg_id", + value="url", ) class Migration(migrations.Migration): diff --git a/core/models.py b/core/models.py index ba040666..3a5895ba 100644 --- a/core/models.py +++ b/core/models.py @@ -1,4 +1,5 @@ # core/models.py +from django.conf import settings from django.db import 
models from django.utils.text import slugify from django.core.exceptions import ValidationError @@ -158,8 +159,10 @@ DVR_TV_FALLBACK_DIR_KEY = slugify("DVR TV Fallback Dir") DVR_TV_FALLBACK_TEMPLATE_KEY = slugify("DVR TV Fallback Template") DVR_MOVIE_FALLBACK_TEMPLATE_KEY = slugify("DVR Movie Fallback Template") DVR_COMSKIP_ENABLED_KEY = slugify("DVR Comskip Enabled") +DVR_COMSKIP_CUSTOM_PATH_KEY = slugify("DVR Comskip Custom Path") DVR_PRE_OFFSET_MINUTES_KEY = slugify("DVR Pre-Offset Minutes") DVR_POST_OFFSET_MINUTES_KEY = slugify("DVR Post-Offset Minutes") +SYSTEM_TIME_ZONE_KEY = slugify("System Time Zone") class CoreSettings(models.Model): @@ -274,6 +277,27 @@ class CoreSettings(models.Model): except cls.DoesNotExist: return False + @classmethod + def get_dvr_comskip_custom_path(cls): + """Return configured comskip.ini path or empty string if unset.""" + try: + return cls.objects.get(key=DVR_COMSKIP_CUSTOM_PATH_KEY).value + except cls.DoesNotExist: + return "" + + @classmethod + def set_dvr_comskip_custom_path(cls, path: str | None): + """Persist the comskip.ini path setting, normalizing nulls to empty string.""" + value = (path or "").strip() + obj, _ = cls.objects.get_or_create( + key=DVR_COMSKIP_CUSTOM_PATH_KEY, + defaults={"name": "DVR Comskip Custom Path", "value": value}, + ) + if obj.value != value: + obj.value = value + obj.save(update_fields=["value"]) + return value + @classmethod def get_dvr_pre_offset_minutes(cls): """Minutes to start recording before scheduled start (default 0).""" @@ -302,6 +326,30 @@ class CoreSettings(models.Model): except Exception: return 0 + @classmethod + def get_system_time_zone(cls): + """Return configured system time zone or fall back to Django settings.""" + try: + value = cls.objects.get(key=SYSTEM_TIME_ZONE_KEY).value + if value: + return value + except cls.DoesNotExist: + pass + return getattr(settings, "TIME_ZONE", "UTC") or "UTC" + + @classmethod + def set_system_time_zone(cls, tz_name: str | None): + """Persist the desired system time zone identifier.""" + value = (tz_name or "").strip() or getattr(settings, "TIME_ZONE", "UTC") or "UTC" + obj, _ = cls.objects.get_or_create( + key=SYSTEM_TIME_ZONE_KEY, + defaults={"name": "System Time Zone", "value": value}, + ) + if obj.value != value: + obj.value = value + obj.save(update_fields=["value"]) + return value + @classmethod def get_dvr_series_rules(cls): """Return list of series recording rules. 
Each: {tvg_id, title, mode: 'all'|'new'}""" diff --git a/core/tasks.py b/core/tasks.py index 7bd3975b..f757613b 100644 --- a/core/tasks.py +++ b/core/tasks.py @@ -513,7 +513,7 @@ def rehash_streams(keys): for obj in batch: # Generate new hash - new_hash = Stream.generate_hash_key(obj.name, obj.url, obj.tvg_id, keys) + new_hash = Stream.generate_hash_key(obj.name, obj.url, obj.tvg_id, keys, m3u_id=obj.m3u_account_id) # Check if this hash already exists in our tracking dict or in database if new_hash in hash_keys: diff --git a/dispatcharr/settings.py b/dispatcharr/settings.py index 09a94882..4dbd603b 100644 --- a/dispatcharr/settings.py +++ b/dispatcharr/settings.py @@ -225,6 +225,10 @@ CELERY_BEAT_SCHEDULE = { "task": "core.tasks.scan_and_process_files", # Direct task call "schedule": 20.0, # Every 20 seconds }, + "maintain-recurring-recordings": { + "task": "apps.channels.tasks.maintain_recurring_recordings", + "schedule": 3600.0, # Once an hour ensure recurring schedules stay ahead + }, } MEDIA_ROOT = BASE_DIR / "media" diff --git a/docker/Dockerfile b/docker/Dockerfile index ec24c818..dc437227 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -4,11 +4,15 @@ ARG REPO_NAME=dispatcharr ARG BASE_TAG=base # --- Build frontend --- -FROM node:20 AS frontend-builder + +FROM node:24 AS frontend-builder + WORKDIR /app/frontend COPY ./frontend /app/frontend -RUN corepack enable && corepack prepare yarn@stable --activate && \ - yarn install && yarn build && \ +# remove any node_modules that may have been copied from the host (x86) +RUN rm -rf node_modules || true; \ + npm install --no-audit --progress=false; +RUN npm run build; \ rm -rf node_modules .cache # --- Redeclare build arguments for the next stage --- diff --git a/docker/docker-compose.aio.yml b/docker/docker-compose.aio.yml index 0cf387d5..90cd8654 100644 --- a/docker/docker-compose.aio.yml +++ b/docker/docker-compose.aio.yml @@ -9,7 +9,6 @@ services: - 9191:9191 volumes: - dispatcharr_data:/data - - ./data:/data environment: - DISPATCHARR_ENV=aio - REDIS_HOST=localhost diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 88f9fc84..dd989c81 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -53,7 +53,7 @@ services: command: > bash -c " cd /app && - celery -A dispatcharr worker -l info + nice -n 5 celery -A dispatcharr worker -l info " db: diff --git a/docker/uwsgi.debug.ini b/docker/uwsgi.debug.ini index e049df87..fa94df92 100644 --- a/docker/uwsgi.debug.ini +++ b/docker/uwsgi.debug.ini @@ -8,8 +8,8 @@ exec-before = python /app/scripts/wait_for_redis.py ; Start Redis first attach-daemon = redis-server ; Then start other services -attach-daemon = celery -A dispatcharr worker --autoscale=6,1 -attach-daemon = celery -A dispatcharr beat +attach-daemon = nice -n 5 celery -A dispatcharr worker --autoscale=6,1 +attach-daemon = nice -n 5 celery -A dispatcharr beat attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application attach-daemon = cd /app/frontend && npm run dev diff --git a/docker/uwsgi.dev.ini b/docker/uwsgi.dev.ini index 7e50f2ef..6eca871d 100644 --- a/docker/uwsgi.dev.ini +++ b/docker/uwsgi.dev.ini @@ -10,8 +10,8 @@ exec-pre = python /app/scripts/wait_for_redis.py ; Start Redis first attach-daemon = redis-server ; Then start other services -attach-daemon = celery -A dispatcharr worker --autoscale=6,1 -attach-daemon = celery -A dispatcharr beat +attach-daemon = nice -n 5 celery -A dispatcharr worker --autoscale=6,1 +attach-daemon = nice -n 5 celery -A dispatcharr beat 
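Note on the rehash change above: the default M3U hash key is reduced to the stream URL, and rehash_streams now passes m3u_id so identical URLs coming from different accounts hash to different keys. A hedged sketch of a generate_hash_key along those lines (the real Stream.generate_hash_key may differ in field handling and separator choice):

import hashlib

def generate_hash_key(name, url, tvg_id, keys, m3u_id=None):
    """Hash the configured fields, optionally scoped to an M3U account id."""
    values = {"name": name or "", "url": url or "", "tvg_id": tvg_id or ""}
    parts = [values[k] for k in keys if k in values]
    if m3u_id is not None:
        parts.append(str(m3u_id))
    return hashlib.sha256("|".join(parts).encode("utf-8")).hexdigest()

# With the new default setting ("url"):
#   generate_hash_key("ESPN", "http://host/live/1.ts", "espn.us", ["url"], m3u_id=3)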
attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application attach-daemon = cd /app/frontend && npm run dev diff --git a/docker/uwsgi.ini b/docker/uwsgi.ini index b35ea5bf..f763c3bc 100644 --- a/docker/uwsgi.ini +++ b/docker/uwsgi.ini @@ -10,8 +10,8 @@ exec-pre = python /app/scripts/wait_for_redis.py ; Start Redis first attach-daemon = redis-server ; Then start other services -attach-daemon = celery -A dispatcharr worker --autoscale=6,1 -attach-daemon = celery -A dispatcharr beat +attach-daemon = nice -n 5 celery -A dispatcharr worker --autoscale=6,1 +attach-daemon = nice -n 5 celery -A dispatcharr beat attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application # Core settings diff --git a/frontend/package-lock.json b/frontend/package-lock.json index b3332a13..7ae9ab63 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -1,11 +1,11 @@ { - "name": "vite", + "name": "frontend", "version": "0.0.0", "lockfileVersion": 3, "requires": true, "packages": { "": { - "name": "vite", + "name": "frontend", "version": "0.0.0", "dependencies": { "@dnd-kit/core": "^6.3.1", @@ -20,15 +20,15 @@ "@mantine/hooks": "~8.0.1", "@mantine/notifications": "~8.0.1", "@tanstack/react-table": "^8.21.2", - "allotment": "^1.20.3", + "allotment": "^1.20.4", "dayjs": "^1.11.13", "formik": "^2.4.6", "hls.js": "^1.5.20", "immer": "^10.1.1", "lucide-react": "^0.511.0", "mpegts.js": "^1.8.0", - "react": "^19.0.0", - "react-dom": "^19.0.0", + "react": "^19.1.0", + "react-dom": "^19.1.0", "react-draggable": "^4.4.6", "react-pro-sidebar": "^1.1.0", "react-router-dom": "^7.3.0", @@ -42,17 +42,66 @@ }, "devDependencies": { "@eslint/js": "^9.21.0", - "@types/react": "^19.0.10", - "@types/react-dom": "^19.0.4", - "@vitejs/plugin-react-swc": "^3.8.0", + "@swc/core": "npm:@swc/wasm@1.13.20", + "@swc/wasm": "^1.13.20", + "@testing-library/dom": "^10.4.1", + "@testing-library/jest-dom": "^6.8.0", + "@testing-library/react": "^16.3.0", + "@testing-library/user-event": "^14.6.1", + "@types/react": "^19.1.0", + "@types/react-dom": "^19.1.0", + "@vitejs/plugin-react-swc": "^4.1.0", "eslint": "^9.21.0", "eslint-plugin-react-hooks": "^5.1.0", "eslint-plugin-react-refresh": "^0.4.19", "globals": "^15.15.0", + "jsdom": "^27.0.0", "prettier": "^3.5.3", - "vite": "^6.2.0" + "vite": "^6.2.0", + "vitest": "^3.2.4" } }, + "node_modules/@adobe/css-tools": { + "version": "4.4.4", + "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.4.4.tgz", + "integrity": "sha512-Elp+iwUx5rN5+Y8xLt5/GRoG20WGoDCQ/1Fb+1LiGtvwbDavuSk0jhD/eZdckHAuzcDzccnkv+rEjyWfRx18gg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@asamuzakjp/css-color": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-4.0.4.tgz", + "integrity": "sha512-cKjSKvWGmAziQWbCouOsFwb14mp1betm8Y7Fn+yglDMUUu3r9DCbJ9iJbeFDenLMqFbIMC0pQP8K+B8LAxX3OQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@csstools/css-calc": "^2.1.4", + "@csstools/css-color-parser": "^3.0.10", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "lru-cache": "^11.1.0" + } + }, + "node_modules/@asamuzakjp/dom-selector": { + "version": "6.5.5", + "resolved": "https://registry.npmjs.org/@asamuzakjp/dom-selector/-/dom-selector-6.5.5.tgz", + "integrity": "sha512-kI2MX9pmImjxWT8nxDZY+MuN6r1jJGe7WxizEbsAEPB/zxfW5wYLIiPG1v3UKgEOOP8EsDkp0ZL99oRFAdPM8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@asamuzakjp/nwsapi": "^2.3.9", + "bidi-js": "^1.0.3", + 
"css-tree": "^3.1.0", + "is-potential-custom-element-name": "^1.0.1" + } + }, + "node_modules/@asamuzakjp/nwsapi": { + "version": "2.3.9", + "resolved": "https://registry.npmjs.org/@asamuzakjp/nwsapi/-/nwsapi-2.3.9.tgz", + "integrity": "sha512-n8GuYSrI9bF7FFZ/SjhwevlHc8xaVlb/7HmHelnc/PZXBD2ZR49NnN9sMMuDdEGPeeRQ5d0hqlSlEpgCX3Wl0Q==", + "dev": true, + "license": "MIT" + }, "node_modules/@babel/code-frame": { "version": "7.26.2", "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.26.2.tgz", @@ -195,6 +244,144 @@ "node": ">=6.9.0" } }, + "node_modules/@csstools/color-helpers": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@csstools/color-helpers/-/color-helpers-5.1.0.tgz", + "integrity": "sha512-S11EXWJyy0Mz5SYvRmY8nJYTFFd1LCNV+7cXyAgQtOOuzb4EsgfqDufL+9esx72/eLhsRdGZwaldu/h+E4t4BA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + } + }, + "node_modules/@csstools/css-calc": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@csstools/css-calc/-/css-calc-2.1.4.tgz", + "integrity": "sha512-3N8oaj+0juUw/1H3YwmDDJXCgTB1gKU6Hc/bB502u9zR0q2vd786XJH9QfrKIEgFlZmhZiq6epXl4rHqhzsIgQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/css-color-parser": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@csstools/css-color-parser/-/css-color-parser-3.1.0.tgz", + "integrity": "sha512-nbtKwh3a6xNVIp/VRuXV64yTKnb1IjTAEEh3irzS+HkKjAOYLTGNb9pmVNntZ8iVBHcWDA2Dof0QtPgFI1BaTA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "@csstools/color-helpers": "^5.1.0", + "@csstools/css-calc": "^2.1.4" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/css-parser-algorithms": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/@csstools/css-parser-algorithms/-/css-parser-algorithms-3.0.5.tgz", + "integrity": "sha512-DaDeUkXZKjdGhgYaHNJTV9pV7Y9B3b644jCLs9Upc3VeNGg6LWARAT6O+Q+/COo+2gg/bM5rhpMAtf70WqfBdQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/css-syntax-patches-for-csstree": { + "version": "1.0.14", + "resolved": "https://registry.npmjs.org/@csstools/css-syntax-patches-for-csstree/-/css-syntax-patches-for-csstree-1.0.14.tgz", + "integrity": "sha512-zSlIxa20WvMojjpCSy8WrNpcZ61RqfTfX3XTaOeVlGJrt/8HF3YbzgFZa01yTbT4GWQLwfTcC3EB8i3XnB647Q==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": 
"opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/css-tokenizer": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@csstools/css-tokenizer/-/css-tokenizer-3.0.4.tgz", + "integrity": "sha512-Vd/9EVDiu6PPJt9yAh6roZP6El1xHrdvIVGjyBsHR0RYwNHgL7FJPyIIW4fANJNG6FtyZfvlRPpFI4ZM/lubvw==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + } + }, "node_modules/@dnd-kit/accessibility": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/@dnd-kit/accessibility/-/accessibility-3.1.1.tgz", @@ -408,6 +595,278 @@ "integrity": "sha512-snKqtPW01tN0ui7yu9rGv69aJXr/a/Ywvl11sUjNtEcRc+ng/mQriFL0wLXMef74iHa/EkftbDzU9F8iFbH+zg==", "license": "MIT" }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.1.tgz", + "integrity": "sha512-kfYGy8IdzTGy+z0vFGvExZtxkFlA4zAxgKEahG9KE1ScBjpQnFsNOX8KTU5ojNru5ed5CVoJYXFtoxaq5nFbjQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.1.tgz", + "integrity": "sha512-dp+MshLYux6j/JjdqVLnMglQlFu+MuVeNrmT5nk6q07wNhCdSnB7QZj+7G8VMUGh1q+vj2Bq8kRsuyA00I/k+Q==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.1.tgz", + "integrity": "sha512-50tM0zCJW5kGqgG7fQ7IHvQOcAn9TKiVRuQ/lN0xR+T2lzEFvAi1ZcS8DiksFcEpf1t/GYOeOfCAgDHFpkiSmA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.1.tgz", + "integrity": "sha512-GCj6WfUtNldqUzYkN/ITtlhwQqGWu9S45vUXs7EIYf+7rCiiqH9bCloatO9VhxsL0Pji+PF4Lz2XXCES+Q8hDw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.1.tgz", + "integrity": "sha512-5hEZKPf+nQjYoSr/elb62U19/l1mZDdqidGfmFutVUjjUZrOazAtwK+Kr+3y0C/oeJfLlxo9fXb1w7L+P7E4FQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.1.tgz", + "integrity": "sha512-hxVnwL2Dqs3fM1IWq8Iezh0cX7ZGdVhbTfnOy5uURtao5OIVCEyj9xIzemDi7sRvKsuSdtCAhMKarxqtlyVyfA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { 
+ "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.1.tgz", + "integrity": "sha512-1MrCZs0fZa2g8E+FUo2ipw6jw5qqQiH+tERoS5fAfKnRx6NXH31tXBKI3VpmLijLH6yriMZsxJtaXUyFt/8Y4A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.1.tgz", + "integrity": "sha512-0IZWLiTyz7nm0xuIs0q1Y3QWJC52R8aSXxe40VUxm6BB1RNmkODtW6LHvWRrGiICulcX7ZvyH6h5fqdLu4gkww==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.1.tgz", + "integrity": "sha512-NdKOhS4u7JhDKw9G3cY6sWqFcnLITn6SqivVArbzIaf3cemShqfLGHYMx8Xlm/lBit3/5d7kXvriTUGa5YViuQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.1.tgz", + "integrity": "sha512-jaN3dHi0/DDPelk0nLcXRm1q7DNJpjXy7yWaWvbfkPvI+7XNSc/lDOnCLN7gzsyzgu6qSAmgSvP9oXAhP973uQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.1.tgz", + "integrity": "sha512-OJykPaF4v8JidKNGz8c/q1lBO44sQNUQtq1KktJXdBLn1hPod5rE/Hko5ugKKZd+D2+o1a9MFGUEIUwO2YfgkQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.1.tgz", + "integrity": "sha512-nGfornQj4dzcq5Vp835oM/o21UMlXzn79KobKlcs3Wz9smwiifknLy4xDCLUU0BWp7b/houtdrgUz7nOGnfIYg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.1.tgz", + "integrity": "sha512-1osBbPEFYwIE5IVB/0g2X6i1qInZa1aIoj1TdL4AaAb55xIIgbg8Doq6a5BzYWgr+tEcDzYH67XVnTmUzL+nXg==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.1.tgz", + "integrity": "sha512-/6VBJOwUf3TdTvJZ82qF3tbLuWsscd7/1w+D9LH0W/SqUgM5/JJD0lrJ1fVIfZsqB6RFmLCe0Xz3fmZc3WtyVg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.1.tgz", + "integrity": 
"sha512-nSut/Mx5gnilhcq2yIMLMe3Wl4FK5wx/o0QuuCLMtmJn+WeWYoEGDN1ipcN72g1WHsnIbxGXd4i/MF0gTcuAjQ==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.1.tgz", + "integrity": "sha512-cEECeLlJNfT8kZHqLarDBQso9a27o2Zd2AQ8USAEoGtejOrCYHNtKP8XQhMDJMtthdF4GBmjR2au3x1udADQQQ==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, "node_modules/@esbuild/linux-x64": { "version": "0.25.1", "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.1.tgz", @@ -425,6 +884,142 @@ "node": ">=18" } }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.1.tgz", + "integrity": "sha512-O96poM2XGhLtpTh+s4+nP7YCCAfb4tJNRVZHfIE7dgmax+yMP2WgMd2OecBuaATHKTHsLWHQeuaxMRnCsH8+5g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.1.tgz", + "integrity": "sha512-X53z6uXip6KFXBQ+Krbx25XHV/NCbzryM6ehOAeAil7X7oa4XIq+394PWGnwaSQ2WRA0KI6PUO6hTO5zeF5ijA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.1.tgz", + "integrity": "sha512-Na9T3szbXezdzM/Kfs3GcRQNjHzM6GzFBeU1/6IV/npKP5ORtp9zbQjvkDJ47s6BCgaAZnnnu/cY1x342+MvZg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.1.tgz", + "integrity": "sha512-T3H78X2h1tszfRSf+txbt5aOp/e7TAz3ptVKu9Oyir3IAOFPGV6O9c2naym5TOriy1l0nNf6a4X5UXRZSGX/dw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.1.tgz", + "integrity": "sha512-2H3RUvcmULO7dIE5EWJH8eubZAI4xw54H1ilJnRNZdeo8dTADEZ21w6J22XBkXqGJbe0+wnNJtw3UXRoLJnFEg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.1.tgz", + "integrity": "sha512-GE7XvrdOzrb+yVKB9KsRMq+7a2U/K5Cf/8grVFRAGJmfADr/e/ODQ134RK2/eeHqYV5eQRFxb1hY7Nr15fv1NQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.1.tgz", + "integrity": 
"sha512-uOxSJCIcavSiT6UnBhBzE8wy3n0hOkJsBOzy7HDAuTDE++1DJMRRVCPGisULScHL+a/ZwdXPpXD3IyFKjA7K8A==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.1.tgz", + "integrity": "sha512-Y1EQdcfwMSeQN/ujR5VayLOJ1BHaK+ssyk0AEzPjC+t1lITgsnccPqFjb6V+LsTp/9Iov4ysfjxLaGJ9RPtkVg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, "node_modules/@eslint-community/eslint-utils": { "version": "4.5.0", "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.5.0.tgz", @@ -577,22 +1172,22 @@ } }, "node_modules/@floating-ui/core": { - "version": "1.6.9", - "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.6.9.tgz", - "integrity": "sha512-uMXCuQ3BItDUbAMhIXw7UPXRfAlOAvZzdK9BWpE60MCn+Svt3aLn9jsPTi/WNGlRUu2uI0v5S7JiIUsbsvh3fw==", + "version": "1.7.3", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.7.3.tgz", + "integrity": "sha512-sGnvb5dmrJaKEZ+LDIpguvdX3bDlEllmv4/ClQ9awcmCZrlx5jQyyMWFM5kBI+EyNOCDDiKk8il0zeuX3Zlg/w==", "license": "MIT", "dependencies": { - "@floating-ui/utils": "^0.2.9" + "@floating-ui/utils": "^0.2.10" } }, "node_modules/@floating-ui/dom": { - "version": "1.6.13", - "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.13.tgz", - "integrity": "sha512-umqzocjDgNRGTuO7Q8CU32dkHkECqI8ZdMZ5Swb6QAM0t5rnlrN3lGo1hdpscRd3WS8T6DKYK4ephgIH9iRh3w==", + "version": "1.7.4", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.7.4.tgz", + "integrity": "sha512-OOchDgh4F2CchOX94cRVqhvy7b3AFb+/rQXyswmzmGakRfkMgoWVjfnLWkRirfLEfuD4ysVW16eXzwt3jHIzKA==", "license": "MIT", "dependencies": { - "@floating-ui/core": "^1.6.0", - "@floating-ui/utils": "^0.2.9" + "@floating-ui/core": "^1.7.3", + "@floating-ui/utils": "^0.2.10" } }, "node_modules/@floating-ui/react": { @@ -611,12 +1206,12 @@ } }, "node_modules/@floating-ui/react-dom": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.2.tgz", - "integrity": "sha512-06okr5cgPzMNBy+Ycse2A6udMi4bqwW/zgBF/rwjcNqWkyr82Mcg8b0vjX8OJpZFy/FKjJmw6wV7t44kK6kW7A==", + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.6.tgz", + "integrity": "sha512-4JX6rEatQEvlmgU80wZyq9RT96HZJa88q8hp0pBd+LrczeDI4o6uA2M+uvxngVHo4Ihr8uibXxH6+70zhAFrVw==", "license": "MIT", "dependencies": { - "@floating-ui/dom": "^1.0.0" + "@floating-ui/dom": "^1.7.4" }, "peerDependencies": { "react": ">=16.8.0", @@ -624,9 +1219,9 @@ } }, "node_modules/@floating-ui/utils": { - "version": "0.2.9", - "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.9.tgz", - "integrity": "sha512-MDWhGtE+eHw5JW7lq4qhc5yRLS11ERl1c7Z6Xd0a58DozHES6EnNNwUWbMiG4J9Cgj053Bhk8zvlhFYKVhULwg==", + "version": "0.2.10", + "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.10.tgz", + "integrity": "sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ==", "license": "MIT" }, "node_modules/@humanfs/core": { @@ -728,9 +1323,9 @@ } }, "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", - "integrity": 
"sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", "license": "MIT" }, "node_modules/@jridgewell/trace-mapping": { @@ -869,6 +1464,209 @@ "url": "https://opencollective.com/popperjs" } }, + "node_modules/@rolldown/pluginutils": { + "version": "1.0.0-beta.35", + "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.35.tgz", + "integrity": "sha512-slYrCpoxJUqzFDDNlvrOYRazQUNRvWPjXA17dAOISY3rDMxX6k8K4cj2H+hEYMHF81HO3uNd5rHVigAWRM5dSg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.35.0.tgz", + "integrity": "sha512-uYQ2WfPaqz5QtVgMxfN6NpLD+no0MYHDBywl7itPYd3K5TjjSghNKmX8ic9S8NU8w81NVhJv/XojcHptRly7qQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.35.0.tgz", + "integrity": "sha512-FtKddj9XZudurLhdJnBl9fl6BwCJ3ky8riCXjEw3/UIbjmIY58ppWwPEvU3fNu+W7FUsAsB1CdH+7EQE6CXAPA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.35.0.tgz", + "integrity": "sha512-Uk+GjOJR6CY844/q6r5DR/6lkPFOw0hjfOIzVx22THJXMxktXG6CbejseJFznU8vHcEBLpiXKY3/6xc+cBm65Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.35.0.tgz", + "integrity": "sha512-3IrHjfAS6Vkp+5bISNQnPogRAW5GAV1n+bNCrDwXmfMHbPl5EhTmWtfmwlJxFRUCBZ+tZ/OxDyU08aF6NI/N5Q==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.35.0.tgz", + "integrity": "sha512-sxjoD/6F9cDLSELuLNnY0fOrM9WA0KrM0vWm57XhrIMf5FGiN8D0l7fn+bpUeBSU7dCgPV2oX4zHAsAXyHFGcQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.35.0.tgz", + "integrity": "sha512-2mpHCeRuD1u/2kruUiHSsnjWtHjqVbzhBkNVQ1aVD63CcexKVcQGwJ2g5VphOd84GvxfSvnnlEyBtQCE5hxVVw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.35.0.tgz", + "integrity": "sha512-mrA0v3QMy6ZSvEuLs0dMxcO2LnaCONs1Z73GUDBHWbY8tFFocM6yl7YyMu7rz4zS81NDSqhrUuolyZXGi8TEqg==", + "cpu": [ + "arm" + ], + "dev": 
true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.35.0.tgz", + "integrity": "sha512-DnYhhzcvTAKNexIql8pFajr0PiDGrIsBYPRvCKlA5ixSS3uwo/CWNZxB09jhIapEIg945KOzcYEAGGSmTSpk7A==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.35.0.tgz", + "integrity": "sha512-uagpnH2M2g2b5iLsCTZ35CL1FgyuzzJQ8L9VtlJ+FckBXroTwNOaD0z0/UF+k5K3aNQjbm8LIVpxykUOQt1m/A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.35.0.tgz", + "integrity": "sha512-XQxVOCd6VJeHQA/7YcqyV0/88N6ysSVzRjJ9I9UA/xXpEsjvAgDTgH3wQYz5bmr7SPtVK2TsP2fQ2N9L4ukoUg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loongarch64-gnu": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loongarch64-gnu/-/rollup-linux-loongarch64-gnu-4.35.0.tgz", + "integrity": "sha512-5pMT5PzfgwcXEwOaSrqVsz/LvjDZt+vQ8RT/70yhPU06PTuq8WaHhfT1LW+cdD7mW6i/J5/XIkX/1tCAkh1W6g==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-powerpc64le-gnu": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.35.0.tgz", + "integrity": "sha512-c+zkcvbhbXF98f4CtEIP1EBA/lCic5xB0lToneZYvMeKu5Kamq3O8gqrxiYYLzlZH6E3Aq+TSW86E4ay8iD8EA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.35.0.tgz", + "integrity": "sha512-s91fuAHdOwH/Tad2tzTtPX7UZyytHIRR6V4+2IGlV0Cej5rkG0R61SX4l4y9sh0JBibMiploZx3oHKPnQBKe4g==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.35.0.tgz", + "integrity": "sha512-hQRkPQPLYJZYGP+Hj4fR9dDBMIM7zrzJDWFEMPdTnTy95Ljnv0/4w/ixFw3pTBMEuuEuoqtBINYND4M7ujcuQw==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, "node_modules/@rollup/rollup-linux-x64-gnu": { "version": "4.35.0", "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.35.0.tgz", @@ -897,95 +1695,62 @@ "linux" ] }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.35.0.tgz", + "integrity": "sha512-OUOlGqPkVJCdJETKOCEf1mw848ZyJ5w50/rZ/3IBQVdLfR5jk/6Sr5m3iO2tdPgwo0x7VcncYuOvMhBWZq8ayg==", + "cpu": [ + "arm64" + ], + "dev": true, + 
"license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.35.0.tgz", + "integrity": "sha512-2/lsgejMrtwQe44glq7AFFHLfJBPafpsTa6JvP2NGef/ifOa4KBoglVf7AKN7EV9o32evBPRqfg96fEHzWo5kw==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.35.0.tgz", + "integrity": "sha512-PIQeY5XDkrOysbQblSW7v3l1MDZzkTEzAfTPkj5VAu3FW8fS4ynyLg2sINp0fp3SjZ8xkRYpLqoKcYqAkhU1dw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, "node_modules/@swc/core": { - "version": "1.11.8", - "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.11.8.tgz", - "integrity": "sha512-UAL+EULxrc0J73flwYHfu29mO8CONpDJiQv1QPDXsyCvDUcEhqAqUROVTgC+wtJCFFqMQdyr4stAA5/s0KSOmA==", - "dev": true, - "hasInstallScript": true, - "license": "Apache-2.0", - "dependencies": { - "@swc/counter": "^0.1.3", - "@swc/types": "^0.1.19" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/swc" - }, - "optionalDependencies": { - "@swc/core-darwin-arm64": "1.11.8", - "@swc/core-darwin-x64": "1.11.8", - "@swc/core-linux-arm-gnueabihf": "1.11.8", - "@swc/core-linux-arm64-gnu": "1.11.8", - "@swc/core-linux-arm64-musl": "1.11.8", - "@swc/core-linux-x64-gnu": "1.11.8", - "@swc/core-linux-x64-musl": "1.11.8", - "@swc/core-win32-arm64-msvc": "1.11.8", - "@swc/core-win32-ia32-msvc": "1.11.8", - "@swc/core-win32-x64-msvc": "1.11.8" - }, - "peerDependencies": { - "@swc/helpers": "*" - }, - "peerDependenciesMeta": { - "@swc/helpers": { - "optional": true - } - } - }, - "node_modules/@swc/core-linux-x64-gnu": { - "version": "1.11.8", - "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.11.8.tgz", - "integrity": "sha512-r72onUEIU1iJi9EUws3R28pztQ/eM3EshNpsPRBfuLwKy+qn3et55vXOyDhIjGCUph5Eg2Yn8H3h6MTxDdLd+w==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "Apache-2.0 AND MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=10" - } - }, - "node_modules/@swc/core-linux-x64-musl": { - "version": "1.11.8", - "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.11.8.tgz", - "integrity": "sha512-294k8cLpO103++f4ZUEDr3vnBeUfPitW6G0a3qeVZuoXFhFgaW7ANZIWknUc14WiLOMfMecphJAEiy9C8OeYSw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "Apache-2.0 AND MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=10" - } - }, - "node_modules/@swc/counter": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", - "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==", + "name": "@swc/wasm", + "version": "1.13.20", + "resolved": "https://registry.npmjs.org/@swc/wasm/-/wasm-1.13.20.tgz", + "integrity": "sha512-NJzN+QrbdwXeVTfTYiHkqv13zleOCQA52NXBOrwKvjxWJQecRqakjUhUP2z8lqs7eWVthko4Cilqs+VeBrwo3Q==", "dev": true, "license": "Apache-2.0" }, - "node_modules/@swc/types": { - "version": "0.1.19", - "resolved": "https://registry.npmjs.org/@swc/types/-/types-0.1.19.tgz", - "integrity": 
"sha512-WkAZaAfj44kh/UFdAQcrMP1I0nwRqpt27u+08LMBYMqmQfwwMofYoMh/48NGkMMRfC4ynpfwRbJuu8ErfNloeA==", + "node_modules/@swc/wasm": { + "version": "1.13.20", + "resolved": "https://registry.npmjs.org/@swc/wasm/-/wasm-1.13.20.tgz", + "integrity": "sha512-NJzN+QrbdwXeVTfTYiHkqv13zleOCQA52NXBOrwKvjxWJQecRqakjUhUP2z8lqs7eWVthko4Cilqs+VeBrwo3Q==", "dev": true, - "license": "Apache-2.0", - "dependencies": { - "@swc/counter": "^0.1.3" - } + "license": "Apache-2.0" }, "node_modules/@tanstack/react-table": { "version": "8.21.3", @@ -1020,6 +1785,112 @@ "url": "https://github.com/sponsors/tannerlinsley" } }, + "node_modules/@testing-library/dom": { + "version": "10.4.1", + "resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-10.4.1.tgz", + "integrity": "sha512-o4PXJQidqJl82ckFaXUeoAW+XysPLauYI43Abki5hABd853iMhitooc6znOnczgbTYmEP6U6/y1ZyKAIsvMKGg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.10.4", + "@babel/runtime": "^7.12.5", + "@types/aria-query": "^5.0.1", + "aria-query": "5.3.0", + "dom-accessibility-api": "^0.5.9", + "lz-string": "^1.5.0", + "picocolors": "1.1.1", + "pretty-format": "^27.0.2" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@testing-library/jest-dom": { + "version": "6.8.0", + "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-6.8.0.tgz", + "integrity": "sha512-WgXcWzVM6idy5JaftTVC8Vs83NKRmGJz4Hqs4oyOuO2J4r/y79vvKZsb+CaGyCSEbUPI6OsewfPd0G1A0/TUZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@adobe/css-tools": "^4.4.0", + "aria-query": "^5.0.0", + "css.escape": "^1.5.1", + "dom-accessibility-api": "^0.6.3", + "picocolors": "^1.1.1", + "redent": "^3.0.0" + }, + "engines": { + "node": ">=14", + "npm": ">=6", + "yarn": ">=1" + } + }, + "node_modules/@testing-library/jest-dom/node_modules/dom-accessibility-api": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.6.3.tgz", + "integrity": "sha512-7ZgogeTnjuHbo+ct10G9Ffp0mif17idi0IyWNVA/wcwcm7NPOD/WEHVP3n7n3MhXqxoIYm8d6MuZohYWIZ4T3w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@testing-library/react": { + "version": "16.3.0", + "resolved": "https://registry.npmjs.org/@testing-library/react/-/react-16.3.0.tgz", + "integrity": "sha512-kFSyxiEDwv1WLl2fgsq6pPBbw5aWKrsY2/noi1Id0TK0UParSF62oFQFGHXIyaG4pp2tEub/Zlel+fjjZILDsw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.12.5" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@testing-library/dom": "^10.0.0", + "@types/react": "^18.0.0 || ^19.0.0", + "@types/react-dom": "^18.0.0 || ^19.0.0", + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@testing-library/user-event": { + "version": "14.6.1", + "resolved": "https://registry.npmjs.org/@testing-library/user-event/-/user-event-14.6.1.tgz", + "integrity": "sha512-vq7fv0rnt+QTXgPxr5Hjc210p6YKq2kmdziLgnsZGgLJ9e6VAShx1pACLuRjd/AS/sr7phAR58OIIpf0LlmQNw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12", + "npm": ">=6" + }, + "peerDependencies": { + "@testing-library/dom": ">=7.21.4" + } + }, + "node_modules/@types/aria-query": { + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/@types/aria-query/-/aria-query-5.0.4.tgz", + "integrity": 
"sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/chai": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.2.tgz", + "integrity": "sha512-8kB30R7Hwqf40JPiKhVzodJs2Qc1ZJ5zuT3uzw5Hq/dhNCl3G3l83jfpdI1e20BP348+fV7VIL/+FxaXkqBmWg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/deep-eql": "*" + } + }, "node_modules/@types/d3-array": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.1.tgz", @@ -1083,6 +1954,13 @@ "integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==", "license": "MIT" }, + "node_modules/@types/deep-eql": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", + "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/estree": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", @@ -1114,18 +1992,18 @@ "license": "MIT" }, "node_modules/@types/react": { - "version": "19.0.10", - "resolved": "https://registry.npmjs.org/@types/react/-/react-19.0.10.tgz", - "integrity": "sha512-JuRQ9KXLEjaUNjTWpzuR231Z2WpIwczOkBEIvbHNCzQefFIT0L8IqE6NV6ULLyC1SI/i234JnDoMkfg+RjQj2g==", + "version": "19.1.16", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.1.16.tgz", + "integrity": "sha512-WBM/nDbEZmDUORKnh5i1bTnAz6vTohUf9b8esSMu+b24+srbaxa04UbJgWx78CVfNXA20sNu0odEIluZDFdCog==", "license": "MIT", "dependencies": { "csstype": "^3.0.2" } }, "node_modules/@types/react-dom": { - "version": "19.0.4", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.0.4.tgz", - "integrity": "sha512-4fSQ8vWFkg+TGhePfUzVmat3eC14TXYSsiiDSLI0dVLsrm9gZFABjPy/Qu6TKgl1tq1Bu1yDsuQgY3A3DOjCcg==", + "version": "19.1.9", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.1.9.tgz", + "integrity": "sha512-qXRuZaOsAdXKFyOhRBg6Lqqc0yay13vN7KrIg4L7N4aaHN68ma9OK3NE1BoDFgFOTfM7zg+3/8+2n8rLUH3OKQ==", "dev": true, "license": "MIT", "peerDependencies": { @@ -1181,16 +2059,135 @@ } }, "node_modules/@vitejs/plugin-react-swc": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@vitejs/plugin-react-swc/-/plugin-react-swc-3.8.0.tgz", - "integrity": "sha512-T4sHPvS+DIqDP51ifPqa9XIRAz/kIvIi8oXcnOZZgHmMotgmmdxe/DD5tMFlt5nuIRzT0/QuiwmKlH0503Aapw==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react-swc/-/plugin-react-swc-4.1.0.tgz", + "integrity": "sha512-Ff690TUck0Anlh7wdIcnsVMhofeEVgm44Y4OYdeeEEPSKyZHzDI9gfVBvySEhDfXtBp8tLCbfsVKPWEMEjq8/g==", "dev": true, "license": "MIT", "dependencies": { - "@swc/core": "^1.10.15" + "@rolldown/pluginutils": "1.0.0-beta.35", + "@swc/core": "^1.13.5" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" }, "peerDependencies": { - "vite": "^4 || ^5 || ^6" + "vite": "^4 || ^5 || ^6 || ^7" + } + }, + "node_modules/@vitest/expect": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-3.2.4.tgz", + "integrity": "sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/chai": "^5.2.2", + "@vitest/spy": "3.2.4", + "@vitest/utils": "3.2.4", + "chai": "^5.2.0", + "tinyrainbow": "^2.0.0" + }, + "funding": { 
+ "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/mocker": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-3.2.4.tgz", + "integrity": "sha512-46ryTE9RZO/rfDd7pEqFl7etuyzekzEhUbTW3BvmeO/BcCMEgq59BKhek3dXDWgAj4oMK6OZi+vRr1wPW6qjEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/spy": "3.2.4", + "estree-walker": "^3.0.3", + "magic-string": "^0.30.17" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "msw": "^2.4.9", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" + }, + "peerDependenciesMeta": { + "msw": { + "optional": true + }, + "vite": { + "optional": true + } + } + }, + "node_modules/@vitest/pretty-format": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-3.2.4.tgz", + "integrity": "sha512-IVNZik8IVRJRTr9fxlitMKeJeXFFFN0JaB9PHPGQ8NKQbGpfjlTx9zO4RefN8gp7eqjNy8nyK3NZmBzOPeIxtA==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/runner": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-3.2.4.tgz", + "integrity": "sha512-oukfKT9Mk41LreEW09vt45f8wx7DordoWUZMYdY/cyAk7w5TWkTRCNZYF7sX7n2wB7jyGAl74OxgwhPgKaqDMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/utils": "3.2.4", + "pathe": "^2.0.3", + "strip-literal": "^3.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/snapshot": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-3.2.4.tgz", + "integrity": "sha512-dEYtS7qQP2CjU27QBC5oUOxLE/v5eLkGqPE0ZKEIDGMs4vKWe7IjgLOeauHsR0D5YuuycGRO5oSRXnwnmA78fQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "3.2.4", + "magic-string": "^0.30.17", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-3.2.4.tgz", + "integrity": "sha512-vAfasCOe6AIK70iP5UD11Ac4siNUNJ9i/9PZ3NKx07sG6sUxeag1LWdNrMWeKKYBLlzuK+Gn65Yd5nyL6ds+nw==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyspy": "^4.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-3.2.4.tgz", + "integrity": "sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "3.2.4", + "loupe": "^3.1.4", + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" } }, "node_modules/@xmldom/xmldom": { @@ -1237,6 +2234,16 @@ "pkcs7": "^1.0.4" } }, + "node_modules/agent-base": { + "version": "7.1.4", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", + "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, "node_modules/ajv": { "version": "6.12.6", "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", @@ -1255,9 +2262,9 @@ } }, "node_modules/allotment": { - "version": "1.20.3", - "resolved": 
"https://registry.npmjs.org/allotment/-/allotment-1.20.3.tgz", - "integrity": "sha512-JCnklt7j0OsyDjD7A9AdT6wqJ3FSoo1ASV6w02Am02lo6NwO25yhG1DcWW8ueBV38ppXQmvrXBXuzX7iVkq6Tw==", + "version": "1.20.4", + "resolved": "https://registry.npmjs.org/allotment/-/allotment-1.20.4.tgz", + "integrity": "sha512-LMM5Xe5nLePFOLAlW/5k3ARqznYGUyNekV4xJrfDKn1jimW3nlZE6hT/Tu0T8s0VgAkr9s2P7+uM0WvJKn5DAw==", "license": "MIT", "dependencies": { "classnames": "^2.3.0", @@ -1272,17 +2279,14 @@ "react-dom": "^17.0.0 || ^18.0.0 || ^19.0.0" } }, - "node_modules/allotment/node_modules/use-resize-observer": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/use-resize-observer/-/use-resize-observer-9.1.0.tgz", - "integrity": "sha512-R25VqO9Wb3asSD4eqtcxk8sJalvIOYBqS8MNZlpDSQ4l4xMQxC/J7Id9HoTqPq8FwULIn0PVW+OAqF2dyYbjow==", + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, "license": "MIT", - "dependencies": { - "@juggle/resize-observer": "^3.3.1" - }, - "peerDependencies": { - "react": "16.8.0 - 18", - "react-dom": "16.8.0 - 18" + "engines": { + "node": ">=8" } }, "node_modules/ansi-styles": { @@ -1308,6 +2312,26 @@ "dev": true, "license": "Python-2.0" }, + "node_modules/aria-query": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.0.tgz", + "integrity": "sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "dequal": "^2.0.3" + } + }, + "node_modules/assertion-error": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + } + }, "node_modules/attr-accept": { "version": "2.2.5", "resolved": "https://registry.npmjs.org/attr-accept/-/attr-accept-2.2.5.tgz", @@ -1339,6 +2363,16 @@ "dev": true, "license": "MIT" }, + "node_modules/bidi-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/bidi-js/-/bidi-js-1.0.3.tgz", + "integrity": "sha512-RKshQI1R3YQ+n9YJz2QQ147P66ELpa1FQEg20Dk8oW9t2KgLbpDLLp9aGZ7y8WHSshDknG0bknqGw5/tyCs5tw==", + "dev": true, + "license": "MIT", + "dependencies": { + "require-from-string": "^2.0.2" + } + }, "node_modules/brace-expansion": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", @@ -1350,6 +2384,16 @@ "concat-map": "0.0.1" } }, + "node_modules/cac": { + "version": "6.7.14", + "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", + "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/callsites": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", @@ -1359,6 +2403,23 @@ "node": ">=6" } }, + "node_modules/chai": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/chai/-/chai-5.3.3.tgz", + "integrity": "sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==", + "dev": true, + "license": "MIT", + "dependencies": { + "assertion-error": "^2.0.1", + "check-error": 
"^2.1.1", + "deep-eql": "^5.0.1", + "loupe": "^3.1.0", + "pathval": "^2.0.0" + }, + "engines": { + "node": ">=18" + } + }, "node_modules/chalk": { "version": "4.1.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", @@ -1376,6 +2437,16 @@ "url": "https://github.com/chalk/chalk?sponsor=1" } }, + "node_modules/check-error": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.1.tgz", + "integrity": "sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 16" + } + }, "node_modules/classnames": { "version": "2.5.1", "resolved": "https://registry.npmjs.org/classnames/-/classnames-2.5.1.tgz", @@ -1473,6 +2544,42 @@ "node": ">= 8" } }, + "node_modules/css-tree": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-3.1.0.tgz", + "integrity": "sha512-0eW44TGN5SQXU1mWSkKwFstI/22X2bG1nYzZTYMAWjylYURhse752YgbE4Cx46AC+bAvI+/dYTPRk1LqSUnu6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "mdn-data": "2.12.2", + "source-map-js": "^1.0.1" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0" + } + }, + "node_modules/css.escape": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/css.escape/-/css.escape-1.5.1.tgz", + "integrity": "sha512-YUifsXXuknHlUsmlgyY0PKzgPOr7/FjCePfHNt0jxm83wHZi44VDMQ7/fGNkjY3/jV1MC+1CmZbaHzugyeRtpg==", + "dev": true, + "license": "MIT" + }, + "node_modules/cssstyle": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-5.3.1.tgz", + "integrity": "sha512-g5PC9Aiph9eiczFpcgUhd9S4UUO3F+LHGRIi5NUMZ+4xtoIYbHNZwZnWA2JsFGe8OU8nl4WyaEFiZuGuxlutJQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@asamuzakjp/css-color": "^4.0.3", + "@csstools/css-syntax-patches-for-csstree": "^1.0.14", + "css-tree": "^3.1.0" + }, + "engines": { + "node": ">=20" + } + }, "node_modules/csstype": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", @@ -1600,6 +2707,20 @@ "node": ">=12" } }, + "node_modules/data-urls": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-6.0.0.tgz", + "integrity": "sha512-BnBS08aLUM+DKamupXs3w2tJJoqU+AkaE/+6vQxi/G/DPmIZFJJp9Dkb1kM03AZx8ADehDUZgsNxju3mPXZYIA==", + "dev": true, + "license": "MIT", + "dependencies": { + "whatwg-mimetype": "^4.0.0", + "whatwg-url": "^15.0.0" + }, + "engines": { + "node": ">=20" + } + }, "node_modules/dayjs": { "version": "1.11.13", "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.13.tgz", @@ -1607,9 +2728,9 @@ "license": "MIT" }, "node_modules/debug": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", - "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", "license": "MIT", "dependencies": { "ms": "^2.1.3" @@ -1623,12 +2744,29 @@ } } }, + "node_modules/decimal.js": { + "version": "10.6.0", + "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.6.0.tgz", + "integrity": "sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg==", + "dev": true, + "license": "MIT" + }, "node_modules/decimal.js-light": { "version": "2.5.1", "resolved": 
"https://registry.npmjs.org/decimal.js-light/-/decimal.js-light-2.5.1.tgz", "integrity": "sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg==", "license": "MIT" }, + "node_modules/deep-eql": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", + "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/deep-is": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", @@ -1645,12 +2783,29 @@ "node": ">=0.10.0" } }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/detect-node-es": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/detect-node-es/-/detect-node-es-1.1.0.tgz", "integrity": "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==", "license": "MIT" }, + "node_modules/dom-accessibility-api": { + "version": "0.5.16", + "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.5.16.tgz", + "integrity": "sha512-X7BJ2yElsnOJ30pZF4uIIDfBEVgF4XEBxL9Bxhy6dnrm5hkzqmsWHGTiHqRiITNhMyFLyAiWndIJP7Z1NTteDg==", + "dev": true, + "license": "MIT" + }, "node_modules/dom-helpers": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/dom-helpers/-/dom-helpers-5.2.1.tgz", @@ -1666,6 +2821,19 @@ "resolved": "https://registry.npmjs.org/dom-walk/-/dom-walk-0.1.2.tgz", "integrity": "sha512-6QvTW9mrGeIegrFXdtQi9pk7O/nSK6lSdXW2eqUspN5LWD7UTji2Fqw5V2YLjBpHEoU9Xl/eUWNpDeZvoyOv2w==" }, + "node_modules/entities": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz", + "integrity": "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, "node_modules/error-ex": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", @@ -1675,6 +2843,13 @@ "is-arrayish": "^0.2.1" } }, + "node_modules/es-module-lexer": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", + "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", + "dev": true, + "license": "MIT" + }, "node_modules/es6-promise": { "version": "4.2.8", "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz", @@ -1902,6 +3077,16 @@ "node": ">=4.0" } }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, "node_modules/esutils": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", @@ -1918,6 +3103,16 @@ "integrity": 
"sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA==", "license": "MIT" }, + "node_modules/expect-type": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.2.2.tgz", + "integrity": "sha512-JhFGDVJ7tmDJItKhYgJCGLOWjuK9vPxiXoUFLwLDc99NlmklilbiQJwoctZtt13+xMw91MCk/REan6MWHqDjyA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.0.0" + } + }, "node_modules/fast-deep-equal": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", @@ -1925,9 +3120,9 @@ "license": "MIT" }, "node_modules/fast-equals": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/fast-equals/-/fast-equals-5.2.2.tgz", - "integrity": "sha512-V7/RktU11J3I36Nwq2JnZEM7tNm17eBJz+u25qdxBZeCKiX6BkVSZQjwWIr+IobgnZy+ag73tTZgZi7tr0LrBw==", + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/fast-equals/-/fast-equals-5.3.2.tgz", + "integrity": "sha512-6rxyATwPCkaFIL3JLqw8qXqMpIZ942pTX/tbQFkRsDGblS8tNGtlUauA/+mt6RUfqn/4MoEr+WDkYoIQbibWuQ==", "license": "MIT", "engines": { "node": ">=6.0.0" @@ -1948,11 +3143,14 @@ "license": "MIT" }, "node_modules/fdir": { - "version": "6.4.4", - "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.4.4.tgz", - "integrity": "sha512-1NZP+GK4GfuAv3PqKvxQRDMjdSRZjnkq7KfhlNrCNNlZ0ygQFpebfrnfnq/W7fpUnAv9aGWmY1zKx7FYL3gwhg==", + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", "dev": true, "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, "peerDependencies": { "picomatch": "^3 || ^4" }, @@ -2162,11 +3360,59 @@ "react-is": "^16.7.0" } }, - "node_modules/hoist-non-react-statics/node_modules/react-is": { - "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", - "license": "MIT" + "node_modules/html-encoding-sniffer": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-4.0.0.tgz", + "integrity": "sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "whatwg-encoding": "^3.1.1" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/http-proxy-agent": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", + "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.0", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/https-proxy-agent": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": 
"sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "dev": true, + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } }, "node_modules/ignore": { "version": "5.3.2", @@ -2214,6 +3460,16 @@ "node": ">=0.8.19" } }, + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/internmap": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz", @@ -2273,6 +3529,13 @@ "node": ">=0.10.0" } }, + "node_modules/is-potential-custom-element-name": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz", + "integrity": "sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==", + "dev": true, + "license": "MIT" + }, "node_modules/isexe": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", @@ -2299,6 +3562,46 @@ "js-yaml": "bin/js-yaml.js" } }, + "node_modules/jsdom": { + "version": "27.0.0", + "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-27.0.0.tgz", + "integrity": "sha512-lIHeR1qlIRrIN5VMccd8tI2Sgw6ieYXSVktcSHaNe3Z5nE/tcPQYQWOq00wxMvYOsz+73eAkNenVvmPC6bba9A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@asamuzakjp/dom-selector": "^6.5.4", + "cssstyle": "^5.3.0", + "data-urls": "^6.0.0", + "decimal.js": "^10.5.0", + "html-encoding-sniffer": "^4.0.0", + "http-proxy-agent": "^7.0.2", + "https-proxy-agent": "^7.0.6", + "is-potential-custom-element-name": "^1.0.1", + "parse5": "^7.3.0", + "rrweb-cssom": "^0.8.0", + "saxes": "^6.0.0", + "symbol-tree": "^3.2.4", + "tough-cookie": "^6.0.0", + "w3c-xmlserializer": "^5.0.0", + "webidl-conversions": "^8.0.0", + "whatwg-encoding": "^3.1.1", + "whatwg-mimetype": "^4.0.0", + "whatwg-url": "^15.0.0", + "ws": "^8.18.2", + "xml-name-validator": "^5.0.0" + }, + "engines": { + "node": ">=20" + }, + "peerDependencies": { + "canvas": "^3.0.0" + }, + "peerDependenciesMeta": { + "canvas": { + "optional": true + } + } + }, "node_modules/jsesc": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", @@ -2443,6 +3746,23 @@ "loose-envify": "cli.js" } }, + "node_modules/loupe": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.2.1.tgz", + "integrity": "sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/lru-cache": { + "version": "11.2.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.1.tgz", + "integrity": "sha512-r8LA6i4LP4EeWOhqBaZZjDWwehd1xUJPCJd9Sv300H0ZmcUER4+JPh7bqqZeqs1o5pgtgvXm+d9UGrB5zZGDiQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": "20 || >=22" + } + }, "node_modules/lucide-react": { "version": "0.511.0", "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.511.0.tgz", @@ -2452,6 +3772,16 @@ "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, + "node_modules/lz-string": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/lz-string/-/lz-string-1.5.0.tgz", + "integrity": 
"sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==", + "dev": true, + "license": "MIT", + "bin": { + "lz-string": "bin/bin.js" + } + }, "node_modules/m3u8-parser": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/m3u8-parser/-/m3u8-parser-7.2.0.tgz", @@ -2463,6 +3793,23 @@ "global": "^4.4.0" } }, + "node_modules/magic-string": { + "version": "0.30.19", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.19.tgz", + "integrity": "sha512-2N21sPY9Ws53PZvsEpVtNuSW+ScYbQdp4b9qUaL+9QkHUrGFKo56Lg9Emg5s9V/qrtNBmiR01sYhUOwu3H+VOw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.5" + } + }, + "node_modules/mdn-data": { + "version": "2.12.2", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.12.2.tgz", + "integrity": "sha512-IEn+pegP1aManZuckezWCO+XZQDplx1366JoVhTpMpBB1sPey/SbveZQUosKiKiGYjg1wH4pMlNgXbCiYgihQA==", + "dev": true, + "license": "CC0-1.0" + }, "node_modules/memoize-one": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/memoize-one/-/memoize-one-5.2.1.tgz", @@ -2477,6 +3824,16 @@ "dom-walk": "^0.1.0" } }, + "node_modules/min-indent": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz", + "integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, "node_modules/minimatch": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", @@ -2653,6 +4010,19 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/parse5": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.3.0.tgz", + "integrity": "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==", + "dev": true, + "license": "MIT", + "dependencies": { + "entities": "^6.0.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, "node_modules/path-exists": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", @@ -2688,6 +4058,23 @@ "node": ">=8" } }, + "node_modules/pathe": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", + "dev": true, + "license": "MIT" + }, + "node_modules/pathval": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.1.tgz", + "integrity": "sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14.16" + } + }, "node_modules/picocolors": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", @@ -2695,9 +4082,9 @@ "license": "ISC" }, "node_modules/picomatch": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz", - "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==", + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", "engines": { @@ -2774,6 +4161,41 @@ "url": 
"https://github.com/prettier/prettier?sponsor=1" } }, + "node_modules/pretty-format": { + "version": "27.5.1", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-27.5.1.tgz", + "integrity": "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1", + "ansi-styles": "^5.0.0", + "react-is": "^17.0.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/pretty-format/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/pretty-format/node_modules/react-is": { + "version": "17.0.2", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", + "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==", + "dev": true, + "license": "MIT" + }, "node_modules/process": { "version": "0.11.10", "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", @@ -2794,12 +4216,6 @@ "react-is": "^16.13.1" } }, - "node_modules/prop-types/node_modules/react-is": { - "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", - "license": "MIT" - }, "node_modules/property-expr": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/property-expr/-/property-expr-2.0.6.tgz", @@ -2817,24 +4233,24 @@ } }, "node_modules/react": { - "version": "19.0.0", - "resolved": "https://registry.npmjs.org/react/-/react-19.0.0.tgz", - "integrity": "sha512-V8AVnmPIICiWpGfm6GLzCR/W5FXLchHop40W4nXBmdlEceh16rCN8O8LNWm5bh5XUX91fh7KpA+W0TgMKmgTpQ==", + "version": "19.1.1", + "resolved": "https://registry.npmjs.org/react/-/react-19.1.1.tgz", + "integrity": "sha512-w8nqGImo45dmMIfljjMwOGtbmC/mk4CMYhWIicdSflH91J9TyCyczcPFXJzrZ/ZXcgGRFeP6BU0BEJTw6tZdfQ==", "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/react-dom": { - "version": "19.0.0", - "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.0.0.tgz", - "integrity": "sha512-4GV5sHFG0e/0AD4X+ySy6UJd3jVl1iNsNHdpad0qhABJ11twS3TTBnseqsKurKcsNqCEFeGL3uLpVChpIO3QfQ==", + "version": "19.1.1", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.1.1.tgz", + "integrity": "sha512-Dlq/5LAZgF0Gaz6yiqZCf6VCcZs1ghAJyrsu84Q/GT0gV+mCxbfmKNoGRKBYMJ8IEdGPqu49YWXD02GCknEDkw==", "license": "MIT", "dependencies": { - "scheduler": "^0.25.0" + "scheduler": "^0.26.0" }, "peerDependencies": { - "react": "^19.0.0" + "react": "^19.1.1" } }, "node_modules/react-draggable": { @@ -2884,9 +4300,9 @@ "license": "MIT" }, "node_modules/react-is": { - "version": "18.3.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", - "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", "license": "MIT" }, 
"node_modules/react-lifecycles-compat": { @@ -2896,9 +4312,9 @@ "license": "MIT" }, "node_modules/react-number-format": { - "version": "5.4.3", - "resolved": "https://registry.npmjs.org/react-number-format/-/react-number-format-5.4.3.tgz", - "integrity": "sha512-VCY5hFg/soBighAoGcdE+GagkJq0230qN6jcS5sp8wQX1qy1fYN/RX7/BXkrs0oyzzwqR8/+eSUrqXbGeywdUQ==", + "version": "5.4.4", + "resolved": "https://registry.npmjs.org/react-number-format/-/react-number-format-5.4.4.tgz", + "integrity": "sha512-wOmoNZoOpvMminhifQYiYSTCLUDOiUbBunrMrMjA+dV52sY+vck1S4UhR6PkgnoCquvvMSeJjErXZ4qSaWCliA==", "license": "MIT", "peerDependencies": { "react": "^0.14 || ^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", @@ -3168,12 +4584,42 @@ "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", "license": "MIT" }, + "node_modules/recharts/node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "license": "MIT" + }, + "node_modules/redent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz", + "integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==", + "dev": true, + "license": "MIT", + "dependencies": { + "indent-string": "^4.0.0", + "strip-indent": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/regenerator-runtime": { "version": "0.14.1", "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==", "license": "MIT" }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/resolve": { "version": "1.22.10", "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz", @@ -3242,10 +4688,37 @@ "fsevents": "~2.3.2" } }, + "node_modules/rrweb-cssom": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.8.0.tgz", + "integrity": "sha512-guoltQEx+9aMf2gDZ0s62EcV8lsXR+0w8915TC3ITdn2YueuNjdAYh/levpU9nFaoChh9RUS5ZdQMrKfVEN9tw==", + "dev": true, + "license": "MIT" + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true, + "license": "MIT" + }, + "node_modules/saxes": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/saxes/-/saxes-6.0.0.tgz", + "integrity": "sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==", + "dev": true, + "license": "ISC", + "dependencies": { + "xmlchars": "^2.2.0" + }, + "engines": { + "node": ">=v12.22.7" + } + }, "node_modules/scheduler": { - "version": "0.25.0", - "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.25.0.tgz", - "integrity": "sha512-xFVuu11jh+xcO7JOAGJNOXld8/TcEHK/4CituBUeUb5hqxJLj9YuemAEuvm9gQ/+pgXYfbQuqAkiYu+u7YEsNA==", + "version": "0.26.0", + "resolved": 
"https://registry.npmjs.org/scheduler/-/scheduler-0.26.0.tgz", + "integrity": "sha512-NlHwttCI/l5gCPR3D1nNXtWABUmBwvZpEQiD4IXSbIDq8BzLIK/7Ir5gTFSGZDUu37K5cMNp0hFtzO38sC7gWA==", "license": "MIT" }, "node_modules/set-cookie-parser": { @@ -3277,6 +4750,13 @@ "node": ">=8" } }, + "node_modules/siginfo": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", + "dev": true, + "license": "ISC" + }, "node_modules/source-map": { "version": "0.5.7", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", @@ -3296,6 +4776,33 @@ "node": ">=0.10.0" } }, + "node_modules/stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", + "dev": true, + "license": "MIT" + }, + "node_modules/std-env": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.9.0.tgz", + "integrity": "sha512-UGvjygr6F6tpH7o2qyqR6QYpwraIjKSdtzyBdyytFOHmPZY917kwdwLG0RbOjWOnKmnm3PeHjaoLLMie7kPLQw==", + "dev": true, + "license": "MIT" + }, + "node_modules/strip-indent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz", + "integrity": "sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "min-indent": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/strip-json-comments": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", @@ -3309,6 +4816,26 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/strip-literal": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-3.0.0.tgz", + "integrity": "sha512-TcccoMhJOM3OebGhSBEmp3UZ2SfDMZUEBdRA/9ynfLi8yYajyWX3JiXArcJt4Umh4vISpspkQIY8ZZoCqjbviA==", + "dev": true, + "license": "MIT", + "dependencies": { + "js-tokens": "^9.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/strip-literal/node_modules/js-tokens": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.1.tgz", + "integrity": "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==", + "dev": true, + "license": "MIT" + }, "node_modules/stylis": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.2.0.tgz", @@ -3340,6 +4867,13 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/symbol-tree": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz", + "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==", + "dev": true, + "license": "MIT" + }, "node_modules/tabbable": { "version": "6.2.0", "resolved": "https://registry.npmjs.org/tabbable/-/tabbable-6.2.0.tgz", @@ -3364,15 +4898,29 @@ "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==", "license": "MIT" }, + "node_modules/tinybench": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", + "integrity": 
"sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyexec": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.2.tgz", + "integrity": "sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==", + "dev": true, + "license": "MIT" + }, "node_modules/tinyglobby": { - "version": "0.2.13", - "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.13.tgz", - "integrity": "sha512-mEwzpUgrLySlveBwEVDMKk5B57bhLPYovRfPAXD5gA/98Opn0rCDj3GtLwFvCvH5RK9uPCExUROW5NjDwvqkxw==", + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", "dev": true, "license": "MIT", "dependencies": { - "fdir": "^6.4.4", - "picomatch": "^4.0.2" + "fdir": "^6.5.0", + "picomatch": "^4.0.3" }, "engines": { "node": ">=12.0.0" @@ -3381,12 +4929,88 @@ "url": "https://github.com/sponsors/SuperchupuDev" } }, + "node_modules/tinypool": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.1.1.tgz", + "integrity": "sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.0.0 || >=20.0.0" + } + }, + "node_modules/tinyrainbow": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-2.0.0.tgz", + "integrity": "sha512-op4nsTR47R6p0vMUUoYl/a+ljLFVtlfaXkLQmqfLR1qHma1h/ysYk4hEXZ880bf2CYgTskvTa/e196Vd5dDQXw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tinyspy": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-4.0.4.tgz", + "integrity": "sha512-azl+t0z7pw/z958Gy9svOTuzqIk6xq+NSheJzn5MMWtWTFywIacg2wUlzKFGtt3cthx0r2SxMK0yzJOR0IES7Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tldts": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/tldts/-/tldts-7.0.15.tgz", + "integrity": "sha512-heYRCiGLhtI+U/D0V8YM3QRwPfsLJiP+HX+YwiHZTnWzjIKC+ZCxQRYlzvOoTEc6KIP62B1VeAN63diGCng2hg==", + "dev": true, + "license": "MIT", + "dependencies": { + "tldts-core": "^7.0.15" + }, + "bin": { + "tldts": "bin/cli.js" + } + }, + "node_modules/tldts-core": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-7.0.15.tgz", + "integrity": "sha512-YBkp2VfS9VTRMPNL2PA6PMESmxV1JEVoAr5iBlZnB5JG3KUrWzNCB3yNNkRa2FZkqClaBgfNYCp8PgpYmpjkZw==", + "dev": true, + "license": "MIT" + }, "node_modules/toposort": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/toposort/-/toposort-2.0.2.tgz", "integrity": "sha512-0a5EOkAUp8D4moMi2W8ZF8jcga7BgZd91O/yabJCFY8az+XSzeGyTKs0Aoo897iV1Nj6guFq8orWDS96z91oGg==", "license": "MIT" }, + "node_modules/tough-cookie": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-6.0.0.tgz", + "integrity": "sha512-kXuRi1mtaKMrsLUxz3sQYvVl37B0Ns6MzfrtV5DvJceE9bPyspOqk9xxv7XbZWcfLWbFmm997vl83qUWVJA64w==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "tldts": "^7.0.5" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/tr46": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-6.0.0.tgz", + "integrity": 
"sha512-bLVMLPtstlZ4iMQHpFHTR7GAGj2jxi8Dg0s2h2MafAE4uSWF98FC/3MomU51iQAMf8/qDUbKWf5GxuvvVcXEhw==", + "dev": true, + "license": "MIT", + "dependencies": { + "punycode": "^2.3.1" + }, + "engines": { + "node": ">=20" + } + }, "node_modules/tslib": { "version": "2.8.1", "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", @@ -3494,6 +5118,19 @@ } } }, + "node_modules/use-resize-observer": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/use-resize-observer/-/use-resize-observer-9.1.0.tgz", + "integrity": "sha512-R25VqO9Wb3asSD4eqtcxk8sJalvIOYBqS8MNZlpDSQ4l4xMQxC/J7Id9HoTqPq8FwULIn0PVW+OAqF2dyYbjow==", + "license": "MIT", + "dependencies": { + "@juggle/resize-observer": "^3.3.1" + }, + "peerDependencies": { + "react": "16.8.0 - 18", + "react-dom": "16.8.0 - 18" + } + }, "node_modules/use-sidecar": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.3.tgz", @@ -3664,11 +5301,168 @@ } } }, + "node_modules/vite-node": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-3.2.4.tgz", + "integrity": "sha512-EbKSKh+bh1E1IFxeO0pg1n4dvoOTt0UDiXMd/qn++r98+jPO1xtJilvXldeuQ8giIB5IkpjCgMleHMNEsGH6pg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cac": "^6.7.14", + "debug": "^4.4.1", + "es-module-lexer": "^1.7.0", + "pathe": "^2.0.3", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" + }, + "bin": { + "vite-node": "vite-node.mjs" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/vitest": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-3.2.4.tgz", + "integrity": "sha512-LUCP5ev3GURDysTWiP47wRRUpLKMOfPh+yKTx3kVIEiu5KOMeqzpnYNsKyOoVrULivR8tLcks4+lga33Whn90A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/chai": "^5.2.2", + "@vitest/expect": "3.2.4", + "@vitest/mocker": "3.2.4", + "@vitest/pretty-format": "^3.2.4", + "@vitest/runner": "3.2.4", + "@vitest/snapshot": "3.2.4", + "@vitest/spy": "3.2.4", + "@vitest/utils": "3.2.4", + "chai": "^5.2.0", + "debug": "^4.4.1", + "expect-type": "^1.2.1", + "magic-string": "^0.30.17", + "pathe": "^2.0.3", + "picomatch": "^4.0.2", + "std-env": "^3.9.0", + "tinybench": "^2.9.0", + "tinyexec": "^0.3.2", + "tinyglobby": "^0.2.14", + "tinypool": "^1.1.1", + "tinyrainbow": "^2.0.0", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0", + "vite-node": "3.2.4", + "why-is-node-running": "^2.3.0" + }, + "bin": { + "vitest": "vitest.mjs" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@types/debug": "^4.1.12", + "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", + "@vitest/browser": "3.2.4", + "@vitest/ui": "3.2.4", + "happy-dom": "*", + "jsdom": "*" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + "@types/debug": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + } + } + }, + "node_modules/w3c-xmlserializer": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/w3c-xmlserializer/-/w3c-xmlserializer-5.0.0.tgz", + "integrity": "sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "xml-name-validator": "^5.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/webidl-conversions": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-8.0.0.tgz", + "integrity": "sha512-n4W4YFyz5JzOfQeA8oN7dUYpR+MBP3PIUsn2jLjWXwK5ASUzt0Jc/A5sAUZoCYFJRGF0FBKJ+1JjN43rNdsQzA==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=20" + } + }, "node_modules/webworkify-webpack": { "version": "2.1.5", - "resolved": "git+ssh://git@github.com/xqq/webworkify-webpack.git#24d1e719b4a6cac37a518b2bb10fe124527ef4ef", + "resolved": "git+ssh://git@github.com/xqq/webworkify-webpack.git", + "integrity": "sha512-W8Bg+iLq52d2GFvwabPNCIDCgMHcW3g68Tr8zwpJliEz2cKBIKYL3T0VdYeZWhz5rOxWRBBEdF931fquSO6iCQ==", "license": "MIT" }, + "node_modules/whatwg-encoding": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-3.1.1.tgz", + "integrity": "sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "iconv-lite": "0.6.3" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/whatwg-mimetype": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz", + "integrity": "sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/whatwg-url": { + "version": "15.1.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-15.1.0.tgz", + "integrity": "sha512-2ytDk0kiEj/yu90JOAp44PVPUkO9+jVhyf+SybKlRHSDlvOOZhdPIrr7xTH64l4WixO2cP+wQIcgujkGBPPz6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "tr46": "^6.0.0", + "webidl-conversions": "^8.0.0" + }, + "engines": { + "node": ">=20" + } + }, "node_modules/which": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", @@ -3685,6 +5479,23 @@ "node": ">= 8" } }, + "node_modules/why-is-node-running": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", + "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", + "dev": true, + "license": "MIT", + "dependencies": { + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/word-wrap": { "version": "1.2.5", "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", @@ -3695,6 +5506,60 @@ "node": ">=0.10.0" } }, + "node_modules/ws": { + "version": "8.18.3", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz", + "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/xml-name-validator": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-5.0.0.tgz", + "integrity": "sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==", + "dev": true, + "license": "Apache-2.0", + 
"engines": { + "node": ">=18" + } + }, + "node_modules/xmlchars": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/xmlchars/-/xmlchars-2.2.0.tgz", + "integrity": "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==", + "dev": true, + "license": "MIT" + }, + "node_modules/yaml": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.8.1.tgz", + "integrity": "sha512-lcYcMxX2PO9XMGvAJkJ3OsNMw+/7FKes7/hgerGUYWIoWu5j/+YQqcZr5JnPZWzOsEBgMbSbiSTn/dv/69Mkpw==", + "dev": true, + "license": "ISC", + "optional": true, + "peer": true, + "bin": { + "yaml": "bin.mjs" + }, + "engines": { + "node": ">= 14.6" + } + }, "node_modules/yocto-queue": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", diff --git a/frontend/package.json b/frontend/package.json index 291363fe..06a9313b 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -1,5 +1,5 @@ { - "name": "vite", + "name": "frontend", "private": true, "version": "0.0.0", "type": "module", @@ -7,7 +7,9 @@ "dev": "vite --host", "build": "vite build", "lint": "eslint .", - "preview": "vite preview" + "preview": "vite preview", + "test": "vitest --run", + "test:watch": "vitest" }, "dependencies": { "@dnd-kit/core": "^6.3.1", @@ -22,15 +24,15 @@ "@mantine/hooks": "~8.0.1", "@mantine/notifications": "~8.0.1", "@tanstack/react-table": "^8.21.2", - "allotment": "^1.20.3", + "allotment": "^1.20.4", "dayjs": "^1.11.13", "immer": "^10.1.1", "formik": "^2.4.6", "hls.js": "^1.5.20", "lucide-react": "^0.511.0", "mpegts.js": "^1.8.0", - "react": "^19.0.0", - "react-dom": "^19.0.0", + "react": "^19.1.0", + "react-dom": "^19.1.0", "react-draggable": "^4.4.6", "react-pro-sidebar": "^1.1.0", "react-router-dom": "^7.3.0", @@ -44,14 +46,27 @@ }, "devDependencies": { "@eslint/js": "^9.21.0", - "@types/react": "^19.0.10", - "@types/react-dom": "^19.0.4", - "@vitejs/plugin-react-swc": "^3.8.0", + "@swc/core": "npm:@swc/wasm@1.13.20", + "@swc/wasm": "^1.13.20", + "@testing-library/dom": "^10.4.1", + "@testing-library/jest-dom": "^6.8.0", + "@testing-library/react": "^16.3.0", + "@testing-library/user-event": "^14.6.1", + "@types/react": "^19.1.0", + "@types/react-dom": "^19.1.0", + "@vitejs/plugin-react-swc": "^4.1.0", "eslint": "^9.21.0", "eslint-plugin-react-hooks": "^5.1.0", "eslint-plugin-react-refresh": "^0.4.19", "globals": "^15.15.0", + "jsdom": "^27.0.0", "prettier": "^3.5.3", - "vite": "^6.2.0" + "vite": "^6.2.0", + "vitest": "^3.2.4" + }, + "resolutions": { + "vite": "7.1.7", + "react": "19.1.0", + "react-dom": "19.1.0" } } diff --git a/frontend/src/WebSocket.jsx b/frontend/src/WebSocket.jsx index ef56171b..40aa7800 100644 --- a/frontend/src/WebSocket.jsx +++ b/frontend/src/WebSocket.jsx @@ -38,7 +38,6 @@ export const WebsocketProvider = ({ children }) => { const updateEPG = useEPGsStore((s) => s.updateEPG); const updateEPGProgress = useEPGsStore((s) => s.updateEPGProgress); - const playlists = usePlaylistsStore((s) => s.playlists); const updatePlaylist = usePlaylistsStore((s) => s.updatePlaylist); const applyMediaScanUpdate = useLibraryStore((s) => s.applyScanUpdate); @@ -288,10 +287,14 @@ export const WebsocketProvider = ({ children }) => { // Update the playlist status whenever we receive a status update // Not just when progress is 100% or status is pending_setup if (parsedEvent.data.status && parsedEvent.data.account) { - // Check if playlists is an object with IDs as keys or an array - const playlist = 
Array.isArray(playlists) - ? playlists.find((p) => p.id === parsedEvent.data.account) - : playlists[parsedEvent.data.account]; + // Get fresh playlists from store to avoid stale state from React render cycle + const currentPlaylists = usePlaylistsStore.getState().playlists; + const isArray = Array.isArray(currentPlaylists); + const playlist = isArray + ? currentPlaylists.find( + (p) => p.id === parsedEvent.data.account + ) + : currentPlaylists[parsedEvent.data.account]; if (playlist) { // When we receive a "success" status with 100% progress, this is a completed refresh @@ -314,19 +317,19 @@ export const WebsocketProvider = ({ children }) => { 'M3U refresh completed successfully:', updateData ); + fetchPlaylists(); // Refresh playlists to ensure UI is up-to-date + fetchChannelProfiles(); // Ensure channel profiles are updated } updatePlaylist(updateData); - fetchPlaylists(); // Refresh playlists to ensure UI is up-to-date - fetchChannelProfiles(); // Ensure channel profiles are updated } else { - // Log when playlist can't be found for debugging purposes - console.warn( - `Received update for unknown playlist ID: ${parsedEvent.data.account}`, - Array.isArray(playlists) - ? 'playlists is array' - : 'playlists is object', - Object.keys(playlists).length + // Playlist not in store yet - this happens when backend sends websocket + // updates immediately after creating the playlist, before the API response + // returns. The frontend will receive a 'playlist_created' event shortly + // which will trigger a fetchPlaylists() to sync the store. + console.log( + `Received update for playlist ID ${parsedEvent.data.account} not yet in store. ` + + `Waiting for playlist_created event to sync...` ); } } @@ -371,6 +374,173 @@ export const WebsocketProvider = ({ children }) => { } break; + case 'epg_matching_progress': { + const progress = parsedEvent.data; + const id = 'epg-matching-progress'; + + if (progress.stage === 'starting') { + notifications.show({ + id, + title: 'EPG Matching in Progress', + message: `Starting to match ${progress.total} channels...`, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + loading: true, + }); + } else if (progress.stage === 'matching') { + let message = `Matched ${progress.matched} of ${progress.total} channels`; + if (progress.remaining > 0) { + message += ` (${progress.remaining} remaining)`; + } + if (progress.current_channel) { + message += `\nCurrently processing: ${progress.current_channel}`; + } + + notifications.update({ + id, + title: 'EPG Matching in Progress', + message, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + loading: true, + }); + } else if (progress.stage === 'completed') { + notifications.update({ + id, + title: 'EPG Matching Complete', + message: `Successfully matched ${progress.matched} of ${progress.total} channels (${progress.progress_percent}%)`, + color: progress.matched > 0 ? 
'green.5' : 'orange', + loading: false, + autoClose: 6000, + }); + } + break; + } + + case 'epg_logo_setting_progress': { + const progress = parsedEvent.data; + const id = 'epg-logo-setting-progress'; + + if (progress.status === 'running' && progress.progress === 0) { + // Initial message + notifications.show({ + id, + title: 'Setting Logos from EPG', + message: `Processing ${progress.total} channels...`, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + loading: true, + }); + } else if (progress.status === 'running') { + // Progress update + let message = `Processed ${progress.progress} of ${progress.total} channels`; + if (progress.updated_count !== undefined) { + message += ` (${progress.updated_count} updated)`; + } + if (progress.created_logos_count !== undefined) { + message += `, created ${progress.created_logos_count} logos`; + } + + notifications.update({ + id, + title: 'Setting Logos from EPG', + message, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + loading: true, + }); + } else if (progress.status === 'completed') { + notifications.update({ + id, + title: 'Logo Setting Complete', + message: `Successfully updated ${progress.updated_count || 0} channel logos${progress.created_logos_count ? `, created ${progress.created_logos_count} new logos` : ''}`, + color: progress.updated_count > 0 ? 'green.5' : 'orange', + loading: false, + autoClose: 6000, + }); + // Refresh channels data and logos + try { + await API.requeryChannels(); + await useChannelsStore.getState().fetchChannels(); + + // Get updated channel data and extract logo IDs to load + const channels = useChannelsStore.getState().channels; + const logoIds = Object.values(channels) + .filter((channel) => channel.logo_id) + .map((channel) => channel.logo_id); + + // Fetch the specific logos that were just assigned + if (logoIds.length > 0) { + await useLogosStore.getState().fetchLogosByIds(logoIds); + } + } catch (e) { + console.warn( + 'Failed to refresh channels after logo setting:', + e + ); + } + } + break; + } + + case 'epg_name_setting_progress': { + const progress = parsedEvent.data; + const id = 'epg-name-setting-progress'; + + if (progress.status === 'running' && progress.progress === 0) { + // Initial message + notifications.show({ + id, + title: 'Setting Names from EPG', + message: `Processing ${progress.total} channels...`, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + loading: true, + }); + } else if (progress.status === 'running') { + // Progress update + let message = `Processed ${progress.progress} of ${progress.total} channels`; + if (progress.updated_count !== undefined) { + message += ` (${progress.updated_count} updated)`; + } + + notifications.update({ + id, + title: 'Setting Names from EPG', + message, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + loading: true, + }); + } else if (progress.status === 'completed') { + notifications.update({ + id, + title: 'Name Setting Complete', + message: `Successfully updated ${progress.updated_count || 0} channel names from EPG data`, + color: progress.updated_count > 0 ? 
'green.5' : 'orange', + loading: false, + autoClose: 6000, + }); + // Refresh channels data + try { + await API.requeryChannels(); + await useChannelsStore.getState().fetchChannels(); + } catch (e) { + console.warn( + 'Failed to refresh channels after name setting:', + e + ); + } + } + break; + } + case 'm3u_profile_test': setProfilePreview( parsedEvent.data.search_preview, @@ -613,6 +783,14 @@ export const WebsocketProvider = ({ children }) => { break; + case 'playlist_created': + // Backend signals that a new playlist has been created and we should refresh + console.log( + 'Playlist created event received, refreshing playlists...' + ); + fetchPlaylists(); + break; + case 'bulk_channel_creation_progress': { // Handle progress updates with persistent notifications like stream rehash const data = parsedEvent.data; diff --git a/frontend/src/api.js b/frontend/src/api.js index 8009e238..b7a2b8b2 100644 --- a/frontend/src/api.js +++ b/frontend/src/api.js @@ -516,6 +516,75 @@ export default class API { } } + static async setChannelNamesFromEpg(channelIds) { + try { + const response = await request( + `${host}/api/channels/channels/set-names-from-epg/`, + { + method: 'POST', + body: { channel_ids: channelIds }, + } + ); + + notifications.show({ + title: 'Task Started', + message: response.message, + color: 'blue', + }); + + return response; + } catch (e) { + errorNotification('Failed to start EPG name setting task', e); + throw e; + } + } + + static async setChannelLogosFromEpg(channelIds) { + try { + const response = await request( + `${host}/api/channels/channels/set-logos-from-epg/`, + { + method: 'POST', + body: { channel_ids: channelIds }, + } + ); + + notifications.show({ + title: 'Task Started', + message: response.message, + color: 'blue', + }); + + return response; + } catch (e) { + errorNotification('Failed to start EPG logo setting task', e); + throw e; + } + } + + static async setChannelTvgIdsFromEpg(channelIds) { + try { + const response = await request( + `${host}/api/channels/channels/set-tvg-ids-from-epg/`, + { + method: 'POST', + body: { channel_ids: channelIds }, + } + ); + + notifications.show({ + title: 'Task Started', + message: response.message, + color: 'blue', + }); + + return response; + } catch (e) { + errorNotification('Failed to start EPG TVG-ID setting task', e); + throw e; + } + } + static async assignChannelNumbers(channelIds, startingNum = 1) { try { const response = await request(`${host}/api/channels/channels/assign/`, { @@ -1437,12 +1506,18 @@ export default class API { } } - static async matchEpg() { + static async matchEpg(channelIds = null) { try { + const requestBody = channelIds ? 
{ channel_ids: channelIds } : {}; + const response = await request( `${host}/api/channels/channels/match-epg/`, { method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(requestBody), } ); @@ -1452,6 +1527,26 @@ export default class API { } } + static async matchChannelEpg(channelId) { + try { + const response = await request( + `${host}/api/channels/channels/${channelId}/match-epg/`, + { + method: 'POST', + } + ); + + // Update the channel in the store with the refreshed data if provided + if (response.channel) { + useChannelsStore.getState().updateChannel(response.channel); + } + + return response; + } catch (e) { + errorNotification('Failed to run EPG auto-match for channel', e); + } + } + static async fetchActiveChannelStats() { try { const response = await request(`${host}/proxy/ts/status`); @@ -1801,6 +1896,83 @@ export default class API { } } + static async updateRecording(id, values) { + try { + const response = await request(`${host}/api/channels/recordings/${id}/`, { + method: 'PATCH', + body: values, + }); + useChannelsStore.getState().fetchRecordings(); + return response; + } catch (e) { + errorNotification(`Failed to update recording ${id}`, e); + } + } + + static async getComskipConfig() { + try { + return await request(`${host}/api/channels/dvr/comskip-config/`); + } catch (e) { + errorNotification('Failed to retrieve comskip configuration', e); + } + } + + static async uploadComskipIni(file) { + try { + const formData = new FormData(); + formData.append('file', file); + return await request(`${host}/api/channels/dvr/comskip-config/`, { + method: 'POST', + body: formData, + }); + } catch (e) { + errorNotification('Failed to upload comskip.ini', e); + } + } + + static async listRecurringRules() { + try { + const response = await request(`${host}/api/channels/recurring-rules/`); + return response; + } catch (e) { + errorNotification('Failed to retrieve recurring DVR rules', e); + } + } + + static async createRecurringRule(payload) { + try { + const response = await request(`${host}/api/channels/recurring-rules/`, { + method: 'POST', + body: payload, + }); + return response; + } catch (e) { + errorNotification('Failed to create recurring DVR rule', e); + } + } + + static async updateRecurringRule(ruleId, payload) { + try { + const response = await request(`${host}/api/channels/recurring-rules/${ruleId}/`, { + method: 'PATCH', + body: payload, + }); + return response; + } catch (e) { + errorNotification(`Failed to update recurring rule ${ruleId}`, e); + } + } + + static async deleteRecurringRule(ruleId) { + try { + await request(`${host}/api/channels/recurring-rules/${ruleId}/`, { + method: 'DELETE', + }); + } catch (e) { + errorNotification(`Failed to delete recurring rule ${ruleId}`, e); + } + } + static async deleteRecording(id) { try { await request(`${host}/api/channels/recordings/${id}/`, { method: 'DELETE' }); diff --git a/frontend/src/components/forms/Channel.jsx b/frontend/src/components/forms/Channel.jsx index d07fa44c..fd2e5312 100644 --- a/frontend/src/components/forms/Channel.jsx +++ b/frontend/src/components/forms/Channel.jsx @@ -9,7 +9,9 @@ import ChannelGroupForm from './ChannelGroup'; import usePlaylistsStore from '../../store/playlists'; import logo from '../../images/logo.png'; import { useChannelLogoSelection } from '../../hooks/useSmartLogos'; +import useLogosStore from '../../store/logos'; import LazyLogo from '../LazyLogo'; +import LogoForm from './Logo'; import { Box, Button, @@ -34,9 +36,9 @@ import { 
UnstyledButton, } from '@mantine/core'; import { notifications } from '@mantine/notifications'; -import { ListOrdered, SquarePlus, SquareX, X } from 'lucide-react'; +import { ListOrdered, SquarePlus, SquareX, X, Zap } from 'lucide-react'; import useEPGsStore from '../../store/epgs'; -import { Dropzone } from '@mantine/dropzone'; + import { FixedSizeList as List } from 'react-window'; import { USER_LEVELS, USER_LEVEL_LABELS } from '../../constants'; @@ -51,11 +53,14 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { const canEditChannelGroup = useChannelsStore((s) => s.canEditChannelGroup); const { - logos, + logos: channelLogos, ensureLogosLoaded, isLoading: logosLoading, } = useChannelLogoSelection(); + // Import the full logos store for duplicate checking + const allLogos = useLogosStore((s) => s.logos); + // Ensure logos are loaded when component mounts useEffect(() => { ensureLogosLoaded(); @@ -67,7 +72,7 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { const tvgs = useEPGsStore((s) => s.tvgs); const tvgsById = useEPGsStore((s) => s.tvgsById); - const [logoPreview, setLogoPreview] = useState(null); + const [logoModalOpen, setLogoModalOpen] = useState(false); const [channelStreams, setChannelStreams] = useState([]); const [channelGroupModelOpen, setChannelGroupModalOpen] = useState(false); const [epgPopoverOpened, setEpgPopoverOpened] = useState(false); @@ -78,6 +83,7 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { const [groupPopoverOpened, setGroupPopoverOpened] = useState(false); const [groupFilter, setGroupFilter] = useState(''); + const [autoMatchLoading, setAutoMatchLoading] = useState(false); const groupOptions = Object.values(channelGroups); const addStream = (stream) => { @@ -92,32 +98,196 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { setChannelStreams(Array.from(streamSet)); }; - const handleLogoChange = async (files) => { - if (files.length === 1) { - const file = files[0]; + const handleLogoSuccess = ({ logo }) => { + if (logo && logo.id) { + formik.setFieldValue('logo_id', logo.id); + ensureLogosLoaded(); // Refresh logos + } + setLogoModalOpen(false); + }; + + const handleAutoMatchEpg = async () => { + // Only attempt auto-match for existing channels (editing mode) + if (!channel || !channel.id) { + notifications.show({ + title: 'Info', + message: 'Auto-match is only available when editing existing channels.', + color: 'blue', + }); + return; + } + + setAutoMatchLoading(true); + try { + const response = await API.matchChannelEpg(channel.id); + + if (response.matched) { + // Update the form with the new EPG data + if (response.channel && response.channel.epg_data_id) { + formik.setFieldValue('epg_data_id', response.channel.epg_data_id); + } - // Validate file size on frontend first - if (file.size > 5 * 1024 * 1024) { - // 5MB notifications.show({ - title: 'Error', - message: 'File too large. 
Maximum size is 5MB.', - color: 'red', + title: 'Success', + message: response.message, + color: 'green', + }); + } else { + notifications.show({ + title: 'No Match Found', + message: response.message, + color: 'orange', }); - return; } + } catch (error) { + notifications.show({ + title: 'Error', + message: 'Failed to auto-match EPG data', + color: 'red', + }); + console.error('Auto-match error:', error); + } finally { + setAutoMatchLoading(false); + } + }; - try { - const retval = await API.uploadLogo(file); - // Note: API.uploadLogo already adds the logo to the store, no need to fetch - setLogoPreview(retval.cache_url); - formik.setFieldValue('logo_id', retval.id); - } catch (error) { - console.error('Logo upload failed:', error); - // Error notification is already handled in API.uploadLogo - } + const handleSetNameFromEpg = () => { + const epgDataId = formik.values.epg_data_id; + if (!epgDataId) { + notifications.show({ + title: 'No EPG Selected', + message: 'Please select an EPG source first.', + color: 'orange', + }); + return; + } + + const tvg = tvgsById[epgDataId]; + if (tvg && tvg.name) { + formik.setFieldValue('name', tvg.name); + notifications.show({ + title: 'Success', + message: `Channel name set to "${tvg.name}"`, + color: 'green', + }); } else { - setLogoPreview(null); + notifications.show({ + title: 'No Name Available', + message: 'No name found in the selected EPG data.', + color: 'orange', + }); + } + }; + + const handleSetLogoFromEpg = async () => { + const epgDataId = formik.values.epg_data_id; + if (!epgDataId) { + notifications.show({ + title: 'No EPG Selected', + message: 'Please select an EPG source first.', + color: 'orange', + }); + return; + } + + const tvg = tvgsById[epgDataId]; + if (!tvg || !tvg.icon_url) { + notifications.show({ + title: 'No EPG Icon', + message: 'EPG data does not have an icon URL.', + color: 'orange', + }); + return; + } + + try { + // Try to find a logo that matches the EPG icon URL - check ALL logos to avoid duplicates + let matchingLogo = Object.values(allLogos).find( + (logo) => logo.url === tvg.icon_url + ); + + if (matchingLogo) { + formik.setFieldValue('logo_id', matchingLogo.id); + notifications.show({ + title: 'Success', + message: `Logo set to "${matchingLogo.name}"`, + color: 'green', + }); + } else { + // Logo doesn't exist - create it + notifications.show({ + id: 'creating-logo', + title: 'Creating Logo', + message: `Creating new logo from EPG icon URL...`, + loading: true, + }); + + try { + const newLogoData = { + name: tvg.name || `Logo for ${tvg.icon_url}`, + url: tvg.icon_url, + }; + + // Create logo by calling the Logo API directly + const newLogo = await API.createLogo(newLogoData); + + formik.setFieldValue('logo_id', newLogo.id); + + notifications.update({ + id: 'creating-logo', + title: 'Success', + message: `Created and assigned new logo "${newLogo.name}"`, + loading: false, + color: 'green', + autoClose: 5000, + }); + } catch (createError) { + notifications.update({ + id: 'creating-logo', + title: 'Error', + message: 'Failed to create logo from EPG icon URL', + loading: false, + color: 'red', + autoClose: 5000, + }); + throw createError; + } + } + } catch (error) { + notifications.show({ + title: 'Error', + message: 'Failed to set logo from EPG data', + color: 'red', + }); + console.error('Set logo from EPG error:', error); + } + }; + + const handleSetTvgIdFromEpg = () => { + const epgDataId = formik.values.epg_data_id; + if (!epgDataId) { + notifications.show({ + title: 'No EPG Selected', + message: 'Please select an 
EPG source first.', + color: 'orange', + }); + return; + } + + const tvg = tvgsById[epgDataId]; + if (tvg && tvg.tvg_id) { + formik.setFieldValue('tvg_id', tvg.tvg_id); + notifications.show({ + title: 'Success', + message: `TVG-ID set to "${tvg.tvg_id}"`, + color: 'green', + }); + } else { + notifications.show({ + title: 'No TVG-ID Available', + message: 'No TVG-ID found in the selected EPG data.', + color: 'orange', + }); } }; @@ -248,9 +418,11 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { // Memoize logo options to prevent infinite re-renders during background loading const logoOptions = useMemo(() => { - const options = [{ id: '0', name: 'Default' }].concat(Object.values(logos)); + const options = [{ id: '0', name: 'Default' }].concat( + Object.values(channelLogos) + ); return options; - }, [logos]); // Only depend on logos object + }, [channelLogos]); // Only depend on channelLogos object // Update the handler for when channel group modal is closed const handleChannelGroupModalClose = (newGroup) => { @@ -306,11 +478,28 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { + Channel Name + {formik.values.epg_data_id && ( + + )} + + } value={formik.values.name} onChange={formik.handleChange} error={formik.errors.name ? formik.touched.name : ''} size="xs" + style={{ flex: 1 }} /> @@ -492,9 +681,27 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { + Logo + {formik.values.epg_data_id && ( + + )} + + } readOnly - value={logos[formik.values.logo_id]?.name || 'Default'} + value={ + channelLogos[formik.values.logo_id]?.name || 'Default' + } onClick={() => { console.log( 'Logo input clicked, setting popover opened to true' @@ -601,42 +808,22 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { - + + + - - - - OR - - - - - - Upload Logo - console.log('rejected files', files)} - maxSize={5 * 1024 ** 2} - > - - - Drag images here or click to select files - - - - -
-
+ @@ -664,7 +851,23 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { + TVG-ID + {formik.values.epg_data_id && ( + + )} + + } value={formik.values.tvg_id} onChange={formik.handleChange} error={formik.errors.tvg_id ? formik.touched.tvg_id : ''} @@ -707,6 +910,25 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { > Use Dummy + } readOnly @@ -767,6 +989,7 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { } mb="xs" size="xs" + autoFocus /> @@ -836,6 +1059,12 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { isOpen={channelGroupModelOpen} onClose={handleChannelGroupModalClose} /> + + setLogoModalOpen(false)} + onSuccess={handleLogoSuccess} + /> ); }; diff --git a/frontend/src/components/forms/ChannelBatch.jsx b/frontend/src/components/forms/ChannelBatch.jsx index 9973ea57..0527a3b6 100644 --- a/frontend/src/components/forms/ChannelBatch.jsx +++ b/frontend/src/components/forms/ChannelBatch.jsx @@ -27,20 +27,36 @@ import { import { ListOrdered, SquarePlus, SquareX, X } from 'lucide-react'; import { FixedSizeList as List } from 'react-window'; import { useForm } from '@mantine/form'; +import { notifications } from '@mantine/notifications'; import { USER_LEVELS, USER_LEVEL_LABELS } from '../../constants'; +import { useChannelLogoSelection } from '../../hooks/useSmartLogos'; +import LazyLogo from '../LazyLogo'; +import logo from '../../images/logo.png'; +import ConfirmationDialog from '../ConfirmationDialog'; +import useWarningsStore from '../../store/warnings'; const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { const theme = useMantineTheme(); const groupListRef = useRef(null); + const logoListRef = useRef(null); const channelGroups = useChannelsStore((s) => s.channelGroups); - const canEditChannelGroup = useChannelsStore((s) => s.canEditChannelGroup); + const { + logos: channelLogos, + ensureLogosLoaded, + isLoading: logosLoading, + } = useChannelLogoSelection(); + + useEffect(() => { + ensureLogosLoaded(); + }, [ensureLogosLoaded]); const streamProfiles = useStreamProfilesStore((s) => s.profiles); const [channelGroupModelOpen, setChannelGroupModalOpen] = useState(false); const [selectedChannelGroup, setSelectedChannelGroup] = useState('-1'); + const [selectedLogoId, setSelectedLogoId] = useState('-1'); const [isSubmitting, setIsSubmitting] = useState(false); const [regexFind, setRegexFind] = useState(''); const [regexReplace, setRegexReplace] = useState(''); @@ -49,10 +65,21 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { const [groupFilter, setGroupFilter] = useState(''); const groupOptions = Object.values(channelGroups); + const [logoPopoverOpened, setLogoPopoverOpened] = useState(false); + const [logoFilter, setLogoFilter] = useState(''); + // Confirmation dialog states + const [confirmSetNamesOpen, setConfirmSetNamesOpen] = useState(false); + const [confirmSetLogosOpen, setConfirmSetLogosOpen] = useState(false); + const [confirmSetTvgIdsOpen, setConfirmSetTvgIdsOpen] = useState(false); + const [confirmClearEpgsOpen, setConfirmClearEpgsOpen] = useState(false); + const isWarningSuppressed = useWarningsStore((s) => s.isWarningSuppressed); + const suppressWarning = useWarningsStore((s) => s.suppressWarning); + const form = useForm({ mode: 'uncontrolled', initialValues: { channel_group: '(no change)', + logo: '(no change)', stream_profile_id: '-1', user_level: '-1', }, @@ -70,6 +97,15 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { delete values.channel_group_id; } + if (selectedLogoId 
&& selectedLogoId !== '-1') { + if (selectedLogoId === '0') { + values.logo_id = null; + } else { + values.logo_id = parseInt(selectedLogoId); + } + } + delete values.logo; + // Handle stream profile ID - convert special values if (!values.stream_profile_id || values.stream_profile_id === '-1') { delete values.stream_profile_id; @@ -134,6 +170,184 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { } }; + const handleSetNamesFromEpg = async () => { + if (!channelIds || channelIds.length === 0) { + notifications.show({ + title: 'No Channels Selected', + message: 'No channels to update.', + color: 'orange', + }); + return; + } + + // Skip warning if suppressed + if (isWarningSuppressed('batch-set-names-from-epg')) { + return executeSetNamesFromEpg(); + } + + setConfirmSetNamesOpen(true); + }; + + const executeSetNamesFromEpg = async () => { + try { + // Start the backend task + await API.setChannelNamesFromEpg(channelIds); + + // The task will send WebSocket updates for progress + // Just show that it started successfully + notifications.show({ + title: 'Task Started', + message: `Started setting names from EPG for ${channelIds.length} channels. Progress will be shown in notifications.`, + color: 'blue', + }); + + // Close the modal since the task is now running in background + setConfirmSetNamesOpen(false); + onClose(); + } catch (error) { + console.error('Failed to start EPG name setting task:', error); + notifications.show({ + title: 'Error', + message: 'Failed to start EPG name setting task.', + color: 'red', + }); + setConfirmSetNamesOpen(false); + } + }; + + const handleSetLogosFromEpg = async () => { + if (!channelIds || channelIds.length === 0) { + notifications.show({ + title: 'No Channels Selected', + message: 'No channels to update.', + color: 'orange', + }); + return; + } + + // Skip warning if suppressed + if (isWarningSuppressed('batch-set-logos-from-epg')) { + return executeSetLogosFromEpg(); + } + + setConfirmSetLogosOpen(true); + }; + + const executeSetLogosFromEpg = async () => { + try { + // Start the backend task + await API.setChannelLogosFromEpg(channelIds); + + // The task will send WebSocket updates for progress + // Just show that it started successfully + notifications.show({ + title: 'Task Started', + message: `Started setting logos from EPG for ${channelIds.length} channels. Progress will be shown in notifications.`, + color: 'blue', + }); + + // Close the modal since the task is now running in background + setConfirmSetLogosOpen(false); + onClose(); + } catch (error) { + console.error('Failed to start EPG logo setting task:', error); + notifications.show({ + title: 'Error', + message: 'Failed to start EPG logo setting task.', + color: 'red', + }); + setConfirmSetLogosOpen(false); + } + }; + + const handleSetTvgIdsFromEpg = async () => { + if (!channelIds || channelIds.length === 0) { + notifications.show({ + title: 'No Channels Selected', + message: 'No channels to update.', + color: 'orange', + }); + return; + } + + // Skip warning if suppressed + if (isWarningSuppressed('batch-set-tvg-ids-from-epg')) { + return executeSetTvgIdsFromEpg(); + } + + setConfirmSetTvgIdsOpen(true); + }; + + const executeSetTvgIdsFromEpg = async () => { + try { + // Start the backend task + await API.setChannelTvgIdsFromEpg(channelIds); + + // The task will send WebSocket updates for progress + // Just show that it started successfully + notifications.show({ + title: 'Task Started', + message: `Started setting TVG-IDs from EPG for ${channelIds.length} channels. 
Progress will be shown in notifications.`, + color: 'blue', + }); + + // Close the modal since the task is now running in background + setConfirmSetTvgIdsOpen(false); + onClose(); + } catch (error) { + console.error('Failed to start EPG TVG-ID setting task:', error); + notifications.show({ + title: 'Error', + message: 'Failed to start EPG TVG-ID setting task.', + color: 'red', + }); + setConfirmSetTvgIdsOpen(false); + } + }; + + const handleClearEpgs = async () => { + if (!channelIds || channelIds.length === 0) { + notifications.show({ + title: 'No Channels Selected', + message: 'No channels to update.', + color: 'orange', + }); + return; + } + + // Skip warning if suppressed + if (isWarningSuppressed('batch-clear-epgs')) { + return executeClearEpgs(); + } + + setConfirmClearEpgsOpen(true); + }; + + const executeClearEpgs = async () => { + try { + // Clear EPG assignments (set to null/dummy) using existing batchSetEPG API + const associations = channelIds.map((id) => ({ + channel_id: id, + epg_data_id: null, + })); + + await API.batchSetEPG(associations); + + // batchSetEPG already shows a notification and refreshes channels + // Close the modal + setConfirmClearEpgsOpen(false); + onClose(); + } catch (error) { + console.error('Failed to clear EPG assignments:', error); + notifications.show({ + title: 'Error', + message: 'Failed to clear EPG assignments.', + color: 'red', + }); + setConfirmClearEpgsOpen(false); + } + }; + // useEffect(() => { // // const sameStreamProfile = channels.every( // // (channel) => channel.stream_profile_id == channels[0].stream_profile_id @@ -174,6 +388,18 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { ), ]; + const logoOptions = useMemo(() => { + return [ + { id: '-1', name: '(no change)' }, + { id: '0', name: 'Use Default', isDefault: true }, + ...Object.values(channelLogos), + ]; + }, [channelLogos]); + + const filteredLogos = logoOptions.filter((logo) => + logo.name.toLowerCase().includes(logoFilter.toLowerCase()) + ); + if (!isOpen) { return <>; } @@ -183,7 +409,7 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { @@ -197,7 +423,9 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { - Channel Name + + Channel Name + { /> + + + + EPG Operations + + + + + + + + + + + + Updates channel names, logos, and TVG-IDs based on their + assigned EPG data, or clear EPG assignments to use dummy EPG + + + { + + + + setLogoPopoverOpened(true)} + size="xs" + style={{ flex: 1 }} + rightSection={ + selectedLogoId !== '-1' && ( + { + e.stopPropagation(); + setSelectedLogoId('-1'); + form.setValues({ logo: '(no change)' }); + }} + > + + + ) + } + /> + + e.stopPropagation()}> + + + setLogoFilter(event.currentTarget.value) + } + mb="xs" + size="xs" + /> + {logosLoading && ( + + Loading... + + )} + + + {filteredLogos.length === 0 ? ( +
+ + {logoFilter + ? 'No logos match your filter' + : 'No logos available'} + +
+ ) : ( + + {({ index, style }) => { + const item = filteredLogos[index]; + return ( +
{ + setSelectedLogoId(item.id); + form.setValues({ + logo: item.name, + }); + setLogoPopoverOpened(false); + }} + onMouseEnter={(e) => { + e.currentTarget.style.backgroundColor = + 'rgb(68, 68, 68)'; + }} + onMouseLeave={(e) => { + e.currentTarget.style.backgroundColor = + 'transparent'; + }} + > +
+ {item.isDefault ? ( + Default Logo + ) : item.id > 0 ? ( + {item.name { + if (e.target.src !== logo) { + e.target.src = logo; + } + }} + /> + ) : ( + + )} + + {item.name} + +
+
+ ); + }} +
+ )} +
+
+
+ {selectedLogoId > 0 && ( + + )} +
+ ({ - value: `${channel.id}`, - label: channel.name, - }))} + + - +
+ + {mode === 'single' ? ( + + )} - + {mode === 'single' ? ( + <> + + + + ) : ( + <> + + - - - - + + + recurringForm.setFieldValue('start_date', value || new Date()) + } + valueFormat="MMM D, YYYY" + /> + recurringForm.setFieldValue('end_date', value)} + valueFormat="MMM D, YYYY" + minDate={recurringForm.values.start_date || undefined} + /> + + + + + recurringForm.setFieldValue('start_time', toTimeString(val)) + )} + onBlur={() => recurringForm.validateField('start_time')} + withSeconds={false} + format="12" // shows 12-hour (so "00:00" renders "12:00 AM") + inputMode="numeric" + amLabel="AM" + pmLabel="PM" + /> + + + recurringForm.setFieldValue('end_time', toTimeString(val)) + )} + onBlur={() => recurringForm.validateField('end_time')} + withSeconds={false} + format="12" + inputMode="numeric" + amLabel="AM" + pmLabel="PM" + /> + + + )} + + + + +
+ +
); }; -export default DVR; +export default RecordingModal; diff --git a/frontend/src/components/tables/ChannelsTable/ChannelTableHeader.jsx b/frontend/src/components/tables/ChannelsTable/ChannelTableHeader.jsx index 54fa2f8d..b7e04d7d 100644 --- a/frontend/src/components/tables/ChannelsTable/ChannelTableHeader.jsx +++ b/frontend/src/components/tables/ChannelsTable/ChannelTableHeader.jsx @@ -143,11 +143,18 @@ const ChannelTableHeader = ({ const matchEpg = async () => { try { // Hit our new endpoint that triggers the fuzzy matching Celery task - await API.matchEpg(); - - notifications.show({ - title: 'EPG matching task started!', - }); + // If channels are selected, only match those; otherwise match all + if (selectedTableIds.length > 0) { + await API.matchEpg(selectedTableIds); + notifications.show({ + title: `EPG matching task started for ${selectedTableIds.length} selected channel(s)!`, + }); + } else { + await API.matchEpg(); + notifications.show({ + title: 'EPG matching task started for all channels without EPG!', + }); + } } catch (err) { notifications.show(`Error: ${err.message}`); } @@ -298,7 +305,11 @@ const ChannelTableHeader = ({ disabled={authUser.user_level != USER_LEVELS.ADMIN} onClick={matchEpg} > - Auto-Match + + {selectedTableIds.length > 0 + ? `Auto-Match (${selectedTableIds.length} selected)` + : 'Auto-Match EPG'} + { }; const editPlaylist = async (playlist = null) => { - if (playlist) { - setPlaylist(playlist); - } + setPlaylist(playlist); setPlaylistModalOpen(true); }; diff --git a/frontend/src/components/tables/StreamsTable.jsx b/frontend/src/components/tables/StreamsTable.jsx index ef944bbd..c4ab3652 100644 --- a/frontend/src/components/tables/StreamsTable.jsx +++ b/frontend/src/components/tables/StreamsTable.jsx @@ -282,7 +282,7 @@ const StreamsTable = () => { cell: ({ getValue }) => ( { cell: ({ getValue }) => ( { + const settings = useSettingsStore((s) => s.settings); + const [timeZone, setTimeZone] = useLocalStorage( + 'time-zone', + dayjs.tz?.guess + ? dayjs.tz.guess() + : Intl.DateTimeFormat().resolvedOptions().timeZone + ); + + useEffect(() => { + const tz = settings?.['system-time-zone']?.value; + if (tz && tz !== timeZone) { + setTimeZone(tz); + } + }, [settings, timeZone, setTimeZone]); + + return timeZone; +}; + +const useTimeHelpers = () => { + const timeZone = useUserTimeZone(); + + const toUserTime = useCallback( + (value) => { + if (!value) return dayjs.invalid(); + try { + return dayjs(value).tz(timeZone); + } catch (error) { + return dayjs(value); + } + }, + [timeZone] + ); + + const userNow = useCallback(() => dayjs().tz(timeZone), [timeZone]); + + return { timeZone, toUserTime, userNow }; +}; + +const RECURRING_DAY_OPTIONS = [ + { value: 6, label: 'Sun' }, + { value: 0, label: 'Mon' }, + { value: 1, label: 'Tue' }, + { value: 2, label: 'Wed' }, + { value: 3, label: 'Thu' }, + { value: 4, label: 'Fri' }, + { value: 5, label: 'Sat' }, +]; // Short preview that triggers the details modal when clicked const RecordingSynopsis = ({ description, onOpen }) => { const truncated = description?.length > 140; - const preview = truncated ? `${description.slice(0, 140).trim()}…` : description; + const preview = truncated + ? 
`${description.slice(0, 140).trim()}...` + : description; if (!description) return null; return ( { ); }; -const RecordingDetailsModal = ({ opened, onClose, recording, channel, posterUrl, onWatchLive, onWatchRecording, env_mode }) => { - if (!recording) return null; +const RecordingDetailsModal = ({ + opened, + onClose, + recording, + channel, + posterUrl, + onWatchLive, + onWatchRecording, + env_mode, + onEdit, +}) => { + const allRecordings = useChannelsStore((s) => s.recordings); + const channelMap = useChannelsStore((s) => s.channels); + const { toUserTime, userNow } = useTimeHelpers(); + const [childOpen, setChildOpen] = React.useState(false); + const [childRec, setChildRec] = React.useState(null); - const customProps = recording.custom_properties || {}; + const safeRecording = recording || {}; + const customProps = safeRecording.custom_properties || {}; const program = customProps.program || {}; const recordingName = program.title || 'Custom Recording'; - const subTitle = program.sub_title || ''; const description = program.description || customProps.description || ''; - const start = dayjs(recording.start_time); - const end = dayjs(recording.end_time); + const start = toUserTime(safeRecording.start_time); + const end = toUserTime(safeRecording.end_time); const stats = customProps.stream_info || {}; const statRows = [ ['Video Codec', stats.video_codec], - ['Resolution', stats.resolution || (stats.width && stats.height ? `${stats.width}x${stats.height}` : null)], + [ + 'Resolution', + stats.resolution || + (stats.width && stats.height ? `${stats.width}x${stats.height}` : null), + ], ['FPS', stats.source_fps], ['Video Bitrate', stats.video_bitrate && `${stats.video_bitrate} kb/s`], ['Audio Codec', stats.audio_codec], @@ -85,39 +165,48 @@ const RecordingDetailsModal = ({ opened, onClose, recording, channel, posterUrl, ].filter(([, v]) => v !== null && v !== undefined && v !== ''); // Rating (if available) - const rating = customProps.rating || customProps.rating_value || (program && program.custom_properties && program.custom_properties.rating); + const rating = + customProps.rating || + customProps.rating_value || + (program && program.custom_properties && program.custom_properties.rating); const ratingSystem = customProps.rating_system || 'MPAA'; const fileUrl = customProps.file_url || customProps.output_file_url; - const canWatchRecording = (customProps.status === 'completed' || customProps.status === 'interrupted') && Boolean(fileUrl); + const canWatchRecording = + (customProps.status === 'completed' || + customProps.status === 'interrupted') && + Boolean(fileUrl); // Prefix in dev (Vite) if needed let resolvedPosterUrl = posterUrl; - if (typeof import.meta !== 'undefined' && import.meta.env && import.meta.env.DEV) { + if ( + typeof import.meta !== 'undefined' && + import.meta.env && + import.meta.env.DEV + ) { if (resolvedPosterUrl && resolvedPosterUrl.startsWith('/')) { resolvedPosterUrl = `${window.location.protocol}//${window.location.hostname}:5656${resolvedPosterUrl}`; } } - // If this card represented a grouped series (next of N), show a series modal listing episodes - const allRecordings = useChannelsStore((s) => s.recordings); - const channels = useChannelsStore((s) => s.channels); - const [childOpen, setChildOpen] = React.useState(false); - const [childRec, setChildRec] = React.useState(null); - const isSeriesGroup = Boolean(recording._group_count && recording._group_count > 1); + const isSeriesGroup = Boolean( + safeRecording._group_count && safeRecording._group_count > 1 
+ ); const upcomingEpisodes = React.useMemo(() => { if (!isSeriesGroup) return []; - const arr = Array.isArray(allRecordings) ? allRecordings : Object.values(allRecordings || {}); + const arr = Array.isArray(allRecordings) + ? allRecordings + : Object.values(allRecordings || {}); const tvid = program.tvg_id || ''; const titleKey = (program.title || '').toLowerCase(); const filtered = arr.filter((r) => { - const cp = r.custom_properties || {}; - const pr = cp.program || {}; - if ((pr.tvg_id || '') !== tvid) return false; - if ((pr.title || '').toLowerCase() !== titleKey) return false; - const st = dayjs(r.start_time); - return st.isAfter(dayjs()); - }); + const cp = r.custom_properties || {}; + const pr = cp.program || {}; + if ((pr.tvg_id || '') !== tvid) return false; + if ((pr.title || '').toLowerCase() !== titleKey) return false; + const st = toUserTime(r.start_time); + return st.isAfter(userNow()); + }); // Deduplicate by program.id if present, else by time+title const seen = new Set(); const deduped = []; @@ -127,52 +216,117 @@ const RecordingDetailsModal = ({ opened, onClose, recording, channel, posterUrl, // Prefer season/episode or onscreen code; else fall back to sub_title; else program id/slot const season = cp.season ?? pr?.custom_properties?.season; const episode = cp.episode ?? pr?.custom_properties?.episode; - const onscreen = cp.onscreen_episode ?? pr?.custom_properties?.onscreen_episode; + const onscreen = + cp.onscreen_episode ?? pr?.custom_properties?.onscreen_episode; let key = null; if (season != null && episode != null) key = `se:${season}:${episode}`; else if (onscreen) key = `onscreen:${String(onscreen).toLowerCase()}`; else if (pr.sub_title) key = `sub:${(pr.sub_title || '').toLowerCase()}`; else if (pr.id != null) key = `id:${pr.id}`; - else key = `slot:${r.channel}|${r.start_time}|${r.end_time}|${(pr.title||'')}`; + else + key = `slot:${r.channel}|${r.start_time}|${r.end_time}|${pr.title || ''}`; if (seen.has(key)) continue; seen.add(key); deduped.push(r); } - return deduped.sort((a, b) => dayjs(a.start_time) - dayjs(b.start_time)); - }, [allRecordings, isSeriesGroup, program.tvg_id, program.title]); + return deduped.sort( + (a, b) => toUserTime(a.start_time) - toUserTime(b.start_time) + ); + }, [ + allRecordings, + isSeriesGroup, + program.tvg_id, + program.title, + toUserTime, + userNow, + ]); + + if (!recording) return null; const EpisodeRow = ({ rec }) => { const cp = rec.custom_properties || {}; const pr = cp.program || {}; - const start = dayjs(rec.start_time); - const end = dayjs(rec.end_time); + const start = toUserTime(rec.start_time); + const end = toUserTime(rec.end_time); const season = cp.season ?? pr?.custom_properties?.season; const episode = cp.episode ?? pr?.custom_properties?.episode; - const onscreen = cp.onscreen_episode ?? pr?.custom_properties?.onscreen_episode; - const se = season && episode ? `S${String(season).padStart(2,'0')}E${String(episode).padStart(2,'0')}` : (onscreen || null); + const onscreen = + cp.onscreen_episode ?? pr?.custom_properties?.onscreen_episode; + const se = + season && episode + ? `S${String(season).padStart(2, '0')}E${String(episode).padStart(2, '0')}` + : onscreen || null; const posterLogoId = cp.poster_logo_id; - let purl = posterLogoId ? `/api/channels/logos/${posterLogoId}/cache/` : cp.poster_url || posterUrl || '/logo.png'; - if (typeof import.meta !== 'undefined' && import.meta.env && import.meta.env.DEV && purl && purl.startsWith('/')) { + let purl = posterLogoId + ? 
`/api/channels/logos/${posterLogoId}/cache/` + : cp.poster_url || posterUrl || '/logo.png'; + if ( + typeof import.meta !== 'undefined' && + import.meta.env && + import.meta.env.DEV && + purl && + purl.startsWith('/') + ) { purl = `${window.location.protocol}//${window.location.hostname}:5656${purl}`; } const onRemove = async (e) => { e?.stopPropagation?.(); - try { await API.deleteRecording(rec.id); } catch {} - try { await useChannelsStore.getState().fetchRecordings(); } catch {} + try { + await API.deleteRecording(rec.id); + } catch (error) { + console.error('Failed to delete upcoming recording', error); + } + try { + await useChannelsStore.getState().fetchRecordings(); + } catch (error) { + console.error('Failed to refresh recordings after delete', error); + } }; return ( - { setChildRec(rec); setChildOpen(true); }}> + { + setChildRec(rec); + setChildOpen(true); + }} + > - {pr.title + {pr.title - {pr.sub_title || pr.title} - {se && {se}} + + {pr.sub_title || pr.title} + + {se && ( + + {se} + + )} - {start.format('MMM D, YYYY h:mma')} – {end.format('h:mma')} + + {start.format('MMM D, YYYY h:mma')} – {end.format('h:mma')} + - + @@ -183,7 +337,11 @@ const RecordingDetailsModal = ({ opened, onClose, recording, channel, posterUrl, {upcomingEpisodes.length === 0 && ( - No upcoming episodes found + + No upcoming episodes found + )} {upcomingEpisodes.map((ep) => ( @@ -208,20 +368,22 @@ const RecordingDetailsModal = ({ opened, onClose, recording, channel, posterUrl, opened={childOpen} onClose={() => setChildOpen(false)} recording={childRec} - channel={channels[childRec.channel]} - posterUrl={( - childRec.custom_properties?.poster_logo_id + channel={channelMap[childRec.channel]} + posterUrl={ + (childRec.custom_properties?.poster_logo_id ? `/api/channels/logos/${childRec.custom_properties.poster_logo_id}/cache/` - : childRec.custom_properties?.poster_url || channels[childRec.channel]?.logo?.cache_url - ) || '/logo.png'} + : childRec.custom_properties?.poster_url || + channelMap[childRec.channel]?.logo?.cache_url) || + '/logo.png' + } env_mode={env_mode} onWatchLive={() => { const rec = childRec; - const now = dayjs(); - const s = dayjs(rec.start_time); - const e = dayjs(rec.end_time); + const now = userNow(); + const s = toUserTime(rec.start_time); + const e = toUserTime(rec.end_time); if (now.isAfter(s) && now.isBefore(e)) { - const ch = channels[rec.channel]; + const ch = channelMap[rec.channel]; if (!ch) return; let url = `/proxy/ts/stream/${ch.uuid}`; if (env_mode === 'dev') { @@ -231,79 +393,568 @@ const RecordingDetailsModal = ({ opened, onClose, recording, channel, posterUrl, } }} onWatchRecording={() => { - let fileUrl = childRec.custom_properties?.file_url || childRec.custom_properties?.output_file_url; + let fileUrl = + childRec.custom_properties?.file_url || + childRec.custom_properties?.output_file_url; if (!fileUrl) return; if (env_mode === 'dev' && fileUrl.startsWith('/')) { fileUrl = `${window.location.protocol}//${window.location.hostname}:5656${fileUrl}`; } - useVideoStore.getState().showVideo(fileUrl, 'vod', { name: childRec.custom_properties?.program?.title || 'Recording', logo: { url: (childRec.custom_properties?.poster_logo_id ? `/api/channels/logos/${childRec.custom_properties.poster_logo_id}/cache/` : channels[childRec.channel]?.logo?.cache_url) || '/logo.png' } }); + useVideoStore.getState().showVideo(fileUrl, 'vod', { + name: + childRec.custom_properties?.program?.title || 'Recording', + logo: { + url: + (childRec.custom_properties?.poster_logo_id + ? 
`/api/channels/logos/${childRec.custom_properties.poster_logo_id}/cache/` + : channelMap[childRec.channel]?.logo?.cache_url) || + '/logo.png', + }, + }); }} /> )} ) : ( - - {recordingName} - - - {channel ? `${channel.channel_number} • ${channel.name}` : '—'} - - {onWatchLive && ( - - )} - {onWatchRecording && ( - - )} - {customProps.status === 'completed' && (!customProps?.comskip || customProps?.comskip?.status !== 'completed') && ( - - )} + + {recordingName} + + + + {channel ? `${channel.channel_number} • ${channel.name}` : '—'} + + + {onWatchLive && ( + + )} + {onWatchRecording && ( + + )} + {onEdit && start.isAfter(userNow()) && ( + + )} + {customProps.status === 'completed' && + (!customProps?.comskip || + customProps?.comskip?.status !== 'completed') && ( + + )} + - - {start.format('MMM D, YYYY h:mma')} – {end.format('h:mma')} - {rating && ( - - {rating} - - )} - {description && ( - {description} - )} - {statRows.length > 0 && ( - - Stream Stats - {statRows.map(([k, v]) => ( - - {k} - {v} - - ))} - - )} - - + + {start.format('MMM D, YYYY h:mma')} – {end.format('h:mma')} + + {rating && ( + + + {rating} + + + )} + {description && ( + + {description} + + )} + {statRows.length > 0 && ( + + + Stream Stats + + {statRows.map(([k, v]) => ( + + + {k} + + {v} + + ))} + + )} + +
)} ); }; -const RecordingCard = ({ recording, category, onOpenDetails }) => { +const toTimeString = (value) => { + if (!value) return '00:00'; + if (typeof value === 'string') { + const parsed = dayjs(value, ['HH:mm', 'HH:mm:ss', 'h:mm A'], true); + if (parsed.isValid()) return parsed.format('HH:mm'); + return value; + } + const parsed = dayjs(value); + return parsed.isValid() ? parsed.format('HH:mm') : '00:00'; +}; + +const parseDate = (value) => { + if (!value) return null; + const parsed = dayjs(value, ['YYYY-MM-DD', dayjs.ISO_8601], true); + return parsed.isValid() ? parsed.toDate() : null; +}; + +const RecurringRuleModal = ({ opened, onClose, ruleId, onEditOccurrence }) => { + const channels = useChannelsStore((s) => s.channels); + const recurringRules = useChannelsStore((s) => s.recurringRules); + const fetchRecurringRules = useChannelsStore((s) => s.fetchRecurringRules); + const fetchRecordings = useChannelsStore((s) => s.fetchRecordings); + const recordings = useChannelsStore((s) => s.recordings); + const { toUserTime, userNow } = useTimeHelpers(); + + const [saving, setSaving] = useState(false); + const [deleting, setDeleting] = useState(false); + const [busyOccurrence, setBusyOccurrence] = useState(null); + + const rule = recurringRules.find((r) => r.id === ruleId); + + const channelOptions = useMemo(() => { + const list = Object.values(channels || {}); + list.sort((a, b) => { + const aNum = Number(a.channel_number) || 0; + const bNum = Number(b.channel_number) || 0; + if (aNum === bNum) { + return (a.name || '').localeCompare(b.name || ''); + } + return aNum - bNum; + }); + return list.map((item) => ({ + value: `${item.id}`, + label: item.name || `Channel ${item.id}`, + })); + }, [channels]); + + const form = useForm({ + mode: 'controlled', + initialValues: { + channel_id: '', + days_of_week: [], + rule_name: '', + start_time: dayjs().startOf('hour').format('HH:mm'), + end_time: dayjs().startOf('hour').add(1, 'hour').format('HH:mm'), + start_date: dayjs().toDate(), + end_date: dayjs().toDate(), + enabled: true, + }, + validate: { + channel_id: (value) => (value ? null : 'Select a channel'), + days_of_week: (value) => + value && value.length ? 
null : 'Pick at least one day', + end_time: (value, values) => { + if (!value) return 'Select an end time'; + const startValue = dayjs( + values.start_time, + ['HH:mm', 'hh:mm A', 'h:mm A'], + true + ); + const endValue = dayjs(value, ['HH:mm', 'hh:mm A', 'h:mm A'], true); + if ( + startValue.isValid() && + endValue.isValid() && + endValue.diff(startValue, 'minute') === 0 + ) { + return 'End time must differ from start time'; + } + return null; + }, + end_date: (value, values) => { + const endDate = dayjs(value); + const startDate = dayjs(values.start_date); + if (!value) return 'Select an end date'; + if (startDate.isValid() && endDate.isBefore(startDate, 'day')) { + return 'End date cannot be before start date'; + } + return null; + }, + }, + }); + + useEffect(() => { + if (opened && rule) { + form.setValues({ + channel_id: `${rule.channel}`, + days_of_week: (rule.days_of_week || []).map((d) => String(d)), + rule_name: rule.name || '', + start_time: toTimeString(rule.start_time), + end_time: toTimeString(rule.end_time), + start_date: parseDate(rule.start_date) || dayjs().toDate(), + end_date: parseDate(rule.end_date), + enabled: Boolean(rule.enabled), + }); + } else { + form.reset(); + } + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [opened, ruleId, rule]); + + const upcomingOccurrences = useMemo(() => { + const list = Array.isArray(recordings) + ? recordings + : Object.values(recordings || {}); + const now = userNow(); + return list + .filter( + (rec) => + rec?.custom_properties?.rule?.id === ruleId && + toUserTime(rec.start_time).isAfter(now) + ) + .sort( + (a, b) => + toUserTime(a.start_time).valueOf() - + toUserTime(b.start_time).valueOf() + ); + }, [recordings, ruleId, toUserTime, userNow]); + + const handleSave = async (values) => { + if (!rule) return; + setSaving(true); + try { + await API.updateRecurringRule(ruleId, { + channel: values.channel_id, + days_of_week: (values.days_of_week || []).map((d) => Number(d)), + start_time: toTimeString(values.start_time), + end_time: toTimeString(values.end_time), + start_date: values.start_date + ? dayjs(values.start_date).format('YYYY-MM-DD') + : null, + end_date: values.end_date + ? dayjs(values.end_date).format('YYYY-MM-DD') + : null, + name: values.rule_name?.trim() || '', + enabled: Boolean(values.enabled), + }); + await Promise.all([fetchRecurringRules(), fetchRecordings()]); + notifications.show({ + title: 'Recurring rule updated', + message: 'Schedule adjustments saved', + color: 'green', + autoClose: 2500, + }); + onClose(); + } catch (error) { + console.error('Failed to update recurring rule', error); + } finally { + setSaving(false); + } + }; + + const handleDelete = async () => { + if (!rule) return; + setDeleting(true); + try { + await API.deleteRecurringRule(ruleId); + await Promise.all([fetchRecurringRules(), fetchRecordings()]); + notifications.show({ + title: 'Recurring rule removed', + message: 'All future occurrences were cancelled', + color: 'red', + autoClose: 2500, + }); + onClose(); + } catch (error) { + console.error('Failed to delete recurring rule', error); + } finally { + setDeleting(false); + } + }; + + const handleToggleEnabled = async (checked) => { + if (!rule) return; + setSaving(true); + try { + await API.updateRecurringRule(ruleId, { enabled: checked }); + await Promise.all([fetchRecurringRules(), fetchRecordings()]); + notifications.show({ + title: checked ? 'Recurring rule enabled' : 'Recurring rule paused', + message: checked + ? 
'Future occurrences will resume' + : 'Upcoming occurrences were removed', + color: checked ? 'green' : 'yellow', + autoClose: 2500, + }); + } catch (error) { + console.error('Failed to toggle recurring rule', error); + form.setFieldValue('enabled', !checked); + } finally { + setSaving(false); + } + }; + + const handleCancelOccurrence = async (occurrence) => { + setBusyOccurrence(occurrence.id); + try { + await API.deleteRecording(occurrence.id); + await fetchRecordings(); + notifications.show({ + title: 'Occurrence cancelled', + message: 'The selected airing was removed', + color: 'yellow', + autoClose: 2000, + }); + } catch (error) { + console.error('Failed to cancel occurrence', error); + } finally { + setBusyOccurrence(null); + } + }; + + if (!rule) { + return ( + + Recurring rule not found. + + ); + } + + return ( + + + + + {channels?.[rule.channel]?.name || `Channel ${rule.channel}`} + + { + form.setFieldValue('enabled', event.currentTarget.checked); + handleToggleEnabled(event.currentTarget.checked); + }} + label={form.values.enabled ? 'Enabled' : 'Paused'} + disabled={saving} + /> + +
+ + onUISettingsChange('time-zone', val)} + data={timeZoneOptions} + /> @@ -707,6 +983,46 @@ const SettingsPage = () => { 'dvr-comskip-enabled' } /> + + + + + + + {comskipConfig.exists && comskipConfig.path + ? `Using ${comskipConfig.path}` + : 'No custom comskip.ini uploaded.'} + { value: 'tvg_id', label: 'TVG-ID', }, + { + value: 'm3u_id', + label: 'M3U ID', + }, ]} {...form.getInputProps('m3u-hash-key')} key={form.key('m3u-hash-key')} @@ -1141,6 +1461,8 @@ const SettingsPage = () => { onClose={() => { setRehashConfirmOpen(false); setRehashDialogType(null); + // Clear pending values when dialog is cancelled + setPendingChangedSettings(null); }} onConfirm={handleRehashConfirm} title={ diff --git a/frontend/src/pages/__tests__/guideUtils.test.js b/frontend/src/pages/__tests__/guideUtils.test.js new file mode 100644 index 00000000..58a6d292 --- /dev/null +++ b/frontend/src/pages/__tests__/guideUtils.test.js @@ -0,0 +1,100 @@ +import { describe, it, expect } from 'vitest'; +import dayjs from 'dayjs'; +import { + PROGRAM_HEIGHT, + EXPANDED_PROGRAM_HEIGHT, + buildChannelIdMap, + mapProgramsByChannel, + computeRowHeights, +} from '../guideUtils.js'; + +describe('guideUtils', () => { + describe('buildChannelIdMap', () => { + it('maps tvg ids from epg records and falls back to channel uuid', () => { + const channels = [ + { id: 1, epg_data_id: 'epg-1', uuid: 'uuid-1' }, + { id: 2, epg_data_id: null, uuid: 'uuid-2' }, + ]; + const tvgsById = { + 'epg-1': { tvg_id: 'alpha' }, + }; + + const map = buildChannelIdMap(channels, tvgsById); + + expect(map.get('alpha')).toBe(1); + expect(map.get('uuid-2')).toBe(2); + }); + }); + + describe('mapProgramsByChannel', () => { + it('groups programs by channel and sorts them by start time', () => { + const programs = [ + { + id: 10, + tvg_id: 'alpha', + start_time: dayjs('2025-01-01T02:00:00Z').toISOString(), + end_time: dayjs('2025-01-01T03:00:00Z').toISOString(), + title: 'Late Show', + }, + { + id: 11, + tvg_id: 'alpha', + start_time: dayjs('2025-01-01T01:00:00Z').toISOString(), + end_time: dayjs('2025-01-01T02:00:00Z').toISOString(), + title: 'Evening News', + }, + { + id: 20, + tvg_id: 'beta', + start_time: dayjs('2025-01-01T00:00:00Z').toISOString(), + end_time: dayjs('2025-01-01T01:00:00Z').toISOString(), + title: 'Morning Show', + }, + ]; + + const channelIdByTvgId = new Map([ + ['alpha', 1], + ['beta', 2], + ]); + + const map = mapProgramsByChannel(programs, channelIdByTvgId); + + expect(map.get(1)).toHaveLength(2); + expect(map.get(1)?.map((item) => item.id)).toEqual([11, 10]); + expect(map.get(2)).toHaveLength(1); + expect(map.get(2)?.[0].startMs).toBeTypeOf('number'); + expect(map.get(2)?.[0].endMs).toBeTypeOf('number'); + }); + }); + + describe('computeRowHeights', () => { + it('returns program heights with expanded rows when needed', () => { + const filteredChannels = [ + { id: 1 }, + { id: 2 }, + ]; + + const programsByChannel = new Map([ + [1, [{ id: 10 }, { id: 11 }]], + [2, [{ id: 20 }]], + ]); + + const collapsed = computeRowHeights( + filteredChannels, + programsByChannel, + null + ); + expect(collapsed).toEqual([PROGRAM_HEIGHT, PROGRAM_HEIGHT]); + + const expanded = computeRowHeights( + filteredChannels, + programsByChannel, + 10 + ); + expect(expanded).toEqual([ + EXPANDED_PROGRAM_HEIGHT, + PROGRAM_HEIGHT, + ]); + }); + }); +}); diff --git a/frontend/src/pages/guideUtils.js b/frontend/src/pages/guideUtils.js new file mode 100644 index 00000000..5b52b938 --- /dev/null +++ b/frontend/src/pages/guideUtils.js @@ -0,0 +1,79 @@ +import dayjs from 
'dayjs'; + +export const PROGRAM_HEIGHT = 90; +export const EXPANDED_PROGRAM_HEIGHT = 180; + +export function buildChannelIdMap(channels, tvgsById) { + const map = new Map(); + channels.forEach((channel) => { + const tvgRecord = channel.epg_data_id + ? tvgsById[channel.epg_data_id] + : null; + const tvgId = tvgRecord?.tvg_id ?? channel.uuid; + if (tvgId) { + const tvgKey = String(tvgId); + if (!map.has(tvgKey)) { + map.set(tvgKey, []); + } + map.get(tvgKey).push(channel.id); + } + }); + return map; +} + +export function mapProgramsByChannel(programs, channelIdByTvgId) { + if (!programs?.length || !channelIdByTvgId?.size) { + return new Map(); + } + + const map = new Map(); + programs.forEach((program) => { + const channelIds = channelIdByTvgId.get(String(program.tvg_id)); + if (!channelIds || channelIds.length === 0) { + return; + } + + const startMs = program.startMs ?? dayjs(program.start_time).valueOf(); + const endMs = program.endMs ?? dayjs(program.end_time).valueOf(); + + const programData = { + ...program, + startMs, + endMs, + }; + + // Add this program to all channels that share the same TVG ID + channelIds.forEach((channelId) => { + if (!map.has(channelId)) { + map.set(channelId, []); + } + map.get(channelId).push(programData); + }); + }); + + map.forEach((list) => { + list.sort((a, b) => a.startMs - b.startMs); + }); + + return map; +} + +export function computeRowHeights( + filteredChannels, + programsByChannelId, + expandedProgramId, + defaultHeight = PROGRAM_HEIGHT, + expandedHeight = EXPANDED_PROGRAM_HEIGHT +) { + if (!filteredChannels?.length) { + return []; + } + + return filteredChannels.map((channel) => { + const channelPrograms = programsByChannelId.get(channel.id) || []; + const expanded = channelPrograms.some( + (program) => program.id === expandedProgramId + ); + return expanded ? expandedHeight : defaultHeight; + }); +} diff --git a/frontend/src/store/channels.jsx b/frontend/src/store/channels.jsx index 97e42f06..3635d784 100644 --- a/frontend/src/store/channels.jsx +++ b/frontend/src/store/channels.jsx @@ -15,6 +15,7 @@ const useChannelsStore = create((set, get) => ({ activeChannels: {}, activeClients: {}, recordings: [], + recurringRules: [], isLoading: false, error: null, forceUpdate: 0, @@ -408,6 +409,23 @@ const useChannelsStore = create((set, get) => ({ } }, + fetchRecurringRules: async () => { + try { + const rules = await api.listRecurringRules(); + set({ recurringRules: Array.isArray(rules) ? rules : [] }); + } catch (error) { + console.error('Failed to fetch recurring DVR rules:', error); + set({ error: 'Failed to load recurring DVR rules.' }); + } + }, + + removeRecurringRule: (id) => + set((state) => ({ + recurringRules: Array.isArray(state.recurringRules) + ? 
state.recurringRules.filter((rule) => String(rule?.id) !== String(id)) + : [], + })), + // Optimistically remove a single recording from the local store removeRecording: (id) => set((state) => { diff --git a/frontend/src/store/logos.jsx b/frontend/src/store/logos.jsx index 4a0b945c..eb2a7597 100644 --- a/frontend/src/store/logos.jsx +++ b/frontend/src/store/logos.jsx @@ -3,7 +3,7 @@ import api from '../api'; const useLogosStore = create((set, get) => ({ logos: {}, - channelLogos: {}, // Separate state for channel-assignable logos + channelLogos: {}, // Keep this for simplicity, but we'll be more careful about when we populate it isLoading: false, backgroundLoading: false, hasLoadedAll: false, // Track if we've loaded all logos @@ -21,12 +21,29 @@ const useLogosStore = create((set, get) => ({ }, addLogo: (newLogo) => - set((state) => ({ - logos: { + set((state) => { + // Add to main logos store always + const newLogos = { ...state.logos, [newLogo.id]: { ...newLogo }, - }, - })), + }; + + // Add to channelLogos if the user has loaded channel-assignable logos + // This means they're using channel forms and the new logo should be available there + // Newly created logos are channel-assignable (they start unused) + let newChannelLogos = state.channelLogos; + if (state.hasLoadedChannelLogos) { + newChannelLogos = { + ...state.channelLogos, + [newLogo.id]: { ...newLogo }, + }; + } + + return { + logos: newLogos, + channelLogos: newChannelLogos, + }; + }), updateLogo: (logo) => set((state) => ({ @@ -34,13 +51,25 @@ const useLogosStore = create((set, get) => ({ ...state.logos, [logo.id]: { ...logo }, }, + // Update in channelLogos if it exists there + channelLogos: state.channelLogos[logo.id] + ? { + ...state.channelLogos, + [logo.id]: { ...logo }, + } + : state.channelLogos, })), removeLogo: (logoId) => set((state) => { const newLogos = { ...state.logos }; + const newChannelLogos = { ...state.channelLogos }; delete newLogos[logoId]; - return { logos: newLogos }; + delete newChannelLogos[logoId]; + return { + logos: newLogos, + channelLogos: newChannelLogos, + }; }), // Smart loading methods @@ -155,8 +184,15 @@ const useLogosStore = create((set, get) => ({ console.log(`Fetched ${logos.length} channel-assignable logos`); - // Store in separate channelLogos state + // Store in both places, but this is intentional and only when specifically requested set({ + logos: { + ...get().logos, // Keep existing logos + ...logos.reduce((acc, logo) => { + acc[logo.id] = { ...logo }; + return acc; + }, {}), + }, channelLogos: logos.reduce((acc, logo) => { acc[logo.id] = { ...logo }; return acc; diff --git a/frontend/src/test/setupTests.js b/frontend/src/test/setupTests.js new file mode 100644 index 00000000..b5f53af0 --- /dev/null +++ b/frontend/src/test/setupTests.js @@ -0,0 +1,42 @@ +import '@testing-library/jest-dom/vitest'; +import { afterEach, vi } from 'vitest'; +import { cleanup } from '@testing-library/react'; + +afterEach(() => { + cleanup(); +}); + +if (typeof window !== 'undefined' && !window.matchMedia) { + window.matchMedia = vi.fn().mockImplementation((query) => ({ + matches: false, + media: query, + onchange: null, + addListener: vi.fn(), + removeListener: vi.fn(), + addEventListener: vi.fn(), + removeEventListener: vi.fn(), + dispatchEvent: vi.fn(), + })); +} + +if (typeof window !== 'undefined' && !window.ResizeObserver) { + class ResizeObserver { + constructor(callback) { + this.callback = callback; + } + observe() {} + unobserve() {} + disconnect() {} + } + + window.ResizeObserver = 
ResizeObserver; +} + +if (typeof window !== 'undefined') { + if (!window.requestAnimationFrame) { + window.requestAnimationFrame = (cb) => setTimeout(cb, 16); + } + if (!window.cancelAnimationFrame) { + window.cancelAnimationFrame = (id) => clearTimeout(id); + } +} diff --git a/frontend/vite.config.js b/frontend/vite.config.js index 9ce8189b..1026e519 100644 --- a/frontend/vite.config.js +++ b/frontend/vite.config.js @@ -26,4 +26,10 @@ export default defineConfig({ // }, // }, }, + + test: { + environment: 'jsdom', + setupFiles: ['./src/test/setupTests.js'], + globals: true, + }, }); diff --git a/scripts/epg_match.py b/scripts/epg_match.py deleted file mode 100644 index 890ffe3a..00000000 --- a/scripts/epg_match.py +++ /dev/null @@ -1,182 +0,0 @@ -# ml_model.py - -import sys -import json -import re -import os -import logging - -from rapidfuzz import fuzz -from sentence_transformers import util -from sentence_transformers import SentenceTransformer as st - -# Set up logger -logger = logging.getLogger(__name__) - -# Load the sentence-transformers model once at the module level -SENTENCE_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2" -MODEL_PATH = os.path.join("/app", "models", "all-MiniLM-L6-v2") - -# Thresholds -BEST_FUZZY_THRESHOLD = 85 -LOWER_FUZZY_THRESHOLD = 40 -EMBED_SIM_THRESHOLD = 0.65 - -def process_data(input_data): - os.makedirs(MODEL_PATH, exist_ok=True) - - # If not present locally, download: - if not os.path.exists(os.path.join(MODEL_PATH, "config.json")): - logger.info(f"Local model not found in {MODEL_PATH}; downloading from {SENTENCE_MODEL_NAME}...") - st_model = st(SENTENCE_MODEL_NAME, cache_folder=MODEL_PATH) - else: - logger.info(f"Loading local model from {MODEL_PATH}") - st_model = st(MODEL_PATH) - - channels = input_data["channels"] - epg_data = input_data["epg_data"] - region_code = input_data.get("region_code", None) - - epg_embeddings = None - if any(row["norm_name"] for row in epg_data): - epg_embeddings = st_model.encode( - [row["norm_name"] for row in epg_data], - convert_to_tensor=True - ) - - channels_to_update = [] - matched_channels = [] - - for chan in channels: - normalized_tvg_id = chan.get("tvg_id", "") - fallback_name = chan["tvg_id"].strip() if chan["tvg_id"] else chan["name"] - - # Exact TVG ID match (direct match) - epg_by_tvg_id = next((epg for epg in epg_data if epg["tvg_id"] == normalized_tvg_id), None) - if normalized_tvg_id and epg_by_tvg_id: - chan["epg_data_id"] = epg_by_tvg_id["id"] - channels_to_update.append(chan) - - # Add to matched_channels list so it's counted in the total - matched_channels.append((chan['id'], fallback_name, epg_by_tvg_id["tvg_id"])) - - logger.info(f"Channel {chan['id']} '{fallback_name}' => EPG found by tvg_id={epg_by_tvg_id['tvg_id']}") - continue - - # If channel has a tvg_id that doesn't exist in EPGData, do direct check. - # I don't THINK this should happen now that we assign EPG on channel creation. 
- if chan["tvg_id"]: - epg_match = [epg["id"] for epg in epg_data if epg["tvg_id"] == chan["tvg_id"]] - if epg_match: - chan["epg_data_id"] = epg_match[0] - logger.info(f"Channel {chan['id']} '{chan['name']}' => EPG found by tvg_id={chan['tvg_id']}") - channels_to_update.append(chan) - continue - - # C) Perform name-based fuzzy matching - if not chan["norm_chan"]: - logger.debug(f"Channel {chan['id']} '{chan['name']}' => empty after normalization, skipping") - continue - - best_score = 0 - best_epg = None - for row in epg_data: - if not row["norm_name"]: - continue - - base_score = fuzz.ratio(chan["norm_chan"], row["norm_name"]) - bonus = 0 - # Region-based bonus/penalty - combined_text = row["tvg_id"].lower() + " " + row["name"].lower() - dot_regions = re.findall(r'\.([a-z]{2})', combined_text) - if region_code: - if dot_regions: - if region_code in dot_regions: - bonus = 30 # bigger bonus if .us or .ca matches - else: - bonus = -15 - elif region_code in combined_text: - bonus = 15 - score = base_score + bonus - - logger.debug( - f"Channel {chan['id']} '{fallback_name}' => EPG row {row['id']}: " - f"name='{row['name']}', norm_name='{row['norm_name']}', " - f"combined_text='{combined_text}', dot_regions={dot_regions}, " - f"base_score={base_score}, bonus={bonus}, total_score={score}" - ) - - if score > best_score: - best_score = score - best_epg = row - - # If no best match was found, skip - if not best_epg: - logger.debug(f"Channel {chan['id']} '{fallback_name}' => no EPG match at all.") - continue - - # If best_score is above BEST_FUZZY_THRESHOLD => direct accept - if best_score >= BEST_FUZZY_THRESHOLD: - chan["epg_data_id"] = best_epg["id"] - channels_to_update.append(chan) - - matched_channels.append((chan['id'], fallback_name, best_epg["tvg_id"])) - logger.info( - f"Channel {chan['id']} '{fallback_name}' => matched tvg_id={best_epg['tvg_id']} " - f"(score={best_score})" - ) - - # If best_score is in the “middle range,” do embedding check - elif best_score >= LOWER_FUZZY_THRESHOLD and epg_embeddings is not None: - chan_embedding = st_model.encode(chan["norm_chan"], convert_to_tensor=True) - sim_scores = util.cos_sim(chan_embedding, epg_embeddings)[0] - top_index = int(sim_scores.argmax()) - top_value = float(sim_scores[top_index]) - if top_value >= EMBED_SIM_THRESHOLD: - matched_epg = epg_data[top_index] - chan["epg_data_id"] = matched_epg["id"] - channels_to_update.append(chan) - - matched_channels.append((chan['id'], fallback_name, matched_epg["tvg_id"])) - logger.info( - f"Channel {chan['id']} '{fallback_name}' => matched EPG tvg_id={matched_epg['tvg_id']} " - f"(fuzzy={best_score}, cos-sim={top_value:.2f})" - ) - else: - logger.info( - f"Channel {chan['id']} '{fallback_name}' => fuzzy={best_score}, " - f"cos-sim={top_value:.2f} < {EMBED_SIM_THRESHOLD}, skipping" - ) - else: - # No good match found - fuzzy score is too low - logger.info( - f"Channel {chan['id']} '{fallback_name}' => best fuzzy match score={best_score} < {LOWER_FUZZY_THRESHOLD}, skipping" - ) - - return { - "channels_to_update": channels_to_update, - "matched_channels": matched_channels - } - -def main(): - # Configure logging - logging_level = os.environ.get('DISPATCHARR_LOG_LEVEL', 'INFO') - logging.basicConfig( - level=getattr(logging, logging_level), - format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', - stream=sys.stderr - ) - - # Read input data from a file - input_file_path = sys.argv[1] - with open(input_file_path, 'r') as f: - input_data = json.load(f) - - # Process data with the ML model (or 
your logic) - result = process_data(input_data) - - # Output result to stdout - print(json.dumps(result)) - -if __name__ == "__main__": - main() diff --git a/version.py b/version.py index 39334b6c..750836e7 100644 --- a/version.py +++ b/version.py @@ -1,5 +1,5 @@ """ Dispatcharr version information. """ -__version__ = '0.9.1' # Follow semantic versioning (MAJOR.MINOR.PATCH) +__version__ = '0.10.3' # Follow semantic versioning (MAJOR.MINOR.PATCH) __timestamp__ = None # Set during CI/CD build process
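
Reviewer note: below is a minimal usage sketch of the new frontend/src/pages/guideUtils.js helpers introduced in this patch. It is illustrative only and not part of the diff; the sample channel, tvg, and program objects are hypothetical stand-ins (not data from the Dispatcharr stores), and the expanded-program id is chosen arbitrarily.

// Sketch only: composing buildChannelIdMap, mapProgramsByChannel and
// computeRowHeights as the guide view would. Sample data is hypothetical.
import {
  buildChannelIdMap,
  mapProgramsByChannel,
  computeRowHeights,
} from './guideUtils.js';

const channels = [
  { id: 1, epg_data_id: 'epg-1', uuid: 'uuid-1' },
  { id: 2, epg_data_id: null, uuid: 'uuid-2' }, // no EPG record, falls back to uuid
];
const tvgsById = { 'epg-1': { tvg_id: 'alpha' } };
const programs = [
  { id: 10, tvg_id: 'alpha', start_time: '2025-01-01T01:00:00Z', end_time: '2025-01-01T02:00:00Z' },
  { id: 11, tvg_id: 'uuid-2', start_time: '2025-01-01T00:00:00Z', end_time: '2025-01-01T01:00:00Z' },
];

// tvg_id (or channel uuid fallback) -> list of channel ids sharing that key
const channelIdByTvgId = buildChannelIdMap(channels, tvgsById);

// channel id -> programs sorted by start, with startMs/endMs precomputed
const programsByChannel = mapProgramsByChannel(programs, channelIdByTvgId);

// one row height per visible channel; a row containing the expanded program
// (id 10 here) gets EXPANDED_PROGRAM_HEIGHT, the rest PROGRAM_HEIGHT
const rowHeights = computeRowHeights(channels, programsByChannel, 10);
console.log(rowHeights); // [180, 90] with the constants defined in guideUtils.js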