diff --git a/.dockerignore b/.dockerignore index c79ca7b4..296537de 100755 --- a/.dockerignore +++ b/.dockerignore @@ -31,3 +31,4 @@ LICENSE README.md data/ +docker/data/ diff --git a/.github/workflows/base-image.yml b/.github/workflows/base-image.yml index f926d892..d290d49a 100644 --- a/.github/workflows/base-image.yml +++ b/.github/workflows/base-image.yml @@ -101,6 +101,28 @@ jobs: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Extract metadata for Docker + id: meta + uses: docker/metadata-action@v5 + with: + images: | + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }} + labels: | + org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }} + org.opencontainers.image.description=Your ultimate IPTV & stream Management companion. + org.opencontainers.image.url=https://github.com/${{ github.repository }} + org.opencontainers.image.source=https://github.com/${{ github.repository }} + org.opencontainers.image.version=${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }} + org.opencontainers.image.created=${{ needs.prepare.outputs.timestamp }} + org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.licenses=See repository + org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/ + org.opencontainers.image.vendor=${{ needs.prepare.outputs.repo_owner }} + org.opencontainers.image.authors=${{ github.actor }} + maintainer=${{ github.actor }} + build_version=DispatcharrBase version: ${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }} + - name: Build and push Docker base image uses: docker/build-push-action@v4 with: @@ -113,6 +135,7 @@ jobs: ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }} docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }} docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }} + labels: ${{ steps.meta.outputs.labels }} build-args: | REPO_OWNER=${{ needs.prepare.outputs.repo_owner }} REPO_NAME=${{ needs.prepare.outputs.repo_name }} @@ -154,18 +177,74 @@ jobs: # GitHub Container Registry manifests # branch tag (e.g. base or base-dev) - docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \ + docker buildx imagetools create \ + --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." 
\ + --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "index:org.opencontainers.image.licenses=See repository" \ + --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "index:org.opencontainers.image.vendor=${OWNER}" \ + --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "index:maintainer=${{ github.actor }}" \ + --annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \ + --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \ ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-arm64 # branch + timestamp tag - docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \ + docker buildx imagetools create \ + --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \ + --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "index:org.opencontainers.image.licenses=See repository" \ + --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "index:org.opencontainers.image.vendor=${OWNER}" \ + --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "index:maintainer=${{ github.actor }}" \ + --annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \ + --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \ ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-arm64 # Docker Hub manifests # branch tag (e.g. base or base-dev) - docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \ + docker buildx imagetools create \ + --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." 
\ + --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "index:org.opencontainers.image.licenses=See repository" \ + --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "index:org.opencontainers.image.vendor=${OWNER}" \ + --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "index:maintainer=${{ github.actor }}" \ + --annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \ + --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \ docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-arm64 # branch + timestamp tag - docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \ + docker buildx imagetools create \ + --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \ + --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "index:org.opencontainers.image.licenses=See repository" \ + --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "index:org.opencontainers.image.vendor=${OWNER}" \ + --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "index:maintainer=${{ github.actor }}" \ + --annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \ + --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \ docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-arm64 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5da4118c..d8f4a3a7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,6 +3,8 @@ name: CI Pipeline on: push: branches: [dev] + paths-ignore: + - '**.md' pull_request: branches: [dev] workflow_dispatch: @@ -117,7 +119,27 @@ jobs: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - # use metadata from the prepare job + - name: Extract metadata for Docker + id: meta + uses: docker/metadata-action@v5 + with: + images: | + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }} + labels: | + org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }} + 
org.opencontainers.image.description=Your ultimate IPTV & stream Management companion. + org.opencontainers.image.url=https://github.com/${{ github.repository }} + org.opencontainers.image.source=https://github.com/${{ github.repository }} + org.opencontainers.image.version=${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }} + org.opencontainers.image.created=${{ needs.prepare.outputs.timestamp }} + org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.licenses=See repository + org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/ + org.opencontainers.image.vendor=${{ needs.prepare.outputs.repo_owner }} + org.opencontainers.image.authors=${{ github.actor }} + maintainer=${{ github.actor }} + build_version=Dispatcharr version: ${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }} - name: Build and push Docker image uses: docker/build-push-action@v4 @@ -135,6 +157,7 @@ jobs: ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }} docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }} docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }} + labels: ${{ steps.meta.outputs.labels }} build-args: | REPO_OWNER=${{ needs.prepare.outputs.repo_owner }} REPO_NAME=${{ needs.prepare.outputs.repo_name }} @@ -179,16 +202,72 @@ jobs: echo "Creating multi-arch manifest for ${OWNER}/${REPO}" # branch tag (e.g. latest or dev) - docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \ + docker buildx imagetools create \ + --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \ + --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.version=${BRANCH_TAG}" \ + --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "index:org.opencontainers.image.licenses=See repository" \ + --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "index:org.opencontainers.image.vendor=${OWNER}" \ + --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "index:maintainer=${{ github.actor }}" \ + --annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \ + --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \ ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-arm64 # version + timestamp tag - docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP} \ + docker buildx imagetools create \ + --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." 
\ + --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.version=${VERSION}-${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "index:org.opencontainers.image.licenses=See repository" \ + --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "index:org.opencontainers.image.vendor=${OWNER}" \ + --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "index:maintainer=${{ github.actor }}" \ + --annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \ + --tag ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP} \ ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP}-arm64 # also create Docker Hub manifests using the same username - docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \ + docker buildx imagetools create \ + --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \ + --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.version=${BRANCH_TAG}" \ + --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "index:org.opencontainers.image.licenses=See repository" \ + --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "index:org.opencontainers.image.vendor=${OWNER}" \ + --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "index:maintainer=${{ github.actor }}" \ + --annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \ + --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \ docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-arm64 - docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP} \ + docker buildx imagetools create \ + --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." 
\ + --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.version=${VERSION}-${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "index:org.opencontainers.image.licenses=See repository" \ + --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "index:org.opencontainers.image.vendor=${OWNER}" \ + --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "index:maintainer=${{ github.actor }}" \ + --annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \ + --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP} \ docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP}-arm64 diff --git a/.github/workflows/frontend-tests.yml b/.github/workflows/frontend-tests.yml new file mode 100644 index 00000000..4e9e2505 --- /dev/null +++ b/.github/workflows/frontend-tests.yml @@ -0,0 +1,41 @@ +name: Frontend Tests + +on: + push: + branches: [main, dev] + paths: + - 'frontend/**' + - '.github/workflows/frontend-tests.yml' + pull_request: + branches: [main, dev] + paths: + - 'frontend/**' + - '.github/workflows/frontend-tests.yml' + +jobs: + test: + runs-on: ubuntu-latest + + defaults: + run: + working-directory: ./frontend + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '24' + cache: 'npm' + cache-dependency-path: './frontend/package-lock.json' + + - name: Install dependencies + run: npm ci + + # - name: Run linter + # run: npm run lint + + - name: Run tests + run: npm test diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 27356c9a..9186541d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -25,6 +25,7 @@ jobs: new_version: ${{ steps.update_version.outputs.new_version }} repo_owner: ${{ steps.meta.outputs.repo_owner }} repo_name: ${{ steps.meta.outputs.repo_name }} + timestamp: ${{ steps.timestamp.outputs.timestamp }} steps: - uses: actions/checkout@v3 with: @@ -43,6 +44,10 @@ jobs: NEW_VERSION=$(python -c "import version; print(f'{version.__version__}')") echo "new_version=${NEW_VERSION}" >> $GITHUB_OUTPUT + - name: Update Changelog + run: | + python scripts/update_changelog.py ${{ steps.update_version.outputs.new_version }} + - name: Set repository metadata id: meta run: | @@ -52,9 +57,15 @@ jobs: REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]') echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT + - name: Generate timestamp for build + id: timestamp + run: | + TIMESTAMP=$(date -u +'%Y%m%d%H%M%S') + echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT + - name: Commit and Tag run: | - git add version.py + git add version.py CHANGELOG.md git commit -m "Release v${{ steps.update_version.outputs.new_version }}" git tag -a "v${{ steps.update_version.outputs.new_version }}" -m "Release v${{ steps.update_version.outputs.new_version }}" git push origin main --tags @@ -100,6 +111,28 @@ jobs: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ 
secrets.DOCKERHUB_TOKEN }} + - name: Extract metadata for Docker + id: meta + uses: docker/metadata-action@v5 + with: + images: | + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }} + labels: | + org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }} + org.opencontainers.image.description=Your ultimate IPTV & stream Management companion. + org.opencontainers.image.url=https://github.com/${{ github.repository }} + org.opencontainers.image.source=https://github.com/${{ github.repository }} + org.opencontainers.image.version=${{ needs.prepare.outputs.new_version }} + org.opencontainers.image.created=${{ needs.prepare.outputs.timestamp }} + org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.licenses=See repository + org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/ + org.opencontainers.image.vendor=${{ needs.prepare.outputs.repo_owner }} + org.opencontainers.image.authors=${{ github.actor }} + maintainer=${{ github.actor }} + build_version=Dispatcharr version: ${{ needs.prepare.outputs.new_version }} Build date: ${{ needs.prepare.outputs.timestamp }} + - name: Build and push Docker image uses: docker/build-push-action@v4 with: @@ -111,6 +144,7 @@ jobs: ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.new_version }}-${{ matrix.platform }} docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:latest-${{ matrix.platform }} docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.new_version }}-${{ matrix.platform }} + labels: ${{ steps.meta.outputs.labels }} build-args: | REPO_OWNER=${{ needs.prepare.outputs.repo_owner }} REPO_NAME=${{ needs.prepare.outputs.repo_name }} @@ -145,25 +179,48 @@ jobs: OWNER=${{ needs.prepare.outputs.repo_owner }} REPO=${{ needs.prepare.outputs.repo_name }} VERSION=${{ needs.prepare.outputs.new_version }} + TIMESTAMP=${{ needs.prepare.outputs.timestamp }} echo "Creating multi-arch manifest for ${OWNER}/${REPO}" # GitHub Container Registry manifests - # latest tag - docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:latest \ - ghcr.io/${OWNER}/${REPO}:latest-amd64 ghcr.io/${OWNER}/${REPO}:latest-arm64 - - # version tag - docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${VERSION} \ + # Create one manifest with both latest and version tags + docker buildx imagetools create \ + --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." 
\ + --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.version=${VERSION}" \ + --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "index:org.opencontainers.image.licenses=See repository" \ + --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "index:org.opencontainers.image.vendor=${OWNER}" \ + --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "index:maintainer=${{ github.actor }}" \ + --annotation "index:build_version=Dispatcharr version: ${VERSION} Build date: ${TIMESTAMP}" \ + --tag ghcr.io/${OWNER}/${REPO}:latest \ + --tag ghcr.io/${OWNER}/${REPO}:${VERSION} \ ghcr.io/${OWNER}/${REPO}:${VERSION}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-arm64 # Docker Hub manifests - # latest tag - docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest \ - docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest-arm64 - - # version tag - docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION} \ + # Create one manifest with both latest and version tags + docker buildx imagetools create \ + --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \ + --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.version=${VERSION}" \ + --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "index:org.opencontainers.image.licenses=See repository" \ + --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "index:org.opencontainers.image.vendor=${OWNER}" \ + --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "index:maintainer=${{ github.actor }}" \ + --annotation "index:build_version=Dispatcharr version: ${VERSION} Build date: ${TIMESTAMP}" \ + --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest \ + --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION} \ docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-arm64 create-release: diff --git a/.gitignore b/.gitignore index a9d76412..20968f46 100755 --- a/.gitignore +++ b/.gitignore @@ -18,4 +18,5 @@ dump.rdb debugpy* uwsgi.sock package-lock.json -models \ No newline at end of file +models +.idea \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..610c2ee5 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,1014 @@ +# Changelog + +All notable changes to this project will be documented in this file. 
+ +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +### Changed + +- Frontend tests GitHub workflow now uses Node.js 24 (matching Dockerfile) and runs on both `main` and `dev` branch pushes and pull requests for comprehensive CI coverage. + +### Fixed + +- Fixed NumPy baseline detection in Docker entrypoint. Now calls `numpy.show_config()` directly with case-insensitive grep instead of incorrectly wrapping the output. +- Fixed SettingsUtils frontend tests for new grouped settings architecture. Updated test suite to properly verify grouped JSON settings (stream_settings, dvr_settings, etc.) instead of individual CharField settings, including tests for type conversions, array-to-CSV transformations, and special handling of proxy_settings and network_access. + +## [0.17.0] - 2026-01-13 + +### Added + +- Loading feedback for all confirmation dialogs: Extended visual loading indicators across all confirmation dialogs throughout the application. Delete, cleanup, and bulk operation dialogs now show an animated dots loader and disabled state during async operations, providing consistent user feedback for backups (restore/delete), channels, EPGs, logos, VOD logos, M3U accounts, streams, users, groups, filters, profiles, batch operations, and network access changes. +- Channel profile edit and duplicate functionality: Users can now rename existing channel profiles and create duplicates with automatic channel membership cloning. Each profile action (edit, duplicate, delete) is available in the profile dropdown for quick access. +- ProfileModal component extracted for improved code organization and maintainability of channel profile management operations. +- Frontend unit tests for pages and utilities: Added comprehensive unit test coverage for frontend components within pages/ and JS files within utils/, along with a GitHub Actions workflow (`frontend-tests.yml`) to automatically run tests on commits and pull requests - Thanks [@nick4810](https://github.com/nick4810) +- Channel Profile membership control for manual channel creation and bulk operations: Extended the existing `channel_profile_ids` parameter from `POST /api/channels/from-stream/` to also support `POST /api/channels/` (manual creation) and bulk creation tasks with the same flexible semantics: + - Omitted parameter (default): Channels are added to ALL profiles (preserves backward compatibility) + - Empty array `[]`: Channels are added to NO profiles + - Sentinel value `[0]`: Channels are added to ALL profiles (explicit) + - Specific IDs `[1, 2, ...]`: Channels are added only to the specified profiles + This allows API consumers to control profile membership across all channel creation methods without requiring all channels to be added to every profile by default. +- Channel profile selection in creation modal: Users can now choose which profiles to add channels to when creating channels from streams (both single and bulk operations). Options include adding to all profiles, no profiles, or specific profiles with mutual exclusivity between special options ("All Profiles", "None") and specific profile selections. Profile selection defaults to the current table filter for intuitive workflow. +- Group retention policy for M3U accounts: Groups now follow the same stale retention logic as streams, using the account's `stale_stream_days` setting.
Groups that temporarily disappear from an M3U source are retained for the configured retention period instead of being immediately deleted, preserving user settings and preventing data loss when providers temporarily remove/re-add groups. (Closes #809) +- Visual stale indicators for streams and groups: Added `is_stale` field to Stream and both `is_stale` and `last_seen` fields to ChannelGroupM3UAccount models to track items in their retention grace period. Stale groups display with orange buttons and a warning tooltip, while stale streams show with a red background color matching the visual treatment of empty channels. + +### Changed + +- Settings architecture refactored to use grouped JSON storage: Migrated from individual CharField settings to grouped JSONField settings for improved performance, maintainability, and type safety. Settings are now organized into logical groups (stream_settings, dvr_settings, backup_settings, system_settings, proxy_settings, network_access) with automatic migration handling. Backend provides helper methods (`get_stream_settings()`, `get_default_user_agent_id()`, etc.) for easy access. Frontend simplified by removing complex key mapping logic and standardizing on underscore-based field names throughout. +- Docker setup enhanced for legacy CPU support: Added `USE_LEGACY_NUMPY` environment variable to enable custom-built NumPy with no CPU baseline, allowing Dispatcharr to run on older CPUs (circa 2009) that lack support for newer baseline CPU features. When set to `true`, the entrypoint script will install the legacy NumPy build instead of the standard distribution. (Fixes #805) +- VOD upstream read timeout reduced from 30 seconds to 10 seconds to minimize lock hold time when clients disconnect during connection phase +- Form management refactored across application: Migrated Channel, Stream, M3U Profile, Stream Profile, Logo, and User Agent forms from Formik to React Hook Form (RHF) with Yup validation for improved form handling, better validation feedback, and enhanced code maintainability +- Stats and VOD pages refactored for clearer separation of concerns: extracted Stream/VOD connection cards (StreamConnectionCard, VodConnectionCard, VODCard, SeriesCard), moved page logic into dedicated utils, and lazy-loaded heavy components with ErrorBoundary fallbacks to improve readability and maintainability - Thanks [@nick4810](https://github.com/nick4810) +- Channel creation modal refactored: Extracted and unified channel numbering dialogs from StreamsTable into a dedicated CreateChannelModal component that handles both single and bulk channel creation with cleaner, more maintainable implementation and integrated profile selection controls. + +### Fixed + +- Fixed bulk channel profile membership update endpoint silently ignoring channels without existing membership records. The endpoint now creates missing memberships automatically (matching single-channel endpoint behavior), validates that all channel IDs exist before processing, and provides detailed response feedback including counts of updated vs. created memberships. Added comprehensive Swagger documentation with request/response schemas. +- Fixed bulk channel edit endpoint crashing with `ValueError: Field names must be given to bulk_update()` when the first channel in the update list had no actual field changes. 
The endpoint now collects all unique field names from all channels being updated instead of only looking at the first channel, properly handling cases where different channels update different fields or when some channels have no changes - Thanks [@mdellavo](https://github.com/mdellavo) (Fixes #804) +- Fixed PostgreSQL backup restore not completely cleaning database before restoration. The restore process now drops and recreates the entire `public` schema before running `pg_restore`, ensuring a truly clean restore that removes all tables, functions, and other objects not present in the backup file. This prevents leftover database objects from persisting when restoring backups from older branches or versions. Added `--no-owner` flag to `pg_restore` to avoid role permission errors when the backup was created by a different PostgreSQL user. +- Fixed TV Guide loading overlay not disappearing after navigating from DVR page. The `fetchRecordings()` function in the channels store was setting `isLoading: true` on start but never resetting it to `false` on successful completion, causing the Guide page's loading overlay to remain visible indefinitely when accessed after the DVR page. +- Fixed stream profile parameters not properly handling quoted arguments. Switched from basic `.split()` to `shlex.split()` for parsing command-line parameters, allowing proper handling of multi-word arguments in quotes (e.g., OAuth tokens in HTTP headers like `"--twitch-api-header=Authorization=OAuth token123"`). This ensures external streaming tools like Streamlink and FFmpeg receive correctly formatted arguments when using stream profiles with complex parameters - Thanks [@justinforlenza](https://github.com/justinforlenza) (Fixes #833) +- Fixed bulk and manual channel creation not refreshing channel profile memberships in the UI for all connected clients. WebSocket `channels_created` event now calls `fetchChannelProfiles()` to ensure profile membership updates are reflected in real-time for all users without requiring a page refresh. +- Fixed Channel Profile filter incorrectly applying profile membership filtering even when "Show Disabled" was enabled, preventing all channels from being displayed. Profile filter now only applies when hiding disabled channels. (Fixes #825) +- Fixed manual channel creation not adding channels to channel profiles. Manually created channels are now added to the selected profile if one is active, or to all profiles if "All" is selected, matching the behavior of channels created from streams. +- Fixed VOD streams disappearing from stats page during playback by adding `socket-timeout = 600` to production uWSGI config. The missing directive caused uWSGI to use its default 4-second timeout, triggering premature cleanup when clients buffered content. Now matches the existing `http-timeout = 600` value and prevents timeout errors during normal client buffering - Thanks [@patchy8736](https://github.com/patchy8736) +- Fixed Channels table EPG column showing "Not Assigned" on initial load for users with large EPG datasets. Added `tvgsLoaded` flag to EPG store to track when EPG data has finished loading, ensuring the table waits for EPG data before displaying. EPG cells now show animated skeleton placeholders while loading instead of incorrectly showing "Not Assigned". 
(Fixes #810) +- Fixed VOD profile connection count not being decremented when stream connection fails (timeout, 404, etc.), preventing profiles from reaching capacity limits and rejecting valid stream requests +- Fixed React warning in Channel form by removing invalid `removeTrailingZeros` prop from NumberInput component +- Release workflow Docker tagging: Fixed issue where `latest` and version tags (e.g., `0.16.0`) were creating separate manifests instead of pointing to the same image digest, which caused old `latest` tags to become orphaned/untagged after new releases. Now creates a single multi-arch manifest with both tags, maintaining proper tag relationships and download statistics visibility on GitHub. +- Fixed onboarding message appearing in the Channels Table when filtered results are empty. The onboarding message now only displays when there are no channels created at all, not when channels exist but are filtered out by current filters. +- Fixed `M3UMovieRelation.get_stream_url()` and `M3UEpisodeRelation.get_stream_url()` to use XC client's `_normalize_url()` method instead of simple `rstrip('/')`. This properly handles malformed M3U account URLs (e.g., containing `/player_api.php` or query parameters) before constructing VOD stream endpoints, matching behavior of live channel URL building. (Closes #722) +- Fixed bulk_create and bulk_update errors during VOD content refresh by pre-checking object existence with optimized bulk queries (3 queries total instead of N per batch) before creating new objects. This ensures all movie/series objects have primary keys before relation operations, preventing "prohibited to prevent data loss due to unsaved related object" errors. Additionally fixed duplicate key constraint violations by treating TMDB/IMDB ID values of `0` or `'0'` as invalid (some providers use this to indicate "no ID"), converting them to NULL to prevent multiple items from incorrectly sharing the same ID. (Fixes #813) + +## [0.16.0] - 2026-01-04 + +### Added + +- Advanced filtering for Channels table: Filter menu now allows toggling disabled channels visibility (when a profile is selected) and filtering to show only empty channels without streams (Closes #182) +- Network Access warning modal now displays the client's IP address for better transparency when network restrictions are being enforced - Thanks [@damien-alt-sudo](https://github.com/damien-alt-sudo) (Closes #778) +- VLC streaming support - Thanks [@sethwv](https://github.com/sethwv) + - Added `cvlc` as an alternative streaming backend alongside FFmpeg and Streamlink + - Log parser refactoring: Introduced `LogParserFactory` and stream-specific parsers (`FFmpegLogParser`, `VLCLogParser`, `StreamlinkLogParser`) to enable codec and resolution detection from multiple streaming tools + - VLC log parsing for stream information: Detects video/audio codecs from TS demux output, supports both stream-copy and transcode modes with resolution/FPS extraction from transcode output + - Locked, read-only VLC stream profile configured for headless operation with intelligent audio/video codec detection + - VLC and required plugins installed in Docker environment with headless configuration +- ErrorBoundary component for handling frontend errors gracefully with generic error message - Thanks [@nick4810](https://github.com/nick4810) + +### Changed + +- Fixed event viewer arrow direction (previously inverted) — UI behavior corrected. 
- Thanks [@drnikcuk](https://github.com/drnikcuk) (Closes #772) +- Region code options now intentionally include both `GB` (ISO 3166-1 standard) and `UK` (commonly used by EPG/XMLTV providers) to accommodate real-world EPG data variations. Many providers use `UK` in channel identifiers (e.g., `BBCOne.uk`) despite `GB` being the official ISO country code. Users should select the region code that matches their specific EPG provider's convention for optimal region-based EPG matching bonuses - Thanks [@bigpandaaaa](https://github.com/bigpandaaaa) +- Channel number inputs in stream-to-channel creation modals no longer have a maximum value restriction, allowing users to enter any valid channel number supported by the database +- Stream log parsing refactored to use factory pattern: Simplified `ChannelService.parse_and_store_stream_info()` to route parsing through specialized log parsers instead of inline program-specific logic (~150 lines of code removed) +- Stream profile names in fixtures updated to use proper capitalization (ffmpeg → FFmpeg, streamlink → Streamlink) +- Frontend component refactoring for improved code organization and maintainability - Thanks [@nick4810](https://github.com/nick4810) + - Extracted large nested components into separate files (RecordingCard, RecordingDetailsModal, RecurringRuleModal, RecordingSynopsis, GuideRow, HourTimeline, PluginCard, ProgramRecordingModal, SeriesRecordingModal, Field) + - Moved business logic from components into dedicated utility files (dateTimeUtils, RecordingCardUtils, RecordingDetailsModalUtils, RecurringRuleModalUtils, DVRUtils, guideUtils, PluginsUtils, PluginCardUtils, notificationUtils) + - Lazy loaded heavy components (SuperuserForm, RecordingDetailsModal, ProgramRecordingModal, SeriesRecordingModal, PluginCard) with loading fallbacks + - Removed unused Dashboard and Home pages + - Guide page refactoring: Extracted GuideRow and HourTimeline components, moved grid calculations and utility functions to guideUtils.js, added loading states for initial data fetching, improved performance through better memoization + - Plugins page refactoring: Extracted PluginCard and Field components, added Zustand store for plugin state management, improved plugin action confirmation handling, better separation of concerns between UI and business logic +- Logo loading optimization: Logos now load only after both Channels and Streams tables complete loading to prevent blocking initial page render, with rendering gated by table readiness to ensure data loads before visual elements +- M3U stream URLs now use `build_absolute_uri_with_port()` for consistency with EPG and logo URLs, ensuring uniform port handling across all M3U file URLs +- Settings and Logos page refactoring for improved readability and separation of concerns - Thanks [@nick4810](https://github.com/nick4810) + - Extracted individual settings forms (DVR, Network Access, Proxy, Stream, System, UI) into separate components with dedicated utility files + - Moved larger nested components into their own files + - Moved business logic into corresponding utils/ files + - Extracted larger in-line component logic into its own function + - Each panel in Settings now uses its own form state with the parent component handling active state management + +### Fixed + +- Auto Channel Sync Force EPG Source feature not properly forcing "No EPG" assignment - When selecting "Force EPG Source" > "No EPG (Disabled)", channels were still being auto-matched to EPG data instead of forcing dummy/no EPG. 
Now correctly sets `force_dummy_epg` flag to prevent unwanted EPG assignment. (Fixes #788) +- VOD episode processing now properly handles season and episode numbers from APIs that return string values instead of integers, with comprehensive error logging to track data quality issues - Thanks [@patchy8736](https://github.com/patchy8736) (Fixes #770) +- VOD episode-to-stream relations are now validated to ensure episodes have been saved to the database before creating relations, preventing integrity errors when bulk_create operations encounter conflicts - Thanks [@patchy8736](https://github.com/patchy8736) +- VOD category filtering now correctly handles category names containing pipe "|" characters (e.g., "PL | BAJKI", "EN | MOVIES") by using `rsplit()` to split from the right instead of the left, ensuring the category type is correctly extracted as the last segment - Thanks [@Vitekant](https://github.com/Vitekant) +- M3U and EPG URLs now correctly preserve non-standard HTTPS ports (e.g., `:8443`) when accessed behind reverse proxies that forward the port in headers — `get_host_and_port()` now properly checks `X-Forwarded-Port` header before falling back to other detection methods (Fixes #704) +- M3U and EPG manager page no longer crashes when a playlist references a deleted channel group (Fixes screen blank on navigation) +- Stream validation now returns original URL instead of redirected URL to prevent issues with temporary redirect URLs that expire before clients can connect +- XtreamCodes EPG limit parameter now properly converted to integer to prevent type errors when accessing EPG listings (Fixes #781) +- Docker container file permissions: Django management commands (`migrate`, `collectstatic`) now run as the non-root user to prevent root-owned `__pycache__` and static files from causing permission issues - Thanks [@sethwv](https://github.com/sethwv) +- Stream validation now continues with GET request if HEAD request fails due to connection issues - Thanks [@kvnnap](https://github.com/kvnnap) (Fixes #782) +- XtreamCodes M3U files now correctly set `x-tvg-url` and `url-tvg` headers to reference XC EPG URL (`xmltv.php`) instead of standard EPG endpoint when downloaded via XC API (Fixes #629) + +## [0.15.1] - 2025-12-22 + +### Fixed + +- XtreamCodes EPG `has_archive` field now returns integer `0` instead of string `"0"` for proper JSON type consistency +- nginx now gracefully handles hosts without IPv6 support by automatically disabling IPv6 binding at startup (Fixes #744) + +## [0.15.0] - 2025-12-20 + +### Added + +- VOD client stop button in Stats page: Users can now disconnect individual VOD clients from the Stats view, similar to the existing channel client disconnect functionality. 
+- Automated configuration backup/restore system with scheduled backups, retention policies, and async task processing - Thanks [@stlalpha](https://github.com/stlalpha) (Closes #153) +- Stream group as available hash option: Users can now select 'Group' as a hash key option in Settings → Stream Settings → M3U Hash Key, allowing streams to be differentiated by their group membership in addition to name, URL, TVG-ID, and M3U ID + +### Changed + +- Initial super user creation page now matches the login page design with logo, welcome message, divider, and version display for a more consistent and polished first-time setup experience +- Removed unreachable code path in m3u output - Thanks [@DawtCom](https://github.com/DawtCom) +- GitHub Actions workflows now use `docker/metadata-action` for cleaner and more maintainable OCI-compliant image label generation across all build pipelines (ci.yml, base-image.yml, release.yml). Labels are applied to both platform-specific images and multi-arch manifests with proper annotation formatting. - Thanks [@mrdynamo](https://github.com/mrdynamo) (Closes #724) +- Update docker/dev-build.sh to support private registries, multiple architectures and pushing. Now you can do things like `dev-build.sh -p -r my.private.registry -a linux/arm64,linux/amd64` - Thanks [@jdblack](https://github.com/jblack) +- Updated dependencies: Django (5.2.4 → 5.2.9) includes CVE security patch, psycopg2-binary (2.9.10 → 2.9.11), celery (5.5.3 → 5.6.0), djangorestframework (3.16.0 → 3.16.1), requests (2.32.4 → 2.32.5), psutil (7.0.0 → 7.1.3), gevent (25.5.1 → 25.9.1), rapidfuzz (3.13.0 → 3.14.3), torch (2.7.1 → 2.9.1), sentence-transformers (5.1.0 → 5.2.0), lxml (6.0.0 → 6.0.2) (Closes #662) +- Frontend dependencies updated: Vite (6.2.0 → 7.1.7), ESLint (9.21.0 → 9.27.0), and related packages; added npm `overrides` to enforce js-yaml@^4.1.1 for transitive security fix. All 6 reported vulnerabilities resolved with `npm audit fix`. +- Floating video player now supports resizing via drag handles, with minimum size enforcement and viewport/page boundary constraints to keep it visible. +- Redis connection settings now fully configurable via environment variables (`REDIS_HOST`, `REDIS_PORT`, `REDIS_DB`, `REDIS_URL`), replacing hardcoded `localhost:6379` values throughout the codebase. This enables use of external Redis services in production deployments. (Closes #762) +- Celery broker and result backend URLs now respect `REDIS_HOST`/`REDIS_PORT`/`REDIS_DB` settings as defaults, with `CELERY_BROKER_URL` and `CELERY_RESULT_BACKEND` environment variables available for override. + +### Fixed + +- Docker init script now validates DISPATCHARR_PORT is an integer before using it, preventing sed errors when Kubernetes sets it to a service URL like `tcp://10.98.37.10:80`.
Falls back to default port 9191 when invalid (Fixes #737) +- M3U Profile form now properly resets local state for search and replace patterns after saving, preventing validation errors when adding multiple profiles in a row +- DVR series rule deletion now properly handles TVG IDs that contain slashes by encoding them in the URL path (Fixes #697) +- VOD episode processing now correctly handles duplicate episodes (same episode in multiple languages/qualities) by reusing Episode records across multiple M3UEpisodeRelation entries instead of attempting to create duplicates (Fixes #556) +- XtreamCodes series streaming endpoint now correctly handles episodes with multiple streams (different languages/qualities) by selecting the best available stream based on account priority (Fixes #569) +- XtreamCodes series info API now returns unique episodes instead of duplicate entries when multiple streams exist for the same episode (different languages/qualities) +- nginx now gracefully handles hosts without IPv6 support by automatically disabling IPv6 binding at startup (Fixes #744) +- XtreamCodes EPG API now returns correct date/time format for start/end fields and proper string types for timestamps and channel_id +- XtreamCodes EPG API now handles None values for title and description fields to prevent AttributeError +- XtreamCodes EPG `id` field now provides unique identifiers per program listing instead of always returning "0" for better client EPG handling +- XtreamCodes EPG `epg_id` field now correctly returns the EPGData record ID (representing the EPG source/channel mapping) instead of a dummy value + +## [0.14.0] - 2025-12-09 + +### Added + +- Sort buttons for 'Group' and 'M3U' columns in Streams table for improved stream organization and filtering - Thanks [@bobey6](https://github.com/bobey6) +- EPG source priority field for controlling which EPG source is preferred when multiple sources have matching entries for a channel (higher numbers = higher priority) (Closes #603) + +### Changed + +- EPG program parsing optimized for sources with many channels but only a fraction mapped. Now parses XML file once per source instead of once per channel, dramatically reducing I/O and CPU overhead. For sources with 10,000 channels and 100 mapped, this results in ~99x fewer file opens and ~100x fewer full file scans. Orphaned programs for unmapped channels are also cleaned up during refresh to prevent database bloat. Database updates are now atomic to prevent clients from seeing empty/partial EPG data during refresh. +- EPG table now displays detailed status messages including refresh progress, success messages, and last message for idle sources (matching M3U table behavior) (Closes #214) +- IPv6 access now allowed by default with all IPv6 CIDRs accepted - Thanks [@adrianmace](https://github.com/adrianmace) +- nginx.conf updated to bind to both IPv4 and IPv6 ports - Thanks [@jordandalley](https://github.com/jordandalley) +- EPG matching now respects source priority and only uses active (enabled) EPG sources (Closes #672) +- EPG form API Key field now only visible when Schedules Direct source type is selected + +### Fixed + +- EPG table "Updated" column now updates in real-time via WebSocket using the actual backend timestamp instead of requiring a page refresh +- Bulk channel editor confirmation dialog now displays the correct stream profile name that will be applied to the selected channels. 
+ +- uWSGI not found and 502 bad gateway on first startup + +## [0.13.1] - 2025-12-06 + +### Fixed + +- JWT token is now generated so it is unique for each deployment + +## [0.13.0] - 2025-12-02 + +### Added + +- `CHANGELOG.md` file following Keep a Changelog format to document all notable changes and project history +- System event logging and viewer: Comprehensive logging system that tracks internal application events (M3U refreshes, EPG updates, stream switches, errors) with a dedicated UI viewer for filtering and reviewing historical events. Improves monitoring, troubleshooting, and understanding system behavior +- M3U/EPG endpoint caching: Implements intelligent caching for frequently requested M3U playlists and EPG data to reduce database load and improve response times for clients. +- Search icon to name headers for the channels and streams tables (#686) +- Comprehensive logging for user authentication events and network access restrictions +- Validation for EPG objects and payloads in updateEPG functions to prevent errors from invalid data +- Referrerpolicy to YouTube iframes in series and VOD modals for better compatibility + +### Changed + +- XC player API now returns server_info for unknown actions to align with provider behavior +- XC player API refactored to streamline action handling and ensure consistent responses +- Date parsing logic in generate_custom_dummy_programs improved to handle empty or invalid inputs +- DVR cards now reflect date and time formats chosen by user - Thanks [@Biologisten](https://github.com/Biologisten) +- "Uncategorized" categories and relations now automatically created for VOD accounts to improve content management (#627) +- Improved minimum horizontal size in the stats page for better usability on smaller displays +- M3U and EPG generation now handles missing channel profiles with appropriate error logging + +### Fixed + +- Episode URLs in series modal now use UUID instead of ID, fixing broken links (#684, #694) +- Stream preview now respects selected M3U profile instead of always using default profile (#690) +- Channel groups filter in M3UGroupFilter component now filters out non-existent groups (prevents blank webui when editing M3U after a group was removed) +- Stream order now preserved in PATCH/PUT responses from ChannelSerializer, ensuring consistent ordering across all API operations - Thanks [@FiveBoroughs](https://github.com/FiveBoroughs) (#643) +- XC client compatibility: float channel numbers now converted to integers +- M3U account and profile modals now scrollable on mobile devices for improved usability + +## [0.12.0] - 2025-11-19 + +### Added + +- RTSP stream support with automatic protocol detection when a proxy profile requires it. The proxy now forces FFmpeg for RTSP sources and properly handles RTSP URLs - Thanks [@ragchuck](https://github.com/ragchuck) (#184) +- UDP stream support, including correct handling when a proxy profile specifies a UDP source. The proxy now skips HTTP-specific headers (like `user_agent`) for non-HTTP protocols and performs manual redirect handling to improve reliability (#617) +- Separate VOD logos system with a new `VODLogo` model, database migration, dedicated API/viewset, and server-paginated UI.
This separates movie/series logos from channel logos, making cleanup safer and enabling independent bulk operations + +### Changed + +- Background profile refresh now uses a rate-limiting/backoff strategy to avoid provider bans +- Bulk channel editing now validates all requested changes up front and applies updates in a single database transaction +- ProxyServer shutdown & ghost-client handling improved to avoid initializing channels for transient clients and prevent duplicate reinitialization during rapid reconnects +- URL / Stream validation expanded to support credentials on non-FQDN hosts, skips HTTP-only checks for RTSP/RTP/UDP streams, and improved host/port normalization +- TV guide scrolling & timeline synchronization improved with mouse-wheel scrolling, synchronized timeline position with guide navigation, and improved mobile momentum scrolling (#252) +- EPG Source dropdown now sorts alphabetically - Thanks [@0x53c65c0a8bd30fff](https://github.com/0x53c65c0a8bd30fff) +- M3U POST handling restored and improved for clients (e.g., Smarters) that request playlists using HTTP POST - Thanks [@maluueu](https://github.com/maluueu) +- Login form revamped with branding, cleaner layout, loading state, "Remember Me" option, and focused sign-in flow +- Series & VOD now have copy-link buttons in modals for easier URL sharing +- `get_host_and_port` now prioritizes verified port sources and handles reverse-proxy edge cases more accurately (#618) + +### Fixed + +- EXTINF parsing overhauled to correctly extract attributes such as `tvg-id`, `tvg-name`, and `group-title`, even when values include quotes or commas (#637) +- Websocket payload size reduced during EPG processing to avoid UI freezes, blank screens, or memory spikes in the browser (#327) +- Logo management UI fixes including confirmation dialogs, header checkbox reset, delete button reliability, and full client refetch after cleanup + +## [0.11.2] - 2025-11-04 + +### Added + +- Custom Dummy EPG improvements: + - Support for using an existing Custom Dummy EPG as a template for creating new EPGs + - Custom fallback templates for unmatched patterns + - `{endtime}` as an available output placeholder and renamed `{time}` → `{starttime}` (#590) + - Support for date placeholders that respect both source and output timezones (#597) + - Ability to bulk assign Custom Dummy EPGs to multiple channels + - "Include New Tag" option to mark programs as new in Dummy EPG output + - Support for month strings in date parsing + - Ability to set custom posters and channel logos via regex patterns for Custom Dummy EPGs + - Improved DST handling by calculating offsets based on the actual program date, not today's date + +### Changed + +- Stream model maximum URL length increased from 2000 to 4096 characters (#585) +- Groups now sorted during `xc_get_live_categories` based on the order they first appear (by lowest channel number) +- Client TTL settings updated and periodic refresh implemented during active streaming to maintain accurate connection tracking +- `ProgramData.sub_title` field changed from `CharField` to `TextField` to allow subtitles longer than 255 characters (#579) +- Startup improved by verifying `/data` directory ownership and automatically fixing permissions if needed. 
Pre-creates `/data/models` during initialization (#614) +- Port detection enhanced to check `request.META.get("SERVER_PORT")` before falling back to defaults, ensuring correct port when generating M3U, EPG, and logo URLs - Thanks [@lasharor](https://github.com/lasharor) + +### Fixed + +- Custom Dummy EPG frontend DST calculation now uses program date instead of current date +- Channel titles no longer truncated early after an apostrophe - Thanks [@0x53c65c0a8bd30fff](https://github.com/0x53c65c0a8bd30fff) + +## [0.11.1] - 2025-10-22 + +### Fixed + +- uWSGI not receiving environmental variables +- LXC unable to access daemons launched by uWSGI ([#575](https://github.com/Dispatcharr/Dispatcharr/issues/575), [#576](https://github.com/Dispatcharr/Dispatcharr/issues/576), [#577](https://github.com/Dispatcharr/Dispatcharr/issues/577)) + +## [0.11.0] - 2025-10-22 + +### Added + +- Custom Dummy EPG system: + - Regex pattern matching and name source selection + - Support for custom upcoming and ended programs + - Timezone-aware with source and local timezone selection + - Option to include categories and date/live tags in Dummy EPG output + - (#293) +- Auto-Enable & Category Improvements: + - Auto-enable settings for new groups and categories in M3U and VOD components (#208) +- IPv6 CIDR validation in Settings - Thanks [@jordandalley](https://github.com/jordandalley) (#236) +- Custom logo support for channel groups in Auto Sync Channels (#555) +- Tooltips added to the Stream Table + +### Changed + +- Celery and uWSGI now have configurable `nice` levels (defaults: `uWSGI=0`, `Celery=5`) to prioritize streaming when needed. (#571) +- Directory creation and ownership management refactored in init scripts to avoid unnecessary recursive `chown` operations and improve boot speed +- HTTP streamer switched to threaded model with piped output for improved robustness +- Chunk timeout configuration improved and StreamManager timeout handling enhanced +- Proxy timeout values reduced to avoid unnecessary waiting +- Resource cleanup improved to prevent "Too many open files" errors +- Proxy settings caching implemented and database connections properly closed after use +- EPG program fetching optimized with chunked retrieval and explicit ordering to reduce memory usage during output +- EPG output now sorted by channel number for consistent presentation +- Stream Table buttons reordered for better usability +- Database connection handling improved throughout the codebase to reduce overall connection count + +### Fixed + +- Crash when resizing columns in the Channel Table (#516) +- Errors when saving stream settings (#535) +- Preview and edit bugs for custom streams where profile and group selections did not display correctly +- `channel_id` and `channel.uuid` now converted to strings before processing to fix manual switching when the uWSGI worker was not the stream owner (#269) +- Stream locking and connection search issues when switching channels; increased search timeout to reduce premature failures (#503) +- Stream Table buttons no longer shift into multiple rows when selecting many streams +- Custom stream previews +- Custom Stream settings not loading properly (#186) +- Orphaned categories now automatically removed for VOD and Series during M3U refresh (#540) + +## [0.10.4] - 2025-10-08 + +### Added + +- "Assign TVG-ID from EPG" functionality with frontend actions for single-channel and batch operations +- Confirmation dialogs in `ChannelBatchForm` for setting names, logos, TVG-IDs, and clearing EPG 
assignments +- "Clear EPG" button to `ChannelBatchForm` for easy reset of assignments +- Batch editing of channel logos - Thanks [@EmeraldPi](https://github.com/EmeraldPi) +- Ability to set logo name from URL - Thanks [@EmeraldPi](https://github.com/EmeraldPi) +- Proper timestamp tracking for channel creation and updates; `XC Get Live Streams` now uses this information +- Time Zone Settings added to the application ([#482](https://github.com/Dispatcharr/Dispatcharr/issues/482), [#347](https://github.com/Dispatcharr/Dispatcharr/issues/347)) +- Comskip settings support including comskip.ini upload and custom directory selection (#418) +- Manual recording scheduling for channels without EPG data (#162) + +### Changed + +- Default M3U account type is now set to XC for new accounts +- Performance optimization: Only fetch playlists and channel profiles after a successful M3U refresh (rather than every status update) +- Playlist retrieval now includes current connection counts and improved session handling during VOD session start +- Improved stream selection logic when all profiles have reached max connections (retries faster) + +### Fixed + +- Large EPGs now fully parse all channels +- Duplicate channel outputs for streamer profiles set to "All" +- Streamer profiles with "All" assigned now receive all eligible channels +- PostgreSQL btree index errors from logo URL validation during channel creation (#519) +- M3U processing lock not releasing when no streams found during XC refresh, which also skipped VOD scanning (#449) +- Float conversion errors by normalizing decimal format during VOD scanning (#526) +- Direct URL ordering in M3U output to use correct stream sequence (#528) +- Adding multiple M3U accounts without refreshing modified only the first entry (#397) +- UI state bug where new playlist creation was not notified to frontend ("Fetching Groups" stuck) +- Minor FFmpeg task and stream termination bugs in DVR module +- Input escaping issue where single quotes were interpreted as code delimiters (#406) + +## [0.10.3] - 2025-10-04 + +### Added + +- Logo management UI improvements where Channel editor now uses the Logo Manager modal, allowing users to add logos by URL directly from the edit form - Thanks [@EmeraldPi](https://github.com/EmeraldPi) + +### Changed + +- FFmpeg base container rebuilt with improved native build support - Thanks [@EmeraldPi](https://github.com/EmeraldPi) +- GitHub Actions workflow updated to use native runners instead of QEMU emulation for more reliable multi-architecture builds + +### Fixed + +- EPG parsing stability when large EPG files would not fully parse all channels. 
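This entry continues after the sketch below, which illustrates the recovering-parser approach it describes (the changelog names `iterparse` with `recover=True`; the file path, tag handling, and memory trimming here are illustrative):

```python
from lxml import etree


def iter_xmltv_channels(path: str):
    """Yield (channel_id, display_name) pairs from an XMLTV file, tolerating damaged markup."""
    # recover=True lets lxml skip junk (e.g. stray root elements injected by a CDN)
    # instead of aborting the whole parse; processed elements are freed to bound memory.
    context = etree.iterparse(path, events=("end",), tag="channel", recover=True)
    for _, elem in context:
        display = elem.findtext("display-name") or ""
        yield elem.get("id"), display
        elem.clear()
        while elem.getprevious() is not None:
            del elem.getparent()[0]


if __name__ == "__main__":
    for channel_id, name in iter_xmltv_channels("guide.xml"):
        print(channel_id, name)
```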
Parser now uses `iterparse` with `recover=True` for both channel and program-level parsing, ensuring complete and resilient XML processing even when Cloudflare injects additional root elements + +## [0.10.2] - 2025-10-03 + +### Added + +- `m3u_id` parameter to `generate_hash_key` and updated related calls +- Support for `x-tvg-url` and `url-tvg` generation with preserved query parameters (#345) +- Exact Gracenote ID matching for EPG channel mapping (#291) +- Recovery handling for XMLTV parser errors +- `nice -n 5` added to Celery commands for better process priority management + +### Changed + +- Default M3U hash key changed to URL only for new installs +- M3U profile retrieval now includes current connection counts and improved session handling during VOD session start +- Improved stream selection logic when all profiles have reached max connections (retries faster) +- XMLTV parsing refactored to use `iterparse` for `` element +- Release workflow refactored to run on native architecture +- Docker build system improvements: + - Split install/build steps + - Switch from Yarn → NPM + - Updated to Node.js 24 (frontend build) + - Improved ARM build reliability + - Pushes to DockerHub with combined manifest + - Removed redundant tags and improved build organization + +### Fixed + +- Cloudflare-hosted EPG feeds breaking parsing (#497) +- Bulk channel creation now preserves the order channels were selected in (no longer reversed) +- M3U hash settings not saving properly +- VOD selecting the wrong M3U profile at session start (#461) +- Redundant `h` removed from 12-hour time format in settings page + +## [0.10.1] - 2025-09-24 + +### Added + +- Virtualized rendering for TV Guide for smoother performance when displaying large guides - Thanks [@stlalpha](https://github.com/stlalpha) (#438) +- Enhanced channel/program mapping to reuse EPG data across multiple channels that share the same TVG-ID + +### Changed + +- `URL` field length in EPGSource model increased from 200 → 1000 characters to support long URLs with tokens +- Improved URL transformation logic with more advanced regex during profile refreshes +- During EPG scanning, the first display name for a channel is now used instead of the last +- `whiteSpace` style changed from `nowrap` → `pre` in StreamsTable for better text formatting + +### Fixed + +- EPG channel parsing failure when channel `URL` exceeded 500 characters by adding validation during scanning (#452) +- Frontend incorrectly saving case-sensitive setting as a JSON string for stream filters + +## [0.10.0] - 2025-09-18 + +### Added + +- Channel Creation Improvements: + - Ability to specify channel number during channel creation ([#377](https://github.com/Dispatcharr/Dispatcharr/issues/377), [#169](https://github.com/Dispatcharr/Dispatcharr/issues/169)) + - Asynchronous bulk channel creation from stream IDs with WebSocket progress updates + - WebSocket notifications when channels are created +- EPG Auto-Matching (Rewritten & Enhanced): + - Completely refactored for improved accuracy and efficiency + - Can now be applied to selected channels or triggered directly from the channel edit form + - Uses stricter matching logic with support from sentence transformers + - Added progress notifications during the matching process + - Implemented memory cleanup for ML models after matching operations + - Removed deprecated matching scripts +- Logo & EPG Management: + - Ability in channel edit form and bulk channel editor to set logos and names from assigned EPG (#157) + - Improved logo update 
flow: frontend refreshes on changes, store updates after bulk changes, progress shown via notifications +- Table Enhancements: + - All tables now support adjustable column resizing (#295) + - Channels and Streams tables persist column widths and center divider position to local storage + - Improved sizing and layout for user-agents, stream profiles, logos, M3U, and EPG tables + +### Changed + +- Simplified VOD and series access: removed user-level restrictions on M3U accounts +- Skip disabled M3U accounts when choosing streams during playback (#402) +- Enhanced `UserViewSet` queryset to prefetch related channel profiles for better performance +- Auto-focus added to EPG filter input +- Category API retrieval now sorts by name +- Increased default column size for EPG fields and removed max size on group/EPG columns +- Standardized EPG column header to display `(EPG ID - TVG-ID)` + +### Fixed + +- Bug during VOD cleanup where all VODs not from the current M3U scan could be deleted +- Logos not being set correctly in some cases +- Bug where not setting a channel number caused an error when creating a channel (#422) +- Bug where clicking "Add Channel" with a channel selected opened the edit form instead +- Bug where a newly created channel could reuse streams from another channel due to form not clearing properly +- VOD page not displaying correct order while changing pages +- `ReferenceError: setIsInitialized is not defined` when logging into web UI +- `cannot access local variable 'total_chunks' where it is not associated with a value` during VOD refresh + +## [0.9.1] - 2025-09-13 + +### Fixed + +- Broken migrations affecting the plugins system +- DVR and plugin paths to ensure proper functionality (#381) + +## [0.9.0] - 2025-09-12 + +### Added + +- **Video on Demand (VOD) System:** + - Complete VOD infrastructure with support for movies and TV series + - Advanced VOD metadata including IMDB/TMDB integration, trailers, cast information + - Smart VOD categorization with filtering by type (movies vs series) + - Multi-provider VOD support with priority-based selection + - VOD streaming proxy with connection tracking and statistics + - Season/episode organization for TV series with expandable episode details + - VOD statistics and monitoring integrated with existing stats dashboard + - Optimized VOD parsing and category filtering + - Dedicated VOD page with movies and series tabs + - Rich VOD modals with backdrop images, trailers, and metadata + - Episode management with season-based organization + - Play button integration with external player support + - VOD statistics cards similar to channel cards +- **Plugin System:** + - Extensible Plugin Framework - Developers can build custom functionality without modifying Dispatcharr core + - Plugin Discovery & Management - Automatic detection of installed plugins, with enable/disable controls in the UI + - Backend API Support - New APIs for listing, loading, and managing plugins programmatically + - Plugin Registry - Structured models for plugin metadata (name, version, author, description) + - UI Enhancements - Dedicated Plugins page in the admin panel for centralized plugin management + - Documentation & Scaffolding - Initial documentation and scaffolding to accelerate plugin development +- **DVR System:** + - Refreshed DVR page for managing scheduled and completed recordings + - Global pre/post padding controls surfaced in Settings + - Playback support for completed recordings directly in the UI + - DVR table view includes title, channel, time, and 
padding adjustments for clear scheduling + - Improved population of DVR listings, fixing intermittent blank screen issues + - Comskip integration for automated commercial detection and skipping in recordings + - User-configurable comskip toggle in Settings +- **Enhanced Channel Management:** + - EPG column added to channels table for better organization + - EPG filtering by channel assignment and source name + - Channel batch renaming for efficient bulk channel name updates + - Auto channel sync improvements with custom stream profile override + - Channel logo management overhaul with background loading +- Date and time format customization in settings - Thanks [@Biologisten](https://github.com/Biologisten) +- Auto-refresh intervals for statistics with better UI controls +- M3U profile notes field for better organization +- XC account information retrieval and display with account refresh functionality and notifications + +### Changed + +- JSONB field conversion for custom properties (replacing text fields) for better performance +- Database encoding converted from ASCII to UTF8 for better character support +- Batch processing for M3U updates and channel operations +- Query optimization with prefetch_related to eliminate N+1 queries +- Reduced API calls by fetching all data at once instead of per-category +- Buffering speed setting now affects UI indicators +- Swagger endpoint accessible with or without trailing slash +- EPG source names displayed before channel names in edit forms +- Logo loading improvements with background processing +- Channel card enhancements with better status indicators +- Group column width optimization +- Better content-type detection for streams +- Improved headers with content-range and total length +- Enhanced user-agent handling for M3U accounts +- HEAD request support with connection keep-alive +- Progress tracking improvements for clients with new sessions +- Server URL length increased to 1000 characters for token support +- Prettier formatting applied to all frontend code +- String quote standardization and code formatting improvements + +### Fixed + +- Logo loading issues in channel edit forms resolved +- M3U download error handling and user feedback improved +- Unique constraint violations fixed during stream rehashing +- Channel stats fetching moved from Celery beat task to configurable API calls +- Speed badge colors now use configurable buffering speed setting +- Channel cards properly close when streams stop +- Active streams labeling updated from "Active Channels" +- WebSocket updates for client connect/disconnect events +- Null value handling before database saves +- Empty string scrubbing for cleaner data +- Group relationship cleanup for removed M3U groups +- Logo cleanup for unused files with proper batch processing +- Recordings start 5 mins after show starts (#102) + +### Closed + +- [#350](https://github.com/Dispatcharr/Dispatcharr/issues/350): Allow DVR recordings to be played via the UI +- [#349](https://github.com/Dispatcharr/Dispatcharr/issues/349): DVR screen doesn't populate consistently +- [#340](https://github.com/Dispatcharr/Dispatcharr/issues/340): Global find and replace +- [#311](https://github.com/Dispatcharr/Dispatcharr/issues/311): Stat's "Current Speed" does not reflect "Buffering Speed" setting +- [#304](https://github.com/Dispatcharr/Dispatcharr/issues/304): Name ignored when uploading logo +- [#300](https://github.com/Dispatcharr/Dispatcharr/issues/300): Updating Logo throws error +- 
[#286](https://github.com/Dispatcharr/Dispatcharr/issues/286): 2 Value/Column EPG in Channel Edit +- [#280](https://github.com/Dispatcharr/Dispatcharr/issues/280): Add general text field in M3U/XS profiles +- [#190](https://github.com/Dispatcharr/Dispatcharr/issues/190): Show which stream is being used and allow it to be altered in channel properties +- [#155](https://github.com/Dispatcharr/Dispatcharr/issues/155): Additional column with EPG assignment information / Allow filtering by EPG assignment +- [#138](https://github.com/Dispatcharr/Dispatcharr/issues/138): Bulk Channel Edit Functions + +## [0.8.0] - 2025-08-19 + +### Added + +- Channel & Stream Enhancements: + - Preview streams under a channel, with stream logo and name displayed in the channel card + - Advanced stats for channel streams + - Stream qualities displayed in the channel table + - Stream stats now saved to the database + - URL badges can now be clicked to copy stream links to the clipboard +- M3U Filtering for Streams: + - Streams for an M3U account can now be filtered using flexible parameters + - Apply filters based on stream name, group title, or stream URL (via regex) + - Filters support both inclusion and exclusion logic for precise control + - Multiple filters can be layered with a priority order for complex rules +- Ability to reverse the sort order for auto channel sync +- Custom validator for URL fields now allows non-FQDN hostnames (#63) +- Membership creation added in `UpdateChannelMembershipAPIView` if not found (#275) + +### Changed + +- Bumped Postgres to version 17 +- Updated dependencies in `requirements.txt` for compatibility and improvements +- Improved chunked extraction to prevent memory issues - Thanks [@pantherale0](https://github.com/pantherale0) + +### Fixed + +- XML escaping for channel ID in `generate_dummy_epg` function +- Bug where creating a channel from a stream not displayed in the table used an invalid stream name +- Debian install script - Thanks [@deku-m](https://github.com/deku-m) + +## [0.7.1] - 2025-07-29 + +### Added + +- Natural sorting for channel names during auto channel sync +- Ability to sort auto sync order by provider order (default), channel name, TVG ID, or last updated time +- Auto-created channels can now be assigned to specific channel profiles (#255) +- Channel profiles are now fetched automatically after a successful M3U refresh +- Uses only whole numbers when assigning the next available channel number + +### Changed + +- Logo upload behavior changed to wait for the Create button before saving +- Uses the channel name as the display name in EPG output for improved readability +- Ensures channels are only added to a selected profile if one is explicitly chosen + +### Fixed + +- Logo Manager prevents redundant messages from the file scanner by properly tracking uploaded logos in Redis +- Fixed an issue preventing logo uploads via URL +- Adds internal support for assigning multiple profiles via API + +## [0.7.0] - 2025-07-19 + +### Added + +- **Logo Manager:** + - Complete logo management system with filtering, search, and usage tracking + - Upload logos directly through the UI + - Automatically scan `/data/logos` for existing files (#69) + - View which channels use each logo + - Bulk delete unused logos with cleanup + - Enhanced display with hover effects and improved sizing + - Improved logo fetching with timeouts and user-agent headers to prevent hanging +- **Group Manager:** + - Comprehensive group management interface (#128) + - Search and filter groups with ease + 
- Bulk operations for cleanup + - Filter channels by group membership + - Automatically clean up unused groups +- **Auto Channel Sync:** + - Automatic channel synchronization from M3U sources (#147) + - Configure auto-sync settings per M3U account group + - Set starting channel numbers by group + - Override group names during sync + - Apply regex match and replace for channel names + - Filter channels by regex match on stream name + - Track auto-created vs manually added channels + - Smart updates preserve UUIDs and existing links +- Stream rehashing with WebSocket notifications +- Better error handling for blocked rehash attempts +- Lock acquisition to prevent conflicts +- Real-time progress tracking + +### Changed + +- Persist table page sizes in local storage (streams & channels) +- Smoother pagination and improved UX +- Fixed z-index issues during table refreshes +- Improved XC client with connection pooling +- Better error handling for API and JSON decode failures +- Smarter handling of empty content and blocking responses +- Improved EPG XML generation with richer metadata +- Better support for keywords, languages, ratings, and credits +- Better form layouts and responsive buttons +- Enhanced confirmation dialogs and feedback + +### Fixed + +- Channel table now correctly restores page size from local storage +- Resolved WebSocket message formatting issues +- Fixed logo uploads and edits +- Corrected ESLint issues across the codebase +- Fixed HTML validation errors in menus +- Optimized logo fetching with proper timeouts and headers ([#101](https://github.com/Dispatcharr/Dispatcharr/issues/101), [#217](https://github.com/Dispatcharr/Dispatcharr/issues/217)) + +## [0.6.2] - 2025-07-10 + +### Fixed + +- **Streaming & Connection Stability:** + - Provider timeout issues - Slow but responsive providers no longer cause channel lockups + - Added chunk and process timeouts - Prevents hanging during stream processing and transcoding + - Improved connection handling - Enhanced process management and socket closure detection for safer streaming + - Enhanced health monitoring - Health monitor now properly notifies main thread without attempting reconnections +- **User Interface & Experience:** + - Touch screen compatibility - Web player can now be properly closed on touch devices + - Improved user management - Added support for first/last names, login tracking, and standardized table formatting +- Improved logging - Enhanced log messages with channel IDs for better debugging +- Code cleanup - Removed unused imports, variables, and dead links + +## [0.6.1] - 2025-06-27 + +### Added + +- Dynamic parameter options for M3U and EPG URLs (#207) +- Support for 'num' property in channel number extraction (fixes channel creation from XC streams not having channel numbers) + +### Changed + +- EPG generation now uses streaming responses to prevent client timeouts during large EPG file generation (#179) +- Improved reliability when downloading EPG data from external sources +- Better program positioning - Programs that start before the current view now have proper text positioning (#223) +- Better mobile support - Improved sizing and layout for mobile devices across multiple tables +- Responsive stats cards - Better calculation for card layout and improved filling on different screen sizes (#218) +- Enhanced table rendering - M3U and EPG tables now render better on small screens +- Optimized spacing - Removed unnecessary padding and blank space throughout the interface +- Better settings layout - Improved 
minimum widths and mobile support for settings pages +- Always show 2 decimal places for FFmpeg speed values + +### Fixed + +- TV Guide now properly filters channels based on selected channel group +- Resolved loading issues - Fixed channels and groups not loading correctly in the TV Guide +- Stream profile fixes - Resolved issue with setting stream profile to 'use default' +- Single channel editing - When only one channel is selected, the correct channel editor now opens +- Bulk edit improvements - Added "no change" options for bulk editing operations +- Bulk channel editor now properly saves changes (#222) +- Link form improvements - Better sizing and rendering of link forms with proper layering +- Confirmation dialogs added with warning suppression for user deletion, channel profile deletion, and M3U profile deletion + +## [0.6.0] - 2025-06-19 + +### Added + +- **User Management & Access Control:** + - Complete user management system with user levels and channel access controls + - Network access control with CIDR validation and IP-based restrictions + - Logout functionality and improved loading states for authenticated users +- **Xtream Codes Output:** + - Xtream Codes support enables easy output to IPTV clients (#195) +- **Stream Management & Monitoring:** + - FFmpeg statistics integration - Real-time display of video/audio codec info, resolution, speed, and stream type + - Automatic stream switching when buffering is detected + - Enhanced stream profile management with better connection tracking + - Improved stream state detection, including buffering as an active state +- **Channel Management:** + - Bulk channel editing for channel group, stream profile, and user access level +- **Enhanced M3U & EPG Features:** + - Dynamic `tvg-id` source selection for M3U and EPG (`tvg_id`, `gracenote`, or `channel_number`) + - Direct URL support in M3U output via `direct=true` parameter + - Flexible EPG output with a configurable day limit via `days=#` parameter + - Support for LIVE tags and `dd_progrid` numbering in EPG processing +- Proxy settings configuration with UI integration and improved validation +- Stream retention controls - Set stale stream days to `0` to disable retention completely (#123) +- Tuner flexibility - Minimum of 1 tuner now allowed for HDHomeRun output +- Fallback IP geolocation provider (#127) - Thanks [@maluueu](https://github.com/maluueu) +- POST method now allowed for M3U output, enabling support for Smarters IPTV - Thanks [@maluueu](https://github.com/maluueu) + +### Changed + +- Improved channel cards with better status indicators and tooltips +- Clearer error messaging for unsupported codecs in the web player +- Network access warnings to prevent accidental lockouts +- Case-insensitive M3U parsing for improved compatibility +- Better EPG processing with improved channel matching +- Replaced Mantine React Table with custom implementations +- Improved tooltips and parameter wrapping for cleaner interfaces +- Better badge colors and status indicators +- Stronger form validation and user feedback +- Streamlined settings management using JSON configs +- Default value population for clean installs +- Environment-specific configuration support for multiple deployment scenarios + +### Fixed + +- FFmpeg process cleanup - Ensures FFmpeg fully exits before marking connection closed +- Resolved stream profile update issues in statistics display +- Fixed M3U profile ID behavior when switching streams +- Corrected stream switching logic - Redis is only updated on successful 
switches +- Fixed connection counting - Excludes the current profile from available connection counts +- Fixed custom stream channel creation when no group is assigned (#122) +- Resolved EPG auto-matching deadlock when many channels match simultaneously - Thanks [@xham3](https://github.com/xham3) + +## [0.5.2] - 2025-06-03 + +### Added + +- Direct Logo Support: Added ability to bypass logo caching by adding `?cachedlogos=false` to the end of M3U and EPG URLs (#109) + +### Changed + +- Dynamic Resource Management: Auto-scales Celery workers based on demand, reducing overall memory and CPU usage while still allowing high-demand tasks to complete quickly (#111) +- Enhanced Logging: + - Improved logging for M3U processing + - Better error output from XML parser for easier troubleshooting + +### Fixed + +- XMLTV Parsing: Added `remove_blank_text=True` to lxml parser to prevent crashes with poorly formatted XMLTV files (#115) +- Stats Display: Refactored channel info retrieval for safer decoding and improved error logging, fixing intermittent issues with statistics not displaying properly + +## [0.5.1] - 2025-05-28 + +### Added + +- Support for ZIP-compressed EPG files +- Automatic extraction of compressed files after downloading +- Intelligent file type detection for EPG sources: + - Reads the first bits of files to determine file type + - If a compressed file is detected, it peeks inside to find XML files +- Random descriptions for dummy channels in the TV guide +- Support for decimal channel numbers (converted from integer to float) - Thanks [@MooseyOnTheLoosey](https://github.com/MooseyOnTheLoosey) +- Show channels without EPG data in TV Guide +- Profile name added to HDHR-friendly name and device ID (allows adding multiple HDHR profiles to Plex) + +### Changed + +- About 30% faster EPG processing +- Significantly improved memory usage for large EPG files +- Improved timezone handling +- Cleaned up cached files when deleting EPG sources +- Performance improvements when processing extremely large M3U files +- Improved batch processing with better cleanup +- Enhanced WebSocket update handling for large operations +- Redis configured for better performance (no longer saves to disk) +- Improved memory management for Celery tasks +- Separated beat schedules with a file scanning interval set to 20 seconds +- Improved authentication error handling with user redirection to the login page +- Improved channel card formatting for different screen resolutions (can now actually read the channel stats card on mobile) +- Decreased line height for status messages in the EPG and M3U tables for better appearance on smaller screens +- Updated the EPG form to match the M3U form for consistency + +### Fixed + +- Profile selection issues that previously caused WebUI crashes +- Issue with `tvc-guide-id` (Gracenote ID) in bulk channel creation +- Bug when uploading an M3U with the default user-agent set +- Bug where multiple channel initializations could occur, causing zombie streams and performance issues (choppy streams) +- Better error handling for buffer overflow issues +- Fixed various memory leaks +- Bug in the TV Guide that would crash the web UI when selecting a profile to filter by +- Multiple minor bug fixes and code cleanup + +## [0.5.0] - 2025-05-15 + +### Added + +- **XtreamCodes Support:** + - Initial XtreamCodes client support + - Option to add EPG source with XC account + - Improved XC login and authentication + - Improved error handling for XC connections +- **Hardware Acceleration:** + - Detection 
of hardware acceleration capabilities with recommendations (available in logs after startup) + - Improved support for NVIDIA, Intel (QSV), and VAAPI acceleration methods + - Added necessary drivers and libraries for hardware acceleration + - Automatically assigns required permissions for hardware acceleration + - Thanks to [@BXWeb](https://github.com/BXWeb), @chris.r3x, [@rykr](https://github.com/rykr), @j3111, [@jesmannstl](https://github.com/jesmannstl), @jimmycarbone, [@gordlaben](https://github.com/gordlaben), [@roofussummers](https://github.com/roofussummers), [@slamanna212](https://github.com/slamanna212) +- **M3U and EPG Management:** + - Enhanced M3U profile creation with live regex results + - Added stale stream detection with configurable thresholds + - Improved status messaging for M3U and EPG operations: + - Shows download speed with estimated time remaining + - Shows parsing time remaining + - Added "Pending Setup" status for M3U's requiring group selection + - Improved handling of M3U group filtering +- **UI Improvements:** + - Added configurable table sizes + - Enhanced video player with loading and error states + - Improved WebSocket connection handling with authentication + - Added confirmation dialogs for critical operations + - Auto-assign numbers now configurable by selection + - Added bulk editing of channel profile membership (select multiple channels, then click the profile toggle on any selected channel to apply the change to all) +- **Infrastructure & Performance:** + - Standardized and improved the logging system + - New environment variable to set logging level: `DISPATCHARR_LOG_LEVEL` (default: `INFO`, available: `TRACE`, `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`) + - Introduced a new base image build process: updates are now significantly smaller (typically under 15MB unless the base image changes) + - Improved environment variable handling in container +- Support for Gracenote ID (`tvc-guide-stationid`) - Thanks [@rykr](https://github.com/rykr) +- Improved file upload handling with size limits removed + +### Fixed + +- Issues with profiles not loading correctly +- Problems with stream previews in tables +- Channel creation and editing workflows +- Logo display issues +- WebSocket connection problems +- Multiple React-related errors and warnings +- Pagination and filtering issues in tables + +## [0.4.1] - 2025-05-01 + +### Changed + +- Optimized uWSGI configuration settings for better server performance +- Improved asynchronous processing by converting additional timers to gevent +- Enhanced EPG (Electronic Program Guide) downloading with proper user agent headers + +### Fixed + +- Issue with "add streams to channel" functionality to correctly follow disabled state logic + +## [0.4.0] - 2025-05-01 + +### Added + +- URL copy buttons for stream and channel URLs +- Manual stream switching ability +- EPG auto-match notifications - Users now receive feedback about how many matches were found +- Informative tooltips throughout the interface, including stream profiles and user-agent details +- Display of connected time for each client +- Current M3U profile information to stats +- Better logging for which channel clients are getting chunks from + +### Changed + +- Table System Rewrite: Completely refactored channel and stream tables for dramatically improved performance with large datasets +- Improved Concurrency: Replaced time.sleep with gevent.sleep for better performance when handling multiple streams +- Improved table interactions: + - Restored alternating 
row colors and hover effects + - Added shift-click support for multiple row selection + - Preserved drag-and-drop functionality +- Adjusted logo display to prevent layout shifts with different sized logos +- Improved sticky headers in tables +- Fixed spacing and padding in EPG and M3U tables for better readability on smaller displays +- Stream URL handling improved for search/replace patterns +- Enhanced stream lock management for better reliability +- Added stream name to channel status for better visibility +- Properly track current stream ID during stream switches +- Improved EPG cache handling and cleanup of old cache files +- Corrected content type for M3U file (using m3u instead of m3u8) +- Fixed logo URL handling in M3U generation +- Enhanced tuner count calculation to include only active M3U accounts +- Increased thread stack size in uwsgi configuration +- Changed proxy to use uwsgi socket +- Added build timestamp to version information +- Reduced excessive logging during M3U/EPG file importing +- Improved store variable handling to increase application efficiency +- Frontend now being built by Yarn instead of NPM + +### Fixed + +- Issues with channel statistics randomly not working +- Stream ordering in channel selection +- M3U profile name added to stream names for better identification +- Channel form not updating some properties after saving +- Issue with setting logos to default +- Channel creation from streams +- Channel group saving +- Improved error handling throughout the application +- Bugs in deleting stream profiles +- Resolved mimetype detection issues +- Fixed form display issues +- Added proper requerying after form submissions and item deletions +- Bug overwriting tvg-id when loading TV Guide +- Bug that prevented large m3u's and epg's from uploading +- Typo in Stream Profile header column for Description - Thanks [@LoudSoftware](https://github.com/LoudSoftware) +- Typo in m3u input processing (tv-chno instead of tvg-chno) - Thanks @www2a + +## [0.3.3] - 2025-04-18 + +### Fixed + +- Issue with dummy EPG calculating hours above 24, ensuring time values remain within valid 24-hour format +- Auto import functionality to properly process old files that hadn't been imported yet, rather than ignoring them + +## [0.3.2] - 2025-04-16 + +### Fixed + +- Issue with stream ordering for channels - resolved problem where stream objects were incorrectly processed when assigning order in channel configurations + +## [0.3.1] - 2025-04-16 + +### Added + +- Key to navigation links in sidebar to resolve DOM errors when loading web UI +- Channels that are set to 'dummy' epg to the TV Guide + +### Fixed + +- Issue preventing dummy EPG from being set +- Channel numbers not saving properly +- EPGs not refreshing when linking EPG to channel +- Improved error messages in notifications + +## [0.3.0] - 2025-04-15 + +### Added + +- URL validation for redirect profile: + - Validates stream URLs before redirecting clients + - Prevents clients from being redirected to unavailable streams + - Now tries alternate streams when primary stream validation fails +- Dynamic tuner configuration for HDHomeRun devices: + - TunerCount is now dynamically created based on profile max connections + - Sets minimum of 2 tuners, up to 10 for unlimited profiles + +### Changed + +- More robust stream switching: + - Clients now wait properly if a stream is in the switching state + - Improved reliability during stream transitions +- Performance enhancements: + - Increased workers and threads for uwsgi for better 
concurrency + +### Fixed + +- Issue with multiple dead streams in a row - System now properly handles cases where several sequential streams are unavailable +- Broken links to compose files in documentation + +## [0.2.1] - 2025-04-13 + +### Fixed + +- Stream preview (not channel) +- Streaming wouldn't work when using default user-agent for an M3U +- WebSockets and M3U profile form issues + +## [0.2.0] - 2025-04-12 + +Initial beta public release. diff --git a/apps/accounts/api_views.py b/apps/accounts/api_views.py index bf87c2ab..41e2f077 100644 --- a/apps/accounts/api_views.py +++ b/apps/accounts/api_views.py @@ -20,30 +20,88 @@ class TokenObtainPairView(TokenObtainPairView): def post(self, request, *args, **kwargs): # Custom logic here if not network_access_allowed(request, "UI"): + # Log blocked login attempt due to network restrictions + from core.utils import log_system_event + username = request.data.get("username", 'unknown') + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + log_system_event( + event_type='login_failed', + user=username, + client_ip=client_ip, + user_agent=user_agent, + reason='Network access denied', + ) return Response({"error": "Forbidden"}, status=status.HTTP_403_FORBIDDEN) # Get the response from the parent class first - response = super().post(request, *args, **kwargs) + username = request.data.get("username") - # If login was successful, update last_login - if response.status_code == 200: - username = request.data.get("username") - if username: - from django.utils import timezone - try: - user = User.objects.get(username=username) - user.last_login = timezone.now() - user.save(update_fields=['last_login']) - except User.DoesNotExist: - pass # User doesn't exist, but login somehow succeeded + # Log login attempt + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') - return response + try: + response = super().post(request, *args, **kwargs) + + # If login was successful, update last_login and log success + if response.status_code == 200: + if username: + from django.utils import timezone + try: + user = User.objects.get(username=username) + user.last_login = timezone.now() + user.save(update_fields=['last_login']) + + # Log successful login + log_system_event( + event_type='login_success', + user=username, + client_ip=client_ip, + user_agent=user_agent, + ) + except User.DoesNotExist: + pass # User doesn't exist, but login somehow succeeded + else: + # Log failed login attempt + log_system_event( + event_type='login_failed', + user=username or 'unknown', + client_ip=client_ip, + user_agent=user_agent, + reason='Invalid credentials', + ) + + return response + + except Exception as e: + # If parent class raises an exception (e.g., validation error), log failed attempt + log_system_event( + event_type='login_failed', + user=username or 'unknown', + client_ip=client_ip, + user_agent=user_agent, + reason=f'Authentication error: {str(e)[:100]}', + ) + raise # Re-raise the exception to maintain normal error flow class TokenRefreshView(TokenRefreshView): def post(self, request, *args, **kwargs): # Custom logic here if not network_access_allowed(request, "UI"): + # Log blocked token refresh attempt due to network restrictions + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + 
log_system_event( + event_type='login_failed', + user='token_refresh', + client_ip=client_ip, + user_agent=user_agent, + reason='Network access denied (token refresh)', + ) return Response({"error": "Unauthorized"}, status=status.HTTP_403_FORBIDDEN) return super().post(request, *args, **kwargs) @@ -80,6 +138,15 @@ def initialize_superuser(request): class AuthViewSet(viewsets.ViewSet): """Handles user login and logout""" + def get_permissions(self): + """ + Login doesn't require auth, but logout does + """ + if self.action == 'logout': + from rest_framework.permissions import IsAuthenticated + return [IsAuthenticated()] + return [] + @swagger_auto_schema( operation_description="Authenticate and log in a user", request_body=openapi.Schema( @@ -100,6 +167,11 @@ class AuthViewSet(viewsets.ViewSet): password = request.data.get("password") user = authenticate(request, username=username, password=password) + # Get client info for logging + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + if user: login(request, user) # Update last_login timestamp @@ -107,6 +179,14 @@ class AuthViewSet(viewsets.ViewSet): user.last_login = timezone.now() user.save(update_fields=['last_login']) + # Log successful login + log_system_event( + event_type='login_success', + user=username, + client_ip=client_ip, + user_agent=user_agent, + ) + return Response( { "message": "Login successful", @@ -118,6 +198,15 @@ class AuthViewSet(viewsets.ViewSet): }, } ) + + # Log failed login attempt + log_system_event( + event_type='login_failed', + user=username or 'unknown', + client_ip=client_ip, + user_agent=user_agent, + reason='Invalid credentials', + ) return Response({"error": "Invalid credentials"}, status=400) @swagger_auto_schema( @@ -126,6 +215,19 @@ class AuthViewSet(viewsets.ViewSet): ) def logout(self, request): """Logs out the authenticated user""" + # Log logout event before actually logging out + from core.utils import log_system_event + username = request.user.username if request.user and request.user.is_authenticated else 'unknown' + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + + log_system_event( + event_type='logout', + user=username, + client_ip=client_ip, + user_agent=user_agent, + ) + logout(request) return Response({"message": "Logout successful"}) diff --git a/apps/api/urls.py b/apps/api/urls.py index 7d9edb52..4c92c70a 100644 --- a/apps/api/urls.py +++ b/apps/api/urls.py @@ -27,6 +27,7 @@ urlpatterns = [ path('core/', include(('core.api_urls', 'core'), namespace='core')), path('plugins/', include(('apps.plugins.api_urls', 'plugins'), namespace='plugins')), path('vod/', include(('apps.vod.api_urls', 'vod'), namespace='vod')), + path('backups/', include(('apps.backups.api_urls', 'backups'), namespace='backups')), # path('output/', include(('apps.output.api_urls', 'output'), namespace='output')), #path('player/', include(('apps.player.api_urls', 'player'), namespace='player')), #path('settings/', include(('apps.settings.api_urls', 'settings'), namespace='settings')), diff --git a/apps/backups/__init__.py b/apps/backups/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/apps/backups/api_urls.py b/apps/backups/api_urls.py new file mode 100644 index 00000000..226758cc --- /dev/null +++ b/apps/backups/api_urls.py @@ -0,0 +1,18 @@ +from django.urls import path + +from . 
import api_views + +app_name = "backups" + +urlpatterns = [ + path("", api_views.list_backups, name="backup-list"), + path("create/", api_views.create_backup, name="backup-create"), + path("upload/", api_views.upload_backup, name="backup-upload"), + path("schedule/", api_views.get_schedule, name="backup-schedule-get"), + path("schedule/update/", api_views.update_schedule, name="backup-schedule-update"), + path("status/<str:task_id>/", api_views.backup_status, name="backup-status"), + path("<str:filename>/download-token/", api_views.get_download_token, name="backup-download-token"), + path("<str:filename>/download/", api_views.download_backup, name="backup-download"), + path("<str:filename>/delete/", api_views.delete_backup, name="backup-delete"), + path("<str:filename>/restore/", api_views.restore_backup, name="backup-restore"), +] diff --git a/apps/backups/api_views.py new file mode 100644 index 00000000..c6ff7d26 --- /dev/null +++ b/apps/backups/api_views.py @@ -0,0 +1,364 @@ +import hashlib +import hmac +import logging +import os +from pathlib import Path + +from celery.result import AsyncResult +from django.conf import settings +from django.http import HttpResponse, StreamingHttpResponse, Http404 +from rest_framework import status +from rest_framework.decorators import api_view, permission_classes, parser_classes +from rest_framework.permissions import IsAdminUser, AllowAny +from rest_framework.parsers import MultiPartParser, FormParser +from rest_framework.response import Response + +from . import services +from .tasks import create_backup_task, restore_backup_task +from .scheduler import get_schedule_settings, update_schedule_settings + +logger = logging.getLogger(__name__) + + +def _generate_task_token(task_id: str) -> str: + """Generate a signed token for task status access without auth.""" + secret = settings.SECRET_KEY.encode() + return hmac.new(secret, task_id.encode(), hashlib.sha256).hexdigest()[:32] + + +def _verify_task_token(task_id: str, token: str) -> bool: + """Verify a task token is valid.""" + expected = _generate_task_token(task_id) + return hmac.compare_digest(expected, token) + + +@api_view(["GET"]) +@permission_classes([IsAdminUser]) +def list_backups(request): + """List all available backup files.""" + try: + backups = services.list_backups() + return Response(backups, status=status.HTTP_200_OK) + except Exception as e: + return Response( + {"detail": f"Failed to list backups: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["POST"]) +@permission_classes([IsAdminUser]) +def create_backup(request): + """Create a new backup (async via Celery).""" + try: + task = create_backup_task.delay() + return Response( + { + "detail": "Backup started", + "task_id": task.id, + "task_token": _generate_task_token(task.id), + }, + status=status.HTTP_202_ACCEPTED, + ) + except Exception as e: + return Response( + {"detail": f"Failed to start backup: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["GET"]) +@permission_classes([AllowAny]) +def backup_status(request, task_id): + """Check the status of a backup/restore task.
+ + Requires either: + - Valid admin authentication, OR + - Valid task_token query parameter + """ + # Check for token-based auth (for restore when session is invalidated) + token = request.query_params.get("token") + if token: + if not _verify_task_token(task_id, token): + return Response( + {"detail": "Invalid task token"}, + status=status.HTTP_403_FORBIDDEN, + ) + else: + # Fall back to admin auth check + if not request.user.is_authenticated or not request.user.is_staff: + return Response( + {"detail": "Authentication required"}, + status=status.HTTP_401_UNAUTHORIZED, + ) + + try: + result = AsyncResult(task_id) + + if result.ready(): + task_result = result.get() + if task_result.get("status") == "completed": + return Response({ + "state": "completed", + "result": task_result, + }) + else: + return Response({ + "state": "failed", + "error": task_result.get("error", "Unknown error"), + }) + elif result.failed(): + return Response({ + "state": "failed", + "error": str(result.result), + }) + else: + return Response({ + "state": result.state.lower(), + }) + except Exception as e: + return Response( + {"detail": f"Failed to get task status: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["GET"]) +@permission_classes([IsAdminUser]) +def get_download_token(request, filename): + """Get a signed token for downloading a backup file.""" + try: + # Security: prevent path traversal + if ".." in filename or "/" in filename or "\\" in filename: + raise Http404("Invalid filename") + + backup_dir = services.get_backup_dir() + backup_file = backup_dir / filename + + if not backup_file.exists(): + raise Http404("Backup file not found") + + token = _generate_task_token(filename) + return Response({"token": token}) + except Http404: + raise + except Exception as e: + return Response( + {"detail": f"Failed to generate token: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["GET"]) +@permission_classes([AllowAny]) +def download_backup(request, filename): + """Download a backup file. + + Requires either: + - Valid admin authentication, OR + - Valid download_token query parameter + """ + # Check for token-based auth (avoids CORS preflight issues) + token = request.query_params.get("token") + if token: + if not _verify_task_token(filename, token): + return Response( + {"detail": "Invalid download token"}, + status=status.HTTP_403_FORBIDDEN, + ) + else: + # Fall back to admin auth check + if not request.user.is_authenticated or not request.user.is_staff: + return Response( + {"detail": "Authentication required"}, + status=status.HTTP_401_UNAUTHORIZED, + ) + + try: + # Security: prevent path traversal by checking for suspicious characters + if ".." 
in filename or "/" in filename or "\\" in filename: + raise Http404("Invalid filename") + + backup_dir = services.get_backup_dir() + backup_file = (backup_dir / filename).resolve() + + # Security: ensure the resolved path is still within backup_dir + if not str(backup_file).startswith(str(backup_dir.resolve())): + raise Http404("Invalid filename") + + if not backup_file.exists() or not backup_file.is_file(): + raise Http404("Backup file not found") + + file_size = backup_file.stat().st_size + + # Use X-Accel-Redirect for nginx (AIO container) - nginx serves file directly + # Fall back to streaming for non-nginx deployments + use_nginx_accel = os.environ.get("USE_NGINX_ACCEL", "").lower() == "true" + logger.info(f"[DOWNLOAD] File: {filename}, Size: {file_size}, USE_NGINX_ACCEL: {use_nginx_accel}") + + if use_nginx_accel: + # X-Accel-Redirect: Django returns immediately, nginx serves file + logger.info(f"[DOWNLOAD] Using X-Accel-Redirect: /protected-backups/{filename}") + response = HttpResponse() + response["X-Accel-Redirect"] = f"/protected-backups/{filename}" + response["Content-Type"] = "application/zip" + response["Content-Length"] = file_size + response["Content-Disposition"] = f'attachment; filename="{filename}"' + return response + else: + # Streaming fallback for non-nginx deployments + logger.info(f"[DOWNLOAD] Using streaming fallback (no nginx)") + def file_iterator(file_path, chunk_size=2 * 1024 * 1024): + with open(file_path, "rb") as f: + while chunk := f.read(chunk_size): + yield chunk + + response = StreamingHttpResponse( + file_iterator(backup_file), + content_type="application/zip", + ) + response["Content-Length"] = file_size + response["Content-Disposition"] = f'attachment; filename="{filename}"' + return response + except Http404: + raise + except Exception as e: + return Response( + {"detail": f"Download failed: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["DELETE"]) +@permission_classes([IsAdminUser]) +def delete_backup(request, filename): + """Delete a backup file.""" + try: + # Security: prevent path traversal + if ".." 
in filename or "/" in filename or "\\" in filename: + raise Http404("Invalid filename") + + services.delete_backup(filename) + return Response( + {"detail": "Backup deleted successfully"}, + status=status.HTTP_204_NO_CONTENT, + ) + except FileNotFoundError: + raise Http404("Backup file not found") + except Exception as e: + return Response( + {"detail": f"Delete failed: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["POST"]) +@permission_classes([IsAdminUser]) +@parser_classes([MultiPartParser, FormParser]) +def upload_backup(request): + """Upload a backup file for restoration.""" + uploaded = request.FILES.get("file") + if not uploaded: + return Response( + {"detail": "No file uploaded"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + try: + backup_dir = services.get_backup_dir() + filename = uploaded.name or "uploaded-backup.zip" + + # Ensure unique filename + backup_file = backup_dir / filename + counter = 1 + while backup_file.exists(): + name_parts = filename.rsplit(".", 1) + if len(name_parts) == 2: + backup_file = backup_dir / f"{name_parts[0]}-{counter}.{name_parts[1]}" + else: + backup_file = backup_dir / f"{filename}-{counter}" + counter += 1 + + # Save uploaded file + with backup_file.open("wb") as f: + for chunk in uploaded.chunks(): + f.write(chunk) + + return Response( + { + "detail": "Backup uploaded successfully", + "filename": backup_file.name, + }, + status=status.HTTP_201_CREATED, + ) + except Exception as e: + return Response( + {"detail": f"Upload failed: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["POST"]) +@permission_classes([IsAdminUser]) +def restore_backup(request, filename): + """Restore from a backup file (async via Celery). WARNING: This will flush the database!""" + try: + # Security: prevent path traversal + if ".." 
in filename or "/" in filename or "\\" in filename: + raise Http404("Invalid filename") + + backup_dir = services.get_backup_dir() + backup_file = backup_dir / filename + + if not backup_file.exists(): + raise Http404("Backup file not found") + + task = restore_backup_task.delay(filename) + return Response( + { + "detail": "Restore started", + "task_id": task.id, + "task_token": _generate_task_token(task.id), + }, + status=status.HTTP_202_ACCEPTED, + ) + except Http404: + raise + except Exception as e: + return Response( + {"detail": f"Failed to start restore: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["GET"]) +@permission_classes([IsAdminUser]) +def get_schedule(request): + """Get backup schedule settings.""" + try: + settings = get_schedule_settings() + return Response(settings) + except Exception as e: + return Response( + {"detail": f"Failed to get schedule: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["PUT"]) +@permission_classes([IsAdminUser]) +def update_schedule(request): + """Update backup schedule settings.""" + try: + settings = update_schedule_settings(request.data) + return Response(settings) + except ValueError as e: + return Response( + {"detail": str(e)}, + status=status.HTTP_400_BAD_REQUEST, + ) + except Exception as e: + return Response( + {"detail": f"Failed to update schedule: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) diff --git a/apps/backups/apps.py b/apps/backups/apps.py new file mode 100644 index 00000000..ee644149 --- /dev/null +++ b/apps/backups/apps.py @@ -0,0 +1,7 @@ +from django.apps import AppConfig + + +class BackupsConfig(AppConfig): + default_auto_field = "django.db.models.BigAutoField" + name = "apps.backups" + verbose_name = "Backups" diff --git a/apps/backups/migrations/__init__.py b/apps/backups/migrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/apps/backups/models.py b/apps/backups/models.py new file mode 100644 index 00000000..e69de29b diff --git a/apps/backups/scheduler.py b/apps/backups/scheduler.py new file mode 100644 index 00000000..aa7e9bcd --- /dev/null +++ b/apps/backups/scheduler.py @@ -0,0 +1,202 @@ +import json +import logging + +from django_celery_beat.models import PeriodicTask, CrontabSchedule + +from core.models import CoreSettings + +logger = logging.getLogger(__name__) + +BACKUP_SCHEDULE_TASK_NAME = "backup-scheduled-task" + +DEFAULTS = { + "schedule_enabled": True, + "schedule_frequency": "daily", + "schedule_time": "03:00", + "schedule_day_of_week": 0, # Sunday + "retention_count": 3, + "schedule_cron_expression": "", +} + + +def _get_backup_settings(): + """Get all backup settings from CoreSettings grouped JSON.""" + try: + settings_obj = CoreSettings.objects.get(key="backup_settings") + return settings_obj.value if isinstance(settings_obj.value, dict) else DEFAULTS.copy() + except CoreSettings.DoesNotExist: + return DEFAULTS.copy() + + +def _update_backup_settings(updates: dict) -> None: + """Update backup settings in the grouped JSON.""" + obj, created = CoreSettings.objects.get_or_create( + key="backup_settings", + defaults={"name": "Backup Settings", "value": DEFAULTS.copy()} + ) + current = obj.value if isinstance(obj.value, dict) else {} + current.update(updates) + obj.value = current + obj.save() + + +def get_schedule_settings() -> dict: + """Get all backup schedule settings.""" + settings = _get_backup_settings() + return { + "enabled": bool(settings.get("schedule_enabled", 
DEFAULTS["schedule_enabled"])), + "frequency": str(settings.get("schedule_frequency", DEFAULTS["schedule_frequency"])), + "time": str(settings.get("schedule_time", DEFAULTS["schedule_time"])), + "day_of_week": int(settings.get("schedule_day_of_week", DEFAULTS["schedule_day_of_week"])), + "retention_count": int(settings.get("retention_count", DEFAULTS["retention_count"])), + "cron_expression": str(settings.get("schedule_cron_expression", DEFAULTS["schedule_cron_expression"])), + } + + +def update_schedule_settings(data: dict) -> dict: + """Update backup schedule settings and sync the PeriodicTask.""" + # Validate + if "frequency" in data and data["frequency"] not in ("daily", "weekly"): + raise ValueError("frequency must be 'daily' or 'weekly'") + + if "time" in data: + try: + hour, minute = data["time"].split(":") + int(hour) + int(minute) + except (ValueError, AttributeError): + raise ValueError("time must be in HH:MM format") + + if "day_of_week" in data: + day = int(data["day_of_week"]) + if day < 0 or day > 6: + raise ValueError("day_of_week must be 0-6 (Sunday-Saturday)") + + if "retention_count" in data: + count = int(data["retention_count"]) + if count < 0: + raise ValueError("retention_count must be >= 0") + + # Update settings with proper key names + updates = {} + if "enabled" in data: + updates["schedule_enabled"] = bool(data["enabled"]) + if "frequency" in data: + updates["schedule_frequency"] = str(data["frequency"]) + if "time" in data: + updates["schedule_time"] = str(data["time"]) + if "day_of_week" in data: + updates["schedule_day_of_week"] = int(data["day_of_week"]) + if "retention_count" in data: + updates["retention_count"] = int(data["retention_count"]) + if "cron_expression" in data: + updates["schedule_cron_expression"] = str(data["cron_expression"]) + + _update_backup_settings(updates) + + # Sync the periodic task + _sync_periodic_task() + + return get_schedule_settings() + + +def _sync_periodic_task() -> None: + """Create, update, or delete the scheduled backup task based on settings.""" + settings = get_schedule_settings() + + if not settings["enabled"]: + # Delete the task if it exists + task = PeriodicTask.objects.filter(name=BACKUP_SCHEDULE_TASK_NAME).first() + if task: + old_crontab = task.crontab + task.delete() + _cleanup_orphaned_crontab(old_crontab) + logger.info("Backup schedule disabled, removed periodic task") + return + + # Get old crontab before creating new one + old_crontab = None + try: + old_task = PeriodicTask.objects.get(name=BACKUP_SCHEDULE_TASK_NAME) + old_crontab = old_task.crontab + except PeriodicTask.DoesNotExist: + pass + + # Check if using cron expression (advanced mode) + if settings["cron_expression"]: + # Parse cron expression: "minute hour day month weekday" + try: + parts = settings["cron_expression"].split() + if len(parts) != 5: + raise ValueError("Cron expression must have 5 parts: minute hour day month weekday") + + minute, hour, day_of_month, month_of_year, day_of_week = parts + + crontab, _ = CrontabSchedule.objects.get_or_create( + minute=minute, + hour=hour, + day_of_week=day_of_week, + day_of_month=day_of_month, + month_of_year=month_of_year, + timezone=CoreSettings.get_system_time_zone(), + ) + except Exception as e: + logger.error(f"Invalid cron expression '{settings['cron_expression']}': {e}") + raise ValueError(f"Invalid cron expression: {e}") + else: + # Use simple frequency-based scheduling + # Parse time + hour, minute = settings["time"].split(":") + + # Build crontab based on frequency + system_tz = 
CoreSettings.get_system_time_zone() + if settings["frequency"] == "daily": + crontab, _ = CrontabSchedule.objects.get_or_create( + minute=minute, + hour=hour, + day_of_week="*", + day_of_month="*", + month_of_year="*", + timezone=system_tz, + ) + else: # weekly + crontab, _ = CrontabSchedule.objects.get_or_create( + minute=minute, + hour=hour, + day_of_week=str(settings["day_of_week"]), + day_of_month="*", + month_of_year="*", + timezone=system_tz, + ) + + # Create or update the periodic task + task, created = PeriodicTask.objects.update_or_create( + name=BACKUP_SCHEDULE_TASK_NAME, + defaults={ + "task": "apps.backups.tasks.scheduled_backup_task", + "crontab": crontab, + "enabled": True, + "kwargs": json.dumps({"retention_count": settings["retention_count"]}), + }, + ) + + # Clean up old crontab if it changed and is orphaned + if old_crontab and old_crontab.id != crontab.id: + _cleanup_orphaned_crontab(old_crontab) + + action = "Created" if created else "Updated" + logger.info(f"{action} backup schedule: {settings['frequency']} at {settings['time']}") + + +def _cleanup_orphaned_crontab(crontab_schedule): + """Delete old CrontabSchedule if no other tasks are using it.""" + if crontab_schedule is None: + return + + # Check if any other tasks are using this crontab + if PeriodicTask.objects.filter(crontab=crontab_schedule).exists(): + logger.debug(f"CrontabSchedule {crontab_schedule.id} still in use, not deleting") + return + + logger.debug(f"Cleaning up orphaned CrontabSchedule: {crontab_schedule.id}") + crontab_schedule.delete() diff --git a/apps/backups/services.py b/apps/backups/services.py new file mode 100644 index 00000000..b638e701 --- /dev/null +++ b/apps/backups/services.py @@ -0,0 +1,350 @@ +import datetime +import json +import os +import shutil +import subprocess +import tempfile +from pathlib import Path +from zipfile import ZipFile, ZIP_DEFLATED +import logging +import pytz + +from django.conf import settings +from core.models import CoreSettings + +logger = logging.getLogger(__name__) + + +def get_backup_dir() -> Path: + """Get the backup directory, creating it if necessary.""" + backup_dir = Path(settings.BACKUP_ROOT) + backup_dir.mkdir(parents=True, exist_ok=True) + return backup_dir + + +def _is_postgresql() -> bool: + """Check if we're using PostgreSQL.""" + return settings.DATABASES["default"]["ENGINE"] == "django.db.backends.postgresql" + + +def _get_pg_env() -> dict: + """Get environment variables for PostgreSQL commands.""" + db_config = settings.DATABASES["default"] + env = os.environ.copy() + env["PGPASSWORD"] = db_config.get("PASSWORD", "") + return env + + +def _get_pg_args() -> list[str]: + """Get common PostgreSQL command arguments.""" + db_config = settings.DATABASES["default"] + return [ + "-h", db_config.get("HOST", "localhost"), + "-p", str(db_config.get("PORT", 5432)), + "-U", db_config.get("USER", "postgres"), + "-d", db_config.get("NAME", "dispatcharr"), + ] + + +def _dump_postgresql(output_file: Path) -> None: + """Dump PostgreSQL database using pg_dump.""" + logger.info("Dumping PostgreSQL database with pg_dump...") + + cmd = [ + "pg_dump", + *_get_pg_args(), + "-Fc", # Custom format for pg_restore + "-v", # Verbose + "-f", str(output_file), + ] + + result = subprocess.run( + cmd, + env=_get_pg_env(), + capture_output=True, + text=True, + ) + + if result.returncode != 0: + logger.error(f"pg_dump failed: {result.stderr}") + raise RuntimeError(f"pg_dump failed: {result.stderr}") + + logger.debug(f"pg_dump output: {result.stderr}") + + +def 
_clean_postgresql_schema() -> None: + """Drop and recreate the public schema to ensure a completely clean restore.""" + logger.info("[PG_CLEAN] Dropping and recreating public schema...") + + # Commands to drop and recreate schema + sql_commands = "DROP SCHEMA IF EXISTS public CASCADE; CREATE SCHEMA public; GRANT ALL ON SCHEMA public TO public;" + + cmd = [ + "psql", + *_get_pg_args(), + "-c", sql_commands, + ] + + result = subprocess.run( + cmd, + env=_get_pg_env(), + capture_output=True, + text=True, + ) + + if result.returncode != 0: + logger.error(f"[PG_CLEAN] Failed to clean schema: {result.stderr}") + raise RuntimeError(f"Failed to clean PostgreSQL schema: {result.stderr}") + + logger.info("[PG_CLEAN] Schema cleaned successfully") + + +def _restore_postgresql(dump_file: Path) -> None: + """Restore PostgreSQL database using pg_restore.""" + logger.info("[PG_RESTORE] Starting pg_restore...") + logger.info(f"[PG_RESTORE] Dump file: {dump_file}") + + # Drop and recreate schema to ensure a completely clean restore + _clean_postgresql_schema() + + pg_args = _get_pg_args() + logger.info(f"[PG_RESTORE] Connection args: {pg_args}") + + cmd = [ + "pg_restore", + "--no-owner", # Skip ownership commands (we already created schema) + *pg_args, + "-v", # Verbose + str(dump_file), + ] + + logger.info(f"[PG_RESTORE] Running command: {' '.join(cmd)}") + + result = subprocess.run( + cmd, + env=_get_pg_env(), + capture_output=True, + text=True, + ) + + logger.info(f"[PG_RESTORE] Return code: {result.returncode}") + + # pg_restore may return non-zero even on partial success + # Check for actual errors vs warnings + if result.returncode != 0: + # Some errors during restore are expected (e.g., "does not exist" when cleaning) + # Only fail on critical errors + stderr = result.stderr.lower() + if "fatal" in stderr or "could not connect" in stderr: + logger.error(f"[PG_RESTORE] Failed critically: {result.stderr}") + raise RuntimeError(f"pg_restore failed: {result.stderr}") + else: + logger.warning(f"[PG_RESTORE] Completed with warnings: {result.stderr[:500]}...") + + logger.info("[PG_RESTORE] Completed successfully") + + +def _dump_sqlite(output_file: Path) -> None: + """Dump SQLite database using sqlite3 .backup command.""" + logger.info("Dumping SQLite database with sqlite3 .backup...") + db_path = Path(settings.DATABASES["default"]["NAME"]) + + if not db_path.exists(): + raise FileNotFoundError(f"SQLite database not found: {db_path}") + + # Use sqlite3 .backup command via stdin for reliable execution + result = subprocess.run( + ["sqlite3", str(db_path)], + input=f".backup '{output_file}'\n", + capture_output=True, + text=True, + ) + + if result.returncode != 0: + logger.error(f"sqlite3 backup failed: {result.stderr}") + raise RuntimeError(f"sqlite3 backup failed: {result.stderr}") + + # Verify the backup file was created + if not output_file.exists(): + raise RuntimeError("sqlite3 backup failed: output file not created") + + logger.info(f"sqlite3 backup completed successfully: {output_file}") + + +def _restore_sqlite(dump_file: Path) -> None: + """Restore SQLite database by replacing the database file.""" + logger.info("Restoring SQLite database...") + db_path = Path(settings.DATABASES["default"]["NAME"]) + backup_current = None + + # Backup current database before overwriting + if db_path.exists(): + backup_current = db_path.with_suffix(".db.bak") + shutil.copy2(db_path, backup_current) + logger.info(f"Backed up current database to {backup_current}") + + # Ensure parent directory exists + 
db_path.parent.mkdir(parents=True, exist_ok=True) + + # The backup file from _dump_sqlite is a complete SQLite database file + # We can simply copy it over the existing database + shutil.copy2(dump_file, db_path) + + # Verify the restore worked by checking if sqlite3 can read it + result = subprocess.run( + ["sqlite3", str(db_path)], + input=".tables\n", + capture_output=True, + text=True, + ) + + if result.returncode != 0: + logger.error(f"sqlite3 verification failed: {result.stderr}") + # Try to restore from backup + if backup_current and backup_current.exists(): + shutil.copy2(backup_current, db_path) + logger.info("Restored original database from backup") + raise RuntimeError(f"sqlite3 restore verification failed: {result.stderr}") + + logger.info("sqlite3 restore completed successfully") + + +def create_backup() -> Path: + """ + Create a backup archive containing database dump and data directories. + Returns the path to the created backup file. + """ + backup_dir = get_backup_dir() + + # Use system timezone for filename (user-friendly), but keep internal timestamps as UTC + system_tz_name = CoreSettings.get_system_time_zone() + try: + system_tz = pytz.timezone(system_tz_name) + now_local = datetime.datetime.now(datetime.UTC).astimezone(system_tz) + timestamp = now_local.strftime("%Y.%m.%d.%H.%M.%S") + except Exception as e: + logger.warning(f"Failed to use system timezone {system_tz_name}: {e}, falling back to UTC") + timestamp = datetime.datetime.now(datetime.UTC).strftime("%Y.%m.%d.%H.%M.%S") + + backup_name = f"dispatcharr-backup-{timestamp}.zip" + backup_file = backup_dir / backup_name + + logger.info(f"Creating backup: {backup_name}") + + with tempfile.TemporaryDirectory(prefix="dispatcharr-backup-") as temp_dir: + temp_path = Path(temp_dir) + + # Determine database type and dump accordingly + if _is_postgresql(): + db_dump_file = temp_path / "database.dump" + _dump_postgresql(db_dump_file) + db_type = "postgresql" + else: + db_dump_file = temp_path / "database.sqlite3" + _dump_sqlite(db_dump_file) + db_type = "sqlite" + + # Create ZIP archive with compression and ZIP64 support for large files + with ZipFile(backup_file, "w", compression=ZIP_DEFLATED, allowZip64=True) as zip_file: + # Add database dump + zip_file.write(db_dump_file, db_dump_file.name) + + # Add metadata + metadata = { + "format": "dispatcharr-backup", + "version": 2, + "database_type": db_type, + "database_file": db_dump_file.name, + "created_at": datetime.datetime.now(datetime.UTC).isoformat(), + } + zip_file.writestr("metadata.json", json.dumps(metadata, indent=2)) + + logger.info(f"Backup created successfully: {backup_file}") + return backup_file + + +def restore_backup(backup_file: Path) -> None: + """ + Restore from a backup archive. + WARNING: This will overwrite the database! 
+ """ + if not backup_file.exists(): + raise FileNotFoundError(f"Backup file not found: {backup_file}") + + logger.info(f"Restoring from backup: {backup_file}") + + with tempfile.TemporaryDirectory(prefix="dispatcharr-restore-") as temp_dir: + temp_path = Path(temp_dir) + + # Extract backup + logger.debug("Extracting backup archive...") + with ZipFile(backup_file, "r") as zip_file: + zip_file.extractall(temp_path) + + # Read metadata + metadata_file = temp_path / "metadata.json" + if not metadata_file.exists(): + raise ValueError("Invalid backup: missing metadata.json") + + with open(metadata_file) as f: + metadata = json.load(f) + + # Restore database + _restore_database(temp_path, metadata) + + logger.info("Restore completed successfully") + + +def _restore_database(temp_path: Path, metadata: dict) -> None: + """Restore database from backup.""" + db_type = metadata.get("database_type", "postgresql") + db_file = metadata.get("database_file", "database.dump") + dump_file = temp_path / db_file + + if not dump_file.exists(): + raise ValueError(f"Invalid backup: missing {db_file}") + + current_db_type = "postgresql" if _is_postgresql() else "sqlite" + + if db_type != current_db_type: + raise ValueError( + f"Database type mismatch: backup is {db_type}, " + f"but current database is {current_db_type}" + ) + + if db_type == "postgresql": + _restore_postgresql(dump_file) + else: + _restore_sqlite(dump_file) + + +def list_backups() -> list[dict]: + """List all available backup files with metadata.""" + backup_dir = get_backup_dir() + backups = [] + + for backup_file in sorted(backup_dir.glob("dispatcharr-backup-*.zip"), reverse=True): + # Use UTC timezone so frontend can convert to user's local time + created_time = datetime.datetime.fromtimestamp(backup_file.stat().st_mtime, datetime.UTC) + backups.append({ + "name": backup_file.name, + "size": backup_file.stat().st_size, + "created": created_time.isoformat(), + }) + + return backups + + +def delete_backup(filename: str) -> None: + """Delete a backup file.""" + backup_dir = get_backup_dir() + backup_file = backup_dir / filename + + if not backup_file.exists(): + raise FileNotFoundError(f"Backup file not found: {filename}") + + if not backup_file.is_file(): + raise ValueError(f"Invalid backup file: {filename}") + + backup_file.unlink() + logger.info(f"Deleted backup: {filename}") diff --git a/apps/backups/tasks.py b/apps/backups/tasks.py new file mode 100644 index 00000000..f531fef8 --- /dev/null +++ b/apps/backups/tasks.py @@ -0,0 +1,106 @@ +import logging +import traceback +from celery import shared_task + +from . import services + +logger = logging.getLogger(__name__) + + +def _cleanup_old_backups(retention_count: int) -> int: + """Delete old backups, keeping only the most recent N. 
Returns count deleted.""" + if retention_count <= 0: + return 0 + + backups = services.list_backups() + if len(backups) <= retention_count: + return 0 + + # Backups are sorted newest first, so delete from the end + to_delete = backups[retention_count:] + deleted = 0 + + for backup in to_delete: + try: + services.delete_backup(backup["name"]) + deleted += 1 + logger.info(f"[CLEANUP] Deleted old backup: {backup['name']}") + except Exception as e: + logger.error(f"[CLEANUP] Failed to delete {backup['name']}: {e}") + + return deleted + + +@shared_task(bind=True) +def create_backup_task(self): + """Celery task to create a backup asynchronously.""" + try: + logger.info(f"[BACKUP] Starting backup task {self.request.id}") + backup_file = services.create_backup() + logger.info(f"[BACKUP] Task {self.request.id} completed: {backup_file.name}") + return { + "status": "completed", + "filename": backup_file.name, + "size": backup_file.stat().st_size, + } + except Exception as e: + logger.error(f"[BACKUP] Task {self.request.id} failed: {str(e)}") + logger.error(f"[BACKUP] Traceback: {traceback.format_exc()}") + return { + "status": "failed", + "error": str(e), + } + + +@shared_task(bind=True) +def restore_backup_task(self, filename: str): + """Celery task to restore a backup asynchronously.""" + try: + logger.info(f"[RESTORE] Starting restore task {self.request.id} for {filename}") + backup_dir = services.get_backup_dir() + backup_file = backup_dir / filename + logger.info(f"[RESTORE] Backup file path: {backup_file}") + services.restore_backup(backup_file) + logger.info(f"[RESTORE] Task {self.request.id} completed successfully") + return { + "status": "completed", + "filename": filename, + } + except Exception as e: + logger.error(f"[RESTORE] Task {self.request.id} failed: {str(e)}") + logger.error(f"[RESTORE] Traceback: {traceback.format_exc()}") + return { + "status": "failed", + "error": str(e), + } + + +@shared_task(bind=True) +def scheduled_backup_task(self, retention_count: int = 0): + """Celery task for scheduled backups with optional retention cleanup.""" + try: + logger.info(f"[SCHEDULED] Starting scheduled backup task {self.request.id}") + + # Create backup + backup_file = services.create_backup() + logger.info(f"[SCHEDULED] Backup created: {backup_file.name}") + + # Cleanup old backups if retention is set + deleted = 0 + if retention_count > 0: + deleted = _cleanup_old_backups(retention_count) + logger.info(f"[SCHEDULED] Cleanup complete, deleted {deleted} old backup(s)") + + return { + "status": "completed", + "filename": backup_file.name, + "size": backup_file.stat().st_size, + "deleted_count": deleted, + } + except Exception as e: + logger.error(f"[SCHEDULED] Task {self.request.id} failed: {str(e)}") + logger.error(f"[SCHEDULED] Traceback: {traceback.format_exc()}") + return { + "status": "failed", + "error": str(e), + } diff --git a/apps/backups/tests.py b/apps/backups/tests.py new file mode 100644 index 00000000..dc8a5136 --- /dev/null +++ b/apps/backups/tests.py @@ -0,0 +1,1163 @@ +import json +import tempfile +from io import BytesIO +from pathlib import Path +from zipfile import ZipFile +from unittest.mock import patch, MagicMock + +from django.test import TestCase +from django.contrib.auth import get_user_model +from rest_framework.test import APIClient +from rest_framework_simplejwt.tokens import RefreshToken + +from . 
import services + +User = get_user_model() + + +class BackupServicesTestCase(TestCase): + """Test cases for backup services""" + + def setUp(self): + self.temp_backup_dir = tempfile.mkdtemp() + + def tearDown(self): + import shutil + if Path(self.temp_backup_dir).exists(): + shutil.rmtree(self.temp_backup_dir) + + @patch('apps.backups.services.settings') + def test_get_backup_dir_creates_directory(self, mock_settings): + """Test that get_backup_dir creates the directory if it doesn't exist""" + mock_settings.BACKUP_ROOT = self.temp_backup_dir + + with patch('apps.backups.services.Path') as mock_path: + mock_path_instance = MagicMock() + mock_path_instance.mkdir = MagicMock() + mock_path.return_value = mock_path_instance + + services.get_backup_dir() + mock_path_instance.mkdir.assert_called_once_with(parents=True, exist_ok=True) + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services._is_postgresql') + @patch('apps.backups.services._dump_sqlite') + def test_create_backup_success_sqlite(self, mock_dump_sqlite, mock_is_pg, mock_get_backup_dir): + """Test successful backup creation with SQLite""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + mock_is_pg.return_value = False + + # Mock SQLite dump to create a temp file + def mock_dump(output_file): + output_file.write_text("sqlite dump") + + mock_dump_sqlite.side_effect = mock_dump + + result = services.create_backup() + + self.assertIsInstance(result, Path) + self.assertTrue(result.exists()) + self.assertTrue(result.name.startswith('dispatcharr-backup-')) + self.assertTrue(result.name.endswith('.zip')) + + # Verify the backup contains expected files + with ZipFile(result, 'r') as zf: + names = zf.namelist() + self.assertIn('database.sqlite3', names) + self.assertIn('metadata.json', names) + + # Check metadata + metadata = json.loads(zf.read('metadata.json')) + self.assertEqual(metadata['version'], 2) + self.assertEqual(metadata['database_type'], 'sqlite') + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services._is_postgresql') + @patch('apps.backups.services._dump_postgresql') + def test_create_backup_success_postgresql(self, mock_dump_pg, mock_is_pg, mock_get_backup_dir): + """Test successful backup creation with PostgreSQL""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + mock_is_pg.return_value = True + + # Mock PostgreSQL dump to create a temp file + def mock_dump(output_file): + output_file.write_bytes(b"pg dump data") + + mock_dump_pg.side_effect = mock_dump + + result = services.create_backup() + + self.assertIsInstance(result, Path) + self.assertTrue(result.exists()) + + # Verify the backup contains expected files + with ZipFile(result, 'r') as zf: + names = zf.namelist() + self.assertIn('database.dump', names) + self.assertIn('metadata.json', names) + + # Check metadata + metadata = json.loads(zf.read('metadata.json')) + self.assertEqual(metadata['version'], 2) + self.assertEqual(metadata['database_type'], 'postgresql') + + @patch('apps.backups.services.get_backup_dir') + def test_list_backups_empty(self, mock_get_backup_dir): + """Test listing backups when none exist""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + result = services.list_backups() + + self.assertEqual(result, []) + + @patch('apps.backups.services.get_backup_dir') + def test_list_backups_with_files(self, mock_get_backup_dir): + """Test listing backups with existing backup files""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value 
= backup_dir + + # Create a fake backup file + test_backup = backup_dir / "dispatcharr-backup-2025.01.01.12.00.00.zip" + test_backup.write_text("fake backup content") + + result = services.list_backups() + + self.assertEqual(len(result), 1) + self.assertEqual(result[0]['name'], test_backup.name) + self.assertIn('size', result[0]) + self.assertIn('created', result[0]) + + @patch('apps.backups.services.get_backup_dir') + def test_delete_backup_success(self, mock_get_backup_dir): + """Test successful backup deletion""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + + # Create a fake backup file + test_backup = backup_dir / "dispatcharr-backup-test.zip" + test_backup.write_text("fake backup content") + + self.assertTrue(test_backup.exists()) + + services.delete_backup(test_backup.name) + + self.assertFalse(test_backup.exists()) + + @patch('apps.backups.services.get_backup_dir') + def test_delete_backup_not_found(self, mock_get_backup_dir): + """Test deleting a non-existent backup raises error""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + with self.assertRaises(FileNotFoundError): + services.delete_backup("nonexistent-backup.zip") + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services._is_postgresql') + @patch('apps.backups.services._restore_postgresql') + def test_restore_backup_postgresql(self, mock_restore_pg, mock_is_pg, mock_get_backup_dir): + """Test successful restoration of PostgreSQL backup""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_is_pg.return_value = True + + # Create PostgreSQL backup file + backup_file = backup_dir / "test-backup.zip" + with ZipFile(backup_file, 'w') as zf: + zf.writestr('database.dump', b'pg dump data') + zf.writestr('metadata.json', json.dumps({ + 'version': 2, + 'database_type': 'postgresql', + 'database_file': 'database.dump' + })) + + services.restore_backup(backup_file) + + mock_restore_pg.assert_called_once() + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services._is_postgresql') + @patch('apps.backups.services._restore_sqlite') + def test_restore_backup_sqlite(self, mock_restore_sqlite, mock_is_pg, mock_get_backup_dir): + """Test successful restoration of SQLite backup""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_is_pg.return_value = False + + # Create SQLite backup file + backup_file = backup_dir / "test-backup.zip" + with ZipFile(backup_file, 'w') as zf: + zf.writestr('database.sqlite3', 'sqlite data') + zf.writestr('metadata.json', json.dumps({ + 'version': 2, + 'database_type': 'sqlite', + 'database_file': 'database.sqlite3' + })) + + services.restore_backup(backup_file) + + mock_restore_sqlite.assert_called_once() + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services._is_postgresql') + def test_restore_backup_database_type_mismatch(self, mock_is_pg, mock_get_backup_dir): + """Test restore fails when database type doesn't match""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_is_pg.return_value = True # Current system is PostgreSQL + + # Create SQLite backup file + backup_file = backup_dir / "test-backup.zip" + with ZipFile(backup_file, 'w') as zf: + zf.writestr('database.sqlite3', 'sqlite data') + zf.writestr('metadata.json', json.dumps({ + 'version': 2, + 'database_type': 'sqlite', # Backup is SQLite + 'database_file': 'database.sqlite3' 
+ })) + + with self.assertRaises(ValueError) as context: + services.restore_backup(backup_file) + + self.assertIn('mismatch', str(context.exception).lower()) + + def test_restore_backup_not_found(self): + """Test restoring from non-existent backup file""" + fake_path = Path("/tmp/nonexistent-backup-12345.zip") + + with self.assertRaises(FileNotFoundError): + services.restore_backup(fake_path) + + @patch('apps.backups.services.get_backup_dir') + def test_restore_backup_missing_metadata(self, mock_get_backup_dir): + """Test restoring from backup without metadata.json""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + + # Create a backup file missing metadata.json + backup_file = backup_dir / "invalid-backup.zip" + with ZipFile(backup_file, 'w') as zf: + zf.writestr('database.dump', b'fake dump data') + + with self.assertRaises(ValueError) as context: + services.restore_backup(backup_file) + + self.assertIn('metadata.json', str(context.exception)) + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services._is_postgresql') + def test_restore_backup_missing_database(self, mock_is_pg, mock_get_backup_dir): + """Test restoring from backup missing database dump""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_is_pg.return_value = True + + # Create backup file missing database dump + backup_file = backup_dir / "invalid-backup.zip" + with ZipFile(backup_file, 'w') as zf: + zf.writestr('metadata.json', json.dumps({ + 'version': 2, + 'database_type': 'postgresql', + 'database_file': 'database.dump' + })) + + with self.assertRaises(ValueError) as context: + services.restore_backup(backup_file) + + self.assertIn('database.dump', str(context.exception)) + + +class BackupAPITestCase(TestCase): + """Test cases for backup API endpoints""" + + def setUp(self): + self.client = APIClient() + self.user = User.objects.create_user( + username='testuser', + email='test@example.com', + password='testpass123' + ) + self.admin_user = User.objects.create_superuser( + username='admin', + email='admin@example.com', + password='adminpass123' + ) + self.temp_backup_dir = tempfile.mkdtemp() + + def get_auth_header(self, user): + """Helper method to get JWT auth header for a user""" + refresh = RefreshToken.for_user(user) + return f'Bearer {str(refresh.access_token)}' + + def tearDown(self): + import shutil + if Path(self.temp_backup_dir).exists(): + shutil.rmtree(self.temp_backup_dir) + + def test_list_backups_requires_admin(self): + """Test that listing backups requires admin privileges""" + url = '/api/backups/' + + # Unauthenticated request + response = self.client.get(url) + self.assertIn(response.status_code, [401, 403]) + + # Regular user request + response = self.client.get(url, HTTP_AUTHORIZATION=self.get_auth_header(self.user)) + self.assertIn(response.status_code, [401, 403]) + + @patch('apps.backups.services.list_backups') + def test_list_backups_success(self, mock_list_backups): + """Test successful backup listing""" + mock_list_backups.return_value = [ + { + 'name': 'backup-test.zip', + 'size': 1024, + 'created': '2025-01-01T12:00:00' + } + ] + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertEqual(len(data), 1) + self.assertEqual(data[0]['name'], 'backup-test.zip') + + def test_create_backup_requires_admin(self): + 
"""Test that creating backups requires admin privileges""" + url = '/api/backups/create/' + + # Unauthenticated request + response = self.client.post(url) + self.assertIn(response.status_code, [401, 403]) + + # Regular user request + response = self.client.post(url, HTTP_AUTHORIZATION=self.get_auth_header(self.user)) + self.assertIn(response.status_code, [401, 403]) + + @patch('apps.backups.tasks.create_backup_task.delay') + def test_create_backup_success(self, mock_create_task): + """Test successful backup creation via API (async task)""" + mock_task = MagicMock() + mock_task.id = 'test-task-id-123' + mock_create_task.return_value = mock_task + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/create/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 202) + data = response.json() + self.assertIn('task_id', data) + self.assertIn('task_token', data) + self.assertEqual(data['task_id'], 'test-task-id-123') + + @patch('apps.backups.tasks.create_backup_task.delay') + def test_create_backup_failure(self, mock_create_task): + """Test backup creation failure handling""" + mock_create_task.side_effect = Exception("Failed to start task") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/create/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 500) + data = response.json() + self.assertIn('detail', data) + + @patch('apps.backups.services.get_backup_dir') + def test_download_backup_success(self, mock_get_backup_dir): + """Test successful backup download""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + + # Create a test backup file + backup_file = backup_dir / "test-backup.zip" + backup_file.write_text("test backup content") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/test-backup.zip/download/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + self.assertEqual(response['Content-Type'], 'application/zip') + + @patch('apps.backups.services.get_backup_dir') + def test_download_backup_not_found(self, mock_get_backup_dir): + """Test downloading non-existent backup""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/nonexistent.zip/download/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 404) + + @patch('apps.backups.services.delete_backup') + def test_delete_backup_success(self, mock_delete_backup): + """Test successful backup deletion via API""" + mock_delete_backup.return_value = None + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/test-backup.zip/delete/' + response = self.client.delete(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 204) + mock_delete_backup.assert_called_once_with('test-backup.zip') + + @patch('apps.backups.services.delete_backup') + def test_delete_backup_not_found(self, mock_delete_backup): + """Test deleting non-existent backup via API""" + mock_delete_backup.side_effect = FileNotFoundError("Not found") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/nonexistent.zip/delete/' + response = self.client.delete(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 404) + + def 
test_upload_backup_requires_file(self): + """Test that upload requires a file""" + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/upload/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 400) + data = response.json() + self.assertIn('No file uploaded', data['detail']) + + @patch('apps.backups.services.get_backup_dir') + def test_upload_backup_success(self, mock_get_backup_dir): + """Test successful backup upload""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + # Create a fake backup file + fake_backup = BytesIO(b"fake backup content") + fake_backup.name = 'uploaded-backup.zip' + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/upload/' + response = self.client.post(url, {'file': fake_backup}, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 201) + data = response.json() + self.assertIn('filename', data) + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.tasks.restore_backup_task.delay') + def test_restore_backup_success(self, mock_restore_task, mock_get_backup_dir): + """Test successful backup restoration via API (async task)""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + + mock_task = MagicMock() + mock_task.id = 'test-restore-task-456' + mock_restore_task.return_value = mock_task + + # Create a test backup file + backup_file = backup_dir / "test-backup.zip" + backup_file.write_text("test backup content") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/test-backup.zip/restore/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 202) + data = response.json() + self.assertIn('task_id', data) + self.assertIn('task_token', data) + self.assertEqual(data['task_id'], 'test-restore-task-456') + + @patch('apps.backups.services.get_backup_dir') + def test_restore_backup_not_found(self, mock_get_backup_dir): + """Test restoring from non-existent backup via API""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/nonexistent.zip/restore/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 404) + + # --- Backup Status Endpoint Tests --- + + def test_backup_status_requires_auth_or_token(self): + """Test that backup_status requires auth or valid token""" + url = '/api/backups/status/fake-task-id/' + + # Unauthenticated request without token + response = self.client.get(url) + self.assertEqual(response.status_code, 401) + + def test_backup_status_invalid_token(self): + """Test that backup_status rejects invalid tokens""" + url = '/api/backups/status/fake-task-id/?token=invalid-token' + response = self.client.get(url) + self.assertEqual(response.status_code, 403) + + @patch('apps.backups.api_views.AsyncResult') + def test_backup_status_with_admin_auth(self, mock_async_result): + """Test backup_status with admin authentication""" + mock_result = MagicMock() + mock_result.ready.return_value = False + mock_result.failed.return_value = False + mock_result.state = 'PENDING' + mock_async_result.return_value = mock_result + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/status/test-task-id/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + data = 
response.json() + self.assertEqual(data['state'], 'pending') + + @patch('apps.backups.api_views.AsyncResult') + @patch('apps.backups.api_views._verify_task_token') + def test_backup_status_with_valid_token(self, mock_verify, mock_async_result): + """Test backup_status with valid token""" + mock_verify.return_value = True + mock_result = MagicMock() + mock_result.ready.return_value = True + mock_result.get.return_value = {'status': 'completed', 'filename': 'test.zip'} + mock_async_result.return_value = mock_result + + url = '/api/backups/status/test-task-id/?token=valid-token' + response = self.client.get(url) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertEqual(data['state'], 'completed') + + @patch('apps.backups.api_views.AsyncResult') + def test_backup_status_task_failed(self, mock_async_result): + """Test backup_status when task failed""" + mock_result = MagicMock() + mock_result.ready.return_value = True + mock_result.get.return_value = {'status': 'failed', 'error': 'Something went wrong'} + mock_async_result.return_value = mock_result + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/status/test-task-id/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertEqual(data['state'], 'failed') + self.assertIn('Something went wrong', data['error']) + + # --- Download Token Endpoint Tests --- + + def test_get_download_token_requires_admin(self): + """Test that get_download_token requires admin privileges""" + url = '/api/backups/test.zip/download-token/' + + response = self.client.get(url) + self.assertIn(response.status_code, [401, 403]) + + response = self.client.get(url, HTTP_AUTHORIZATION=self.get_auth_header(self.user)) + self.assertIn(response.status_code, [401, 403]) + + @patch('apps.backups.services.get_backup_dir') + def test_get_download_token_success(self, mock_get_backup_dir): + """Test successful download token generation""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + + # Create a test backup file + backup_file = backup_dir / "test-backup.zip" + backup_file.write_text("test content") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/test-backup.zip/download-token/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertIn('token', data) + self.assertEqual(len(data['token']), 32) + + @patch('apps.backups.services.get_backup_dir') + def test_get_download_token_not_found(self, mock_get_backup_dir): + """Test download token for non-existent file""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/nonexistent.zip/download-token/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 404) + + # --- Download with Token Auth Tests --- + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.api_views._verify_task_token') + def test_download_backup_with_valid_token(self, mock_verify, mock_get_backup_dir): + """Test downloading backup with valid token (no auth header)""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_verify.return_value = True + + # Create a test backup file + backup_file = backup_dir / "test-backup.zip" + 
backup_file.write_text("test backup content") + + url = '/api/backups/test-backup.zip/download/?token=valid-token' + response = self.client.get(url) + + self.assertEqual(response.status_code, 200) + + @patch('apps.backups.services.get_backup_dir') + def test_download_backup_invalid_token(self, mock_get_backup_dir): + """Test downloading backup with invalid token""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + url = '/api/backups/test-backup.zip/download/?token=invalid-token' + response = self.client.get(url) + + self.assertEqual(response.status_code, 403) + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.tasks.restore_backup_task.delay') + def test_restore_backup_task_start_failure(self, mock_restore_task, mock_get_backup_dir): + """Test restore task start failure via API""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_restore_task.side_effect = Exception("Failed to start restore task") + + # Create a test backup file + backup_file = backup_dir / "test-backup.zip" + backup_file.write_text("test content") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/test-backup.zip/restore/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 500) + data = response.json() + self.assertIn('detail', data) + + def test_get_schedule_requires_admin(self): + """Test that getting schedule requires admin privileges""" + url = '/api/backups/schedule/' + + # Unauthenticated request + response = self.client.get(url) + self.assertIn(response.status_code, [401, 403]) + + # Regular user request + response = self.client.get(url, HTTP_AUTHORIZATION=self.get_auth_header(self.user)) + self.assertIn(response.status_code, [401, 403]) + + @patch('apps.backups.api_views.get_schedule_settings') + def test_get_schedule_success(self, mock_get_settings): + """Test successful schedule retrieval""" + mock_get_settings.return_value = { + 'enabled': True, + 'frequency': 'daily', + 'time': '03:00', + 'day_of_week': 0, + 'retention_count': 5, + 'cron_expression': '', + } + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/schedule/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertEqual(data['enabled'], True) + self.assertEqual(data['frequency'], 'daily') + self.assertEqual(data['retention_count'], 5) + + def test_update_schedule_requires_admin(self): + """Test that updating schedule requires admin privileges""" + url = '/api/backups/schedule/update/' + + # Unauthenticated request + response = self.client.put(url, {}, content_type='application/json') + self.assertIn(response.status_code, [401, 403]) + + # Regular user request + response = self.client.put( + url, + {}, + content_type='application/json', + HTTP_AUTHORIZATION=self.get_auth_header(self.user) + ) + self.assertIn(response.status_code, [401, 403]) + + @patch('apps.backups.api_views.update_schedule_settings') + def test_update_schedule_success(self, mock_update_settings): + """Test successful schedule update""" + mock_update_settings.return_value = { + 'enabled': True, + 'frequency': 'weekly', + 'time': '02:00', + 'day_of_week': 1, + 'retention_count': 10, + 'cron_expression': '', + } + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/schedule/update/' + response = self.client.put( + url, + {'enabled': True, 'frequency': 'weekly', 'time': 
'02:00', 'day_of_week': 1, 'retention_count': 10}, + content_type='application/json', + HTTP_AUTHORIZATION=auth_header + ) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertEqual(data['frequency'], 'weekly') + self.assertEqual(data['day_of_week'], 1) + + @patch('apps.backups.api_views.update_schedule_settings') + def test_update_schedule_validation_error(self, mock_update_settings): + """Test schedule update with invalid data""" + mock_update_settings.side_effect = ValueError("frequency must be 'daily' or 'weekly'") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/schedule/update/' + response = self.client.put( + url, + {'frequency': 'invalid'}, + content_type='application/json', + HTTP_AUTHORIZATION=auth_header + ) + + self.assertEqual(response.status_code, 400) + data = response.json() + self.assertIn('frequency', data['detail']) + + +class BackupSchedulerTestCase(TestCase): + """Test cases for backup scheduler""" + + databases = {'default'} + + @classmethod + def setUpClass(cls): + pass + + @classmethod + def tearDownClass(cls): + pass + + def setUp(self): + from core.models import CoreSettings + # Clean up any existing settings + CoreSettings.objects.filter(key__startswith='backup_').delete() + + def tearDown(self): + from core.models import CoreSettings + from django_celery_beat.models import PeriodicTask + CoreSettings.objects.filter(key__startswith='backup_').delete() + PeriodicTask.objects.filter(name='backup-scheduled-task').delete() + + def test_get_schedule_settings_defaults(self): + """Test that get_schedule_settings returns defaults when no settings exist""" + from . import scheduler + + settings = scheduler.get_schedule_settings() + + self.assertEqual(settings['enabled'], False) + self.assertEqual(settings['frequency'], 'daily') + self.assertEqual(settings['time'], '03:00') + self.assertEqual(settings['day_of_week'], 0) + self.assertEqual(settings['retention_count'], 0) + self.assertEqual(settings['cron_expression'], '') + + def test_update_schedule_settings_stores_values(self): + """Test that update_schedule_settings stores values correctly""" + from . import scheduler + + result = scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'weekly', + 'time': '04:30', + 'day_of_week': 3, + 'retention_count': 7, + }) + + self.assertEqual(result['enabled'], True) + self.assertEqual(result['frequency'], 'weekly') + self.assertEqual(result['time'], '04:30') + self.assertEqual(result['day_of_week'], 3) + self.assertEqual(result['retention_count'], 7) + + # Verify persistence + settings = scheduler.get_schedule_settings() + self.assertEqual(settings['enabled'], True) + self.assertEqual(settings['frequency'], 'weekly') + + def test_update_schedule_settings_invalid_frequency(self): + """Test that invalid frequency raises ValueError""" + from . import scheduler + + with self.assertRaises(ValueError) as context: + scheduler.update_schedule_settings({'frequency': 'monthly'}) + + self.assertIn('frequency', str(context.exception).lower()) + + def test_update_schedule_settings_invalid_time(self): + """Test that invalid time raises ValueError""" + from . import scheduler + + with self.assertRaises(ValueError) as context: + scheduler.update_schedule_settings({'time': 'invalid'}) + + self.assertIn('HH:MM', str(context.exception)) + + def test_update_schedule_settings_invalid_day_of_week(self): + """Test that invalid day_of_week raises ValueError""" + from . 
import scheduler + + with self.assertRaises(ValueError) as context: + scheduler.update_schedule_settings({'day_of_week': 7}) + + self.assertIn('day_of_week', str(context.exception).lower()) + + def test_update_schedule_settings_invalid_retention(self): + """Test that negative retention_count raises ValueError""" + from . import scheduler + + with self.assertRaises(ValueError) as context: + scheduler.update_schedule_settings({'retention_count': -1}) + + self.assertIn('retention_count', str(context.exception).lower()) + + def test_sync_creates_periodic_task_when_enabled(self): + """Test that enabling schedule creates a PeriodicTask""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '05:00', + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + self.assertTrue(task.enabled) + self.assertEqual(task.crontab.hour, '05') + self.assertEqual(task.crontab.minute, '00') + + def test_sync_deletes_periodic_task_when_disabled(self): + """Test that disabling schedule removes PeriodicTask""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + + # First enable + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '05:00', + }) + + self.assertTrue(PeriodicTask.objects.filter(name='backup-scheduled-task').exists()) + + # Then disable + scheduler.update_schedule_settings({'enabled': False}) + + self.assertFalse(PeriodicTask.objects.filter(name='backup-scheduled-task').exists()) + + def test_weekly_schedule_sets_day_of_week(self): + """Test that weekly schedule sets correct day_of_week in crontab""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'weekly', + 'time': '06:00', + 'day_of_week': 3, # Wednesday + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + self.assertEqual(task.crontab.day_of_week, '3') + + def test_cron_expression_stores_value(self): + """Test that cron_expression is stored and retrieved correctly""" + from . import scheduler + + result = scheduler.update_schedule_settings({ + 'enabled': True, + 'cron_expression': '*/5 * * * *', + }) + + self.assertEqual(result['cron_expression'], '*/5 * * * *') + + # Verify persistence + settings = scheduler.get_schedule_settings() + self.assertEqual(settings['cron_expression'], '*/5 * * * *') + + def test_cron_expression_creates_correct_schedule(self): + """Test that cron expression creates correct CrontabSchedule""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + + scheduler.update_schedule_settings({ + 'enabled': True, + 'cron_expression': '*/15 2 * * 1-5', # Every 15 mins during 2 AM hour on weekdays + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + self.assertEqual(task.crontab.minute, '*/15') + self.assertEqual(task.crontab.hour, '2') + self.assertEqual(task.crontab.day_of_month, '*') + self.assertEqual(task.crontab.month_of_year, '*') + self.assertEqual(task.crontab.day_of_week, '1-5') + + def test_cron_expression_invalid_format(self): + """Test that invalid cron expression raises ValueError""" + from . 
import scheduler + + # Too few parts + with self.assertRaises(ValueError) as context: + scheduler.update_schedule_settings({ + 'enabled': True, + 'cron_expression': '0 3 *', + }) + self.assertIn('5 parts', str(context.exception)) + + def test_cron_expression_empty_uses_simple_mode(self): + """Test that empty cron_expression falls back to simple frequency mode""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '04:00', + 'cron_expression': '', # Empty, should use simple mode + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + self.assertEqual(task.crontab.minute, '00') + self.assertEqual(task.crontab.hour, '04') + self.assertEqual(task.crontab.day_of_week, '*') + + def test_cron_expression_overrides_simple_settings(self): + """Test that cron_expression takes precedence over frequency/time""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '03:00', + 'cron_expression': '0 */6 * * *', # Every 6 hours (should override daily at 3 AM) + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + self.assertEqual(task.crontab.minute, '0') + self.assertEqual(task.crontab.hour, '*/6') + self.assertEqual(task.crontab.day_of_week, '*') + + def test_periodic_task_uses_system_timezone(self): + """Test that CrontabSchedule is created with the system timezone""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + from core.models import CoreSettings + + original_tz = CoreSettings.get_system_time_zone() + + try: + # Set a non-UTC timezone + CoreSettings.set_system_time_zone('America/New_York') + + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '03:00', + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + self.assertEqual(str(task.crontab.timezone), 'America/New_York') + finally: + scheduler.update_schedule_settings({'enabled': False}) + CoreSettings.set_system_time_zone(original_tz) + + def test_periodic_task_timezone_updates_with_schedule(self): + """Test that CrontabSchedule timezone is updated when schedule is modified""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + from core.models import CoreSettings + + original_tz = CoreSettings.get_system_time_zone() + + try: + # Create initial schedule with one timezone + CoreSettings.set_system_time_zone('America/Los_Angeles') + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '02:00', + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + self.assertEqual(str(task.crontab.timezone), 'America/Los_Angeles') + + # Change system timezone and update schedule + CoreSettings.set_system_time_zone('Europe/London') + scheduler.update_schedule_settings({ + 'enabled': True, + 'time': '04:00', + }) + + task.refresh_from_db() + self.assertEqual(str(task.crontab.timezone), 'Europe/London') + finally: + scheduler.update_schedule_settings({'enabled': False}) + CoreSettings.set_system_time_zone(original_tz) + + def test_orphaned_crontab_cleanup(self): + """Test that old CrontabSchedule is deleted when schedule changes""" + from . 
import scheduler + from django_celery_beat.models import PeriodicTask, CrontabSchedule + + # Create initial daily schedule + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '03:00', + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + first_crontab_id = task.crontab.id + initial_count = CrontabSchedule.objects.count() + + # Change to weekly schedule (different crontab) + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'weekly', + 'day_of_week': 3, + 'time': '03:00', + }) + + task.refresh_from_db() + second_crontab_id = task.crontab.id + + # Verify old crontab was deleted + self.assertNotEqual(first_crontab_id, second_crontab_id) + self.assertFalse(CrontabSchedule.objects.filter(id=first_crontab_id).exists()) + self.assertEqual(CrontabSchedule.objects.count(), initial_count) + + # Cleanup + scheduler.update_schedule_settings({'enabled': False}) + + +class BackupTasksTestCase(TestCase): + """Test cases for backup Celery tasks""" + + def setUp(self): + self.temp_backup_dir = tempfile.mkdtemp() + + def tearDown(self): + import shutil + if Path(self.temp_backup_dir).exists(): + shutil.rmtree(self.temp_backup_dir) + + @patch('apps.backups.tasks.services.list_backups') + @patch('apps.backups.tasks.services.delete_backup') + def test_cleanup_old_backups_keeps_recent(self, mock_delete, mock_list): + """Test that cleanup keeps the most recent backups""" + from .tasks import _cleanup_old_backups + + mock_list.return_value = [ + {'name': 'backup-3.zip'}, # newest + {'name': 'backup-2.zip'}, + {'name': 'backup-1.zip'}, # oldest + ] + + deleted = _cleanup_old_backups(retention_count=2) + + self.assertEqual(deleted, 1) + mock_delete.assert_called_once_with('backup-1.zip') + + @patch('apps.backups.tasks.services.list_backups') + @patch('apps.backups.tasks.services.delete_backup') + def test_cleanup_old_backups_does_nothing_when_under_limit(self, mock_delete, mock_list): + """Test that cleanup does nothing when under retention limit""" + from .tasks import _cleanup_old_backups + + mock_list.return_value = [ + {'name': 'backup-2.zip'}, + {'name': 'backup-1.zip'}, + ] + + deleted = _cleanup_old_backups(retention_count=5) + + self.assertEqual(deleted, 0) + mock_delete.assert_not_called() + + @patch('apps.backups.tasks.services.list_backups') + @patch('apps.backups.tasks.services.delete_backup') + def test_cleanup_old_backups_zero_retention_keeps_all(self, mock_delete, mock_list): + """Test that retention_count=0 keeps all backups""" + from .tasks import _cleanup_old_backups + + mock_list.return_value = [ + {'name': 'backup-3.zip'}, + {'name': 'backup-2.zip'}, + {'name': 'backup-1.zip'}, + ] + + deleted = _cleanup_old_backups(retention_count=0) + + self.assertEqual(deleted, 0) + mock_delete.assert_not_called() + + @patch('apps.backups.tasks.services.create_backup') + @patch('apps.backups.tasks._cleanup_old_backups') + def test_scheduled_backup_task_success(self, mock_cleanup, mock_create): + """Test scheduled backup task success""" + from .tasks import scheduled_backup_task + + mock_backup_file = MagicMock() + mock_backup_file.name = 'scheduled-backup.zip' + mock_backup_file.stat.return_value.st_size = 1024 + mock_create.return_value = mock_backup_file + mock_cleanup.return_value = 2 + + result = scheduled_backup_task(retention_count=5) + + self.assertEqual(result['status'], 'completed') + self.assertEqual(result['filename'], 'scheduled-backup.zip') + self.assertEqual(result['size'], 1024) + 
self.assertEqual(result['deleted_count'], 2) + mock_cleanup.assert_called_once_with(5) + + @patch('apps.backups.tasks.services.create_backup') + @patch('apps.backups.tasks._cleanup_old_backups') + def test_scheduled_backup_task_no_cleanup_when_retention_zero(self, mock_cleanup, mock_create): + """Test scheduled backup skips cleanup when retention is 0""" + from .tasks import scheduled_backup_task + + mock_backup_file = MagicMock() + mock_backup_file.name = 'scheduled-backup.zip' + mock_backup_file.stat.return_value.st_size = 1024 + mock_create.return_value = mock_backup_file + + result = scheduled_backup_task(retention_count=0) + + self.assertEqual(result['status'], 'completed') + self.assertEqual(result['deleted_count'], 0) + mock_cleanup.assert_not_called() + + @patch('apps.backups.tasks.services.create_backup') + def test_scheduled_backup_task_failure(self, mock_create): + """Test scheduled backup task handles failure""" + from .tasks import scheduled_backup_task + + mock_create.side_effect = Exception("Backup failed") + + result = scheduled_backup_task(retention_count=5) + + self.assertEqual(result['status'], 'failed') + self.assertIn('Backup failed', result['error']) diff --git a/apps/channels/api_urls.py b/apps/channels/api_urls.py index 7999abd9..bd53ae45 100644 --- a/apps/channels/api_urls.py +++ b/apps/channels/api_urls.py @@ -47,7 +47,7 @@ urlpatterns = [ path('series-rules/', SeriesRulesAPIView.as_view(), name='series_rules'), path('series-rules/evaluate/', EvaluateSeriesRulesAPIView.as_view(), name='evaluate_series_rules'), path('series-rules/bulk-remove/', BulkRemoveSeriesRecordingsAPIView.as_view(), name='bulk_remove_series_recordings'), - path('series-rules//', DeleteSeriesRuleAPIView.as_view(), name='delete_series_rule'), + path('series-rules//', DeleteSeriesRuleAPIView.as_view(), name='delete_series_rule'), path('recordings/bulk-delete-upcoming/', BulkDeleteUpcomingRecordingsAPIView.as_view(), name='bulk_delete_upcoming_recordings'), path('dvr/comskip-config/', ComskipConfigAPIView.as_view(), name='comskip_config'), ] diff --git a/apps/channels/api_views.py b/apps/channels/api_views.py index 862de7f9..c2ba7a06 100644 --- a/apps/channels/api_views.py +++ b/apps/channels/api_views.py @@ -8,7 +8,10 @@ from drf_yasg.utils import swagger_auto_schema from drf_yasg import openapi from django.shortcuts import get_object_or_404, get_list_or_404 from django.db import transaction -import os, json, requests, logging +from django.db.models import Q +import os, json, requests, logging, mimetypes +from django.utils.http import http_date +from urllib.parse import unquote from apps.accounts.permissions import ( Authenticated, IsAdmin, @@ -124,10 +127,12 @@ class StreamViewSet(viewsets.ModelViewSet): filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter] filterset_class = StreamFilter search_fields = ["name", "channel_group__name"] - ordering_fields = ["name", "channel_group__name"] + ordering_fields = ["name", "channel_group__name", "m3u_account__name"] ordering = ["-name"] def get_permissions(self): + if self.action == "duplicate": + return [IsAdmin()] try: return [perm() for perm in permission_classes_by_action[self.action]] except KeyError: @@ -234,12 +239,8 @@ class ChannelGroupViewSet(viewsets.ModelViewSet): return [Authenticated()] def get_queryset(self): - """Add annotation for association counts""" - from django.db.models import Count - return ChannelGroup.objects.annotate( - channel_count=Count('channels', distinct=True), - m3u_account_count=Count('m3u_accounts', 
distinct=True) - ) + """Return channel groups with prefetched relations for efficient counting""" + return ChannelGroup.objects.prefetch_related('channels', 'm3u_accounts').all() def update(self, request, *args, **kwargs): """Override update to check M3U associations""" @@ -275,15 +276,20 @@ class ChannelGroupViewSet(viewsets.ModelViewSet): @action(detail=False, methods=["post"], url_path="cleanup") def cleanup_unused_groups(self, request): """Delete all channel groups with no channels or M3U account associations""" - from django.db.models import Count + from django.db.models import Q, Exists, OuterRef + + # Find groups with no channels and no M3U account associations using Exists subqueries + from .models import Channel, ChannelGroupM3UAccount + + has_channels = Channel.objects.filter(channel_group_id=OuterRef('pk')) + has_accounts = ChannelGroupM3UAccount.objects.filter(channel_group_id=OuterRef('pk')) - # Find groups with no channels and no M3U account associations unused_groups = ChannelGroup.objects.annotate( - channel_count=Count('channels', distinct=True), - m3u_account_count=Count('m3u_accounts', distinct=True) + has_channels=Exists(has_channels), + has_accounts=Exists(has_accounts) ).filter( - channel_count=0, - m3u_account_count=0 + has_channels=False, + has_accounts=False ) deleted_count = unused_groups.count() @@ -384,6 +390,72 @@ class ChannelViewSet(viewsets.ModelViewSet): ordering_fields = ["channel_number", "name", "channel_group__name"] ordering = ["-channel_number"] + def create(self, request, *args, **kwargs): + """Override create to handle channel profile membership""" + serializer = self.get_serializer(data=request.data) + serializer.is_valid(raise_exception=True) + + with transaction.atomic(): + channel = serializer.save() + + # Handle channel profile membership + # Semantics: + # - Omitted (None): add to ALL profiles (backward compatible default) + # - Empty array []: add to NO profiles + # - Sentinel [0] or 0: add to ALL profiles (explicit) + # - [1,2,...]: add to specified profile IDs only + channel_profile_ids = request.data.get("channel_profile_ids") + if channel_profile_ids is not None: + # Normalize single ID to array + if not isinstance(channel_profile_ids, list): + channel_profile_ids = [channel_profile_ids] + + # Determine action based on semantics + if channel_profile_ids is None: + # Omitted -> add to all profiles (backward compatible) + profiles = ChannelProfile.objects.all() + ChannelProfileMembership.objects.bulk_create([ + ChannelProfileMembership(channel_profile=profile, channel=channel, enabled=True) + for profile in profiles + ]) + elif isinstance(channel_profile_ids, list) and len(channel_profile_ids) == 0: + # Empty array -> add to no profiles + pass + elif isinstance(channel_profile_ids, list) and 0 in channel_profile_ids: + # Sentinel 0 -> add to all profiles (explicit) + profiles = ChannelProfile.objects.all() + ChannelProfileMembership.objects.bulk_create([ + ChannelProfileMembership(channel_profile=profile, channel=channel, enabled=True) + for profile in profiles + ]) + else: + # Specific profile IDs + try: + channel_profiles = ChannelProfile.objects.filter(id__in=channel_profile_ids) + if len(channel_profiles) != len(channel_profile_ids): + missing_ids = set(channel_profile_ids) - set(channel_profiles.values_list('id', flat=True)) + return Response( + {"error": f"Channel profiles with IDs {list(missing_ids)} not found"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + ChannelProfileMembership.objects.bulk_create([ + ChannelProfileMembership( + 
channel_profile=profile, + channel=channel, + enabled=True + ) + for profile in channel_profiles + ]) + except Exception as e: + return Response( + {"error": f"Error creating profile memberships: {str(e)}"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + headers = self.get_success_headers(serializer.data) + return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers) + def get_permissions(self): if self.action in [ "edit_bulk", @@ -419,10 +491,41 @@ class ChannelViewSet(viewsets.ModelViewSet): group_names = channel_group.split(",") qs = qs.filter(channel_group__name__in=group_names) - if self.request.user.user_level < 10: - qs = qs.filter(user_level__lte=self.request.user.user_level) + filters = {} + q_filters = Q() - return qs + channel_profile_id = self.request.query_params.get("channel_profile_id") + show_disabled_param = self.request.query_params.get("show_disabled", None) + only_streamless = self.request.query_params.get("only_streamless", None) + + if channel_profile_id: + try: + profile_id_int = int(channel_profile_id) + + if show_disabled_param is None: + # Show only enabled channels: channels that have a membership + # record for this profile with enabled=True + # Default is DISABLED (channels without membership are hidden) + filters["channelprofilemembership__channel_profile_id"] = profile_id_int + filters["channelprofilemembership__enabled"] = True + # If show_disabled is True, show all channels (no filtering needed) + + except (ValueError, TypeError): + # Ignore invalid profile id values + pass + + if only_streamless: + q_filters &= Q(streams__isnull=True) + + if self.request.user.user_level < 10: + filters["user_level__lte"] = self.request.user.user_level + + if filters: + qs = qs.filter(**filters) + if q_filters: + qs = qs.filter(q_filters) + + return qs.distinct() def get_serializer_context(self): context = super().get_serializer_context() @@ -435,8 +538,8 @@ class ChannelViewSet(viewsets.ModelViewSet): @action(detail=False, methods=["patch"], url_path="edit/bulk") def edit_bulk(self, request): """ - Bulk edit channels. - Expects a list of channels with their updates. + Bulk edit channels efficiently. + Validates all updates first, then applies in a single transaction. 
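[Editor's note] A few illustrative query strings showing how the new channel list filters above combine; the route prefix is assumed from the test suite later in this patch and the IDs are invented.

# Illustrative only; route prefix and profile IDs are assumptions.
examples = [
    # Profile selected, show_disabled omitted: only channels with a membership
    # row for profile 3 that has enabled=True are returned.
    "/api/channels/channels/?channel_profile_id=3",
    # show_disabled supplied: membership filtering is skipped, all channels returned.
    "/api/channels/channels/?channel_profile_id=3&show_disabled=true",
    # Channels with no streams attached.
    "/api/channels/channels/?only_streamless=true",
]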
""" data = request.data if not isinstance(data, list): @@ -445,63 +548,101 @@ class ChannelViewSet(viewsets.ModelViewSet): status=status.HTTP_400_BAD_REQUEST, ) - updated_channels = [] - errors = [] + # Extract IDs and validate presence + channel_updates = {} + missing_ids = [] - for channel_data in data: + for i, channel_data in enumerate(data): channel_id = channel_data.get("id") if not channel_id: - errors.append({"error": "Channel ID is required"}) - continue + missing_ids.append(f"Item {i}: Channel ID is required") + else: + channel_updates[channel_id] = channel_data - try: - channel = Channel.objects.get(id=channel_id) + if missing_ids: + return Response( + {"errors": missing_ids}, + status=status.HTTP_400_BAD_REQUEST, + ) - # Handle channel_group_id properly - convert string to integer if needed - if 'channel_group_id' in channel_data: - group_id = channel_data['channel_group_id'] - if group_id is not None: - try: - channel_data['channel_group_id'] = int(group_id) - except (ValueError, TypeError): - channel_data['channel_group_id'] = None + # Fetch all channels at once (one query) + channels_dict = { + c.id: c for c in Channel.objects.filter(id__in=channel_updates.keys()) + } - # Use the serializer to validate and update - serializer = ChannelSerializer( - channel, data=channel_data, partial=True - ) + # Validate and prepare updates + validated_updates = [] + errors = [] - if serializer.is_valid(): - updated_channel = serializer.save() - updated_channels.append(updated_channel) - else: - errors.append({ - "channel_id": channel_id, - "errors": serializer.errors - }) + for channel_id, channel_data in channel_updates.items(): + channel = channels_dict.get(channel_id) - except Channel.DoesNotExist: + if not channel: errors.append({ "channel_id": channel_id, "error": "Channel not found" }) - except Exception as e: + continue + + # Handle channel_group_id conversion + if 'channel_group_id' in channel_data: + group_id = channel_data['channel_group_id'] + if group_id is not None: + try: + channel_data['channel_group_id'] = int(group_id) + except (ValueError, TypeError): + channel_data['channel_group_id'] = None + + # Validate with serializer + serializer = ChannelSerializer( + channel, data=channel_data, partial=True + ) + + if serializer.is_valid(): + validated_updates.append((channel, serializer.validated_data)) + else: errors.append({ "channel_id": channel_id, - "error": str(e) + "errors": serializer.errors }) if errors: return Response( - {"errors": errors, "updated_count": len(updated_channels)}, + {"errors": errors, "updated_count": len(validated_updates)}, status=status.HTTP_400_BAD_REQUEST, ) - # Serialize the updated channels for response - serialized_channels = ChannelSerializer(updated_channels, many=True).data + # Apply all updates in a transaction + with transaction.atomic(): + for channel, validated_data in validated_updates: + for key, value in validated_data.items(): + setattr(channel, key, value) + + # Single bulk_update query instead of individual saves + channels_to_update = [channel for channel, _ in validated_updates] + if channels_to_update: + # Collect all unique field names from all updates + all_fields = set() + for _, validated_data in validated_updates: + all_fields.update(validated_data.keys()) + + # Only call bulk_update if there are fields to update + if all_fields: + Channel.objects.bulk_update( + channels_to_update, + fields=list(all_fields), + batch_size=100 + ) + + # Return the updated objects (already in memory) + serialized_channels = ChannelSerializer( + 
[channel for channel, _ in validated_updates], + many=True, + context=self.get_serializer_context() + ).data return Response({ - "message": f"Successfully updated {len(updated_channels)} channels", + "message": f"Successfully updated {len(validated_updates)} channels", "channels": serialized_channels }) @@ -676,7 +817,7 @@ class ChannelViewSet(viewsets.ModelViewSet): "channel_profile_ids": openapi.Schema( type=openapi.TYPE_ARRAY, items=openapi.Items(type=openapi.TYPE_INTEGER), - description="(Optional) Channel profile ID(s) to add the channel to. Can be a single ID or array of IDs. If not provided, channel is added to all profiles." + description="(Optional) Channel profile ID(s). Behavior: omitted = add to ALL profiles (default); empty array [] = add to NO profiles; [0] = add to ALL profiles (explicit); [1,2,...] = add only to specified profiles." ), }, ), @@ -769,14 +910,37 @@ class ChannelViewSet(viewsets.ModelViewSet): channel.streams.add(stream) # Handle channel profile membership + # Semantics: + # - Omitted (None): add to ALL profiles (backward compatible default) + # - Empty array []: add to NO profiles + # - Sentinel [0] or 0: add to ALL profiles (explicit) + # - [1,2,...]: add to specified profile IDs only channel_profile_ids = request.data.get("channel_profile_ids") if channel_profile_ids is not None: # Normalize single ID to array if not isinstance(channel_profile_ids, list): channel_profile_ids = [channel_profile_ids] - if channel_profile_ids: - # Add channel only to the specified profiles + # Determine action based on semantics + if channel_profile_ids is None: + # Omitted -> add to all profiles (backward compatible) + profiles = ChannelProfile.objects.all() + ChannelProfileMembership.objects.bulk_create([ + ChannelProfileMembership(channel_profile=profile, channel=channel, enabled=True) + for profile in profiles + ]) + elif isinstance(channel_profile_ids, list) and len(channel_profile_ids) == 0: + # Empty array -> add to no profiles + pass + elif isinstance(channel_profile_ids, list) and 0 in channel_profile_ids: + # Sentinel 0 -> add to all profiles (explicit) + profiles = ChannelProfile.objects.all() + ChannelProfileMembership.objects.bulk_create([ + ChannelProfileMembership(channel_profile=profile, channel=channel, enabled=True) + for profile in profiles + ]) + else: + # Specific profile IDs try: channel_profiles = ChannelProfile.objects.filter(id__in=channel_profile_ids) if len(channel_profiles) != len(channel_profile_ids): @@ -799,13 +963,6 @@ class ChannelViewSet(viewsets.ModelViewSet): {"error": f"Error creating profile memberships: {str(e)}"}, status=status.HTTP_400_BAD_REQUEST, ) - else: - # Default behavior: add to all profiles - profiles = ChannelProfile.objects.all() - ChannelProfileMembership.objects.bulk_create([ - ChannelProfileMembership(channel_profile=profile, channel=channel, enabled=True) - for profile in profiles - ]) # Send WebSocket notification for single channel creation from core.utils import send_websocket_update @@ -838,7 +995,7 @@ class ChannelViewSet(viewsets.ModelViewSet): "channel_profile_ids": openapi.Schema( type=openapi.TYPE_ARRAY, items=openapi.Items(type=openapi.TYPE_INTEGER), - description="(Optional) Channel profile ID(s) to add the channels to. If not provided, channels are added to all profiles." + description="(Optional) Channel profile ID(s). Behavior: omitted = add to ALL profiles (default); empty array [] = add to NO profiles; [0] = add to ALL profiles (explicit); [1,2,...] = add only to specified profiles." 
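[Editor's note] The four channel_profile_ids cases spelled out in the description above, as illustrative request bodies (the channel name and profile IDs are invented):

# Illustrative only: how channel_profile_ids drives profile membership on channel creation.
payloads = {
    "omitted":  {"name": "News HD"},                                # key omitted -> added to ALL profiles
    "none":     {"name": "News HD", "channel_profile_ids": []},     # empty list  -> added to NO profiles
    "all":      {"name": "News HD", "channel_profile_ids": [0]},    # sentinel 0  -> ALL profiles, explicitly
    "specific": {"name": "News HD", "channel_profile_ids": [1, 2]}, # only profiles 1 and 2
}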
), "starting_channel_number": openapi.Schema( type=openapi.TYPE_INTEGER, @@ -987,19 +1144,27 @@ class ChannelViewSet(viewsets.ModelViewSet): channel.epg_data = epg_data channel.save(update_fields=["epg_data"]) - # Explicitly trigger program refresh for this EPG - from apps.epg.tasks import parse_programs_for_tvg_id + # Only trigger program refresh for non-dummy EPG sources + status_message = None + if epg_data.epg_source.source_type != 'dummy': + # Explicitly trigger program refresh for this EPG + from apps.epg.tasks import parse_programs_for_tvg_id - task_result = parse_programs_for_tvg_id.delay(epg_data.id) + task_result = parse_programs_for_tvg_id.delay(epg_data.id) - # Prepare response with task status info - status_message = "EPG refresh queued" - if task_result.result == "Task already running": - status_message = "EPG refresh already in progress" + # Prepare response with task status info + status_message = "EPG refresh queued" + if task_result.result == "Task already running": + status_message = "EPG refresh already in progress" + + # Build response message + message = f"EPG data set to {epg_data.tvg_id} for channel {channel.name}" + if status_message: + message += f". {status_message}" return Response( { - "message": f"EPG data set to {epg_data.tvg_id} for channel {channel.name}. {status_message}.", + "message": message, "channel": self.get_serializer(channel).data, "task_status": status_message, } @@ -1031,8 +1196,15 @@ class ChannelViewSet(viewsets.ModelViewSet): def batch_set_epg(self, request): """Efficiently associate multiple channels with EPG data at once.""" associations = request.data.get("associations", []) - channels_updated = 0 - programs_refreshed = 0 + + if not associations: + return Response( + {"error": "associations list is required"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Extract channel IDs upfront + channel_updates = {} unique_epg_ids = set() for assoc in associations: @@ -1042,32 +1214,58 @@ class ChannelViewSet(viewsets.ModelViewSet): if not channel_id: continue - try: - # Get the channel - channel = Channel.objects.get(id=channel_id) + channel_updates[channel_id] = epg_data_id + if epg_data_id: + unique_epg_ids.add(epg_data_id) - # Set the EPG data - channel.epg_data_id = epg_data_id - channel.save(update_fields=["epg_data"]) - channels_updated += 1 + # Batch fetch all channels (single query) + channels_dict = { + c.id: c for c in Channel.objects.filter(id__in=channel_updates.keys()) + } - # Track unique EPG data IDs - if epg_data_id: - unique_epg_ids.add(epg_data_id) - - except Channel.DoesNotExist: + # Collect channels to update + channels_to_update = [] + for channel_id, epg_data_id in channel_updates.items(): + if channel_id not in channels_dict: logger.error(f"Channel with ID {channel_id} not found") - except Exception as e: - logger.error( - f"Error setting EPG data for channel {channel_id}: {str(e)}" + continue + + channel = channels_dict[channel_id] + channel.epg_data_id = epg_data_id + channels_to_update.append(channel) + + # Bulk update all channels (single query) + if channels_to_update: + with transaction.atomic(): + Channel.objects.bulk_update( + channels_to_update, + fields=["epg_data_id"], + batch_size=100 ) - # Trigger program refresh for unique EPG data IDs - from apps.epg.tasks import parse_programs_for_tvg_id + channels_updated = len(channels_to_update) + # Trigger program refresh for unique EPG data IDs (skip dummy EPGs) + from apps.epg.tasks import parse_programs_for_tvg_id + from apps.epg.models import EPGData + + # Batch fetch 
EPG data (single query) + epg_data_dict = { + epg.id: epg + for epg in EPGData.objects.filter(id__in=unique_epg_ids).select_related('epg_source') + } + + programs_refreshed = 0 for epg_id in unique_epg_ids: - parse_programs_for_tvg_id.delay(epg_id) - programs_refreshed += 1 + epg_data = epg_data_dict.get(epg_id) + if not epg_data: + logger.error(f"EPGData with ID {epg_id} not found") + continue + + # Only refresh non-dummy EPG sources + if epg_data.epg_source.source_type != 'dummy': + parse_programs_for_tvg_id.delay(epg_id) + programs_refreshed += 1 return Response( { @@ -1232,7 +1430,7 @@ class CleanupUnusedLogosAPIView(APIView): return [Authenticated()] @swagger_auto_schema( - operation_description="Delete all logos that are not used by any channels, movies, or series", + operation_description="Delete all channel logos that are not used by any channels", request_body=openapi.Schema( type=openapi.TYPE_OBJECT, properties={ @@ -1246,24 +1444,11 @@ class CleanupUnusedLogosAPIView(APIView): responses={200: "Cleanup completed"}, ) def post(self, request): - """Delete all logos with no channel, movie, or series associations""" + """Delete all channel logos with no channel associations""" delete_files = request.data.get("delete_files", False) - # Find logos that are not used by channels, movies, or series - filter_conditions = Q(channels__isnull=True) - - # Add VOD conditions if models are available - try: - filter_conditions &= Q(movie__isnull=True) - except: - pass - - try: - filter_conditions &= Q(series__isnull=True) - except: - pass - - unused_logos = Logo.objects.filter(filter_conditions) + # Find logos that are not used by any channels + unused_logos = Logo.objects.filter(channels__isnull=True) deleted_count = unused_logos.count() logo_names = list(unused_logos.values_list('name', flat=True)) local_files_deleted = 0 @@ -1335,13 +1520,6 @@ class LogoViewSet(viewsets.ModelViewSet): # Start with basic prefetch for channels queryset = Logo.objects.prefetch_related('channels').order_by('name') - # Try to prefetch VOD relations if available - try: - queryset = queryset.prefetch_related('movie', 'series') - except: - # VOD app might not be available, continue without VOD prefetch - pass - # Filter by specific IDs ids = self.request.query_params.getlist('ids') if ids: @@ -1354,62 +1532,14 @@ class LogoViewSet(viewsets.ModelViewSet): pass # Invalid IDs, return empty queryset queryset = Logo.objects.none() - # Filter by usage - now includes VOD content + # Filter by usage used_filter = self.request.query_params.get('used', None) if used_filter == 'true': - # Logo is used if it has any channels, movies, or series - filter_conditions = Q(channels__isnull=False) - - # Add VOD conditions if models are available - try: - filter_conditions |= Q(movie__isnull=False) - except: - pass - - try: - filter_conditions |= Q(series__isnull=False) - except: - pass - - queryset = queryset.filter(filter_conditions).distinct() - + # Logo is used if it has any channels + queryset = queryset.filter(channels__isnull=False).distinct() elif used_filter == 'false': - # Logo is unused if it has no channels, movies, or series - filter_conditions = Q(channels__isnull=True) - - # Add VOD conditions if models are available - try: - filter_conditions &= Q(movie__isnull=True) - except: - pass - - try: - filter_conditions &= Q(series__isnull=True) - except: - pass - - queryset = queryset.filter(filter_conditions) - - # Filter for channel assignment (unused + channel-used, exclude VOD-only) - channel_assignable = 
self.request.query_params.get('channel_assignable', None) - if channel_assignable == 'true': - # Include logos that are either: - # 1. Completely unused, OR - # 2. Used by channels (but may also be used by VOD) - # Exclude logos that are ONLY used by VOD content - - unused_condition = Q(channels__isnull=True) - channel_used_condition = Q(channels__isnull=False) - - # Add VOD conditions if models are available - try: - unused_condition &= Q(movie__isnull=True) & Q(series__isnull=True) - except: - pass - - # Combine: unused OR used by channels - filter_conditions = unused_condition | channel_used_condition - queryset = queryset.filter(filter_conditions).distinct() + # Logo is unused if it has no channels + queryset = queryset.filter(channels__isnull=True) # Filter by name name_filter = self.request.query_params.get('name', None) @@ -1524,11 +1654,10 @@ class LogoViewSet(viewsets.ModelViewSet): """Streams the logo file, whether it's local or remote.""" logo = self.get_object() logo_url = logo.url - if logo_url.startswith("/data"): # Local file if not os.path.exists(logo_url): raise Http404("Image not found") - + stat = os.stat(logo_url) # Get proper mime type (first item of the tuple) content_type, _ = mimetypes.guess_type(logo_url) if not content_type: @@ -1538,6 +1667,8 @@ class LogoViewSet(viewsets.ModelViewSet): response = StreamingHttpResponse( open(logo_url, "rb"), content_type=content_type ) + response["Cache-Control"] = "public, max-age=14400" # Cache in browser for 4 hours + response["Last-Modified"] = http_date(stat.st_mtime) response["Content-Disposition"] = 'inline; filename="{}"'.format( os.path.basename(logo_url) ) @@ -1577,6 +1708,10 @@ class LogoViewSet(viewsets.ModelViewSet): remote_response.iter_content(chunk_size=8192), content_type=content_type, ) + if(remote_response.headers.get("Cache-Control")): + response["Cache-Control"] = remote_response.headers.get("Cache-Control") + if(remote_response.headers.get("Last-Modified")): + response["Last-Modified"] = remote_response.headers.get("Last-Modified") response["Content-Disposition"] = 'inline; filename="{}"'.format( os.path.basename(logo_url) ) @@ -1608,11 +1743,58 @@ class ChannelProfileViewSet(viewsets.ModelViewSet): return self.request.user.channel_profiles.all() def get_permissions(self): + if self.action == "duplicate": + return [IsAdmin()] try: return [perm() for perm in permission_classes_by_action[self.action]] except KeyError: return [Authenticated()] + @action(detail=True, methods=["post"], url_path="duplicate", permission_classes=[IsAdmin]) + def duplicate(self, request, pk=None): + requested_name = str(request.data.get("name", "")).strip() + + if not requested_name: + return Response( + {"detail": "Name is required to duplicate a profile."}, + status=status.HTTP_400_BAD_REQUEST, + ) + + if ChannelProfile.objects.filter(name=requested_name).exists(): + return Response( + {"detail": "A channel profile with this name already exists."}, + status=status.HTTP_400_BAD_REQUEST, + ) + + source_profile = self.get_object() + + with transaction.atomic(): + new_profile = ChannelProfile.objects.create(name=requested_name) + + source_memberships = ChannelProfileMembership.objects.filter( + channel_profile=source_profile + ) + source_enabled_map = { + membership.channel_id: membership.enabled + for membership in source_memberships + } + + new_memberships = list( + ChannelProfileMembership.objects.filter(channel_profile=new_profile) + ) + for membership in new_memberships: + membership.enabled = source_enabled_map.get( + 
membership.channel_id, False + ) + + if new_memberships: + ChannelProfileMembership.objects.bulk_update( + new_memberships, ["enabled"] + ) + + serializer = self.get_serializer(new_profile) + return Response(serializer.data, status=status.HTTP_201_CREATED) + class GetChannelStreamsAPIView(APIView): def get_permissions(self): @@ -1669,6 +1851,30 @@ class BulkUpdateChannelMembershipAPIView(APIView): except KeyError: return [Authenticated()] + @swagger_auto_schema( + operation_description="Bulk enable or disable channels for a specific profile. Creates membership records if they don't exist.", + request_body=BulkChannelProfileMembershipSerializer, + responses={ + 200: openapi.Response( + description="Channels updated successfully", + schema=openapi.Schema( + type=openapi.TYPE_OBJECT, + properties={ + "status": openapi.Schema(type=openapi.TYPE_STRING, example="success"), + "updated": openapi.Schema(type=openapi.TYPE_INTEGER, description="Number of channels updated"), + "created": openapi.Schema(type=openapi.TYPE_INTEGER, description="Number of new memberships created"), + "invalid_channels": openapi.Schema( + type=openapi.TYPE_ARRAY, + items=openapi.Schema(type=openapi.TYPE_INTEGER), + description="List of channel IDs that don't exist" + ), + }, + ), + ), + 400: "Invalid request data", + 404: "Profile not found", + }, + ) def patch(self, request, profile_id): """Bulk enable or disable channels for a specific profile""" # Get the channel profile @@ -1681,21 +1887,67 @@ class BulkUpdateChannelMembershipAPIView(APIView): updates = serializer.validated_data["channels"] channel_ids = [entry["channel_id"] for entry in updates] - memberships = ChannelProfileMembership.objects.filter( + # Validate that all channels exist + existing_channels = set( + Channel.objects.filter(id__in=channel_ids).values_list("id", flat=True) + ) + invalid_channels = [cid for cid in channel_ids if cid not in existing_channels] + + if invalid_channels: + return Response( + { + "error": "Some channels do not exist", + "invalid_channels": invalid_channels, + }, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Get existing memberships + existing_memberships = ChannelProfileMembership.objects.filter( channel_profile=channel_profile, channel_id__in=channel_ids ) + membership_dict = {m.channel_id: m for m in existing_memberships} - membership_dict = {m.channel.id: m for m in memberships} + # Prepare lists for bulk operations + memberships_to_update = [] + memberships_to_create = [] for entry in updates: channel_id = entry["channel_id"] enabled_status = entry["enabled"] + if channel_id in membership_dict: + # Update existing membership membership_dict[channel_id].enabled = enabled_status + memberships_to_update.append(membership_dict[channel_id]) + else: + # Create new membership + memberships_to_create.append( + ChannelProfileMembership( + channel_profile=channel_profile, + channel_id=channel_id, + enabled=enabled_status, + ) + ) - ChannelProfileMembership.objects.bulk_update(memberships, ["enabled"]) + # Perform bulk operations + with transaction.atomic(): + if memberships_to_update: + ChannelProfileMembership.objects.bulk_update( + memberships_to_update, ["enabled"] + ) + if memberships_to_create: + ChannelProfileMembership.objects.bulk_create(memberships_to_create) - return Response({"status": "success"}, status=status.HTTP_200_OK) + return Response( + { + "status": "success", + "updated": len(memberships_to_update), + "created": len(memberships_to_create), + "invalid_channels": [], + }, + status=status.HTTP_200_OK, + ) return 
Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) @@ -1741,7 +1993,7 @@ class RecordingViewSet(viewsets.ModelViewSet): def get_permissions(self): # Allow unauthenticated playback of recording files (like other streaming endpoints) - if getattr(self, 'action', None) == 'file': + if self.action == 'file': return [AllowAny()] try: return [perm() for perm in permission_classes_by_action[self.action]] @@ -2022,7 +2274,7 @@ class DeleteSeriesRuleAPIView(APIView): return [Authenticated()] def delete(self, request, tvg_id): - tvg_id = str(tvg_id) + tvg_id = unquote(str(tvg_id or "")) rules = [r for r in CoreSettings.get_dvr_series_rules() if str(r.get("tvg_id")) != tvg_id] CoreSettings.set_dvr_series_rules(rules) return Response({"success": True, "rules": rules}) diff --git a/apps/channels/migrations/0030_alter_stream_url.py b/apps/channels/migrations/0030_alter_stream_url.py new file mode 100644 index 00000000..203e411a --- /dev/null +++ b/apps/channels/migrations/0030_alter_stream_url.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-10-28 20:00 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0029_backfill_custom_stream_hashes'), + ] + + operations = [ + migrations.AlterField( + model_name='stream', + name='url', + field=models.URLField(blank=True, max_length=4096, null=True), + ), + ] diff --git a/apps/channels/migrations/0031_channelgroupm3uaccount_is_stale_and_more.py b/apps/channels/migrations/0031_channelgroupm3uaccount_is_stale_and_more.py new file mode 100644 index 00000000..2428a97b --- /dev/null +++ b/apps/channels/migrations/0031_channelgroupm3uaccount_is_stale_and_more.py @@ -0,0 +1,29 @@ +# Generated by Django 5.2.9 on 2026-01-09 18:19 + +import datetime +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0030_alter_stream_url'), + ] + + operations = [ + migrations.AddField( + model_name='channelgroupm3uaccount', + name='is_stale', + field=models.BooleanField(db_index=True, default=False, help_text='Whether this group relationship is stale (not seen in recent refresh, pending deletion)'), + ), + migrations.AddField( + model_name='channelgroupm3uaccount', + name='last_seen', + field=models.DateTimeField(db_index=True, default=datetime.datetime.now, help_text='Last time this group was seen in the M3U source during a refresh'), + ), + migrations.AddField( + model_name='stream', + name='is_stale', + field=models.BooleanField(db_index=True, default=False, help_text='Whether this stream is stale (not seen in recent refresh, pending deletion)'), + ), + ] diff --git a/apps/channels/models.py b/apps/channels/models.py index 3b7ed6e3..6d199520 100644 --- a/apps/channels/models.py +++ b/apps/channels/models.py @@ -55,7 +55,7 @@ class Stream(models.Model): """ name = models.CharField(max_length=255, default="Default Stream") - url = models.URLField(max_length=2000, blank=True, null=True) + url = models.URLField(max_length=4096, blank=True, null=True) m3u_account = models.ForeignKey( M3UAccount, on_delete=models.CASCADE, @@ -94,6 +94,11 @@ class Stream(models.Model): db_index=True, ) last_seen = models.DateTimeField(db_index=True, default=datetime.now) + is_stale = models.BooleanField( + default=False, + db_index=True, + help_text="Whether this stream is stale (not seen in recent refresh, pending deletion)" + ) custom_properties = models.JSONField(default=dict, blank=True, null=True) # Stream statistics 
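[Editor's note] A minimal sketch of the mark-and-sweep lifecycle the new is_stale / last_seen fields are meant to support, per the help_text above; how a refresh actually uses them is an assumption here, not code from the M3U refresh task.

from datetime import datetime

def mark_streams_after_refresh(streams, seen_hashes):
    # Illustrative only: streams seen in the latest refresh get last_seen bumped
    # and is_stale cleared; anything missing is flagged stale so a later pass can
    # delete it instead of removing it immediately.
    now = datetime.now()
    stale = []
    for stream in streams:  # plain dicts standing in for Stream rows
        if stream["stream_hash"] in seen_hashes:
            stream["last_seen"] = now
            stream["is_stale"] = False
        else:
            stream["is_stale"] = True
            stale.append(stream)
    return stale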
fields @@ -119,11 +124,11 @@ class Stream(models.Model): return self.name or self.url or f"Stream ID {self.id}" @classmethod - def generate_hash_key(cls, name, url, tvg_id, keys=None, m3u_id=None): + def generate_hash_key(cls, name, url, tvg_id, keys=None, m3u_id=None, group=None): if keys is None: keys = CoreSettings.get_m3u_hash_key().split(",") - stream_parts = {"name": name, "url": url, "tvg_id": tvg_id, "m3u_id": m3u_id} + stream_parts = {"name": name, "url": url, "tvg_id": tvg_id, "m3u_id": m3u_id, "group": group} hash_parts = {key: stream_parts[key] for key in keys if key in stream_parts} @@ -589,6 +594,16 @@ class ChannelGroupM3UAccount(models.Model): blank=True, help_text='Starting channel number for auto-created channels in this group' ) + last_seen = models.DateTimeField( + default=datetime.now, + db_index=True, + help_text='Last time this group was seen in the M3U source during a refresh' + ) + is_stale = models.BooleanField( + default=False, + db_index=True, + help_text='Whether this group relationship is stale (not seen in recent refresh, pending deletion)' + ) class Meta: unique_together = ("channel_group", "m3u_account") diff --git a/apps/channels/serializers.py b/apps/channels/serializers.py index 7058ced2..c1919e24 100644 --- a/apps/channels/serializers.py +++ b/apps/channels/serializers.py @@ -64,47 +64,15 @@ class LogoSerializer(serializers.ModelSerializer): return reverse("api:channels:logo-cache", args=[obj.id]) def get_channel_count(self, obj): - """Get the number of channels, movies, and series using this logo""" - channel_count = obj.channels.count() - - # Safely get movie count - try: - movie_count = obj.movie.count() if hasattr(obj, 'movie') else 0 - except AttributeError: - movie_count = 0 - - # Safely get series count - try: - series_count = obj.series.count() if hasattr(obj, 'series') else 0 - except AttributeError: - series_count = 0 - - return channel_count + movie_count + series_count + """Get the number of channels using this logo""" + return obj.channels.count() def get_is_used(self, obj): - """Check if this logo is used by any channels, movies, or series""" - # Check if used by channels - if obj.channels.exists(): - return True - - # Check if used by movies (handle case where VOD app might not be available) - try: - if hasattr(obj, 'movie') and obj.movie.exists(): - return True - except AttributeError: - pass - - # Check if used by series (handle case where VOD app might not be available) - try: - if hasattr(obj, 'series') and obj.series.exists(): - return True - except AttributeError: - pass - - return False + """Check if this logo is used by any channels""" + return obj.channels.exists() def get_channel_names(self, obj): - """Get the names of channels, movies, and series using this logo (limited to first 5)""" + """Get the names of channels using this logo (limited to first 5)""" names = [] # Get channel names @@ -112,28 +80,6 @@ class LogoSerializer(serializers.ModelSerializer): for channel in channels: names.append(f"Channel: {channel.name}") - # Get movie names (only if we haven't reached limit) - if len(names) < 5: - try: - if hasattr(obj, 'movie'): - remaining_slots = 5 - len(names) - movies = obj.movie.all()[:remaining_slots] - for movie in movies: - names.append(f"Movie: {movie.name}") - except AttributeError: - pass - - # Get series names (only if we haven't reached limit) - if len(names) < 5: - try: - if hasattr(obj, 'series'): - remaining_slots = 5 - len(names) - series = obj.series.all()[:remaining_slots] - for series_item in series: - 
names.append(f"Series: {series_item.name}") - except AttributeError: - pass - # Calculate total count for "more" message total_count = self.get_channel_count(obj) if total_count > 5: @@ -173,6 +119,7 @@ class StreamSerializer(serializers.ModelSerializer): "current_viewers", "updated_at", "last_seen", + "is_stale", "stream_profile_id", "is_custom", "channel_group", @@ -209,7 +156,7 @@ class ChannelGroupM3UAccountSerializer(serializers.ModelSerializer): class Meta: model = ChannelGroupM3UAccount - fields = ["m3u_accounts", "channel_group", "enabled", "auto_channel_sync", "auto_sync_channel_start", "custom_properties"] + fields = ["m3u_accounts", "channel_group", "enabled", "auto_channel_sync", "auto_sync_channel_start", "custom_properties", "is_stale", "last_seen"] def to_representation(self, instance): data = super().to_representation(instance) @@ -233,8 +180,8 @@ class ChannelGroupM3UAccountSerializer(serializers.ModelSerializer): # Channel Group # class ChannelGroupSerializer(serializers.ModelSerializer): - channel_count = serializers.IntegerField(read_only=True) - m3u_account_count = serializers.IntegerField(read_only=True) + channel_count = serializers.SerializerMethodField() + m3u_account_count = serializers.SerializerMethodField() m3u_accounts = ChannelGroupM3UAccountSerializer( many=True, read_only=True @@ -244,6 +191,14 @@ class ChannelGroupSerializer(serializers.ModelSerializer): model = ChannelGroup fields = ["id", "name", "channel_count", "m3u_account_count", "m3u_accounts"] + def get_channel_count(self, obj): + """Get count of channels in this group""" + return obj.channels.count() + + def get_m3u_account_count(self, obj): + """Get count of M3U accounts associated with this group""" + return obj.m3u_accounts.count() + class ChannelProfileSerializer(serializers.ModelSerializer): channels = serializers.SerializerMethodField() @@ -348,8 +303,17 @@ class ChannelSerializer(serializers.ModelSerializer): if include_streams: self.fields["streams"] = serializers.SerializerMethodField() - - return super().to_representation(instance) + return super().to_representation(instance) + else: + # Fix: For PATCH/PUT responses, ensure streams are ordered + representation = super().to_representation(instance) + if "streams" in representation: + representation["streams"] = list( + instance.streams.all() + .order_by("channelstream__order") + .values_list("id", flat=True) + ) + return representation def get_logo(self, obj): return LogoSerializer(obj.logo).data diff --git a/apps/channels/tasks.py b/apps/channels/tasks.py index 3943cf16..b3e11251 100755 --- a/apps/channels/tasks.py +++ b/apps/channels/tasks.py @@ -295,7 +295,11 @@ def match_channels_to_epg(channels_data, epg_data, region_code=None, use_ml=True if score > 50: # Only show decent matches logger.debug(f" EPG '{row['name']}' (norm: '{row['norm_name']}') => score: {score} (base: {base_score}, bonus: {bonus})") - if score > best_score: + # When scores are equal, prefer higher priority EPG source + row_priority = row.get('epg_source_priority', 0) + best_priority = best_epg.get('epg_source_priority', 0) if best_epg else -1 + + if score > best_score or (score == best_score and row_priority > best_priority): best_score = score best_epg = row @@ -471,9 +475,9 @@ def match_epg_channels(): "norm_chan": normalize_name(channel.name) # Always use channel name for fuzzy matching! 
}) - # Get all EPG data + # Get all EPG data from active sources, ordered by source priority (highest first) so we prefer higher priority matches epg_data = [] - for epg in EPGData.objects.all(): + for epg in EPGData.objects.select_related('epg_source').filter(epg_source__is_active=True): normalized_tvg_id = epg.tvg_id.strip().lower() if epg.tvg_id else "" epg_data.append({ 'id': epg.id, @@ -482,9 +486,13 @@ def match_epg_channels(): 'name': epg.name, 'norm_name': normalize_name(epg.name), 'epg_source_id': epg.epg_source.id if epg.epg_source else None, + 'epg_source_priority': epg.epg_source.priority if epg.epg_source else 0, }) - logger.info(f"Processing {len(channels_data)} channels against {len(epg_data)} EPG entries") + # Sort EPG data by source priority (highest first) so we prefer higher priority matches + epg_data.sort(key=lambda x: x['epg_source_priority'], reverse=True) + + logger.info(f"Processing {len(channels_data)} channels against {len(epg_data)} EPG entries (from active sources only)") # Run EPG matching with progress updates - automatically uses conservative thresholds for bulk operations result = match_channels_to_epg(channels_data, epg_data, region_code, use_ml=True, send_progress=True) @@ -618,9 +626,9 @@ def match_selected_channels_epg(channel_ids): "norm_chan": normalize_name(channel.name) }) - # Get all EPG data + # Get all EPG data from active sources, ordered by source priority (highest first) so we prefer higher priority matches epg_data = [] - for epg in EPGData.objects.all(): + for epg in EPGData.objects.select_related('epg_source').filter(epg_source__is_active=True): normalized_tvg_id = epg.tvg_id.strip().lower() if epg.tvg_id else "" epg_data.append({ 'id': epg.id, @@ -629,9 +637,13 @@ def match_selected_channels_epg(channel_ids): 'name': epg.name, 'norm_name': normalize_name(epg.name), 'epg_source_id': epg.epg_source.id if epg.epg_source else None, + 'epg_source_priority': epg.epg_source.priority if epg.epg_source else 0, }) - logger.info(f"Processing {len(channels_data)} selected channels against {len(epg_data)} EPG entries") + # Sort EPG data by source priority (highest first) so we prefer higher priority matches + epg_data.sort(key=lambda x: x['epg_source_priority'], reverse=True) + + logger.info(f"Processing {len(channels_data)} selected channels against {len(epg_data)} EPG entries (from active sources only)") # Run EPG matching with progress updates - automatically uses appropriate thresholds result = match_channels_to_epg(channels_data, epg_data, region_code, use_ml=True, send_progress=True) @@ -749,9 +761,10 @@ def match_single_channel_epg(channel_id): test_normalized = normalize_name(test_name) logger.debug(f"DEBUG normalization example: '{test_name}' → '{test_normalized}' (call sign preserved)") - # Get all EPG data for matching - must include norm_name field + # Get all EPG data for matching from active sources - must include norm_name field + # Ordered by source priority (highest first) so we prefer higher priority matches epg_data_list = [] - for epg in EPGData.objects.filter(name__isnull=False).exclude(name=''): + for epg in EPGData.objects.select_related('epg_source').filter(epg_source__is_active=True, name__isnull=False).exclude(name=''): normalized_epg_tvg_id = epg.tvg_id.strip().lower() if epg.tvg_id else "" epg_data_list.append({ 'id': epg.id, @@ -760,10 +773,14 @@ def match_single_channel_epg(channel_id): 'name': epg.name, 'norm_name': normalize_name(epg.name), 'epg_source_id': epg.epg_source.id if epg.epg_source else None, + 
'epg_source_priority': epg.epg_source.priority if epg.epg_source else 0, }) + # Sort EPG data by source priority (highest first) so we prefer higher priority matches + epg_data_list.sort(key=lambda x: x['epg_source_priority'], reverse=True) + if not epg_data_list: - return {"matched": False, "message": "No EPG data available for matching"} + return {"matched": False, "message": "No EPG data available for matching (from active sources)"} logger.info(f"Matching single channel '{channel.name}' against {len(epg_data_list)} EPG entries") @@ -1434,6 +1451,18 @@ def run_recording(recording_id, channel_id, start_time_str, end_time_str): logger.info(f"Starting recording for channel {channel.name}") + # Log system event for recording start + try: + from core.utils import log_system_event + log_system_event( + 'recording_start', + channel_id=channel.uuid, + channel_name=channel.name, + recording_id=recording_id + ) + except Exception as e: + logger.error(f"Could not log recording start event: {e}") + # Try to resolve the Recording row up front recording_obj = None try: @@ -1827,6 +1856,20 @@ def run_recording(recording_id, channel_id, start_time_str, end_time_str): # After the loop, the file and response are closed automatically. logger.info(f"Finished recording for channel {channel.name}") + # Log system event for recording end + try: + from core.utils import log_system_event + log_system_event( + 'recording_end', + channel_id=channel.uuid, + channel_name=channel.name, + recording_id=recording_id, + interrupted=interrupted, + bytes_written=bytes_written + ) + except Exception as e: + logger.error(f"Could not log recording end event: {e}") + # Remux TS to MKV container remux_success = False try: @@ -2636,7 +2679,38 @@ def bulk_create_channels_from_streams(self, stream_ids, channel_profile_ids=None ) # Handle channel profile membership - if profile_ids: + # Semantics: + # - None: add to ALL profiles (backward compatible default) + # - Empty array []: add to NO profiles + # - Sentinel [0] or 0 in array: add to ALL profiles (explicit) + # - [1,2,...]: add to specified profile IDs only + if profile_ids is None: + # Omitted -> add to all profiles (backward compatible) + all_profiles = ChannelProfile.objects.all() + channel_profile_memberships.extend([ + ChannelProfileMembership( + channel_profile=profile, + channel=channel, + enabled=True + ) + for profile in all_profiles + ]) + elif isinstance(profile_ids, list) and len(profile_ids) == 0: + # Empty array -> add to no profiles + pass + elif isinstance(profile_ids, list) and 0 in profile_ids: + # Sentinel 0 -> add to all profiles (explicit) + all_profiles = ChannelProfile.objects.all() + channel_profile_memberships.extend([ + ChannelProfileMembership( + channel_profile=profile, + channel=channel, + enabled=True + ) + for profile in all_profiles + ]) + else: + # Specific profile IDs try: specific_profiles = ChannelProfile.objects.filter(id__in=profile_ids) channel_profile_memberships.extend([ @@ -2652,17 +2726,6 @@ def bulk_create_channels_from_streams(self, stream_ids, channel_profile_ids=None 'channel_id': channel.id, 'error': f'Failed to add to profiles: {str(e)}' }) - else: - # Add to all profiles by default - all_profiles = ChannelProfile.objects.all() - channel_profile_memberships.extend([ - ChannelProfileMembership( - channel_profile=profile, - channel=channel, - enabled=True - ) - for profile in all_profiles - ]) # Bulk update channels with logos if update: diff --git a/apps/channels/tests/test_channel_api.py 
b/apps/channels/tests/test_channel_api.py new file mode 100644 index 00000000..bb245da1 --- /dev/null +++ b/apps/channels/tests/test_channel_api.py @@ -0,0 +1,211 @@ +from django.test import TestCase +from django.contrib.auth import get_user_model +from rest_framework.test import APIClient +from rest_framework import status + +from apps.channels.models import Channel, ChannelGroup + +User = get_user_model() + + +class ChannelBulkEditAPITests(TestCase): + def setUp(self): + # Create a test admin user (user_level >= 10) and authenticate + self.user = User.objects.create_user(username="testuser", password="testpass123") + self.user.user_level = 10 # Set admin level + self.user.save() + self.client = APIClient() + self.client.force_authenticate(user=self.user) + self.bulk_edit_url = "/api/channels/channels/edit/bulk/" + + # Create test channel group + self.group1 = ChannelGroup.objects.create(name="Test Group 1") + self.group2 = ChannelGroup.objects.create(name="Test Group 2") + + # Create test channels + self.channel1 = Channel.objects.create( + channel_number=1.0, + name="Channel 1", + tvg_id="channel1", + channel_group=self.group1 + ) + self.channel2 = Channel.objects.create( + channel_number=2.0, + name="Channel 2", + tvg_id="channel2", + channel_group=self.group1 + ) + self.channel3 = Channel.objects.create( + channel_number=3.0, + name="Channel 3", + tvg_id="channel3" + ) + + def test_bulk_edit_success(self): + """Test successful bulk update of multiple channels""" + data = [ + {"id": self.channel1.id, "name": "Updated Channel 1"}, + {"id": self.channel2.id, "name": "Updated Channel 2", "channel_number": 22.0}, + ] + + response = self.client.patch(self.bulk_edit_url, data, format="json") + + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(response.data["message"], "Successfully updated 2 channels") + self.assertEqual(len(response.data["channels"]), 2) + + # Verify database changes + self.channel1.refresh_from_db() + self.channel2.refresh_from_db() + self.assertEqual(self.channel1.name, "Updated Channel 1") + self.assertEqual(self.channel2.name, "Updated Channel 2") + self.assertEqual(self.channel2.channel_number, 22.0) + + def test_bulk_edit_with_empty_validated_data_first(self): + """ + Test the bug fix: when first channel has empty validated_data. 
+ This was causing: ValueError: Field names must be given to bulk_update() + """ + # Create a channel with data that will be "unchanged" (empty validated_data) + # We'll send the same data it already has + data = [ + # First channel: no actual changes (this would create empty validated_data) + {"id": self.channel1.id}, + # Second channel: has changes + {"id": self.channel2.id, "name": "Updated Channel 2"}, + ] + + response = self.client.patch(self.bulk_edit_url, data, format="json") + + # Should not crash with ValueError + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(response.data["message"], "Successfully updated 2 channels") + + # Verify the channel with changes was updated + self.channel2.refresh_from_db() + self.assertEqual(self.channel2.name, "Updated Channel 2") + + def test_bulk_edit_all_empty_updates(self): + """Test when all channels have empty updates (no actual changes)""" + data = [ + {"id": self.channel1.id}, + {"id": self.channel2.id}, + ] + + response = self.client.patch(self.bulk_edit_url, data, format="json") + + # Should succeed without calling bulk_update + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(response.data["message"], "Successfully updated 2 channels") + + def test_bulk_edit_mixed_fields(self): + """Test bulk update where different channels update different fields""" + data = [ + {"id": self.channel1.id, "name": "New Name 1"}, + {"id": self.channel2.id, "channel_number": 99.0}, + {"id": self.channel3.id, "tvg_id": "new_tvg_id", "name": "New Name 3"}, + ] + + response = self.client.patch(self.bulk_edit_url, data, format="json") + + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(response.data["message"], "Successfully updated 3 channels") + + # Verify all updates + self.channel1.refresh_from_db() + self.channel2.refresh_from_db() + self.channel3.refresh_from_db() + + self.assertEqual(self.channel1.name, "New Name 1") + self.assertEqual(self.channel2.channel_number, 99.0) + self.assertEqual(self.channel3.tvg_id, "new_tvg_id") + self.assertEqual(self.channel3.name, "New Name 3") + + def test_bulk_edit_with_channel_group(self): + """Test bulk update with channel_group_id changes""" + data = [ + {"id": self.channel1.id, "channel_group_id": self.group2.id}, + {"id": self.channel3.id, "channel_group_id": self.group1.id}, + ] + + response = self.client.patch(self.bulk_edit_url, data, format="json") + + self.assertEqual(response.status_code, status.HTTP_200_OK) + + # Verify group changes + self.channel1.refresh_from_db() + self.channel3.refresh_from_db() + self.assertEqual(self.channel1.channel_group, self.group2) + self.assertEqual(self.channel3.channel_group, self.group1) + + def test_bulk_edit_nonexistent_channel(self): + """Test bulk update with a channel that doesn't exist""" + nonexistent_id = 99999 + data = [ + {"id": nonexistent_id, "name": "Should Fail"}, + {"id": self.channel1.id, "name": "Should Still Update"}, + ] + + response = self.client.patch(self.bulk_edit_url, data, format="json") + + # Should return 400 with errors + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) + self.assertIn("errors", response.data) + self.assertEqual(len(response.data["errors"]), 1) + self.assertEqual(response.data["errors"][0]["channel_id"], nonexistent_id) + self.assertEqual(response.data["errors"][0]["error"], "Channel not found") + + # The valid channel should still be updated + self.assertEqual(response.data["updated_count"], 1) + + def 
test_bulk_edit_validation_error(self): + """Test bulk update with invalid data (validation error)""" + data = [ + {"id": self.channel1.id, "channel_number": "invalid_number"}, + ] + + response = self.client.patch(self.bulk_edit_url, data, format="json") + + # Should return 400 with validation errors + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) + self.assertIn("errors", response.data) + self.assertEqual(len(response.data["errors"]), 1) + self.assertIn("channel_number", response.data["errors"][0]["errors"]) + + def test_bulk_edit_empty_channel_updates(self): + """Test bulk update with empty list""" + data = [] + + response = self.client.patch(self.bulk_edit_url, data, format="json") + + # Empty list is accepted and returns success with 0 updates + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(response.data["message"], "Successfully updated 0 channels") + + def test_bulk_edit_missing_channel_updates(self): + """Test bulk update without proper format (dict instead of list)""" + data = {"channel_updates": {}} + + response = self.client.patch(self.bulk_edit_url, data, format="json") + + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) + self.assertEqual(response.data["error"], "Expected a list of channel updates") + + def test_bulk_edit_preserves_other_fields(self): + """Test that bulk update only changes specified fields""" + original_channel_number = self.channel1.channel_number + original_tvg_id = self.channel1.tvg_id + + data = [ + {"id": self.channel1.id, "name": "Only Name Changed"}, + ] + + response = self.client.patch(self.bulk_edit_url, data, format="json") + + self.assertEqual(response.status_code, status.HTTP_200_OK) + + # Verify only name changed, other fields preserved + self.channel1.refresh_from_db() + self.assertEqual(self.channel1.name, "Only Name Changed") + self.assertEqual(self.channel1.channel_number, original_channel_number) + self.assertEqual(self.channel1.tvg_id, original_tvg_id) diff --git a/apps/epg/migrations/0019_alter_programdata_sub_title.py b/apps/epg/migrations/0019_alter_programdata_sub_title.py new file mode 100644 index 00000000..5a53627c --- /dev/null +++ b/apps/epg/migrations/0019_alter_programdata_sub_title.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-10-22 21:59 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0018_epgsource_custom_properties_and_more'), + ] + + operations = [ + migrations.AlterField( + model_name='programdata', + name='sub_title', + field=models.TextField(blank=True, null=True), + ), + ] diff --git a/apps/epg/migrations/0020_migrate_time_to_starttime_placeholders.py b/apps/epg/migrations/0020_migrate_time_to_starttime_placeholders.py new file mode 100644 index 00000000..8f53bb0a --- /dev/null +++ b/apps/epg/migrations/0020_migrate_time_to_starttime_placeholders.py @@ -0,0 +1,119 @@ +# Generated migration to replace {time} placeholders with {starttime} + +import re +from django.db import migrations + + +def migrate_time_placeholders(apps, schema_editor): + """ + Replace {time} with {starttime} and {time24} with {starttime24} + in all dummy EPG source custom_properties templates. 
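[Editor's note] A small worked example of the substitution this migration performs, using an invented template string; the regex patterns mirror the migration body.

import re

# Illustrative only: a sample dummy-EPG template, before and after the placeholder rename.
template = "Starts at {time} ({time24})"
migrated = re.sub(r"\{time24\}", "{starttime24}", template)
migrated = re.sub(r"\{time\}", "{starttime}", migrated)
assert migrated == "Starts at {starttime} ({starttime24})"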
+ """ + EPGSource = apps.get_model('epg', 'EPGSource') + + # Fields that contain templates with placeholders + template_fields = [ + 'title_template', + 'description_template', + 'upcoming_title_template', + 'upcoming_description_template', + 'ended_title_template', + 'ended_description_template', + 'channel_logo_url', + 'program_poster_url', + ] + + # Get all dummy EPG sources + dummy_sources = EPGSource.objects.filter(source_type='dummy') + + updated_count = 0 + for source in dummy_sources: + if not source.custom_properties: + continue + + modified = False + custom_props = source.custom_properties.copy() + + for field in template_fields: + if field in custom_props and custom_props[field]: + original_value = custom_props[field] + + # Replace {time24} first (before {time}) to avoid double replacement + # e.g., {time24} shouldn't become {starttime24} via {time} -> {starttime} + new_value = original_value + new_value = re.sub(r'\{time24\}', '{starttime24}', new_value) + new_value = re.sub(r'\{time\}', '{starttime}', new_value) + + if new_value != original_value: + custom_props[field] = new_value + modified = True + + if modified: + source.custom_properties = custom_props + source.save(update_fields=['custom_properties']) + updated_count += 1 + + if updated_count > 0: + print(f"Migration complete: Updated {updated_count} dummy EPG source(s) with new placeholder names.") + else: + print("No dummy EPG sources needed placeholder updates.") + + +def reverse_migration(apps, schema_editor): + """ + Reverse the migration by replacing {starttime} back to {time}. + """ + EPGSource = apps.get_model('epg', 'EPGSource') + + template_fields = [ + 'title_template', + 'description_template', + 'upcoming_title_template', + 'upcoming_description_template', + 'ended_title_template', + 'ended_description_template', + 'channel_logo_url', + 'program_poster_url', + ] + + dummy_sources = EPGSource.objects.filter(source_type='dummy') + + updated_count = 0 + for source in dummy_sources: + if not source.custom_properties: + continue + + modified = False + custom_props = source.custom_properties.copy() + + for field in template_fields: + if field in custom_props and custom_props[field]: + original_value = custom_props[field] + + # Reverse the replacements + new_value = original_value + new_value = re.sub(r'\{starttime24\}', '{time24}', new_value) + new_value = re.sub(r'\{starttime\}', '{time}', new_value) + + if new_value != original_value: + custom_props[field] = new_value + modified = True + + if modified: + source.custom_properties = custom_props + source.save(update_fields=['custom_properties']) + updated_count += 1 + + if updated_count > 0: + print(f"Reverse migration complete: Reverted {updated_count} dummy EPG source(s) to old placeholder names.") + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0019_alter_programdata_sub_title'), + ] + + operations = [ + migrations.RunPython(migrate_time_placeholders, reverse_migration), + ] diff --git a/apps/epg/migrations/0021_epgsource_priority.py b/apps/epg/migrations/0021_epgsource_priority.py new file mode 100644 index 00000000..f2696d67 --- /dev/null +++ b/apps/epg/migrations/0021_epgsource_priority.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-12-05 15:24 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0020_migrate_time_to_starttime_placeholders'), + ] + + operations = [ + migrations.AddField( + model_name='epgsource', + name='priority', + 
field=models.PositiveIntegerField(default=0, help_text='Priority for EPG matching (higher numbers = higher priority). Used when multiple EPG sources have matching entries for a channel.'), + ), + ] diff --git a/apps/epg/models.py b/apps/epg/models.py index 6c70add2..b3696edc 100644 --- a/apps/epg/models.py +++ b/apps/epg/models.py @@ -45,6 +45,10 @@ class EPGSource(models.Model): null=True, help_text="Custom properties for dummy EPG configuration (regex patterns, timezone, duration, etc.)" ) + priority = models.PositiveIntegerField( + default=0, + help_text="Priority for EPG matching (higher numbers = higher priority). Used when multiple EPG sources have matching entries for a channel." + ) status = models.CharField( max_length=20, choices=STATUS_CHOICES, @@ -155,7 +159,7 @@ class ProgramData(models.Model): start_time = models.DateTimeField() end_time = models.DateTimeField() title = models.CharField(max_length=255) - sub_title = models.CharField(max_length=255, blank=True, null=True) + sub_title = models.TextField(blank=True, null=True) description = models.TextField(blank=True, null=True) tvg_id = models.CharField(max_length=255, null=True, blank=True) custom_properties = models.JSONField(default=dict, blank=True, null=True) diff --git a/apps/epg/serializers.py b/apps/epg/serializers.py index 3404cca9..e4d5f466 100644 --- a/apps/epg/serializers.py +++ b/apps/epg/serializers.py @@ -4,7 +4,7 @@ from .models import EPGSource, EPGData, ProgramData from apps.channels.models import Channel class EPGSourceSerializer(serializers.ModelSerializer): - epg_data_ids = serializers.SerializerMethodField() + epg_data_count = serializers.SerializerMethodField() read_only_fields = ['created_at', 'updated_at'] url = serializers.CharField( required=False, @@ -24,16 +24,18 @@ class EPGSourceSerializer(serializers.ModelSerializer): 'is_active', 'file_path', 'refresh_interval', + 'priority', 'status', 'last_message', 'created_at', 'updated_at', 'custom_properties', - 'epg_data_ids' + 'epg_data_count' ] - def get_epg_data_ids(self, obj): - return list(obj.epgs.values_list('id', flat=True)) + def get_epg_data_count(self, obj): + """Return the count of EPG data entries instead of all IDs to prevent large payloads""" + return obj.epgs.count() class ProgramDataSerializer(serializers.ModelSerializer): class Meta: diff --git a/apps/epg/tasks.py b/apps/epg/tasks.py index 2028cd98..97552171 100644 --- a/apps/epg/tasks.py +++ b/apps/epg/tasks.py @@ -24,7 +24,7 @@ from asgiref.sync import async_to_sync from channels.layers import get_channel_layer from .models import EPGSource, EPGData, ProgramData -from core.utils import acquire_task_lock, release_task_lock, send_websocket_update, cleanup_memory +from core.utils import acquire_task_lock, release_task_lock, send_websocket_update, cleanup_memory, log_system_event logger = logging.getLogger(__name__) @@ -286,11 +286,12 @@ def fetch_xmltv(source): logger.info(f"Fetching XMLTV data from source: {source.name}") try: # Get default user agent from settings - default_user_agent_setting = CoreSettings.objects.filter(key='default-user-agent').first() + stream_settings = CoreSettings.get_stream_settings() user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:138.0) Gecko/20100101 Firefox/138.0" # Fallback default - if default_user_agent_setting and default_user_agent_setting.value: + default_user_agent_id = stream_settings.get('default_user_agent') + if default_user_agent_id: try: - user_agent_obj = UserAgent.objects.filter(id=int(default_user_agent_setting.value)).first() + 
user_agent_obj = UserAgent.objects.filter(id=int(default_user_agent_id)).first() if user_agent_obj and user_agent_obj.user_agent: user_agent = user_agent_obj.user_agent logger.debug(f"Using default user agent: {user_agent}") @@ -1157,6 +1158,12 @@ def parse_programs_for_tvg_id(epg_id): epg = EPGData.objects.get(id=epg_id) epg_source = epg.epg_source + # Skip program parsing for dummy EPG sources - they don't have program data files + if epg_source.source_type == 'dummy': + logger.info(f"Skipping program parsing for dummy EPG source {epg_source.name} (ID: {epg_id})") + release_task_lock('parse_epg_programs', epg_id) + return + if not Channel.objects.filter(epg_data=epg).exists(): logger.info(f"No channels matched to EPG {epg.tvg_id}") release_task_lock('parse_epg_programs', epg_id) @@ -1387,11 +1394,23 @@ def parse_programs_for_tvg_id(epg_id): def parse_programs_for_source(epg_source, tvg_id=None): + """ + Parse programs for all MAPPED channels from an EPG source in a single pass. + + This is an optimized version that: + 1. Only processes EPG entries that are actually mapped to channels + 2. Parses the XML file ONCE instead of once per channel + 3. Skips programmes for unmapped channels entirely during parsing + + This dramatically improves performance when an EPG source has many channels + but only a fraction are mapped. + """ # Send initial programs parsing notification send_epg_update(epg_source.id, "parsing_programs", 0) should_log_memory = False process = None initial_memory = 0 + source_file = None # Add memory tracking only in trace mode or higher try: @@ -1411,91 +1430,251 @@ def parse_programs_for_source(epg_source, tvg_id=None): should_log_memory = False try: - # Process EPG entries in batches rather than all at once - batch_size = 20 # Process fewer channels at once to reduce memory usage - epg_count = EPGData.objects.filter(epg_source=epg_source).count() + # Only get EPG entries that are actually mapped to channels + mapped_epg_ids = set( + Channel.objects.filter( + epg_data__epg_source=epg_source, + epg_data__isnull=False + ).values_list('epg_data_id', flat=True) + ) - if epg_count == 0: - logger.info(f"No EPG entries found for source: {epg_source.name}") - # Update status - this is not an error, just no entries + if not mapped_epg_ids: + total_epg_count = EPGData.objects.filter(epg_source=epg_source).count() + logger.info(f"No channels mapped to any EPG entries from source: {epg_source.name} " + f"(source has {total_epg_count} EPG entries, 0 mapped)") + # Update status - this is not an error, just no mapped entries epg_source.status = 'success' - epg_source.save(update_fields=['status']) + epg_source.last_message = f"No channels mapped to this EPG source ({total_epg_count} entries available)" + epg_source.save(update_fields=['status', 'last_message']) send_epg_update(epg_source.id, "parsing_programs", 100, status="success") return True - logger.info(f"Parsing programs for {epg_count} EPG entries from source: {epg_source.name}") + # Get the mapped EPG entries with their tvg_ids + mapped_epgs = EPGData.objects.filter(id__in=mapped_epg_ids).values('id', 'tvg_id') + tvg_id_to_epg_id = {epg['tvg_id']: epg['id'] for epg in mapped_epgs if epg['tvg_id']} + mapped_tvg_ids = set(tvg_id_to_epg_id.keys()) - failed_entries = [] - program_count = 0 - channel_count = 0 - updated_count = 0 - processed = 0 - # Process in batches using cursor-based approach to limit memory usage - last_id = 0 - while True: - # Get a batch of EPG entries - batch_entries = list(EPGData.objects.filter( - 
epg_source=epg_source, - id__gt=last_id - ).order_by('id')[:batch_size]) + total_epg_count = EPGData.objects.filter(epg_source=epg_source).count() + mapped_count = len(mapped_tvg_ids) - if not batch_entries: - break # No more entries to process + logger.info(f"Parsing programs for {mapped_count} MAPPED channels from source: {epg_source.name} " + f"(skipping {total_epg_count - mapped_count} unmapped EPG entries)") - # Update last_id for next iteration - last_id = batch_entries[-1].id + # Get the file path + file_path = epg_source.extracted_file_path if epg_source.extracted_file_path else epg_source.file_path + if not file_path: + file_path = epg_source.get_cache_file() - # Process this batch - for epg in batch_entries: - if epg.tvg_id: - try: - result = parse_programs_for_tvg_id(epg.id) - if result == "Task already running": - logger.info(f"Program parse for {epg.id} already in progress, skipping") + # Check if the file exists + if not os.path.exists(file_path): + logger.error(f"EPG file not found at: {file_path}") - processed += 1 - progress = min(95, int((processed / epg_count) * 100)) if epg_count > 0 else 50 - send_epg_update(epg_source.id, "parsing_programs", progress) - except Exception as e: - logger.error(f"Error parsing programs for tvg_id={epg.tvg_id}: {e}", exc_info=True) - failed_entries.append(f"{epg.tvg_id}: {str(e)}") + if epg_source.url: + # Update the file path in the database + new_path = epg_source.get_cache_file() + logger.info(f"Updating file_path from '{file_path}' to '{new_path}'") + epg_source.file_path = new_path + epg_source.save(update_fields=['file_path']) + logger.info(f"Fetching new EPG data from URL: {epg_source.url}") - # Force garbage collection after each batch - batch_entries = None # Remove reference to help garbage collection + # Fetch new data before continuing + fetch_success = fetch_xmltv(epg_source) + + if not fetch_success: + logger.error(f"Failed to fetch EPG data for source: {epg_source.name}") + epg_source.status = 'error' + epg_source.last_message = f"Failed to download EPG data" + epg_source.save(update_fields=['status', 'last_message']) + send_epg_update(epg_source.id, "parsing_programs", 100, status="error", error="Failed to download EPG file") + return False + + # Update file_path with the new location + file_path = epg_source.extracted_file_path if epg_source.extracted_file_path else epg_source.file_path + else: + logger.error(f"No URL provided for EPG source {epg_source.name}, cannot fetch new data") + epg_source.status = 'error' + epg_source.last_message = f"No URL provided, cannot fetch EPG data" + epg_source.save(update_fields=['status', 'last_message']) + send_epg_update(epg_source.id, "parsing_programs", 100, status="error", error="No URL provided") + return False + + # SINGLE PASS PARSING: Parse the XML file once and collect all programs in memory + # We parse FIRST, then do an atomic delete+insert to avoid race conditions + # where clients might see empty/partial EPG data during the transition + all_programs_to_create = [] + programs_by_channel = {tvg_id: 0 for tvg_id in mapped_tvg_ids} # Track count per channel + total_programs = 0 + skipped_programs = 0 + last_progress_update = 0 + + try: + logger.debug(f"Opening file for single-pass parsing: {file_path}") + source_file = open(file_path, 'rb') + + # Stream parse the file using lxml's iterparse + program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True, recover=True) + + for _, elem in program_parser: + channel_id = elem.get('channel') 
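As an aside to this hunk: the single-pass approach above boils down to streaming `programme` elements with lxml's iterparse and discarding anything whose channel is not in the mapped set. A minimal standalone sketch of that pattern follows; the helper name, file handling, and cleanup code are generic stand-ins for the real loop with its ProgramData rows, progress updates, and clear_element helper.

from lxml import etree

def count_mapped_programmes(xmltv_path, mapped_tvg_ids):
    # Stream the file once; keep only programmes whose channel is mapped.
    counts = {tvg_id: 0 for tvg_id in mapped_tvg_ids}
    skipped = 0
    with open(xmltv_path, 'rb') as fh:
        for _, elem in etree.iterparse(fh, events=('end',), tag='programme', recover=True):
            channel_id = elem.get('channel')
            if channel_id in counts:
                counts[channel_id] += 1
            else:
                skipped += 1
            # Release parsed nodes so memory stays roughly flat on large guides
            elem.clear()
            while elem.getprevious() is not None:
                del elem.getparent()[0]
    return counts, skipped

The tag filter means only programme elements are delivered to the loop, and clearing each element plus its already-processed siblings is what keeps the parse memory-bounded even for multi-hundred-megabyte XMLTV files.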
+ + # Skip programmes for unmapped channels immediately + if channel_id not in mapped_tvg_ids: + skipped_programs += 1 + # Clear element to free memory + clear_element(elem) + continue + + # This programme is for a mapped channel - process it + try: + start_time = parse_xmltv_time(elem.get('start')) + end_time = parse_xmltv_time(elem.get('stop')) + title = None + desc = None + sub_title = None + + # Efficiently process child elements + for child in elem: + if child.tag == 'title': + title = child.text or 'No Title' + elif child.tag == 'desc': + desc = child.text or '' + elif child.tag == 'sub-title': + sub_title = child.text or '' + + if not title: + title = 'No Title' + + # Extract custom properties + custom_props = extract_custom_properties(elem) + custom_properties_json = custom_props if custom_props else None + + epg_id = tvg_id_to_epg_id[channel_id] + all_programs_to_create.append(ProgramData( + epg_id=epg_id, + start_time=start_time, + end_time=end_time, + title=title, + description=desc, + sub_title=sub_title, + tvg_id=channel_id, + custom_properties=custom_properties_json + )) + total_programs += 1 + programs_by_channel[channel_id] += 1 + + # Clear the element to free memory + clear_element(elem) + + # Send progress update (estimate based on programs processed) + if total_programs - last_progress_update >= 5000: + last_progress_update = total_programs + # Cap at 70% during parsing phase (save 30% for DB operations) + progress = min(70, 10 + int((total_programs / max(total_programs + 10000, 1)) * 60)) + send_epg_update(epg_source.id, "parsing_programs", progress, + processed=total_programs, channels=mapped_count) + + # Periodic garbage collection during parsing + if total_programs % 5000 == 0: + gc.collect() + + except Exception as e: + logger.error(f"Error processing program for {channel_id}: {e}", exc_info=True) + clear_element(elem) + continue + + except etree.XMLSyntaxError as xml_error: + logger.error(f"XML syntax error parsing program data: {xml_error}") + epg_source.status = EPGSource.STATUS_ERROR + epg_source.last_message = f"XML parsing error: {str(xml_error)}" + epg_source.save(update_fields=['status', 'last_message']) + send_epg_update(epg_source.id, "parsing_programs", 100, status="error", message=str(xml_error)) + return False + except Exception as e: + logger.error(f"Error parsing XML for programs: {e}", exc_info=True) + raise + finally: + if source_file: + source_file.close() + source_file = None + + # Now perform atomic delete + bulk insert + # This ensures clients never see empty/partial EPG data + logger.info(f"Parsed {total_programs} programs, performing atomic database update...") + send_epg_update(epg_source.id, "parsing_programs", 75, message="Updating database...") + + batch_size = 1000 + try: + with transaction.atomic(): + # Delete existing programs for mapped EPGs + deleted_count = ProgramData.objects.filter(epg_id__in=mapped_epg_ids).delete()[0] + logger.debug(f"Deleted {deleted_count} existing programs") + + # Clean up orphaned programs for unmapped EPG entries + unmapped_epg_ids = list(EPGData.objects.filter( + epg_source=epg_source + ).exclude(id__in=mapped_epg_ids).values_list('id', flat=True)) + + if unmapped_epg_ids: + orphaned_count = ProgramData.objects.filter(epg_id__in=unmapped_epg_ids).delete()[0] + if orphaned_count > 0: + logger.info(f"Cleaned up {orphaned_count} orphaned programs for {len(unmapped_epg_ids)} unmapped EPG entries") + + # Bulk insert all new programs in batches within the same transaction + for i in range(0, 
len(all_programs_to_create), batch_size): + batch = all_programs_to_create[i:i + batch_size] + ProgramData.objects.bulk_create(batch) + + # Update progress during insertion + progress = 75 + int((i / len(all_programs_to_create)) * 20) if all_programs_to_create else 95 + if i % (batch_size * 5) == 0: + send_epg_update(epg_source.id, "parsing_programs", min(95, progress), + message=f"Inserting programs... {i}/{len(all_programs_to_create)}") + + logger.info(f"Atomic update complete: deleted {deleted_count}, inserted {total_programs} programs") + + except Exception as db_error: + logger.error(f"Database error during atomic update: {db_error}", exc_info=True) + epg_source.status = EPGSource.STATUS_ERROR + epg_source.last_message = f"Database error: {str(db_error)}" + epg_source.save(update_fields=['status', 'last_message']) + send_epg_update(epg_source.id, "parsing_programs", 100, status="error", message=str(db_error)) + return False + finally: + # Clear the large list to free memory + all_programs_to_create = None gc.collect() - # If there were failures, include them in the message but continue - if failed_entries: - epg_source.status = EPGSource.STATUS_SUCCESS # Still mark as success if some processed - error_summary = f"Failed to parse {len(failed_entries)} of {epg_count} entries" - stats_summary = f"Processed {program_count} programs across {channel_count} channels. Updated: {updated_count}." - epg_source.last_message = f"{stats_summary} Warning: {error_summary}" - epg_source.updated_at = timezone.now() - epg_source.save(update_fields=['status', 'last_message', 'updated_at']) + # Count channels that actually got programs + channels_with_programs = sum(1 for count in programs_by_channel.values() if count > 0) - # Send completion notification with mixed status - send_epg_update(epg_source.id, "parsing_programs", 100, - status="success", - message=epg_source.last_message) - - # Explicitly release memory of large lists before returning - del failed_entries - gc.collect() - - return True - - # If all successful, set a comprehensive success message + # Success message epg_source.status = EPGSource.STATUS_SUCCESS - epg_source.last_message = f"Successfully processed {program_count} programs across {channel_count} channels. Updated: {updated_count}." 
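The delete-plus-bulk_create block above is the part that keeps clients from ever seeing a half-populated guide. A stripped-down sketch of the same pattern, assuming any model with an epg_id column; this is an illustration of the idea, not the task itself:

from django.db import transaction

def replace_programs_atomically(model, epg_ids, new_rows, batch_size=1000):
    # One transaction: readers keep seeing the old rows until the commit,
    # so there is no window with an empty or half-filled guide.
    with transaction.atomic():
        deleted, _ = model.objects.filter(epg_id__in=epg_ids).delete()
        for i in range(0, len(new_rows), batch_size):
            model.objects.bulk_create(new_rows[i:i + batch_size])
    return deleted, len(new_rows)

On PostgreSQL's default READ COMMITTED isolation, concurrent readers see the pre-commit rows for the whole duration of the transaction, which is what makes the swap appear atomic from the client's point of view.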
+ epg_source.last_message = ( + f"Parsed {total_programs:,} programs for {channels_with_programs} channels " + f"(skipped {skipped_programs:,} programs for {total_epg_count - mapped_count} unmapped channels)" + ) epg_source.updated_at = timezone.now() epg_source.save(update_fields=['status', 'last_message', 'updated_at']) + # Log system event for EPG refresh + log_system_event( + event_type='epg_refresh', + source_name=epg_source.name, + programs=total_programs, + channels=channels_with_programs, + skipped_programs=skipped_programs, + unmapped_channels=total_epg_count - mapped_count, + ) + # Send completion notification with status send_epg_update(epg_source.id, "parsing_programs", 100, status="success", - message=epg_source.last_message) + message=epg_source.last_message, + updated_at=epg_source.updated_at.isoformat()) - logger.info(f"Completed parsing all programs for source: {epg_source.name}") + logger.info(f"Completed parsing programs for source: {epg_source.name} - " + f"{total_programs:,} programs for {channels_with_programs} channels, " + f"skipped {skipped_programs:,} programs for unmapped channels") return True except Exception as e: @@ -1510,14 +1689,19 @@ def parse_programs_for_source(epg_source, tvg_id=None): return False finally: # Final memory cleanup and tracking - + if source_file: + try: + source_file.close() + except: + pass + source_file = None # Explicitly release any remaining large data structures - failed_entries = None - program_count = None - channel_count = None - updated_count = None - processed = None + programs_to_create = None + programs_by_channel = None + mapped_epg_ids = None + mapped_tvg_ids = None + tvg_id_to_epg_id = None gc.collect() # Add comprehensive memory cleanup at the end @@ -1531,12 +1715,13 @@ def fetch_schedules_direct(source): logger.info(f"Fetching Schedules Direct data from source: {source.name}") try: # Get default user agent from settings - default_user_agent_setting = CoreSettings.objects.filter(key='default-user-agent').first() + stream_settings = CoreSettings.get_stream_settings() user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:138.0) Gecko/20100101 Firefox/138.0" # Fallback default + default_user_agent_id = stream_settings.get('default_user_agent') - if default_user_agent_setting and default_user_agent_setting.value: + if default_user_agent_id: try: - user_agent_obj = UserAgent.objects.filter(id=int(default_user_agent_setting.value)).first() + user_agent_obj = UserAgent.objects.filter(id=int(default_user_agent_id)).first() if user_agent_obj and user_agent_obj.user_agent: user_agent = user_agent_obj.user_agent logger.debug(f"Using default user agent: {user_agent}") diff --git a/apps/m3u/api_views.py b/apps/m3u/api_views.py index 878ae7c6..1f16f20f 100644 --- a/apps/m3u/api_views.py +++ b/apps/m3u/api_views.py @@ -152,6 +152,46 @@ class M3UAccountViewSet(viewsets.ModelViewSet): and not old_vod_enabled and new_vod_enabled ): + # Create Uncategorized categories immediately so they're available in the UI + from apps.vod.models import VODCategory, M3UVODCategoryRelation + + # Create movie Uncategorized category + movie_category, _ = VODCategory.objects.get_or_create( + name="Uncategorized", + category_type="movie", + defaults={} + ) + + # Create series Uncategorized category + series_category, _ = VODCategory.objects.get_or_create( + name="Uncategorized", + category_type="series", + defaults={} + ) + + # Create relations for both categories (disabled by default until first refresh) + account_custom_props = 
instance.custom_properties or {} + auto_enable_new = account_custom_props.get("auto_enable_new_groups_vod", True) + + M3UVODCategoryRelation.objects.get_or_create( + category=movie_category, + m3u_account=instance, + defaults={ + 'enabled': auto_enable_new, + 'custom_properties': {} + } + ) + + M3UVODCategoryRelation.objects.get_or_create( + category=series_category, + m3u_account=instance, + defaults={ + 'enabled': auto_enable_new, + 'custom_properties': {} + } + ) + + # Trigger full VOD refresh from apps.vod.tasks import refresh_vod_content refresh_vod_content.delay(instance.id) diff --git a/apps/m3u/tasks.py b/apps/m3u/tasks.py index d29c294b..ed9eb465 100644 --- a/apps/m3u/tasks.py +++ b/apps/m3u/tasks.py @@ -24,11 +24,13 @@ from core.utils import ( acquire_task_lock, release_task_lock, natural_sort_key, + log_system_event, ) from core.models import CoreSettings, UserAgent from asgiref.sync import async_to_sync from core.xtream_codes import Client as XCClient from core.utils import send_websocket_update +from .utils import normalize_stream_url logger = logging.getLogger(__name__) @@ -219,6 +221,10 @@ def fetch_m3u_lines(account, use_cache=False): # Has HTTP URLs, might be a simple M3U without headers is_valid_m3u = True logger.info("Content validated as M3U: contains HTTP URLs") + elif any(line.strip().startswith(('rtsp', 'rtp', 'udp')) for line in content_lines): + # Has RTSP/RTP/UDP URLs, might be a simple M3U without headers + is_valid_m3u = True + logger.info("Content validated as M3U: contains RTSP/RTP/UDP URLs") if not is_valid_m3u: # Log what we actually received for debugging @@ -434,25 +440,51 @@ def get_case_insensitive_attr(attributes, key, default=""): def parse_extinf_line(line: str) -> dict: """ Parse an EXTINF line from an M3U file. - This function removes the "#EXTINF:" prefix, then splits the remaining - string on the first comma that is not enclosed in quotes. + This function removes the "#EXTINF:" prefix, then extracts all key="value" attributes, + and treats everything after the last attribute as the display name. Returns a dictionary with: - 'attributes': a dict of attribute key/value pairs (e.g. tvg-id, tvg-logo, group-title) - - 'display_name': the text after the comma (the fallback display name) + - 'display_name': the text after the attributes (the fallback display name) - 'name': the value from tvg-name (if present) or the display name otherwise. """ if not line.startswith("#EXTINF:"): return None content = line[len("#EXTINF:") :].strip() - # Split on the first comma that is not inside quotes. - parts = re.split(r',(?=(?:[^"]*"[^"]*")*[^"]*$)', content, maxsplit=1) - if len(parts) != 2: - return None - attributes_part, display_name = parts[0], parts[1].strip() - attrs = dict(re.findall(r'([^\s]+)=["\']([^"\']+)["\']', attributes_part)) - # Use tvg-name attribute if available; otherwise, use the display name. 
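For the attribute-first parsing described in the docstring above, and implemented in the lines that follow, here is a self-contained sketch. It uses a plain back-referenced quote match rather than the patch's exact regex and skips the case-insensitive and tvc-guide-title fallbacks, so treat it as an illustration of the approach only:

import re

_ATTR = re.compile(r'(\S+)=(["\'])(.*?)\2')

def parse_extinf(line):
    # Attribute-first parsing: collect every key="value" pair, then treat the
    # remainder (minus a leading comma) as the display name.
    if not line.startswith("#EXTINF:"):
        return None
    content = line[len("#EXTINF:"):].strip()
    attrs, last_end = {}, 0
    for m in _ATTR.finditer(content):
        attrs[m.group(1)] = m.group(3)
        last_end = m.end()
    if last_end:
        display = content[last_end:].strip().lstrip(',').strip()
    else:
        # No attributes at all: fall back to the classic comma split
        display = content.split(',', 1)[-1].strip()
    return {"attributes": attrs, "display_name": display,
            "name": attrs.get("tvg-name") or display}

# parse_extinf('#EXTINF:-1 tvg-id="bbc1" group-title="UK",BBC One')
# -> {'attributes': {'tvg-id': 'bbc1', 'group-title': 'UK'},
#     'display_name': 'BBC One', 'name': 'BBC One'}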
- name = get_case_insensitive_attr(attrs, "tvg-name", display_name) + + # Single pass: extract all attributes AND track the last attribute position + # This regex matches both key="value" and key='value' patterns + attrs = {} + last_attr_end = 0 + + # Use a single regex that handles both quote types + for match in re.finditer(r'([^\s]+)=(["\'])([^\2]*?)\2', content): + key = match.group(1) + value = match.group(3) + attrs[key] = value + last_attr_end = match.end() + + # Everything after the last attribute (skipping leading comma and whitespace) is the display name + if last_attr_end > 0: + remaining = content[last_attr_end:].strip() + # Remove leading comma if present + if remaining.startswith(','): + remaining = remaining[1:].strip() + display_name = remaining + else: + # No attributes found, try the old comma-split method as fallback + parts = content.split(',', 1) + if len(parts) == 2: + display_name = parts[1].strip() + else: + display_name = content.strip() + + # Use tvg-name attribute if available; otherwise try tvc-guide-title, then fall back to display name. + name = get_case_insensitive_attr(attrs, "tvg-name", None) + if not name: + name = get_case_insensitive_attr(attrs, "tvc-guide-title", None) + if not name: + name = display_name return {"attributes": attrs, "display_name": display_name, "name": name} @@ -481,7 +513,19 @@ def check_field_lengths(streams_to_create): @shared_task -def process_groups(account, groups): +def process_groups(account, groups, scan_start_time=None): + """Process groups and update their relationships with the M3U account. + + Args: + account: M3UAccount instance + groups: Dict of group names to custom properties + scan_start_time: Timestamp when the scan started (for consistent last_seen marking) + """ + # Use scan_start_time if provided, otherwise current time + # This ensures consistency with stream processing and cleanup logic + if scan_start_time is None: + scan_start_time = timezone.now() + existing_groups = { group.name: group for group in ChannelGroup.objects.filter(name__in=groups.keys()) @@ -521,24 +565,8 @@ def process_groups(account, groups): ).select_related('channel_group') } - # Get ALL existing relationships for this account to identify orphaned ones - all_existing_relationships = { - rel.channel_group.name: rel - for rel in ChannelGroupM3UAccount.objects.filter( - m3u_account=account - ).select_related('channel_group') - } - relations_to_create = [] relations_to_update = [] - relations_to_delete = [] - - # Find orphaned relationships (groups that no longer exist in the source) - current_group_names = set(groups.keys()) - for group_name, rel in all_existing_relationships.items(): - if group_name not in current_group_names: - relations_to_delete.append(rel) - logger.debug(f"Marking relationship for deletion: group '{group_name}' no longer exists in source for account {account.id}") for group in all_group_objs: custom_props = groups.get(group.name, {}) @@ -565,9 +593,15 @@ def process_groups(account, groups): del updated_custom_props["xc_id"] existing_rel.custom_properties = updated_custom_props + existing_rel.last_seen = scan_start_time + existing_rel.is_stale = False relations_to_update.append(existing_rel) logger.debug(f"Updated xc_id for group '{group.name}' from '{existing_xc_id}' to '{new_xc_id}' - account {account.id}") else: + # Update last_seen even if xc_id hasn't changed + existing_rel.last_seen = scan_start_time + existing_rel.is_stale = False + relations_to_update.append(existing_rel) logger.debug(f"xc_id unchanged for group 
'{group.name}' - account {account.id}") else: # Create new relationship - this group is new to this M3U account @@ -581,6 +615,8 @@ def process_groups(account, groups): m3u_account=account, custom_properties=custom_props, enabled=auto_enable_new_groups_live, + last_seen=scan_start_time, + is_stale=False, ) ) @@ -591,15 +627,38 @@ def process_groups(account, groups): # Bulk update existing relationships if relations_to_update: - ChannelGroupM3UAccount.objects.bulk_update(relations_to_update, ['custom_properties']) - logger.info(f"Updated {len(relations_to_update)} existing group relationships with new xc_id values for account {account.id}") + ChannelGroupM3UAccount.objects.bulk_update(relations_to_update, ['custom_properties', 'last_seen', 'is_stale']) + logger.info(f"Updated {len(relations_to_update)} existing group relationships for account {account.id}") - # Delete orphaned relationships - if relations_to_delete: - ChannelGroupM3UAccount.objects.filter( - id__in=[rel.id for rel in relations_to_delete] - ).delete() - logger.info(f"Deleted {len(relations_to_delete)} orphaned group relationships for account {account.id}: {[rel.channel_group.name for rel in relations_to_delete]}") + +def cleanup_stale_group_relationships(account, scan_start_time): + """ + Remove group relationships that haven't been seen since the stale retention period. + This follows the same logic as stream cleanup for consistency. + """ + # Calculate cutoff date for stale group relationships + stale_cutoff = scan_start_time - timezone.timedelta(days=account.stale_stream_days) + logger.info( + f"Removing group relationships not seen since {stale_cutoff} for M3U account {account.id}" + ) + + # Find stale relationships + stale_relationships = ChannelGroupM3UAccount.objects.filter( + m3u_account=account, + last_seen__lt=stale_cutoff + ).select_related('channel_group') + + relations_to_delete = list(stale_relationships) + deleted_count = len(relations_to_delete) + + if deleted_count > 0: + logger.info( + f"Found {deleted_count} stale group relationships for account {account.id}: " + f"{[rel.channel_group.name for rel in relations_to_delete]}" + ) + + # Delete the stale relationships + stale_relationships.delete() # Check if any of the deleted relationships left groups with no remaining associations orphaned_group_ids = [] @@ -624,6 +683,10 @@ def process_groups(account, groups): deleted_groups = list(ChannelGroup.objects.filter(id__in=orphaned_group_ids).values_list('name', flat=True)) ChannelGroup.objects.filter(id__in=orphaned_group_ids).delete() logger.info(f"Deleted {len(orphaned_group_ids)} orphaned groups that had no remaining associations: {deleted_groups}") + else: + logger.debug(f"No stale group relationships found for account {account.id}") + + return deleted_count def collect_xc_streams(account_id, enabled_groups): @@ -760,7 +823,7 @@ def process_xc_category_direct(account_id, batch, groups, hash_keys): group_title = group_name stream_hash = Stream.generate_hash_key( - name, url, tvg_id, hash_keys, m3u_id=account_id + name, url, tvg_id, hash_keys, m3u_id=account_id, group=group_title ) stream_props = { "name": name, @@ -771,6 +834,7 @@ def process_xc_category_direct(account_id, batch, groups, hash_keys): "channel_group_id": int(group_id), "stream_hash": stream_hash, "custom_properties": stream, + "is_stale": False, } if stream_hash not in stream_hashes: @@ -806,10 +870,12 @@ def process_xc_category_direct(account_id, batch, groups, hash_keys): setattr(obj, key, value) obj.last_seen = timezone.now() obj.updated_at = 
timezone.now() # Update timestamp only for changed streams + obj.is_stale = False streams_to_update.append(obj) else: # Always update last_seen, even if nothing else changed obj.last_seen = timezone.now() + obj.is_stale = False # Don't update updated_at for unchanged streams streams_to_update.append(obj) @@ -820,6 +886,7 @@ def process_xc_category_direct(account_id, batch, groups, hash_keys): stream_props["updated_at"] = ( timezone.now() ) # Set initial updated_at for new streams + stream_props["is_stale"] = False streams_to_create.append(Stream(**stream_props)) try: @@ -831,7 +898,7 @@ def process_xc_category_direct(account_id, batch, groups, hash_keys): # Simplified bulk update for better performance Stream.objects.bulk_update( streams_to_update, - ['name', 'url', 'logo_url', 'tvg_id', 'custom_properties', 'last_seen', 'updated_at'], + ['name', 'url', 'logo_url', 'tvg_id', 'custom_properties', 'last_seen', 'updated_at', 'is_stale'], batch_size=150 # Smaller batch size for XC processing ) @@ -894,6 +961,12 @@ def process_m3u_batch_direct(account_id, batch, groups, hash_keys): for stream_info in batch: try: name, url = stream_info["name"], stream_info["url"] + + # Validate URL length - maximum of 4096 characters + if url and len(url) > 4096: + logger.warning(f"Skipping stream '{name}': URL too long ({len(url)} characters, max 4096)") + continue + tvg_id, tvg_logo = get_case_insensitive_attr( stream_info["attributes"], "tvg-id", "" ), get_case_insensitive_attr(stream_info["attributes"], "tvg-logo", "") @@ -928,7 +1001,7 @@ def process_m3u_batch_direct(account_id, batch, groups, hash_keys): ) continue - stream_hash = Stream.generate_hash_key(name, url, tvg_id, hash_keys, m3u_id=account_id) + stream_hash = Stream.generate_hash_key(name, url, tvg_id, hash_keys, m3u_id=account_id, group=group_title) stream_props = { "name": name, "url": url, @@ -938,6 +1011,7 @@ def process_m3u_batch_direct(account_id, batch, groups, hash_keys): "channel_group_id": int(groups.get(group_title)), "stream_hash": stream_hash, "custom_properties": stream_info["attributes"], + "is_stale": False, } if stream_hash not in stream_hashes: @@ -977,11 +1051,15 @@ def process_m3u_batch_direct(account_id, batch, groups, hash_keys): obj.custom_properties = stream_props["custom_properties"] obj.updated_at = timezone.now() + # Always mark as not stale since we saw it in this refresh + obj.is_stale = False + streams_to_update.append(obj) else: # New stream stream_props["last_seen"] = timezone.now() stream_props["updated_at"] = timezone.now() + stream_props["is_stale"] = False streams_to_create.append(Stream(**stream_props)) try: @@ -993,7 +1071,7 @@ def process_m3u_batch_direct(account_id, batch, groups, hash_keys): # Update all streams in a single bulk operation Stream.objects.bulk_update( streams_to_update, - ['name', 'url', 'logo_url', 'tvg_id', 'custom_properties', 'last_seen', 'updated_at'], + ['name', 'url', 'logo_url', 'tvg_id', 'custom_properties', 'last_seen', 'updated_at', 'is_stale'], batch_size=200 ) except Exception as e: @@ -1054,7 +1132,15 @@ def cleanup_streams(account_id, scan_start_time=timezone.now): @shared_task -def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False): +def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False, scan_start_time=None): + """Refresh M3U groups for an account. 
+ + Args: + account_id: ID of the M3U account + use_cache: Whether to use cached M3U file + full_refresh: Whether this is part of a full refresh + scan_start_time: Timestamp when the scan started (for consistent last_seen marking) + """ if not acquire_task_lock("refresh_m3u_account_groups", account_id): return f"Task already running for account_id={account_id}.", None @@ -1180,52 +1266,14 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False): auth_result = xc_client.authenticate() logger.debug(f"Authentication response: {auth_result}") - # Save account information to all active profiles + # Queue async profile refresh task to run in background + # This prevents any delay in the main refresh process try: - from apps.m3u.models import M3UAccountProfile - - profiles = M3UAccountProfile.objects.filter( - m3u_account=account, - is_active=True - ) - - # Update each profile with account information using its own transformed credentials - for profile in profiles: - try: - # Get transformed credentials for this specific profile - profile_url, profile_username, profile_password = get_transformed_credentials(account, profile) - - # Create a separate XC client for this profile's credentials - with XCClient( - profile_url, - profile_username, - profile_password, - user_agent_string - ) as profile_client: - # Authenticate with this profile's credentials - if profile_client.authenticate(): - # Get account information specific to this profile's credentials - profile_account_info = profile_client.get_account_info() - - # Merge with existing custom_properties if they exist - existing_props = profile.custom_properties or {} - existing_props.update(profile_account_info) - profile.custom_properties = existing_props - profile.save(update_fields=['custom_properties']) - - logger.info(f"Updated account information for profile '{profile.name}' with transformed credentials") - else: - logger.warning(f"Failed to authenticate profile '{profile.name}' with transformed credentials") - - except Exception as profile_error: - logger.error(f"Failed to update account information for profile '{profile.name}': {str(profile_error)}") - # Continue with other profiles even if one fails - - logger.info(f"Processed account information for {profiles.count()} profiles for account {account.name}") - - except Exception as save_error: - logger.warning(f"Failed to process profile account information: {str(save_error)}") - # Don't fail the whole process if saving account info fails + logger.info(f"Queueing background profile refresh for account {account.name}") + refresh_account_profiles.delay(account.id) + except Exception as e: + logger.warning(f"Failed to queue profile refresh task: {str(e)}") + # Don't fail the main refresh if profile refresh can't be queued except Exception as e: error_msg = f"Failed to authenticate with XC server: {str(e)}" @@ -1367,10 +1415,12 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False): ) problematic_lines.append((line_index + 1, line[:200])) - elif extinf_data and line.startswith("http"): + elif extinf_data and (line.startswith("http") or line.startswith("rtsp") or line.startswith("rtp") or line.startswith("udp")): url_count += 1 + # Normalize UDP URLs only (e.g., remove VLC-specific @ prefix) + normalized_url = normalize_stream_url(line) if line.startswith("udp") else line # Associate URL with the last EXTINF line - extinf_data[-1]["url"] = line + extinf_data[-1]["url"] = normalized_url valid_stream_count += 1 # Periodically log progress for large files @@ 
-1417,7 +1467,7 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False): send_m3u_update(account_id, "processing_groups", 0) - process_groups(account, groups) + process_groups(account, groups, scan_start_time) release_task_lock("refresh_m3u_account_groups", account_id) @@ -2230,6 +2280,106 @@ def get_transformed_credentials(account, profile=None): return base_url, base_username, base_password +@shared_task +def refresh_account_profiles(account_id): + """Refresh account information for all active profiles of an XC account. + + This task runs asynchronously in the background after account refresh completes. + It includes rate limiting delays between profile authentications to prevent provider bans. + """ + from django.conf import settings + import time + + try: + account = M3UAccount.objects.get(id=account_id, is_active=True) + + if account.account_type != M3UAccount.Types.XC: + logger.debug(f"Account {account_id} is not XC type, skipping profile refresh") + return f"Account {account_id} is not an XtreamCodes account" + + from apps.m3u.models import M3UAccountProfile + + profiles = M3UAccountProfile.objects.filter( + m3u_account=account, + is_active=True + ) + + if not profiles.exists(): + logger.info(f"No active profiles found for account {account.name}") + return f"No active profiles for account {account_id}" + + # Get user agent for this account + try: + user_agent_string = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" + if account.user_agent_id: + from core.models import UserAgent + ua_obj = UserAgent.objects.get(id=account.user_agent_id) + if ua_obj and hasattr(ua_obj, "user_agent") and ua_obj.user_agent: + user_agent_string = ua_obj.user_agent + except Exception as e: + logger.warning(f"Error getting user agent, using fallback: {str(e)}") + logger.debug(f"Using user agent for profile refresh: {user_agent_string}") + # Get rate limiting delay from settings + profile_delay = getattr(settings, 'XC_PROFILE_REFRESH_DELAY', 2.5) + + profiles_updated = 0 + profiles_failed = 0 + + logger.info(f"Starting background refresh for {profiles.count()} profiles of account {account.name}") + + for idx, profile in enumerate(profiles): + try: + # Add delay between profiles to prevent rate limiting (except for first profile) + if idx > 0: + logger.info(f"Waiting {profile_delay}s before refreshing next profile to avoid rate limiting") + time.sleep(profile_delay) + + # Get transformed credentials for this specific profile + profile_url, profile_username, profile_password = get_transformed_credentials(account, profile) + + # Create a separate XC client for this profile's credentials + with XCClient( + profile_url, + profile_username, + profile_password, + user_agent_string + ) as profile_client: + # Authenticate with this profile's credentials + if profile_client.authenticate(): + # Get account information specific to this profile's credentials + profile_account_info = profile_client.get_account_info() + + # Merge with existing custom_properties if they exist + existing_props = profile.custom_properties or {} + existing_props.update(profile_account_info) + profile.custom_properties = existing_props + profile.save(update_fields=['custom_properties']) + + profiles_updated += 1 + logger.info(f"Updated account information for profile '{profile.name}' ({profiles_updated}/{profiles.count()})") + else: + profiles_failed += 1 + logger.warning(f"Failed to authenticate profile '{profile.name}' with transformed credentials") + + except Exception as profile_error: + profiles_failed += 
1 + logger.error(f"Failed to update account information for profile '{profile.name}': {str(profile_error)}") + # Continue with other profiles even if one fails + + result_msg = f"Profile refresh complete for account {account.name}: {profiles_updated} updated, {profiles_failed} failed" + logger.info(result_msg) + return result_msg + + except M3UAccount.DoesNotExist: + error_msg = f"Account {account_id} not found" + logger.error(error_msg) + return error_msg + except Exception as e: + error_msg = f"Error refreshing profiles for account {account_id}: {str(e)}" + logger.error(error_msg) + return error_msg + + @shared_task def refresh_account_info(profile_id): """Refresh only the account information for a specific M3U profile.""" @@ -2424,7 +2574,7 @@ def refresh_single_m3u_account(account_id): if not extinf_data: try: logger.info(f"Calling refresh_m3u_groups for account {account_id}") - result = refresh_m3u_groups(account_id, full_refresh=True) + result = refresh_m3u_groups(account_id, full_refresh=True, scan_start_time=refresh_start_timestamp) logger.trace(f"refresh_m3u_groups result: {result}") # Check for completely empty result or missing groups @@ -2704,9 +2854,26 @@ def refresh_single_m3u_account(account_id): id=-1 ).exists() # This will never find anything but ensures DB sync + # Mark streams that weren't seen in this refresh as stale (pending deletion) + stale_stream_count = Stream.objects.filter( + m3u_account=account, + last_seen__lt=refresh_start_timestamp + ).update(is_stale=True) + logger.info(f"Marked {stale_stream_count} streams as stale for account {account_id}") + + # Mark group relationships that weren't seen in this refresh as stale (pending deletion) + stale_group_count = ChannelGroupM3UAccount.objects.filter( + m3u_account=account, + last_seen__lt=refresh_start_timestamp + ).update(is_stale=True) + logger.info(f"Marked {stale_group_count} group relationships as stale for account {account_id}") + # Now run cleanup streams_deleted = cleanup_streams(account_id, refresh_start_timestamp) + # Cleanup stale group relationships (follows same retention policy as streams) + cleanup_stale_group_relationships(account, refresh_start_timestamp) + # Run auto channel sync after successful refresh auto_sync_message = "" try: @@ -2739,6 +2906,17 @@ def refresh_single_m3u_account(account_id): account.updated_at = timezone.now() account.save(update_fields=["status", "last_message", "updated_at"]) + # Log system event for M3U refresh + log_system_event( + event_type='m3u_refresh', + account_name=account.name, + elapsed_time=round(elapsed_time, 2), + streams_created=streams_created, + streams_updated=streams_updated, + streams_deleted=streams_deleted, + total_processed=streams_processed, + ) + # Send final update with complete metrics and explicitly include success status send_m3u_update( account_id, diff --git a/apps/m3u/utils.py b/apps/m3u/utils.py index 4e1027b2..598ef713 100644 --- a/apps/m3u/utils.py +++ b/apps/m3u/utils.py @@ -8,6 +8,34 @@ lock = threading.Lock() active_streams_map = {} logger = logging.getLogger(__name__) + +def normalize_stream_url(url): + """ + Normalize stream URLs for compatibility with FFmpeg. + + Handles VLC-specific syntax like udp://@239.0.0.1:1234 by removing the @ symbol. + FFmpeg doesn't recognize the @ prefix for multicast addresses. 
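A few illustrative checks of the behaviour this helper is documented to have in this patch: only the VLC-style udp://@ prefix is rewritten, everything else passes through unchanged. The import path matches the file being modified here; running the checks assumes the project is importable.

from apps.m3u.utils import normalize_stream_url

assert normalize_stream_url("udp://@239.0.0.1:1234") == "udp://239.0.0.1:1234"
assert normalize_stream_url("udp://239.0.0.1:1234") == "udp://239.0.0.1:1234"   # already clean
assert normalize_stream_url("rtp://@224.0.0.5:5000") == "rtp://@224.0.0.5:5000" # untouched for now
assert normalize_stream_url("") == ""                                           # falsy passthrough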
+ + Args: + url (str): The stream URL to normalize + + Returns: + str: The normalized URL + """ + if not url: + return url + + # Handle VLC-style UDP multicast URLs: udp://@239.0.0.1:1234 -> udp://239.0.0.1:1234 + # The @ symbol in VLC means "listen on all interfaces" but FFmpeg doesn't use this syntax + if url.startswith('udp://@'): + normalized = url.replace('udp://@', 'udp://', 1) + logger.debug(f"Normalized VLC-style UDP URL: {url} -> {normalized}") + return normalized + + # Could add other normalizations here in the future (rtp://@, etc.) + return url + + def increment_stream_count(account): with lock: current_usage = active_streams_map.get(account.id, 0) diff --git a/apps/output/tests.py b/apps/output/tests.py index e1e857ee..f87c8340 100644 --- a/apps/output/tests.py +++ b/apps/output/tests.py @@ -14,3 +14,26 @@ class OutputM3UTest(TestCase): self.assertEqual(response.status_code, 200) content = response.content.decode() self.assertIn("#EXTM3U", content) + + def test_generate_m3u_response_post_empty_body(self): + """ + Test that a POST request with an empty body returns 200 OK. + """ + url = reverse('output:generate_m3u') + + response = self.client.post(url, data=None, content_type='application/x-www-form-urlencoded') + content = response.content.decode() + + self.assertEqual(response.status_code, 200, "POST with empty body should return 200 OK") + self.assertIn("#EXTM3U", content) + + def test_generate_m3u_response_post_with_body(self): + """ + Test that a POST request with a non-empty body returns 403 Forbidden. + """ + url = reverse('output:generate_m3u') + + response = self.client.post(url, data={'evilstring': 'muhahaha'}) + + self.assertEqual(response.status_code, 403, "POST with body should return 403 Forbidden") + self.assertIn("POST requests with body are not allowed, body is:", response.content.decode()) diff --git a/apps/output/views.py b/apps/output/views.py index a695d05f..47798ee2 100644 --- a/apps/output/views.py +++ b/apps/output/views.py @@ -7,7 +7,6 @@ from django.views.decorators.csrf import csrf_exempt from django.views.decorators.http import require_http_methods from apps.epg.models import ProgramData from apps.accounts.models import User -from core.models import CoreSettings, NETWORK_ACCESS from dispatcharr.utils import network_access_allowed from django.utils import timezone as django_timezone from django.shortcuts import get_object_or_404 @@ -23,33 +22,110 @@ from django.db.models.functions import Lower import os from apps.m3u.utils import calculate_tuner_count import regex +from core.utils import log_system_event +import hashlib logger = logging.getLogger(__name__) +def get_client_identifier(request): + """Get client information including IP, user agent, and a unique hash identifier + + Returns: + tuple: (client_id_hash, client_ip, user_agent) + """ + # Get client IP (handle proxies) + x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR') + if x_forwarded_for: + client_ip = x_forwarded_for.split(',')[0].strip() + else: + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + + # Get user agent + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + + # Create a hash for a shorter cache key + client_str = f"{client_ip}:{user_agent}" + client_id_hash = hashlib.md5(client_str.encode()).hexdigest()[:12] + + return client_id_hash, client_ip, user_agent + def m3u_endpoint(request, profile_name=None, user=None): + logger.debug("m3u_endpoint called: method=%s, profile=%s", request.method, profile_name) if not network_access_allowed(request, "M3U_EPG"): 
+ # Log blocked M3U download + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + log_system_event( + event_type='m3u_blocked', + profile=profile_name or 'all', + reason='Network access denied', + client_ip=client_ip, + user_agent=user_agent, + ) return JsonResponse({"error": "Forbidden"}, status=403) + # Handle HEAD requests efficiently without generating content + if request.method == "HEAD": + logger.debug("Handling HEAD request for M3U") + response = HttpResponse(content_type="audio/x-mpegurl") + response["Content-Disposition"] = 'attachment; filename="channels.m3u"' + return response + return generate_m3u(request, profile_name, user) def epg_endpoint(request, profile_name=None, user=None): + logger.debug("epg_endpoint called: method=%s, profile=%s", request.method, profile_name) if not network_access_allowed(request, "M3U_EPG"): + # Log blocked EPG download + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + log_system_event( + event_type='epg_blocked', + profile=profile_name or 'all', + reason='Network access denied', + client_ip=client_ip, + user_agent=user_agent, + ) return JsonResponse({"error": "Forbidden"}, status=403) + # Handle HEAD requests efficiently without generating content + if request.method == "HEAD": + logger.debug("Handling HEAD request for EPG") + response = HttpResponse(content_type="application/xml") + response["Content-Disposition"] = 'attachment; filename="Dispatcharr.xml"' + response["Cache-Control"] = "no-cache" + return response + return generate_epg(request, profile_name, user) @csrf_exempt -@require_http_methods(["GET", "POST"]) +@require_http_methods(["GET", "POST", "HEAD"]) def generate_m3u(request, profile_name=None, user=None): """ Dynamically generate an M3U file from channels. The stream URL now points to the new stream_view that uses StreamProfile. Supports both GET and POST methods for compatibility with IPTVSmarters. 
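Before the function body below: the endpoints in this file lean on a small client fingerprint (X-Forwarded-For aware, md5 of IP plus User-Agent) together with a short-lived cache entry to suppress duplicate downloads and duplicate log events. A hypothetical pair of helpers sketching that combination; the names are illustrative, and the real code is get_client_identifier above plus the inline cache checks further down.

import hashlib
from django.core.cache import cache

def client_fingerprint(request):
    # Honour X-Forwarded-For when present, then fold IP + User-Agent into a
    # truncated md5 so the resulting cache key stays short.
    xff = request.META.get('HTTP_X_FORWARDED_FOR')
    ip = xff.split(',')[0].strip() if xff else request.META.get('REMOTE_ADDR', 'unknown')
    ua = request.META.get('HTTP_USER_AGENT', 'unknown')
    return hashlib.md5(f"{ip}:{ua}".encode()).hexdigest()[:12]

def should_log_download(request, key_prefix, ttl=2):
    # Return True only the first time a given client shows up inside the TTL window.
    key = f"{key_prefix}:{client_fingerprint(request)}"
    if cache.get(key):
        return False
    cache.set(key, True, ttl)
    return True

Django's cache.add would close the small race window between the get and the set, but the get/set pair mirrors what the patch does and is good enough for a best-effort dedup of double GETs.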
""" - logger.debug("Generating M3U for profile: %s, user: %s", profile_name, user.username if user else "Anonymous") + # Check if this is a POST request and the body is not empty (which we don't want to allow) + logger.debug("Generating M3U for profile: %s, user: %s, method: %s", profile_name, user.username if user else "Anonymous", request.method) + + # Check cache for recent identical request (helps with double-GET from browsers) + from django.core.cache import cache + cache_params = f"{profile_name or 'all'}:{user.username if user else 'anonymous'}:{request.GET.urlencode()}" + content_cache_key = f"m3u_content:{cache_params}" + + cached_content = cache.get(content_cache_key) + if cached_content: + logger.debug("Serving M3U from cache") + response = HttpResponse(cached_content, content_type="audio/x-mpegurl") + response["Content-Disposition"] = 'attachment; filename="channels.m3u"' + return response # Check if this is a POST request with data (which we don't want to allow) if request.method == "POST" and request.body: - return HttpResponseForbidden("POST requests with content are not allowed") + if request.body.decode() != '{}': + return HttpResponseForbidden("POST requests with body are not allowed, body is: {}".format(request.body.decode())) if user is not None: if user.user_level == 0: @@ -74,20 +150,17 @@ def generate_m3u(request, profile_name=None, user=None): else: if profile_name is not None: - channel_profile = ChannelProfile.objects.get(name=profile_name) + try: + channel_profile = ChannelProfile.objects.get(name=profile_name) + except ChannelProfile.DoesNotExist: + logger.warning("Requested channel profile (%s) during m3u generation does not exist", profile_name) + raise Http404(f"Channel profile '{profile_name}' not found") channels = Channel.objects.filter( channelprofilemembership__channel_profile=channel_profile, channelprofilemembership__enabled=True ).order_by('channel_number') else: - if profile_name is not None: - channel_profile = ChannelProfile.objects.get(name=profile_name) - channels = Channel.objects.filter( - channelprofilemembership__channel_profile=channel_profile, - channelprofilemembership__enabled=True, - ).order_by("channel_number") - else: - channels = Channel.objects.order_by("channel_number") + channels = Channel.objects.order_by("channel_number") # Check if the request wants to use direct logo URLs instead of cache use_cached_logos = request.GET.get('cachedlogos', 'true').lower() != 'false' @@ -100,16 +173,26 @@ def generate_m3u(request, profile_name=None, user=None): tvg_id_source = request.GET.get('tvg_id_source', 'channel_number').lower() # Build EPG URL with query parameters if needed - epg_base_url = build_absolute_uri_with_port(request, reverse('output:epg_endpoint', args=[profile_name]) if profile_name else reverse('output:epg_endpoint')) + # Check if this is an XC API request (has username/password in GET params and user is authenticated) + xc_username = request.GET.get('username') + xc_password = request.GET.get('password') - # Optionally preserve certain query parameters - preserved_params = ['tvg_id_source', 'cachedlogos', 'days'] - query_params = {k: v for k, v in request.GET.items() if k in preserved_params} - if query_params: - from urllib.parse import urlencode - epg_url = f"{epg_base_url}?{urlencode(query_params)}" + if user is not None and xc_username and xc_password: + # This is an XC API request - use XC-style EPG URL + base_url = build_absolute_uri_with_port(request, '') + epg_url = 
f"{base_url}/xmltv.php?username={xc_username}&password={xc_password}" else: - epg_url = epg_base_url + # Regular request - use standard EPG endpoint + epg_base_url = build_absolute_uri_with_port(request, reverse('output:epg_endpoint', args=[profile_name]) if profile_name else reverse('output:epg_endpoint')) + + # Optionally preserve certain query parameters + preserved_params = ['tvg_id_source', 'cachedlogos', 'days'] + query_params = {k: v for k, v in request.GET.items() if k in preserved_params} + if query_params: + from urllib.parse import urlencode + epg_url = f"{epg_base_url}?{urlencode(query_params)}" + else: + epg_url = epg_base_url # Add x-tvg-url and url-tvg attribute for EPG URL m3u_content = f'#EXTM3U x-tvg-url="{epg_url}" url-tvg="{epg_url}"\n' @@ -173,20 +256,83 @@ def generate_m3u(request, profile_name=None, user=None): stream_url = first_stream.url else: # Fall back to proxy URL if no direct URL available - base_url = request.build_absolute_uri('/')[:-1] - stream_url = f"{base_url}/proxy/ts/stream/{channel.uuid}" + stream_url = build_absolute_uri_with_port(request, f"/proxy/ts/stream/{channel.uuid}") else: # Standard behavior - use proxy URL - base_url = request.build_absolute_uri('/')[:-1] - stream_url = f"{base_url}/proxy/ts/stream/{channel.uuid}" + stream_url = build_absolute_uri_with_port(request, f"/proxy/ts/stream/{channel.uuid}") m3u_content += extinf_line + stream_url + "\n" + # Cache the generated content for 2 seconds to handle double-GET requests + cache.set(content_cache_key, m3u_content, 2) + + # Log system event for M3U download (with deduplication based on client) + client_id, client_ip, user_agent = get_client_identifier(request) + event_cache_key = f"m3u_download:{user.username if user else 'anonymous'}:{profile_name or 'all'}:{client_id}" + if not cache.get(event_cache_key): + log_system_event( + event_type='m3u_download', + profile=profile_name or 'all', + user=user.username if user else 'anonymous', + channels=channels.count(), + client_ip=client_ip, + user_agent=user_agent, + ) + cache.set(event_cache_key, True, 2) # Prevent duplicate events for 2 seconds + response = HttpResponse(m3u_content, content_type="audio/x-mpegurl") response["Content-Disposition"] = 'attachment; filename="channels.m3u"' return response +def generate_fallback_programs(channel_id, channel_name, now, num_days, program_length_hours, fallback_title, fallback_description): + """ + Generate dummy programs using custom fallback templates when patterns don't match. 
+ + Args: + channel_id: Channel ID for the programs + channel_name: Channel name to use as fallback in templates + now: Current datetime (in UTC) + num_days: Number of days to generate programs for + program_length_hours: Length of each program in hours + fallback_title: Custom fallback title template (empty string if not provided) + fallback_description: Custom fallback description template (empty string if not provided) + + Returns: + List of program dictionaries + """ + programs = [] + + # Use custom fallback title or channel name as default + title = fallback_title if fallback_title else channel_name + + # Use custom fallback description or a simple default message + if fallback_description: + description = fallback_description + else: + description = f"EPG information is currently unavailable for {channel_name}" + + # Create programs for each day + for day in range(num_days): + day_start = now + timedelta(days=day) + + # Create programs with specified length throughout the day + for hour_offset in range(0, 24, program_length_hours): + # Calculate program start and end times + start_time = day_start + timedelta(hours=hour_offset) + end_time = start_time + timedelta(hours=program_length_hours) + + programs.append({ + "channel_id": channel_id, + "start_time": start_time, + "end_time": end_time, + "title": title, + "description": description, + }) + + return programs + + def generate_dummy_programs(channel_id, channel_name, num_days=1, program_length_hours=4, epg_source=None): """ Generate dummy EPG programs for channels. @@ -216,11 +362,26 @@ def generate_dummy_programs(channel_id, channel_name, num_days=1, program_length epg_source.custom_properties ) # If custom generation succeeded, return those programs - # If it returned empty (pattern didn't match), fall through to default + # If it returned empty (pattern didn't match), check for custom fallback templates if custom_programs: return custom_programs else: - logger.info(f"Custom pattern didn't match for '{channel_name}', using default dummy EPG") + logger.info(f"Custom pattern didn't match for '{channel_name}', checking for custom fallback templates") + + # Check if custom fallback templates are provided + custom_props = epg_source.custom_properties + fallback_title = custom_props.get('fallback_title_template', '').strip() + fallback_description = custom_props.get('fallback_description_template', '').strip() + + # If custom fallback templates exist, use them instead of default + if fallback_title or fallback_description: + logger.info(f"Using custom fallback templates for '{channel_name}'") + return generate_fallback_programs( + channel_id, channel_name, now, num_days, + program_length_hours, fallback_title, fallback_description + ) + else: + logger.info(f"No custom fallback templates found, using default dummy EPG") # Default humorous program descriptions based on time of day time_descriptions = { @@ -346,12 +507,17 @@ def generate_custom_dummy_programs(channel_id, channel_name, now, num_days, cust ended_title_template = custom_properties.get('ended_title_template', '') ended_description_template = custom_properties.get('ended_description_template', '') + # Image URL templates + channel_logo_url_template = custom_properties.get('channel_logo_url', '') + program_poster_url_template = custom_properties.get('program_poster_url', '') + # EPG metadata options category_string = custom_properties.get('category', '') # Split comma-separated categories and strip whitespace, filter out empty strings categories = [cat.strip() for cat in 
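# Worked example (assumed values) of the slotting loop in generate_fallback_programs
# above: with program_length_hours=4, range(0, 24, 4) yields six back-to-back slots per
# day starting at 00:00, 04:00, ..., 20:00.
from datetime import datetime, timedelta

day_start = datetime(2024, 1, 1)
slots = [(day_start + timedelta(hours=h), day_start + timedelta(hours=h + 4))
         for h in range(0, 24, 4)]
assert len(slots) == 6
assert slots[-1][1] == datetime(2024, 1, 2)  # last slot ends exactly at the next day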
category_string.split(',') if cat.strip()] if category_string else [] include_date = custom_properties.get('include_date', True) include_live = custom_properties.get('include_live', False) + include_new = custom_properties.get('include_new', False) # Parse timezone name try: @@ -428,13 +594,25 @@ def generate_custom_dummy_programs(channel_id, channel_name, now, num_days, cust logger.debug(f"Title pattern matched. Groups: {groups}") # Helper function to format template with matched groups - def format_template(template, groups): - """Replace {groupname} placeholders with matched group values""" + def format_template(template, groups, url_encode=False): + """Replace {groupname} placeholders with matched group values + + Args: + template: Template string with {groupname} placeholders + groups: Dict of group names to values + url_encode: If True, URL encode the group values for safe use in URLs + """ if not template: return '' result = template for key, value in groups.items(): - result = result.replace(f'{{{key}}}', str(value) if value else '') + if url_encode and value: + # URL encode the value to handle spaces and special characters + from urllib.parse import quote + encoded_value = quote(str(value), safe='') + result = result.replace(f'{{{key}}}', encoded_value) + else: + result = result.replace(f'{{{key}}}', str(value) if value else '') return result # Extract time from title if time pattern exists @@ -482,28 +660,39 @@ def generate_custom_dummy_programs(channel_id, channel_name, now, num_days, cust try: # Support various date group names: month, day, year month_str = date_groups.get('month', '') - day = int(date_groups.get('day', 1)) - year = int(date_groups.get('year', now.year)) # Default to current year if not provided + day_str = date_groups.get('day', '') + year_str = date_groups.get('year', '') + + # Parse day - default to current day if empty or invalid + day = int(day_str) if day_str else now.day + + # Parse year - default to current year if empty or invalid (matches frontend behavior) + year = int(year_str) if year_str else now.year # Parse month - can be numeric (1-12) or text (Jan, January, etc.) 
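# Condensed standalone version (for illustration only) of the format_template helper
# above, showing how {group} placeholders are substituted and how url_encode=True makes
# values URL-safe before they are dropped into logo/poster URL templates. The example
# URL is hypothetical.
from urllib.parse import quote

def format_template(template, groups, url_encode=False):
    if not template:
        return ''
    result = template
    for key, value in groups.items():
        text = str(value) if value else ''
        if url_encode and text:
            text = quote(text, safe='')  # "New York" -> "New%20York"
        result = result.replace(f'{{{key}}}', text)
    return result

assert format_template("https://img.example.com/{team}.png", {"team": "New York"},
                       url_encode=True) == "https://img.example.com/New%20York.png"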
month = None - if month_str.isdigit(): - month = int(month_str) - else: - # Try to parse text month names - import calendar - month_str_lower = month_str.lower() - # Check full month names - for i, month_name in enumerate(calendar.month_name): - if month_name.lower() == month_str_lower: - month = i - break - # Check abbreviated month names if not found - if month is None: - for i, month_abbr in enumerate(calendar.month_abbr): - if month_abbr.lower() == month_str_lower: + if month_str: + if month_str.isdigit(): + month = int(month_str) + else: + # Try to parse text month names + import calendar + month_str_lower = month_str.lower() + # Check full month names + for i, month_name in enumerate(calendar.month_name): + if month_name.lower() == month_str_lower: month = i break + # Check abbreviated month names if not found + if month is None: + for i, month_abbr in enumerate(calendar.month_abbr): + if month_abbr.lower() == month_str_lower: + month = i + break + + # Default to current month if not extracted or invalid + if month is None: + month = now.month if month and 1 <= month <= 12 and 1 <= day <= 31: date_info = {'year': year, 'month': month, 'day': day} @@ -516,15 +705,44 @@ def generate_custom_dummy_programs(channel_id, channel_name, now, num_days, cust # Merge title groups, time groups, and date groups for template formatting all_groups = {**groups, **time_groups, **date_groups} + # Add normalized versions of all groups for cleaner URLs + # These remove all non-alphanumeric characters and convert to lowercase + for key, value in list(all_groups.items()): + if value: + # Remove all non-alphanumeric characters (except spaces temporarily) + # then replace spaces with nothing, and convert to lowercase + normalized = regex.sub(r'[^a-zA-Z0-9\s]', '', str(value)) + normalized = regex.sub(r'\s+', '', normalized).lower() + all_groups[f'{key}_normalize'] = normalized + + # Format channel logo URL if template provided (with URL encoding) + channel_logo_url = None + if channel_logo_url_template: + channel_logo_url = format_template(channel_logo_url_template, all_groups, url_encode=True) + logger.debug(f"Formatted channel logo URL: {channel_logo_url}") + + # Format program poster URL if template provided (with URL encoding) + program_poster_url = None + if program_poster_url_template: + program_poster_url = format_template(program_poster_url_template, all_groups, url_encode=True) + logger.debug(f"Formatted program poster URL: {program_poster_url}") + # Add formatted time strings for better display (handles minutes intelligently) if time_info: hour_24 = time_info['hour'] minute = time_info['minute'] + # Determine the base date to use for placeholders + # If date was extracted, use it; otherwise use current date + if date_info: + base_date = datetime(date_info['year'], date_info['month'], date_info['day']) + else: + base_date = datetime.now() + # If output_timezone is specified, convert the display time to that timezone if output_tz: - # Create a datetime in the source timezone - temp_date = datetime.now(source_tz).replace(hour=hour_24, minute=minute, second=0, microsecond=0) + # Create a datetime in the source timezone using the base date + temp_date = source_tz.localize(base_date.replace(hour=hour_24, minute=minute, second=0, microsecond=0)) # Convert to output timezone temp_date_output = temp_date.astimezone(output_tz) # Extract converted hour and minute for display @@ -532,13 +750,29 @@ def generate_custom_dummy_programs(channel_id, channel_name, now, num_days, cust minute = temp_date_output.minute 
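# Sketch of the *_normalize group variants added above (standard `re` is used here; the
# code above uses the third-party `regex` module the same way): strip everything that is
# not alphanumeric or whitespace, then drop the whitespace and lowercase the result.
import re

def add_normalized_groups(groups):
    for key, value in list(groups.items()):
        if value:
            normalized = re.sub(r'[^a-zA-Z0-9\s]', '', str(value))
            normalized = re.sub(r'\s+', '', normalized).lower()
            groups[f'{key}_normalize'] = normalized
    return groups

assert add_normalized_groups({'team': 'St. Louis Blues'})['team_normalize'] == 'stlouisblues'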
logger.debug(f"Converted display time from {source_tz} to {output_tz}: {hour_24}:{minute:02d}") - # Format 24-hour time string - only include minutes if non-zero - if minute > 0: - all_groups['time24'] = f"{hour_24}:{minute:02d}" + # Add date placeholders based on the OUTPUT timezone + # This ensures {date}, {month}, {day}, {year} reflect the converted timezone + all_groups['date'] = temp_date_output.strftime('%Y-%m-%d') + all_groups['month'] = str(temp_date_output.month) + all_groups['day'] = str(temp_date_output.day) + all_groups['year'] = str(temp_date_output.year) + logger.debug(f"Converted date placeholders to {output_tz}: {all_groups['date']}") else: - all_groups['time24'] = f"{hour_24:02d}:00" + # No output timezone conversion - use source timezone for date + # Create temp date to get proper date in source timezone using the base date + temp_date_source = source_tz.localize(base_date.replace(hour=hour_24, minute=minute, second=0, microsecond=0)) + all_groups['date'] = temp_date_source.strftime('%Y-%m-%d') + all_groups['month'] = str(temp_date_source.month) + all_groups['day'] = str(temp_date_source.day) + all_groups['year'] = str(temp_date_source.year) - # Convert 24-hour to 12-hour format for {time} placeholder + # Format 24-hour start time string - only include minutes if non-zero + if minute > 0: + all_groups['starttime24'] = f"{hour_24}:{minute:02d}" + else: + all_groups['starttime24'] = f"{hour_24:02d}:00" + + # Convert 24-hour to 12-hour format for {starttime} placeholder # Note: hour_24 is ALWAYS in 24-hour format at this point (converted earlier if needed) ampm = 'AM' if hour_24 < 12 else 'PM' hour_12 = hour_24 @@ -547,11 +781,46 @@ def generate_custom_dummy_programs(channel_id, channel_name, now, num_days, cust elif hour_24 > 12: hour_12 = hour_24 - 12 - # Format 12-hour time string - only include minutes if non-zero + # Format 12-hour start time string - only include minutes if non-zero if minute > 0: - all_groups['time'] = f"{hour_12}:{minute:02d} {ampm}" + all_groups['starttime'] = f"{hour_12}:{minute:02d} {ampm}" else: - all_groups['time'] = f"{hour_12} {ampm}" + all_groups['starttime'] = f"{hour_12} {ampm}" + + # Format long version that always includes minutes (e.g., "9:00 PM" instead of "9 PM") + all_groups['starttime_long'] = f"{hour_12}:{minute:02d} {ampm}" + + # Calculate end time based on program duration + # Create a datetime for calculations + temp_start = datetime.now(source_tz).replace(hour=hour_24, minute=minute, second=0, microsecond=0) + temp_end = temp_start + timedelta(minutes=program_duration) + + # Extract end time components (already in correct timezone if output_tz was applied above) + end_hour_24 = temp_end.hour + end_minute = temp_end.minute + + # Format 24-hour end time string - only include minutes if non-zero + if end_minute > 0: + all_groups['endtime24'] = f"{end_hour_24}:{end_minute:02d}" + else: + all_groups['endtime24'] = f"{end_hour_24:02d}:00" + + # Convert 24-hour to 12-hour format for {endtime} placeholder + end_ampm = 'AM' if end_hour_24 < 12 else 'PM' + end_hour_12 = end_hour_24 + if end_hour_24 == 0: + end_hour_12 = 12 + elif end_hour_24 > 12: + end_hour_12 = end_hour_24 - 12 + + # Format 12-hour end time string - only include minutes if non-zero + if end_minute > 0: + all_groups['endtime'] = f"{end_hour_12}:{end_minute:02d} {end_ampm}" + else: + all_groups['endtime'] = f"{end_hour_12} {end_ampm}" + + # Format long version that always includes minutes (e.g., "9:00 PM" instead of "9 PM") + all_groups['endtime_long'] = 
f"{end_hour_12}:{end_minute:02d} {end_ampm}" # Generate programs programs = [] @@ -676,6 +945,10 @@ def generate_custom_dummy_programs(channel_id, channel_name, now, num_days, cust date_str = local_time.strftime('%Y-%m-%d') program_custom_properties['date'] = date_str + # Add program poster URL if provided + if program_poster_url: + program_custom_properties['icon'] = program_poster_url + programs.append({ "channel_id": channel_id, "start_time": program_start_utc, @@ -683,6 +956,7 @@ def generate_custom_dummy_programs(channel_id, channel_name, now, num_days, cust "title": upcoming_title, "description": upcoming_description, "custom_properties": program_custom_properties, + "channel_logo_url": channel_logo_url, # Pass channel logo for EPG generation }) current_time += timedelta(minutes=program_duration) @@ -706,6 +980,14 @@ def generate_custom_dummy_programs(channel_id, channel_name, now, num_days, cust if include_live: main_event_custom_properties['live'] = True + # Add new flag if requested + if include_new: + main_event_custom_properties['new'] = True + + # Add program poster URL if provided + if program_poster_url: + main_event_custom_properties['icon'] = program_poster_url + programs.append({ "channel_id": channel_id, "start_time": event_start_utc, @@ -713,6 +995,7 @@ def generate_custom_dummy_programs(channel_id, channel_name, now, num_days, cust "title": main_event_title, "description": main_event_description, "custom_properties": main_event_custom_properties, + "channel_logo_url": channel_logo_url, # Pass channel logo for EPG generation }) event_happened = True @@ -745,6 +1028,10 @@ def generate_custom_dummy_programs(channel_id, channel_name, now, num_days, cust date_str = local_time.strftime('%Y-%m-%d') program_custom_properties['date'] = date_str + # Add program poster URL if provided + if program_poster_url: + program_custom_properties['icon'] = program_poster_url + programs.append({ "channel_id": channel_id, "start_time": program_start_utc, @@ -752,6 +1039,7 @@ def generate_custom_dummy_programs(channel_id, channel_name, now, num_days, cust "title": ended_title, "description": ended_description, "custom_properties": program_custom_properties, + "channel_logo_url": channel_logo_url, # Pass channel logo for EPG generation }) current_time += timedelta(minutes=program_duration) @@ -800,6 +1088,10 @@ def generate_custom_dummy_programs(channel_id, channel_name, now, num_days, cust date_str = local_time.strftime('%Y-%m-%d') program_custom_properties['date'] = date_str + # Add program poster URL if provided + if program_poster_url: + program_custom_properties['icon'] = program_poster_url + programs.append({ "channel_id": channel_id, "start_time": program_start_utc, @@ -807,6 +1099,7 @@ def generate_custom_dummy_programs(channel_id, channel_name, now, num_days, cust "title": program_title, "description": program_description, "custom_properties": program_custom_properties, + "channel_logo_url": channel_logo_url, }) current_time += timedelta(minutes=program_duration) @@ -854,6 +1147,14 @@ def generate_custom_dummy_programs(channel_id, channel_name, now, num_days, cust if include_live: program_custom_properties['live'] = True + # Add new flag if requested + if include_new: + program_custom_properties['new'] = True + + # Add program poster URL if provided + if program_poster_url: + program_custom_properties['icon'] = program_poster_url + programs.append({ "channel_id": channel_id, "start_time": program_start_utc, @@ -861,6 +1162,7 @@ def generate_custom_dummy_programs(channel_id, 
channel_name, now, num_days, cust "title": title, "description": description, "custom_properties": program_custom_properties, + "channel_logo_url": channel_logo_url, # Pass channel logo for EPG generation }) logger.info(f"Generated {len(programs)} custom dummy programs for {channel_name}") @@ -915,6 +1217,10 @@ def generate_dummy_epg( if custom_data.get('live', False): xml_lines.append(f" ") + # New tag + if custom_data.get('new', False): + xml_lines.append(f" ") + xml_lines.append(f" ") return xml_lines @@ -927,8 +1233,22 @@ def generate_epg(request, profile_name=None, user=None): by their associated EPGData record. This version filters data based on the 'days' parameter and sends keep-alives during processing. """ + # Check cache for recent identical request (helps with double-GET from browsers) + from django.core.cache import cache + cache_params = f"{profile_name or 'all'}:{user.username if user else 'anonymous'}:{request.GET.urlencode()}" + content_cache_key = f"epg_content:{cache_params}" + + cached_content = cache.get(content_cache_key) + if cached_content: + logger.debug("Serving EPG from cache") + response = HttpResponse(cached_content, content_type="application/xml") + response["Content-Disposition"] = 'attachment; filename="Dispatcharr.xml"' + response["Cache-Control"] = "no-cache" + return response + def epg_generator(): - """Generator function that yields EPG data with keep-alives during processing""" # Send initial HTTP headers as comments (these will be ignored by XML parsers but keep connection alive) + """Generator function that yields EPG data with keep-alives during processing""" + # Send initial HTTP headers as comments (these will be ignored by XML parsers but keep connection alive) xml_lines = [] xml_lines.append('') @@ -959,7 +1279,11 @@ def generate_epg(request, profile_name=None, user=None): ) else: if profile_name is not None: - channel_profile = ChannelProfile.objects.get(name=profile_name) + try: + channel_profile = ChannelProfile.objects.get(name=profile_name) + except ChannelProfile.DoesNotExist: + logger.warning("Requested channel profile (%s) during epg generation does not exist", profile_name) + raise Http404(f"Channel profile '{profile_name}' not found") channels = Channel.objects.filter( channelprofilemembership__channel_profile=channel_profile, channelprofilemembership__enabled=True, @@ -991,16 +1315,45 @@ def generate_epg(request, profile_name=None, user=None): now = django_timezone.now() cutoff_date = now + timedelta(days=num_days) if num_days > 0 else None + # Build collision-free channel number mapping for XC clients (if user is authenticated) + # XC clients require integer channel numbers, so we need to ensure no conflicts + channel_num_map = {} + if user is not None: + # This is an XC client - build collision-free mapping + used_numbers = set() + + # First pass: assign integers for channels that already have integer numbers + for channel in channels: + if channel.channel_number == int(channel.channel_number): + num = int(channel.channel_number) + channel_num_map[channel.id] = num + used_numbers.add(num) + + # Second pass: assign integers for channels with float numbers + for channel in channels: + if channel.channel_number != int(channel.channel_number): + candidate = int(channel.channel_number) + while candidate in used_numbers: + candidate += 1 + channel_num_map[channel.id] = candidate + used_numbers.add(candidate) + # Process channels for the section for channel in channels: - # Format channel number as integer if it has no decimal component - 
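# Standalone sketch of the two-pass collision-free mapping built above for XC clients,
# which only accept integer channel numbers: integers keep their value, fractional
# numbers take the next free integer at or above their truncated value. The input shape
# ({channel_id: channel_number}) is an assumption for the example.
def build_channel_num_map(channel_numbers):
    mapping, used = {}, set()
    for cid, num in channel_numbers.items():          # first pass: true integers
        if num == int(num):
            mapping[cid] = int(num)
            used.add(int(num))
    for cid, num in channel_numbers.items():          # second pass: fractional numbers
        if num != int(num):
            candidate = int(num)
            while candidate in used:
                candidate += 1
            mapping[cid] = candidate
            used.add(candidate)
    return mapping

assert build_channel_num_map({1: 5.0, 2: 5.1, 3: 6.0}) == {1: 5, 2: 7, 3: 6}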
same as M3U generation - if channel.channel_number is not None: - if channel.channel_number == int(channel.channel_number): - formatted_channel_number = int(channel.channel_number) - else: - formatted_channel_number = channel.channel_number + # For XC clients (user is not None), use collision-free integer mapping + # For regular clients (user is None), use original formatting logic + if user is not None: + # XC client - use collision-free integer + formatted_channel_number = channel_num_map[channel.id] else: - formatted_channel_number = "" + # Regular client - format channel number as integer if it has no decimal component + if channel.channel_number is not None: + if channel.channel_number == int(channel.channel_number): + formatted_channel_number = int(channel.channel_number) + else: + formatted_channel_number = channel.channel_number + else: + formatted_channel_number = "" # Determine the channel ID based on the selected source if tvg_id_source == 'tvg_id' and channel.tvg_id: @@ -1013,7 +1366,62 @@ def generate_epg(request, profile_name=None, user=None): # Add channel logo if available tvg_logo = "" - if channel.logo: + + # Check if this is a custom dummy EPG with channel logo URL template + if channel.epg_data and channel.epg_data.epg_source and channel.epg_data.epg_source.source_type == 'dummy': + epg_source = channel.epg_data.epg_source + if epg_source.custom_properties: + custom_props = epg_source.custom_properties + channel_logo_url_template = custom_props.get('channel_logo_url', '') + + if channel_logo_url_template: + # Determine which name to use for pattern matching (same logic as program generation) + pattern_match_name = channel.name + name_source = custom_props.get('name_source') + + if name_source == 'stream': + stream_index = custom_props.get('stream_index', 1) - 1 + channel_streams = channel.streams.all().order_by('channelstream__order') + + if channel_streams.exists() and 0 <= stream_index < channel_streams.count(): + stream = list(channel_streams)[stream_index] + pattern_match_name = stream.name + + # Try to extract groups from the channel/stream name and build the logo URL + title_pattern = custom_props.get('title_pattern', '') + if title_pattern: + try: + # Convert PCRE/JavaScript named groups to Python format + title_pattern = regex.sub(r'\(\?<(?![=!])([^>]+)>', r'(?P<\1>', title_pattern) + title_regex = regex.compile(title_pattern) + title_match = title_regex.search(pattern_match_name) + + if title_match: + groups = title_match.groupdict() + + # Add normalized versions of all groups for cleaner URLs + for key, value in list(groups.items()): + if value: + # Remove all non-alphanumeric characters and convert to lowercase + normalized = regex.sub(r'[^a-zA-Z0-9\s]', '', str(value)) + normalized = regex.sub(r'\s+', '', normalized).lower() + groups[f'{key}_normalize'] = normalized + + # Format the logo URL template with the matched groups (with URL encoding) + from urllib.parse import quote + for key, value in groups.items(): + if value: + encoded_value = quote(str(value), safe='') + channel_logo_url_template = channel_logo_url_template.replace(f'{{{key}}}', encoded_value) + else: + channel_logo_url_template = channel_logo_url_template.replace(f'{{{key}}}', '') + tvg_logo = channel_logo_url_template + logger.debug(f"Built channel logo URL from template: {tvg_logo}") + except Exception as e: + logger.warning(f"Failed to build channel logo URL for {channel.name}: {e}") + + # If no custom dummy logo, use regular logo logic + if not tvg_logo and channel.logo: if 
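# Sketch of the named-group conversion used above before compiling user-supplied title
# patterns: PCRE/JavaScript-style (?<name>...) groups are rewritten to Python's
# (?P<name>...), leaving lookbehinds (?<= / (?<! untouched. Standard `re` is used here
# in place of the `regex` module.
import re

def to_python_named_groups(pattern):
    return re.sub(r'\(\?<(?![=!])([^>]+)>', r'(?P<\1>', pattern)

converted = to_python_named_groups(r'(?<team>\w+) vs (?<opponent>\w+)')
match = re.search(converted, 'Lions vs Tigers')
assert match.groupdict() == {'team': 'Lions', 'opponent': 'Tigers'}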
use_cached_logos: # Use cached logo as before tvg_logo = build_absolute_uri_with_port(request, reverse('api:channels:logo-cache', args=[channel.logo.id])) @@ -1032,7 +1440,8 @@ def generate_epg(request, profile_name=None, user=None): xml_lines.append(" ") # Send all channel definitions - yield '\n'.join(xml_lines) + '\n' + channel_xml = '\n'.join(xml_lines) + '\n' + yield channel_xml xml_lines = [] # Clear to save memory # Process programs for each channel @@ -1044,14 +1453,20 @@ def generate_epg(request, profile_name=None, user=None): elif tvg_id_source == 'gracenote' and channel.tvc_guide_stationid: channel_id = channel.tvc_guide_stationid else: - # Get formatted channel number - if channel.channel_number is not None: - if channel.channel_number == int(channel.channel_number): - formatted_channel_number = int(channel.channel_number) - else: - formatted_channel_number = channel.channel_number + # For XC clients (user is not None), use collision-free integer mapping + # For regular clients (user is None), use original formatting logic + if user is not None: + # XC client - use collision-free integer from map + formatted_channel_number = channel_num_map[channel.id] else: - formatted_channel_number = "" + # Regular client - format channel number as before + if channel.channel_number is not None: + if channel.channel_number == int(channel.channel_number): + formatted_channel_number = int(channel.channel_number) + else: + formatted_channel_number = channel.channel_number + else: + formatted_channel_number = "" # Default to channel number channel_id = str(formatted_channel_number) if formatted_channel_number != "" else str(channel.id) @@ -1114,6 +1529,14 @@ def generate_epg(request, profile_name=None, user=None): if custom_data.get('live', False): yield f" \n" + # New tag + if custom_data.get('new', False): + yield f" \n" + + # Icon/poster URL + if 'icon' in custom_data: + yield f" \n" + yield f" \n" else: @@ -1155,6 +1578,14 @@ def generate_epg(request, profile_name=None, user=None): if custom_data.get('live', False): yield f" \n" + # New tag + if custom_data.get('new', False): + yield f" \n" + + # Icon/poster URL + if 'icon' in custom_data: + yield f" \n" + yield f" \n" continue # Skip to next channel @@ -1406,7 +1837,8 @@ def generate_epg(request, profile_name=None, user=None): # Send batch when full or send keep-alive if len(program_batch) >= batch_size: - yield '\n'.join(program_batch) + '\n' + batch_xml = '\n'.join(program_batch) + '\n' + yield batch_xml program_batch = [] # Move to next chunk @@ -1414,12 +1846,40 @@ def generate_epg(request, profile_name=None, user=None): # Send remaining programs in batch if program_batch: - yield '\n'.join(program_batch) + '\n' + batch_xml = '\n'.join(program_batch) + '\n' + yield batch_xml # Send final closing tag and completion message - yield "\n" # Return streaming response + yield "\n" + + # Log system event for EPG download after streaming completes (with deduplication based on client) + client_id, client_ip, user_agent = get_client_identifier(request) + event_cache_key = f"epg_download:{user.username if user else 'anonymous'}:{profile_name or 'all'}:{client_id}" + if not cache.get(event_cache_key): + log_system_event( + event_type='epg_download', + profile=profile_name or 'all', + user=user.username if user else 'anonymous', + channels=channels.count(), + client_ip=client_ip, + user_agent=user_agent, + ) + cache.set(event_cache_key, True, 2) # Prevent duplicate events for 2 seconds + + # Wrapper generator that collects content for caching + def 
caching_generator(): + collected_content = [] + for chunk in epg_generator(): + collected_content.append(chunk) + yield chunk + # After streaming completes, cache the full content + full_content = ''.join(collected_content) + cache.set(content_cache_key, full_content, 300) + logger.debug("Cached EPG content (%d bytes)", len(full_content)) + + # Return streaming response response = StreamingHttpResponse( - streaming_content=epg_generator(), + streaming_content=caching_generator(), content_type="application/xml" ) response["Content-Disposition"] = 'attachment; filename="Dispatcharr.xml"' @@ -1507,45 +1967,31 @@ def xc_player_api(request, full=False): if user is None: return JsonResponse({'error': 'Unauthorized'}, status=401) - server_info = xc_get_info(request) - - if not action: - return JsonResponse(server_info) - if action == "get_live_categories": return JsonResponse(xc_get_live_categories(user), safe=False) - if action == "get_live_streams": + elif action == "get_live_streams": return JsonResponse(xc_get_live_streams(request, user, request.GET.get("category_id")), safe=False) - if action == "get_short_epg": + elif action == "get_short_epg": return JsonResponse(xc_get_epg(request, user, short=True), safe=False) - if action == "get_simple_data_table": + elif action == "get_simple_data_table": return JsonResponse(xc_get_epg(request, user, short=False), safe=False) - - # Endpoints not implemented, but still provide a response - if action in [ - "get_vod_categories", - "get_vod_streams", - "get_series", - "get_series_categories", - "get_series_info", - "get_vod_info", - ]: - if action == "get_vod_categories": - return JsonResponse(xc_get_vod_categories(user), safe=False) - elif action == "get_vod_streams": - return JsonResponse(xc_get_vod_streams(request, user, request.GET.get("category_id")), safe=False) - elif action == "get_series_categories": - return JsonResponse(xc_get_series_categories(user), safe=False) - elif action == "get_series": - return JsonResponse(xc_get_series(request, user, request.GET.get("category_id")), safe=False) - elif action == "get_series_info": - return JsonResponse(xc_get_series_info(request, user, request.GET.get("series_id")), safe=False) - elif action == "get_vod_info": - return JsonResponse(xc_get_vod_info(request, user, request.GET.get("vod_id")), safe=False) - else: - return JsonResponse([], safe=False) - - raise Http404() + elif action == "get_vod_categories": + return JsonResponse(xc_get_vod_categories(user), safe=False) + elif action == "get_vod_streams": + return JsonResponse(xc_get_vod_streams(request, user, request.GET.get("category_id")), safe=False) + elif action == "get_series_categories": + return JsonResponse(xc_get_series_categories(user), safe=False) + elif action == "get_series": + return JsonResponse(xc_get_series(request, user, request.GET.get("category_id")), safe=False) + elif action == "get_series_info": + return JsonResponse(xc_get_series_info(request, user, request.GET.get("series_id")), safe=False) + elif action == "get_vod_info": + return JsonResponse(xc_get_vod_info(request, user, request.GET.get("vod_id")), safe=False) + else: + # For any other action (including get_account_info or unknown actions), + # return server_info/account_info to match provider behavior + server_info = xc_get_info(request) + return JsonResponse(server_info, safe=False) def xc_panel_api(request): @@ -1562,12 +2008,34 @@ def xc_panel_api(request): def xc_get(request): if not network_access_allowed(request, 'XC_API'): + # Log blocked M3U download + from 
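# Sketch of the caching wrapper above: chunks are streamed to the client as they are
# produced, and the concatenated payload is cached only once the source generator is
# exhausted. The cache object is assumed to expose set(key, value, timeout), like
# Django's cache framework; DictCache below is a stand-in for the usage example.
def caching_generator(source_generator, cache, cache_key, timeout=300):
    collected = []
    for chunk in source_generator():
        collected.append(chunk)
        yield chunk
    cache.set(cache_key, ''.join(collected), timeout)

class DictCache:
    def __init__(self):
        self.store = {}

    def set(self, key, value, timeout=None):
        self.store[key] = value

cache = DictCache()
assert list(caching_generator(lambda: iter(['<tv>', '</tv>']), cache, 'epg:test')) == ['<tv>', '</tv>']
assert cache.store['epg:test'] == '<tv></tv>'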
core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + log_system_event( + event_type='m3u_blocked', + user=request.GET.get('username', 'unknown'), + reason='Network access denied (XC API)', + client_ip=client_ip, + user_agent=user_agent, + ) return JsonResponse({'error': 'Forbidden'}, status=403) action = request.GET.get("action") user = xc_get_user(request) if user is None: + # Log blocked M3U download due to invalid credentials + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + log_system_event( + event_type='m3u_blocked', + user=request.GET.get('username', 'unknown'), + reason='Invalid XC credentials', + client_ip=client_ip, + user_agent=user_agent, + ) return JsonResponse({'error': 'Unauthorized'}, status=401) return generate_m3u(request, None, user) @@ -1575,17 +2043,40 @@ def xc_get(request): def xc_xmltv(request): if not network_access_allowed(request, 'XC_API'): + # Log blocked EPG download + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + log_system_event( + event_type='epg_blocked', + user=request.GET.get('username', 'unknown'), + reason='Network access denied (XC API)', + client_ip=client_ip, + user_agent=user_agent, + ) return JsonResponse({'error': 'Forbidden'}, status=403) user = xc_get_user(request) if user is None: + # Log blocked EPG download due to invalid credentials + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + log_system_event( + event_type='epg_blocked', + user=request.GET.get('username', 'unknown'), + reason='Invalid XC credentials', + client_ip=client_ip, + user_agent=user_agent, + ) return JsonResponse({'error': 'Unauthorized'}, status=401) return generate_epg(request, None, user) def xc_get_live_categories(user): + from django.db.models import Min response = [] if user.user_level == 0: @@ -1596,7 +2087,7 @@ def xc_get_live_categories(user): # No profile filtering - user sees all channel groups channel_groups = ChannelGroup.objects.filter( channels__isnull=False, channels__user_level__lte=user.user_level - ).distinct().order_by(Lower("name")) + ).distinct().annotate(min_channel_number=Min('channels__channel_number')).order_by('min_channel_number') else: # User has specific limited profiles assigned filters = { @@ -1604,11 +2095,11 @@ def xc_get_live_categories(user): "channels__user_level": 0, "channels__channelprofilemembership__channel_profile__in": user.channel_profiles.all() } - channel_groups = ChannelGroup.objects.filter(**filters).distinct().order_by(Lower("name")) + channel_groups = ChannelGroup.objects.filter(**filters).distinct().annotate(min_channel_number=Min('channels__channel_number')).order_by('min_channel_number') else: channel_groups = ChannelGroup.objects.filter( channels__isnull=False, channels__user_level__lte=user.user_level - ).distinct().order_by(Lower("name")) + ).distinct().annotate(min_channel_number=Min('channels__channel_number')).order_by('min_channel_number') for group in channel_groups: response.append( @@ -1653,10 +2144,38 @@ def xc_get_live_streams(request, user, category_id=None): channel_group__id=category_id, user_level__lte=user.user_level ).order_by("channel_number") + # Build collision-free 
mapping for XC clients (which require integers) + # This ensures channels with float numbers don't conflict with existing integers + channel_num_map = {} # Maps channel.id -> integer channel number for XC + used_numbers = set() # Track all assigned integer channel numbers + + # First pass: assign integers for channels that already have integer numbers for channel in channels: + if channel.channel_number == int(channel.channel_number): + # Already an integer, use it directly + num = int(channel.channel_number) + channel_num_map[channel.id] = num + used_numbers.add(num) + + # Second pass: assign integers for channels with float numbers + # Find next available number to avoid collisions + for channel in channels: + if channel.channel_number != int(channel.channel_number): + # Has decimal component, need to find available integer + # Start from truncated value and increment until we find an unused number + candidate = int(channel.channel_number) + while candidate in used_numbers: + candidate += 1 + channel_num_map[channel.id] = candidate + used_numbers.add(candidate) + + # Build the streams list with the collision-free channel numbers + for channel in channels: + channel_num_int = channel_num_map[channel.id] + streams.append( { - "num": int(channel.channel_number) if channel.channel_number.is_integer() else channel.channel_number, + "num": channel_num_int, "name": channel.name, "stream_type": "live", "stream_id": channel.id, @@ -1668,7 +2187,7 @@ def xc_get_live_streams(request, user, category_id=None): reverse("api:channels:logo-cache", args=[channel.logo.id]) ) ), - "epg_channel_id": str(int(channel.channel_number)) if channel.channel_number.is_integer() else str(channel.channel_number), + "epg_channel_id": str(channel_num_int), "added": int(channel.created_at.timestamp()), "is_adult": 0, "category_id": str(channel.channel_group.id), @@ -1717,7 +2236,36 @@ def xc_get_epg(request, user, short=False): if not channel: raise Http404() - limit = request.GET.get('limit', 4) + # Calculate the collision-free integer channel number for this channel + # This must match the logic in xc_get_live_streams to ensure consistency + # Get all channels in the same category for collision detection + category_channels = Channel.objects.filter( + channel_group=channel.channel_group + ).order_by("channel_number") + + channel_num_map = {} + used_numbers = set() + + # First pass: assign integers for channels that already have integer numbers + for ch in category_channels: + if ch.channel_number == int(ch.channel_number): + num = int(ch.channel_number) + channel_num_map[ch.id] = num + used_numbers.add(num) + + # Second pass: assign integers for channels with float numbers + for ch in category_channels: + if ch.channel_number != int(ch.channel_number): + candidate = int(ch.channel_number) + while candidate in used_numbers: + candidate += 1 + channel_num_map[ch.id] = candidate + used_numbers.add(candidate) + + # Get the mapped integer for this specific channel + channel_num_int = channel_num_map.get(channel.id, int(channel.channel_number)) + + limit = int(request.GET.get('limit', 4)) if channel.epg_data: # Check if this is a dummy EPG that generates on-demand if channel.epg_data.epg_source and channel.epg_data.epg_source.source_type == 'dummy': @@ -1749,32 +2297,43 @@ def xc_get_epg(request, user, short=False): programs = generate_dummy_programs(channel_id=channel_id, channel_name=channel.name, epg_source=None) output = {"epg_listings": []} + for program in programs: - id = "0" - epg_id = "0" title = program['title'] 
if isinstance(program, dict) else program.title description = program['description'] if isinstance(program, dict) else program.description start = program["start_time"] if isinstance(program, dict) else program.start_time end = program["end_time"] if isinstance(program, dict) else program.end_time + # For database programs, use actual ID; for generated dummy programs, create synthetic ID + if isinstance(program, dict): + # Generated dummy program - create unique ID from channel + timestamp + program_id = str(abs(hash(f"{channel_id}_{int(start.timestamp())}"))) + else: + # Database program - use actual ID + program_id = str(program.id) + + # epg_id refers to the EPG source/channel mapping in XC panels + # Use the actual EPGData ID when available, otherwise fall back to 0 + epg_id = str(channel.epg_data.id) if channel.epg_data else "0" + program_output = { - "id": f"{id}", - "epg_id": f"{epg_id}", - "title": base64.b64encode(title.encode()).decode(), + "id": program_id, + "epg_id": epg_id, + "title": base64.b64encode((title or "").encode()).decode(), "lang": "", - "start": start.strftime("%Y%m%d%H%M%S"), - "end": end.strftime("%Y%m%d%H%M%S"), - "description": base64.b64encode(description.encode()).decode(), - "channel_id": int(channel.channel_number) if channel.channel_number.is_integer() else channel.channel_number, - "start_timestamp": int(start.timestamp()), - "stop_timestamp": int(end.timestamp()), + "start": start.strftime("%Y-%m-%d %H:%M:%S"), + "end": end.strftime("%Y-%m-%d %H:%M:%S"), + "description": base64.b64encode((description or "").encode()).decode(), + "channel_id": str(channel_num_int), + "start_timestamp": str(int(start.timestamp())), + "stop_timestamp": str(int(end.timestamp())), "stream_id": f"{channel_id}", } if short == False: program_output["now_playing"] = 1 if start <= django_timezone.now() <= end else 0 - program_output["has_archive"] = "0" + program_output["has_archive"] = 0 output['epg_listings'].append(program_output) @@ -1846,7 +2405,7 @@ def xc_get_vod_streams(request, user, category_id=None): None if not movie.logo else build_absolute_uri_with_port( request, - reverse("api:channels:logo-cache", args=[movie.logo.id]) + reverse("api:vod:vodlogo-cache", args=[movie.logo.id]) ) ), #'stream_icon': movie.logo.url if movie.logo else '', @@ -1916,7 +2475,7 @@ def xc_get_series(request, user, category_id=None): None if not series.logo else build_absolute_uri_with_port( request, - reverse("api:channels:logo-cache", args=[series.logo.id]) + reverse("api:vod:vodlogo-cache", args=[series.logo.id]) ) ), "plot": series.description or "", @@ -1979,34 +2538,45 @@ def xc_get_series_info(request, user, series_id): except Exception as e: logger.error(f"Error refreshing series data for relation {series_relation.id}: {str(e)}") - # Get episodes for this series from the same M3U account - episode_relations = M3UEpisodeRelation.objects.filter( - episode__series=series, - m3u_account=series_relation.m3u_account - ).select_related('episode').order_by('episode__season_number', 'episode__episode_number') + # Get unique episodes for this series that have relations from any active M3U account + # We query episodes directly to avoid duplicates when multiple relations exist + # (e.g., same episode in different languages/qualities) + from apps.vod.models import Episode + episodes = Episode.objects.filter( + series=series, + m3u_relations__m3u_account__is_active=True + ).distinct().order_by('season_number', 'episode_number') # Group episodes by season seasons = {} - for relation in 
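# Illustrative epg_listings entry matching the shape built above (all values made up):
# titles and descriptions are base64-encoded, timestamps are emitted as strings, and
# generated (non-database) programmes get a synthetic id hashed from channel + start.
import base64
from datetime import datetime, timezone

start = datetime(2024, 1, 1, 20, 0, tzinfo=timezone.utc)
end = datetime(2024, 1, 1, 21, 0, tzinfo=timezone.utc)
channel_id, channel_num_int = 42, 7

listing = {
    "id": str(abs(hash(f"{channel_id}_{int(start.timestamp())}"))),
    "epg_id": "0",
    "title": base64.b64encode("Evening News".encode()).decode(),
    "lang": "",
    "start": start.strftime("%Y-%m-%d %H:%M:%S"),
    "end": end.strftime("%Y-%m-%d %H:%M:%S"),
    "description": base64.b64encode("Headlines and weather.".encode()).decode(),
    "channel_id": str(channel_num_int),
    "start_timestamp": str(int(start.timestamp())),
    "stop_timestamp": str(int(end.timestamp())),
    "stream_id": str(channel_id),
}
assert listing["start"] == "2024-01-01 20:00:00"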
episode_relations: - episode = relation.episode + for episode in episodes: season_num = episode.season_number or 1 if season_num not in seasons: seasons[season_num] = [] - # Try to get the highest priority related M3UEpisodeRelation for this episode (for video/audio/bitrate) + # Get the highest priority relation for this episode (for container_extension, video/audio/bitrate) from apps.vod.models import M3UEpisodeRelation - first_relation = M3UEpisodeRelation.objects.filter( - episode=episode + best_relation = M3UEpisodeRelation.objects.filter( + episode=episode, + m3u_account__is_active=True ).select_related('m3u_account').order_by('-m3u_account__priority', 'id').first() + video = audio = bitrate = None - if first_relation and first_relation.custom_properties: - info = first_relation.custom_properties.get('info') - if info and isinstance(info, dict): - info_info = info.get('info') - if info_info and isinstance(info_info, dict): - video = info_info.get('video', {}) - audio = info_info.get('audio', {}) - bitrate = info_info.get('bitrate', 0) + container_extension = "mp4" + added_timestamp = str(int(episode.created_at.timestamp())) + + if best_relation: + container_extension = best_relation.container_extension or "mp4" + added_timestamp = str(int(best_relation.created_at.timestamp())) + if best_relation.custom_properties: + info = best_relation.custom_properties.get('info') + if info and isinstance(info, dict): + info_info = info.get('info') + if info_info and isinstance(info_info, dict): + video = info_info.get('video', {}) + audio = info_info.get('audio', {}) + bitrate = info_info.get('bitrate', 0) + if video is None: video = episode.custom_properties.get('video', {}) if episode.custom_properties else {} if audio is None: @@ -2019,8 +2589,8 @@ def xc_get_series_info(request, user, series_id): "season": season_num, "episode_num": episode.episode_number or 0, "title": episode.name, - "container_extension": relation.container_extension or "mp4", - "added": str(int(relation.created_at.timestamp())), + "container_extension": container_extension, + "added": added_timestamp, "custom_sid": None, "direct_source": "", "info": { @@ -2109,7 +2679,7 @@ def xc_get_series_info(request, user, series_id): None if not series.logo else build_absolute_uri_with_port( request, - reverse("api:channels:logo-cache", args=[series.logo.id]) + reverse("api:vod:vodlogo-cache", args=[series.logo.id]) ) ), "plot": series_data['description'], @@ -2237,14 +2807,14 @@ def xc_get_vod_info(request, user, vod_id): None if not movie.logo else build_absolute_uri_with_port( request, - reverse("api:channels:logo-cache", args=[movie.logo.id]) + reverse("api:vod:vodlogo-cache", args=[movie.logo.id]) ) ), "movie_image": ( None if not movie.logo else build_absolute_uri_with_port( request, - reverse("api:channels:logo-cache", args=[movie.logo.id]) + reverse("api:vod:vodlogo-cache", args=[movie.logo.id]) ) ), 'description': movie_data.get('description', ''), @@ -2336,7 +2906,7 @@ def xc_series_stream(request, username, password, stream_id, extension): filters = {"episode_id": stream_id, "m3u_account__is_active": True} try: - episode_relation = M3UEpisodeRelation.objects.select_related('episode').get(**filters) + episode_relation = M3UEpisodeRelation.objects.select_related('episode').filter(**filters).order_by('-m3u_account__priority', 'id').first() except M3UEpisodeRelation.DoesNotExist: return JsonResponse({"error": "Episode not found"}, status=404) @@ -2357,45 +2927,81 @@ def get_host_and_port(request): Returns (host, port) for 
building absolute URIs.
     - Prefers X-Forwarded-Host/X-Forwarded-Port (nginx).
     - Falls back to Host header.
-    - In dev, if missing, uses 5656 or 8000 as a guess.
+    - Returns None for port if using standard ports (80/443) to omit from URLs.
+    - In dev, uses 5656 as a guess if port cannot be determined.
     """
-    # 1. Try X-Forwarded-Host (may include port)
+    # Determine the scheme first - needed for standard port detection
+    scheme = request.META.get("HTTP_X_FORWARDED_PROTO", request.scheme)
+    standard_port = "443" if scheme == "https" else "80"
+
+    # 1. Try X-Forwarded-Host (may include port) - set by our nginx
     xfh = request.META.get("HTTP_X_FORWARDED_HOST")
     if xfh:
         if ":" in xfh:
             host, port = xfh.split(":", 1)
+            # Omit standard ports from URLs
+            if port == standard_port:
+                return host, None
+            # Non-standard port in X-Forwarded-Host - return it
+            # This handles reverse proxies on non-standard ports (e.g., https://example.com:8443)
+            return host, port
         else:
             host = xfh
-        port = request.META.get("HTTP_X_FORWARDED_PORT")
+
+        # Check for X-Forwarded-Port header (if we didn't find a port in X-Forwarded-Host)
+        port = request.META.get("HTTP_X_FORWARDED_PORT")
         if port:
-            return host, port
+            # Omit standard ports from URLs
+            return host, None if port == standard_port else port
+        # If X-Forwarded-Proto is set but no valid port, assume standard
+        if request.META.get("HTTP_X_FORWARDED_PROTO"):
+            return host, None

     # 2. Try Host header
     raw_host = request.get_host()
     if ":" in raw_host:
         host, port = raw_host.split(":", 1)
-        return host, port
+        # Omit standard ports from URLs
+        return host, None if port == standard_port else port
     else:
         host = raw_host

-    # 3. Try X-Forwarded-Port
+    # 3. Check for X-Forwarded-Port (when Host header has no port but we're behind a reverse proxy)
     port = request.META.get("HTTP_X_FORWARDED_PORT")
     if port:
-        return host, port
+        # Omit standard ports from URLs
+        return host, None if port == standard_port else port

-    # 4. Dev fallback: guess port
+    # 4. Check if we're behind a reverse proxy (X-Forwarded-Proto or X-Forwarded-For present)
+    # If so, assume standard port for the scheme (don't trust SERVER_PORT in this case)
+    if request.META.get("HTTP_X_FORWARDED_PROTO") or request.META.get("HTTP_X_FORWARDED_FOR"):
+        return host, None
+
+    # 5. Try SERVER_PORT from META (only if NOT behind reverse proxy)
+    port = request.META.get("SERVER_PORT")
+    if port:
+        # Omit standard ports from URLs
+        return host, None if port == standard_port else port
+
+    # 6. Dev fallback: guess port 5656
     if os.environ.get("DISPATCHARR_ENV") == "dev" or host in ("localhost", "127.0.0.1"):
-        guess = "5656"
-        return host, guess
+        return host, "5656"

-    # 5. Fallback to scheme default
-    port = "443" if request.is_secure() else "9191"
-    return host, port
+    # 7. Final fallback: assume standard port for scheme (omit from URL)
+    return host, None


 def build_absolute_uri_with_port(request, path):
+    """
+    Build an absolute URI with optional port.
+    Port is omitted from URL if None (standard port for scheme).
+ """ host, port = get_host_and_port(request) - scheme = request.scheme - return f"{scheme}://{host}:{port}{path}" + scheme = request.META.get("HTTP_X_FORWARDED_PROTO", request.scheme) + + if port: + return f"{scheme}://{host}:{port}{path}" + else: + return f"{scheme}://{host}{path}" def format_duration_hms(seconds): """ diff --git a/apps/proxy/config.py b/apps/proxy/config.py index 74bfc61f..3b1ce967 100644 --- a/apps/proxy/config.py +++ b/apps/proxy/config.py @@ -26,7 +26,7 @@ class BaseConfig: now = time.time() if cls._proxy_settings_cache is not None and (now - cls._proxy_settings_cache_time) < cls._proxy_settings_cache_ttl: return cls._proxy_settings_cache - + # Cache miss or expired - fetch from database try: from core.models import CoreSettings @@ -34,7 +34,7 @@ class BaseConfig: cls._proxy_settings_cache = settings cls._proxy_settings_cache_time = now return settings - + except Exception: # Return defaults if database query fails return { @@ -44,7 +44,7 @@ class BaseConfig: "channel_shutdown_delay": 0, "channel_init_grace_period": 5, } - + finally: # Always close the connection after reading settings try: @@ -94,10 +94,10 @@ class TSConfig(BaseConfig): CLEANUP_INTERVAL = 60 # Check for inactive channels every 60 seconds # Client tracking settings - CLIENT_RECORD_TTL = 5 # How long client records persist in Redis (seconds). Client will be considered MIA after this time. + CLIENT_RECORD_TTL = 60 # How long client records persist in Redis (seconds). Client will be considered MIA after this time. CLEANUP_CHECK_INTERVAL = 1 # How often to check for disconnected clients (seconds) - CLIENT_HEARTBEAT_INTERVAL = 1 # How often to send client heartbeats (seconds) - GHOST_CLIENT_MULTIPLIER = 5.0 # How many heartbeat intervals before client considered ghost (5 would mean 5 secondsif heartbeat interval is 1) + CLIENT_HEARTBEAT_INTERVAL = 5 # How often to send client heartbeats (seconds) + GHOST_CLIENT_MULTIPLIER = 6.0 # How many heartbeat intervals before client considered ghost (6 would mean 36 seconds if heartbeat interval is 6) CLIENT_WAIT_TIMEOUT = 30 # Seconds to wait for client to connect # Stream health and recovery settings diff --git a/apps/proxy/ts_proxy/client_manager.py b/apps/proxy/ts_proxy/client_manager.py index 3d89b3b8..a361bfa1 100644 --- a/apps/proxy/ts_proxy/client_manager.py +++ b/apps/proxy/ts_proxy/client_manager.py @@ -34,6 +34,10 @@ class ClientManager: self.heartbeat_interval = ConfigHelper.get('CLIENT_HEARTBEAT_INTERVAL', 10) self.last_heartbeat_time = {} + # Get ProxyServer instance for ownership checks + from .server import ProxyServer + self.proxy_server = ProxyServer.get_instance() + # Start heartbeat thread for local clients self._start_heartbeat_thread() self._registered_clients = set() # Track already registered client IDs @@ -44,9 +48,11 @@ class ClientManager: # Import here to avoid potential import issues from apps.proxy.ts_proxy.channel_status import ChannelStatus import redis + from django.conf import settings - # Get all channels from Redis - redis_client = redis.Redis.from_url('redis://localhost:6379', decode_responses=True) + # Get all channels from Redis using settings + redis_url = getattr(settings, 'REDIS_URL', 'redis://localhost:6379/0') + redis_client = redis.Redis.from_url(redis_url, decode_responses=True) all_channels = [] cursor = 0 @@ -337,16 +343,30 @@ class ClientManager: self._notify_owner_of_activity() - # Publish client disconnected event - event_data = json.dumps({ - "event": EventType.CLIENT_DISCONNECTED, # Use constant instead of string 
- "channel_id": self.channel_id, - "client_id": client_id, - "worker_id": self.worker_id or "unknown", - "timestamp": time.time(), - "remaining_clients": remaining - }) - self.redis_client.publish(RedisKeys.events_channel(self.channel_id), event_data) + # Check if we're the owner - if so, handle locally; if not, publish event + am_i_owner = self.proxy_server and self.proxy_server.am_i_owner(self.channel_id) + + if am_i_owner: + # We're the owner - handle the disconnect directly + logger.debug(f"Owner handling CLIENT_DISCONNECTED for client {client_id} locally (not publishing)") + if remaining == 0: + # Trigger shutdown check directly via ProxyServer method + logger.debug(f"No clients left - triggering immediate shutdown check") + # Spawn greenlet to avoid blocking + import gevent + gevent.spawn(self.proxy_server.handle_client_disconnect, self.channel_id) + else: + # We're not the owner - publish event so owner can handle it + logger.debug(f"Non-owner publishing CLIENT_DISCONNECTED event for client {client_id} on channel {self.channel_id} from worker {self.worker_id}") + event_data = json.dumps({ + "event": EventType.CLIENT_DISCONNECTED, + "channel_id": self.channel_id, + "client_id": client_id, + "worker_id": self.worker_id or "unknown", + "timestamp": time.time(), + "remaining_clients": remaining + }) + self.redis_client.publish(RedisKeys.events_channel(self.channel_id), event_data) # Trigger channel stats update via WebSocket self._trigger_stats_update() diff --git a/apps/proxy/ts_proxy/constants.py b/apps/proxy/ts_proxy/constants.py index a72cbfc5..7baa9e1c 100644 --- a/apps/proxy/ts_proxy/constants.py +++ b/apps/proxy/ts_proxy/constants.py @@ -33,6 +33,8 @@ class EventType: # Stream types class StreamType: HLS = "hls" + RTSP = "rtsp" + UDP = "udp" TS = "ts" UNKNOWN = "unknown" diff --git a/apps/proxy/ts_proxy/server.py b/apps/proxy/ts_proxy/server.py index cca827a9..db5b3d57 100644 --- a/apps/proxy/ts_proxy/server.py +++ b/apps/proxy/ts_proxy/server.py @@ -19,7 +19,7 @@ import gevent # Add gevent import from typing import Dict, Optional, Set from apps.proxy.config import TSConfig as Config from apps.channels.models import Channel, Stream -from core.utils import RedisClient +from core.utils import RedisClient, log_system_event from redis.exceptions import ConnectionError, TimeoutError from .stream_manager import StreamManager from .stream_buffer import StreamBuffer @@ -194,35 +194,11 @@ class ProxyServer: self.redis_client.delete(disconnect_key) elif event_type == EventType.CLIENT_DISCONNECTED: - logger.debug(f"Owner received {EventType.CLIENT_DISCONNECTED} event for channel {channel_id}") - # Check if any clients remain - if channel_id in self.client_managers: - # VERIFY REDIS CLIENT COUNT DIRECTLY - client_set_key = RedisKeys.clients(channel_id) - total = self.redis_client.scard(client_set_key) or 0 - - if total == 0: - logger.debug(f"No clients left after disconnect event - stopping channel {channel_id}") - # Set the disconnect timer for other workers to see - disconnect_key = RedisKeys.last_client_disconnect(channel_id) - self.redis_client.setex(disconnect_key, 60, str(time.time())) - - # Get configured shutdown delay or default - shutdown_delay = ConfigHelper.channel_shutdown_delay() - - if shutdown_delay > 0: - logger.info(f"Waiting {shutdown_delay}s before stopping channel...") - gevent.sleep(shutdown_delay) # REPLACE: time.sleep(shutdown_delay) - - # Re-check client count before stopping - total = self.redis_client.scard(client_set_key) or 0 - if total > 0: - logger.info(f"New 
clients connected during shutdown delay - aborting shutdown") - self.redis_client.delete(disconnect_key) - return - - # Stop the channel directly - self.stop_channel(channel_id) + client_id = data.get("client_id") + worker_id = data.get("worker_id") + logger.debug(f"Owner received {EventType.CLIENT_DISCONNECTED} event for channel {channel_id}, client {client_id} from worker {worker_id}") + # Delegate to dedicated method + self.handle_client_disconnect(channel_id) elif event_type == EventType.STREAM_SWITCH: @@ -646,6 +622,29 @@ class ProxyServer: logger.info(f"Created StreamManager for channel {channel_id} with stream ID {channel_stream_id}") self.stream_managers[channel_id] = stream_manager + # Log channel start event + try: + channel_obj = Channel.objects.get(uuid=channel_id) + + # Get stream name if stream_id is available + stream_name = None + if channel_stream_id: + try: + stream_obj = Stream.objects.get(id=channel_stream_id) + stream_name = stream_obj.name + except Exception: + pass + + log_system_event( + 'channel_start', + channel_id=channel_id, + channel_name=channel_obj.name, + stream_name=stream_name, + stream_id=channel_stream_id + ) + except Exception as e: + logger.error(f"Could not log channel start event: {e}") + # Create client manager with channel_id, redis_client AND worker_id (only if not already exists) if channel_id not in self.client_managers: client_manager = ClientManager( @@ -703,9 +702,10 @@ class ProxyServer: state = metadata.get(b'state', b'unknown').decode('utf-8') owner = metadata.get(b'owner', b'').decode('utf-8') - # States that indicate the channel is running properly + # States that indicate the channel is running properly or shutting down valid_states = [ChannelState.ACTIVE, ChannelState.WAITING_FOR_CLIENTS, - ChannelState.CONNECTING, ChannelState.BUFFERING, ChannelState.INITIALIZING] + ChannelState.CONNECTING, ChannelState.BUFFERING, ChannelState.INITIALIZING, + ChannelState.STOPPING] # If the channel is in a valid state, check if the owner is still active if state in valid_states: @@ -718,12 +718,24 @@ class ProxyServer: else: # This is a zombie channel - owner is gone but metadata still exists logger.warning(f"Detected zombie channel {channel_id} - owner {owner} is no longer active") + + # Check if there are any clients connected + client_set_key = RedisKeys.clients(channel_id) + client_count = self.redis_client.scard(client_set_key) or 0 + + if client_count > 0: + logger.warning(f"Zombie channel {channel_id} has {client_count} clients - attempting ownership takeover") + # Could potentially take ownership here in the future + # For now, just clean it up to be safe + else: + logger.warning(f"Zombie channel {channel_id} has no clients - cleaning up") + self._clean_zombie_channel(channel_id, metadata) return False - elif state in [ChannelState.STOPPING, ChannelState.STOPPED, ChannelState.ERROR]: - # These states indicate the channel should be reinitialized - logger.info(f"Channel {channel_id} exists but in terminal state: {state}") - return True + elif state in [ChannelState.STOPPED, ChannelState.ERROR]: + # These terminal states indicate the channel should be cleaned up and reinitialized + logger.info(f"Channel {channel_id} in terminal state {state} - returning False to trigger cleanup") + return False else: # Unknown or initializing state, check how long it's been in this state if b'state_changed_at' in metadata: @@ -787,6 +799,44 @@ class ProxyServer: logger.error(f"Error cleaning zombie channel {channel_id}: {e}", exc_info=True) return False + def 
handle_client_disconnect(self, channel_id): + """ + Handle client disconnect event - check if channel should shut down. + Can be called directly by owner or via PubSub from non-owner workers. + """ + if channel_id not in self.client_managers: + return + + try: + # VERIFY REDIS CLIENT COUNT DIRECTLY + client_set_key = RedisKeys.clients(channel_id) + total = self.redis_client.scard(client_set_key) or 0 + + if total == 0: + logger.debug(f"No clients left after disconnect event - stopping channel {channel_id}") + # Set the disconnect timer for other workers to see + disconnect_key = RedisKeys.last_client_disconnect(channel_id) + self.redis_client.setex(disconnect_key, 60, str(time.time())) + + # Get configured shutdown delay or default + shutdown_delay = ConfigHelper.channel_shutdown_delay() + + if shutdown_delay > 0: + logger.info(f"Waiting {shutdown_delay}s before stopping channel...") + gevent.sleep(shutdown_delay) + + # Re-check client count before stopping + total = self.redis_client.scard(client_set_key) or 0 + if total > 0: + logger.info(f"New clients connected during shutdown delay - aborting shutdown") + self.redis_client.delete(disconnect_key) + return + + # Stop the channel directly + self.stop_channel(channel_id) + except Exception as e: + logger.error(f"Error handling client disconnect for channel {channel_id}: {e}") + def stop_channel(self, channel_id): """Stop a channel with proper ownership handling""" try: @@ -834,6 +884,41 @@ class ProxyServer: self.release_ownership(channel_id) logger.info(f"Released ownership of channel {channel_id}") + # Log channel stop event (after cleanup, before releasing ownership section ends) + try: + channel_obj = Channel.objects.get(uuid=channel_id) + + # Calculate runtime and get total bytes from metadata + runtime = None + total_bytes = None + if self.redis_client: + metadata_key = RedisKeys.channel_metadata(channel_id) + metadata = self.redis_client.hgetall(metadata_key) + if metadata: + # Calculate runtime from init_time + if b'init_time' in metadata: + try: + init_time = float(metadata[b'init_time'].decode('utf-8')) + runtime = round(time.time() - init_time, 2) + except Exception: + pass + # Get total bytes transferred + if b'total_bytes' in metadata: + try: + total_bytes = int(metadata[b'total_bytes'].decode('utf-8')) + except Exception: + pass + + log_system_event( + 'channel_stop', + channel_id=channel_id, + channel_name=channel_obj.name, + runtime=runtime, + total_bytes=total_bytes + ) + except Exception as e: + logger.error(f"Could not log channel stop event: {e}") + # Always clean up local resources - WITH SAFE CHECKS if channel_id in self.stream_managers: del self.stream_managers[channel_id] @@ -939,6 +1024,15 @@ class ProxyServer: if channel_id in self.client_managers: client_manager = self.client_managers[channel_id] total_clients = client_manager.get_total_client_count() + else: + # This can happen during reconnection attempts or crashes + # Check Redis directly for any connected clients + if self.redis_client: + client_set_key = RedisKeys.clients(channel_id) + total_clients = self.redis_client.scard(client_set_key) or 0 + + if total_clients == 0: + logger.warning(f"Channel {channel_id} is missing client_manager but we're the owner with 0 clients - will trigger cleanup") # Log client count periodically if time.time() % 30 < 1: # Every ~30 seconds @@ -946,7 +1040,14 @@ class ProxyServer: # If in connecting or waiting_for_clients state, check grace period if channel_state in [ChannelState.CONNECTING, ChannelState.WAITING_FOR_CLIENTS]: 
- # Get connection ready time from metadata + # Check if channel is already stopping + if self.redis_client: + stop_key = RedisKeys.channel_stopping(channel_id) + if self.redis_client.exists(stop_key): + logger.debug(f"Channel {channel_id} is already stopping - skipping monitor shutdown") + continue + + # Get connection_ready_time from metadata (indicates if channel reached ready state) connection_ready_time = None if metadata and b'connection_ready_time' in metadata: try: @@ -954,17 +1055,60 @@ class ProxyServer: except (ValueError, TypeError): pass - # If still connecting, give it more time - if channel_state == ChannelState.CONNECTING: - logger.debug(f"Channel {channel_id} still connecting - not checking for clients yet") - continue + if total_clients == 0: + # Check if we have a connection_attempt timestamp (set when CONNECTING starts) + connection_attempt_time = None + attempt_key = RedisKeys.connection_attempt(channel_id) + if self.redis_client: + attempt_value = self.redis_client.get(attempt_key) + if attempt_value: + try: + connection_attempt_time = float(attempt_value.decode('utf-8')) + except (ValueError, TypeError): + pass - # If waiting for clients, check grace period - if connection_ready_time: + # Also get init time as a fallback + init_time = None + if metadata and b'init_time' in metadata: + try: + init_time = float(metadata[b'init_time'].decode('utf-8')) + except (ValueError, TypeError): + pass + + # Use whichever timestamp we have (prefer connection_attempt as it's more recent) + start_time = connection_attempt_time or init_time + + if start_time: + # Check which timeout to apply based on channel lifecycle + if connection_ready_time: + # Already reached ready - use shutdown_delay + time_since_ready = time.time() - connection_ready_time + shutdown_delay = ConfigHelper.channel_shutdown_delay() + + if time_since_ready > shutdown_delay: + logger.warning( + f"Channel {channel_id} in {channel_state} state with 0 clients for {time_since_ready:.1f}s " + f"(after reaching ready, shutdown_delay: {shutdown_delay}s) - stopping channel" + ) + self.stop_channel(channel_id) + continue + else: + # Never reached ready - use grace_period timeout + time_since_start = time.time() - start_time + connecting_timeout = ConfigHelper.channel_init_grace_period() + + if time_since_start > connecting_timeout: + logger.warning( + f"Channel {channel_id} stuck in {channel_state} state for {time_since_start:.1f}s " + f"with no clients (timeout: {connecting_timeout}s) - stopping channel due to upstream issues" + ) + self.stop_channel(channel_id) + continue + elif connection_ready_time: + # We have clients now, but check grace period for state transition grace_period = ConfigHelper.channel_init_grace_period() time_since_ready = time.time() - connection_ready_time - # Add this debug log logger.debug(f"GRACE PERIOD CHECK: Channel {channel_id} in {channel_state} state, " f"time_since_ready={time_since_ready:.1f}s, grace_period={grace_period}s, " f"total_clients={total_clients}") @@ -973,16 +1117,9 @@ class ProxyServer: # Still within grace period logger.debug(f"Channel {channel_id} in grace period - {time_since_ready:.1f}s of {grace_period}s elapsed") continue - elif total_clients == 0: - # Grace period expired with no clients - logger.info(f"Grace period expired ({time_since_ready:.1f}s > {grace_period}s) with no clients - stopping channel {channel_id}") - self.stop_channel(channel_id) else: - # Grace period expired but we have clients - mark channel as active + # Grace period expired with clients - mark 
channel as active logger.info(f"Grace period expired with {total_clients} clients - marking channel {channel_id} as active") - old_state = "unknown" - if metadata and b'state' in metadata: - old_state = metadata[b'state'].decode('utf-8') if self.update_channel_state(channel_id, ChannelState.ACTIVE, { "grace_period_ended_at": str(time.time()), "clients_at_activation": str(total_clients) @@ -990,6 +1127,13 @@ class ProxyServer: logger.info(f"Channel {channel_id} activated with {total_clients} clients after grace period") # If active and no clients, start normal shutdown procedure elif channel_state not in [ChannelState.CONNECTING, ChannelState.WAITING_FOR_CLIENTS] and total_clients == 0: + # Check if channel is already stopping + if self.redis_client: + stop_key = RedisKeys.channel_stopping(channel_id) + if self.redis_client.exists(stop_key): + logger.debug(f"Channel {channel_id} is already stopping - skipping monitor shutdown") + continue + # Check if there's a pending no-clients timeout disconnect_key = RedisKeys.last_client_disconnect(channel_id) disconnect_time = None @@ -1049,14 +1193,30 @@ class ProxyServer: continue # Check for local client count - if zero, clean up our local resources - if self.client_managers[channel_id].get_client_count() == 0: - # We're not the owner, and we have no local clients - clean up our resources - logger.debug(f"Non-owner cleanup: Channel {channel_id} has no local clients, cleaning up local resources") + if channel_id in self.client_managers: + if self.client_managers[channel_id].get_client_count() == 0: + # We're not the owner, and we have no local clients - clean up our resources + logger.debug(f"Non-owner cleanup: Channel {channel_id} has no local clients, cleaning up local resources") + self._cleanup_local_resources(channel_id) + else: + # This shouldn't happen, but clean up anyway + logger.warning(f"Non-owner cleanup: Channel {channel_id} has no client_manager entry, cleaning up local resources") self._cleanup_local_resources(channel_id) except Exception as e: logger.error(f"Error in cleanup thread: {e}", exc_info=True) + # Periodically check for orphaned channels (every 30 seconds) + if hasattr(self, '_last_orphan_check'): + if time.time() - self._last_orphan_check > 30: + try: + self._check_orphaned_metadata() + self._last_orphan_check = time.time() + except Exception as orphan_error: + logger.error(f"Error checking orphaned metadata: {orphan_error}", exc_info=True) + else: + self._last_orphan_check = time.time() + gevent.sleep(ConfigHelper.cleanup_check_interval()) # REPLACE: time.sleep(ConfigHelper.cleanup_check_interval()) thread = threading.Thread(target=cleanup_task, daemon=True) @@ -1078,10 +1238,6 @@ class ProxyServer: try: channel_id = key.decode('utf-8').split(':')[2] - # Skip channels we already have locally - if channel_id in self.stream_buffers: - continue - # Check if this channel has an owner owner = self.get_channel_owner(channel_id) @@ -1096,13 +1252,84 @@ class ProxyServer: else: # Orphaned channel with no clients - clean it up logger.info(f"Cleaning up orphaned channel {channel_id}") - self._clean_redis_keys(channel_id) + + # If we have it locally, stop it properly to clean up processes + if channel_id in self.stream_managers or channel_id in self.client_managers: + logger.info(f"Orphaned channel {channel_id} is local - calling stop_channel") + self.stop_channel(channel_id) + else: + # Just clean up Redis keys for remote channels + self._clean_redis_keys(channel_id) except Exception as e: logger.error(f"Error processing channel 
key {key}: {e}") except Exception as e: logger.error(f"Error checking orphaned channels: {e}") + def _check_orphaned_metadata(self): + """ + Check for metadata entries that have no owner and no clients. + This catches zombie channels that weren't cleaned up properly. + """ + if not self.redis_client: + return + + try: + # Get all channel metadata keys + channel_pattern = "ts_proxy:channel:*:metadata" + channel_keys = self.redis_client.keys(channel_pattern) + + for key in channel_keys: + try: + channel_id = key.decode('utf-8').split(':')[2] + + # Get metadata first + metadata = self.redis_client.hgetall(key) + if not metadata: + # Empty metadata - clean it up + logger.warning(f"Found empty metadata for channel {channel_id} - cleaning up") + # If we have it locally, stop it properly + if channel_id in self.stream_managers or channel_id in self.client_managers: + self.stop_channel(channel_id) + else: + self._clean_redis_keys(channel_id) + continue + + # Get owner + owner = metadata.get(b'owner', b'').decode('utf-8') if b'owner' in metadata else '' + + # Check if owner is still alive + owner_alive = False + if owner: + owner_heartbeat_key = f"ts_proxy:worker:{owner}:heartbeat" + owner_alive = self.redis_client.exists(owner_heartbeat_key) + + # Check client count + client_set_key = RedisKeys.clients(channel_id) + client_count = self.redis_client.scard(client_set_key) or 0 + + # If no owner and no clients, clean it up + if not owner_alive and client_count == 0: + state = metadata.get(b'state', b'unknown').decode('utf-8') if b'state' in metadata else 'unknown' + logger.warning(f"Found orphaned metadata for channel {channel_id} (state: {state}, owner: {owner}, clients: {client_count}) - cleaning up") + + # If we have it locally, stop it properly to clean up transcode/proxy processes + if channel_id in self.stream_managers or channel_id in self.client_managers: + logger.info(f"Channel {channel_id} is local - calling stop_channel to clean up processes") + self.stop_channel(channel_id) + else: + # Just clean up Redis keys for remote channels + self._clean_redis_keys(channel_id) + elif not owner_alive and client_count > 0: + # Owner is gone but clients remain - just log for now + logger.warning(f"Found orphaned channel {channel_id} with {client_count} clients but no owner - may need ownership takeover") + + except Exception as e: + logger.error(f"Error processing metadata key {key}: {e}", exc_info=True) + + except Exception as e: + logger.error(f"Error checking orphaned metadata: {e}", exc_info=True) + def _clean_redis_keys(self, channel_id): """Clean up all Redis keys for a channel more efficiently""" # Release the channel, stream, and profile keys from the channel diff --git a/apps/proxy/ts_proxy/services/channel_service.py b/apps/proxy/ts_proxy/services/channel_service.py index 551e2d27..4c4a73ac 100644 --- a/apps/proxy/ts_proxy/services/channel_service.py +++ b/apps/proxy/ts_proxy/services/channel_service.py @@ -14,6 +14,8 @@ from ..server import ProxyServer from ..redis_keys import RedisKeys from ..constants import EventType, ChannelState, ChannelMetadataField from ..url_utils import get_stream_info_for_switch +from core.utils import log_system_event +from .log_parsers import LogParserFactory logger = logging.getLogger("ts_proxy") @@ -418,124 +420,51 @@ class ChannelService: @staticmethod def parse_and_store_stream_info(channel_id, stream_info_line, stream_type="video", stream_id=None): - """Parse FFmpeg stream info line and store in Redis metadata and database""" + """ + Parse stream info from 
FFmpeg/VLC/Streamlink logs and store in Redis/DB. + Uses specialized parsers for each streaming tool. + """ try: - if stream_type == "input": - # Example lines: - # Input #0, mpegts, from 'http://example.com/stream.ts': - # Input #0, hls, from 'http://example.com/stream.m3u8': + # Use factory to parse the line based on stream type + parsed_data = LogParserFactory.parse(stream_type, stream_info_line) + + if not parsed_data: + return - # Extract input format (e.g., "mpegts", "hls", "flv", etc.) - input_match = re.search(r'Input #\d+,\s*([^,]+)', stream_info_line) - input_format = input_match.group(1).strip() if input_match else None + # Update Redis and database with parsed data + ChannelService._update_stream_info_in_redis( + channel_id, + parsed_data.get('video_codec'), + parsed_data.get('resolution'), + parsed_data.get('width'), + parsed_data.get('height'), + parsed_data.get('source_fps'), + parsed_data.get('pixel_format'), + parsed_data.get('video_bitrate'), + parsed_data.get('audio_codec'), + parsed_data.get('sample_rate'), + parsed_data.get('audio_channels'), + parsed_data.get('audio_bitrate'), + parsed_data.get('stream_type') + ) - # Store in Redis if we have valid data - if input_format: - ChannelService._update_stream_info_in_redis(channel_id, None, None, None, None, None, None, None, None, None, None, None, input_format) - # Save to database if stream_id is provided - if stream_id: - ChannelService._update_stream_stats_in_db(stream_id, stream_type=input_format) - - logger.debug(f"Input format info - Format: {input_format} for channel {channel_id}") - - elif stream_type == "video": - # Example line: - # Stream #0:0: Video: h264 (Main), yuv420p(tv, progressive), 1280x720 [SAR 1:1 DAR 16:9], q=2-31, 2000 kb/s, 29.97 fps, 90k tbn - - # Extract video codec (e.g., "h264", "mpeg2video", etc.) 
- codec_match = re.search(r'Video:\s*([a-zA-Z0-9_]+)', stream_info_line) - video_codec = codec_match.group(1) if codec_match else None - - # Extract resolution (e.g., "1280x720") - be more specific to avoid hex values - # Look for resolution patterns that are realistic video dimensions - resolution_match = re.search(r'\b(\d{3,5})x(\d{3,5})\b', stream_info_line) - if resolution_match: - width = int(resolution_match.group(1)) - height = int(resolution_match.group(2)) - # Validate that these look like reasonable video dimensions - if 100 <= width <= 10000 and 100 <= height <= 10000: - resolution = f"{width}x{height}" - else: - width = height = resolution = None - else: - width = height = resolution = None - - # Extract source FPS (e.g., "29.97 fps") - fps_match = re.search(r'(\d+(?:\.\d+)?)\s*fps', stream_info_line) - source_fps = float(fps_match.group(1)) if fps_match else None - - # Extract pixel format (e.g., "yuv420p") - pixel_format_match = re.search(r'Video:\s*[^,]+,\s*([^,(]+)', stream_info_line) - pixel_format = None - if pixel_format_match: - pf = pixel_format_match.group(1).strip() - # Clean up pixel format (remove extra info in parentheses) - if '(' in pf: - pf = pf.split('(')[0].strip() - pixel_format = pf - - # Extract bitrate if present (e.g., "2000 kb/s") - video_bitrate = None - bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', stream_info_line) - if bitrate_match: - video_bitrate = float(bitrate_match.group(1)) - - # Store in Redis if we have valid data - if any(x is not None for x in [video_codec, resolution, source_fps, pixel_format, video_bitrate]): - ChannelService._update_stream_info_in_redis(channel_id, video_codec, resolution, width, height, source_fps, pixel_format, video_bitrate, None, None, None, None, None) - # Save to database if stream_id is provided - if stream_id: - ChannelService._update_stream_stats_in_db( - stream_id, - video_codec=video_codec, - resolution=resolution, - source_fps=source_fps, - pixel_format=pixel_format, - video_bitrate=video_bitrate - ) - - logger.info(f"Video stream info - Codec: {video_codec}, Resolution: {resolution}, " - f"Source FPS: {source_fps}, Pixel Format: {pixel_format}, " - f"Video Bitrate: {video_bitrate} kb/s") - - elif stream_type == "audio": - # Example line: - # Stream #0:1[0x101]: Audio: aac (LC) ([15][0][0][0] / 0x000F), 48000 Hz, stereo, fltp, 64 kb/s - - # Extract audio codec (e.g., "aac", "mp3", etc.) 
- codec_match = re.search(r'Audio:\s*([a-zA-Z0-9_]+)', stream_info_line) - audio_codec = codec_match.group(1) if codec_match else None - - # Extract sample rate (e.g., "48000 Hz") - sample_rate_match = re.search(r'(\d+)\s*Hz', stream_info_line) - sample_rate = int(sample_rate_match.group(1)) if sample_rate_match else None - - # Extract channel layout (e.g., "stereo", "5.1", "mono") - # Look for common channel layouts - channel_match = re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', stream_info_line, re.IGNORECASE) - channels = channel_match.group(1) if channel_match else None - - # Extract audio bitrate if present (e.g., "64 kb/s") - audio_bitrate = None - bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', stream_info_line) - if bitrate_match: - audio_bitrate = float(bitrate_match.group(1)) - - # Store in Redis if we have valid data - if any(x is not None for x in [audio_codec, sample_rate, channels, audio_bitrate]): - ChannelService._update_stream_info_in_redis(channel_id, None, None, None, None, None, None, None, audio_codec, sample_rate, channels, audio_bitrate, None) - # Save to database if stream_id is provided - if stream_id: - ChannelService._update_stream_stats_in_db( - stream_id, - audio_codec=audio_codec, - sample_rate=sample_rate, - audio_channels=channels, - audio_bitrate=audio_bitrate - ) + if stream_id: + ChannelService._update_stream_stats_in_db( + stream_id, + video_codec=parsed_data.get('video_codec'), + resolution=parsed_data.get('resolution'), + source_fps=parsed_data.get('source_fps'), + pixel_format=parsed_data.get('pixel_format'), + video_bitrate=parsed_data.get('video_bitrate'), + audio_codec=parsed_data.get('audio_codec'), + sample_rate=parsed_data.get('sample_rate'), + audio_channels=parsed_data.get('audio_channels'), + audio_bitrate=parsed_data.get('audio_bitrate'), + stream_type=parsed_data.get('stream_type') + ) except Exception as e: - logger.debug(f"Error parsing FFmpeg {stream_type} stream info: {e}") + logger.debug(f"Error parsing {stream_type} stream info: {e}") @staticmethod def _update_stream_info_in_redis(channel_id, codec, resolution, width, height, fps, pixel_format, video_bitrate, audio_codec=None, sample_rate=None, channels=None, audio_bitrate=None, input_format=None): @@ -598,7 +527,7 @@ class ChannelService: def _update_stream_stats_in_db(stream_id, **stats): """Update stream stats in database""" from django.db import connection - + try: from apps.channels.models import Stream from django.utils import timezone @@ -624,7 +553,7 @@ class ChannelService: except Exception as e: logger.error(f"Error updating stream stats in database for stream {stream_id}: {e}") return False - + finally: # Always close database connection after update try: @@ -700,6 +629,7 @@ class ChannelService: RedisKeys.events_channel(channel_id), json.dumps(switch_request) ) + return True @staticmethod diff --git a/apps/proxy/ts_proxy/services/log_parsers.py b/apps/proxy/ts_proxy/services/log_parsers.py new file mode 100644 index 00000000..95ee7a06 --- /dev/null +++ b/apps/proxy/ts_proxy/services/log_parsers.py @@ -0,0 +1,410 @@ +"""Log parsers for FFmpeg, Streamlink, and VLC output.""" +import re +import logging +from abc import ABC, abstractmethod +from typing import Optional, Dict, Any + +logger = logging.getLogger(__name__) + + +class BaseLogParser(ABC): + """Base class for log parsers""" + + # Map of stream_type -> method_name that this parser handles + STREAM_TYPE_METHODS: Dict[str, str] = {} + + @abstractmethod + def can_parse(self, line: str) -> Optional[str]: + """ + 
Check if this parser can handle the line. + Returns the stream_type if it can parse, None otherwise. + e.g., 'video', 'audio', 'vlc_video', 'vlc_audio', 'streamlink' + """ + pass + + @abstractmethod + def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]: + pass + + @abstractmethod + def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]: + pass + + @abstractmethod + def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]: + pass + + +class FFmpegLogParser(BaseLogParser): + """Parser for FFmpeg log output""" + + STREAM_TYPE_METHODS = { + 'input': 'parse_input_format', + 'video': 'parse_video_stream', + 'audio': 'parse_audio_stream' + } + + def can_parse(self, line: str) -> Optional[str]: + """Check if this is an FFmpeg line we can parse""" + lower = line.lower() + + # Input format detection + if lower.startswith('input #'): + return 'input' + + # Stream info (only during input phase, but we'll let stream_manager handle phase tracking) + if 'stream #' in lower: + if 'video:' in lower: + return 'video' + elif 'audio:' in lower: + return 'audio' + + return None + + def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]: + """Parse FFmpeg input format (e.g., mpegts, hls)""" + try: + input_match = re.search(r'Input #\d+,\s*([^,]+)', line) + input_format = input_match.group(1).strip() if input_match else None + + if input_format: + logger.debug(f"Input format info - Format: {input_format}") + return {'stream_type': input_format} + except Exception as e: + logger.debug(f"Error parsing FFmpeg input format: {e}") + + return None + + def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]: + """Parse FFmpeg video stream info""" + try: + result = {} + + # Extract codec, resolution, fps, pixel format, bitrate + codec_match = re.search(r'Video:\s*([a-zA-Z0-9_]+)', line) + if codec_match: + result['video_codec'] = codec_match.group(1) + + resolution_match = re.search(r'\b(\d{3,5})x(\d{3,5})\b', line) + if resolution_match: + width = int(resolution_match.group(1)) + height = int(resolution_match.group(2)) + if 100 <= width <= 10000 and 100 <= height <= 10000: + result['resolution'] = f"{width}x{height}" + result['width'] = width + result['height'] = height + + fps_match = re.search(r'(\d+(?:\.\d+)?)\s*fps', line) + if fps_match: + result['source_fps'] = float(fps_match.group(1)) + + pixel_format_match = re.search(r'Video:\s*[^,]+,\s*([^,(]+)', line) + if pixel_format_match: + pf = pixel_format_match.group(1).strip() + if '(' in pf: + pf = pf.split('(')[0].strip() + result['pixel_format'] = pf + + bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', line) + if bitrate_match: + result['video_bitrate'] = float(bitrate_match.group(1)) + + if result: + logger.info(f"Video stream info - Codec: {result.get('video_codec')}, " + f"Resolution: {result.get('resolution')}, " + f"Source FPS: {result.get('source_fps')}, " + f"Pixel Format: {result.get('pixel_format')}, " + f"Video Bitrate: {result.get('video_bitrate')} kb/s") + return result + + except Exception as e: + logger.debug(f"Error parsing FFmpeg video stream info: {e}") + + return None + + def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]: + """Parse FFmpeg audio stream info""" + try: + result = {} + + codec_match = re.search(r'Audio:\s*([a-zA-Z0-9_]+)', line) + if codec_match: + result['audio_codec'] = codec_match.group(1) + + sample_rate_match = re.search(r'(\d+)\s*Hz', line) + if sample_rate_match: + result['sample_rate'] = int(sample_rate_match.group(1)) + + channel_match = 
re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', line, re.IGNORECASE) + if channel_match: + result['audio_channels'] = channel_match.group(1) + + bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', line) + if bitrate_match: + result['audio_bitrate'] = float(bitrate_match.group(1)) + + if result: + return result + + except Exception as e: + logger.debug(f"Error parsing FFmpeg audio stream info: {e}") + + return None + + +class VLCLogParser(BaseLogParser): + """Parser for VLC log output""" + + STREAM_TYPE_METHODS = { + 'vlc_video': 'parse_video_stream', + 'vlc_audio': 'parse_audio_stream' + } + + def can_parse(self, line: str) -> Optional[str]: + """Check if this is a VLC line we can parse""" + lower = line.lower() + + # VLC TS demux codec detection + if 'ts demux debug' in lower and 'type=' in lower: + if 'video' in lower: + return 'vlc_video' + elif 'audio' in lower: + return 'vlc_audio' + + # VLC decoder output + if 'decoder' in lower and ('channels:' in lower or 'samplerate:' in lower or 'x' in line or 'fps' in lower): + if 'audio' in lower or 'channels:' in lower or 'samplerate:' in lower: + return 'vlc_audio' + else: + return 'vlc_video' + + # VLC transcode output for resolution/FPS + if 'stream_out_transcode' in lower and ('source fps' in lower or ('source ' in lower and 'x' in line)): + return 'vlc_video' + + return None + + def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]: + return None + + def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]: + """Parse VLC TS demux output and decoder info for video""" + try: + lower = line.lower() + result = {} + + # Codec detection from TS demux + video_codec_map = { + ('avc', 'h.264', 'type=0x1b'): "h264", + ('hevc', 'h.265', 'type=0x24'): "hevc", + ('mpeg-2', 'type=0x02'): "mpeg2video", + ('mpeg-4', 'type=0x10'): "mpeg4" + } + + for patterns, codec in video_codec_map.items(): + if any(p in lower for p in patterns): + result['video_codec'] = codec + break + + # Extract FPS from transcode output: "source fps 30/1" + fps_fraction_match = re.search(r'source fps\s+(\d+)/(\d+)', lower) + if fps_fraction_match: + numerator = int(fps_fraction_match.group(1)) + denominator = int(fps_fraction_match.group(2)) + if denominator > 0: + result['source_fps'] = numerator / denominator + + # Extract resolution from transcode output: "source 1280x720" + source_res_match = re.search(r'source\s+(\d{3,4})x(\d{3,4})', lower) + if source_res_match: + width = int(source_res_match.group(1)) + height = int(source_res_match.group(2)) + if 100 <= width <= 10000 and 100 <= height <= 10000: + result['resolution'] = f"{width}x{height}" + result['width'] = width + result['height'] = height + else: + # Fallback: generic resolution pattern + resolution_match = re.search(r'(\d{3,4})x(\d{3,4})', line) + if resolution_match: + width = int(resolution_match.group(1)) + height = int(resolution_match.group(2)) + if 100 <= width <= 10000 and 100 <= height <= 10000: + result['resolution'] = f"{width}x{height}" + result['width'] = width + result['height'] = height + + # Fallback: try to extract FPS from generic format + if 'source_fps' not in result: + fps_match = re.search(r'(\d+\.?\d*)\s*fps', lower) + if fps_match: + result['source_fps'] = float(fps_match.group(1)) + + return result if result else None + + except Exception as e: + logger.debug(f"Error parsing VLC video stream info: {e}") + + return None + + def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]: + """Parse VLC TS demux output and decoder info for audio""" + try: + 
lower = line.lower() + result = {} + + # Codec detection from TS demux + audio_codec_map = { + ('type=0xf', 'adts'): "aac", + ('type=0x03', 'type=0x04'): "mp3", + ('type=0x06', 'type=0x81'): "ac3", + ('type=0x0b', 'lpcm'): "pcm" + } + + for patterns, codec in audio_codec_map.items(): + if any(p in lower for p in patterns): + result['audio_codec'] = codec + break + + # VLC decoder format: "AAC channels: 2 samplerate: 48000" + if 'channels:' in lower: + channels_match = re.search(r'channels:\s*(\d+)', lower) + if channels_match: + num_channels = int(channels_match.group(1)) + # Convert number to name + channel_names = {1: 'mono', 2: 'stereo', 6: '5.1', 8: '7.1'} + result['audio_channels'] = channel_names.get(num_channels, str(num_channels)) + + if 'samplerate:' in lower: + samplerate_match = re.search(r'samplerate:\s*(\d+)', lower) + if samplerate_match: + result['sample_rate'] = int(samplerate_match.group(1)) + + # Try to extract sample rate (Hz format) + sample_rate_match = re.search(r'(\d+)\s*hz', lower) + if sample_rate_match and 'sample_rate' not in result: + result['sample_rate'] = int(sample_rate_match.group(1)) + + # Try to extract channels (word format) + if 'audio_channels' not in result: + channel_match = re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', lower) + if channel_match: + result['audio_channels'] = channel_match.group(1) + + return result if result else None + + except Exception as e: + logger.error(f"[VLC AUDIO PARSER] Error parsing VLC audio stream info: {e}") + + return None + + +class StreamlinkLogParser(BaseLogParser): + """Parser for Streamlink log output""" + + STREAM_TYPE_METHODS = { + 'streamlink': 'parse_video_stream' + } + + def can_parse(self, line: str) -> Optional[str]: + """Check if this is a Streamlink line we can parse""" + lower = line.lower() + + if 'opening stream:' in lower or 'available streams:' in lower: + return 'streamlink' + + return None + + def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]: + return None + + def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]: + """Parse Streamlink quality/resolution""" + try: + quality_match = re.search(r'(\d+p|\d+x\d+)', line) + if quality_match: + quality = quality_match.group(1) + + if 'x' in quality: + resolution = quality + width, height = map(int, quality.split('x')) + else: + resolutions = { + '2160p': ('3840x2160', 3840, 2160), + '1080p': ('1920x1080', 1920, 1080), + '720p': ('1280x720', 1280, 720), + '480p': ('854x480', 854, 480), + '360p': ('640x360', 640, 360) + } + resolution, width, height = resolutions.get(quality, ('1920x1080', 1920, 1080)) + + return { + 'video_codec': 'h264', + 'resolution': resolution, + 'width': width, + 'height': height, + 'pixel_format': 'yuv420p' + } + + except Exception as e: + logger.debug(f"Error parsing Streamlink video info: {e}") + + return None + + def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]: + return None + + +class LogParserFactory: + """Factory to get the appropriate log parser""" + + _parsers = { + 'ffmpeg': FFmpegLogParser(), + 'vlc': VLCLogParser(), + 'streamlink': StreamlinkLogParser() + } + + @classmethod + def _get_parser_and_method(cls, stream_type: str) -> Optional[tuple[BaseLogParser, str]]: + """Determine parser and method from stream_type""" + # Check each parser to see if it handles this stream_type + for parser in cls._parsers.values(): + method_name = parser.STREAM_TYPE_METHODS.get(stream_type) + if method_name: + return (parser, method_name) + + return None + + @classmethod + def 
parse(cls, stream_type: str, line: str) -> Optional[Dict[str, Any]]: + """ + Parse a log line based on stream type. + Returns parsed data or None if parsing fails. + """ + result = cls._get_parser_and_method(stream_type) + if not result: + return None + + parser, method_name = result + method = getattr(parser, method_name, None) + if method: + return method(line) + + return None + + @classmethod + def auto_parse(cls, line: str) -> Optional[tuple[str, Dict[str, Any]]]: + """ + Automatically detect which parser can handle this line and parse it. + Returns (stream_type, parsed_data) or None if no parser can handle it. + """ + # Try each parser to see if it can handle this line + for parser in cls._parsers.values(): + stream_type = parser.can_parse(line) + if stream_type: + # Parser can handle this line, now parse it + parsed_data = cls.parse(stream_type, line) + if parsed_data: + return (stream_type, parsed_data) + + return None diff --git a/apps/proxy/ts_proxy/stream_generator.py b/apps/proxy/ts_proxy/stream_generator.py index 368691b8..50404f1d 100644 --- a/apps/proxy/ts_proxy/stream_generator.py +++ b/apps/proxy/ts_proxy/stream_generator.py @@ -8,6 +8,8 @@ import logging import threading import gevent # Add this import at the top of your file from apps.proxy.config import TSConfig as Config +from apps.channels.models import Channel +from core.utils import log_system_event from .server import ProxyServer from .utils import create_ts_packet, get_logger from .redis_keys import RedisKeys @@ -52,6 +54,10 @@ class StreamGenerator: self.last_stats_bytes = 0 self.current_rate = 0.0 + # TTL refresh tracking + self.last_ttl_refresh = time.time() + self.ttl_refresh_interval = 3 # Refresh TTL every 3 seconds of active streaming + def generate(self): """ Generator function that produces the stream content for the client. 
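Aside on the new log_parsers module introduced above (illustrative sketch only, not part of the patch): the inline FFmpeg regexes that were removed from channel_service.py are replaced by per-tool parsers behind a small factory. Callers that already know which command produced a stderr line use LogParserFactory.parse(stream_type, line); callers that do not can fall back to LogParserFactory.auto_parse(line), which asks each parser's can_parse() in turn. A minimal sketch of both paths, reusing the sample FFmpeg line quoted in the removed parser's comments:

    from apps.proxy.ts_proxy.services.log_parsers import LogParserFactory

    # Sample FFmpeg stderr line (taken from the examples in the removed code).
    line = ("Stream #0:0: Video: h264 (Main), yuv420p(tv, progressive), "
            "1280x720 [SAR 1:1 DAR 16:9], q=2-31, 2000 kb/s, 29.97 fps, 90k tbn")

    # Direct routing when the stream command (and therefore the parser) is known.
    parsed = LogParserFactory.parse("video", line)
    # With the regexes defined above this yields roughly:
    # {'video_codec': 'h264', 'resolution': '1280x720', 'width': 1280,
    #  'height': 720, 'source_fps': 29.97, 'pixel_format': 'yuv420p',
    #  'video_bitrate': 2000.0}

    # Auto-detection when the command is unknown (the fallback path used by
    # stream_manager when the stream profile's command is not ffmpeg/cvlc/streamlink).
    detected = LogParserFactory.auto_parse(line)
    if detected:
        stream_type, parsed = detected  # ('video', {...}) for the line above

This mirrors the routing added in stream_manager.py: parser_type is derived once from the stream profile's command, so only unknown commands pay the cost of auto-detection on every stderr line.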
@@ -84,6 +90,20 @@ class StreamGenerator: if not self._setup_streaming(): return + # Log client connect event + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'client_connect', + channel_id=self.channel_id, + channel_name=channel_obj.name, + client_ip=self.client_ip, + client_id=self.client_id, + user_agent=self.client_user_agent[:100] if self.client_user_agent else None + ) + except Exception as e: + logger.error(f"Could not log client connect event: {e}") + # Main streaming loop for chunk in self._stream_data_generator(): yield chunk @@ -336,7 +356,20 @@ class StreamGenerator: ChannelMetadataField.STATS_UPDATED_AT: str(current_time) } proxy_server.redis_client.hset(client_key, mapping=stats) - # No need to set expiration as client heartbeat will refresh this key + + # Refresh TTL periodically while actively streaming + # This provides proof-of-life independent of heartbeat thread + if current_time - self.last_ttl_refresh > self.ttl_refresh_interval: + try: + # Refresh TTL on client key + proxy_server.redis_client.expire(client_key, Config.CLIENT_RECORD_TTL) + # Also refresh the client set TTL + client_set_key = f"ts_proxy:channel:{self.channel_id}:clients" + proxy_server.redis_client.expire(client_set_key, Config.CLIENT_RECORD_TTL) + self.last_ttl_refresh = current_time + logger.debug(f"[{self.client_id}] Refreshed client TTL (active streaming)") + except Exception as ttl_error: + logger.debug(f"[{self.client_id}] Failed to refresh TTL: {ttl_error}") except Exception as e: logger.warning(f"[{self.client_id}] Failed to store stats in Redis: {e}") @@ -422,6 +455,22 @@ class StreamGenerator: total_clients = client_manager.get_total_client_count() logger.info(f"[{self.client_id}] Disconnected after {elapsed:.2f}s (local: {local_clients}, total: {total_clients})") + # Log client disconnect event + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'client_disconnect', + channel_id=self.channel_id, + channel_name=channel_obj.name, + client_ip=self.client_ip, + client_id=self.client_id, + user_agent=self.client_user_agent[:100] if self.client_user_agent else None, + duration=round(elapsed, 2), + bytes_sent=self.bytes_sent + ) + except Exception as e: + logger.error(f"Could not log client disconnect event: {e}") + # Schedule channel shutdown if no clients left if not stream_released: # Only if we haven't already released the stream self._schedule_channel_shutdown_if_needed(local_clients) diff --git a/apps/proxy/ts_proxy/stream_manager.py b/apps/proxy/ts_proxy/stream_manager.py index 99ae8027..e7f752d8 100644 --- a/apps/proxy/ts_proxy/stream_manager.py +++ b/apps/proxy/ts_proxy/stream_manager.py @@ -16,6 +16,7 @@ from apps.proxy.config import TSConfig as Config from apps.channels.models import Channel, Stream from apps.m3u.models import M3UAccount, M3UAccountProfile from core.models import UserAgent, CoreSettings +from core.utils import log_system_event from .stream_buffer import StreamBuffer from .utils import detect_stream_type, get_logger from .redis_keys import RedisKeys @@ -106,6 +107,10 @@ class StreamManager: # Add this flag for tracking transcoding process status self.transcode_process_active = False + # Track stream command for efficient log parser routing + self.stream_command = None + self.parser_type = None # Will be set when transcode process starts + # Add tracking for data throughput self.bytes_processed = 0 self.last_bytes_update = time.time() @@ -227,11 +232,12 @@ class StreamManager: # Continue with normal 
flow # Check stream type before connecting - stream_type = detect_stream_type(self.url) - if self.transcode == False and stream_type == StreamType.HLS: - logger.info(f"Detected HLS stream: {self.url} for channel {self.channel_id}") - logger.info(f"HLS streams will be handled with FFmpeg for now - future version will support HLS natively for channel {self.channel_id}") - # Enable transcoding for HLS streams + self.stream_type = detect_stream_type(self.url) + if self.transcode == False and self.stream_type in (StreamType.HLS, StreamType.RTSP, StreamType.UDP): + stream_type_name = "HLS" if self.stream_type == StreamType.HLS else ("RTSP/RTP" if self.stream_type == StreamType.RTSP else "UDP") + logger.info(f"Detected {stream_type_name} stream: {self.url} for channel {self.channel_id}") + logger.info(f"{stream_type_name} streams require FFmpeg for channel {self.channel_id}") + # Enable transcoding for HLS, RTSP/RTP, and UDP streams self.transcode = True # We'll override the stream profile selection with ffmpeg in the transcoding section self.force_ffmpeg = True @@ -259,6 +265,20 @@ class StreamManager: # Store connection start time to measure success duration connection_start_time = time.time() + # Log reconnection event if this is a retry (not first attempt) + if self.retry_count > 0: + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_reconnect', + channel_id=self.channel_id, + channel_name=channel_obj.name, + attempt=self.retry_count + 1, + max_attempts=self.max_retries + ) + except Exception as e: + logger.error(f"Could not log reconnection event: {e}") + # Successfully connected - read stream data until disconnect/error self._process_stream_data() # If we get here, the connection was closed/failed @@ -288,6 +308,20 @@ class StreamManager: if self.retry_count >= self.max_retries: url_failed = True logger.warning(f"Maximum retry attempts ({self.max_retries}) reached for URL: {self.url} for channel: {self.channel_id}") + + # Log connection error event + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_error', + channel_id=self.channel_id, + channel_name=channel_obj.name, + error_type='connection_failed', + url=self.url[:100] if self.url else None, + attempts=self.max_retries + ) + except Exception as e: + logger.error(f"Could not log connection error event: {e}") else: # Wait with exponential backoff before retrying timeout = min(.25 * self.retry_count, 3) # Cap at 3 seconds @@ -301,6 +335,21 @@ class StreamManager: if self.retry_count >= self.max_retries: url_failed = True + + # Log connection error event with exception details + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_error', + channel_id=self.channel_id, + channel_name=channel_obj.name, + error_type='connection_exception', + error_message=str(e)[:200], + url=self.url[:100] if self.url else None, + attempts=self.max_retries + ) + except Exception as log_error: + logger.error(f"Could not log connection error event: {log_error}") else: # Wait with exponential backoff before retrying timeout = min(.25 * self.retry_count, 3) # Cap at 3 seconds @@ -420,7 +469,7 @@ class StreamManager: from core.models import StreamProfile try: stream_profile = StreamProfile.objects.get(name='ffmpeg', locked=True) - logger.info("Using FFmpeg stream profile for HLS content") + logger.info("Using FFmpeg stream profile for unsupported proxy content (HLS/RTSP/UDP)") except StreamProfile.DoesNotExist: # Fall back to channel's 
profile if FFmpeg not found stream_profile = channel.get_stream_profile() @@ -430,6 +479,28 @@ class StreamManager: # Build and start transcode command self.transcode_cmd = stream_profile.build_command(self.url, self.user_agent) + + # Store stream command for efficient log parser routing + self.stream_command = stream_profile.command + # Map actual commands to parser types for direct routing + command_to_parser = { + 'ffmpeg': 'ffmpeg', + 'cvlc': 'vlc', + 'vlc': 'vlc', + 'streamlink': 'streamlink' + } + self.parser_type = command_to_parser.get(self.stream_command.lower()) + if self.parser_type: + logger.debug(f"Using {self.parser_type} parser for log parsing (command: {self.stream_command})") + else: + logger.debug(f"Unknown stream command '{self.stream_command}', will use auto-detection for log parsing") + + # For UDP streams, remove any user_agent parameters from the command + if hasattr(self, 'stream_type') and self.stream_type == StreamType.UDP: + # Filter out any arguments that contain the user_agent value or related headers + self.transcode_cmd = [arg for arg in self.transcode_cmd if self.user_agent not in arg and 'user-agent' not in arg.lower() and 'user_agent' not in arg.lower()] + logger.debug(f"Removed user_agent parameters from UDP stream command for channel: {self.channel_id}") + logger.debug(f"Starting transcode process: {self.transcode_cmd} for channel: {self.channel_id}") # Modified to capture stderr instead of discarding it @@ -593,35 +664,51 @@ class StreamManager: if content_lower.startswith('output #') or 'encoder' in content_lower: self.ffmpeg_input_phase = False - # Only parse stream info if we're still in the input phase - if ("stream #" in content_lower and - ("video:" in content_lower or "audio:" in content_lower) and - self.ffmpeg_input_phase): + # Route to appropriate parser based on known command type + from .services.log_parsers import LogParserFactory + from .services.channel_service import ChannelService - from .services.channel_service import ChannelService - if "video:" in content_lower: - ChannelService.parse_and_store_stream_info(self.channel_id, content, "video", self.current_stream_id) - elif "audio:" in content_lower: - ChannelService.parse_and_store_stream_info(self.channel_id, content, "audio", self.current_stream_id) + parse_result = None + + # If we know the parser type, use direct routing for efficiency + if self.parser_type: + # Get the appropriate parser and check what it can parse + parser = LogParserFactory._parsers.get(self.parser_type) + if parser: + stream_type = parser.can_parse(content) + if stream_type: + # Parser can handle this line, parse it directly + parsed_data = LogParserFactory.parse(stream_type, content) + if parsed_data: + parse_result = (stream_type, parsed_data) + else: + # Unknown command type - use auto-detection as fallback + parse_result = LogParserFactory.auto_parse(content) + + if parse_result: + stream_type, parsed_data = parse_result + # For FFmpeg, only parse during input phase + if stream_type in ['video', 'audio', 'input']: + if self.ffmpeg_input_phase: + ChannelService.parse_and_store_stream_info(self.channel_id, content, stream_type, self.current_stream_id) + else: + # VLC and Streamlink can be parsed anytime + ChannelService.parse_and_store_stream_info(self.channel_id, content, stream_type, self.current_stream_id) # Determine log level based on content if any(keyword in content_lower for keyword in ['error', 'failed', 'cannot', 'invalid', 'corrupt']): - logger.error(f"FFmpeg stderr for channel {self.channel_id}: 
{content}") + logger.error(f"Stream process error for channel {self.channel_id}: {content}") elif any(keyword in content_lower for keyword in ['warning', 'deprecated', 'ignoring']): - logger.warning(f"FFmpeg stderr for channel {self.channel_id}: {content}") + logger.warning(f"Stream process warning for channel {self.channel_id}: {content}") elif content.startswith('frame=') or 'fps=' in content or 'speed=' in content: # Stats lines - log at trace level to avoid spam - logger.trace(f"FFmpeg stats for channel {self.channel_id}: {content}") + logger.trace(f"Stream stats for channel {self.channel_id}: {content}") elif any(keyword in content_lower for keyword in ['input', 'output', 'stream', 'video', 'audio']): # Stream info - log at info level - logger.info(f"FFmpeg info for channel {self.channel_id}: {content}") - if content.startswith('Input #0'): - # If it's input 0, parse stream info - from .services.channel_service import ChannelService - ChannelService.parse_and_store_stream_info(self.channel_id, content, "input", self.current_stream_id) + logger.info(f"Stream info for channel {self.channel_id}: {content}") else: # Everything else at debug level - logger.debug(f"FFmpeg stderr for channel {self.channel_id}: {content}") + logger.debug(f"Stream process output for channel {self.channel_id}: {content}") except Exception as e: logger.error(f"Error logging stderr content for channel {self.channel_id}: {e}") @@ -694,6 +781,19 @@ class StreamManager: # Reset buffering state self.buffering = False self.buffering_start_time = None + + # Log failover event + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_failover', + channel_id=self.channel_id, + channel_name=channel_obj.name, + reason='buffering_timeout', + duration=buffering_duration + ) + except Exception as e: + logger.error(f"Could not log failover event: {e}") else: logger.error(f"Failed to switch to next stream for channel {self.channel_id} after buffering timeout") else: @@ -701,6 +801,19 @@ class StreamManager: self.buffering = True self.buffering_start_time = time.time() logger.warning(f"Buffering started for channel {self.channel_id} - speed: {ffmpeg_speed}x") + + # Log system event for buffering + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_buffering', + channel_id=self.channel_id, + channel_name=channel_obj.name, + speed=ffmpeg_speed + ) + except Exception as e: + logger.error(f"Could not log buffering event: {e}") + # Log buffering warning logger.debug(f"FFmpeg speed on channel {self.channel_id} is below {self.buffering_speed} ({ffmpeg_speed}x) - buffering detected") # Set channel state to buffering @@ -948,10 +1061,10 @@ class StreamManager: logger.debug(f"Updated m3u profile for channel {self.channel_id} to use profile from stream {stream_id}") else: logger.warning(f"Failed to update stream profile for channel {self.channel_id}") - + except Exception as e: logger.error(f"Error updating stream profile for channel {self.channel_id}: {e}") - + finally: # Always close database connection after profile update try: @@ -996,6 +1109,19 @@ class StreamManager: except Exception as e: logger.warning(f"Failed to reset buffer position: {e}") + # Log stream switch event + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'stream_switch', + channel_id=self.channel_id, + channel_name=channel_obj.name, + new_url=new_url[:100] if new_url else None, + stream_id=stream_id + ) + except Exception as e: + logger.error(f"Could 
not log stream switch event: {e}") + return True except Exception as e: logger.error(f"Error during URL update for channel {self.channel_id}: {e}", exc_info=True) @@ -1114,6 +1240,19 @@ class StreamManager: if connection_result: self.connection_start_time = time.time() logger.info(f"Reconnect successful for channel {self.channel_id}") + + # Log reconnection event + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_reconnect', + channel_id=self.channel_id, + channel_name=channel_obj.name, + reason='health_monitor' + ) + except Exception as e: + logger.error(f"Could not log reconnection event: {e}") + return True else: logger.warning(f"Reconnect failed for channel {self.channel_id}") @@ -1191,25 +1330,17 @@ class StreamManager: logger.debug(f"Error closing socket for channel {self.channel_id}: {e}") pass - # Enhanced transcode process cleanup with more aggressive termination + # Enhanced transcode process cleanup with immediate termination if self.transcode_process: try: - # First try polite termination - logger.debug(f"Terminating transcode process for channel {self.channel_id}") - self.transcode_process.terminate() + logger.debug(f"Killing transcode process for channel {self.channel_id}") + self.transcode_process.kill() - # Give it a short time to terminate gracefully + # Give it a very short time to die try: - self.transcode_process.wait(timeout=1.0) + self.transcode_process.wait(timeout=0.5) except subprocess.TimeoutExpired: - # If it doesn't terminate quickly, kill it - logger.warning(f"Transcode process didn't terminate within timeout, killing forcefully for channel {self.channel_id}") - self.transcode_process.kill() - - try: - self.transcode_process.wait(timeout=1.0) - except subprocess.TimeoutExpired: - logger.error(f"Failed to kill transcode process even with force for channel {self.channel_id}") + logger.error(f"Failed to kill transcode process even with force for channel {self.channel_id}") except Exception as e: logger.debug(f"Error terminating transcode process for channel {self.channel_id}: {e}") diff --git a/apps/proxy/ts_proxy/url_utils.py b/apps/proxy/ts_proxy/url_utils.py index db53cc74..8b467b7f 100644 --- a/apps/proxy/ts_proxy/url_utils.py +++ b/apps/proxy/ts_proxy/url_utils.py @@ -39,6 +39,8 @@ def generate_stream_url(channel_id: str) -> Tuple[str, str, bool, Optional[int]] # Handle direct stream preview (custom streams) if isinstance(channel_or_stream, Stream): + from core.utils import RedisClient + stream = channel_or_stream logger.info(f"Previewing stream directly: {stream.id} ({stream.name})") @@ -48,12 +50,43 @@ def generate_stream_url(channel_id: str) -> Tuple[str, str, bool, Optional[int]] logger.error(f"Stream {stream.id} has no M3U account") return None, None, False, None - # Get the default profile for this M3U account (custom streams use default) - m3u_profiles = m3u_account.profiles.all() - profile = next((obj for obj in m3u_profiles if obj.is_default), None) + # Get active profiles for this M3U account + m3u_profiles = m3u_account.profiles.filter(is_active=True) + default_profile = next((obj for obj in m3u_profiles if obj.is_default), None) - if not profile: - logger.error(f"No default profile found for M3U account {m3u_account.id}") + if not default_profile: + logger.error(f"No default active profile found for M3U account {m3u_account.id}") + return None, None, False, None + + # Check profiles in order: default first, then others + profiles = [default_profile] + [obj for obj in m3u_profiles if not obj.is_default] 
+ + # Try to find an available profile with connection capacity + redis_client = RedisClient.get_client() + selected_profile = None + + for profile in profiles: + logger.info(profile) + + # Check connection availability + if redis_client: + profile_connections_key = f"profile_connections:{profile.id}" + current_connections = int(redis_client.get(profile_connections_key) or 0) + + # Check if profile has available slots (or unlimited connections) + if profile.max_streams == 0 or current_connections < profile.max_streams: + selected_profile = profile + logger.debug(f"Selected profile {profile.id} with {current_connections}/{profile.max_streams} connections for stream preview") + break + else: + logger.debug(f"Profile {profile.id} at max connections: {current_connections}/{profile.max_streams}") + else: + # No Redis available, use first active profile + selected_profile = profile + break + + if not selected_profile: + logger.error(f"No profiles available with connection capacity for M3U account {m3u_account.id}") return None, None, False, None # Get the appropriate user agent @@ -62,8 +95,8 @@ def generate_stream_url(channel_id: str) -> Tuple[str, str, bool, Optional[int]] stream_user_agent = UserAgent.objects.get(id=CoreSettings.get_default_user_agent_id()) logger.debug(f"No user agent found for account, using default: {stream_user_agent}") - # Get stream URL (no transformation for custom streams) - stream_url = stream.url + # Get stream URL with the selected profile's URL transformation + stream_url = transform_url(stream.url, selected_profile.search_pattern, selected_profile.replace_pattern) # Check if the stream has its own stream_profile set, otherwise use default if stream.stream_profile: @@ -402,6 +435,9 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)): """ Validate if a stream URL is accessible without downloading the full content. + Note: UDP/RTP/RTSP streams are automatically considered valid as they cannot + be validated via HTTP methods. 
+ Args: url (str): The URL to validate user_agent (str): User agent to use for the request @@ -410,6 +446,12 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)): Returns: tuple: (is_valid, final_url, status_code, message) """ + # Check if URL uses non-HTTP protocols (UDP/RTP/RTSP) + # These cannot be validated via HTTP methods, so we skip validation + if url.startswith(('udp://', 'rtp://', 'rtsp://')): + logger.info(f"Skipping HTTP validation for non-HTTP protocol: {url}") + return True, url, 200, "Non-HTTP protocol (UDP/RTP/RTSP) - validation skipped" + try: # Create session with proper headers session = requests.Session() @@ -420,16 +462,21 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)): session.headers.update(headers) # Make HEAD request first as it's faster and doesn't download content - head_response = session.head( - url, - timeout=timeout, - allow_redirects=True - ) + head_request_success = True + try: + head_response = session.head( + url, + timeout=timeout, + allow_redirects=True + ) + except requests.exceptions.RequestException as e: + head_request_success = False + logger.warning(f"Request error (HEAD), assuming HEAD not supported: {str(e)}") # If HEAD not supported, server will return 405 or other error - if 200 <= head_response.status_code < 300: + if head_request_success and (200 <= head_response.status_code < 300): # HEAD request successful - return True, head_response.url, head_response.status_code, "Valid (HEAD request)" + return True, url, head_response.status_code, "Valid (HEAD request)" # Try a GET request with stream=True to avoid downloading all content get_response = session.get( @@ -442,7 +489,7 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)): # IMPORTANT: Check status code first before checking content if not (200 <= get_response.status_code < 300): logger.warning(f"Stream validation failed with HTTP status {get_response.status_code}") - return False, get_response.url, get_response.status_code, f"Invalid HTTP status: {get_response.status_code}" + return False, url, get_response.status_code, f"Invalid HTTP status: {get_response.status_code}" # Only check content if status code is valid try: @@ -496,7 +543,7 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)): get_response.close() # If we have content, consider it valid even with unrecognized content type - return is_valid, get_response.url, get_response.status_code, message + return is_valid, url, get_response.status_code, message except requests.exceptions.Timeout: return False, url, 0, "Timeout connecting to stream" diff --git a/apps/proxy/ts_proxy/utils.py b/apps/proxy/ts_proxy/utils.py index b568b804..20a6e140 100644 --- a/apps/proxy/ts_proxy/utils.py +++ b/apps/proxy/ts_proxy/utils.py @@ -7,19 +7,27 @@ logger = logging.getLogger("ts_proxy") def detect_stream_type(url): """ - Detect if stream URL is HLS or TS format. + Detect if stream URL is HLS, RTSP/RTP, UDP, or TS format. Args: url (str): The stream URL to analyze Returns: - str: 'hls' or 'ts' depending on detected format + str: 'hls', 'rtsp', 'udp', or 'ts' depending on detected format """ if not url: return 'unknown' url_lower = url.lower() + # Check for UDP streams (requires FFmpeg) + if url_lower.startswith('udp://'): + return 'udp' + + # Check for RTSP/RTP streams (requires FFmpeg) + if url_lower.startswith('rtsp://') or url_lower.startswith('rtp://'): + return 'rtsp' + # Look for common HLS indicators if (url_lower.endswith('.m3u8') or '.m3u8?' 
in url_lower or diff --git a/apps/proxy/ts_proxy/views.py b/apps/proxy/ts_proxy/views.py index 109e88cf..91f254a7 100644 --- a/apps/proxy/ts_proxy/views.py +++ b/apps/proxy/ts_proxy/views.py @@ -4,7 +4,7 @@ import time import random import re import pathlib -from django.http import StreamingHttpResponse, JsonResponse, HttpResponseRedirect +from django.http import StreamingHttpResponse, JsonResponse, HttpResponseRedirect, HttpResponse from django.views.decorators.csrf import csrf_exempt from django.shortcuts import get_object_or_404 from apps.proxy.config import TSConfig as Config @@ -84,11 +84,18 @@ def stream_ts(request, channel_id): if state_field in metadata: channel_state = metadata[state_field].decode("utf-8") - if channel_state: - # Channel is being initialized or already active - no need for reinitialization + # Active/running states - channel is operational, don't reinitialize + if channel_state in [ + ChannelState.ACTIVE, + ChannelState.WAITING_FOR_CLIENTS, + ChannelState.BUFFERING, + ChannelState.INITIALIZING, + ChannelState.CONNECTING, + ChannelState.STOPPING, + ]: needs_initialization = False logger.debug( - f"[{client_id}] Channel {channel_id} already in state {channel_state}, skipping initialization" + f"[{client_id}] Channel {channel_id} in state {channel_state}, skipping initialization" ) # Special handling for initializing/connecting states @@ -98,19 +105,34 @@ def stream_ts(request, channel_id): ]: channel_initializing = True logger.debug( - f"[{client_id}] Channel {channel_id} is still initializing, client will wait for completion" + f"[{client_id}] Channel {channel_id} is still initializing, client will wait" ) + # Terminal states - channel needs cleanup before reinitialization + elif channel_state in [ + ChannelState.ERROR, + ChannelState.STOPPED, + ]: + needs_initialization = True + logger.info( + f"[{client_id}] Channel {channel_id} in terminal state {channel_state}, will reinitialize" + ) + # Unknown/empty state - check if owner is alive else: - # Only check for owner if channel is in a valid state owner_field = ChannelMetadataField.OWNER.encode("utf-8") if owner_field in metadata: owner = metadata[owner_field].decode("utf-8") owner_heartbeat_key = f"ts_proxy:worker:{owner}:heartbeat" if proxy_server.redis_client.exists(owner_heartbeat_key): - # Owner is still active, so we don't need to reinitialize + # Owner is still active with unknown state - don't reinitialize needs_initialization = False logger.debug( - f"[{client_id}] Channel {channel_id} has active owner {owner}" + f"[{client_id}] Channel {channel_id} has active owner {owner}, skipping init" + ) + else: + # Owner dead - needs reinitialization + needs_initialization = True + logger.warning( + f"[{client_id}] Channel {channel_id} owner {owner} is dead, will reinitialize" ) # Start initialization if needed @@ -292,6 +314,15 @@ def stream_ts(request, channel_id): logger.info( f"[{client_id}] Redirecting to validated URL: {final_url} ({message})" ) + + # For non-HTTP protocols (RTSP/RTP/UDP), we need to manually create the redirect + # because Django's HttpResponseRedirect blocks them for security + if final_url.startswith(('rtsp://', 'rtp://', 'udp://')): + logger.info(f"[{client_id}] Using manual redirect for non-HTTP protocol") + response = HttpResponse(status=301) + response['Location'] = final_url + return response + return HttpResponseRedirect(final_url) else: logger.error( diff --git a/apps/proxy/vod_proxy/connection_manager.py b/apps/proxy/vod_proxy/connection_manager.py index dea5759b..ec0bffa5 100644 
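An illustrative aside on the ts_proxy views.py change above (not part of the patch itself): Django's HttpResponseRedirect only accepts http, https and ftp schemes and raises DisallowedRedirect for anything else, which is why the view builds the 301 response and its Location header by hand for rtsp/rtp/udp targets. A minimal sketch of that behaviour, assuming stock Django:

```python
# Minimal sketch, assuming stock Django; redirect_to_stream is a hypothetical
# helper, not a function defined in the patch.
from django.http import HttpResponse, HttpResponseRedirect


def redirect_to_stream(final_url):
    """Redirect to a validated stream URL, tolerating non-HTTP schemes."""
    if final_url.startswith(('rtsp://', 'rtp://', 'udp://')):
        # HttpResponseRedirect would raise DisallowedRedirect for these
        # schemes, so set the Location header on a plain response instead.
        response = HttpResponse(status=301)
        response['Location'] = final_url
        return response
    return HttpResponseRedirect(final_url)
```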
--- a/apps/proxy/vod_proxy/connection_manager.py +++ b/apps/proxy/vod_proxy/connection_manager.py @@ -97,7 +97,11 @@ class PersistentVODConnection: # First check if we have a pre-stored content length from HEAD request try: import redis - r = redis.StrictRedis(host='localhost', port=6379, db=0, decode_responses=True) + from django.conf import settings + redis_host = getattr(settings, 'REDIS_HOST', 'localhost') + redis_port = int(getattr(settings, 'REDIS_PORT', 6379)) + redis_db = int(getattr(settings, 'REDIS_DB', 0)) + r = redis.StrictRedis(host=redis_host, port=redis_port, db=redis_db, decode_responses=True) content_length_key = f"vod_content_length:{self.session_id}" stored_length = r.get(content_length_key) if stored_length: diff --git a/apps/proxy/vod_proxy/multi_worker_connection_manager.py b/apps/proxy/vod_proxy/multi_worker_connection_manager.py index fefc8739..1534f761 100644 --- a/apps/proxy/vod_proxy/multi_worker_connection_manager.py +++ b/apps/proxy/vod_proxy/multi_worker_connection_manager.py @@ -24,6 +24,11 @@ from apps.m3u.models import M3UAccountProfile logger = logging.getLogger("vod_proxy") +def get_vod_client_stop_key(client_id): + """Get the Redis key for signaling a VOD client to stop""" + return f"vod_proxy:client:{client_id}:stop" + + def infer_content_type_from_url(url: str) -> Optional[str]: """ Infer MIME type from file extension in URL @@ -352,12 +357,12 @@ class RedisBackedVODConnection: logger.info(f"[{self.session_id}] Making request #{state.request_count} to {'final' if state.final_url else 'original'} URL") - # Make request + # Make request (10s connect, 10s read timeout - keeps lock time reasonable if client disconnects) response = self.local_session.get( target_url, headers=headers, stream=True, - timeout=(10, 30), + timeout=(10, 10), allow_redirects=allow_redirects ) response.raise_for_status() @@ -707,6 +712,10 @@ class MultiWorkerVODConnectionManager: content_name = content_obj.name if hasattr(content_obj, 'name') else str(content_obj) client_id = session_id + # Track whether we incremented profile connections (for cleanup on error) + profile_connections_incremented = False + redis_connection = None + logger.info(f"[{client_id}] Worker {self.worker_id} - Redis-backed streaming request for {content_type} {content_name}") try: @@ -797,6 +806,7 @@ class MultiWorkerVODConnectionManager: # Increment profile connections after successful connection creation self._increment_profile_connections(m3u_profile) + profile_connections_incremented = True logger.info(f"[{client_id}] Worker {self.worker_id} - Created consolidated connection with session metadata") else: @@ -832,6 +842,7 @@ class MultiWorkerVODConnectionManager: # Create streaming generator def stream_generator(): decremented = False + stop_signal_detected = False try: logger.info(f"[{client_id}] Worker {self.worker_id} - Starting Redis-backed stream") @@ -846,14 +857,25 @@ class MultiWorkerVODConnectionManager: bytes_sent = 0 chunk_count = 0 + # Get the stop signal key for this client + stop_key = get_vod_client_stop_key(client_id) + for chunk in upstream_response.iter_content(chunk_size=8192): if chunk: yield chunk bytes_sent += len(chunk) chunk_count += 1 - # Update activity every 100 chunks in consolidated connection state + # Check for stop signal every 100 chunks if chunk_count % 100 == 0: + # Check if stop signal has been set + if self.redis_client and self.redis_client.exists(stop_key): + logger.info(f"[{client_id}] Worker {self.worker_id} - Stop signal detected, terminating stream") + # Delete 
the stop key + self.redis_client.delete(stop_key) + stop_signal_detected = True + break + # Update the connection state logger.debug(f"Client: [{client_id}] Worker: {self.worker_id} sent {chunk_count} chunks for VOD: {content_name}") if redis_connection._acquire_lock(): @@ -867,7 +889,10 @@ class MultiWorkerVODConnectionManager: finally: redis_connection._release_lock() - logger.info(f"[{client_id}] Worker {self.worker_id} - Redis-backed stream completed: {bytes_sent} bytes sent") + if stop_signal_detected: + logger.info(f"[{client_id}] Worker {self.worker_id} - Stream stopped by signal: {bytes_sent} bytes sent") + else: + logger.info(f"[{client_id}] Worker {self.worker_id} - Redis-backed stream completed: {bytes_sent} bytes sent") redis_connection.decrement_active_streams() decremented = True @@ -1004,6 +1029,19 @@ class MultiWorkerVODConnectionManager: except Exception as e: logger.error(f"[{client_id}] Worker {self.worker_id} - Error in Redis-backed stream_content_with_session: {e}", exc_info=True) + + # Decrement profile connections if we incremented them but failed before streaming started + if profile_connections_incremented: + logger.info(f"[{client_id}] Connection error occurred after profile increment - decrementing profile connections") + self._decrement_profile_connections(m3u_profile.id) + + # Also clean up the Redis connection state since we won't be using it + if redis_connection: + try: + redis_connection.cleanup(connection_manager=self, current_worker_id=self.worker_id) + except Exception as cleanup_error: + logger.error(f"[{client_id}] Error during cleanup after connection failure: {cleanup_error}") + return HttpResponse(f"Streaming error: {str(e)}", status=500) def _apply_timeshift_parameters(self, original_url, utc_start=None, utc_end=None, offset=None): diff --git a/apps/proxy/vod_proxy/urls.py b/apps/proxy/vod_proxy/urls.py index c06426ce..f48f70e0 100644 --- a/apps/proxy/vod_proxy/urls.py +++ b/apps/proxy/vod_proxy/urls.py @@ -21,4 +21,7 @@ urlpatterns = [ # VOD Stats path('stats/', views.VODStatsView.as_view(), name='vod_stats'), + + # Stop VOD client connection + path('stop_client/', views.stop_vod_client, name='stop_vod_client'), ] diff --git a/apps/proxy/vod_proxy/views.py b/apps/proxy/vod_proxy/views.py index 00ed8a10..2ec95cc3 100644 --- a/apps/proxy/vod_proxy/views.py +++ b/apps/proxy/vod_proxy/views.py @@ -15,7 +15,7 @@ from django.views import View from apps.vod.models import Movie, Series, Episode from apps.m3u.models import M3UAccount, M3UAccountProfile from apps.proxy.vod_proxy.connection_manager import VODConnectionManager -from apps.proxy.vod_proxy.multi_worker_connection_manager import MultiWorkerVODConnectionManager, infer_content_type_from_url +from apps.proxy.vod_proxy.multi_worker_connection_manager import MultiWorkerVODConnectionManager, infer_content_type_from_url, get_vod_client_stop_key from .utils import get_client_info, create_vod_response logger = logging.getLogger(__name__) @@ -329,7 +329,11 @@ class VODStreamView(View): # Store the total content length in Redis for the persistent connection to use try: import redis - r = redis.StrictRedis(host='localhost', port=6379, db=0, decode_responses=True) + from django.conf import settings + redis_host = getattr(settings, 'REDIS_HOST', 'localhost') + redis_port = int(getattr(settings, 'REDIS_PORT', 6379)) + redis_db = int(getattr(settings, 'REDIS_DB', 0)) + r = redis.StrictRedis(host=redis_host, port=redis_port, db=redis_db, decode_responses=True) content_length_key = 
f"vod_content_length:{session_id}" r.set(content_length_key, total_size, ex=1800) # Store for 30 minutes logger.info(f"[VOD-HEAD] Stored total content length {total_size} for session {session_id}") @@ -1011,3 +1015,59 @@ class VODStatsView(View): except Exception as e: logger.error(f"Error getting VOD stats: {e}") return JsonResponse({'error': str(e)}, status=500) + + +from rest_framework.decorators import api_view, permission_classes +from apps.accounts.permissions import IsAdmin + + +@csrf_exempt +@api_view(["POST"]) +@permission_classes([IsAdmin]) +def stop_vod_client(request): + """Stop a specific VOD client connection using stop signal mechanism""" + try: + # Parse request body + import json + try: + data = json.loads(request.body) + except json.JSONDecodeError: + return JsonResponse({'error': 'Invalid JSON'}, status=400) + + client_id = data.get('client_id') + if not client_id: + return JsonResponse({'error': 'No client_id provided'}, status=400) + + logger.info(f"Request to stop VOD client: {client_id}") + + # Get Redis client + connection_manager = MultiWorkerVODConnectionManager.get_instance() + redis_client = connection_manager.redis_client + + if not redis_client: + return JsonResponse({'error': 'Redis not available'}, status=500) + + # Check if connection exists + connection_key = f"vod_persistent_connection:{client_id}" + connection_data = redis_client.hgetall(connection_key) + if not connection_data: + logger.warning(f"VOD connection not found: {client_id}") + return JsonResponse({'error': 'Connection not found'}, status=404) + + # Set a stop signal key that the worker will check + stop_key = get_vod_client_stop_key(client_id) + redis_client.setex(stop_key, 60, "true") # 60 second TTL + + logger.info(f"Set stop signal for VOD client: {client_id}") + + return JsonResponse({ + 'message': 'VOD client stop signal sent', + 'client_id': client_id, + 'stop_key': stop_key + }) + + except Exception as e: + logger.error(f"Error stopping VOD client: {e}", exc_info=True) + return JsonResponse({'error': str(e)}, status=500) + + diff --git a/apps/vod/api_urls.py b/apps/vod/api_urls.py index ffccc3f5..e897bd28 100644 --- a/apps/vod/api_urls.py +++ b/apps/vod/api_urls.py @@ -6,6 +6,7 @@ from .api_views import ( SeriesViewSet, VODCategoryViewSet, UnifiedContentViewSet, + VODLogoViewSet, ) app_name = 'vod' @@ -16,5 +17,6 @@ router.register(r'episodes', EpisodeViewSet, basename='episode') router.register(r'series', SeriesViewSet, basename='series') router.register(r'categories', VODCategoryViewSet, basename='vodcategory') router.register(r'all', UnifiedContentViewSet, basename='unified-content') +router.register(r'vodlogos', VODLogoViewSet, basename='vodlogo') urlpatterns = router.urls diff --git a/apps/vod/api_views.py b/apps/vod/api_views.py index 517038a6..3bd984e6 100644 --- a/apps/vod/api_views.py +++ b/apps/vod/api_views.py @@ -3,23 +3,29 @@ from rest_framework.response import Response from rest_framework.decorators import action from rest_framework.filters import SearchFilter, OrderingFilter from rest_framework.pagination import PageNumberPagination +from rest_framework.permissions import AllowAny from django_filters.rest_framework import DjangoFilterBackend from django.shortcuts import get_object_or_404 +from django.http import StreamingHttpResponse, HttpResponse, FileResponse +from django.db.models import Q import django_filters import logging +import os +import requests from apps.accounts.permissions import ( Authenticated, permission_classes_by_action, ) from .models import ( - 
Series, VODCategory, Movie, Episode, - M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation + Series, VODCategory, Movie, Episode, VODLogo, + M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation, M3UVODCategoryRelation ) from .serializers import ( MovieSerializer, EpisodeSerializer, SeriesSerializer, VODCategorySerializer, + VODLogoSerializer, M3UMovieRelationSerializer, M3USeriesRelationSerializer, M3UEpisodeRelationSerializer @@ -56,7 +62,7 @@ class MovieFilter(django_filters.FilterSet): # Handle the format 'category_name|category_type' if '|' in value: - category_name, category_type = value.split('|', 1) + category_name, category_type = value.rsplit('|', 1) return queryset.filter( m3u_relations__category__name=category_name, m3u_relations__category__category_type=category_type @@ -213,7 +219,7 @@ class SeriesFilter(django_filters.FilterSet): # Handle the format 'category_name|category_type' if '|' in value: - category_name, category_type = value.split('|', 1) + category_name, category_type = value.rsplit('|', 1) return queryset.filter( m3u_relations__category__name=category_name, m3u_relations__category__category_type=category_type @@ -470,6 +476,59 @@ class VODCategoryViewSet(viewsets.ReadOnlyModelViewSet): except KeyError: return [Authenticated()] + def list(self, request, *args, **kwargs): + """Override list to ensure Uncategorized categories and relations exist for all XC accounts with VOD enabled""" + from apps.m3u.models import M3UAccount + + # Ensure Uncategorized categories exist + movie_category, _ = VODCategory.objects.get_or_create( + name="Uncategorized", + category_type="movie", + defaults={} + ) + + series_category, _ = VODCategory.objects.get_or_create( + name="Uncategorized", + category_type="series", + defaults={} + ) + + # Get all active XC accounts with VOD enabled + xc_accounts = M3UAccount.objects.filter( + account_type=M3UAccount.Types.XC, + is_active=True + ) + + for account in xc_accounts: + if account.custom_properties: + custom_props = account.custom_properties or {} + vod_enabled = custom_props.get("enable_vod", False) + + if vod_enabled: + # Ensure relations exist for this account + auto_enable_new = custom_props.get("auto_enable_new_groups_vod", True) + + M3UVODCategoryRelation.objects.get_or_create( + category=movie_category, + m3u_account=account, + defaults={ + 'enabled': auto_enable_new, + 'custom_properties': {} + } + ) + + M3UVODCategoryRelation.objects.get_or_create( + category=series_category, + m3u_account=account, + defaults={ + 'enabled': auto_enable_new, + 'custom_properties': {} + } + ) + + # Now proceed with normal list operation + return super().list(request, *args, **kwargs) + class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet): """ViewSet that combines Movies and Series for unified 'All' view""" @@ -529,7 +588,7 @@ class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet): if category: if '|' in category: - cat_name, cat_type = category.split('|', 1) + cat_name, cat_type = category.rsplit('|', 1) if cat_type == 'movie': where_conditions[0] += " AND movies.id IN (SELECT movie_id FROM vod_m3umovierelation mmr JOIN vod_vodcategory c ON mmr.category_id = c.id WHERE c.name = %s)" where_conditions[1] = "1=0" # Exclude series @@ -564,7 +623,7 @@ class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet): logo.url as logo_url, 'movie' as content_type FROM vod_movie movies - LEFT JOIN dispatcharr_channels_logo logo ON movies.logo_id = logo.id + LEFT JOIN vod_vodlogo logo ON movies.logo_id = logo.id WHERE {where_conditions[0]} UNION ALL 
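An aside on the rsplit('|', 1) changes in the filter hunks above (illustration only, not part of the patch): the filter value has the shape 'category_name|category_type', and only the final segment is the type, so splitting on the last separator keeps category names that themselves contain a pipe intact.

```python
# Hypothetical filter value: a category name containing '|', with type 'movie'.
value = "Action | Adventure|movie"

name, category_type = value.split('|', 1)   # ('Action ', ' Adventure|movie')
assert category_type != "movie"             # the type is lost in the remainder

name, category_type = value.rsplit('|', 1)  # ('Action | Adventure', 'movie')
assert category_type == "movie"             # the last segment is always the type
```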
@@ -586,7 +645,7 @@ class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet): logo.url as logo_url, 'series' as content_type FROM vod_series series - LEFT JOIN dispatcharr_channels_logo logo ON series.logo_id = logo.id + LEFT JOIN vod_vodlogo logo ON series.logo_id = logo.id WHERE {where_conditions[1]} ) SELECT * FROM unified_content @@ -613,10 +672,10 @@ class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet): 'id': item_dict['logo_id'], 'name': item_dict['logo_name'], 'url': item_dict['logo_url'], - 'cache_url': f"/media/logo_cache/{item_dict['logo_id']}.png" if item_dict['logo_id'] else None, - 'channel_count': 0, # We don't need this for VOD - 'is_used': True, - 'channel_names': [] # We don't need this for VOD + 'cache_url': f"/api/vod/vodlogos/{item_dict['logo_id']}/cache/", + 'movie_count': 0, # We don't calculate this in raw SQL + 'series_count': 0, # We don't calculate this in raw SQL + 'is_used': True } # Convert to the format expected by frontend @@ -668,4 +727,173 @@ class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet): logger.error(f"Error in UnifiedContentViewSet.list(): {e}") import traceback logger.error(traceback.format_exc()) - return Response({'error': str(e)}, status=500) \ No newline at end of file + return Response({'error': str(e)}, status=500) + + +class VODLogoPagination(PageNumberPagination): + page_size = 100 + page_size_query_param = "page_size" + max_page_size = 1000 + + +class VODLogoViewSet(viewsets.ModelViewSet): + """ViewSet for VOD Logo management""" + queryset = VODLogo.objects.all() + serializer_class = VODLogoSerializer + pagination_class = VODLogoPagination + filter_backends = [SearchFilter, OrderingFilter] + search_fields = ['name', 'url'] + ordering_fields = ['name', 'id'] + ordering = ['name'] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + if self.action == 'cache': + return [AllowAny()] + return [Authenticated()] + + def get_queryset(self): + """Optimize queryset with prefetch and add filtering""" + queryset = VODLogo.objects.prefetch_related('movie', 'series').order_by('name') + + # Filter by specific IDs + ids = self.request.query_params.getlist('ids') + if ids: + try: + id_list = [int(id_str) for id_str in ids if id_str.isdigit()] + if id_list: + queryset = queryset.filter(id__in=id_list) + except (ValueError, TypeError): + queryset = VODLogo.objects.none() + + # Filter by usage + used_filter = self.request.query_params.get('used', None) + if used_filter == 'true': + # Return logos that are used by movies OR series + queryset = queryset.filter( + Q(movie__isnull=False) | Q(series__isnull=False) + ).distinct() + elif used_filter == 'false': + # Return logos that are NOT used by either + queryset = queryset.filter( + movie__isnull=True, + series__isnull=True + ) + elif used_filter == 'movies': + # Return logos that are used by movies (may also be used by series) + queryset = queryset.filter(movie__isnull=False).distinct() + elif used_filter == 'series': + # Return logos that are used by series (may also be used by movies) + queryset = queryset.filter(series__isnull=False).distinct() + + + # Filter by name + name_query = self.request.query_params.get('name', None) + if name_query: + queryset = queryset.filter(name__icontains=name_query) + + # No pagination mode + if self.request.query_params.get('no_pagination', 'false').lower() == 'true': + self.pagination_class = None + + return queryset + + @action(detail=True, methods=["get"], 
permission_classes=[AllowAny]) + def cache(self, request, pk=None): + """Streams the VOD logo file, whether it's local or remote.""" + logo = self.get_object() + + if not logo.url: + return HttpResponse(status=404) + + # Check if this is a local file path + if logo.url.startswith('/data/'): + # It's a local file + file_path = logo.url + if not os.path.exists(file_path): + logger.error(f"VOD logo file not found: {file_path}") + return HttpResponse(status=404) + + try: + return FileResponse(open(file_path, 'rb'), content_type='image/png') + except Exception as e: + logger.error(f"Error serving VOD logo file {file_path}: {str(e)}") + return HttpResponse(status=500) + else: + # It's a remote URL - proxy it + try: + response = requests.get(logo.url, stream=True, timeout=10) + response.raise_for_status() + + content_type = response.headers.get('Content-Type', 'image/png') + + return StreamingHttpResponse( + response.iter_content(chunk_size=8192), + content_type=content_type + ) + except requests.exceptions.RequestException as e: + logger.error(f"Error fetching remote VOD logo {logo.url}: {str(e)}") + return HttpResponse(status=404) + + @action(detail=False, methods=["delete"], url_path="bulk-delete") + def bulk_delete(self, request): + """Delete multiple VOD logos at once""" + logo_ids = request.data.get('logo_ids', []) + + if not logo_ids: + return Response( + {"error": "No logo IDs provided"}, + status=status.HTTP_400_BAD_REQUEST + ) + + try: + # Get logos to delete + logos = VODLogo.objects.filter(id__in=logo_ids) + deleted_count = logos.count() + + # Delete them + logos.delete() + + return Response({ + "deleted_count": deleted_count, + "message": f"Successfully deleted {deleted_count} VOD logo(s)" + }) + except Exception as e: + logger.error(f"Error during bulk VOD logo deletion: {str(e)}") + return Response( + {"error": str(e)}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR + ) + + @action(detail=False, methods=["post"]) + def cleanup(self, request): + """Delete all VOD logos that are not used by any movies or series""" + try: + # Find unused logos + unused_logos = VODLogo.objects.filter( + movie__isnull=True, + series__isnull=True + ) + + deleted_count = unused_logos.count() + logo_names = list(unused_logos.values_list('name', flat=True)) + + # Delete them + unused_logos.delete() + + logger.info(f"Cleaned up {deleted_count} unused VOD logos: {logo_names}") + + return Response({ + "deleted_count": deleted_count, + "deleted_logos": logo_names, + "message": f"Successfully deleted {deleted_count} unused VOD logo(s)" + }) + except Exception as e: + logger.error(f"Error during VOD logo cleanup: {str(e)}") + return Response( + {"error": str(e)}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR + ) + diff --git a/apps/vod/migrations/0003_vodlogo_alter_movie_logo_alter_series_logo.py b/apps/vod/migrations/0003_vodlogo_alter_movie_logo_alter_series_logo.py new file mode 100644 index 00000000..1bd2c418 --- /dev/null +++ b/apps/vod/migrations/0003_vodlogo_alter_movie_logo_alter_series_logo.py @@ -0,0 +1,264 @@ +# Generated by Django 5.2.4 on 2025-11-06 23:01 + +import django.db.models.deletion +from django.db import migrations, models + + +def migrate_vod_logos_forward(apps, schema_editor): + """ + Migrate VOD logos from the Logo table to the new VODLogo table. + This copies all logos referenced by movies or series to VODLogo. + Uses pure SQL for maximum performance. 
+ """ + from django.db import connection + + print("\n" + "="*80) + print("Starting VOD logo migration...") + print("="*80) + + with connection.cursor() as cursor: + # Step 1: Copy unique logos from Logo table to VODLogo table + # Only copy logos that are used by movies or series + print("Copying logos to VODLogo table...") + cursor.execute(""" + INSERT INTO vod_vodlogo (name, url) + SELECT DISTINCT l.name, l.url + FROM dispatcharr_channels_logo l + WHERE l.id IN ( + SELECT DISTINCT logo_id FROM vod_movie WHERE logo_id IS NOT NULL + UNION + SELECT DISTINCT logo_id FROM vod_series WHERE logo_id IS NOT NULL + ) + ON CONFLICT (url) DO NOTHING + """) + print(f"Created VODLogo entries") + + # Step 2: Update movies to point to VODLogo IDs using JOIN + print("Updating movie references...") + cursor.execute(""" + UPDATE vod_movie m + SET logo_id = v.id + FROM dispatcharr_channels_logo l + INNER JOIN vod_vodlogo v ON l.url = v.url + WHERE m.logo_id = l.id + AND m.logo_id IS NOT NULL + """) + movie_count = cursor.rowcount + print(f"Updated {movie_count} movies with new VOD logo references") + + # Step 3: Update series to point to VODLogo IDs using JOIN + print("Updating series references...") + cursor.execute(""" + UPDATE vod_series s + SET logo_id = v.id + FROM dispatcharr_channels_logo l + INNER JOIN vod_vodlogo v ON l.url = v.url + WHERE s.logo_id = l.id + AND s.logo_id IS NOT NULL + """) + series_count = cursor.rowcount + print(f"Updated {series_count} series with new VOD logo references") + + print("="*80) + print("VOD logo migration completed successfully!") + print(f"Summary: Updated {movie_count} movies and {series_count} series") + print("="*80 + "\n") + + +def migrate_vod_logos_backward(apps, schema_editor): + """ + Reverse migration - moves VODLogos back to Logo table. + This recreates Logo entries for all VODLogos and updates Movie/Series references. + """ + Logo = apps.get_model('dispatcharr_channels', 'Logo') + VODLogo = apps.get_model('vod', 'VODLogo') + Movie = apps.get_model('vod', 'Movie') + Series = apps.get_model('vod', 'Series') + + print("\n" + "="*80) + print("REVERSE: Moving VOD logos back to Logo table...") + print("="*80) + + # Get all VODLogos + vod_logos = VODLogo.objects.all() + print(f"Found {vod_logos.count()} VOD logos to reverse migrate") + + # Create Logo entries for each VODLogo + logos_to_create = [] + vod_to_logo_mapping = {} # VODLogo ID -> Logo ID + + for vod_logo in vod_logos: + # Check if a Logo with this URL already exists + existing_logo = Logo.objects.filter(url=vod_logo.url).first() + + if existing_logo: + # Logo already exists, just map to it + vod_to_logo_mapping[vod_logo.id] = existing_logo.id + print(f"Logo already exists for URL: {vod_logo.url[:50]}... 
(using existing)") + else: + # Create new Logo entry + new_logo = Logo(name=vod_logo.name, url=vod_logo.url) + logos_to_create.append(new_logo) + + # Bulk create new Logo entries + if logos_to_create: + print(f"Creating {len(logos_to_create)} new Logo entries...") + Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True) + print("Logo entries created") + + # Get the created Logo instances with their IDs + for vod_logo in vod_logos: + if vod_logo.id not in vod_to_logo_mapping: + try: + logo = Logo.objects.get(url=vod_logo.url) + vod_to_logo_mapping[vod_logo.id] = logo.id + except Logo.DoesNotExist: + print(f"Warning: Could not find Logo for URL: {vod_logo.url[:100]}...") + + print(f"Created mapping for {len(vod_to_logo_mapping)} VOD logos -> Logos") + + # Update movies to point back to Logo table + movie_count = 0 + for movie in Movie.objects.exclude(logo__isnull=True): + if movie.logo_id in vod_to_logo_mapping: + movie.logo_id = vod_to_logo_mapping[movie.logo_id] + movie.save(update_fields=['logo_id']) + movie_count += 1 + print(f"Updated {movie_count} movies to use Logo table") + + # Update series to point back to Logo table + series_count = 0 + for series in Series.objects.exclude(logo__isnull=True): + if series.logo_id in vod_to_logo_mapping: + series.logo_id = vod_to_logo_mapping[series.logo_id] + series.save(update_fields=['logo_id']) + series_count += 1 + print(f"Updated {series_count} series to use Logo table") + + # Delete VODLogos (they're now redundant) + vod_logo_count = vod_logos.count() + vod_logos.delete() + print(f"Deleted {vod_logo_count} VOD logos") + + print("="*80) + print("Reverse migration completed!") + print(f"Summary: Created/reused {len(vod_to_logo_mapping)} logos, updated {movie_count} movies and {series_count} series") + print("="*80 + "\n") + + +def cleanup_migrated_logos(apps, schema_editor): + """ + Delete Logo entries that were successfully migrated to VODLogo. + + Uses efficient JOIN-based approach with LEFT JOIN to exclude channel usage. 
+ """ + from django.db import connection + + print("\n" + "="*80) + print("Cleaning up migrated Logo entries...") + print("="*80) + + with connection.cursor() as cursor: + # Single efficient query using JOINs: + # - JOIN with vod_vodlogo to find migrated logos + # - LEFT JOIN with channels to find which aren't used + cursor.execute(""" + DELETE FROM dispatcharr_channels_logo + WHERE id IN ( + SELECT l.id + FROM dispatcharr_channels_logo l + INNER JOIN vod_vodlogo v ON l.url = v.url + LEFT JOIN dispatcharr_channels_channel c ON c.logo_id = l.id + WHERE c.id IS NULL + ) + """) + deleted_count = cursor.rowcount + + print(f"✓ Deleted {deleted_count} migrated Logo entries (not used by channels)") + print("="*80 + "\n") + + +class Migration(migrations.Migration): + + dependencies = [ + ('vod', '0002_add_last_seen_with_default'), + ('dispatcharr_channels', '0013_alter_logo_url'), # Ensure Logo table exists + ] + + operations = [ + # Step 1: Create the VODLogo model + migrations.CreateModel( + name='VODLogo', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('name', models.CharField(max_length=255)), + ('url', models.TextField(unique=True)), + ], + options={ + 'verbose_name': 'VOD Logo', + 'verbose_name_plural': 'VOD Logos', + }, + ), + + # Step 2: Remove foreign key constraints temporarily (so we can change the IDs) + # We need to find and drop the actual constraint names dynamically + migrations.RunSQL( + sql=[ + # Drop movie logo constraint (find it dynamically) + """ + DO $$ + DECLARE + constraint_name text; + BEGIN + SELECT conname INTO constraint_name + FROM pg_constraint + WHERE conrelid = 'vod_movie'::regclass + AND conname LIKE '%logo_id%fk%'; + + IF constraint_name IS NOT NULL THEN + EXECUTE 'ALTER TABLE vod_movie DROP CONSTRAINT ' || constraint_name; + END IF; + END $$; + """, + # Drop series logo constraint (find it dynamically) + """ + DO $$ + DECLARE + constraint_name text; + BEGIN + SELECT conname INTO constraint_name + FROM pg_constraint + WHERE conrelid = 'vod_series'::regclass + AND conname LIKE '%logo_id%fk%'; + + IF constraint_name IS NOT NULL THEN + EXECUTE 'ALTER TABLE vod_series DROP CONSTRAINT ' || constraint_name; + END IF; + END $$; + """, + ], + reverse_sql=[ + # The AlterField operations will recreate the constraints pointing to VODLogo, + # so we don't need to manually recreate them in reverse + migrations.RunSQL.noop, + ], + ), + + # Step 3: Migrate the data (this copies logos and updates references) + migrations.RunPython(migrate_vod_logos_forward, migrate_vod_logos_backward), + + # Step 4: Now we can safely alter the foreign keys to point to VODLogo + migrations.AlterField( + model_name='movie', + name='logo', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='movie', to='vod.vodlogo'), + ), + migrations.AlterField( + model_name='series', + name='logo', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='series', to='vod.vodlogo'), + ), + + # Step 5: Clean up migrated Logo entries + migrations.RunPython(cleanup_migrated_logos, migrations.RunPython.noop), + ] diff --git a/apps/vod/models.py b/apps/vod/models.py index f0825ba2..7067856e 100644 --- a/apps/vod/models.py +++ b/apps/vod/models.py @@ -4,10 +4,22 @@ from django.utils import timezone from django.contrib.contenttypes.fields import GenericForeignKey from django.contrib.contenttypes.models import ContentType from apps.m3u.models 
import M3UAccount -from apps.channels.models import Logo import uuid +class VODLogo(models.Model): + """Logo model specifically for VOD content (movies and series)""" + name = models.CharField(max_length=255) + url = models.TextField(unique=True) + + def __str__(self): + return self.name + + class Meta: + verbose_name = 'VOD Logo' + verbose_name_plural = 'VOD Logos' + + class VODCategory(models.Model): """Categories for organizing VODs (e.g., Action, Comedy, Drama)""" @@ -69,7 +81,7 @@ class Series(models.Model): year = models.IntegerField(blank=True, null=True) rating = models.CharField(max_length=10, blank=True, null=True) genre = models.CharField(max_length=255, blank=True, null=True) - logo = models.ForeignKey(Logo, on_delete=models.SET_NULL, null=True, blank=True, related_name='series') + logo = models.ForeignKey(VODLogo, on_delete=models.SET_NULL, null=True, blank=True, related_name='series') # Metadata IDs for deduplication - these should be globally unique when present tmdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="TMDB ID for metadata") @@ -108,7 +120,7 @@ class Movie(models.Model): rating = models.CharField(max_length=10, blank=True, null=True) genre = models.CharField(max_length=255, blank=True, null=True) duration_secs = models.IntegerField(blank=True, null=True, help_text="Duration in seconds") - logo = models.ForeignKey(Logo, on_delete=models.SET_NULL, null=True, blank=True, related_name='movie') + logo = models.ForeignKey(VODLogo, on_delete=models.SET_NULL, null=True, blank=True, related_name='movie') # Metadata IDs for deduplication - these should be globally unique when present tmdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="TMDB ID for metadata") @@ -233,10 +245,13 @@ class M3UMovieRelation(models.Model): """Get the full stream URL for this movie from this provider""" # Build URL dynamically for XtreamCodes accounts if self.m3u_account.account_type == 'XC': - server_url = self.m3u_account.server_url.rstrip('/') + from core.xtream_codes import Client as XCClient + # Use XC client's URL normalization to handle malformed URLs + # (e.g., URLs with /player_api.php or query parameters) + normalized_url = XCClient(self.m3u_account.server_url, '', '')._normalize_url(self.m3u_account.server_url) username = self.m3u_account.username password = self.m3u_account.password - return f"{server_url}/movie/{username}/{password}/{self.stream_id}.{self.container_extension or 'mp4'}" + return f"{normalized_url}/movie/{username}/{password}/{self.stream_id}.{self.container_extension or 'mp4'}" else: # For other account types, we would need another way to build URLs return None @@ -273,10 +288,12 @@ class M3UEpisodeRelation(models.Model): if self.m3u_account.account_type == 'XC': # For XtreamCodes accounts, build the URL dynamically - server_url = self.m3u_account.server_url.rstrip('/') + # Use XC client's URL normalization to handle malformed URLs + # (e.g., URLs with /player_api.php or query parameters) + normalized_url = XtreamCodesClient(self.m3u_account.server_url, '', '')._normalize_url(self.m3u_account.server_url) username = self.m3u_account.username password = self.m3u_account.password - return f"{server_url}/series/{username}/{password}/{self.stream_id}.{self.container_extension or 'mp4'}" + return f"{normalized_url}/series/{username}/{password}/{self.stream_id}.{self.container_extension or 'mp4'}" else: # We might support non XC accounts in the future # For now, return None diff --git 
a/apps/vod/serializers.py b/apps/vod/serializers.py index 5a672b33..7747cb88 100644 --- a/apps/vod/serializers.py +++ b/apps/vod/serializers.py @@ -1,12 +1,79 @@ from rest_framework import serializers +from django.urls import reverse from .models import ( - Series, VODCategory, Movie, Episode, + Series, VODCategory, Movie, Episode, VODLogo, M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation, M3UVODCategoryRelation ) -from apps.channels.serializers import LogoSerializer from apps.m3u.serializers import M3UAccountSerializer +class VODLogoSerializer(serializers.ModelSerializer): + cache_url = serializers.SerializerMethodField() + movie_count = serializers.SerializerMethodField() + series_count = serializers.SerializerMethodField() + is_used = serializers.SerializerMethodField() + item_names = serializers.SerializerMethodField() + + class Meta: + model = VODLogo + fields = ["id", "name", "url", "cache_url", "movie_count", "series_count", "is_used", "item_names"] + + def validate_url(self, value): + """Validate that the URL is unique for creation or update""" + if self.instance and self.instance.url == value: + return value + + if VODLogo.objects.filter(url=value).exists(): + raise serializers.ValidationError("A VOD logo with this URL already exists.") + + return value + + def create(self, validated_data): + """Handle logo creation with proper URL validation""" + return VODLogo.objects.create(**validated_data) + + def update(self, instance, validated_data): + """Handle logo updates""" + for attr, value in validated_data.items(): + setattr(instance, attr, value) + instance.save() + return instance + + def get_cache_url(self, obj): + request = self.context.get("request") + if request: + return request.build_absolute_uri( + reverse("api:vod:vodlogo-cache", args=[obj.id]) + ) + return reverse("api:vod:vodlogo-cache", args=[obj.id]) + + def get_movie_count(self, obj): + """Get the number of movies using this logo""" + return obj.movie.count() if hasattr(obj, 'movie') else 0 + + def get_series_count(self, obj): + """Get the number of series using this logo""" + return obj.series.count() if hasattr(obj, 'series') else 0 + + def get_is_used(self, obj): + """Check if this logo is used by any movies or series""" + return (hasattr(obj, 'movie') and obj.movie.exists()) or (hasattr(obj, 'series') and obj.series.exists()) + + def get_item_names(self, obj): + """Get the list of movies and series using this logo""" + names = [] + + if hasattr(obj, 'movie'): + for movie in obj.movie.all()[:10]: # Limit to 10 items for performance + names.append(f"Movie: {movie.name}") + + if hasattr(obj, 'series'): + for series in obj.series.all()[:10]: # Limit to 10 items for performance + names.append(f"Series: {series.name}") + + return names + + class M3UVODCategoryRelationSerializer(serializers.ModelSerializer): category = serializers.IntegerField(source="category.id") m3u_account = serializers.IntegerField(source="m3u_account.id") @@ -31,7 +98,7 @@ class VODCategorySerializer(serializers.ModelSerializer): ] class SeriesSerializer(serializers.ModelSerializer): - logo = LogoSerializer(read_only=True) + logo = VODLogoSerializer(read_only=True) episode_count = serializers.SerializerMethodField() class Meta: @@ -43,7 +110,7 @@ class SeriesSerializer(serializers.ModelSerializer): class MovieSerializer(serializers.ModelSerializer): - logo = LogoSerializer(read_only=True) + logo = VODLogoSerializer(read_only=True) class Meta: model = Movie @@ -225,7 +292,7 @@ class M3UEpisodeRelationSerializer(serializers.ModelSerializer): 
class EnhancedSeriesSerializer(serializers.ModelSerializer): """Enhanced serializer for series with provider information""" - logo = LogoSerializer(read_only=True) + logo = VODLogoSerializer(read_only=True) providers = M3USeriesRelationSerializer(source='m3u_relations', many=True, read_only=True) episode_count = serializers.SerializerMethodField() diff --git a/apps/vod/tasks.py b/apps/vod/tasks.py index bc8ad80f..0dcd9cfd 100644 --- a/apps/vod/tasks.py +++ b/apps/vod/tasks.py @@ -5,10 +5,9 @@ from django.db.models import Q from apps.m3u.models import M3UAccount from core.xtream_codes import Client as XtreamCodesClient from .models import ( - VODCategory, Series, Movie, Episode, + VODCategory, Series, Movie, Episode, VODLogo, M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation, M3UVODCategoryRelation ) -from apps.channels.models import Logo from datetime import datetime import logging import json @@ -128,6 +127,37 @@ def refresh_movies(client, account, categories_by_provider, relations, scan_star """Refresh movie content using single API call for all movies""" logger.info(f"Refreshing movies for account {account.name}") + # Ensure "Uncategorized" category exists for movies without a category + uncategorized_category, created = VODCategory.objects.get_or_create( + name="Uncategorized", + category_type="movie", + defaults={} + ) + + # Ensure there's a relation for the Uncategorized category + account_custom_props = account.custom_properties or {} + auto_enable_new = account_custom_props.get("auto_enable_new_groups_vod", True) + + uncategorized_relation, rel_created = M3UVODCategoryRelation.objects.get_or_create( + category=uncategorized_category, + m3u_account=account, + defaults={ + 'enabled': auto_enable_new, + 'custom_properties': {} + } + ) + + if created: + logger.info(f"Created 'Uncategorized' category for movies") + if rel_created: + logger.info(f"Created relation for 'Uncategorized' category (enabled={auto_enable_new})") + + # Add uncategorized category to relations dict for easy access + relations[uncategorized_category.id] = uncategorized_relation + + # Add to categories_by_provider with a special key for items without category + categories_by_provider['__uncategorized__'] = uncategorized_category + # Get all movies in a single API call logger.info("Fetching all movies from provider...") all_movies_data = client.get_vod_streams() # No category_id = get all movies @@ -151,6 +181,37 @@ def refresh_series(client, account, categories_by_provider, relations, scan_star """Refresh series content using single API call for all series""" logger.info(f"Refreshing series for account {account.name}") + # Ensure "Uncategorized" category exists for series without a category + uncategorized_category, created = VODCategory.objects.get_or_create( + name="Uncategorized", + category_type="series", + defaults={} + ) + + # Ensure there's a relation for the Uncategorized category + account_custom_props = account.custom_properties or {} + auto_enable_new = account_custom_props.get("auto_enable_new_groups_series", True) + + uncategorized_relation, rel_created = M3UVODCategoryRelation.objects.get_or_create( + category=uncategorized_category, + m3u_account=account, + defaults={ + 'enabled': auto_enable_new, + 'custom_properties': {} + } + ) + + if created: + logger.info(f"Created 'Uncategorized' category for series") + if rel_created: + logger.info(f"Created relation for 'Uncategorized' category (enabled={auto_enable_new})") + + # Add uncategorized category to relations dict for easy access + 
relations[uncategorized_category.id] = uncategorized_relation + + # Add to categories_by_provider with a special key for items without category + categories_by_provider['__uncategorized__'] = uncategorized_category + # Get all series in a single API call logger.info("Fetching all series from provider...") all_series_data = client.get_series() # No category_id = get all series @@ -241,6 +302,7 @@ def batch_create_categories(categories_data, category_type, account): M3UVODCategoryRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True) # Delete orphaned category relationships (categories no longer in the M3U source) + # Exclude "Uncategorized" from cleanup as it's a special category we manage current_category_ids = set(existing_categories[name].id for name in category_names) existing_relations = M3UVODCategoryRelation.objects.filter( m3u_account=account, @@ -249,7 +311,7 @@ def batch_create_categories(categories_data, category_type, account): relations_to_delete = [ rel for rel in existing_relations - if rel.category_id not in current_category_ids + if rel.category_id not in current_category_ids and rel.category.name != "Uncategorized" ] if relations_to_delete: @@ -332,17 +394,26 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N logger.debug("Skipping disabled category") continue else: - logger.warning(f"No category ID provided for movie {name}") + # Assign to Uncategorized category if no category_id provided + logger.debug(f"No category ID provided for movie {name}, assigning to 'Uncategorized'") + category = categories.get('__uncategorized__') + if category: + movie_data['_category_id'] = category.id + # Check if uncategorized is disabled + relation = relations.get(category.id, None) + if relation and not relation.enabled: + logger.debug("Skipping disabled 'Uncategorized' category") + continue # Extract metadata year = extract_year_from_data(movie_data, 'name') tmdb_id = movie_data.get('tmdb_id') or movie_data.get('tmdb') imdb_id = movie_data.get('imdb_id') or movie_data.get('imdb') - # Clean empty string IDs - if tmdb_id == '': + # Clean empty string IDs and zero values (some providers use 0 to indicate no ID) + if tmdb_id == '' or tmdb_id == 0 or tmdb_id == '0': tmdb_id = None - if imdb_id == '': + if imdb_id == '' or imdb_id == 0 or imdb_id == '0': imdb_id = None # Create a unique key for this movie (priority: TMDB > IMDB > name+year) @@ -403,7 +474,7 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N # Get existing logos existing_logos = { - logo.url: logo for logo in Logo.objects.filter(url__in=logo_urls) + logo.url: logo for logo in VODLogo.objects.filter(url__in=logo_urls) } if logo_urls else {} # Create missing logos @@ -411,20 +482,20 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N for logo_url in logo_urls: if logo_url not in existing_logos: movie_name = logo_url_to_name.get(logo_url, 'Unknown Movie') - logos_to_create.append(Logo(url=logo_url, name=movie_name)) + logos_to_create.append(VODLogo(url=logo_url, name=movie_name)) if logos_to_create: try: - Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True) + VODLogo.objects.bulk_create(logos_to_create, ignore_conflicts=True) # Refresh existing_logos with newly created ones new_logo_urls = [logo.url for logo in logos_to_create] newly_created = { - logo.url: logo for logo in Logo.objects.filter(url__in=new_logo_urls) + logo.url: logo for logo in VODLogo.objects.filter(url__in=new_logo_urls) } 
existing_logos.update(newly_created) - logger.info(f"Created {len(newly_created)} new logos for movies") + logger.info(f"Created {len(newly_created)} new VOD logos for movies") except Exception as e: - logger.warning(f"Failed to create logos: {e}") + logger.warning(f"Failed to create VOD logos: {e}") # Get existing movies based on our keys existing_movies = {} @@ -543,26 +614,41 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N # First, create new movies and get their IDs created_movies = {} if movies_to_create: - Movie.objects.bulk_create(movies_to_create, ignore_conflicts=True) + # Bulk query to check which movies already exist + tmdb_ids = [m.tmdb_id for m in movies_to_create if m.tmdb_id] + imdb_ids = [m.imdb_id for m in movies_to_create if m.imdb_id] + name_year_pairs = [(m.name, m.year) for m in movies_to_create if not m.tmdb_id and not m.imdb_id] - # Get the newly created movies with their IDs - # We need to re-fetch them to get the primary keys + existing_by_tmdb = {m.tmdb_id: m for m in Movie.objects.filter(tmdb_id__in=tmdb_ids)} if tmdb_ids else {} + existing_by_imdb = {m.imdb_id: m for m in Movie.objects.filter(imdb_id__in=imdb_ids)} if imdb_ids else {} + + existing_by_name_year = {} + if name_year_pairs: + for movie in Movie.objects.filter(tmdb_id__isnull=True, imdb_id__isnull=True): + key = (movie.name, movie.year) + if key in name_year_pairs: + existing_by_name_year[key] = movie + + # Check each movie against the bulk query results + movies_actually_created = [] for movie in movies_to_create: - # Find the movie by its unique identifiers - if movie.tmdb_id: - db_movie = Movie.objects.filter(tmdb_id=movie.tmdb_id).first() - elif movie.imdb_id: - db_movie = Movie.objects.filter(imdb_id=movie.imdb_id).first() - else: - db_movie = Movie.objects.filter( - name=movie.name, - year=movie.year, - tmdb_id__isnull=True, - imdb_id__isnull=True - ).first() + existing = None + if movie.tmdb_id and movie.tmdb_id in existing_by_tmdb: + existing = existing_by_tmdb[movie.tmdb_id] + elif movie.imdb_id and movie.imdb_id in existing_by_imdb: + existing = existing_by_imdb[movie.imdb_id] + elif not movie.tmdb_id and not movie.imdb_id: + existing = existing_by_name_year.get((movie.name, movie.year)) - if db_movie: - created_movies[id(movie)] = db_movie + if existing: + created_movies[id(movie)] = existing + else: + movies_actually_created.append(movie) + created_movies[id(movie)] = movie + + # Bulk create only movies that don't exist + if movies_actually_created: + Movie.objects.bulk_create(movies_actually_created) # Update existing movies if movies_to_update: @@ -578,12 +664,16 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N movie.logo = movie._logo_to_update movie.save(update_fields=['logo']) - # Update relations to reference the correct movie objects + # Update relations to reference the correct movie objects (with PKs) for relation in relations_to_create: if id(relation.movie) in created_movies: relation.movie = created_movies[id(relation.movie)] - # Handle relations + for relation in relations_to_update: + if id(relation.movie) in created_movies: + relation.movie = created_movies[id(relation.movie)] + + # All movies now have PKs, safe to bulk create/update relations if relations_to_create: M3UMovieRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True) @@ -634,7 +724,16 @@ def process_series_batch(account, batch, categories, relations, scan_start_time= logger.debug("Skipping disabled category") continue else: 
- logger.warning(f"No category ID provided for series {name}") + # Assign to Uncategorized category if no category_id provided + logger.debug(f"No category ID provided for series {name}, assigning to 'Uncategorized'") + category = categories.get('__uncategorized__') + if category: + series_data['_category_id'] = category.id + # Check if uncategorized is disabled + relation = relations.get(category.id, None) + if relation and not relation.enabled: + logger.debug("Skipping disabled 'Uncategorized' category") + continue # Extract metadata year = extract_year(series_data.get('releaseDate', '')) @@ -644,10 +743,10 @@ def process_series_batch(account, batch, categories, relations, scan_start_time= tmdb_id = series_data.get('tmdb') or series_data.get('tmdb_id') imdb_id = series_data.get('imdb') or series_data.get('imdb_id') - # Clean empty string IDs - if tmdb_id == '': + # Clean empty string IDs and zero values (some providers use 0 to indicate no ID) + if tmdb_id == '' or tmdb_id == 0 or tmdb_id == '0': tmdb_id = None - if imdb_id == '': + if imdb_id == '' or imdb_id == 0 or imdb_id == '0': imdb_id = None # Create a unique key for this series (priority: TMDB > IMDB > name+year) @@ -725,7 +824,7 @@ def process_series_batch(account, batch, categories, relations, scan_start_time= # Get existing logos existing_logos = { - logo.url: logo for logo in Logo.objects.filter(url__in=logo_urls) + logo.url: logo for logo in VODLogo.objects.filter(url__in=logo_urls) } if logo_urls else {} # Create missing logos @@ -733,20 +832,20 @@ def process_series_batch(account, batch, categories, relations, scan_start_time= for logo_url in logo_urls: if logo_url not in existing_logos: series_name = logo_url_to_name.get(logo_url, 'Unknown Series') - logos_to_create.append(Logo(url=logo_url, name=series_name)) + logos_to_create.append(VODLogo(url=logo_url, name=series_name)) if logos_to_create: try: - Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True) + VODLogo.objects.bulk_create(logos_to_create, ignore_conflicts=True) # Refresh existing_logos with newly created ones new_logo_urls = [logo.url for logo in logos_to_create] newly_created = { - logo.url: logo for logo in Logo.objects.filter(url__in=new_logo_urls) + logo.url: logo for logo in VODLogo.objects.filter(url__in=new_logo_urls) } existing_logos.update(newly_created) - logger.info(f"Created {len(newly_created)} new logos for series") + logger.info(f"Created {len(newly_created)} new VOD logos for series") except Exception as e: - logger.warning(f"Failed to create logos: {e}") + logger.warning(f"Failed to create VOD logos: {e}") # Get existing series based on our keys - same pattern as movies existing_series = {} @@ -865,26 +964,41 @@ def process_series_batch(account, batch, categories, relations, scan_start_time= # First, create new series and get their IDs created_series = {} if series_to_create: - Series.objects.bulk_create(series_to_create, ignore_conflicts=True) + # Bulk query to check which series already exist + tmdb_ids = [s.tmdb_id for s in series_to_create if s.tmdb_id] + imdb_ids = [s.imdb_id for s in series_to_create if s.imdb_id] + name_year_pairs = [(s.name, s.year) for s in series_to_create if not s.tmdb_id and not s.imdb_id] - # Get the newly created series with their IDs - # We need to re-fetch them to get the primary keys + existing_by_tmdb = {s.tmdb_id: s for s in Series.objects.filter(tmdb_id__in=tmdb_ids)} if tmdb_ids else {} + existing_by_imdb = {s.imdb_id: s for s in Series.objects.filter(imdb_id__in=imdb_ids)} if imdb_ids else {} + 
+ existing_by_name_year = {} + if name_year_pairs: + for series in Series.objects.filter(tmdb_id__isnull=True, imdb_id__isnull=True): + key = (series.name, series.year) + if key in name_year_pairs: + existing_by_name_year[key] = series + + # Check each series against the bulk query results + series_actually_created = [] for series in series_to_create: - # Find the series by its unique identifiers - if series.tmdb_id: - db_series = Series.objects.filter(tmdb_id=series.tmdb_id).first() - elif series.imdb_id: - db_series = Series.objects.filter(imdb_id=series.imdb_id).first() - else: - db_series = Series.objects.filter( - name=series.name, - year=series.year, - tmdb_id__isnull=True, - imdb_id__isnull=True - ).first() + existing = None + if series.tmdb_id and series.tmdb_id in existing_by_tmdb: + existing = existing_by_tmdb[series.tmdb_id] + elif series.imdb_id and series.imdb_id in existing_by_imdb: + existing = existing_by_imdb[series.imdb_id] + elif not series.tmdb_id and not series.imdb_id: + existing = existing_by_name_year.get((series.name, series.year)) - if db_series: - created_series[id(series)] = db_series + if existing: + created_series[id(series)] = existing + else: + series_actually_created.append(series) + created_series[id(series)] = series + + # Bulk create only series that don't exist + if series_actually_created: + Series.objects.bulk_create(series_actually_created) # Update existing series if series_to_update: @@ -900,12 +1014,16 @@ def process_series_batch(account, batch, categories, relations, scan_start_time= series.logo = series._logo_to_update series.save(update_fields=['logo']) - # Update relations to reference the correct series objects + # Update relations to reference the correct series objects (with PKs) for relation in relations_to_create: if id(relation.series) in created_series: relation.series = created_series[id(relation.series)] - # Handle relations + for relation in relations_to_update: + if id(relation.series) in created_series: + relation.series = created_series[id(relation.series)] + + # All series now have PKs, safe to bulk create/update relations if relations_to_create: M3USeriesRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True) @@ -1152,7 +1270,13 @@ def refresh_series_episodes(account, series, external_series_id, episodes_data=N def batch_process_episodes(account, series, episodes_data, scan_start_time=None): - """Process episodes in batches for better performance""" + """Process episodes in batches for better performance. + + Note: Multiple streams can represent the same episode (e.g., different languages + or qualities). Each stream has a unique stream_id, but they share the same + season/episode number. We create one Episode record per (series, season, episode) + and multiple M3UEpisodeRelation records pointing to it. 
+ """ if not episodes_data: return @@ -1169,12 +1293,13 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None) logger.info(f"Batch processing {len(all_episodes_data)} episodes for series {series.name}") # Extract episode identifiers - episode_keys = [] + # Note: episode_keys may have duplicates when multiple streams represent same episode + episode_keys = set() # Use set to track unique episode keys episode_ids = [] for episode_data in all_episodes_data: season_num = episode_data['_season_number'] episode_num = episode_data.get('episode_num', 0) - episode_keys.append((series.id, season_num, episode_num)) + episode_keys.add((series.id, season_num, episode_num)) episode_ids.append(str(episode_data.get('id'))) # Pre-fetch existing episodes @@ -1197,12 +1322,25 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None) relations_to_create = [] relations_to_update = [] + # Track episodes we're creating in this batch to avoid duplicates + # Key: (series_id, season_number, episode_number) -> Episode object + episodes_pending_creation = {} + for episode_data in all_episodes_data: try: episode_id = str(episode_data.get('id')) episode_name = episode_data.get('title', 'Unknown Episode') - season_number = episode_data['_season_number'] - episode_number = episode_data.get('episode_num', 0) + # Ensure season and episode numbers are integers (API may return strings) + try: + season_number = int(episode_data['_season_number']) + except (ValueError, TypeError) as e: + logger.warning(f"Invalid season_number '{episode_data.get('_season_number')}' for episode '{episode_name}': {e}") + season_number = 0 + try: + episode_number = int(episode_data.get('episode_num', 0)) + except (ValueError, TypeError) as e: + logger.warning(f"Invalid episode_num '{episode_data.get('episode_num')}' for episode '{episode_name}': {e}") + episode_number = 0 info = episode_data.get('info', {}) # Extract episode metadata @@ -1226,10 +1364,15 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None) if backdrop: custom_props['backdrop_path'] = [backdrop] - # Find existing episode + # Find existing episode - check DB first, then pending creations episode_key = (series.id, season_number, episode_number) episode = existing_episodes.get(episode_key) + # Check if we already have this episode pending creation (multiple streams for same episode) + if not episode and episode_key in episodes_pending_creation: + episode = episodes_pending_creation[episode_key] + logger.debug(f"Reusing pending episode for S{season_number}E{episode_number} (stream_id: {episode_id})") + if episode: # Update existing episode updated = False @@ -1258,7 +1401,9 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None) episode.custom_properties = custom_props if custom_props else None updated = True - if updated: + # Only add to update list if episode has a PK (exists in DB) and isn't already in list + # Episodes pending creation don't have PKs yet and will be created via bulk_create + if updated and episode.pk and episode not in episodes_to_update: episodes_to_update.append(episode) else: # Create new episode @@ -1276,6 +1421,8 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None) custom_properties=custom_props if custom_props else None ) episodes_to_create.append(episode) + # Track this episode so subsequent streams with same season/episode can reuse it + episodes_pending_creation[episode_key] = episode # Handle episode relation if 
episode_id in existing_relations: @@ -1309,9 +1456,43 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None) # Execute batch operations with transaction.atomic(): - # Create new episodes + # Create new episodes - use ignore_conflicts in case of race conditions if episodes_to_create: - Episode.objects.bulk_create(episodes_to_create) + Episode.objects.bulk_create(episodes_to_create, ignore_conflicts=True) + + # Re-fetch the created episodes to get their PKs + # We need to do this because bulk_create with ignore_conflicts doesn't set PKs + created_episode_keys = [ + (ep.series_id, ep.season_number, ep.episode_number) + for ep in episodes_to_create + ] + db_episodes = Episode.objects.filter(series=series) + episode_pk_map = { + (ep.series_id, ep.season_number, ep.episode_number): ep + for ep in db_episodes + } + + # Update relations to point to the actual DB episodes with PKs + for relation in relations_to_create: + ep = relation.episode + key = (ep.series_id, ep.season_number, ep.episode_number) + if key in episode_pk_map: + relation.episode = episode_pk_map[key] + + # Filter out relations with unsaved episodes (no PK) + # This can happen if bulk_create had a conflict and ignore_conflicts=True didn't save the episode + valid_relations_to_create = [] + for relation in relations_to_create: + if relation.episode.pk is not None: + valid_relations_to_create.append(relation) + else: + season_num = relation.episode.season_number + episode_num = relation.episode.episode_number + logger.warning( + f"Skipping relation for episode S{season_num}E{episode_num} " + f"- episode not saved to database" + ) + relations_to_create = valid_relations_to_create # Update existing episodes if episodes_to_update: @@ -1320,9 +1501,9 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None) 'tmdb_id', 'imdb_id', 'custom_properties' ]) - # Create new episode relations + # Create new episode relations - use ignore_conflicts for stream_id duplicates if relations_to_create: - M3UEpisodeRelation.objects.bulk_create(relations_to_create) + M3UEpisodeRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True) # Update existing episode relations if relations_to_update: @@ -1424,21 +1605,21 @@ def cleanup_orphaned_vod_content(stale_days=0, scan_start_time=None, account_id= stale_episode_count = stale_episode_relations.count() stale_episode_relations.delete() - # Clean up movies with no relations (orphaned) - only if no account_id specified (global cleanup) - if not account_id: - orphaned_movies = Movie.objects.filter(m3u_relations__isnull=True) - orphaned_movie_count = orphaned_movies.count() + # Clean up movies with no relations (orphaned) + # Safe to delete even during account-specific cleanup because if ANY account + # has a relation, m3u_relations will not be null + orphaned_movies = Movie.objects.filter(m3u_relations__isnull=True) + orphaned_movie_count = orphaned_movies.count() + if orphaned_movie_count > 0: + logger.info(f"Deleting {orphaned_movie_count} orphaned movies with no M3U relations") orphaned_movies.delete() - # Clean up series with no relations (orphaned) - only if no account_id specified (global cleanup) - orphaned_series = Series.objects.filter(m3u_relations__isnull=True) - orphaned_series_count = orphaned_series.count() + # Clean up series with no relations (orphaned) + orphaned_series = Series.objects.filter(m3u_relations__isnull=True) + orphaned_series_count = orphaned_series.count() + if orphaned_series_count > 0: + logger.info(f"Deleting 
{orphaned_series_count} orphaned series with no M3U relations") orphaned_series.delete() - else: - # When cleaning up for specific account, we don't remove orphaned content - # as other accounts might still reference it - orphaned_movie_count = 0 - orphaned_series_count = 0 # Episodes will be cleaned up via CASCADE when series are deleted @@ -1999,7 +2180,7 @@ def refresh_movie_advanced_data(m3u_movie_relation_id, force_refresh=False): def validate_logo_reference(obj, obj_type="object"): """ - Validate that a logo reference exists in the database. + Validate that a VOD logo reference exists in the database. If not, set it to None to prevent foreign key constraint violations. Args: @@ -2019,9 +2200,9 @@ def validate_logo_reference(obj, obj_type="object"): try: # Verify the logo exists in the database - Logo.objects.get(pk=obj.logo.pk) + VODLogo.objects.get(pk=obj.logo.pk) return True - except Logo.DoesNotExist: - logger.warning(f"Logo with ID {obj.logo.pk} does not exist in database for {obj_type} '{getattr(obj, 'name', 'Unknown')}', setting to None") + except VODLogo.DoesNotExist: + logger.warning(f"VOD Logo with ID {obj.logo.pk} does not exist in database for {obj_type} '{getattr(obj, 'name', 'Unknown')}', setting to None") obj.logo = None return False diff --git a/core/api_urls.py b/core/api_urls.py index baa4bbe5..75257db1 100644 --- a/core/api_urls.py +++ b/core/api_urls.py @@ -2,7 +2,16 @@ from django.urls import path, include from rest_framework.routers import DefaultRouter -from .api_views import UserAgentViewSet, StreamProfileViewSet, CoreSettingsViewSet, environment, version, rehash_streams_endpoint, TimezoneListView +from .api_views import ( + UserAgentViewSet, + StreamProfileViewSet, + CoreSettingsViewSet, + environment, + version, + rehash_streams_endpoint, + TimezoneListView, + get_system_events +) router = DefaultRouter() router.register(r'useragents', UserAgentViewSet, basename='useragent') @@ -13,5 +22,6 @@ urlpatterns = [ path('version/', version, name='version'), path('rehash-streams/', rehash_streams_endpoint, name='rehash_streams'), path('timezones/', TimezoneListView.as_view(), name='timezones'), + path('system-events/', get_system_events, name='system_events'), path('', include(router.urls)), ] diff --git a/core/api_views.py b/core/api_views.py index f475909a..30829174 100644 --- a/core/api_views.py +++ b/core/api_views.py @@ -15,8 +15,9 @@ from .models import ( UserAgent, StreamProfile, CoreSettings, - STREAM_HASH_KEY, - NETWORK_ACCESS, + STREAM_SETTINGS_KEY, + DVR_SETTINGS_KEY, + NETWORK_ACCESS_KEY, PROXY_SETTINGS_KEY, ) from .serializers import ( @@ -68,16 +69,28 @@ class CoreSettingsViewSet(viewsets.ModelViewSet): def update(self, request, *args, **kwargs): instance = self.get_object() + old_value = instance.value response = super().update(request, *args, **kwargs) - if instance.key == STREAM_HASH_KEY: - if instance.value != request.data["value"]: - rehash_streams.delay(request.data["value"].split(",")) - # If DVR pre/post offsets changed, reschedule upcoming recordings - try: - from core.models import DVR_PRE_OFFSET_MINUTES_KEY, DVR_POST_OFFSET_MINUTES_KEY - if instance.key in (DVR_PRE_OFFSET_MINUTES_KEY, DVR_POST_OFFSET_MINUTES_KEY): - if instance.value != request.data.get("value"): + # If stream settings changed and m3u_hash_key is different, rehash streams + if instance.key == STREAM_SETTINGS_KEY: + new_value = request.data.get("value", {}) + if isinstance(new_value, dict) and isinstance(old_value, dict): + old_hash = old_value.get("m3u_hash_key", "") + 
new_hash = new_value.get("m3u_hash_key", "") + if old_hash != new_hash: + hash_keys = new_hash.split(",") if isinstance(new_hash, str) else new_hash + rehash_streams.delay(hash_keys) + + # If DVR settings changed and pre/post offsets are different, reschedule upcoming recordings + if instance.key == DVR_SETTINGS_KEY: + new_value = request.data.get("value", {}) + if isinstance(new_value, dict) and isinstance(old_value, dict): + old_pre = old_value.get("pre_offset_minutes") + new_pre = new_value.get("pre_offset_minutes") + old_post = old_value.get("post_offset_minutes") + new_post = new_value.get("post_offset_minutes") + if old_pre != new_pre or old_post != new_post: try: # Prefer async task if Celery is available from apps.channels.tasks import reschedule_upcoming_recordings_for_offset_change @@ -86,24 +99,23 @@ class CoreSettingsViewSet(viewsets.ModelViewSet): # Fallback to synchronous implementation from apps.channels.tasks import reschedule_upcoming_recordings_for_offset_change_impl reschedule_upcoming_recordings_for_offset_change_impl() - except Exception: - pass return response def create(self, request, *args, **kwargs): response = super().create(request, *args, **kwargs) - # If creating DVR pre/post offset settings, also reschedule upcoming recordings + # If creating DVR settings with offset values, reschedule upcoming recordings try: key = request.data.get("key") - from core.models import DVR_PRE_OFFSET_MINUTES_KEY, DVR_POST_OFFSET_MINUTES_KEY - if key in (DVR_PRE_OFFSET_MINUTES_KEY, DVR_POST_OFFSET_MINUTES_KEY): - try: - from apps.channels.tasks import reschedule_upcoming_recordings_for_offset_change - reschedule_upcoming_recordings_for_offset_change.delay() - except Exception: - from apps.channels.tasks import reschedule_upcoming_recordings_for_offset_change_impl - reschedule_upcoming_recordings_for_offset_change_impl() + if key == DVR_SETTINGS_KEY: + value = request.data.get("value", {}) + if isinstance(value, dict) and ("pre_offset_minutes" in value or "post_offset_minutes" in value): + try: + from apps.channels.tasks import reschedule_upcoming_recordings_for_offset_change + reschedule_upcoming_recordings_for_offset_change.delay() + except Exception: + from apps.channels.tasks import reschedule_upcoming_recordings_for_offset_change_impl + reschedule_upcoming_recordings_for_offset_change_impl() except Exception: pass return response @@ -111,13 +123,13 @@ class CoreSettingsViewSet(viewsets.ModelViewSet): def check(self, request, *args, **kwargs): data = request.data - if data.get("key") == NETWORK_ACCESS: + if data.get("key") == NETWORK_ACCESS_KEY: client_ip = ipaddress.ip_address(get_client_ip(request)) in_network = {} invalid = [] - value = json.loads(data.get("value", "{}")) + value = data.get("value", {}) for key, val in value.items(): in_network[key] = [] cidrs = val.split(",") @@ -143,7 +155,11 @@ class CoreSettingsViewSet(viewsets.ModelViewSet): status=status.HTTP_200_OK, ) - return Response(in_network, status=status.HTTP_200_OK) + response_data = { + **in_network, + "client_ip": str(client_ip) + } + return Response(response_data, status=status.HTTP_200_OK) return Response({}, status=status.HTTP_200_OK) @@ -157,8 +173,8 @@ class ProxySettingsViewSet(viewsets.ViewSet): """Get or create the proxy settings CoreSettings entry""" try: settings_obj = CoreSettings.objects.get(key=PROXY_SETTINGS_KEY) - settings_data = json.loads(settings_obj.value) - except (CoreSettings.DoesNotExist, json.JSONDecodeError): + settings_data = settings_obj.value + except CoreSettings.DoesNotExist: # 
Create default settings settings_data = { "buffering_timeout": 15, @@ -171,7 +187,7 @@ class ProxySettingsViewSet(viewsets.ViewSet): key=PROXY_SETTINGS_KEY, defaults={ "name": "Proxy Settings", - "value": json.dumps(settings_data) + "value": settings_data } ) return settings_obj, settings_data @@ -193,8 +209,8 @@ class ProxySettingsViewSet(viewsets.ViewSet): serializer = ProxySettingsSerializer(data=request.data) serializer.is_valid(raise_exception=True) - # Update the JSON data - settings_obj.value = json.dumps(serializer.validated_data) + # Update the JSON data - store as dict directly + settings_obj.value = serializer.validated_data settings_obj.save() return Response(serializer.validated_data) @@ -209,8 +225,8 @@ class ProxySettingsViewSet(viewsets.ViewSet): serializer = ProxySettingsSerializer(data=updated_data) serializer.is_valid(raise_exception=True) - # Update the JSON data - settings_obj.value = json.dumps(serializer.validated_data) + # Update the JSON data - store as dict directly + settings_obj.value = serializer.validated_data settings_obj.save() return Response(serializer.validated_data) @@ -328,8 +344,8 @@ def rehash_streams_endpoint(request): """Trigger the rehash streams task""" try: # Get the current hash keys from settings - hash_key_setting = CoreSettings.objects.get(key=STREAM_HASH_KEY) - hash_keys = hash_key_setting.value.split(",") + hash_key = CoreSettings.get_m3u_hash_key() + hash_keys = hash_key.split(",") if isinstance(hash_key, str) else hash_key # Queue the rehash task task = rehash_streams.delay(hash_keys) @@ -340,10 +356,10 @@ def rehash_streams_endpoint(request): "task_id": task.id }, status=status.HTTP_200_OK) - except CoreSettings.DoesNotExist: + except Exception as e: return Response({ "success": False, - "message": "Hash key settings not found" + "message": f"Error triggering rehash: {str(e)}" }, status=status.HTTP_400_BAD_REQUEST) except Exception as e: @@ -396,3 +412,64 @@ class TimezoneListView(APIView): 'grouped': grouped, 'count': len(all_timezones) }) + + +# ───────────────────────────── +# System Events API +# ───────────────────────────── +@api_view(['GET']) +@permission_classes([IsAuthenticated]) +def get_system_events(request): + """ + Get recent system events (channel start/stop, buffering, client connections, etc.) 
+ + Query Parameters: + limit: Number of events to return per page (default: 100, max: 1000) + offset: Number of events to skip (for pagination, default: 0) + event_type: Filter by specific event type (optional) + """ + from core.models import SystemEvent + + try: + # Get pagination params + limit = min(int(request.GET.get('limit', 100)), 1000) + offset = int(request.GET.get('offset', 0)) + + # Start with all events + events = SystemEvent.objects.all() + + # Filter by event_type if provided + event_type = request.GET.get('event_type') + if event_type: + events = events.filter(event_type=event_type) + + # Get total count before applying pagination + total_count = events.count() + + # Apply offset and limit for pagination + events = events[offset:offset + limit] + + # Serialize the data + events_data = [{ + 'id': event.id, + 'event_type': event.event_type, + 'event_type_display': event.get_event_type_display(), + 'timestamp': event.timestamp.isoformat(), + 'channel_id': str(event.channel_id) if event.channel_id else None, + 'channel_name': event.channel_name, + 'details': event.details + } for event in events] + + return Response({ + 'events': events_data, + 'count': len(events_data), + 'total': total_count, + 'offset': offset, + 'limit': limit + }) + + except Exception as e: + logger.error(f"Error fetching system events: {e}") + return Response({ + 'error': 'Failed to fetch system events' + }, status=status.HTTP_500_INTERNAL_SERVER_ERROR) diff --git a/core/fixtures/initial_data.json b/core/fixtures/initial_data.json index c037fa78..889f0d24 100644 --- a/core/fixtures/initial_data.json +++ b/core/fixtures/initial_data.json @@ -23,7 +23,7 @@ "model": "core.streamprofile", "pk": 1, "fields": { - "name": "ffmpeg", + "name": "FFmpeg", "command": "ffmpeg", "parameters": "-i {streamUrl} -c:v copy -c:a copy -f mpegts pipe:1", "is_active": true, @@ -34,11 +34,22 @@ "model": "core.streamprofile", "pk": 2, "fields": { - "name": "streamlink", + "name": "Streamlink", "command": "streamlink", "parameters": "{streamUrl} best --stdout", "is_active": true, "user_agent": "1" } + }, + { + "model": "core.streamprofile", + "pk": 3, + "fields": { + "name": "VLC", + "command": "cvlc", + "parameters": "-vv -I dummy --no-video-title-show --http-user-agent {userAgent} {streamUrl} --sout #standard{access=file,mux=ts,dst=-}", + "is_active": true, + "user_agent": "1" + } } ] diff --git a/core/management/commands/reset_network_access.py b/core/management/commands/reset_network_access.py index 3b0e5a55..a31d247c 100644 --- a/core/management/commands/reset_network_access.py +++ b/core/management/commands/reset_network_access.py @@ -1,13 +1,13 @@ # your_app/management/commands/update_column.py from django.core.management.base import BaseCommand -from core.models import CoreSettings, NETWORK_ACCESS +from core.models import CoreSettings, NETWORK_ACCESS_KEY class Command(BaseCommand): help = "Reset network access settings" def handle(self, *args, **options): - setting = CoreSettings.objects.get(key=NETWORK_ACCESS) - setting.value = "{}" + setting = CoreSettings.objects.get(key=NETWORK_ACCESS_KEY) + setting.value = {} setting.save() diff --git a/core/migrations/0017_systemevent.py b/core/migrations/0017_systemevent.py new file mode 100644 index 00000000..9b97213c --- /dev/null +++ b/core/migrations/0017_systemevent.py @@ -0,0 +1,28 @@ +# Generated by Django 5.2.4 on 2025-11-20 20:47 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', 
'0016_update_dvr_template_paths'), + ] + + operations = [ + migrations.CreateModel( + name='SystemEvent', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('event_type', models.CharField(choices=[('channel_start', 'Channel Started'), ('channel_stop', 'Channel Stopped'), ('channel_buffering', 'Channel Buffering'), ('channel_failover', 'Channel Failover'), ('channel_reconnect', 'Channel Reconnected'), ('channel_error', 'Channel Error'), ('client_connect', 'Client Connected'), ('client_disconnect', 'Client Disconnected'), ('recording_start', 'Recording Started'), ('recording_end', 'Recording Ended'), ('stream_switch', 'Stream Switched'), ('m3u_refresh', 'M3U Refreshed'), ('m3u_download', 'M3U Downloaded'), ('epg_refresh', 'EPG Refreshed'), ('epg_download', 'EPG Downloaded')], db_index=True, max_length=50)), + ('timestamp', models.DateTimeField(auto_now_add=True, db_index=True)), + ('channel_id', models.UUIDField(blank=True, db_index=True, null=True)), + ('channel_name', models.CharField(blank=True, max_length=255, null=True)), + ('details', models.JSONField(blank=True, default=dict)), + ], + options={ + 'ordering': ['-timestamp'], + 'indexes': [models.Index(fields=['-timestamp'], name='core_system_timesta_c6c3d1_idx'), models.Index(fields=['event_type', '-timestamp'], name='core_system_event_t_4267d9_idx')], + }, + ), + ] diff --git a/core/migrations/0018_alter_systemevent_event_type.py b/core/migrations/0018_alter_systemevent_event_type.py new file mode 100644 index 00000000..3fe4eecd --- /dev/null +++ b/core/migrations/0018_alter_systemevent_event_type.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-11-21 15:59 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0017_systemevent'), + ] + + operations = [ + migrations.AlterField( + model_name='systemevent', + name='event_type', + field=models.CharField(choices=[('channel_start', 'Channel Started'), ('channel_stop', 'Channel Stopped'), ('channel_buffering', 'Channel Buffering'), ('channel_failover', 'Channel Failover'), ('channel_reconnect', 'Channel Reconnected'), ('channel_error', 'Channel Error'), ('client_connect', 'Client Connected'), ('client_disconnect', 'Client Disconnected'), ('recording_start', 'Recording Started'), ('recording_end', 'Recording Ended'), ('stream_switch', 'Stream Switched'), ('m3u_refresh', 'M3U Refreshed'), ('m3u_download', 'M3U Downloaded'), ('epg_refresh', 'EPG Refreshed'), ('epg_download', 'EPG Downloaded'), ('login_success', 'Login Successful'), ('login_failed', 'Login Failed'), ('logout', 'User Logged Out'), ('m3u_blocked', 'M3U Download Blocked'), ('epg_blocked', 'EPG Download Blocked')], db_index=True, max_length=50), + ), + ] diff --git a/core/migrations/0019_add_vlc_stream_profile.py b/core/migrations/0019_add_vlc_stream_profile.py new file mode 100644 index 00000000..c3f72592 --- /dev/null +++ b/core/migrations/0019_add_vlc_stream_profile.py @@ -0,0 +1,42 @@ +# Generated migration to add VLC stream profile + +from django.db import migrations + +def add_vlc_profile(apps, schema_editor): + StreamProfile = apps.get_model("core", "StreamProfile") + UserAgent = apps.get_model("core", "UserAgent") + + # Check if VLC profile already exists + if not StreamProfile.objects.filter(name="VLC").exists(): + # Get the TiviMate user agent (should be pk=1) + try: + tivimate_ua = UserAgent.objects.get(pk=1) + except UserAgent.DoesNotExist: + # Fallback: get first available user 
agent + tivimate_ua = UserAgent.objects.first() + if not tivimate_ua: + # No user agents exist, skip creating profile + return + + StreamProfile.objects.create( + name="VLC", + command="cvlc", + parameters="-vv -I dummy --no-video-title-show --http-user-agent {userAgent} {streamUrl} --sout #standard{access=file,mux=ts,dst=-}", + is_active=True, + user_agent=tivimate_ua, + locked=True, # Make it read-only like ffmpeg/streamlink + ) + +def remove_vlc_profile(apps, schema_editor): + StreamProfile = apps.get_model("core", "StreamProfile") + StreamProfile.objects.filter(name="VLC").delete() + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0018_alter_systemevent_event_type'), + ] + + operations = [ + migrations.RunPython(add_vlc_profile, remove_vlc_profile), + ] diff --git a/core/migrations/0020_change_coresettings_value_to_jsonfield.py b/core/migrations/0020_change_coresettings_value_to_jsonfield.py new file mode 100644 index 00000000..ac6ad089 --- /dev/null +++ b/core/migrations/0020_change_coresettings_value_to_jsonfield.py @@ -0,0 +1,267 @@ +# Generated migration to change CoreSettings value field to JSONField and consolidate settings + +import json +from django.db import migrations, models + + +def convert_string_to_json(apps, schema_editor): + """Convert existing string values to appropriate JSON types before changing column type""" + CoreSettings = apps.get_model("core", "CoreSettings") + + for setting in CoreSettings.objects.all(): + value = setting.value + + if not value: + # Empty strings become empty string in JSON + setting.value = json.dumps("") + setting.save(update_fields=['value']) + continue + + # Try to parse as JSON if it looks like JSON (objects/arrays) + if value.startswith('{') or value.startswith('['): + try: + parsed = json.loads(value) + # Store as JSON string temporarily (column is still CharField) + setting.value = json.dumps(parsed) + setting.save(update_fields=['value']) + continue + except (json.JSONDecodeError, ValueError): + pass + + # Try to parse as number + try: + # Check if it's an integer + if '.' 
not in value and value.lstrip('-').isdigit(): + setting.value = json.dumps(int(value)) + setting.save(update_fields=['value']) + continue + # Check if it's a float + float_val = float(value) + setting.value = json.dumps(float_val) + setting.save(update_fields=['value']) + continue + except (ValueError, AttributeError): + pass + + # Check for booleans + if value.lower() in ('true', 'false', '1', '0', 'yes', 'no', 'on', 'off'): + bool_val = value.lower() in ('true', '1', 'yes', 'on') + setting.value = json.dumps(bool_val) + setting.save(update_fields=['value']) + continue + + # Default: store as JSON string + setting.value = json.dumps(value) + setting.save(update_fields=['value']) + + +def consolidate_settings(apps, schema_editor): + """Consolidate individual setting rows into grouped JSON objects.""" + CoreSettings = apps.get_model("core", "CoreSettings") + + # Helper to get setting value + def get_value(key, default=None): + try: + obj = CoreSettings.objects.get(key=key) + return obj.value if obj.value is not None else default + except CoreSettings.DoesNotExist: + return default + + # STREAM SETTINGS + stream_settings = { + "default_user_agent": get_value("default-user-agent"), + "default_stream_profile": get_value("default-stream-profile"), + "m3u_hash_key": get_value("m3u-hash-key", ""), + "preferred_region": get_value("preferred-region"), + "auto_import_mapped_files": get_value("auto-import-mapped-files"), + } + CoreSettings.objects.update_or_create( + key="stream_settings", + defaults={"name": "Stream Settings", "value": stream_settings} + ) + + # DVR SETTINGS + dvr_settings = { + "tv_template": get_value("dvr-tv-template", "TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv"), + "movie_template": get_value("dvr-movie-template", "Movies/{title} ({year}).mkv"), + "tv_fallback_dir": get_value("dvr-tv-fallback-dir", "TV_Shows"), + "tv_fallback_template": get_value("dvr-tv-fallback-template", "TV_Shows/{show}/{start}.mkv"), + "movie_fallback_template": get_value("dvr-movie-fallback-template", "Movies/{start}.mkv"), + "comskip_enabled": bool(get_value("dvr-comskip-enabled", False)), + "comskip_custom_path": get_value("dvr-comskip-custom-path", ""), + "pre_offset_minutes": int(get_value("dvr-pre-offset-minutes", 0) or 0), + "post_offset_minutes": int(get_value("dvr-post-offset-minutes", 0) or 0), + "series_rules": get_value("dvr-series-rules", []), + } + CoreSettings.objects.update_or_create( + key="dvr_settings", + defaults={"name": "DVR Settings", "value": dvr_settings} + ) + + # BACKUP SETTINGS - using underscore keys (not dashes) + backup_settings = { + "schedule_enabled": get_value("backup_schedule_enabled") if get_value("backup_schedule_enabled") is not None else True, + "schedule_frequency": get_value("backup_schedule_frequency") or "daily", + "schedule_time": get_value("backup_schedule_time") or "03:00", + "schedule_day_of_week": get_value("backup_schedule_day_of_week") if get_value("backup_schedule_day_of_week") is not None else 0, + "retention_count": get_value("backup_retention_count") if get_value("backup_retention_count") is not None else 3, + "schedule_cron_expression": get_value("backup_schedule_cron_expression") or "", + } + CoreSettings.objects.update_or_create( + key="backup_settings", + defaults={"name": "Backup Settings", "value": backup_settings} + ) + + # SYSTEM SETTINGS + system_settings = { + "time_zone": get_value("system-time-zone", "UTC"), + "max_system_events": int(get_value("max-system-events", 100) or 100), + } + CoreSettings.objects.update_or_create( + 
key="system_settings", + defaults={"name": "System Settings", "value": system_settings} + ) + + # Rename proxy-settings to proxy_settings (if it exists with old name) + try: + old_proxy = CoreSettings.objects.get(key="proxy-settings") + old_proxy.key = "proxy_settings" + old_proxy.save() + except CoreSettings.DoesNotExist: + pass + + # Ensure proxy_settings exists with defaults if not present + proxy_obj, proxy_created = CoreSettings.objects.get_or_create( + key="proxy_settings", + defaults={ + "name": "Proxy Settings", + "value": { + "buffering_timeout": 15, + "buffering_speed": 1.0, + "redis_chunk_ttl": 60, + "channel_shutdown_delay": 0, + "channel_init_grace_period": 5, + } + } + ) + + # Rename network-access to network_access (if it exists with old name) + try: + old_network = CoreSettings.objects.get(key="network-access") + old_network.key = "network_access" + old_network.save() + except CoreSettings.DoesNotExist: + pass + + # Ensure network_access exists with defaults if not present + network_obj, network_created = CoreSettings.objects.get_or_create( + key="network_access", + defaults={ + "name": "Network Access", + "value": {} + } + ) + # Delete old individual setting rows (keep only the new grouped settings) + grouped_keys = ["stream_settings", "dvr_settings", "backup_settings", "system_settings", "proxy_settings", "network_access"] + CoreSettings.objects.exclude(key__in=grouped_keys).delete() + + +def reverse_migration(apps, schema_editor): + """Reverse migration: split grouped settings and convert JSON back to strings""" + CoreSettings = apps.get_model("core", "CoreSettings") + + # Helper to create individual setting + def create_setting(key, name, value): + # Convert value back to string representation for CharField + if isinstance(value, str): + str_value = value + elif isinstance(value, bool): + str_value = "true" if value else "false" + elif isinstance(value, (int, float)): + str_value = str(value) + elif isinstance(value, (dict, list)): + str_value = json.dumps(value) + elif value is None: + str_value = "" + else: + str_value = str(value) + + CoreSettings.objects.update_or_create( + key=key, + defaults={"name": name, "value": str_value} + ) + + # Split stream_settings + try: + stream = CoreSettings.objects.get(key="stream_settings") + if isinstance(stream.value, dict): + create_setting("default_user_agent", "Default User Agent", stream.value.get("default_user_agent")) + create_setting("default_stream_profile", "Default Stream Profile", stream.value.get("default_stream_profile")) + create_setting("stream_hash_key", "Stream Hash Key", stream.value.get("m3u_hash_key", "")) + create_setting("preferred_region", "Preferred Region", stream.value.get("preferred_region")) + create_setting("auto_import_mapped_files", "Auto Import Mapped Files", stream.value.get("auto_import_mapped_files")) + stream.delete() + except CoreSettings.DoesNotExist: + pass + + # Split dvr_settings + try: + dvr = CoreSettings.objects.get(key="dvr_settings") + if isinstance(dvr.value, dict): + create_setting("dvr_tv_template", "DVR TV Template", dvr.value.get("tv_template", "TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv")) + create_setting("dvr_movie_template", "DVR Movie Template", dvr.value.get("movie_template", "Movies/{title} ({year}).mkv")) + create_setting("dvr_tv_fallback_dir", "DVR TV Fallback Dir", dvr.value.get("tv_fallback_dir", "TV_Shows")) + create_setting("dvr_tv_fallback_template", "DVR TV Fallback Template", dvr.value.get("tv_fallback_template", "TV_Shows/{show}/{start}.mkv")) + 
create_setting("dvr_movie_fallback_template", "DVR Movie Fallback Template", dvr.value.get("movie_fallback_template", "Movies/{start}.mkv")) + create_setting("dvr_comskip_enabled", "DVR Comskip Enabled", dvr.value.get("comskip_enabled", False)) + create_setting("dvr_comskip_custom_path", "DVR Comskip Custom Path", dvr.value.get("comskip_custom_path", "")) + create_setting("dvr_pre_offset_minutes", "DVR Pre Offset Minutes", dvr.value.get("pre_offset_minutes", 0)) + create_setting("dvr_post_offset_minutes", "DVR Post Offset Minutes", dvr.value.get("post_offset_minutes", 0)) + create_setting("dvr_series_rules", "DVR Series Rules", dvr.value.get("series_rules", [])) + dvr.delete() + except CoreSettings.DoesNotExist: + pass + + # Split backup_settings + try: + backup = CoreSettings.objects.get(key="backup_settings") + if isinstance(backup.value, dict): + create_setting("backup_schedule_enabled", "Backup Schedule Enabled", backup.value.get("schedule_enabled", False)) + create_setting("backup_schedule_frequency", "Backup Schedule Frequency", backup.value.get("schedule_frequency", "weekly")) + create_setting("backup_schedule_time", "Backup Schedule Time", backup.value.get("schedule_time", "02:00")) + create_setting("backup_schedule_day_of_week", "Backup Schedule Day of Week", backup.value.get("schedule_day_of_week", 0)) + create_setting("backup_retention_count", "Backup Retention Count", backup.value.get("retention_count", 7)) + create_setting("backup_schedule_cron_expression", "Backup Schedule Cron Expression", backup.value.get("schedule_cron_expression", "")) + backup.delete() + except CoreSettings.DoesNotExist: + pass + + # Split system_settings + try: + system = CoreSettings.objects.get(key="system_settings") + if isinstance(system.value, dict): + create_setting("system_time_zone", "System Time Zone", system.value.get("time_zone", "UTC")) + create_setting("max_system_events", "Max System Events", system.value.get("max_system_events", 100)) + system.delete() + except CoreSettings.DoesNotExist: + pass + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0019_add_vlc_stream_profile'), + ] + + operations = [ + # First, convert all data to valid JSON strings while column is still CharField + migrations.RunPython(convert_string_to_json, migrations.RunPython.noop), + # Then change the field type to JSONField + migrations.AlterField( + model_name='coresettings', + name='value', + field=models.JSONField(blank=True, default=dict), + ), + # Finally, consolidate individual settings into grouped JSON objects + migrations.RunPython(consolidate_settings, reverse_migration), + ] diff --git a/core/models.py b/core/models.py index 3a5895ba..683acb0d 100644 --- a/core/models.py +++ b/core/models.py @@ -1,4 +1,7 @@ # core/models.py + +from shlex import split as shlex_split + from django.conf import settings from django.db import models from django.utils.text import slugify @@ -133,7 +136,7 @@ class StreamProfile(models.Model): # Split the command and iterate through each part to apply replacements cmd = [self.command] + [ self._replace_in_part(part, replacements) - for part in self.parameters.split() + for part in shlex_split(self.parameters) # use shlex to handle quoted strings ] return cmd @@ -145,24 +148,13 @@ class StreamProfile(models.Model): return part -DEFAULT_USER_AGENT_KEY = slugify("Default User-Agent") -DEFAULT_STREAM_PROFILE_KEY = slugify("Default Stream Profile") -STREAM_HASH_KEY = slugify("M3U Hash Key") -PREFERRED_REGION_KEY = slugify("Preferred Region") 
-AUTO_IMPORT_MAPPED_FILES = slugify("Auto-Import Mapped Files") -NETWORK_ACCESS = slugify("Network Access") -PROXY_SETTINGS_KEY = slugify("Proxy Settings") -DVR_TV_TEMPLATE_KEY = slugify("DVR TV Template") -DVR_MOVIE_TEMPLATE_KEY = slugify("DVR Movie Template") -DVR_SERIES_RULES_KEY = slugify("DVR Series Rules") -DVR_TV_FALLBACK_DIR_KEY = slugify("DVR TV Fallback Dir") -DVR_TV_FALLBACK_TEMPLATE_KEY = slugify("DVR TV Fallback Template") -DVR_MOVIE_FALLBACK_TEMPLATE_KEY = slugify("DVR Movie Fallback Template") -DVR_COMSKIP_ENABLED_KEY = slugify("DVR Comskip Enabled") -DVR_COMSKIP_CUSTOM_PATH_KEY = slugify("DVR Comskip Custom Path") -DVR_PRE_OFFSET_MINUTES_KEY = slugify("DVR Pre-Offset Minutes") -DVR_POST_OFFSET_MINUTES_KEY = slugify("DVR Post-Offset Minutes") -SYSTEM_TIME_ZONE_KEY = slugify("System Time Zone") +# Setting group keys +STREAM_SETTINGS_KEY = "stream_settings" +DVR_SETTINGS_KEY = "dvr_settings" +BACKUP_SETTINGS_KEY = "backup_settings" +PROXY_SETTINGS_KEY = "proxy_settings" +NETWORK_ACCESS_KEY = "network_access" +SYSTEM_SETTINGS_KEY = "system_settings" class CoreSettings(models.Model): @@ -173,205 +165,208 @@ class CoreSettings(models.Model): name = models.CharField( max_length=255, ) - value = models.CharField( - max_length=255, + value = models.JSONField( + default=dict, + blank=True, ) def __str__(self): return "Core Settings" + # Helper methods to get/set grouped settings + @classmethod + def _get_group(cls, key, defaults=None): + """Get a settings group, returning defaults if not found.""" + try: + return cls.objects.get(key=key).value or (defaults or {}) + except cls.DoesNotExist: + return defaults or {} + + @classmethod + def _update_group(cls, key, name, updates): + """Update specific fields in a settings group.""" + obj, created = cls.objects.get_or_create( + key=key, + defaults={"name": name, "value": {}} + ) + current = obj.value if isinstance(obj.value, dict) else {} + current.update(updates) + obj.value = current + obj.save() + return current + + # Stream Settings + @classmethod + def get_stream_settings(cls): + """Get all stream-related settings.""" + return cls._get_group(STREAM_SETTINGS_KEY, { + "default_user_agent": None, + "default_stream_profile": None, + "m3u_hash_key": "", + "preferred_region": None, + "auto_import_mapped_files": None, + }) + @classmethod def get_default_user_agent_id(cls): - """Retrieve a system profile by name (or return None if not found).""" - return cls.objects.get(key=DEFAULT_USER_AGENT_KEY).value + return cls.get_stream_settings().get("default_user_agent") @classmethod def get_default_stream_profile_id(cls): - return cls.objects.get(key=DEFAULT_STREAM_PROFILE_KEY).value + return cls.get_stream_settings().get("default_stream_profile") @classmethod def get_m3u_hash_key(cls): - return cls.objects.get(key=STREAM_HASH_KEY).value + return cls.get_stream_settings().get("m3u_hash_key", "") @classmethod def get_preferred_region(cls): - """Retrieve the preferred region setting (or return None if not found).""" - try: - return cls.objects.get(key=PREFERRED_REGION_KEY).value - except cls.DoesNotExist: - return None + return cls.get_stream_settings().get("preferred_region") @classmethod def get_auto_import_mapped_files(cls): - """Retrieve the preferred region setting (or return None if not found).""" - try: - return cls.objects.get(key=AUTO_IMPORT_MAPPED_FILES).value - except cls.DoesNotExist: - return None + return cls.get_stream_settings().get("auto_import_mapped_files") + # DVR Settings @classmethod - def get_proxy_settings(cls): - """Retrieve 
proxy settings as dict (or return defaults if not found).""" - try: - import json - settings_json = cls.objects.get(key=PROXY_SETTINGS_KEY).value - return json.loads(settings_json) - except (cls.DoesNotExist, json.JSONDecodeError): - # Return defaults if not found or invalid JSON - return { - "buffering_timeout": 15, - "buffering_speed": 1.0, - "redis_chunk_ttl": 60, - "channel_shutdown_delay": 0, - "channel_init_grace_period": 5, - } + def get_dvr_settings(cls): + """Get all DVR-related settings.""" + return cls._get_group(DVR_SETTINGS_KEY, { + "tv_template": "TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv", + "movie_template": "Movies/{title} ({year}).mkv", + "tv_fallback_dir": "TV_Shows", + "tv_fallback_template": "TV_Shows/{show}/{start}.mkv", + "movie_fallback_template": "Movies/{start}.mkv", + "comskip_enabled": False, + "comskip_custom_path": "", + "pre_offset_minutes": 0, + "post_offset_minutes": 0, + "series_rules": [], + }) @classmethod def get_dvr_tv_template(cls): - try: - return cls.objects.get(key=DVR_TV_TEMPLATE_KEY).value - except cls.DoesNotExist: - # Default: relative to recordings root (/data/recordings) - return "TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv" + return cls.get_dvr_settings().get("tv_template", "TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv") @classmethod def get_dvr_movie_template(cls): - try: - return cls.objects.get(key=DVR_MOVIE_TEMPLATE_KEY).value - except cls.DoesNotExist: - return "Movies/{title} ({year}).mkv" + return cls.get_dvr_settings().get("movie_template", "Movies/{title} ({year}).mkv") @classmethod def get_dvr_tv_fallback_dir(cls): - """Folder name to use when a TV episode has no season/episode information. - Defaults to 'TV_Show' to match existing behavior but can be overridden in settings. - """ - try: - return cls.objects.get(key=DVR_TV_FALLBACK_DIR_KEY).value or "TV_Shows" - except cls.DoesNotExist: - return "TV_Shows" + return cls.get_dvr_settings().get("tv_fallback_dir", "TV_Shows") @classmethod def get_dvr_tv_fallback_template(cls): - """Full path template used when season/episode are missing for a TV airing.""" - try: - return cls.objects.get(key=DVR_TV_FALLBACK_TEMPLATE_KEY).value - except cls.DoesNotExist: - # default requested by user - return "TV_Shows/{show}/{start}.mkv" + return cls.get_dvr_settings().get("tv_fallback_template", "TV_Shows/{show}/{start}.mkv") @classmethod def get_dvr_movie_fallback_template(cls): - """Full path template used when movie metadata is incomplete.""" - try: - return cls.objects.get(key=DVR_MOVIE_FALLBACK_TEMPLATE_KEY).value - except cls.DoesNotExist: - return "Movies/{start}.mkv" + return cls.get_dvr_settings().get("movie_fallback_template", "Movies/{start}.mkv") @classmethod def get_dvr_comskip_enabled(cls): - """Return boolean-like string value ('true'/'false') for comskip enablement.""" - try: - val = cls.objects.get(key=DVR_COMSKIP_ENABLED_KEY).value - return str(val).lower() in ("1", "true", "yes", "on") - except cls.DoesNotExist: - return False + return bool(cls.get_dvr_settings().get("comskip_enabled", False)) @classmethod def get_dvr_comskip_custom_path(cls): - """Return configured comskip.ini path or empty string if unset.""" - try: - return cls.objects.get(key=DVR_COMSKIP_CUSTOM_PATH_KEY).value - except cls.DoesNotExist: - return "" + return cls.get_dvr_settings().get("comskip_custom_path", "") @classmethod def set_dvr_comskip_custom_path(cls, path: str | None): - """Persist the comskip.ini path setting, normalizing nulls to empty string.""" value = (path or "").strip() - obj, _ = 
cls.objects.get_or_create( - key=DVR_COMSKIP_CUSTOM_PATH_KEY, - defaults={"name": "DVR Comskip Custom Path", "value": value}, - ) - if obj.value != value: - obj.value = value - obj.save(update_fields=["value"]) + cls._update_group(DVR_SETTINGS_KEY, "DVR Settings", {"comskip_custom_path": value}) return value @classmethod def get_dvr_pre_offset_minutes(cls): - """Minutes to start recording before scheduled start (default 0).""" - try: - val = cls.objects.get(key=DVR_PRE_OFFSET_MINUTES_KEY).value - return int(val) - except cls.DoesNotExist: - return 0 - except Exception: - try: - return int(float(val)) - except Exception: - return 0 + return int(cls.get_dvr_settings().get("pre_offset_minutes", 0) or 0) @classmethod def get_dvr_post_offset_minutes(cls): - """Minutes to stop recording after scheduled end (default 0).""" - try: - val = cls.objects.get(key=DVR_POST_OFFSET_MINUTES_KEY).value - return int(val) - except cls.DoesNotExist: - return 0 - except Exception: - try: - return int(float(val)) - except Exception: - return 0 - - @classmethod - def get_system_time_zone(cls): - """Return configured system time zone or fall back to Django settings.""" - try: - value = cls.objects.get(key=SYSTEM_TIME_ZONE_KEY).value - if value: - return value - except cls.DoesNotExist: - pass - return getattr(settings, "TIME_ZONE", "UTC") or "UTC" - - @classmethod - def set_system_time_zone(cls, tz_name: str | None): - """Persist the desired system time zone identifier.""" - value = (tz_name or "").strip() or getattr(settings, "TIME_ZONE", "UTC") or "UTC" - obj, _ = cls.objects.get_or_create( - key=SYSTEM_TIME_ZONE_KEY, - defaults={"name": "System Time Zone", "value": value}, - ) - if obj.value != value: - obj.value = value - obj.save(update_fields=["value"]) - return value + return int(cls.get_dvr_settings().get("post_offset_minutes", 0) or 0) @classmethod def get_dvr_series_rules(cls): - """Return list of series recording rules. 
Each: {tvg_id, title, mode: 'all'|'new'}""" - import json - try: - raw = cls.objects.get(key=DVR_SERIES_RULES_KEY).value - rules = json.loads(raw) if raw else [] - if isinstance(rules, list): - return rules - return [] - except cls.DoesNotExist: - # Initialize empty if missing - cls.objects.create(key=DVR_SERIES_RULES_KEY, name="DVR Series Rules", value="[]") - return [] + return cls.get_dvr_settings().get("series_rules", []) @classmethod def set_dvr_series_rules(cls, rules): - import json - try: - obj, _ = cls.objects.get_or_create(key=DVR_SERIES_RULES_KEY, defaults={"name": "DVR Series Rules", "value": "[]"}) - obj.value = json.dumps(rules) - obj.save(update_fields=["value"]) - return rules - except Exception: - return rules + cls._update_group(DVR_SETTINGS_KEY, "DVR Settings", {"series_rules": rules}) + return rules + + # Proxy Settings + @classmethod + def get_proxy_settings(cls): + """Get proxy settings.""" + return cls._get_group(PROXY_SETTINGS_KEY, { + "buffering_timeout": 15, + "buffering_speed": 1.0, + "redis_chunk_ttl": 60, + "channel_shutdown_delay": 0, + "channel_init_grace_period": 5, + }) + + # System Settings + @classmethod + def get_system_settings(cls): + """Get all system-related settings.""" + return cls._get_group(SYSTEM_SETTINGS_KEY, { + "time_zone": getattr(settings, "TIME_ZONE", "UTC") or "UTC", + "max_system_events": 100, + }) + + @classmethod + def get_system_time_zone(cls): + return cls.get_system_settings().get("time_zone") or getattr(settings, "TIME_ZONE", "UTC") or "UTC" + + @classmethod + def set_system_time_zone(cls, tz_name: str | None): + value = (tz_name or "").strip() or getattr(settings, "TIME_ZONE", "UTC") or "UTC" + cls._update_group(SYSTEM_SETTINGS_KEY, "System Settings", {"time_zone": value}) + return value + + +class SystemEvent(models.Model): + """ + Tracks system events like channel start/stop, buffering, failover, client connections. + Maintains a rolling history based on max_system_events setting. 
+ """ + EVENT_TYPES = [ + ('channel_start', 'Channel Started'), + ('channel_stop', 'Channel Stopped'), + ('channel_buffering', 'Channel Buffering'), + ('channel_failover', 'Channel Failover'), + ('channel_reconnect', 'Channel Reconnected'), + ('channel_error', 'Channel Error'), + ('client_connect', 'Client Connected'), + ('client_disconnect', 'Client Disconnected'), + ('recording_start', 'Recording Started'), + ('recording_end', 'Recording Ended'), + ('stream_switch', 'Stream Switched'), + ('m3u_refresh', 'M3U Refreshed'), + ('m3u_download', 'M3U Downloaded'), + ('epg_refresh', 'EPG Refreshed'), + ('epg_download', 'EPG Downloaded'), + ('login_success', 'Login Successful'), + ('login_failed', 'Login Failed'), + ('logout', 'User Logged Out'), + ('m3u_blocked', 'M3U Download Blocked'), + ('epg_blocked', 'EPG Download Blocked'), + ] + + event_type = models.CharField(max_length=50, choices=EVENT_TYPES, db_index=True) + timestamp = models.DateTimeField(auto_now_add=True, db_index=True) + channel_id = models.UUIDField(null=True, blank=True, db_index=True) + channel_name = models.CharField(max_length=255, null=True, blank=True) + details = models.JSONField(default=dict, blank=True) + + class Meta: + ordering = ['-timestamp'] + indexes = [ + models.Index(fields=['-timestamp']), + models.Index(fields=['event_type', '-timestamp']), + ] + + def __str__(self): + return f"{self.event_type} - {self.channel_name or 'N/A'} @ {self.timestamp}" diff --git a/core/serializers.py b/core/serializers.py index c6029bc4..b2bd8ecc 100644 --- a/core/serializers.py +++ b/core/serializers.py @@ -3,7 +3,7 @@ import json import ipaddress from rest_framework import serializers -from .models import CoreSettings, UserAgent, StreamProfile, NETWORK_ACCESS +from .models import CoreSettings, UserAgent, StreamProfile, NETWORK_ACCESS_KEY class UserAgentSerializer(serializers.ModelSerializer): @@ -40,10 +40,10 @@ class CoreSettingsSerializer(serializers.ModelSerializer): fields = "__all__" def update(self, instance, validated_data): - if instance.key == NETWORK_ACCESS: + if instance.key == NETWORK_ACCESS_KEY: errors = False invalid = {} - value = json.loads(validated_data.get("value")) + value = validated_data.get("value") for key, val in value.items(): cidrs = val.split(",") for cidr in cidrs: diff --git a/core/tasks.py b/core/tasks.py index f757613b..207e7c5e 100644 --- a/core/tasks.py +++ b/core/tasks.py @@ -513,7 +513,8 @@ def rehash_streams(keys): for obj in batch: # Generate new hash - new_hash = Stream.generate_hash_key(obj.name, obj.url, obj.tvg_id, keys, m3u_id=obj.m3u_account_id) + group_name = obj.channel_group.name if obj.channel_group else None + new_hash = Stream.generate_hash_key(obj.name, obj.url, obj.tvg_id, keys, m3u_id=obj.m3u_account_id, group=group_name) # Check if this hash already exists in our tracking dict or in database if new_hash in hash_keys: diff --git a/core/utils.py b/core/utils.py index 36ac5fef..e3d6c389 100644 --- a/core/utils.py +++ b/core/utils.py @@ -377,12 +377,63 @@ def validate_flexible_url(value): import re # More flexible pattern for non-FQDN hostnames with paths - # Matches: http://hostname, http://hostname/, http://hostname:port/path/to/file.xml - non_fqdn_pattern = r'^https?://[a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?(\:[0-9]+)?(/[^\s]*)?$' + # Matches: http://hostname, https://hostname/, http://hostname:port/path/to/file.xml, rtp://192.168.2.1, rtsp://192.168.178.1, udp://239.0.0.1:1234 + # Also matches FQDNs for rtsp/rtp/udp protocols: rtsp://FQDN/path?query=value + # Also 
supports authentication: rtsp://user:pass@hostname/path + non_fqdn_pattern = r'^(rts?p|https?|udp)://([a-zA-Z0-9_\-\.]+:[^\s@]+@)?([a-zA-Z0-9]([a-zA-Z0-9\-\.]{0,61}[a-zA-Z0-9])?|[0-9.]+)?(\:[0-9]+)?(/[^\s]*)?$' non_fqdn_match = re.match(non_fqdn_pattern, value) if non_fqdn_match: - return # Accept non-FQDN hostnames + return # Accept non-FQDN hostnames and rtsp/rtp/udp URLs with optional authentication # If it doesn't match our flexible patterns, raise the original error raise ValidationError("Enter a valid URL.") + + +def log_system_event(event_type, channel_id=None, channel_name=None, **details): + """ + Log a system event and maintain the configured max history. + + Args: + event_type: Type of event (e.g., 'channel_start', 'client_connect') + channel_id: Optional UUID of the channel + channel_name: Optional name of the channel + **details: Additional details to store in the event (stored as JSON) + + Example: + log_system_event('channel_start', channel_id=uuid, channel_name='CNN', + stream_url='http://...', user='admin') + """ + from core.models import SystemEvent, CoreSettings + + try: + # Create the event + SystemEvent.objects.create( + event_type=event_type, + channel_id=channel_id, + channel_name=channel_name, + details=details + ) + + # Get max events from settings (default 100) + try: + from .models import CoreSettings + system_settings = CoreSettings.objects.filter(key='system_settings').first() + if system_settings and isinstance(system_settings.value, dict): + max_events = int(system_settings.value.get('max_system_events', 100)) + else: + max_events = 100 + except Exception: + max_events = 100 + + # Delete old events beyond the limit (keep it efficient with a single query) + total_count = SystemEvent.objects.count() + if total_count > max_events: + # Get the ID of the event at the cutoff point + cutoff_event = SystemEvent.objects.values_list('id', flat=True)[max_events] + # Delete all events with ID less than cutoff (older events) + SystemEvent.objects.filter(id__lt=cutoff_event).delete() + + except Exception as e: + # Don't let event logging break the main application + logger.error(f"Failed to log system event {event_type}: {e}") diff --git a/core/views.py b/core/views.py index d10df027..5806d63c 100644 --- a/core/views.py +++ b/core/views.py @@ -1,5 +1,6 @@ # core/views.py import os +from shlex import split as shlex_split import sys import subprocess import logging @@ -37,7 +38,9 @@ def stream_view(request, channel_uuid): """ try: redis_host = getattr(settings, "REDIS_HOST", "localhost") - redis_client = redis.Redis(host=settings.REDIS_HOST, port=6379, db=int(getattr(settings, "REDIS_DB", "0"))) + redis_port = int(getattr(settings, "REDIS_PORT", 6379)) + redis_db = int(getattr(settings, "REDIS_DB", "0")) + redis_client = redis.Redis(host=redis_host, port=redis_port, db=redis_db) # Retrieve the channel by the provided stream_id. channel = Channel.objects.get(uuid=channel_uuid) @@ -129,7 +132,7 @@ def stream_view(request, channel_uuid): stream_profile = channel.stream_profile if not stream_profile: logger.error("No stream profile set for channel ID=%s, using default", channel.id) - stream_profile = StreamProfile.objects.get(id=CoreSettings.objects.get(key="default-stream-profile").value) + stream_profile = StreamProfile.objects.get(id=CoreSettings.get_default_stream_profile_id()) logger.debug("Stream profile used: %s", stream_profile.name) @@ -142,7 +145,7 @@ def stream_view(request, channel_uuid): logger.debug("Formatted parameters: %s", parameters) # Build the final command. 
- cmd = [stream_profile.command] + parameters.split() + cmd = [stream_profile.command] + shlex_split(parameters) logger.debug("Executing command: %s", cmd) try: diff --git a/dispatcharr/persistent_lock.py b/dispatcharr/persistent_lock.py index 360c9b5d..27d480be 100644 --- a/dispatcharr/persistent_lock.py +++ b/dispatcharr/persistent_lock.py @@ -73,8 +73,12 @@ class PersistentLock: # Example usage (for testing purposes only): if __name__ == "__main__": - # Connect to Redis on localhost; adjust connection parameters as needed. - client = redis.Redis(host="localhost", port=6379, db=0) + import os + # Connect to Redis using environment variables; adjust connection parameters as needed. + redis_host = os.environ.get("REDIS_HOST", "localhost") + redis_port = int(os.environ.get("REDIS_PORT", 6379)) + redis_db = int(os.environ.get("REDIS_DB", 0)) + client = redis.Redis(host=redis_host, port=redis_port, db=redis_db) lock = PersistentLock(client, "lock:example_account", lock_timeout=120) if lock.acquire(): diff --git a/dispatcharr/settings.py b/dispatcharr/settings.py index a0c4fc84..1a9a1a44 100644 --- a/dispatcharr/settings.py +++ b/dispatcharr/settings.py @@ -4,8 +4,9 @@ from datetime import timedelta BASE_DIR = Path(__file__).resolve().parent.parent -SECRET_KEY = "REPLACE_ME_WITH_A_REAL_SECRET" +SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY") REDIS_HOST = os.environ.get("REDIS_HOST", "localhost") +REDIS_PORT = int(os.environ.get("REDIS_PORT", 6379)) REDIS_DB = os.environ.get("REDIS_DB", "0") # Set DEBUG to True for development, False for production @@ -51,6 +52,11 @@ EPG_BATCH_SIZE = 1000 # Number of records to process in a batch EPG_MEMORY_LIMIT = 512 # Memory limit in MB before forcing garbage collection EPG_ENABLE_MEMORY_MONITORING = True # Whether to monitor memory usage during processing +# XtreamCodes Rate Limiting Settings +# Delay between profile authentications when refreshing multiple profiles +# This prevents providers from temporarily banning users with many profiles +XC_PROFILE_REFRESH_DELAY = float(os.environ.get('XC_PROFILE_REFRESH_DELAY', '2.5')) # seconds between profile refreshes + # Database optimization settings DATABASE_STATEMENT_TIMEOUT = 300 # Seconds before timing out long-running queries DATABASE_CONN_MAX_AGE = ( @@ -113,7 +119,7 @@ CHANNEL_LAYERS = { "default": { "BACKEND": "channels_redis.core.RedisChannelLayer", "CONFIG": { - "hosts": [(REDIS_HOST, 6379, REDIS_DB)], # Ensure Redis is running + "hosts": [(REDIS_HOST, REDIS_PORT, REDIS_DB)], # Ensure Redis is running }, }, } @@ -179,8 +185,10 @@ STATICFILES_DIRS = [ DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField" AUTH_USER_MODEL = "accounts.User" -CELERY_BROKER_URL = os.environ.get("CELERY_BROKER_URL", "redis://localhost:6379/0") -CELERY_RESULT_BACKEND = CELERY_BROKER_URL +# Build default Redis URL from components for Celery +_default_redis_url = f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}" +CELERY_BROKER_URL = os.environ.get("CELERY_BROKER_URL", _default_redis_url) +CELERY_RESULT_BACKEND = os.environ.get("CELERY_RESULT_BACKEND", CELERY_BROKER_URL) # Configure Redis key prefix CELERY_RESULT_BACKEND_TRANSPORT_OPTIONS = { @@ -221,6 +229,13 @@ CELERY_BEAT_SCHEDULE = { MEDIA_ROOT = BASE_DIR / "media" MEDIA_URL = "/media/" +# Backup settings +BACKUP_ROOT = os.environ.get("BACKUP_ROOT", "/data/backups") +BACKUP_DATA_DIRS = [ + os.environ.get("LOGOS_DIR", "/data/logos"), + os.environ.get("UPLOADS_DIR", "/data/uploads"), + os.environ.get("PLUGINS_DIR", "/data/plugins"), +] SERVER_IP = "127.0.0.1" @@ -237,7 +252,7 @@ 
SIMPLE_JWT = { } # Redis connection settings -REDIS_URL = "redis://localhost:6379/0" +REDIS_URL = os.environ.get("REDIS_URL", f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}") REDIS_SOCKET_TIMEOUT = 60 # Socket timeout in seconds REDIS_SOCKET_CONNECT_TIMEOUT = 5 # Connection timeout in seconds REDIS_HEALTH_CHECK_INTERVAL = 15 # Health check every 15 seconds diff --git a/dispatcharr/utils.py b/dispatcharr/utils.py index 260515fc..e588bcaa 100644 --- a/dispatcharr/utils.py +++ b/dispatcharr/utils.py @@ -3,7 +3,7 @@ import json import ipaddress from django.http import JsonResponse from django.core.exceptions import ValidationError -from core.models import CoreSettings, NETWORK_ACCESS +from core.models import CoreSettings, NETWORK_ACCESS_KEY def json_error_response(message, status=400): @@ -39,12 +39,15 @@ def get_client_ip(request): def network_access_allowed(request, settings_key): - network_access = json.loads(CoreSettings.objects.get(key=NETWORK_ACCESS).value) + try: + network_access = CoreSettings.objects.get(key=NETWORK_ACCESS_KEY).value + except CoreSettings.DoesNotExist: + network_access = {} cidrs = ( network_access[settings_key].split(",") if settings_key in network_access - else ["0.0.0.0/0"] + else ["0.0.0.0/0", "::/0"] ) network_allowed = False diff --git a/docker/DispatcharrBase b/docker/DispatcharrBase index d37d8958..149bfffb 100644 --- a/docker/DispatcharrBase +++ b/docker/DispatcharrBase @@ -4,26 +4,44 @@ ENV DEBIAN_FRONTEND=noninteractive ENV VIRTUAL_ENV=/dispatcharrpy ENV PATH="$VIRTUAL_ENV/bin:$PATH" -# --- Install Python 3.13 and system dependencies --- +# --- Install Python 3.13 and build dependencies --- # Note: Hardware acceleration (VA-API, VDPAU, NVENC) already included in base ffmpeg image RUN apt-get update && apt-get install --no-install-recommends -y \ ca-certificates software-properties-common gnupg2 curl wget \ && add-apt-repository ppa:deadsnakes/ppa \ && apt-get update \ && apt-get install --no-install-recommends -y \ - python3.13 python3.13-dev python3.13-venv \ + python3.13 python3.13-dev python3.13-venv libpython3.13 \ python-is-python3 python3-pip \ - libpcre3 libpcre3-dev libpq-dev procps \ - build-essential gcc pciutils \ - nginx streamlink comskip\ - && apt-get clean && rm -rf /var/lib/apt/lists/* + libpcre3 libpcre3-dev libpq-dev procps pciutils \ + nginx streamlink comskip \ + vlc-bin vlc-plugin-base \ + build-essential gcc g++ gfortran libopenblas-dev libopenblas0 ninja-build # --- Create Python virtual environment --- RUN python3.13 -m venv $VIRTUAL_ENV && $VIRTUAL_ENV/bin/pip install --upgrade pip # --- Install Python dependencies --- COPY requirements.txt /tmp/requirements.txt -RUN $VIRTUAL_ENV/bin/pip install --no-cache-dir -r /tmp/requirements.txt && rm /tmp/requirements.txt +RUN $VIRTUAL_ENV/bin/pip install --no-cache-dir -r /tmp/requirements.txt && \ + rm /tmp/requirements.txt + +# --- Build legacy NumPy wheel for old hardware (store for runtime switching) --- +RUN $VIRTUAL_ENV/bin/pip install --no-cache-dir build && \ + cd /tmp && \ + $VIRTUAL_ENV/bin/pip download --no-binary numpy --no-deps numpy && \ + tar -xzf numpy-*.tar.gz && \ + cd numpy-*/ && \ + $VIRTUAL_ENV/bin/python -m build --wheel -Csetup-args=-Dcpu-baseline="none" -Csetup-args=-Dcpu-dispatch="none" && \ + mv dist/*.whl /opt/ && \ + cd / && rm -rf /tmp/numpy-* /tmp/*.tar.gz && \ + $VIRTUAL_ENV/bin/pip uninstall -y build + +# --- Clean up build dependencies to reduce image size --- +RUN apt-get remove -y build-essential gcc g++ gfortran libopenblas-dev libpcre3-dev python3.13-dev 
ninja-build && \ + apt-get autoremove -y --purge && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* /root/.cache /tmp/* # --- Set up Redis 7.x --- RUN curl -fsSL https://packages.redis.io/gpg | gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg && \ diff --git a/docker/Dockerfile b/docker/Dockerfile index dc437227..bfb35c11 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -35,9 +35,6 @@ RUN rm -rf /app/frontend # Copy built frontend assets COPY --from=frontend-builder /app/frontend/dist /app/frontend/dist -# Run Django collectstatic -RUN python manage.py collectstatic --noinput - # Add timestamp argument ARG TIMESTAMP diff --git a/docker/build-dev.sh b/docker/build-dev.sh index b02c314e..61640814 100755 --- a/docker/build-dev.sh +++ b/docker/build-dev.sh @@ -1,11 +1,65 @@ -#!/bin/bash -docker build --build-arg BRANCH=dev -t dispatcharr/dispatcharr:dev -f Dockerfile .. +#!/bin/bash +set -e + +# Default values +VERSION=$(python3 -c "import sys; sys.path.append('..'); import version; print(version.__version__)") +REGISTRY="dispatcharr" # Registry or private repo to push to +IMAGE="dispatcharr" # Image that we're building +BRANCH="dev" +ARCH="" # Architectures to build for, e.g. linux/amd64,linux/arm64 +PUSH=false + +usage() { + cat <<- EOF + To test locally: + ./build-dev.sh + + To build and push to registry: + ./build-dev.sh -p + + To build and push to a private registry: + ./build-dev.sh -p -r myregistry:5000 + + To build for both x86_64 and arm64: + ./build-dev.sh -p -a linux/amd64,linux/arm64 + + Do it all: + ./build-dev.sh -p -r myregistry:5000 -a linux/amd64,linux/arm64 +EOF +exit 0 +} + +# Parse options +while getopts "pr:a:b:i:h" opt; do + case $opt in + r) REGISTRY="$OPTARG" ;; + a) ARCH="--platform $OPTARG" ;; + b) BRANCH="$OPTARG" ;; + i) IMAGE="$OPTARG" ;; + p) PUSH=true ;; + h) usage ;; + \?) echo "Invalid option: -$OPTARG" >&2; exit 1 ;; + esac +done + +BUILD_ARGS="BRANCH=$BRANCH" + +echo docker build -f Dockerfile --build-arg $BUILD_ARGS $ARCH -t $IMAGE .. +docker build -f Dockerfile --build-arg $BUILD_ARGS $ARCH -t $IMAGE .. +docker tag $IMAGE $IMAGE:$BRANCH +docker tag $IMAGE $IMAGE:$VERSION + +if [ "$PUSH" = false ]; then + echo "Images tagged locally as $IMAGE:$BRANCH and $IMAGE:$VERSION. Re-run with -p to push to $REGISTRY." +else + for TAG in latest "$VERSION" "$BRANCH"; do + docker tag "$IMAGE" "$REGISTRY/$IMAGE:$TAG" + docker push -q "$REGISTRY/$IMAGE:$TAG" + done + echo "Images pushed successfully." +fi + + + -# Get version information -VERSION=$(python -c "import sys; sys.path.append('..'); import version; print(version.__version__)") -# Build with version tag -docker build --build-arg BRANCH=dev \ - -t dispatcharr/dispatcharr:dev \ - -t dispatcharr/dispatcharr:${VERSION} \ - -f Dockerfile .. diff --git a/docker/docker-compose.aio.yml b/docker/docker-compose.aio.yml index fe5e1507..2b1fd2ae 100644 --- a/docker/docker-compose.aio.yml +++ b/docker/docker-compose.aio.yml @@ -14,6 +14,10 @@ services: - REDIS_HOST=localhost - CELERY_BROKER_URL=redis://localhost:6379/0 - DISPATCHARR_LOG_LEVEL=info + # Legacy CPU Support (Optional) + # Uncomment to enable legacy NumPy build for older CPUs (circa 2009) + # that lack support for newer baseline CPU features + #- USE_LEGACY_NUMPY=true # Process Priority Configuration (Optional) # Lower values = higher priority. 
Range: -20 (highest) to 19 (lowest) # Negative values require cap_add: SYS_NICE (uncomment below) diff --git a/docker/docker-compose.debug.yml b/docker/docker-compose.debug.yml index d9dbef0e..c576cfd1 100644 --- a/docker/docker-compose.debug.yml +++ b/docker/docker-compose.debug.yml @@ -18,6 +18,10 @@ services: - REDIS_HOST=localhost - CELERY_BROKER_URL=redis://localhost:6379/0 - DISPATCHARR_LOG_LEVEL=trace + # Legacy CPU Support (Optional) + # Uncomment to enable legacy NumPy build for older CPUs (circa 2009) + # that lack support for newer baseline CPU features + #- USE_LEGACY_NUMPY=true # Process Priority Configuration (Optional) # Lower values = higher priority. Range: -20 (highest) to 19 (lowest) # Negative values require cap_add: SYS_NICE (uncomment below) diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml index d1bb3680..b20c3296 100644 --- a/docker/docker-compose.dev.yml +++ b/docker/docker-compose.dev.yml @@ -17,6 +17,10 @@ services: - REDIS_HOST=localhost - CELERY_BROKER_URL=redis://localhost:6379/0 - DISPATCHARR_LOG_LEVEL=debug + # Legacy CPU Support (Optional) + # Uncomment to enable legacy NumPy build for older CPUs (circa 2009) + # that lack support for newer baseline CPU features + #- USE_LEGACY_NUMPY=true # Process Priority Configuration (Optional) # Lower values = higher priority. Range: -20 (highest) to 19 (lowest) # Negative values require cap_add: SYS_NICE (uncomment below) diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index aaa63990..e4093e4b 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -17,6 +17,10 @@ services: - REDIS_HOST=redis - CELERY_BROKER_URL=redis://redis:6379/0 - DISPATCHARR_LOG_LEVEL=info + # Legacy CPU Support (Optional) + # Uncomment to enable legacy NumPy build for older CPUs (circa 2009) + # that lack support for newer baseline CPU features + #- USE_LEGACY_NUMPY=true # Process Priority Configuration (Optional) # Lower values = higher priority. Range: -20 (highest) to 19 (lowest) # Negative values require cap_add: SYS_NICE (uncomment below) diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index fa0eea01..a50f2f49 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -27,6 +27,18 @@ echo_with_timestamp() { echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" } +# --- NumPy version switching for legacy hardware --- +if [ "$USE_LEGACY_NUMPY" = "true" ]; then + # Check if NumPy was compiled with baseline support + if /dispatcharrpy/bin/python -c "import numpy; numpy.show_config()" 2>&1 | grep -qi "baseline"; then + echo_with_timestamp "🔧 Switching to legacy NumPy (no CPU baseline)..." + /dispatcharrpy/bin/pip install --no-cache-dir --force-reinstall --no-deps /opt/numpy-*.whl + echo_with_timestamp "✅ Legacy NumPy installed" + else + echo_with_timestamp "✅ Legacy NumPy (no baseline) already installed, skipping reinstallation" + fi +fi + # Set PostgreSQL environment variables export POSTGRES_DB=${POSTGRES_DB:-dispatcharr} export POSTGRES_USER=${POSTGRES_USER:-dispatch} @@ -40,6 +52,21 @@ export REDIS_DB=${REDIS_DB:-0} export DISPATCHARR_PORT=${DISPATCHARR_PORT:-9191} export LIBVA_DRIVERS_PATH='/usr/local/lib/x86_64-linux-gnu/dri' export LD_LIBRARY_PATH='/usr/local/lib' +export SECRET_FILE="/data/jwt" +# Ensure Django secret key exists or generate a new one +if [ ! -f "$SECRET_FILE" ]; then + echo "Generating new Django secret key..." 
+ old_umask=$(umask) + umask 077 + tmpfile="$(mktemp "${SECRET_FILE}.XXXXXX")" || { echo "mktemp failed"; exit 1; } + python3 - <<'PY' >"$tmpfile" || { echo "secret generation failed"; rm -f "$tmpfile"; exit 1; } +import secrets +print(secrets.token_urlsafe(64)) +PY + mv -f "$tmpfile" "$SECRET_FILE" || { echo "move failed"; rm -f "$tmpfile"; exit 1; } + umask $old_umask +fi +export DJANGO_SECRET_KEY="$(cat "$SECRET_FILE")" # Process priority configuration # UWSGI_NICE_LEVEL: Absolute nice value for uWSGI/streaming (default: 0 = normal priority) @@ -85,12 +112,12 @@ export POSTGRES_DIR=/data/db if [[ ! -f /etc/profile.d/dispatcharr.sh ]]; then # Define all variables to process variables=( - PATH VIRTUAL_ENV DJANGO_SETTINGS_MODULE PYTHONUNBUFFERED + PATH VIRTUAL_ENV DJANGO_SETTINGS_MODULE PYTHONUNBUFFERED PYTHONDONTWRITEBYTECODE POSTGRES_DB POSTGRES_USER POSTGRES_PASSWORD POSTGRES_HOST POSTGRES_PORT DISPATCHARR_ENV DISPATCHARR_DEBUG DISPATCHARR_LOG_LEVEL REDIS_HOST REDIS_DB POSTGRES_DIR DISPATCHARR_PORT DISPATCHARR_VERSION DISPATCHARR_TIMESTAMP LIBVA_DRIVERS_PATH LIBVA_DRIVER_NAME LD_LIBRARY_PATH - CELERY_NICE_LEVEL UWSGI_NICE_LEVEL + CELERY_NICE_LEVEL UWSGI_NICE_LEVEL DJANGO_SECRET_KEY ) # Process each variable for both profile.d and environment @@ -159,9 +186,9 @@ else pids+=("$nginx_pid") fi -cd /app -python manage.py migrate --noinput -python manage.py collectstatic --noinput +# Run Django commands as non-root user to prevent permission issues +su - $POSTGRES_USER -c "cd /app && python manage.py migrate --noinput" +su - $POSTGRES_USER -c "cd /app && python manage.py collectstatic --noinput" # Select proper uwsgi config based on environment if [ "$DISPATCHARR_ENV" = "dev" ] && [ "$DISPATCHARR_DEBUG" != "true" ]; then @@ -187,7 +214,7 @@ fi # Users can override via UWSGI_NICE_LEVEL environment variable in docker-compose # Start with nice as root, then use setpriv to drop privileges to dispatch user # This preserves both the nice value and environment variables -nice -n $UWSGI_NICE_LEVEL su -p - "$POSTGRES_USER" -c "cd /app && exec uwsgi $uwsgi_args" & uwsgi_pid=$! +nice -n $UWSGI_NICE_LEVEL su - "$POSTGRES_USER" -c "cd /app && exec /dispatcharrpy/bin/uwsgi $uwsgi_args" & uwsgi_pid=$! echo "✅ uwsgi started with PID $uwsgi_pid (nice $UWSGI_NICE_LEVEL)" pids+=("$uwsgi_pid") diff --git a/docker/init/03-init-dispatcharr.sh b/docker/init/03-init-dispatcharr.sh index 2c769241..0c317017 100644 --- a/docker/init/03-init-dispatcharr.sh +++ b/docker/init/03-init-dispatcharr.sh @@ -9,11 +9,13 @@ DATA_DIRS=( "/data/m3us" "/data/epgs" "/data/plugins" + "/data/models" ) APP_DIRS=( "/app/logo_cache" "/app/media" + "/app/static" ) # Create all directories @@ -28,9 +30,21 @@ if [ "$(id -u)" = "0" ] && [ -d "/app" ]; then chown $PUID:$PGID /app fi fi - +# Configure nginx port +if ! 
[[ "$DISPATCHARR_PORT" =~ ^[0-9]+$ ]]; then + echo "⚠️ Warning: DISPATCHARR_PORT is not a valid integer, using default port 9191" + DISPATCHARR_PORT=9191 +fi sed -i "s/NGINX_PORT/${DISPATCHARR_PORT}/g" /etc/nginx/sites-enabled/default +# Configure nginx based on IPv6 availability +if ip -6 addr show | grep -q "inet6"; then + echo "✅ IPv6 is available, enabling IPv6 in nginx" +else + echo "⚠️ IPv6 not available, disabling IPv6 in nginx" + sed -i '/listen \[::\]:/d' /etc/nginx/sites-enabled/default +fi + # NOTE: mac doesn't run as root, so only manage permissions # if this script is running as root if [ "$(id -u)" = "0" ]; then @@ -56,5 +70,11 @@ if [ "$(id -u)" = "0" ]; then chown -R postgres:postgres /data/db fi + # Fix /data directory ownership (non-recursive) + if [ -d "/data" ] && [ "$(stat -c '%u:%g' /data)" != "$PUID:$PGID" ]; then + echo "Fixing ownership for /data (non-recursive)" + chown $PUID:$PGID /data + fi + chmod +x /data fi \ No newline at end of file diff --git a/docker/nginx.conf b/docker/nginx.conf index 5e754d20..406d587c 100644 --- a/docker/nginx.conf +++ b/docker/nginx.conf @@ -3,6 +3,7 @@ proxy_cache_path /app/logo_cache levels=1:2 keys_zone=logo_cache:10m server { listen NGINX_PORT; + listen [::]:NGINX_PORT; proxy_connect_timeout 75; proxy_send_timeout 300; @@ -34,6 +35,13 @@ server { root /data; } + # Internal location for X-Accel-Redirect backup downloads + # Django handles auth, nginx serves the file directly + location /protected-backups/ { + internal; + alias /data/backups/; + } + location /api/logos/(?\d+)/cache/ { proxy_pass http://127.0.0.1:5656; proxy_cache logo_cache; diff --git a/docker/uwsgi.debug.ini b/docker/uwsgi.debug.ini index 3de890a5..69c040f2 100644 --- a/docker/uwsgi.debug.ini +++ b/docker/uwsgi.debug.ini @@ -20,7 +20,6 @@ module = scripts.debug_wrapper:application virtualenv = /dispatcharrpy master = true env = DJANGO_SETTINGS_MODULE=dispatcharr.settings - socket = /app/uwsgi.sock chmod-socket = 777 vacuum = true diff --git a/docker/uwsgi.ini b/docker/uwsgi.ini index f8fe8ab7..920bac48 100644 --- a/docker/uwsgi.ini +++ b/docker/uwsgi.ini @@ -21,6 +21,7 @@ module = dispatcharr.wsgi:application virtualenv = /dispatcharrpy master = true env = DJANGO_SETTINGS_MODULE=dispatcharr.settings +env = USE_NGINX_ACCEL=true socket = /app/uwsgi.sock chmod-socket = 777 vacuum = true @@ -36,6 +37,7 @@ http-keepalive = 1 buffer-size = 65536 # Increase buffer for large payloads post-buffering = 4096 # Reduce buffering for real-time streaming http-timeout = 600 # Prevent disconnects from long streams +socket-timeout = 600 # Prevent write timeouts when client buffers lazy-apps = true # Improve memory efficiency # Async mode (use gevent for high concurrency) @@ -57,4 +59,4 @@ logformat-strftime = true log-date = %%Y-%%m-%%d %%H:%%M:%%S,000 # Use formatted time with environment variable for log level log-format = %(ftime) $(DISPATCHARR_LOG_LEVEL) uwsgi.requests Worker ID: %(wid) %(method) %(status) %(uri) %(msecs)ms -log-buffering = 1024 # Add buffer size limit for logging \ No newline at end of file +log-buffering = 1024 # Add buffer size limit for logging diff --git a/fixtures.json b/fixtures.json index 2d42f84e..3c31f926 100644 --- a/fixtures.json +++ b/fixtures.json @@ -36,7 +36,7 @@ "model": "core.streamprofile", "pk": 1, "fields": { - "profile_name": "ffmpeg", + "profile_name": "FFmpeg", "command": "ffmpeg", "parameters": "-i {streamUrl} -c:a copy -c:v copy -f mpegts pipe:1", "is_active": true, @@ -46,13 +46,23 @@ { "model": "core.streamprofile", "fields": { - 
"profile_name": "streamlink", + "profile_name": "Streamlink", "command": "streamlink", "parameters": "{streamUrl} best --stdout", "is_active": true, "user_agent": "1" } }, + { + "model": "core.streamprofile", + "fields": { + "profile_name": "VLC", + "command": "cvlc", + "parameters": "-vv -I dummy --no-video-title-show --http-user-agent {userAgent} {streamUrl} --sout #standard{access=file,mux=ts,dst=-}", + "is_active": true, + "user_agent": "1" + } + }, { "model": "core.coresettings", "fields": { diff --git a/frontend/package-lock.json b/frontend/package-lock.json index 780aabe1..ed9e6010 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -12,6 +12,7 @@ "@dnd-kit/modifiers": "^9.0.0", "@dnd-kit/sortable": "^10.0.0", "@dnd-kit/utilities": "^3.2.2", + "@hookform/resolvers": "^5.2.2", "@mantine/charts": "~8.0.1", "@mantine/core": "~8.0.1", "@mantine/dates": "~8.0.1", @@ -22,13 +23,13 @@ "@tanstack/react-table": "^8.21.2", "allotment": "^1.20.4", "dayjs": "^1.11.13", - "formik": "^2.4.6", "hls.js": "^1.5.20", "lucide-react": "^0.511.0", "mpegts.js": "^1.8.0", "react": "^19.1.0", "react-dom": "^19.1.0", "react-draggable": "^4.4.6", + "react-hook-form": "^7.70.0", "react-pro-sidebar": "^1.1.0", "react-router-dom": "^7.3.0", "react-virtualized": "^9.22.6", @@ -50,16 +51,23 @@ "@types/react": "^19.1.0", "@types/react-dom": "^19.1.0", "@vitejs/plugin-react-swc": "^4.1.0", - "eslint": "^9.21.0", + "eslint": "^9.27.0", "eslint-plugin-react-hooks": "^5.1.0", "eslint-plugin-react-refresh": "^0.4.19", "globals": "^15.15.0", "jsdom": "^27.0.0", "prettier": "^3.5.3", - "vite": "^6.2.0", + "vite": "^7.1.7", "vitest": "^3.2.4" } }, + "node_modules/@acemir/cssom": { + "version": "0.9.29", + "resolved": "https://registry.npmjs.org/@acemir/cssom/-/cssom-0.9.29.tgz", + "integrity": "sha512-G90x0VW+9nW4dFajtjCoT+NM0scAfH9Mb08IcjgFHYbfiL/lU04dTF9JuVOi3/OH+DJCQdcIseSXkdCB9Ky6JA==", + "dev": true, + "license": "MIT" + }, "node_modules/@adobe/css-tools": { "version": "4.4.4", "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.4.4.tgz", @@ -68,30 +76,31 @@ "license": "MIT" }, "node_modules/@asamuzakjp/css-color": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-4.0.4.tgz", - "integrity": "sha512-cKjSKvWGmAziQWbCouOsFwb14mp1betm8Y7Fn+yglDMUUu3r9DCbJ9iJbeFDenLMqFbIMC0pQP8K+B8LAxX3OQ==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-4.1.1.tgz", + "integrity": "sha512-B0Hv6G3gWGMn0xKJ0txEi/jM5iFpT3MfDxmhZFb4W047GvytCf1DHQ1D69W3zHI4yWe2aTZAA0JnbMZ7Xc8DuQ==", "dev": true, "license": "MIT", "dependencies": { "@csstools/css-calc": "^2.1.4", - "@csstools/css-color-parser": "^3.0.10", + "@csstools/css-color-parser": "^3.1.0", "@csstools/css-parser-algorithms": "^3.0.5", "@csstools/css-tokenizer": "^3.0.4", - "lru-cache": "^11.1.0" + "lru-cache": "^11.2.4" } }, "node_modules/@asamuzakjp/dom-selector": { - "version": "6.5.5", - "resolved": "https://registry.npmjs.org/@asamuzakjp/dom-selector/-/dom-selector-6.5.5.tgz", - "integrity": "sha512-kI2MX9pmImjxWT8nxDZY+MuN6r1jJGe7WxizEbsAEPB/zxfW5wYLIiPG1v3UKgEOOP8EsDkp0ZL99oRFAdPM8g==", + "version": "6.7.6", + "resolved": "https://registry.npmjs.org/@asamuzakjp/dom-selector/-/dom-selector-6.7.6.tgz", + "integrity": "sha512-hBaJER6A9MpdG3WgdlOolHmbOYvSk46y7IQN/1+iqiCuUu6iWdQrs9DGKF8ocqsEqWujWf/V7b7vaDgiUmIvUg==", "dev": true, "license": "MIT", "dependencies": { "@asamuzakjp/nwsapi": "^2.3.9", "bidi-js": "^1.0.3", "css-tree": "^3.1.0", - 
"is-potential-custom-element-name": "^1.0.1" + "is-potential-custom-element-name": "^1.0.1", + "lru-cache": "^11.2.4" } }, "node_modules/@asamuzakjp/nwsapi": { @@ -102,73 +111,82 @@ "license": "MIT" }, "node_modules/@babel/code-frame": { - "version": "7.26.2", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.26.2.tgz", - "integrity": "sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", "license": "MIT", "dependencies": { - "@babel/helper-validator-identifier": "^7.25.9", + "@babel/helper-validator-identifier": "^7.27.1", "js-tokens": "^4.0.0", - "picocolors": "^1.0.0" + "picocolors": "^1.1.1" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/generator": { - "version": "7.26.10", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.26.10.tgz", - "integrity": "sha512-rRHT8siFIXQrAYOYqZQVsAr8vJ+cBNqcVAY6m5V8/4QqzaPl+zDBe6cLEPRDuNOUf3ww8RfJVlOyQMoSI+5Ang==", + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.5.tgz", + "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==", "license": "MIT", "dependencies": { - "@babel/parser": "^7.26.10", - "@babel/types": "^7.26.10", - "@jridgewell/gen-mapping": "^0.3.5", - "@jridgewell/trace-mapping": "^0.3.25", + "@babel/parser": "^7.28.5", + "@babel/types": "^7.28.5", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", "jsesc": "^3.0.2" }, "engines": { "node": ">=6.9.0" } }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, "node_modules/@babel/helper-module-imports": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.25.9.tgz", - "integrity": "sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz", + "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==", "license": "MIT", "dependencies": { - "@babel/traverse": "^7.25.9", - "@babel/types": "^7.25.9" + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-string-parser": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz", - "integrity": "sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.25.9", - "resolved": 
"https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz", - "integrity": "sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==", + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/parser": { - "version": "7.26.10", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.10.tgz", - "integrity": "sha512-6aQR2zGE/QFi8JpDLjUZEPYOs7+mhKXm86VaKFiLP35JQwQb6bwUE+XbvkH0EptsYhbNBSUGaUBLKqxH1xSgsA==", + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz", + "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==", "license": "MIT", "dependencies": { - "@babel/types": "^7.26.10" + "@babel/types": "^7.28.5" }, "bin": { "parser": "bin/babel-parser.js" @@ -178,66 +196,54 @@ } }, "node_modules/@babel/runtime": { - "version": "7.26.10", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.26.10.tgz", - "integrity": "sha512-2WJMeRQPHKSPemqk/awGrAiuFfzBmOIPXKizAsVhWH9YJqLZ0H+HS4c8loHGgW6utJ3E/ejXQUsiGaQy2NZ9Fw==", + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.4.tgz", + "integrity": "sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==", "license": "MIT", - "dependencies": { - "regenerator-runtime": "^0.14.0" - }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/template": { - "version": "7.26.9", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.26.9.tgz", - "integrity": "sha512-qyRplbeIpNZhmzOysF/wFMuP9sctmh2cFzRAZOn1YapxBsE1i9bJIY586R/WBLfLcmcBlM8ROBiQURnnNy+zfA==", + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", + "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", "license": "MIT", "dependencies": { - "@babel/code-frame": "^7.26.2", - "@babel/parser": "^7.26.9", - "@babel/types": "^7.26.9" + "@babel/code-frame": "^7.27.1", + "@babel/parser": "^7.27.2", + "@babel/types": "^7.27.1" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.26.10", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.26.10.tgz", - "integrity": "sha512-k8NuDrxr0WrPH5Aupqb2LCVURP/S0vBEn5mK6iH+GIYob66U5EtoZvcdudR2jQ4cmTwhEwW1DLB+Yyas9zjF6A==", + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.5.tgz", + "integrity": "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==", "license": "MIT", "dependencies": { - "@babel/code-frame": "^7.26.2", - "@babel/generator": "^7.26.10", - "@babel/parser": "^7.26.10", - "@babel/template": "^7.26.9", - "@babel/types": "^7.26.10", - "debug": "^4.3.1", - "globals": "^11.1.0" + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.5", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.28.5", + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.5", + "debug": "^4.3.1" }, "engines": { "node": ">=6.9.0" } }, - "node_modules/@babel/traverse/node_modules/globals": { - "version": "11.12.0", - "resolved": 
"https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", - "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, "node_modules/@babel/types": { - "version": "7.26.10", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.26.10.tgz", - "integrity": "sha512-emqcG3vHrpxUKTrxcblR36dcrcoRDvKmnL/dCL6ZsHaShW80qxCAcNhzQZrpeM765VzEos+xOi4s+r4IXzTwdQ==", + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz", + "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==", "license": "MIT", "dependencies": { - "@babel/helper-string-parser": "^7.25.9", - "@babel/helper-validator-identifier": "^7.25.9" + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" }, "engines": { "node": ">=6.9.0" @@ -339,9 +345,9 @@ } }, "node_modules/@csstools/css-syntax-patches-for-csstree": { - "version": "1.0.14", - "resolved": "https://registry.npmjs.org/@csstools/css-syntax-patches-for-csstree/-/css-syntax-patches-for-csstree-1.0.14.tgz", - "integrity": "sha512-zSlIxa20WvMojjpCSy8WrNpcZ61RqfTfX3XTaOeVlGJrt/8HF3YbzgFZa01yTbT4GWQLwfTcC3EB8i3XnB647Q==", + "version": "1.0.21", + "resolved": "https://registry.npmjs.org/@csstools/css-syntax-patches-for-csstree/-/css-syntax-patches-for-csstree-1.0.21.tgz", + "integrity": "sha512-plP8N8zKfEZ26figX4Nvajx8DuzfuRpLTqglQ5d0chfnt35Qt3X+m6ASZ+rG0D0kxe/upDVNwSIVJP5n4FuNfw==", "dev": true, "funding": [ { @@ -356,9 +362,6 @@ "license": "MIT-0", "engines": { "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" } }, "node_modules/@csstools/css-tokenizer": { @@ -487,9 +490,9 @@ "license": "MIT" }, "node_modules/@emotion/is-prop-valid": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/@emotion/is-prop-valid/-/is-prop-valid-1.3.1.tgz", - "integrity": "sha512-/ACwoqx7XQi9knQs/G0qKvv5teDMhD7bXYns9N/wM8ah8iNb8jZ2uNO0YOgiq2o2poIvVtJS2YALasQuMSQ7Kw==", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@emotion/is-prop-valid/-/is-prop-valid-1.4.0.tgz", + "integrity": "sha512-QgD4fyscGcbbKwJmqNvUMSE02OsHUa+lAWKdEUIJKgqe5IwRSKd7+KhibEWdaKwgjLj0DRSHA9biAIqGBk05lw==", "license": "MIT", "dependencies": { "@emotion/memoize": "^0.9.0" @@ -545,9 +548,9 @@ "license": "MIT" }, "node_modules/@emotion/styled": { - "version": "11.14.0", - "resolved": "https://registry.npmjs.org/@emotion/styled/-/styled-11.14.0.tgz", - "integrity": "sha512-XxfOnXFffatap2IyCeJyNov3kiDQWoR08gPUQxvbL7fxKryGBKUZUkG6Hz48DZwVrJSVh9sJboyV1Ds4OW6SgA==", + "version": "11.14.1", + "resolved": "https://registry.npmjs.org/@emotion/styled/-/styled-11.14.1.tgz", + "integrity": "sha512-qEEJt42DuToa3gurlH4Qqc1kVpNq8wO8cJtDzU46TjlzWjDlsVyevtYCRijVq3SrHsROS+gVQ8Fnea108GnKzw==", "license": "MIT", "dependencies": { "@babel/runtime": "^7.18.3", @@ -595,9 +598,9 @@ "license": "MIT" }, "node_modules/@esbuild/aix-ppc64": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.1.tgz", - "integrity": "sha512-kfYGy8IdzTGy+z0vFGvExZtxkFlA4zAxgKEahG9KE1ScBjpQnFsNOX8KTU5ojNru5ed5CVoJYXFtoxaq5nFbjQ==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.2.tgz", + "integrity": "sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw==", "cpu": [ "ppc64" ], @@ -612,9 +615,9 @@ } }, "node_modules/@esbuild/android-arm": { - 
"version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.1.tgz", - "integrity": "sha512-dp+MshLYux6j/JjdqVLnMglQlFu+MuVeNrmT5nk6q07wNhCdSnB7QZj+7G8VMUGh1q+vj2Bq8kRsuyA00I/k+Q==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.2.tgz", + "integrity": "sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA==", "cpu": [ "arm" ], @@ -629,9 +632,9 @@ } }, "node_modules/@esbuild/android-arm64": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.1.tgz", - "integrity": "sha512-50tM0zCJW5kGqgG7fQ7IHvQOcAn9TKiVRuQ/lN0xR+T2lzEFvAi1ZcS8DiksFcEpf1t/GYOeOfCAgDHFpkiSmA==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.2.tgz", + "integrity": "sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA==", "cpu": [ "arm64" ], @@ -646,9 +649,9 @@ } }, "node_modules/@esbuild/android-x64": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.1.tgz", - "integrity": "sha512-GCj6WfUtNldqUzYkN/ITtlhwQqGWu9S45vUXs7EIYf+7rCiiqH9bCloatO9VhxsL0Pji+PF4Lz2XXCES+Q8hDw==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.2.tgz", + "integrity": "sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A==", "cpu": [ "x64" ], @@ -663,9 +666,9 @@ } }, "node_modules/@esbuild/darwin-arm64": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.1.tgz", - "integrity": "sha512-5hEZKPf+nQjYoSr/elb62U19/l1mZDdqidGfmFutVUjjUZrOazAtwK+Kr+3y0C/oeJfLlxo9fXb1w7L+P7E4FQ==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.2.tgz", + "integrity": "sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg==", "cpu": [ "arm64" ], @@ -680,9 +683,9 @@ } }, "node_modules/@esbuild/darwin-x64": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.1.tgz", - "integrity": "sha512-hxVnwL2Dqs3fM1IWq8Iezh0cX7ZGdVhbTfnOy5uURtao5OIVCEyj9xIzemDi7sRvKsuSdtCAhMKarxqtlyVyfA==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.2.tgz", + "integrity": "sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA==", "cpu": [ "x64" ], @@ -697,9 +700,9 @@ } }, "node_modules/@esbuild/freebsd-arm64": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.1.tgz", - "integrity": "sha512-1MrCZs0fZa2g8E+FUo2ipw6jw5qqQiH+tERoS5fAfKnRx6NXH31tXBKI3VpmLijLH6yriMZsxJtaXUyFt/8Y4A==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.2.tgz", + "integrity": "sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g==", "cpu": [ "arm64" ], @@ -714,9 +717,9 @@ } }, "node_modules/@esbuild/freebsd-x64": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.1.tgz", - "integrity": "sha512-0IZWLiTyz7nm0xuIs0q1Y3QWJC52R8aSXxe40VUxm6BB1RNmkODtW6LHvWRrGiICulcX7ZvyH6h5fqdLu4gkww==", + "version": "0.27.2", + "resolved": 
"https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.2.tgz", + "integrity": "sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA==", "cpu": [ "x64" ], @@ -731,9 +734,9 @@ } }, "node_modules/@esbuild/linux-arm": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.1.tgz", - "integrity": "sha512-NdKOhS4u7JhDKw9G3cY6sWqFcnLITn6SqivVArbzIaf3cemShqfLGHYMx8Xlm/lBit3/5d7kXvriTUGa5YViuQ==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.2.tgz", + "integrity": "sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw==", "cpu": [ "arm" ], @@ -748,9 +751,9 @@ } }, "node_modules/@esbuild/linux-arm64": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.1.tgz", - "integrity": "sha512-jaN3dHi0/DDPelk0nLcXRm1q7DNJpjXy7yWaWvbfkPvI+7XNSc/lDOnCLN7gzsyzgu6qSAmgSvP9oXAhP973uQ==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.2.tgz", + "integrity": "sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw==", "cpu": [ "arm64" ], @@ -765,9 +768,9 @@ } }, "node_modules/@esbuild/linux-ia32": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.1.tgz", - "integrity": "sha512-OJykPaF4v8JidKNGz8c/q1lBO44sQNUQtq1KktJXdBLn1hPod5rE/Hko5ugKKZd+D2+o1a9MFGUEIUwO2YfgkQ==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.2.tgz", + "integrity": "sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w==", "cpu": [ "ia32" ], @@ -782,9 +785,9 @@ } }, "node_modules/@esbuild/linux-loong64": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.1.tgz", - "integrity": "sha512-nGfornQj4dzcq5Vp835oM/o21UMlXzn79KobKlcs3Wz9smwiifknLy4xDCLUU0BWp7b/houtdrgUz7nOGnfIYg==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.2.tgz", + "integrity": "sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg==", "cpu": [ "loong64" ], @@ -799,9 +802,9 @@ } }, "node_modules/@esbuild/linux-mips64el": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.1.tgz", - "integrity": "sha512-1osBbPEFYwIE5IVB/0g2X6i1qInZa1aIoj1TdL4AaAb55xIIgbg8Doq6a5BzYWgr+tEcDzYH67XVnTmUzL+nXg==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.2.tgz", + "integrity": "sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw==", "cpu": [ "mips64el" ], @@ -816,9 +819,9 @@ } }, "node_modules/@esbuild/linux-ppc64": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.1.tgz", - "integrity": "sha512-/6VBJOwUf3TdTvJZ82qF3tbLuWsscd7/1w+D9LH0W/SqUgM5/JJD0lrJ1fVIfZsqB6RFmLCe0Xz3fmZc3WtyVg==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.2.tgz", + "integrity": "sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ==", "cpu": [ "ppc64" ], @@ -833,9 +836,9 @@ } }, "node_modules/@esbuild/linux-riscv64": { - "version": "0.25.1", - "resolved": 
"https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.1.tgz", - "integrity": "sha512-nSut/Mx5gnilhcq2yIMLMe3Wl4FK5wx/o0QuuCLMtmJn+WeWYoEGDN1ipcN72g1WHsnIbxGXd4i/MF0gTcuAjQ==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.2.tgz", + "integrity": "sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA==", "cpu": [ "riscv64" ], @@ -850,9 +853,9 @@ } }, "node_modules/@esbuild/linux-s390x": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.1.tgz", - "integrity": "sha512-cEECeLlJNfT8kZHqLarDBQso9a27o2Zd2AQ8USAEoGtejOrCYHNtKP8XQhMDJMtthdF4GBmjR2au3x1udADQQQ==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.2.tgz", + "integrity": "sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w==", "cpu": [ "s390x" ], @@ -867,9 +870,9 @@ } }, "node_modules/@esbuild/linux-x64": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.1.tgz", - "integrity": "sha512-xbfUhu/gnvSEg+EGovRc+kjBAkrvtk38RlerAzQxvMzlB4fXpCFCeUAYzJvrnhFtdeyVCDANSjJvOvGYoeKzFA==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.2.tgz", + "integrity": "sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA==", "cpu": [ "x64" ], @@ -884,9 +887,9 @@ } }, "node_modules/@esbuild/netbsd-arm64": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.1.tgz", - "integrity": "sha512-O96poM2XGhLtpTh+s4+nP7YCCAfb4tJNRVZHfIE7dgmax+yMP2WgMd2OecBuaATHKTHsLWHQeuaxMRnCsH8+5g==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.2.tgz", + "integrity": "sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw==", "cpu": [ "arm64" ], @@ -901,9 +904,9 @@ } }, "node_modules/@esbuild/netbsd-x64": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.1.tgz", - "integrity": "sha512-X53z6uXip6KFXBQ+Krbx25XHV/NCbzryM6ehOAeAil7X7oa4XIq+394PWGnwaSQ2WRA0KI6PUO6hTO5zeF5ijA==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.2.tgz", + "integrity": "sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA==", "cpu": [ "x64" ], @@ -918,9 +921,9 @@ } }, "node_modules/@esbuild/openbsd-arm64": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.1.tgz", - "integrity": "sha512-Na9T3szbXezdzM/Kfs3GcRQNjHzM6GzFBeU1/6IV/npKP5ORtp9zbQjvkDJ47s6BCgaAZnnnu/cY1x342+MvZg==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.2.tgz", + "integrity": "sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA==", "cpu": [ "arm64" ], @@ -935,9 +938,9 @@ } }, "node_modules/@esbuild/openbsd-x64": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.1.tgz", - "integrity": "sha512-T3H78X2h1tszfRSf+txbt5aOp/e7TAz3ptVKu9Oyir3IAOFPGV6O9c2naym5TOriy1l0nNf6a4X5UXRZSGX/dw==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.2.tgz", + "integrity": 
"sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg==", "cpu": [ "x64" ], @@ -951,10 +954,27 @@ "node": ">=18" } }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.2.tgz", + "integrity": "sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, "node_modules/@esbuild/sunos-x64": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.1.tgz", - "integrity": "sha512-2H3RUvcmULO7dIE5EWJH8eubZAI4xw54H1ilJnRNZdeo8dTADEZ21w6J22XBkXqGJbe0+wnNJtw3UXRoLJnFEg==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.2.tgz", + "integrity": "sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg==", "cpu": [ "x64" ], @@ -969,9 +989,9 @@ } }, "node_modules/@esbuild/win32-arm64": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.1.tgz", - "integrity": "sha512-GE7XvrdOzrb+yVKB9KsRMq+7a2U/K5Cf/8grVFRAGJmfADr/e/ODQ134RK2/eeHqYV5eQRFxb1hY7Nr15fv1NQ==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.2.tgz", + "integrity": "sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg==", "cpu": [ "arm64" ], @@ -986,9 +1006,9 @@ } }, "node_modules/@esbuild/win32-ia32": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.1.tgz", - "integrity": "sha512-uOxSJCIcavSiT6UnBhBzE8wy3n0hOkJsBOzy7HDAuTDE++1DJMRRVCPGisULScHL+a/ZwdXPpXD3IyFKjA7K8A==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.2.tgz", + "integrity": "sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ==", "cpu": [ "ia32" ], @@ -1003,9 +1023,9 @@ } }, "node_modules/@esbuild/win32-x64": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.1.tgz", - "integrity": "sha512-Y1EQdcfwMSeQN/ujR5VayLOJ1BHaK+ssyk0AEzPjC+t1lITgsnccPqFjb6V+LsTp/9Iov4ysfjxLaGJ9RPtkVg==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.2.tgz", + "integrity": "sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ==", "cpu": [ "x64" ], @@ -1020,9 +1040,9 @@ } }, "node_modules/@eslint-community/eslint-utils": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.5.0.tgz", - "integrity": "sha512-RoV8Xs9eNwiDvhv7M+xcL4PWyRyIXRY/FLp3buU4h1EYfdF7unWUy3dOjPqb3C7rMUewIcqwW850PgS8h1o1yg==", + "version": "4.9.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.0.tgz", + "integrity": "sha512-ayVFHdtZ+hsq1t2Dy24wCmGXGe4q9Gu3smhLYALJrr473ZH27MsnSL+LKUlimp4BWJqMDMLmPpx/Q9R3OAlL4g==", "dev": true, "license": "MIT", "dependencies": { @@ -1052,9 +1072,9 @@ } }, "node_modules/@eslint-community/regexpp": { - "version": "4.12.1", - "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.1.tgz", - "integrity": 
"sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==", + "version": "4.12.2", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz", + "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==", "dev": true, "license": "MIT", "engines": { @@ -1062,13 +1082,13 @@ } }, "node_modules/@eslint/config-array": { - "version": "0.19.2", - "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.19.2.tgz", - "integrity": "sha512-GNKqxfHG2ySmJOBSHg7LxeUx4xpuCoFjacmlCoYWEbaPXLwvfIjixRI12xCQZeULksQb23uiA8F40w5TojpV7w==", + "version": "0.21.1", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.1.tgz", + "integrity": "sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA==", "dev": true, "license": "Apache-2.0", "dependencies": { - "@eslint/object-schema": "^2.1.6", + "@eslint/object-schema": "^2.1.7", "debug": "^4.3.1", "minimatch": "^3.1.2" }, @@ -1077,19 +1097,22 @@ } }, "node_modules/@eslint/config-helpers": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.1.0.tgz", - "integrity": "sha512-kLrdPDJE1ckPo94kmPPf9Hfd0DU0Jw6oKYrhe+pwSC0iTUInmTa+w6fw8sGgcfkFJGNdWOUeOaDM4quW4a7OkA==", + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.4.2.tgz", + "integrity": "sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw==", "dev": true, "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^0.17.0" + }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, "node_modules/@eslint/core": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.12.0.tgz", - "integrity": "sha512-cmrR6pytBuSMTaBweKoGMwu3EiHiEC+DoyupPmlZ0HxBJBtIxwe+j/E4XPIKNx+Q74c8lXKPwYawBf5glsTkHg==", + "version": "0.17.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.17.0.tgz", + "integrity": "sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ==", "dev": true, "license": "Apache-2.0", "dependencies": { @@ -1100,9 +1123,9 @@ } }, "node_modules/@eslint/eslintrc": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.0.tgz", - "integrity": "sha512-yaVPAiNAalnCZedKLdR21GOGILMLKPyqSLWaAjQFvYA2i/ciDi8ArYVr69Anohb6cH2Ukhqti4aFnYyPm8wdwQ==", + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.3.tgz", + "integrity": "sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ==", "dev": true, "license": "MIT", "dependencies": { @@ -1112,7 +1135,7 @@ "globals": "^14.0.0", "ignore": "^5.2.0", "import-fresh": "^3.2.1", - "js-yaml": "^4.1.0", + "js-yaml": "^4.1.1", "minimatch": "^3.1.2", "strip-json-comments": "^3.1.1" }, @@ -1137,19 +1160,22 @@ } }, "node_modules/@eslint/js": { - "version": "9.22.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.22.0.tgz", - "integrity": "sha512-vLFajx9o8d1/oL2ZkpMYbkLv8nDB6yaIwFNt7nI4+I80U/z03SxmfOMsLbvWr3p7C+Wnoh//aOu2pQW8cS0HCQ==", + "version": "9.39.2", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.39.2.tgz", + "integrity": "sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA==", "dev": true, "license": "MIT", "engines": { "node": "^18.18.0 || ^20.9.0 || 
>=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" } }, "node_modules/@eslint/object-schema": { - "version": "2.1.6", - "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.6.tgz", - "integrity": "sha512-RBMg5FRL0I0gs51M/guSAj5/e14VQ4tpZnQNWwuDT66P14I43ItmPfIZRhO9fUVIPOAQXU47atlywZ/czoqFPA==", + "version": "2.1.7", + "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.7.tgz", + "integrity": "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==", "dev": true, "license": "Apache-2.0", "engines": { @@ -1157,13 +1183,13 @@ } }, "node_modules/@eslint/plugin-kit": { - "version": "0.2.7", - "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.2.7.tgz", - "integrity": "sha512-JubJ5B2pJ4k4yGxaNLdbjrnk9d/iDz6/q8wOilpIowd6PJPgaxCuHBnBszq7Ce2TyMrywm5r4PnKm6V3iiZF+g==", + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.4.1.tgz", + "integrity": "sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA==", "dev": true, "license": "Apache-2.0", "dependencies": { - "@eslint/core": "^0.12.0", + "@eslint/core": "^0.17.0", "levn": "^0.4.1" }, "engines": { @@ -1223,6 +1249,18 @@ "integrity": "sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ==", "license": "MIT" }, + "node_modules/@hookform/resolvers": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/@hookform/resolvers/-/resolvers-5.2.2.tgz", + "integrity": "sha512-A/IxlMLShx3KjV/HeTcTfaMxdwy690+L/ZADoeaTltLx+CVuzkeVIPuybK3jrRfw7YZnmdKsVVHAlEPIAEUNlA==", + "license": "MIT", + "dependencies": { + "@standard-schema/utils": "^0.3.0" + }, + "peerDependencies": { + "react-hook-form": "^7.55.0" + } + }, "node_modules/@humanfs/core": { "version": "0.19.1", "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", @@ -1234,33 +1272,19 @@ } }, "node_modules/@humanfs/node": { - "version": "0.16.6", - "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.6.tgz", - "integrity": "sha512-YuI2ZHQL78Q5HbhDiBA1X4LmYdXCKCMQIfw0pw7piHJwyREFebJUvrQN4cMssyES6x+vfUbx1CIpaQUKYdQZOw==", + "version": "0.16.7", + "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.7.tgz", + "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==", "dev": true, "license": "Apache-2.0", "dependencies": { "@humanfs/core": "^0.19.1", - "@humanwhocodes/retry": "^0.3.0" + "@humanwhocodes/retry": "^0.4.0" }, "engines": { "node": ">=18.18.0" } }, - "node_modules/@humanfs/node/node_modules/@humanwhocodes/retry": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.3.1.tgz", - "integrity": "sha512-JBxkERygn7Bv/GbN5Rv8Ul6LVknS+5Bp6RgDC/O8gEBU/yeH5Ui5C/OlWrTb6qct7LjjfT6Re2NxB0ln0yYybA==", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": ">=18.18" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/nzakas" - } - }, "node_modules/@humanwhocodes/module-importer": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", @@ -1276,9 +1300,9 @@ } }, "node_modules/@humanwhocodes/retry": { - "version": "0.4.2", - "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.2.tgz", - "integrity": 
"sha512-xeO57FpIu4p1Ri3Jq/EXq4ClRm86dVF2z/+kvFnyqVYRavTZmaFaUBbWCOuuTh0o/g7DSsk6kc2vrS4Vl5oPOQ==", + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz", + "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", "dev": true, "license": "Apache-2.0", "engines": { @@ -1290,17 +1314,13 @@ } }, "node_modules/@jridgewell/gen-mapping": { - "version": "0.3.8", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz", - "integrity": "sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==", + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", "license": "MIT", "dependencies": { - "@jridgewell/set-array": "^1.2.1", - "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/sourcemap-codec": "^1.5.0", "@jridgewell/trace-mapping": "^0.3.24" - }, - "engines": { - "node": ">=6.0.0" } }, "node_modules/@jridgewell/resolve-uri": { @@ -1312,15 +1332,6 @@ "node": ">=6.0.0" } }, - "node_modules/@jridgewell/set-array": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", - "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", - "license": "MIT", - "engines": { - "node": ">=6.0.0" - } - }, "node_modules/@jridgewell/sourcemap-codec": { "version": "1.5.5", "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", @@ -1328,9 +1339,9 @@ "license": "MIT" }, "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.25", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", - "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", "license": "MIT", "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", @@ -1344,22 +1355,22 @@ "license": "Apache-2.0" }, "node_modules/@mantine/charts": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/@mantine/charts/-/charts-8.0.1.tgz", - "integrity": "sha512-yntk4siXpQGSj83tDwftJw6fHTOBS6c/VWinjvTW29ptEdjBCxbKFfyyDc9UGVVuO7ovbdtpfCZBpuN2I7HPCA==", + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@mantine/charts/-/charts-8.0.2.tgz", + "integrity": "sha512-hVS1+CT+7e3+ZbU1xx7Nyx/5ZBSxzS+68SKeVLeOZPGl9Wx35CY1oLn0n53vQPWV2WFKd0u0Bq3d1iuaDpkzGA==", "license": "MIT", "peerDependencies": { - "@mantine/core": "8.0.1", - "@mantine/hooks": "8.0.1", + "@mantine/core": "8.0.2", + "@mantine/hooks": "8.0.2", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x", "recharts": "^2.13.3" } }, "node_modules/@mantine/core": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/@mantine/core/-/core-8.0.1.tgz", - "integrity": "sha512-4ezaxKjChSPtawamQ3KrJq+x506uTouXlL0Z5fP+t105KnyxMrAJUENhbh2ivD4pq9Zh1BFiD9IWzyu3IXFR8w==", + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@mantine/core/-/core-8.0.2.tgz", + "integrity": 
"sha512-2Ps7bRTeTbRwAKTCL9xdflPz0pwOlTq6ohyTbDZMCADqecf09GHI7GiX+HJatqbPZ2t8jK0fN1b48YhjJaxTqg==", "license": "MIT", "dependencies": { "@floating-ui/react": "^0.26.28", @@ -1370,46 +1381,46 @@ "type-fest": "^4.27.0" }, "peerDependencies": { - "@mantine/hooks": "8.0.1", + "@mantine/hooks": "8.0.2", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } }, "node_modules/@mantine/dates": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/@mantine/dates/-/dates-8.0.1.tgz", - "integrity": "sha512-YCmV5jiGE9Ts2uhNS217IA1Hd5kAa8oaEtfnU0bS1sL36zKEf2s6elmzY718XdF8tFil0jJWAj0jiCrA3/udMg==", + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@mantine/dates/-/dates-8.0.2.tgz", + "integrity": "sha512-V1xU00gECfykA4UFln8ulPsPHvaTncsg9zUbzCwqwEAYlZFG3Nnj5eBzzpV3IN1LNDPEVGb1gAOM6jZ+fi2uRQ==", "license": "MIT", "dependencies": { "clsx": "^2.1.1" }, "peerDependencies": { - "@mantine/core": "8.0.1", - "@mantine/hooks": "8.0.1", + "@mantine/core": "8.0.2", + "@mantine/hooks": "8.0.2", "dayjs": ">=1.0.0", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } }, "node_modules/@mantine/dropzone": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/@mantine/dropzone/-/dropzone-8.0.1.tgz", - "integrity": "sha512-8PH5yrtA/ebCIwjs0m4J9qOvEyS/P4XmNlHrw0E389/qq64Ol7+/ZH7Xtiq64IaY8kvsMW1XHaV0c+bdYrijiA==", + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@mantine/dropzone/-/dropzone-8.0.2.tgz", + "integrity": "sha512-dWsz99QjWOQy7wDx4zzvBrPQ6l3201kg0iugk2Dm+MmN9mlboychz/LIZzoCGsodtQRLAsoTlN2zOqhsiggRfw==", "license": "MIT", "dependencies": { "react-dropzone": "14.3.8" }, "peerDependencies": { - "@mantine/core": "8.0.1", - "@mantine/hooks": "8.0.1", + "@mantine/core": "8.0.2", + "@mantine/hooks": "8.0.2", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } }, "node_modules/@mantine/form": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/@mantine/form/-/form-8.0.1.tgz", - "integrity": "sha512-lQ94gn/9p60C+tKEW7psQ1tZHod58Q0bXLbRDadRKMwnqBb2WFoIuaQWPDo7ox+PqyOv28dtflgS+Lm95EbBhg==", + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@mantine/form/-/form-8.0.2.tgz", + "integrity": "sha512-vSp9BfrhC9o7RMRYMaND2UAflXO4i6c5F1qPkiM2FID6ye2RJxW8YHaGa3kA0VfBbhDw9sFBbl8p7ttE4RPzcw==", "license": "MIT", "dependencies": { "fast-deep-equal": "^3.1.3", @@ -1420,34 +1431,34 @@ } }, "node_modules/@mantine/hooks": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-8.0.1.tgz", - "integrity": "sha512-GvLdM4Ro3QcDyIgqrdXsUZmeeKye2TNL/k3mEr9JhM5KacHQjr83JPp0u9eLobn7kiyBqpLTYmVYAbmjJdCxHw==", + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-8.0.2.tgz", + "integrity": "sha512-0jpEdC0KIAZ54D5kd9rJudrEm6vkvnrL9yYHnkuNbxokXSzDdYA/wpHnKR5WW+u6fW4JF6A6A7gN1vXKeC9MSw==", "license": "MIT", "peerDependencies": { "react": "^18.x || ^19.x" } }, "node_modules/@mantine/notifications": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/@mantine/notifications/-/notifications-8.0.1.tgz", - "integrity": "sha512-7TX9OyAmUcok3qffnheS7gTAMKDczETy8XEYDr38Sy/XIoXLjM+3CwO+a/vfd1F9oW2LvkahkHT0Ey+vBOVd0Q==", + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@mantine/notifications/-/notifications-8.0.2.tgz", + "integrity": "sha512-whSuoCCZxQF3VM40sumCte9tA79to8OCV/vv0z8PeVTj/eKlaTR+P9LKigO9ovhuNELrvvO3Rxcnno5aMBz0oA==", "license": "MIT", "dependencies": { - "@mantine/store": "8.0.1", + "@mantine/store": "8.0.2", "react-transition-group": "4.4.5" }, 
"peerDependencies": { - "@mantine/core": "8.0.1", - "@mantine/hooks": "8.0.1", + "@mantine/core": "8.0.2", + "@mantine/hooks": "8.0.2", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } }, "node_modules/@mantine/store": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/@mantine/store/-/store-8.0.1.tgz", - "integrity": "sha512-3wfUDeiERXJEI+MGgRAbh+9aY35D9oE4UzquLqZh8cIiH5i5g64Y/eJx3PfjHgO5+Zeu6lbgTgL6k4lg4a2SBQ==", + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@mantine/store/-/store-8.0.2.tgz", + "integrity": "sha512-/LuizGWAXjVnLLZ55f0QYotiqb8GlHpIb4KRf4LqRkbsA6UAZEVb6beuk0vI2Azf6vfuh7sTHu1xVC5zI6C+Cw==", "license": "MIT", "peerDependencies": { "react": "^18.x || ^19.x" @@ -1464,16 +1475,16 @@ } }, "node_modules/@rolldown/pluginutils": { - "version": "1.0.0-beta.35", - "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.35.tgz", - "integrity": "sha512-slYrCpoxJUqzFDDNlvrOYRazQUNRvWPjXA17dAOISY3rDMxX6k8K4cj2H+hEYMHF81HO3uNd5rHVigAWRM5dSg==", + "version": "1.0.0-beta.47", + "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.47.tgz", + "integrity": "sha512-8QagwMH3kNCuzD8EWL8R2YPW5e4OrHNSAHRFDdmFqEwEaD/KcNKjVoumo+gP2vW5eKB2UPbM6vTYiGZX0ixLnw==", "dev": true, "license": "MIT" }, "node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.35.0.tgz", - "integrity": "sha512-uYQ2WfPaqz5QtVgMxfN6NpLD+no0MYHDBywl7itPYd3K5TjjSghNKmX8ic9S8NU8w81NVhJv/XojcHptRly7qQ==", + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.53.5.tgz", + "integrity": "sha512-iDGS/h7D8t7tvZ1t6+WPK04KD0MwzLZrG0se1hzBjSi5fyxlsiggoJHwh18PCFNn7tG43OWb6pdZ6Y+rMlmyNQ==", "cpu": [ "arm" ], @@ -1485,9 +1496,9 @@ ] }, "node_modules/@rollup/rollup-android-arm64": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.35.0.tgz", - "integrity": "sha512-FtKddj9XZudurLhdJnBl9fl6BwCJ3ky8riCXjEw3/UIbjmIY58ppWwPEvU3fNu+W7FUsAsB1CdH+7EQE6CXAPA==", + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.53.5.tgz", + "integrity": "sha512-wrSAViWvZHBMMlWk6EJhvg8/rjxzyEhEdgfMMjREHEq11EtJ6IP6yfcCH57YAEca2Oe3FNCE9DSTgU70EIGmVw==", "cpu": [ "arm64" ], @@ -1499,9 +1510,9 @@ ] }, "node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.35.0.tgz", - "integrity": "sha512-Uk+GjOJR6CY844/q6r5DR/6lkPFOw0hjfOIzVx22THJXMxktXG6CbejseJFznU8vHcEBLpiXKY3/6xc+cBm65Q==", + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.53.5.tgz", + "integrity": "sha512-S87zZPBmRO6u1YXQLwpveZm4JfPpAa6oHBX7/ghSiGH3rz/KDgAu1rKdGutV+WUI6tKDMbaBJomhnT30Y2t4VQ==", "cpu": [ "arm64" ], @@ -1513,9 +1524,9 @@ ] }, "node_modules/@rollup/rollup-darwin-x64": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.35.0.tgz", - "integrity": "sha512-3IrHjfAS6Vkp+5bISNQnPogRAW5GAV1n+bNCrDwXmfMHbPl5EhTmWtfmwlJxFRUCBZ+tZ/OxDyU08aF6NI/N5Q==", + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.53.5.tgz", + "integrity": 
"sha512-YTbnsAaHo6VrAczISxgpTva8EkfQus0VPEVJCEaboHtZRIb6h6j0BNxRBOwnDciFTZLDPW5r+ZBmhL/+YpTZgA==", "cpu": [ "x64" ], @@ -1527,9 +1538,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-arm64": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.35.0.tgz", - "integrity": "sha512-sxjoD/6F9cDLSELuLNnY0fOrM9WA0KrM0vWm57XhrIMf5FGiN8D0l7fn+bpUeBSU7dCgPV2oX4zHAsAXyHFGcQ==", + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.53.5.tgz", + "integrity": "sha512-1T8eY2J8rKJWzaznV7zedfdhD1BqVs1iqILhmHDq/bqCUZsrMt+j8VCTHhP0vdfbHK3e1IQ7VYx3jlKqwlf+vw==", "cpu": [ "arm64" ], @@ -1541,9 +1552,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-x64": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.35.0.tgz", - "integrity": "sha512-2mpHCeRuD1u/2kruUiHSsnjWtHjqVbzhBkNVQ1aVD63CcexKVcQGwJ2g5VphOd84GvxfSvnnlEyBtQCE5hxVVw==", + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.53.5.tgz", + "integrity": "sha512-sHTiuXyBJApxRn+VFMaw1U+Qsz4kcNlxQ742snICYPrY+DDL8/ZbaC4DVIB7vgZmp3jiDaKA0WpBdP0aqPJoBQ==", "cpu": [ "x64" ], @@ -1555,9 +1566,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-gnueabihf": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.35.0.tgz", - "integrity": "sha512-mrA0v3QMy6ZSvEuLs0dMxcO2LnaCONs1Z73GUDBHWbY8tFFocM6yl7YyMu7rz4zS81NDSqhrUuolyZXGi8TEqg==", + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.53.5.tgz", + "integrity": "sha512-dV3T9MyAf0w8zPVLVBptVlzaXxka6xg1f16VAQmjg+4KMSTWDvhimI/Y6mp8oHwNrmnmVl9XxJ/w/mO4uIQONA==", "cpu": [ "arm" ], @@ -1569,9 +1580,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-musleabihf": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.35.0.tgz", - "integrity": "sha512-DnYhhzcvTAKNexIql8pFajr0PiDGrIsBYPRvCKlA5ixSS3uwo/CWNZxB09jhIapEIg945KOzcYEAGGSmTSpk7A==", + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.53.5.tgz", + "integrity": "sha512-wIGYC1x/hyjP+KAu9+ewDI+fi5XSNiUi9Bvg6KGAh2TsNMA3tSEs+Sh6jJ/r4BV/bx/CyWu2ue9kDnIdRyafcQ==", "cpu": [ "arm" ], @@ -1583,9 +1594,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.35.0.tgz", - "integrity": "sha512-uagpnH2M2g2b5iLsCTZ35CL1FgyuzzJQ8L9VtlJ+FckBXroTwNOaD0z0/UF+k5K3aNQjbm8LIVpxykUOQt1m/A==", + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.53.5.tgz", + "integrity": "sha512-Y+qVA0D9d0y2FRNiG9oM3Hut/DgODZbU9I8pLLPwAsU0tUKZ49cyV1tzmB/qRbSzGvY8lpgGkJuMyuhH7Ma+Vg==", "cpu": [ "arm64" ], @@ -1597,9 +1608,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.35.0.tgz", - "integrity": "sha512-XQxVOCd6VJeHQA/7YcqyV0/88N6ysSVzRjJ9I9UA/xXpEsjvAgDTgH3wQYz5bmr7SPtVK2TsP2fQ2N9L4ukoUg==", + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.53.5.tgz", + 
"integrity": "sha512-juaC4bEgJsyFVfqhtGLz8mbopaWD+WeSOYr5E16y+1of6KQjc0BpwZLuxkClqY1i8sco+MdyoXPNiCkQou09+g==", "cpu": [ "arm64" ], @@ -1610,10 +1621,10 @@ "linux" ] }, - "node_modules/@rollup/rollup-linux-loongarch64-gnu": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loongarch64-gnu/-/rollup-linux-loongarch64-gnu-4.35.0.tgz", - "integrity": "sha512-5pMT5PzfgwcXEwOaSrqVsz/LvjDZt+vQ8RT/70yhPU06PTuq8WaHhfT1LW+cdD7mW6i/J5/XIkX/1tCAkh1W6g==", + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.53.5.tgz", + "integrity": "sha512-rIEC0hZ17A42iXtHX+EPJVL/CakHo+tT7W0pbzdAGuWOt2jxDFh7A/lRhsNHBcqL4T36+UiAgwO8pbmn3dE8wA==", "cpu": [ "loong64" ], @@ -1624,10 +1635,10 @@ "linux" ] }, - "node_modules/@rollup/rollup-linux-powerpc64le-gnu": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.35.0.tgz", - "integrity": "sha512-c+zkcvbhbXF98f4CtEIP1EBA/lCic5xB0lToneZYvMeKu5Kamq3O8gqrxiYYLzlZH6E3Aq+TSW86E4ay8iD8EA==", + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.53.5.tgz", + "integrity": "sha512-T7l409NhUE552RcAOcmJHj3xyZ2h7vMWzcwQI0hvn5tqHh3oSoclf9WgTl+0QqffWFG8MEVZZP1/OBglKZx52Q==", "cpu": [ "ppc64" ], @@ -1639,9 +1650,23 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-gnu": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.35.0.tgz", - "integrity": "sha512-s91fuAHdOwH/Tad2tzTtPX7UZyytHIRR6V4+2IGlV0Cej5rkG0R61SX4l4y9sh0JBibMiploZx3oHKPnQBKe4g==", + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.53.5.tgz", + "integrity": "sha512-7OK5/GhxbnrMcxIFoYfhV/TkknarkYC1hqUw1wU2xUN3TVRLNT5FmBv4KkheSG2xZ6IEbRAhTooTV2+R5Tk0lQ==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.53.5.tgz", + "integrity": "sha512-GwuDBE/PsXaTa76lO5eLJTyr2k8QkPipAyOrs4V/KJufHCZBJ495VCGJol35grx9xryk4V+2zd3Ri+3v7NPh+w==", "cpu": [ "riscv64" ], @@ -1653,9 +1678,9 @@ ] }, "node_modules/@rollup/rollup-linux-s390x-gnu": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.35.0.tgz", - "integrity": "sha512-hQRkPQPLYJZYGP+Hj4fR9dDBMIM7zrzJDWFEMPdTnTy95Ljnv0/4w/ixFw3pTBMEuuEuoqtBINYND4M7ujcuQw==", + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.53.5.tgz", + "integrity": "sha512-IAE1Ziyr1qNfnmiQLHBURAD+eh/zH1pIeJjeShleII7Vj8kyEm2PF77o+lf3WTHDpNJcu4IXJxNO0Zluro8bOw==", "cpu": [ "s390x" ], @@ -1667,9 +1692,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.35.0.tgz", - "integrity": "sha512-Pim1T8rXOri+0HmV4CdKSGrqcBWX0d1HoPnQ0uw0bdp1aP5SdQVNBy8LjYncvnLgu3fnnCt17xjWGd4cqh8/hA==", + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.53.5.tgz", + 
"integrity": "sha512-Pg6E+oP7GvZ4XwgRJBuSXZjcqpIW3yCBhK4BcsANvb47qMvAbCjR6E+1a/U2WXz1JJxp9/4Dno3/iSJLcm5auw==", "cpu": [ "x64" ], @@ -1681,9 +1706,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.35.0.tgz", - "integrity": "sha512-QysqXzYiDvQWfUiTm8XmJNO2zm9yC9P/2Gkrwg2dH9cxotQzunBHYr6jk4SujCTqnfGxduOmQcI7c2ryuW8XVg==", + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.53.5.tgz", + "integrity": "sha512-txGtluxDKTxaMDzUduGP0wdfng24y1rygUMnmlUJ88fzCCULCLn7oE5kb2+tRB+MWq1QDZT6ObT5RrR8HFRKqg==", "cpu": [ "x64" ], @@ -1694,10 +1719,24 @@ "linux" ] }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.53.5.tgz", + "integrity": "sha512-3DFiLPnTxiOQV993fMc+KO8zXHTcIjgaInrqlG8zDp1TlhYl6WgrOHuJkJQ6M8zHEcntSJsUp1XFZSY8C1DYbg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, "node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.35.0.tgz", - "integrity": "sha512-OUOlGqPkVJCdJETKOCEf1mw848ZyJ5w50/rZ/3IBQVdLfR5jk/6Sr5m3iO2tdPgwo0x7VcncYuOvMhBWZq8ayg==", + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.53.5.tgz", + "integrity": "sha512-nggc/wPpNTgjGg75hu+Q/3i32R00Lq1B6N1DO7MCU340MRKL3WZJMjA9U4K4gzy3dkZPXm9E1Nc81FItBVGRlA==", "cpu": [ "arm64" ], @@ -1709,9 +1748,9 @@ ] }, "node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.35.0.tgz", - "integrity": "sha512-2/lsgejMrtwQe44glq7AFFHLfJBPafpsTa6JvP2NGef/ifOa4KBoglVf7AKN7EV9o32evBPRqfg96fEHzWo5kw==", + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.53.5.tgz", + "integrity": "sha512-U/54pTbdQpPLBdEzCT6NBCFAfSZMvmjr0twhnD9f4EIvlm9wy3jjQ38yQj1AGznrNO65EWQMgm/QUjuIVrYF9w==", "cpu": [ "ia32" ], @@ -1722,10 +1761,10 @@ "win32" ] }, - "node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.35.0.tgz", - "integrity": "sha512-PIQeY5XDkrOysbQblSW7v3l1MDZzkTEzAfTPkj5VAu3FW8fS4ynyLg2sINp0fp3SjZ8xkRYpLqoKcYqAkhU1dw==", + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.53.5.tgz", + "integrity": "sha512-2NqKgZSuLH9SXBBV2dWNRCZmocgSOx8OJSdpRaEcRlIfX8YrKxUT6z0F1NpvDVhOsl190UFTRh2F2WDWWCYp3A==", "cpu": [ "x64" ], @@ -1736,6 +1775,26 @@ "win32" ] }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.53.5.tgz", + "integrity": "sha512-JRpZUhCfhZ4keB5v0fe02gQJy05GqboPOaxvjugW04RLSYYoB/9t2lx2u/tMs/Na/1NXfY8QYjgRljRpN+MjTQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@standard-schema/utils": { + "version": "0.3.0", + "resolved": 
"https://registry.npmjs.org/@standard-schema/utils/-/utils-0.3.0.tgz", + "integrity": "sha512-e7Mew686owMaPJVNNLs55PUvgz371nKgwsc4vxE49zsODpJEnxgxRo2y/OKrqueavXgZNMDVj3DdHFlaSAeU8g==", + "license": "MIT" + }, "node_modules/@swc/core": { "name": "@swc/wasm", "version": "1.13.20", @@ -1745,9 +1804,9 @@ "license": "Apache-2.0" }, "node_modules/@swc/wasm": { - "version": "1.13.20", - "resolved": "https://registry.npmjs.org/@swc/wasm/-/wasm-1.13.20.tgz", - "integrity": "sha512-NJzN+QrbdwXeVTfTYiHkqv13zleOCQA52NXBOrwKvjxWJQecRqakjUhUP2z8lqs7eWVthko4Cilqs+VeBrwo3Q==", + "version": "1.15.7", + "resolved": "https://registry.npmjs.org/@swc/wasm/-/wasm-1.15.7.tgz", + "integrity": "sha512-m1Cslgkp7gFIUB2ZiIUHMoUskwxOAi9uaf27inoKb7Oc8MkMjt+eNTeSyeGckkwRtMQiybKYTGGnA5imxSsedQ==", "dev": true, "license": "Apache-2.0" }, @@ -1805,9 +1864,9 @@ } }, "node_modules/@testing-library/jest-dom": { - "version": "6.8.0", - "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-6.8.0.tgz", - "integrity": "sha512-WgXcWzVM6idy5JaftTVC8Vs83NKRmGJz4Hqs4oyOuO2J4r/y79vvKZsb+CaGyCSEbUPI6OsewfPd0G1A0/TUZQ==", + "version": "6.9.1", + "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-6.9.1.tgz", + "integrity": "sha512-zIcONa+hVtVSSep9UT3jZ5rizo2BsxgyDYU7WFD5eICBE7no3881HGeb/QkGfsJs6JTkY1aQhT7rIPC7e+0nnA==", "dev": true, "license": "MIT", "dependencies": { @@ -1832,9 +1891,9 @@ "license": "MIT" }, "node_modules/@testing-library/react": { - "version": "16.3.0", - "resolved": "https://registry.npmjs.org/@testing-library/react/-/react-16.3.0.tgz", - "integrity": "sha512-kFSyxiEDwv1WLl2fgsq6pPBbw5aWKrsY2/noi1Id0TK0UParSF62oFQFGHXIyaG4pp2tEub/Zlel+fjjZILDsw==", + "version": "16.3.1", + "resolved": "https://registry.npmjs.org/@testing-library/react/-/react-16.3.1.tgz", + "integrity": "sha512-gr4KtAWqIOQoucWYD/f6ki+j5chXfcPc74Col/6poTyqTmn7zRmodWahWRCp8tYd+GMqBonw6hstNzqjbs6gjw==", "dev": true, "license": "MIT", "dependencies": { @@ -1881,19 +1940,20 @@ "license": "MIT" }, "node_modules/@types/chai": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.2.tgz", - "integrity": "sha512-8kB30R7Hwqf40JPiKhVzodJs2Qc1ZJ5zuT3uzw5Hq/dhNCl3G3l83jfpdI1e20BP348+fV7VIL/+FxaXkqBmWg==", + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.3.tgz", + "integrity": "sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==", "dev": true, "license": "MIT", "dependencies": { - "@types/deep-eql": "*" + "@types/deep-eql": "*", + "assertion-error": "^2.0.1" } }, "node_modules/@types/d3-array": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.1.tgz", - "integrity": "sha512-Y2Jn2idRrLzUfAKV2LyRImR+y4oa2AntrgID95SHJxuMUrkNXmanDSed71sRNZysveJVt1hLLemQZIady0FpEg==", + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.2.tgz", + "integrity": "sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw==", "license": "MIT" }, "node_modules/@types/d3-color": { @@ -1961,22 +2021,12 @@ "license": "MIT" }, "node_modules/@types/estree": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", - "integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==", + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": 
"sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", "dev": true, "license": "MIT" }, - "node_modules/@types/hoist-non-react-statics": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/@types/hoist-non-react-statics/-/hoist-non-react-statics-3.3.6.tgz", - "integrity": "sha512-lPByRJUer/iN/xa4qpyL0qmL11DqNW81iU/IG1S3uvRUq4oKagz8VCxZjiWkumgt66YT3vOdDgZ0o32sGKtCEw==", - "license": "MIT", - "dependencies": { - "@types/react": "*", - "hoist-non-react-statics": "^3.3.0" - } - }, "node_modules/@types/json-schema": { "version": "7.0.15", "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", @@ -1991,28 +2041,29 @@ "license": "MIT" }, "node_modules/@types/react": { - "version": "19.1.16", - "resolved": "https://registry.npmjs.org/@types/react/-/react-19.1.16.tgz", - "integrity": "sha512-WBM/nDbEZmDUORKnh5i1bTnAz6vTohUf9b8esSMu+b24+srbaxa04UbJgWx78CVfNXA20sNu0odEIluZDFdCog==", + "version": "19.2.7", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.7.tgz", + "integrity": "sha512-MWtvHrGZLFttgeEj28VXHxpmwYbor/ATPYbBfSFZEIRK0ecCFLl2Qo55z52Hss+UV9CRN7trSeq1zbgx7YDWWg==", + "devOptional": true, "license": "MIT", "dependencies": { - "csstype": "^3.0.2" + "csstype": "^3.2.2" } }, "node_modules/@types/react-dom": { - "version": "19.1.9", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.1.9.tgz", - "integrity": "sha512-qXRuZaOsAdXKFyOhRBg6Lqqc0yay13vN7KrIg4L7N4aaHN68ma9OK3NE1BoDFgFOTfM7zg+3/8+2n8rLUH3OKQ==", + "version": "19.2.3", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.2.3.tgz", + "integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==", "dev": true, "license": "MIT", "peerDependencies": { - "@types/react": "^19.0.0" + "@types/react": "^19.2.0" } }, "node_modules/@videojs/http-streaming": { - "version": "3.17.0", - "resolved": "https://registry.npmjs.org/@videojs/http-streaming/-/http-streaming-3.17.0.tgz", - "integrity": "sha512-Ch1P3tvvIEezeZXyK11UfWgp4cWKX4vIhZ30baN/lRinqdbakZ5hiAI3pGjRy3d+q/Epyc8Csz5xMdKNNGYpcw==", + "version": "3.17.2", + "resolved": "https://registry.npmjs.org/@videojs/http-streaming/-/http-streaming-3.17.2.tgz", + "integrity": "sha512-VBQ3W4wnKnVKb/limLdtSD2rAd5cmHN70xoMf4OmuDd0t2kfJX04G+sfw6u2j8oOm2BXYM9E1f4acHruqKnM1g==", "license": "Apache-2.0", "dependencies": { "@babel/runtime": "^7.12.5", @@ -2058,13 +2109,13 @@ } }, "node_modules/@vitejs/plugin-react-swc": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/@vitejs/plugin-react-swc/-/plugin-react-swc-4.1.0.tgz", - "integrity": "sha512-Ff690TUck0Anlh7wdIcnsVMhofeEVgm44Y4OYdeeEEPSKyZHzDI9gfVBvySEhDfXtBp8tLCbfsVKPWEMEjq8/g==", + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react-swc/-/plugin-react-swc-4.2.2.tgz", + "integrity": "sha512-x+rE6tsxq/gxrEJN3Nv3dIV60lFflPj94c90b+NNo6n1QV1QQUTLoL0MpaOVasUZ0zqVBn7ead1B5ecx1JAGfA==", "dev": true, "license": "MIT", "dependencies": { - "@rolldown/pluginutils": "1.0.0-beta.35", + "@rolldown/pluginutils": "1.0.0-beta.47", "@swc/core": "^1.13.5" }, "engines": { @@ -2190,18 +2241,18 @@ } }, "node_modules/@xmldom/xmldom": { - "version": "0.8.10", - "resolved": "https://registry.npmjs.org/@xmldom/xmldom/-/xmldom-0.8.10.tgz", - "integrity": "sha512-2WALfTl4xo2SkGCYRt6rDTFfk9R1czmBvUQy12gK2KuRKIpWEhcbbzy8EZXtz/jkRqHX8bFEc6FC1HjX4TUWYw==", + "version": "0.8.11", + "resolved": 
"https://registry.npmjs.org/@xmldom/xmldom/-/xmldom-0.8.11.tgz", + "integrity": "sha512-cQzWCtO6C8TQiYl1ruKNn2U6Ao4o4WBBcbL61yJl84x+j5sOWWFU9X7DpND8XZG3daDppSsigMdfAIl2upQBRw==", "license": "MIT", "engines": { "node": ">=10.0.0" } }, "node_modules/acorn": { - "version": "8.14.1", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.1.tgz", - "integrity": "sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg==", + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", "dev": true, "license": "MIT", "bin": { @@ -2278,6 +2329,19 @@ "react-dom": "^17.0.0 || ^18.0.0 || ^19.0.0" } }, + "node_modules/allotment/node_modules/use-resize-observer": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/use-resize-observer/-/use-resize-observer-9.1.0.tgz", + "integrity": "sha512-R25VqO9Wb3asSD4eqtcxk8sJalvIOYBqS8MNZlpDSQ4l4xMQxC/J7Id9HoTqPq8FwULIn0PVW+OAqF2dyYbjow==", + "license": "MIT", + "dependencies": { + "@juggle/resize-observer": "^3.3.1" + }, + "peerDependencies": { + "react": "16.8.0 - 18", + "react-dom": "16.8.0 - 18" + } + }, "node_modules/ansi-regex": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", @@ -2373,9 +2437,9 @@ } }, "node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", "dev": true, "license": "MIT", "dependencies": { @@ -2495,12 +2559,16 @@ "license": "MIT" }, "node_modules/cookie": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.0.2.tgz", - "integrity": "sha512-9Kr/j4O16ISv8zBBhJoi4bXOYNTkFLOqSL3UDB0njXxCXNezjeyVrJyGOWtgfs/q2km1gwBcfH8q1yEGoMYunA==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.1.1.tgz", + "integrity": "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ==", "license": "MIT", "engines": { "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" } }, "node_modules/cosmiconfig": { @@ -2565,14 +2633,14 @@ "license": "MIT" }, "node_modules/cssstyle": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-5.3.1.tgz", - "integrity": "sha512-g5PC9Aiph9eiczFpcgUhd9S4UUO3F+LHGRIi5NUMZ+4xtoIYbHNZwZnWA2JsFGe8OU8nl4WyaEFiZuGuxlutJQ==", + "version": "5.3.5", + "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-5.3.5.tgz", + "integrity": "sha512-GlsEptulso7Jg0VaOZ8BXQi3AkYM5BOJKEO/rjMidSCq70FkIC5y0eawrCXeYzxgt3OCf4Ls+eoxN+/05vN0Ag==", "dev": true, "license": "MIT", "dependencies": { - "@asamuzakjp/css-color": "^4.0.3", - "@csstools/css-syntax-patches-for-csstree": "^1.0.14", + "@asamuzakjp/css-color": "^4.1.1", + "@csstools/css-syntax-patches-for-csstree": "^1.0.21", "css-tree": "^3.1.0" }, "engines": { @@ -2580,9 +2648,9 @@ } }, "node_modules/csstype": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", - "integrity": 
"sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==", + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", "license": "MIT" }, "node_modules/d3-array": { @@ -2721,9 +2789,9 @@ } }, "node_modules/dayjs": { - "version": "1.11.13", - "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.13.tgz", - "integrity": "sha512-oaMBel6gjolK862uaPQOVTA7q3TZhuSvuMQAAglQDOWYO9A91IrAOUJEyKVlqJlHE0vq5p5UXxzdPfMH/x6xNg==", + "version": "1.11.19", + "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.19.tgz", + "integrity": "sha512-t5EcLVS6QPBNqM2z8fakk/NKel+Xzshgt8FFKAn+qwlD1pzZWxh0nVCrvFK7ZDb6XucZeF9z8C7CBWTRIVApAw==", "license": "MIT" }, "node_modules/debug": { @@ -2773,15 +2841,6 @@ "dev": true, "license": "MIT" }, - "node_modules/deepmerge": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-2.2.1.tgz", - "integrity": "sha512-R9hc1Xa/NOBi9WRVUWg19rl1UB7Tt4kuPd+thNJgFZoxXsTz7ncaPaeIm+40oSGuP33DfMb4sZt1QIGiJzC4EA==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/dequal": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", @@ -2834,9 +2893,9 @@ } }, "node_modules/error-ex": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", - "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.4.tgz", + "integrity": "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==", "license": "MIT", "dependencies": { "is-arrayish": "^0.2.1" @@ -2856,9 +2915,9 @@ "license": "MIT" }, "node_modules/esbuild": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.1.tgz", - "integrity": "sha512-BGO5LtrGC7vxnqucAe/rmvKdJllfGaYWdyABvyMoXQlfYMb2bbRuReWR5tEGE//4LcNJj9XrkovTqNYRFZHAMQ==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.2.tgz", + "integrity": "sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw==", "dev": true, "hasInstallScript": true, "license": "MIT", @@ -2869,31 +2928,32 @@ "node": ">=18" }, "optionalDependencies": { - "@esbuild/aix-ppc64": "0.25.1", - "@esbuild/android-arm": "0.25.1", - "@esbuild/android-arm64": "0.25.1", - "@esbuild/android-x64": "0.25.1", - "@esbuild/darwin-arm64": "0.25.1", - "@esbuild/darwin-x64": "0.25.1", - "@esbuild/freebsd-arm64": "0.25.1", - "@esbuild/freebsd-x64": "0.25.1", - "@esbuild/linux-arm": "0.25.1", - "@esbuild/linux-arm64": "0.25.1", - "@esbuild/linux-ia32": "0.25.1", - "@esbuild/linux-loong64": "0.25.1", - "@esbuild/linux-mips64el": "0.25.1", - "@esbuild/linux-ppc64": "0.25.1", - "@esbuild/linux-riscv64": "0.25.1", - "@esbuild/linux-s390x": "0.25.1", - "@esbuild/linux-x64": "0.25.1", - "@esbuild/netbsd-arm64": "0.25.1", - "@esbuild/netbsd-x64": "0.25.1", - "@esbuild/openbsd-arm64": "0.25.1", - "@esbuild/openbsd-x64": "0.25.1", - "@esbuild/sunos-x64": "0.25.1", - "@esbuild/win32-arm64": "0.25.1", - "@esbuild/win32-ia32": "0.25.1", - "@esbuild/win32-x64": "0.25.1" + "@esbuild/aix-ppc64": "0.27.2", + "@esbuild/android-arm": "0.27.2", + "@esbuild/android-arm64": "0.27.2", + "@esbuild/android-x64": "0.27.2", + 
"@esbuild/darwin-arm64": "0.27.2", + "@esbuild/darwin-x64": "0.27.2", + "@esbuild/freebsd-arm64": "0.27.2", + "@esbuild/freebsd-x64": "0.27.2", + "@esbuild/linux-arm": "0.27.2", + "@esbuild/linux-arm64": "0.27.2", + "@esbuild/linux-ia32": "0.27.2", + "@esbuild/linux-loong64": "0.27.2", + "@esbuild/linux-mips64el": "0.27.2", + "@esbuild/linux-ppc64": "0.27.2", + "@esbuild/linux-riscv64": "0.27.2", + "@esbuild/linux-s390x": "0.27.2", + "@esbuild/linux-x64": "0.27.2", + "@esbuild/netbsd-arm64": "0.27.2", + "@esbuild/netbsd-x64": "0.27.2", + "@esbuild/openbsd-arm64": "0.27.2", + "@esbuild/openbsd-x64": "0.27.2", + "@esbuild/openharmony-arm64": "0.27.2", + "@esbuild/sunos-x64": "0.27.2", + "@esbuild/win32-arm64": "0.27.2", + "@esbuild/win32-ia32": "0.27.2", + "@esbuild/win32-x64": "0.27.2" } }, "node_modules/escape-string-regexp": { @@ -2909,33 +2969,32 @@ } }, "node_modules/eslint": { - "version": "9.22.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.22.0.tgz", - "integrity": "sha512-9V/QURhsRN40xuHXWjV64yvrzMjcz7ZyNoF2jJFmy9j/SLk0u1OLSZgXi28MrXjymnjEGSR80WCdab3RGMDveQ==", + "version": "9.39.2", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.39.2.tgz", + "integrity": "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==", "dev": true, "license": "MIT", "dependencies": { - "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/eslint-utils": "^4.8.0", "@eslint-community/regexpp": "^4.12.1", - "@eslint/config-array": "^0.19.2", - "@eslint/config-helpers": "^0.1.0", - "@eslint/core": "^0.12.0", - "@eslint/eslintrc": "^3.3.0", - "@eslint/js": "9.22.0", - "@eslint/plugin-kit": "^0.2.7", + "@eslint/config-array": "^0.21.1", + "@eslint/config-helpers": "^0.4.2", + "@eslint/core": "^0.17.0", + "@eslint/eslintrc": "^3.3.1", + "@eslint/js": "9.39.2", + "@eslint/plugin-kit": "^0.4.1", "@humanfs/node": "^0.16.6", "@humanwhocodes/module-importer": "^1.0.1", "@humanwhocodes/retry": "^0.4.2", "@types/estree": "^1.0.6", - "@types/json-schema": "^7.0.15", "ajv": "^6.12.4", "chalk": "^4.0.0", "cross-spawn": "^7.0.6", "debug": "^4.3.2", "escape-string-regexp": "^4.0.0", - "eslint-scope": "^8.3.0", - "eslint-visitor-keys": "^4.2.0", - "espree": "^10.3.0", + "eslint-scope": "^8.4.0", + "eslint-visitor-keys": "^4.2.1", + "espree": "^10.4.0", "esquery": "^1.5.0", "esutils": "^2.0.2", "fast-deep-equal": "^3.1.3", @@ -2983,9 +3042,9 @@ } }, "node_modules/eslint-plugin-react-refresh": { - "version": "0.4.19", - "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.19.tgz", - "integrity": "sha512-eyy8pcr/YxSYjBoqIFSrlbn9i/xvxUFa8CjzAYo9cFjgGXqq1hyjihcpZvxRLalpaWmueWR81xn7vuKmAFijDQ==", + "version": "0.4.26", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.26.tgz", + "integrity": "sha512-1RETEylht2O6FM/MvgnyvT+8K21wLqDNg4qD51Zj3guhjt433XbnnkVttHMyaVyAFD03QSV4LPS5iE3VQmO7XQ==", "dev": true, "license": "MIT", "peerDependencies": { @@ -2993,9 +3052,9 @@ } }, "node_modules/eslint-scope": { - "version": "8.3.0", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.3.0.tgz", - "integrity": "sha512-pUNxi75F8MJ/GdeKtVLSbYg4ZI34J6C0C7sbL4YOp2exGwen7ZsuBqKzUhXd0qMQ362yET3z+uPwKeg/0C2XCQ==", + "version": "8.4.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz", + "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==", "dev": true, "license": 
"BSD-2-Clause", "dependencies": { @@ -3010,9 +3069,9 @@ } }, "node_modules/eslint-visitor-keys": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz", - "integrity": "sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw==", + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", "dev": true, "license": "Apache-2.0", "engines": { @@ -3023,15 +3082,15 @@ } }, "node_modules/espree": { - "version": "10.3.0", - "resolved": "https://registry.npmjs.org/espree/-/espree-10.3.0.tgz", - "integrity": "sha512-0QYC8b24HWY8zjRnDTL6RiHfDbAWn63qb4LMj1Z4b076A4une81+z03Kg7l7mn/48PUTqoLptSXez8oknU8Clg==", + "version": "10.4.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz", + "integrity": "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==", "dev": true, "license": "BSD-2-Clause", "dependencies": { - "acorn": "^8.14.0", + "acorn": "^8.15.0", "acorn-jsx": "^5.3.2", - "eslint-visitor-keys": "^4.2.0" + "eslint-visitor-keys": "^4.2.1" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -3103,9 +3162,9 @@ "license": "MIT" }, "node_modules/expect-type": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.2.2.tgz", - "integrity": "sha512-JhFGDVJ7tmDJItKhYgJCGLOWjuK9vPxiXoUFLwLDc99NlmklilbiQJwoctZtt13+xMw91MCk/REan6MWHqDjyA==", + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.3.0.tgz", + "integrity": "sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==", "dev": true, "license": "Apache-2.0", "engines": { @@ -3119,9 +3178,9 @@ "license": "MIT" }, "node_modules/fast-equals": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/fast-equals/-/fast-equals-5.3.2.tgz", - "integrity": "sha512-6rxyATwPCkaFIL3JLqw8qXqMpIZ942pTX/tbQFkRsDGblS8tNGtlUauA/+mt6RUfqn/4MoEr+WDkYoIQbibWuQ==", + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/fast-equals/-/fast-equals-5.4.0.tgz", + "integrity": "sha512-jt2DW/aNFNwke7AUd+Z+e6pz39KO5rzdbbFCg2sGafS4mk13MI7Z8O5z9cADNn5lhGODIgLwug6TZO2ctf7kcw==", "license": "MIT", "engines": { "node": ">=6.0.0" @@ -3228,31 +3287,6 @@ "dev": true, "license": "ISC" }, - "node_modules/formik": { - "version": "2.4.6", - "resolved": "https://registry.npmjs.org/formik/-/formik-2.4.6.tgz", - "integrity": "sha512-A+2EI7U7aG296q2TLGvNapDNTZp1khVt5Vk0Q/fyfSROss0V/V6+txt2aJnwEos44IxTCW/LYAi/zgWzlevj+g==", - "funding": [ - { - "type": "individual", - "url": "https://opencollective.com/formik" - } - ], - "license": "Apache-2.0", - "dependencies": { - "@types/hoist-non-react-statics": "^3.3.1", - "deepmerge": "^2.1.1", - "hoist-non-react-statics": "^3.3.0", - "lodash": "^4.17.21", - "lodash-es": "^4.17.21", - "react-fast-compare": "^2.0.1", - "tiny-warning": "^1.0.2", - "tslib": "^2.0.0" - }, - "peerDependencies": { - "react": ">=16.8.0" - } - }, "node_modules/fsevents": { "version": "2.3.3", "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", @@ -3345,9 +3379,9 @@ } }, "node_modules/hls.js": { - "version": "1.5.20", - "resolved": "https://registry.npmjs.org/hls.js/-/hls.js-1.5.20.tgz", - "integrity": 
"sha512-uu0VXUK52JhihhnN/MVVo1lvqNNuhoxkonqgO3IpjvQiGpJBdIXMGkofjQb/j9zvV7a1SW8U9g1FslWx/1HOiQ==", + "version": "1.6.15", + "resolved": "https://registry.npmjs.org/hls.js/-/hls.js-1.6.15.tgz", + "integrity": "sha512-E3a5VwgXimGHwpRGV+WxRTKeSp2DW5DI5MWv34ulL3t5UNmyJWCQ1KmLEHbYzcfThfXG8amBL+fCYPneGHC4VA==", "license": "Apache-2.0" }, "node_modules/hoist-non-react-statics": { @@ -3539,9 +3573,9 @@ "license": "MIT" }, "node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, "license": "MIT", "dependencies": { @@ -3552,22 +3586,22 @@ } }, "node_modules/jsdom": { - "version": "27.0.0", - "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-27.0.0.tgz", - "integrity": "sha512-lIHeR1qlIRrIN5VMccd8tI2Sgw6ieYXSVktcSHaNe3Z5nE/tcPQYQWOq00wxMvYOsz+73eAkNenVvmPC6bba9A==", + "version": "27.3.0", + "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-27.3.0.tgz", + "integrity": "sha512-GtldT42B8+jefDUC4yUKAvsaOrH7PDHmZxZXNgF2xMmymjUbRYJvpAybZAKEmXDGTM0mCsz8duOa4vTm5AY2Kg==", "dev": true, "license": "MIT", "dependencies": { - "@asamuzakjp/dom-selector": "^6.5.4", - "cssstyle": "^5.3.0", + "@acemir/cssom": "^0.9.28", + "@asamuzakjp/dom-selector": "^6.7.6", + "cssstyle": "^5.3.4", "data-urls": "^6.0.0", - "decimal.js": "^10.5.0", + "decimal.js": "^10.6.0", "html-encoding-sniffer": "^4.0.0", "http-proxy-agent": "^7.0.2", "https-proxy-agent": "^7.0.6", "is-potential-custom-element-name": "^1.0.1", - "parse5": "^7.3.0", - "rrweb-cssom": "^0.8.0", + "parse5": "^8.0.0", "saxes": "^6.0.0", "symbol-tree": "^3.2.4", "tough-cookie": "^6.0.0", @@ -3575,12 +3609,12 @@ "webidl-conversions": "^8.0.0", "whatwg-encoding": "^3.1.1", "whatwg-mimetype": "^4.0.0", - "whatwg-url": "^15.0.0", - "ws": "^8.18.2", + "whatwg-url": "^15.1.0", + "ws": "^8.18.3", "xml-name-validator": "^5.0.0" }, "engines": { - "node": ">=20" + "node": "^20.19.0 || ^22.12.0 || >=24.0.0" }, "peerDependencies": { "canvas": "^3.0.0" @@ -3691,12 +3725,6 @@ "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", "license": "MIT" }, - "node_modules/lodash-es": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz", - "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==", - "license": "MIT" - }, "node_modules/lodash.clamp": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/lodash.clamp/-/lodash.clamp-4.0.3.tgz", @@ -3743,11 +3771,11 @@ "license": "MIT" }, "node_modules/lru-cache": { - "version": "11.2.1", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.1.tgz", - "integrity": "sha512-r8LA6i4LP4EeWOhqBaZZjDWwehd1xUJPCJd9Sv300H0ZmcUER4+JPh7bqqZeqs1o5pgtgvXm+d9UGrB5zZGDiQ==", + "version": "11.2.4", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.4.tgz", + "integrity": "sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==", "dev": true, - "license": "ISC", + "license": "BlueOak-1.0.0", "engines": { "node": "20 || >=22" } @@ -3783,9 +3811,9 @@ } }, "node_modules/magic-string": { - "version": "0.30.19", - "resolved": 
"https://registry.npmjs.org/magic-string/-/magic-string-0.30.19.tgz", - "integrity": "sha512-2N21sPY9Ws53PZvsEpVtNuSW+ScYbQdp4b9qUaL+9QkHUrGFKo56Lg9Emg5s9V/qrtNBmiR01sYhUOwu3H+VOw==", + "version": "0.30.21", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", + "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", "dev": true, "license": "MIT", "dependencies": { @@ -3806,9 +3834,10 @@ "license": "MIT" }, "node_modules/min-document": { - "version": "2.19.0", - "resolved": "https://registry.npmjs.org/min-document/-/min-document-2.19.0.tgz", - "integrity": "sha512-9Wy1B3m3f66bPPmU5hdA4DR4PB2OfDU/+GS3yAB7IQozE3tqXaVv2zOjgla7MEGSRv95+ILmOuvhLkOK6wJtCQ==", + "version": "2.19.2", + "resolved": "https://registry.npmjs.org/min-document/-/min-document-2.19.2.tgz", + "integrity": "sha512-8S5I8db/uZN8r9HSLFVWPdJCvYOejMcEC82VIzNUc6Zkklf/d1gg2psfE79/vyhWOj4+J8MtwmoOz3TmvaGu5A==", + "license": "MIT", "dependencies": { "dom-walk": "^0.1.0" } @@ -3885,9 +3914,9 @@ } }, "node_modules/nanoid": { - "version": "3.3.9", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.9.tgz", - "integrity": "sha512-SppoicMGpZvbF1l3z4x7No3OlIjP7QJvC9XR7AhZr1kL133KHnKPztkKDc+Ir4aJ/1VhTySrtKhrsycmrMQfvg==", + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", "dev": true, "funding": [ { @@ -4000,9 +4029,9 @@ } }, "node_modules/parse5": { - "version": "7.3.0", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.3.0.tgz", - "integrity": "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==", + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-8.0.0.tgz", + "integrity": "sha512-9m4m5GSgXjL4AjumKzq1Fgfp3Z8rsvjRNbnkVwfu2ImRqE5D0LnY2QfDen18FSY9C573YU5XxSapdHZTZ2WolA==", "dev": true, "license": "MIT", "dependencies": { @@ -4096,9 +4125,9 @@ } }, "node_modules/postcss": { - "version": "8.5.3", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.3.tgz", - "integrity": "sha512-dle9A3yYxlBSrt8Fu+IpjGT8SY8hN0mlaA6GY8t0P5PjIOZemULz/E2Bnm/2dcUOena75OTNkHI76uZBNUUq3A==", + "version": "8.5.6", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", + "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", "dev": true, "funding": [ { @@ -4116,7 +4145,7 @@ ], "license": "MIT", "dependencies": { - "nanoid": "^3.3.8", + "nanoid": "^3.3.11", "picocolors": "^1.1.1", "source-map-js": "^1.2.1" }, @@ -4135,9 +4164,9 @@ } }, "node_modules/prettier": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.5.3.tgz", - "integrity": "sha512-QQtaxnoDJeAkDvDKWCLiwIXkTgRhwYDEQCghU9Z6q03iyek/rxRh/2lC3HB7P8sWT2xC/y5JDctPLBIGzHKbhw==", + "version": "3.7.4", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.7.4.tgz", + "integrity": "sha512-v6UNi1+3hSlVvv8fSaoUbggEM5VErKmmpGA7Pl3HF8V6uKY7rvClBOJlH6yNwQtfTueNkGVpOv/mtWL9L4bgRA==", "dev": true, "license": "MIT", "bin": { @@ -4222,33 +4251,33 @@ } }, "node_modules/react": { - "version": "19.1.1", - "resolved": "https://registry.npmjs.org/react/-/react-19.1.1.tgz", - "integrity": "sha512-w8nqGImo45dmMIfljjMwOGtbmC/mk4CMYhWIicdSflH91J9TyCyczcPFXJzrZ/ZXcgGRFeP6BU0BEJTw6tZdfQ==", + "version": "19.2.3", + "resolved": 
"https://registry.npmjs.org/react/-/react-19.2.3.tgz", + "integrity": "sha512-Ku/hhYbVjOQnXDZFv2+RibmLFGwFdeeKHFcOTlrt7xplBnya5OGn/hIRDsqDiSUcfORsDC7MPxwork8jBwsIWA==", "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/react-dom": { - "version": "19.1.1", - "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.1.1.tgz", - "integrity": "sha512-Dlq/5LAZgF0Gaz6yiqZCf6VCcZs1ghAJyrsu84Q/GT0gV+mCxbfmKNoGRKBYMJ8IEdGPqu49YWXD02GCknEDkw==", + "version": "19.2.3", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.3.tgz", + "integrity": "sha512-yELu4WmLPw5Mr/lmeEpox5rw3RETacE++JgHqQzd2dg+YbJuat3jH4ingc+WPZhxaoFzdv9y33G+F7Nl5O0GBg==", "license": "MIT", "dependencies": { - "scheduler": "^0.26.0" + "scheduler": "^0.27.0" }, "peerDependencies": { - "react": "^19.1.1" + "react": "^19.2.3" } }, "node_modules/react-draggable": { - "version": "4.4.6", - "resolved": "https://registry.npmjs.org/react-draggable/-/react-draggable-4.4.6.tgz", - "integrity": "sha512-LtY5Xw1zTPqHkVmtM3X8MUOxNDOUhv/khTgBgrUvwaS064bwVvxT+q5El0uUFNx5IEPKXuRejr7UqLwBIg5pdw==", + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/react-draggable/-/react-draggable-4.5.0.tgz", + "integrity": "sha512-VC+HBLEZ0XJxnOxVAZsdRi8rD04Iz3SiiKOoYzamjylUcju/hP9np/aZdLHf/7WOD268WMoNJMvYfB5yAK45cw==", "license": "MIT", "dependencies": { - "clsx": "^1.1.1", + "clsx": "^2.1.1", "prop-types": "^15.8.1" }, "peerDependencies": { @@ -4256,15 +4285,6 @@ "react-dom": ">= 16.3.0" } }, - "node_modules/react-draggable/node_modules/clsx": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/clsx/-/clsx-1.2.1.tgz", - "integrity": "sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, "node_modules/react-dropzone": { "version": "14.3.8", "resolved": "https://registry.npmjs.org/react-dropzone/-/react-dropzone-14.3.8.tgz", @@ -4282,11 +4302,21 @@ "react": ">= 16.8 || 18.0.0" } }, - "node_modules/react-fast-compare": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-2.0.4.tgz", - "integrity": "sha512-suNP+J1VU1MWFKcyt7RtjiSWUjvidmQSlqu+eHslq+342xCbGTYmC0mEhPCOHxlW0CywylOC1u2DFAT+bv4dBw==", - "license": "MIT" + "node_modules/react-hook-form": { + "version": "7.70.0", + "resolved": "https://registry.npmjs.org/react-hook-form/-/react-hook-form-7.70.0.tgz", + "integrity": "sha512-COOMajS4FI3Wuwrs3GPpi/Jeef/5W1DRR84Yl5/ShlT3dKVFUfoGiEZ/QE6Uw8P4T2/CLJdcTVYKvWBMQTEpvw==", + "license": "MIT", + "engines": { + "node": ">=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/react-hook-form" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17 || ^18 || ^19" + } }, "node_modules/react-is": { "version": "16.13.1", @@ -4327,9 +4357,9 @@ } }, "node_modules/react-remove-scroll": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.6.3.tgz", - "integrity": "sha512-pnAi91oOk8g8ABQKGF5/M9qxmmOPxaAnopyTHYfqYEwJhyFrbbBtHuSgtKEoH0jpcxx5o3hXqH1mNd9/Oi+8iQ==", + "version": "2.7.2", + "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.7.2.tgz", + "integrity": "sha512-Iqb9NjCCTt6Hf+vOdNIZGdTiH1QSqr27H/Ek9sv/a97gfueI/5h1s3yRi1nngzMUaOOToin5dI1dXKdXiF+u0Q==", "license": "MIT", "dependencies": { "react-remove-scroll-bar": "^2.3.7", @@ -4374,9 +4404,9 @@ } }, "node_modules/react-router": { - "version": "7.6.0", - "resolved": 
"https://registry.npmjs.org/react-router/-/react-router-7.6.0.tgz", - "integrity": "sha512-GGufuHIVCJDbnIAXP3P9Sxzq3UUsddG3rrI3ut1q6m0FI6vxVBF3JoPQ38+W/blslLH4a5Yutp8drkEpXoddGQ==", + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.11.0.tgz", + "integrity": "sha512-uI4JkMmjbWCZc01WVP2cH7ZfSzH91JAZUDd7/nIprDgWxBV1TkkmLToFh7EbMTcMak8URFRa2YoBL/W8GWnCTQ==", "license": "MIT", "dependencies": { "cookie": "^1.0.1", @@ -4396,12 +4426,12 @@ } }, "node_modules/react-router-dom": { - "version": "7.6.0", - "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.6.0.tgz", - "integrity": "sha512-DYgm6RDEuKdopSyGOWZGtDfSm7Aofb8CCzgkliTjtu/eDuB0gcsv6qdFhhi8HdtmA+KHkt5MfZ5K2PdzjugYsA==", + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.11.0.tgz", + "integrity": "sha512-e49Ir/kMGRzFOOrYQBdoitq3ULigw4lKbAyKusnvtDu2t4dBX4AGYPrzNvorXmVuOyeakai6FUPW5MmibvVG8g==", "license": "MIT", "dependencies": { - "react-router": "7.6.0" + "react-router": "7.11.0" }, "engines": { "node": ">=20.0.0" @@ -4536,9 +4566,9 @@ } }, "node_modules/recharts": { - "version": "2.15.1", - "resolved": "https://registry.npmjs.org/recharts/-/recharts-2.15.1.tgz", - "integrity": "sha512-v8PUTUlyiDe56qUj82w/EDVuzEFXwEHp9/xOowGAZwfLjB9uAy3GllQVIYMWF6nU+qibx85WF75zD7AjqoT54Q==", + "version": "2.15.4", + "resolved": "https://registry.npmjs.org/recharts/-/recharts-2.15.4.tgz", + "integrity": "sha512-UT/q6fwS3c1dHbXv2uFgYJ9BMFHu3fwnd7AYZaEQhXuYQ4hgsxLvsUXzGdKeZrW5xopzDCvuA2N41WJ88I7zIw==", "license": "MIT", "dependencies": { "clsx": "^2.0.0", @@ -4593,12 +4623,6 @@ "node": ">=8" } }, - "node_modules/regenerator-runtime": { - "version": "0.14.1", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", - "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==", - "license": "MIT" - }, "node_modules/require-from-string": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", @@ -4610,12 +4634,12 @@ } }, "node_modules/resolve": { - "version": "1.22.10", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz", - "integrity": "sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==", + "version": "1.22.11", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", + "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", "license": "MIT", "dependencies": { - "is-core-module": "^2.16.0", + "is-core-module": "^2.16.1", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" }, @@ -4639,13 +4663,13 @@ } }, "node_modules/rollup": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.35.0.tgz", - "integrity": "sha512-kg6oI4g+vc41vePJyO6dHt/yl0Rz3Thv0kJeVQ3D1kS3E5XSuKbPc29G4IpT/Kv1KQwgHVcN+HtyS+HYLNSvQg==", + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.53.5.tgz", + "integrity": "sha512-iTNAbFSlRpcHeeWu73ywU/8KuU/LZmNCSxp6fjQkJBD3ivUb8tpDrXhIxEzA05HlYMEwmtaUnb3RP+YNv162OQ==", "dev": true, "license": "MIT", "dependencies": { - "@types/estree": "1.0.6" + "@types/estree": "1.0.8" }, "bin": { "rollup": "dist/bin/rollup" @@ -4655,35 +4679,31 @@ "npm": ">=8.0.0" }, "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.35.0", - "@rollup/rollup-android-arm64": "4.35.0", - 
"@rollup/rollup-darwin-arm64": "4.35.0", - "@rollup/rollup-darwin-x64": "4.35.0", - "@rollup/rollup-freebsd-arm64": "4.35.0", - "@rollup/rollup-freebsd-x64": "4.35.0", - "@rollup/rollup-linux-arm-gnueabihf": "4.35.0", - "@rollup/rollup-linux-arm-musleabihf": "4.35.0", - "@rollup/rollup-linux-arm64-gnu": "4.35.0", - "@rollup/rollup-linux-arm64-musl": "4.35.0", - "@rollup/rollup-linux-loongarch64-gnu": "4.35.0", - "@rollup/rollup-linux-powerpc64le-gnu": "4.35.0", - "@rollup/rollup-linux-riscv64-gnu": "4.35.0", - "@rollup/rollup-linux-s390x-gnu": "4.35.0", - "@rollup/rollup-linux-x64-gnu": "4.35.0", - "@rollup/rollup-linux-x64-musl": "4.35.0", - "@rollup/rollup-win32-arm64-msvc": "4.35.0", - "@rollup/rollup-win32-ia32-msvc": "4.35.0", - "@rollup/rollup-win32-x64-msvc": "4.35.0", + "@rollup/rollup-android-arm-eabi": "4.53.5", + "@rollup/rollup-android-arm64": "4.53.5", + "@rollup/rollup-darwin-arm64": "4.53.5", + "@rollup/rollup-darwin-x64": "4.53.5", + "@rollup/rollup-freebsd-arm64": "4.53.5", + "@rollup/rollup-freebsd-x64": "4.53.5", + "@rollup/rollup-linux-arm-gnueabihf": "4.53.5", + "@rollup/rollup-linux-arm-musleabihf": "4.53.5", + "@rollup/rollup-linux-arm64-gnu": "4.53.5", + "@rollup/rollup-linux-arm64-musl": "4.53.5", + "@rollup/rollup-linux-loong64-gnu": "4.53.5", + "@rollup/rollup-linux-ppc64-gnu": "4.53.5", + "@rollup/rollup-linux-riscv64-gnu": "4.53.5", + "@rollup/rollup-linux-riscv64-musl": "4.53.5", + "@rollup/rollup-linux-s390x-gnu": "4.53.5", + "@rollup/rollup-linux-x64-gnu": "4.53.5", + "@rollup/rollup-linux-x64-musl": "4.53.5", + "@rollup/rollup-openharmony-arm64": "4.53.5", + "@rollup/rollup-win32-arm64-msvc": "4.53.5", + "@rollup/rollup-win32-ia32-msvc": "4.53.5", + "@rollup/rollup-win32-x64-gnu": "4.53.5", + "@rollup/rollup-win32-x64-msvc": "4.53.5", "fsevents": "~2.3.2" } }, - "node_modules/rrweb-cssom": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.8.0.tgz", - "integrity": "sha512-guoltQEx+9aMf2gDZ0s62EcV8lsXR+0w8915TC3ITdn2YueuNjdAYh/levpU9nFaoChh9RUS5ZdQMrKfVEN9tw==", - "dev": true, - "license": "MIT" - }, "node_modules/safer-buffer": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", @@ -4705,15 +4725,15 @@ } }, "node_modules/scheduler": { - "version": "0.26.0", - "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.26.0.tgz", - "integrity": "sha512-NlHwttCI/l5gCPR3D1nNXtWABUmBwvZpEQiD4IXSbIDq8BzLIK/7Ir5gTFSGZDUu37K5cMNp0hFtzO38sC7gWA==", + "version": "0.27.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz", + "integrity": "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==", "license": "MIT" }, "node_modules/set-cookie-parser": { - "version": "2.7.1", - "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.1.tgz", - "integrity": "sha512-IOc8uWeOZgnb3ptbCURJWNjWUPcO3ZnTTdzsurqERrP6nPyv+paC55vJM0LpOlT2ne+Ix+9+CRG1MNLlyZ4GjQ==", + "version": "2.7.2", + "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.2.tgz", + "integrity": "sha512-oeM1lpU/UvhTxw+g3cIfxXHyJRc/uidd3yK1P242gzHds0udQBYzs3y8j4gCCW+ZJ7ad0yctld8RYO+bdurlvw==", "license": "MIT" }, "node_modules/shebang-command": { @@ -4773,9 +4793,9 @@ "license": "MIT" }, "node_modules/std-env": { - "version": "3.9.0", - "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.9.0.tgz", - "integrity": 
"sha512-UGvjygr6F6tpH7o2qyqR6QYpwraIjKSdtzyBdyytFOHmPZY917kwdwLG0RbOjWOnKmnm3PeHjaoLLMie7kPLQw==", + "version": "3.10.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.10.0.tgz", + "integrity": "sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==", "dev": true, "license": "MIT" }, @@ -4806,9 +4826,9 @@ } }, "node_modules/strip-literal": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-3.0.0.tgz", - "integrity": "sha512-TcccoMhJOM3OebGhSBEmp3UZ2SfDMZUEBdRA/9ynfLi8yYajyWX3JiXArcJt4Umh4vISpspkQIY8ZZoCqjbviA==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-3.1.0.tgz", + "integrity": "sha512-8r3mkIM/2+PpjHoOtiAW8Rg3jJLHaV7xPwG+YRGrv6FP0wwk/toTpATxWYOW0BKdWwl82VT2tFYi5DlROa0Mxg==", "dev": true, "license": "MIT", "dependencies": { @@ -4864,9 +4884,9 @@ "license": "MIT" }, "node_modules/tabbable": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/tabbable/-/tabbable-6.2.0.tgz", - "integrity": "sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew==", + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/tabbable/-/tabbable-6.3.0.tgz", + "integrity": "sha512-EIHvdY5bPLuWForiR/AN2Bxngzpuwn1is4asboytXtpTgsArc+WmSJKVLlhdh71u7jFcryDqB2A8lQvj78MkyQ==", "license": "MIT" }, "node_modules/tiny-case": { @@ -4881,12 +4901,6 @@ "integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==", "license": "MIT" }, - "node_modules/tiny-warning": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", - "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==", - "license": "MIT" - }, "node_modules/tinybench": { "version": "2.9.0", "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", @@ -4949,22 +4963,22 @@ } }, "node_modules/tldts": { - "version": "7.0.15", - "resolved": "https://registry.npmjs.org/tldts/-/tldts-7.0.15.tgz", - "integrity": "sha512-heYRCiGLhtI+U/D0V8YM3QRwPfsLJiP+HX+YwiHZTnWzjIKC+ZCxQRYlzvOoTEc6KIP62B1VeAN63diGCng2hg==", + "version": "7.0.19", + "resolved": "https://registry.npmjs.org/tldts/-/tldts-7.0.19.tgz", + "integrity": "sha512-8PWx8tvC4jDB39BQw1m4x8y5MH1BcQ5xHeL2n7UVFulMPH/3Q0uiamahFJ3lXA0zO2SUyRXuVVbWSDmstlt9YA==", "dev": true, "license": "MIT", "dependencies": { - "tldts-core": "^7.0.15" + "tldts-core": "^7.0.19" }, "bin": { "tldts": "bin/cli.js" } }, "node_modules/tldts-core": { - "version": "7.0.15", - "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-7.0.15.tgz", - "integrity": "sha512-YBkp2VfS9VTRMPNL2PA6PMESmxV1JEVoAr5iBlZnB5JG3KUrWzNCB3yNNkRa2FZkqClaBgfNYCp8PgpYmpjkZw==", + "version": "7.0.19", + "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-7.0.19.tgz", + "integrity": "sha512-lJX2dEWx0SGH4O6p+7FPwYmJ/bu1JbcGJ8RLaG9b7liIgZ85itUVEPbMtWRVrde/0fnDPEPHW10ZsKW3kVsE9A==", "dev": true, "license": "MIT" }, @@ -5020,9 +5034,9 @@ } }, "node_modules/type-fest": { - "version": "4.37.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.37.0.tgz", - "integrity": "sha512-S/5/0kFftkq27FPNye0XM1e2NsnoD/3FS+pBmbjmmtLT6I+i344KoOf7pvXreaFsDamWeaJX55nczA1m5PsBDg==", + "version": "4.41.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", + "integrity": 
"sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", "license": "(MIT OR CC0-1.0)", "engines": { "node": ">=16" @@ -5107,19 +5121,6 @@ } } }, - "node_modules/use-resize-observer": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/use-resize-observer/-/use-resize-observer-9.1.0.tgz", - "integrity": "sha512-R25VqO9Wb3asSD4eqtcxk8sJalvIOYBqS8MNZlpDSQ4l4xMQxC/J7Id9HoTqPq8FwULIn0PVW+OAqF2dyYbjow==", - "license": "MIT", - "dependencies": { - "@juggle/resize-observer": "^3.3.1" - }, - "peerDependencies": { - "react": "16.8.0 - 18", - "react-dom": "16.8.0 - 18" - } - }, "node_modules/use-sidecar": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.3.tgz", @@ -5165,13 +5166,13 @@ } }, "node_modules/video.js": { - "version": "8.22.0", - "resolved": "https://registry.npmjs.org/video.js/-/video.js-8.22.0.tgz", - "integrity": "sha512-xge2kpjsvC0zgFJ1cqt+wTqsi21+huFswlonPFh7qiplypsb4FN/D2Rz6bWdG/S9eQaPHfWHsarmJL/7D3DHoA==", + "version": "8.23.4", + "resolved": "https://registry.npmjs.org/video.js/-/video.js-8.23.4.tgz", + "integrity": "sha512-qI0VTlYmKzEqRsz1Nppdfcaww4RSxZAq77z2oNSl3cNg2h6do5C8Ffl0KqWQ1OpD8desWXsCrde7tKJ9gGTEyQ==", "license": "Apache-2.0", "dependencies": { "@babel/runtime": "^7.12.5", - "@videojs/http-streaming": "^3.17.0", + "@videojs/http-streaming": "^3.17.2", "@videojs/vhs-utils": "^4.1.1", "@videojs/xhr": "2.7.0", "aes-decrypter": "^4.0.2", @@ -5216,24 +5217,24 @@ } }, "node_modules/vite": { - "version": "6.3.5", - "resolved": "https://registry.npmjs.org/vite/-/vite-6.3.5.tgz", - "integrity": "sha512-cZn6NDFE7wdTpINgs++ZJ4N49W2vRp8LCKrn3Ob1kYNtOo21vfDoaV5GzBfLU4MovSAB8uNRm4jgzVQZ+mBzPQ==", + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.0.tgz", + "integrity": "sha512-dZwN5L1VlUBewiP6H9s2+B3e3Jg96D0vzN+Ry73sOefebhYr9f94wwkMNN/9ouoU8pV1BqA1d1zGk8928cx0rg==", "dev": true, "license": "MIT", "dependencies": { - "esbuild": "^0.25.0", - "fdir": "^6.4.4", - "picomatch": "^4.0.2", - "postcss": "^8.5.3", - "rollup": "^4.34.9", - "tinyglobby": "^0.2.13" + "esbuild": "^0.27.0", + "fdir": "^6.5.0", + "picomatch": "^4.0.3", + "postcss": "^8.5.6", + "rollup": "^4.43.0", + "tinyglobby": "^0.2.15" }, "bin": { "vite": "bin/vite.js" }, "engines": { - "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + "node": "^20.19.0 || >=22.12.0" }, "funding": { "url": "https://github.com/vitejs/vite?sponsor=1" @@ -5242,14 +5243,14 @@ "fsevents": "~2.3.3" }, "peerDependencies": { - "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", + "@types/node": "^20.19.0 || >=22.12.0", "jiti": ">=1.21.0", - "less": "*", + "less": "^4.0.0", "lightningcss": "^1.21.0", - "sass": "*", - "sass-embedded": "*", - "stylus": "*", - "sugarss": "*", + "sass": "^1.70.0", + "sass-embedded": "^1.70.0", + "stylus": ">=0.54.8", + "sugarss": "^5.0.0", "terser": "^5.16.0", "tsx": "^4.8.1", "yaml": "^2.4.2" @@ -5411,8 +5412,7 @@ }, "node_modules/webworkify-webpack": { "version": "2.1.5", - "resolved": "git+ssh://git@github.com/xqq/webworkify-webpack.git", - "integrity": "sha512-W8Bg+iLq52d2GFvwabPNCIDCgMHcW3g68Tr8zwpJliEz2cKBIKYL3T0VdYeZWhz5rOxWRBBEdF931fquSO6iCQ==", + "resolved": "git+ssh://git@github.com/xqq/webworkify-webpack.git#24d1e719b4a6cac37a518b2bb10fe124527ef4ef", "license": "MIT" }, "node_modules/whatwg-encoding": { @@ -5535,9 +5535,9 @@ "license": "MIT" }, "node_modules/yaml": { - "version": "2.8.1", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.8.1.tgz", - "integrity": 
"sha512-lcYcMxX2PO9XMGvAJkJ3OsNMw+/7FKes7/hgerGUYWIoWu5j/+YQqcZr5JnPZWzOsEBgMbSbiSTn/dv/69Mkpw==", + "version": "2.8.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.8.2.tgz", + "integrity": "sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A==", "dev": true, "license": "ISC", "optional": true, @@ -5547,6 +5547,9 @@ }, "engines": { "node": ">= 14.6" + }, + "funding": { + "url": "https://github.com/sponsors/eemeli" } }, "node_modules/yocto-queue": { @@ -5563,9 +5566,9 @@ } }, "node_modules/yup": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/yup/-/yup-1.6.1.tgz", - "integrity": "sha512-JED8pB50qbA4FOkDol0bYF/p60qSEDQqBD0/qeIrUCG1KbPBIQ776fCUNb9ldbPcSTxA69g/47XTo4TqWiuXOA==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/yup/-/yup-1.7.1.tgz", + "integrity": "sha512-GKHFX2nXul2/4Dtfxhozv701jLQHdf6J34YDh2cEkpqoo8le5Mg6/LrdseVLrFarmFygZTlfIhHx/QKfb/QWXw==", "license": "MIT", "dependencies": { "property-expr": "^2.0.5", @@ -5587,9 +5590,9 @@ } }, "node_modules/zustand": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/zustand/-/zustand-5.0.3.tgz", - "integrity": "sha512-14fwWQtU3pH4dE0dOpdMiWjddcH+QzKIgk1cl8epwSE7yag43k/AD/m4L6+K7DytAOr9gGBe3/EXj9g7cdostg==", + "version": "5.0.9", + "resolved": "https://registry.npmjs.org/zustand/-/zustand-5.0.9.tgz", + "integrity": "sha512-ALBtUj0AfjJt3uNRQoL1tL2tMvj6Gp/6e39dnfT6uzpelGru8v1tPOGBzayOWbPJvujM8JojDk3E1LxeFisBNg==", "license": "MIT", "engines": { "node": ">=12.20.0" diff --git a/frontend/package.json b/frontend/package.json index fea6b73e..7b2d5927 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -23,11 +23,12 @@ "@mantine/form": "~8.0.1", "@mantine/hooks": "~8.0.1", "@mantine/notifications": "~8.0.1", + "@hookform/resolvers": "^5.2.2", "@tanstack/react-table": "^8.21.2", "allotment": "^1.20.4", "dayjs": "^1.11.13", - "formik": "^2.4.6", "hls.js": "^1.5.20", + "react-hook-form": "^7.70.0", "lucide-react": "^0.511.0", "mpegts.js": "^1.8.0", "react": "^19.1.0", @@ -54,18 +55,21 @@ "@types/react": "^19.1.0", "@types/react-dom": "^19.1.0", "@vitejs/plugin-react-swc": "^4.1.0", - "eslint": "^9.21.0", + "eslint": "^9.27.0", "eslint-plugin-react-hooks": "^5.1.0", "eslint-plugin-react-refresh": "^0.4.19", "globals": "^15.15.0", "jsdom": "^27.0.0", "prettier": "^3.5.3", - "vite": "^6.2.0", + "vite": "^7.1.7", "vitest": "^3.2.4" }, "resolutions": { "vite": "7.1.7", "react": "19.1.0", "react-dom": "19.1.0" + }, + "overrides": { + "js-yaml": "^4.1.1" } } diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx index 4b701533..f22d408f 100644 --- a/frontend/src/App.jsx +++ b/frontend/src/App.jsx @@ -19,7 +19,6 @@ import Users from './pages/Users'; import LogosPage from './pages/Logos'; import VODsPage from './pages/VODs'; import useAuthStore from './store/auth'; -import useLogosStore from './store/logos'; import FloatingVideo from './components/FloatingVideo'; import { WebsocketProvider } from './WebSocket'; import { Box, AppShell, MantineProvider } from '@mantine/core'; @@ -40,8 +39,6 @@ const defaultRoute = '/channels'; const App = () => { const [open, setOpen] = useState(true); - const [backgroundLoadingStarted, setBackgroundLoadingStarted] = - useState(false); const isAuthenticated = useAuthStore((s) => s.isAuthenticated); const setIsAuthenticated = useAuthStore((s) => s.setIsAuthenticated); const logout = useAuthStore((s) => s.logout); @@ -81,11 +78,7 @@ const App = () => { const loggedIn = await initializeAuth(); if (loggedIn) { 
await initData(); - // Start background logo loading after app is fully initialized (only once) - if (!backgroundLoadingStarted) { - setBackgroundLoadingStarted(true); - useLogosStore.getState().startBackgroundLoading(); - } + // Logos are now loaded at the end of initData, no need for background loading } else { await logout(); } @@ -96,7 +89,7 @@ const App = () => { }; checkAuth(); - }, [initializeAuth, initData, logout, backgroundLoadingStarted]); + }, [initializeAuth, initData, logout]); return ( { height: 0, }} navbar={{ - width: open ? drawerWidth : miniDrawerWidth, + width: isAuthenticated + ? open + ? drawerWidth + : miniDrawerWidth + : 0, }} > - + {isAuthenticated && ( + + )} { break; case 'epg_refresh': - // Update the store with progress information - updateEPGProgress(parsedEvent.data); - - // If we have source_id/account info, update the EPG source status - if (parsedEvent.data.source_id || parsedEvent.data.account) { + // If we have source/account info, check if EPG exists before processing + if (parsedEvent.data.source || parsedEvent.data.account) { const sourceId = - parsedEvent.data.source_id || parsedEvent.data.account; + parsedEvent.data.source || parsedEvent.data.account; const epg = epgs[sourceId]; + // Only update progress if the EPG still exists in the store + // This prevents crashes when receiving updates for deleted EPGs + if (epg) { + // Update the store with progress information + updateEPGProgress(parsedEvent.data); + } else { + // EPG was deleted, ignore this update + console.debug( + `Ignoring EPG refresh update for deleted EPG ${sourceId}` + ); + break; + } + if (epg) { // Check for any indication of an error (either via status or error field) const hasError = @@ -613,6 +623,10 @@ export const WebsocketProvider = ({ children }) => { status: parsedEvent.data.status || 'success', last_message: parsedEvent.data.message || epg.last_message, + // Use the timestamp from the backend if provided + ...(parsedEvent.data.updated_at && { + updated_at: parsedEvent.data.updated_at, + }), }); // Only show success notification if we've finished parsing programs and had no errors @@ -742,6 +756,7 @@ export const WebsocketProvider = ({ children }) => { try { await API.requeryChannels(); await useChannelsStore.getState().fetchChannels(); + await fetchChannelProfiles(); console.log('Channels refreshed after bulk creation'); } catch (error) { console.error( diff --git a/frontend/src/api.js b/frontend/src/api.js index 5b80a3f7..c33ff1ee 100644 --- a/frontend/src/api.js +++ b/frontend/src/api.js @@ -170,7 +170,7 @@ export default class API { static async logout() { return await request(`${host}/api/accounts/auth/logout/`, { - auth: false, + auth: true, // Send JWT token so backend can identify the user method: 'POST', }); } @@ -336,6 +336,15 @@ export default class API { delete channelData.channel_number; } + // Add channel profile IDs based on current selection + const selectedProfileId = useChannelsStore.getState().selectedProfileId; + if (selectedProfileId && selectedProfileId !== '0') { + // Specific profile selected - add only to that profile + channelData.channel_profile_ids = [parseInt(selectedProfileId)]; + } + // If selectedProfileId is '0' or not set, don't include channel_profile_ids + // which will trigger the backend's default behavior of adding to all profiles + if (channel.logo_file) { // Must send FormData for file upload body = new FormData(); @@ -462,7 +471,16 @@ export default class API { } ); - // Don't automatically update the store here - let the caller 
handle it + // Show success notification + if (response.message) { + notifications.show({ + title: 'Channels Updated', + message: response.message, + color: 'green', + autoClose: 4000, + }); + } + return response; } catch (e) { errorNotification('Failed to update channels', e); @@ -1044,8 +1062,20 @@ export default class API { } static async updateEPG(values, isToggle = false) { + // Validate that values is an object + if (!values || typeof values !== 'object') { + console.error('updateEPG called with invalid values:', values); + return; + } + const { id, ...payload } = values; + // Validate that we have an ID and payload is an object + if (!id || typeof payload !== 'object') { + console.error('updateEPG: invalid id or payload', { id, payload }); + return; + } + try { // If this is just toggling the active state, make a simpler request if ( @@ -1328,6 +1358,183 @@ export default class API { } } + // Backup API (async with Celery task polling) + static async listBackups() { + try { + const response = await request(`${host}/api/backups/`); + return response || []; + } catch (e) { + errorNotification('Failed to load backups', e); + throw e; + } + } + + static async getBackupStatus(taskId, token = null) { + try { + let url = `${host}/api/backups/status/${taskId}/`; + if (token) { + url += `?token=${encodeURIComponent(token)}`; + } + const response = await request(url, { auth: !token }); + return response; + } catch (e) { + throw e; + } + } + + static async waitForBackupTask(taskId, onProgress, token = null) { + const pollInterval = 2000; // Poll every 2 seconds + const maxAttempts = 300; // Max 10 minutes (300 * 2s) + + for (let attempt = 0; attempt < maxAttempts; attempt++) { + try { + const status = await API.getBackupStatus(taskId, token); + + if (onProgress) { + onProgress(status); + } + + if (status.state === 'completed') { + return status.result; + } else if (status.state === 'failed') { + throw new Error(status.error || 'Task failed'); + } + } catch (e) { + throw e; + } + + // Wait before next poll + await new Promise((resolve) => setTimeout(resolve, pollInterval)); + } + + throw new Error('Task timed out'); + } + + static async createBackup(onProgress) { + try { + // Start the backup task + const response = await request(`${host}/api/backups/create/`, { + method: 'POST', + }); + + // Wait for the task to complete using token for auth + const result = await API.waitForBackupTask(response.task_id, onProgress, response.task_token); + return result; + } catch (e) { + errorNotification('Failed to create backup', e); + throw e; + } + } + + static async uploadBackup(file) { + try { + const formData = new FormData(); + formData.append('file', file); + + const response = await request( + `${host}/api/backups/upload/`, + { + method: 'POST', + body: formData, + } + ); + return response; + } catch (e) { + errorNotification('Failed to upload backup', e); + throw e; + } + } + + static async deleteBackup(filename) { + try { + const encodedFilename = encodeURIComponent(filename); + await request(`${host}/api/backups/${encodedFilename}/delete/`, { + method: 'DELETE', + }); + } catch (e) { + errorNotification('Failed to delete backup', e); + throw e; + } + } + + static async getDownloadToken(filename) { + // Get a download token from the server + try { + const response = await request(`${host}/api/backups/${encodeURIComponent(filename)}/download-token/`); + return response.token; + } catch (e) { + throw e; + } + } + + static async downloadBackup(filename) { + try { + // Get a download token first 
(requires auth) + const token = await API.getDownloadToken(filename); + const encodedFilename = encodeURIComponent(filename); + + // Build the download URL with token + const downloadUrl = `${host}/api/backups/${encodedFilename}/download/?token=${encodeURIComponent(token)}`; + + // Use direct browser navigation instead of fetch to avoid CORS issues + const link = document.createElement('a'); + link.href = downloadUrl; + link.download = filename; + document.body.appendChild(link); + link.click(); + document.body.removeChild(link); + + return { filename }; + } catch (e) { + errorNotification('Failed to download backup', e); + throw e; + } + } + + static async restoreBackup(filename, onProgress) { + try { + // Start the restore task + const encodedFilename = encodeURIComponent(filename); + const response = await request( + `${host}/api/backups/${encodedFilename}/restore/`, + { + method: 'POST', + } + ); + + // Wait for the task to complete using token for auth + // Token-based auth allows status polling even after DB restore invalidates user sessions + const result = await API.waitForBackupTask(response.task_id, onProgress, response.task_token); + return result; + } catch (e) { + errorNotification('Failed to restore backup', e); + throw e; + } + } + + static async getBackupSchedule() { + try { + const response = await request(`${host}/api/backups/schedule/`); + return response; + } catch (e) { + errorNotification('Failed to get backup schedule', e); + throw e; + } + } + + static async updateBackupSchedule(settings) { + try { + const response = await request(`${host}/api/backups/schedule/update/`, { + method: 'PUT', + body: settings, + }); + return response; + } catch (e) { + errorNotification('Failed to update backup schedule', e); + throw e; + } + } + static async getVersion() { try { const response = await request(`${host}/api/core/version/`, { @@ -1493,6 +1700,19 @@ export default class API { } } + static async stopVODClient(clientId) { + try { + const response = await request(`${host}/proxy/vod/stop_client/`, { + method: 'POST', + body: { client_id: clientId }, + }); + + return response; + } catch (e) { + errorNotification('Failed to stop VOD client', e); + } + } + static async stopChannel(id) { try { const response = await request(`${host}/proxy/ts/stop/${id}`, { @@ -1788,6 +2008,77 @@ export default class API { } } + // VOD Logo Methods + static async getVODLogos(params = {}) { + try { + // Transform usage filter to match backend expectations + const apiParams = { ...params }; + if (apiParams.usage === 'used') { + apiParams.used = 'true'; + delete apiParams.usage; + } else if (apiParams.usage === 'unused') { + apiParams.used = 'false'; + delete apiParams.usage; + } else if (apiParams.usage === 'movies') { + apiParams.used = 'movies'; + delete apiParams.usage; + } else if (apiParams.usage === 'series') { + apiParams.used = 'series'; + delete apiParams.usage; + } + + const queryParams = new URLSearchParams(apiParams); + const response = await request( + `${host}/api/vod/vodlogos/?${queryParams.toString()}` + ); + + return response; + } catch (e) { + errorNotification('Failed to retrieve VOD logos', e); + throw e; + } + } + + static async deleteVODLogo(id) { + try { + await request(`${host}/api/vod/vodlogos/${id}/`, { + method: 'DELETE', + }); + + return true; + } catch (e) { + errorNotification('Failed to delete VOD logo', e); + throw e; + } + } + + static async deleteVODLogos(ids) { + try { + await request(`${host}/api/vod/vodlogos/bulk-delete/`, { + method: 'DELETE', + body: { logo_ids: 
ids }, + }); + + return true; + } catch (e) { + errorNotification('Failed to delete VOD logos', e); + throw e; + } + } + + static async cleanupUnusedVODLogos() { + try { + const response = await request(`${host}/api/vod/vodlogos/cleanup/`, { + method: 'POST', + }); + + return response; + } catch (e) { + errorNotification('Failed to cleanup unused VOD logos', e); + throw e; + } + } + static async getChannelProfiles() { try { const response = await request(`${host}/api/channels/profiles/`); @@ -1830,6 +2121,24 @@ export default class API { } } + static async duplicateChannelProfile(id, name) { + try { + const response = await request( + `${host}/api/channels/profiles/${id}/duplicate/`, + { + method: 'POST', + body: { name }, + } + ); + + useChannelsStore.getState().addProfile(response); + + return response; + } catch (e) { + errorNotification(`Failed to duplicate channel profile ${id}`, e); + } + } + static async deleteChannelProfile(id) { try { await request(`${host}/api/channels/profiles/${id}/`, { @@ -2039,7 +2348,8 @@ export default class API { static async deleteSeriesRule(tvgId) { try { - await request(`${host}/api/channels/series-rules/${tvgId}/`, { method: 'DELETE' }); + const encodedTvgId = encodeURIComponent(tvgId); + await request(`${host}/api/channels/series-rules/${encodedTvgId}/`, { method: 'DELETE' }); notifications.show({ title: 'Series rule removed' }); } catch (e) { errorNotification('Failed to remove series rule', e); @@ -2132,9 +2442,15 @@ export default class API { // If successful, requery channels to update UI if (response.success) { + // Build message based on whether EPG sources need refreshing + let message = `Updated ${response.channels_updated} channel${response.channels_updated !== 1 ? 's' : ''}`; + if (response.programs_refreshed > 0) { + message += `, refreshing ${response.programs_refreshed} EPG source${response.programs_refreshed !== 1 ? 
's' : ''}`; + } + notifications.show({ title: 'EPG Association', - message: `Updated ${response.channels_updated} channels, refreshing ${response.programs_refreshed} EPG sources.`, + message: message, color: 'blue', }); @@ -2395,4 +2711,21 @@ export default class API { errorNotification('Failed to update playback position', e); } } + + static async getSystemEvents(limit = 100, offset = 0, eventType = null) { + try { + const params = new URLSearchParams(); + params.append('limit', limit); + params.append('offset', offset); + if (eventType) { + params.append('event_type', eventType); + } + const response = await request( + `${host}/api/core/system-events/?${params.toString()}` + ); + return response; + } catch (e) { + errorNotification('Failed to retrieve system events', e); + } + } } diff --git a/frontend/src/components/ConfirmationDialog.jsx b/frontend/src/components/ConfirmationDialog.jsx index 73805513..94fb169c 100644 --- a/frontend/src/components/ConfirmationDialog.jsx +++ b/frontend/src/components/ConfirmationDialog.jsx @@ -16,6 +16,7 @@ import useWarningsStore from '../store/warnings'; * @param {string} props.actionKey - Unique key for this type of action (used for suppression) * @param {Function} props.onSuppressChange - Called when "don't show again" option changes * @param {string} [props.size='md'] - Size of the modal + * @param {boolean} [props.loading=false] - Whether the confirm button should show loading state */ const ConfirmationDialog = ({ opened, @@ -31,6 +32,7 @@ const ConfirmationDialog = ({ zIndex = 1000, showDeleteFileOption = false, deleteFileLabel = 'Also delete files from disk', + loading = false, }) => { const suppressWarning = useWarningsStore((s) => s.suppressWarning); const isWarningSuppressed = useWarningsStore((s) => s.isWarningSuppressed); @@ -93,10 +95,16 @@ const ConfirmationDialog = ({ )} - - diff --git a/frontend/src/components/ErrorBoundary.jsx b/frontend/src/components/ErrorBoundary.jsx new file mode 100644 index 00000000..60c4ba38 --- /dev/null +++ b/frontend/src/components/ErrorBoundary.jsx @@ -0,0 +1,18 @@ +import React from 'react'; + +class ErrorBoundary extends React.Component { + state = { hasError: false }; + + static getDerivedStateFromError(error) { + return { hasError: true }; + } + + render() { + if (this.state.hasError) { + return
<div>Something went wrong</div>
; + } + return this.props.children; + } +} + +export default ErrorBoundary; \ No newline at end of file diff --git a/frontend/src/components/Field.jsx b/frontend/src/components/Field.jsx new file mode 100644 index 00000000..1293bf7b --- /dev/null +++ b/frontend/src/components/Field.jsx @@ -0,0 +1,47 @@ +import { NumberInput, Select, Switch, TextInput } from '@mantine/core'; +import React from 'react'; + +export const Field = ({ field, value, onChange }) => { + const common = { label: field.label, description: field.help_text }; + const effective = value ?? field.default; + switch (field.type) { + case 'boolean': + return ( + onChange(field.id, e.currentTarget.checked)} + label={field.label} + description={field.help_text} + /> + ); + case 'number': + return ( + onChange(field.id, v)} + {...common} + /> + ); + case 'select': + return ( + setEventsRefreshInterval(parseInt(value))} + data={[ + { value: '0', label: 'Manual' }, + { value: '5', label: '5s' }, + { value: '10', label: '10s' }, + { value: '30', label: '30s' }, + { value: '60', label: '1m' }, + ]} + style={{ width: 120 }} + /> + + + )} + setIsExpanded(!isExpanded)} + > + + + + + + {isExpanded && ( + <> + {totalEvents > eventsLimit && ( + + + Showing {offset + 1}- + {Math.min(offset + eventsLimit, totalEvents)} of {totalEvents} + + + + )} + + {events.length === 0 ? ( + + No events recorded yet + + ) : ( + events.map((event) => ( + + + + + {getEventIcon(event.event_type)} + + + + + {event.event_type_display || event.event_type} + + {event.channel_name && ( + + {event.channel_name} + + )} + + {event.details && + Object.keys(event.details).length > 0 && ( + + {Object.entries(event.details) + .filter( + ([key]) => + !['stream_url', 'new_url'].includes(key) + ) + .map(([key, value]) => `${key}: ${value}`) + .join(', ')} + + )} + + + + {dayjs(event.timestamp).format(`${dateFormat} HH:mm:ss`)} + + + + )) + )} + + + )} + + ); +}; + +export default SystemEvents; diff --git a/frontend/src/components/VODModal.jsx b/frontend/src/components/VODModal.jsx index 90fd3fad..7df90ec0 100644 --- a/frontend/src/components/VODModal.jsx +++ b/frontend/src/components/VODModal.jsx @@ -13,7 +13,9 @@ import { Stack, Modal, } from '@mantine/core'; -import { Play } from 'lucide-react'; +import { Play, Copy } from 'lucide-react'; +import { notifications } from '@mantine/notifications'; +import { copyToClipboard } from '../utils'; import useVODStore from '../store/useVODStore'; import useVideoStore from '../store/useVideoStore'; import useSettingsStore from '../store/settings'; @@ -232,9 +234,9 @@ const VODModal = ({ vod, opened, onClose }) => { } }, [opened]); - const handlePlayVOD = () => { + const getStreamUrl = () => { const vodToPlay = detailedVOD || vod; - if (!vodToPlay) return; + if (!vodToPlay) return null; let streamUrl = `/proxy/vod/movie/${vod.uuid}`; @@ -253,9 +255,29 @@ const VODModal = ({ vod, opened, onClose }) => { } else { streamUrl = `${window.location.origin}${streamUrl}`; } + return streamUrl; + }; + + const handlePlayVOD = () => { + const streamUrl = getStreamUrl(); + if (!streamUrl) return; + const vodToPlay = detailedVOD || vod; showVideo(streamUrl, 'vod', vodToPlay); }; + const handleCopyLink = async () => { + const streamUrl = getStreamUrl(); + if (!streamUrl) return; + const success = await copyToClipboard(streamUrl); + notifications.show({ + title: success ? 'Link Copied!' : 'Copy Failed', + message: success + ? 'Stream link copied to clipboard' + : 'Failed to copy link to clipboard', + color: success ? 
'green' : 'red', + }); + }; + // Helper to get embeddable YouTube URL const getEmbedUrl = (url) => { if (!url) return ''; @@ -486,6 +508,16 @@ const VODModal = ({ vod, opened, onClose }) => { Watch Trailer )} + @@ -662,7 +694,8 @@ const VODModal = ({ vod, opened, onClose }) => { src={trailerUrl} title="YouTube Trailer" frameBorder="0" - allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" + allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" + referrerPolicy="strict-origin-when-cross-origin" allowFullScreen style={{ position: 'absolute', diff --git a/frontend/src/components/backups/BackupManager.jsx b/frontend/src/components/backups/BackupManager.jsx new file mode 100644 index 00000000..dc130254 --- /dev/null +++ b/frontend/src/components/backups/BackupManager.jsx @@ -0,0 +1,978 @@ +import { useEffect, useMemo, useState } from 'react'; +import { + ActionIcon, + Box, + Button, + FileInput, + Flex, + Group, + Loader, + Modal, + NumberInput, + Paper, + Select, + Stack, + Switch, + Text, + TextInput, + Tooltip, +} from '@mantine/core'; +import { + Download, + RefreshCcw, + RotateCcw, + SquareMinus, + SquarePlus, + UploadCloud, +} from 'lucide-react'; +import { notifications } from '@mantine/notifications'; +import dayjs from 'dayjs'; + +import API from '../../api'; +import ConfirmationDialog from '../ConfirmationDialog'; +import useLocalStorage from '../../hooks/useLocalStorage'; +import useWarningsStore from '../../store/warnings'; +import { CustomTable, useTable } from '../tables/CustomTable'; + +const RowActions = ({ + row, + handleDownload, + handleRestoreClick, + handleDeleteClick, + downloading, +}) => { + return ( + + + handleDownload(row.original.name)} + loading={downloading === row.original.name} + disabled={downloading !== null} + > + + + + + handleRestoreClick(row.original)} + > + + + + + handleDeleteClick(row.original)} + > + + + + + ); +}; + +// Convert 24h time string to 12h format with period +function to12Hour(time24) { + if (!time24) return { time: '12:00', period: 'AM' }; + const [hours, minutes] = time24.split(':').map(Number); + const period = hours >= 12 ? 'PM' : 'AM'; + const hours12 = hours % 12 || 12; + return { + time: `${hours12}:${String(minutes).padStart(2, '0')}`, + period, + }; +} + +// Convert 12h time + period to 24h format +function to24Hour(time12, period) { + if (!time12) return '00:00'; + const [hours, minutes] = time12.split(':').map(Number); + let hours24 = hours; + if (period === 'PM' && hours !== 12) { + hours24 = hours + 12; + } else if (period === 'AM' && hours === 12) { + hours24 = 0; + } + return `${String(hours24).padStart(2, '0')}:${String(minutes).padStart(2, '0')}`; +} + +// Get default timezone (same as Settings page) +function getDefaultTimeZone() { + try { + return Intl.DateTimeFormat().resolvedOptions().timeZone || 'UTC'; + } catch { + return 'UTC'; + } +} + +// Validate cron expression +function validateCronExpression(expression) { + if (!expression || expression.trim() === '') { + return { valid: false, error: 'Cron expression is required' }; + } + + const parts = expression.trim().split(/\s+/); + if (parts.length !== 5) { + return { + valid: false, + error: + 'Cron expression must have exactly 5 parts: minute hour day month weekday', + }; + } + + const [minute, hour, dayOfMonth, month, dayOfWeek] = parts; + + // Validate each part (allowing *, */N steps, ranges, lists, steps) + // Supports: *, */2, 5, 1-5, 1-5/2, 1,3,5, etc. 
+ const cronPartRegex = + /^(\*\/\d+|\*|\d+(-\d+)?(\/\d+)?(,\d+(-\d+)?(\/\d+)?)*)$/; + + if (!cronPartRegex.test(minute)) { + return { + valid: false, + error: 'Invalid minute field (0-59, *, or cron syntax)', + }; + } + if (!cronPartRegex.test(hour)) { + return { + valid: false, + error: 'Invalid hour field (0-23, *, or cron syntax)', + }; + } + if (!cronPartRegex.test(dayOfMonth)) { + return { + valid: false, + error: 'Invalid day field (1-31, *, or cron syntax)', + }; + } + if (!cronPartRegex.test(month)) { + return { + valid: false, + error: 'Invalid month field (1-12, *, or cron syntax)', + }; + } + if (!cronPartRegex.test(dayOfWeek)) { + return { + valid: false, + error: 'Invalid weekday field (0-6, *, or cron syntax)', + }; + } + + // Additional range validation for numeric values + const validateRange = (value, min, max, name) => { + // Skip if it's * or contains special characters + if ( + value === '*' || + value.includes('/') || + value.includes('-') || + value.includes(',') + ) { + return null; + } + const num = parseInt(value, 10); + if (isNaN(num) || num < min || num > max) { + return `${name} must be between ${min} and ${max}`; + } + return null; + }; + + const minuteError = validateRange(minute, 0, 59, 'Minute'); + if (minuteError) return { valid: false, error: minuteError }; + + const hourError = validateRange(hour, 0, 23, 'Hour'); + if (hourError) return { valid: false, error: hourError }; + + const dayError = validateRange(dayOfMonth, 1, 31, 'Day'); + if (dayError) return { valid: false, error: dayError }; + + const monthError = validateRange(month, 1, 12, 'Month'); + if (monthError) return { valid: false, error: monthError }; + + const weekdayError = validateRange(dayOfWeek, 0, 6, 'Weekday'); + if (weekdayError) return { valid: false, error: weekdayError }; + + return { valid: true, error: null }; +} + +const DAYS_OF_WEEK = [ + { value: '0', label: 'Sunday' }, + { value: '1', label: 'Monday' }, + { value: '2', label: 'Tuesday' }, + { value: '3', label: 'Wednesday' }, + { value: '4', label: 'Thursday' }, + { value: '5', label: 'Friday' }, + { value: '6', label: 'Saturday' }, +]; + +function formatBytes(bytes) { + if (bytes === 0) return '0 B'; + const k = 1024; + const sizes = ['B', 'KB', 'MB', 'GB']; + const i = Math.floor(Math.log(bytes) / Math.log(k)); + return `${(bytes / Math.pow(k, i)).toFixed(2)} ${sizes[i]}`; +} + +export default function BackupManager() { + const [backups, setBackups] = useState([]); + const [loading, setLoading] = useState(false); + const [creating, setCreating] = useState(false); + const [downloading, setDownloading] = useState(null); + const [uploadFile, setUploadFile] = useState(null); + const [uploadModalOpen, setUploadModalOpen] = useState(false); + const [restoreConfirmOpen, setRestoreConfirmOpen] = useState(false); + const [deleteConfirmOpen, setDeleteConfirmOpen] = useState(false); + const [selectedBackup, setSelectedBackup] = useState(null); + const [restoring, setRestoring] = useState(false); + const [deleting, setDeleting] = useState(false); + + // Read user's preferences from settings + const [timeFormat] = useLocalStorage('time-format', '12h'); + const [dateFormatSetting] = useLocalStorage('date-format', 'mdy'); + const [tableSize] = useLocalStorage('table-size', 'default'); + const [userTimezone] = useLocalStorage('time-zone', getDefaultTimeZone()); + const is12Hour = timeFormat === '12h'; + + // Format date according to user preferences + const formatDate = (dateString) => { + const date = dayjs(dateString); + const datePart = 
dateFormatSetting === 'mdy' ? 'MM/DD/YYYY' : 'DD/MM/YYYY'; + const timePart = is12Hour ? 'h:mm:ss A' : 'HH:mm:ss'; + return date.format(`${datePart}, ${timePart}`); + }; + + // Warning suppression for confirmation dialogs + const suppressWarning = useWarningsStore((s) => s.suppressWarning); + + // Schedule state + const [schedule, setSchedule] = useState({ + enabled: false, + frequency: 'daily', + time: '03:00', + day_of_week: 0, + retention_count: 0, + cron_expression: '', + }); + const [scheduleLoading, setScheduleLoading] = useState(false); + const [scheduleSaving, setScheduleSaving] = useState(false); + const [scheduleChanged, setScheduleChanged] = useState(false); + const [advancedMode, setAdvancedMode] = useState(false); + const [cronError, setCronError] = useState(null); + + // For 12-hour display mode + const [displayTime, setDisplayTime] = useState('3:00'); + const [timePeriod, setTimePeriod] = useState('AM'); + + const columns = useMemo( + () => [ + { + header: 'Filename', + accessorKey: 'name', + grow: true, + cell: ({ cell }) => ( +
+ {cell.getValue()} +
+ ), + }, + { + header: 'Size', + accessorKey: 'size', + size: 80, + cell: ({ cell }) => ( + {formatBytes(cell.getValue())} + ), + }, + { + header: 'Created', + accessorKey: 'created', + minSize: 180, + cell: ({ cell }) => ( + + {formatDate(cell.getValue())} + + ), + }, + { + id: 'actions', + header: 'Actions', + size: tableSize === 'compact' ? 75 : 100, + }, + ], + [tableSize] + ); + + const renderHeaderCell = (header) => { + return ( + + {header.column.columnDef.header} + + ); + }; + + const renderBodyCell = ({ cell, row }) => { + switch (cell.column.id) { + case 'actions': + return ( + + ); + } + }; + + const table = useTable({ + columns, + data: backups, + allRowIds: backups.map((b) => b.name), + bodyCellRenderFns: { + actions: renderBodyCell, + }, + headerCellRenderFns: { + name: renderHeaderCell, + size: renderHeaderCell, + created: renderHeaderCell, + actions: renderHeaderCell, + }, + }); + + const loadBackups = async () => { + setLoading(true); + try { + const backupList = await API.listBackups(); + setBackups(backupList); + } catch (error) { + notifications.show({ + title: 'Error', + message: error?.message || 'Failed to load backups', + color: 'red', + }); + } finally { + setLoading(false); + } + }; + + const loadSchedule = async () => { + setScheduleLoading(true); + try { + const settings = await API.getBackupSchedule(); + + // Check if using cron expression (advanced mode) + if (settings.cron_expression) { + setAdvancedMode(true); + } + + setSchedule(settings); + + // Initialize 12-hour display values + const { time, period } = to12Hour(settings.time); + setDisplayTime(time); + setTimePeriod(period); + + setScheduleChanged(false); + } catch (error) { + // Ignore errors on initial load - settings may not exist yet + } finally { + setScheduleLoading(false); + } + }; + + useEffect(() => { + loadBackups(); + loadSchedule(); + }, []); + + // Validate cron expression when switching to advanced mode + useEffect(() => { + if (advancedMode && schedule.cron_expression) { + const validation = validateCronExpression(schedule.cron_expression); + setCronError(validation.valid ? null : validation.error); + } else { + setCronError(null); + } + }, [advancedMode, schedule.cron_expression]); + + const handleScheduleChange = (field, value) => { + setSchedule((prev) => ({ ...prev, [field]: value })); + setScheduleChanged(true); + + // Validate cron expression if in advanced mode + if (field === 'cron_expression' && advancedMode) { + const validation = validateCronExpression(value); + setCronError(validation.valid ? null : validation.error); + } + }; + + // Handle time changes in 12-hour mode + const handleTimeChange12h = (newTime, newPeriod) => { + const time = newTime ?? displayTime; + const period = newPeriod ?? timePeriod; + setDisplayTime(time); + setTimePeriod(period); + // Convert to 24h and update schedule + const time24 = to24Hour(time, period); + handleScheduleChange('time', time24); + }; + + // Handle time changes in 24-hour mode + const handleTimeChange24h = (value) => { + handleScheduleChange('time', value); + // Also update 12h display state in case user switches formats + const { time, period } = to12Hour(value); + setDisplayTime(time); + setTimePeriod(period); + }; + + const handleSaveSchedule = async () => { + setScheduleSaving(true); + try { + const scheduleToSave = advancedMode + ? 
schedule + : { ...schedule, cron_expression: '' }; + + const updated = await API.updateBackupSchedule(scheduleToSave); + setSchedule(updated); + setScheduleChanged(false); + + notifications.show({ + title: 'Success', + message: 'Backup schedule saved', + color: 'green', + }); + } catch (error) { + notifications.show({ + title: 'Error', + message: error?.message || 'Failed to save schedule', + color: 'red', + }); + } finally { + setScheduleSaving(false); + } + }; + + const handleCreateBackup = async () => { + setCreating(true); + try { + await API.createBackup(); + notifications.show({ + title: 'Success', + message: 'Backup created successfully', + color: 'green', + }); + await loadBackups(); + } catch (error) { + notifications.show({ + title: 'Error', + message: error?.message || 'Failed to create backup', + color: 'red', + }); + } finally { + setCreating(false); + } + }; + + const handleDownload = async (filename) => { + setDownloading(filename); + try { + await API.downloadBackup(filename); + notifications.show({ + title: 'Download Started', + message: `Downloading ${filename}...`, + color: 'blue', + }); + } catch (error) { + notifications.show({ + title: 'Error', + message: error?.message || 'Failed to download backup', + color: 'red', + }); + } finally { + setDownloading(null); + } + }; + + const handleDeleteClick = (backup) => { + setSelectedBackup(backup); + setDeleteConfirmOpen(true); + }; + + const handleDeleteConfirm = async () => { + setDeleting(true); + try { + await API.deleteBackup(selectedBackup.name); + notifications.show({ + title: 'Success', + message: 'Backup deleted successfully', + color: 'green', + }); + await loadBackups(); + } catch (error) { + notifications.show({ + title: 'Error', + message: error?.message || 'Failed to delete backup', + color: 'red', + }); + } finally { + setDeleting(false); + setDeleteConfirmOpen(false); + setSelectedBackup(null); + } + }; + + const handleRestoreClick = (backup) => { + setSelectedBackup(backup); + setRestoreConfirmOpen(true); + }; + + const handleRestoreConfirm = async () => { + setRestoring(true); + try { + await API.restoreBackup(selectedBackup.name); + notifications.show({ + title: 'Success', + message: + 'Backup restored successfully. You may need to refresh the page.', + color: 'green', + }); + setTimeout(() => window.location.reload(), 2000); + } catch (error) { + notifications.show({ + title: 'Error', + message: error?.message || 'Failed to restore backup', + color: 'red', + }); + } finally { + setRestoring(false); + setRestoreConfirmOpen(false); + setSelectedBackup(null); + } + }; + + const handleUploadSubmit = async () => { + if (!uploadFile) return; + + try { + await API.uploadBackup(uploadFile); + notifications.show({ + title: 'Success', + message: 'Backup uploaded successfully', + color: 'green', + }); + setUploadModalOpen(false); + setUploadFile(null); + await loadBackups(); + } catch (error) { + notifications.show({ + title: 'Error', + message: error?.message || 'Failed to upload backup', + color: 'red', + }); + } + }; + + return ( + + {/* Schedule Settings */} + + + + Scheduled Backups + + + handleScheduleChange('enabled', e.currentTarget.checked) + } + label={schedule.enabled ? 'Enabled' : 'Disabled'} + /> + + + + + Advanced (Cron Expression) + + setAdvancedMode(e.currentTarget.checked)} + label={advancedMode ? 'Enabled' : 'Disabled'} + disabled={!schedule.enabled} + size="sm" + /> + + + {scheduleLoading ? ( + + ) : ( + <> + {advancedMode ? 
( + <> + + + handleScheduleChange( + 'cron_expression', + e.currentTarget.value + ) + } + placeholder="0 3 * * *" + description="Format: minute hour day month weekday (e.g., '0 3 * * *' = 3:00 AM daily)" + disabled={!schedule.enabled} + error={cronError} + /> + + Examples:
0 3 * * * - Every day at 3:00 AM +
0 2 * * 0 - Every Sunday at 2:00 AM +
0 */6 * * * - Every 6 hours +
30 14 1 * * - 1st of every month at 2:30 PM +
+
+ + + handleScheduleChange('retention_count', value || 0) + } + min={0} + disabled={!schedule.enabled} + /> + + + + ) : ( + + + + handleScheduleChange('day_of_week', parseInt(value, 10)) + } + data={DAYS_OF_WEEK} + disabled={!schedule.enabled} + /> + )} + {is12Hour ? ( + <> + { + const hour = displayTime + ? displayTime.split(':')[0] + : '12'; + handleTimeChange12h(`${hour}:${value}`, null); + }} + data={Array.from({ length: 60 }, (_, i) => ({ + value: String(i).padStart(2, '0'), + label: String(i).padStart(2, '0'), + }))} + disabled={!schedule.enabled} + searchable + /> + { + const minute = schedule.time + ? schedule.time.split(':')[1] + : '00'; + handleTimeChange24h(`${value}:${minute}`); + }} + data={Array.from({ length: 24 }, (_, i) => ({ + value: String(i).padStart(2, '0'), + label: String(i).padStart(2, '0'), + }))} + disabled={!schedule.enabled} + searchable + /> + + + )} + + {/* Add stream information badges */} + + {channel.resolution && ( + + + {channel.resolution} + + + )} + {channel.source_fps && ( + + + {channel.source_fps} FPS + + + )} + {channel.video_codec && ( + + + {channel.video_codec.toUpperCase()} + + + )} + {channel.audio_codec && ( + + + {channel.audio_codec.toUpperCase()} + + + )} + {channel.audio_channels && ( + + + {channel.audio_channels} + + + )} + {channel.stream_type && ( + + + {channel.stream_type.toUpperCase()} + + + )} + {channel.ffmpeg_speed && ( + + = + getBufferingSpeedThreshold(settings['proxy_settings']) + ? 'green' + : 'red' + } + > + {parseFloat(channel.ffmpeg_speed).toFixed(2)}x + + + )} + + + + + + + + {formatSpeed(bitrates.at(-1) || 0)} + + + + + + + Avg: {avgBitrate} + + + + + + + + {formatBytes(totalBytes)} + + + + + + + + + {clientCount} + + + + + + + + + ); +}; + +export default StreamConnectionCard; diff --git a/frontend/src/components/cards/VODCard.jsx b/frontend/src/components/cards/VODCard.jsx new file mode 100644 index 00000000..42468dae --- /dev/null +++ b/frontend/src/components/cards/VODCard.jsx @@ -0,0 +1,143 @@ +import { + ActionIcon, + Badge, + Box, + Card, + CardSection, + Group, + Image, + Stack, + Text, +} from '@mantine/core'; +import { Calendar, Clock, Play, Star } from 'lucide-react'; +import React from 'react'; +import { + formatDuration, + getSeasonLabel, +} from '../../utils/cards/VODCardUtils.js'; + +const VODCard = ({ vod, onClick }) => { + const isEpisode = vod.type === 'episode'; + + const getDisplayTitle = () => { + if (isEpisode && vod.series) { + return ( + + + {vod.series.name} + + + {getSeasonLabel(vod)} - {vod.name} + + + ); + } + return {vod.name}; + }; + + const handleCardClick = async () => { + // Just pass the basic vod info to the parent handler + onClick(vod); + }; + + return ( + + + + {vod.logo?.url ? ( + {vod.name} + ) : ( + + + + )} + + { + e.stopPropagation(); + onClick(vod); + }} + > + + + + + {isEpisode ? 
'Episode' : 'Movie'} + + + + + + {getDisplayTitle()} + + + {vod.year && ( + + + + {vod.year} + + + )} + + {vod.duration && ( + + + + {formatDuration(vod.duration_secs)} + + + )} + + {vod.rating && ( + + + + {vod.rating} + + + )} + + + {vod.genre && ( + + {vod.genre} + + )} + + + ); +}; + +export default VODCard; \ No newline at end of file diff --git a/frontend/src/components/cards/VodConnectionCard.jsx b/frontend/src/components/cards/VodConnectionCard.jsx new file mode 100644 index 00000000..57564dce --- /dev/null +++ b/frontend/src/components/cards/VodConnectionCard.jsx @@ -0,0 +1,422 @@ +// Format duration for content length +import useLocalStorage from '../../hooks/useLocalStorage.jsx'; +import React, { useCallback, useEffect, useState } from 'react'; +import logo from '../../images/logo.png'; +import { ActionIcon, Badge, Box, Card, Center, Flex, Group, Progress, Stack, Text, Tooltip } from '@mantine/core'; +import { convertToSec, fromNow, toFriendlyDuration } from '../../utils/dateTimeUtils.js'; +import { ChevronDown, HardDriveUpload, SquareX, Timer, Video } from 'lucide-react'; +import { + calculateConnectionDuration, + calculateConnectionStartTime, + calculateProgress, + formatDuration, + formatTime, + getEpisodeDisplayTitle, + getEpisodeSubtitle, + getMovieDisplayTitle, + getMovieSubtitle, +} from '../../utils/cards/VodConnectionCardUtils.js'; + +const ClientDetails = ({ connection, connectionStartTime }) => { + return ( + + {connection.user_agent && + connection.user_agent !== 'Unknown' && ( + + + User Agent: + + + {connection.user_agent.length > 100 + ? `${connection.user_agent.substring(0, 100)}...` + : connection.user_agent} + + + )} + + + + Client ID: + + + {connection.client_id || 'Unknown'} + + + + {connection.connected_at && ( + + + Connected: + + {connectionStartTime} + + )} + + {connection.duration && connection.duration > 0 && ( + + + Watch Duration: + + + {toFriendlyDuration(connection.duration, 'seconds')} + + + )} + + {/* Seek/Position Information */} + {(connection.last_seek_percentage > 0 || + connection.last_seek_byte > 0) && ( + <> + + + Last Seek: + + + {connection.last_seek_percentage?.toFixed(1)}% + {connection.total_content_size > 0 && ( + + {' '} + ({Math.round(connection.last_seek_byte / (1024 * 1024))} + MB /{' '} + {Math.round( + connection.total_content_size / (1024 * 1024) + )} + MB) + + )} + + + + {Number(connection.last_seek_timestamp) > 0 && ( + + + Seek Time: + + + {fromNow(convertToSec(Number(connection.last_seek_timestamp)))} + + + )} + + )} + + {connection.bytes_sent > 0 && ( + + + Data Sent: + + + {(connection.bytes_sent / (1024 * 1024)).toFixed(1)} MB + + + )} + + ); +} + +// Create a VOD Card component similar to ChannelCard +const VodConnectionCard = ({ vodContent, stopVODClient }) => { + const [dateFormatSetting] = useLocalStorage('date-format', 'mdy'); + const dateFormat = dateFormatSetting === 'mdy' ? 
'MM/DD' : 'DD/MM'; + const [isClientExpanded, setIsClientExpanded] = useState(false); + const [, setUpdateTrigger] = useState(0); // Force re-renders for progress updates + + // Get metadata from the VOD content + const metadata = vodContent.content_metadata || {}; + const contentType = vodContent.content_type; + const isMovie = contentType === 'movie'; + const isEpisode = contentType === 'episode'; + + // Set up timer to update progress every second + useEffect(() => { + const interval = setInterval(() => { + setUpdateTrigger((prev) => prev + 1); + }, 1000); + + return () => clearInterval(interval); + }, []); + + // Get the individual connection (since we now separate cards per connection) + const connection = + vodContent.individual_connection || + (vodContent.connections && vodContent.connections[0]); + + // Get poster/logo URL + const posterUrl = metadata.logo_url || logo; + + // Get display title + const getDisplayTitle = () => { + if (isMovie) { + return getMovieDisplayTitle(vodContent); + } else if (isEpisode) { + return getEpisodeDisplayTitle(metadata); + } + return vodContent.content_name; + }; + + // Get subtitle info + const getSubtitle = () => { + if (isMovie) { + return getMovieSubtitle(metadata); + } else if (isEpisode) { + return getEpisodeSubtitle(metadata); + } + return []; + }; + + // Render subtitle + const renderSubtitle = () => { + const subtitleParts = getSubtitle(); + if (subtitleParts.length === 0) return null; + + return ( + + {subtitleParts.join(' • ')} + + ); + }; + + // Calculate progress percentage and time + const getProgressInfo = useCallback(() => { + return calculateProgress(connection, metadata.duration_secs); + }, [connection, metadata.duration_secs]); + + // Calculate duration for connection + const getConnectionDuration = useCallback((connection) => { + return calculateConnectionDuration(connection); + }, []); + + // Get connection start time for tooltip + const getConnectionStartTime = useCallback( + (connection) => { + return calculateConnectionStartTime(connection, dateFormat); + }, + [dateFormat] + ); + + return ( + + + {/* Header with poster and basic info */} + + + content poster + + + + {connection && ( + +
+ + {getConnectionDuration(connection)} +
+
+ )} + {connection && stopVODClient && ( +
+ + stopVODClient(connection.client_id)} + > + + + +
+ )} +
+
+ + {/* Title and type */} + + + {getDisplayTitle()} + + + + + + + + + {/* Display M3U profile information - matching channel card style */} + {connection && + connection.m3u_profile && + (connection.m3u_profile.profile_name || + connection.m3u_profile.account_name) && ( + + + + + + + {connection.m3u_profile.account_name || 'Unknown Account'} + + + + + {connection.m3u_profile.profile_name || 'Default Profile'} + + + + + + )} + + {/* Subtitle/episode info */} + {getSubtitle().length > 0 && ( + + {renderSubtitle()} + + )} + + {/* Content information badges - streamlined to avoid duplication */} + + {metadata.year && ( + + + {metadata.year} + + + )} + + {metadata.duration_secs && ( + + + {formatDuration(metadata.duration_secs)} + + + )} + + {metadata.rating && ( + + + {parseFloat(metadata.rating).toFixed(1)}/10 + + + )} + + + {/* Progress bar - show current position in content */} + {connection && + metadata.duration_secs && + (() => { + const { totalTime, currentTime, percentage} = getProgressInfo(); + return totalTime > 0 ? ( + + + + Progress + + + {formatTime(currentTime)} /{' '} + {formatTime(totalTime)} + + + + + {percentage.toFixed(1)}% watched + + + ) : null; + })()} + + {/* Client information section - collapsible like channel cards */} + {connection && ( + + {/* Client summary header - always visible */} + setIsClientExpanded(!isClientExpanded)} + > + + + Client: + + + {connection.client_ip || 'Unknown IP'} + + + + + + {isClientExpanded ? 'Hide Details' : 'Show Details'} + + + + + + {/* Expanded client details */} + {isClientExpanded && ( + + )} + + )} +
+
+ ); +}; + +export default VodConnectionCard; \ No newline at end of file diff --git a/frontend/src/components/forms/Channel.jsx b/frontend/src/components/forms/Channel.jsx index fd2e5312..4f12dd2c 100644 --- a/frontend/src/components/forms/Channel.jsx +++ b/frontend/src/components/forms/Channel.jsx @@ -1,5 +1,6 @@ import React, { useState, useEffect, useRef, useMemo } from 'react'; -import { useFormik } from 'formik'; +import { useForm } from 'react-hook-form'; +import { yupResolver } from '@hookform/resolvers/yup'; import * as Yup from 'yup'; import useChannelsStore from '../../store/channels'; import API from '../../api'; @@ -42,6 +43,11 @@ import useEPGsStore from '../../store/epgs'; import { FixedSizeList as List } from 'react-window'; import { USER_LEVELS, USER_LEVEL_LABELS } from '../../constants'; +const validationSchema = Yup.object({ + name: Yup.string().required('Name is required'), + channel_group_id: Yup.string().required('Channel group is required'), +}); + const ChannelForm = ({ channel = null, isOpen, onClose }) => { const theme = useMantineTheme(); @@ -100,7 +106,7 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { const handleLogoSuccess = ({ logo }) => { if (logo && logo.id) { - formik.setFieldValue('logo_id', logo.id); + setValue('logo_id', logo.id); ensureLogosLoaded(); // Refresh logos } setLogoModalOpen(false); @@ -124,7 +130,7 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { if (response.matched) { // Update the form with the new EPG data if (response.channel && response.channel.epg_data_id) { - formik.setFieldValue('epg_data_id', response.channel.epg_data_id); + setValue('epg_data_id', response.channel.epg_data_id); } notifications.show({ @@ -152,7 +158,7 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { }; const handleSetNameFromEpg = () => { - const epgDataId = formik.values.epg_data_id; + const epgDataId = watch('epg_data_id'); if (!epgDataId) { notifications.show({ title: 'No EPG Selected', @@ -164,7 +170,7 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { const tvg = tvgsById[epgDataId]; if (tvg && tvg.name) { - formik.setFieldValue('name', tvg.name); + setValue('name', tvg.name); notifications.show({ title: 'Success', message: `Channel name set to "${tvg.name}"`, @@ -180,7 +186,7 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { }; const handleSetLogoFromEpg = async () => { - const epgDataId = formik.values.epg_data_id; + const epgDataId = watch('epg_data_id'); if (!epgDataId) { notifications.show({ title: 'No EPG Selected', @@ -207,7 +213,7 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { ); if (matchingLogo) { - formik.setFieldValue('logo_id', matchingLogo.id); + setValue('logo_id', matchingLogo.id); notifications.show({ title: 'Success', message: `Logo set to "${matchingLogo.name}"`, @@ -231,7 +237,7 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { // Create logo by calling the Logo API directly const newLogo = await API.createLogo(newLogoData); - formik.setFieldValue('logo_id', newLogo.id); + setValue('logo_id', newLogo.id); notifications.update({ id: 'creating-logo', @@ -264,7 +270,7 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { }; const handleSetTvgIdFromEpg = () => { - const epgDataId = formik.values.epg_data_id; + const epgDataId = watch('epg_data_id'); if (!epgDataId) { notifications.show({ title: 'No EPG Selected', @@ -276,7 +282,7 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { const tvg = 
tvgsById[epgDataId]; if (tvg && tvg.tvg_id) { - formik.setFieldValue('tvg_id', tvg.tvg_id); + setValue('tvg_id', tvg.tvg_id); notifications.show({ title: 'Success', message: `TVG-ID set to "${tvg.tvg_id}"`, @@ -291,130 +297,130 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { } }; - const formik = useFormik({ - initialValues: { - name: '', - channel_number: '', // Change from 0 to empty string for consistency - channel_group_id: - Object.keys(channelGroups).length > 0 + const defaultValues = useMemo( + () => ({ + name: channel?.name || '', + channel_number: + channel?.channel_number !== null && + channel?.channel_number !== undefined + ? channel.channel_number + : '', + channel_group_id: channel?.channel_group_id + ? `${channel.channel_group_id}` + : Object.keys(channelGroups).length > 0 ? Object.keys(channelGroups)[0] : '', - stream_profile_id: '0', - tvg_id: '', - tvc_guide_stationid: '', - epg_data_id: '', - logo_id: '', - user_level: '0', - }, - validationSchema: Yup.object({ - name: Yup.string().required('Name is required'), - channel_group_id: Yup.string().required('Channel group is required'), + stream_profile_id: channel?.stream_profile_id + ? `${channel.stream_profile_id}` + : '0', + tvg_id: channel?.tvg_id || '', + tvc_guide_stationid: channel?.tvc_guide_stationid || '', + epg_data_id: channel?.epg_data_id ?? '', + logo_id: channel?.logo_id ? `${channel.logo_id}` : '', + user_level: `${channel?.user_level ?? '0'}`, }), - onSubmit: async (values, { setSubmitting }) => { - let response; + [channel, channelGroups] + ); - try { - const formattedValues = { ...values }; + const { + register, + handleSubmit, + setValue, + watch, + reset, + formState: { errors, isSubmitting }, + } = useForm({ + defaultValues, + resolver: yupResolver(validationSchema), + }); - // Convert empty or "0" stream_profile_id to null for the API - if ( - !formattedValues.stream_profile_id || - formattedValues.stream_profile_id === '0' - ) { - formattedValues.stream_profile_id = null; - } + const onSubmit = async (values) => { + let response; - // Ensure tvg_id is properly included (no empty strings) - formattedValues.tvg_id = formattedValues.tvg_id || null; + try { + const formattedValues = { ...values }; - // Ensure tvc_guide_stationid is properly included (no empty strings) - formattedValues.tvc_guide_stationid = - formattedValues.tvc_guide_stationid || null; + // Convert empty or "0" stream_profile_id to null for the API + if ( + !formattedValues.stream_profile_id || + formattedValues.stream_profile_id === '0' + ) { + formattedValues.stream_profile_id = null; + } - if (channel) { - // If there's an EPG to set, use our enhanced endpoint - if (values.epg_data_id !== (channel.epg_data_id ?? 
'')) { - // Use the special endpoint to set EPG and trigger refresh - const epgResponse = await API.setChannelEPG( - channel.id, - values.epg_data_id - ); + // Ensure tvg_id is properly included (no empty strings) + formattedValues.tvg_id = formattedValues.tvg_id || null; - // Remove epg_data_id from values since we've handled it separately - const { epg_data_id, ...otherValues } = formattedValues; + // Ensure tvc_guide_stationid is properly included (no empty strings) + formattedValues.tvc_guide_stationid = + formattedValues.tvc_guide_stationid || null; - // Update other channel fields if needed - if (Object.keys(otherValues).length > 0) { - response = await API.updateChannel({ - id: channel.id, - ...otherValues, - streams: channelStreams.map((stream) => stream.id), - }); - } - } else { - // No EPG change, regular update + if (channel) { + // If there's an EPG to set, use our enhanced endpoint + if (values.epg_data_id !== (channel.epg_data_id ?? '')) { + // Use the special endpoint to set EPG and trigger refresh + const epgResponse = await API.setChannelEPG( + channel.id, + values.epg_data_id + ); + + // Remove epg_data_id from values since we've handled it separately + const { epg_data_id, ...otherValues } = formattedValues; + + // Update other channel fields if needed + if (Object.keys(otherValues).length > 0) { response = await API.updateChannel({ id: channel.id, - ...formattedValues, + ...otherValues, streams: channelStreams.map((stream) => stream.id), }); } } else { - // New channel creation - use the standard method - response = await API.addChannel({ + // No EPG change, regular update + response = await API.updateChannel({ + id: channel.id, ...formattedValues, streams: channelStreams.map((stream) => stream.id), }); } - } catch (error) { - console.error('Error saving channel:', error); + } else { + // New channel creation - use the standard method + response = await API.addChannel({ + ...formattedValues, + streams: channelStreams.map((stream) => stream.id), + }); } + } catch (error) { + console.error('Error saving channel:', error); + } - formik.resetForm(); - API.requeryChannels(); + reset(); + API.requeryChannels(); - // Refresh channel profiles to update the membership information - useChannelsStore.getState().fetchChannelProfiles(); + // Refresh channel profiles to update the membership information + useChannelsStore.getState().fetchChannelProfiles(); - setSubmitting(false); - setTvgFilter(''); - setLogoFilter(''); - onClose(); - }, - }); + setTvgFilter(''); + setLogoFilter(''); + onClose(); + }; useEffect(() => { - if (channel) { - if (channel.epg_data_id) { - const epgSource = epgs[tvgsById[channel.epg_data_id]?.epg_source]; - setSelectedEPG(epgSource ? `${epgSource.id}` : ''); - } + reset(defaultValues); + setChannelStreams(channel?.streams || []); - formik.setValues({ - name: channel.name || '', - channel_number: - channel.channel_number !== null ? channel.channel_number : '', - channel_group_id: channel.channel_group_id - ? `${channel.channel_group_id}` - : '', - stream_profile_id: channel.stream_profile_id - ? `${channel.stream_profile_id}` - : '0', - tvg_id: channel.tvg_id || '', - tvc_guide_stationid: channel.tvc_guide_stationid || '', - epg_data_id: channel.epg_data_id ?? '', - logo_id: channel.logo_id ? `${channel.logo_id}` : '', - user_level: `${channel.user_level}`, - }); - - setChannelStreams(channel.streams || []); + if (channel?.epg_data_id) { + const epgSource = epgs[tvgsById[channel.epg_data_id]?.epg_source]; + setSelectedEPG(epgSource ? 
`${epgSource.id}` : ''); } else { - formik.resetForm(); + setSelectedEPG(''); + } + + if (!channel) { setTvgFilter(''); setLogoFilter(''); - setChannelStreams([]); // Ensure streams are cleared when adding a new channel } - }, [channel, tvgsById, channelGroups]); + }, [defaultValues, channel, reset, epgs, tvgsById]); // Memoize logo options to prevent infinite re-renders during background loading const logoOptions = useMemo(() => { @@ -431,10 +437,7 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { // If a new group was created and returned, update the form with it if (newGroup && newGroup.id) { // Preserve all current form values while updating just the channel_group_id - formik.setValues({ - ...formik.values, - channel_group_id: `${newGroup.id}`, - }); + setValue('channel_group_id', `${newGroup.id}`); } }; @@ -472,7 +475,7 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { } styles={{ content: { '--mantine-color-body': '#27272A' } }} > -
+ { label={ Channel Name - {formik.values.epg_data_id && ( + {watch('epg_data_id') && ( @@ -933,7 +905,7 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { } readOnly value={(() => { - const tvg = tvgsById[formik.values.epg_data_id]; + const tvg = tvgsById[watch('epg_data_id')]; const epgSource = tvg && epgs[tvg.epg_source]; const tvgLabel = tvg ? tvg.name || tvg.id : ''; if (epgSource && tvgLabel) { @@ -953,7 +925,7 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { color="white" onClick={(e) => { e.stopPropagation(); - formik.setFieldValue('epg_data_id', null); + setValue('epg_data_id', null); }} title="Create new group" size="small" @@ -1012,12 +984,9 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { size="xs" onClick={() => { if (filteredTvgs[index].id == '0') { - formik.setFieldValue('epg_data_id', null); + setValue('epg_data_id', null); } else { - formik.setFieldValue( - 'epg_data_id', - filteredTvgs[index].id - ); + setValue('epg_data_id', filteredTvgs[index].id); // Also update selectedEPG to match the EPG source of the selected tvg if (filteredTvgs[index].epg_source) { setSelectedEPG( @@ -1047,9 +1016,11 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => { diff --git a/frontend/src/components/forms/ChannelBatch.jsx b/frontend/src/components/forms/ChannelBatch.jsx index 0527a3b6..14dd22f1 100644 --- a/frontend/src/components/forms/ChannelBatch.jsx +++ b/frontend/src/components/forms/ChannelBatch.jsx @@ -2,6 +2,7 @@ import React, { useState, useEffect, useMemo, useRef } from 'react'; import useChannelsStore from '../../store/channels'; import API from '../../api'; import useStreamProfilesStore from '../../store/streamProfiles'; +import useEPGsStore from '../../store/epgs'; import ChannelGroupForm from './ChannelGroup'; import { Box, @@ -53,6 +54,9 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { }, [ensureLogosLoaded]); const streamProfiles = useStreamProfilesStore((s) => s.profiles); + const epgs = useEPGsStore((s) => s.epgs); + const tvgs = useEPGsStore((s) => s.tvgs); + const fetchEPGs = useEPGsStore((s) => s.fetchEPGs); const [channelGroupModelOpen, setChannelGroupModalOpen] = useState(false); const [selectedChannelGroup, setSelectedChannelGroup] = useState('-1'); @@ -60,6 +64,7 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { const [isSubmitting, setIsSubmitting] = useState(false); const [regexFind, setRegexFind] = useState(''); const [regexReplace, setRegexReplace] = useState(''); + const [selectedDummyEpgId, setSelectedDummyEpgId] = useState(null); const [groupPopoverOpened, setGroupPopoverOpened] = useState(false); const [groupFilter, setGroupFilter] = useState(''); @@ -71,10 +76,25 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { const [confirmSetNamesOpen, setConfirmSetNamesOpen] = useState(false); const [confirmSetLogosOpen, setConfirmSetLogosOpen] = useState(false); const [confirmSetTvgIdsOpen, setConfirmSetTvgIdsOpen] = useState(false); - const [confirmClearEpgsOpen, setConfirmClearEpgsOpen] = useState(false); + const [confirmBatchUpdateOpen, setConfirmBatchUpdateOpen] = useState(false); + const [settingNames, setSettingNames] = useState(false); + const [settingLogos, setSettingLogos] = useState(false); + const [settingTvgIds, setSettingTvgIds] = useState(false); const isWarningSuppressed = useWarningsStore((s) => s.isWarningSuppressed); const suppressWarning = useWarningsStore((s) => s.suppressWarning); + // Fetch EPG sources when modal opens + useEffect(() 
=> { + if (isOpen) { + fetchEPGs(); + } + }, [isOpen, fetchEPGs]); + + // Get dummy EPG sources + const dummyEpgSources = useMemo(() => { + return Object.values(epgs).filter((epg) => epg.source_type === 'dummy'); + }, [epgs]); + const form = useForm({ mode: 'uncontrolled', initialValues: { @@ -85,7 +105,90 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { }, }); + // Build confirmation message based on selected changes + const getConfirmationMessage = () => { + const changes = []; + const values = form.getValues(); + + // Check for regex name changes + if (regexFind.trim().length > 0) { + changes.push( + `• Name Change: Apply regex find "${regexFind}" replace with "${regexReplace || ''}"` + ); + } + + // Check channel group + if (selectedChannelGroup && selectedChannelGroup !== '-1') { + const groupName = channelGroups[selectedChannelGroup]?.name || 'Unknown'; + changes.push(`• Channel Group: ${groupName}`); + } + + // Check logo + if (selectedLogoId && selectedLogoId !== '-1') { + if (selectedLogoId === '0') { + changes.push(`• Logo: Use Default`); + } else { + const logoName = channelLogos[selectedLogoId]?.name || 'Selected Logo'; + changes.push(`• Logo: ${logoName}`); + } + } + + // Check stream profile + if (values.stream_profile_id && values.stream_profile_id !== '-1') { + if (values.stream_profile_id === '0') { + changes.push(`• Stream Profile: Use Default`); + } else { + const profile = streamProfiles.find( + (p) => `${p.id}` === `${values.stream_profile_id}` + ); + const profileName = profile?.name || 'Selected Profile'; + changes.push(`• Stream Profile: ${profileName}`); + } + } + + // Check user level + if (values.user_level && values.user_level !== '-1') { + const userLevelLabel = + USER_LEVEL_LABELS[values.user_level] || values.user_level; + changes.push(`• User Level: ${userLevelLabel}`); + } + + // Check dummy EPG + if (selectedDummyEpgId) { + if (selectedDummyEpgId === 'clear') { + changes.push(`• EPG: Clear Assignment (use default dummy)`); + } else { + const epgName = epgs[selectedDummyEpgId]?.name || 'Selected EPG'; + changes.push(`• Dummy EPG: ${epgName}`); + } + } + + return changes; + }; + + const handleSubmit = () => { + const changes = getConfirmationMessage(); + + // If no changes detected, show notification + if (changes.length === 0) { + notifications.show({ + title: 'No Changes', + message: 'Please select at least one field to update.', + color: 'orange', + }); + return; + } + + // Skip warning if suppressed + if (isWarningSuppressed('batch-update-channels')) { + return onSubmit(); + } + + setConfirmBatchUpdateOpen(true); + }; + const onSubmit = async () => { + setConfirmBatchUpdateOpen(false); setIsSubmitting(true); const values = { @@ -126,6 +229,7 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { try { const applyRegex = regexFind.trim().length > 0; + // First, handle standard field updates (name, group, logo, etc.) 
if (applyRegex) { // Build per-channel updates to apply unique names via regex let flags = 'g'; @@ -153,10 +257,48 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { }); await API.bulkUpdateChannels(updates); - } else { + } else if (Object.keys(values).length > 0) { await API.updateChannels(channelIds, values); } + // Then, handle EPG assignment if a dummy EPG was selected + if (selectedDummyEpgId) { + if (selectedDummyEpgId === 'clear') { + // Clear EPG assignments + const associations = channelIds.map((id) => ({ + channel_id: id, + epg_data_id: null, + })); + await API.batchSetEPG(associations); + } else { + // Assign the selected dummy EPG + const selectedEpg = epgs[selectedDummyEpgId]; + if (selectedEpg && selectedEpg.epg_data_count > 0) { + // Convert to number for comparison since Select returns string + const epgSourceId = parseInt(selectedDummyEpgId, 10); + + // Check if we already have EPG data loaded in the store + let epgData = tvgs.find((data) => data.epg_source === epgSourceId); + + // If not in store, fetch it + if (!epgData) { + const epgDataList = await API.getEPGData(); + epgData = epgDataList.find( + (data) => data.epg_source === epgSourceId + ); + } + + if (epgData) { + const associations = channelIds.map((id) => ({ + channel_id: id, + epg_data_id: epgData.id, + })); + await API.batchSetEPG(associations); + } + } + } + } + // Refresh both the channels table data and the main channels store await Promise.all([ API.requeryChannels(), @@ -189,6 +331,7 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { }; const executeSetNamesFromEpg = async () => { + setSettingNames(true); try { // Start the backend task await API.setChannelNamesFromEpg(channelIds); @@ -202,7 +345,6 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { }); // Close the modal since the task is now running in background - setConfirmSetNamesOpen(false); onClose(); } catch (error) { console.error('Failed to start EPG name setting task:', error); @@ -211,6 +353,8 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { message: 'Failed to start EPG name setting task.', color: 'red', }); + } finally { + setSettingNames(false); setConfirmSetNamesOpen(false); } }; @@ -234,6 +378,7 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { }; const executeSetLogosFromEpg = async () => { + setSettingLogos(true); try { // Start the backend task await API.setChannelLogosFromEpg(channelIds); @@ -247,7 +392,6 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { }); // Close the modal since the task is now running in background - setConfirmSetLogosOpen(false); onClose(); } catch (error) { console.error('Failed to start EPG logo setting task:', error); @@ -256,6 +400,8 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { message: 'Failed to start EPG logo setting task.', color: 'red', }); + } finally { + setSettingLogos(false); setConfirmSetLogosOpen(false); } }; @@ -279,6 +425,7 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { }; const executeSetTvgIdsFromEpg = async () => { + setSettingTvgIds(true); try { // Start the backend task await API.setChannelTvgIdsFromEpg(channelIds); @@ -292,7 +439,6 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { }); // Close the modal since the task is now running in background - setConfirmSetTvgIdsOpen(false); onClose(); } catch (error) { console.error('Failed to start EPG TVG-ID setting task:', error); @@ -301,53 +447,12 @@ const ChannelBatchForm = ({ channelIds, 
isOpen, onClose }) => { message: 'Failed to start EPG TVG-ID setting task.', color: 'red', }); + } finally { + setSettingTvgIds(false); setConfirmSetTvgIdsOpen(false); } }; - const handleClearEpgs = async () => { - if (!channelIds || channelIds.length === 0) { - notifications.show({ - title: 'No Channels Selected', - message: 'No channels to update.', - color: 'orange', - }); - return; - } - - // Skip warning if suppressed - if (isWarningSuppressed('batch-clear-epgs')) { - return executeClearEpgs(); - } - - setConfirmClearEpgsOpen(true); - }; - - const executeClearEpgs = async () => { - try { - // Clear EPG assignments (set to null/dummy) using existing batchSetEPG API - const associations = channelIds.map((id) => ({ - channel_id: id, - epg_data_id: null, - })); - - await API.batchSetEPG(associations); - - // batchSetEPG already shows a notification and refreshes channels - // Close the modal - setConfirmClearEpgsOpen(false); - onClose(); - } catch (error) { - console.error('Failed to clear EPG assignments:', error); - notifications.show({ - title: 'Error', - message: 'Failed to clear EPG assignments.', - color: 'red', - }); - setConfirmClearEpgsOpen(false); - } - }; - - // useEffect(() => { // // const sameStreamProfile = channels.every( // // (channel) => channel.stream_profile_id == channels[0].stream_profile_id @@ -418,7 +523,7 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { } styles={{ content: { '--mantine-color-body': '#27272A' } }} > -
+ @@ -484,20 +589,30 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { Set TVG-IDs from EPG - - - - )} - - - - - - - - - - -
- - ); -}; - -export default ChannelsForm; diff --git a/frontend/src/components/forms/DummyEPG.jsx b/frontend/src/components/forms/DummyEPG.jsx index a449d0c6..9f9346da 100644 --- a/frontend/src/components/forms/DummyEPG.jsx +++ b/frontend/src/components/forms/DummyEPG.jsx @@ -7,6 +7,7 @@ import { Group, Modal, NumberInput, + Paper, Select, Stack, Text, @@ -16,6 +17,7 @@ import { import { useForm } from '@mantine/form'; import { notifications } from '@mantine/notifications'; import API from '../../api'; +import useEPGsStore from '../../store/epgs'; import dayjs from 'dayjs'; import utc from 'dayjs/plugin/utc'; import timezone from 'dayjs/plugin/timezone'; @@ -25,6 +27,16 @@ dayjs.extend(utc); dayjs.extend(timezone); const DummyEPGForm = ({ epg, isOpen, onClose }) => { + // Get all EPGs from the store + const epgs = useEPGsStore((state) => state.epgs); + + // Filter for dummy EPG sources only + const dummyEpgs = useMemo(() => { + return Object.values(epgs) + .filter((e) => e.source_type === 'dummy') + .sort((a, b) => a.name.localeCompare(b.name)); + }, [epgs]); + // Separate state for each field to prevent focus loss const [titlePattern, setTitlePattern] = useState(''); const [timePattern, setTimePattern] = useState(''); @@ -37,6 +49,11 @@ const DummyEPGForm = ({ epg, isOpen, onClose }) => { useState(''); const [endedTitleTemplate, setEndedTitleTemplate] = useState(''); const [endedDescriptionTemplate, setEndedDescriptionTemplate] = useState(''); + const [fallbackTitleTemplate, setFallbackTitleTemplate] = useState(''); + const [fallbackDescriptionTemplate, setFallbackDescriptionTemplate] = + useState(''); + const [channelLogoUrl, setChannelLogoUrl] = useState(''); + const [programPosterUrl, setProgramPosterUrl] = useState(''); const [timezoneOptions, setTimezoneOptions] = useState([]); const [loadingTimezones, setLoadingTimezones] = useState(true); @@ -59,11 +76,16 @@ const DummyEPGForm = ({ epg, isOpen, onClose }) => { upcoming_description_template: '', ended_title_template: '', ended_description_template: '', + fallback_title_template: '', + fallback_description_template: '', + channel_logo_url: '', + program_poster_url: '', name_source: 'channel', stream_index: 1, category: '', include_date: true, include_live: false, + include_new: false, }, }, validate: { @@ -101,12 +123,15 @@ const DummyEPGForm = ({ epg, isOpen, onClose }) => { titleGroups: {}, timeGroups: {}, dateGroups: {}, + calculatedPlaceholders: {}, formattedTitle: '', formattedDescription: '', formattedUpcomingTitle: '', formattedUpcomingDescription: '', formattedEndedTitle: '', formattedEndedDescription: '', + formattedChannelLogoUrl: '', + formattedProgramPosterUrl: '', error: null, }; @@ -166,6 +191,21 @@ const DummyEPGForm = ({ epg, isOpen, onClose }) => { ...result.dateGroups, }; + // Add normalized versions of all groups for cleaner URLs + // These remove all non-alphanumeric characters and convert to lowercase + Object.keys(allGroups).forEach((key) => { + const value = allGroups[key]; + if (value) { + // Remove all non-alphanumeric characters (except spaces temporarily) + // then replace spaces with nothing, and convert to lowercase + const normalized = String(value) + .replace(/[^a-zA-Z0-9\s]/g, '') + .replace(/\s+/g, '') + .toLowerCase(); + allGroups[`${key}_normalize`] = normalized; + } + }); + // Calculate formatted time strings if time was extracted if (result.timeGroups.hour) { try { @@ -186,10 +226,91 @@ const DummyEPGForm = ({ epg, isOpen, onClose }) => { const sourceTimezone = 
form.values.custom_properties?.timezone || 'UTC'; const outputTimezone = form.values.custom_properties?.output_timezone; + // Determine the base date to use + let baseDate = dayjs().tz(sourceTimezone); + + // If date was extracted from pattern, use that instead of today + if (result.dateGroups.month && result.dateGroups.day) { + const monthValue = result.dateGroups.month; + let extractedMonth; + + // Parse month - can be numeric (1-12) or text (Jan, January, Oct, October, etc.) + if (/^\d+$/.test(monthValue)) { + // Numeric month + extractedMonth = parseInt(monthValue); + } else { + // Text month - convert to number (1-12) + const monthLower = monthValue.toLowerCase(); + const monthNames = [ + 'january', + 'february', + 'march', + 'april', + 'may', + 'june', + 'july', + 'august', + 'september', + 'october', + 'november', + 'december', + ]; + const monthAbbr = [ + 'jan', + 'feb', + 'mar', + 'apr', + 'may', + 'jun', + 'jul', + 'aug', + 'sep', + 'oct', + 'nov', + 'dec', + ]; + + // Try full month names first + let monthIndex = monthNames.findIndex((m) => m === monthLower); + if (monthIndex === -1) { + // Try abbreviated month names + monthIndex = monthAbbr.findIndex((m) => m === monthLower); + } + + if (monthIndex !== -1) { + extractedMonth = monthIndex + 1; // Convert 0-indexed to 1-12 + } else { + // If we can't parse it, default to current month + extractedMonth = dayjs().month() + 1; + } + } + + const extractedDay = parseInt(result.dateGroups.day); + const extractedYear = result.dateGroups.year + ? parseInt(result.dateGroups.year) + : dayjs().year(); // Default to current year if not provided + + // Validate that we have valid numeric values + if ( + !isNaN(extractedMonth) && + !isNaN(extractedDay) && + !isNaN(extractedYear) && + extractedMonth >= 1 && + extractedMonth <= 12 && + extractedDay >= 1 && + extractedDay <= 31 + ) { + // Create a specific date string and parse it in the source timezone + // This ensures DST is calculated correctly for the target date + const dateString = `${extractedYear}-${extractedMonth.toString().padStart(2, '0')}-${extractedDay.toString().padStart(2, '0')}`; + baseDate = dayjs.tz(dateString, sourceTimezone); + } + } + if (outputTimezone && outputTimezone !== sourceTimezone) { - // Create a date in the source timezone - const sourceDate = dayjs() - .tz(sourceTimezone) + // Create a date in the source timezone with extracted or current date + // Set the time on the date, which will use the DST rules for that specific date + const sourceDate = baseDate .set('hour', hour24) .set('minute', minute) .set('second', 0); @@ -201,11 +322,18 @@ const DummyEPGForm = ({ epg, isOpen, onClose }) => { hour24 = outputDate.hour(); const convertedMinute = outputDate.minute(); - // Format 24-hour time string with converted time + // Add date placeholders based on the OUTPUT timezone + // This ensures {date}, {month}, {day}, {year} reflect the converted timezone + allGroups.date = outputDate.format('YYYY-MM-DD'); + allGroups.month = outputDate.month() + 1; // dayjs months are 0-indexed + allGroups.day = outputDate.date(); + allGroups.year = outputDate.year(); + + // Format 24-hour start time string with converted time if (convertedMinute > 0) { - allGroups.time24 = `${hour24.toString().padStart(2, '0')}:${convertedMinute.toString().padStart(2, '0')}`; + allGroups.starttime24 = `${hour24.toString().padStart(2, '0')}:${convertedMinute.toString().padStart(2, '0')}`; } else { - allGroups.time24 = `${hour24.toString().padStart(2, '0')}:00`; + allGroups.starttime24 = 
`${hour24.toString().padStart(2, '0')}:00`; } // Convert to 12-hour format with converted time @@ -217,19 +345,34 @@ const DummyEPGForm = ({ epg, isOpen, onClose }) => { hour12 = hour24 - 12; } - // Format 12-hour time string with converted time + // Format 12-hour start time string with converted time if (convertedMinute > 0) { - allGroups.time = `${hour12}:${convertedMinute.toString().padStart(2, '0')} ${ampmDisplay}`; + allGroups.starttime = `${hour12}:${convertedMinute.toString().padStart(2, '0')} ${ampmDisplay}`; } else { - allGroups.time = `${hour12} ${ampmDisplay}`; + allGroups.starttime = `${hour12} ${ampmDisplay}`; } + + // Format long versions that always include minutes + allGroups.starttime_long = `${hour12}:${convertedMinute.toString().padStart(2, '0')} ${ampmDisplay}`; + allGroups.starttime24_long = `${hour24.toString().padStart(2, '0')}:${convertedMinute.toString().padStart(2, '0')}`; } else { // No timezone conversion - use original logic - // Format 24-hour time string + // Add date placeholders based on the source timezone + const sourceDate = baseDate + .set('hour', hour24) + .set('minute', minute) + .set('second', 0); + + allGroups.date = sourceDate.format('YYYY-MM-DD'); + allGroups.month = sourceDate.month() + 1; // dayjs months are 0-indexed + allGroups.day = sourceDate.date(); + allGroups.year = sourceDate.year(); + + // Format 24-hour start time string if (minute > 0) { - allGroups.time24 = `${hour24.toString().padStart(2, '0')}:${minute.toString().padStart(2, '0')}`; + allGroups.starttime24 = `${hour24.toString().padStart(2, '0')}:${minute.toString().padStart(2, '0')}`; } else { - allGroups.time24 = `${hour24.toString().padStart(2, '0')}:00`; + allGroups.starttime24 = `${hour24.toString().padStart(2, '0')}:00`; } // Convert to 12-hour format @@ -241,15 +384,69 @@ const DummyEPGForm = ({ epg, isOpen, onClose }) => { hour12 = hour24 - 12; } - // Format 12-hour time string + // Format 12-hour start time string if (minute > 0) { - allGroups.time = `${hour12}:${minute.toString().padStart(2, '0')} ${ampmDisplay}`; + allGroups.starttime = `${hour12}:${minute.toString().padStart(2, '0')} ${ampmDisplay}`; } else { - allGroups.time = `${hour12} ${ampmDisplay}`; + allGroups.starttime = `${hour12} ${ampmDisplay}`; } + + // Format long version that always includes minutes + allGroups.starttime_long = `${hour12}:${minute.toString().padStart(2, '0')} ${ampmDisplay}`; } + + // Calculate end time based on program duration + const programDuration = + form.values.custom_properties?.program_duration || 180; + + // Calculate end time by adding duration to start time + const startMinutes = hour24 * 60 + minute; + const endMinutes = startMinutes + programDuration; + + let endHour24 = Math.floor(endMinutes / 60) % 24; // Wrap around 24 hours + const endMinute = endMinutes % 60; + + // Format 24-hour end time string + if (endMinute > 0) { + allGroups.endtime24 = `${endHour24.toString().padStart(2, '0')}:${endMinute.toString().padStart(2, '0')}`; + } else { + allGroups.endtime24 = `${endHour24.toString().padStart(2, '0')}:00`; + } + + // Convert to 12-hour format for endtime + const endAmpmDisplay = endHour24 < 12 ? 
'AM' : 'PM'; + let endHour12 = endHour24; + if (endHour24 === 0) { + endHour12 = 12; + } else if (endHour24 > 12) { + endHour12 = endHour24 - 12; + } + + // Format 12-hour end time string + if (endMinute > 0) { + allGroups.endtime = `${endHour12}:${endMinute.toString().padStart(2, '0')} ${endAmpmDisplay}`; + } else { + allGroups.endtime = `${endHour12} ${endAmpmDisplay}`; + } + + // Format long version that always includes minutes + allGroups.endtime_long = `${endHour12}:${endMinute.toString().padStart(2, '0')} ${endAmpmDisplay}`; + + // Store calculated placeholders for display in preview + result.calculatedPlaceholders = { + starttime: allGroups.starttime, + starttime24: allGroups.starttime24, + starttime_long: allGroups.starttime_long, + endtime: allGroups.endtime, + endtime24: allGroups.endtime24, + endtime_long: allGroups.endtime_long, + date: allGroups.date, + month: allGroups.month, + day: allGroups.day, + year: allGroups.year, + }; } catch (e) { - // If parsing fails, leave time/time24 as placeholders + // If parsing fails, leave starttime/endtime as placeholders console.error('Error formatting time:', e); } } @@ -305,6 +502,30 @@ const DummyEPGForm = ({ epg, isOpen, onClose }) => { ); } + // Format channel logo URL + if (channelLogoUrl && (result.titleMatch || result.timeMatch)) { + result.formattedChannelLogoUrl = channelLogoUrl.replace( + /\{(\w+)\}/g, + (match, key) => { + const value = allGroups[key]; + // URL encode the value to handle spaces and special characters + return value ? encodeURIComponent(String(value)) : match; + } + ); + } + + // Format program poster URL + if (programPosterUrl && (result.titleMatch || result.timeMatch)) { + result.formattedProgramPosterUrl = programPosterUrl.replace( + /\{(\w+)\}/g, + (match, key) => { + const value = allGroups[key]; + // URL encode the value to handle spaces and special characters + return value ? encodeURIComponent(String(value)) : match; + } + ); + } + return result; }, [ titlePattern, @@ -317,8 +538,11 @@ const DummyEPGForm = ({ epg, isOpen, onClose }) => { upcomingDescriptionTemplate, endedTitleTemplate, endedDescriptionTemplate, + channelLogoUrl, + programPosterUrl, form.values.custom_properties?.timezone, form.values.custom_properties?.output_timezone, + form.values.custom_properties?.program_duration, ]); useEffect(() => { @@ -347,11 +571,17 @@ const DummyEPGForm = ({ epg, isOpen, onClose }) => { custom.upcoming_description_template || '', ended_title_template: custom.ended_title_template || '', ended_description_template: custom.ended_description_template || '', + fallback_title_template: custom.fallback_title_template || '', + fallback_description_template: + custom.fallback_description_template || '', + channel_logo_url: custom.channel_logo_url || '', + program_poster_url: custom.program_poster_url || '', name_source: custom.name_source || 'channel', stream_index: custom.stream_index || 1, category: custom.category || '', include_date: custom.include_date ?? true, include_live: custom.include_live ?? false, + include_new: custom.include_new ?? 
false, }, }); @@ -368,6 +598,12 @@ const DummyEPGForm = ({ epg, isOpen, onClose }) => { ); setEndedTitleTemplate(custom.ended_title_template || ''); setEndedDescriptionTemplate(custom.ended_description_template || ''); + setFallbackTitleTemplate(custom.fallback_title_template || ''); + setFallbackDescriptionTemplate( + custom.fallback_description_template || '' + ); + setChannelLogoUrl(custom.channel_logo_url || ''); + setProgramPosterUrl(custom.program_poster_url || ''); } else { form.reset(); setTitlePattern(''); @@ -380,6 +616,10 @@ const DummyEPGForm = ({ epg, isOpen, onClose }) => { setUpcomingDescriptionTemplate(''); setEndedTitleTemplate(''); setEndedDescriptionTemplate(''); + setFallbackTitleTemplate(''); + setFallbackDescriptionTemplate(''); + setChannelLogoUrl(''); + setProgramPosterUrl(''); } // eslint-disable-next-line react-hooks/exhaustive-deps }, [epg]); @@ -420,9 +660,84 @@ const DummyEPGForm = ({ epg, isOpen, onClose }) => { fetchTimezones(); }, []); + // Function to import settings from an existing dummy EPG + const handleImportFromTemplate = (templateId) => { + const template = dummyEpgs.find((e) => e.id === parseInt(templateId)); + if (!template) return; + + const custom = template.custom_properties || {}; + + // Update form values + form.setValues({ + name: `${template.name} (Copy)`, + is_active: template.is_active ?? true, + source_type: 'dummy', + custom_properties: { + title_pattern: custom.title_pattern || '', + time_pattern: custom.time_pattern || '', + date_pattern: custom.date_pattern || '', + timezone: + custom.timezone || custom.timezone_offset?.toString() || 'US/Eastern', + output_timezone: custom.output_timezone || '', + program_duration: custom.program_duration || 180, + sample_title: custom.sample_title || '', + title_template: custom.title_template || '', + description_template: custom.description_template || '', + upcoming_title_template: custom.upcoming_title_template || '', + upcoming_description_template: + custom.upcoming_description_template || '', + ended_title_template: custom.ended_title_template || '', + ended_description_template: custom.ended_description_template || '', + fallback_title_template: custom.fallback_title_template || '', + fallback_description_template: + custom.fallback_description_template || '', + channel_logo_url: custom.channel_logo_url || '', + program_poster_url: custom.program_poster_url || '', + name_source: custom.name_source || 'channel', + stream_index: custom.stream_index || 1, + category: custom.category || '', + include_date: custom.include_date ?? true, + include_live: custom.include_live ?? false, + include_new: custom.include_new ?? 
false, + }, + }); + + // Update all individual state variables to match + setTitlePattern(custom.title_pattern || ''); + setTimePattern(custom.time_pattern || ''); + setDatePattern(custom.date_pattern || ''); + setSampleTitle(custom.sample_title || ''); + setTitleTemplate(custom.title_template || ''); + setDescriptionTemplate(custom.description_template || ''); + setUpcomingTitleTemplate(custom.upcoming_title_template || ''); + setUpcomingDescriptionTemplate(custom.upcoming_description_template || ''); + setEndedTitleTemplate(custom.ended_title_template || ''); + setEndedDescriptionTemplate(custom.ended_description_template || ''); + setFallbackTitleTemplate(custom.fallback_title_template || ''); + setFallbackDescriptionTemplate(custom.fallback_description_template || ''); + setChannelLogoUrl(custom.channel_logo_url || ''); + setProgramPosterUrl(custom.program_poster_url || ''); + + notifications.show({ + title: 'Template Imported', + message: `Settings imported from "${template.name}". Don't forget to change the name!`, + color: 'blue', + }); + }; + const handleSubmit = async (values) => { try { if (epg?.id) { + // Validate that we have a valid EPG object before updating + if (!epg || typeof epg !== 'object' || !epg.id) { + notifications.show({ + title: 'Error', + message: 'Invalid EPG data. Please close and reopen this form.', + color: 'red', + }); + return; + } + await API.updateEPG({ ...values, id: epg.id }); notifications.show({ title: 'Success', @@ -456,6 +771,32 @@ const DummyEPGForm = ({ epg, isOpen, onClose }) => { >
+ {/* Import from Existing - Only show when creating new */} + {!epg && dummyEpgs.length > 0 && ( + + + + + Import from Existing + + + Use an existing dummy EPG as a template + + +