diff --git a/.dockerignore b/.dockerignore index 5073af60..296537de 100755 --- a/.dockerignore +++ b/.dockerignore @@ -11,6 +11,10 @@ **/.toolstarget **/.vs **/.vscode +**/.history +**/media +**/models +**/static **/*.*proj.user **/*.dbmdl **/*.jfm @@ -26,3 +30,5 @@ **/values.dev.yaml LICENSE README.md +data/ +docker/data/ diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 00000000..47f12f7d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,64 @@ +name: Bug Report +description: I have an issue with Dispatcharr +title: "[Bug]: " +labels: ["Triage"] +type: "Bug" +projects: [] +assignees: [] +body: + - type: markdown + attributes: + value: | + Please make sure you search for similar issues before submitting. Thank you for your bug report! + - type: textarea + id: describe-the-bug + attributes: + label: Describe the bug + description: Make sure to attach screenshots if possible! + placeholder: Tell us what you see! + value: "A clear and concise description of what the bug is. What did you expect to happen?" + validations: + required: true + - type: textarea + id: reproduce + attributes: + label: How can we recreate this bug? + description: Be detailed! + placeholder: Tell us what you see! + value: "1. Go to '...' 2. Click on '....' 3. Scroll down to '....' 4. See error" + validations: + required: true + - type: input + id: dispatcharr-version + attributes: + label: Dispatcharr Version + description: What version of Dispatcharr are you running? + placeholder: Located bottom left of main screen + validations: + required: true + - type: input + id: docker-version + attributes: + label: Docker Version + description: What version of Docker are you running? + placeholder: docker --version + validations: + required: true + - type: textarea + id: docker-compose + attributes: + label: What's in your Docker Compose file? + description: Please share your docker-compose.yml file + placeholder: Tell us what you see! + value: "If not using Docker Compose, just put not using." + validations: + required: true + - type: textarea + id: client-info + attributes: + label: Client Information + description: What are you using to view the streams from Dispatcharr? + placeholder: Tell us what you see! + value: "Device, App, Versions for both, etc..." + validations: + required: true \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 00000000..ec4bb386 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1 @@ +blank_issues_enabled: false \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 00000000..77a03df7 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,39 @@ +name: Feature request +description: I want to suggest a new feature for Dispatcharr +title: "[Feature]: " +labels: ["Triage"] +type: "Feature" +projects: [] +assignees: [] +body: + - type: markdown + attributes: + value: | + Thank you for helping to make Dispatcharr better! + - type: textarea + id: describe-problem + attributes: + label: Is your feature request related to a problem? + description: Make sure to attach screenshots if possible! + placeholder: Tell us what you see! + value: "A clear and concise description of what the problem is. Ex. 
I'm always frustrated when [...]" + validations: + required: true + - type: textarea + id: describe-solution + attributes: + label: Describe the solution you'd like + description: A clear and concise description of what you want to happen. + placeholder: Tell us what you see! + value: "Describe here." + validations: + required: true + - type: textarea + id: extras + attributes: + label: Additional context + description: Anything else you want to add? + placeholder: Tell us what you see! + value: "Nothing Extra" + validations: + required: true \ No newline at end of file diff --git a/.github/workflows/base-image.yml b/.github/workflows/base-image.yml new file mode 100644 index 00000000..d290d49a --- /dev/null +++ b/.github/workflows/base-image.yml @@ -0,0 +1,250 @@ +name: Base Image Build + +on: + push: + branches: [main, dev] + paths: + - 'docker/DispatcharrBase' + - '.github/workflows/base-image.yml' + - 'requirements.txt' + pull_request: + branches: [main, dev] + paths: + - 'docker/DispatcharrBase' + - '.github/workflows/base-image.yml' + - 'requirements.txt' + workflow_dispatch: # Allow manual triggering + +permissions: + contents: write # For managing releases and pushing tags + packages: write # For publishing to GitHub Container Registry + +jobs: + prepare: + runs-on: ubuntu-24.04 + outputs: + repo_owner: ${{ steps.meta.outputs.repo_owner }} + repo_name: ${{ steps.meta.outputs.repo_name }} + branch_tag: ${{ steps.meta.outputs.branch_tag }} + timestamp: ${{ steps.timestamp.outputs.timestamp }} + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Generate timestamp for build + id: timestamp + run: | + TIMESTAMP=$(date -u +'%Y%m%d%H%M%S') + echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT + + - name: Set repository and image metadata + id: meta + run: | + # Get lowercase repository owner + REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]') + echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT + + # Get repository name + REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]') + echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT + + # Determine branch name + if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then + echo "branch_tag=base" >> $GITHUB_OUTPUT + elif [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then + echo "branch_tag=base-dev" >> $GITHUB_OUTPUT + else + # For other branches, use the branch name + BRANCH=$(echo "${{ github.ref }}" | sed 's/refs\/heads\///' | sed 's/[^a-zA-Z0-9]/-/g') + echo "branch_tag=base-${BRANCH}" >> $GITHUB_OUTPUT + fi + + docker: + needs: [prepare] + strategy: + fail-fast: false + matrix: + platform: [amd64, arm64] + include: + - platform: amd64 + runner: ubuntu-24.04 + - platform: arm64 + runner: ubuntu-24.04-arm + runs-on: ${{ matrix.runner }} + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Configure Git + run: | + git config user.name "GitHub Actions" + git config user.email "actions@github.com" + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + registry: docker.io + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Extract metadata for Docker + id: meta 
+ uses: docker/metadata-action@v5 + with: + images: | + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }} + labels: | + org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }} + org.opencontainers.image.description=Your ultimate IPTV & stream Management companion. + org.opencontainers.image.url=https://github.com/${{ github.repository }} + org.opencontainers.image.source=https://github.com/${{ github.repository }} + org.opencontainers.image.version=${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }} + org.opencontainers.image.created=${{ needs.prepare.outputs.timestamp }} + org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.licenses=See repository + org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/ + org.opencontainers.image.vendor=${{ needs.prepare.outputs.repo_owner }} + org.opencontainers.image.authors=${{ github.actor }} + maintainer=${{ github.actor }} + build_version=DispatcharrBase version: ${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }} + + - name: Build and push Docker base image + uses: docker/build-push-action@v4 + with: + context: . + file: ./docker/DispatcharrBase + push: ${{ github.event_name != 'pull_request' }} + platforms: linux/${{ matrix.platform }} + tags: | + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }} + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }} + labels: ${{ steps.meta.outputs.labels }} + build-args: | + REPO_OWNER=${{ needs.prepare.outputs.repo_owner }} + REPO_NAME=${{ needs.prepare.outputs.repo_name }} + BRANCH=${{ github.ref_name }} + REPO_URL=https://github.com/${{ github.repository }} + TIMESTAMP=${{ needs.prepare.outputs.timestamp }} + + create-manifest: + needs: [prepare, docker] + runs-on: ubuntu-24.04 + if: ${{ github.event_name != 'pull_request' }} + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + registry: docker.io + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Create multi-arch manifest tags + run: | + set -euo pipefail + OWNER=${{ needs.prepare.outputs.repo_owner }} + REPO=${{ needs.prepare.outputs.repo_name }} + BRANCH_TAG=${{ needs.prepare.outputs.branch_tag }} + TIMESTAMP=${{ needs.prepare.outputs.timestamp }} + + echo "Creating multi-arch manifest for ${OWNER}/${REPO}" + + # GitHub Container Registry manifests + # branch tag (e.g. 
base or base-dev) + docker buildx imagetools create \ + --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \ + --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "index:org.opencontainers.image.licenses=See repository" \ + --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "index:org.opencontainers.image.vendor=${OWNER}" \ + --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "index:maintainer=${{ github.actor }}" \ + --annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \ + --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \ + ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-arm64 + + # branch + timestamp tag + docker buildx imagetools create \ + --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \ + --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "index:org.opencontainers.image.licenses=See repository" \ + --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "index:org.opencontainers.image.vendor=${OWNER}" \ + --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "index:maintainer=${{ github.actor }}" \ + --annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \ + --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \ + ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-arm64 + + # Docker Hub manifests + # branch tag (e.g. base or base-dev) + docker buildx imagetools create \ + --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." 
\ + --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "index:org.opencontainers.image.licenses=See repository" \ + --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "index:org.opencontainers.image.vendor=${OWNER}" \ + --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "index:maintainer=${{ github.actor }}" \ + --annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \ + --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \ + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-arm64 + + # branch + timestamp tag + docker buildx imagetools create \ + --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \ + --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "index:org.opencontainers.image.licenses=See repository" \ + --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "index:org.opencontainers.image.vendor=${OWNER}" \ + --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "index:maintainer=${{ github.actor }}" \ + --annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \ + --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \ + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-arm64 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1418cbf5..d8f4a3a7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,18 +2,86 @@ name: CI Pipeline on: push: - branches: [ dev ] + branches: [dev] + paths-ignore: + - '**.md' pull_request: - branches: [ dev ] + branches: [dev] + workflow_dispatch: -# Add explicit permissions for the workflow permissions: - contents: write # For managing releases and pushing tags - packages: write # For publishing to GitHub Container Registry + contents: write + packages: write jobs: - build: - runs-on: ubuntu-latest + prepare: + runs-on: ubuntu-24.04 + # compute a single timestamp, version, and repo metadata for the entire workflow + outputs: + repo_owner: ${{ steps.meta.outputs.repo_owner }} + repo_name: ${{ steps.meta.outputs.repo_name }} + branch_tag: ${{ steps.meta.outputs.branch_tag }} + version: ${{ steps.version.outputs.version }} + timestamp: ${{ 
steps.timestamp.outputs.timestamp }} + + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Generate timestamp for build + id: timestamp + run: | + TIMESTAMP=$(date -u +'%Y%m%d%H%M%S') + echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT + + - name: Extract version info + id: version + run: | + VERSION=$(python -c "import version; print(version.__version__)") + echo "version=${VERSION}" >> $GITHUB_OUTPUT + + - name: Set repository and image metadata + id: meta + run: | + REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]') + echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT + + REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]') + echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT + + if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then + echo "branch_tag=latest" >> $GITHUB_OUTPUT + echo "is_main=true" >> $GITHUB_OUTPUT + elif [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then + echo "branch_tag=dev" >> $GITHUB_OUTPUT + echo "is_main=false" >> $GITHUB_OUTPUT + else + BRANCH=$(echo "${{ github.ref }}" | sed 's/refs\/heads\///' | sed 's/[^a-zA-Z0-9]/-/g') + echo "branch_tag=${BRANCH}" >> $GITHUB_OUTPUT + echo "is_main=false" >> $GITHUB_OUTPUT + fi + + if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then + echo "is_fork=true" >> $GITHUB_OUTPUT + else + echo "is_fork=false" >> $GITHUB_OUTPUT + fi + + docker: + needs: [prepare] + strategy: + fail-fast: false + matrix: + platform: [amd64, arm64] + include: + - platform: amd64 + runner: ubuntu-24.04 + - platform: arm64 + runner: ubuntu-24.04-arm + runs-on: ${{ matrix.runner }} + # no per-job outputs here; shared metadata comes from the `prepare` job steps: - uses: actions/checkout@v3 with: @@ -44,63 +112,162 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Generate timestamp for build - id: timestamp - run: | - TIMESTAMP=$(date -u +'%Y%m%d%H%M%S') - echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + registry: docker.io + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Extract version info - id: version - run: | - VERSION=$(python -c "import version; print(version.__version__)") - echo "version=${VERSION}" >> $GITHUB_OUTPUT - echo "sha_short=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT - - - name: Set repository and image metadata + - name: Extract metadata for Docker id: meta - run: | - # Get lowercase repository owner - REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]') - echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT - - # Get repository name - REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]') - echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT - - # Determine branch name - if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then - echo "branch_tag=latest" >> $GITHUB_OUTPUT - echo "is_main=true" >> $GITHUB_OUTPUT - elif [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then - echo "branch_tag=dev" >> $GITHUB_OUTPUT - echo "is_main=false" >> $GITHUB_OUTPUT - else - # For other branches, use the branch name - BRANCH=$(echo "${{ github.ref }}" | sed 's/refs\/heads\///' | sed 's/[^a-zA-Z0-9]/-/g') - echo "branch_tag=${BRANCH}" >> $GITHUB_OUTPUT - echo "is_main=false" >> $GITHUB_OUTPUT - fi - - # Determine if this is from a fork - if [[ "${{ github.event.pull_request.head.repo.fork }}" == 
"true" ]]; then - echo "is_fork=true" >> $GITHUB_OUTPUT - else - echo "is_fork=false" >> $GITHUB_OUTPUT - fi + uses: docker/metadata-action@v5 + with: + images: | + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }} + labels: | + org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }} + org.opencontainers.image.description=Your ultimate IPTV & stream Management companion. + org.opencontainers.image.url=https://github.com/${{ github.repository }} + org.opencontainers.image.source=https://github.com/${{ github.repository }} + org.opencontainers.image.version=${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }} + org.opencontainers.image.created=${{ needs.prepare.outputs.timestamp }} + org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.licenses=See repository + org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/ + org.opencontainers.image.vendor=${{ needs.prepare.outputs.repo_owner }} + org.opencontainers.image.authors=${{ github.actor }} + maintainer=${{ github.actor }} + build_version=Dispatcharr version: ${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }} - name: Build and push Docker image uses: docker/build-push-action@v4 with: context: . push: ${{ github.event_name != 'pull_request' }} - platforms: linux/amd64 # Fast build - amd64 only + # Build only the platform for this matrix job to avoid running amd64 + # stages under qemu on an arm64 runner (and vice-versa). This makes + # the matrix runner's platform the one built by buildx. + platforms: linux/${{ matrix.platform }} + # push arch-specific tags from each matrix job (they will be combined + # into a multi-arch manifest in a follow-up job) tags: | - ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.meta.outputs.branch_tag }} - ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.version.outputs.version }}-${{ steps.timestamp.outputs.timestamp }} - ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.version.outputs.sha_short }} + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }} + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }} + labels: ${{ steps.meta.outputs.labels }} build-args: | + REPO_OWNER=${{ needs.prepare.outputs.repo_owner }} + REPO_NAME=${{ needs.prepare.outputs.repo_name }} + BASE_TAG=base BRANCH=${{ github.ref_name }} REPO_URL=https://github.com/${{ github.repository }} - TIMESTAMP=${{ steps.timestamp.outputs.timestamp }} + TIMESTAMP=${{ needs.prepare.outputs.timestamp }} file: ./docker/Dockerfile + + create-manifest: + # wait for prepare and all matrix builds to finish + needs: [prepare, docker] + runs-on: ubuntu-24.04 + if: ${{ github.event_name != 'pull_request' }} + steps: + - name: Set up 
Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + registry: docker.io + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Create multi-arch manifest tags + run: | + set -euo pipefail + OWNER=${{ needs.prepare.outputs.repo_owner }} + REPO=${{ needs.prepare.outputs.repo_name }} + BRANCH_TAG=${{ needs.prepare.outputs.branch_tag }} + VERSION=${{ needs.prepare.outputs.version }} + TIMESTAMP=${{ needs.prepare.outputs.timestamp }} + + echo "Creating multi-arch manifest for ${OWNER}/${REPO}" + + # branch tag (e.g. latest or dev) + docker buildx imagetools create \ + --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \ + --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.version=${BRANCH_TAG}" \ + --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "index:org.opencontainers.image.licenses=See repository" \ + --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "index:org.opencontainers.image.vendor=${OWNER}" \ + --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "index:maintainer=${{ github.actor }}" \ + --annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \ + --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \ + ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-arm64 + + # version + timestamp tag + docker buildx imagetools create \ + --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." 
\ + --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.version=${VERSION}-${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "index:org.opencontainers.image.licenses=See repository" \ + --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "index:org.opencontainers.image.vendor=${OWNER}" \ + --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "index:maintainer=${{ github.actor }}" \ + --annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \ + --tag ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP} \ + ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP}-arm64 + + # also create Docker Hub manifests using the same username + docker buildx imagetools create \ + --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \ + --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.version=${BRANCH_TAG}" \ + --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "index:org.opencontainers.image.licenses=See repository" \ + --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "index:org.opencontainers.image.vendor=${OWNER}" \ + --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "index:maintainer=${{ github.actor }}" \ + --annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \ + --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \ + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-arm64 + + docker buildx imagetools create \ + --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." 
\ + --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.version=${VERSION}-${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "index:org.opencontainers.image.licenses=See repository" \ + --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "index:org.opencontainers.image.vendor=${OWNER}" \ + --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "index:maintainer=${{ github.actor }}" \ + --annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \ + --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP} \ + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP}-arm64 diff --git a/.github/workflows/frontend-tests.yml b/.github/workflows/frontend-tests.yml new file mode 100644 index 00000000..4e9e2505 --- /dev/null +++ b/.github/workflows/frontend-tests.yml @@ -0,0 +1,41 @@ +name: Frontend Tests + +on: + push: + branches: [main, dev] + paths: + - 'frontend/**' + - '.github/workflows/frontend-tests.yml' + pull_request: + branches: [main, dev] + paths: + - 'frontend/**' + - '.github/workflows/frontend-tests.yml' + +jobs: + test: + runs-on: ubuntu-latest + + defaults: + run: + working-directory: ./frontend + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '24' + cache: 'npm' + cache-dependency-path: './frontend/package-lock.json' + + - name: Install dependencies + run: npm ci + + # - name: Run linter + # run: npm run lint + + - name: Run tests + run: npm test diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 52c2ada2..9186541d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -15,16 +15,22 @@ on: # Add explicit permissions for the workflow permissions: - contents: write # For managing releases and pushing tags - packages: write # For publishing to GitHub Container Registry + contents: write # For managing releases and pushing tags + packages: write # For publishing to GitHub Container Registry jobs: - release: - runs-on: ubuntu-latest + prepare: + runs-on: ubuntu-24.04 + outputs: + new_version: ${{ steps.update_version.outputs.new_version }} + repo_owner: ${{ steps.meta.outputs.repo_owner }} + repo_name: ${{ steps.meta.outputs.repo_name }} + timestamp: ${{ steps.timestamp.outputs.timestamp }} steps: - uses: actions/checkout@v3 with: fetch-depth: 0 + token: ${{ secrets.GITHUB_TOKEN }} - name: Configure Git run: | @@ -38,14 +44,55 @@ jobs: NEW_VERSION=$(python -c "import version; print(f'{version.__version__}')") echo "new_version=${NEW_VERSION}" >> $GITHUB_OUTPUT - - name: Set lowercase repo owner - id: repo_owner + - name: Update Changelog + run: | + python scripts/update_changelog.py ${{ steps.update_version.outputs.new_version }} + + - name: Set repository metadata + id: meta run: | REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]') - echo "lowercase=${REPO_OWNER}" >> $GITHUB_OUTPUT + echo "repo_owner=${REPO_OWNER}" >> 
$GITHUB_OUTPUT - - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]') + echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT + + - name: Generate timestamp for build + id: timestamp + run: | + TIMESTAMP=$(date -u +'%Y%m%d%H%M%S') + echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT + + - name: Commit and Tag + run: | + git add version.py CHANGELOG.md + git commit -m "Release v${{ steps.update_version.outputs.new_version }}" + git tag -a "v${{ steps.update_version.outputs.new_version }}" -m "Release v${{ steps.update_version.outputs.new_version }}" + git push origin main --tags + + docker: + needs: [prepare] + strategy: + fail-fast: false + matrix: + platform: [amd64, arm64] + include: + - platform: amd64 + runner: ubuntu-24.04 + - platform: arm64 + runner: ubuntu-24.04-arm + runs-on: ${{ matrix.runner }} + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + token: ${{ secrets.GITHUB_TOKEN }} + ref: main + + - name: Configure Git + run: | + git config user.name "GitHub Actions" + git config user.email "actions@github.com" - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 @@ -57,36 +104,134 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Commit and Tag - run: | - git add version.py - git commit -m "Release v${{ steps.update_version.outputs.new_version }}" - git tag -a "v${{ steps.update_version.outputs.new_version }}" -m "Release v${{ steps.update_version.outputs.new_version }}" - git push origin main --tags + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + registry: docker.io + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Build and Push Release Image + - name: Extract metadata for Docker + id: meta + uses: docker/metadata-action@v5 + with: + images: | + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }} + labels: | + org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }} + org.opencontainers.image.description=Your ultimate IPTV & stream Management companion. + org.opencontainers.image.url=https://github.com/${{ github.repository }} + org.opencontainers.image.source=https://github.com/${{ github.repository }} + org.opencontainers.image.version=${{ needs.prepare.outputs.new_version }} + org.opencontainers.image.created=${{ needs.prepare.outputs.timestamp }} + org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.licenses=See repository + org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/ + org.opencontainers.image.vendor=${{ needs.prepare.outputs.repo_owner }} + org.opencontainers.image.authors=${{ github.actor }} + maintainer=${{ github.actor }} + build_version=Dispatcharr version: ${{ needs.prepare.outputs.new_version }} Build date: ${{ needs.prepare.outputs.timestamp }} + + - name: Build and push Docker image uses: docker/build-push-action@v4 with: context: . 
push: true - platforms: linux/amd64,linux/arm64, #linux/arm/v7 # Multi-arch support for releases + platforms: linux/${{ matrix.platform }} tags: | - ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest - ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }} - ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest-amd64 - ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest-arm64 - ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }}-amd64 - ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }}-arm64 + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:latest-${{ matrix.platform }} + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.new_version }}-${{ matrix.platform }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:latest-${{ matrix.platform }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.new_version }}-${{ matrix.platform }} + labels: ${{ steps.meta.outputs.labels }} build-args: | + REPO_OWNER=${{ needs.prepare.outputs.repo_owner }} + REPO_NAME=${{ needs.prepare.outputs.repo_name }} BRANCH=${{ github.ref_name }} REPO_URL=https://github.com/${{ github.repository }} file: ./docker/Dockerfile + create-manifest: + needs: [prepare, docker] + runs-on: ubuntu-24.04 + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + registry: docker.io + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Create multi-arch manifest tags + run: | + set -euo pipefail + OWNER=${{ needs.prepare.outputs.repo_owner }} + REPO=${{ needs.prepare.outputs.repo_name }} + VERSION=${{ needs.prepare.outputs.new_version }} + TIMESTAMP=${{ needs.prepare.outputs.timestamp }} + + echo "Creating multi-arch manifest for ${OWNER}/${REPO}" + + # GitHub Container Registry manifests + # Create one manifest with both latest and version tags + docker buildx imagetools create \ + --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." 
\ + --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.version=${VERSION}" \ + --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "index:org.opencontainers.image.licenses=See repository" \ + --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "index:org.opencontainers.image.vendor=${OWNER}" \ + --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "index:maintainer=${{ github.actor }}" \ + --annotation "index:build_version=Dispatcharr version: ${VERSION} Build date: ${TIMESTAMP}" \ + --tag ghcr.io/${OWNER}/${REPO}:latest \ + --tag ghcr.io/${OWNER}/${REPO}:${VERSION} \ + ghcr.io/${OWNER}/${REPO}:${VERSION}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-arm64 + + # Docker Hub manifests + # Create one manifest with both latest and version tags + docker buildx imagetools create \ + --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \ + --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "index:org.opencontainers.image.version=${VERSION}" \ + --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "index:org.opencontainers.image.licenses=See repository" \ + --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "index:org.opencontainers.image.vendor=${OWNER}" \ + --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "index:maintainer=${{ github.actor }}" \ + --annotation "index:build_version=Dispatcharr version: ${VERSION} Build date: ${TIMESTAMP}" \ + --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest \ + --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION} \ + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-arm64 + + create-release: + needs: [prepare, create-manifest] + runs-on: ubuntu-24.04 + steps: - name: Create GitHub Release uses: softprops/action-gh-release@v1 with: - tag_name: v${{ steps.update_version.outputs.new_version }} - name: Release v${{ steps.update_version.outputs.new_version }} + tag_name: v${{ needs.prepare.outputs.new_version }} + name: Release v${{ needs.prepare.outputs.new_version }} draft: false prerelease: false token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index a9d76412..20968f46 100755 --- a/.gitignore +++ b/.gitignore @@ -18,4 +18,5 @@ dump.rdb debugpy* uwsgi.sock package-lock.json -models \ No newline at end of file +models +.idea \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..610c2ee5 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,1014 @@ +# Changelog + +All notable changes to this project will be documented in this file. 
+ +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +### Changed + +- Frontend tests GitHub workflow now uses Node.js 24 (matching Dockerfile) and runs on both `main` and `dev` branch pushes and pull requests for comprehensive CI coverage. + +### Fixed + +- Fixed NumPy baseline detection in Docker entrypoint. Now calls `numpy.show_config()` directly with case-insensitive grep instead of incorrectly wrapping the output. +- Fixed SettingsUtils frontend tests for new grouped settings architecture. Updated test suite to properly verify grouped JSON settings (stream_settings, dvr_settings, etc.) instead of individual CharField settings, including tests for type conversions, array-to-CSV transformations, and special handling of proxy_settings and network_access. + +## [0.17.0] - 2026-01-13 + +### Added + +- Loading feedback for all confirmation dialogs: Extended visual loading indicators across all confirmation dialogs throughout the application. Delete, cleanup, and bulk operation dialogs now show an animated dots loader and disabled state during async operations, providing consistent user feedback for backups (restore/delete), channels, EPGs, logos, VOD logos, M3U accounts, streams, users, groups, filters, profiles, batch operations, and network access changes. +- Channel profile edit and duplicate functionality: Users can now rename existing channel profiles and create duplicates with automatic channel membership cloning. Each profile action (edit, duplicate, delete) is available in the profile dropdown for quick access. +- ProfileModal component extracted for improved code organization and maintainability of channel profile management operations. +- Frontend unit tests for pages and utilities: Added comprehensive unit test coverage for frontend components within pages/ and JS files within utils/, along with a GitHub Actions workflow (`frontend-tests.yml`) to automatically run tests on commits and pull requests - Thanks [@nick4810](https://github.com/nick4810) +- Channel Profile membership control for manual channel creation and bulk operations: Extended the existing `channel_profile_ids` parameter from `POST /api/channels/from-stream/` to also support `POST /api/channels/` (manual creation) and bulk creation tasks with the same flexible semantics: + - Omitted parameter (default): Channels are added to ALL profiles (preserves backward compatibility) + - Empty array `[]`: Channels are added to NO profiles + - Sentinel value `[0]`: Channels are added to ALL profiles (explicit) + - Specific IDs `[1, 2, ...]`: Channels are added only to the specified profiles + This allows API consumers to control profile membership across all channel creation methods without requiring all channels to be added to every profile by default. +- Channel profile selection in creation modal: Users can now choose which profiles to add channels to when creating channels from streams (both single and bulk operations). Options include adding to all profiles, no profiles, or specific profiles with mutual exclusivity between special options ("All Profiles", "None") and specific profile selections. Profile selection defaults to the current table filter for intuitive workflow. +- Group retention policy for M3U accounts: Groups now follow the same stale retention logic as streams, using the account's `stale_stream_days` setting. 
Groups that temporarily disappear from an M3U source are retained for the configured retention period instead of being immediately deleted, preserving user settings and preventing data loss when providers temporarily remove/re-add groups. (Closes #809) +- Visual stale indicators for streams and groups: Added `is_stale` field to Stream and both `is_stale` and `last_seen` fields to ChannelGroupM3UAccount models to track items in their retention grace period. Stale groups display with orange buttons and a warning tooltip, while stale streams show with a red background color matching the visual treatment of empty channels. + +### Changed + +- Settings architecture refactored to use grouped JSON storage: Migrated from individual CharField settings to grouped JSONField settings for improved performance, maintainability, and type safety. Settings are now organized into logical groups (stream_settings, dvr_settings, backup_settings, system_settings, proxy_settings, network_access) with automatic migration handling. Backend provides helper methods (`get_stream_settings()`, `get_default_user_agent_id()`, etc.) for easy access. Frontend simplified by removing complex key mapping logic and standardizing on underscore-based field names throughout. +- Docker setup enhanced for legacy CPU support: Added `USE_LEGACY_NUMPY` environment variable to enable custom-built NumPy with no CPU baseline, allowing Dispatcharr to run on older CPUs (circa 2009) that lack support for newer baseline CPU features. When set to `true`, the entrypoint script will install the legacy NumPy build instead of the standard distribution. (Fixes #805) +- VOD upstream read timeout reduced from 30 seconds to 10 seconds to minimize lock hold time when clients disconnect during connection phase +- Form management refactored across application: Migrated Channel, Stream, M3U Profile, Stream Profile, Logo, and User Agent forms from Formik to React Hook Form (RHF) with Yup validation for improved form handling, better validation feedback, and enhanced code maintainability +- Stats and VOD pages refactored for clearer separation of concerns: extracted Stream/VOD connection cards (StreamConnectionCard, VodConnectionCard, VODCard, SeriesCard), moved page logic into dedicated utils, and lazy-loaded heavy components with ErrorBoundary fallbacks to improve readability and maintainability - Thanks [@nick4810](https://github.com/nick4810) +- Channel creation modal refactored: Extracted and unified channel numbering dialogs from StreamsTable into a dedicated CreateChannelModal component that handles both single and bulk channel creation with cleaner, more maintainable implementation and integrated profile selection controls. + +### Fixed + +- Fixed bulk channel profile membership update endpoint silently ignoring channels without existing membership records. The endpoint now creates missing memberships automatically (matching single-channel endpoint behavior), validates that all channel IDs exist before processing, and provides detailed response feedback including counts of updated vs. created memberships. Added comprehensive Swagger documentation with request/response schemas. +- Fixed bulk channel edit endpoint crashing with `ValueError: Field names must be given to bulk_update()` when the first channel in the update list had no actual field changes. 
The endpoint now collects all unique field names from all channels being updated instead of only looking at the first channel, properly handling cases where different channels update different fields or when some channels have no changes - Thanks [@mdellavo](https://github.com/mdellavo) (Fixes #804) +- Fixed PostgreSQL backup restore not completely cleaning database before restoration. The restore process now drops and recreates the entire `public` schema before running `pg_restore`, ensuring a truly clean restore that removes all tables, functions, and other objects not present in the backup file. This prevents leftover database objects from persisting when restoring backups from older branches or versions. Added `--no-owner` flag to `pg_restore` to avoid role permission errors when the backup was created by a different PostgreSQL user. +- Fixed TV Guide loading overlay not disappearing after navigating from DVR page. The `fetchRecordings()` function in the channels store was setting `isLoading: true` on start but never resetting it to `false` on successful completion, causing the Guide page's loading overlay to remain visible indefinitely when accessed after the DVR page. +- Fixed stream profile parameters not properly handling quoted arguments. Switched from basic `.split()` to `shlex.split()` for parsing command-line parameters, allowing proper handling of multi-word arguments in quotes (e.g., OAuth tokens in HTTP headers like `"--twitch-api-header=Authorization=OAuth token123"`). This ensures external streaming tools like Streamlink and FFmpeg receive correctly formatted arguments when using stream profiles with complex parameters - Thanks [@justinforlenza](https://github.com/justinforlenza) (Fixes #833) +- Fixed bulk and manual channel creation not refreshing channel profile memberships in the UI for all connected clients. WebSocket `channels_created` event now calls `fetchChannelProfiles()` to ensure profile membership updates are reflected in real-time for all users without requiring a page refresh. +- Fixed Channel Profile filter incorrectly applying profile membership filtering even when "Show Disabled" was enabled, preventing all channels from being displayed. Profile filter now only applies when hiding disabled channels. (Fixes #825) +- Fixed manual channel creation not adding channels to channel profiles. Manually created channels are now added to the selected profile if one is active, or to all profiles if "All" is selected, matching the behavior of channels created from streams. +- Fixed VOD streams disappearing from stats page during playback by adding `socket-timeout = 600` to production uWSGI config. The missing directive caused uWSGI to use its default 4-second timeout, triggering premature cleanup when clients buffered content. Now matches the existing `http-timeout = 600` value and prevents timeout errors during normal client buffering - Thanks [@patchy8736](https://github.com/patchy8736) +- Fixed Channels table EPG column showing "Not Assigned" on initial load for users with large EPG datasets. Added `tvgsLoaded` flag to EPG store to track when EPG data has finished loading, ensuring the table waits for EPG data before displaying. EPG cells now show animated skeleton placeholders while loading instead of incorrectly showing "Not Assigned". 
(Fixes #810) +- Fixed VOD profile connection count not being decremented when stream connection fails (timeout, 404, etc.), preventing profiles from reaching capacity limits and rejecting valid stream requests +- Fixed React warning in Channel form by removing invalid `removeTrailingZeros` prop from NumberInput component +- Release workflow Docker tagging: Fixed issue where `latest` and version tags (e.g., `0.16.0`) were creating separate manifests instead of pointing to the same image digest, which caused old `latest` tags to become orphaned/untagged after new releases. Now creates a single multi-arch manifest with both tags, maintaining proper tag relationships and download statistics visibility on GitHub. +- Fixed onboarding message appearing in the Channels Table when filtered results are empty. The onboarding message now only displays when there are no channels created at all, not when channels exist but are filtered out by current filters. +- Fixed `M3UMovieRelation.get_stream_url()` and `M3UEpisodeRelation.get_stream_url()` to use XC client's `_normalize_url()` method instead of simple `rstrip('/')`. This properly handles malformed M3U account URLs (e.g., containing `/player_api.php` or query parameters) before constructing VOD stream endpoints, matching behavior of live channel URL building. (Closes #722) +- Fixed bulk_create and bulk_update errors during VOD content refresh by pre-checking object existence with optimized bulk queries (3 queries total instead of N per batch) before creating new objects. This ensures all movie/series objects have primary keys before relation operations, preventing "prohibited to prevent data loss due to unsaved related object" errors. Additionally fixed duplicate key constraint violations by treating TMDB/IMDB ID values of `0` or `'0'` as invalid (some providers use this to indicate "no ID"), converting them to NULL to prevent multiple items from incorrectly sharing the same ID. (Fixes #813) + +## [0.16.0] - 2026-01-04 + +### Added + +- Advanced filtering for Channels table: Filter menu now allows toggling disabled channels visibility (when a profile is selected) and filtering to show only empty channels without streams (Closes #182) +- Network Access warning modal now displays the client's IP address for better transparency when network restrictions are being enforced - Thanks [@damien-alt-sudo](https://github.com/damien-alt-sudo) (Closes #778) +- VLC streaming support - Thanks [@sethwv](https://github.com/sethwv) + - Added `cvlc` as an alternative streaming backend alongside FFmpeg and Streamlink + - Log parser refactoring: Introduced `LogParserFactory` and stream-specific parsers (`FFmpegLogParser`, `VLCLogParser`, `StreamlinkLogParser`) to enable codec and resolution detection from multiple streaming tools + - VLC log parsing for stream information: Detects video/audio codecs from TS demux output, supports both stream-copy and transcode modes with resolution/FPS extraction from transcode output + - Locked, read-only VLC stream profile configured for headless operation with intelligent audio/video codec detection + - VLC and required plugins installed in Docker environment with headless configuration +- ErrorBoundary component for handling frontend errors gracefully with generic error message - Thanks [@nick4810](https://github.com/nick4810) + +### Changed + +- Fixed event viewer arrow direction (previously inverted) — UI behavior corrected. 
- Thanks [@drnikcuk](https://github.com/drnikcuk) (Closes #772) +- Region code options now intentionally include both `GB` (ISO 3166-1 standard) and `UK` (commonly used by EPG/XMLTV providers) to accommodate real-world EPG data variations. Many providers use `UK` in channel identifiers (e.g., `BBCOne.uk`) despite `GB` being the official ISO country code. Users should select the region code that matches their specific EPG provider's convention for optimal region-based EPG matching bonuses - Thanks [@bigpandaaaa](https://github.com/bigpandaaaa) +- Channel number inputs in stream-to-channel creation modals no longer have a maximum value restriction, allowing users to enter any valid channel number supported by the database +- Stream log parsing refactored to use factory pattern: Simplified `ChannelService.parse_and_store_stream_info()` to route parsing through specialized log parsers instead of inline program-specific logic (~150 lines of code removed) +- Stream profile names in fixtures updated to use proper capitalization (ffmpeg → FFmpeg, streamlink → Streamlink) +- Frontend component refactoring for improved code organization and maintainability - Thanks [@nick4810](https://github.com/nick4810) + - Extracted large nested components into separate files (RecordingCard, RecordingDetailsModal, RecurringRuleModal, RecordingSynopsis, GuideRow, HourTimeline, PluginCard, ProgramRecordingModal, SeriesRecordingModal, Field) + - Moved business logic from components into dedicated utility files (dateTimeUtils, RecordingCardUtils, RecordingDetailsModalUtils, RecurringRuleModalUtils, DVRUtils, guideUtils, PluginsUtils, PluginCardUtils, notificationUtils) + - Lazy loaded heavy components (SuperuserForm, RecordingDetailsModal, ProgramRecordingModal, SeriesRecordingModal, PluginCard) with loading fallbacks + - Removed unused Dashboard and Home pages + - Guide page refactoring: Extracted GuideRow and HourTimeline components, moved grid calculations and utility functions to guideUtils.js, added loading states for initial data fetching, improved performance through better memoization + - Plugins page refactoring: Extracted PluginCard and Field components, added Zustand store for plugin state management, improved plugin action confirmation handling, better separation of concerns between UI and business logic +- Logo loading optimization: Logos now load only after both Channels and Streams tables complete loading to prevent blocking initial page render, with rendering gated by table readiness to ensure data loads before visual elements +- M3U stream URLs now use `build_absolute_uri_with_port()` for consistency with EPG and logo URLs, ensuring uniform port handling across all M3U file URLs +- Settings and Logos page refactoring for improved readability and separation of concerns - Thanks [@nick4810](https://github.com/nick4810) + - Extracted individual settings forms (DVR, Network Access, Proxy, Stream, System, UI) into separate components with dedicated utility files + - Moved larger nested components into their own files + - Moved business logic into corresponding utils/ files + - Extracted larger in-line component logic into its own function + - Each panel in Settings now uses its own form state with the parent component handling active state management + +### Fixed + +- Auto Channel Sync Force EPG Source feature not properly forcing "No EPG" assignment - When selecting "Force EPG Source" > "No EPG (Disabled)", channels were still being auto-matched to EPG data instead of forcing dummy/no EPG. 
Now correctly sets `force_dummy_epg` flag to prevent unwanted EPG assignment. (Fixes #788) +- VOD episode processing now properly handles season and episode numbers from APIs that return string values instead of integers, with comprehensive error logging to track data quality issues - Thanks [@patchy8736](https://github.com/patchy8736) (Fixes #770) +- VOD episode-to-stream relations are now validated to ensure episodes have been saved to the database before creating relations, preventing integrity errors when bulk_create operations encounter conflicts - Thanks [@patchy8736](https://github.com/patchy8736) +- VOD category filtering now correctly handles category names containing pipe "|" characters (e.g., "PL | BAJKI", "EN | MOVIES") by using `rsplit()` to split from the right instead of the left, ensuring the category type is correctly extracted as the last segment - Thanks [@Vitekant](https://github.com/Vitekant) +- M3U and EPG URLs now correctly preserve non-standard HTTPS ports (e.g., `:8443`) when accessed behind reverse proxies that forward the port in headers — `get_host_and_port()` now properly checks `X-Forwarded-Port` header before falling back to other detection methods (Fixes #704) +- M3U and EPG manager page no longer crashes when a playlist references a deleted channel group (Fixes screen blank on navigation) +- Stream validation now returns original URL instead of redirected URL to prevent issues with temporary redirect URLs that expire before clients can connect +- XtreamCodes EPG limit parameter now properly converted to integer to prevent type errors when accessing EPG listings (Fixes #781) +- Docker container file permissions: Django management commands (`migrate`, `collectstatic`) now run as the non-root user to prevent root-owned `__pycache__` and static files from causing permission issues - Thanks [@sethwv](https://github.com/sethwv) +- Stream validation now continues with GET request if HEAD request fails due to connection issues - Thanks [@kvnnap](https://github.com/kvnnap) (Fixes #782) +- XtreamCodes M3U files now correctly set `x-tvg-url` and `url-tvg` headers to reference XC EPG URL (`xmltv.php`) instead of standard EPG endpoint when downloaded via XC API (Fixes #629) + +## [0.15.1] - 2025-12-22 + +### Fixed + +- XtreamCodes EPG `has_archive` field now returns integer `0` instead of string `"0"` for proper JSON type consistency +- nginx now gracefully handles hosts without IPv6 support by automatically disabling IPv6 binding at startup (Fixes #744) + +## [0.15.0] - 2025-12-20 + +### Added + +- VOD client stop button in Stats page: Users can now disconnect individual VOD clients from the Stats view, similar to the existing channel client disconnect functionality. 
+- Automated configuration backup/restore system with scheduled backups, retention policies, and async task processing - Thanks [@stlalpha](https://github.com/stlalpha) (Closes #153) +- Stream group as available hash option: Users can now select 'Group' as a hash key option in Settings → Stream Settings → M3U Hash Key, allowing streams to be differentiated by their group membership in addition to name, URL, TVG-ID, and M3U ID + +### Changed + +- Initial super user creation page now matches the login page design with logo, welcome message, divider, and version display for a more consistent and polished first-time setup experience +- Removed unreachable code path in m3u output - Thanks [@DawtCom](https://github.com/DawtCom) +- GitHub Actions workflows now use `docker/metadata-action` for cleaner and more maintainable OCI-compliant image label generation across all build pipelines (ci.yml, base-image.yml, release.yml). Labels are applied to both platform-specific images and multi-arch manifests with proper annotation formatting. - Thanks [@mrdynamo](https://github.com/mrdynamo) (Closes #724) +- Update docker/dev-build.sh to support private registries, multiple architectures, and pushing. Now you can do things like `dev-build.sh -p -r my.private.registry -a linux/arm64,linux/amd64` - Thanks [@jdblack](https://github.com/jdblack) +- Updated dependencies: Django (5.2.4 → 5.2.9) includes CVE security patch, psycopg2-binary (2.9.10 → 2.9.11), celery (5.5.3 → 5.6.0), djangorestframework (3.16.0 → 3.16.1), requests (2.32.4 → 2.32.5), psutil (7.0.0 → 7.1.3), gevent (25.5.1 → 25.9.1), rapidfuzz (3.13.0 → 3.14.3), torch (2.7.1 → 2.9.1), sentence-transformers (5.1.0 → 5.2.0), lxml (6.0.0 → 6.0.2) (Closes #662) +- Frontend dependencies updated: Vite (6.2.0 → 7.1.7), ESLint (9.21.0 → 9.27.0), and related packages; added npm `overrides` to enforce js-yaml@^4.1.1 for a transitive security fix. All 6 reported vulnerabilities resolved with `npm audit fix`. +- Floating video player now supports resizing via drag handles, with minimum size enforcement and viewport/page boundary constraints to keep it visible. +- Redis connection settings now fully configurable via environment variables (`REDIS_HOST`, `REDIS_PORT`, `REDIS_DB`, `REDIS_URL`), replacing hardcoded `localhost:6379` values throughout the codebase. This enables use of external Redis services in production deployments. (Closes #762) +- Celery broker and result backend URLs now respect `REDIS_HOST`/`REDIS_PORT`/`REDIS_DB` settings as defaults, with `CELERY_BROKER_URL` and `CELERY_RESULT_BACKEND` environment variables available for override. + +### Fixed + +- Docker init script now validates DISPATCHARR_PORT is an integer before using it, preventing sed errors when Kubernetes sets it to a service URL like `tcp://10.98.37.10:80`.
Falls back to default port 9191 when invalid (Fixes #737) +- M3U Profile form now properly resets local state for search and replace patterns after saving, preventing validation errors when adding multiple profiles in a row +- DVR series rule deletion now properly handles TVG IDs that contain slashes by encoding them in the URL path (Fixes #697) +- VOD episode processing now correctly handles duplicate episodes (same episode in multiple languages/qualities) by reusing Episode records across multiple M3UEpisodeRelation entries instead of attempting to create duplicates (Fixes #556) +- XtreamCodes series streaming endpoint now correctly handles episodes with multiple streams (different languages/qualities) by selecting the best available stream based on account priority (Fixes #569) +- XtreamCodes series info API now returns unique episodes instead of duplicate entries when multiple streams exist for the same episode (different languages/qualities) +- nginx now gracefully handles hosts without IPv6 support by automatically disabling IPv6 binding at startup (Fixes #744) +- XtreamCodes EPG API now returns correct date/time format for start/end fields and proper string types for timestamps and channel_id +- XtreamCodes EPG API now handles None values for title and description fields to prevent AttributeError +- XtreamCodes EPG `id` field now provides unique identifiers per program listing instead of always returning "0" for better client EPG handling +- XtreamCodes EPG `epg_id` field now correctly returns the EPGData record ID (representing the EPG source/channel mapping) instead of a dummy value + +## [0.14.0] - 2025-12-09 + +### Added + +- Sort buttons for 'Group' and 'M3U' columns in Streams table for improved stream organization and filtering - Thanks [@bobey6](https://github.com/bobey6) +- EPG source priority field for controlling which EPG source is preferred when multiple sources have matching entries for a channel (higher numbers = higher priority) (Closes #603) + +### Changed + +- EPG program parsing optimized for sources with many channels but only a fraction mapped. Now parses XML file once per source instead of once per channel, dramatically reducing I/O and CPU overhead. For sources with 10,000 channels and 100 mapped, this results in ~99x fewer file opens and ~100x fewer full file scans. Orphaned programs for unmapped channels are also cleaned up during refresh to prevent database bloat. Database updates are now atomic to prevent clients from seeing empty/partial EPG data during refresh. +- EPG table now displays detailed status messages including refresh progress, success messages, and last message for idle sources (matching M3U table behavior) (Closes #214) +- IPv6 access now allowed by default with all IPv6 CIDRs accepted - Thanks [@adrianmace](https://github.com/adrianmace) +- nginx.conf updated to bind to both IPv4 and IPv6 ports - Thanks [@jordandalley](https://github.com/jordandalley) +- EPG matching now respects source priority and only uses active (enabled) EPG sources (Closes #672) +- EPG form API Key field now only visible when Schedules Direct source type is selected + +### Fixed + +- EPG table "Updated" column now updates in real-time via WebSocket using the actual backend timestamp instead of requiring a page refresh +- Bulk channel editor confirmation dialog now displays the correct stream profile name that will be applied to the selected channels. 
+- uWSGI not found and 502 bad gateway on first startup + +## [0.13.1] - 2025-12-06 + +### Fixed + +- JWT token is now generated uniquely for each deployment + +## [0.13.0] - 2025-12-02 + +### Added + +- `CHANGELOG.md` file following Keep a Changelog format to document all notable changes and project history +- System event logging and viewer: Comprehensive logging system that tracks internal application events (M3U refreshes, EPG updates, stream switches, errors) with a dedicated UI viewer for filtering and reviewing historical events. Improves monitoring, troubleshooting, and understanding system behavior +- M3U/EPG endpoint caching: Implements intelligent caching for frequently requested M3U playlists and EPG data to reduce database load and improve response times for clients. +- Search icon to name headers for the channels and streams tables (#686) +- Comprehensive logging for user authentication events and network access restrictions +- Validation for EPG objects and payloads in updateEPG functions to prevent errors from invalid data +- `referrerpolicy` attribute added to YouTube iframes in series and VOD modals for better compatibility + +### Changed + +- XC player API now returns `server_info` for unknown actions to align with provider behavior +- XC player API refactored to streamline action handling and ensure consistent responses +- Date parsing logic in `generate_custom_dummy_programs` improved to handle empty or invalid inputs +- DVR cards now reflect date and time formats chosen by user - Thanks [@Biologisten](https://github.com/Biologisten) +- "Uncategorized" categories and relations now automatically created for VOD accounts to improve content management (#627) +- Improved minimum horizontal size in the stats page for better usability on smaller displays +- M3U and EPG generation now handles missing channel profiles with appropriate error logging + +### Fixed + +- Episode URLs in series modal now use UUID instead of ID, fixing broken links (#684, #694) +- Stream preview now respects selected M3U profile instead of always using default profile (#690) +- Channel groups filter in M3UGroupFilter component now filters out non-existent groups (prevents blank web UI when editing an M3U after a group was removed) +- Stream order now preserved in PATCH/PUT responses from ChannelSerializer, ensuring consistent ordering across all API operations - Thanks [@FiveBoroughs](https://github.com/FiveBoroughs) (#643) +- XC client compatibility: float channel numbers now converted to integers +- M3U account and profile modals now scrollable on mobile devices for improved usability + +## [0.12.0] - 2025-11-19 + +### Added + +- RTSP stream support with automatic protocol detection when a proxy profile requires it. The proxy now forces FFmpeg for RTSP sources and properly handles RTSP URLs - Thanks [@ragchuck](https://github.com/ragchuck) (#184) +- UDP stream support, including correct handling when a proxy profile specifies a UDP source. The proxy now skips HTTP-specific headers (like `user_agent`) for non-HTTP protocols and performs manual redirect handling to improve reliability (#617) +- Separate VOD logos system with a new `VODLogo` model, database migration, dedicated API/viewset, and server-paginated UI.
This separates movie/series logos from channel logos, making cleanup safer and enabling independent bulk operations + +### Changed + +- Background profile refresh now uses a rate-limiting/backoff strategy to avoid provider bans +- Bulk channel editing now validates all requested changes up front and applies updates in a single database transaction +- ProxyServer shutdown & ghost-client handling improved to avoid initializing channels for transient clients and prevent duplicate reinitialization during rapid reconnects +- URL / Stream validation expanded to support credentials on non-FQDN hosts, skips HTTP-only checks for RTSP/RTP/UDP streams, and improved host/port normalization +- TV guide scrolling & timeline synchronization improved with mouse-wheel scrolling, synchronized timeline position with guide navigation, and improved mobile momentum scrolling (#252) +- EPG Source dropdown now sorts alphabetically - Thanks [@0x53c65c0a8bd30fff](https://github.com/0x53c65c0a8bd30fff) +- M3U POST handling restored and improved for clients (e.g., Smarters) that request playlists using HTTP POST - Thanks [@maluueu](https://github.com/maluueu) +- Login form revamped with branding, cleaner layout, loading state, "Remember Me" option, and focused sign-in flow +- Series & VOD now have copy-link buttons in modals for easier URL sharing +- `get_host_and_port` now prioritizes verified port sources and handles reverse-proxy edge cases more accurately (#618) + +### Fixed + +- EXTINF parsing overhauled to correctly extract attributes such as `tvg-id`, `tvg-name`, and `group-title`, even when values include quotes or commas (#637) +- Websocket payload size reduced during EPG processing to avoid UI freezes, blank screens, or memory spikes in the browser (#327) +- Logo management UI fixes including confirmation dialogs, header checkbox reset, delete button reliability, and full client refetch after cleanup + +## [0.11.2] - 2025-11-04 + +### Added + +- Custom Dummy EPG improvements: + - Support for using an existing Custom Dummy EPG as a template for creating new EPGs + - Custom fallback templates for unmatched patterns + - `{endtime}` as an available output placeholder and renamed `{time}` → `{starttime}` (#590) + - Support for date placeholders that respect both source and output timezones (#597) + - Ability to bulk assign Custom Dummy EPGs to multiple channels + - "Include New Tag" option to mark programs as new in Dummy EPG output + - Support for month strings in date parsing + - Ability to set custom posters and channel logos via regex patterns for Custom Dummy EPGs + - Improved DST handling by calculating offsets based on the actual program date, not today's date + +### Changed + +- Stream model maximum URL length increased from 2000 to 4096 characters (#585) +- Groups now sorted during `xc_get_live_categories` based on the order they first appear (by lowest channel number) +- Client TTL settings updated and periodic refresh implemented during active streaming to maintain accurate connection tracking +- `ProgramData.sub_title` field changed from `CharField` to `TextField` to allow subtitles longer than 255 characters (#579) +- Startup improved by verifying `/data` directory ownership and automatically fixing permissions if needed. 
Pre-creates `/data/models` during initialization (#614) +- Port detection enhanced to check `request.META.get("SERVER_PORT")` before falling back to defaults, ensuring correct port when generating M3U, EPG, and logo URLs - Thanks [@lasharor](https://github.com/lasharor) + +### Fixed + +- Custom Dummy EPG frontend DST calculation now uses program date instead of current date +- Channel titles no longer truncated early after an apostrophe - Thanks [@0x53c65c0a8bd30fff](https://github.com/0x53c65c0a8bd30fff) + +## [0.11.1] - 2025-10-22 + +### Fixed + +- uWSGI not receiving environmental variables +- LXC unable to access daemons launched by uWSGI ([#575](https://github.com/Dispatcharr/Dispatcharr/issues/575), [#576](https://github.com/Dispatcharr/Dispatcharr/issues/576), [#577](https://github.com/Dispatcharr/Dispatcharr/issues/577)) + +## [0.11.0] - 2025-10-22 + +### Added + +- Custom Dummy EPG system: + - Regex pattern matching and name source selection + - Support for custom upcoming and ended programs + - Timezone-aware with source and local timezone selection + - Option to include categories and date/live tags in Dummy EPG output + - (#293) +- Auto-Enable & Category Improvements: + - Auto-enable settings for new groups and categories in M3U and VOD components (#208) +- IPv6 CIDR validation in Settings - Thanks [@jordandalley](https://github.com/jordandalley) (#236) +- Custom logo support for channel groups in Auto Sync Channels (#555) +- Tooltips added to the Stream Table + +### Changed + +- Celery and uWSGI now have configurable `nice` levels (defaults: `uWSGI=0`, `Celery=5`) to prioritize streaming when needed. (#571) +- Directory creation and ownership management refactored in init scripts to avoid unnecessary recursive `chown` operations and improve boot speed +- HTTP streamer switched to threaded model with piped output for improved robustness +- Chunk timeout configuration improved and StreamManager timeout handling enhanced +- Proxy timeout values reduced to avoid unnecessary waiting +- Resource cleanup improved to prevent "Too many open files" errors +- Proxy settings caching implemented and database connections properly closed after use +- EPG program fetching optimized with chunked retrieval and explicit ordering to reduce memory usage during output +- EPG output now sorted by channel number for consistent presentation +- Stream Table buttons reordered for better usability +- Database connection handling improved throughout the codebase to reduce overall connection count + +### Fixed + +- Crash when resizing columns in the Channel Table (#516) +- Errors when saving stream settings (#535) +- Preview and edit bugs for custom streams where profile and group selections did not display correctly +- `channel_id` and `channel.uuid` now converted to strings before processing to fix manual switching when the uWSGI worker was not the stream owner (#269) +- Stream locking and connection search issues when switching channels; increased search timeout to reduce premature failures (#503) +- Stream Table buttons no longer shift into multiple rows when selecting many streams +- Custom stream previews +- Custom Stream settings not loading properly (#186) +- Orphaned categories now automatically removed for VOD and Series during M3U refresh (#540) + +## [0.10.4] - 2025-10-08 + +### Added + +- "Assign TVG-ID from EPG" functionality with frontend actions for single-channel and batch operations +- Confirmation dialogs in `ChannelBatchForm` for setting names, logos, TVG-IDs, and clearing EPG 
assignments +- "Clear EPG" button to `ChannelBatchForm` for easy reset of assignments +- Batch editing of channel logos - Thanks [@EmeraldPi](https://github.com/EmeraldPi) +- Ability to set logo name from URL - Thanks [@EmeraldPi](https://github.com/EmeraldPi) +- Proper timestamp tracking for channel creation and updates; `XC Get Live Streams` now uses this information +- Time Zone Settings added to the application ([#482](https://github.com/Dispatcharr/Dispatcharr/issues/482), [#347](https://github.com/Dispatcharr/Dispatcharr/issues/347)) +- Comskip settings support including comskip.ini upload and custom directory selection (#418) +- Manual recording scheduling for channels without EPG data (#162) + +### Changed + +- Default M3U account type is now set to XC for new accounts +- Performance optimization: Only fetch playlists and channel profiles after a successful M3U refresh (rather than every status update) +- Playlist retrieval now includes current connection counts and improved session handling during VOD session start +- Improved stream selection logic when all profiles have reached max connections (retries faster) + +### Fixed + +- Large EPGs now fully parse all channels +- Duplicate channel outputs for streamer profiles set to "All" +- Streamer profiles with "All" assigned now receive all eligible channels +- PostgreSQL btree index errors from logo URL validation during channel creation (#519) +- M3U processing lock not releasing when no streams found during XC refresh, which also skipped VOD scanning (#449) +- Float conversion errors by normalizing decimal format during VOD scanning (#526) +- Direct URL ordering in M3U output to use correct stream sequence (#528) +- Adding multiple M3U accounts without refreshing modified only the first entry (#397) +- UI state bug where new playlist creation was not notified to frontend ("Fetching Groups" stuck) +- Minor FFmpeg task and stream termination bugs in DVR module +- Input escaping issue where single quotes were interpreted as code delimiters (#406) + +## [0.10.3] - 2025-10-04 + +### Added + +- Logo management UI improvements where Channel editor now uses the Logo Manager modal, allowing users to add logos by URL directly from the edit form - Thanks [@EmeraldPi](https://github.com/EmeraldPi) + +### Changed + +- FFmpeg base container rebuilt with improved native build support - Thanks [@EmeraldPi](https://github.com/EmeraldPi) +- GitHub Actions workflow updated to use native runners instead of QEMU emulation for more reliable multi-architecture builds + +### Fixed + +- EPG parsing stability when large EPG files would not fully parse all channels. 
Parser now uses `iterparse` with `recover=True` for both channel and program-level parsing, ensuring complete and resilient XML processing even when Cloudflare injects additional root elements + +## [0.10.2] - 2025-10-03 + +### Added + +- `m3u_id` parameter to `generate_hash_key` and updated related calls +- Support for `x-tvg-url` and `url-tvg` generation with preserved query parameters (#345) +- Exact Gracenote ID matching for EPG channel mapping (#291) +- Recovery handling for XMLTV parser errors +- `nice -n 5` added to Celery commands for better process priority management + +### Changed + +- Default M3U hash key changed to URL only for new installs +- M3U profile retrieval now includes current connection counts and improved session handling during VOD session start +- Improved stream selection logic when all profiles have reached max connections (retries faster) +- XMLTV parsing refactored to use `iterparse` for `` element +- Release workflow refactored to run on native architecture +- Docker build system improvements: + - Split install/build steps + - Switch from Yarn → NPM + - Updated to Node.js 24 (frontend build) + - Improved ARM build reliability + - Pushes to DockerHub with combined manifest + - Removed redundant tags and improved build organization + +### Fixed + +- Cloudflare-hosted EPG feeds breaking parsing (#497) +- Bulk channel creation now preserves the order channels were selected in (no longer reversed) +- M3U hash settings not saving properly +- VOD selecting the wrong M3U profile at session start (#461) +- Redundant `h` removed from 12-hour time format in settings page + +## [0.10.1] - 2025-09-24 + +### Added + +- Virtualized rendering for TV Guide for smoother performance when displaying large guides - Thanks [@stlalpha](https://github.com/stlalpha) (#438) +- Enhanced channel/program mapping to reuse EPG data across multiple channels that share the same TVG-ID + +### Changed + +- `URL` field length in EPGSource model increased from 200 → 1000 characters to support long URLs with tokens +- Improved URL transformation logic with more advanced regex during profile refreshes +- During EPG scanning, the first display name for a channel is now used instead of the last +- `whiteSpace` style changed from `nowrap` → `pre` in StreamsTable for better text formatting + +### Fixed + +- EPG channel parsing failure when channel `URL` exceeded 500 characters by adding validation during scanning (#452) +- Frontend incorrectly saving case-sensitive setting as a JSON string for stream filters + +## [0.10.0] - 2025-09-18 + +### Added + +- Channel Creation Improvements: + - Ability to specify channel number during channel creation ([#377](https://github.com/Dispatcharr/Dispatcharr/issues/377), [#169](https://github.com/Dispatcharr/Dispatcharr/issues/169)) + - Asynchronous bulk channel creation from stream IDs with WebSocket progress updates + - WebSocket notifications when channels are created +- EPG Auto-Matching (Rewritten & Enhanced): + - Completely refactored for improved accuracy and efficiency + - Can now be applied to selected channels or triggered directly from the channel edit form + - Uses stricter matching logic with support from sentence transformers + - Added progress notifications during the matching process + - Implemented memory cleanup for ML models after matching operations + - Removed deprecated matching scripts +- Logo & EPG Management: + - Ability in channel edit form and bulk channel editor to set logos and names from assigned EPG (#157) + - Improved logo update 
flow: frontend refreshes on changes, store updates after bulk changes, progress shown via notifications +- Table Enhancements: + - All tables now support adjustable column resizing (#295) + - Channels and Streams tables persist column widths and center divider position to local storage + - Improved sizing and layout for user-agents, stream profiles, logos, M3U, and EPG tables + +### Changed + +- Simplified VOD and series access: removed user-level restrictions on M3U accounts +- Skip disabled M3U accounts when choosing streams during playback (#402) +- Enhanced `UserViewSet` queryset to prefetch related channel profiles for better performance +- Auto-focus added to EPG filter input +- Category API retrieval now sorts by name +- Increased default column size for EPG fields and removed max size on group/EPG columns +- Standardized EPG column header to display `(EPG ID - TVG-ID)` + +### Fixed + +- Bug during VOD cleanup where all VODs not from the current M3U scan could be deleted +- Logos not being set correctly in some cases +- Bug where not setting a channel number caused an error when creating a channel (#422) +- Bug where clicking "Add Channel" with a channel selected opened the edit form instead +- Bug where a newly created channel could reuse streams from another channel due to form not clearing properly +- VOD page not displaying correct order while changing pages +- `ReferenceError: setIsInitialized is not defined` when logging into web UI +- `cannot access local variable 'total_chunks' where it is not associated with a value` during VOD refresh + +## [0.9.1] - 2025-09-13 + +### Fixed + +- Broken migrations affecting the plugins system +- DVR and plugin paths to ensure proper functionality (#381) + +## [0.9.0] - 2025-09-12 + +### Added + +- **Video on Demand (VOD) System:** + - Complete VOD infrastructure with support for movies and TV series + - Advanced VOD metadata including IMDB/TMDB integration, trailers, cast information + - Smart VOD categorization with filtering by type (movies vs series) + - Multi-provider VOD support with priority-based selection + - VOD streaming proxy with connection tracking and statistics + - Season/episode organization for TV series with expandable episode details + - VOD statistics and monitoring integrated with existing stats dashboard + - Optimized VOD parsing and category filtering + - Dedicated VOD page with movies and series tabs + - Rich VOD modals with backdrop images, trailers, and metadata + - Episode management with season-based organization + - Play button integration with external player support + - VOD statistics cards similar to channel cards +- **Plugin System:** + - Extensible Plugin Framework - Developers can build custom functionality without modifying Dispatcharr core + - Plugin Discovery & Management - Automatic detection of installed plugins, with enable/disable controls in the UI + - Backend API Support - New APIs for listing, loading, and managing plugins programmatically + - Plugin Registry - Structured models for plugin metadata (name, version, author, description) + - UI Enhancements - Dedicated Plugins page in the admin panel for centralized plugin management + - Documentation & Scaffolding - Initial documentation and scaffolding to accelerate plugin development +- **DVR System:** + - Refreshed DVR page for managing scheduled and completed recordings + - Global pre/post padding controls surfaced in Settings + - Playback support for completed recordings directly in the UI + - DVR table view includes title, channel, time, and 
padding adjustments for clear scheduling + - Improved population of DVR listings, fixing intermittent blank screen issues + - Comskip integration for automated commercial detection and skipping in recordings + - User-configurable comskip toggle in Settings +- **Enhanced Channel Management:** + - EPG column added to channels table for better organization + - EPG filtering by channel assignment and source name + - Channel batch renaming for efficient bulk channel name updates + - Auto channel sync improvements with custom stream profile override + - Channel logo management overhaul with background loading +- Date and time format customization in settings - Thanks [@Biologisten](https://github.com/Biologisten) +- Auto-refresh intervals for statistics with better UI controls +- M3U profile notes field for better organization +- XC account information retrieval and display with account refresh functionality and notifications + +### Changed + +- JSONB field conversion for custom properties (replacing text fields) for better performance +- Database encoding converted from ASCII to UTF8 for better character support +- Batch processing for M3U updates and channel operations +- Query optimization with prefetch_related to eliminate N+1 queries +- Reduced API calls by fetching all data at once instead of per-category +- Buffering speed setting now affects UI indicators +- Swagger endpoint accessible with or without trailing slash +- EPG source names displayed before channel names in edit forms +- Logo loading improvements with background processing +- Channel card enhancements with better status indicators +- Group column width optimization +- Better content-type detection for streams +- Improved headers with content-range and total length +- Enhanced user-agent handling for M3U accounts +- HEAD request support with connection keep-alive +- Progress tracking improvements for clients with new sessions +- Server URL length increased to 1000 characters for token support +- Prettier formatting applied to all frontend code +- String quote standardization and code formatting improvements + +### Fixed + +- Logo loading issues in channel edit forms resolved +- M3U download error handling and user feedback improved +- Unique constraint violations fixed during stream rehashing +- Channel stats fetching moved from Celery beat task to configurable API calls +- Speed badge colors now use configurable buffering speed setting +- Channel cards properly close when streams stop +- Active streams labeling updated from "Active Channels" +- WebSocket updates for client connect/disconnect events +- Null value handling before database saves +- Empty string scrubbing for cleaner data +- Group relationship cleanup for removed M3U groups +- Logo cleanup for unused files with proper batch processing +- Recordings start 5 mins after show starts (#102) + +### Closed + +- [#350](https://github.com/Dispatcharr/Dispatcharr/issues/350): Allow DVR recordings to be played via the UI +- [#349](https://github.com/Dispatcharr/Dispatcharr/issues/349): DVR screen doesn't populate consistently +- [#340](https://github.com/Dispatcharr/Dispatcharr/issues/340): Global find and replace +- [#311](https://github.com/Dispatcharr/Dispatcharr/issues/311): Stat's "Current Speed" does not reflect "Buffering Speed" setting +- [#304](https://github.com/Dispatcharr/Dispatcharr/issues/304): Name ignored when uploading logo +- [#300](https://github.com/Dispatcharr/Dispatcharr/issues/300): Updating Logo throws error +- 
[#286](https://github.com/Dispatcharr/Dispatcharr/issues/286): 2 Value/Column EPG in Channel Edit +- [#280](https://github.com/Dispatcharr/Dispatcharr/issues/280): Add general text field in M3U/XS profiles +- [#190](https://github.com/Dispatcharr/Dispatcharr/issues/190): Show which stream is being used and allow it to be altered in channel properties +- [#155](https://github.com/Dispatcharr/Dispatcharr/issues/155): Additional column with EPG assignment information / Allow filtering by EPG assignment +- [#138](https://github.com/Dispatcharr/Dispatcharr/issues/138): Bulk Channel Edit Functions + +## [0.8.0] - 2025-08-19 + +### Added + +- Channel & Stream Enhancements: + - Preview streams under a channel, with stream logo and name displayed in the channel card + - Advanced stats for channel streams + - Stream qualities displayed in the channel table + - Stream stats now saved to the database + - URL badges can now be clicked to copy stream links to the clipboard +- M3U Filtering for Streams: + - Streams for an M3U account can now be filtered using flexible parameters + - Apply filters based on stream name, group title, or stream URL (via regex) + - Filters support both inclusion and exclusion logic for precise control + - Multiple filters can be layered with a priority order for complex rules +- Ability to reverse the sort order for auto channel sync +- Custom validator for URL fields now allows non-FQDN hostnames (#63) +- Membership creation added in `UpdateChannelMembershipAPIView` if not found (#275) + +### Changed + +- Bumped Postgres to version 17 +- Updated dependencies in `requirements.txt` for compatibility and improvements +- Improved chunked extraction to prevent memory issues - Thanks [@pantherale0](https://github.com/pantherale0) + +### Fixed + +- XML escaping for channel ID in `generate_dummy_epg` function +- Bug where creating a channel from a stream not displayed in the table used an invalid stream name +- Debian install script - Thanks [@deku-m](https://github.com/deku-m) + +## [0.7.1] - 2025-07-29 + +### Added + +- Natural sorting for channel names during auto channel sync +- Ability to sort auto sync order by provider order (default), channel name, TVG ID, or last updated time +- Auto-created channels can now be assigned to specific channel profiles (#255) +- Channel profiles are now fetched automatically after a successful M3U refresh +- Uses only whole numbers when assigning the next available channel number + +### Changed + +- Logo upload behavior changed to wait for the Create button before saving +- Uses the channel name as the display name in EPG output for improved readability +- Ensures channels are only added to a selected profile if one is explicitly chosen + +### Fixed + +- Logo Manager prevents redundant messages from the file scanner by properly tracking uploaded logos in Redis +- Fixed an issue preventing logo uploads via URL +- Adds internal support for assigning multiple profiles via API + +## [0.7.0] - 2025-07-19 + +### Added + +- **Logo Manager:** + - Complete logo management system with filtering, search, and usage tracking + - Upload logos directly through the UI + - Automatically scan `/data/logos` for existing files (#69) + - View which channels use each logo + - Bulk delete unused logos with cleanup + - Enhanced display with hover effects and improved sizing + - Improved logo fetching with timeouts and user-agent headers to prevent hanging +- **Group Manager:** + - Comprehensive group management interface (#128) + - Search and filter groups with ease + 
- Bulk operations for cleanup + - Filter channels by group membership + - Automatically clean up unused groups +- **Auto Channel Sync:** + - Automatic channel synchronization from M3U sources (#147) + - Configure auto-sync settings per M3U account group + - Set starting channel numbers by group + - Override group names during sync + - Apply regex match and replace for channel names + - Filter channels by regex match on stream name + - Track auto-created vs manually added channels + - Smart updates preserve UUIDs and existing links +- Stream rehashing with WebSocket notifications +- Better error handling for blocked rehash attempts +- Lock acquisition to prevent conflicts +- Real-time progress tracking + +### Changed + +- Persist table page sizes in local storage (streams & channels) +- Smoother pagination and improved UX +- Fixed z-index issues during table refreshes +- Improved XC client with connection pooling +- Better error handling for API and JSON decode failures +- Smarter handling of empty content and blocking responses +- Improved EPG XML generation with richer metadata +- Better support for keywords, languages, ratings, and credits +- Better form layouts and responsive buttons +- Enhanced confirmation dialogs and feedback + +### Fixed + +- Channel table now correctly restores page size from local storage +- Resolved WebSocket message formatting issues +- Fixed logo uploads and edits +- Corrected ESLint issues across the codebase +- Fixed HTML validation errors in menus +- Optimized logo fetching with proper timeouts and headers ([#101](https://github.com/Dispatcharr/Dispatcharr/issues/101), [#217](https://github.com/Dispatcharr/Dispatcharr/issues/217)) + +## [0.6.2] - 2025-07-10 + +### Fixed + +- **Streaming & Connection Stability:** + - Provider timeout issues - Slow but responsive providers no longer cause channel lockups + - Added chunk and process timeouts - Prevents hanging during stream processing and transcoding + - Improved connection handling - Enhanced process management and socket closure detection for safer streaming + - Enhanced health monitoring - Health monitor now properly notifies main thread without attempting reconnections +- **User Interface & Experience:** + - Touch screen compatibility - Web player can now be properly closed on touch devices + - Improved user management - Added support for first/last names, login tracking, and standardized table formatting +- Improved logging - Enhanced log messages with channel IDs for better debugging +- Code cleanup - Removed unused imports, variables, and dead links + +## [0.6.1] - 2025-06-27 + +### Added + +- Dynamic parameter options for M3U and EPG URLs (#207) +- Support for 'num' property in channel number extraction (fixes channel creation from XC streams not having channel numbers) + +### Changed + +- EPG generation now uses streaming responses to prevent client timeouts during large EPG file generation (#179) +- Improved reliability when downloading EPG data from external sources +- Better program positioning - Programs that start before the current view now have proper text positioning (#223) +- Better mobile support - Improved sizing and layout for mobile devices across multiple tables +- Responsive stats cards - Better calculation for card layout and improved filling on different screen sizes (#218) +- Enhanced table rendering - M3U and EPG tables now render better on small screens +- Optimized spacing - Removed unnecessary padding and blank space throughout the interface +- Better settings layout - Improved 
minimum widths and mobile support for settings pages +- Always show 2 decimal places for FFmpeg speed values + +### Fixed + +- TV Guide now properly filters channels based on selected channel group +- Resolved loading issues - Fixed channels and groups not loading correctly in the TV Guide +- Stream profile fixes - Resolved issue with setting stream profile to 'use default' +- Single channel editing - When only one channel is selected, the correct channel editor now opens +- Bulk edit improvements - Added "no change" options for bulk editing operations +- Bulk channel editor now properly saves changes (#222) +- Link form improvements - Better sizing and rendering of link forms with proper layering +- Confirmation dialogs added with warning suppression for user deletion, channel profile deletion, and M3U profile deletion + +## [0.6.0] - 2025-06-19 + +### Added + +- **User Management & Access Control:** + - Complete user management system with user levels and channel access controls + - Network access control with CIDR validation and IP-based restrictions + - Logout functionality and improved loading states for authenticated users +- **Xtream Codes Output:** + - Xtream Codes support enables easy output to IPTV clients (#195) +- **Stream Management & Monitoring:** + - FFmpeg statistics integration - Real-time display of video/audio codec info, resolution, speed, and stream type + - Automatic stream switching when buffering is detected + - Enhanced stream profile management with better connection tracking + - Improved stream state detection, including buffering as an active state +- **Channel Management:** + - Bulk channel editing for channel group, stream profile, and user access level +- **Enhanced M3U & EPG Features:** + - Dynamic `tvg-id` source selection for M3U and EPG (`tvg_id`, `gracenote`, or `channel_number`) + - Direct URL support in M3U output via `direct=true` parameter + - Flexible EPG output with a configurable day limit via `days=#` parameter + - Support for LIVE tags and `dd_progrid` numbering in EPG processing +- Proxy settings configuration with UI integration and improved validation +- Stream retention controls - Set stale stream days to `0` to disable retention completely (#123) +- Tuner flexibility - Minimum of 1 tuner now allowed for HDHomeRun output +- Fallback IP geolocation provider (#127) - Thanks [@maluueu](https://github.com/maluueu) +- POST method now allowed for M3U output, enabling support for Smarters IPTV - Thanks [@maluueu](https://github.com/maluueu) + +### Changed + +- Improved channel cards with better status indicators and tooltips +- Clearer error messaging for unsupported codecs in the web player +- Network access warnings to prevent accidental lockouts +- Case-insensitive M3U parsing for improved compatibility +- Better EPG processing with improved channel matching +- Replaced Mantine React Table with custom implementations +- Improved tooltips and parameter wrapping for cleaner interfaces +- Better badge colors and status indicators +- Stronger form validation and user feedback +- Streamlined settings management using JSON configs +- Default value population for clean installs +- Environment-specific configuration support for multiple deployment scenarios + +### Fixed + +- FFmpeg process cleanup - Ensures FFmpeg fully exits before marking connection closed +- Resolved stream profile update issues in statistics display +- Fixed M3U profile ID behavior when switching streams +- Corrected stream switching logic - Redis is only updated on successful 
switches +- Fixed connection counting - Excludes the current profile from available connection counts +- Fixed custom stream channel creation when no group is assigned (#122) +- Resolved EPG auto-matching deadlock when many channels match simultaneously - Thanks [@xham3](https://github.com/xham3) + +## [0.5.2] - 2025-06-03 + +### Added + +- Direct Logo Support: Added ability to bypass logo caching by adding `?cachedlogos=false` to the end of M3U and EPG URLs (#109) + +### Changed + +- Dynamic Resource Management: Auto-scales Celery workers based on demand, reducing overall memory and CPU usage while still allowing high-demand tasks to complete quickly (#111) +- Enhanced Logging: + - Improved logging for M3U processing + - Better error output from XML parser for easier troubleshooting + +### Fixed + +- XMLTV Parsing: Added `remove_blank_text=True` to lxml parser to prevent crashes with poorly formatted XMLTV files (#115) +- Stats Display: Refactored channel info retrieval for safer decoding and improved error logging, fixing intermittent issues with statistics not displaying properly + +## [0.5.1] - 2025-05-28 + +### Added + +- Support for ZIP-compressed EPG files +- Automatic extraction of compressed files after downloading +- Intelligent file type detection for EPG sources: + - Reads the first bits of files to determine file type + - If a compressed file is detected, it peeks inside to find XML files +- Random descriptions for dummy channels in the TV guide +- Support for decimal channel numbers (converted from integer to float) - Thanks [@MooseyOnTheLoosey](https://github.com/MooseyOnTheLoosey) +- Show channels without EPG data in TV Guide +- Profile name added to HDHR-friendly name and device ID (allows adding multiple HDHR profiles to Plex) + +### Changed + +- About 30% faster EPG processing +- Significantly improved memory usage for large EPG files +- Improved timezone handling +- Cleaned up cached files when deleting EPG sources +- Performance improvements when processing extremely large M3U files +- Improved batch processing with better cleanup +- Enhanced WebSocket update handling for large operations +- Redis configured for better performance (no longer saves to disk) +- Improved memory management for Celery tasks +- Separated beat schedules with a file scanning interval set to 20 seconds +- Improved authentication error handling with user redirection to the login page +- Improved channel card formatting for different screen resolutions (can now actually read the channel stats card on mobile) +- Decreased line height for status messages in the EPG and M3U tables for better appearance on smaller screens +- Updated the EPG form to match the M3U form for consistency + +### Fixed + +- Profile selection issues that previously caused WebUI crashes +- Issue with `tvc-guide-id` (Gracenote ID) in bulk channel creation +- Bug when uploading an M3U with the default user-agent set +- Bug where multiple channel initializations could occur, causing zombie streams and performance issues (choppy streams) +- Better error handling for buffer overflow issues +- Fixed various memory leaks +- Bug in the TV Guide that would crash the web UI when selecting a profile to filter by +- Multiple minor bug fixes and code cleanup + +## [0.5.0] - 2025-05-15 + +### Added + +- **XtreamCodes Support:** + - Initial XtreamCodes client support + - Option to add EPG source with XC account + - Improved XC login and authentication + - Improved error handling for XC connections +- **Hardware Acceleration:** + - Detection 
of hardware acceleration capabilities with recommendations (available in logs after startup) + - Improved support for NVIDIA, Intel (QSV), and VAAPI acceleration methods + - Added necessary drivers and libraries for hardware acceleration + - Automatically assigns required permissions for hardware acceleration + - Thanks to [@BXWeb](https://github.com/BXWeb), @chris.r3x, [@rykr](https://github.com/rykr), @j3111, [@jesmannstl](https://github.com/jesmannstl), @jimmycarbone, [@gordlaben](https://github.com/gordlaben), [@roofussummers](https://github.com/roofussummers), [@slamanna212](https://github.com/slamanna212) +- **M3U and EPG Management:** + - Enhanced M3U profile creation with live regex results + - Added stale stream detection with configurable thresholds + - Improved status messaging for M3U and EPG operations: + - Shows download speed with estimated time remaining + - Shows parsing time remaining + - Added "Pending Setup" status for M3U's requiring group selection + - Improved handling of M3U group filtering +- **UI Improvements:** + - Added configurable table sizes + - Enhanced video player with loading and error states + - Improved WebSocket connection handling with authentication + - Added confirmation dialogs for critical operations + - Auto-assign numbers now configurable by selection + - Added bulk editing of channel profile membership (select multiple channels, then click the profile toggle on any selected channel to apply the change to all) +- **Infrastructure & Performance:** + - Standardized and improved the logging system + - New environment variable to set logging level: `DISPATCHARR_LOG_LEVEL` (default: `INFO`, available: `TRACE`, `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`) + - Introduced a new base image build process: updates are now significantly smaller (typically under 15MB unless the base image changes) + - Improved environment variable handling in container +- Support for Gracenote ID (`tvc-guide-stationid`) - Thanks [@rykr](https://github.com/rykr) +- Improved file upload handling with size limits removed + +### Fixed + +- Issues with profiles not loading correctly +- Problems with stream previews in tables +- Channel creation and editing workflows +- Logo display issues +- WebSocket connection problems +- Multiple React-related errors and warnings +- Pagination and filtering issues in tables + +## [0.4.1] - 2025-05-01 + +### Changed + +- Optimized uWSGI configuration settings for better server performance +- Improved asynchronous processing by converting additional timers to gevent +- Enhanced EPG (Electronic Program Guide) downloading with proper user agent headers + +### Fixed + +- Issue with "add streams to channel" functionality to correctly follow disabled state logic + +## [0.4.0] - 2025-05-01 + +### Added + +- URL copy buttons for stream and channel URLs +- Manual stream switching ability +- EPG auto-match notifications - Users now receive feedback about how many matches were found +- Informative tooltips throughout the interface, including stream profiles and user-agent details +- Display of connected time for each client +- Current M3U profile information to stats +- Better logging for which channel clients are getting chunks from + +### Changed + +- Table System Rewrite: Completely refactored channel and stream tables for dramatically improved performance with large datasets +- Improved Concurrency: Replaced time.sleep with gevent.sleep for better performance when handling multiple streams +- Improved table interactions: + - Restored alternating 
row colors and hover effects + - Added shift-click support for multiple row selection + - Preserved drag-and-drop functionality +- Adjusted logo display to prevent layout shifts with different sized logos +- Improved sticky headers in tables +- Fixed spacing and padding in EPG and M3U tables for better readability on smaller displays +- Stream URL handling improved for search/replace patterns +- Enhanced stream lock management for better reliability +- Added stream name to channel status for better visibility +- Properly track current stream ID during stream switches +- Improved EPG cache handling and cleanup of old cache files +- Corrected content type for M3U file (using m3u instead of m3u8) +- Fixed logo URL handling in M3U generation +- Enhanced tuner count calculation to include only active M3U accounts +- Increased thread stack size in uwsgi configuration +- Changed proxy to use uwsgi socket +- Added build timestamp to version information +- Reduced excessive logging during M3U/EPG file importing +- Improved store variable handling to increase application efficiency +- Frontend now being built by Yarn instead of NPM + +### Fixed + +- Issues with channel statistics randomly not working +- Stream ordering in channel selection +- M3U profile name added to stream names for better identification +- Channel form not updating some properties after saving +- Issue with setting logos to default +- Channel creation from streams +- Channel group saving +- Improved error handling throughout the application +- Bugs in deleting stream profiles +- Resolved mimetype detection issues +- Fixed form display issues +- Added proper requerying after form submissions and item deletions +- Bug overwriting tvg-id when loading TV Guide +- Bug that prevented large m3u's and epg's from uploading +- Typo in Stream Profile header column for Description - Thanks [@LoudSoftware](https://github.com/LoudSoftware) +- Typo in m3u input processing (tv-chno instead of tvg-chno) - Thanks @www2a + +## [0.3.3] - 2025-04-18 + +### Fixed + +- Issue with dummy EPG calculating hours above 24, ensuring time values remain within valid 24-hour format +- Auto import functionality to properly process old files that hadn't been imported yet, rather than ignoring them + +## [0.3.2] - 2025-04-16 + +### Fixed + +- Issue with stream ordering for channels - resolved problem where stream objects were incorrectly processed when assigning order in channel configurations + +## [0.3.1] - 2025-04-16 + +### Added + +- Key to navigation links in sidebar to resolve DOM errors when loading web UI +- Channels that are set to 'dummy' epg to the TV Guide + +### Fixed + +- Issue preventing dummy EPG from being set +- Channel numbers not saving properly +- EPGs not refreshing when linking EPG to channel +- Improved error messages in notifications + +## [0.3.0] - 2025-04-15 + +### Added + +- URL validation for redirect profile: + - Validates stream URLs before redirecting clients + - Prevents clients from being redirected to unavailable streams + - Now tries alternate streams when primary stream validation fails +- Dynamic tuner configuration for HDHomeRun devices: + - TunerCount is now dynamically created based on profile max connections + - Sets minimum of 2 tuners, up to 10 for unlimited profiles + +### Changed + +- More robust stream switching: + - Clients now wait properly if a stream is in the switching state + - Improved reliability during stream transitions +- Performance enhancements: + - Increased workers and threads for uwsgi for better 
concurrency + +### Fixed + +- Issue with multiple dead streams in a row - System now properly handles cases where several sequential streams are unavailable +- Broken links to compose files in documentation + +## [0.2.1] - 2025-04-13 + +### Fixed + +- Stream preview (not channel) +- Streaming wouldn't work when using default user-agent for an M3U +- WebSockets and M3U profile form issues + +## [0.2.0] - 2025-04-12 + +Initial beta public release. diff --git a/Plugins.md b/Plugins.md new file mode 100644 index 00000000..62ea0d87 --- /dev/null +++ b/Plugins.md @@ -0,0 +1,286 @@ +# Dispatcharr Plugins + +This document explains how to build, install, and use Python plugins in Dispatcharr. It covers discovery, the plugin interface, settings, actions, how to access application APIs, and examples. + +--- + +## Quick Start + +1) Create a folder under `/app/data/plugins/my_plugin/` (host path `data/plugins/my_plugin/` in the repo). + +2) Add a `plugin.py` file exporting a `Plugin` class: + +``` +# /app/data/plugins/my_plugin/plugin.py +class Plugin: + name = "My Plugin" + version = "0.1.0" + description = "Does something useful" + + # Settings fields rendered by the UI and persisted by the backend + fields = [ + {"id": "enabled", "label": "Enabled", "type": "boolean", "default": True}, + {"id": "limit", "label": "Item limit", "type": "number", "default": 5}, + {"id": "mode", "label": "Mode", "type": "select", "default": "safe", + "options": [ + {"value": "safe", "label": "Safe"}, + {"value": "fast", "label": "Fast"}, + ]}, + {"id": "note", "label": "Note", "type": "string", "default": ""}, + ] + + # Actions appear as buttons. Clicking one calls run(action, params, context) + actions = [ + {"id": "do_work", "label": "Do Work", "description": "Process items"}, + ] + + def run(self, action: str, params: dict, context: dict): + settings = context.get("settings", {}) + logger = context.get("logger") + + if action == "do_work": + limit = int(settings.get("limit", 5)) + mode = settings.get("mode", "safe") + logger.info(f"My Plugin running with limit={limit}, mode={mode}") + # Do a small amount of work here. Schedule Celery tasks for heavy work. + return {"status": "ok", "processed": limit, "mode": mode} + + return {"status": "error", "message": f"Unknown action {action}"} +``` + +3) Open the Plugins page in the UI, click the refresh icon to reload discovery, then configure and run your plugin. + +--- + +## Where Plugins Live + +- Default directory: `/app/data/plugins` inside the container. +- Override with env var: `DISPATCHARR_PLUGINS_DIR`. +- Each plugin is a directory containing either: + - `plugin.py` exporting a `Plugin` class, or + - a Python package (`__init__.py`) exporting a `Plugin` class. + +The directory name (lowercased, spaces as `_`) is used as the registry key and module import path (e.g. `my_plugin.plugin`). + +--- + +## Discovery & Lifecycle + +- Discovery runs at server startup and on-demand when: + - Fetching the plugins list from the UI + - Hitting `POST /api/plugins/plugins/reload/` +- The loader imports each plugin module and instantiates `Plugin()`. +- Metadata (name, version, description) and a per-plugin settings JSON are stored in the DB. + +Backend code: +- Loader: `apps/plugins/loader.py` +- API Views: `apps/plugins/api_views.py` +- API URLs: `apps/plugins/api_urls.py` +- Model: `apps/plugins/models.py` (stores `enabled` flag and `settings` per plugin) + +--- + +## Plugin Interface + +Export a `Plugin` class. 
Supported attributes and behavior: + + - `name` (str): Human-readable name. + - `version` (str): Semantic version string. + - `description` (str): Short description. + - `fields` (list): Settings schema used by the UI to render controls. + - `actions` (list): Available actions; the UI renders a Run button for each. + - `run(action, params, context)` (callable): Invoked when a user clicks an action. + + ### Settings Schema + Supported field `type`s: + - `boolean` + - `number` + - `string` + - `select` (requires `options`: `[{"value": ..., "label": ...}, ...]`) + + Common field keys: + - `id` (str): Settings key. + - `label` (str): Label shown in the UI. + - `type` (str): One of the above. + - `default` (any): Default value used until saved. + - `help_text` (str, optional): Shown under the control. + - `options` (list, for select): List of `{value, label}`. + + The UI automatically renders settings and persists them. The backend stores settings in `PluginConfig.settings`. + + Read settings in `run` via `context["settings"]`. + + ### Actions + Each action is a dict: + - `id` (str): Unique action id. + - `label` (str): Button label. + - `description` (str, optional): Helper text. + + Clicking an action calls your plugin’s `run(action, params, context)` and shows a notification with the result or error. + + ### Action Confirmation (Modal) + Developers can request a confirmation modal per action using the `confirm` key on the action. Options: + + - Boolean: `confirm: true` will show a default confirmation modal. + - Object: `confirm: { required: true, title: '...', message: '...' }` to customize the modal title and message. + + Example: + ``` + actions = [ + { + "id": "danger_run", + "label": "Do Something Risky", + "description": "Runs a job that affects many records.", + "confirm": { "required": True, "title": "Proceed?", "message": "This will modify many records." }, + } + ] + ``` + + --- + + ## Accessing Dispatcharr APIs from Plugins + + Plugins are server-side Python code running within the Django application. You can: + + - Import models and run queries/updates: + ``` + from apps.m3u.models import M3UAccount + from apps.epg.models import EPGSource + from apps.channels.models import Channel + from core.models import CoreSettings + ``` + + - Dispatch Celery tasks for heavy work (recommended): + ``` + from apps.m3u.tasks import refresh_m3u_accounts # apps/m3u/tasks.py + from apps.epg.tasks import refresh_all_epg_data # apps/epg/tasks.py + + refresh_m3u_accounts.delay() + refresh_all_epg_data.delay() + ``` + + - Send WebSocket updates: + ``` + from core.utils import send_websocket_update + send_websocket_update('updates', 'update', {"type": "plugin", "plugin": "my_plugin", "message": "Done"}) + ``` + + - Use transactions: + ``` + from django.db import transaction + with transaction.atomic(): + # bulk updates here + ... + ``` + + - Log via provided context or standard logging: + ``` + def run(self, action, params, context): + logger = context.get("logger") # already configured + logger.info("running action %s", action) + ``` + + Prefer Celery tasks (`.delay()`) to keep `run` fast and non-blocking. + + --- + + ## REST Endpoints (for UI and tooling) + + - List plugins: `GET /api/plugins/plugins/` + - Response: `{ "plugins": [{ key, name, version, description, enabled, fields, settings, actions }, ...] }` + - Reload discovery: `POST /api/plugins/plugins/reload/` + - Import plugin: `POST /api/plugins/plugins/import/` with form-data file field `file` + - Update settings: `POST /api/plugins/plugins/<key>/settings/` with `{"settings": {...}}` + - Run action: `POST /api/plugins/plugins/<key>/run/` with `{"action": "id", "params": {...}}` + - Enable/disable: `POST /api/plugins/plugins/<key>/enabled/` with `{"enabled": true|false}` + + Notes: + - When disabled, a plugin cannot run actions; backend returns HTTP 403.
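For quick testing outside the UI, the endpoints above can be driven from a short script. The sketch below is illustrative rather than part of Dispatcharr's tooling: it assumes the `requests` package is available, that the instance is reachable at the base URL shown, that JWT tokens are issued at `/api/accounts/token/` (adjust if your deployment differs), and that a plugin with the Quick Start key `my_plugin` exists.

```
# Hypothetical smoke-test script for the plugin endpoints listed above.
# Assumptions: `requests` is installed, BASE_URL and the token endpoint
# match your deployment, and a plugin with key "my_plugin" is installed.
import requests

BASE_URL = "http://localhost:9191"  # adjust to your deployment


def get_token(username, password):
    # Obtain a JWT access token (endpoint path assumed; adjust if different).
    resp = requests.post(
        f"{BASE_URL}/api/accounts/token/",
        json={"username": username, "password": password},
    )
    resp.raise_for_status()
    return resp.json()["access"]


def run_plugin_action(token, key, action, params=None):
    headers = {"Authorization": f"Bearer {token}"}
    # New plugins are disabled by default, so enable before running.
    requests.post(
        f"{BASE_URL}/api/plugins/plugins/{key}/enabled/",
        json={"enabled": True},
        headers=headers,
    ).raise_for_status()
    resp = requests.post(
        f"{BASE_URL}/api/plugins/plugins/{key}/run/",
        json={"action": action, "params": params or {}},
        headers=headers,
    )
    resp.raise_for_status()
    return resp.json()


if __name__ == "__main__":
    token = get_token("admin", "changeme")
    print(run_plugin_action(token, "my_plugin", "do_work"))
```

Reusing one access token across calls keeps the script simple; a longer-lived tool would refresh it via the token refresh endpoint instead of logging in on every run.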
--- + + ## Importing Plugins + + - In the UI, click the Import button on the Plugins page and upload a `.zip` containing a plugin folder. + - The archive should contain either `plugin.py` or a Python package (`__init__.py`). + - On success, the UI shows the plugin name/description and lets you enable it immediately (plugins are disabled by default). + + --- + + ## Enabling / Disabling Plugins + + - Each plugin has a persisted `enabled` flag (default: disabled) and `ever_enabled` flag in the DB (`apps/plugins/models.py`). + - New plugins are disabled by default and require an explicit enable. + - The first time a plugin is enabled, the UI shows a trust warning modal explaining that plugins can run arbitrary server-side code. + - The Plugins page shows a toggle in the card header. Turning it off dims the card and disables the Run button. + - Backend enforcement: Attempts to run an action for a disabled plugin return HTTP 403. + + --- + + ## Example: Refresh All Sources Plugin + + Path: `data/plugins/refresh_all/plugin.py` + + ``` + class Plugin: + name = "Refresh All Sources" + version = "1.0.0" + description = "Force refresh all M3U accounts and EPG sources." + + fields = [ + {"id": "confirm", "label": "Require confirmation", "type": "boolean", "default": True, + "help_text": "If enabled, the UI should ask before running."} + ] + + actions = [ + {"id": "refresh_all", "label": "Refresh All M3Us and EPGs", + "description": "Queues background refresh for all active M3U accounts and EPG sources."} + ] + + def run(self, action: str, params: dict, context: dict): + if action == "refresh_all": + from apps.m3u.tasks import refresh_m3u_accounts + from apps.epg.tasks import refresh_all_epg_data + refresh_m3u_accounts.delay() + refresh_all_epg_data.delay() + return {"status": "queued", "message": "Refresh jobs queued"} + return {"status": "error", "message": f"Unknown action: {action}"} + ``` + + --- + + ## Best Practices + + - Keep `run` short and schedule heavy operations via Celery tasks. + - Validate and sanitize `params` received from the UI (see the sketch after the Contributing notes below). + - Use database transactions for bulk or related updates. + - Log actionable messages for troubleshooting. + - Only write files under `/data` or `/app/data` paths. + - Treat plugins as trusted code: they run with full app permissions. + + --- + + ## Troubleshooting + + - Plugin not listed: ensure the folder exists and contains `plugin.py` with a `Plugin` class. + - Import errors: the folder name is the import name; avoid spaces or exotic characters. + - No confirmation: include a boolean field with `id: "confirm"` and set it to true or default true. + - HTTP 403 on run: the plugin is disabled; enable it from the toggle or via the `enabled/` endpoint. + + --- + + ## Contributing + + - Keep dependencies minimal. Vendoring small helpers into the plugin folder is acceptable. + - Use the existing task and model APIs where possible; propose extensions if you need new capabilities.
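To make the `params` guidance from the Best Practices list concrete, here is a minimal, hypothetical sketch; the `prune` action, its `days` parameter, and the plugin itself are illustrative only and not something shipped with Dispatcharr.

```
# Hypothetical plugin fragment showing defensive handling of `params`.
# The "prune" action and "days" parameter are illustrative only.
class Plugin:
    name = "Prune Example"
    version = "0.1.0"
    description = "Illustrates validating action params before doing work"

    actions = [{"id": "prune", "label": "Prune old records"}]

    def run(self, action, params, context):
        logger = context.get("logger")
        params = params or {}

        if action == "prune":
            # Coerce and bound user-supplied values rather than trusting them.
            try:
                days = int(params.get("days", 30))
            except (TypeError, ValueError):
                return {"status": "error", "message": "days must be an integer"}
            days = max(1, min(days, 365))
            logger.info("Would prune records older than %s days", days)
            # Heavy work belongs in a Celery task queued with .delay(), not here.
            return {"status": "queued", "days": days}

        return {"status": "error", "message": f"Unknown action {action}"}
```

Returning a structured error instead of raising keeps the UI notification readable, and bounding `days` protects any background job you queue from pathological input.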
+ +--- + +## Internals Reference + +- Loader: `apps/plugins/loader.py` +- API Views: `apps/plugins/api_views.py` +- API URLs: `apps/plugins/api_urls.py` +- Model: `apps/plugins/models.py` +- Frontend page: `frontend/src/pages/Plugins.jsx` +- Sidebar entry: `frontend/src/components/Sidebar.jsx` diff --git a/README.md b/README.md index 534f8a4c..9b359e25 100644 --- a/README.md +++ b/README.md @@ -22,6 +22,7 @@ Dispatcharr has officially entered **BETA**, bringing powerful new features and 📊 **Real-Time Stats Dashboard** — Live insights into stream health and client activity\ 🧠 **EPG Auto-Match** — Match program data to channels automatically\ ⚙️ **Streamlink + FFmpeg Support** — Flexible backend options for streaming and recording\ +🎬 **VOD Management** — Full Video on Demand support with movies and TV series\ 🧼 **UI & UX Enhancements** — Smoother, faster, more responsive interface\ 🛁 **Output Compatibility** — HDHomeRun, M3U, and XMLTV EPG support for Plex, Jellyfin, and more @@ -31,6 +32,7 @@ Dispatcharr has officially entered **BETA**, bringing powerful new features and ✅ **Full IPTV Control** — Import, organize, proxy, and monitor IPTV streams on your own terms\ ✅ **Smart Playlist Handling** — M3U import, filtering, grouping, and failover support\ +✅ **VOD Content Management** — Organize movies and TV series with metadata and streaming\ ✅ **Reliable EPG Integration** — Match and manage TV guide data with ease\ ✅ **Clean & Responsive Interface** — Modern design that gets out of your way\ ✅ **Fully Self-Hosted** — Total control, zero reliance on third-party services @@ -104,7 +106,7 @@ Here’s how you can join the party: ## 📚 Roadmap & Documentation - 📚 **Roadmap:** Coming soon! -- 📖 **Wiki:** In progress — tutorials, API references, and advanced setup guides on the way! +- 📖 **Documentation:** [Dispatcharr Docs](https://dispatcharr.github.io/Dispatcharr-Docs/) --- @@ -133,4 +135,4 @@ Have a question? Want to suggest a feature? Just want to say hi?\ --- -### 🚀 *Happy Streaming! The Dispatcharr Team* \ No newline at end of file +### 🚀 *Happy Streaming! 
The Dispatcharr Team* diff --git a/apps/accounts/api_urls.py b/apps/accounts/api_urls.py index e1518105..dda3832c 100644 --- a/apps/accounts/api_urls.py +++ b/apps/accounts/api_urls.py @@ -1,41 +1,39 @@ from django.urls import path, include from rest_framework.routers import DefaultRouter from .api_views import ( - AuthViewSet, UserViewSet, GroupViewSet, - list_permissions, initialize_superuser + AuthViewSet, + UserViewSet, + GroupViewSet, + TokenObtainPairView, + TokenRefreshView, + list_permissions, + initialize_superuser, ) from rest_framework_simplejwt import views as jwt_views -app_name = 'accounts' +app_name = "accounts" # 🔹 Register ViewSets with a Router router = DefaultRouter() -router.register(r'users', UserViewSet, basename='user') -router.register(r'groups', GroupViewSet, basename='group') +router.register(r"users", UserViewSet, basename="user") +router.register(r"groups", GroupViewSet, basename="group") # 🔹 Custom Authentication Endpoints -auth_view = AuthViewSet.as_view({ - 'post': 'login' -}) +auth_view = AuthViewSet.as_view({"post": "login"}) -logout_view = AuthViewSet.as_view({ - 'post': 'logout' -}) +logout_view = AuthViewSet.as_view({"post": "logout"}) # 🔹 Define API URL patterns urlpatterns = [ # Authentication - path('auth/login/', auth_view, name='user-login'), - path('auth/logout/', logout_view, name='user-logout'), - + path("auth/login/", auth_view, name="user-login"), + path("auth/logout/", logout_view, name="user-logout"), # Superuser API - path('initialize-superuser/', initialize_superuser, name='initialize_superuser'), - + path("initialize-superuser/", initialize_superuser, name="initialize_superuser"), # Permissions API - path('permissions/', list_permissions, name='list-permissions'), - - path('token/', jwt_views.TokenObtainPairView.as_view(), name='token_obtain_pair'), - path('token/refresh/', jwt_views.TokenRefreshView.as_view(), name='token_refresh'), + path("permissions/", list_permissions, name="list-permissions"), + path("token/", TokenObtainPairView.as_view(), name="token_obtain_pair"), + path("token/refresh/", TokenRefreshView.as_view(), name="token_refresh"), ] # 🔹 Include ViewSet routes diff --git a/apps/accounts/api_views.py b/apps/accounts/api_views.py index 27d844df..41e2f077 100644 --- a/apps/accounts/api_views.py +++ b/apps/accounts/api_views.py @@ -2,16 +2,110 @@ from django.contrib.auth import authenticate, login, logout from django.contrib.auth.models import Group, Permission from django.http import JsonResponse, HttpResponse from django.views.decorators.csrf import csrf_exempt -from rest_framework.decorators import api_view, permission_classes -from rest_framework.permissions import IsAuthenticated, AllowAny +from rest_framework.decorators import api_view, permission_classes, action from rest_framework.response import Response -from rest_framework import viewsets +from rest_framework import viewsets, status from drf_yasg.utils import swagger_auto_schema from drf_yasg import openapi import json +from .permissions import IsAdmin, Authenticated +from dispatcharr.utils import network_access_allowed from .models import User from .serializers import UserSerializer, GroupSerializer, PermissionSerializer +from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView + + +class TokenObtainPairView(TokenObtainPairView): + def post(self, request, *args, **kwargs): + # Custom logic here + if not network_access_allowed(request, "UI"): + # Log blocked login attempt due to network restrictions + from core.utils import log_system_event + 
username = request.data.get("username", 'unknown') + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + log_system_event( + event_type='login_failed', + user=username, + client_ip=client_ip, + user_agent=user_agent, + reason='Network access denied', + ) + return Response({"error": "Forbidden"}, status=status.HTTP_403_FORBIDDEN) + + # Get the response from the parent class first + username = request.data.get("username") + + # Log login attempt + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + + try: + response = super().post(request, *args, **kwargs) + + # If login was successful, update last_login and log success + if response.status_code == 200: + if username: + from django.utils import timezone + try: + user = User.objects.get(username=username) + user.last_login = timezone.now() + user.save(update_fields=['last_login']) + + # Log successful login + log_system_event( + event_type='login_success', + user=username, + client_ip=client_ip, + user_agent=user_agent, + ) + except User.DoesNotExist: + pass # User doesn't exist, but login somehow succeeded + else: + # Log failed login attempt + log_system_event( + event_type='login_failed', + user=username or 'unknown', + client_ip=client_ip, + user_agent=user_agent, + reason='Invalid credentials', + ) + + return response + + except Exception as e: + # If parent class raises an exception (e.g., validation error), log failed attempt + log_system_event( + event_type='login_failed', + user=username or 'unknown', + client_ip=client_ip, + user_agent=user_agent, + reason=f'Authentication error: {str(e)[:100]}', + ) + raise # Re-raise the exception to maintain normal error flow + + +class TokenRefreshView(TokenRefreshView): + def post(self, request, *args, **kwargs): + # Custom logic here + if not network_access_allowed(request, "UI"): + # Log blocked token refresh attempt due to network restrictions + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + log_system_event( + event_type='login_failed', + user='token_refresh', + client_ip=client_ip, + user_agent=user_agent, + reason='Network access denied (token refresh)', + ) + return Response({"error": "Unauthorized"}, status=status.HTTP_403_FORBIDDEN) + + return super().post(request, *args, **kwargs) + @csrf_exempt # In production, consider CSRF protection strategies or ensure this endpoint is only accessible when no superuser exists. 
def initialize_superuser(request): @@ -26,56 +120,114 @@ def initialize_superuser(request): password = data.get("password") email = data.get("email", "") if not username or not password: - return JsonResponse({"error": "Username and password are required."}, status=400) + return JsonResponse( + {"error": "Username and password are required."}, status=400 + ) # Create the superuser - User.objects.create_superuser(username=username, password=password, email=email) + User.objects.create_superuser( + username=username, password=password, email=email, user_level=10 + ) return JsonResponse({"superuser_exists": True}) except Exception as e: return JsonResponse({"error": str(e)}, status=500) # For GET requests, indicate no superuser exists return JsonResponse({"superuser_exists": False}) + # 🔹 1) Authentication APIs class AuthViewSet(viewsets.ViewSet): """Handles user login and logout""" + def get_permissions(self): + """ + Login doesn't require auth, but logout does + """ + if self.action == 'logout': + from rest_framework.permissions import IsAuthenticated + return [IsAuthenticated()] + return [] + @swagger_auto_schema( operation_description="Authenticate and log in a user", request_body=openapi.Schema( type=openapi.TYPE_OBJECT, - required=['username', 'password'], + required=["username", "password"], properties={ - 'username': openapi.Schema(type=openapi.TYPE_STRING), - 'password': openapi.Schema(type=openapi.TYPE_STRING, format=openapi.FORMAT_PASSWORD) + "username": openapi.Schema(type=openapi.TYPE_STRING), + "password": openapi.Schema( + type=openapi.TYPE_STRING, format=openapi.FORMAT_PASSWORD + ), }, ), responses={200: "Login successful", 400: "Invalid credentials"}, ) def login(self, request): """Logs in a user and returns user details""" - username = request.data.get('username') - password = request.data.get('password') + username = request.data.get("username") + password = request.data.get("password") user = authenticate(request, username=username, password=password) + # Get client info for logging + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + if user: login(request, user) - return Response({ - "message": "Login successful", - "user": { - "id": user.id, - "username": user.username, - "email": user.email, - "groups": list(user.groups.values_list('name', flat=True)) + # Update last_login timestamp + from django.utils import timezone + user.last_login = timezone.now() + user.save(update_fields=['last_login']) + + # Log successful login + log_system_event( + event_type='login_success', + user=username, + client_ip=client_ip, + user_agent=user_agent, + ) + + return Response( + { + "message": "Login successful", + "user": { + "id": user.id, + "username": user.username, + "email": user.email, + "groups": list(user.groups.values_list("name", flat=True)), + }, } - }) + ) + + # Log failed login attempt + log_system_event( + event_type='login_failed', + user=username or 'unknown', + client_ip=client_ip, + user_agent=user_agent, + reason='Invalid credentials', + ) return Response({"error": "Invalid credentials"}, status=400) @swagger_auto_schema( operation_description="Log out the current user", - responses={200: "Logout successful"} + responses={200: "Logout successful"}, ) def logout(self, request): """Logs out the authenticated user""" + # Log logout event before actually logging out + from core.utils import log_system_event + username = request.user.username if request.user and 
request.user.is_authenticated else 'unknown' + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + + log_system_event( + event_type='logout', + user=username, + client_ip=client_ip, + user_agent=user_agent, + ) + logout(request) return Response({"message": "Logout successful"}) @@ -83,13 +235,19 @@ class AuthViewSet(viewsets.ViewSet): # 🔹 2) User Management APIs class UserViewSet(viewsets.ModelViewSet): """Handles CRUD operations for Users""" - queryset = User.objects.all() + + queryset = User.objects.all().prefetch_related('channel_profiles') serializer_class = UserSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + if self.action == "me": + return [Authenticated()] + + return [IsAdmin()] @swagger_auto_schema( operation_description="Retrieve a list of users", - responses={200: UserSerializer(many=True)} + responses={200: UserSerializer(many=True)}, ) def list(self, request, *args, **kwargs): return super().list(request, *args, **kwargs) @@ -110,17 +268,28 @@ class UserViewSet(viewsets.ModelViewSet): def destroy(self, request, *args, **kwargs): return super().destroy(request, *args, **kwargs) + @swagger_auto_schema( + method="get", + operation_description="Get active user information", + ) + @action(detail=False, methods=["get"], url_path="me") + def me(self, request): + user = request.user + serializer = UserSerializer(user) + return Response(serializer.data) + # 🔹 3) Group Management APIs class GroupViewSet(viewsets.ModelViewSet): """Handles CRUD operations for Groups""" + queryset = Group.objects.all() serializer_class = GroupSerializer - permission_classes = [IsAuthenticated] + permission_classes = [Authenticated] @swagger_auto_schema( operation_description="Retrieve a list of groups", - responses={200: GroupSerializer(many=True)} + responses={200: GroupSerializer(many=True)}, ) def list(self, request, *args, **kwargs): return super().list(request, *args, **kwargs) @@ -144,12 +313,12 @@ class GroupViewSet(viewsets.ModelViewSet): # 🔹 4) Permissions List API @swagger_auto_schema( - method='get', + method="get", operation_description="Retrieve a list of all permissions", - responses={200: PermissionSerializer(many=True)} + responses={200: PermissionSerializer(many=True)}, ) -@api_view(['GET']) -@permission_classes([IsAuthenticated]) +@api_view(["GET"]) +@permission_classes([Authenticated]) def list_permissions(request): """Returns a list of all available permissions""" permissions = Permission.objects.all() diff --git a/apps/accounts/apps.py b/apps/accounts/apps.py index fe284bd6..603ea847 100644 --- a/apps/accounts/apps.py +++ b/apps/accounts/apps.py @@ -1,6 +1,7 @@ from django.apps import AppConfig + class AccountsConfig(AppConfig): - default_auto_field = 'django.db.models.BigAutoField' - name = 'apps.accounts' + default_auto_field = "django.db.models.BigAutoField" + name = "apps.accounts" verbose_name = "Accounts & Authentication" diff --git a/apps/accounts/migrations/0002_remove_user_channel_groups_user_channel_profiles_and_more.py b/apps/accounts/migrations/0002_remove_user_channel_groups_user_channel_profiles_and_more.py new file mode 100644 index 00000000..2a095773 --- /dev/null +++ b/apps/accounts/migrations/0002_remove_user_channel_groups_user_channel_profiles_and_more.py @@ -0,0 +1,43 @@ +# Generated by Django 5.1.6 on 2025-05-18 15:47 + +from django.db import migrations, models + + +def set_user_level_to_10(apps, schema_editor): + User = apps.get_model("accounts", "User") + 
User.objects.update(user_level=10) + + +class Migration(migrations.Migration): + + dependencies = [ + ("accounts", "0001_initial"), + ("dispatcharr_channels", "0021_channel_user_level"), + ] + + operations = [ + migrations.RemoveField( + model_name="user", + name="channel_groups", + ), + migrations.AddField( + model_name="user", + name="channel_profiles", + field=models.ManyToManyField( + blank=True, + related_name="users", + to="dispatcharr_channels.channelprofile", + ), + ), + migrations.AddField( + model_name="user", + name="user_level", + field=models.IntegerField(default=0), + ), + migrations.AddField( + model_name="user", + name="custom_properties", + field=models.TextField(blank=True, null=True), + ), + migrations.RunPython(set_user_level_to_10), + ] diff --git a/apps/accounts/migrations/0003_alter_user_custom_properties.py b/apps/accounts/migrations/0003_alter_user_custom_properties.py new file mode 100644 index 00000000..20411f75 --- /dev/null +++ b/apps/accounts/migrations/0003_alter_user_custom_properties.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-09-02 14:30 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('accounts', '0002_remove_user_channel_groups_user_channel_profiles_and_more'), + ] + + operations = [ + migrations.AlterField( + model_name='user', + name='custom_properties', + field=models.JSONField(blank=True, default=dict, null=True), + ), + ] diff --git a/apps/accounts/models.py b/apps/accounts/models.py index 5b24549f..da5e36bc 100644 --- a/apps/accounts/models.py +++ b/apps/accounts/models.py @@ -2,17 +2,26 @@ from django.db import models from django.contrib.auth.models import AbstractUser, Permission + class User(AbstractUser): """ Custom user model for Dispatcharr. Inherits from Django's AbstractUser to add additional fields if needed. 
""" + + class UserLevel(models.IntegerChoices): + STREAMER = 0, "Streamer" + STANDARD = 1, "Standard User" + ADMIN = 10, "Admin" + avatar_config = models.JSONField(default=dict, blank=True, null=True) - channel_groups = models.ManyToManyField( - 'dispatcharr_channels.ChannelGroup', # Updated reference to renamed model + channel_profiles = models.ManyToManyField( + "dispatcharr_channels.ChannelProfile", blank=True, - related_name="users" + related_name="users", ) + user_level = models.IntegerField(default=UserLevel.STREAMER) + custom_properties = models.JSONField(default=dict, blank=True, null=True) def __str__(self): return self.username diff --git a/apps/accounts/permissions.py b/apps/accounts/permissions.py new file mode 100644 index 00000000..62673038 --- /dev/null +++ b/apps/accounts/permissions.py @@ -0,0 +1,56 @@ +from rest_framework.permissions import IsAuthenticated +from .models import User +from dispatcharr.utils import network_access_allowed + + +class Authenticated(IsAuthenticated): + def has_permission(self, request, view): + is_authenticated = super().has_permission(request, view) + network_allowed = network_access_allowed(request, "UI") + + return is_authenticated and network_allowed + + +class IsStandardUser(Authenticated): + def has_permission(self, request, view): + if not super().has_permission(request, view): + return False + + return request.user and request.user.user_level >= User.UserLevel.STANDARD + + +class IsAdmin(Authenticated): + def has_permission(self, request, view): + if not super().has_permission(request, view): + return False + + return request.user.user_level >= 10 + + +class IsOwnerOfObject(Authenticated): + def has_object_permission(self, request, view, obj): + if not super().has_permission(request, view): + return False + + is_admin = IsAdmin().has_permission(request, view) + is_owner = request.user in obj.users.all() + + return is_admin or is_owner + + +permission_classes_by_action = { + "list": [IsStandardUser], + "create": [IsAdmin], + "retrieve": [IsStandardUser], + "update": [IsAdmin], + "partial_update": [IsAdmin], + "destroy": [IsAdmin], +} + +permission_classes_by_method = { + "GET": [IsStandardUser], + "POST": [IsAdmin], + "PATCH": [IsAdmin], + "PUT": [IsAdmin], + "DELETE": [IsAdmin], +} diff --git a/apps/accounts/serializers.py b/apps/accounts/serializers.py index 2346946e..865d29af 100644 --- a/apps/accounts/serializers.py +++ b/apps/accounts/serializers.py @@ -1,13 +1,14 @@ from rest_framework import serializers from django.contrib.auth.models import Group, Permission from .models import User +from apps.channels.models import ChannelProfile # 🔹 Fix for Permission serialization class PermissionSerializer(serializers.ModelSerializer): class Meta: model = Permission - fields = ['id', 'name', 'codename'] + fields = ["id", "name", "codename"] # 🔹 Fix for Group serialization @@ -18,15 +19,61 @@ class GroupSerializer(serializers.ModelSerializer): class Meta: model = Group - fields = ['id', 'name', 'permissions'] + fields = ["id", "name", "permissions"] # 🔹 Fix for User serialization class UserSerializer(serializers.ModelSerializer): - groups = serializers.SlugRelatedField( - many=True, queryset=Group.objects.all(), slug_field="name" - ) # ✅ Fix ManyToMany `_meta` error + password = serializers.CharField(write_only=True) + channel_profiles = serializers.PrimaryKeyRelatedField( + queryset=ChannelProfile.objects.all(), many=True, required=False + ) class Meta: model = User - fields = ['id', 'username', 'email', 'groups'] + fields = [ + "id", + 
"username", + "email", + "user_level", + "password", + "channel_profiles", + "custom_properties", + "avatar_config", + "is_active", + "is_staff", + "is_superuser", + "last_login", + "date_joined", + "first_name", + "last_name", + ] + + def create(self, validated_data): + channel_profiles = validated_data.pop("channel_profiles", []) + + user = User(**validated_data) + user.set_password(validated_data["password"]) + user.is_active = True + user.save() + + user.channel_profiles.set(channel_profiles) + + return user + + def update(self, instance, validated_data): + password = validated_data.pop("password", None) + channel_profiles = validated_data.pop("channel_profiles", None) + + for attr, value in validated_data.items(): + setattr(instance, attr, value) + + if password: + instance.set_password(password) + + instance.save() + + if channel_profiles is not None: + instance.channel_profiles.set(channel_profiles) + + return instance diff --git a/apps/accounts/signals.py b/apps/accounts/signals.py index 3bd1e246..dfc4f425 100644 --- a/apps/accounts/signals.py +++ b/apps/accounts/signals.py @@ -5,6 +5,7 @@ from django.db.models.signals import post_save from django.dispatch import receiver from .models import User + @receiver(post_save, sender=User) def handle_new_user(sender, instance, created, **kwargs): if created: diff --git a/apps/api/urls.py b/apps/api/urls.py index a2810f06..4c92c70a 100644 --- a/apps/api/urls.py +++ b/apps/api/urls.py @@ -1,11 +1,10 @@ -from django.urls import path, include +from django.urls import path, include, re_path from drf_yasg.views import get_schema_view from drf_yasg import openapi from rest_framework.permissions import AllowAny app_name = 'api' -# Configure Swagger Schema schema_view = get_schema_view( openapi.Info( title="Dispatcharr API", @@ -26,6 +25,9 @@ urlpatterns = [ path('hdhr/', include(('apps.hdhr.api_urls', 'hdhr'), namespace='hdhr')), path('m3u/', include(('apps.m3u.api_urls', 'm3u'), namespace='m3u')), path('core/', include(('core.api_urls', 'core'), namespace='core')), + path('plugins/', include(('apps.plugins.api_urls', 'plugins'), namespace='plugins')), + path('vod/', include(('apps.vod.api_urls', 'vod'), namespace='vod')), + path('backups/', include(('apps.backups.api_urls', 'backups'), namespace='backups')), # path('output/', include(('apps.output.api_urls', 'output'), namespace='output')), #path('player/', include(('apps.player.api_urls', 'player'), namespace='player')), #path('settings/', include(('apps.settings.api_urls', 'settings'), namespace='settings')), @@ -34,7 +36,7 @@ urlpatterns = [ # Swagger Documentation api_urls - path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'), + re_path(r'^swagger/?$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'), path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'), path('swagger.json', schema_view.without_ui(cache_timeout=0), name='schema-json'), ] diff --git a/apps/backups/__init__.py b/apps/backups/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/apps/backups/api_urls.py b/apps/backups/api_urls.py new file mode 100644 index 00000000..226758cc --- /dev/null +++ b/apps/backups/api_urls.py @@ -0,0 +1,18 @@ +from django.urls import path + +from . 
import api_views + +app_name = "backups" + +urlpatterns = [ + path("", api_views.list_backups, name="backup-list"), + path("create/", api_views.create_backup, name="backup-create"), + path("upload/", api_views.upload_backup, name="backup-upload"), + path("schedule/", api_views.get_schedule, name="backup-schedule-get"), + path("schedule/update/", api_views.update_schedule, name="backup-schedule-update"), + path("status//", api_views.backup_status, name="backup-status"), + path("/download-token/", api_views.get_download_token, name="backup-download-token"), + path("/download/", api_views.download_backup, name="backup-download"), + path("/delete/", api_views.delete_backup, name="backup-delete"), + path("/restore/", api_views.restore_backup, name="backup-restore"), +] diff --git a/apps/backups/api_views.py b/apps/backups/api_views.py new file mode 100644 index 00000000..c6ff7d26 --- /dev/null +++ b/apps/backups/api_views.py @@ -0,0 +1,364 @@ +import hashlib +import hmac +import logging +import os +from pathlib import Path + +from celery.result import AsyncResult +from django.conf import settings +from django.http import HttpResponse, StreamingHttpResponse, Http404 +from rest_framework import status +from rest_framework.decorators import api_view, permission_classes, parser_classes +from rest_framework.permissions import IsAdminUser, AllowAny +from rest_framework.parsers import MultiPartParser, FormParser +from rest_framework.response import Response + +from . import services +from .tasks import create_backup_task, restore_backup_task +from .scheduler import get_schedule_settings, update_schedule_settings + +logger = logging.getLogger(__name__) + + +def _generate_task_token(task_id: str) -> str: + """Generate a signed token for task status access without auth.""" + secret = settings.SECRET_KEY.encode() + return hmac.new(secret, task_id.encode(), hashlib.sha256).hexdigest()[:32] + + +def _verify_task_token(task_id: str, token: str) -> bool: + """Verify a task token is valid.""" + expected = _generate_task_token(task_id) + return hmac.compare_digest(expected, token) + + +@api_view(["GET"]) +@permission_classes([IsAdminUser]) +def list_backups(request): + """List all available backup files.""" + try: + backups = services.list_backups() + return Response(backups, status=status.HTTP_200_OK) + except Exception as e: + return Response( + {"detail": f"Failed to list backups: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["POST"]) +@permission_classes([IsAdminUser]) +def create_backup(request): + """Create a new backup (async via Celery).""" + try: + task = create_backup_task.delay() + return Response( + { + "detail": "Backup started", + "task_id": task.id, + "task_token": _generate_task_token(task.id), + }, + status=status.HTTP_202_ACCEPTED, + ) + except Exception as e: + return Response( + {"detail": f"Failed to start backup: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["GET"]) +@permission_classes([AllowAny]) +def backup_status(request, task_id): + """Check the status of a backup/restore task. 
+ + Requires either: + - Valid admin authentication, OR + - Valid task_token query parameter + """ + # Check for token-based auth (for restore when session is invalidated) + token = request.query_params.get("token") + if token: + if not _verify_task_token(task_id, token): + return Response( + {"detail": "Invalid task token"}, + status=status.HTTP_403_FORBIDDEN, + ) + else: + # Fall back to admin auth check + if not request.user.is_authenticated or not request.user.is_staff: + return Response( + {"detail": "Authentication required"}, + status=status.HTTP_401_UNAUTHORIZED, + ) + + try: + result = AsyncResult(task_id) + + if result.ready(): + task_result = result.get() + if task_result.get("status") == "completed": + return Response({ + "state": "completed", + "result": task_result, + }) + else: + return Response({ + "state": "failed", + "error": task_result.get("error", "Unknown error"), + }) + elif result.failed(): + return Response({ + "state": "failed", + "error": str(result.result), + }) + else: + return Response({ + "state": result.state.lower(), + }) + except Exception as e: + return Response( + {"detail": f"Failed to get task status: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["GET"]) +@permission_classes([IsAdminUser]) +def get_download_token(request, filename): + """Get a signed token for downloading a backup file.""" + try: + # Security: prevent path traversal + if ".." in filename or "/" in filename or "\\" in filename: + raise Http404("Invalid filename") + + backup_dir = services.get_backup_dir() + backup_file = backup_dir / filename + + if not backup_file.exists(): + raise Http404("Backup file not found") + + token = _generate_task_token(filename) + return Response({"token": token}) + except Http404: + raise + except Exception as e: + return Response( + {"detail": f"Failed to generate token: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["GET"]) +@permission_classes([AllowAny]) +def download_backup(request, filename): + """Download a backup file. + + Requires either: + - Valid admin authentication, OR + - Valid download_token query parameter + """ + # Check for token-based auth (avoids CORS preflight issues) + token = request.query_params.get("token") + if token: + if not _verify_task_token(filename, token): + return Response( + {"detail": "Invalid download token"}, + status=status.HTTP_403_FORBIDDEN, + ) + else: + # Fall back to admin auth check + if not request.user.is_authenticated or not request.user.is_staff: + return Response( + {"detail": "Authentication required"}, + status=status.HTTP_401_UNAUTHORIZED, + ) + + try: + # Security: prevent path traversal by checking for suspicious characters + if ".." 
in filename or "/" in filename or "\\" in filename: + raise Http404("Invalid filename") + + backup_dir = services.get_backup_dir() + backup_file = (backup_dir / filename).resolve() + + # Security: ensure the resolved path is still within backup_dir + if not str(backup_file).startswith(str(backup_dir.resolve())): + raise Http404("Invalid filename") + + if not backup_file.exists() or not backup_file.is_file(): + raise Http404("Backup file not found") + + file_size = backup_file.stat().st_size + + # Use X-Accel-Redirect for nginx (AIO container) - nginx serves file directly + # Fall back to streaming for non-nginx deployments + use_nginx_accel = os.environ.get("USE_NGINX_ACCEL", "").lower() == "true" + logger.info(f"[DOWNLOAD] File: {filename}, Size: {file_size}, USE_NGINX_ACCEL: {use_nginx_accel}") + + if use_nginx_accel: + # X-Accel-Redirect: Django returns immediately, nginx serves file + logger.info(f"[DOWNLOAD] Using X-Accel-Redirect: /protected-backups/{filename}") + response = HttpResponse() + response["X-Accel-Redirect"] = f"/protected-backups/{filename}" + response["Content-Type"] = "application/zip" + response["Content-Length"] = file_size + response["Content-Disposition"] = f'attachment; filename="{filename}"' + return response + else: + # Streaming fallback for non-nginx deployments + logger.info(f"[DOWNLOAD] Using streaming fallback (no nginx)") + def file_iterator(file_path, chunk_size=2 * 1024 * 1024): + with open(file_path, "rb") as f: + while chunk := f.read(chunk_size): + yield chunk + + response = StreamingHttpResponse( + file_iterator(backup_file), + content_type="application/zip", + ) + response["Content-Length"] = file_size + response["Content-Disposition"] = f'attachment; filename="{filename}"' + return response + except Http404: + raise + except Exception as e: + return Response( + {"detail": f"Download failed: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["DELETE"]) +@permission_classes([IsAdminUser]) +def delete_backup(request, filename): + """Delete a backup file.""" + try: + # Security: prevent path traversal + if ".." 
in filename or "/" in filename or "\\" in filename: + raise Http404("Invalid filename") + + services.delete_backup(filename) + return Response( + {"detail": "Backup deleted successfully"}, + status=status.HTTP_204_NO_CONTENT, + ) + except FileNotFoundError: + raise Http404("Backup file not found") + except Exception as e: + return Response( + {"detail": f"Delete failed: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["POST"]) +@permission_classes([IsAdminUser]) +@parser_classes([MultiPartParser, FormParser]) +def upload_backup(request): + """Upload a backup file for restoration.""" + uploaded = request.FILES.get("file") + if not uploaded: + return Response( + {"detail": "No file uploaded"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + try: + backup_dir = services.get_backup_dir() + filename = uploaded.name or "uploaded-backup.zip" + + # Ensure unique filename + backup_file = backup_dir / filename + counter = 1 + while backup_file.exists(): + name_parts = filename.rsplit(".", 1) + if len(name_parts) == 2: + backup_file = backup_dir / f"{name_parts[0]}-{counter}.{name_parts[1]}" + else: + backup_file = backup_dir / f"{filename}-{counter}" + counter += 1 + + # Save uploaded file + with backup_file.open("wb") as f: + for chunk in uploaded.chunks(): + f.write(chunk) + + return Response( + { + "detail": "Backup uploaded successfully", + "filename": backup_file.name, + }, + status=status.HTTP_201_CREATED, + ) + except Exception as e: + return Response( + {"detail": f"Upload failed: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["POST"]) +@permission_classes([IsAdminUser]) +def restore_backup(request, filename): + """Restore from a backup file (async via Celery). WARNING: This will flush the database!""" + try: + # Security: prevent path traversal + if ".." 
in filename or "/" in filename or "\\" in filename: + raise Http404("Invalid filename") + + backup_dir = services.get_backup_dir() + backup_file = backup_dir / filename + + if not backup_file.exists(): + raise Http404("Backup file not found") + + task = restore_backup_task.delay(filename) + return Response( + { + "detail": "Restore started", + "task_id": task.id, + "task_token": _generate_task_token(task.id), + }, + status=status.HTTP_202_ACCEPTED, + ) + except Http404: + raise + except Exception as e: + return Response( + {"detail": f"Failed to start restore: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["GET"]) +@permission_classes([IsAdminUser]) +def get_schedule(request): + """Get backup schedule settings.""" + try: + settings = get_schedule_settings() + return Response(settings) + except Exception as e: + return Response( + {"detail": f"Failed to get schedule: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["PUT"]) +@permission_classes([IsAdminUser]) +def update_schedule(request): + """Update backup schedule settings.""" + try: + settings = update_schedule_settings(request.data) + return Response(settings) + except ValueError as e: + return Response( + {"detail": str(e)}, + status=status.HTTP_400_BAD_REQUEST, + ) + except Exception as e: + return Response( + {"detail": f"Failed to update schedule: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) diff --git a/apps/backups/apps.py b/apps/backups/apps.py new file mode 100644 index 00000000..ee644149 --- /dev/null +++ b/apps/backups/apps.py @@ -0,0 +1,7 @@ +from django.apps import AppConfig + + +class BackupsConfig(AppConfig): + default_auto_field = "django.db.models.BigAutoField" + name = "apps.backups" + verbose_name = "Backups" diff --git a/apps/backups/migrations/__init__.py b/apps/backups/migrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/apps/backups/models.py b/apps/backups/models.py new file mode 100644 index 00000000..e69de29b diff --git a/apps/backups/scheduler.py b/apps/backups/scheduler.py new file mode 100644 index 00000000..aa7e9bcd --- /dev/null +++ b/apps/backups/scheduler.py @@ -0,0 +1,202 @@ +import json +import logging + +from django_celery_beat.models import PeriodicTask, CrontabSchedule + +from core.models import CoreSettings + +logger = logging.getLogger(__name__) + +BACKUP_SCHEDULE_TASK_NAME = "backup-scheduled-task" + +DEFAULTS = { + "schedule_enabled": True, + "schedule_frequency": "daily", + "schedule_time": "03:00", + "schedule_day_of_week": 0, # Sunday + "retention_count": 3, + "schedule_cron_expression": "", +} + + +def _get_backup_settings(): + """Get all backup settings from CoreSettings grouped JSON.""" + try: + settings_obj = CoreSettings.objects.get(key="backup_settings") + return settings_obj.value if isinstance(settings_obj.value, dict) else DEFAULTS.copy() + except CoreSettings.DoesNotExist: + return DEFAULTS.copy() + + +def _update_backup_settings(updates: dict) -> None: + """Update backup settings in the grouped JSON.""" + obj, created = CoreSettings.objects.get_or_create( + key="backup_settings", + defaults={"name": "Backup Settings", "value": DEFAULTS.copy()} + ) + current = obj.value if isinstance(obj.value, dict) else {} + current.update(updates) + obj.value = current + obj.save() + + +def get_schedule_settings() -> dict: + """Get all backup schedule settings.""" + settings = _get_backup_settings() + return { + "enabled": bool(settings.get("schedule_enabled", 
DEFAULTS["schedule_enabled"])), + "frequency": str(settings.get("schedule_frequency", DEFAULTS["schedule_frequency"])), + "time": str(settings.get("schedule_time", DEFAULTS["schedule_time"])), + "day_of_week": int(settings.get("schedule_day_of_week", DEFAULTS["schedule_day_of_week"])), + "retention_count": int(settings.get("retention_count", DEFAULTS["retention_count"])), + "cron_expression": str(settings.get("schedule_cron_expression", DEFAULTS["schedule_cron_expression"])), + } + + +def update_schedule_settings(data: dict) -> dict: + """Update backup schedule settings and sync the PeriodicTask.""" + # Validate + if "frequency" in data and data["frequency"] not in ("daily", "weekly"): + raise ValueError("frequency must be 'daily' or 'weekly'") + + if "time" in data: + try: + hour, minute = data["time"].split(":") + int(hour) + int(minute) + except (ValueError, AttributeError): + raise ValueError("time must be in HH:MM format") + + if "day_of_week" in data: + day = int(data["day_of_week"]) + if day < 0 or day > 6: + raise ValueError("day_of_week must be 0-6 (Sunday-Saturday)") + + if "retention_count" in data: + count = int(data["retention_count"]) + if count < 0: + raise ValueError("retention_count must be >= 0") + + # Update settings with proper key names + updates = {} + if "enabled" in data: + updates["schedule_enabled"] = bool(data["enabled"]) + if "frequency" in data: + updates["schedule_frequency"] = str(data["frequency"]) + if "time" in data: + updates["schedule_time"] = str(data["time"]) + if "day_of_week" in data: + updates["schedule_day_of_week"] = int(data["day_of_week"]) + if "retention_count" in data: + updates["retention_count"] = int(data["retention_count"]) + if "cron_expression" in data: + updates["schedule_cron_expression"] = str(data["cron_expression"]) + + _update_backup_settings(updates) + + # Sync the periodic task + _sync_periodic_task() + + return get_schedule_settings() + + +def _sync_periodic_task() -> None: + """Create, update, or delete the scheduled backup task based on settings.""" + settings = get_schedule_settings() + + if not settings["enabled"]: + # Delete the task if it exists + task = PeriodicTask.objects.filter(name=BACKUP_SCHEDULE_TASK_NAME).first() + if task: + old_crontab = task.crontab + task.delete() + _cleanup_orphaned_crontab(old_crontab) + logger.info("Backup schedule disabled, removed periodic task") + return + + # Get old crontab before creating new one + old_crontab = None + try: + old_task = PeriodicTask.objects.get(name=BACKUP_SCHEDULE_TASK_NAME) + old_crontab = old_task.crontab + except PeriodicTask.DoesNotExist: + pass + + # Check if using cron expression (advanced mode) + if settings["cron_expression"]: + # Parse cron expression: "minute hour day month weekday" + try: + parts = settings["cron_expression"].split() + if len(parts) != 5: + raise ValueError("Cron expression must have 5 parts: minute hour day month weekday") + + minute, hour, day_of_month, month_of_year, day_of_week = parts + + crontab, _ = CrontabSchedule.objects.get_or_create( + minute=minute, + hour=hour, + day_of_week=day_of_week, + day_of_month=day_of_month, + month_of_year=month_of_year, + timezone=CoreSettings.get_system_time_zone(), + ) + except Exception as e: + logger.error(f"Invalid cron expression '{settings['cron_expression']}': {e}") + raise ValueError(f"Invalid cron expression: {e}") + else: + # Use simple frequency-based scheduling + # Parse time + hour, minute = settings["time"].split(":") + + # Build crontab based on frequency + system_tz = 
CoreSettings.get_system_time_zone() + if settings["frequency"] == "daily": + crontab, _ = CrontabSchedule.objects.get_or_create( + minute=minute, + hour=hour, + day_of_week="*", + day_of_month="*", + month_of_year="*", + timezone=system_tz, + ) + else: # weekly + crontab, _ = CrontabSchedule.objects.get_or_create( + minute=minute, + hour=hour, + day_of_week=str(settings["day_of_week"]), + day_of_month="*", + month_of_year="*", + timezone=system_tz, + ) + + # Create or update the periodic task + task, created = PeriodicTask.objects.update_or_create( + name=BACKUP_SCHEDULE_TASK_NAME, + defaults={ + "task": "apps.backups.tasks.scheduled_backup_task", + "crontab": crontab, + "enabled": True, + "kwargs": json.dumps({"retention_count": settings["retention_count"]}), + }, + ) + + # Clean up old crontab if it changed and is orphaned + if old_crontab and old_crontab.id != crontab.id: + _cleanup_orphaned_crontab(old_crontab) + + action = "Created" if created else "Updated" + logger.info(f"{action} backup schedule: {settings['frequency']} at {settings['time']}") + + +def _cleanup_orphaned_crontab(crontab_schedule): + """Delete old CrontabSchedule if no other tasks are using it.""" + if crontab_schedule is None: + return + + # Check if any other tasks are using this crontab + if PeriodicTask.objects.filter(crontab=crontab_schedule).exists(): + logger.debug(f"CrontabSchedule {crontab_schedule.id} still in use, not deleting") + return + + logger.debug(f"Cleaning up orphaned CrontabSchedule: {crontab_schedule.id}") + crontab_schedule.delete() diff --git a/apps/backups/services.py b/apps/backups/services.py new file mode 100644 index 00000000..b638e701 --- /dev/null +++ b/apps/backups/services.py @@ -0,0 +1,350 @@ +import datetime +import json +import os +import shutil +import subprocess +import tempfile +from pathlib import Path +from zipfile import ZipFile, ZIP_DEFLATED +import logging +import pytz + +from django.conf import settings +from core.models import CoreSettings + +logger = logging.getLogger(__name__) + + +def get_backup_dir() -> Path: + """Get the backup directory, creating it if necessary.""" + backup_dir = Path(settings.BACKUP_ROOT) + backup_dir.mkdir(parents=True, exist_ok=True) + return backup_dir + + +def _is_postgresql() -> bool: + """Check if we're using PostgreSQL.""" + return settings.DATABASES["default"]["ENGINE"] == "django.db.backends.postgresql" + + +def _get_pg_env() -> dict: + """Get environment variables for PostgreSQL commands.""" + db_config = settings.DATABASES["default"] + env = os.environ.copy() + env["PGPASSWORD"] = db_config.get("PASSWORD", "") + return env + + +def _get_pg_args() -> list[str]: + """Get common PostgreSQL command arguments.""" + db_config = settings.DATABASES["default"] + return [ + "-h", db_config.get("HOST", "localhost"), + "-p", str(db_config.get("PORT", 5432)), + "-U", db_config.get("USER", "postgres"), + "-d", db_config.get("NAME", "dispatcharr"), + ] + + +def _dump_postgresql(output_file: Path) -> None: + """Dump PostgreSQL database using pg_dump.""" + logger.info("Dumping PostgreSQL database with pg_dump...") + + cmd = [ + "pg_dump", + *_get_pg_args(), + "-Fc", # Custom format for pg_restore + "-v", # Verbose + "-f", str(output_file), + ] + + result = subprocess.run( + cmd, + env=_get_pg_env(), + capture_output=True, + text=True, + ) + + if result.returncode != 0: + logger.error(f"pg_dump failed: {result.stderr}") + raise RuntimeError(f"pg_dump failed: {result.stderr}") + + logger.debug(f"pg_dump output: {result.stderr}") + + +def 
_clean_postgresql_schema() -> None: + """Drop and recreate the public schema to ensure a completely clean restore.""" + logger.info("[PG_CLEAN] Dropping and recreating public schema...") + + # Commands to drop and recreate schema + sql_commands = "DROP SCHEMA IF EXISTS public CASCADE; CREATE SCHEMA public; GRANT ALL ON SCHEMA public TO public;" + + cmd = [ + "psql", + *_get_pg_args(), + "-c", sql_commands, + ] + + result = subprocess.run( + cmd, + env=_get_pg_env(), + capture_output=True, + text=True, + ) + + if result.returncode != 0: + logger.error(f"[PG_CLEAN] Failed to clean schema: {result.stderr}") + raise RuntimeError(f"Failed to clean PostgreSQL schema: {result.stderr}") + + logger.info("[PG_CLEAN] Schema cleaned successfully") + + +def _restore_postgresql(dump_file: Path) -> None: + """Restore PostgreSQL database using pg_restore.""" + logger.info("[PG_RESTORE] Starting pg_restore...") + logger.info(f"[PG_RESTORE] Dump file: {dump_file}") + + # Drop and recreate schema to ensure a completely clean restore + _clean_postgresql_schema() + + pg_args = _get_pg_args() + logger.info(f"[PG_RESTORE] Connection args: {pg_args}") + + cmd = [ + "pg_restore", + "--no-owner", # Skip ownership commands (we already created schema) + *pg_args, + "-v", # Verbose + str(dump_file), + ] + + logger.info(f"[PG_RESTORE] Running command: {' '.join(cmd)}") + + result = subprocess.run( + cmd, + env=_get_pg_env(), + capture_output=True, + text=True, + ) + + logger.info(f"[PG_RESTORE] Return code: {result.returncode}") + + # pg_restore may return non-zero even on partial success + # Check for actual errors vs warnings + if result.returncode != 0: + # Some errors during restore are expected (e.g., "does not exist" when cleaning) + # Only fail on critical errors + stderr = result.stderr.lower() + if "fatal" in stderr or "could not connect" in stderr: + logger.error(f"[PG_RESTORE] Failed critically: {result.stderr}") + raise RuntimeError(f"pg_restore failed: {result.stderr}") + else: + logger.warning(f"[PG_RESTORE] Completed with warnings: {result.stderr[:500]}...") + + logger.info("[PG_RESTORE] Completed successfully") + + +def _dump_sqlite(output_file: Path) -> None: + """Dump SQLite database using sqlite3 .backup command.""" + logger.info("Dumping SQLite database with sqlite3 .backup...") + db_path = Path(settings.DATABASES["default"]["NAME"]) + + if not db_path.exists(): + raise FileNotFoundError(f"SQLite database not found: {db_path}") + + # Use sqlite3 .backup command via stdin for reliable execution + result = subprocess.run( + ["sqlite3", str(db_path)], + input=f".backup '{output_file}'\n", + capture_output=True, + text=True, + ) + + if result.returncode != 0: + logger.error(f"sqlite3 backup failed: {result.stderr}") + raise RuntimeError(f"sqlite3 backup failed: {result.stderr}") + + # Verify the backup file was created + if not output_file.exists(): + raise RuntimeError("sqlite3 backup failed: output file not created") + + logger.info(f"sqlite3 backup completed successfully: {output_file}") + + +def _restore_sqlite(dump_file: Path) -> None: + """Restore SQLite database by replacing the database file.""" + logger.info("Restoring SQLite database...") + db_path = Path(settings.DATABASES["default"]["NAME"]) + backup_current = None + + # Backup current database before overwriting + if db_path.exists(): + backup_current = db_path.with_suffix(".db.bak") + shutil.copy2(db_path, backup_current) + logger.info(f"Backed up current database to {backup_current}") + + # Ensure parent directory exists + 
db_path.parent.mkdir(parents=True, exist_ok=True) + + # The backup file from _dump_sqlite is a complete SQLite database file + # We can simply copy it over the existing database + shutil.copy2(dump_file, db_path) + + # Verify the restore worked by checking if sqlite3 can read it + result = subprocess.run( + ["sqlite3", str(db_path)], + input=".tables\n", + capture_output=True, + text=True, + ) + + if result.returncode != 0: + logger.error(f"sqlite3 verification failed: {result.stderr}") + # Try to restore from backup + if backup_current and backup_current.exists(): + shutil.copy2(backup_current, db_path) + logger.info("Restored original database from backup") + raise RuntimeError(f"sqlite3 restore verification failed: {result.stderr}") + + logger.info("sqlite3 restore completed successfully") + + +def create_backup() -> Path: + """ + Create a backup archive containing database dump and data directories. + Returns the path to the created backup file. + """ + backup_dir = get_backup_dir() + + # Use system timezone for filename (user-friendly), but keep internal timestamps as UTC + system_tz_name = CoreSettings.get_system_time_zone() + try: + system_tz = pytz.timezone(system_tz_name) + now_local = datetime.datetime.now(datetime.UTC).astimezone(system_tz) + timestamp = now_local.strftime("%Y.%m.%d.%H.%M.%S") + except Exception as e: + logger.warning(f"Failed to use system timezone {system_tz_name}: {e}, falling back to UTC") + timestamp = datetime.datetime.now(datetime.UTC).strftime("%Y.%m.%d.%H.%M.%S") + + backup_name = f"dispatcharr-backup-{timestamp}.zip" + backup_file = backup_dir / backup_name + + logger.info(f"Creating backup: {backup_name}") + + with tempfile.TemporaryDirectory(prefix="dispatcharr-backup-") as temp_dir: + temp_path = Path(temp_dir) + + # Determine database type and dump accordingly + if _is_postgresql(): + db_dump_file = temp_path / "database.dump" + _dump_postgresql(db_dump_file) + db_type = "postgresql" + else: + db_dump_file = temp_path / "database.sqlite3" + _dump_sqlite(db_dump_file) + db_type = "sqlite" + + # Create ZIP archive with compression and ZIP64 support for large files + with ZipFile(backup_file, "w", compression=ZIP_DEFLATED, allowZip64=True) as zip_file: + # Add database dump + zip_file.write(db_dump_file, db_dump_file.name) + + # Add metadata + metadata = { + "format": "dispatcharr-backup", + "version": 2, + "database_type": db_type, + "database_file": db_dump_file.name, + "created_at": datetime.datetime.now(datetime.UTC).isoformat(), + } + zip_file.writestr("metadata.json", json.dumps(metadata, indent=2)) + + logger.info(f"Backup created successfully: {backup_file}") + return backup_file + + +def restore_backup(backup_file: Path) -> None: + """ + Restore from a backup archive. + WARNING: This will overwrite the database! 
+ """ + if not backup_file.exists(): + raise FileNotFoundError(f"Backup file not found: {backup_file}") + + logger.info(f"Restoring from backup: {backup_file}") + + with tempfile.TemporaryDirectory(prefix="dispatcharr-restore-") as temp_dir: + temp_path = Path(temp_dir) + + # Extract backup + logger.debug("Extracting backup archive...") + with ZipFile(backup_file, "r") as zip_file: + zip_file.extractall(temp_path) + + # Read metadata + metadata_file = temp_path / "metadata.json" + if not metadata_file.exists(): + raise ValueError("Invalid backup: missing metadata.json") + + with open(metadata_file) as f: + metadata = json.load(f) + + # Restore database + _restore_database(temp_path, metadata) + + logger.info("Restore completed successfully") + + +def _restore_database(temp_path: Path, metadata: dict) -> None: + """Restore database from backup.""" + db_type = metadata.get("database_type", "postgresql") + db_file = metadata.get("database_file", "database.dump") + dump_file = temp_path / db_file + + if not dump_file.exists(): + raise ValueError(f"Invalid backup: missing {db_file}") + + current_db_type = "postgresql" if _is_postgresql() else "sqlite" + + if db_type != current_db_type: + raise ValueError( + f"Database type mismatch: backup is {db_type}, " + f"but current database is {current_db_type}" + ) + + if db_type == "postgresql": + _restore_postgresql(dump_file) + else: + _restore_sqlite(dump_file) + + +def list_backups() -> list[dict]: + """List all available backup files with metadata.""" + backup_dir = get_backup_dir() + backups = [] + + for backup_file in sorted(backup_dir.glob("dispatcharr-backup-*.zip"), reverse=True): + # Use UTC timezone so frontend can convert to user's local time + created_time = datetime.datetime.fromtimestamp(backup_file.stat().st_mtime, datetime.UTC) + backups.append({ + "name": backup_file.name, + "size": backup_file.stat().st_size, + "created": created_time.isoformat(), + }) + + return backups + + +def delete_backup(filename: str) -> None: + """Delete a backup file.""" + backup_dir = get_backup_dir() + backup_file = backup_dir / filename + + if not backup_file.exists(): + raise FileNotFoundError(f"Backup file not found: {filename}") + + if not backup_file.is_file(): + raise ValueError(f"Invalid backup file: {filename}") + + backup_file.unlink() + logger.info(f"Deleted backup: {filename}") diff --git a/apps/backups/tasks.py b/apps/backups/tasks.py new file mode 100644 index 00000000..f531fef8 --- /dev/null +++ b/apps/backups/tasks.py @@ -0,0 +1,106 @@ +import logging +import traceback +from celery import shared_task + +from . import services + +logger = logging.getLogger(__name__) + + +def _cleanup_old_backups(retention_count: int) -> int: + """Delete old backups, keeping only the most recent N. 
Returns count deleted.""" + if retention_count <= 0: + return 0 + + backups = services.list_backups() + if len(backups) <= retention_count: + return 0 + + # Backups are sorted newest first, so delete from the end + to_delete = backups[retention_count:] + deleted = 0 + + for backup in to_delete: + try: + services.delete_backup(backup["name"]) + deleted += 1 + logger.info(f"[CLEANUP] Deleted old backup: {backup['name']}") + except Exception as e: + logger.error(f"[CLEANUP] Failed to delete {backup['name']}: {e}") + + return deleted + + +@shared_task(bind=True) +def create_backup_task(self): + """Celery task to create a backup asynchronously.""" + try: + logger.info(f"[BACKUP] Starting backup task {self.request.id}") + backup_file = services.create_backup() + logger.info(f"[BACKUP] Task {self.request.id} completed: {backup_file.name}") + return { + "status": "completed", + "filename": backup_file.name, + "size": backup_file.stat().st_size, + } + except Exception as e: + logger.error(f"[BACKUP] Task {self.request.id} failed: {str(e)}") + logger.error(f"[BACKUP] Traceback: {traceback.format_exc()}") + return { + "status": "failed", + "error": str(e), + } + + +@shared_task(bind=True) +def restore_backup_task(self, filename: str): + """Celery task to restore a backup asynchronously.""" + try: + logger.info(f"[RESTORE] Starting restore task {self.request.id} for {filename}") + backup_dir = services.get_backup_dir() + backup_file = backup_dir / filename + logger.info(f"[RESTORE] Backup file path: {backup_file}") + services.restore_backup(backup_file) + logger.info(f"[RESTORE] Task {self.request.id} completed successfully") + return { + "status": "completed", + "filename": filename, + } + except Exception as e: + logger.error(f"[RESTORE] Task {self.request.id} failed: {str(e)}") + logger.error(f"[RESTORE] Traceback: {traceback.format_exc()}") + return { + "status": "failed", + "error": str(e), + } + + +@shared_task(bind=True) +def scheduled_backup_task(self, retention_count: int = 0): + """Celery task for scheduled backups with optional retention cleanup.""" + try: + logger.info(f"[SCHEDULED] Starting scheduled backup task {self.request.id}") + + # Create backup + backup_file = services.create_backup() + logger.info(f"[SCHEDULED] Backup created: {backup_file.name}") + + # Cleanup old backups if retention is set + deleted = 0 + if retention_count > 0: + deleted = _cleanup_old_backups(retention_count) + logger.info(f"[SCHEDULED] Cleanup complete, deleted {deleted} old backup(s)") + + return { + "status": "completed", + "filename": backup_file.name, + "size": backup_file.stat().st_size, + "deleted_count": deleted, + } + except Exception as e: + logger.error(f"[SCHEDULED] Task {self.request.id} failed: {str(e)}") + logger.error(f"[SCHEDULED] Traceback: {traceback.format_exc()}") + return { + "status": "failed", + "error": str(e), + } diff --git a/apps/backups/tests.py b/apps/backups/tests.py new file mode 100644 index 00000000..dc8a5136 --- /dev/null +++ b/apps/backups/tests.py @@ -0,0 +1,1163 @@ +import json +import tempfile +from io import BytesIO +from pathlib import Path +from zipfile import ZipFile +from unittest.mock import patch, MagicMock + +from django.test import TestCase +from django.contrib.auth import get_user_model +from rest_framework.test import APIClient +from rest_framework_simplejwt.tokens import RefreshToken + +from . 
import services + +User = get_user_model() + + +class BackupServicesTestCase(TestCase): + """Test cases for backup services""" + + def setUp(self): + self.temp_backup_dir = tempfile.mkdtemp() + + def tearDown(self): + import shutil + if Path(self.temp_backup_dir).exists(): + shutil.rmtree(self.temp_backup_dir) + + @patch('apps.backups.services.settings') + def test_get_backup_dir_creates_directory(self, mock_settings): + """Test that get_backup_dir creates the directory if it doesn't exist""" + mock_settings.BACKUP_ROOT = self.temp_backup_dir + + with patch('apps.backups.services.Path') as mock_path: + mock_path_instance = MagicMock() + mock_path_instance.mkdir = MagicMock() + mock_path.return_value = mock_path_instance + + services.get_backup_dir() + mock_path_instance.mkdir.assert_called_once_with(parents=True, exist_ok=True) + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services._is_postgresql') + @patch('apps.backups.services._dump_sqlite') + def test_create_backup_success_sqlite(self, mock_dump_sqlite, mock_is_pg, mock_get_backup_dir): + """Test successful backup creation with SQLite""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + mock_is_pg.return_value = False + + # Mock SQLite dump to create a temp file + def mock_dump(output_file): + output_file.write_text("sqlite dump") + + mock_dump_sqlite.side_effect = mock_dump + + result = services.create_backup() + + self.assertIsInstance(result, Path) + self.assertTrue(result.exists()) + self.assertTrue(result.name.startswith('dispatcharr-backup-')) + self.assertTrue(result.name.endswith('.zip')) + + # Verify the backup contains expected files + with ZipFile(result, 'r') as zf: + names = zf.namelist() + self.assertIn('database.sqlite3', names) + self.assertIn('metadata.json', names) + + # Check metadata + metadata = json.loads(zf.read('metadata.json')) + self.assertEqual(metadata['version'], 2) + self.assertEqual(metadata['database_type'], 'sqlite') + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services._is_postgresql') + @patch('apps.backups.services._dump_postgresql') + def test_create_backup_success_postgresql(self, mock_dump_pg, mock_is_pg, mock_get_backup_dir): + """Test successful backup creation with PostgreSQL""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + mock_is_pg.return_value = True + + # Mock PostgreSQL dump to create a temp file + def mock_dump(output_file): + output_file.write_bytes(b"pg dump data") + + mock_dump_pg.side_effect = mock_dump + + result = services.create_backup() + + self.assertIsInstance(result, Path) + self.assertTrue(result.exists()) + + # Verify the backup contains expected files + with ZipFile(result, 'r') as zf: + names = zf.namelist() + self.assertIn('database.dump', names) + self.assertIn('metadata.json', names) + + # Check metadata + metadata = json.loads(zf.read('metadata.json')) + self.assertEqual(metadata['version'], 2) + self.assertEqual(metadata['database_type'], 'postgresql') + + @patch('apps.backups.services.get_backup_dir') + def test_list_backups_empty(self, mock_get_backup_dir): + """Test listing backups when none exist""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + result = services.list_backups() + + self.assertEqual(result, []) + + @patch('apps.backups.services.get_backup_dir') + def test_list_backups_with_files(self, mock_get_backup_dir): + """Test listing backups with existing backup files""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value 
= backup_dir + + # Create a fake backup file + test_backup = backup_dir / "dispatcharr-backup-2025.01.01.12.00.00.zip" + test_backup.write_text("fake backup content") + + result = services.list_backups() + + self.assertEqual(len(result), 1) + self.assertEqual(result[0]['name'], test_backup.name) + self.assertIn('size', result[0]) + self.assertIn('created', result[0]) + + @patch('apps.backups.services.get_backup_dir') + def test_delete_backup_success(self, mock_get_backup_dir): + """Test successful backup deletion""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + + # Create a fake backup file + test_backup = backup_dir / "dispatcharr-backup-test.zip" + test_backup.write_text("fake backup content") + + self.assertTrue(test_backup.exists()) + + services.delete_backup(test_backup.name) + + self.assertFalse(test_backup.exists()) + + @patch('apps.backups.services.get_backup_dir') + def test_delete_backup_not_found(self, mock_get_backup_dir): + """Test deleting a non-existent backup raises error""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + with self.assertRaises(FileNotFoundError): + services.delete_backup("nonexistent-backup.zip") + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services._is_postgresql') + @patch('apps.backups.services._restore_postgresql') + def test_restore_backup_postgresql(self, mock_restore_pg, mock_is_pg, mock_get_backup_dir): + """Test successful restoration of PostgreSQL backup""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_is_pg.return_value = True + + # Create PostgreSQL backup file + backup_file = backup_dir / "test-backup.zip" + with ZipFile(backup_file, 'w') as zf: + zf.writestr('database.dump', b'pg dump data') + zf.writestr('metadata.json', json.dumps({ + 'version': 2, + 'database_type': 'postgresql', + 'database_file': 'database.dump' + })) + + services.restore_backup(backup_file) + + mock_restore_pg.assert_called_once() + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services._is_postgresql') + @patch('apps.backups.services._restore_sqlite') + def test_restore_backup_sqlite(self, mock_restore_sqlite, mock_is_pg, mock_get_backup_dir): + """Test successful restoration of SQLite backup""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_is_pg.return_value = False + + # Create SQLite backup file + backup_file = backup_dir / "test-backup.zip" + with ZipFile(backup_file, 'w') as zf: + zf.writestr('database.sqlite3', 'sqlite data') + zf.writestr('metadata.json', json.dumps({ + 'version': 2, + 'database_type': 'sqlite', + 'database_file': 'database.sqlite3' + })) + + services.restore_backup(backup_file) + + mock_restore_sqlite.assert_called_once() + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services._is_postgresql') + def test_restore_backup_database_type_mismatch(self, mock_is_pg, mock_get_backup_dir): + """Test restore fails when database type doesn't match""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_is_pg.return_value = True # Current system is PostgreSQL + + # Create SQLite backup file + backup_file = backup_dir / "test-backup.zip" + with ZipFile(backup_file, 'w') as zf: + zf.writestr('database.sqlite3', 'sqlite data') + zf.writestr('metadata.json', json.dumps({ + 'version': 2, + 'database_type': 'sqlite', # Backup is SQLite + 'database_file': 'database.sqlite3' 
+ })) + + with self.assertRaises(ValueError) as context: + services.restore_backup(backup_file) + + self.assertIn('mismatch', str(context.exception).lower()) + + def test_restore_backup_not_found(self): + """Test restoring from non-existent backup file""" + fake_path = Path("/tmp/nonexistent-backup-12345.zip") + + with self.assertRaises(FileNotFoundError): + services.restore_backup(fake_path) + + @patch('apps.backups.services.get_backup_dir') + def test_restore_backup_missing_metadata(self, mock_get_backup_dir): + """Test restoring from backup without metadata.json""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + + # Create a backup file missing metadata.json + backup_file = backup_dir / "invalid-backup.zip" + with ZipFile(backup_file, 'w') as zf: + zf.writestr('database.dump', b'fake dump data') + + with self.assertRaises(ValueError) as context: + services.restore_backup(backup_file) + + self.assertIn('metadata.json', str(context.exception)) + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services._is_postgresql') + def test_restore_backup_missing_database(self, mock_is_pg, mock_get_backup_dir): + """Test restoring from backup missing database dump""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_is_pg.return_value = True + + # Create backup file missing database dump + backup_file = backup_dir / "invalid-backup.zip" + with ZipFile(backup_file, 'w') as zf: + zf.writestr('metadata.json', json.dumps({ + 'version': 2, + 'database_type': 'postgresql', + 'database_file': 'database.dump' + })) + + with self.assertRaises(ValueError) as context: + services.restore_backup(backup_file) + + self.assertIn('database.dump', str(context.exception)) + + +class BackupAPITestCase(TestCase): + """Test cases for backup API endpoints""" + + def setUp(self): + self.client = APIClient() + self.user = User.objects.create_user( + username='testuser', + email='test@example.com', + password='testpass123' + ) + self.admin_user = User.objects.create_superuser( + username='admin', + email='admin@example.com', + password='adminpass123' + ) + self.temp_backup_dir = tempfile.mkdtemp() + + def get_auth_header(self, user): + """Helper method to get JWT auth header for a user""" + refresh = RefreshToken.for_user(user) + return f'Bearer {str(refresh.access_token)}' + + def tearDown(self): + import shutil + if Path(self.temp_backup_dir).exists(): + shutil.rmtree(self.temp_backup_dir) + + def test_list_backups_requires_admin(self): + """Test that listing backups requires admin privileges""" + url = '/api/backups/' + + # Unauthenticated request + response = self.client.get(url) + self.assertIn(response.status_code, [401, 403]) + + # Regular user request + response = self.client.get(url, HTTP_AUTHORIZATION=self.get_auth_header(self.user)) + self.assertIn(response.status_code, [401, 403]) + + @patch('apps.backups.services.list_backups') + def test_list_backups_success(self, mock_list_backups): + """Test successful backup listing""" + mock_list_backups.return_value = [ + { + 'name': 'backup-test.zip', + 'size': 1024, + 'created': '2025-01-01T12:00:00' + } + ] + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertEqual(len(data), 1) + self.assertEqual(data[0]['name'], 'backup-test.zip') + + def test_create_backup_requires_admin(self): + 
"""Test that creating backups requires admin privileges""" + url = '/api/backups/create/' + + # Unauthenticated request + response = self.client.post(url) + self.assertIn(response.status_code, [401, 403]) + + # Regular user request + response = self.client.post(url, HTTP_AUTHORIZATION=self.get_auth_header(self.user)) + self.assertIn(response.status_code, [401, 403]) + + @patch('apps.backups.tasks.create_backup_task.delay') + def test_create_backup_success(self, mock_create_task): + """Test successful backup creation via API (async task)""" + mock_task = MagicMock() + mock_task.id = 'test-task-id-123' + mock_create_task.return_value = mock_task + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/create/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 202) + data = response.json() + self.assertIn('task_id', data) + self.assertIn('task_token', data) + self.assertEqual(data['task_id'], 'test-task-id-123') + + @patch('apps.backups.tasks.create_backup_task.delay') + def test_create_backup_failure(self, mock_create_task): + """Test backup creation failure handling""" + mock_create_task.side_effect = Exception("Failed to start task") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/create/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 500) + data = response.json() + self.assertIn('detail', data) + + @patch('apps.backups.services.get_backup_dir') + def test_download_backup_success(self, mock_get_backup_dir): + """Test successful backup download""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + + # Create a test backup file + backup_file = backup_dir / "test-backup.zip" + backup_file.write_text("test backup content") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/test-backup.zip/download/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + self.assertEqual(response['Content-Type'], 'application/zip') + + @patch('apps.backups.services.get_backup_dir') + def test_download_backup_not_found(self, mock_get_backup_dir): + """Test downloading non-existent backup""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/nonexistent.zip/download/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 404) + + @patch('apps.backups.services.delete_backup') + def test_delete_backup_success(self, mock_delete_backup): + """Test successful backup deletion via API""" + mock_delete_backup.return_value = None + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/test-backup.zip/delete/' + response = self.client.delete(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 204) + mock_delete_backup.assert_called_once_with('test-backup.zip') + + @patch('apps.backups.services.delete_backup') + def test_delete_backup_not_found(self, mock_delete_backup): + """Test deleting non-existent backup via API""" + mock_delete_backup.side_effect = FileNotFoundError("Not found") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/nonexistent.zip/delete/' + response = self.client.delete(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 404) + + def 
test_upload_backup_requires_file(self): + """Test that upload requires a file""" + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/upload/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 400) + data = response.json() + self.assertIn('No file uploaded', data['detail']) + + @patch('apps.backups.services.get_backup_dir') + def test_upload_backup_success(self, mock_get_backup_dir): + """Test successful backup upload""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + # Create a fake backup file + fake_backup = BytesIO(b"fake backup content") + fake_backup.name = 'uploaded-backup.zip' + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/upload/' + response = self.client.post(url, {'file': fake_backup}, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 201) + data = response.json() + self.assertIn('filename', data) + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.tasks.restore_backup_task.delay') + def test_restore_backup_success(self, mock_restore_task, mock_get_backup_dir): + """Test successful backup restoration via API (async task)""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + + mock_task = MagicMock() + mock_task.id = 'test-restore-task-456' + mock_restore_task.return_value = mock_task + + # Create a test backup file + backup_file = backup_dir / "test-backup.zip" + backup_file.write_text("test backup content") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/test-backup.zip/restore/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 202) + data = response.json() + self.assertIn('task_id', data) + self.assertIn('task_token', data) + self.assertEqual(data['task_id'], 'test-restore-task-456') + + @patch('apps.backups.services.get_backup_dir') + def test_restore_backup_not_found(self, mock_get_backup_dir): + """Test restoring from non-existent backup via API""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/nonexistent.zip/restore/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 404) + + # --- Backup Status Endpoint Tests --- + + def test_backup_status_requires_auth_or_token(self): + """Test that backup_status requires auth or valid token""" + url = '/api/backups/status/fake-task-id/' + + # Unauthenticated request without token + response = self.client.get(url) + self.assertEqual(response.status_code, 401) + + def test_backup_status_invalid_token(self): + """Test that backup_status rejects invalid tokens""" + url = '/api/backups/status/fake-task-id/?token=invalid-token' + response = self.client.get(url) + self.assertEqual(response.status_code, 403) + + @patch('apps.backups.api_views.AsyncResult') + def test_backup_status_with_admin_auth(self, mock_async_result): + """Test backup_status with admin authentication""" + mock_result = MagicMock() + mock_result.ready.return_value = False + mock_result.failed.return_value = False + mock_result.state = 'PENDING' + mock_async_result.return_value = mock_result + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/status/test-task-id/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + data = 
response.json() + self.assertEqual(data['state'], 'pending') + + @patch('apps.backups.api_views.AsyncResult') + @patch('apps.backups.api_views._verify_task_token') + def test_backup_status_with_valid_token(self, mock_verify, mock_async_result): + """Test backup_status with valid token""" + mock_verify.return_value = True + mock_result = MagicMock() + mock_result.ready.return_value = True + mock_result.get.return_value = {'status': 'completed', 'filename': 'test.zip'} + mock_async_result.return_value = mock_result + + url = '/api/backups/status/test-task-id/?token=valid-token' + response = self.client.get(url) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertEqual(data['state'], 'completed') + + @patch('apps.backups.api_views.AsyncResult') + def test_backup_status_task_failed(self, mock_async_result): + """Test backup_status when task failed""" + mock_result = MagicMock() + mock_result.ready.return_value = True + mock_result.get.return_value = {'status': 'failed', 'error': 'Something went wrong'} + mock_async_result.return_value = mock_result + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/status/test-task-id/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertEqual(data['state'], 'failed') + self.assertIn('Something went wrong', data['error']) + + # --- Download Token Endpoint Tests --- + + def test_get_download_token_requires_admin(self): + """Test that get_download_token requires admin privileges""" + url = '/api/backups/test.zip/download-token/' + + response = self.client.get(url) + self.assertIn(response.status_code, [401, 403]) + + response = self.client.get(url, HTTP_AUTHORIZATION=self.get_auth_header(self.user)) + self.assertIn(response.status_code, [401, 403]) + + @patch('apps.backups.services.get_backup_dir') + def test_get_download_token_success(self, mock_get_backup_dir): + """Test successful download token generation""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + + # Create a test backup file + backup_file = backup_dir / "test-backup.zip" + backup_file.write_text("test content") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/test-backup.zip/download-token/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertIn('token', data) + self.assertEqual(len(data['token']), 32) + + @patch('apps.backups.services.get_backup_dir') + def test_get_download_token_not_found(self, mock_get_backup_dir): + """Test download token for non-existent file""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/nonexistent.zip/download-token/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 404) + + # --- Download with Token Auth Tests --- + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.api_views._verify_task_token') + def test_download_backup_with_valid_token(self, mock_verify, mock_get_backup_dir): + """Test downloading backup with valid token (no auth header)""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_verify.return_value = True + + # Create a test backup file + backup_file = backup_dir / "test-backup.zip" + 
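# Editorial aside (not part of the original tests): the flow exercised here is the two-step
# tokenised download. An admin first calls
#     GET /api/backups/<filename>/download-token/   (JWT auth, returns {"token": ...})
# and the file is then fetched without an Authorization header via
#     GET /api/backups/<filename>/download/?token=<token>
# The patched _verify_task_token stands in for validating that second request.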
backup_file.write_text("test backup content") + + url = '/api/backups/test-backup.zip/download/?token=valid-token' + response = self.client.get(url) + + self.assertEqual(response.status_code, 200) + + @patch('apps.backups.services.get_backup_dir') + def test_download_backup_invalid_token(self, mock_get_backup_dir): + """Test downloading backup with invalid token""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + url = '/api/backups/test-backup.zip/download/?token=invalid-token' + response = self.client.get(url) + + self.assertEqual(response.status_code, 403) + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.tasks.restore_backup_task.delay') + def test_restore_backup_task_start_failure(self, mock_restore_task, mock_get_backup_dir): + """Test restore task start failure via API""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_restore_task.side_effect = Exception("Failed to start restore task") + + # Create a test backup file + backup_file = backup_dir / "test-backup.zip" + backup_file.write_text("test content") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/test-backup.zip/restore/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 500) + data = response.json() + self.assertIn('detail', data) + + def test_get_schedule_requires_admin(self): + """Test that getting schedule requires admin privileges""" + url = '/api/backups/schedule/' + + # Unauthenticated request + response = self.client.get(url) + self.assertIn(response.status_code, [401, 403]) + + # Regular user request + response = self.client.get(url, HTTP_AUTHORIZATION=self.get_auth_header(self.user)) + self.assertIn(response.status_code, [401, 403]) + + @patch('apps.backups.api_views.get_schedule_settings') + def test_get_schedule_success(self, mock_get_settings): + """Test successful schedule retrieval""" + mock_get_settings.return_value = { + 'enabled': True, + 'frequency': 'daily', + 'time': '03:00', + 'day_of_week': 0, + 'retention_count': 5, + 'cron_expression': '', + } + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/schedule/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertEqual(data['enabled'], True) + self.assertEqual(data['frequency'], 'daily') + self.assertEqual(data['retention_count'], 5) + + def test_update_schedule_requires_admin(self): + """Test that updating schedule requires admin privileges""" + url = '/api/backups/schedule/update/' + + # Unauthenticated request + response = self.client.put(url, {}, content_type='application/json') + self.assertIn(response.status_code, [401, 403]) + + # Regular user request + response = self.client.put( + url, + {}, + content_type='application/json', + HTTP_AUTHORIZATION=self.get_auth_header(self.user) + ) + self.assertIn(response.status_code, [401, 403]) + + @patch('apps.backups.api_views.update_schedule_settings') + def test_update_schedule_success(self, mock_update_settings): + """Test successful schedule update""" + mock_update_settings.return_value = { + 'enabled': True, + 'frequency': 'weekly', + 'time': '02:00', + 'day_of_week': 1, + 'retention_count': 10, + 'cron_expression': '', + } + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/schedule/update/' + response = self.client.put( + url, + {'enabled': True, 'frequency': 'weekly', 'time': 
'02:00', 'day_of_week': 1, 'retention_count': 10}, + content_type='application/json', + HTTP_AUTHORIZATION=auth_header + ) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertEqual(data['frequency'], 'weekly') + self.assertEqual(data['day_of_week'], 1) + + @patch('apps.backups.api_views.update_schedule_settings') + def test_update_schedule_validation_error(self, mock_update_settings): + """Test schedule update with invalid data""" + mock_update_settings.side_effect = ValueError("frequency must be 'daily' or 'weekly'") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/schedule/update/' + response = self.client.put( + url, + {'frequency': 'invalid'}, + content_type='application/json', + HTTP_AUTHORIZATION=auth_header + ) + + self.assertEqual(response.status_code, 400) + data = response.json() + self.assertIn('frequency', data['detail']) + + +class BackupSchedulerTestCase(TestCase): + """Test cases for backup scheduler""" + + databases = {'default'} + + @classmethod + def setUpClass(cls): + pass + + @classmethod + def tearDownClass(cls): + pass + + def setUp(self): + from core.models import CoreSettings + # Clean up any existing settings + CoreSettings.objects.filter(key__startswith='backup_').delete() + + def tearDown(self): + from core.models import CoreSettings + from django_celery_beat.models import PeriodicTask + CoreSettings.objects.filter(key__startswith='backup_').delete() + PeriodicTask.objects.filter(name='backup-scheduled-task').delete() + + def test_get_schedule_settings_defaults(self): + """Test that get_schedule_settings returns defaults when no settings exist""" + from . import scheduler + + settings = scheduler.get_schedule_settings() + + self.assertEqual(settings['enabled'], False) + self.assertEqual(settings['frequency'], 'daily') + self.assertEqual(settings['time'], '03:00') + self.assertEqual(settings['day_of_week'], 0) + self.assertEqual(settings['retention_count'], 0) + self.assertEqual(settings['cron_expression'], '') + + def test_update_schedule_settings_stores_values(self): + """Test that update_schedule_settings stores values correctly""" + from . import scheduler + + result = scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'weekly', + 'time': '04:30', + 'day_of_week': 3, + 'retention_count': 7, + }) + + self.assertEqual(result['enabled'], True) + self.assertEqual(result['frequency'], 'weekly') + self.assertEqual(result['time'], '04:30') + self.assertEqual(result['day_of_week'], 3) + self.assertEqual(result['retention_count'], 7) + + # Verify persistence + settings = scheduler.get_schedule_settings() + self.assertEqual(settings['enabled'], True) + self.assertEqual(settings['frequency'], 'weekly') + + def test_update_schedule_settings_invalid_frequency(self): + """Test that invalid frequency raises ValueError""" + from . import scheduler + + with self.assertRaises(ValueError) as context: + scheduler.update_schedule_settings({'frequency': 'monthly'}) + + self.assertIn('frequency', str(context.exception).lower()) + + def test_update_schedule_settings_invalid_time(self): + """Test that invalid time raises ValueError""" + from . import scheduler + + with self.assertRaises(ValueError) as context: + scheduler.update_schedule_settings({'time': 'invalid'}) + + self.assertIn('HH:MM', str(context.exception)) + + def test_update_schedule_settings_invalid_day_of_week(self): + """Test that invalid day_of_week raises ValueError""" + from . 
import scheduler + + with self.assertRaises(ValueError) as context: + scheduler.update_schedule_settings({'day_of_week': 7}) + + self.assertIn('day_of_week', str(context.exception).lower()) + + def test_update_schedule_settings_invalid_retention(self): + """Test that negative retention_count raises ValueError""" + from . import scheduler + + with self.assertRaises(ValueError) as context: + scheduler.update_schedule_settings({'retention_count': -1}) + + self.assertIn('retention_count', str(context.exception).lower()) + + def test_sync_creates_periodic_task_when_enabled(self): + """Test that enabling schedule creates a PeriodicTask""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '05:00', + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + self.assertTrue(task.enabled) + self.assertEqual(task.crontab.hour, '05') + self.assertEqual(task.crontab.minute, '00') + + def test_sync_deletes_periodic_task_when_disabled(self): + """Test that disabling schedule removes PeriodicTask""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + + # First enable + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '05:00', + }) + + self.assertTrue(PeriodicTask.objects.filter(name='backup-scheduled-task').exists()) + + # Then disable + scheduler.update_schedule_settings({'enabled': False}) + + self.assertFalse(PeriodicTask.objects.filter(name='backup-scheduled-task').exists()) + + def test_weekly_schedule_sets_day_of_week(self): + """Test that weekly schedule sets correct day_of_week in crontab""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'weekly', + 'time': '06:00', + 'day_of_week': 3, # Wednesday + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + self.assertEqual(task.crontab.day_of_week, '3') + + def test_cron_expression_stores_value(self): + """Test that cron_expression is stored and retrieved correctly""" + from . import scheduler + + result = scheduler.update_schedule_settings({ + 'enabled': True, + 'cron_expression': '*/5 * * * *', + }) + + self.assertEqual(result['cron_expression'], '*/5 * * * *') + + # Verify persistence + settings = scheduler.get_schedule_settings() + self.assertEqual(settings['cron_expression'], '*/5 * * * *') + + def test_cron_expression_creates_correct_schedule(self): + """Test that cron expression creates correct CrontabSchedule""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + + scheduler.update_schedule_settings({ + 'enabled': True, + 'cron_expression': '*/15 2 * * 1-5', # Every 15 mins during 2 AM hour on weekdays + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + self.assertEqual(task.crontab.minute, '*/15') + self.assertEqual(task.crontab.hour, '2') + self.assertEqual(task.crontab.day_of_month, '*') + self.assertEqual(task.crontab.month_of_year, '*') + self.assertEqual(task.crontab.day_of_week, '1-5') + + def test_cron_expression_invalid_format(self): + """Test that invalid cron expression raises ValueError""" + from . 
import scheduler + + # Too few parts + with self.assertRaises(ValueError) as context: + scheduler.update_schedule_settings({ + 'enabled': True, + 'cron_expression': '0 3 *', + }) + self.assertIn('5 parts', str(context.exception)) + + def test_cron_expression_empty_uses_simple_mode(self): + """Test that empty cron_expression falls back to simple frequency mode""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '04:00', + 'cron_expression': '', # Empty, should use simple mode + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + self.assertEqual(task.crontab.minute, '00') + self.assertEqual(task.crontab.hour, '04') + self.assertEqual(task.crontab.day_of_week, '*') + + def test_cron_expression_overrides_simple_settings(self): + """Test that cron_expression takes precedence over frequency/time""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '03:00', + 'cron_expression': '0 */6 * * *', # Every 6 hours (should override daily at 3 AM) + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + self.assertEqual(task.crontab.minute, '0') + self.assertEqual(task.crontab.hour, '*/6') + self.assertEqual(task.crontab.day_of_week, '*') + + def test_periodic_task_uses_system_timezone(self): + """Test that CrontabSchedule is created with the system timezone""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + from core.models import CoreSettings + + original_tz = CoreSettings.get_system_time_zone() + + try: + # Set a non-UTC timezone + CoreSettings.set_system_time_zone('America/New_York') + + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '03:00', + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + self.assertEqual(str(task.crontab.timezone), 'America/New_York') + finally: + scheduler.update_schedule_settings({'enabled': False}) + CoreSettings.set_system_time_zone(original_tz) + + def test_periodic_task_timezone_updates_with_schedule(self): + """Test that CrontabSchedule timezone is updated when schedule is modified""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + from core.models import CoreSettings + + original_tz = CoreSettings.get_system_time_zone() + + try: + # Create initial schedule with one timezone + CoreSettings.set_system_time_zone('America/Los_Angeles') + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '02:00', + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + self.assertEqual(str(task.crontab.timezone), 'America/Los_Angeles') + + # Change system timezone and update schedule + CoreSettings.set_system_time_zone('Europe/London') + scheduler.update_schedule_settings({ + 'enabled': True, + 'time': '04:00', + }) + + task.refresh_from_db() + self.assertEqual(str(task.crontab.timezone), 'Europe/London') + finally: + scheduler.update_schedule_settings({'enabled': False}) + CoreSettings.set_system_time_zone(original_tz) + + def test_orphaned_crontab_cleanup(self): + """Test that old CrontabSchedule is deleted when schedule changes""" + from . 
import scheduler + from django_celery_beat.models import PeriodicTask, CrontabSchedule + + # Create initial daily schedule + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '03:00', + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + first_crontab_id = task.crontab.id + initial_count = CrontabSchedule.objects.count() + + # Change to weekly schedule (different crontab) + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'weekly', + 'day_of_week': 3, + 'time': '03:00', + }) + + task.refresh_from_db() + second_crontab_id = task.crontab.id + + # Verify old crontab was deleted + self.assertNotEqual(first_crontab_id, second_crontab_id) + self.assertFalse(CrontabSchedule.objects.filter(id=first_crontab_id).exists()) + self.assertEqual(CrontabSchedule.objects.count(), initial_count) + + # Cleanup + scheduler.update_schedule_settings({'enabled': False}) + + +class BackupTasksTestCase(TestCase): + """Test cases for backup Celery tasks""" + + def setUp(self): + self.temp_backup_dir = tempfile.mkdtemp() + + def tearDown(self): + import shutil + if Path(self.temp_backup_dir).exists(): + shutil.rmtree(self.temp_backup_dir) + + @patch('apps.backups.tasks.services.list_backups') + @patch('apps.backups.tasks.services.delete_backup') + def test_cleanup_old_backups_keeps_recent(self, mock_delete, mock_list): + """Test that cleanup keeps the most recent backups""" + from .tasks import _cleanup_old_backups + + mock_list.return_value = [ + {'name': 'backup-3.zip'}, # newest + {'name': 'backup-2.zip'}, + {'name': 'backup-1.zip'}, # oldest + ] + + deleted = _cleanup_old_backups(retention_count=2) + + self.assertEqual(deleted, 1) + mock_delete.assert_called_once_with('backup-1.zip') + + @patch('apps.backups.tasks.services.list_backups') + @patch('apps.backups.tasks.services.delete_backup') + def test_cleanup_old_backups_does_nothing_when_under_limit(self, mock_delete, mock_list): + """Test that cleanup does nothing when under retention limit""" + from .tasks import _cleanup_old_backups + + mock_list.return_value = [ + {'name': 'backup-2.zip'}, + {'name': 'backup-1.zip'}, + ] + + deleted = _cleanup_old_backups(retention_count=5) + + self.assertEqual(deleted, 0) + mock_delete.assert_not_called() + + @patch('apps.backups.tasks.services.list_backups') + @patch('apps.backups.tasks.services.delete_backup') + def test_cleanup_old_backups_zero_retention_keeps_all(self, mock_delete, mock_list): + """Test that retention_count=0 keeps all backups""" + from .tasks import _cleanup_old_backups + + mock_list.return_value = [ + {'name': 'backup-3.zip'}, + {'name': 'backup-2.zip'}, + {'name': 'backup-1.zip'}, + ] + + deleted = _cleanup_old_backups(retention_count=0) + + self.assertEqual(deleted, 0) + mock_delete.assert_not_called() + + @patch('apps.backups.tasks.services.create_backup') + @patch('apps.backups.tasks._cleanup_old_backups') + def test_scheduled_backup_task_success(self, mock_cleanup, mock_create): + """Test scheduled backup task success""" + from .tasks import scheduled_backup_task + + mock_backup_file = MagicMock() + mock_backup_file.name = 'scheduled-backup.zip' + mock_backup_file.stat.return_value.st_size = 1024 + mock_create.return_value = mock_backup_file + mock_cleanup.return_value = 2 + + result = scheduled_backup_task(retention_count=5) + + self.assertEqual(result['status'], 'completed') + self.assertEqual(result['filename'], 'scheduled-backup.zip') + self.assertEqual(result['size'], 1024) + 
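# Editorial note (not part of the original test): deleted_count surfaces the return value of
# _cleanup_old_backups, which is mocked to 2 here. In a real run it would be
# len(list_backups()) - retention_count when that is positive, e.g. retaining the 5 newest
# of 7 archives deletes 2.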
self.assertEqual(result['deleted_count'], 2) + mock_cleanup.assert_called_once_with(5) + + @patch('apps.backups.tasks.services.create_backup') + @patch('apps.backups.tasks._cleanup_old_backups') + def test_scheduled_backup_task_no_cleanup_when_retention_zero(self, mock_cleanup, mock_create): + """Test scheduled backup skips cleanup when retention is 0""" + from .tasks import scheduled_backup_task + + mock_backup_file = MagicMock() + mock_backup_file.name = 'scheduled-backup.zip' + mock_backup_file.stat.return_value.st_size = 1024 + mock_create.return_value = mock_backup_file + + result = scheduled_backup_task(retention_count=0) + + self.assertEqual(result['status'], 'completed') + self.assertEqual(result['deleted_count'], 0) + mock_cleanup.assert_not_called() + + @patch('apps.backups.tasks.services.create_backup') + def test_scheduled_backup_task_failure(self, mock_create): + """Test scheduled backup task handles failure""" + from .tasks import scheduled_backup_task + + mock_create.side_effect = Exception("Backup failed") + + result = scheduled_backup_task(retention_count=5) + + self.assertEqual(result['status'], 'failed') + self.assertIn('Backup failed', result['error']) diff --git a/apps/channels/api_urls.py b/apps/channels/api_urls.py index 4246373e..bd53ae45 100644 --- a/apps/channels/api_urls.py +++ b/apps/channels/api_urls.py @@ -6,12 +6,21 @@ from .api_views import ( ChannelGroupViewSet, BulkDeleteStreamsAPIView, BulkDeleteChannelsAPIView, + BulkDeleteLogosAPIView, + CleanupUnusedLogosAPIView, LogoViewSet, ChannelProfileViewSet, UpdateChannelMembershipAPIView, BulkUpdateChannelMembershipAPIView, RecordingViewSet, + RecurringRecordingRuleViewSet, GetChannelStreamsAPIView, + SeriesRulesAPIView, + DeleteSeriesRuleAPIView, + EvaluateSeriesRulesAPIView, + BulkRemoveSeriesRecordingsAPIView, + BulkDeleteUpcomingRecordingsAPIView, + ComskipConfigAPIView, ) app_name = 'channels' # for DRF routing @@ -23,14 +32,24 @@ router.register(r'channels', ChannelViewSet, basename='channel') router.register(r'logos', LogoViewSet, basename='logo') router.register(r'profiles', ChannelProfileViewSet, basename='profile') router.register(r'recordings', RecordingViewSet, basename='recording') +router.register(r'recurring-rules', RecurringRecordingRuleViewSet, basename='recurring-rule') urlpatterns = [ # Bulk delete is a single APIView, not a ViewSet path('streams/bulk-delete/', BulkDeleteStreamsAPIView.as_view(), name='bulk_delete_streams'), path('channels/bulk-delete/', BulkDeleteChannelsAPIView.as_view(), name='bulk_delete_channels'), + path('logos/bulk-delete/', BulkDeleteLogosAPIView.as_view(), name='bulk_delete_logos'), + path('logos/cleanup/', CleanupUnusedLogosAPIView.as_view(), name='cleanup_unused_logos'), path('channels//streams/', GetChannelStreamsAPIView.as_view(), name='get_channel_streams'), path('profiles//channels//', UpdateChannelMembershipAPIView.as_view(), name='update_channel_membership'), path('profiles//channels/bulk-update/', BulkUpdateChannelMembershipAPIView.as_view(), name='bulk_update_channel_membership'), + # DVR series rules (order matters: specific routes before catch-all slug) + path('series-rules/', SeriesRulesAPIView.as_view(), name='series_rules'), + path('series-rules/evaluate/', EvaluateSeriesRulesAPIView.as_view(), name='evaluate_series_rules'), + path('series-rules/bulk-remove/', BulkRemoveSeriesRecordingsAPIView.as_view(), name='bulk_remove_series_recordings'), + path('series-rules//', DeleteSeriesRuleAPIView.as_view(), name='delete_series_rule'), + 
path('recordings/bulk-delete-upcoming/', BulkDeleteUpcomingRecordingsAPIView.as_view(), name='bulk_delete_upcoming_recordings'), + path('dvr/comskip-config/', ComskipConfigAPIView.as_view(), name='comskip_config'), ] urlpatterns += router.urls diff --git a/apps/channels/api_views.py b/apps/channels/api_views.py index ab206afb..c2ba7a06 100644 --- a/apps/channels/api_views.py +++ b/apps/channels/api_views.py @@ -1,57 +1,120 @@ from rest_framework import viewsets, status from rest_framework.response import Response from rest_framework.views import APIView -from rest_framework.permissions import IsAuthenticated, AllowAny +from rest_framework.permissions import AllowAny from rest_framework.decorators import action -from rest_framework.parsers import MultiPartParser, FormParser +from rest_framework.parsers import MultiPartParser, FormParser, JSONParser from drf_yasg.utils import swagger_auto_schema from drf_yasg import openapi from django.shortcuts import get_object_or_404, get_list_or_404 from django.db import transaction -import os, json, requests +from django.db.models import Q +import os, json, requests, logging, mimetypes +from django.utils.http import http_date +from urllib.parse import unquote +from apps.accounts.permissions import ( + Authenticated, + IsAdmin, + IsOwnerOfObject, + permission_classes_by_action, + permission_classes_by_method, +) -from .models import Stream, Channel, ChannelGroup, Logo, ChannelProfile, ChannelProfileMembership, Recording -from .serializers import StreamSerializer, ChannelSerializer, ChannelGroupSerializer, LogoSerializer, ChannelProfileMembershipSerializer, BulkChannelProfileMembershipSerializer, ChannelProfileSerializer, RecordingSerializer -from .tasks import match_epg_channels +from core.models import UserAgent, CoreSettings +from core.utils import RedisClient + +from .models import ( + Stream, + Channel, + ChannelGroup, + Logo, + ChannelProfile, + ChannelProfileMembership, + Recording, + RecurringRecordingRule, +) +from .serializers import ( + StreamSerializer, + ChannelSerializer, + ChannelGroupSerializer, + LogoSerializer, + ChannelProfileMembershipSerializer, + BulkChannelProfileMembershipSerializer, + ChannelProfileSerializer, + RecordingSerializer, + RecurringRecordingRuleSerializer, +) +from .tasks import ( + match_epg_channels, + evaluate_series_rules, + evaluate_series_rules_impl, + match_single_channel_epg, + match_selected_channels_epg, + sync_recurring_rule_impl, + purge_recurring_rule_impl, +) import django_filters from django_filters.rest_framework import DjangoFilterBackend from rest_framework.filters import SearchFilter, OrderingFilter from apps.epg.models import EPGData +from apps.vod.models import Movie, Series from django.db.models import Q from django.http import StreamingHttpResponse, FileResponse, Http404 +from django.utils import timezone import mimetypes +from django.conf import settings from rest_framework.pagination import PageNumberPagination + +logger = logging.getLogger(__name__) + + class OrInFilter(django_filters.Filter): """ Custom filter that handles the OR condition instead of AND. 
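For example, ?channel_group_name=News,Sports matches rows whose group name contains either value, rather than requiring both.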
""" + def filter(self, queryset, value): if value: # Create a Q object for each value and combine them with OR query = Q() - for val in value.split(','): + for val in value.split(","): query |= Q(**{self.field_name: val}) return queryset.filter(query) return queryset + class StreamPagination(PageNumberPagination): - page_size = 25 # Default page size - page_size_query_param = 'page_size' # Allow clients to specify page size + page_size = 50 # Default page size to match frontend default + page_size_query_param = "page_size" # Allow clients to specify page size max_page_size = 10000 # Prevent excessive page sizes + class StreamFilter(django_filters.FilterSet): - name = django_filters.CharFilter(lookup_expr='icontains') - channel_group_name = OrInFilter(field_name="channel_group__name", lookup_expr="icontains") + name = django_filters.CharFilter(lookup_expr="icontains") + channel_group_name = OrInFilter( + field_name="channel_group__name", lookup_expr="icontains" + ) m3u_account = django_filters.NumberFilter(field_name="m3u_account__id") - m3u_account_name = django_filters.CharFilter(field_name="m3u_account__name", lookup_expr="icontains") - m3u_account_is_active = django_filters.BooleanFilter(field_name="m3u_account__is_active") + m3u_account_name = django_filters.CharFilter( + field_name="m3u_account__name", lookup_expr="icontains" + ) + m3u_account_is_active = django_filters.BooleanFilter( + field_name="m3u_account__is_active" + ) class Meta: model = Stream - fields = ['name', 'channel_group_name', 'm3u_account', 'm3u_account_name', 'm3u_account_is_active'] + fields = [ + "name", + "channel_group_name", + "m3u_account", + "m3u_account_name", + "m3u_account_is_active", + ] + # ───────────────────────────────────────────────────────── # 1) Stream API (CRUD) @@ -59,46 +122,53 @@ class StreamFilter(django_filters.FilterSet): class StreamViewSet(viewsets.ModelViewSet): queryset = Stream.objects.all() serializer_class = StreamSerializer - permission_classes = [IsAuthenticated] pagination_class = StreamPagination filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter] filterset_class = StreamFilter - search_fields = ['name', 'channel_group__name'] - ordering_fields = ['name', 'channel_group__name'] - ordering = ['-name'] + search_fields = ["name", "channel_group__name"] + ordering_fields = ["name", "channel_group__name", "m3u_account__name"] + ordering = ["-name"] + + def get_permissions(self): + if self.action == "duplicate": + return [IsAdmin()] + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] def get_queryset(self): qs = super().get_queryset() # Exclude streams from inactive M3U accounts qs = qs.exclude(m3u_account__is_active=False) - assigned = self.request.query_params.get('assigned') + assigned = self.request.query_params.get("assigned") if assigned is not None: qs = qs.filter(channels__id=assigned) - unassigned = self.request.query_params.get('unassigned') - if unassigned == '1': + unassigned = self.request.query_params.get("unassigned") + if unassigned == "1": qs = qs.filter(channels__isnull=True) - channel_group = self.request.query_params.get('channel_group') + channel_group = self.request.query_params.get("channel_group") if channel_group: - group_names = channel_group.split(',') + group_names = channel_group.split(",") qs = qs.filter(channel_group__name__in=group_names) return qs def list(self, request, *args, **kwargs): - ids = request.query_params.get('ids', None) + ids = 
request.query_params.get("ids", None) if ids: - ids = ids.split(',') + ids = ids.split(",") streams = get_list_or_404(Stream, id__in=ids) serializer = self.get_serializer(streams, many=True) return Response(serializer.data) return super().list(request, *args, **kwargs) - @action(detail=False, methods=['get'], url_path='ids') + @action(detail=False, methods=["get"], url_path="ids") def get_ids(self, request, *args, **kwargs): # Get the filtered queryset queryset = self.get_queryset() @@ -107,85 +177,569 @@ class StreamViewSet(viewsets.ModelViewSet): queryset = self.filter_queryset(queryset) # Return only the IDs from the queryset - stream_ids = queryset.values_list('id', flat=True) + stream_ids = queryset.values_list("id", flat=True) # Return the response with the list of IDs return Response(list(stream_ids)) - @action(detail=False, methods=['get'], url_path='groups') + @action(detail=False, methods=["get"], url_path="groups") def get_groups(self, request, *args, **kwargs): # Get unique ChannelGroup names that are linked to streams - group_names = ChannelGroup.objects.filter(streams__isnull=False).order_by('name').values_list('name', flat=True).distinct() + group_names = ( + ChannelGroup.objects.filter(streams__isnull=False) + .order_by("name") + .values_list("name", flat=True) + .distinct() + ) # Return the response with the list of unique group names return Response(list(group_names)) + @swagger_auto_schema( + method="post", + operation_description="Retrieve streams by a list of IDs using POST to avoid URL length limitations", + request_body=openapi.Schema( + type=openapi.TYPE_OBJECT, + required=["ids"], + properties={ + "ids": openapi.Schema( + type=openapi.TYPE_ARRAY, + items=openapi.Items(type=openapi.TYPE_INTEGER), + description="List of stream IDs to retrieve" + ), + }, + ), + responses={200: StreamSerializer(many=True)}, + ) + @action(detail=False, methods=["post"], url_path="by-ids") + def get_by_ids(self, request, *args, **kwargs): + ids = request.data.get("ids", []) + if not isinstance(ids, list): + return Response( + {"error": "ids must be a list of integers"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + streams = Stream.objects.filter(id__in=ids) + serializer = self.get_serializer(streams, many=True) + return Response(serializer.data) + + # ───────────────────────────────────────────────────────── # 2) Channel Group Management (CRUD) # ───────────────────────────────────────────────────────── class ChannelGroupViewSet(viewsets.ModelViewSet): queryset = ChannelGroup.objects.all() serializer_class = ChannelGroupSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + + def get_queryset(self): + """Return channel groups with prefetched relations for efficient counting""" + return ChannelGroup.objects.prefetch_related('channels', 'm3u_accounts').all() + + def update(self, request, *args, **kwargs): + """Override update to check M3U associations""" + instance = self.get_object() + + # Check if group has M3U account associations + if hasattr(instance, 'm3u_account') and instance.m3u_account.exists(): + return Response( + {"error": "Cannot edit group with M3U account associations"}, + status=status.HTTP_400_BAD_REQUEST + ) + + return super().update(request, *args, **kwargs) + + def partial_update(self, request, *args, **kwargs): + """Override partial_update to check M3U associations""" + instance = self.get_object() + + # Check if 
group has M3U account associations + if hasattr(instance, 'm3u_account') and instance.m3u_account.exists(): + return Response( + {"error": "Cannot edit group with M3U account associations"}, + status=status.HTTP_400_BAD_REQUEST + ) + + return super().partial_update(request, *args, **kwargs) + + @swagger_auto_schema( + method="post", + operation_description="Delete all channel groups that have no associations (no channels or M3U accounts)", + responses={200: "Cleanup completed"}, + ) + @action(detail=False, methods=["post"], url_path="cleanup") + def cleanup_unused_groups(self, request): + """Delete all channel groups with no channels or M3U account associations""" + from django.db.models import Q, Exists, OuterRef + + # Find groups with no channels and no M3U account associations using Exists subqueries + from .models import Channel, ChannelGroupM3UAccount + + has_channels = Channel.objects.filter(channel_group_id=OuterRef('pk')) + has_accounts = ChannelGroupM3UAccount.objects.filter(channel_group_id=OuterRef('pk')) + + unused_groups = ChannelGroup.objects.annotate( + has_channels=Exists(has_channels), + has_accounts=Exists(has_accounts) + ).filter( + has_channels=False, + has_accounts=False + ) + + deleted_count = unused_groups.count() + group_names = list(unused_groups.values_list('name', flat=True)) + + # Delete the unused groups + unused_groups.delete() + + return Response({ + "message": f"Successfully deleted {deleted_count} unused channel groups", + "deleted_count": deleted_count, + "deleted_groups": group_names + }) + + def destroy(self, request, *args, **kwargs): + """Override destroy to check for associations before deletion""" + instance = self.get_object() + + # Check if group has associated channels + if instance.channels.exists(): + return Response( + {"error": "Cannot delete group with associated channels"}, + status=status.HTTP_400_BAD_REQUEST + ) + + # Check if group has M3U account associations + if hasattr(instance, 'm3u_account') and instance.m3u_account.exists(): + return Response( + {"error": "Cannot delete group with M3U account associations"}, + status=status.HTTP_400_BAD_REQUEST + ) + + return super().destroy(request, *args, **kwargs) # ───────────────────────────────────────────────────────── # 3) Channel Management (CRUD) # ───────────────────────────────────────────────────────── class ChannelPagination(PageNumberPagination): - page_size = 25 # Default page size - page_size_query_param = 'page_size' # Allow clients to specify page size + page_size = 50 # Default page size to match frontend default + page_size_query_param = "page_size" # Allow clients to specify page size max_page_size = 10000 # Prevent excessive page sizes - def paginate_queryset(self, queryset, request, view=None): if not request.query_params.get(self.page_query_param): return None # disables pagination, returns full queryset return super().paginate_queryset(queryset, request, view) + +class EPGFilter(django_filters.Filter): + """ + Filter channels by EPG source name or null (unlinked). 
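# In-memory analogue (illustration only) of the cleanup query above: a channel
# group is deleted only when neither Exists() subquery finds a channel or an
# M3U-account link pointing at it.
groups = ["News", "Sports", "Orphaned"]
groups_with_channels = {"News"}     # would come from the Channel subquery
groups_with_accounts = {"Sports"}   # would come from ChannelGroupM3UAccount

unused = [g for g in groups
          if g not in groups_with_channels and g not in groups_with_accounts]
print(unused)   # -> ['Orphaned'] -> only these are removed by /cleanup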
+ """ + def filter(self, queryset, value): + if not value: + return queryset + + # Split comma-separated values + values = [v.strip() for v in value.split(',')] + query = Q() + + for val in values: + if val == 'null': + # Filter for channels with no EPG data + query |= Q(epg_data__isnull=True) + else: + # Filter for channels with specific EPG source name + query |= Q(epg_data__epg_source__name__icontains=val) + + return queryset.filter(query) + + class ChannelFilter(django_filters.FilterSet): - name = django_filters.CharFilter(lookup_expr='icontains') - channel_group_name = OrInFilter(field_name="channel_group__name", lookup_expr="icontains") + name = django_filters.CharFilter(lookup_expr="icontains") + channel_group = OrInFilter( + field_name="channel_group__name", lookup_expr="icontains" + ) + epg = EPGFilter() class Meta: model = Channel - fields = ['name', 'channel_group_name',] + fields = [ + "name", + "channel_group", + "epg", + ] + class ChannelViewSet(viewsets.ModelViewSet): queryset = Channel.objects.all() serializer_class = ChannelSerializer - permission_classes = [IsAuthenticated] pagination_class = ChannelPagination filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter] filterset_class = ChannelFilter - search_fields = ['name', 'channel_group__name'] - ordering_fields = ['channel_number', 'name', 'channel_group__name'] - ordering = ['-channel_number'] + search_fields = ["name", "channel_group__name"] + ordering_fields = ["channel_number", "name", "channel_group__name"] + ordering = ["-channel_number"] + + def create(self, request, *args, **kwargs): + """Override create to handle channel profile membership""" + serializer = self.get_serializer(data=request.data) + serializer.is_valid(raise_exception=True) + + with transaction.atomic(): + channel = serializer.save() + + # Handle channel profile membership + # Semantics: + # - Omitted (None): add to ALL profiles (backward compatible default) + # - Empty array []: add to NO profiles + # - Sentinel [0] or 0: add to ALL profiles (explicit) + # - [1,2,...]: add to specified profile IDs only + channel_profile_ids = request.data.get("channel_profile_ids") + if channel_profile_ids is not None: + # Normalize single ID to array + if not isinstance(channel_profile_ids, list): + channel_profile_ids = [channel_profile_ids] + + # Determine action based on semantics + if channel_profile_ids is None: + # Omitted -> add to all profiles (backward compatible) + profiles = ChannelProfile.objects.all() + ChannelProfileMembership.objects.bulk_create([ + ChannelProfileMembership(channel_profile=profile, channel=channel, enabled=True) + for profile in profiles + ]) + elif isinstance(channel_profile_ids, list) and len(channel_profile_ids) == 0: + # Empty array -> add to no profiles + pass + elif isinstance(channel_profile_ids, list) and 0 in channel_profile_ids: + # Sentinel 0 -> add to all profiles (explicit) + profiles = ChannelProfile.objects.all() + ChannelProfileMembership.objects.bulk_create([ + ChannelProfileMembership(channel_profile=profile, channel=channel, enabled=True) + for profile in profiles + ]) + else: + # Specific profile IDs + try: + channel_profiles = ChannelProfile.objects.filter(id__in=channel_profile_ids) + if len(channel_profiles) != len(channel_profile_ids): + missing_ids = set(channel_profile_ids) - set(channel_profiles.values_list('id', flat=True)) + return Response( + {"error": f"Channel profiles with IDs {list(missing_ids)} not found"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + 
ChannelProfileMembership.objects.bulk_create([ + ChannelProfileMembership( + channel_profile=profile, + channel=channel, + enabled=True + ) + for profile in channel_profiles + ]) + except Exception as e: + return Response( + {"error": f"Error creating profile memberships: {str(e)}"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + headers = self.get_success_headers(serializer.data) + return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers) + + def get_permissions(self): + if self.action in [ + "edit_bulk", + "assign", + "from_stream", + "from_stream_bulk", + "match_epg", + "set_epg", + "batch_set_epg", + ]: + return [IsAdmin()] + + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] def get_queryset(self): - qs = super().get_queryset().select_related( - 'channel_group', - 'logo', - 'epg_data', - 'stream_profile', - ).prefetch_related('streams') + qs = ( + super() + .get_queryset() + .select_related( + "channel_group", + "logo", + "epg_data", + "stream_profile", + ) + .prefetch_related("streams") + ) - channel_group = self.request.query_params.get('channel_group') + channel_group = self.request.query_params.get("channel_group") if channel_group: - group_names = channel_group.split(',') + group_names = channel_group.split(",") qs = qs.filter(channel_group__name__in=group_names) - return qs + filters = {} + q_filters = Q() + + channel_profile_id = self.request.query_params.get("channel_profile_id") + show_disabled_param = self.request.query_params.get("show_disabled", None) + only_streamless = self.request.query_params.get("only_streamless", None) + + if channel_profile_id: + try: + profile_id_int = int(channel_profile_id) + + if show_disabled_param is None: + # Show only enabled channels: channels that have a membership + # record for this profile with enabled=True + # Default is DISABLED (channels without membership are hidden) + filters["channelprofilemembership__channel_profile_id"] = profile_id_int + filters["channelprofilemembership__enabled"] = True + # If show_disabled is True, show all channels (no filtering needed) + + except (ValueError, TypeError): + # Ignore invalid profile id values + pass + + if only_streamless: + q_filters &= Q(streams__isnull=True) + + if self.request.user.user_level < 10: + filters["user_level__lte"] = self.request.user.user_level + + if filters: + qs = qs.filter(**filters) + if q_filters: + qs = qs.filter(q_filters) + + return qs.distinct() def get_serializer_context(self): context = super().get_serializer_context() - include_streams = self.request.query_params.get('include_streams', 'false') == 'true' - context['include_streams'] = include_streams + include_streams = ( + self.request.query_params.get("include_streams", "false") == "true" + ) + context["include_streams"] = include_streams return context - @action(detail=False, methods=['get'], url_path='ids') + @action(detail=False, methods=["patch"], url_path="edit/bulk") + def edit_bulk(self, request): + """ + Bulk edit channels efficiently. + Validates all updates first, then applies in a single transaction. 
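# Standalone restatement (illustration only) of the channel_profile_ids
# contract documented for create()/from_stream above; "ALL" and "NONE" stand in
# for the real ChannelProfile querysets the view resolves them to.
def resolve_profiles(channel_profile_ids):
    if channel_profile_ids is None:
        return "ALL"                          # omitted -> every profile (default)
    if not isinstance(channel_profile_ids, list):
        channel_profile_ids = [channel_profile_ids]
    if len(channel_profile_ids) == 0:
        return "NONE"                         # [] -> no profiles
    if 0 in channel_profile_ids:
        return "ALL"                          # sentinel 0 -> every profile, explicitly
    return channel_profile_ids                # otherwise: exactly these profile IDs

assert resolve_profiles(None) == "ALL"
assert resolve_profiles([]) == "NONE"
assert resolve_profiles(0) == "ALL"
assert resolve_profiles([1, 2]) == [1, 2]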
+ """ + data = request.data + if not isinstance(data, list): + return Response( + {"error": "Expected a list of channel updates"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Extract IDs and validate presence + channel_updates = {} + missing_ids = [] + + for i, channel_data in enumerate(data): + channel_id = channel_data.get("id") + if not channel_id: + missing_ids.append(f"Item {i}: Channel ID is required") + else: + channel_updates[channel_id] = channel_data + + if missing_ids: + return Response( + {"errors": missing_ids}, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Fetch all channels at once (one query) + channels_dict = { + c.id: c for c in Channel.objects.filter(id__in=channel_updates.keys()) + } + + # Validate and prepare updates + validated_updates = [] + errors = [] + + for channel_id, channel_data in channel_updates.items(): + channel = channels_dict.get(channel_id) + + if not channel: + errors.append({ + "channel_id": channel_id, + "error": "Channel not found" + }) + continue + + # Handle channel_group_id conversion + if 'channel_group_id' in channel_data: + group_id = channel_data['channel_group_id'] + if group_id is not None: + try: + channel_data['channel_group_id'] = int(group_id) + except (ValueError, TypeError): + channel_data['channel_group_id'] = None + + # Validate with serializer + serializer = ChannelSerializer( + channel, data=channel_data, partial=True + ) + + if serializer.is_valid(): + validated_updates.append((channel, serializer.validated_data)) + else: + errors.append({ + "channel_id": channel_id, + "errors": serializer.errors + }) + + if errors: + return Response( + {"errors": errors, "updated_count": len(validated_updates)}, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Apply all updates in a transaction + with transaction.atomic(): + for channel, validated_data in validated_updates: + for key, value in validated_data.items(): + setattr(channel, key, value) + + # Single bulk_update query instead of individual saves + channels_to_update = [channel for channel, _ in validated_updates] + if channels_to_update: + # Collect all unique field names from all updates + all_fields = set() + for _, validated_data in validated_updates: + all_fields.update(validated_data.keys()) + + # Only call bulk_update if there are fields to update + if all_fields: + Channel.objects.bulk_update( + channels_to_update, + fields=list(all_fields), + batch_size=100 + ) + + # Return the updated objects (already in memory) + serialized_channels = ChannelSerializer( + [channel for channel, _ in validated_updates], + many=True, + context=self.get_serializer_context() + ).data + + return Response({ + "message": f"Successfully updated {len(validated_updates)} channels", + "channels": serialized_channels + }) + + @action(detail=False, methods=["post"], url_path="set-names-from-epg") + def set_names_from_epg(self, request): + """ + Trigger a Celery task to set channel names from EPG data + """ + from .tasks import set_channels_names_from_epg + + data = request.data + channel_ids = data.get("channel_ids", []) + + if not channel_ids: + return Response( + {"error": "channel_ids is required"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + if not isinstance(channel_ids, list): + return Response( + {"error": "channel_ids must be a list"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Start the Celery task + task = set_channels_names_from_epg.delay(channel_ids) + + return Response({ + "message": f"Started EPG name setting task for {len(channel_ids)} channels", + "task_id": task.id, + 
"channel_count": len(channel_ids) + }) + + @action(detail=False, methods=["post"], url_path="set-logos-from-epg") + def set_logos_from_epg(self, request): + """ + Trigger a Celery task to set channel logos from EPG data + """ + from .tasks import set_channels_logos_from_epg + + data = request.data + channel_ids = data.get("channel_ids", []) + + if not channel_ids: + return Response( + {"error": "channel_ids is required"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + if not isinstance(channel_ids, list): + return Response( + {"error": "channel_ids must be a list"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Start the Celery task + task = set_channels_logos_from_epg.delay(channel_ids) + + return Response({ + "message": f"Started EPG logo setting task for {len(channel_ids)} channels", + "task_id": task.id, + "channel_count": len(channel_ids) + }) + + @action(detail=False, methods=["post"], url_path="set-tvg-ids-from-epg") + def set_tvg_ids_from_epg(self, request): + """ + Trigger a Celery task to set channel TVG-IDs from EPG data + """ + from .tasks import set_channels_tvg_ids_from_epg + + data = request.data + channel_ids = data.get("channel_ids", []) + + if not channel_ids: + return Response( + {"error": "channel_ids is required"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + if not isinstance(channel_ids, list): + return Response( + {"error": "channel_ids must be a list"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Start the Celery task + task = set_channels_tvg_ids_from_epg.delay(channel_ids) + + return Response({ + "message": f"Started EPG TVG-ID setting task for {len(channel_ids)} channels", + "task_id": task.id, + "channel_count": len(channel_ids) + }) + + @action(detail=False, methods=["get"], url_path="ids") def get_ids(self, request, *args, **kwargs): # Get the filtered queryset queryset = self.get_queryset() @@ -194,42 +748,57 @@ class ChannelViewSet(viewsets.ModelViewSet): queryset = self.filter_queryset(queryset) # Return only the IDs from the queryset - channel_ids = queryset.values_list('id', flat=True) + channel_ids = queryset.values_list("id", flat=True) # Return the response with the list of IDs return Response(list(channel_ids)) @swagger_auto_schema( - method='post', + method="post", operation_description="Auto-assign channel_number in bulk by an ordered list of channel IDs.", request_body=openapi.Schema( type=openapi.TYPE_OBJECT, - required=["channel_order"], + required=["channel_ids"], properties={ - "channel_order": openapi.Schema( + "starting_number": openapi.Schema( + type=openapi.TYPE_NUMBER, + description="Starting channel number to assign (can be decimal)", + ), + "channel_ids": openapi.Schema( type=openapi.TYPE_ARRAY, items=openapi.Items(type=openapi.TYPE_INTEGER), - description="List of channel IDs in the new order" - ) - } + description="Channel IDs to assign", + ), + }, ), - responses={200: "Channels have been auto-assigned!"} + responses={200: "Channels have been auto-assigned!"}, ) - @action(detail=False, methods=['post'], url_path='assign') + @action(detail=False, methods=["post"], url_path="assign") def assign(self, request): with transaction.atomic(): - channel_order = request.data.get('channel_order', []) - for order, channel_id in enumerate(channel_order, start=1): - Channel.objects.filter(id=channel_id).update(channel_number=order) + channel_ids = request.data.get("channel_ids", []) + # Ensure starting_number is processed as a float + try: + channel_num = float(request.data.get("starting_number", 1)) + except (ValueError, TypeError): + 
channel_num = 1.0 - return Response({"message": "Channels have been auto-assigned!"}, status=status.HTTP_200_OK) + for channel_id in channel_ids: + Channel.objects.filter(id=channel_id).update(channel_number=channel_num) + channel_num = channel_num + 1 + + return Response( + {"message": "Channels have been auto-assigned!"}, status=status.HTTP_200_OK + ) @swagger_auto_schema( - method='post', + method="post", operation_description=( "Create a new channel from an existing stream. " "If 'channel_number' is provided, it will be used (if available); " - "otherwise, the next available channel number is assigned." + "otherwise, the next available channel number is assigned. " + "If 'channel_profile_ids' is provided, the channel will only be added to those profiles. " + "Accepts either a single ID or an array of IDs." ), request_body=openapi.Schema( type=openapi.TYPE_OBJECT, @@ -239,68 +808,94 @@ class ChannelViewSet(viewsets.ModelViewSet): type=openapi.TYPE_INTEGER, description="ID of the stream to link" ), "channel_number": openapi.Schema( - type=openapi.TYPE_INTEGER, - description="(Optional) Desired channel number. Must not be in use." + type=openapi.TYPE_NUMBER, + description="(Optional) Desired channel number. Must not be in use.", ), "name": openapi.Schema( type=openapi.TYPE_STRING, description="Desired channel name" - ) - } + ), + "channel_profile_ids": openapi.Schema( + type=openapi.TYPE_ARRAY, + items=openapi.Items(type=openapi.TYPE_INTEGER), + description="(Optional) Channel profile ID(s). Behavior: omitted = add to ALL profiles (default); empty array [] = add to NO profiles; [0] = add to ALL profiles (explicit); [1,2,...] = add only to specified profiles." + ), + }, ), - responses={201: ChannelSerializer()} + responses={201: ChannelSerializer()}, ) - @action(detail=False, methods=['post'], url_path='from-stream') + @action(detail=False, methods=["post"], url_path="from-stream") def from_stream(self, request): - stream_id = request.data.get('stream_id') + stream_id = request.data.get("stream_id") if not stream_id: - return Response({"error": "Missing stream_id"}, status=status.HTTP_400_BAD_REQUEST) + return Response( + {"error": "Missing stream_id"}, status=status.HTTP_400_BAD_REQUEST + ) stream = get_object_or_404(Stream, pk=stream_id) channel_group = stream.channel_group - name = request.data.get('name') + name = request.data.get("name") + + if name is None: name = stream.name # Check if client provided a channel_number; if not, auto-assign one. - stream_custom_props = json.loads(stream.custom_properties) if stream.custom_properties else {} - - channel_number = None - if 'tvg-chno' in stream_custom_props: - channel_number = int(stream_custom_props['tvg-chno']) - elif 'channel-number' in stream_custom_props: - channel_number = int(stream_custom_props['channel-number']) + stream_custom_props = stream.custom_properties or {} + channel_number = request.data.get("channel_number") if channel_number is None: - provided_number = request.data.get('channel_number') - if provided_number is None: - channel_number = Channel.get_next_available_channel_number() - else: - try: - channel_number = int(provided_number) - except ValueError: - return Response({"error": "channel_number must be an integer."}, status=status.HTTP_400_BAD_REQUEST) - # If the provided number is already used, return an error. - if Channel.objects.filter(channel_number=channel_number).exists(): - return Response( - {"error": f"Channel number {channel_number} is already in use. 
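# Plain-Python restatement of the assign endpoint above: numbers are handed out
# sequentially starting from starting_number (decimals allowed), falling back
# to 1.0 when the value is missing or not numeric.
def assign_numbers(channel_ids, starting_number=1):
    try:
        num = float(starting_number)
    except (ValueError, TypeError):
        num = 1.0
    return {cid: num + offset for offset, cid in enumerate(channel_ids)}

print(assign_numbers([42, 43, 44], "100.5"))
# -> {42: 100.5, 43: 101.5, 44: 102.5}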
Please choose a different number."}, - status=status.HTTP_400_BAD_REQUEST - ) + # Channel number not provided by client, check stream properties or auto-assign + if "tvg-chno" in stream_custom_props: + channel_number = float(stream_custom_props["tvg-chno"]) + elif "channel-number" in stream_custom_props: + channel_number = float(stream_custom_props["channel-number"]) + elif "num" in stream_custom_props: + channel_number = float(stream_custom_props["num"]) + elif channel_number == 0: + # Special case: 0 means ignore provider numbers and auto-assign + channel_number = None + + if channel_number is None: + # Still None, auto-assign the next available channel number + channel_number = Channel.get_next_available_channel_number() + try: + channel_number = float(channel_number) + except ValueError: + return Response( + {"error": "channel_number must be an integer."}, + status=status.HTTP_400_BAD_REQUEST, + ) + # If the provided number is already used, return an error. + if Channel.objects.filter(channel_number=channel_number).exists(): + channel_number = Channel.get_next_available_channel_number(channel_number) + # Get the tvc_guide_stationid from custom properties if it exists + tvc_guide_stationid = None + if "tvc-guide-stationid" in stream_custom_props: + tvc_guide_stationid = stream_custom_props["tvc-guide-stationid"] channel_data = { - 'channel_number': channel_number, - 'name': name, - 'tvg_id': stream.tvg_id, - 'channel_group_id': channel_group.id, - 'streams': [stream_id], + "channel_number": channel_number, + "name": name, + "tvg_id": stream.tvg_id, + "tvc_guide_stationid": tvc_guide_stationid, + "streams": [stream_id], } + # Only add channel_group_id if the stream has a channel group + if channel_group: + channel_data["channel_group_id"] = channel_group.id + if stream.logo_url: - logo, _ = Logo.objects.get_or_create(url=stream.logo_url, defaults={ - "name": stream.name or stream.tvg_id - }) - channel_data["logo_id"] = logo.id + # Import validation function + from apps.channels.tasks import validate_logo_url + validated_logo_url = validate_logo_url(stream.logo_url) + if validated_logo_url: + logo, _ = Logo.objects.get_or_create( + url=validated_logo_url, defaults={"name": stream.name or stream.tvg_id} + ) + channel_data["logo_id"] = logo.id # Attempt to find existing EPGs with the same tvg-id epgs = EPGData.objects.filter(tvg_id=stream.tvg_id) @@ -309,310 +904,389 @@ class ChannelViewSet(viewsets.ModelViewSet): serializer = self.get_serializer(data=channel_data) serializer.is_valid(raise_exception=True) - channel = serializer.save() - channel.streams.add(stream) + + with transaction.atomic(): + channel = serializer.save() + channel.streams.add(stream) + + # Handle channel profile membership + # Semantics: + # - Omitted (None): add to ALL profiles (backward compatible default) + # - Empty array []: add to NO profiles + # - Sentinel [0] or 0: add to ALL profiles (explicit) + # - [1,2,...]: add to specified profile IDs only + channel_profile_ids = request.data.get("channel_profile_ids") + if channel_profile_ids is not None: + # Normalize single ID to array + if not isinstance(channel_profile_ids, list): + channel_profile_ids = [channel_profile_ids] + + # Determine action based on semantics + if channel_profile_ids is None: + # Omitted -> add to all profiles (backward compatible) + profiles = ChannelProfile.objects.all() + ChannelProfileMembership.objects.bulk_create([ + ChannelProfileMembership(channel_profile=profile, channel=channel, enabled=True) + for profile in profiles + ]) + elif 
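# Illustration of the lookup order used by from_stream above when the client
# does not supply a channel_number: provider-side hints win, otherwise the next
# free number is used. next_available is a stand-in for
# Channel.get_next_available_channel_number().
def resolve_channel_number(custom_props, requested=None, next_available=lambda: 1.0):
    if requested is not None:
        return float(requested)
    for key in ("tvg-chno", "channel-number", "num"):
        if key in custom_props:
            return float(custom_props[key])
    return float(next_available())

print(resolve_channel_number({"num": "12"}))     # -> 12.0
print(resolve_channel_number({}, requested=7))   # -> 7.0
print(resolve_channel_number({}))                # -> 1.0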
isinstance(channel_profile_ids, list) and len(channel_profile_ids) == 0: + # Empty array -> add to no profiles + pass + elif isinstance(channel_profile_ids, list) and 0 in channel_profile_ids: + # Sentinel 0 -> add to all profiles (explicit) + profiles = ChannelProfile.objects.all() + ChannelProfileMembership.objects.bulk_create([ + ChannelProfileMembership(channel_profile=profile, channel=channel, enabled=True) + for profile in profiles + ]) + else: + # Specific profile IDs + try: + channel_profiles = ChannelProfile.objects.filter(id__in=channel_profile_ids) + if len(channel_profiles) != len(channel_profile_ids): + missing_ids = set(channel_profile_ids) - set(channel_profiles.values_list('id', flat=True)) + return Response( + {"error": f"Channel profiles with IDs {list(missing_ids)} not found"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + ChannelProfileMembership.objects.bulk_create([ + ChannelProfileMembership( + channel_profile=profile, + channel=channel, + enabled=True + ) + for profile in channel_profiles + ]) + except Exception as e: + return Response( + {"error": f"Error creating profile memberships: {str(e)}"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Send WebSocket notification for single channel creation + from core.utils import send_websocket_update + send_websocket_update('updates', 'update', { + 'type': 'channels_created', + 'count': 1, + 'channel_id': channel.id, + 'channel_name': channel.name, + 'channel_number': channel.channel_number + }) + return Response(serializer.data, status=status.HTTP_201_CREATED) @swagger_auto_schema( - method='post', + method="post", operation_description=( - "Bulk create channels from existing streams. For each object, if 'channel_number' is provided, " - "it is used (if available); otherwise, the next available number is auto-assigned. " - "Each object must include 'stream_id' and 'name'." + "Asynchronously bulk create channels from stream IDs. " + "Returns a task ID to track progress via WebSocket. " + "This is the recommended approach for large bulk operations." ), request_body=openapi.Schema( - type=openapi.TYPE_ARRAY, - items=openapi.Schema( - type=openapi.TYPE_OBJECT, - required=["stream_id"], - properties={ - "stream_id": openapi.Schema( - type=openapi.TYPE_INTEGER, description="ID of the stream to link" - ), - "channel_number": openapi.Schema( - type=openapi.TYPE_INTEGER, - description="(Optional) Desired channel number. Must not be in use." - ), - "name": openapi.Schema( - type=openapi.TYPE_STRING, description="Desired channel name" - ) - } - ) + type=openapi.TYPE_OBJECT, + required=["stream_ids"], + properties={ + "stream_ids": openapi.Schema( + type=openapi.TYPE_ARRAY, + items=openapi.Items(type=openapi.TYPE_INTEGER), + description="List of stream IDs to create channels from" + ), + "channel_profile_ids": openapi.Schema( + type=openapi.TYPE_ARRAY, + items=openapi.Items(type=openapi.TYPE_INTEGER), + description="(Optional) Channel profile ID(s). Behavior: omitted = add to ALL profiles (default); empty array [] = add to NO profiles; [0] = add to ALL profiles (explicit); [1,2,...] = add only to specified profiles." 
+ ), + "starting_channel_number": openapi.Schema( + type=openapi.TYPE_INTEGER, + description="(Optional) Starting channel number mode: null=use provider numbers, 0=lowest available, other=start from specified number" + ), + }, ), - responses={201: "Bulk channels created"} + responses={202: "Task started successfully"}, ) - @action(detail=False, methods=['post'], url_path='from-stream/bulk') + @action(detail=False, methods=["post"], url_path="from-stream/bulk") def from_stream_bulk(self, request): - data_list = request.data - if not isinstance(data_list, list): - return Response({"error": "Expected a list of channel objects"}, status=status.HTTP_400_BAD_REQUEST) + from .tasks import bulk_create_channels_from_streams - created_channels = [] - errors = [] + stream_ids = request.data.get("stream_ids", []) + channel_profile_ids = request.data.get("channel_profile_ids") + starting_channel_number = request.data.get("starting_channel_number") - # Gather current used numbers once. - used_numbers = set(Channel.objects.all().values_list('channel_number', flat=True)) - next_number = 1 + if not stream_ids: + return Response( + {"error": "stream_ids is required and cannot be empty"}, + status=status.HTTP_400_BAD_REQUEST, + ) - def get_auto_number(): - nonlocal next_number - while next_number in used_numbers: - next_number += 1 - used_numbers.add(next_number) - return next_number + if not isinstance(stream_ids, list): + return Response( + {"error": "stream_ids must be a list of integers"}, + status=status.HTTP_400_BAD_REQUEST, + ) - logos_to_create = [] - channels_to_create = [] - streams_map = [] - logo_map = [] - for item in data_list: - stream_id = item.get('stream_id') - if not all([stream_id]): - errors.append({"item": item, "error": "Missing required fields: stream_id and name are required."}) - continue + # Normalize channel_profile_ids to array if single ID provided + if channel_profile_ids is not None: + if not isinstance(channel_profile_ids, list): + channel_profile_ids = [channel_profile_ids] - try: - stream = get_object_or_404(Stream, pk=stream_id) - except Exception as e: - errors.append({"item": item, "error": str(e)}) - continue + # Start the async task + task = bulk_create_channels_from_streams.delay(stream_ids, channel_profile_ids, starting_channel_number) - name = item.get('name') - if name is None: - name = stream.name - - channel_group = stream.channel_group - - stream_custom_props = json.loads(stream.custom_properties) if stream.custom_properties else {} - - channel_number = None - if 'tvg-chno' in stream_custom_props: - channel_number = int(stream_custom_props['tvg-chno']) - elif 'channel-number' in stream_custom_props: - channel_number = int(stream_custom_props['channel-number']) - - # Determine channel number: if provided, use it (if free); else auto assign. 
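# Hypothetical call (route and auth assumed) against the async bulk endpoint
# above: it returns 202 immediately with a Celery task id, and progress is then
# reported over the websocket channel rather than in this response.
import requests

resp = requests.post(
    "http://localhost:9191/api/channels/channels/from-stream/bulk/",  # assumed route
    headers={"Authorization": "Bearer <token>"},                      # assumed auth
    json={"stream_ids": [101, 102, 103], "starting_channel_number": 0},
    timeout=10,
)
assert resp.status_code == 202
print(resp.json()["task_id"])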
- if channel_number is None: - provided_number = item.get('channel_number') - if provided_number is None: - channel_number = get_auto_number() - else: - try: - channel_number = int(provided_number) - except ValueError: - errors.append({"item": item, "error": "channel_number must be an integer."}) - continue - if channel_number in used_numbers or Channel.objects.filter(channel_number=channel_number).exists(): - errors.append({"item": item, "error": f"Channel number {channel_number} is already in use."}) - continue - used_numbers.add(channel_number) - - channel_data = { - "channel_number": channel_number, - "name": name, - "tvg_id": stream.tvg_id, - "channel_group_id": channel_group.id, - } - - # Attempt to find existing EPGs with the same tvg-id - epgs = EPGData.objects.filter(tvg_id=stream.tvg_id) - if epgs: - channel_data["epg_data_id"] = epgs.first().id - - serializer = self.get_serializer(data=channel_data) - if serializer.is_valid(): - validated_data = serializer.validated_data - channel = Channel(**validated_data) - channels_to_create.append(channel) - - streams_map.append([stream_id]) - if stream.logo_url: - logos_to_create.append(Logo( - url=stream.logo_url, - name=stream.name or stream.tvg_id, - )) - logo_map.append(stream.logo_url) - else: - logo_map.append(None) - - # channel = serializer.save() - # channel.streams.add(stream) - # created_channels.append(serializer.data) - else: - errors.append({"item": item, "error": serializer.errors}) - - if logos_to_create: - Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True) - - channel_logos = {logo.url: logo for logo in Logo.objects.filter(url__in=[url for url in logo_map if url is not None])} - - profiles = ChannelProfile.objects.all() - channel_profile_memberships = [] - if channels_to_create: - with transaction.atomic(): - created_channels = Channel.objects.bulk_create(channels_to_create) - - update = [] - for channel, stream_ids, logo_url in zip(created_channels, streams_map, logo_map): - if logo_url: - channel.logo = channel_logos[logo_url] - update.append(channel) - channel_profile_memberships = channel_profile_memberships + [ - ChannelProfileMembership(channel_profile=profile, channel=channel) - for profile in profiles - ] - - ChannelProfileMembership.objects.bulk_create(channel_profile_memberships) - Channel.objects.bulk_update(update, ['logo']) - - for channel, stream_ids in zip(created_channels, streams_map): - channel.streams.set(stream_ids) - - response_data = {"created": ChannelSerializer(created_channels, many=True).data} - if errors: - response_data["errors"] = errors - - return Response(response_data, status=status.HTTP_201_CREATED) + return Response({ + "task_id": task.id, + "message": f"Bulk channel creation task started for {len(stream_ids)} streams", + "stream_count": len(stream_ids), + "status": "started" + }, status=status.HTTP_202_ACCEPTED) # ───────────────────────────────────────────────────────── # 6) EPG Fuzzy Matching # ───────────────────────────────────────────────────────── @swagger_auto_schema( - method='post', - operation_description="Kick off a Celery task that tries to fuzzy-match channels with EPG data.", - responses={202: "EPG matching task initiated"} + method="post", + operation_description="Kick off a Celery task that tries to fuzzy-match channels with EPG data. 
If channel_ids are provided, only those channels will be processed.", + request_body=openapi.Schema( + type=openapi.TYPE_OBJECT, + properties={ + 'channel_ids': openapi.Schema( + type=openapi.TYPE_ARRAY, + items=openapi.Schema(type=openapi.TYPE_INTEGER), + description='List of channel IDs to process. If empty or not provided, all channels without EPG will be processed.' + ) + } + ), + responses={202: "EPG matching task initiated"}, ) - @action(detail=False, methods=['post'], url_path='match-epg') + @action(detail=False, methods=["post"], url_path="match-epg") def match_epg(self, request): - match_epg_channels.delay() - return Response({"message": "EPG matching task initiated."}, status=status.HTTP_202_ACCEPTED) + # Get channel IDs from request body if provided + channel_ids = request.data.get('channel_ids', []) + + if channel_ids: + # Process only selected channels + from .tasks import match_selected_channels_epg + match_selected_channels_epg.delay(channel_ids) + message = f"EPG matching task initiated for {len(channel_ids)} selected channel(s)." + else: + # Process all channels without EPG (original behavior) + match_epg_channels.delay() + message = "EPG matching task initiated for all channels without EPG." + + return Response( + {"message": message}, status=status.HTTP_202_ACCEPTED + ) + + @swagger_auto_schema( + method="post", + operation_description="Try to auto-match this specific channel with EPG data.", + responses={200: "EPG matching completed", 202: "EPG matching task initiated"}, + ) + @action(detail=True, methods=["post"], url_path="match-epg") + def match_channel_epg(self, request, pk=None): + channel = self.get_object() + + # Import the matching logic + from apps.channels.tasks import match_single_channel_epg + + try: + # Try to match this specific channel - call synchronously for immediate response + result = match_single_channel_epg.apply_async(args=[channel.id]).get(timeout=30) + + # Refresh the channel from DB to get any updates + channel.refresh_from_db() + + return Response({ + "message": result.get("message", "Channel matching completed"), + "matched": result.get("matched", False), + "channel": self.get_serializer(channel).data + }) + except Exception as e: + return Response({"error": str(e)}, status=400) # ───────────────────────────────────────────────────────── # 7) Set EPG and Refresh # ───────────────────────────────────────────────────────── @swagger_auto_schema( - method='post', + method="post", operation_description="Set EPG data for a channel and refresh program data", request_body=openapi.Schema( type=openapi.TYPE_OBJECT, - required=['epg_data_id'], + required=["epg_data_id"], properties={ - 'epg_data_id': openapi.Schema( + "epg_data_id": openapi.Schema( type=openapi.TYPE_INTEGER, description="EPG data ID to link" ) - } + }, ), - responses={200: "EPG data linked and refresh triggered"} + responses={200: "EPG data linked and refresh triggered"}, ) - @action(detail=True, methods=['post'], url_path='set-epg') + @action(detail=True, methods=["post"], url_path="set-epg") def set_epg(self, request, pk=None): channel = self.get_object() - epg_data_id = request.data.get('epg_data_id') + epg_data_id = request.data.get("epg_data_id") # Handle removing EPG link - if epg_data_id in (None, '', '0', 0): + if epg_data_id in (None, "", "0", 0): channel.epg_data = None - channel.save(update_fields=['epg_data']) - return Response({"message": f"EPG data removed from channel {channel.name}"}) + channel.save(update_fields=["epg_data"]) + return Response( + {"message": f"EPG data 
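# Generic Celery sketch (hypothetical app and task; the real task lives in
# apps.channels.tasks) of the pattern match_channel_epg uses above: queue the
# work with apply_async() and block on .get(timeout=...) so the HTTP response
# can report whether the single channel was matched. A result backend and a
# running worker are required for .get() to return.
from celery import Celery

app = Celery("sketch", broker="memory://", backend="cache+memory://")

@app.task
def match_one(channel_id):
    return {"matched": False, "message": f"no EPG candidate for channel {channel_id}"}

# With a worker running:
# result = match_one.apply_async(args=[42]).get(timeout=30)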
removed from channel {channel.name}"} + ) try: # Get the EPG data object from apps.epg.models import EPGData + epg_data = EPGData.objects.get(pk=epg_data_id) # Set the EPG data and save channel.epg_data = epg_data - channel.save(update_fields=['epg_data']) + channel.save(update_fields=["epg_data"]) - # Explicitly trigger program refresh for this EPG - from apps.epg.tasks import parse_programs_for_tvg_id - task_result = parse_programs_for_tvg_id.delay(epg_data.id) + # Only trigger program refresh for non-dummy EPG sources + status_message = None + if epg_data.epg_source.source_type != 'dummy': + # Explicitly trigger program refresh for this EPG + from apps.epg.tasks import parse_programs_for_tvg_id - # Prepare response with task status info - status_message = "EPG refresh queued" - if task_result.result == "Task already running": - status_message = "EPG refresh already in progress" + task_result = parse_programs_for_tvg_id.delay(epg_data.id) - return Response({ - "message": f"EPG data set to {epg_data.tvg_id} for channel {channel.name}. {status_message}.", - "channel": self.get_serializer(channel).data, - "task_status": status_message - }) + # Prepare response with task status info + status_message = "EPG refresh queued" + if task_result.result == "Task already running": + status_message = "EPG refresh already in progress" + + # Build response message + message = f"EPG data set to {epg_data.tvg_id} for channel {channel.name}" + if status_message: + message += f". {status_message}" + + return Response( + { + "message": message, + "channel": self.get_serializer(channel).data, + "task_status": status_message, + } + ) except Exception as e: return Response({"error": str(e)}, status=400) @swagger_auto_schema( - method='post', + method="post", operation_description="Associate multiple channels with EPG data without triggering a full refresh", request_body=openapi.Schema( type=openapi.TYPE_OBJECT, properties={ - 'associations': openapi.Schema( + "associations": openapi.Schema( type=openapi.TYPE_ARRAY, items=openapi.Schema( type=openapi.TYPE_OBJECT, properties={ - 'channel_id': openapi.Schema(type=openapi.TYPE_INTEGER), - 'epg_data_id': openapi.Schema(type=openapi.TYPE_INTEGER) - } - ) + "channel_id": openapi.Schema(type=openapi.TYPE_INTEGER), + "epg_data_id": openapi.Schema(type=openapi.TYPE_INTEGER), + }, + ), ) - } + }, ), - responses={200: "EPG data linked for multiple channels"} + responses={200: "EPG data linked for multiple channels"}, ) - @action(detail=False, methods=['post'], url_path='batch-set-epg') + @action(detail=False, methods=["post"], url_path="batch-set-epg") def batch_set_epg(self, request): """Efficiently associate multiple channels with EPG data at once.""" - associations = request.data.get('associations', []) - channels_updated = 0 - programs_refreshed = 0 + associations = request.data.get("associations", []) + + if not associations: + return Response( + {"error": "associations list is required"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Extract channel IDs upfront + channel_updates = {} unique_epg_ids = set() for assoc in associations: - channel_id = assoc.get('channel_id') - epg_data_id = assoc.get('epg_data_id') + channel_id = assoc.get("channel_id") + epg_data_id = assoc.get("epg_data_id") if not channel_id: continue - try: - # Get the channel - channel = Channel.objects.get(id=channel_id) + channel_updates[channel_id] = epg_data_id + if epg_data_id: + unique_epg_ids.add(epg_data_id) - # Set the EPG data - channel.epg_data_id = epg_data_id - 
channel.save(update_fields=['epg_data']) - channels_updated += 1 + # Batch fetch all channels (single query) + channels_dict = { + c.id: c for c in Channel.objects.filter(id__in=channel_updates.keys()) + } - # Track unique EPG data IDs - if epg_data_id: - unique_epg_ids.add(epg_data_id) - - except Channel.DoesNotExist: + # Collect channels to update + channels_to_update = [] + for channel_id, epg_data_id in channel_updates.items(): + if channel_id not in channels_dict: logger.error(f"Channel with ID {channel_id} not found") - except Exception as e: - logger.error(f"Error setting EPG data for channel {channel_id}: {str(e)}") + continue - # Trigger program refresh for unique EPG data IDs + channel = channels_dict[channel_id] + channel.epg_data_id = epg_data_id + channels_to_update.append(channel) + + # Bulk update all channels (single query) + if channels_to_update: + with transaction.atomic(): + Channel.objects.bulk_update( + channels_to_update, + fields=["epg_data_id"], + batch_size=100 + ) + + channels_updated = len(channels_to_update) + + # Trigger program refresh for unique EPG data IDs (skip dummy EPGs) from apps.epg.tasks import parse_programs_for_tvg_id + from apps.epg.models import EPGData + + # Batch fetch EPG data (single query) + epg_data_dict = { + epg.id: epg + for epg in EPGData.objects.filter(id__in=unique_epg_ids).select_related('epg_source') + } + + programs_refreshed = 0 for epg_id in unique_epg_ids: - parse_programs_for_tvg_id.delay(epg_id) - programs_refreshed += 1 + epg_data = epg_data_dict.get(epg_id) + if not epg_data: + logger.error(f"EPGData with ID {epg_id} not found") + continue + # Only refresh non-dummy EPG sources + if epg_data.epg_source.source_type != 'dummy': + parse_programs_for_tvg_id.delay(epg_id) + programs_refreshed += 1 + return Response( + { + "success": True, + "channels_updated": channels_updated, + "programs_refreshed": programs_refreshed, + } + ) - return Response({ - 'success': True, - 'channels_updated': channels_updated, - 'programs_refreshed': programs_refreshed - }) # ───────────────────────────────────────────────────────── # 4) Bulk Delete Streams # ───────────────────────────────────────────────────────── class BulkDeleteStreamsAPIView(APIView): - permission_classes = [IsAuthenticated] + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] @swagger_auto_schema( operation_description="Bulk delete streams by ID", @@ -623,23 +1297,32 @@ class BulkDeleteStreamsAPIView(APIView): "stream_ids": openapi.Schema( type=openapi.TYPE_ARRAY, items=openapi.Items(type=openapi.TYPE_INTEGER), - description="Stream IDs to delete" + description="Stream IDs to delete", ) }, ), - responses={204: "Streams deleted"} + responses={204: "Streams deleted"}, ) def delete(self, request, *args, **kwargs): - stream_ids = request.data.get('stream_ids', []) + stream_ids = request.data.get("stream_ids", []) Stream.objects.filter(id__in=stream_ids).delete() - return Response({"message": "Streams deleted successfully!"}, status=status.HTTP_204_NO_CONTENT) + return Response( + {"message": "Streams deleted successfully!"}, + status=status.HTTP_204_NO_CONTENT, + ) # ───────────────────────────────────────────────────────── # 5) Bulk Delete Channels # ───────────────────────────────────────────────────────── class BulkDeleteChannelsAPIView(APIView): - permission_classes = [IsAuthenticated] + def get_permissions(self): + try: + return [ + perm() for perm in 
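# Example request body (values illustrative) for batch-set-epg above: a null
# epg_data_id clears the channel's link, duplicates collapse into the
# unique-EPG set, and only non-dummy sources get a refresh task queued.
payload = {
    "associations": [
        {"channel_id": 1, "epg_data_id": 55},
        {"channel_id": 2, "epg_data_id": 55},    # same EPG -> at most one refresh
        {"channel_id": 3, "epg_data_id": None},  # clears channel 3's EPG link
    ]
}
unique_epg_ids = {a["epg_data_id"] for a in payload["associations"] if a["epg_data_id"]}
print(unique_epg_ids)   # -> {55}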
permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] @swagger_auto_schema( operation_description="Bulk delete channels by ID", @@ -650,69 +1333,368 @@ class BulkDeleteChannelsAPIView(APIView): "channel_ids": openapi.Schema( type=openapi.TYPE_ARRAY, items=openapi.Items(type=openapi.TYPE_INTEGER), - description="Channel IDs to delete" + description="Channel IDs to delete", ) }, ), - responses={204: "Channels deleted"} + responses={204: "Channels deleted"}, ) def delete(self, request): - channel_ids = request.data.get('channel_ids', []) + channel_ids = request.data.get("channel_ids", []) Channel.objects.filter(id__in=channel_ids).delete() - return Response({"message": "Channels deleted"}, status=status.HTTP_204_NO_CONTENT) + return Response( + {"message": "Channels deleted"}, status=status.HTTP_204_NO_CONTENT + ) + + +# ───────────────────────────────────────────────────────── +# 6) Bulk Delete Logos +# ───────────────────────────────────────────────────────── +class BulkDeleteLogosAPIView(APIView): + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + + @swagger_auto_schema( + operation_description="Bulk delete logos by ID", + request_body=openapi.Schema( + type=openapi.TYPE_OBJECT, + required=["logo_ids"], + properties={ + "logo_ids": openapi.Schema( + type=openapi.TYPE_ARRAY, + items=openapi.Items(type=openapi.TYPE_INTEGER), + description="Logo IDs to delete", + ) + }, + ), + responses={204: "Logos deleted"}, + ) + def delete(self, request): + logo_ids = request.data.get("logo_ids", []) + delete_files = request.data.get("delete_files", False) + + # Get logos and their usage info before deletion + logos_to_delete = Logo.objects.filter(id__in=logo_ids) + total_channels_affected = 0 + local_files_deleted = 0 + + for logo in logos_to_delete: + # Handle file deletion for local files + if delete_files and logo.url and logo.url.startswith('/data/logos'): + try: + if os.path.exists(logo.url): + os.remove(logo.url) + local_files_deleted += 1 + logger.info(f"Deleted local logo file: {logo.url}") + except Exception as e: + logger.error(f"Failed to delete logo file {logo.url}: {str(e)}") + return Response( + {"error": f"Failed to delete logo file {logo.url}: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR + ) + + if logo.channels.exists(): + channel_count = logo.channels.count() + total_channels_affected += channel_count + # Remove logo from channels + logo.channels.update(logo=None) + logger.info(f"Removed logo {logo.name} from {channel_count} channels before deletion") + + # Delete logos + deleted_count = logos_to_delete.delete()[0] + + message = f"Successfully deleted {deleted_count} logos" + if total_channels_affected > 0: + message += f" and removed them from {total_channels_affected} channels" + if local_files_deleted > 0: + message += f" and deleted {local_files_deleted} local files" + + return Response( + {"message": message}, + status=status.HTTP_204_NO_CONTENT + ) + + +class CleanupUnusedLogosAPIView(APIView): + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + + @swagger_auto_schema( + operation_description="Delete all channel logos that are not used by any channels", + request_body=openapi.Schema( + type=openapi.TYPE_OBJECT, + properties={ + "delete_files": openapi.Schema( + type=openapi.TYPE_BOOLEAN, + 
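# Hypothetical client call (route and auth assumed) for the bulk logo delete
# view above; note the JSON body on a DELETE request, which requests supports
# via the json= keyword, and the optional delete_files flag for local files.
import requests

resp = requests.delete(
    "http://localhost:9191/api/channels/logos/bulk-delete/",   # assumed route
    headers={"Authorization": "Bearer <token>"},               # assumed auth
    json={"logo_ids": [4, 5, 6], "delete_files": True},
    timeout=10,
)
print(resp.status_code)   # 204 when the logos (and any local files) were removed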
description="Whether to delete local logo files from disk", + default=False + ) + }, + ), + responses={200: "Cleanup completed"}, + ) + def post(self, request): + """Delete all channel logos with no channel associations""" + delete_files = request.data.get("delete_files", False) + + # Find logos that are not used by any channels + unused_logos = Logo.objects.filter(channels__isnull=True) + deleted_count = unused_logos.count() + logo_names = list(unused_logos.values_list('name', flat=True)) + local_files_deleted = 0 + + # Handle file deletion for local files if requested + if delete_files: + for logo in unused_logos: + if logo.url and logo.url.startswith('/data/logos'): + try: + if os.path.exists(logo.url): + os.remove(logo.url) + local_files_deleted += 1 + logger.info(f"Deleted local logo file: {logo.url}") + except Exception as e: + logger.error(f"Failed to delete logo file {logo.url}: {str(e)}") + return Response( + {"error": f"Failed to delete logo file {logo.url}: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR + ) + + # Delete the unused logos + unused_logos.delete() + + message = f"Successfully deleted {deleted_count} unused logos" + if local_files_deleted > 0: + message += f" and deleted {local_files_deleted} local files" + + return Response({ + "message": message, + "deleted_count": deleted_count, + "deleted_logos": logo_names, + "local_files_deleted": local_files_deleted + }) + + +class LogoPagination(PageNumberPagination): + page_size = 50 # Default page size to match frontend default + page_size_query_param = "page_size" # Allow clients to specify page size + max_page_size = 1000 # Prevent excessive page sizes + + def paginate_queryset(self, queryset, request, view=None): + # Check if pagination should be disabled for specific requests + if request.query_params.get('no_pagination') == 'true': + return None # disables pagination, returns full queryset + + return super().paginate_queryset(queryset, request, view) + class LogoViewSet(viewsets.ModelViewSet): - permission_classes = [IsAuthenticated] queryset = Logo.objects.all() serializer_class = LogoSerializer - parser_classes = (MultiPartParser, FormParser) + pagination_class = LogoPagination + parser_classes = (MultiPartParser, FormParser, JSONParser) - @action(detail=False, methods=['post']) + def get_permissions(self): + if self.action in ["upload"]: + return [IsAdmin()] + + if self.action in ["cache"]: + return [AllowAny()] + + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + + def get_queryset(self): + """Optimize queryset with prefetch and add filtering""" + # Start with basic prefetch for channels + queryset = Logo.objects.prefetch_related('channels').order_by('name') + + # Filter by specific IDs + ids = self.request.query_params.getlist('ids') + if ids: + try: + # Convert string IDs to integers and filter + id_list = [int(id_str) for id_str in ids if id_str.isdigit()] + if id_list: + queryset = queryset.filter(id__in=id_list) + except (ValueError, TypeError): + pass # Invalid IDs, return empty queryset + queryset = Logo.objects.none() + + # Filter by usage + used_filter = self.request.query_params.get('used', None) + if used_filter == 'true': + # Logo is used if it has any channels + queryset = queryset.filter(channels__isnull=False).distinct() + elif used_filter == 'false': + # Logo is unused if it has no channels + queryset = queryset.filter(channels__isnull=True) + + # Filter by name + name_filter = 
self.request.query_params.get('name', None) + if name_filter: + queryset = queryset.filter(name__icontains=name_filter) + + return queryset + + def create(self, request, *args, **kwargs): + """Create a new logo entry""" + serializer = self.get_serializer(data=request.data) + if serializer.is_valid(): + logo = serializer.save() + return Response(self.get_serializer(logo).data, status=status.HTTP_201_CREATED) + return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) + + def update(self, request, *args, **kwargs): + """Update an existing logo""" + partial = kwargs.pop('partial', False) + instance = self.get_object() + serializer = self.get_serializer(instance, data=request.data, partial=partial) + if serializer.is_valid(): + logo = serializer.save() + return Response(self.get_serializer(logo).data) + return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) + + def destroy(self, request, *args, **kwargs): + """Delete a logo and remove it from any channels using it""" + logo = self.get_object() + delete_file = request.query_params.get('delete_file', 'false').lower() == 'true' + + # Check if it's a local file that should be deleted + if delete_file and logo.url and logo.url.startswith('/data/logos'): + try: + if os.path.exists(logo.url): + os.remove(logo.url) + logger.info(f"Deleted local logo file: {logo.url}") + except Exception as e: + logger.error(f"Failed to delete logo file {logo.url}: {str(e)}") + return Response( + {"error": f"Failed to delete logo file: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR + ) + + # Instead of preventing deletion, remove the logo from channels + if logo.channels.exists(): + channel_count = logo.channels.count() + logo.channels.update(logo=None) + logger.info(f"Removed logo {logo.name} from {channel_count} channels before deletion") + + return super().destroy(request, *args, **kwargs) + + @action(detail=False, methods=["post"]) def upload(self, request): - if 'file' not in request.FILES: - return Response({'error': 'No file uploaded'}, status=status.HTTP_400_BAD_REQUEST) + if "file" not in request.FILES: + return Response( + {"error": "No file uploaded"}, status=status.HTTP_400_BAD_REQUEST + ) + + file = request.FILES["file"] + + # Validate file + try: + from dispatcharr.utils import validate_logo_file + validate_logo_file(file) + except Exception as e: + return Response( + {"error": str(e)}, status=status.HTTP_400_BAD_REQUEST + ) - file = request.FILES['file'] file_name = file.name - file_path = os.path.join('/data/logos', file_name) + file_path = os.path.join("/data/logos", file_name) os.makedirs(os.path.dirname(file_path), exist_ok=True) - with open(file_path, 'wb+') as destination: + with open(file_path, "wb+") as destination: for chunk in file.chunks(): destination.write(chunk) - logo, _ = Logo.objects.get_or_create(url=file_path, defaults={ - "name": file_name, - }) + # Mark file as processed in Redis to prevent file scanner notifications + try: + redis_client = RedisClient.get_client() + if redis_client: + # Use the same key format as the file scanner + redis_key = f"processed_file:{file_path}" + # Store the actual file modification time to match the file scanner's expectation + file_mtime = os.path.getmtime(file_path) + redis_client.setex(redis_key, 60 * 60 * 24 * 3, str(file_mtime)) # 3 day TTL + logger.debug(f"Marked uploaded logo file as processed in Redis: {file_path} (mtime: {file_mtime})") + except Exception as e: + logger.warning(f"Failed to mark logo file as processed in Redis: {e}") - return 
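# Standalone sketch of the "already processed" marker the upload action above
# writes for the file scanner, using redis-py directly (the view goes through
# Dispatcharr's RedisClient wrapper; localhost and the path are assumptions).
import os
import redis

r = redis.Redis(host="localhost", port=6379, db=0)   # assumes a local Redis
file_path = "/data/logos/example.png"                # illustrative upload path
if os.path.exists(file_path):
    mtime = os.path.getmtime(file_path)
    r.setex(f"processed_file:{file_path}", 60 * 60 * 24 * 3, str(mtime))  # 3-day TTL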
Response({'id': logo.id, 'name': logo.name, 'url': logo.url}, status=status.HTTP_201_CREATED) + # Get custom name from request data, fallback to filename + custom_name = request.data.get('name', '').strip() + logo_name = custom_name if custom_name else file_name - @action(detail=True, methods=['get'], permission_classes=[AllowAny]) + logo, _ = Logo.objects.get_or_create( + url=file_path, + defaults={ + "name": logo_name, + }, + ) + + # Use get_serializer to ensure proper context + serializer = self.get_serializer(logo) + return Response( + serializer.data, + status=status.HTTP_201_CREATED, + ) + + @action(detail=True, methods=["get"], permission_classes=[AllowAny]) def cache(self, request, pk=None): """Streams the logo file, whether it's local or remote.""" logo = self.get_object() logo_url = logo.url - if logo_url.startswith("/data"): # Local file if not os.path.exists(logo_url): raise Http404("Image not found") - + stat = os.stat(logo_url) # Get proper mime type (first item of the tuple) content_type, _ = mimetypes.guess_type(logo_url) if not content_type: - content_type = 'image/jpeg' # Default to a common image type + content_type = "image/jpeg" # Default to a common image type # Use context manager and set Content-Disposition to inline - response = StreamingHttpResponse(open(logo_url, "rb"), content_type=content_type) - response['Content-Disposition'] = 'inline; filename="{}"'.format(os.path.basename(logo_url)) + response = StreamingHttpResponse( + open(logo_url, "rb"), content_type=content_type + ) + response["Cache-Control"] = "public, max-age=14400" # Cache in browser for 4 hours + response["Last-Modified"] = http_date(stat.st_mtime) + response["Content-Disposition"] = 'inline; filename="{}"'.format( + os.path.basename(logo_url) + ) return response else: # Remote image try: - remote_response = requests.get(logo_url, stream=True) + # Get the default user agent + try: + default_user_agent_id = CoreSettings.get_default_user_agent_id() + user_agent_obj = UserAgent.objects.get(id=int(default_user_agent_id)) + user_agent = user_agent_obj.user_agent + except (CoreSettings.DoesNotExist, UserAgent.DoesNotExist, ValueError): + # Fallback to hardcoded if default not found + user_agent = 'Dispatcharr/1.0' + + # Add proper timeouts to prevent hanging + remote_response = requests.get( + logo_url, + stream=True, + timeout=(3, 5), # (connect_timeout, read_timeout) + headers={'User-Agent': user_agent} + ) if remote_response.status_code == 200: # Try to get content type from response headers first - content_type = remote_response.headers.get('Content-Type') + content_type = remote_response.headers.get("Content-Type") # If no content type in headers or it's empty, guess based on URL if not content_type: @@ -720,43 +1702,179 @@ class LogoViewSet(viewsets.ModelViewSet): # If still no content type, default to common image type if not content_type: - content_type = 'image/jpeg' + content_type = "image/jpeg" - response = StreamingHttpResponse(remote_response.iter_content(chunk_size=8192), content_type=content_type) - response['Content-Disposition'] = 'inline; filename="{}"'.format(os.path.basename(logo_url)) + response = StreamingHttpResponse( + remote_response.iter_content(chunk_size=8192), + content_type=content_type, + ) + if(remote_response.headers.get("Cache-Control")): + response["Cache-Control"] = remote_response.headers.get("Cache-Control") + if(remote_response.headers.get("Last-Modified")): + response["Last-Modified"] = remote_response.headers.get("Last-Modified") + 
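# Standalone version of the remote-fetch guard rails added above: a
# (connect, read) timeout pair plus an explicit User-Agent, with timeout and
# connection failures handled separately. The URL is illustrative;
# "Dispatcharr/1.0" is the same fallback UA used by the view.
import requests

try:
    r = requests.get(
        "https://example.com/logo.png",
        stream=True,
        timeout=(3, 5),                       # 3s to connect, 5s between reads
        headers={"User-Agent": "Dispatcharr/1.0"},
    )
    print(r.status_code, r.headers.get("Content-Type"))
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as exc:
    print(f"logo fetch failed: {exc}")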
response["Content-Disposition"] = 'inline; filename="{}"'.format( + os.path.basename(logo_url) + ) return response raise Http404("Remote image not found") - except requests.RequestException: + except requests.exceptions.Timeout: + logger.warning(f"Timeout fetching logo from {logo_url}") + raise Http404("Logo request timed out") + except requests.exceptions.ConnectionError: + logger.warning(f"Connection error fetching logo from {logo_url}") + raise Http404("Unable to connect to logo server") + except requests.RequestException as e: + logger.warning(f"Error fetching logo from {logo_url}: {e}") raise Http404("Error fetching remote image") + class ChannelProfileViewSet(viewsets.ModelViewSet): queryset = ChannelProfile.objects.all() serializer_class = ChannelProfileSerializer - permission_classes = [IsAuthenticated] + + def get_queryset(self): + user = self.request.user + + # If user_level is 10, return all ChannelProfiles + if hasattr(user, "user_level") and user.user_level == 10: + return ChannelProfile.objects.all() + + # Otherwise, return only ChannelProfiles related to the user + return self.request.user.channel_profiles.all() + + def get_permissions(self): + if self.action == "duplicate": + return [IsAdmin()] + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + + @action(detail=True, methods=["post"], url_path="duplicate", permission_classes=[IsAdmin]) + def duplicate(self, request, pk=None): + requested_name = str(request.data.get("name", "")).strip() + + if not requested_name: + return Response( + {"detail": "Name is required to duplicate a profile."}, + status=status.HTTP_400_BAD_REQUEST, + ) + + if ChannelProfile.objects.filter(name=requested_name).exists(): + return Response( + {"detail": "A channel profile with this name already exists."}, + status=status.HTTP_400_BAD_REQUEST, + ) + + source_profile = self.get_object() + + with transaction.atomic(): + new_profile = ChannelProfile.objects.create(name=requested_name) + + source_memberships = ChannelProfileMembership.objects.filter( + channel_profile=source_profile + ) + source_enabled_map = { + membership.channel_id: membership.enabled + for membership in source_memberships + } + + new_memberships = list( + ChannelProfileMembership.objects.filter(channel_profile=new_profile) + ) + for membership in new_memberships: + membership.enabled = source_enabled_map.get( + membership.channel_id, False + ) + + if new_memberships: + ChannelProfileMembership.objects.bulk_update( + new_memberships, ["enabled"] + ) + + serializer = self.get_serializer(new_profile) + return Response(serializer.data, status=status.HTTP_201_CREATED) + class GetChannelStreamsAPIView(APIView): + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + def get(self, request, channel_id): channel = get_object_or_404(Channel, id=channel_id) # Order the streams by channelstream__order to match the order in the channel view - streams = channel.streams.all().order_by('channelstream__order') + streams = channel.streams.all().order_by("channelstream__order") serializer = StreamSerializer(streams, many=True) return Response(serializer.data) + class UpdateChannelMembershipAPIView(APIView): + permission_classes = [IsOwnerOfObject] + def patch(self, request, profile_id, channel_id): """Enable or disable a channel for a specific group""" channel_profile = get_object_or_404(ChannelProfile, 
id=profile_id) channel = get_object_or_404(Channel, id=channel_id) - membership = get_object_or_404(ChannelProfileMembership, channel_profile=channel_profile, channel=channel) + try: + membership = ChannelProfileMembership.objects.get( + channel_profile=channel_profile, channel=channel + ) + except ChannelProfileMembership.DoesNotExist: + # Create the membership if it does not exist (for custom channels) + membership = ChannelProfileMembership.objects.create( + channel_profile=channel_profile, + channel=channel, + enabled=False # Default to False, will be updated below + ) - serializer = ChannelProfileMembershipSerializer(membership, data=request.data, partial=True) + serializer = ChannelProfileMembershipSerializer( + membership, data=request.data, partial=True + ) if serializer.is_valid(): serializer.save() return Response(serializer.data, status=status.HTTP_200_OK) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) + class BulkUpdateChannelMembershipAPIView(APIView): + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + + @swagger_auto_schema( + operation_description="Bulk enable or disable channels for a specific profile. Creates membership records if they don't exist.", + request_body=BulkChannelProfileMembershipSerializer, + responses={ + 200: openapi.Response( + description="Channels updated successfully", + schema=openapi.Schema( + type=openapi.TYPE_OBJECT, + properties={ + "status": openapi.Schema(type=openapi.TYPE_STRING, example="success"), + "updated": openapi.Schema(type=openapi.TYPE_INTEGER, description="Number of channels updated"), + "created": openapi.Schema(type=openapi.TYPE_INTEGER, description="Number of new memberships created"), + "invalid_channels": openapi.Schema( + type=openapi.TYPE_ARRAY, + items=openapi.Schema(type=openapi.TYPE_INTEGER), + description="List of channel IDs that don't exist" + ), + }, + ), + ), + 400: "Invalid request data", + 404: "Profile not found", + }, + ) def patch(self, request, profile_id): """Bulk enable or disable channels for a specific profile""" # Get the channel profile @@ -766,30 +1884,450 @@ class BulkUpdateChannelMembershipAPIView(APIView): serializer = BulkChannelProfileMembershipSerializer(data=request.data) if serializer.is_valid(): - updates = serializer.validated_data['channels'] - channel_ids = [entry['channel_id'] for entry in updates] + updates = serializer.validated_data["channels"] + channel_ids = [entry["channel_id"] for entry in updates] - - memberships = ChannelProfileMembership.objects.filter( - channel_profile=channel_profile, - channel_id__in=channel_ids + # Validate that all channels exist + existing_channels = set( + Channel.objects.filter(id__in=channel_ids).values_list("id", flat=True) ) + invalid_channels = [cid for cid in channel_ids if cid not in existing_channels] - membership_dict = {m.channel.id: m for m in memberships} + if invalid_channels: + return Response( + { + "error": "Some channels do not exist", + "invalid_channels": invalid_channels, + }, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Get existing memberships + existing_memberships = ChannelProfileMembership.objects.filter( + channel_profile=channel_profile, channel_id__in=channel_ids + ) + membership_dict = {m.channel_id: m for m in existing_memberships} + + # Prepare lists for bulk operations + memberships_to_update = [] + memberships_to_create = [] for entry in updates: - channel_id = entry['channel_id'] - 
enabled_status = entry['enabled'] + channel_id = entry["channel_id"] + enabled_status = entry["enabled"] + if channel_id in membership_dict: + # Update existing membership membership_dict[channel_id].enabled = enabled_status + memberships_to_update.append(membership_dict[channel_id]) + else: + # Create new membership + memberships_to_create.append( + ChannelProfileMembership( + channel_profile=channel_profile, + channel_id=channel_id, + enabled=enabled_status, + ) + ) - ChannelProfileMembership.objects.bulk_update(memberships, ['enabled']) + # Perform bulk operations + with transaction.atomic(): + if memberships_to_update: + ChannelProfileMembership.objects.bulk_update( + memberships_to_update, ["enabled"] + ) + if memberships_to_create: + ChannelProfileMembership.objects.bulk_create(memberships_to_create) - return Response({"status": "success"}, status=status.HTTP_200_OK) + return Response( + { + "status": "success", + "updated": len(memberships_to_update), + "created": len(memberships_to_create), + "invalid_channels": [], + }, + status=status.HTTP_200_OK, + ) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) + +class RecurringRecordingRuleViewSet(viewsets.ModelViewSet): + queryset = RecurringRecordingRule.objects.all().select_related("channel") + serializer_class = RecurringRecordingRuleSerializer + + def get_permissions(self): + return [IsAdmin()] + + def perform_create(self, serializer): + rule = serializer.save() + try: + sync_recurring_rule_impl(rule.id, drop_existing=True) + except Exception as err: + logger.warning(f"Failed to initialize recurring rule {rule.id}: {err}") + return rule + + def perform_update(self, serializer): + rule = serializer.save() + try: + if rule.enabled: + sync_recurring_rule_impl(rule.id, drop_existing=True) + else: + purge_recurring_rule_impl(rule.id) + except Exception as err: + logger.warning(f"Failed to resync recurring rule {rule.id}: {err}") + return rule + + def perform_destroy(self, instance): + rule_id = instance.id + super().perform_destroy(instance) + try: + purge_recurring_rule_impl(rule_id) + except Exception as err: + logger.warning(f"Failed to purge recordings for rule {rule_id}: {err}") + + class RecordingViewSet(viewsets.ModelViewSet): queryset = Recording.objects.all() serializer_class = RecordingSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + # Allow unauthenticated playback of recording files (like other streaming endpoints) + if self.action == 'file': + return [AllowAny()] + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + + @action(detail=True, methods=["post"], url_path="comskip") + def comskip(self, request, pk=None): + """Trigger comskip processing for this recording.""" + from .tasks import comskip_process_recording + rec = get_object_or_404(Recording, pk=pk) + try: + comskip_process_recording.delay(rec.id) + return Response({"success": True, "queued": True}) + except Exception as e: + return Response({"success": False, "error": str(e)}, status=400) + + @action(detail=True, methods=["get"], url_path="file") + def file(self, request, pk=None): + """Stream a recorded file with HTTP Range support for seeking.""" + recording = get_object_or_404(Recording, pk=pk) + cp = recording.custom_properties or {} + file_path = cp.get("file_path") + file_name = cp.get("file_name") or "recording" + + if not file_path or not os.path.exists(file_path): + raise Http404("Recording file not found") + + # Guess content 
type + ext = os.path.splitext(file_path)[1].lower() + if ext == ".mp4": + content_type = "video/mp4" + elif ext == ".mkv": + content_type = "video/x-matroska" + else: + content_type = mimetypes.guess_type(file_path)[0] or "application/octet-stream" + + file_size = os.path.getsize(file_path) + range_header = request.META.get("HTTP_RANGE", "").strip() + + def file_iterator(path, start=0, end=None, chunk_size=8192): + with open(path, "rb") as f: + f.seek(start) + remaining = (end - start + 1) if end is not None else None + while True: + if remaining is not None and remaining <= 0: + break + bytes_to_read = min(chunk_size, remaining) if remaining is not None else chunk_size + data = f.read(bytes_to_read) + if not data: + break + if remaining is not None: + remaining -= len(data) + yield data + + if range_header and range_header.startswith("bytes="): + # Parse Range header + try: + range_spec = range_header.split("=", 1)[1] + start_str, end_str = range_spec.split("-", 1) + start = int(start_str) if start_str else 0 + end = int(end_str) if end_str else file_size - 1 + start = max(0, start) + end = min(file_size - 1, end) + length = end - start + 1 + + resp = StreamingHttpResponse( + file_iterator(file_path, start, end), + status=206, + content_type=content_type, + ) + resp["Content-Range"] = f"bytes {start}-{end}/{file_size}" + resp["Content-Length"] = str(length) + resp["Accept-Ranges"] = "bytes" + resp["Content-Disposition"] = f"inline; filename=\"{file_name}\"" + return resp + except Exception: + # Fall back to full file if parsing fails + pass + + # Full file response + response = FileResponse(open(file_path, "rb"), content_type=content_type) + response["Content-Length"] = str(file_size) + response["Accept-Ranges"] = "bytes" + response["Content-Disposition"] = f"inline; filename=\"{file_name}\"" + return response + + def destroy(self, request, *args, **kwargs): + """Delete the Recording and ensure any active DVR client connection is closed. + + Also removes the associated file(s) from disk if present. 
+ """ + instance = self.get_object() + + # Attempt to close the DVR client connection for this channel if active + try: + channel_uuid = str(instance.channel.uuid) + # Lazy imports to avoid module overhead if proxy isn't used + from core.utils import RedisClient + from apps.proxy.ts_proxy.redis_keys import RedisKeys + from apps.proxy.ts_proxy.services.channel_service import ChannelService + + r = RedisClient.get_client() + if r: + client_set_key = RedisKeys.clients(channel_uuid) + client_ids = r.smembers(client_set_key) or [] + stopped = 0 + for raw_id in client_ids: + try: + cid = raw_id.decode("utf-8") if isinstance(raw_id, (bytes, bytearray)) else str(raw_id) + meta_key = RedisKeys.client_metadata(channel_uuid, cid) + ua = r.hget(meta_key, "user_agent") + ua_s = ua.decode("utf-8") if isinstance(ua, (bytes, bytearray)) else (ua or "") + # Identify DVR recording client by its user agent + if ua_s and "Dispatcharr-DVR" in ua_s: + try: + ChannelService.stop_client(channel_uuid, cid) + stopped += 1 + except Exception as inner_e: + logger.debug(f"Failed to stop DVR client {cid} for channel {channel_uuid}: {inner_e}") + except Exception as inner: + logger.debug(f"Error while checking client metadata: {inner}") + if stopped: + logger.info(f"Stopped {stopped} DVR client(s) for channel {channel_uuid} due to recording cancellation") + # If no clients remain after stopping DVR clients, proactively stop the channel + try: + remaining = r.scard(client_set_key) or 0 + except Exception: + remaining = 0 + if remaining == 0: + try: + ChannelService.stop_channel(channel_uuid) + logger.info(f"Stopped channel {channel_uuid} (no clients remain)") + except Exception as sc_e: + logger.debug(f"Unable to stop channel {channel_uuid}: {sc_e}") + except Exception as e: + logger.debug(f"Unable to stop DVR clients for cancelled recording: {e}") + + # Capture paths before deletion + cp = instance.custom_properties or {} + file_path = cp.get("file_path") + temp_ts_path = cp.get("_temp_file_path") + + # Perform DB delete first, then try to remove files + response = super().destroy(request, *args, **kwargs) + + # Notify frontends to refresh recordings + try: + from core.utils import send_websocket_update + send_websocket_update('updates', 'update', {"success": True, "type": "recordings_refreshed"}) + except Exception: + pass + + library_dir = '/data' + allowed_roots = ['/data/', library_dir.rstrip('/') + '/'] + + def _safe_remove(path: str): + if not path or not isinstance(path, str): + return + try: + if any(path.startswith(root) for root in allowed_roots) and os.path.exists(path): + os.remove(path) + logger.info(f"Deleted recording artifact: {path}") + except Exception as ex: + logger.warning(f"Failed to delete recording artifact {path}: {ex}") + + _safe_remove(file_path) + _safe_remove(temp_ts_path) + + return response + + +class ComskipConfigAPIView(APIView): + """Upload or inspect the custom comskip.ini used by DVR processing.""" + + parser_classes = [MultiPartParser, FormParser] + + def get_permissions(self): + return [IsAdmin()] + + def get(self, request): + path = CoreSettings.get_dvr_comskip_custom_path() + exists = bool(path and os.path.exists(path)) + return Response({"path": path, "exists": exists}) + + def post(self, request): + uploaded = request.FILES.get("file") or request.FILES.get("comskip_ini") + if not uploaded: + return Response({"error": "No file provided"}, status=status.HTTP_400_BAD_REQUEST) + + name = (uploaded.name or "").lower() + if not name.endswith(".ini"): + return Response({"error": "Only 
.ini files are allowed"}, status=status.HTTP_400_BAD_REQUEST) + + if uploaded.size and uploaded.size > 1024 * 1024: + return Response({"error": "File too large (limit 1MB)"}, status=status.HTTP_400_BAD_REQUEST) + + dest_dir = os.path.join(settings.MEDIA_ROOT, "comskip") + os.makedirs(dest_dir, exist_ok=True) + dest_path = os.path.join(dest_dir, "comskip.ini") + + try: + with open(dest_path, "wb") as dest: + for chunk in uploaded.chunks(): + dest.write(chunk) + except Exception as e: + logger.error(f"Failed to save uploaded comskip.ini: {e}") + return Response({"error": "Unable to save file"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR) + + # Persist path setting so DVR processing picks it up immediately + CoreSettings.set_dvr_comskip_custom_path(dest_path) + + return Response({"success": True, "path": dest_path, "exists": os.path.exists(dest_path)}) + + +class BulkDeleteUpcomingRecordingsAPIView(APIView): + """Delete all upcoming (future) recordings.""" + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_method[self.request.method]] + except KeyError: + return [Authenticated()] + + def post(self, request): + now = timezone.now() + qs = Recording.objects.filter(start_time__gt=now) + removed = qs.count() + qs.delete() + try: + from core.utils import send_websocket_update + send_websocket_update('updates', 'update', {"success": True, "type": "recordings_refreshed", "removed": removed}) + except Exception: + pass + return Response({"success": True, "removed": removed}) + + +class SeriesRulesAPIView(APIView): + """Manage DVR series recording rules (list/add).""" + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_method[self.request.method]] + except KeyError: + return [Authenticated()] + + def get(self, request): + return Response({"rules": CoreSettings.get_dvr_series_rules()}) + + def post(self, request): + data = request.data or {} + tvg_id = str(data.get("tvg_id") or "").strip() + mode = (data.get("mode") or "all").lower() + title = data.get("title") or "" + if mode not in ("all", "new"): + return Response({"error": "mode must be 'all' or 'new'"}, status=status.HTTP_400_BAD_REQUEST) + if not tvg_id: + return Response({"error": "tvg_id is required"}, status=status.HTTP_400_BAD_REQUEST) + rules = CoreSettings.get_dvr_series_rules() + # Upsert by tvg_id + existing = next((r for r in rules if str(r.get("tvg_id")) == tvg_id), None) + if existing: + existing.update({"mode": mode, "title": title}) + else: + rules.append({"tvg_id": tvg_id, "mode": mode, "title": title}) + CoreSettings.set_dvr_series_rules(rules) + # Evaluate immediately for this tvg_id (async) + try: + evaluate_series_rules.delay(tvg_id) + except Exception: + pass + return Response({"success": True, "rules": rules}) + + +class DeleteSeriesRuleAPIView(APIView): + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_method[self.request.method]] + except KeyError: + return [Authenticated()] + + def delete(self, request, tvg_id): + tvg_id = unquote(str(tvg_id or "")) + rules = [r for r in CoreSettings.get_dvr_series_rules() if str(r.get("tvg_id")) != tvg_id] + CoreSettings.set_dvr_series_rules(rules) + return Response({"success": True, "rules": rules}) + + +class EvaluateSeriesRulesAPIView(APIView): + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_method[self.request.method]] + except KeyError: + return [Authenticated()] + + def post(self, request): + tvg_id = request.data.get("tvg_id") + # Run 
synchronously so UI sees results immediately + result = evaluate_series_rules_impl(str(tvg_id)) if tvg_id else evaluate_series_rules_impl() + return Response({"success": True, **result}) + + +class BulkRemoveSeriesRecordingsAPIView(APIView): + """Bulk remove scheduled recordings for a series rule. + + POST body: + - tvg_id: required (EPG channel id) + - title: optional (series title) + - scope: 'title' (default) or 'channel' + """ + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_method[self.request.method]] + except KeyError: + return [Authenticated()] + + def post(self, request): + from django.utils import timezone + tvg_id = str(request.data.get("tvg_id") or "").strip() + title = request.data.get("title") + scope = (request.data.get("scope") or "title").lower() + if not tvg_id: + return Response({"error": "tvg_id is required"}, status=status.HTTP_400_BAD_REQUEST) + + qs = Recording.objects.filter( + start_time__gte=timezone.now(), + custom_properties__program__tvg_id=tvg_id, + ) + if scope == "title" and title: + qs = qs.filter(custom_properties__program__title=title) + + count = qs.count() + qs.delete() + try: + from core.utils import send_websocket_update + send_websocket_update('updates', 'update', {"success": True, "type": "recordings_refreshed", "removed": count}) + except Exception: + pass + return Response({"success": True, "removed": count}) diff --git a/apps/channels/forms.py b/apps/channels/forms.py index 342bd0fe..a566adbd 100644 --- a/apps/channels/forms.py +++ b/apps/channels/forms.py @@ -14,6 +14,13 @@ class ChannelGroupForm(forms.ModelForm): # Channel Form # class ChannelForm(forms.ModelForm): + # Explicitly define channel_number as FloatField to ensure decimal values work + channel_number = forms.FloatField( + required=False, + widget=forms.NumberInput(attrs={'step': '0.1'}), # Allow decimal steps + help_text="Channel number can include decimals (e.g., 1.1, 2.5)" + ) + channel_group = forms.ModelChoiceField( queryset=ChannelGroup.objects.all(), required=False, diff --git a/apps/channels/migrations/0018_channelgroupm3uaccount_custom_properties_and_more.py b/apps/channels/migrations/0018_channelgroupm3uaccount_custom_properties_and_more.py new file mode 100644 index 00000000..51507843 --- /dev/null +++ b/apps/channels/migrations/0018_channelgroupm3uaccount_custom_properties_and_more.py @@ -0,0 +1,18 @@ +# Generated by Django 5.1.6 on 2025-04-27 14:12 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0017_alter_channelgroup_name'), + ] + + operations = [ + migrations.AddField( + model_name='channelgroupm3uaccount', + name='custom_properties', + field=models.TextField(blank=True, null=True), + ), + ] diff --git a/apps/channels/migrations/0019_channel_tvc_guide_stationid.py b/apps/channels/migrations/0019_channel_tvc_guide_stationid.py new file mode 100644 index 00000000..86f76b82 --- /dev/null +++ b/apps/channels/migrations/0019_channel_tvc_guide_stationid.py @@ -0,0 +1,18 @@ +# Generated by Django 5.1.6 on 2025-05-04 00:02 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0018_channelgroupm3uaccount_custom_properties_and_more'), + ] + + operations = [ + migrations.AddField( + model_name='channel', + name='tvc_guide_stationid', + field=models.CharField(blank=True, max_length=255, null=True), + ), + ] diff --git 
a/apps/channels/migrations/0020_alter_channel_channel_number.py b/apps/channels/migrations/0020_alter_channel_channel_number.py new file mode 100644 index 00000000..0a1b6ead --- /dev/null +++ b/apps/channels/migrations/0020_alter_channel_channel_number.py @@ -0,0 +1,18 @@ +# Generated by Django 5.1.6 on 2025-05-15 19:37 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0019_channel_tvc_guide_stationid'), + ] + + operations = [ + migrations.AlterField( + model_name='channel', + name='channel_number', + field=models.FloatField(db_index=True), + ), + ] diff --git a/apps/channels/migrations/0021_channel_user_level.py b/apps/channels/migrations/0021_channel_user_level.py new file mode 100644 index 00000000..2aa55eeb --- /dev/null +++ b/apps/channels/migrations/0021_channel_user_level.py @@ -0,0 +1,18 @@ +# Generated by Django 5.1.6 on 2025-05-18 14:31 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0020_alter_channel_channel_number'), + ] + + operations = [ + migrations.AddField( + model_name='channel', + name='user_level', + field=models.IntegerField(default=0), + ), + ] diff --git a/apps/channels/migrations/0022_channel_auto_created_channel_auto_created_by_and_more.py b/apps/channels/migrations/0022_channel_auto_created_channel_auto_created_by_and_more.py new file mode 100644 index 00000000..b1450c09 --- /dev/null +++ b/apps/channels/migrations/0022_channel_auto_created_channel_auto_created_by_and_more.py @@ -0,0 +1,35 @@ +# Generated by Django 5.1.6 on 2025-07-13 23:08 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0021_channel_user_level'), + ('m3u', '0012_alter_m3uaccount_refresh_interval'), + ] + + operations = [ + migrations.AddField( + model_name='channel', + name='auto_created', + field=models.BooleanField(default=False, help_text='Whether this channel was automatically created via M3U auto channel sync'), + ), + migrations.AddField( + model_name='channel', + name='auto_created_by', + field=models.ForeignKey(blank=True, help_text='The M3U account that auto-created this channel', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='auto_created_channels', to='m3u.m3uaccount'), + ), + migrations.AddField( + model_name='channelgroupm3uaccount', + name='auto_channel_sync', + field=models.BooleanField(default=False, help_text='Automatically create/delete channels to match streams in this group'), + ), + migrations.AddField( + model_name='channelgroupm3uaccount', + name='auto_sync_channel_start', + field=models.FloatField(blank=True, help_text='Starting channel number for auto-created channels in this group', null=True), + ), + ] diff --git a/apps/channels/migrations/0023_stream_stream_stats_stream_stream_stats_updated_at.py b/apps/channels/migrations/0023_stream_stream_stats_stream_stream_stats_updated_at.py new file mode 100644 index 00000000..1b0fdbe8 --- /dev/null +++ b/apps/channels/migrations/0023_stream_stream_stats_stream_stream_stats_updated_at.py @@ -0,0 +1,23 @@ +# Generated by Django 5.1.6 on 2025-07-29 02:39 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0022_channel_auto_created_channel_auto_created_by_and_more'), + ] + + operations = [ + migrations.AddField( + 
model_name='stream', + name='stream_stats', + field=models.JSONField(blank=True, help_text='JSON object containing stream statistics like video codec, resolution, etc.', null=True), + ), + migrations.AddField( + model_name='stream', + name='stream_stats_updated_at', + field=models.DateTimeField(blank=True, db_index=True, help_text='When stream statistics were last updated', null=True), + ), + ] diff --git a/apps/channels/migrations/0024_alter_channelgroupm3uaccount_channel_group.py b/apps/channels/migrations/0024_alter_channelgroupm3uaccount_channel_group.py new file mode 100644 index 00000000..7ee5544c --- /dev/null +++ b/apps/channels/migrations/0024_alter_channelgroupm3uaccount_channel_group.py @@ -0,0 +1,19 @@ +# Generated by Django 5.2.4 on 2025-08-22 20:14 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0023_stream_stream_stats_stream_stream_stats_updated_at'), + ] + + operations = [ + migrations.AlterField( + model_name='channelgroupm3uaccount', + name='channel_group', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='m3u_accounts', to='dispatcharr_channels.channelgroup'), + ), + ] diff --git a/apps/channels/migrations/0025_alter_channelgroupm3uaccount_custom_properties_and_more.py b/apps/channels/migrations/0025_alter_channelgroupm3uaccount_custom_properties_and_more.py new file mode 100644 index 00000000..980682cb --- /dev/null +++ b/apps/channels/migrations/0025_alter_channelgroupm3uaccount_custom_properties_and_more.py @@ -0,0 +1,28 @@ +# Generated by Django 5.2.4 on 2025-09-02 14:30 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0024_alter_channelgroupm3uaccount_channel_group'), + ] + + operations = [ + migrations.AlterField( + model_name='channelgroupm3uaccount', + name='custom_properties', + field=models.JSONField(blank=True, default=dict, null=True), + ), + migrations.AlterField( + model_name='recording', + name='custom_properties', + field=models.JSONField(blank=True, default=dict, null=True), + ), + migrations.AlterField( + model_name='stream', + name='custom_properties', + field=models.JSONField(blank=True, default=dict, null=True), + ), + ] diff --git a/apps/channels/migrations/0026_recurringrecordingrule.py b/apps/channels/migrations/0026_recurringrecordingrule.py new file mode 100644 index 00000000..1b8cfdb8 --- /dev/null +++ b/apps/channels/migrations/0026_recurringrecordingrule.py @@ -0,0 +1,31 @@ +# Generated by Django 5.0.14 on 2025-09-18 14:56 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0025_alter_channelgroupm3uaccount_custom_properties_and_more'), + ] + + operations = [ + migrations.CreateModel( + name='RecurringRecordingRule', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('days_of_week', models.JSONField(default=list)), + ('start_time', models.TimeField()), + ('end_time', models.TimeField()), + ('enabled', models.BooleanField(default=True)), + ('name', models.CharField(blank=True, max_length=255)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('channel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recurring_rules', 
to='dispatcharr_channels.channel')), + ], + options={ + 'ordering': ['channel', 'start_time'], + }, + ), + ] diff --git a/apps/channels/migrations/0027_recurringrecordingrule_end_date_and_more.py b/apps/channels/migrations/0027_recurringrecordingrule_end_date_and_more.py new file mode 100644 index 00000000..8cdb9868 --- /dev/null +++ b/apps/channels/migrations/0027_recurringrecordingrule_end_date_and_more.py @@ -0,0 +1,23 @@ +# Generated by Django 5.2.4 on 2025-10-05 20:50 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0026_recurringrecordingrule'), + ] + + operations = [ + migrations.AddField( + model_name='recurringrecordingrule', + name='end_date', + field=models.DateField(blank=True, null=True), + ), + migrations.AddField( + model_name='recurringrecordingrule', + name='start_date', + field=models.DateField(blank=True, null=True), + ), + ] diff --git a/apps/channels/migrations/0028_channel_created_at_channel_updated_at.py b/apps/channels/migrations/0028_channel_created_at_channel_updated_at.py new file mode 100644 index 00000000..08c426b1 --- /dev/null +++ b/apps/channels/migrations/0028_channel_created_at_channel_updated_at.py @@ -0,0 +1,25 @@ +# Generated by Django 5.2.4 on 2025-10-06 22:55 + +import django.utils.timezone +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0027_recurringrecordingrule_end_date_and_more'), + ] + + operations = [ + migrations.AddField( + model_name='channel', + name='created_at', + field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, help_text='Timestamp when this channel was created'), + preserve_default=False, + ), + migrations.AddField( + model_name='channel', + name='updated_at', + field=models.DateTimeField(auto_now=True, help_text='Timestamp when this channel was last updated'), + ), + ] diff --git a/apps/channels/migrations/0029_backfill_custom_stream_hashes.py b/apps/channels/migrations/0029_backfill_custom_stream_hashes.py new file mode 100644 index 00000000..3e270be2 --- /dev/null +++ b/apps/channels/migrations/0029_backfill_custom_stream_hashes.py @@ -0,0 +1,54 @@ +# Generated migration to backfill stream_hash for existing custom streams + +from django.db import migrations +import hashlib + + +def backfill_custom_stream_hashes(apps, schema_editor): + """ + Generate stream_hash for all custom streams that don't have one. + Uses stream ID to create a stable hash that won't change when name/url is edited. + """ + Stream = apps.get_model('dispatcharr_channels', 'Stream') + + custom_streams_without_hash = Stream.objects.filter( + is_custom=True, + stream_hash__isnull=True + ) + + updated_count = 0 + for stream in custom_streams_without_hash: + # Generate a stable hash using the stream's ID + # This ensures the hash never changes even if name/url is edited + unique_string = f"custom_stream_{stream.id}" + stream.stream_hash = hashlib.sha256(unique_string.encode()).hexdigest() + stream.save(update_fields=['stream_hash']) + updated_count += 1 + + if updated_count > 0: + print(f"Backfilled stream_hash for {updated_count} custom streams") + else: + print("No custom streams needed stream_hash backfill") + + +def reverse_backfill(apps, schema_editor): + """ + Reverse migration - clear stream_hash for custom streams. + Note: This will break preview functionality for custom streams. 
+ """ + Stream = apps.get_model('dispatcharr_channels', 'Stream') + + custom_streams = Stream.objects.filter(is_custom=True) + count = custom_streams.update(stream_hash=None) + print(f"Cleared stream_hash for {count} custom streams") + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0028_channel_created_at_channel_updated_at'), + ] + + operations = [ + migrations.RunPython(backfill_custom_stream_hashes, reverse_backfill), + ] diff --git a/apps/channels/migrations/0030_alter_stream_url.py b/apps/channels/migrations/0030_alter_stream_url.py new file mode 100644 index 00000000..203e411a --- /dev/null +++ b/apps/channels/migrations/0030_alter_stream_url.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-10-28 20:00 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0029_backfill_custom_stream_hashes'), + ] + + operations = [ + migrations.AlterField( + model_name='stream', + name='url', + field=models.URLField(blank=True, max_length=4096, null=True), + ), + ] diff --git a/apps/channels/migrations/0031_channelgroupm3uaccount_is_stale_and_more.py b/apps/channels/migrations/0031_channelgroupm3uaccount_is_stale_and_more.py new file mode 100644 index 00000000..2428a97b --- /dev/null +++ b/apps/channels/migrations/0031_channelgroupm3uaccount_is_stale_and_more.py @@ -0,0 +1,29 @@ +# Generated by Django 5.2.9 on 2026-01-09 18:19 + +import datetime +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0030_alter_stream_url'), + ] + + operations = [ + migrations.AddField( + model_name='channelgroupm3uaccount', + name='is_stale', + field=models.BooleanField(db_index=True, default=False, help_text='Whether this group relationship is stale (not seen in recent refresh, pending deletion)'), + ), + migrations.AddField( + model_name='channelgroupm3uaccount', + name='last_seen', + field=models.DateTimeField(db_index=True, default=datetime.datetime.now, help_text='Last time this group was seen in the M3U source during a refresh'), + ), + migrations.AddField( + model_name='stream', + name='is_stale', + field=models.BooleanField(db_index=True, default=False, help_text='Whether this stream is stale (not seen in recent refresh, pending deletion)'), + ), + ] diff --git a/apps/channels/models.py b/apps/channels/models.py index 4485936e..6d199520 100644 --- a/apps/channels/models.py +++ b/apps/channels/models.py @@ -1,6 +1,5 @@ from django.db import models from django.core.exceptions import ValidationError -from core.models import StreamProfile from django.conf import settings from core.models import StreamProfile, CoreSettings from core.utils import RedisClient @@ -10,12 +9,14 @@ from datetime import datetime import hashlib import json from apps.epg.models import EPGData +from apps.accounts.models import User logger = logging.getLogger(__name__) # If you have an M3UAccount model in apps.m3u, you can still import it: from apps.m3u.models import M3UAccount + # Add fallback functions if Redis isn't available def get_total_viewers(channel_id): """Get viewer count from Redis or return 0 if Redis isn't available""" @@ -26,6 +27,7 @@ def get_total_viewers(channel_id): except Exception: return 0 + class ChannelGroup(models.Model): name = models.TextField(unique=True, db_index=True) @@ -46,12 +48,14 @@ class ChannelGroup(models.Model): return created_objects + class Stream(models.Model): """ Represents a single 
stream (e.g. from an M3U source or custom URL). """ + name = models.CharField(max_length=255, default="Default Stream") - url = models.URLField(max_length=2000, blank=True, null=True) + url = models.URLField(max_length=4096, blank=True, null=True) m3u_account = models.ForeignKey( M3UAccount, on_delete=models.CASCADE, @@ -61,7 +65,7 @@ class Stream(models.Model): ) logo_url = models.TextField(blank=True, null=True) tvg_id = models.CharField(max_length=255, blank=True, null=True) - local_file = models.FileField(upload_to='uploads/', blank=True, null=True) + local_file = models.FileField(upload_to="uploads/", blank=True, null=True) current_viewers = models.PositiveIntegerField(default=0) updated_at = models.DateTimeField(auto_now=True) channel_group = models.ForeignKey( @@ -69,18 +73,18 @@ class Stream(models.Model): on_delete=models.SET_NULL, null=True, blank=True, - related_name='streams' + related_name="streams", ) stream_profile = models.ForeignKey( StreamProfile, null=True, blank=True, on_delete=models.SET_NULL, - related_name='streams' + related_name="streams", ) is_custom = models.BooleanField( default=False, - help_text="Whether this is a user-created stream or from an M3U account" + help_text="Whether this is a user-created stream or from an M3U account", ) stream_hash = models.CharField( max_length=255, @@ -90,30 +94,48 @@ class Stream(models.Model): db_index=True, ) last_seen = models.DateTimeField(db_index=True, default=datetime.now) - custom_properties = models.TextField(null=True, blank=True) + is_stale = models.BooleanField( + default=False, + db_index=True, + help_text="Whether this stream is stale (not seen in recent refresh, pending deletion)" + ) + custom_properties = models.JSONField(default=dict, blank=True, null=True) + + # Stream statistics fields + stream_stats = models.JSONField( + null=True, + blank=True, + help_text="JSON object containing stream statistics like video codec, resolution, etc." 
+ ) + stream_stats_updated_at = models.DateTimeField( + null=True, + blank=True, + help_text="When stream statistics were last updated", + db_index=True + ) class Meta: # If you use m3u_account, you might do unique_together = ('name','url','m3u_account') verbose_name = "Stream" verbose_name_plural = "Streams" - ordering = ['-updated_at'] + ordering = ["-updated_at"] def __str__(self): return self.name or self.url or f"Stream ID {self.id}" @classmethod - def generate_hash_key(cls, name, url, tvg_id, keys=None): + def generate_hash_key(cls, name, url, tvg_id, keys=None, m3u_id=None, group=None): if keys is None: keys = CoreSettings.get_m3u_hash_key().split(",") - stream_parts = { - "name": name, "url": url, "tvg_id": tvg_id - } + stream_parts = {"name": name, "url": url, "tvg_id": tvg_id, "m3u_id": m3u_id, "group": group} hash_parts = {key: stream_parts[key] for key in keys if key in stream_parts} # Serialize and hash the dictionary - serialized_obj = json.dumps(hash_parts, sort_keys=True) # sort_keys ensures consistent ordering + serialized_obj = json.dumps( + hash_parts, sort_keys=True + ) # sort_keys ensures consistent ordering hash_object = hashlib.sha256(serialized_obj.encode()) return hash_object.hexdigest() @@ -129,13 +151,23 @@ class Stream(models.Model): return stream, False # False means it was updated, not created except cls.DoesNotExist: # If it doesn't exist, create a new object with the given hash - fields_to_update['stream_hash'] = hash_value # Make sure the hash field is set + fields_to_update["stream_hash"] = ( + hash_value # Make sure the hash field is set + ) stream = cls.objects.create(**fields_to_update) return stream, True # True means it was created - # @TODO: honor stream's stream profile def get_stream_profile(self): - stream_profile = StreamProfile.objects.get(id=CoreSettings.get_default_stream_profile_id()) + """ + Get the stream profile for this stream. + Uses the stream's own profile if set, otherwise returns the default. + """ + if self.stream_profile: + return self.stream_profile + + stream_profile = StreamProfile.objects.get( + id=CoreSettings.get_default_stream_profile_id() + ) return stream_profile @@ -153,7 +185,9 @@ class Stream(models.Model): m3u_account = self.m3u_account m3u_profiles = m3u_account.profiles.all() default_profile = next((obj for obj in m3u_profiles if obj.is_default), None) - profiles = [default_profile] + [obj for obj in m3u_profiles if not obj.is_default] + profiles = [default_profile] + [ + obj for obj in m3u_profiles if not obj.is_default + ] for profile in profiles: logger.info(profile) @@ -168,13 +202,19 @@ class Stream(models.Model): if profile.max_streams == 0 or current_connections < profile.max_streams: # Start a new stream redis_client.set(f"channel_stream:{self.id}", self.id) - redis_client.set(f"stream_profile:{self.id}", profile.id) # Store only the matched profile + redis_client.set( + f"stream_profile:{self.id}", profile.id + ) # Store only the matched profile # Increment connection count for profiles with limits if profile.max_streams > 0: redis_client.incr(profile_connections_key) - return self.id, profile.id, None # Return newly assigned stream and matched profile + return ( + self.id, + profile.id, + None, + ) # Return newly assigned stream and matched profile # 4. 
No available streams return None, None, None @@ -195,7 +235,9 @@ class Stream(models.Model): redis_client.delete(f"stream_profile:{stream_id}") # Remove profile association profile_id = int(profile_id) - logger.debug(f"Found profile ID {profile_id} associated with stream {stream_id}") + logger.debug( + f"Found profile ID {profile_id} associated with stream {stream_id}" + ) profile_connections_key = f"profile_connections:{profile_id}" @@ -204,45 +246,45 @@ class Stream(models.Model): if current_count > 0: redis_client.decr(profile_connections_key) + class ChannelManager(models.Manager): def active(self): return self.all() class Channel(models.Model): - channel_number = models.IntegerField(db_index=True) + channel_number = models.FloatField(db_index=True) name = models.CharField(max_length=255) logo = models.ForeignKey( - 'Logo', + "Logo", on_delete=models.SET_NULL, null=True, blank=True, - related_name='channels', + related_name="channels", ) # M2M to Stream now in the same file streams = models.ManyToManyField( - Stream, - blank=True, - through='ChannelStream', - related_name='channels' + Stream, blank=True, through="ChannelStream", related_name="channels" ) channel_group = models.ForeignKey( - 'ChannelGroup', + "ChannelGroup", on_delete=models.SET_NULL, null=True, blank=True, - related_name='channels', - help_text="Channel group this channel belongs to." + related_name="channels", + help_text="Channel group this channel belongs to.", ) tvg_id = models.CharField(max_length=255, blank=True, null=True) + tvc_guide_stationid = models.CharField(max_length=255, blank=True, null=True) + epg_data = models.ForeignKey( EPGData, on_delete=models.SET_NULL, null=True, blank=True, - related_name='channels' + related_name="channels", ) stream_profile = models.ForeignKey( @@ -250,16 +292,41 @@ class Channel(models.Model): on_delete=models.SET_NULL, null=True, blank=True, - related_name='channels' + related_name="channels", ) - uuid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True, db_index=True) + uuid = models.UUIDField( + default=uuid.uuid4, editable=False, unique=True, db_index=True + ) + + user_level = models.IntegerField(default=0) + + auto_created = models.BooleanField( + default=False, + help_text="Whether this channel was automatically created via M3U auto channel sync" + ) + auto_created_by = models.ForeignKey( + "m3u.M3UAccount", + on_delete=models.SET_NULL, + null=True, + blank=True, + related_name="auto_created_channels", + help_text="The M3U account that auto-created this channel" + ) + + created_at = models.DateTimeField( + auto_now_add=True, + help_text="Timestamp when this channel was created" + ) + updated_at = models.DateTimeField( + auto_now=True, + help_text="Timestamp when this channel was last updated" + ) def clean(self): # Enforce unique channel_number within a given group existing = Channel.objects.filter( - channel_number=self.channel_number, - channel_group=self.channel_group + channel_number=self.channel_number, channel_group=self.channel_group ).exclude(id=self.id) if existing.exists(): raise ValidationError( @@ -271,7 +338,7 @@ class Channel(models.Model): @classmethod def get_next_available_channel_number(cls, starting_from=1): - used_numbers = set(cls.objects.all().values_list('channel_number', flat=True)) + used_numbers = set(cls.objects.all().values_list("channel_number", flat=True)) n = starting_from while n in used_numbers: n += 1 @@ -281,7 +348,9 @@ class Channel(models.Model): def get_stream_profile(self): stream_profile = self.stream_profile if not 
stream_profile: - stream_profile = StreamProfile.objects.get(id=CoreSettings.get_default_stream_profile_id()) + stream_profile = StreamProfile.objects.get( + id=CoreSettings.get_default_stream_profile_id() + ) return stream_profile @@ -311,44 +380,55 @@ class Channel(models.Model): profile_id = int(profile_id_bytes) return stream_id, profile_id, None except (ValueError, TypeError): - logger.debug(f"Invalid profile ID retrieved from Redis: {profile_id_bytes}") + logger.debug( + f"Invalid profile ID retrieved from Redis: {profile_id_bytes}" + ) except (ValueError, TypeError): - logger.debug(f"Invalid stream ID retrieved from Redis: {stream_id_bytes}") + logger.debug( + f"Invalid stream ID retrieved from Redis: {stream_id_bytes}" + ) # No existing active stream, attempt to assign a new one has_streams_but_maxed_out = False has_active_profiles = False # Iterate through channel streams and their profiles - for stream in self.streams.all().order_by('channelstream__order'): + for stream in self.streams.all().order_by("channelstream__order"): # Retrieve the M3U account associated with the stream. m3u_account = stream.m3u_account if not m3u_account: logger.debug(f"Stream {stream.id} has no M3U account") continue - - m3u_profiles = m3u_account.profiles.all() - default_profile = next((obj for obj in m3u_profiles if obj.is_default), None) - - if not default_profile: - logger.debug(f"M3U account {m3u_account.id} has no default profile") + if m3u_account.is_active == False: + logger.debug(f"M3U account {m3u_account.id} is inactive, skipping.") continue - profiles = [default_profile] + [obj for obj in m3u_profiles if not obj.is_default] + m3u_profiles = m3u_account.profiles.filter(is_active=True) + default_profile = next( + (obj for obj in m3u_profiles if obj.is_default), None + ) + + if not default_profile: + logger.debug(f"M3U account {m3u_account.id} has no active default profile") + continue + + profiles = [default_profile] + [ + obj for obj in m3u_profiles if not obj.is_default + ] for profile in profiles: - # Skip inactive profiles - if not profile.is_active: - logger.debug(f"Skipping inactive profile {profile.id}") - continue - has_active_profiles = True profile_connections_key = f"profile_connections:{profile.id}" - current_connections = int(redis_client.get(profile_connections_key) or 0) + current_connections = int( + redis_client.get(profile_connections_key) or 0 + ) # Check if profile has available slots (or unlimited connections) - if profile.max_streams == 0 or current_connections < profile.max_streams: + if ( + profile.max_streams == 0 + or current_connections < profile.max_streams + ): # Start a new stream redis_client.set(f"channel_stream:{self.id}", stream.id) redis_client.set(f"stream_profile:{stream.id}", profile.id) @@ -357,17 +437,23 @@ class Channel(models.Model): if profile.max_streams > 0: redis_client.incr(profile_connections_key) - return stream.id, profile.id, None # Return newly assigned stream and matched profile + return ( + stream.id, + profile.id, + None, + ) # Return newly assigned stream and matched profile else: # This profile is at max connections has_streams_but_maxed_out = True - logger.debug(f"Profile {profile.id} at max connections: {current_connections}/{profile.max_streams}") + logger.debug( + f"Profile {profile.id} at max connections: {current_connections}/{profile.max_streams}" + ) # No available streams - determine specific reason if has_streams_but_maxed_out: - error_reason = "All M3U profiles have reached maximum connection limits" + error_reason = "All 
active M3U profiles have reached maximum connection limits" elif has_active_profiles: - error_reason = "No compatible profile found for any assigned stream" + error_reason = "No compatible active profile found for any assigned stream" else: error_reason = "No active profiles found for any assigned stream" @@ -387,7 +473,9 @@ class Channel(models.Model): redis_client.delete(f"channel_stream:{self.id}") # Remove active stream stream_id = int(stream_id) - logger.debug(f"Found stream ID {stream_id} associated with channel stream {self.id}") + logger.debug( + f"Found stream ID {stream_id} associated with channel stream {self.id}" + ) # Get the matched profile for cleanup profile_id = redis_client.get(f"stream_profile:{stream_id}") @@ -398,7 +486,9 @@ class Channel(models.Model): redis_client.delete(f"stream_profile:{stream_id}") # Remove profile association profile_id = int(profile_id) - logger.debug(f"Found profile ID {profile_id} associated with stream {stream_id}") + logger.debug( + f"Found profile ID {profile_id} associated with stream {stream_id}" + ) profile_connections_key = f"profile_connections:{profile_id}" @@ -451,20 +541,26 @@ class Channel(models.Model): # Increment connection count for new profile new_profile_connections_key = f"profile_connections:{new_profile_id}" redis_client.incr(new_profile_connections_key) - logger.info(f"Updated stream {stream_id} profile from {current_profile_id} to {new_profile_id}") + logger.info( + f"Updated stream {stream_id} profile from {current_profile_id} to {new_profile_id}" + ) return True class ChannelProfile(models.Model): name = models.CharField(max_length=100, unique=True) + class ChannelProfileMembership(models.Model): channel_profile = models.ForeignKey(ChannelProfile, on_delete=models.CASCADE) channel = models.ForeignKey(Channel, on_delete=models.CASCADE) - enabled = models.BooleanField(default=True) # Track if the channel is enabled for this group + enabled = models.BooleanField( + default=True + ) # Track if the channel is enabled for this group class Meta: - unique_together = ('channel_profile', 'channel') + unique_together = ("channel_profile", "channel") + class ChannelStream(models.Model): channel = models.ForeignKey(Channel, on_delete=models.CASCADE) @@ -472,26 +568,45 @@ class ChannelStream(models.Model): order = models.PositiveIntegerField(default=0) # Ordering field class Meta: - ordering = ['order'] # Ensure streams are retrieved in order + ordering = ["order"] # Ensure streams are retrieved in order constraints = [ - models.UniqueConstraint(fields=['channel', 'stream'], name='unique_channel_stream') + models.UniqueConstraint( + fields=["channel", "stream"], name="unique_channel_stream" + ) ] + class ChannelGroupM3UAccount(models.Model): channel_group = models.ForeignKey( - ChannelGroup, - on_delete=models.CASCADE, - related_name='m3u_account' + ChannelGroup, on_delete=models.CASCADE, related_name="m3u_accounts" ) m3u_account = models.ForeignKey( - M3UAccount, - on_delete=models.CASCADE, - related_name='channel_group' + M3UAccount, on_delete=models.CASCADE, related_name="channel_group" ) + custom_properties = models.JSONField(default=dict, blank=True, null=True) enabled = models.BooleanField(default=True) + auto_channel_sync = models.BooleanField( + default=False, + help_text='Automatically create/delete channels to match streams in this group' + ) + auto_sync_channel_start = models.FloatField( + null=True, + blank=True, + help_text='Starting channel number for auto-created channels in this group' + ) + last_seen = 
models.DateTimeField( + default=datetime.now, + db_index=True, + help_text='Last time this group was seen in the M3U source during a refresh' + ) + is_stale = models.BooleanField( + default=False, + db_index=True, + help_text='Whether this group relationship is stale (not seen in recent refresh, pending deletion)' + ) class Meta: - unique_together = ('channel_group', 'm3u_account') + unique_together = ("channel_group", "m3u_account") def __str__(self): return f"{self.channel_group.name} - {self.m3u_account.name} (Enabled: {self.enabled})" @@ -504,12 +619,47 @@ class Logo(models.Model): def __str__(self): return self.name + class Recording(models.Model): - channel = models.ForeignKey("Channel", on_delete=models.CASCADE, related_name="recordings") + channel = models.ForeignKey( + "Channel", on_delete=models.CASCADE, related_name="recordings" + ) start_time = models.DateTimeField() end_time = models.DateTimeField() task_id = models.CharField(max_length=255, null=True, blank=True) - custom_properties = models.TextField(null=True, blank=True) + custom_properties = models.JSONField(default=dict, blank=True, null=True) def __str__(self): return f"{self.channel.name} - {self.start_time} to {self.end_time}" + + +class RecurringRecordingRule(models.Model): + """Rule describing a recurring manual DVR schedule.""" + + channel = models.ForeignKey( + "Channel", + on_delete=models.CASCADE, + related_name="recurring_rules", + ) + days_of_week = models.JSONField(default=list) + start_time = models.TimeField() + end_time = models.TimeField() + enabled = models.BooleanField(default=True) + name = models.CharField(max_length=255, blank=True) + start_date = models.DateField(null=True, blank=True) + end_date = models.DateField(null=True, blank=True) + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + + class Meta: + ordering = ["channel", "start_time"] + + def __str__(self): + channel_name = getattr(self.channel, "name", str(self.channel_id)) + return f"Recurring rule for {channel_name}" + + def cleaned_days(self): + try: + return sorted({int(d) for d in (self.days_of_week or []) if 0 <= int(d) <= 6}) + except Exception: + return [] diff --git a/apps/channels/serializers.py b/apps/channels/serializers.py index 69f25286..c1919e24 100644 --- a/apps/channels/serializers.py +++ b/apps/channels/serializers.py @@ -1,107 +1,234 @@ +import json +from datetime import datetime + from rest_framework import serializers -from .models import Stream, Channel, ChannelGroup, ChannelStream, ChannelGroupM3UAccount, Logo, ChannelProfile, ChannelProfileMembership, Recording +from .models import ( + Stream, + Channel, + ChannelGroup, + ChannelStream, + ChannelGroupM3UAccount, + Logo, + ChannelProfile, + ChannelProfileMembership, + Recording, + RecurringRecordingRule, +) from apps.epg.serializers import EPGDataSerializer from core.models import StreamProfile from apps.epg.models import EPGData from django.urls import reverse from rest_framework import serializers from django.utils import timezone +from core.utils import validate_flexible_url + class LogoSerializer(serializers.ModelSerializer): cache_url = serializers.SerializerMethodField() + channel_count = serializers.SerializerMethodField() + is_used = serializers.SerializerMethodField() + channel_names = serializers.SerializerMethodField() class Meta: model = Logo - fields = ['id', 'name', 'url', 'cache_url'] + fields = ["id", "name", "url", "cache_url", "channel_count", "is_used", "channel_names"] + + def validate_url(self, 
value): + """Validate that the URL is unique for creation or update""" + if self.instance and self.instance.url == value: + return value + + if Logo.objects.filter(url=value).exists(): + raise serializers.ValidationError("A logo with this URL already exists.") + + return value + + def create(self, validated_data): + """Handle logo creation with proper URL validation""" + return Logo.objects.create(**validated_data) + + def update(self, instance, validated_data): + """Handle logo updates""" + for attr, value in validated_data.items(): + setattr(instance, attr, value) + instance.save() + return instance def get_cache_url(self, obj): # return f"/api/channels/logos/{obj.id}/cache/" - request = self.context.get('request') + request = self.context.get("request") if request: - return request.build_absolute_uri(reverse('api:channels:logo-cache', args=[obj.id])) - return reverse('api:channels:logo-cache', args=[obj.id]) + return request.build_absolute_uri( + reverse("api:channels:logo-cache", args=[obj.id]) + ) + return reverse("api:channels:logo-cache", args=[obj.id]) + + def get_channel_count(self, obj): + """Get the number of channels using this logo""" + return obj.channels.count() + + def get_is_used(self, obj): + """Check if this logo is used by any channels""" + return obj.channels.exists() + + def get_channel_names(self, obj): + """Get the names of channels using this logo (limited to first 5)""" + names = [] + + # Get channel names + channels = obj.channels.all()[:5] + for channel in channels: + names.append(f"Channel: {channel.name}") + + # Calculate total count for "more" message + total_count = self.get_channel_count(obj) + if total_count > 5: + names.append(f"...and {total_count - 5} more") + + return names + # # Stream # class StreamSerializer(serializers.ModelSerializer): + url = serializers.CharField( + required=False, + allow_blank=True, + allow_null=True, + validators=[validate_flexible_url] + ) stream_profile_id = serializers.PrimaryKeyRelatedField( queryset=StreamProfile.objects.all(), - source='stream_profile', + source="stream_profile", allow_null=True, - required=False + required=False, ) - read_only_fields = ['is_custom', 'm3u_account', 'stream_hash'] + read_only_fields = ["is_custom", "m3u_account", "stream_hash"] class Meta: model = Stream fields = [ - 'id', - 'name', - 'url', - 'm3u_account', # Uncomment if using M3U fields - 'logo_url', - 'tvg_id', - 'local_file', - 'current_viewers', - 'updated_at', - 'stream_profile_id', - 'is_custom', - 'channel_group', - 'stream_hash', + "id", + "name", + "url", + "m3u_account", # Uncomment if using M3U fields + "logo_url", + "tvg_id", + "local_file", + "current_viewers", + "updated_at", + "last_seen", + "is_stale", + "stream_profile_id", + "is_custom", + "channel_group", + "stream_hash", + "stream_stats", + "stream_stats_updated_at", ] def get_fields(self): fields = super().get_fields() # Unable to edit specific properties if this stream was created from an M3U account - if self.instance and getattr(self.instance, 'm3u_account', None) and not self.instance.is_custom: - fields['id'].read_only = True - fields['name'].read_only = True - fields['url'].read_only = True - fields['m3u_account'].read_only = True - fields['tvg_id'].read_only = True - fields['channel_group'].read_only = True - + if ( + self.instance + and getattr(self.instance, "m3u_account", None) + and not self.instance.is_custom + ): + fields["id"].read_only = True + fields["name"].read_only = True + fields["url"].read_only = True + fields["m3u_account"].read_only = True + 
fields["tvg_id"].read_only = True + fields["channel_group"].read_only = True return fields +class ChannelGroupM3UAccountSerializer(serializers.ModelSerializer): + m3u_accounts = serializers.IntegerField(source="m3u_accounts.id", read_only=True) + enabled = serializers.BooleanField() + auto_channel_sync = serializers.BooleanField(default=False) + auto_sync_channel_start = serializers.FloatField(allow_null=True, required=False) + custom_properties = serializers.JSONField(required=False) + + class Meta: + model = ChannelGroupM3UAccount + fields = ["m3u_accounts", "channel_group", "enabled", "auto_channel_sync", "auto_sync_channel_start", "custom_properties", "is_stale", "last_seen"] + + def to_representation(self, instance): + data = super().to_representation(instance) + + custom_props = instance.custom_properties or {} + + return data + + def to_internal_value(self, data): + # Accept both dict and JSON string for custom_properties (for backward compatibility) + val = data.get("custom_properties") + if isinstance(val, str): + try: + data["custom_properties"] = json.loads(val) + except Exception: + pass + + return super().to_internal_value(data) + # # Channel Group # class ChannelGroupSerializer(serializers.ModelSerializer): + channel_count = serializers.SerializerMethodField() + m3u_account_count = serializers.SerializerMethodField() + m3u_accounts = ChannelGroupM3UAccountSerializer( + many=True, + read_only=True + ) + class Meta: model = ChannelGroup - fields = ['id', 'name'] + fields = ["id", "name", "channel_count", "m3u_account_count", "m3u_accounts"] + + def get_channel_count(self, obj): + """Get count of channels in this group""" + return obj.channels.count() + + def get_m3u_account_count(self, obj): + """Get count of M3U accounts associated with this group""" + return obj.m3u_accounts.count() + class ChannelProfileSerializer(serializers.ModelSerializer): channels = serializers.SerializerMethodField() class Meta: model = ChannelProfile - fields = ['id', 'name', 'channels'] + fields = ["id", "name", "channels"] def get_channels(self, obj): - memberships = ChannelProfileMembership.objects.filter(channel_profile=obj, enabled=True) - return [ - membership.channel.id - for membership in memberships - ] + memberships = ChannelProfileMembership.objects.filter( + channel_profile=obj, enabled=True + ) + return [membership.channel.id for membership in memberships] + class ChannelProfileMembershipSerializer(serializers.ModelSerializer): class Meta: model = ChannelProfileMembership - fields = ['channel', 'enabled'] + fields = ["channel", "enabled"] + class ChanneProfilelMembershipUpdateSerializer(serializers.Serializer): channel_id = serializers.IntegerField() # Ensure channel_id is an integer enabled = serializers.BooleanField() + class BulkChannelProfileMembershipSerializer(serializers.Serializer): channels = serializers.ListField( child=ChanneProfilelMembershipUpdateSerializer(), # Use the nested serializer - allow_empty=False + allow_empty=False, ) def validate_channels(self, value): @@ -109,16 +236,20 @@ class BulkChannelProfileMembershipSerializer(serializers.Serializer): raise serializers.ValidationError("At least one channel must be provided.") return value + # # Channel # class ChannelSerializer(serializers.ModelSerializer): # Show nested group data, or ID - channel_number = serializers.IntegerField(allow_null=True, required=False) + # Ensure channel_number is explicitly typed as FloatField and properly validated + channel_number = serializers.FloatField( + allow_null=True, + 
required=False, + error_messages={"invalid": "Channel number must be a valid decimal number."}, + ) channel_group_id = serializers.PrimaryKeyRelatedField( - queryset=ChannelGroup.objects.all(), - source="channel_group", - required=False + queryset=ChannelGroup.objects.all(), source="channel_group", required=False ) epg_data_id = serializers.PrimaryKeyRelatedField( queryset=EPGData.objects.all(), @@ -129,64 +260,88 @@ class ChannelSerializer(serializers.ModelSerializer): stream_profile_id = serializers.PrimaryKeyRelatedField( queryset=StreamProfile.objects.all(), - source='stream_profile', - allow_null=True, - required=False - ) - - streams = serializers.PrimaryKeyRelatedField(queryset=Stream.objects.all(), many=True, required=False) - - logo_id = serializers.PrimaryKeyRelatedField( - queryset=Logo.objects.all(), - source='logo', + source="stream_profile", allow_null=True, required=False, ) + streams = serializers.PrimaryKeyRelatedField( + queryset=Stream.objects.all(), many=True, required=False + ) + + logo_id = serializers.PrimaryKeyRelatedField( + queryset=Logo.objects.all(), + source="logo", + allow_null=True, + required=False, + ) + + auto_created_by_name = serializers.SerializerMethodField() + class Meta: model = Channel fields = [ - 'id', - 'channel_number', - 'name', - 'channel_group_id', - 'tvg_id', - 'epg_data_id', - 'streams', - 'stream_profile_id', - 'uuid', - 'logo_id', + "id", + "channel_number", + "name", + "channel_group_id", + "tvg_id", + "tvc_guide_stationid", + "epg_data_id", + "streams", + "stream_profile_id", + "uuid", + "logo_id", + "user_level", + "auto_created", + "auto_created_by", + "auto_created_by_name", ] def to_representation(self, instance): - include_streams = self.context.get('include_streams', False) + include_streams = self.context.get("include_streams", False) if include_streams: - self.fields['streams'] = serializers.SerializerMethodField() - - return super().to_representation(instance) + self.fields["streams"] = serializers.SerializerMethodField() + return super().to_representation(instance) + else: + # Fix: For PATCH/PUT responses, ensure streams are ordered + representation = super().to_representation(instance) + if "streams" in representation: + representation["streams"] = list( + instance.streams.all() + .order_by("channelstream__order") + .values_list("id", flat=True) + ) + return representation def get_logo(self, obj): return LogoSerializer(obj.logo).data def get_streams(self, obj): """Retrieve ordered stream IDs for GET requests.""" - return StreamSerializer(obj.streams.all().order_by('channelstream__order'), many=True).data + return StreamSerializer( + obj.streams.all().order_by("channelstream__order"), many=True + ).data def create(self, validated_data): - streams = validated_data.pop('streams', []) - channel_number = validated_data.pop('channel_number', Channel.get_next_available_channel_number()) + streams = validated_data.pop("streams", []) + channel_number = validated_data.pop( + "channel_number", Channel.get_next_available_channel_number() + ) validated_data["channel_number"] = channel_number channel = Channel.objects.create(**validated_data) # Add streams in the specified order for index, stream in enumerate(streams): - ChannelStream.objects.create(channel=channel, stream_id=stream.id, order=index) + ChannelStream.objects.create( + channel=channel, stream_id=stream.id, order=index + ) return channel def update(self, instance, validated_data): - streams = validated_data.pop('streams', None) + streams = validated_data.pop("streams", None) # 
Update standard fields for attr, value in validated_data.items(): @@ -197,8 +352,7 @@ class ChannelSerializer(serializers.ModelSerializer): if streams is not None: # Normalize stream IDs normalized_ids = [ - stream.id if hasattr(stream, "id") else stream - for stream in streams + stream.id if hasattr(stream, "id") else stream for stream in streams ] print(normalized_ids) @@ -225,40 +379,85 @@ class ChannelSerializer(serializers.ModelSerializer): cs.save(update_fields=["order"]) else: ChannelStream.objects.create( - channel=instance, - stream_id=stream_id, - order=order + channel=instance, stream_id=stream_id, order=order ) return instance + def validate_channel_number(self, value): + """Ensure channel_number is properly processed as a float""" + if value is None: + return value + + try: + # Ensure it's processed as a float + return float(value) + except (ValueError, TypeError): + raise serializers.ValidationError( + "Channel number must be a valid decimal number." + ) def validate_stream_profile(self, value): """Handle special case where empty/0 values mean 'use default' (null)""" - if value == '0' or value == 0 or value == '' or value is None: + if value == "0" or value == 0 or value == "" or value is None: return None return value # PrimaryKeyRelatedField will handle the conversion to object -class ChannelGroupM3UAccountSerializer(serializers.ModelSerializer): - enabled = serializers.BooleanField() - - class Meta: - model = ChannelGroupM3UAccount - fields = ['id', 'channel_group', 'enabled'] - - # Optionally, if you only need the id of the ChannelGroup, you can customize it like this: - # channel_group = serializers.PrimaryKeyRelatedField(queryset=ChannelGroup.objects.all()) + def get_auto_created_by_name(self, obj): + """Get the name of the M3U account that auto-created this channel.""" + if obj.auto_created_by: + return obj.auto_created_by.name + return None class RecordingSerializer(serializers.ModelSerializer): class Meta: model = Recording - fields = '__all__' - read_only_fields = ['task_id'] + fields = "__all__" + read_only_fields = ["task_id"] def validate(self, data): - start_time = data.get('start_time') - end_time = data.get('end_time') + from core.models import CoreSettings + start_time = data.get("start_time") + end_time = data.get("end_time") + + if start_time and timezone.is_naive(start_time): + start_time = timezone.make_aware(start_time, timezone.get_current_timezone()) + data["start_time"] = start_time + if end_time and timezone.is_naive(end_time): + end_time = timezone.make_aware(end_time, timezone.get_current_timezone()) + data["end_time"] = end_time + + # If this is an EPG-based recording (program provided), apply global pre/post offsets + try: + cp = data.get("custom_properties") or {} + is_epg_based = isinstance(cp, dict) and isinstance(cp.get("program"), (dict,)) + except Exception: + is_epg_based = False + + if is_epg_based and start_time and end_time: + try: + pre_min = int(CoreSettings.get_dvr_pre_offset_minutes()) + except Exception: + pre_min = 0 + try: + post_min = int(CoreSettings.get_dvr_post_offset_minutes()) + except Exception: + post_min = 0 + from datetime import timedelta + try: + if pre_min and pre_min > 0: + start_time = start_time - timedelta(minutes=pre_min) + except Exception: + pass + try: + if post_min and post_min > 0: + end_time = end_time + timedelta(minutes=post_min) + except Exception: + pass + # write back adjusted times so scheduling uses them + data["start_time"] = start_time + data["end_time"] = end_time now = timezone.now() # 
timezone-aware current time @@ -267,8 +466,61 @@ class RecordingSerializer(serializers.ModelSerializer): if start_time < now: # Optional: Adjust start_time if it's in the past but end_time is in the future - data['start_time'] = now # or: timezone.now() + timedelta(seconds=1) - if end_time <= data['start_time']: + data["start_time"] = now # or: timezone.now() + timedelta(seconds=1) + if end_time <= data["start_time"]: raise serializers.ValidationError("End time must be after start time.") return data + + +class RecurringRecordingRuleSerializer(serializers.ModelSerializer): + class Meta: + model = RecurringRecordingRule + fields = "__all__" + read_only_fields = ["created_at", "updated_at"] + + def validate_days_of_week(self, value): + if not value: + raise serializers.ValidationError("Select at least one day of the week") + cleaned = [] + for entry in value: + try: + iv = int(entry) + except (TypeError, ValueError): + raise serializers.ValidationError("Days of week must be integers 0-6") + if iv < 0 or iv > 6: + raise serializers.ValidationError("Days of week must be between 0 (Monday) and 6 (Sunday)") + cleaned.append(iv) + return sorted(set(cleaned)) + + def validate(self, attrs): + start = attrs.get("start_time") or getattr(self.instance, "start_time", None) + end = attrs.get("end_time") or getattr(self.instance, "end_time", None) + start_date = attrs.get("start_date") if "start_date" in attrs else getattr(self.instance, "start_date", None) + end_date = attrs.get("end_date") if "end_date" in attrs else getattr(self.instance, "end_date", None) + if start_date is None: + existing_start = getattr(self.instance, "start_date", None) + if existing_start is None: + raise serializers.ValidationError("Start date is required") + if start_date and end_date and end_date < start_date: + raise serializers.ValidationError("End date must be on or after start date") + if end_date is None: + existing_end = getattr(self.instance, "end_date", None) + if existing_end is None: + raise serializers.ValidationError("End date is required") + if start and end and start_date and end_date: + start_dt = datetime.combine(start_date, start) + end_dt = datetime.combine(end_date, end) + if end_dt <= start_dt: + raise serializers.ValidationError("End datetime must be after start datetime") + elif start and end and end == start: + raise serializers.ValidationError("End time must be different from start time") + # Normalize empty strings to None for dates + if attrs.get("end_date") == "": + attrs["end_date"] = None + if attrs.get("start_date") == "": + attrs["start_date"] = None + return super().validate(attrs) + + def create(self, validated_data): + return super().create(validated_data) diff --git a/apps/channels/signals.py b/apps/channels/signals.py index f98c1c97..27b361ba 100644 --- a/apps/channels/signals.py +++ b/apps/channels/signals.py @@ -8,7 +8,7 @@ from .models import Channel, Stream, ChannelProfile, ChannelProfileMembership, R from apps.m3u.models import M3UAccount from apps.epg.tasks import parse_programs_for_tvg_id import logging, requests, time -from .tasks import run_recording +from .tasks import run_recording, prefetch_recording_artwork from django.utils.timezone import now, is_aware, make_aware from datetime import timedelta @@ -45,6 +45,20 @@ def set_default_m3u_account(sender, instance, **kwargs): else: raise ValueError("No default M3UAccount found.") +@receiver(post_save, sender=Stream) +def generate_custom_stream_hash(sender, instance, created, **kwargs): + """ + Generate a stable stream_hash for 
custom streams after creation. + Uses the stream's ID to ensure the hash never changes even if name/url is edited. + """ + if instance.is_custom and not instance.stream_hash and created: + import hashlib + # Use stream ID for a stable, unique hash that never changes + unique_string = f"custom_stream_{instance.id}" + instance.stream_hash = hashlib.sha256(unique_string.encode()).hexdigest() + # Use update to avoid triggering signals again + Stream.objects.filter(id=instance.id).update(stream_hash=instance.stream_hash) + @receiver(post_save, sender=Channel) def refresh_epg_programs(sender, instance, created, **kwargs): """ @@ -62,15 +76,6 @@ def refresh_epg_programs(sender, instance, created, **kwargs): logger.info(f"New channel {instance.id} ({instance.name}) created with EPG data, refreshing program data") parse_programs_for_tvg_id.delay(instance.epg_data.id) -@receiver(post_save, sender=Channel) -def add_new_channel_to_groups(sender, instance, created, **kwargs): - if created: - profiles = ChannelProfile.objects.all() - ChannelProfileMembership.objects.bulk_create([ - ChannelProfileMembership(channel_profile=profile, channel=instance) - for profile in profiles - ]) - @receiver(post_save, sender=ChannelProfile) def create_profile_memberships(sender, instance, created, **kwargs): if created: @@ -82,8 +87,9 @@ def create_profile_memberships(sender, instance, created, **kwargs): def schedule_recording_task(instance): eta = instance.start_time + # Pass recording_id first so task can persist metadata to the correct row task = run_recording.apply_async( - args=[instance.channel_id, str(instance.start_time), str(instance.end_time)], + args=[instance.id, instance.channel_id, str(instance.start_time), str(instance.end_time)], eta=eta ) return task.id @@ -132,6 +138,11 @@ def schedule_task_on_save(sender, instance, created, **kwargs): instance.save(update_fields=['task_id']) else: print("Start time is in the past. Not scheduling.") + # Kick off poster/artwork prefetch to enrich Upcoming cards + try: + prefetch_recording_artwork.apply_async(args=[instance.id], countdown=1) + except Exception as e: + print("Error scheduling artwork prefetch:", e) except Exception as e: import traceback print("Error in post_save signal:", e) diff --git a/apps/channels/tasks.py b/apps/channels/tasks.py index 88d040e8..b3e11251 100755 --- a/apps/channels/tasks.py +++ b/apps/channels/tasks.py @@ -1,15 +1,20 @@ # apps/channels/tasks.py import logging import os +import select import re import requests import time import json import subprocess -from datetime import datetime +import signal +from zoneinfo import ZoneInfo +from datetime import datetime, timedelta +import gc from celery import shared_task from django.utils.text import slugify +from rapidfuzz import fuzz from apps.channels.models import Channel from apps.epg.models import EPGData @@ -21,9 +26,108 @@ from asgiref.sync import async_to_sync from asgiref.sync import async_to_sync from channels.layers import get_channel_layer import tempfile +from urllib.parse import quote logger = logging.getLogger(__name__) +# PostgreSQL btree index has a limit of ~2704 bytes (1/3 of 8KB page size) +# We use 2000 as a safe maximum to account for multibyte characters +def validate_logo_url(logo_url, max_length=2000): + """ + Fast validation for logo URLs during bulk creation. + Returns None if URL is too long (would exceed PostgreSQL btree index limit), + original URL otherwise. + + PostgreSQL btree indexes have a maximum size of ~2704 bytes. 
URLs longer than + this cannot be indexed and would cause database errors. These are typically + base64-encoded images embedded in URLs. + """ + if logo_url and len(logo_url) > max_length: + logger.warning(f"Logo URL too long ({len(logo_url)} > {max_length}), skipping: {logo_url[:100]}...") + return None + return logo_url + +def send_epg_matching_progress(total_channels, matched_channels, current_channel_name="", stage="matching"): + """ + Send EPG matching progress via WebSocket + """ + try: + channel_layer = get_channel_layer() + if channel_layer: + progress_data = { + 'type': 'epg_matching_progress', + 'total': total_channels, + 'matched': len(matched_channels) if isinstance(matched_channels, list) else matched_channels, + 'remaining': total_channels - (len(matched_channels) if isinstance(matched_channels, list) else matched_channels), + 'current_channel': current_channel_name, + 'stage': stage, + 'progress_percent': round((len(matched_channels) if isinstance(matched_channels, list) else matched_channels) / total_channels * 100, 1) if total_channels > 0 else 0 + } + + async_to_sync(channel_layer.group_send)( + "updates", + { + "type": "update", + "data": { + "type": "epg_matching_progress", + **progress_data + } + } + ) + except Exception as e: + logger.warning(f"Failed to send EPG matching progress: {e}") + +# Lazy loading for ML models - only imported/loaded when needed +_ml_model_cache = { + 'sentence_transformer': None +} + +def get_sentence_transformer(): + """Lazy load the sentence transformer model only when needed""" + if _ml_model_cache['sentence_transformer'] is None: + try: + from sentence_transformers import SentenceTransformer + from sentence_transformers import util + + model_name = "sentence-transformers/all-MiniLM-L6-v2" + cache_dir = "/data/models" + + # Check environment variable to disable downloads + disable_downloads = os.environ.get('DISABLE_ML_DOWNLOADS', 'false').lower() == 'true' + + if disable_downloads: + # Check if model exists before attempting to load + hf_model_path = os.path.join(cache_dir, f"models--{model_name.replace('/', '--')}") + if not os.path.exists(hf_model_path): + logger.warning("ML model not found and downloads disabled (DISABLE_ML_DOWNLOADS=true). Skipping ML matching.") + return None, None + + # Ensure cache directory exists + os.makedirs(cache_dir, exist_ok=True) + + # Let sentence-transformers handle all cache detection and management + logger.info(f"Loading sentence transformer model (cache: {cache_dir})") + _ml_model_cache['sentence_transformer'] = SentenceTransformer( + model_name, + cache_folder=cache_dir + ) + + return _ml_model_cache['sentence_transformer'], util + except ImportError: + logger.warning("sentence-transformers not available - ML-enhanced matching disabled") + return None, None + except Exception as e: + logger.error(f"Failed to load sentence transformer: {e}") + return None, None + else: + from sentence_transformers import util + return _ml_model_cache['sentence_transformer'], util + +# ML matching thresholds (same as original script) +BEST_FUZZY_THRESHOLD = 85 +LOWER_FUZZY_THRESHOLD = 40 +EMBED_SIM_THRESHOLD = 0.65 + # Words we remove to help with fuzzy + embedding matching COMMON_EXTRANEOUS_WORDS = [ "tv", "channel", "network", "television", @@ -46,174 +150,1294 @@ def normalize_name(name: str) -> str: norm = name.lower() norm = re.sub(r"\[.*?\]", "", norm) + + # Extract and preserve important call signs from parentheses before removing them + # This captures call signs like (KVLY), (KING), (KARE), etc. 
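+    # Worked example (mirrors the debug sample used later in this patch): for
+    # name = "NBC 11 (KVLY) - Fargo" the regex below captures "KVLY", so
+    # preserved_call_sign becomes " kvly"; once the parenthesised text is removed
+    # the call sign is re-appended, leaving roughly "nbc 11 fargo kvly" after
+    # punctuation and COMMON_EXTRANEOUS_WORDS tokens are stripped.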
+ call_sign_match = re.search(r"\(([A-Z]{3,5})\)", name) + preserved_call_sign = "" + if call_sign_match: + preserved_call_sign = " " + call_sign_match.group(1).lower() + + # Now remove all parentheses content norm = re.sub(r"\(.*?\)", "", norm) + + # Add back the preserved call sign + norm = norm + preserved_call_sign + norm = re.sub(r"[^\w\s]", "", norm) tokens = norm.split() tokens = [t for t in tokens if t not in COMMON_EXTRANEOUS_WORDS] norm = " ".join(tokens).strip() return norm +def match_channels_to_epg(channels_data, epg_data, region_code=None, use_ml=True, send_progress=True): + """ + EPG matching logic that finds the best EPG matches for channels using + multiple matching strategies including fuzzy matching and ML models. + + Automatically uses conservative thresholds for bulk matching (multiple channels) + to avoid bad matches that create user cleanup work, and aggressive thresholds + for single channel matching where users specifically requested a match attempt. + """ + channels_to_update = [] + matched_channels = [] + total_channels = len(channels_data) + + # Send initial progress + if send_progress: + send_epg_matching_progress(total_channels, 0, stage="starting") + + # Try to get ML models if requested (but don't load yet - lazy loading) + st_model, util = None, None + epg_embeddings = None + ml_available = use_ml + + # Automatically determine matching strategy based on number of channels + is_bulk_matching = len(channels_data) > 1 + + # Adjust matching thresholds based on operation type + if is_bulk_matching: + # Conservative thresholds for bulk matching to avoid creating cleanup work + FUZZY_HIGH_CONFIDENCE = 90 # Only very high fuzzy scores + FUZZY_MEDIUM_CONFIDENCE = 70 # Higher threshold for ML enhancement + ML_HIGH_CONFIDENCE = 0.75 # Higher ML confidence required + ML_LAST_RESORT = 0.65 # More conservative last resort + FUZZY_LAST_RESORT_MIN = 50 # Higher fuzzy minimum for last resort + logger.info(f"Using conservative thresholds for bulk matching ({total_channels} channels)") + else: + # More aggressive thresholds for single channel matching (user requested specific match) + FUZZY_HIGH_CONFIDENCE = 85 # Original threshold + FUZZY_MEDIUM_CONFIDENCE = 40 # Original threshold + ML_HIGH_CONFIDENCE = 0.65 # Original threshold + ML_LAST_RESORT = 0.50 # Original desperate threshold + FUZZY_LAST_RESORT_MIN = 20 # Original minimum + logger.info("Using aggressive thresholds for single channel matching") # Process each channel + for index, chan in enumerate(channels_data): + normalized_tvg_id = chan.get("tvg_id", "") + fallback_name = chan["tvg_id"].strip() if chan["tvg_id"] else chan["name"] + + # Send progress update every 5 channels or for the first few + if send_progress and (index < 5 or index % 5 == 0 or index == total_channels - 1): + send_epg_matching_progress( + total_channels, + len(matched_channels), + current_channel_name=chan["name"][:50], # Truncate long names + stage="matching" + ) + normalized_tvg_id = chan.get("tvg_id", "") + fallback_name = chan["tvg_id"].strip() if chan["tvg_id"] else chan["name"] + + # Step 1: Exact TVG ID match + epg_by_tvg_id = next((epg for epg in epg_data if epg["tvg_id"] == normalized_tvg_id), None) + if normalized_tvg_id and epg_by_tvg_id: + chan["epg_data_id"] = epg_by_tvg_id["id"] + channels_to_update.append(chan) + matched_channels.append((chan['id'], fallback_name, epg_by_tvg_id["tvg_id"])) + logger.info(f"Channel {chan['id']} '{fallback_name}' => EPG found by exact tvg_id={epg_by_tvg_id['tvg_id']}") + continue + + # Step 2: 
Secondary TVG ID check (legacy compatibility) + if chan["tvg_id"]: + epg_match = [epg["id"] for epg in epg_data if epg["tvg_id"] == chan["tvg_id"]] + if epg_match: + chan["epg_data_id"] = epg_match[0] + channels_to_update.append(chan) + matched_channels.append((chan['id'], fallback_name, chan["tvg_id"])) + logger.info(f"Channel {chan['id']} '{chan['name']}' => EPG found by secondary tvg_id={chan['tvg_id']}") + continue + + # Step 2.5: Exact Gracenote ID match + normalized_gracenote_id = chan.get("gracenote_id", "") + if normalized_gracenote_id: + epg_by_gracenote_id = next((epg for epg in epg_data if epg["tvg_id"] == normalized_gracenote_id), None) + if epg_by_gracenote_id: + chan["epg_data_id"] = epg_by_gracenote_id["id"] + channels_to_update.append(chan) + matched_channels.append((chan['id'], fallback_name, f"gracenote:{epg_by_gracenote_id['tvg_id']}")) + logger.info(f"Channel {chan['id']} '{fallback_name}' => EPG found by exact gracenote_id={normalized_gracenote_id}") + continue + + # Step 3: Name-based fuzzy matching + if not chan["norm_chan"]: + logger.debug(f"Channel {chan['id']} '{chan['name']}' => empty after normalization, skipping") + continue + + best_score = 0 + best_epg = None + + # Debug: show what we're matching against + logger.debug(f"Fuzzy matching '{chan['norm_chan']}' against EPG entries...") + + # Find best fuzzy match + for row in epg_data: + if not row.get("norm_name"): + continue + + base_score = fuzz.ratio(chan["norm_chan"], row["norm_name"]) + bonus = 0 + + # Apply region-based bonus/penalty + if region_code and row.get("tvg_id"): + combined_text = row["tvg_id"].lower() + " " + row["name"].lower() + dot_regions = re.findall(r'\.([a-z]{2})', combined_text) + + if dot_regions: + if region_code in dot_regions: + bonus = 15 # Bigger bonus for matching region + else: + bonus = -15 # Penalty for different region + elif region_code in combined_text: + bonus = 10 + + score = base_score + bonus + + # Debug the best few matches + if score > 50: # Only show decent matches + logger.debug(f" EPG '{row['name']}' (norm: '{row['norm_name']}') => score: {score} (base: {base_score}, bonus: {bonus})") + + # When scores are equal, prefer higher priority EPG source + row_priority = row.get('epg_source_priority', 0) + best_priority = best_epg.get('epg_source_priority', 0) if best_epg else -1 + + if score > best_score or (score == best_score and row_priority > best_priority): + best_score = score + best_epg = row + + # Log the best score we found + if best_epg: + logger.info(f"Channel {chan['id']} '{chan['name']}' => best match: '{best_epg['name']}' (score: {best_score})") + else: + logger.debug(f"Channel {chan['id']} '{chan['name']}' => no EPG entries with valid norm_name found") + continue + + # High confidence match - accept immediately + if best_score >= FUZZY_HIGH_CONFIDENCE: + chan["epg_data_id"] = best_epg["id"] + channels_to_update.append(chan) + matched_channels.append((chan['id'], chan['name'], best_epg["tvg_id"])) + logger.info(f"Channel {chan['id']} '{chan['name']}' => matched tvg_id={best_epg['tvg_id']} (score={best_score})") + + # Medium confidence - use ML if available (lazy load models here) + elif best_score >= FUZZY_MEDIUM_CONFIDENCE and ml_available: + # Lazy load ML models only when we actually need them + if st_model is None: + st_model, util = get_sentence_transformer() + + # Lazy generate embeddings only when we actually need them + if epg_embeddings is None and st_model and any(row.get("norm_name") for row in epg_data): + try: + logger.info("Generating embeddings 
for EPG data using ML model (lazy loading)") + epg_embeddings = st_model.encode( + [row["norm_name"] for row in epg_data if row.get("norm_name")], + convert_to_tensor=True + ) + except Exception as e: + logger.warning(f"Failed to generate embeddings: {e}") + epg_embeddings = None + + if epg_embeddings is not None and st_model: + try: + # Generate embedding for this channel + chan_embedding = st_model.encode(chan["norm_chan"], convert_to_tensor=True) + + # Calculate similarity with all EPG embeddings + sim_scores = util.cos_sim(chan_embedding, epg_embeddings)[0] + top_index = int(sim_scores.argmax()) + top_value = float(sim_scores[top_index]) + + if top_value >= ML_HIGH_CONFIDENCE: + # Find the EPG entry that corresponds to this embedding index + epg_with_names = [epg for epg in epg_data if epg.get("norm_name")] + matched_epg = epg_with_names[top_index] + + chan["epg_data_id"] = matched_epg["id"] + channels_to_update.append(chan) + matched_channels.append((chan['id'], chan['name'], matched_epg["tvg_id"])) + logger.info(f"Channel {chan['id']} '{chan['name']}' => matched EPG tvg_id={matched_epg['tvg_id']} (fuzzy={best_score}, ML-sim={top_value:.2f})") + else: + logger.info(f"Channel {chan['id']} '{chan['name']}' => fuzzy={best_score}, ML-sim={top_value:.2f} < {ML_HIGH_CONFIDENCE}, trying last resort...") + + # Last resort: try ML with very low fuzzy threshold + if top_value >= ML_LAST_RESORT: # Dynamic last resort threshold + epg_with_names = [epg for epg in epg_data if epg.get("norm_name")] + matched_epg = epg_with_names[top_index] + + chan["epg_data_id"] = matched_epg["id"] + channels_to_update.append(chan) + matched_channels.append((chan['id'], chan['name'], matched_epg["tvg_id"])) + logger.info(f"Channel {chan['id']} '{chan['name']}' => LAST RESORT match EPG tvg_id={matched_epg['tvg_id']} (fuzzy={best_score}, ML-sim={top_value:.2f})") + else: + logger.info(f"Channel {chan['id']} '{chan['name']}' => even last resort ML-sim {top_value:.2f} < {ML_LAST_RESORT}, skipping") + + except Exception as e: + logger.warning(f"ML matching failed for channel {chan['id']}: {e}") + # Fall back to non-ML decision + logger.info(f"Channel {chan['id']} '{chan['name']}' => fuzzy score {best_score} below threshold, skipping") + + # Last resort: Try ML matching even with very low fuzzy scores + elif best_score >= FUZZY_LAST_RESORT_MIN and ml_available: + # Lazy load ML models for last resort attempts + if st_model is None: + st_model, util = get_sentence_transformer() + + # Lazy generate embeddings for last resort attempts + if epg_embeddings is None and st_model and any(row.get("norm_name") for row in epg_data): + try: + logger.info("Generating embeddings for EPG data using ML model (last resort lazy loading)") + epg_embeddings = st_model.encode( + [row["norm_name"] for row in epg_data if row.get("norm_name")], + convert_to_tensor=True + ) + except Exception as e: + logger.warning(f"Failed to generate embeddings for last resort: {e}") + epg_embeddings = None + + if epg_embeddings is not None and st_model: + try: + logger.info(f"Channel {chan['id']} '{chan['name']}' => trying ML as last resort (fuzzy={best_score})") + # Generate embedding for this channel + chan_embedding = st_model.encode(chan["norm_chan"], convert_to_tensor=True) + + # Calculate similarity with all EPG embeddings + sim_scores = util.cos_sim(chan_embedding, epg_embeddings)[0] + top_index = int(sim_scores.argmax()) + top_value = float(sim_scores[top_index]) + + if top_value >= ML_LAST_RESORT: # Dynamic threshold for desperate attempts + # Find 
the EPG entry that corresponds to this embedding index + epg_with_names = [epg for epg in epg_data if epg.get("norm_name")] + matched_epg = epg_with_names[top_index] + + chan["epg_data_id"] = matched_epg["id"] + channels_to_update.append(chan) + matched_channels.append((chan['id'], chan['name'], matched_epg["tvg_id"])) + logger.info(f"Channel {chan['id']} '{chan['name']}' => DESPERATE LAST RESORT match EPG tvg_id={matched_epg['tvg_id']} (fuzzy={best_score}, ML-sim={top_value:.2f})") + else: + logger.info(f"Channel {chan['id']} '{chan['name']}' => desperate last resort ML-sim {top_value:.2f} < {ML_LAST_RESORT}, giving up") + except Exception as e: + logger.warning(f"Last resort ML matching failed for channel {chan['id']}: {e}") + logger.info(f"Channel {chan['id']} '{chan['name']}' => best fuzzy score={best_score} < {FUZZY_MEDIUM_CONFIDENCE}, giving up") + else: + # No ML available or very low fuzzy score + logger.info(f"Channel {chan['id']} '{chan['name']}' => best fuzzy score={best_score} < {FUZZY_MEDIUM_CONFIDENCE}, no ML fallback available") + + # Clean up ML models from memory after matching (infrequent operation) + if _ml_model_cache['sentence_transformer'] is not None: + logger.info("Cleaning up ML models from memory") + _ml_model_cache['sentence_transformer'] = None + gc.collect() + + # Send final progress update + if send_progress: + send_epg_matching_progress( + total_channels, + len(matched_channels), + stage="completed" + ) + + return { + "channels_to_update": channels_to_update, + "matched_channels": matched_channels + } + @shared_task def match_epg_channels(): """ - Goes through all Channels and tries to find a matching EPGData row by: - 1) If channel.tvg_id is valid in EPGData, skip. - 2) If channel has a tvg_id but not found in EPGData, attempt direct EPGData lookup. - 3) Otherwise, perform name-based fuzzy matching with optional region-based bonus. - 4) If a match is found, we set channel.tvg_id - 5) Summarize and log results. + Uses integrated EPG matching instead of external script. + Provides the same functionality with better performance and maintainability. 
""" - logger.info("Starting EPG matching logic...") - - # Attempt to retrieve a "preferred-region" if configured try: - region_obj = CoreSettings.objects.get(key="preferred-region") - region_code = region_obj.value.strip().lower() - except CoreSettings.DoesNotExist: - region_code = None + logger.info("Starting integrated EPG matching...") - matched_channels = [] - channels_to_update = [] + # Get region preference + try: + region_obj = CoreSettings.objects.get(key="preferred-region") + region_code = region_obj.value.strip().lower() + except CoreSettings.DoesNotExist: + region_code = None - # Get channels that don't have EPG data assigned - channels_without_epg = Channel.objects.filter(epg_data__isnull=True) - logger.info(f"Found {channels_without_epg.count()} channels without EPG data") + # Get channels that don't have EPG data assigned + channels_without_epg = Channel.objects.filter(epg_data__isnull=True) + logger.info(f"Found {channels_without_epg.count()} channels without EPG data") - channels_json = [] - for channel in channels_without_epg: - # Normalize TVG ID - strip whitespace and convert to lowercase - normalized_tvg_id = channel.tvg_id.strip().lower() if channel.tvg_id else "" - if normalized_tvg_id: - logger.info(f"Processing channel {channel.id} '{channel.name}' with TVG ID='{normalized_tvg_id}'") + channels_data = [] + for channel in channels_without_epg: + normalized_tvg_id = channel.tvg_id.strip().lower() if channel.tvg_id else "" + normalized_gracenote_id = channel.tvc_guide_stationid.strip().lower() if channel.tvc_guide_stationid else "" + channels_data.append({ + "id": channel.id, + "name": channel.name, + "tvg_id": normalized_tvg_id, + "original_tvg_id": channel.tvg_id, + "gracenote_id": normalized_gracenote_id, + "original_gracenote_id": channel.tvc_guide_stationid, + "fallback_name": normalized_tvg_id if normalized_tvg_id else channel.name, + "norm_chan": normalize_name(channel.name) # Always use channel name for fuzzy matching! 
+ }) - channels_json.append({ - "id": channel.id, - "name": channel.name, - "tvg_id": normalized_tvg_id, # Use normalized TVG ID - "original_tvg_id": channel.tvg_id, # Keep original for reference - "fallback_name": normalized_tvg_id if normalized_tvg_id else channel.name, - "norm_chan": normalize_name(normalized_tvg_id if normalized_tvg_id else channel.name) - }) + # Get all EPG data from active sources, ordered by source priority (highest first) so we prefer higher priority matches + epg_data = [] + for epg in EPGData.objects.select_related('epg_source').filter(epg_source__is_active=True): + normalized_tvg_id = epg.tvg_id.strip().lower() if epg.tvg_id else "" + epg_data.append({ + 'id': epg.id, + 'tvg_id': normalized_tvg_id, + 'original_tvg_id': epg.tvg_id, + 'name': epg.name, + 'norm_name': normalize_name(epg.name), + 'epg_source_id': epg.epg_source.id if epg.epg_source else None, + 'epg_source_priority': epg.epg_source.priority if epg.epg_source else 0, + }) - # Similarly normalize EPG data TVG IDs - epg_json = [] - for epg in EPGData.objects.all(): - normalized_tvg_id = epg.tvg_id.strip().lower() if epg.tvg_id else "" - epg_json.append({ - 'id': epg.id, - 'tvg_id': normalized_tvg_id, # Use normalized TVG ID - 'original_tvg_id': epg.tvg_id, # Keep original for reference - 'name': epg.name, - 'norm_name': normalize_name(epg.name), - 'epg_source_id': epg.epg_source.id if epg.epg_source else None, - }) + # Sort EPG data by source priority (highest first) so we prefer higher priority matches + epg_data.sort(key=lambda x: x['epg_source_priority'], reverse=True) - # Log available EPG data TVG IDs for debugging - unique_epg_tvg_ids = set(e['tvg_id'] for e in epg_json if e['tvg_id']) - logger.info(f"Available EPG TVG IDs: {', '.join(sorted(unique_epg_tvg_ids))}") + logger.info(f"Processing {len(channels_data)} channels against {len(epg_data)} EPG entries (from active sources only)") - payload = { - "channels": channels_json, - "epg_data": epg_json, - "region_code": region_code, - } + # Run EPG matching with progress updates - automatically uses conservative thresholds for bulk operations + result = match_channels_to_epg(channels_data, epg_data, region_code, use_ml=True, send_progress=True) + channels_to_update_dicts = result["channels_to_update"] + matched_channels = result["matched_channels"] - with tempfile.NamedTemporaryFile(delete=False) as temp_file: - temp_file.write(json.dumps(payload).encode('utf-8')) - temp_file_path = temp_file.name + # Update channels in database + if channels_to_update_dicts: + channel_ids = [d["id"] for d in channels_to_update_dicts] + channels_qs = Channel.objects.filter(id__in=channel_ids) + channels_list = list(channels_qs) - process = subprocess.Popen( - ['python', '/app/scripts/epg_match.py', temp_file_path], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True - ) + # Create mapping from channel_id to epg_data_id + epg_mapping = {d["id"]: d["epg_data_id"] for d in channels_to_update_dicts} - # Log stderr in real-time - for line in iter(process.stderr.readline, ''): - if line: - logger.info(line.strip()) + # Update each channel with matched EPG data + for channel_obj in channels_list: + epg_data_id = epg_mapping.get(channel_obj.id) + if epg_data_id: + try: + epg_data_obj = EPGData.objects.get(id=epg_data_id) + channel_obj.epg_data = epg_data_obj + except EPGData.DoesNotExist: + logger.error(f"EPG data {epg_data_id} not found for channel {channel_obj.id}") - process.stderr.close() - stdout, stderr = process.communicate() + # Bulk update all channels 
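+            # Note: bulk_update() writes the epg_data foreign keys assigned above in bulk
+            # and, unlike save(), does not emit post_save signals, so the
+            # refresh_epg_programs receiver in signals.py is not re-triggered here.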
+ Channel.objects.bulk_update(channels_list, ["epg_data"]) - os.remove(temp_file_path) + total_matched = len(matched_channels) + if total_matched: + logger.info(f"Match Summary: {total_matched} channel(s) matched.") + for (cid, cname, tvg) in matched_channels: + logger.info(f" - Channel ID={cid}, Name='{cname}' => tvg_id='{tvg}'") + else: + logger.info("No new channels were matched.") - if process.returncode != 0: - return f"Failed to process EPG matching: {stderr}" + logger.info("Finished integrated EPG matching.") - result = json.loads(stdout) - # This returns lists of dicts, not model objects - channels_to_update_dicts = result["channels_to_update"] - matched_channels = result["matched_channels"] + # Send WebSocket update + channel_layer = get_channel_layer() + associations = [ + {"channel_id": chan["id"], "epg_data_id": chan["epg_data_id"]} + for chan in channels_to_update_dicts + ] - # Convert your dict-based 'channels_to_update' into real Channel objects - if channels_to_update_dicts: - # Extract IDs of the channels that need updates - channel_ids = [d["id"] for d in channels_to_update_dicts] - - # Fetch them from DB - channels_qs = Channel.objects.filter(id__in=channel_ids) - channels_list = list(channels_qs) - - # Build a map from channel_id -> epg_data_id (or whatever fields you need) - epg_mapping = { - d["id"]: d["epg_data_id"] for d in channels_to_update_dicts - } - - # Populate each Channel object with the updated epg_data_id - for channel_obj in channels_list: - # The script sets 'epg_data_id' in the returned dict - # We either assign directly, or fetch the EPGData instance if needed. - channel_obj.epg_data_id = epg_mapping.get(channel_obj.id) - - # Now we have real model objects, so bulk_update will work - Channel.objects.bulk_update(channels_list, ["epg_data"]) - - total_matched = len(matched_channels) - if total_matched: - logger.info(f"Match Summary: {total_matched} channel(s) matched.") - for (cid, cname, tvg) in matched_channels: - logger.info(f" - Channel ID={cid}, Name='{cname}' => tvg_id='{tvg}'") - else: - logger.info("No new channels were matched.") - - logger.info("Finished EPG matching logic.") - - # Send update with additional information for refreshing UI - channel_layer = get_channel_layer() - associations = [ - {"channel_id": chan["id"], "epg_data_id": chan["epg_data_id"]} - for chan in channels_to_update_dicts - ] - - async_to_sync(channel_layer.group_send)( - 'updates', - { - 'type': 'update', - "data": { - "success": True, - "type": "epg_match", - "refresh_channels": True, # Flag to tell frontend to refresh channels - "matches_count": total_matched, - "message": f"EPG matching complete: {total_matched} channel(s) matched", - "associations": associations # Add the associations data + async_to_sync(channel_layer.group_send)( + 'updates', + { + 'type': 'update', + "data": { + "success": True, + "type": "epg_match", + "refresh_channels": True, + "matches_count": total_matched, + "message": f"EPG matching complete: {total_matched} channel(s) matched", + "associations": associations + } } - } - ) + ) - return f"Done. Matched {total_matched} channel(s)." + return f"Done. Matched {total_matched} channel(s)." 
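+        # This return still passes through the finally block below, so the lazily
+        # loaded sentence-transformer is released whether matching succeeds or raises.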
+ + finally: + # Clean up ML models from memory after bulk matching + if _ml_model_cache['sentence_transformer'] is not None: + logger.info("Cleaning up ML models from memory") + _ml_model_cache['sentence_transformer'] = None + + # Memory cleanup + gc.collect() + from core.utils import cleanup_memory + cleanup_memory(log_usage=True, force_collection=True) @shared_task -def run_recording(channel_id, start_time_str, end_time_str): +def match_selected_channels_epg(channel_ids): + """ + Match EPG data for only the specified selected channels. + Uses the same integrated EPG matching logic but processes only selected channels. + """ + try: + logger.info(f"Starting integrated EPG matching for {len(channel_ids)} selected channels...") + + # Get region preference + try: + region_obj = CoreSettings.objects.get(key="preferred-region") + region_code = region_obj.value.strip().lower() + except CoreSettings.DoesNotExist: + region_code = None + + # Get only the specified channels that don't have EPG data assigned + channels_without_epg = Channel.objects.filter( + id__in=channel_ids, + epg_data__isnull=True + ) + logger.info(f"Found {channels_without_epg.count()} selected channels without EPG data") + + if not channels_without_epg.exists(): + logger.info("No selected channels need EPG matching.") + + # Send WebSocket update + channel_layer = get_channel_layer() + async_to_sync(channel_layer.group_send)( + 'updates', + { + 'type': 'update', + "data": { + "success": True, + "type": "epg_match", + "refresh_channels": True, + "matches_count": 0, + "message": "No selected channels need EPG matching", + "associations": [] + } + } + ) + return "No selected channels needed EPG matching." + + channels_data = [] + for channel in channels_without_epg: + normalized_tvg_id = channel.tvg_id.strip().lower() if channel.tvg_id else "" + normalized_gracenote_id = channel.tvc_guide_stationid.strip().lower() if channel.tvc_guide_stationid else "" + channels_data.append({ + "id": channel.id, + "name": channel.name, + "tvg_id": normalized_tvg_id, + "original_tvg_id": channel.tvg_id, + "gracenote_id": normalized_gracenote_id, + "original_gracenote_id": channel.tvc_guide_stationid, + "fallback_name": normalized_tvg_id if normalized_tvg_id else channel.name, + "norm_chan": normalize_name(channel.name) + }) + + # Get all EPG data from active sources, ordered by source priority (highest first) so we prefer higher priority matches + epg_data = [] + for epg in EPGData.objects.select_related('epg_source').filter(epg_source__is_active=True): + normalized_tvg_id = epg.tvg_id.strip().lower() if epg.tvg_id else "" + epg_data.append({ + 'id': epg.id, + 'tvg_id': normalized_tvg_id, + 'original_tvg_id': epg.tvg_id, + 'name': epg.name, + 'norm_name': normalize_name(epg.name), + 'epg_source_id': epg.epg_source.id if epg.epg_source else None, + 'epg_source_priority': epg.epg_source.priority if epg.epg_source else 0, + }) + + # Sort EPG data by source priority (highest first) so we prefer higher priority matches + epg_data.sort(key=lambda x: x['epg_source_priority'], reverse=True) + + logger.info(f"Processing {len(channels_data)} selected channels against {len(epg_data)} EPG entries (from active sources only)") + + # Run EPG matching with progress updates - automatically uses appropriate thresholds + result = match_channels_to_epg(channels_data, epg_data, region_code, use_ml=True, send_progress=True) + channels_to_update_dicts = result["channels_to_update"] + matched_channels = result["matched_channels"] + + # Update channels in database + if 
channels_to_update_dicts: + channel_ids_to_update = [d["id"] for d in channels_to_update_dicts] + channels_qs = Channel.objects.filter(id__in=channel_ids_to_update) + channels_list = list(channels_qs) + + # Create mapping from channel_id to epg_data_id + epg_mapping = {d["id"]: d["epg_data_id"] for d in channels_to_update_dicts} + + # Update each channel with matched EPG data + for channel_obj in channels_list: + epg_data_id = epg_mapping.get(channel_obj.id) + if epg_data_id: + try: + epg_data_obj = EPGData.objects.get(id=epg_data_id) + channel_obj.epg_data = epg_data_obj + except EPGData.DoesNotExist: + logger.error(f"EPG data {epg_data_id} not found for channel {channel_obj.id}") + + # Bulk update all channels + Channel.objects.bulk_update(channels_list, ["epg_data"]) + + total_matched = len(matched_channels) + if total_matched: + logger.info(f"Selected Channel Match Summary: {total_matched} channel(s) matched.") + for (cid, cname, tvg) in matched_channels: + logger.info(f" - Channel ID={cid}, Name='{cname}' => tvg_id='{tvg}'") + else: + logger.info("No selected channels were matched.") + + logger.info("Finished integrated EPG matching for selected channels.") + + # Send WebSocket update + channel_layer = get_channel_layer() + associations = [ + {"channel_id": chan["id"], "epg_data_id": chan["epg_data_id"]} + for chan in channels_to_update_dicts + ] + + async_to_sync(channel_layer.group_send)( + 'updates', + { + 'type': 'update', + "data": { + "success": True, + "type": "epg_match", + "refresh_channels": True, + "matches_count": total_matched, + "message": f"EPG matching complete: {total_matched} selected channel(s) matched", + "associations": associations + } + } + ) + + return f"Done. Matched {total_matched} selected channel(s)." + + finally: + # Clean up ML models from memory after bulk matching + if _ml_model_cache['sentence_transformer'] is not None: + logger.info("Cleaning up ML models from memory") + _ml_model_cache['sentence_transformer'] = None + + # Memory cleanup + gc.collect() + from core.utils import cleanup_memory + cleanup_memory(log_usage=True, force_collection=True) + + +@shared_task +def match_single_channel_epg(channel_id): + """ + Try to match a single channel with EPG data using the integrated matching logic + that includes both fuzzy and ML-enhanced matching. Returns a dict with match status and message. 
+ """ + try: + from apps.channels.models import Channel + from apps.epg.models import EPGData + + logger.info(f"Starting integrated single channel EPG matching for channel ID {channel_id}") + + # Get the channel + try: + channel = Channel.objects.get(id=channel_id) + except Channel.DoesNotExist: + return {"matched": False, "message": "Channel not found"} + + # If channel already has EPG data, skip + if channel.epg_data: + return {"matched": False, "message": f"Channel '{channel.name}' already has EPG data assigned"} + + # Prepare single channel data for matching (same format as bulk matching) + normalized_tvg_id = channel.tvg_id.strip().lower() if channel.tvg_id else "" + normalized_gracenote_id = channel.tvc_guide_stationid.strip().lower() if channel.tvc_guide_stationid else "" + channel_data = { + "id": channel.id, + "name": channel.name, + "tvg_id": normalized_tvg_id, + "original_tvg_id": channel.tvg_id, + "gracenote_id": normalized_gracenote_id, + "original_gracenote_id": channel.tvc_guide_stationid, + "fallback_name": normalized_tvg_id if normalized_tvg_id else channel.name, + "norm_chan": normalize_name(channel.name) # Always use channel name for fuzzy matching! + } + + logger.info(f"Channel data prepared: name='{channel.name}', tvg_id='{normalized_tvg_id}', gracenote_id='{normalized_gracenote_id}', norm_chan='{channel_data['norm_chan']}'") + + # Debug: Test what the normalization does to preserve call signs + test_name = "NBC 11 (KVLY) - Fargo" # Example for testing + test_normalized = normalize_name(test_name) + logger.debug(f"DEBUG normalization example: '{test_name}' → '{test_normalized}' (call sign preserved)") + + # Get all EPG data for matching from active sources - must include norm_name field + # Ordered by source priority (highest first) so we prefer higher priority matches + epg_data_list = [] + for epg in EPGData.objects.select_related('epg_source').filter(epg_source__is_active=True, name__isnull=False).exclude(name=''): + normalized_epg_tvg_id = epg.tvg_id.strip().lower() if epg.tvg_id else "" + epg_data_list.append({ + 'id': epg.id, + 'tvg_id': normalized_epg_tvg_id, + 'original_tvg_id': epg.tvg_id, + 'name': epg.name, + 'norm_name': normalize_name(epg.name), + 'epg_source_id': epg.epg_source.id if epg.epg_source else None, + 'epg_source_priority': epg.epg_source.priority if epg.epg_source else 0, + }) + + # Sort EPG data by source priority (highest first) so we prefer higher priority matches + epg_data_list.sort(key=lambda x: x['epg_source_priority'], reverse=True) + + if not epg_data_list: + return {"matched": False, "message": "No EPG data available for matching (from active sources)"} + + logger.info(f"Matching single channel '{channel.name}' against {len(epg_data_list)} EPG entries") + + # Send progress for single channel matching + send_epg_matching_progress(1, 0, current_channel_name=channel.name, stage="matching") + + # Use the EPG matching function - automatically uses aggressive thresholds for single channel + result = match_channels_to_epg([channel_data], epg_data_list, send_progress=False) + channels_to_update = result.get("channels_to_update", []) + matched_channels = result.get("matched_channels", []) + + if channels_to_update: + # Find our channel in the results + channel_match = None + for update in channels_to_update: + if update["id"] == channel.id: + channel_match = update + break + + if channel_match: + # Apply the match to the channel + try: + epg_data = EPGData.objects.get(id=channel_match['epg_data_id']) + channel.epg_data = epg_data + 
channel.save(update_fields=["epg_data"]) + + # Find match details from matched_channels for better reporting + match_details = None + for match_info in matched_channels: + if match_info[0] == channel.id: # matched_channels format: (channel_id, channel_name, epg_info) + match_details = match_info + break + + success_msg = f"Channel '{channel.name}' matched with EPG '{epg_data.name}'" + if match_details: + success_msg += f" (matched via: {match_details[2]})" + + logger.info(success_msg) + + # Send completion progress for single channel + send_epg_matching_progress(1, 1, current_channel_name=channel.name, stage="completed") + + # Clean up ML models from memory after single channel matching + if _ml_model_cache['sentence_transformer'] is not None: + logger.info("Cleaning up ML models from memory") + _ml_model_cache['sentence_transformer'] = None + gc.collect() + + return { + "matched": True, + "message": success_msg, + "epg_name": epg_data.name, + "epg_id": epg_data.id + } + except EPGData.DoesNotExist: + return {"matched": False, "message": "Matched EPG data not found"} + + # No match found + # Send completion progress for single channel (failed) + send_epg_matching_progress(1, 0, current_channel_name=channel.name, stage="completed") + + # Clean up ML models from memory after single channel matching + if _ml_model_cache['sentence_transformer'] is not None: + logger.info("Cleaning up ML models from memory") + _ml_model_cache['sentence_transformer'] = None + gc.collect() + + return { + "matched": False, + "message": f"No suitable EPG match found for channel '{channel.name}'" + } + + except Exception as e: + logger.error(f"Error in integrated single channel EPG matching: {e}", exc_info=True) + + # Clean up ML models from memory even on error + if _ml_model_cache['sentence_transformer'] is not None: + logger.info("Cleaning up ML models from memory after error") + _ml_model_cache['sentence_transformer'] = None + gc.collect() + + return {"matched": False, "message": f"Error during matching: {str(e)}"} + + +def evaluate_series_rules_impl(tvg_id: str | None = None): + """Synchronous implementation of series rule evaluation; returns details for debugging.""" + from django.utils import timezone + from apps.channels.models import Recording, Channel + from apps.epg.models import EPGData, ProgramData + + rules = CoreSettings.get_dvr_series_rules() + result = {"scheduled": 0, "details": []} + if not isinstance(rules, list) or not rules: + return result + + # Optionally filter for tvg_id + if tvg_id: + rules = [r for r in rules if str(r.get("tvg_id")) == str(tvg_id)] + if not rules: + result["details"].append({"tvg_id": tvg_id, "status": "no_rule"}) + return result + + now = timezone.now() + horizon = now + timedelta(days=7) + + # Preload existing recordings' program ids to avoid duplicates + existing_program_ids = set() + for rec in Recording.objects.all().only("custom_properties"): + try: + pid = rec.custom_properties.get("program", {}).get("id") if rec.custom_properties else None + if pid is not None: + # Normalize to string for consistent comparisons + existing_program_ids.add(str(pid)) + except Exception: + continue + + for rule in rules: + rv_tvg = str(rule.get("tvg_id") or "").strip() + mode = (rule.get("mode") or "all").lower() + series_title = (rule.get("title") or "").strip() + norm_series = normalize_name(series_title) if series_title else None + if not rv_tvg: + result["details"].append({"tvg_id": rv_tvg, "status": "invalid_rule"}) + continue + + epg = 
EPGData.objects.filter(tvg_id=rv_tvg).first() + if not epg: + result["details"].append({"tvg_id": rv_tvg, "status": "no_epg_match"}) + continue + + programs_qs = ProgramData.objects.filter( + epg=epg, + start_time__gte=now, + start_time__lte=horizon, + ) + if series_title: + programs_qs = programs_qs.filter(title__iexact=series_title) + programs = list(programs_qs.order_by("start_time")) + # Fallback: if no direct matches and we have a title, try normalized comparison in Python + if series_title and not programs: + all_progs = ProgramData.objects.filter( + epg=epg, + start_time__gte=now, + start_time__lte=horizon, + ).only("id", "title", "start_time", "end_time", "custom_properties", "tvg_id") + programs = [p for p in all_progs if normalize_name(p.title) == norm_series] + + channel = Channel.objects.filter(epg_data=epg).order_by("channel_number").first() + if not channel: + result["details"].append({"tvg_id": rv_tvg, "status": "no_channel_for_epg"}) + continue + + # + # Many providers list multiple future airings of the same episode + # (e.g., prime-time and a late-night repeat). Previously we scheduled + # a recording for each airing which shows up as duplicates in the DVR. + # + # To avoid that, we collapse programs to the earliest airing per + # unique episode using the best identifier available: + # - season+episode from ProgramData.custom_properties + # - onscreen_episode (e.g., S08E03) + # - sub_title (episode name), scoped by tvg_id+series title + # If none of the above exist, we fall back to keeping each program + # (usually movies or specials without episode identifiers). + # + def _episode_key(p: "ProgramData"): + try: + props = p.custom_properties or {} + season = props.get("season") + episode = props.get("episode") + onscreen = props.get("onscreen_episode") + except Exception: + season = episode = onscreen = None + base = f"{p.tvg_id or ''}|{(p.title or '').strip().lower()}" # series scope + if season is not None and episode is not None: + return f"{base}|s{season}e{episode}" + if onscreen: + return f"{base}|{str(onscreen).strip().lower()}" + if p.sub_title: + return f"{base}|{p.sub_title.strip().lower()}" + # No reliable episode identity; use the program id to avoid over-merging + return f"id:{p.id}" + + # Optionally filter to only brand-new episodes before grouping + if mode == "new": + filtered = [] + for p in programs: + try: + if (p.custom_properties or {}).get("new"): + filtered.append(p) + except Exception: + pass + programs = filtered + + # Pick the earliest airing for each episode key + earliest_by_key = {} + for p in programs: + k = _episode_key(p) + cur = earliest_by_key.get(k) + if cur is None or p.start_time < cur.start_time: + earliest_by_key[k] = p + + unique_programs = list(earliest_by_key.values()) + + created_here = 0 + for prog in unique_programs: + try: + # Skip if already scheduled by program id + if str(prog.id) in existing_program_ids: + continue + # Extra guard: skip if a recording exists for the same channel + timeslot + try: + from django.db.models import Q + if Recording.objects.filter( + channel=channel, + start_time=prog.start_time, + end_time=prog.end_time, + ).filter(Q(custom_properties__program__id=prog.id) | Q(custom_properties__program__title=prog.title)).exists(): + continue + except Exception: + continue # already scheduled/recorded + + # Apply global DVR pre/post offsets (in minutes) + try: + pre_min = int(CoreSettings.get_dvr_pre_offset_minutes()) + except Exception: + pre_min = 0 + try: + post_min = 
int(CoreSettings.get_dvr_post_offset_minutes()) + except Exception: + post_min = 0 + + adj_start = prog.start_time + adj_end = prog.end_time + try: + if pre_min and pre_min > 0: + adj_start = adj_start - timedelta(minutes=pre_min) + except Exception: + pass + try: + if post_min and post_min > 0: + adj_end = adj_end + timedelta(minutes=post_min) + except Exception: + pass + + rec = Recording.objects.create( + channel=channel, + start_time=adj_start, + end_time=adj_end, + custom_properties={ + "program": { + "id": prog.id, + "tvg_id": prog.tvg_id, + "title": prog.title, + "sub_title": prog.sub_title, + "description": prog.description, + "start_time": prog.start_time.isoformat(), + "end_time": prog.end_time.isoformat(), + } + }, + ) + existing_program_ids.add(str(prog.id)) + created_here += 1 + try: + prefetch_recording_artwork.apply_async(args=[rec.id], countdown=1) + except Exception: + pass + except Exception as e: + result["details"].append({"tvg_id": rv_tvg, "status": "error", "error": str(e)}) + continue + result["scheduled"] += created_here + result["details"].append({"tvg_id": rv_tvg, "title": series_title, "status": "ok", "created": created_here}) + + # Notify frontend to refresh + try: + channel_layer = get_channel_layer() + async_to_sync(channel_layer.group_send)( + 'updates', + {'type': 'update', 'data': {"success": True, "type": "recordings_refreshed", "scheduled": result["scheduled"]}}, + ) + except Exception: + pass + + return result + + +@shared_task +def evaluate_series_rules(tvg_id: str | None = None): + return evaluate_series_rules_impl(tvg_id) + + +def reschedule_upcoming_recordings_for_offset_change_impl(): + """Recalculate start/end for all future EPG-based recordings using current DVR offsets. + + Only recordings that have not yet started (start_time > now) and that were + scheduled from EPG data (custom_properties.program present) are updated. 
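+
+    For example, with a pre-offset of 2 minutes and a post-offset of 5 minutes, a
+    program airing 20:00-21:00 is re-saved with start_time 19:58 and end_time 21:05;
+    an offset of 0 (or a negative value) leaves that side of the window unchanged.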
+ """ + from django.utils import timezone + from django.utils.dateparse import parse_datetime + from apps.channels.models import Recording + + now = timezone.now() + + try: + pre_min = int(CoreSettings.get_dvr_pre_offset_minutes()) + except Exception: + pre_min = 0 + try: + post_min = int(CoreSettings.get_dvr_post_offset_minutes()) + except Exception: + post_min = 0 + + changed = 0 + scanned = 0 + + for rec in Recording.objects.filter(start_time__gt=now).iterator(): + scanned += 1 + try: + cp = rec.custom_properties or {} + program = cp.get("program") if isinstance(cp, dict) else None + if not isinstance(program, dict): + continue + base_start = program.get("start_time") + base_end = program.get("end_time") + if not base_start or not base_end: + continue + start_dt = parse_datetime(str(base_start)) + end_dt = parse_datetime(str(base_end)) + if start_dt is None or end_dt is None: + continue + + adj_start = start_dt + adj_end = end_dt + try: + if pre_min and pre_min > 0: + adj_start = adj_start - timedelta(minutes=pre_min) + except Exception: + pass + try: + if post_min and post_min > 0: + adj_end = adj_end + timedelta(minutes=post_min) + except Exception: + pass + + if rec.start_time != adj_start or rec.end_time != adj_end: + rec.start_time = adj_start + rec.end_time = adj_end + rec.save(update_fields=["start_time", "end_time"]) + changed += 1 + except Exception: + continue + + # Notify frontend to refresh + try: + channel_layer = get_channel_layer() + async_to_sync(channel_layer.group_send)( + 'updates', + {'type': 'update', 'data': {"success": True, "type": "recordings_refreshed", "rescheduled": changed}}, + ) + except Exception: + pass + + return {"changed": changed, "scanned": scanned, "pre": pre_min, "post": post_min} + + +@shared_task +def reschedule_upcoming_recordings_for_offset_change(): + return reschedule_upcoming_recordings_for_offset_change_impl() + + +def _notify_recordings_refresh(): + try: + from core.utils import send_websocket_update + send_websocket_update('updates', 'update', {"success": True, "type": "recordings_refreshed"}) + except Exception: + pass + + +def purge_recurring_rule_impl(rule_id: int) -> int: + """Remove all future recordings created by a recurring rule.""" + from django.utils import timezone + from .models import Recording + + now = timezone.now() + try: + removed, _ = Recording.objects.filter( + start_time__gte=now, + custom_properties__rule__id=rule_id, + ).delete() + except Exception: + removed = 0 + if removed: + _notify_recordings_refresh() + return removed + + +def sync_recurring_rule_impl(rule_id: int, drop_existing: bool = True, horizon_days: int = 14) -> int: + """Ensure recordings exist for a recurring rule within the scheduling horizon.""" + from django.utils import timezone + from .models import RecurringRecordingRule, Recording + + rule = RecurringRecordingRule.objects.filter(pk=rule_id).select_related("channel").first() + now = timezone.now() + removed = 0 + if drop_existing: + removed = purge_recurring_rule_impl(rule_id) + + if not rule or not rule.enabled: + return 0 + + days = rule.cleaned_days() + if not days: + return 0 + + tz_name = CoreSettings.get_system_time_zone() + try: + tz = ZoneInfo(tz_name) + except Exception: + logger.warning("Invalid or unsupported time zone '%s'; falling back to Server default", tz_name) + tz = timezone.get_current_timezone() + start_limit = rule.start_date or now.date() + end_limit = rule.end_date + horizon = now + timedelta(days=horizon_days) + start_window = max(start_limit, now.date()) + if 
drop_existing and end_limit: + end_window = end_limit + else: + end_window = horizon.date() + if end_limit and end_limit < end_window: + end_window = end_limit + if end_window < start_window: + return 0 + total_created = 0 + + for offset in range((end_window - start_window).days + 1): + target_date = start_window + timedelta(days=offset) + if target_date.weekday() not in days: + continue + if end_limit and target_date > end_limit: + continue + try: + start_dt = timezone.make_aware(datetime.combine(target_date, rule.start_time), tz) + end_dt = timezone.make_aware(datetime.combine(target_date, rule.end_time), tz) + except Exception: + continue + if end_dt <= start_dt: + end_dt = end_dt + timedelta(days=1) + if start_dt <= now: + continue + exists = Recording.objects.filter( + channel=rule.channel, + start_time=start_dt, + custom_properties__rule__id=rule.id, + ).exists() + if exists: + continue + description = rule.name or f"Recurring recording for {rule.channel.name}" + cp = { + "rule": { + "type": "recurring", + "id": rule.id, + "days_of_week": days, + "name": rule.name or "", + }, + "status": "scheduled", + "description": description, + "program": { + "title": rule.name or rule.channel.name, + "description": description, + "start_time": start_dt.isoformat(), + "end_time": end_dt.isoformat(), + }, + } + try: + Recording.objects.create( + channel=rule.channel, + start_time=start_dt, + end_time=end_dt, + custom_properties=cp, + ) + total_created += 1 + except Exception as err: + logger.warning(f"Failed to create recurring recording for rule {rule.id}: {err}") + + if removed or total_created: + _notify_recordings_refresh() + + return total_created + + +@shared_task +def rebuild_recurring_rule(rule_id: int, horizon_days: int = 14): + return sync_recurring_rule_impl(rule_id, drop_existing=True, horizon_days=horizon_days) + + +@shared_task +def maintain_recurring_recordings(): + from .models import RecurringRecordingRule + + total = 0 + for rule_id in RecurringRecordingRule.objects.filter(enabled=True).values_list("id", flat=True): + try: + total += sync_recurring_rule_impl(rule_id, drop_existing=False) + except Exception as err: + logger.warning(f"Recurring rule maintenance failed for {rule_id}: {err}") + return total + + +@shared_task +def purge_recurring_rule(rule_id: int): + return purge_recurring_rule_impl(rule_id) + +@shared_task +def _safe_name(s): + try: + import re + s = s or "" + # Remove forbidden filename characters and normalize spaces + s = re.sub(r'[\\/:*?"<>|]+', '', s) + s = s.strip() + return s + except Exception: + return s or "" + + +def _parse_epg_tv_movie_info(program): + """Return tuple (is_movie, season, episode, year, sub_title) from EPG ProgramData if available.""" + is_movie = False + season = None + episode = None + year = None + sub_title = program.get('sub_title') if isinstance(program, dict) else None + try: + from apps.epg.models import ProgramData + prog_id = program.get('id') if isinstance(program, dict) else None + epg_program = ProgramData.objects.filter(id=prog_id).only('custom_properties').first() if prog_id else None + if epg_program and epg_program.custom_properties: + cp = epg_program.custom_properties + # Determine categories + cats = [c.lower() for c in (cp.get('categories') or []) if isinstance(c, str)] + is_movie = 'movie' in cats or 'film' in cats + season = cp.get('season') + episode = cp.get('episode') + onscreen = cp.get('onscreen_episode') + if (season is None or episode is None) and isinstance(onscreen, str): + import re as _re + m = 
_re.search(r'[sS](\d+)[eE](\d+)', onscreen) + if m: + season = season or int(m.group(1)) + episode = episode or int(m.group(2)) + d = cp.get('date') + if d: + year = str(d)[:4] + except Exception: + pass + return is_movie, season, episode, year, sub_title + + +def _build_output_paths(channel, program, start_time, end_time): + """ + Build (final_path, temp_ts_path, final_filename) using DVR templates. + """ + from core.models import CoreSettings + # Root for DVR recordings: fixed to /data/recordings inside the container + library_root = '/data/recordings' + + is_movie, season, episode, year, sub_title = _parse_epg_tv_movie_info(program) + show = _safe_name(program.get('title') if isinstance(program, dict) else channel.name) + title = _safe_name(program.get('title') if isinstance(program, dict) else channel.name) + sub_title = _safe_name(sub_title) + season = int(season) if season is not None else 0 + episode = int(episode) if episode is not None else 0 + year = year or str(start_time.year) + + values = { + 'show': show, + 'title': title, + 'sub_title': sub_title, + 'season': season, + 'episode': episode, + 'year': year, + 'channel': _safe_name(channel.name), + 'start': start_time.strftime('%Y%m%d_%H%M%S'), + 'end': end_time.strftime('%Y%m%d_%H%M%S'), + } + + template = CoreSettings.get_dvr_movie_template() if is_movie else CoreSettings.get_dvr_tv_template() + # Build relative path from templates with smart fallbacks + rel_path = None + if not is_movie and (season == 0 or episode == 0): + # TV fallback template when S/E are missing + try: + tv_fb = CoreSettings.get_dvr_tv_fallback_template() + rel_path = tv_fb.format(**values) + except Exception: + # Older setting support + try: + fallback_root = CoreSettings.get_dvr_tv_fallback_dir() + except Exception: + fallback_root = "TV_Shows" + rel_path = f"{fallback_root}/{show}/{values['start']}.mkv" + if not rel_path: + try: + rel_path = template.format(**values) + except Exception: + rel_path = None + # Movie-specific fallback if formatting failed or title missing + if is_movie and not rel_path: + try: + m_fb = CoreSettings.get_dvr_movie_fallback_template() + rel_path = m_fb.format(**values) + except Exception: + rel_path = f"Movies/{values['start']}.mkv" + # As a last resort for TV + if not is_movie and not rel_path: + rel_path = f"TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv" + # Keep any leading folder like 'Recordings/' from the template so users can + # structure their library under /data as desired. + if not rel_path.lower().endswith('.mkv'): + rel_path = f"{rel_path}.mkv" + + # Normalize path (strip ./) + if rel_path.startswith('./'): + rel_path = rel_path[2:] + final_path = rel_path if rel_path.startswith('/') else os.path.join(library_root, rel_path) + final_path = os.path.normpath(final_path) + # Ensure directory exists + os.makedirs(os.path.dirname(final_path), exist_ok=True) + + # Derive temp TS path in same directory + base_no_ext = os.path.splitext(os.path.basename(final_path))[0] + temp_ts_path = os.path.join(os.path.dirname(final_path), f"{base_no_ext}.ts") + return final_path, temp_ts_path, os.path.basename(final_path) + + +@shared_task +def run_recording(recording_id, channel_id, start_time_str, end_time_str): + """ + Execute a scheduled recording for the given channel/recording. + + Enhancements: + - Accepts recording_id so we can persist metadata back to the Recording row + - Persists basic file info (name/path) to Recording.custom_properties + - Attempts to capture stream stats from TS proxy (codec, resolution, fps, etc.) 
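+    - Writes the capture to a temporary .ts file at a path built from the DVR TV/movie templates, then remuxes it to .mkv via ffmpeg stream copy when the capture ends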
+ - Attempts to capture a poster (via program.custom_properties) and store a Logo reference + """ channel = Channel.objects.get(id=channel_id) start_time = datetime.fromisoformat(start_time_str) end_time = datetime.fromisoformat(end_time_str) duration_seconds = int((end_time - start_time).total_seconds()) - filename = f'{slugify(channel.name)}-{start_time.strftime("%Y-%m-%d_%H-%M-%S")}.mp4' + # Build output paths from templates + # We need program info; will refine after we load Recording cp below + filename = None + final_path = None + temp_ts_path = None channel_layer = get_channel_layer() @@ -226,21 +1450,401 @@ def run_recording(channel_id, start_time_str, end_time_str): ) logger.info(f"Starting recording for channel {channel.name}") - with requests.get(f"http://localhost:5656/proxy/ts/stream/{channel.uuid}", headers={ - 'User-Agent': 'Dispatcharr-DVR', - }, stream=True) as response: - # Raise an exception for bad responses (4xx, 5xx) - response.raise_for_status() - # Open the file in write-binary mode - with open(f"/data/recordings/{filename}", 'wb') as file: - start_time = time.time() # Start the timer - for chunk in response.iter_content(chunk_size=8192): # 8KB chunks - if time.time() - start_time > duration_seconds: - print(f"Timeout reached: {duration_seconds} seconds") + # Log system event for recording start + try: + from core.utils import log_system_event + log_system_event( + 'recording_start', + channel_id=channel.uuid, + channel_name=channel.name, + recording_id=recording_id + ) + except Exception as e: + logger.error(f"Could not log recording start event: {e}") + + # Try to resolve the Recording row up front + recording_obj = None + try: + from .models import Recording, Logo + recording_obj = Recording.objects.get(id=recording_id) + # Prime custom_properties with file info/status + cp = recording_obj.custom_properties or {} + cp.update({ + "status": "recording", + "started_at": str(datetime.now()), + }) + # Provide a predictable playback URL for the frontend + cp["file_url"] = f"/api/channels/recordings/{recording_id}/file/" + cp["output_file_url"] = cp["file_url"] + + # Determine program info (may include id for deeper details) + program = cp.get("program") or {} + final_path, temp_ts_path, filename = _build_output_paths(channel, program, start_time, end_time) + cp["file_name"] = filename + cp["file_path"] = final_path + cp["_temp_file_path"] = temp_ts_path + + # Resolve poster the same way VODs do: + # 1) Prefer image(s) from EPG Program custom_properties (images/icon) + # 2) Otherwise reuse an existing VOD logo matching title (Movie/Series) + # 3) Otherwise save any direct poster URL from provided program fields + program = (cp.get("program") or {}) if isinstance(cp, dict) else {} + + def pick_best_image_from_epg_props(epg_props): + try: + images = epg_props.get("images") or [] + if not isinstance(images, list): + return None + # Prefer poster/cover and larger sizes + size_order = {"xxl": 6, "xl": 5, "l": 4, "m": 3, "s": 2, "xs": 1} + def score(img): + t = (img.get("type") or "").lower() + size = (img.get("size") or "").lower() + return ( + 2 if t in ("poster", "cover") else 1, + size_order.get(size, 0) + ) + best = None + for im in images: + if not isinstance(im, dict): + continue + url = im.get("url") + if not url: + continue + if best is None or score(im) > score(best): + best = im + return best.get("url") if best else None + except Exception: + return None + + poster_logo_id = None + poster_url = None + + # Try EPG Program custom_properties by ID + try: + from 
apps.epg.models import ProgramData + prog_id = program.get("id") + if prog_id: + epg_program = ProgramData.objects.filter(id=prog_id).only("custom_properties").first() + if epg_program and epg_program.custom_properties: + epg_props = epg_program.custom_properties or {} + poster_url = pick_best_image_from_epg_props(epg_props) + if not poster_url: + icon = epg_props.get("icon") + if isinstance(icon, str) and icon: + poster_url = icon + except Exception as e: + logger.debug(f"EPG image lookup failed: {e}") + + # Fallback: reuse VOD Logo by matching title + if not poster_url and not poster_logo_id: + try: + from apps.vod.models import Movie, Series + title = program.get("title") or channel.name + vod_logo = None + movie = Movie.objects.filter(name__iexact=title).select_related("logo").first() + if movie and movie.logo: + vod_logo = movie.logo + if not vod_logo: + series = Series.objects.filter(name__iexact=title).select_related("logo").first() + if series and series.logo: + vod_logo = series.logo + if vod_logo: + poster_logo_id = vod_logo.id + except Exception as e: + logger.debug(f"VOD logo fallback failed: {e}") + + # External metadata lookups (TMDB/OMDb) when EPG/VOD didn't provide an image + if not poster_url and not poster_logo_id: + try: + tmdb_key = os.environ.get('TMDB_API_KEY') + omdb_key = os.environ.get('OMDB_API_KEY') + title = (program.get('title') or channel.name or '').strip() + year = None + imdb_id = None + + # Try to derive year and imdb from EPG program custom_properties + try: + from apps.epg.models import ProgramData + prog_id = program.get('id') + epg_program = ProgramData.objects.filter(id=prog_id).only('custom_properties').first() if prog_id else None + if epg_program and epg_program.custom_properties: + d = epg_program.custom_properties.get('date') + if d and len(str(d)) >= 4: + year = str(d)[:4] + imdb_id = epg_program.custom_properties.get('imdb.com_id') or imdb_id + except Exception: + pass + + # TMDB: by IMDb ID + if not poster_url and tmdb_key and imdb_id: + try: + url = f"https://api.themoviedb.org/3/find/{quote(imdb_id)}?api_key={tmdb_key}&external_source=imdb_id" + resp = requests.get(url, timeout=5) + if resp.ok: + data = resp.json() or {} + picks = [] + for k in ('movie_results', 'tv_results', 'tv_episode_results', 'tv_season_results'): + lst = data.get(k) or [] + picks.extend(lst) + poster_path = None + for item in picks: + if item.get('poster_path'): + poster_path = item['poster_path'] + break + if poster_path: + poster_url = f"https://image.tmdb.org/t/p/w780{poster_path}" + except Exception: + pass + + # TMDB: by title (and year if available) + if not poster_url and tmdb_key and title: + try: + q = quote(title) + extra = f"&year={year}" if year else "" + url = f"https://api.themoviedb.org/3/search/multi?api_key={tmdb_key}&query={q}{extra}" + resp = requests.get(url, timeout=5) + if resp.ok: + data = resp.json() or {} + results = data.get('results') or [] + results.sort(key=lambda x: float(x.get('popularity') or 0), reverse=True) + for item in results: + if item.get('poster_path'): + poster_url = f"https://image.tmdb.org/t/p/w780{item['poster_path']}" + break + except Exception: + pass + + # OMDb fallback + if not poster_url and omdb_key: + try: + if imdb_id: + url = f"https://www.omdbapi.com/?apikey={omdb_key}&i={quote(imdb_id)}" + elif title: + yy = f"&y={year}" if year else "" + url = f"https://www.omdbapi.com/?apikey={omdb_key}&t={quote(title)}{yy}" + else: + url = None + if url: + resp = requests.get(url, timeout=5) + if resp.ok: + data = resp.json() 
or {} + p = data.get('Poster') + if p and p != 'N/A': + poster_url = p + except Exception: + pass + except Exception as e: + logger.debug(f"External poster lookup failed: {e}") + + # Keyless fallback providers (no API keys required) + if not poster_url and not poster_logo_id: + try: + title = (program.get('title') or channel.name or '').strip() + if title: + # 1) TVMaze (TV shows) - singlesearch by title + try: + url = f"https://api.tvmaze.com/singlesearch/shows?q={quote(title)}" + resp = requests.get(url, timeout=5) + if resp.ok: + data = resp.json() or {} + img = (data.get('image') or {}) + p = img.get('original') or img.get('medium') + if p: + poster_url = p + except Exception: + pass + + # 2) iTunes Search API (movies or tv shows) + if not poster_url: + try: + for media in ('movie', 'tvShow'): + url = f"https://itunes.apple.com/search?term={quote(title)}&media={media}&limit=1" + resp = requests.get(url, timeout=5) + if resp.ok: + data = resp.json() or {} + results = data.get('results') or [] + if results: + art = results[0].get('artworkUrl100') + if art: + # Scale up to 600x600 by convention + poster_url = art.replace('100x100', '600x600') + break + except Exception: + pass + except Exception as e: + logger.debug(f"Keyless poster lookup failed: {e}") + + # Last: check direct fields on provided program object + if not poster_url and not poster_logo_id: + for key in ("poster", "cover", "cover_big", "image", "icon"): + val = program.get(key) + if isinstance(val, dict): + candidate = val.get("url") + if candidate: + poster_url = candidate + break + elif isinstance(val, str) and val: + poster_url = val break - # Write the chunk to the file - file.write(chunk) + + # Create or assign Logo + if not poster_logo_id and poster_url and len(poster_url) <= 1000: + try: + logo, _ = Logo.objects.get_or_create(url=poster_url, defaults={"name": program.get("title") or channel.name}) + poster_logo_id = logo.id + except Exception as e: + logger.debug(f"Unable to persist poster to Logo: {e}") + + if poster_logo_id: + cp["poster_logo_id"] = poster_logo_id + if poster_url and "poster_url" not in cp: + cp["poster_url"] = poster_url + + # Ensure destination exists so it's visible immediately + try: + os.makedirs(os.path.dirname(final_path), exist_ok=True) + if not os.path.exists(final_path): + open(final_path, 'ab').close() + except Exception: + pass + + recording_obj.custom_properties = cp + recording_obj.save(update_fields=["custom_properties"]) + except Exception as e: + logger.debug(f"Unable to prime Recording metadata: {e}") + interrupted = False + interrupted_reason = None + bytes_written = 0 + + from requests.exceptions import ReadTimeout, ConnectionError as ReqConnectionError, ChunkedEncodingError + + # Determine internal base URL(s) for TS streaming + # Prefer explicit override, then try common ports for debug and docker + explicit = os.environ.get('DISPATCHARR_INTERNAL_TS_BASE_URL') + is_dev = (os.environ.get('DISPATCHARR_ENV', '').lower() == 'dev') or \ + (os.environ.get('DISPATCHARR_DEBUG', '').lower() == 'true') or \ + (os.environ.get('REDIS_HOST', 'redis') in ('localhost', '127.0.0.1')) + candidates = [] + if explicit: + candidates.append(explicit) + if is_dev: + # Debug container typically exposes API on 5656 + candidates.extend(['http://127.0.0.1:5656', 'http://127.0.0.1:9191']) + # Docker service name fallback + candidates.append(os.environ.get('DISPATCHARR_INTERNAL_API_BASE', 'http://web:9191')) + # Last-resort localhost ports + candidates.extend(['http://localhost:5656', 
'http://localhost:9191']) + + chosen_base = None + last_error = None + bytes_written = 0 + interrupted = False + interrupted_reason = None + + # We'll attempt each base until we receive some data + for base in candidates: + try: + test_url = f"{base.rstrip('/')}/proxy/ts/stream/{channel.uuid}" + logger.info(f"DVR: trying TS base {base} -> {test_url}") + + with requests.get( + test_url, + headers={ + 'User-Agent': 'Dispatcharr-DVR', + }, + stream=True, + timeout=(10, 15), + ) as response: + response.raise_for_status() + + # Open the file and start copying; if we get any data within a short window, accept this base + got_any_data = False + test_window = 3.0 # seconds to detect first bytes + window_start = time.time() + + with open(temp_ts_path, 'wb') as file: + started_at = time.time() + for chunk in response.iter_content(chunk_size=8192): + if not chunk: + # keep-alives may be empty; continue + if not got_any_data and (time.time() - window_start) > test_window: + break + continue + # We have data + got_any_data = True + chosen_base = base + # Fall through to full recording loop using this same response/connection + file.write(chunk) + bytes_written += len(chunk) + elapsed = time.time() - started_at + if elapsed > duration_seconds: + break + # Continue draining the stream + for chunk2 in response.iter_content(chunk_size=8192): + if not chunk2: + continue + file.write(chunk2) + bytes_written += len(chunk2) + elapsed = time.time() - started_at + if elapsed > duration_seconds: + break + break # exit outer for-loop once we switched to full drain + + # If we wrote any bytes, treat as success and stop trying candidates + if bytes_written > 0: + logger.info(f"DVR: selected TS base {base}; wrote initial {bytes_written} bytes") + break + else: + last_error = f"no_data_from_{base}" + logger.warning(f"DVR: no data received from {base} within {test_window}s, trying next base") + # Clean up empty temp file + try: + if os.path.exists(temp_ts_path) and os.path.getsize(temp_ts_path) == 0: + os.remove(temp_ts_path) + except Exception: + pass + except Exception as e: + last_error = str(e) + logger.warning(f"DVR: attempt failed for base {base}: {e}") + + if chosen_base is None and bytes_written == 0: + interrupted = True + interrupted_reason = f"no_stream_data: {last_error or 'all_bases_failed'}" + else: + # If we ended before reaching planned duration, record reason + actual_elapsed = 0 + try: + actual_elapsed = os.path.getsize(temp_ts_path) and (duration_seconds) # Best effort; we streamed until duration or disconnect above + except Exception: + pass + # We cannot compute accurate elapsed here; fine to leave as is + pass + + # If no bytes were written at all, mark detail + if bytes_written == 0 and not interrupted: + interrupted = True + interrupted_reason = f"no_stream_data: {last_error or 'unknown'}" + + # Update DB status immediately so the UI reflects the change on the event below + try: + if recording_obj is None: + from .models import Recording + recording_obj = Recording.objects.get(id=recording_id) + cp_now = recording_obj.custom_properties or {} + cp_now.update({ + "status": "interrupted" if interrupted else "completed", + "ended_at": str(datetime.now()), + "file_name": filename or cp_now.get("file_name"), + "file_path": final_path or cp_now.get("file_path"), + }) + if interrupted and interrupted_reason: + cp_now["interrupted_reason"] = interrupted_reason + recording_obj.custom_properties = cp_now + recording_obj.save(update_fields=["custom_properties"]) + except Exception as e: + 
logger.debug(f"Failed to update immediate recording status: {e}") async_to_sync(channel_layer.group_send)( "updates", @@ -249,6 +1853,1247 @@ def run_recording(channel_id, start_time_str, end_time_str): "data": {"success": True, "type": "recording_ended", "channel": channel.name} }, ) - # After the loop, the file and response are closed automatically. logger.info(f"Finished recording for channel {channel.name}") + + # Log system event for recording end + try: + from core.utils import log_system_event + log_system_event( + 'recording_end', + channel_id=channel.uuid, + channel_name=channel.name, + recording_id=recording_id, + interrupted=interrupted, + bytes_written=bytes_written + ) + except Exception as e: + logger.error(f"Could not log recording end event: {e}") + + # Remux TS to MKV container + remux_success = False + try: + if temp_ts_path and os.path.exists(temp_ts_path): + subprocess.run([ + "ffmpeg", "-y", "-i", temp_ts_path, "-c", "copy", final_path + ], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + remux_success = os.path.exists(final_path) + # Clean up temp file on success + if remux_success: + try: + os.remove(temp_ts_path) + except Exception: + pass + except Exception as e: + logger.warning(f"MKV remux failed: {e}") + + # Persist final metadata to Recording (status, ended_at, and stream stats if available) + try: + if recording_obj is None: + from .models import Recording + recording_obj = Recording.objects.get(id=recording_id) + + cp = recording_obj.custom_properties or {} + cp.update({ + "ended_at": str(datetime.now()), + }) + if interrupted: + cp["status"] = "interrupted" + if interrupted_reason: + cp["interrupted_reason"] = interrupted_reason + else: + cp["status"] = "completed" + cp["bytes_written"] = bytes_written + cp["remux_success"] = remux_success + + # Try to get stream stats from TS proxy Redis metadata + try: + from core.utils import RedisClient + from apps.proxy.ts_proxy.redis_keys import RedisKeys + from apps.proxy.ts_proxy.constants import ChannelMetadataField + + r = RedisClient.get_client() + if r is not None: + metadata_key = RedisKeys.channel_metadata(str(channel.uuid)) + md = r.hgetall(metadata_key) + if md: + def _gv(bkey): + return md.get(bkey.encode('utf-8')) + + def _d(bkey, cast=str): + v = _gv(bkey) + try: + if v is None: + return None + s = v.decode('utf-8') + return cast(s) if cast is not str else s + except Exception: + return None + + stream_info = {} + # Video fields + for key, caster in [ + (ChannelMetadataField.VIDEO_CODEC, str), + (ChannelMetadataField.RESOLUTION, str), + (ChannelMetadataField.WIDTH, float), + (ChannelMetadataField.HEIGHT, float), + (ChannelMetadataField.SOURCE_FPS, float), + (ChannelMetadataField.PIXEL_FORMAT, str), + (ChannelMetadataField.VIDEO_BITRATE, float), + ]: + val = _d(key, caster) + if val is not None: + stream_info[key] = val + + # Audio fields + for key, caster in [ + (ChannelMetadataField.AUDIO_CODEC, str), + (ChannelMetadataField.SAMPLE_RATE, float), + (ChannelMetadataField.AUDIO_CHANNELS, str), + (ChannelMetadataField.AUDIO_BITRATE, float), + ]: + val = _d(key, caster) + if val is not None: + stream_info[key] = val + + if stream_info: + cp["stream_info"] = stream_info + except Exception as e: + logger.debug(f"Unable to capture stream stats for recording: {e}") + + # Removed: local thumbnail generation. We rely on EPG/VOD/TMDB/OMDb/keyless providers only. 
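+        # At this point cp carries the final recording metadata persisted below,
+        # typically: status ("completed" or "interrupted" plus an interrupted_reason),
+        # ended_at, bytes_written, remux_success, the file_name/file_path of the
+        # remuxed .mkv, and, when available, a stream_info dict with
+        # codec/resolution/fps/bitrate fields read from the TS proxy metadata in Redis.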
+ + recording_obj.custom_properties = cp + recording_obj.save(update_fields=["custom_properties"]) + except Exception as e: + logger.debug(f"Unable to finalize Recording metadata: {e}") + + # Optionally run comskip post-process + try: + from core.models import CoreSettings + if CoreSettings.get_dvr_comskip_enabled(): + comskip_process_recording.delay(recording_id) + except Exception: + pass + + +@shared_task +def recover_recordings_on_startup(): + """ + On service startup, reschedule or resume recordings to handle server restarts. + - For recordings whose window includes 'now': mark interrupted and start a new recording for the remainder. + - For future recordings: ensure a task is scheduled at start_time. + Uses a Redis lock to ensure only one worker runs this recovery. + """ + try: + from django.utils import timezone + from .models import Recording + from core.utils import RedisClient + from .signals import schedule_recording_task + + redis = RedisClient.get_client() + if redis: + lock_key = "dvr:recover_lock" + # Set lock with 60s TTL; only first winner proceeds + if not redis.set(lock_key, "1", ex=60, nx=True): + return "Recovery already in progress" + + now = timezone.now() + + # Resume in-window recordings + active = Recording.objects.filter(start_time__lte=now, end_time__gt=now) + for rec in active: + try: + cp = rec.custom_properties or {} + # Mark interrupted due to restart; will flip to 'recording' when task starts + cp["status"] = "interrupted" + cp["interrupted_reason"] = "server_restarted" + rec.custom_properties = cp + rec.save(update_fields=["custom_properties"]) + + # Start recording for remaining window + run_recording.apply_async( + args=[rec.id, rec.channel_id, str(now), str(rec.end_time)], eta=now + ) + except Exception as e: + logger.warning(f"Failed to resume recording {rec.id}: {e}") + + # Ensure future recordings are scheduled + upcoming = Recording.objects.filter(start_time__gt=now, end_time__gt=now) + for rec in upcoming: + try: + # Schedule task at start_time + task_id = schedule_recording_task(rec) + if task_id: + rec.task_id = task_id + rec.save(update_fields=["task_id"]) + except Exception as e: + logger.warning(f"Failed to schedule recording {rec.id}: {e}") + + return "Recovery complete" + except Exception as e: + logger.error(f"Error during DVR recovery: {e}") + return f"Error: {e}" + +@shared_task +def comskip_process_recording(recording_id: int): + """Run comskip on the MKV to remove commercials and replace the file in place. + Safe to call even if comskip is not installed; stores status in custom_properties.comskip. 
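+
+    The outcome is stored under custom_properties["comskip"], e.g.
+    {"status": "completed", "edl": "<name>.edl", "segments_kept": N, "commercials": M},
+    or a "skipped"/"error" entry with a reason when nothing was processed.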
+ """ + import shutil + from django.db import DatabaseError + from .models import Recording + # Helper to broadcast status over websocket + def _ws(status: str, extra: dict | None = None): + try: + from core.utils import send_websocket_update + payload = {"success": True, "type": "comskip_status", "status": status, "recording_id": recording_id} + if extra: + payload.update(extra) + send_websocket_update('updates', 'update', payload) + except Exception: + pass + + try: + rec = Recording.objects.get(id=recording_id) + except Recording.DoesNotExist: + return "not_found" + + cp = rec.custom_properties.copy() if isinstance(rec.custom_properties, dict) else {} + + def _persist_custom_properties(): + """Persist updated custom_properties without raising if the row disappeared.""" + try: + updated = Recording.objects.filter(pk=recording_id).update(custom_properties=cp) + if not updated: + logger.warning( + "Recording %s vanished before comskip status could be saved", + recording_id, + ) + return False + except DatabaseError as db_err: + logger.warning( + "Failed to persist comskip status for recording %s: %s", + recording_id, + db_err, + ) + return False + except Exception as unexpected: + logger.warning( + "Unexpected error while saving comskip status for recording %s: %s", + recording_id, + unexpected, + ) + return False + return True + file_path = (cp or {}).get("file_path") + if not file_path or not os.path.exists(file_path): + return "no_file" + + if isinstance(cp.get("comskip"), dict) and cp["comskip"].get("status") == "completed": + return "already_processed" + + comskip_bin = shutil.which("comskip") + if not comskip_bin: + cp["comskip"] = {"status": "skipped", "reason": "comskip_not_installed"} + _persist_custom_properties() + _ws('skipped', {"reason": "comskip_not_installed"}) + return "comskip_missing" + + base, _ = os.path.splitext(file_path) + edl_path = f"{base}.edl" + + # Notify start + _ws('started', {"title": (cp.get('program') or {}).get('title') or os.path.basename(file_path)}) + + try: + cmd = [comskip_bin, "--output", os.path.dirname(file_path)] + # Prefer user-specified INI, fall back to known defaults + ini_candidates = [] + try: + custom_ini = CoreSettings.get_dvr_comskip_custom_path() + if custom_ini: + ini_candidates.append(custom_ini) + except Exception as ini_err: + logger.debug(f"Unable to load custom comskip.ini path: {ini_err}") + ini_candidates.extend(["/etc/comskip/comskip.ini", "/app/docker/comskip.ini"]) + selected_ini = None + for ini_path in ini_candidates: + if ini_path and os.path.exists(ini_path): + selected_ini = ini_path + cmd.extend([f"--ini={ini_path}"]) + break + cmd.append(file_path) + subprocess.run( + cmd, + check=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) + except subprocess.CalledProcessError as e: + stderr_tail = (e.stderr or "").strip().splitlines() + stderr_tail = stderr_tail[-5:] if stderr_tail else [] + detail = { + "status": "error", + "reason": "comskip_failed", + "returncode": e.returncode, + } + if e.returncode and e.returncode < 0: + try: + detail["signal"] = signal.Signals(-e.returncode).name + except Exception: + detail["signal"] = f"signal_{-e.returncode}" + if stderr_tail: + detail["stderr"] = "\n".join(stderr_tail) + if selected_ini: + detail["ini_path"] = selected_ini + cp["comskip"] = detail + _persist_custom_properties() + _ws('error', {"reason": "comskip_failed", "returncode": e.returncode}) + return "comskip_failed" + except Exception as e: + cp["comskip"] = {"status": "error", "reason": 
f"comskip_failed: {e}"} + _persist_custom_properties() + _ws('error', {"reason": str(e)}) + return "comskip_failed" + + if not os.path.exists(edl_path): + cp["comskip"] = {"status": "error", "reason": "edl_not_found"} + _persist_custom_properties() + _ws('error', {"reason": "edl_not_found"}) + return "no_edl" + + # Duration via ffprobe + def _ffprobe_duration(path): + try: + p = subprocess.run([ + "ffprobe", "-v", "error", "-show_entries", "format=duration", + "-of", "default=noprint_wrappers=1:nokey=1", path + ], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=True) + return float(p.stdout.strip()) + except Exception: + return None + + duration = _ffprobe_duration(file_path) + if duration is None: + cp["comskip"] = {"status": "error", "reason": "duration_unknown"} + _persist_custom_properties() + _ws('error', {"reason": "duration_unknown"}) + return "no_duration" + + commercials = [] + try: + with open(edl_path, "r") as f: + for line in f: + parts = line.strip().split() + if len(parts) >= 2: + try: + s = float(parts[0]); e = float(parts[1]) + commercials.append((max(0.0, s), min(duration, e))) + except Exception: + pass + except Exception: + pass + + commercials.sort() + keep = [] + cur = 0.0 + for s, e in commercials: + if s > cur: + keep.append((cur, max(cur, s))) + cur = max(cur, e) + if cur < duration: + keep.append((cur, duration)) + + if not commercials or sum((e - s) for s, e in commercials) <= 0.5: + cp["comskip"] = { + "status": "completed", + "skipped": True, + "edl": os.path.basename(edl_path), + } + if selected_ini: + cp["comskip"]["ini_path"] = selected_ini + _persist_custom_properties() + _ws('skipped', {"reason": "no_commercials", "commercials": 0}) + return "no_commercials" + + workdir = os.path.dirname(file_path) + parts = [] + try: + for idx, (s, e) in enumerate(keep): + seg = os.path.join(workdir, f"segment_{idx:03d}.mkv") + dur = max(0.0, e - s) + if dur <= 0.01: + continue + subprocess.run([ + "ffmpeg", "-y", "-ss", f"{s:.3f}", "-i", file_path, "-t", f"{dur:.3f}", + "-c", "copy", "-avoid_negative_ts", "1", seg + ], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + parts.append(seg) + + if not parts: + raise RuntimeError("no_parts") + + list_path = os.path.join(workdir, "concat_list.txt") + with open(list_path, "w") as lf: + for pth in parts: + escaped = pth.replace("'", "'\\''") + lf.write(f"file '{escaped}'\n") + + output_path = os.path.join(workdir, f"{os.path.splitext(os.path.basename(file_path))[0]}.cut.mkv") + subprocess.run([ + "ffmpeg", "-y", "-f", "concat", "-safe", "0", "-i", list_path, "-c", "copy", output_path + ], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + try: + os.replace(output_path, file_path) + except Exception: + shutil.copy(output_path, file_path) + + try: + os.remove(list_path) + except Exception: + pass + for pth in parts: + try: os.remove(pth) + except Exception: pass + + cp["comskip"] = { + "status": "completed", + "edl": os.path.basename(edl_path), + "segments_kept": len(parts), + "commercials": len(commercials), + } + if selected_ini: + cp["comskip"]["ini_path"] = selected_ini + _persist_custom_properties() + _ws('completed', {"commercials": len(commercials), "segments_kept": len(parts)}) + return "ok" + except Exception as e: + cp["comskip"] = {"status": "error", "reason": str(e)} + _persist_custom_properties() + _ws('error', {"reason": str(e)}) + return f"error:{e}" +def _resolve_poster_for_program(channel_name, program): + """Internal helper that attempts to resolve a poster URL and/or Logo 
id. + Returns (poster_logo_id, poster_url) where either may be None. + """ + poster_logo_id = None + poster_url = None + + # Try EPG Program images first + try: + from apps.epg.models import ProgramData + prog_id = program.get("id") if isinstance(program, dict) else None + if prog_id: + epg_program = ProgramData.objects.filter(id=prog_id).only("custom_properties").first() + if epg_program and epg_program.custom_properties: + epg_props = epg_program.custom_properties or {} + + def pick_best_image_from_epg_props(epg_props): + images = epg_props.get("images") or [] + if not isinstance(images, list): + return None + size_order = {"xxl": 6, "xl": 5, "l": 4, "m": 3, "s": 2, "xs": 1} + def score(img): + t = (img.get("type") or "").lower() + size = (img.get("size") or "").lower() + return (2 if t in ("poster", "cover") else 1, size_order.get(size, 0)) + best = None + for im in images: + if not isinstance(im, dict): + continue + url = im.get("url") + if not url: + continue + if best is None or score(im) > score(best): + best = im + return best.get("url") if best else None + + poster_url = pick_best_image_from_epg_props(epg_props) + if not poster_url: + icon = epg_props.get("icon") + if isinstance(icon, str) and icon: + poster_url = icon + except Exception: + pass + + # VOD logo fallback by title + if not poster_url and not poster_logo_id: + try: + from apps.vod.models import Movie, Series + title = (program.get("title") if isinstance(program, dict) else None) or channel_name + vod_logo = None + movie = Movie.objects.filter(name__iexact=title).select_related("logo").first() + if movie and movie.logo: + vod_logo = movie.logo + if not vod_logo: + series = Series.objects.filter(name__iexact=title).select_related("logo").first() + if series and series.logo: + vod_logo = series.logo + if vod_logo: + poster_logo_id = vod_logo.id + except Exception: + pass + + # Keyless providers (TVMaze & iTunes) + if not poster_url and not poster_logo_id: + try: + title = (program.get('title') if isinstance(program, dict) else None) or channel_name + if title: + # TVMaze + try: + url = f"https://api.tvmaze.com/singlesearch/shows?q={quote(title)}" + resp = requests.get(url, timeout=5) + if resp.ok: + data = resp.json() or {} + img = (data.get('image') or {}) + p = img.get('original') or img.get('medium') + if p: + poster_url = p + except Exception: + pass + # iTunes + if not poster_url: + try: + for media in ('movie', 'tvShow'): + url = f"https://itunes.apple.com/search?term={quote(title)}&media={media}&limit=1" + resp = requests.get(url, timeout=5) + if resp.ok: + data = resp.json() or {} + results = data.get('results') or [] + if results: + art = results[0].get('artworkUrl100') + if art: + poster_url = art.replace('100x100', '600x600') + break + except Exception: + pass + except Exception: + pass + + # Fallback: search existing Logo entries by name if we still have nothing + if not poster_logo_id and not poster_url: + try: + from .models import Logo + title = (program.get("title") if isinstance(program, dict) else None) or channel_name + existing = Logo.objects.filter(name__iexact=title).first() + if existing: + poster_logo_id = existing.id + poster_url = existing.url + except Exception: + pass + + # Save to Logo if URL available + if not poster_logo_id and poster_url and len(poster_url) <= 1000: + try: + from .models import Logo + logo, _ = Logo.objects.get_or_create(url=poster_url, defaults={"name": (program.get("title") if isinstance(program, dict) else None) or channel_name}) + poster_logo_id = logo.id + except 
Exception: + pass + + return poster_logo_id, poster_url + + +@shared_task +def prefetch_recording_artwork(recording_id): + """Prefetch poster info for a scheduled recording so the UI can show art in Upcoming.""" + try: + from .models import Recording + rec = Recording.objects.get(id=recording_id) + cp = rec.custom_properties or {} + program = cp.get("program") or {} + poster_logo_id, poster_url = _resolve_poster_for_program(rec.channel.name, program) + updated = False + if poster_logo_id and cp.get("poster_logo_id") != poster_logo_id: + cp["poster_logo_id"] = poster_logo_id + updated = True + if poster_url and cp.get("poster_url") != poster_url: + cp["poster_url"] = poster_url + updated = True + # Enrich with rating if available from ProgramData.custom_properties + try: + from apps.epg.models import ProgramData + prog_id = program.get("id") if isinstance(program, dict) else None + if prog_id: + epg_program = ProgramData.objects.filter(id=prog_id).only("custom_properties").first() + if epg_program and isinstance(epg_program.custom_properties, dict): + rating_val = epg_program.custom_properties.get("rating") + rating_sys = epg_program.custom_properties.get("rating_system") + season_val = epg_program.custom_properties.get("season") + episode_val = epg_program.custom_properties.get("episode") + onscreen = epg_program.custom_properties.get("onscreen_episode") + if rating_val and cp.get("rating") != rating_val: + cp["rating"] = rating_val + updated = True + if rating_sys and cp.get("rating_system") != rating_sys: + cp["rating_system"] = rating_sys + updated = True + if season_val is not None and cp.get("season") != season_val: + cp["season"] = season_val + updated = True + if episode_val is not None and cp.get("episode") != episode_val: + cp["episode"] = episode_val + updated = True + if onscreen and cp.get("onscreen_episode") != onscreen: + cp["onscreen_episode"] = onscreen + updated = True + except Exception: + pass + + if updated: + rec.custom_properties = cp + rec.save(update_fields=["custom_properties"]) + try: + from core.utils import send_websocket_update + send_websocket_update('updates', 'update', {"success": True, "type": "recording_updated", "recording_id": rec.id}) + except Exception: + pass + return "ok" + except Exception as e: + logger.debug(f"prefetch_recording_artwork failed: {e}") + return f"error: {e}" + + +@shared_task(bind=True) +def bulk_create_channels_from_streams(self, stream_ids, channel_profile_ids=None, starting_channel_number=None): + """ + Asynchronously create channels from a list of stream IDs. + Provides progress updates via WebSocket. 
+ + Args: + stream_ids: List of stream IDs to create channels from + channel_profile_ids: Optional list of channel profile IDs to assign channels to + starting_channel_number: Optional starting channel number behavior: + - None: Use provider channel numbers, then auto-assign from 1 + - 0: Start with lowest available number and increment by 1 + - Other number: Use as starting number for auto-assignment + """ + from apps.channels.models import Stream, Channel, ChannelGroup, ChannelProfile, ChannelProfileMembership, Logo + from apps.epg.models import EPGData + from django.db import transaction + from django.shortcuts import get_object_or_404 + from core.utils import send_websocket_update + + task_id = self.request.id + total_streams = len(stream_ids) + created_channels = [] + errors = [] + + try: + # Send initial progress update + send_websocket_update('updates', 'update', { + 'type': 'bulk_channel_creation_progress', + 'task_id': task_id, + 'progress': 0, + 'total': total_streams, + 'status': 'starting', + 'message': f'Starting bulk creation of {total_streams} channels...' + }) + + # Gather current used numbers once + used_numbers = set(Channel.objects.all().values_list("channel_number", flat=True)) + + # Initialize next_number based on starting_channel_number mode + if starting_channel_number is None: + # Mode 1: Use provider numbers when available, auto-assign when not + next_number = 1 + elif starting_channel_number == 0: + # Mode 2: Start from lowest available number + next_number = 1 + else: + # Mode 3: Start from specified number + next_number = starting_channel_number + + def get_auto_number(): + nonlocal next_number + while next_number in used_numbers: + next_number += 1 + used_numbers.add(next_number) + return next_number + + logos_to_create = [] + channels_to_create = [] + streams_map = [] + logo_map = [] + profile_map = [] + + # Process streams in batches to avoid memory issues + batch_size = 100 + processed = 0 + + for i in range(0, total_streams, batch_size): + batch_stream_ids = stream_ids[i:i + batch_size] + # Fetch streams and preserve the order from batch_stream_ids + batch_streams_dict = {stream.id: stream for stream in Stream.objects.filter(id__in=batch_stream_ids)} + batch_streams = [batch_streams_dict[stream_id] for stream_id in batch_stream_ids if stream_id in batch_streams_dict] + + # Send progress update + send_websocket_update('updates', 'update', { + 'type': 'bulk_channel_creation_progress', + 'task_id': task_id, + 'progress': processed, + 'total': total_streams, + 'status': 'processing', + 'message': f'Processing streams {processed + 1}-{min(processed + batch_size, total_streams)} of {total_streams}...' 
+ }) + + for stream in batch_streams: + try: + name = stream.name + channel_group = stream.channel_group + stream_custom_props = stream.custom_properties or {} + + # Determine channel number based on starting_channel_number mode + channel_number = None + + if starting_channel_number is None: + # Mode 1: Use provider numbers when available + if "tvg-chno" in stream_custom_props: + channel_number = float(stream_custom_props["tvg-chno"]) + elif "channel-number" in stream_custom_props: + channel_number = float(stream_custom_props["channel-number"]) + elif "num" in stream_custom_props: + channel_number = float(stream_custom_props["num"]) + + # For modes 2 and 3 (starting_channel_number == 0 or specific number), + # ignore provider numbers and use sequential assignment + + # Get TVC guide station ID + tvc_guide_stationid = None + if "tvc-guide-stationid" in stream_custom_props: + tvc_guide_stationid = stream_custom_props["tvc-guide-stationid"] + + # Check if the determined/provider number is available + if channel_number is not None and ( + channel_number in used_numbers + or Channel.objects.filter(channel_number=channel_number).exists() + ): + # Provider number is taken, use auto-assignment + channel_number = get_auto_number() + elif channel_number is not None: + # Provider number is available, use it + used_numbers.add(channel_number) + else: + # No provider number or ignoring provider numbers, use auto-assignment + channel_number = get_auto_number() + + channel_data = { + "channel_number": channel_number, + "name": name, + "tvc_guide_stationid": tvc_guide_stationid, + "tvg_id": stream.tvg_id, + } + + # Only add channel_group_id if the stream has a channel group + if channel_group: + channel_data["channel_group_id"] = channel_group.id + + # Attempt to find existing EPGs with the same tvg-id + epgs = EPGData.objects.filter(tvg_id=stream.tvg_id) + if epgs: + channel_data["epg_data_id"] = epgs.first().id + + channel = Channel(**channel_data) + channels_to_create.append(channel) + streams_map.append([stream.id]) + + # Store profile IDs for this channel + profile_map.append(channel_profile_ids) + + # Handle logo - validate URL length to avoid PostgreSQL btree index errors + validated_logo_url = validate_logo_url(stream.logo_url) if stream.logo_url else None + if validated_logo_url: + logos_to_create.append( + Logo( + url=validated_logo_url, + name=stream.name or stream.tvg_id, + ) + ) + logo_map.append(validated_logo_url) + else: + logo_map.append(None) + + processed += 1 + + except Exception as e: + errors.append({ + 'stream_id': stream.id if 'stream' in locals() else 'unknown', + 'error': str(e) + }) + processed += 1 + + # Create logos first + if logos_to_create: + send_websocket_update('updates', 'update', { + 'type': 'bulk_channel_creation_progress', + 'task_id': task_id, + 'progress': processed, + 'total': total_streams, + 'status': 'creating_logos', + 'message': f'Creating {len(logos_to_create)} logos...' + }) + Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True) + + # Get logo objects for association + channel_logos = { + logo.url: logo + for logo in Logo.objects.filter( + url__in=[url for url in logo_map if url is not None] + ) + } + + # Create channels in database + if channels_to_create: + send_websocket_update('updates', 'update', { + 'type': 'bulk_channel_creation_progress', + 'task_id': task_id, + 'progress': processed, + 'total': total_streams, + 'status': 'creating_channels', + 'message': f'Creating {len(channels_to_create)} channels in database...' 
+ }) + + with transaction.atomic(): + created_channels = Channel.objects.bulk_create(channels_to_create) + + # Update channels with logos and create stream associations + update = [] + channel_stream_associations = [] + channel_profile_memberships = [] + + for channel, stream_ids, logo_url, profile_ids in zip( + created_channels, streams_map, logo_map, profile_map + ): + if logo_url: + channel.logo = channel_logos[logo_url] + update.append(channel) + + # Create stream associations + for stream_id in stream_ids: + from apps.channels.models import ChannelStream + channel_stream_associations.append( + ChannelStream(channel=channel, stream_id=stream_id, order=0) + ) + + # Handle channel profile membership + # Semantics: + # - None: add to ALL profiles (backward compatible default) + # - Empty array []: add to NO profiles + # - Sentinel [0] or 0 in array: add to ALL profiles (explicit) + # - [1,2,...]: add to specified profile IDs only + if profile_ids is None: + # Omitted -> add to all profiles (backward compatible) + all_profiles = ChannelProfile.objects.all() + channel_profile_memberships.extend([ + ChannelProfileMembership( + channel_profile=profile, + channel=channel, + enabled=True + ) + for profile in all_profiles + ]) + elif isinstance(profile_ids, list) and len(profile_ids) == 0: + # Empty array -> add to no profiles + pass + elif isinstance(profile_ids, list) and 0 in profile_ids: + # Sentinel 0 -> add to all profiles (explicit) + all_profiles = ChannelProfile.objects.all() + channel_profile_memberships.extend([ + ChannelProfileMembership( + channel_profile=profile, + channel=channel, + enabled=True + ) + for profile in all_profiles + ]) + else: + # Specific profile IDs + try: + specific_profiles = ChannelProfile.objects.filter(id__in=profile_ids) + channel_profile_memberships.extend([ + ChannelProfileMembership( + channel_profile=profile, + channel=channel, + enabled=True + ) + for profile in specific_profiles + ]) + except Exception as e: + errors.append({ + 'channel_id': channel.id, + 'error': f'Failed to add to profiles: {str(e)}' + }) + + # Bulk update channels with logos + if update: + Channel.objects.bulk_update(update, ["logo"]) + + # Bulk create channel-stream associations + if channel_stream_associations: + from apps.channels.models import ChannelStream + ChannelStream.objects.bulk_create(channel_stream_associations, ignore_conflicts=True) + + # Bulk create profile memberships + if channel_profile_memberships: + ChannelProfileMembership.objects.bulk_create(channel_profile_memberships, ignore_conflicts=True) + + # Send completion update + send_websocket_update('updates', 'update', { + 'type': 'bulk_channel_creation_progress', + 'task_id': task_id, + 'progress': total_streams, + 'total': total_streams, + 'status': 'completed', + 'message': f'Successfully created {len(created_channels)} channels', + 'created_count': len(created_channels), + 'error_count': len(errors), + 'errors': errors[:10] # Send first 10 errors only + }) + + # Send general channel update notification + send_websocket_update('updates', 'update', { + 'type': 'channels_created', + 'count': len(created_channels) + }) + + return { + 'status': 'completed', + 'created_count': len(created_channels), + 'error_count': len(errors), + 'errors': errors + } + + except Exception as e: + logger.error(f"Bulk channel creation failed: {e}") + send_websocket_update('updates', 'update', { + 'type': 'bulk_channel_creation_progress', + 'task_id': task_id, + 'progress': 0, + 'total': total_streams, + 'status': 'failed', + 
'message': f'Task failed: {str(e)}', + 'error': str(e) + }) + raise + + +@shared_task(bind=True) +def set_channels_names_from_epg(self, channel_ids): + """ + Celery task to set channel names from EPG data for multiple channels + """ + from core.utils import send_websocket_update + + task_id = self.request.id + total_channels = len(channel_ids) + updated_count = 0 + errors = [] + + try: + logger.info(f"Starting EPG name setting task for {total_channels} channels") + + # Send initial progress + send_websocket_update('updates', 'update', { + 'type': 'epg_name_setting_progress', + 'task_id': task_id, + 'progress': 0, + 'total': total_channels, + 'status': 'running', + 'message': 'Starting EPG name setting...' + }) + + batch_size = 100 + for i in range(0, total_channels, batch_size): + batch_ids = channel_ids[i:i + batch_size] + batch_updates = [] + + # Get channels and their EPG data + channels = Channel.objects.filter(id__in=batch_ids).select_related('epg_data') + + for channel in channels: + try: + if channel.epg_data and channel.epg_data.name: + if channel.name != channel.epg_data.name: + channel.name = channel.epg_data.name + batch_updates.append(channel) + updated_count += 1 + except Exception as e: + errors.append(f"Channel {channel.id}: {str(e)}") + logger.error(f"Error processing channel {channel.id}: {e}") + + # Bulk update the batch + if batch_updates: + Channel.objects.bulk_update(batch_updates, ['name']) + + # Send progress update + progress = min(i + batch_size, total_channels) + send_websocket_update('updates', 'update', { + 'type': 'epg_name_setting_progress', + 'task_id': task_id, + 'progress': progress, + 'total': total_channels, + 'status': 'running', + 'message': f'Updated {updated_count} channel names...', + 'updated_count': updated_count + }) + + # Send completion notification + send_websocket_update('updates', 'update', { + 'type': 'epg_name_setting_progress', + 'task_id': task_id, + 'progress': total_channels, + 'total': total_channels, + 'status': 'completed', + 'message': f'Successfully updated {updated_count} channel names from EPG data', + 'updated_count': updated_count, + 'error_count': len(errors), + 'errors': errors + }) + + logger.info(f"EPG name setting task completed. Updated {updated_count} channels") + return { + 'status': 'completed', + 'updated_count': updated_count, + 'error_count': len(errors), + 'errors': errors + } + + except Exception as e: + logger.error(f"EPG name setting task failed: {e}") + send_websocket_update('updates', 'update', { + 'type': 'epg_name_setting_progress', + 'task_id': task_id, + 'progress': 0, + 'total': total_channels, + 'status': 'failed', + 'message': f'Task failed: {str(e)}', + 'error': str(e) + }) + raise + + +@shared_task(bind=True) +def set_channels_logos_from_epg(self, channel_ids): + """ + Celery task to set channel logos from EPG data for multiple channels + Creates logos from EPG icon URLs if they don't exist + """ + from .models import Logo + from core.utils import send_websocket_update + import requests + from urllib.parse import urlparse + + task_id = self.request.id + total_channels = len(channel_ids) + updated_count = 0 + created_logos_count = 0 + errors = [] + + try: + logger.info(f"Starting EPG logo setting task for {total_channels} channels") + + # Send initial progress + send_websocket_update('updates', 'update', { + 'type': 'epg_logo_setting_progress', + 'task_id': task_id, + 'progress': 0, + 'total': total_channels, + 'status': 'running', + 'message': 'Starting EPG logo setting...' 
+ }) + + batch_size = 50 # Smaller batch for logo processing + for i in range(0, total_channels, batch_size): + batch_ids = channel_ids[i:i + batch_size] + batch_updates = [] + + # Get channels and their EPG data + channels = Channel.objects.filter(id__in=batch_ids).select_related('epg_data', 'logo') + + for channel in channels: + try: + if channel.epg_data and channel.epg_data.icon_url: + icon_url = channel.epg_data.icon_url.strip() + + # Try to find existing logo with this URL + try: + logo = Logo.objects.get(url=icon_url) + except Logo.DoesNotExist: + # Create new logo from EPG icon URL + try: + # Generate a name for the logo + logo_name = channel.epg_data.name or f"Logo for {channel.epg_data.tvg_id}" + + # Create the logo record + logo = Logo.objects.create( + name=logo_name, + url=icon_url + ) + created_logos_count += 1 + logger.info(f"Created new logo from EPG: {logo_name} - {icon_url}") + + except Exception as create_error: + errors.append(f"Channel {channel.id}: Failed to create logo from {icon_url}: {str(create_error)}") + logger.error(f"Failed to create logo for channel {channel.id}: {create_error}") + continue + + # Update channel logo if different + if channel.logo != logo: + channel.logo = logo + batch_updates.append(channel) + updated_count += 1 + + except Exception as e: + errors.append(f"Channel {channel.id}: {str(e)}") + logger.error(f"Error processing channel {channel.id}: {e}") + + # Bulk update the batch + if batch_updates: + Channel.objects.bulk_update(batch_updates, ['logo']) + + # Send progress update + progress = min(i + batch_size, total_channels) + send_websocket_update('updates', 'update', { + 'type': 'epg_logo_setting_progress', + 'task_id': task_id, + 'progress': progress, + 'total': total_channels, + 'status': 'running', + 'message': f'Updated {updated_count} channel logos, created {created_logos_count} new logos...', + 'updated_count': updated_count, + 'created_logos_count': created_logos_count + }) + + # Send completion notification + send_websocket_update('updates', 'update', { + 'type': 'epg_logo_setting_progress', + 'task_id': task_id, + 'progress': total_channels, + 'total': total_channels, + 'status': 'completed', + 'message': f'Successfully updated {updated_count} channel logos and created {created_logos_count} new logos from EPG data', + 'updated_count': updated_count, + 'created_logos_count': created_logos_count, + 'error_count': len(errors), + 'errors': errors + }) + + logger.info(f"EPG logo setting task completed. 
Updated {updated_count} channels, created {created_logos_count} logos") + return { + 'status': 'completed', + 'updated_count': updated_count, + 'created_logos_count': created_logos_count, + 'error_count': len(errors), + 'errors': errors + } + + except Exception as e: + logger.error(f"EPG logo setting task failed: {e}") + send_websocket_update('updates', 'update', { + 'type': 'epg_logo_setting_progress', + 'task_id': task_id, + 'progress': 0, + 'total': total_channels, + 'status': 'failed', + 'message': f'Task failed: {str(e)}', + 'error': str(e) + }) + raise + + +@shared_task(bind=True) +def set_channels_tvg_ids_from_epg(self, channel_ids): + """ + Celery task to set channel TVG-IDs from EPG data for multiple channels + """ + from core.utils import send_websocket_update + + task_id = self.request.id + total_channels = len(channel_ids) + updated_count = 0 + errors = [] + + try: + logger.info(f"Starting EPG TVG-ID setting task for {total_channels} channels") + + # Send initial progress + send_websocket_update('updates', 'update', { + 'type': 'epg_tvg_id_setting_progress', + 'task_id': task_id, + 'progress': 0, + 'total': total_channels, + 'status': 'running', + 'message': 'Starting EPG TVG-ID setting...' + }) + + batch_size = 100 + for i in range(0, total_channels, batch_size): + batch_ids = channel_ids[i:i + batch_size] + batch_updates = [] + + # Get channels and their EPG data + channels = Channel.objects.filter(id__in=batch_ids).select_related('epg_data') + + for channel in channels: + try: + if channel.epg_data and channel.epg_data.tvg_id: + if channel.tvg_id != channel.epg_data.tvg_id: + channel.tvg_id = channel.epg_data.tvg_id + batch_updates.append(channel) + updated_count += 1 + except Exception as e: + errors.append(f"Channel {channel.id}: {str(e)}") + logger.error(f"Error processing channel {channel.id}: {e}") + + # Bulk update the batch + if batch_updates: + Channel.objects.bulk_update(batch_updates, ['tvg_id']) + + # Send progress update + progress = min(i + batch_size, total_channels) + send_websocket_update('updates', 'update', { + 'type': 'epg_tvg_id_setting_progress', + 'task_id': task_id, + 'progress': progress, + 'total': total_channels, + 'status': 'running', + 'message': f'Updated {updated_count} channel TVG-IDs...', + 'updated_count': updated_count + }) + + # Send completion notification + send_websocket_update('updates', 'update', { + 'type': 'epg_tvg_id_setting_progress', + 'task_id': task_id, + 'progress': total_channels, + 'total': total_channels, + 'status': 'completed', + 'message': f'Successfully updated {updated_count} channel TVG-IDs from EPG data', + 'updated_count': updated_count, + 'error_count': len(errors), + 'errors': errors + }) + + logger.info(f"EPG TVG-ID setting task completed. 
Updated {updated_count} channels") + return { + 'status': 'completed', + 'updated_count': updated_count, + 'error_count': len(errors), + 'errors': errors + } + + except Exception as e: + logger.error(f"EPG TVG-ID setting task failed: {e}") + send_websocket_update('updates', 'update', { + 'type': 'epg_tvg_id_setting_progress', + 'task_id': task_id, + 'progress': 0, + 'total': total_channels, + 'status': 'failed', + 'message': f'Task failed: {str(e)}', + 'error': str(e) + }) + raise diff --git a/apps/channels/tests/__init__.py b/apps/channels/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/apps/channels/tests/test_channel_api.py b/apps/channels/tests/test_channel_api.py new file mode 100644 index 00000000..bb245da1 --- /dev/null +++ b/apps/channels/tests/test_channel_api.py @@ -0,0 +1,211 @@ +from django.test import TestCase +from django.contrib.auth import get_user_model +from rest_framework.test import APIClient +from rest_framework import status + +from apps.channels.models import Channel, ChannelGroup + +User = get_user_model() + + +class ChannelBulkEditAPITests(TestCase): + def setUp(self): + # Create a test admin user (user_level >= 10) and authenticate + self.user = User.objects.create_user(username="testuser", password="testpass123") + self.user.user_level = 10 # Set admin level + self.user.save() + self.client = APIClient() + self.client.force_authenticate(user=self.user) + self.bulk_edit_url = "/api/channels/channels/edit/bulk/" + + # Create test channel group + self.group1 = ChannelGroup.objects.create(name="Test Group 1") + self.group2 = ChannelGroup.objects.create(name="Test Group 2") + + # Create test channels + self.channel1 = Channel.objects.create( + channel_number=1.0, + name="Channel 1", + tvg_id="channel1", + channel_group=self.group1 + ) + self.channel2 = Channel.objects.create( + channel_number=2.0, + name="Channel 2", + tvg_id="channel2", + channel_group=self.group1 + ) + self.channel3 = Channel.objects.create( + channel_number=3.0, + name="Channel 3", + tvg_id="channel3" + ) + + def test_bulk_edit_success(self): + """Test successful bulk update of multiple channels""" + data = [ + {"id": self.channel1.id, "name": "Updated Channel 1"}, + {"id": self.channel2.id, "name": "Updated Channel 2", "channel_number": 22.0}, + ] + + response = self.client.patch(self.bulk_edit_url, data, format="json") + + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(response.data["message"], "Successfully updated 2 channels") + self.assertEqual(len(response.data["channels"]), 2) + + # Verify database changes + self.channel1.refresh_from_db() + self.channel2.refresh_from_db() + self.assertEqual(self.channel1.name, "Updated Channel 1") + self.assertEqual(self.channel2.name, "Updated Channel 2") + self.assertEqual(self.channel2.channel_number, 22.0) + + def test_bulk_edit_with_empty_validated_data_first(self): + """ + Test the bug fix: when first channel has empty validated_data. 
+ This was causing: ValueError: Field names must be given to bulk_update() + """ + # Create a channel with data that will be "unchanged" (empty validated_data) + # We'll send the same data it already has + data = [ + # First channel: no actual changes (this would create empty validated_data) + {"id": self.channel1.id}, + # Second channel: has changes + {"id": self.channel2.id, "name": "Updated Channel 2"}, + ] + + response = self.client.patch(self.bulk_edit_url, data, format="json") + + # Should not crash with ValueError + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(response.data["message"], "Successfully updated 2 channels") + + # Verify the channel with changes was updated + self.channel2.refresh_from_db() + self.assertEqual(self.channel2.name, "Updated Channel 2") + + def test_bulk_edit_all_empty_updates(self): + """Test when all channels have empty updates (no actual changes)""" + data = [ + {"id": self.channel1.id}, + {"id": self.channel2.id}, + ] + + response = self.client.patch(self.bulk_edit_url, data, format="json") + + # Should succeed without calling bulk_update + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(response.data["message"], "Successfully updated 2 channels") + + def test_bulk_edit_mixed_fields(self): + """Test bulk update where different channels update different fields""" + data = [ + {"id": self.channel1.id, "name": "New Name 1"}, + {"id": self.channel2.id, "channel_number": 99.0}, + {"id": self.channel3.id, "tvg_id": "new_tvg_id", "name": "New Name 3"}, + ] + + response = self.client.patch(self.bulk_edit_url, data, format="json") + + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(response.data["message"], "Successfully updated 3 channels") + + # Verify all updates + self.channel1.refresh_from_db() + self.channel2.refresh_from_db() + self.channel3.refresh_from_db() + + self.assertEqual(self.channel1.name, "New Name 1") + self.assertEqual(self.channel2.channel_number, 99.0) + self.assertEqual(self.channel3.tvg_id, "new_tvg_id") + self.assertEqual(self.channel3.name, "New Name 3") + + def test_bulk_edit_with_channel_group(self): + """Test bulk update with channel_group_id changes""" + data = [ + {"id": self.channel1.id, "channel_group_id": self.group2.id}, + {"id": self.channel3.id, "channel_group_id": self.group1.id}, + ] + + response = self.client.patch(self.bulk_edit_url, data, format="json") + + self.assertEqual(response.status_code, status.HTTP_200_OK) + + # Verify group changes + self.channel1.refresh_from_db() + self.channel3.refresh_from_db() + self.assertEqual(self.channel1.channel_group, self.group2) + self.assertEqual(self.channel3.channel_group, self.group1) + + def test_bulk_edit_nonexistent_channel(self): + """Test bulk update with a channel that doesn't exist""" + nonexistent_id = 99999 + data = [ + {"id": nonexistent_id, "name": "Should Fail"}, + {"id": self.channel1.id, "name": "Should Still Update"}, + ] + + response = self.client.patch(self.bulk_edit_url, data, format="json") + + # Should return 400 with errors + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) + self.assertIn("errors", response.data) + self.assertEqual(len(response.data["errors"]), 1) + self.assertEqual(response.data["errors"][0]["channel_id"], nonexistent_id) + self.assertEqual(response.data["errors"][0]["error"], "Channel not found") + + # The valid channel should still be updated + self.assertEqual(response.data["updated_count"], 1) + + def 
test_bulk_edit_validation_error(self): + """Test bulk update with invalid data (validation error)""" + data = [ + {"id": self.channel1.id, "channel_number": "invalid_number"}, + ] + + response = self.client.patch(self.bulk_edit_url, data, format="json") + + # Should return 400 with validation errors + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) + self.assertIn("errors", response.data) + self.assertEqual(len(response.data["errors"]), 1) + self.assertIn("channel_number", response.data["errors"][0]["errors"]) + + def test_bulk_edit_empty_channel_updates(self): + """Test bulk update with empty list""" + data = [] + + response = self.client.patch(self.bulk_edit_url, data, format="json") + + # Empty list is accepted and returns success with 0 updates + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(response.data["message"], "Successfully updated 0 channels") + + def test_bulk_edit_missing_channel_updates(self): + """Test bulk update without proper format (dict instead of list)""" + data = {"channel_updates": {}} + + response = self.client.patch(self.bulk_edit_url, data, format="json") + + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) + self.assertEqual(response.data["error"], "Expected a list of channel updates") + + def test_bulk_edit_preserves_other_fields(self): + """Test that bulk update only changes specified fields""" + original_channel_number = self.channel1.channel_number + original_tvg_id = self.channel1.tvg_id + + data = [ + {"id": self.channel1.id, "name": "Only Name Changed"}, + ] + + response = self.client.patch(self.bulk_edit_url, data, format="json") + + self.assertEqual(response.status_code, status.HTTP_200_OK) + + # Verify only name changed, other fields preserved + self.channel1.refresh_from_db() + self.assertEqual(self.channel1.name, "Only Name Changed") + self.assertEqual(self.channel1.channel_number, original_channel_number) + self.assertEqual(self.channel1.tvg_id, original_tvg_id) diff --git a/apps/channels/tests/test_recurring_rules.py b/apps/channels/tests/test_recurring_rules.py new file mode 100644 index 00000000..982ecb93 --- /dev/null +++ b/apps/channels/tests/test_recurring_rules.py @@ -0,0 +1,40 @@ +from datetime import datetime, timedelta +from django.test import TestCase +from django.utils import timezone + +from apps.channels.models import Channel, RecurringRecordingRule, Recording +from apps.channels.tasks import sync_recurring_rule_impl, purge_recurring_rule_impl + + +class RecurringRecordingRuleTasksTests(TestCase): + def test_sync_recurring_rule_creates_and_purges_recordings(self): + now = timezone.now() + channel = Channel.objects.create(channel_number=1, name='Test Channel') + + start_time = (now + timedelta(minutes=15)).time().replace(second=0, microsecond=0) + end_time = (now + timedelta(minutes=75)).time().replace(second=0, microsecond=0) + + rule = RecurringRecordingRule.objects.create( + channel=channel, + days_of_week=[now.weekday()], + start_time=start_time, + end_time=end_time, + ) + + created = sync_recurring_rule_impl(rule.id, drop_existing=True, horizon_days=1) + self.assertEqual(created, 1) + + recording = Recording.objects.filter(custom_properties__rule__id=rule.id).first() + self.assertIsNotNone(recording) + self.assertEqual(recording.channel, channel) + self.assertEqual(recording.custom_properties.get('rule', {}).get('id'), rule.id) + + expected_start = timezone.make_aware( + datetime.combine(recording.start_time.date(), start_time), + 
timezone.get_current_timezone(), + ) + self.assertLess(abs((recording.start_time - expected_start).total_seconds()), 60) + + removed = purge_recurring_rule_impl(rule.id) + self.assertEqual(removed, 1) + self.assertFalse(Recording.objects.filter(custom_properties__rule__id=rule.id).exists()) diff --git a/apps/epg/api_views.py b/apps/epg/api_views.py index 526172f1..2fc5a743 100644 --- a/apps/epg/api_views.py +++ b/apps/epg/api_views.py @@ -2,47 +2,66 @@ import logging, os from rest_framework import viewsets, status from rest_framework.response import Response from rest_framework.views import APIView -from rest_framework.permissions import IsAuthenticated from rest_framework.decorators import action from drf_yasg.utils import swagger_auto_schema from drf_yasg import openapi from django.utils import timezone from datetime import timedelta from .models import EPGSource, ProgramData, EPGData # Added ProgramData -from .serializers import ProgramDataSerializer, EPGSourceSerializer, EPGDataSerializer # Updated serializer +from .serializers import ( + ProgramDataSerializer, + EPGSourceSerializer, + EPGDataSerializer, +) # Updated serializer from .tasks import refresh_epg_data +from apps.accounts.permissions import ( + Authenticated, + permission_classes_by_action, + permission_classes_by_method, +) logger = logging.getLogger(__name__) + # ───────────────────────────── # 1) EPG Source API (CRUD) # ───────────────────────────── class EPGSourceViewSet(viewsets.ModelViewSet): - """Handles CRUD operations for EPG sources""" + """ + API endpoint that allows EPG sources to be viewed or edited. + """ + queryset = EPGSource.objects.all() serializer_class = EPGSourceSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] def list(self, request, *args, **kwargs): logger.debug("Listing all EPG sources.") return super().list(request, *args, **kwargs) - @action(detail=False, methods=['post']) + @action(detail=False, methods=["post"]) def upload(self, request): - if 'file' not in request.FILES: - return Response({'error': 'No file uploaded'}, status=status.HTTP_400_BAD_REQUEST) + if "file" not in request.FILES: + return Response( + {"error": "No file uploaded"}, status=status.HTTP_400_BAD_REQUEST + ) - file = request.FILES['file'] + file = request.FILES["file"] file_name = file.name - file_path = os.path.join('/data/uploads/epgs', file_name) + file_path = os.path.join("/data/uploads/epgs", file_name) os.makedirs(os.path.dirname(file_path), exist_ok=True) - with open(file_path, 'wb+') as destination: + with open(file_path, "wb+") as destination: for chunk in file.chunks(): destination.write(chunk) new_obj_data = request.data.copy() - new_obj_data['file_path'] = file_path + new_obj_data["file_path"] = file_path serializer = self.get_serializer(data=new_obj_data) serializer.is_valid(raise_exception=True) @@ -50,72 +69,241 @@ class EPGSourceViewSet(viewsets.ModelViewSet): return Response(serializer.data, status=status.HTTP_201_CREATED) + def partial_update(self, request, *args, **kwargs): + """Handle partial updates with special logic for is_active field""" + instance = self.get_object() + + # Check if we're toggling is_active + if ( + "is_active" in request.data + and instance.is_active != request.data["is_active"] + ): + # Set appropriate status based on new is_active value + if request.data["is_active"]: + request.data["status"] = "idle" + else: + 
request.data["status"] = "disabled" + + # Continue with regular partial update + return super().partial_update(request, *args, **kwargs) + + # ───────────────────────────── # 2) Program API (CRUD) # ───────────────────────────── class ProgramViewSet(viewsets.ModelViewSet): """Handles CRUD operations for EPG programs""" + queryset = ProgramData.objects.all() serializer_class = ProgramDataSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] def list(self, request, *args, **kwargs): logger.debug("Listing all EPG programs.") return super().list(request, *args, **kwargs) + # ───────────────────────────── # 3) EPG Grid View # ───────────────────────────── class EPGGridAPIView(APIView): """Returns all programs airing in the next 24 hours including currently running ones and recent ones""" + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + @swagger_auto_schema( operation_description="Retrieve programs from the previous hour, currently running and upcoming for the next 24 hours", - responses={200: ProgramDataSerializer(many=True)} + responses={200: ProgramDataSerializer(many=True)}, ) def get(self, request, format=None): # Use current time instead of midnight now = timezone.now() one_hour_ago = now - timedelta(hours=1) twenty_four_hours_later = now + timedelta(hours=24) - logger.debug(f"EPGGridAPIView: Querying programs between {one_hour_ago} and {twenty_four_hours_later}.") + logger.debug( + f"EPGGridAPIView: Querying programs between {one_hour_ago} and {twenty_four_hours_later}." + ) # Use select_related to prefetch EPGData and include programs from the last hour - programs = ProgramData.objects.select_related('epg').filter( + programs = ProgramData.objects.select_related("epg").filter( # Programs that end after one hour ago (includes recently ended programs) end_time__gt=one_hour_ago, # AND start before the end time window - start_time__lt=twenty_four_hours_later + start_time__lt=twenty_four_hours_later, ) count = programs.count() - logger.debug(f"EPGGridAPIView: Found {count} program(s), including recently ended, currently running, and upcoming shows.") + logger.debug( + f"EPGGridAPIView: Found {count} program(s), including recently ended, currently running, and upcoming shows." 
+ ) - # Generate dummy programs for channels that have no EPG data + # Generate dummy programs for channels that have no EPG data OR dummy EPG sources from apps.channels.models import Channel + from apps.epg.models import EPGSource from django.db.models import Q - # Get channels with no EPG data + # Get channels with no EPG data at all (standard dummy) channels_without_epg = Channel.objects.filter(Q(epg_data__isnull=True)) - channels_count = channels_without_epg.count() - # Log more detailed information about channels missing EPG data - if channels_count > 0: + # Get channels with custom dummy EPG sources (generate on-demand with patterns) + channels_with_custom_dummy = Channel.objects.filter( + epg_data__epg_source__source_type='dummy' + ).distinct() + + # Log what we found + without_count = channels_without_epg.count() + custom_count = channels_with_custom_dummy.count() + + if without_count > 0: channel_names = [f"{ch.name} (ID: {ch.id})" for ch in channels_without_epg] - logger.warning(f"EPGGridAPIView: Missing EPG data for these channels: {', '.join(channel_names)}") + logger.debug( + f"EPGGridAPIView: Channels needing standard dummy EPG: {', '.join(channel_names)}" + ) - logger.debug(f"EPGGridAPIView: Found {channels_count} channels with no EPG data.") + if custom_count > 0: + channel_names = [f"{ch.name} (ID: {ch.id})" for ch in channels_with_custom_dummy] + logger.debug( + f"EPGGridAPIView: Channels needing custom dummy EPG: {', '.join(channel_names)}" + ) + + logger.debug( + f"EPGGridAPIView: Found {without_count} channels needing standard dummy, {custom_count} needing custom dummy EPG." + ) # Serialize the regular programs serialized_programs = ProgramDataSerializer(programs, many=True).data + # Humorous program descriptions based on time of day - same as in output/views.py + time_descriptions = { + (0, 4): [ + "Late Night with {channel} - Where insomniacs unite!", + "The 'Why Am I Still Awake?' Show on {channel}", + "Counting Sheep - A {channel} production for the sleepless", + ], + (4, 8): [ + "Dawn Patrol - Rise and shine with {channel}!", + "Early Bird Special - Coffee not included", + "Morning Zombies - Before coffee viewing on {channel}", + ], + (8, 12): [ + "Mid-Morning Meetings - Pretend you're paying attention while watching {channel}", + "The 'I Should Be Working' Hour on {channel}", + "Productivity Killer - {channel}'s daytime programming", + ], + (12, 16): [ + "Lunchtime Laziness with {channel}", + "The Afternoon Slump - Brought to you by {channel}", + "Post-Lunch Food Coma Theater on {channel}", + ], + (16, 20): [ + "Rush Hour - {channel}'s alternative to traffic", + "The 'What's For Dinner?' 
Debate on {channel}", + "Evening Escapism - {channel}'s remedy for reality", + ], + (20, 24): [ + "Prime Time Placeholder - {channel}'s finest not-programming", + "The 'Netflix Was Too Complicated' Show on {channel}", + "Family Argument Avoider - Courtesy of {channel}", + ], + } + # Generate and append dummy programs dummy_programs = [] - for channel in channels_without_epg: - # Use the channel UUID as tvg_id for dummy programs to match in the guide + + # Import the function from output.views + from apps.output.views import generate_dummy_programs as gen_dummy_progs + + # Handle channels with CUSTOM dummy EPG sources (with patterns) + for channel in channels_with_custom_dummy: + # For dummy EPGs, ALWAYS use channel UUID to ensure unique programs per channel + # This prevents multiple channels assigned to the same dummy EPG from showing identical data + # Each channel gets its own unique program data even if they share the same EPG source dummy_tvg_id = str(channel.uuid) try: - # Create programs every 4 hours for the next 24 hours + # Get the custom dummy EPG source + epg_source = channel.epg_data.epg_source if channel.epg_data else None + + logger.debug(f"Generating custom dummy programs for channel: {channel.name} (ID: {channel.id})") + + # Determine which name to parse based on custom properties + name_to_parse = channel.name + if epg_source and epg_source.custom_properties: + custom_props = epg_source.custom_properties + name_source = custom_props.get('name_source') + + if name_source == 'stream': + # Get the stream index (1-based from user, convert to 0-based) + stream_index = custom_props.get('stream_index', 1) - 1 + + # Get streams ordered by channelstream order + channel_streams = channel.streams.all().order_by('channelstream__order') + + if channel_streams.exists() and 0 <= stream_index < channel_streams.count(): + stream = list(channel_streams)[stream_index] + name_to_parse = stream.name + logger.debug(f"Using stream name for parsing: {name_to_parse} (stream index: {stream_index})") + else: + logger.warning(f"Stream index {stream_index} not found for channel {channel.name}, falling back to channel name") + elif name_source == 'channel': + logger.debug(f"Using channel name for parsing: {name_to_parse}") + + # Generate programs using custom patterns from the dummy EPG source + # Use the same tvg_id that will be set in the program data + generated = gen_dummy_progs( + channel_id=dummy_tvg_id, + channel_name=name_to_parse, + num_days=1, + program_length_hours=4, + epg_source=epg_source + ) + + # Custom dummy should always return data (either from patterns or fallback) + if generated: + logger.debug(f"Generated {len(generated)} custom dummy programs for {channel.name}") + # Convert generated programs to API format + for program in generated: + dummy_program = { + "id": f"dummy-custom-{channel.id}-{program['start_time'].hour}", + "epg": {"tvg_id": dummy_tvg_id, "name": channel.name}, + "start_time": program['start_time'].isoformat(), + "end_time": program['end_time'].isoformat(), + "title": program['title'], + "description": program['description'], + "tvg_id": dummy_tvg_id, + "sub_title": None, + "custom_properties": None, + } + dummy_programs.append(dummy_program) + else: + logger.warning(f"No programs generated for custom dummy EPG channel: {channel.name}") + + except Exception as e: + logger.error( + f"Error creating custom dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}" + ) + + # Handle channels with NO EPG data (standard dummy with humorous descriptions) + 
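+ # Worked example of the selection below: for a 21:00 slot, hour is 21 and day is 0,
+ # so the (20, 24) range matches and (21 + 0) % 3 == 0 picks
+ # "Prime Time Placeholder - {channel}'s finest not-programming",
+ # which is then formatted with the channel name.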
for channel in channels_without_epg: + # For channels with no EPG, use UUID to ensure uniqueness (matches frontend logic) + # The frontend uses: tvgRecord?.tvg_id ?? channel.uuid + # Since there's no EPG data, it will fall back to UUID + dummy_tvg_id = str(channel.uuid) + + try: + logger.debug(f"Generating standard dummy programs for channel: {channel.name} (ID: {channel.id})") + + # Create programs every 4 hours for the next 24 hours with humorous descriptions for hour_offset in range(0, 24, 4): # Use timedelta for time arithmetic instead of replace() to avoid hour overflow start_time = now + timedelta(hours=hour_offset) @@ -123,31 +311,51 @@ class EPGGridAPIView(APIView): start_time = start_time.replace(minute=0, second=0, microsecond=0) end_time = start_time + timedelta(hours=4) + # Get the hour for selecting a description + hour = start_time.hour + day = 0 # Use 0 as we're only doing 1 day + + # Find the appropriate time slot for description + for time_range, descriptions in time_descriptions.items(): + start_range, end_range = time_range + if start_range <= hour < end_range: + # Pick a description using the sum of the hour and day as seed + # This makes it somewhat random but consistent for the same timeslot + description = descriptions[ + (hour + day) % len(descriptions) + ].format(channel=channel.name) + break + else: + # Fallback description if somehow no range matches + description = f"Placeholder program for {channel.name} - EPG data went on vacation" + # Create a dummy program in the same format as regular programs dummy_program = { - 'id': f"dummy-{channel.id}-{hour_offset}", # Create a unique ID - 'epg': { - 'tvg_id': dummy_tvg_id, - 'name': channel.name - }, - 'start_time': start_time.isoformat(), - 'end_time': end_time.isoformat(), - 'title': f"{channel.name}", - 'description': f"Placeholder program for {channel.name}", - 'tvg_id': dummy_tvg_id, - 'sub_title': None, - 'custom_properties': None + "id": f"dummy-standard-{channel.id}-{hour_offset}", + "epg": {"tvg_id": dummy_tvg_id, "name": channel.name}, + "start_time": start_time.isoformat(), + "end_time": end_time.isoformat(), + "title": f"{channel.name}", + "description": description, + "tvg_id": dummy_tvg_id, + "sub_title": None, + "custom_properties": None, } dummy_programs.append(dummy_program) except Exception as e: - logger.error(f"Error creating dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}") + logger.error( + f"Error creating standard dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}" + ) # Combine regular and dummy programs all_programs = list(serialized_programs) + dummy_programs - logger.debug(f"EPGGridAPIView: Returning {len(all_programs)} total programs (including {len(dummy_programs)} dummy programs).") + logger.debug( + f"EPGGridAPIView: Returning {len(all_programs)} total programs (including {len(dummy_programs)} dummy programs)." 
+ ) + + return Response({"data": all_programs}, status=status.HTTP_200_OK) - return Response({'data': all_programs}, status=status.HTTP_200_OK) # ───────────────────────────── # 4) EPG Import View @@ -155,15 +363,41 @@ class EPGGridAPIView(APIView): class EPGImportAPIView(APIView): """Triggers an EPG data refresh""" + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + @swagger_auto_schema( operation_description="Triggers an EPG data import", - responses={202: "EPG data import initiated"} + responses={202: "EPG data import initiated"}, ) def post(self, request, format=None): logger.info("EPGImportAPIView: Received request to import EPG data.") - refresh_epg_data.delay(request.data.get('id', None)) # Trigger Celery task + epg_id = request.data.get("id", None) + + # Check if this is a dummy EPG source + try: + from .models import EPGSource + epg_source = EPGSource.objects.get(id=epg_id) + if epg_source.source_type == 'dummy': + logger.info(f"EPGImportAPIView: Skipping refresh for dummy EPG source {epg_id}") + return Response( + {"success": False, "message": "Dummy EPG sources do not require refreshing."}, + status=status.HTTP_400_BAD_REQUEST, + ) + except EPGSource.DoesNotExist: + pass # Let the task handle the missing source + + refresh_epg_data.delay(epg_id) # Trigger Celery task logger.info("EPGImportAPIView: Task dispatched to refresh EPG data.") - return Response({'success': True, 'message': 'EPG data import initiated.'}, status=status.HTTP_202_ACCEPTED) + return Response( + {"success": True, "message": "EPG data import initiated."}, + status=status.HTTP_202_ACCEPTED, + ) # ───────────────────────────── @@ -173,6 +407,13 @@ class EPGDataViewSet(viewsets.ReadOnlyModelViewSet): """ API endpoint that allows EPGData objects to be viewed. 
""" + queryset = EPGData.objects.all() serializer_class = EPGDataSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + diff --git a/apps/epg/migrations/0007_epgsource_status_epgsource_last_error.py b/apps/epg/migrations/0007_epgsource_status_epgsource_last_error.py new file mode 100644 index 00000000..050b1b4f --- /dev/null +++ b/apps/epg/migrations/0007_epgsource_status_epgsource_last_error.py @@ -0,0 +1,23 @@ +# Generated by Django + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0006_epgsource_refresh_interval_epgsource_refresh_task'), + ] + + operations = [ + migrations.AddField( + model_name='epgsource', + name='status', + field=models.CharField(choices=[('idle', 'Idle'), ('fetching', 'Fetching'), ('parsing', 'Parsing'), ('error', 'Error'), ('success', 'Success')], default='idle', max_length=20), + ), + migrations.AddField( + model_name='epgsource', + name='last_error', + field=models.TextField(blank=True, null=True), + ), + ] diff --git a/apps/epg/migrations/0010_merge_20250503_2147.py b/apps/epg/migrations/0010_merge_20250503_2147.py new file mode 100644 index 00000000..a65117ba --- /dev/null +++ b/apps/epg/migrations/0010_merge_20250503_2147.py @@ -0,0 +1,14 @@ +# Generated by Django 5.1.6 on 2025-05-03 21:47 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0007_epgsource_status_epgsource_last_error'), + ('epg', '0009_alter_epgsource_created_at_and_more'), + ] + + operations = [ + ] diff --git a/apps/epg/migrations/0011_update_epgsource_fields.py b/apps/epg/migrations/0011_update_epgsource_fields.py new file mode 100644 index 00000000..44c38a76 --- /dev/null +++ b/apps/epg/migrations/0011_update_epgsource_fields.py @@ -0,0 +1,42 @@ +# Generated by Django 5.1.6 on 2025-05-04 21:43 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0010_merge_20250503_2147'), + ] + + operations = [ + # Change updated_at field + migrations.AlterField( + model_name='epgsource', + name='updated_at', + field=models.DateTimeField(blank=True, help_text='Time when this source was last successfully refreshed', null=True), + ), + + # Add new last_message field + migrations.AddField( + model_name='epgsource', + name='last_message', + field=models.TextField(blank=True, help_text='Last status message, including success results or error information', null=True), + ), + + # Copy data from last_error to last_message + migrations.RunPython( + code=lambda apps, schema_editor: apps.get_model('epg', 'EPGSource').objects.all().update( + last_message=models.F('last_error') + ), + reverse_code=lambda apps, schema_editor: apps.get_model('epg', 'EPGSource').objects.all().update( + last_error=models.F('last_message') + ), + ), + + # Remove the old field + migrations.RemoveField( + model_name='epgsource', + name='last_error', + ), + ] diff --git a/apps/epg/migrations/0012_alter_epgsource_status.py b/apps/epg/migrations/0012_alter_epgsource_status.py new file mode 100644 index 00000000..39cce295 --- /dev/null +++ b/apps/epg/migrations/0012_alter_epgsource_status.py @@ -0,0 +1,18 @@ +# Generated by Django 5.1.6 on 2025-05-15 01:05 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', 
'0011_update_epgsource_fields'), + ] + + operations = [ + migrations.AlterField( + model_name='epgsource', + name='status', + field=models.CharField(choices=[('idle', 'Idle'), ('fetching', 'Fetching'), ('parsing', 'Parsing'), ('error', 'Error'), ('success', 'Success'), ('disabled', 'Disabled')], default='idle', max_length=20), + ), + ] diff --git a/apps/epg/migrations/0013_alter_epgsource_refresh_interval.py b/apps/epg/migrations/0013_alter_epgsource_refresh_interval.py new file mode 100644 index 00000000..64be2c3c --- /dev/null +++ b/apps/epg/migrations/0013_alter_epgsource_refresh_interval.py @@ -0,0 +1,18 @@ +# Generated by Django 5.1.6 on 2025-05-21 19:58 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0012_alter_epgsource_status'), + ] + + operations = [ + migrations.AlterField( + model_name='epgsource', + name='refresh_interval', + field=models.IntegerField(default=0), + ), + ] diff --git a/apps/epg/migrations/0014_epgsource_extracted_file_path.py b/apps/epg/migrations/0014_epgsource_extracted_file_path.py new file mode 100644 index 00000000..9ee1170b --- /dev/null +++ b/apps/epg/migrations/0014_epgsource_extracted_file_path.py @@ -0,0 +1,18 @@ +# Generated by Django 5.1.6 on 2025-05-26 15:48 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0013_alter_epgsource_refresh_interval'), + ] + + operations = [ + migrations.AddField( + model_name='epgsource', + name='extracted_file_path', + field=models.CharField(blank=True, help_text='Path to extracted XML file after decompression', max_length=1024, null=True), + ), + ] diff --git a/apps/epg/migrations/0015_alter_programdata_custom_properties.py b/apps/epg/migrations/0015_alter_programdata_custom_properties.py new file mode 100644 index 00000000..f33aa97f --- /dev/null +++ b/apps/epg/migrations/0015_alter_programdata_custom_properties.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-09-02 14:30 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0014_epgsource_extracted_file_path'), + ] + + operations = [ + migrations.AlterField( + model_name='programdata', + name='custom_properties', + field=models.JSONField(blank=True, default=dict, null=True), + ), + ] diff --git a/apps/epg/migrations/0016_epgdata_icon_url.py b/apps/epg/migrations/0016_epgdata_icon_url.py new file mode 100644 index 00000000..b934b024 --- /dev/null +++ b/apps/epg/migrations/0016_epgdata_icon_url.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-09-16 22:01 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0015_alter_programdata_custom_properties'), + ] + + operations = [ + migrations.AddField( + model_name='epgdata', + name='icon_url', + field=models.URLField(blank=True, max_length=500, null=True), + ), + ] diff --git a/apps/epg/migrations/0017_alter_epgsource_url.py b/apps/epg/migrations/0017_alter_epgsource_url.py new file mode 100644 index 00000000..dcb55e20 --- /dev/null +++ b/apps/epg/migrations/0017_alter_epgsource_url.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-09-24 21:07 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0016_epgdata_icon_url'), + ] + + operations = [ + migrations.AlterField( + model_name='epgsource', + name='url', + field=models.URLField(blank=True, max_length=1000, 
null=True), + ), + ] diff --git a/apps/epg/migrations/0018_epgsource_custom_properties_and_more.py b/apps/epg/migrations/0018_epgsource_custom_properties_and_more.py new file mode 100644 index 00000000..70ebb214 --- /dev/null +++ b/apps/epg/migrations/0018_epgsource_custom_properties_and_more.py @@ -0,0 +1,23 @@ +# Generated by Django 5.2.4 on 2025-10-17 17:02 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0017_alter_epgsource_url'), + ] + + operations = [ + migrations.AddField( + model_name='epgsource', + name='custom_properties', + field=models.JSONField(blank=True, default=dict, help_text='Custom properties for dummy EPG configuration (regex patterns, timezone, duration, etc.)', null=True), + ), + migrations.AlterField( + model_name='epgsource', + name='source_type', + field=models.CharField(choices=[('xmltv', 'XMLTV URL'), ('schedules_direct', 'Schedules Direct API'), ('dummy', 'Custom Dummy EPG')], max_length=20), + ), + ] diff --git a/apps/epg/migrations/0019_alter_programdata_sub_title.py b/apps/epg/migrations/0019_alter_programdata_sub_title.py new file mode 100644 index 00000000..5a53627c --- /dev/null +++ b/apps/epg/migrations/0019_alter_programdata_sub_title.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-10-22 21:59 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0018_epgsource_custom_properties_and_more'), + ] + + operations = [ + migrations.AlterField( + model_name='programdata', + name='sub_title', + field=models.TextField(blank=True, null=True), + ), + ] diff --git a/apps/epg/migrations/0020_migrate_time_to_starttime_placeholders.py b/apps/epg/migrations/0020_migrate_time_to_starttime_placeholders.py new file mode 100644 index 00000000..8f53bb0a --- /dev/null +++ b/apps/epg/migrations/0020_migrate_time_to_starttime_placeholders.py @@ -0,0 +1,119 @@ +# Generated migration to replace {time} placeholders with {starttime} + +import re +from django.db import migrations + + +def migrate_time_placeholders(apps, schema_editor): + """ + Replace {time} with {starttime} and {time24} with {starttime24} + in all dummy EPG source custom_properties templates. 
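+ For example, a title_template of "Starts at {time} ({time24})" is rewritten to
+ "Starts at {starttime} ({starttime24})"; {time24} is handled first so it is not
+ mangled by the plain {time} replacement.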
+ """ + EPGSource = apps.get_model('epg', 'EPGSource') + + # Fields that contain templates with placeholders + template_fields = [ + 'title_template', + 'description_template', + 'upcoming_title_template', + 'upcoming_description_template', + 'ended_title_template', + 'ended_description_template', + 'channel_logo_url', + 'program_poster_url', + ] + + # Get all dummy EPG sources + dummy_sources = EPGSource.objects.filter(source_type='dummy') + + updated_count = 0 + for source in dummy_sources: + if not source.custom_properties: + continue + + modified = False + custom_props = source.custom_properties.copy() + + for field in template_fields: + if field in custom_props and custom_props[field]: + original_value = custom_props[field] + + # Replace {time24} first (before {time}) to avoid double replacement + # e.g., {time24} shouldn't become {starttime24} via {time} -> {starttime} + new_value = original_value + new_value = re.sub(r'\{time24\}', '{starttime24}', new_value) + new_value = re.sub(r'\{time\}', '{starttime}', new_value) + + if new_value != original_value: + custom_props[field] = new_value + modified = True + + if modified: + source.custom_properties = custom_props + source.save(update_fields=['custom_properties']) + updated_count += 1 + + if updated_count > 0: + print(f"Migration complete: Updated {updated_count} dummy EPG source(s) with new placeholder names.") + else: + print("No dummy EPG sources needed placeholder updates.") + + +def reverse_migration(apps, schema_editor): + """ + Reverse the migration by replacing {starttime} back to {time}. + """ + EPGSource = apps.get_model('epg', 'EPGSource') + + template_fields = [ + 'title_template', + 'description_template', + 'upcoming_title_template', + 'upcoming_description_template', + 'ended_title_template', + 'ended_description_template', + 'channel_logo_url', + 'program_poster_url', + ] + + dummy_sources = EPGSource.objects.filter(source_type='dummy') + + updated_count = 0 + for source in dummy_sources: + if not source.custom_properties: + continue + + modified = False + custom_props = source.custom_properties.copy() + + for field in template_fields: + if field in custom_props and custom_props[field]: + original_value = custom_props[field] + + # Reverse the replacements + new_value = original_value + new_value = re.sub(r'\{starttime24\}', '{time24}', new_value) + new_value = re.sub(r'\{starttime\}', '{time}', new_value) + + if new_value != original_value: + custom_props[field] = new_value + modified = True + + if modified: + source.custom_properties = custom_props + source.save(update_fields=['custom_properties']) + updated_count += 1 + + if updated_count > 0: + print(f"Reverse migration complete: Reverted {updated_count} dummy EPG source(s) to old placeholder names.") + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0019_alter_programdata_sub_title'), + ] + + operations = [ + migrations.RunPython(migrate_time_placeholders, reverse_migration), + ] diff --git a/apps/epg/migrations/0021_epgsource_priority.py b/apps/epg/migrations/0021_epgsource_priority.py new file mode 100644 index 00000000..f2696d67 --- /dev/null +++ b/apps/epg/migrations/0021_epgsource_priority.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-12-05 15:24 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0020_migrate_time_to_starttime_placeholders'), + ] + + operations = [ + migrations.AddField( + model_name='epgsource', + name='priority', + 
field=models.PositiveIntegerField(default=0, help_text='Priority for EPG matching (higher numbers = higher priority). Used when multiple EPG sources have matching entries for a channel.'), + ), + ] diff --git a/apps/epg/models.py b/apps/epg/models.py index a0e5343b..b3696edc 100644 --- a/apps/epg/models.py +++ b/apps/epg/models.py @@ -8,32 +8,110 @@ class EPGSource(models.Model): SOURCE_TYPE_CHOICES = [ ('xmltv', 'XMLTV URL'), ('schedules_direct', 'Schedules Direct API'), + ('dummy', 'Custom Dummy EPG'), ] + + STATUS_IDLE = 'idle' + STATUS_FETCHING = 'fetching' + STATUS_PARSING = 'parsing' + STATUS_ERROR = 'error' + STATUS_SUCCESS = 'success' + STATUS_DISABLED = 'disabled' + + STATUS_CHOICES = [ + (STATUS_IDLE, 'Idle'), + (STATUS_FETCHING, 'Fetching'), + (STATUS_PARSING, 'Parsing'), + (STATUS_ERROR, 'Error'), + (STATUS_SUCCESS, 'Success'), + (STATUS_DISABLED, 'Disabled'), + ] + name = models.CharField(max_length=255, unique=True) source_type = models.CharField(max_length=20, choices=SOURCE_TYPE_CHOICES) - url = models.URLField(blank=True, null=True) # For XMLTV + url = models.URLField(max_length=1000, blank=True, null=True) # For XMLTV api_key = models.CharField(max_length=255, blank=True, null=True) # For Schedules Direct is_active = models.BooleanField(default=True) file_path = models.CharField(max_length=1024, blank=True, null=True) - refresh_interval = models.IntegerField(default=24) + extracted_file_path = models.CharField(max_length=1024, blank=True, null=True, + help_text="Path to extracted XML file after decompression") + refresh_interval = models.IntegerField(default=0) refresh_task = models.ForeignKey( PeriodicTask, on_delete=models.SET_NULL, null=True, blank=True ) + custom_properties = models.JSONField( + default=dict, + blank=True, + null=True, + help_text="Custom properties for dummy EPG configuration (regex patterns, timezone, duration, etc.)" + ) + priority = models.PositiveIntegerField( + default=0, + help_text="Priority for EPG matching (higher numbers = higher priority). Used when multiple EPG sources have matching entries for a channel." 
+ ) + status = models.CharField( + max_length=20, + choices=STATUS_CHOICES, + default=STATUS_IDLE + ) + last_message = models.TextField( + null=True, + blank=True, + help_text="Last status message, including success results or error information" + ) created_at = models.DateTimeField( auto_now_add=True, help_text="Time when this source was created" ) updated_at = models.DateTimeField( - auto_now=True, - help_text="Time when this source was last updated" + null=True, blank=True, + help_text="Time when this source was last successfully refreshed" ) def __str__(self): return self.name def get_cache_file(self): - # Decide on file extension - file_ext = ".gz" if self.url.lower().endswith('.gz') else ".xml" + import mimetypes + + # Use a temporary extension for initial download + # The actual extension will be determined after content inspection + file_ext = ".tmp" + + # If file_path is already set and contains an extension, use that + # This handles cases where we've already detected the proper type + if self.file_path and os.path.exists(self.file_path): + _, existing_ext = os.path.splitext(self.file_path) + if existing_ext: + file_ext = existing_ext + else: + # Try to detect the MIME type and map to extension + mime_type, _ = mimetypes.guess_type(self.file_path) + if mime_type: + if mime_type == 'application/gzip' or mime_type == 'application/x-gzip': + file_ext = '.gz' + elif mime_type == 'application/zip': + file_ext = '.zip' + elif mime_type == 'application/xml' or mime_type == 'text/xml': + file_ext = '.xml' + # For files without mime type detection, try peeking at content + else: + try: + with open(self.file_path, 'rb') as f: + header = f.read(4) + # Check for gzip magic number (1f 8b) + if header[:2] == b'\x1f\x8b': + file_ext = '.gz' + # Check for zip magic number (PK..) + elif header[:2] == b'PK': + file_ext = '.zip' + # Check for XML + elif header[:5] == b'<?xml': + file_ext = '.xml' + except Exception as e: + # If we can't read the file, just keep the default extension + pass + filename = f"{self.id}{file_ext}" # Build full path in MEDIA_ROOT/cached_epg @@ -46,11 +124,21 @@ class EPGSource(models.Model): return cache + def save(self, *args, **kwargs): + # Prevent auto_now behavior by handling updated_at manually + if 'update_fields' in kwargs and 'updated_at' not in kwargs['update_fields']: + # Don't modify updated_at for regular updates + kwargs.setdefault('update_fields', []) + if 'updated_at' in kwargs['update_fields']: + kwargs['update_fields'].remove('updated_at') + super().save(*args, **kwargs) + class EPGData(models.Model): # Removed the Channel foreign key. We now just store the original tvg_id # and a name (which might simply be the tvg_id if no real channel exists).
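+ # Illustrative example: an XMLTV channel entry with id 'bbc1.uk' is stored as
+ # EPGData(tvg_id='bbc1.uk') with whatever display name the source provides,
+ # falling back to the tvg_id itself when no name is available.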
tvg_id = models.CharField(max_length=255, null=True, blank=True, db_index=True) name = models.CharField(max_length=255) + icon_url = models.URLField(max_length=500, null=True, blank=True) epg_source = models.ForeignKey( EPGSource, on_delete=models.CASCADE, @@ -71,10 +159,10 @@ class ProgramData(models.Model): start_time = models.DateTimeField() end_time = models.DateTimeField() title = models.CharField(max_length=255) - sub_title = models.CharField(max_length=255, blank=True, null=True) + sub_title = models.TextField(blank=True, null=True) description = models.TextField(blank=True, null=True) tvg_id = models.CharField(max_length=255, null=True, blank=True) - custom_properties = models.TextField(null=True, blank=True) + custom_properties = models.JSONField(default=dict, blank=True, null=True) def __str__(self): return f"{self.title} ({self.start_time} - {self.end_time})" diff --git a/apps/epg/serializers.py b/apps/epg/serializers.py index e4ff932e..e4d5f466 100644 --- a/apps/epg/serializers.py +++ b/apps/epg/serializers.py @@ -1,17 +1,41 @@ +from core.utils import validate_flexible_url from rest_framework import serializers from .models import EPGSource, EPGData, ProgramData from apps.channels.models import Channel class EPGSourceSerializer(serializers.ModelSerializer): - epg_data_ids = serializers.SerializerMethodField() + epg_data_count = serializers.SerializerMethodField() read_only_fields = ['created_at', 'updated_at'] + url = serializers.CharField( + required=False, + allow_blank=True, + allow_null=True, + validators=[validate_flexible_url] + ) class Meta: model = EPGSource - fields = ['id', 'name', 'source_type', 'url', 'api_key', 'is_active', 'epg_data_ids', 'refresh_interval', 'created_at', 'updated_at'] + fields = [ + 'id', + 'name', + 'source_type', + 'url', + 'api_key', + 'is_active', + 'file_path', + 'refresh_interval', + 'priority', + 'status', + 'last_message', + 'created_at', + 'updated_at', + 'custom_properties', + 'epg_data_count' + ] - def get_epg_data_ids(self, obj): - return list(obj.epgs.values_list('id', flat=True)) + def get_epg_data_count(self, obj): + """Return the count of EPG data entries instead of all IDs to prevent large payloads""" + return obj.epgs.count() class ProgramDataSerializer(serializers.ModelSerializer): class Meta: @@ -31,5 +55,6 @@ class EPGDataSerializer(serializers.ModelSerializer): 'id', 'tvg_id', 'name', + 'icon_url', 'epg_source', ] diff --git a/apps/epg/signals.py b/apps/epg/signals.py index 82db7fad..e41d3aaf 100644 --- a/apps/epg/signals.py +++ b/apps/epg/signals.py @@ -1,21 +1,88 @@ -from django.db.models.signals import post_save, post_delete +from django.db.models.signals import post_save, post_delete, pre_save from django.dispatch import receiver -from .models import EPGSource -from .tasks import refresh_epg_data +from .models import EPGSource, EPGData +from .tasks import refresh_epg_data, delete_epg_refresh_task_by_id from django_celery_beat.models import PeriodicTask, IntervalSchedule +from core.utils import is_protected_path, send_websocket_update import json +import logging +import os + +logger = logging.getLogger(__name__) @receiver(post_save, sender=EPGSource) def trigger_refresh_on_new_epg_source(sender, instance, created, **kwargs): - # Trigger refresh only if the source is newly created and active - if created and instance.is_active: + # Trigger refresh only if the source is newly created, active, and not a dummy EPG + if created and instance.is_active and instance.source_type != 'dummy': refresh_epg_data.delay(instance.id) 
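+# The create_dummy_epg_data receiver below derives a URL-friendly tvg_id from the
+# source name. For example, a dummy source named "My Dummy EPG!" yields
+# "dummy_my_dummy_epg": spaces and dashes become underscores, the "!" is stripped,
+# and the result is lowercased and prefixed with "dummy_".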
+@receiver(post_save, sender=EPGSource) +def create_dummy_epg_data(sender, instance, created, **kwargs): + """ + Automatically create EPGData for dummy EPG sources when they are created. + This allows channels to be assigned to dummy EPGs immediately without + requiring a refresh first. + """ + if instance.source_type == 'dummy': + # Ensure dummy EPGs always have idle status and no status message + if instance.status != EPGSource.STATUS_IDLE or instance.last_message: + instance.status = EPGSource.STATUS_IDLE + instance.last_message = None + instance.save(update_fields=['status', 'last_message']) + + # Create a URL-friendly tvg_id from the dummy EPG name + # Replace spaces and special characters with underscores + friendly_tvg_id = instance.name.replace(' ', '_').replace('-', '_') + # Remove any characters that aren't alphanumeric or underscores + friendly_tvg_id = ''.join(c for c in friendly_tvg_id if c.isalnum() or c == '_') + # Convert to lowercase for consistency + friendly_tvg_id = friendly_tvg_id.lower() + # Prefix with 'dummy_' to make it clear this is a dummy EPG + friendly_tvg_id = f"dummy_{friendly_tvg_id}" + + # Create or update the EPGData record + epg_data, data_created = EPGData.objects.get_or_create( + tvg_id=friendly_tvg_id, + epg_source=instance, + defaults={ + 'name': instance.name, + 'icon_url': None + } + ) + + # Update name if it changed and record already existed + if not data_created and epg_data.name != instance.name: + epg_data.name = instance.name + epg_data.save(update_fields=['name']) + + if data_created: + logger.info(f"Auto-created EPGData for dummy EPG source: {instance.name} (ID: {instance.id})") + + # Send websocket update to notify frontend that EPG data has been created + # This allows the channel form to immediately show the new dummy EPG without refreshing + send_websocket_update('updates', 'update', { + 'type': 'epg_data_created', + 'source_id': instance.id, + 'source_name': instance.name, + 'epg_data_id': epg_data.id + }) + else: + logger.debug(f"EPGData already exists for dummy EPG source: {instance.name} (ID: {instance.id})") + @receiver(post_save, sender=EPGSource) def create_or_update_refresh_task(sender, instance, **kwargs): """ Create or update a Celery Beat periodic task when an EPGSource is created/updated. + Skip creating tasks for dummy EPG sources as they don't need refreshing. 
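+ For example, an active source with a non-zero refresh_interval gets an enabled
+ periodic task named "epg_source-refresh-<id>", while setting refresh_interval
+ to 0 or deactivating the source leaves the task disabled.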
""" + # Skip task creation for dummy EPGs + if instance.source_type == 'dummy': + # If there's an existing task, disable it + if instance.refresh_task: + instance.refresh_task.enabled = False + instance.refresh_task.save(update_fields=['enabled']) + return + task_name = f"epg_source-refresh-{instance.id}" interval, _ = IntervalSchedule.objects.get_or_create( every=int(instance.refresh_interval), @@ -26,7 +93,7 @@ def create_or_update_refresh_task(sender, instance, **kwargs): "interval": interval, "task": "apps.epg.tasks.refresh_epg_data", "kwargs": json.dumps({"source_id": instance.id}), - "enabled": instance.refresh_interval != 0, + "enabled": instance.refresh_interval != 0 and instance.is_active, }) update_fields = [] @@ -36,8 +103,11 @@ def create_or_update_refresh_task(sender, instance, **kwargs): if task.interval != interval: task.interval = interval update_fields.append("interval") - if task.enabled != (instance.refresh_interval != 0): - task.enabled = instance.refresh_interval != 0 + + # Check both refresh_interval and is_active to determine if task should be enabled + should_be_enabled = instance.refresh_interval != 0 and instance.is_active + if task.enabled != should_be_enabled: + task.enabled = should_be_enabled update_fields.append("enabled") if update_fields: @@ -45,12 +115,82 @@ def create_or_update_refresh_task(sender, instance, **kwargs): if instance.refresh_task != task: instance.refresh_task = task - instance.save(update_fields=update_fields) + instance.save(update_fields=["refresh_task"]) # Fixed field name @receiver(post_delete, sender=EPGSource) def delete_refresh_task(sender, instance, **kwargs): """ - Delete the associated Celery Beat periodic task when a Channel is deleted. + Delete the associated Celery Beat periodic task when an EPGSource is deleted. """ - if instance.refresh_task: - instance.refresh_task.delete() + try: + # First try the foreign key relationship to find the task ID + task = None + if instance.refresh_task: + logger.info(f"Found task via foreign key: {instance.refresh_task.id} for EPGSource {instance.id}") + task = instance.refresh_task + + # Store task ID before deletion if we need to bypass the helper function + if task: + delete_epg_refresh_task_by_id(instance.id) + else: + # Otherwise use the helper function + delete_epg_refresh_task_by_id(instance.id) + except Exception as e: + logger.error(f"Error in delete_refresh_task signal handler: {str(e)}", exc_info=True) + +@receiver(pre_save, sender=EPGSource) +def update_status_on_active_change(sender, instance, **kwargs): + """ + When an EPGSource's is_active field changes, update the status accordingly. + For dummy EPGs, always ensure status is idle and no status message. 
+ """ + # Dummy EPGs should always be idle with no status message + if instance.source_type == 'dummy': + instance.status = EPGSource.STATUS_IDLE + instance.last_message = None + return + + if instance.pk: # Only for existing records, not new ones + try: + # Get the current record from the database + old_instance = EPGSource.objects.get(pk=instance.pk) + + # If is_active changed, update the status + if old_instance.is_active != instance.is_active: + if instance.is_active: + # When activating, set status to idle + instance.status = 'idle' + else: + # When deactivating, set status to disabled + instance.status = 'disabled' + except EPGSource.DoesNotExist: + # New record, will use default status + pass + +@receiver(post_delete, sender=EPGSource) +def delete_cached_files(sender, instance, **kwargs): + """ + Delete cached files associated with an EPGSource when it's deleted. + Only deletes files that aren't in protected directories. + """ + # Check and delete the main file path if not protected + if instance.file_path and os.path.exists(instance.file_path): + if is_protected_path(instance.file_path): + logger.info(f"Skipping deletion of protected file: {instance.file_path}") + else: + try: + os.remove(instance.file_path) + logger.info(f"Deleted cached file: {instance.file_path}") + except OSError as e: + logger.error(f"Error deleting cached file {instance.file_path}: {e}") + + # Check and delete the extracted file path if it exists, is different from main path, and not protected + if instance.extracted_file_path and os.path.exists(instance.extracted_file_path) and instance.extracted_file_path != instance.file_path: + if is_protected_path(instance.extracted_file_path): + logger.info(f"Skipping deletion of protected extracted file: {instance.extracted_file_path}") + else: + try: + os.remove(instance.extracted_file_path) + logger.info(f"Deleted extracted file: {instance.extracted_file_path}") + except OSError as e: + logger.error(f"Error deleting extracted file {instance.extracted_file_path}: {e}") diff --git a/apps/epg/tasks.py b/apps/epg/tasks.py index 74411bdb..97552171 100644 --- a/apps/epg/tasks.py +++ b/apps/epg/tasks.py @@ -5,141 +5,1126 @@ import gzip import os import uuid import requests -import xml.etree.ElementTree as ET +import time # Add import for tracking download progress from datetime import datetime, timedelta, timezone as dt_timezone +import gc # Add garbage collection module +import json +from lxml import etree # Using lxml exclusively +import psutil # Add import for memory tracking +import zipfile from celery import shared_task from django.conf import settings from django.db import transaction from django.utils import timezone from apps.channels.models import Channel +from core.models import UserAgent, CoreSettings from asgiref.sync import async_to_sync from channels.layers import get_channel_layer from .models import EPGSource, EPGData, ProgramData -from core.utils import acquire_task_lock, release_task_lock +from core.utils import acquire_task_lock, release_task_lock, send_websocket_update, cleanup_memory, log_system_event logger = logging.getLogger(__name__) +def validate_icon_url_fast(icon_url, max_length=None): + """ + Fast validation for icon URLs during parsing. + Returns None if URL is too long, original URL otherwise. + If max_length is None, gets it dynamically from the EPGData model field. 
+ """ + if max_length is None: + # Get max_length dynamically from the model field + max_length = EPGData._meta.get_field('icon_url').max_length + + if icon_url and len(icon_url) > max_length: + logger.warning(f"Icon URL too long ({len(icon_url)} > {max_length}), skipping: {icon_url[:100]}...") + return None + return icon_url + + +MAX_EXTRACT_CHUNK_SIZE = 65536 # 64kb (base2) + + +def send_epg_update(source_id, action, progress, **kwargs): + """Send WebSocket update about EPG download/parsing progress""" + # Start with the base data dictionary + data = { + "progress": progress, + "type": "epg_refresh", + "source": source_id, + "action": action, + } + + # Add the additional key-value pairs from kwargs + data.update(kwargs) + + # Use the standardized update function with garbage collection for program parsing + # This is a high-frequency operation that needs more aggressive memory management + collect_garbage = action == "parsing_programs" and progress % 10 == 0 + send_websocket_update('updates', 'update', data, collect_garbage=collect_garbage) + + # Explicitly clear references + data = None + + # For high-frequency parsing, occasionally force additional garbage collection + # to prevent memory buildup + if action == "parsing_programs" and progress % 50 == 0: + gc.collect() + + +def delete_epg_refresh_task_by_id(epg_id): + """ + Delete the periodic task associated with an EPG source ID. + Can be called directly or from the post_delete signal. + Returns True if a task was found and deleted, False otherwise. + """ + try: + task = None + task_name = f"epg_source-refresh-{epg_id}" + + # Look for task by name + try: + from django_celery_beat.models import PeriodicTask, IntervalSchedule + task = PeriodicTask.objects.get(name=task_name) + logger.info(f"Found task by name: {task.id} for EPGSource {epg_id}") + except PeriodicTask.DoesNotExist: + logger.warning(f"No PeriodicTask found with name {task_name}") + return False + + # Now delete the task and its interval + if task: + # Store interval info before deleting the task + interval_id = None + if hasattr(task, 'interval') and task.interval: + interval_id = task.interval.id + + # Count how many TOTAL tasks use this interval (including this one) + tasks_with_same_interval = PeriodicTask.objects.filter(interval_id=interval_id).count() + logger.info(f"Interval {interval_id} is used by {tasks_with_same_interval} tasks total") + + # Delete the task first + task_id = task.id + task.delete() + logger.info(f"Successfully deleted periodic task {task_id}") + + # Now check if we should delete the interval + # We only delete if it was the ONLY task using this interval + if interval_id and tasks_with_same_interval == 1: + try: + interval = IntervalSchedule.objects.get(id=interval_id) + logger.info(f"Deleting interval schedule {interval_id} (not shared with other tasks)") + interval.delete() + logger.info(f"Successfully deleted interval {interval_id}") + except IntervalSchedule.DoesNotExist: + logger.warning(f"Interval {interval_id} no longer exists") + elif interval_id: + logger.info(f"Not deleting interval {interval_id} as it's shared with {tasks_with_same_interval-1} other tasks") + + return True + return False + except Exception as e: + logger.error(f"Error deleting periodic task for EPGSource {epg_id}: {str(e)}", exc_info=True) + return False + + @shared_task def refresh_all_epg_data(): logger.info("Starting refresh_epg_data task.") - active_sources = EPGSource.objects.filter(is_active=True) - logger.debug(f"Found {active_sources.count()} active 
EPGSource(s).") + # Exclude dummy EPG sources from refresh - they don't need refreshing + active_sources = EPGSource.objects.filter(is_active=True).exclude(source_type='dummy') + logger.debug(f"Found {active_sources.count()} active EPGSource(s) (excluding dummy EPGs).") for source in active_sources: refresh_epg_data(source.id) + # Force garbage collection between sources + gc.collect() logger.info("Finished refresh_epg_data task.") return "EPG data refreshed." + @shared_task def refresh_epg_data(source_id): if not acquire_task_lock('refresh_epg_data', source_id): logger.debug(f"EPG refresh for {source_id} already running") return - source = EPGSource.objects.get(id=source_id) - if not source.is_active: - logger.info(f"EPG source {source_id} is not active. Skipping.") - return + source = None + try: + # Try to get the EPG source + try: + source = EPGSource.objects.get(id=source_id) + except EPGSource.DoesNotExist: + # The EPG source doesn't exist, so delete the periodic task if it exists + logger.warning(f"EPG source with ID {source_id} not found, but task was triggered. Cleaning up orphaned task.") - logger.info(f"Processing EPGSource: {source.name} (type: {source.source_type})") - if source.source_type == 'xmltv': - fetch_xmltv(source) - parse_channels_only(source) - parse_programs_for_source(source) - elif source.source_type == 'schedules_direct': - fetch_schedules_direct(source) + # Call the shared function to delete the task + if delete_epg_refresh_task_by_id(source_id): + logger.info(f"Successfully cleaned up orphaned task for EPG source {source_id}") + else: + logger.info(f"No orphaned task found for EPG source {source_id}") - source.save(update_fields=['updated_at']) + # Release the lock and exit + release_task_lock('refresh_epg_data', source_id) + # Force garbage collection before exit + gc.collect() + return f"EPG source {source_id} does not exist, task cleaned up" + + # The source exists but is not active, just skip processing + if not source.is_active: + logger.info(f"EPG source {source_id} is not active. Skipping.") + release_task_lock('refresh_epg_data', source_id) + # Force garbage collection before exit + gc.collect() + return + + # Skip refresh for dummy EPG sources - they don't need refreshing + if source.source_type == 'dummy': + logger.info(f"Skipping refresh for dummy EPG source {source.name} (ID: {source_id})") + release_task_lock('refresh_epg_data', source_id) + gc.collect() + return + + # Continue with the normal processing... 
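For reference, the delete_epg_refresh_task_by_id helper introduced above can also be called directly (for example from a Django shell) to clean up a leftover schedule by hand. A minimal usage sketch, with 42 as a purely hypothetical source ID:

    from apps.epg.tasks import delete_epg_refresh_task_by_id

    # 42 is a hypothetical EPGSource ID whose row no longer exists.
    # The helper looks up the PeriodicTask named "epg_source-refresh-42",
    # deletes it, and drops its IntervalSchedule if no other task shares it.
    removed = delete_epg_refresh_task_by_id(42)
    print("orphaned task removed" if removed else "no orphaned task found")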
+ logger.info(f"Processing EPGSource: {source.name} (type: {source.source_type})") + if source.source_type == 'xmltv': + fetch_success = fetch_xmltv(source) + if not fetch_success: + logger.error(f"Failed to fetch XMLTV for source {source.name}") + release_task_lock('refresh_epg_data', source_id) + # Force garbage collection before exit + gc.collect() + return + + parse_channels_success = parse_channels_only(source) + if not parse_channels_success: + logger.error(f"Failed to parse channels for source {source.name}") + release_task_lock('refresh_epg_data', source_id) + # Force garbage collection before exit + gc.collect() + return + + parse_programs_for_source(source) + + elif source.source_type == 'schedules_direct': + fetch_schedules_direct(source) + + source.save(update_fields=['updated_at']) + # After successful EPG refresh, evaluate DVR series rules to schedule new episodes + try: + from apps.channels.tasks import evaluate_series_rules + evaluate_series_rules.delay() + except Exception: + pass + except Exception as e: + logger.error(f"Error in refresh_epg_data for source {source_id}: {e}", exc_info=True) + try: + if source: + source.status = 'error' + source.last_message = f"Error refreshing EPG data: {str(e)}" + source.save(update_fields=['status', 'last_message']) + send_epg_update(source_id, "refresh", 100, status="error", error=str(e)) + except Exception as inner_e: + logger.error(f"Error updating source status: {inner_e}") + finally: + # Clear references to ensure proper garbage collection + source = None + # Force garbage collection before releasing the lock + gc.collect() + release_task_lock('refresh_epg_data', source_id) - release_task_lock('refresh_epg_data', source_id) def fetch_xmltv(source): - if not source.url: - return + # Handle cases with local file but no URL + if not source.url and source.file_path and os.path.exists(source.file_path): + logger.info(f"Using existing local file for EPG source: {source.name} at {source.file_path}") - if os.path.exists(source.get_cache_file()): - os.remove(source.get_cache_file()) + # Check if the existing file is compressed and we need to extract it + if source.file_path.endswith(('.gz', '.zip')) and not source.file_path.endswith('.xml'): + try: + # Define the path for the extracted file in the cache directory + cache_dir = os.path.join(settings.MEDIA_ROOT, "cached_epg") + os.makedirs(cache_dir, exist_ok=True) + xml_path = os.path.join(cache_dir, f"{source.id}.xml") + + # Extract to the cache location keeping the original + extracted_path = extract_compressed_file(source.file_path, xml_path, delete_original=False) + + if extracted_path: + logger.info(f"Extracted mapped compressed file to: {extracted_path}") + # Update to use extracted_file_path instead of changing file_path + source.extracted_file_path = extracted_path + source.save(update_fields=['extracted_file_path']) + else: + logger.error(f"Failed to extract mapped compressed file. 
Using original file: {source.file_path}") + except Exception as e: + logger.error(f"Failed to extract existing compressed file: {e}") + # Continue with the original file if extraction fails + + # Set the status to success in the database + source.status = 'success' + source.save(update_fields=['status']) + + # Send a download complete notification + send_epg_update(source.id, "downloading", 100, status="success") + + # Return True to indicate successful fetch, processing will continue with parse_channels_only + return True + + # Handle cases where no URL is provided and no valid file path exists + if not source.url: + # Update source status for missing URL + source.status = 'error' + source.last_message = "No URL provided and no valid local file exists" + source.save(update_fields=['status', 'last_message']) + send_epg_update(source.id, "downloading", 100, status="error", error="No URL provided and no valid local file exists") + return False logger.info(f"Fetching XMLTV data from source: {source.name}") try: - response = requests.get(source.url, timeout=30) - response.raise_for_status() - logger.debug("XMLTV data fetched successfully.") + # Get default user agent from settings + stream_settings = CoreSettings.get_stream_settings() + user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:138.0) Gecko/20100101 Firefox/138.0" # Fallback default + default_user_agent_id = stream_settings.get('default_user_agent') + if default_user_agent_id: + try: + user_agent_obj = UserAgent.objects.filter(id=int(default_user_agent_id)).first() + if user_agent_obj and user_agent_obj.user_agent: + user_agent = user_agent_obj.user_agent + logger.debug(f"Using default user agent: {user_agent}") + except (ValueError, Exception) as e: + logger.warning(f"Error retrieving default user agent, using fallback: {e}") - cache_file = source.get_cache_file() + headers = { + 'User-Agent': user_agent + } - # Save raw data - with open(cache_file, 'wb') as f: - f.write(response.content) - logger.info(f"Cached EPG file saved to {cache_file}") + # Update status to fetching before starting download + source.status = 'fetching' + source.save(update_fields=['status']) + + # Send initial download notification + send_epg_update(source.id, "downloading", 0) + + # Use streaming response to track download progress + with requests.get(source.url, headers=headers, stream=True, timeout=60) as response: + # Handle 404 specifically + if response.status_code == 404: + logger.error(f"EPG URL not found (404): {source.url}") + # Update status to error in the database + source.status = 'error' + source.last_message = f"EPG source '{source.name}' returned 404 error - will retry on next scheduled run" + source.save(update_fields=['status', 'last_message']) + + # Notify users through the WebSocket about the EPG fetch failure + channel_layer = get_channel_layer() + async_to_sync(channel_layer.group_send)( + 'updates', + { + 'type': 'update', + 'data': { + "success": False, + "type": "epg_fetch_error", + "source_id": source.id, + "source_name": source.name, + "error_code": 404, + "message": f"EPG source '{source.name}' returned 404 error - will retry on next scheduled run" + } + } + ) + # Ensure we update the download progress to 100 with error status + send_epg_update(source.id, "downloading", 100, status="error", error="URL not found (404)") + return False + + # For all other error status codes + if response.status_code >= 400: + error_message = f"HTTP error {response.status_code}" + user_message = f"EPG source '{source.name}' encountered HTTP 
error {response.status_code}" + + # Update status to error in the database + source.status = 'error' + source.last_message = user_message + source.save(update_fields=['status', 'last_message']) + + # Notify users through the WebSocket + channel_layer = get_channel_layer() + async_to_sync(channel_layer.group_send)( + 'updates', + { + 'type': 'update', + 'data': { + "success": False, + "type": "epg_fetch_error", + "source_id": source.id, + "source_name": source.name, + "error_code": response.status_code, + "message": user_message + } + } + ) + # Update download progress + send_epg_update(source.id, "downloading", 100, status="error", error=user_message) + return False + + response.raise_for_status() + logger.debug("XMLTV data fetched successfully.") + + # Define base paths for consistent file naming + cache_dir = os.path.join(settings.MEDIA_ROOT, "cached_epg") + os.makedirs(cache_dir, exist_ok=True) + + # Create temporary download file with .tmp extension + temp_download_path = os.path.join(cache_dir, f"{source.id}.tmp") + + # Check if we have content length for progress tracking + total_size = int(response.headers.get('content-length', 0)) + downloaded = 0 + start_time = time.time() + last_update_time = start_time + update_interval = 0.5 # Only update every 0.5 seconds + + # Download to temporary file + with open(temp_download_path, 'wb') as f: + for chunk in response.iter_content(chunk_size=16384): # Increased chunk size for better performance + if chunk: + f.write(chunk) + + downloaded += len(chunk) + elapsed_time = time.time() - start_time + + # Calculate download speed in KB/s + speed = downloaded / elapsed_time / 1024 if elapsed_time > 0 else 0 + + # Calculate progress percentage + if total_size and total_size > 0: + progress = min(100, int((downloaded / total_size) * 100)) + else: + # If no content length header, estimate progress + progress = min(95, int((downloaded / (10 * 1024 * 1024)) * 100)) # Assume 10MB if unknown + + # Time remaining (in seconds) + time_remaining = (total_size - downloaded) / (speed * 1024) if speed > 0 and total_size > 0 else 0 + + # Only send updates at specified intervals to avoid flooding + current_time = time.time() + if current_time - last_update_time >= update_interval and progress > 0: + last_update_time = current_time + send_epg_update( + source.id, + "downloading", + progress, + speed=round(speed, 2), + elapsed_time=round(elapsed_time, 1), + time_remaining=round(time_remaining, 1), + downloaded=f"{downloaded / (1024 * 1024):.2f} MB" + ) + + # Explicitly delete the chunk to free memory immediately + del chunk + + # Send completion notification + send_epg_update(source.id, "downloading", 100) + + # Determine the appropriate file extension based on content detection + with open(temp_download_path, 'rb') as f: + content_sample = f.read(1024) # Just need the first 1KB to detect format + + # Use our helper function to detect the format + format_type, is_compressed, file_extension = detect_file_format( + file_path=source.url, # Original URL as a hint + content=content_sample # Actual file content for detection + ) + + logger.debug(f"File format detection results: type={format_type}, compressed={is_compressed}, extension={file_extension}") + + # Ensure consistent final paths + compressed_path = os.path.join(cache_dir, f"{source.id}{file_extension}" if is_compressed else f"{source.id}.compressed") + xml_path = os.path.join(cache_dir, f"{source.id}.xml") + + # Clean up old files before saving new ones + if os.path.exists(compressed_path): + try: + 
os.remove(compressed_path) + logger.debug(f"Removed old compressed file: {compressed_path}") + except OSError as e: + logger.warning(f"Failed to remove old compressed file: {e}") + + if os.path.exists(xml_path): + try: + os.remove(xml_path) + logger.debug(f"Removed old XML file: {xml_path}") + except OSError as e: + logger.warning(f"Failed to remove old XML file: {e}") + + # Rename the temp file to appropriate final path + if is_compressed: + try: + os.rename(temp_download_path, compressed_path) + logger.debug(f"Renamed temp file to compressed file: {compressed_path}") + current_file_path = compressed_path + except OSError as e: + logger.error(f"Failed to rename temp file to compressed file: {e}") + current_file_path = temp_download_path # Fall back to using temp file + else: + try: + os.rename(temp_download_path, xml_path) + logger.debug(f"Renamed temp file to XML file: {xml_path}") + current_file_path = xml_path + except OSError as e: + logger.error(f"Failed to rename temp file to XML file: {e}") + current_file_path = temp_download_path # Fall back to using temp file + + # Now extract the file if it's compressed + if is_compressed: + try: + logger.info(f"Extracting compressed file {current_file_path}") + send_epg_update(source.id, "extracting", 0, message="Extracting downloaded file") + + # Always extract to the standard XML path - set delete_original to True to clean up + extracted = extract_compressed_file(current_file_path, xml_path, delete_original=True) + + if extracted: + logger.info(f"Successfully extracted to {xml_path}, compressed file deleted") + send_epg_update(source.id, "extracting", 100, message=f"File extracted successfully, temporary file removed") + # Update to store only the extracted file path since the compressed file is now gone + source.file_path = xml_path + source.extracted_file_path = None + else: + logger.error("Extraction failed, using compressed file") + send_epg_update(source.id, "extracting", 100, status="error", message="Extraction failed, using compressed file") + # Use the compressed file + source.file_path = current_file_path + source.extracted_file_path = None + except Exception as e: + logger.error(f"Error extracting file: {str(e)}", exc_info=True) + send_epg_update(source.id, "extracting", 100, status="error", message=f"Error during extraction: {str(e)}") + # Use the compressed file if extraction fails + source.file_path = current_file_path + source.extracted_file_path = None + else: + # It's already an XML file + source.file_path = current_file_path + source.extracted_file_path = None + + # Update the source's file paths + source.save(update_fields=['file_path', 'status', 'extracted_file_path']) + + # Update status to parsing + source.status = 'parsing' + source.save(update_fields=['status']) + + logger.info(f"Cached EPG file saved to {source.file_path}") + return True + + except requests.exceptions.HTTPError as e: + logger.error(f"HTTP Error fetching XMLTV from {source.name}: {e}", exc_info=True) + + # Get error details + status_code = e.response.status_code if hasattr(e, 'response') and e.response else 'unknown' + error_message = str(e) + + # Create a user-friendly message + user_message = f"EPG source '{source.name}' encountered HTTP error {status_code}" + + # Add specific handling for common HTTP errors + if status_code == 404: + user_message = f"EPG source '{source.name}' URL not found (404) - will retry on next scheduled run" + elif status_code == 401 or status_code == 403: + user_message = f"EPG source '{source.name}' access denied (HTTP 
{status_code}) - check credentials" + elif status_code == 429: + user_message = f"EPG source '{source.name}' rate limited (429) - try again later" + elif status_code >= 500: + user_message = f"EPG source '{source.name}' server error (HTTP {status_code}) - will retry later" + + # Update source status to error with the error message + source.status = 'error' + source.last_message = user_message + source.save(update_fields=['status', 'last_message']) + + # Notify users through the WebSocket about the EPG fetch failure + channel_layer = get_channel_layer() + async_to_sync(channel_layer.group_send)( + 'updates', + { + 'type': 'update', + 'data': { + "success": False, + "type": "epg_fetch_error", + "source_id": source.id, + "source_name": source.name, + "error_code": status_code, + "message": user_message, + "details": error_message + } + } + ) + + # Ensure we update the download progress to 100 with error status + send_epg_update(source.id, "downloading", 100, status="error", error=user_message) + return False + except requests.exceptions.ConnectionError as e: + # Handle connection errors separately + error_message = str(e) + user_message = f"Connection error: Unable to connect to EPG source '{source.name}'" + logger.error(f"Connection error fetching XMLTV from {source.name}: {e}", exc_info=True) + + # Update source status + source.status = 'error' + source.last_message = user_message + source.save(update_fields=['status', 'last_message']) + + # Send notifications + channel_layer = get_channel_layer() + async_to_sync(channel_layer.group_send)( + 'updates', + { + 'type': 'update', + 'data': { + "success": False, + "type": "epg_fetch_error", + "source_id": source.id, + "source_name": source.name, + "error_code": "connection_error", + "message": user_message + } + } + ) + send_epg_update(source.id, "downloading", 100, status="error", error=user_message) + return False + except requests.exceptions.Timeout as e: + # Handle timeout errors specifically + error_message = str(e) + user_message = f"Timeout error: EPG source '{source.name}' took too long to respond" + logger.error(f"Timeout error fetching XMLTV from {source.name}: {e}", exc_info=True) + + # Update source status + source.status = 'error' + source.last_message = user_message + source.save(update_fields=['status', 'last_message']) + + # Send notifications + send_epg_update(source.id, "downloading", 100, status="error", error=user_message) + return False + except Exception as e: + error_message = str(e) + logger.error(f"Error fetching XMLTV from {source.name}: {e}", exc_info=True) + + # Update source status for general exceptions too + source.status = 'error' + source.last_message = f"Error: {error_message}" + source.save(update_fields=['status', 'last_message']) + + # Ensure we update the download progress to 100 with error status + send_epg_update(source.id, "downloading", 100, status="error", error=f"Error: {error_message}") + return False + + +def extract_compressed_file(file_path, output_path=None, delete_original=False): + """ + Extracts a compressed file (.gz or .zip) to an XML file. 
+ + Args: + file_path: Path to the compressed file + output_path: Specific path where the file should be extracted (optional) + delete_original: Whether to delete the original compressed file after successful extraction + + Returns: + Path to the extracted XML file, or None if extraction failed + """ + try: + if output_path is None: + base_path = os.path.splitext(file_path)[0] + extracted_path = f"{base_path}.xml" + else: + extracted_path = output_path + + # Make sure the output path doesn't already exist + if os.path.exists(extracted_path): + try: + os.remove(extracted_path) + logger.info(f"Removed existing extracted file: {extracted_path}") + except Exception as e: + logger.warning(f"Failed to remove existing extracted file: {e}") + # If we can't delete the existing file and no specific output was requested, + # create a unique filename instead + if output_path is None: + base_path = os.path.splitext(file_path)[0] + extracted_path = f"{base_path}_{uuid.uuid4().hex[:8]}.xml" + + # Use our detection helper to determine the file format instead of relying on extension + with open(file_path, 'rb') as f: + content_sample = f.read(4096) # Read a larger sample to ensure accurate detection + + format_type, is_compressed, _ = detect_file_format(file_path=file_path, content=content_sample) + + if format_type == 'gzip': + logger.debug(f"Extracting gzip file: {file_path}") + try: + # First check if the content is XML by reading a sample + with gzip.open(file_path, 'rb') as gz_file: + content_sample = gz_file.read(4096) # Read first 4KB for detection + detected_format, _, _ = detect_file_format(content=content_sample) + + if detected_format != 'xml': + logger.warning(f"GZIP file does not appear to contain XML content: {file_path} (detected as: {detected_format})") + # Continue anyway since GZIP only contains one file + + # Reset file pointer and extract the content + gz_file.seek(0) + with open(extracted_path, 'wb') as out_file: + while True: + chunk = gz_file.read(MAX_EXTRACT_CHUNK_SIZE) + if not chunk or len(chunk) == 0: + break + out_file.write(chunk) + except Exception as e: + logger.error(f"Error extracting GZIP file: {e}", exc_info=True) + return None + + logger.info(f"Successfully extracted gzip file to: {extracted_path}") + + # Delete original compressed file if requested + if delete_original: + try: + os.remove(file_path) + logger.info(f"Deleted original compressed file: {file_path}") + except Exception as e: + logger.warning(f"Failed to delete original compressed file {file_path}: {e}") + + return extracted_path + + elif format_type == 'zip': + logger.debug(f"Extracting zip file: {file_path}") + with zipfile.ZipFile(file_path, 'r') as zip_file: + # Find the first XML file in the ZIP archive + xml_files = [f for f in zip_file.namelist() if f.lower().endswith('.xml')] + + if not xml_files: + logger.info("No files with .xml extension found in ZIP archive, checking content of all files") + # Check content of each file to see if any are XML without proper extension + for filename in zip_file.namelist(): + if not filename.endswith('/'): # Skip directories + try: + # Read a sample of the file content + content_sample = zip_file.read(filename, 4096) # Read up to 4KB for detection + format_type, _, _ = detect_file_format(content=content_sample) + if format_type == 'xml': + logger.info(f"Found XML content in file without .xml extension: {filename}") + xml_files = [filename] + break + except Exception as e: + logger.warning(f"Error reading file {filename} from ZIP: {e}") + + if not xml_files: + 
logger.error("No XML file found in ZIP archive") + return None + + # Extract the first XML file + with open(extracted_path, 'wb') as out_file: + with zip_file.open(xml_files[0], "r") as xml_file: + while True: + chunk = xml_file.read(MAX_EXTRACT_CHUNK_SIZE) + if not chunk or len(chunk) == 0: + break + out_file.write(chunk) + + logger.info(f"Successfully extracted zip file to: {extracted_path}") + + # Delete original compressed file if requested + if delete_original: + try: + os.remove(file_path) + logger.info(f"Deleted original compressed file: {file_path}") + except Exception as e: + logger.warning(f"Failed to delete original compressed file {file_path}: {e}") + + return extracted_path + + else: + logger.error(f"Unsupported or unrecognized compressed file format: {file_path} (detected as: {format_type})") + return None except Exception as e: - logger.error(f"Error fetching XMLTV from {source.name}: {e}", exc_info=True) + logger.error(f"Error extracting {file_path}: {str(e)}", exc_info=True) + return None def parse_channels_only(source): - file_path = source.file_path + # Use extracted file if available, otherwise use the original file path + file_path = source.extracted_file_path if source.extracted_file_path else source.file_path if not file_path: file_path = source.get_cache_file() - logger.info(f"Parsing channels from EPG file: {file_path}") - existing_epgs = {e.tvg_id: e for e in EPGData.objects.filter(epg_source=source)} + # Send initial parsing notification + send_epg_update(source.id, "parsing_channels", 0) - # Read entire file (decompress if .gz) - if file_path.endswith('.gz'): - with open(file_path, 'rb') as gz_file: - decompressed = gzip.decompress(gz_file.read()) - xml_data = decompressed.decode('utf-8') - else: - with open(file_path, 'r', encoding='utf-8') as xml_file: - xml_data = xml_file.read() + process = None + should_log_memory = False - root = ET.fromstring(xml_data) - channels = root.findall('channel') + try: + # Check if the file exists + if not os.path.exists(file_path): + logger.error(f"EPG file does not exist at path: {file_path}") - epgs_to_create = [] - epgs_to_update = [] + # Update the source's file_path to the default cache location + new_path = source.get_cache_file() + logger.info(f"Updating file_path from '{file_path}' to '{new_path}'") + source.file_path = new_path + source.save(update_fields=['file_path']) - logger.info(f"Found {len(channels)} entries in {file_path}") - for channel_elem in channels: - tvg_id = channel_elem.get('id', '').strip() - if not tvg_id: - continue # skip blank/invalid IDs + # If the source has a URL, fetch the data before continuing + if source.url: + logger.info(f"Fetching new EPG data from URL: {source.url}") + fetch_success = fetch_xmltv(source) # Store the result - display_name = channel_elem.findtext('display-name', default=tvg_id).strip() + # Only proceed if fetch was successful AND file exists + if not fetch_success: + logger.error(f"Failed to fetch EPG data from URL: {source.url}") + # Update status to error + source.status = 'error' + source.last_message = f"Failed to fetch EPG data from URL" + source.save(update_fields=['status', 'last_message']) + # Send error notification + send_epg_update(source.id, "parsing_channels", 100, status="error", error="Failed to fetch EPG data") + return False - if tvg_id in existing_epgs: - epg_obj = existing_epgs[tvg_id] - if epg_obj.name != display_name: - epg_obj.name = display_name - epgs_to_update.append(epg_obj) - else: - epgs_to_create.append(EPGData( - tvg_id=tvg_id, - 
name=display_name, + # Verify the file was downloaded successfully + if not os.path.exists(source.file_path): + logger.error(f"Failed to fetch EPG data, file still missing at: {source.file_path}") + # Update status to error + source.status = 'error' + source.last_message = f"Failed to fetch EPG data, file missing after download" + source.save(update_fields=['status', 'last_message']) + send_epg_update(source.id, "parsing_channels", 100, status="error", error="File not found after download") + return False + + # Update file_path with the new location + file_path = source.file_path + else: + logger.error(f"No URL provided for EPG source {source.name}, cannot fetch new data") + # Update status to error + source.status = 'error' + source.last_message = f"No URL provided, cannot fetch EPG data" + source.save(update_fields=['updated_at']) + + # Initialize process variable for memory tracking only in debug mode + try: + process = None + # Get current log level as a number + current_log_level = logger.getEffectiveLevel() + + # Only track memory usage when log level is DEBUG (10) or more verbose + # This is more future-proof than string comparisons + should_log_memory = current_log_level <= logging.DEBUG or settings.DEBUG + + if should_log_memory: + process = psutil.Process() + initial_memory = process.memory_info().rss / 1024 / 1024 + logger.debug(f"[parse_channels_only] Initial memory usage: {initial_memory:.2f} MB") + except (ImportError, NameError): + process = None + should_log_memory = False + logger.warning("psutil not available for memory tracking") + + # Replace full dictionary load with more efficient lookup set + existing_tvg_ids = set() + existing_epgs = {} # Initialize the dictionary that will lazily load objects + last_id = 0 + chunk_size = 5000 + + while True: + tvg_id_chunk = set(EPGData.objects.filter( epg_source=source, - )) + id__gt=last_id + ).order_by('id').values_list('tvg_id', flat=True)[:chunk_size]) - if epgs_to_create: - EPGData.objects.bulk_create(epgs_to_create, ignore_conflicts=True) - if epgs_to_update: - EPGData.objects.bulk_update(epgs_to_update, ["name"]) + if not tvg_id_chunk: + break + + existing_tvg_ids.update(tvg_id_chunk) + last_id = EPGData.objects.filter(tvg_id__in=tvg_id_chunk).order_by('-id')[0].id + # Update progress to show file read starting + send_epg_update(source.id, "parsing_channels", 10) + + # Stream parsing instead of loading entire file at once + # This can be simplified since we now always have XML files + epgs_to_create = [] + epgs_to_update = [] + total_channels = 0 + processed_channels = 0 + batch_size = 500 # Process in batches to limit memory usage + progress = 0 # Initialize progress variable here + icon_url_max_length = EPGData._meta.get_field('icon_url').max_length # Get max length for icon_url field + + # Track memory at key points + if process: + logger.debug(f"[parse_channels_only] Memory before opening file: {process.memory_info().rss / 1024 / 1024:.2f} MB") + + try: + # Attempt to count existing channels in the database + try: + total_channels = EPGData.objects.filter(epg_source=source).count() + logger.info(f"Found {total_channels} existing channels for this source") + except Exception as e: + logger.error(f"Error counting channels: {e}") + total_channels = 500 # Default estimate + if process: + logger.debug(f"[parse_channels_only] Memory after closing initial file: {process.memory_info().rss / 1024 / 1024:.2f} MB") + + # Update progress after counting + send_epg_update(source.id, "parsing_channels", 25, 
total_channels=total_channels) + + # Open the file - no need to check file type since it's always XML now + logger.debug(f"Opening file for channel parsing: {file_path}") + source_file = open(file_path, 'rb') + + if process: + logger.debug(f"[parse_channels_only] Memory after opening file: {process.memory_info().rss / 1024 / 1024:.2f} MB") + + # Change iterparse to look for both channel and programme elements + logger.debug(f"Creating iterparse context for channels and programmes") + channel_parser = etree.iterparse(source_file, events=('end',), tag=('channel', 'programme'), remove_blank_text=True, recover=True) + if process: + logger.debug(f"[parse_channels_only] Memory after creating iterparse: {process.memory_info().rss / 1024 / 1024:.2f} MB") + + channel_count = 0 + total_elements_processed = 0 # Track total elements processed, not just channels + for _, elem in channel_parser: + total_elements_processed += 1 + # Only process channel elements + if elem.tag == 'channel': + channel_count += 1 + tvg_id = elem.get('id', '').strip() + if tvg_id: + display_name = None + icon_url = None + for child in elem: + if display_name is None and child.tag == 'display-name' and child.text: + display_name = child.text.strip() + elif child.tag == 'icon': + raw_icon_url = child.get('src', '').strip() + icon_url = validate_icon_url_fast(raw_icon_url, icon_url_max_length) + if display_name and icon_url: + break # No need to continue if we have both + + if not display_name: + display_name = tvg_id + + # Use lazy loading approach to reduce memory usage + if tvg_id in existing_tvg_ids: + # Only fetch the object if we need to update it and it hasn't been loaded yet + if tvg_id not in existing_epgs: + try: + # This loads the full EPG object from the database and caches it + existing_epgs[tvg_id] = EPGData.objects.get(tvg_id=tvg_id, epg_source=source) + except EPGData.DoesNotExist: + # Handle race condition where record was deleted + existing_tvg_ids.remove(tvg_id) + epgs_to_create.append(EPGData( + tvg_id=tvg_id, + name=display_name, + icon_url=icon_url, + epg_source=source, + )) + logger.debug(f"[parse_channels_only] Added new channel to epgs_to_create 1: {tvg_id} - {display_name}") + processed_channels += 1 + continue + + # We use the cached object to check if the name or icon_url has changed + epg_obj = existing_epgs[tvg_id] + needs_update = False + if epg_obj.name != display_name: + epg_obj.name = display_name + needs_update = True + if epg_obj.icon_url != icon_url: + epg_obj.icon_url = icon_url + needs_update = True + + if needs_update: + epgs_to_update.append(epg_obj) + logger.debug(f"[parse_channels_only] Added channel to update to epgs_to_update: {tvg_id} - {display_name}") + else: + # No changes needed, just clear the element + logger.debug(f"[parse_channels_only] No changes needed for channel {tvg_id} - {display_name}") + else: + # This is a new channel that doesn't exist in our database + epgs_to_create.append(EPGData( + tvg_id=tvg_id, + name=display_name, + icon_url=icon_url, + epg_source=source, + )) + logger.debug(f"[parse_channels_only] Added new channel to epgs_to_create 2: {tvg_id} - {display_name}") + + processed_channels += 1 + + # Batch processing + if len(epgs_to_create) >= batch_size: + logger.info(f"[parse_channels_only] Bulk creating {len(epgs_to_create)} EPG entries") + EPGData.objects.bulk_create(epgs_to_create, ignore_conflicts=True) + if process: + logger.info(f"[parse_channels_only] Memory after bulk_create: {process.memory_info().rss / 1024 / 1024:.2f} MB") + del epgs_to_create # 
Explicit deletion + epgs_to_create = [] + cleanup_memory(log_usage=should_log_memory, force_collection=True) + if process: + logger.info(f"[parse_channels_only] Memory after gc.collect(): {process.memory_info().rss / 1024 / 1024:.2f} MB") + + if len(epgs_to_update) >= batch_size: + logger.info(f"[parse_channels_only] Bulk updating {len(epgs_to_update)} EPG entries") + if process: + logger.info(f"[parse_channels_only] Memory before bulk_update: {process.memory_info().rss / 1024 / 1024:.2f} MB") + EPGData.objects.bulk_update(epgs_to_update, ["name", "icon_url"]) + if process: + logger.info(f"[parse_channels_only] Memory after bulk_update: {process.memory_info().rss / 1024 / 1024:.2f} MB") + epgs_to_update = [] + # Force garbage collection + cleanup_memory(log_usage=should_log_memory, force_collection=True) + + # Periodically clear the existing_epgs cache to prevent memory buildup + if processed_channels % 1000 == 0: + logger.info(f"[parse_channels_only] Clearing existing_epgs cache at {processed_channels} channels") + existing_epgs.clear() + cleanup_memory(log_usage=should_log_memory, force_collection=True) + if process: + logger.info(f"[parse_channels_only] Memory after clearing cache: {process.memory_info().rss / 1024 / 1024:.2f} MB") + + # Send progress updates + if processed_channels % 100 == 0 or processed_channels == total_channels: + progress = 25 + int((processed_channels / total_channels) * 65) if total_channels > 0 else 90 + send_epg_update( + source.id, + "parsing_channels", + progress, + processed=processed_channels, + total=total_channels + ) + if processed_channels > total_channels: + logger.debug(f"[parse_channels_only] Processed channel {tvg_id} - processed {processed_channels - total_channels} additional channels") + else: + logger.debug(f"[parse_channels_only] Processed channel {tvg_id} - processed {processed_channels}/{total_channels}") + if process: + logger.debug(f"[parse_channels_only] Memory before elem cleanup: {process.memory_info().rss / 1024 / 1024:.2f} MB") + # Clear memory + try: + # First clear the element's content + clear_element(elem) + + except Exception as e: + # Just log the error and continue - don't let cleanup errors stop processing + logger.debug(f"[parse_channels_only] Non-critical error during XML element cleanup: {e}") + if process: + logger.debug(f"[parse_channels_only] Memory after elem cleanup: {process.memory_info().rss / 1024 / 1024:.2f} MB") + + logger.debug(f"[parse_channels_only] Total elements processed: {total_elements_processed}") + + else: + logger.trace(f"[parse_channels_only] Skipping non-channel element: {elem.get('channel', 'unknown')} - {elem.get('start', 'unknown')} {elem.tag}") + clear_element(elem) + continue + + except (etree.XMLSyntaxError, Exception) as xml_error: + logger.error(f"[parse_channels_only] XML parsing failed: {xml_error}") + # Update status to error + source.status = 'error' + source.last_message = f"Error parsing XML file: {str(xml_error)}" + source.save(update_fields=['status', 'last_message']) + send_epg_update(source.id, "parsing_channels", 100, status="error", error=str(xml_error)) + return False + if process: + logger.info(f"[parse_channels_only] Processed {processed_channels} channels current memory: {process.memory_info().rss / 1024 / 1024:.2f} MB") + else: + logger.info(f"[parse_channels_only] Processed {processed_channels} channels") + # Process any remaining items + if epgs_to_create: + EPGData.objects.bulk_create(epgs_to_create, ignore_conflicts=True) + logger.debug(f"[parse_channels_only] Created 
final batch of {len(epgs_to_create)} EPG entries") + + if epgs_to_update: + EPGData.objects.bulk_update(epgs_to_update, ["name", "icon_url"]) + logger.debug(f"[parse_channels_only] Updated final batch of {len(epgs_to_update)} EPG entries") + if process: + logger.debug(f"[parse_channels_only] Memory after final batch creation: {process.memory_info().rss / 1024 / 1024:.2f} MB") + + # Update source status with channel count + source.status = 'success' + source.last_message = f"Successfully parsed {processed_channels} channels" + source.save(update_fields=['status', 'last_message']) + + # Send completion notification + send_epg_update( + source.id, + "parsing_channels", + 100, + status="success", + channels_count=processed_channels + ) + + send_websocket_update('updates', 'update', {"success": True, "type": "epg_channels"}) + + logger.info(f"Finished parsing channel info. Found {processed_channels} channels.") + + return True + + except FileNotFoundError: + logger.error(f"EPG file not found at: {file_path}") + # Update status to error + source.status = 'error' + source.last_message = f"EPG file not found: {file_path}" + source.save(update_fields=['status', 'last_message']) + send_epg_update(source.id, "parsing_channels", 100, status="error", error="File not found") + return False + except Exception as e: + logger.error(f"Error reading EPG file {file_path}: {e}", exc_info=True) + # Update status to error + source.status = 'error' + source.last_message = f"Error parsing EPG file: {str(e)}" + source.save(update_fields=['status', 'last_message']) + send_epg_update(source.id, "parsing_channels", 100, status="error", error=str(e)) + return False + finally: + # Cleanup memory and close file + if process: + logger.debug(f"[parse_channels_only] Memory before cleanup: {process.memory_info().rss / 1024 / 1024:.2f} MB") + try: + # Output any errors in the channel_parser error log + if 'channel_parser' in locals() and hasattr(channel_parser, 'error_log') and len(channel_parser.error_log) > 0: + logger.debug(f"XML parser errors found ({len(channel_parser.error_log)} total):") + for i, error in enumerate(channel_parser.error_log): + logger.debug(f" Error {i+1}: {error}") + if 'channel_parser' in locals(): + del channel_parser + if 'elem' in locals(): + del elem + if 'parent' in locals(): + del parent + + if 'source_file' in locals(): + source_file.close() + del source_file + # Clear remaining large data structures + existing_epgs.clear() + epgs_to_create.clear() + epgs_to_update.clear() + existing_epgs = None + epgs_to_create = None + epgs_to_update = None + cleanup_memory(log_usage=should_log_memory, force_collection=True) + except Exception as e: + logger.warning(f"Cleanup error: {e}") + + try: + if process: + final_memory = process.memory_info().rss / 1024 / 1024 + logger.debug(f"[parse_channels_only] Final memory usage: {final_memory:.2f} MB") + process = None + except: + pass - channel_layer = get_channel_layer() - async_to_sync(channel_layer.group_send)( - 'updates', - { - 'type': 'update', - "data": {"success": True, "type": "epg_channels"} - } - ) - logger.info("Finished parsing channel info.") @shared_task def parse_programs_for_tvg_id(epg_id): @@ -147,159 +1132,607 @@ def parse_programs_for_tvg_id(epg_id): logger.info(f"Program parse for {epg_id} already in progress, skipping duplicate task") return "Task already running" - epg = EPGData.objects.get(id=epg_id) - epg_source = epg.epg_source - - if not Channel.objects.filter(epg_data=epg).exists(): - logger.info(f"No channels matched to EPG 
{epg.tvg_id}") - release_task_lock('parse_epg_programs', epg_id) - return - - logger.info(f"Refreshing program data for tvg_id: {epg.tvg_id}") - - # First, remove all existing programs - ProgramData.objects.filter(epg=epg).delete() - - file_path = epg_source.file_path - if not file_path: - file_path = epg_source.get_cache_file() - if not os.path.exists(file_path): - fetch_xmltv(epg_source) - - # Read entire file (decompress if .gz) - if file_path.endswith('.gz'): - with open(file_path, 'rb') as gz_file: - decompressed = gzip.decompress(gz_file.read()) - xml_data = decompressed.decode('utf-8') - else: - with open(file_path, 'r', encoding='utf-8') as xml_file: - xml_data = xml_file.read() - - root = ET.fromstring(xml_data) - - # Find only elements for this tvg_id - matched_programmes = [p for p in root.findall('programme') if p.get('channel') == epg.tvg_id] - logger.debug(f"Found {len(matched_programmes)} programmes for tvg_id={epg.tvg_id}") - + source_file = None + program_parser = None programs_to_create = [] - for prog in matched_programmes: - start_time = parse_xmltv_time(prog.get('start')) - end_time = parse_xmltv_time(prog.get('stop')) - title = prog.findtext('title', default='No Title') - desc = prog.findtext('desc', default='') - sub_title = prog.findtext('sub-title', default='') + programs_processed = 0 + try: + # Add memory tracking only in trace mode or higher + try: + process = None + # Get current log level as a number + current_log_level = logger.getEffectiveLevel() - # Extract custom properties - custom_props = {} + # Only track memory usage when log level is TRACE or more verbose or if running in DEBUG mode + should_log_memory = current_log_level <= 5 or settings.DEBUG - # Extract categories - categories = [] - for cat_elem in prog.findall('category'): - if cat_elem.text and cat_elem.text.strip(): - categories.append(cat_elem.text.strip()) - if categories: - custom_props['categories'] = categories + if should_log_memory: + process = psutil.Process() + initial_memory = process.memory_info().rss / 1024 / 1024 + logger.info(f"[parse_programs_for_tvg_id] Initial memory usage: {initial_memory:.2f} MB") + mem_before = initial_memory + except ImportError: + process = None + should_log_memory = False - # Extract episode numbers - for ep_num in prog.findall('episode-num'): - system = ep_num.get('system', '') - if system == 'xmltv_ns' and ep_num.text: - # Parse XMLTV episode-num format (season.episode.part) - parts = ep_num.text.split('.') - if len(parts) >= 2: - if parts[0].strip() != '': - try: - season = int(parts[0]) + 1 # XMLTV format is zero-based - custom_props['season'] = season - except ValueError: - pass - if parts[1].strip() != '': - try: - episode = int(parts[1]) + 1 # XMLTV format is zero-based - custom_props['episode'] = episode - except ValueError: - pass - elif system == 'onscreen' and ep_num.text: - # Just store the raw onscreen format - custom_props['onscreen_episode'] = ep_num.text.strip() + epg = EPGData.objects.get(id=epg_id) + epg_source = epg.epg_source - # Extract ratings - for rating_elem in prog.findall('rating'): - if rating_elem.findtext('value'): - custom_props['rating'] = rating_elem.findtext('value').strip() - if rating_elem.get('system'): - custom_props['rating_system'] = rating_elem.get('system') - break # Just use the first rating + # Skip program parsing for dummy EPG sources - they don't have program data files + if epg_source.source_type == 'dummy': + logger.info(f"Skipping program parsing for dummy EPG source {epg_source.name} (ID: {epg_id})") + 
release_task_lock('parse_epg_programs', epg_id) + return - # Extract credits (actors, directors, etc.) - credits_elem = prog.find('credits') - if credits_elem is not None: - credits = {} - for credit_type in ['director', 'actor', 'writer', 'presenter', 'producer']: - elements = credits_elem.findall(credit_type) - if elements: - names = [e.text.strip() for e in elements if e.text and e.text.strip()] - if names: - credits[credit_type] = names - if credits: - custom_props['credits'] = credits + if not Channel.objects.filter(epg_data=epg).exists(): + logger.info(f"No channels matched to EPG {epg.tvg_id}") + release_task_lock('parse_epg_programs', epg_id) + return - # Extract other common program metadata - if prog.findtext('date'): - custom_props['year'] = prog.findtext('date').strip()[:4] # Just the year part + logger.info(f"Refreshing program data for tvg_id: {epg.tvg_id}") - if prog.findtext('country'): - custom_props['country'] = prog.findtext('country').strip() + # Optimize deletion with a single delete query instead of chunking + # This is faster for most database engines + ProgramData.objects.filter(epg=epg).delete() - for icon_elem in prog.findall('icon'): - if icon_elem.get('src'): - custom_props['icon'] = icon_elem.get('src') - break # Just use the first icon + file_path = epg_source.extracted_file_path if epg_source.extracted_file_path else epg_source.file_path + if not file_path: + file_path = epg_source.get_cache_file() - for kw in ['previously-shown', 'premiere', 'new']: - if prog.find(kw) is not None: - custom_props[kw.replace('-', '_')] = True + # Check if the file exists + if not os.path.exists(file_path): + logger.error(f"EPG file not found at: {file_path}") - # Convert custom_props to JSON string if not empty - custom_properties_json = None - if custom_props: - import json + if epg_source.url: + # Update the file path in the database + new_path = epg_source.get_cache_file() + logger.info(f"Updating file_path from '{file_path}' to '{new_path}'") + epg_source.file_path = new_path + epg_source.save(update_fields=['file_path']) + logger.info(f"Fetching new EPG data from URL: {epg_source.url}") + else: + logger.info(f"EPG source does not have a URL, using existing file path: {file_path} to rebuild cache") + + # Fetch new data before continuing + if epg_source: + + # Properly check the return value from fetch_xmltv + fetch_success = fetch_xmltv(epg_source) + + # If fetch was not successful or the file still doesn't exist, abort + if not fetch_success: + logger.error(f"Failed to fetch EPG data, cannot parse programs for tvg_id: {epg.tvg_id}") + # Update status to error if not already set + epg_source.status = 'error' + epg_source.last_message = f"Failed to download EPG data, cannot parse programs" + epg_source.save(update_fields=['status', 'last_message']) + send_epg_update(epg_source.id, "parsing_programs", 100, status="error", error="Failed to download EPG file") + release_task_lock('parse_epg_programs', epg_id) + return + + # Also check if the file exists after download + if not os.path.exists(epg_source.file_path): + logger.error(f"Failed to fetch EPG data, file still missing at: {epg_source.file_path}") + epg_source.status = 'error' + epg_source.last_message = f"Failed to download EPG data, file missing after download" + epg_source.save(update_fields=['status', 'last_message']) + send_epg_update(epg_source.id, "parsing_programs", 100, status="error", error="File not found after download") + release_task_lock('parse_epg_programs', epg_id) + return + + # Update file_path with 
the new location + if epg_source.extracted_file_path: + file_path = epg_source.extracted_file_path + else: + file_path = epg_source.file_path + else: + logger.error(f"No URL provided for EPG source {epg_source.name}, cannot fetch new data") + # Update status to error + epg_source.status = 'error' + epg_source.last_message = f"No URL provided, cannot fetch EPG data" + epg_source.save(update_fields=['status', 'last_message']) + send_epg_update(epg_source.id, "parsing_programs", 100, status="error", error="No URL provided") + release_task_lock('parse_epg_programs', epg_id) + return + + # Use streaming parsing to reduce memory usage + # No need to check file type anymore since it's always XML + logger.debug(f"Parsing programs for tvg_id={epg.tvg_id} from {file_path}") + + # Memory usage tracking + if process: try: - custom_properties_json = json.dumps(custom_props) + mem_before = process.memory_info().rss / 1024 / 1024 + logger.debug(f"[parse_programs_for_tvg_id] Memory before parsing {epg.tvg_id} - {mem_before:.2f} MB") except Exception as e: - logger.error(f"Error serializing custom properties to JSON: {e}", exc_info=True) + logger.warning(f"Error tracking memory: {e}") + mem_before = 0 - programs_to_create.append(ProgramData( - epg=epg, - start_time=start_time, - end_time=end_time, - title=title, - description=desc, - sub_title=sub_title, - tvg_id=epg.tvg_id, - custom_properties=custom_properties_json - )) + programs_to_create = [] + batch_size = 1000 # Process in batches to limit memory usage - ProgramData.objects.bulk_create(programs_to_create) + try: + # Open the file directly - no need to check compression + logger.debug(f"Opening file for parsing: {file_path}") + source_file = open(file_path, 'rb') + + # Stream parse the file using lxml's iterparse + program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True, recover=True) + + for _, elem in program_parser: + if elem.get('channel') == epg.tvg_id: + try: + start_time = parse_xmltv_time(elem.get('start')) + end_time = parse_xmltv_time(elem.get('stop')) + title = None + desc = None + sub_title = None + + # Efficiently process child elements + for child in elem: + if child.tag == 'title': + title = child.text or 'No Title' + elif child.tag == 'desc': + desc = child.text or '' + elif child.tag == 'sub-title': + sub_title = child.text or '' + + if not title: + title = 'No Title' + + # Extract custom properties + custom_props = extract_custom_properties(elem) + custom_properties_json = None + + if custom_props: + logger.trace(f"Number of custom properties: {len(custom_props)}") + custom_properties_json = custom_props + + programs_to_create.append(ProgramData( + epg=epg, + start_time=start_time, + end_time=end_time, + title=title, + description=desc, + sub_title=sub_title, + tvg_id=epg.tvg_id, + custom_properties=custom_properties_json + )) + programs_processed += 1 + # Clear the element to free memory + clear_element(elem) + # Batch processing + if len(programs_to_create) >= batch_size: + ProgramData.objects.bulk_create(programs_to_create) + logger.debug(f"Saved batch of {len(programs_to_create)} programs for {epg.tvg_id}") + programs_to_create = [] + # Only call gc.collect() every few batches + if programs_processed % (batch_size * 5) == 0: + gc.collect() + + except Exception as e: + logger.error(f"Error processing program for {epg.tvg_id}: {e}", exc_info=True) + else: + # Immediately clean up non-matching elements to reduce memory pressure + if elem is not None: + clear_element(elem) + continue + 
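The clear_element helper used by these parsers is defined elsewhere in the codebase and is not part of this hunk; a minimal sketch of the usual lxml iterparse cleanup idiom it presumably wraps (clearing the finished element and dropping already-parsed siblings so the partial tree does not keep growing) looks like this:

    def clear_element(elem):
        # Sketch only; the project's actual helper may differ.
        elem.clear()  # free this element's text, children and attributes
        parent = elem.getparent()
        if parent is not None:
            # Delete earlier siblings that iterparse has already yielded,
            # otherwise the in-memory tree grows with the whole document.
            while elem.getprevious() is not None:
                del parent[0]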
+ # Make sure to close the file and release parser resources + if source_file: + source_file.close() + source_file = None + + if program_parser: + program_parser = None + + gc.collect() + + except zipfile.BadZipFile as zip_error: + logger.error(f"Bad ZIP file: {zip_error}") + raise + except etree.XMLSyntaxError as xml_error: + logger.error(f"XML syntax error parsing program data: {xml_error}") + raise + except Exception as e: + logger.error(f"Error parsing XML for programs: {e}", exc_info=True) + raise + finally: + # Ensure file is closed even if an exception occurs + if source_file: + source_file.close() + source_file = None + # Memory tracking after processing + if process: + try: + mem_after = process.memory_info().rss / 1024 / 1024 + logger.info(f"[parse_programs_for_tvg_id] Memory after parsing 1 {epg.tvg_id} - {programs_processed} programs: {mem_after:.2f} MB (change: {mem_after-mem_before:.2f} MB)") + except Exception as e: + logger.warning(f"Error tracking memory: {e}") + + # Process any remaining items + if programs_to_create: + ProgramData.objects.bulk_create(programs_to_create) + logger.debug(f"Saved final batch of {len(programs_to_create)} programs for {epg.tvg_id}") + programs_to_create = None + custom_props = None + custom_properties_json = None + + + logger.info(f"Completed program parsing for tvg_id={epg.tvg_id}.") + finally: + # Reset internal caches and pools that lxml might be keeping + try: + etree.clear_error_log() + except: + pass + # Explicit cleanup of all potentially large objects + if source_file: + try: + source_file.close() + except: + pass + source_file = None + program_parser = None + programs_to_create = None + + epg_source = None + # Add comprehensive cleanup before releasing lock + cleanup_memory(log_usage=should_log_memory, force_collection=True) + # Memory tracking after processing + if process: + try: + mem_after = process.memory_info().rss / 1024 / 1024 + logger.info(f"[parse_programs_for_tvg_id] Final memory usage {epg.tvg_id} - {programs_processed} programs: {mem_after:.2f} MB (change: {mem_after-mem_before:.2f} MB)") + except Exception as e: + logger.warning(f"Error tracking memory: {e}") + process = None + epg = None + programs_processed = None + release_task_lock('parse_epg_programs', epg_id) - release_task_lock('parse_epg_programs', epg_id) - logger.info(f"Completed program parsing for tvg_id={epg.tvg_id}.") def parse_programs_for_source(epg_source, tvg_id=None): - file_path = epg_source.file_path - epg_entries = EPGData.objects.filter(epg_source=epg_source) - for epg in epg_entries: - if epg.tvg_id: - parse_programs_for_tvg_id(epg.id) + """ + Parse programs for all MAPPED channels from an EPG source in a single pass. + This is an optimized version that: + 1. Only processes EPG entries that are actually mapped to channels + 2. Parses the XML file ONCE instead of once per channel + 3. Skips programmes for unmapped channels entirely during parsing + + This dramatically improves performance when an EPG source has many channels + but only a fraction are mapped. 
+ """ + # Send initial programs parsing notification + send_epg_update(epg_source.id, "parsing_programs", 0) + should_log_memory = False + process = None + initial_memory = 0 + source_file = None + + # Add memory tracking only in trace mode or higher + try: + # Get current log level as a number + current_log_level = logger.getEffectiveLevel() + + # Only track memory usage when log level is TRACE or more verbose + should_log_memory = current_log_level <= 5 or settings.DEBUG # Assuming TRACE is level 5 or lower + + if should_log_memory: + process = psutil.Process() + initial_memory = process.memory_info().rss / 1024 / 1024 + logger.info(f"[parse_programs_for_source] Initial memory usage: {initial_memory:.2f} MB") + except ImportError: + logger.warning("psutil not available for memory tracking") + process = None + should_log_memory = False + + try: + # Only get EPG entries that are actually mapped to channels + mapped_epg_ids = set( + Channel.objects.filter( + epg_data__epg_source=epg_source, + epg_data__isnull=False + ).values_list('epg_data_id', flat=True) + ) + + if not mapped_epg_ids: + total_epg_count = EPGData.objects.filter(epg_source=epg_source).count() + logger.info(f"No channels mapped to any EPG entries from source: {epg_source.name} " + f"(source has {total_epg_count} EPG entries, 0 mapped)") + # Update status - this is not an error, just no mapped entries + epg_source.status = 'success' + epg_source.last_message = f"No channels mapped to this EPG source ({total_epg_count} entries available)" + epg_source.save(update_fields=['status', 'last_message']) + send_epg_update(epg_source.id, "parsing_programs", 100, status="success") + return True + + # Get the mapped EPG entries with their tvg_ids + mapped_epgs = EPGData.objects.filter(id__in=mapped_epg_ids).values('id', 'tvg_id') + tvg_id_to_epg_id = {epg['tvg_id']: epg['id'] for epg in mapped_epgs if epg['tvg_id']} + mapped_tvg_ids = set(tvg_id_to_epg_id.keys()) + + total_epg_count = EPGData.objects.filter(epg_source=epg_source).count() + mapped_count = len(mapped_tvg_ids) + + logger.info(f"Parsing programs for {mapped_count} MAPPED channels from source: {epg_source.name} " + f"(skipping {total_epg_count - mapped_count} unmapped EPG entries)") + + # Get the file path + file_path = epg_source.extracted_file_path if epg_source.extracted_file_path else epg_source.file_path + if not file_path: + file_path = epg_source.get_cache_file() + + # Check if the file exists + if not os.path.exists(file_path): + logger.error(f"EPG file not found at: {file_path}") + + if epg_source.url: + # Update the file path in the database + new_path = epg_source.get_cache_file() + logger.info(f"Updating file_path from '{file_path}' to '{new_path}'") + epg_source.file_path = new_path + epg_source.save(update_fields=['file_path']) + logger.info(f"Fetching new EPG data from URL: {epg_source.url}") + + # Fetch new data before continuing + fetch_success = fetch_xmltv(epg_source) + + if not fetch_success: + logger.error(f"Failed to fetch EPG data for source: {epg_source.name}") + epg_source.status = 'error' + epg_source.last_message = f"Failed to download EPG data" + epg_source.save(update_fields=['status', 'last_message']) + send_epg_update(epg_source.id, "parsing_programs", 100, status="error", error="Failed to download EPG file") + return False + + # Update file_path with the new location + file_path = epg_source.extracted_file_path if epg_source.extracted_file_path else epg_source.file_path + else: + logger.error(f"No URL provided for EPG source {epg_source.name}, 
cannot fetch new data") + epg_source.status = 'error' + epg_source.last_message = f"No URL provided, cannot fetch EPG data" + epg_source.save(update_fields=['status', 'last_message']) + send_epg_update(epg_source.id, "parsing_programs", 100, status="error", error="No URL provided") + return False + + # SINGLE PASS PARSING: Parse the XML file once and collect all programs in memory + # We parse FIRST, then do an atomic delete+insert to avoid race conditions + # where clients might see empty/partial EPG data during the transition + all_programs_to_create = [] + programs_by_channel = {tvg_id: 0 for tvg_id in mapped_tvg_ids} # Track count per channel + total_programs = 0 + skipped_programs = 0 + last_progress_update = 0 + + try: + logger.debug(f"Opening file for single-pass parsing: {file_path}") + source_file = open(file_path, 'rb') + + # Stream parse the file using lxml's iterparse + program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True, recover=True) + + for _, elem in program_parser: + channel_id = elem.get('channel') + + # Skip programmes for unmapped channels immediately + if channel_id not in mapped_tvg_ids: + skipped_programs += 1 + # Clear element to free memory + clear_element(elem) + continue + + # This programme is for a mapped channel - process it + try: + start_time = parse_xmltv_time(elem.get('start')) + end_time = parse_xmltv_time(elem.get('stop')) + title = None + desc = None + sub_title = None + + # Efficiently process child elements + for child in elem: + if child.tag == 'title': + title = child.text or 'No Title' + elif child.tag == 'desc': + desc = child.text or '' + elif child.tag == 'sub-title': + sub_title = child.text or '' + + if not title: + title = 'No Title' + + # Extract custom properties + custom_props = extract_custom_properties(elem) + custom_properties_json = custom_props if custom_props else None + + epg_id = tvg_id_to_epg_id[channel_id] + all_programs_to_create.append(ProgramData( + epg_id=epg_id, + start_time=start_time, + end_time=end_time, + title=title, + description=desc, + sub_title=sub_title, + tvg_id=channel_id, + custom_properties=custom_properties_json + )) + total_programs += 1 + programs_by_channel[channel_id] += 1 + + # Clear the element to free memory + clear_element(elem) + + # Send progress update (estimate based on programs processed) + if total_programs - last_progress_update >= 5000: + last_progress_update = total_programs + # Cap at 70% during parsing phase (save 30% for DB operations) + progress = min(70, 10 + int((total_programs / max(total_programs + 10000, 1)) * 60)) + send_epg_update(epg_source.id, "parsing_programs", progress, + processed=total_programs, channels=mapped_count) + + # Periodic garbage collection during parsing + if total_programs % 5000 == 0: + gc.collect() + + except Exception as e: + logger.error(f"Error processing program for {channel_id}: {e}", exc_info=True) + clear_element(elem) + continue + + except etree.XMLSyntaxError as xml_error: + logger.error(f"XML syntax error parsing program data: {xml_error}") + epg_source.status = EPGSource.STATUS_ERROR + epg_source.last_message = f"XML parsing error: {str(xml_error)}" + epg_source.save(update_fields=['status', 'last_message']) + send_epg_update(epg_source.id, "parsing_programs", 100, status="error", message=str(xml_error)) + return False + except Exception as e: + logger.error(f"Error parsing XML for programs: {e}", exc_info=True) + raise + finally: + if source_file: + source_file.close() + source_file = None + + # Now 
perform atomic delete + bulk insert + # This ensures clients never see empty/partial EPG data + logger.info(f"Parsed {total_programs} programs, performing atomic database update...") + send_epg_update(epg_source.id, "parsing_programs", 75, message="Updating database...") + + batch_size = 1000 + try: + with transaction.atomic(): + # Delete existing programs for mapped EPGs + deleted_count = ProgramData.objects.filter(epg_id__in=mapped_epg_ids).delete()[0] + logger.debug(f"Deleted {deleted_count} existing programs") + + # Clean up orphaned programs for unmapped EPG entries + unmapped_epg_ids = list(EPGData.objects.filter( + epg_source=epg_source + ).exclude(id__in=mapped_epg_ids).values_list('id', flat=True)) + + if unmapped_epg_ids: + orphaned_count = ProgramData.objects.filter(epg_id__in=unmapped_epg_ids).delete()[0] + if orphaned_count > 0: + logger.info(f"Cleaned up {orphaned_count} orphaned programs for {len(unmapped_epg_ids)} unmapped EPG entries") + + # Bulk insert all new programs in batches within the same transaction + for i in range(0, len(all_programs_to_create), batch_size): + batch = all_programs_to_create[i:i + batch_size] + ProgramData.objects.bulk_create(batch) + + # Update progress during insertion + progress = 75 + int((i / len(all_programs_to_create)) * 20) if all_programs_to_create else 95 + if i % (batch_size * 5) == 0: + send_epg_update(epg_source.id, "parsing_programs", min(95, progress), + message=f"Inserting programs... {i}/{len(all_programs_to_create)}") + + logger.info(f"Atomic update complete: deleted {deleted_count}, inserted {total_programs} programs") + + except Exception as db_error: + logger.error(f"Database error during atomic update: {db_error}", exc_info=True) + epg_source.status = EPGSource.STATUS_ERROR + epg_source.last_message = f"Database error: {str(db_error)}" + epg_source.save(update_fields=['status', 'last_message']) + send_epg_update(epg_source.id, "parsing_programs", 100, status="error", message=str(db_error)) + return False + finally: + # Clear the large list to free memory + all_programs_to_create = None + gc.collect() + + # Count channels that actually got programs + channels_with_programs = sum(1 for count in programs_by_channel.values() if count > 0) + + # Success message + epg_source.status = EPGSource.STATUS_SUCCESS + epg_source.last_message = ( + f"Parsed {total_programs:,} programs for {channels_with_programs} channels " + f"(skipped {skipped_programs:,} programs for {total_epg_count - mapped_count} unmapped channels)" + ) + epg_source.updated_at = timezone.now() + epg_source.save(update_fields=['status', 'last_message', 'updated_at']) + + # Log system event for EPG refresh + log_system_event( + event_type='epg_refresh', + source_name=epg_source.name, + programs=total_programs, + channels=channels_with_programs, + skipped_programs=skipped_programs, + unmapped_channels=total_epg_count - mapped_count, + ) + + # Send completion notification with status + send_epg_update(epg_source.id, "parsing_programs", 100, + status="success", + message=epg_source.last_message, + updated_at=epg_source.updated_at.isoformat()) + + logger.info(f"Completed parsing programs for source: {epg_source.name} - " + f"{total_programs:,} programs for {channels_with_programs} channels, " + f"skipped {skipped_programs:,} programs for unmapped channels") + return True + + except Exception as e: + logger.error(f"Error in parse_programs_for_source: {e}", exc_info=True) + # Update status to error + epg_source.status = EPGSource.STATUS_ERROR + epg_source.last_message = 
f"Error parsing programs: {str(e)}" + epg_source.save(update_fields=['status', 'last_message']) + send_epg_update(epg_source.id, "parsing_programs", 100, + status="error", + message=epg_source.last_message) + return False + finally: + # Final memory cleanup and tracking + if source_file: + try: + source_file.close() + except: + pass + source_file = None + + # Explicitly release any remaining large data structures + programs_to_create = None + programs_by_channel = None + mapped_epg_ids = None + mapped_tvg_ids = None + tvg_id_to_epg_id = None + gc.collect() + + # Add comprehensive memory cleanup at the end + cleanup_memory(log_usage=should_log_memory, force_collection=True) + if process: + final_memory = process.memory_info().rss / 1024 / 1024 + logger.info(f"[parse_programs_for_source] Final memory usage: {final_memory:.2f} MB difference: {final_memory - initial_memory:.2f} MB") + # Explicitly clear the process object to prevent potential memory leaks + process = None def fetch_schedules_direct(source): logger.info(f"Fetching Schedules Direct data from source: {source.name}") try: + # Get default user agent from settings + stream_settings = CoreSettings.get_stream_settings() + user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:138.0) Gecko/20100101 Firefox/138.0" # Fallback default + default_user_agent_id = stream_settings.get('default_user_agent') + + if default_user_agent_id: + try: + user_agent_obj = UserAgent.objects.filter(id=int(default_user_agent_id)).first() + if user_agent_obj and user_agent_obj.user_agent: + user_agent = user_agent_obj.user_agent + logger.debug(f"Using default user agent: {user_agent}") + except (ValueError, Exception) as e: + logger.warning(f"Error retrieving default user agent, using fallback: {e}") + api_url = '' headers = { 'Content-Type': 'application/json', 'Authorization': f'Bearer {source.api_key}', + 'User-Agent': user_agent } logger.debug(f"Requesting subscriptions from Schedules Direct using URL: {api_url}") response = requests.get(api_url, headers=headers, timeout=30) @@ -354,17 +1787,42 @@ def fetch_schedules_direct(source): # ------------------------------- def parse_xmltv_time(time_str): try: + # Basic format validation + if len(time_str) < 14: + logger.warning(f"XMLTV timestamp too short: '{time_str}', using as-is") + dt_obj = datetime.strptime(time_str, '%Y%m%d%H%M%S') + return timezone.make_aware(dt_obj, timezone=dt_timezone.utc) + + # Parse base datetime dt_obj = datetime.strptime(time_str[:14], '%Y%m%d%H%M%S') - tz_sign = time_str[15] - tz_hours = int(time_str[16:18]) - tz_minutes = int(time_str[18:20]) - if tz_sign == '+': - dt_obj = dt_obj - timedelta(hours=tz_hours, minutes=tz_minutes) - elif tz_sign == '-': - dt_obj = dt_obj + timedelta(hours=tz_hours, minutes=tz_minutes) - aware_dt = timezone.make_aware(dt_obj, timezone=dt_timezone.utc) - logger.debug(f"Parsed XMLTV time '{time_str}' to {aware_dt}") - return aware_dt + + # Handle timezone if present + if len(time_str) >= 20: # Has timezone info + tz_sign = time_str[15] + tz_hours = int(time_str[16:18]) + tz_minutes = int(time_str[18:20]) + + # Create a timezone object + if tz_sign == '+': + tz_offset = dt_timezone(timedelta(hours=tz_hours, minutes=tz_minutes)) + elif tz_sign == '-': + tz_offset = dt_timezone(timedelta(hours=-tz_hours, minutes=-tz_minutes)) + else: + tz_offset = dt_timezone.utc + + # Make datetime aware with correct timezone + aware_dt = datetime.replace(dt_obj, tzinfo=tz_offset) + # Convert to UTC + aware_dt = aware_dt.astimezone(dt_timezone.utc) + + 
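The reworked parse_xmltv_time above attaches the optional ±HHMM suffix as a real tzinfo and then normalises to UTC, rather than shifting hours by hand. A self-contained sketch of that offset handling, standard library only (xmltv_to_utc is an illustrative name, not the project's helper):

```python
# Illustrative sketch of XMLTV timestamp handling: "YYYYMMDDHHMMSS +HHMM" -> aware UTC datetime.
from datetime import datetime, timedelta, timezone

def xmltv_to_utc(time_str: str) -> datetime:
    dt = datetime.strptime(time_str[:14], "%Y%m%d%H%M%S")
    if len(time_str) >= 20:
        # e.g. "20240101203000 +0200": sign at index 15, hours at 16:18, minutes at 18:20
        sign = 1 if time_str[15] == "+" else -1
        offset = timedelta(hours=int(time_str[16:18]), minutes=int(time_str[18:20]))
        dt = dt.replace(tzinfo=timezone(sign * offset))
    else:
        dt = dt.replace(tzinfo=timezone.utc)  # no offset supplied, assume UTC
    return dt.astimezone(timezone.utc)

print(xmltv_to_utc("20240101203000 +0200"))  # 2024-01-01 18:30:00+00:00
```

The removed code subtracted or added the offset manually and then labelled the result UTC; building a timezone object and calling astimezone keeps the conversion explicit and handles both signs uniformly.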
logger.trace(f"Parsed XMLTV time '{time_str}' to {aware_dt}") + return aware_dt + else: + # No timezone info, assume UTC + aware_dt = timezone.make_aware(dt_obj, timezone=dt_timezone.utc) + logger.trace(f"Parsed XMLTV time without timezone '{time_str}' as UTC: {aware_dt}") + return aware_dt + except Exception as e: logger.error(f"Error parsing XMLTV time '{time_str}': {e}", exc_info=True) raise @@ -379,3 +1837,319 @@ def parse_schedules_direct_time(time_str): except Exception as e: logger.error(f"Error parsing Schedules Direct time '{time_str}': {e}", exc_info=True) raise + + +# Helper function to extract custom properties - moved to a separate function to clean up the code +def extract_custom_properties(prog): + # Create a new dictionary for each call + custom_props = {} + + # Extract categories with a single comprehension to reduce intermediate objects + categories = [cat.text.strip() for cat in prog.findall('category') if cat.text and cat.text.strip()] + if categories: + custom_props['categories'] = categories + + # Extract keywords (new) + keywords = [kw.text.strip() for kw in prog.findall('keyword') if kw.text and kw.text.strip()] + if keywords: + custom_props['keywords'] = keywords + + # Extract episode numbers + for ep_num in prog.findall('episode-num'): + system = ep_num.get('system', '') + if system == 'xmltv_ns' and ep_num.text: + # Parse XMLTV episode-num format (season.episode.part) + parts = ep_num.text.split('.') + if len(parts) >= 2: + if parts[0].strip() != '': + try: + season = int(parts[0]) + 1 # XMLTV format is zero-based + custom_props['season'] = season + except ValueError: + pass + if parts[1].strip() != '': + try: + episode = int(parts[1]) + 1 # XMLTV format is zero-based + custom_props['episode'] = episode + except ValueError: + pass + elif system == 'onscreen' and ep_num.text: + # Just store the raw onscreen format + custom_props['onscreen_episode'] = ep_num.text.strip() + elif system == 'dd_progid' and ep_num.text: + # Store the dd_progid format + custom_props['dd_progid'] = ep_num.text.strip() + # Add support for other systems like thetvdb.com, themoviedb.org, imdb.com + elif system in ['thetvdb.com', 'themoviedb.org', 'imdb.com'] and ep_num.text: + custom_props[f'{system}_id'] = ep_num.text.strip() + + # Extract ratings more efficiently + rating_elem = prog.find('rating') + if rating_elem is not None: + value_elem = rating_elem.find('value') + if value_elem is not None and value_elem.text: + custom_props['rating'] = value_elem.text.strip() + if rating_elem.get('system'): + custom_props['rating_system'] = rating_elem.get('system') + + # Extract star ratings (new) + star_ratings = [] + for star_rating in prog.findall('star-rating'): + value_elem = star_rating.find('value') + if value_elem is not None and value_elem.text: + rating_data = {'value': value_elem.text.strip()} + if star_rating.get('system'): + rating_data['system'] = star_rating.get('system') + star_ratings.append(rating_data) + if star_ratings: + custom_props['star_ratings'] = star_ratings + + # Extract credits more efficiently + credits_elem = prog.find('credits') + if credits_elem is not None: + credits = {} + for credit_type in ['director', 'actor', 'writer', 'adapter', 'producer', 'composer', 'editor', 'presenter', 'commentator', 'guest']: + if credit_type == 'actor': + # Handle actors with roles and guest status + actors = [] + for actor_elem in credits_elem.findall('actor'): + if actor_elem.text and actor_elem.text.strip(): + actor_data = {'name': actor_elem.text.strip()} + if 
actor_elem.get('role'): + actor_data['role'] = actor_elem.get('role') + if actor_elem.get('guest') == 'yes': + actor_data['guest'] = True + actors.append(actor_data) + if actors: + credits['actor'] = actors + else: + names = [e.text.strip() for e in credits_elem.findall(credit_type) if e.text and e.text.strip()] + if names: + credits[credit_type] = names + if credits: + custom_props['credits'] = credits + + # Extract other common program metadata + date_elem = prog.find('date') + if date_elem is not None and date_elem.text: + custom_props['date'] = date_elem.text.strip() + + country_elem = prog.find('country') + if country_elem is not None and country_elem.text: + custom_props['country'] = country_elem.text.strip() + + # Extract language information (new) + language_elem = prog.find('language') + if language_elem is not None and language_elem.text: + custom_props['language'] = language_elem.text.strip() + + orig_language_elem = prog.find('orig-language') + if orig_language_elem is not None and orig_language_elem.text: + custom_props['original_language'] = orig_language_elem.text.strip() + + # Extract length (new) + length_elem = prog.find('length') + if length_elem is not None and length_elem.text: + try: + length_value = int(length_elem.text.strip()) + length_units = length_elem.get('units', 'minutes') + custom_props['length'] = {'value': length_value, 'units': length_units} + except ValueError: + pass + + # Extract video information (new) + video_elem = prog.find('video') + if video_elem is not None: + video_info = {} + for video_attr in ['present', 'colour', 'aspect', 'quality']: + attr_elem = video_elem.find(video_attr) + if attr_elem is not None and attr_elem.text: + video_info[video_attr] = attr_elem.text.strip() + if video_info: + custom_props['video'] = video_info + + # Extract audio information (new) + audio_elem = prog.find('audio') + if audio_elem is not None: + audio_info = {} + for audio_attr in ['present', 'stereo']: + attr_elem = audio_elem.find(audio_attr) + if attr_elem is not None and attr_elem.text: + audio_info[audio_attr] = attr_elem.text.strip() + if audio_info: + custom_props['audio'] = audio_info + + # Extract subtitles information (new) + subtitles = [] + for subtitle_elem in prog.findall('subtitles'): + subtitle_data = {} + if subtitle_elem.get('type'): + subtitle_data['type'] = subtitle_elem.get('type') + lang_elem = subtitle_elem.find('language') + if lang_elem is not None and lang_elem.text: + subtitle_data['language'] = lang_elem.text.strip() + if subtitle_data: + subtitles.append(subtitle_data) + + if subtitles: + custom_props['subtitles'] = subtitles + + # Extract reviews (new) + reviews = [] + for review_elem in prog.findall('review'): + if review_elem.text and review_elem.text.strip(): + review_data = {'content': review_elem.text.strip()} + if review_elem.get('type'): + review_data['type'] = review_elem.get('type') + if review_elem.get('source'): + review_data['source'] = review_elem.get('source') + if review_elem.get('reviewer'): + review_data['reviewer'] = review_elem.get('reviewer') + reviews.append(review_data) + if reviews: + custom_props['reviews'] = reviews + + # Extract images (new) + images = [] + for image_elem in prog.findall('image'): + if image_elem.text and image_elem.text.strip(): + image_data = {'url': image_elem.text.strip()} + for attr in ['type', 'size', 'orient', 'system']: + if image_elem.get(attr): + image_data[attr] = image_elem.get(attr) + images.append(image_data) + if images: + custom_props['images'] = images + + icon_elem = 
prog.find('icon') + if icon_elem is not None and icon_elem.get('src'): + custom_props['icon'] = icon_elem.get('src') + + # Simpler approach for boolean flags - expanded list + for kw in ['previously-shown', 'premiere', 'new', 'live', 'last-chance']: + if prog.find(kw) is not None: + custom_props[kw.replace('-', '_')] = True + + # Extract premiere and last-chance text content if available + premiere_elem = prog.find('premiere') + if premiere_elem is not None: + custom_props['premiere'] = True + if premiere_elem.text and premiere_elem.text.strip(): + custom_props['premiere_text'] = premiere_elem.text.strip() + + last_chance_elem = prog.find('last-chance') + if last_chance_elem is not None: + custom_props['last_chance'] = True + if last_chance_elem.text and last_chance_elem.text.strip(): + custom_props['last_chance_text'] = last_chance_elem.text.strip() + + # Extract previously-shown details + prev_shown_elem = prog.find('previously-shown') + if prev_shown_elem is not None: + custom_props['previously_shown'] = True + prev_shown_data = {} + if prev_shown_elem.get('start'): + prev_shown_data['start'] = prev_shown_elem.get('start') + if prev_shown_elem.get('channel'): + prev_shown_data['channel'] = prev_shown_elem.get('channel') + if prev_shown_data: + custom_props['previously_shown_details'] = prev_shown_data + + return custom_props + + +def clear_element(elem): + """Clear an XML element and its parent to free memory.""" + try: + elem.clear() + parent = elem.getparent() + if parent is not None: + while elem.getprevious() is not None: + del parent[0] + parent.remove(elem) + except Exception as e: + logger.warning(f"Error clearing XML element: {e}", exc_info=True) + + +def detect_file_format(file_path=None, content=None): + """ + Detect file format by examining content or file path. + + Args: + file_path: Path to file (optional) + content: Raw file content bytes (optional) + + Returns: + tuple: (format_type, is_compressed, file_extension) + format_type: 'gzip', 'zip', 'xml', or 'unknown' + is_compressed: Boolean indicating if the file is compressed + file_extension: Appropriate file extension including dot (.gz, .zip, .xml) + """ + # Default return values + format_type = 'unknown' + is_compressed = False + file_extension = '.tmp' + + # First priority: check content magic numbers as they're most reliable + if content: + # We only need the first few bytes for magic number detection + header = content[:20] if len(content) >= 20 else content + + # Check for gzip magic number (1f 8b) + if len(header) >= 2 and header[:2] == b'\x1f\x8b': + return 'gzip', True, '.gz' + + # Check for zip magic number (PK..) 
+ if len(header) >= 2 and header[:2] == b'PK': + return 'zip', True, '.zip' + + # Check for XML - either standard XML header or XMLTV-specific tag + if len(header) >= 5 and (b'<?xml' in header or b'<tv' in header): + return 'xml', False, '.xml' + + # Second priority: check file extension - focus on the final extension for compression + if file_path: + logger.debug(f"Detecting file format for: {file_path}") + + # Handle compound extensions like .xml.gz - prioritize compression extensions + lower_path = file_path.lower() + + # Check for compression extensions explicitly + if lower_path.endswith('.gz') or lower_path.endswith('.gzip'): + return 'gzip', True, '.gz' + elif lower_path.endswith('.zip'): + return 'zip', True, '.zip' + elif lower_path.endswith('.xml'): + return 'xml', False, '.xml' + + # Fallback to mimetypes only if direct extension check doesn't work + import mimetypes + mime_type, _ = mimetypes.guess_type(file_path) + logger.debug(f"Guessed MIME type: {mime_type}") + if mime_type: + if mime_type == 'application/gzip' or mime_type == 'application/x-gzip': + return 'gzip', True, '.gz' + elif mime_type == 'application/zip': + return 'zip', True, '.zip' + elif mime_type == 'application/xml' or mime_type == 'text/xml': + return 'xml', False, '.xml' + + # If we reach here, we couldn't reliably determine the format + return format_type, is_compressed, file_extension + + +def generate_dummy_epg(source): + """ + DEPRECATED: This function is no longer used. + + Dummy EPG programs are now generated on-demand when they are requested + (during XMLTV export or EPG grid display), rather than being pre-generated + and stored in the database. + + See: apps/output/views.py - generate_custom_dummy_programs() + + This function remains for backward compatibility but should not be called. + """ + logger.warning(f"generate_dummy_epg() called for {source.name} but this function is deprecated. 
" + f"Dummy EPG programs are now generated on-demand.") + return True diff --git a/apps/hdhr/api_views.py b/apps/hdhr/api_views.py index 676c0fb9..8f1609d4 100644 --- a/apps/hdhr/api_views.py +++ b/apps/hdhr/api_views.py @@ -1,7 +1,7 @@ from rest_framework import viewsets, status from rest_framework.response import Response from rest_framework.views import APIView -from rest_framework.permissions import IsAuthenticated +from apps.accounts.permissions import Authenticated, permission_classes_by_action from django.http import JsonResponse, HttpResponseForbidden, HttpResponse import logging from drf_yasg.utils import swagger_auto_schema @@ -17,22 +17,30 @@ from django.views import View from django.utils.decorators import method_decorator from django.contrib.auth.decorators import login_required from django.views.decorators.csrf import csrf_exempt -from apps.m3u.models import M3UAccountProfile + # Configure logger logger = logging.getLogger(__name__) + @login_required def hdhr_dashboard_view(request): """Render the HDHR management page.""" hdhr_devices = HDHRDevice.objects.all() return render(request, "hdhr/hdhr.html", {"hdhr_devices": hdhr_devices}) + # 🔹 1) HDHomeRun Device API class HDHRDeviceViewSet(viewsets.ModelViewSet): """Handles CRUD operations for HDHomeRun devices""" + queryset = HDHRDevice.objects.all() serializer_class = HDHRDeviceSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] # 🔹 2) Discover API @@ -41,56 +49,33 @@ class DiscoverAPIView(APIView): @swagger_auto_schema( operation_description="Retrieve HDHomeRun device discovery information", - responses={200: openapi.Response("HDHR Discovery JSON")} + responses={200: openapi.Response("HDHR Discovery JSON")}, ) def get(self, request, profile=None): uri_parts = ["hdhr"] if profile is not None: uri_parts.append(profile) - base_url = request.build_absolute_uri(f'/{"/".join(uri_parts)}/').rstrip('/') + base_url = request.build_absolute_uri(f'/{"/".join(uri_parts)}/').rstrip("/") device = HDHRDevice.objects.first() - # Calculate tuner count from active profiles from active M3U accounts (excluding default "custom Default" profile) - profiles = M3UAccountProfile.objects.filter( - is_active=True, - m3u_account__is_active=True # Only include profiles from enabled M3U accounts - ).exclude(id=1) - - # 1. Check if any profile has unlimited streams (max_streams=0) - has_unlimited = profiles.filter(max_streams=0).exists() - - # 2. Calculate tuner count from limited profiles - limited_tuners = 0 - if not has_unlimited: - limited_tuners = profiles.filter(max_streams__gt=0).aggregate( - total=models.Sum('max_streams') - ).get('total', 0) or 0 - - # 3. Add custom stream count to tuner count - custom_stream_count = Stream.objects.filter(is_custom=True).count() - logger.debug(f"Found {custom_stream_count} custom streams") - - # 4. Calculate final tuner count - if has_unlimited: - # If there are unlimited profiles, start with 10 plus custom streams - tuner_count = 10 + custom_stream_count - else: - # Otherwise use the limited profile sum plus custom streams - tuner_count = limited_tuners + custom_stream_count - - # 5. 
Ensure minimum of 2 tuners - tuner_count = max(2, tuner_count) - - logger.debug(f"Calculated tuner count: {tuner_count} (limited profiles: {limited_tuners}, custom streams: {custom_stream_count}, unlimited: {has_unlimited})") + # Calculate tuner count using centralized function + from apps.m3u.utils import calculate_tuner_count + tuner_count = calculate_tuner_count(minimum=1, unlimited_default=10) + # Create a unique DeviceID for the HDHomeRun device based on profile ID or a default value + device_ID = "12345678" # Default DeviceID + friendly_name = "Dispatcharr HDHomeRun" + if profile is not None: + device_ID = f"dispatcharr-hdhr-{profile}" + friendly_name = f"Dispatcharr HDHomeRun - {profile}" if not device: data = { - "FriendlyName": "Dispatcharr HDHomeRun", + "FriendlyName": friendly_name, "ModelNumber": "HDTC-2US", "FirmwareName": "hdhomerun3_atsc", "FirmwareVersion": "20200101", - "DeviceID": "12345678", + "DeviceID": device_ID, "DeviceAuth": "test_auth_token", "BaseURL": base_url, "LineupURL": f"{base_url}/lineup.json", @@ -117,28 +102,38 @@ class LineupAPIView(APIView): @swagger_auto_schema( operation_description="Retrieve the available channel lineup", - responses={200: openapi.Response("Channel Lineup JSON")} + responses={200: openapi.Response("Channel Lineup JSON")}, ) def get(self, request, profile=None): if profile is not None: channel_profile = ChannelProfile.objects.get(name=profile) channels = Channel.objects.filter( channelprofilemembership__channel_profile=channel_profile, - channelprofilemembership__enabled=True - ).order_by('channel_number') + channelprofilemembership__enabled=True, + ).order_by("channel_number") else: - channels = Channel.objects.all().order_by('channel_number') + channels = Channel.objects.all().order_by("channel_number") - lineup = [ - { - "GuideNumber": str(ch.channel_number), - "GuideName": ch.name, - "URL": request.build_absolute_uri(f"/proxy/ts/stream/{ch.uuid}"), - "Guide_ID": str(ch.channel_number), - "Station": str(ch.channel_number), - } - for ch in channels - ] + lineup = [] + for ch in channels: + # Format channel number as integer if it has no decimal component + if ch.channel_number is not None: + if ch.channel_number == int(ch.channel_number): + formatted_channel_number = str(int(ch.channel_number)) + else: + formatted_channel_number = str(ch.channel_number) + else: + formatted_channel_number = "" + + lineup.append( + { + "GuideNumber": formatted_channel_number, + "GuideName": ch.name, + "URL": request.build_absolute_uri(f"/proxy/ts/stream/{ch.uuid}"), + "Guide_ID": formatted_channel_number, + "Station": formatted_channel_number, + } + ) return JsonResponse(lineup, safe=False) @@ -148,14 +143,14 @@ class LineupStatusAPIView(APIView): @swagger_auto_schema( operation_description="Retrieve the HDHomeRun lineup status", - responses={200: openapi.Response("Lineup Status JSON")} + responses={200: openapi.Response("Lineup Status JSON")}, ) def get(self, request, profile=None): data = { "ScanInProgress": 0, "ScanPossible": 0, "Source": "Cable", - "SourceList": ["Cable"] + "SourceList": ["Cable"], } return JsonResponse(data) @@ -166,10 +161,10 @@ class HDHRDeviceXMLAPIView(APIView): @swagger_auto_schema( operation_description="Retrieve the HDHomeRun device XML configuration", - responses={200: openapi.Response("HDHR Device XML")} + responses={200: openapi.Response("HDHR Device XML")}, ) def get(self, request): - base_url = request.build_absolute_uri('/hdhr/').rstrip('/') + base_url = request.build_absolute_uri("/hdhr/").rstrip("/") xml_response = 
f""" diff --git a/apps/hdhr/ssdp.py b/apps/hdhr/ssdp.py index 660d9c2f..d794799a 100644 --- a/apps/hdhr/ssdp.py +++ b/apps/hdhr/ssdp.py @@ -2,6 +2,7 @@ import os import socket import threading import time +import gevent # Add this import from django.conf import settings # SSDP Multicast Address and Port @@ -59,7 +60,7 @@ def ssdp_broadcaster(host_ip): sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2) while True: sock.sendto(notify.encode("utf-8"), (SSDP_MULTICAST, SSDP_PORT)) - time.sleep(30) + gevent.sleep(30) # Replace time.sleep with gevent.sleep def start_ssdp(): host_ip = get_host_ip() diff --git a/apps/hdhr/views.py b/apps/hdhr/views.py index 048eb340..40823259 100644 --- a/apps/hdhr/views.py +++ b/apps/hdhr/views.py @@ -1,7 +1,7 @@ from rest_framework import viewsets, status from rest_framework.response import Response from rest_framework.views import APIView -from rest_framework.permissions import IsAuthenticated +from apps.accounts.permissions import Authenticated, permission_classes_by_action from django.http import JsonResponse, HttpResponseForbidden, HttpResponse from drf_yasg.utils import swagger_auto_schema from drf_yasg import openapi @@ -16,18 +16,26 @@ from django.utils.decorators import method_decorator from django.contrib.auth.decorators import login_required from django.views.decorators.csrf import csrf_exempt + @login_required def hdhr_dashboard_view(request): """Render the HDHR management page.""" hdhr_devices = HDHRDevice.objects.all() return render(request, "hdhr/hdhr.html", {"hdhr_devices": hdhr_devices}) + # 🔹 1) HDHomeRun Device API class HDHRDeviceViewSet(viewsets.ModelViewSet): """Handles CRUD operations for HDHomeRun devices""" + queryset = HDHRDevice.objects.all() serializer_class = HDHRDeviceSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] # 🔹 2) Discover API @@ -36,10 +44,10 @@ class DiscoverAPIView(APIView): @swagger_auto_schema( operation_description="Retrieve HDHomeRun device discovery information", - responses={200: openapi.Response("HDHR Discovery JSON")} + responses={200: openapi.Response("HDHR Discovery JSON")}, ) def get(self, request): - base_url = request.build_absolute_uri('/hdhr/').rstrip('/') + base_url = request.build_absolute_uri("/hdhr/").rstrip("/") device = HDHRDevice.objects.first() if not device: @@ -75,15 +83,15 @@ class LineupAPIView(APIView): @swagger_auto_schema( operation_description="Retrieve the available channel lineup", - responses={200: openapi.Response("Channel Lineup JSON")} + responses={200: openapi.Response("Channel Lineup JSON")}, ) def get(self, request): - channels = Channel.objects.all().order_by('channel_number') + channels = Channel.objects.all().order_by("channel_number") lineup = [ { "GuideNumber": str(ch.channel_number), "GuideName": ch.name, - "URL": request.build_absolute_uri(f"/proxy/ts/stream/{ch.uuid}") + "URL": request.build_absolute_uri(f"/proxy/ts/stream/{ch.uuid}"), } for ch in channels ] @@ -96,14 +104,14 @@ class LineupStatusAPIView(APIView): @swagger_auto_schema( operation_description="Retrieve the HDHomeRun lineup status", - responses={200: openapi.Response("Lineup Status JSON")} + responses={200: openapi.Response("Lineup Status JSON")}, ) def get(self, request): data = { "ScanInProgress": 0, "ScanPossible": 0, "Source": "Cable", - "SourceList": ["Cable"] + "SourceList": ["Cable"], } return JsonResponse(data) @@ -114,10 +122,10 @@ 
class HDHRDeviceXMLAPIView(APIView): @swagger_auto_schema( operation_description="Retrieve the HDHomeRun device XML configuration", - responses={200: openapi.Response("HDHR Device XML")} + responses={200: openapi.Response("HDHR Device XML")}, ) def get(self, request): - base_url = request.build_absolute_uri('/hdhr/').rstrip('/') + base_url = request.build_absolute_uri("/hdhr/").rstrip("/") xml_response = f""" diff --git a/apps/m3u/admin.py b/apps/m3u/admin.py index d4d6885b..c9b9ad0d 100644 --- a/apps/m3u/admin.py +++ b/apps/m3u/admin.py @@ -1,6 +1,8 @@ from django.contrib import admin from django.utils.html import format_html -from .models import M3UAccount, M3UFilter, ServerGroup, UserAgent +from .models import M3UAccount, M3UFilter, ServerGroup, UserAgent, M3UAccountProfile +import json + class M3UFilterInline(admin.TabularInline): model = M3UFilter @@ -8,50 +10,181 @@ class M3UFilterInline(admin.TabularInline): verbose_name = "M3U Filter" verbose_name_plural = "M3U Filters" + @admin.register(M3UAccount) class M3UAccountAdmin(admin.ModelAdmin): - list_display = ('name', 'server_url', 'server_group', 'max_streams', 'is_active', 'user_agent_display', 'uploaded_file_link', 'created_at', 'updated_at') - list_filter = ('is_active', 'server_group') - search_fields = ('name', 'server_url', 'server_group__name') + list_display = ( + "name", + "server_url", + "server_group", + "max_streams", + "priority", + "is_active", + "user_agent_display", + "uploaded_file_link", + "created_at", + "updated_at", + ) + list_filter = ("is_active", "server_group") + search_fields = ("name", "server_url", "server_group__name") inlines = [M3UFilterInline] - actions = ['activate_accounts', 'deactivate_accounts'] + actions = ["activate_accounts", "deactivate_accounts"] # Handle both ForeignKey and ManyToManyField cases for UserAgent def user_agent_display(self, obj): - if hasattr(obj, 'user_agent'): # ForeignKey case + if hasattr(obj, "user_agent"): # ForeignKey case return obj.user_agent.user_agent if obj.user_agent else "None" - elif hasattr(obj, 'user_agents'): # ManyToManyField case + elif hasattr(obj, "user_agents"): # ManyToManyField case return ", ".join([ua.user_agent for ua in obj.user_agents.all()]) or "None" return "None" + user_agent_display.short_description = "User Agent(s)" + def vod_enabled_display(self, obj): + """Display whether VOD is enabled for this account""" + if obj.custom_properties: + custom_props = obj.custom_properties or {} + return "Yes" if custom_props.get('enable_vod', False) else "No" + return "No" + vod_enabled_display.short_description = "VOD Enabled" + vod_enabled_display.boolean = True + def uploaded_file_link(self, obj): if obj.uploaded_file: - return format_html("Download M3U", obj.uploaded_file.url) + return format_html( + "Download M3U", obj.uploaded_file.url + ) return "No file uploaded" + uploaded_file_link.short_description = "Uploaded File" - @admin.action(description='Activate selected accounts') + @admin.action(description="Activate selected accounts") def activate_accounts(self, request, queryset): queryset.update(is_active=True) - @admin.action(description='Deactivate selected accounts') + @admin.action(description="Deactivate selected accounts") def deactivate_accounts(self, request, queryset): queryset.update(is_active=False) # Add ManyToManyField for Django Admin (if applicable) - if hasattr(M3UAccount, 'user_agents'): - filter_horizontal = ('user_agents',) # Only for ManyToManyField + if hasattr(M3UAccount, "user_agents"): + filter_horizontal = ("user_agents",) # 
Only for ManyToManyField + @admin.register(M3UFilter) class M3UFilterAdmin(admin.ModelAdmin): - list_display = ('m3u_account', 'filter_type', 'regex_pattern', 'exclude') - list_filter = ('filter_type', 'exclude') - search_fields = ('regex_pattern',) - ordering = ('m3u_account',) + list_display = ("m3u_account", "filter_type", "regex_pattern", "exclude") + list_filter = ("filter_type", "exclude") + search_fields = ("regex_pattern",) + ordering = ("m3u_account",) + @admin.register(ServerGroup) class ServerGroupAdmin(admin.ModelAdmin): - list_display = ('name',) - search_fields = ('name',) + list_display = ("name",) + search_fields = ("name",) + +@admin.register(M3UAccountProfile) +class M3UAccountProfileAdmin(admin.ModelAdmin): + list_display = ( + "name", + "m3u_account", + "is_default", + "is_active", + "max_streams", + "current_viewers", + "account_status_display", + "account_expiration_display", + "last_refresh_display", + ) + list_filter = ("is_active", "is_default", "m3u_account__account_type") + search_fields = ("name", "m3u_account__name") + readonly_fields = ("account_info_display",) + + def account_status_display(self, obj): + """Display account status from custom properties""" + status = obj.get_account_status() + if status: + # Create colored status display + color_map = { + 'Active': 'green', + 'Expired': 'red', + 'Disabled': 'red', + 'Banned': 'red', + } + color = color_map.get(status, 'black') + return format_html( + '{}', + color, + status + ) + return "Unknown" + account_status_display.short_description = "Account Status" + + def account_expiration_display(self, obj): + """Display account expiration from custom properties""" + expiration = obj.get_account_expiration() + if expiration: + from datetime import datetime + if expiration < datetime.now(): + return format_html( + '{}', + expiration.strftime('%Y-%m-%d %H:%M') + ) + else: + return format_html( + '{}', + expiration.strftime('%Y-%m-%d %H:%M') + ) + return "Unknown" + account_expiration_display.short_description = "Expires" + + def last_refresh_display(self, obj): + """Display last refresh time from custom properties""" + last_refresh = obj.get_last_refresh() + if last_refresh: + return last_refresh.strftime('%Y-%m-%d %H:%M:%S') + return "Never" + last_refresh_display.short_description = "Last Refresh" + + def account_info_display(self, obj): + """Display formatted account information from custom properties""" + if not obj.custom_properties: + return "No account information available" + + html_parts = [] + + # User Info + user_info = obj.custom_properties.get('user_info', {}) + if user_info: + html_parts.append("
<strong>User Information:</strong>") + html_parts.append("<ul>") + for key, value in user_info.items(): + if key == 'exp_date' and value: + try: + from datetime import datetime + exp_date = datetime.fromtimestamp(float(value)) + value = exp_date.strftime('%Y-%m-%d %H:%M:%S') + except (ValueError, TypeError): + pass + html_parts.append(f"<li>{key}: {value}</li>") + html_parts.append("</ul>") + + # Server Info + server_info = obj.custom_properties.get('server_info', {}) + if server_info: + html_parts.append("<strong>Server Information:</strong>") + html_parts.append("<ul>") + for key, value in server_info.items(): + html_parts.append(f"<li>{key}: {value}</li>") + html_parts.append("</ul>") + + # Last Refresh + last_refresh = obj.custom_properties.get('last_refresh') + if last_refresh: + html_parts.append(f"<strong>Last Refresh:</strong> {last_refresh}
") + + return format_html(''.join(html_parts)) if html_parts else "No account information available" + + account_info_display.short_description = "Account Information" diff --git a/apps/m3u/api_urls.py b/apps/m3u/api_urls.py index 41fc2fbc..6a80a1fe 100644 --- a/apps/m3u/api_urls.py +++ b/apps/m3u/api_urls.py @@ -1,18 +1,44 @@ from django.urls import path, include from rest_framework.routers import DefaultRouter -from .api_views import M3UAccountViewSet, M3UFilterViewSet, ServerGroupViewSet, RefreshM3UAPIView, RefreshSingleM3UAPIView, UserAgentViewSet, M3UAccountProfileViewSet +from .api_views import ( + M3UAccountViewSet, + M3UFilterViewSet, + ServerGroupViewSet, + RefreshM3UAPIView, + RefreshSingleM3UAPIView, + RefreshAccountInfoAPIView, + UserAgentViewSet, + M3UAccountProfileViewSet, +) -app_name = 'm3u' +app_name = "m3u" router = DefaultRouter() -router.register(r'accounts', M3UAccountViewSet, basename='m3u-account') -router.register(r'accounts\/(?P\d+)\/profiles', M3UAccountProfileViewSet, basename='m3u-account-profiles') -router.register(r'filters', M3UFilterViewSet, basename='m3u-filter') -router.register(r'server-groups', ServerGroupViewSet, basename='server-group') +router.register(r"accounts", M3UAccountViewSet, basename="m3u-account") +router.register( + r"accounts\/(?P\d+)\/profiles", + M3UAccountProfileViewSet, + basename="m3u-account-profiles", +) +router.register( + r"accounts\/(?P\d+)\/filters", + M3UFilterViewSet, + basename="m3u-filters", +) +router.register(r"server-groups", ServerGroupViewSet, basename="server-group") urlpatterns = [ - path('refresh/', RefreshM3UAPIView.as_view(), name='m3u_refresh'), - path('refresh//', RefreshSingleM3UAPIView.as_view(), name='m3u_refresh_single'), + path("refresh/", RefreshM3UAPIView.as_view(), name="m3u_refresh"), + path( + "refresh//", + RefreshSingleM3UAPIView.as_view(), + name="m3u_refresh_single", + ), + path( + "refresh-account-info//", + RefreshAccountInfoAPIView.as_view(), + name="m3u_refresh_account_info", + ), ] urlpatterns += router.urls diff --git a/apps/m3u/api_views.py b/apps/m3u/api_views.py index 054bdaa9..1f16f20f 100644 --- a/apps/m3u/api_views.py +++ b/apps/m3u/api_views.py @@ -1,7 +1,11 @@ from rest_framework import viewsets, status from rest_framework.response import Response from rest_framework.views import APIView -from rest_framework.permissions import IsAuthenticated +from apps.accounts.permissions import ( + Authenticated, + permission_classes_by_action, + permission_classes_by_method, +) from drf_yasg.utils import swagger_auto_schema from drf_yasg import openapi from django.shortcuts import get_object_or_404 @@ -10,13 +14,15 @@ from django.core.cache import cache import os from rest_framework.decorators import action from django.conf import settings +from .tasks import refresh_m3u_groups +import json -# Import all models, including UserAgent. from .models import M3UAccount, M3UFilter, ServerGroup, M3UAccountProfile from core.models import UserAgent from apps.channels.models import ChannelGroupM3UAccount from core.serializers import UserAgentSerializer -# Import all serializers, including the UserAgentSerializer. 
+from apps.vod.models import M3UVODCategoryRelation + from .serializers import ( M3UAccountSerializer, M3UFilterSerializer, @@ -24,130 +30,455 @@ from .serializers import ( M3UAccountProfileSerializer, ) -from .tasks import refresh_single_m3u_account, refresh_m3u_accounts -from django.core.files.storage import default_storage -from django.core.files.base import ContentFile +from .tasks import refresh_single_m3u_account, refresh_m3u_accounts, refresh_account_info +import json + class M3UAccountViewSet(viewsets.ModelViewSet): """Handles CRUD operations for M3U accounts""" - queryset = M3UAccount.objects.prefetch_related('channel_group') + + queryset = M3UAccount.objects.prefetch_related("channel_group") serializer_class = M3UAccountSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] def create(self, request, *args, **kwargs): # Handle file upload first, if any file_path = None - if 'file' in request.FILES: - file = request.FILES['file'] + if "file" in request.FILES: + file = request.FILES["file"] file_name = file.name - file_path = os.path.join('/data/uploads/m3us', file_name) + file_path = os.path.join("/data/uploads/m3us", file_name) os.makedirs(os.path.dirname(file_path), exist_ok=True) - with open(file_path, 'wb+') as destination: + with open(file_path, "wb+") as destination: for chunk in file.chunks(): destination.write(chunk) # Add file_path to the request data so it's available during creation request.data._mutable = True # Allow modification of the request data - request.data['file_path'] = file_path # Include the file path if a file was uploaded - request.data.pop('server_url') + request.data["file_path"] = ( + file_path # Include the file path if a file was uploaded + ) + + # Handle the user_agent field - convert "null" string to None + if "user_agent" in request.data and request.data["user_agent"] == "null": + request.data["user_agent"] = None + + # Handle server_url appropriately + if "server_url" in request.data and not request.data["server_url"]: + request.data.pop("server_url") + request.data._mutable = False # Make the request data immutable again # Now call super().create() to create the instance response = super().create(request, *args, **kwargs) + account_type = response.data.get("account_type") + account_id = response.data.get("id") + + # Notify frontend that a new playlist was created + from core.utils import send_websocket_update + send_websocket_update('updates', 'update', { + 'type': 'playlist_created', + 'playlist_id': account_id + }) + + if account_type == M3UAccount.Types.XC: + refresh_m3u_groups(account_id) + + # Check if VOD is enabled + enable_vod = request.data.get("enable_vod", False) + if enable_vod: + from apps.vod.tasks import refresh_categories + + refresh_categories(account_id) + # After the instance is created, return the response return response def update(self, request, *args, **kwargs): instance = self.get_object() + old_vod_enabled = False + + # Check current VOD setting + if instance.custom_properties: + custom_props = instance.custom_properties or {} + old_vod_enabled = custom_props.get("enable_vod", False) # Handle file upload first, if any file_path = None - if 'file' in request.FILES: - file = request.FILES['file'] + if "file" in request.FILES: + file = request.FILES["file"] file_name = file.name - file_path = os.path.join('/data/uploads/m3us', file_name) + file_path = 
os.path.join("/data/uploads/m3us", file_name) os.makedirs(os.path.dirname(file_path), exist_ok=True) - with open(file_path, 'wb+') as destination: + with open(file_path, "wb+") as destination: for chunk in file.chunks(): destination.write(chunk) # Add file_path to the request data so it's available during creation request.data._mutable = True # Allow modification of the request data - request.data['file_path'] = file_path # Include the file path if a file was uploaded - request.data.pop('server_url') + request.data["file_path"] = ( + file_path # Include the file path if a file was uploaded + ) + + # Handle the user_agent field - convert "null" string to None + if "user_agent" in request.data and request.data["user_agent"] == "null": + request.data["user_agent"] = None + + # Handle server_url appropriately + if "server_url" in request.data and not request.data["server_url"]: + request.data.pop("server_url") + request.data._mutable = False # Make the request data immutable again if instance.file_path and os.path.exists(instance.file_path): os.remove(instance.file_path) - # Now call super().create() to create the instance + # Now call super().update() to update the instance response = super().update(request, *args, **kwargs) - # After the instance is created, return the response + # Check if VOD setting changed and trigger refresh if needed + new_vod_enabled = request.data.get("enable_vod", old_vod_enabled) + + if ( + instance.account_type == M3UAccount.Types.XC + and not old_vod_enabled + and new_vod_enabled + ): + # Create Uncategorized categories immediately so they're available in the UI + from apps.vod.models import VODCategory, M3UVODCategoryRelation + + # Create movie Uncategorized category + movie_category, _ = VODCategory.objects.get_or_create( + name="Uncategorized", + category_type="movie", + defaults={} + ) + + # Create series Uncategorized category + series_category, _ = VODCategory.objects.get_or_create( + name="Uncategorized", + category_type="series", + defaults={} + ) + + # Create relations for both categories (disabled by default until first refresh) + account_custom_props = instance.custom_properties or {} + auto_enable_new = account_custom_props.get("auto_enable_new_groups_vod", True) + + M3UVODCategoryRelation.objects.get_or_create( + category=movie_category, + m3u_account=instance, + defaults={ + 'enabled': auto_enable_new, + 'custom_properties': {} + } + ) + + M3UVODCategoryRelation.objects.get_or_create( + category=series_category, + m3u_account=instance, + defaults={ + 'enabled': auto_enable_new, + 'custom_properties': {} + } + ) + + # Trigger full VOD refresh + from apps.vod.tasks import refresh_vod_content + + refresh_vod_content.delay(instance.id) + + # After the instance is updated, return the response return response + def partial_update(self, request, *args, **kwargs): + """Handle partial updates with special logic for is_active field""" + instance = self.get_object() + + # Check if we're toggling is_active + if ( + "is_active" in request.data + and instance.is_active != request.data["is_active"] + ): + # Set appropriate status based on new is_active value + if request.data["is_active"]: + request.data["status"] = M3UAccount.Status.IDLE + else: + request.data["status"] = M3UAccount.Status.DISABLED + + # Continue with regular partial update + return super().partial_update(request, *args, **kwargs) + + @action(detail=True, methods=["post"], url_path="refresh-vod") + def refresh_vod(self, request, pk=None): + """Trigger VOD content refresh for XtreamCodes 
accounts""" + account = self.get_object() + + if account.account_type != M3UAccount.Types.XC: + return Response( + {"error": "VOD refresh is only available for XtreamCodes accounts"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Check if VOD is enabled + vod_enabled = False + if account.custom_properties: + custom_props = account.custom_properties or {} + vod_enabled = custom_props.get("enable_vod", False) + + if not vod_enabled: + return Response( + {"error": "VOD is not enabled for this account"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + try: + from apps.vod.tasks import refresh_vod_content + + refresh_vod_content.delay(account.id) + return Response( + {"message": f"VOD refresh initiated for account {account.name}"}, + status=status.HTTP_202_ACCEPTED, + ) + except Exception as e: + return Response( + {"error": f"Failed to initiate VOD refresh: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + @action(detail=True, methods=["patch"], url_path="group-settings") + def update_group_settings(self, request, pk=None): + """Update auto channel sync settings for M3U account groups""" + account = self.get_object() + group_settings = request.data.get("group_settings", []) + category_settings = request.data.get("category_settings", []) + + try: + for setting in group_settings: + group_id = setting.get("channel_group") + enabled = setting.get("enabled", True) + auto_sync = setting.get("auto_channel_sync", False) + sync_start = setting.get("auto_sync_channel_start") + custom_properties = setting.get("custom_properties", {}) + + if group_id: + ChannelGroupM3UAccount.objects.update_or_create( + channel_group_id=group_id, + m3u_account=account, + defaults={ + "enabled": enabled, + "auto_channel_sync": auto_sync, + "auto_sync_channel_start": sync_start, + "custom_properties": custom_properties, + }, + ) + + for setting in category_settings: + category_id = setting.get("id") + enabled = setting.get("enabled", True) + custom_properties = setting.get("custom_properties", {}) + + if category_id: + M3UVODCategoryRelation.objects.update_or_create( + category_id=category_id, + m3u_account=account, + defaults={ + "enabled": enabled, + "custom_properties": custom_properties, + }, + ) + + return Response({"message": "Group settings updated successfully"}) + + except Exception as e: + return Response( + {"error": f"Failed to update group settings: {str(e)}"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + class M3UFilterViewSet(viewsets.ModelViewSet): - """Handles CRUD operations for M3U filters""" queryset = M3UFilter.objects.all() serializer_class = M3UFilterSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + + def get_queryset(self): + m3u_account_id = self.kwargs["account_id"] + return M3UFilter.objects.filter(m3u_account_id=m3u_account_id) + + def perform_create(self, serializer): + # Get the account ID from the URL + account_id = self.kwargs["account_id"] + + # # Get the M3UAccount instance for the account_id + # m3u_account = M3UAccount.objects.get(id=account_id) + + # Save the 'm3u_account' in the serializer context + serializer.context["m3u_account"] = account_id + + # Perform the actual save + serializer.save(m3u_account_id=account_id) + class ServerGroupViewSet(viewsets.ModelViewSet): """Handles CRUD operations for Server Groups""" + queryset = ServerGroup.objects.all() serializer_class = ServerGroupSerializer - 
permission_classes = [IsAuthenticated] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + class RefreshM3UAPIView(APIView): """Triggers refresh for all active M3U accounts""" + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + @swagger_auto_schema( operation_description="Triggers a refresh of all active M3U accounts", - responses={202: "M3U refresh initiated"} + responses={202: "M3U refresh initiated"}, ) def post(self, request, format=None): refresh_m3u_accounts.delay() - return Response({'success': True, 'message': 'M3U refresh initiated.'}, status=status.HTTP_202_ACCEPTED) + return Response( + {"success": True, "message": "M3U refresh initiated."}, + status=status.HTTP_202_ACCEPTED, + ) + class RefreshSingleM3UAPIView(APIView): """Triggers refresh for a single M3U account""" + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + @swagger_auto_schema( operation_description="Triggers a refresh of a single M3U account", - responses={202: "M3U account refresh initiated"} + responses={202: "M3U account refresh initiated"}, ) def post(self, request, account_id, format=None): refresh_single_m3u_account.delay(account_id) - return Response({'success': True, 'message': f'M3U account {account_id} refresh initiated.'}, - status=status.HTTP_202_ACCEPTED) + return Response( + { + "success": True, + "message": f"M3U account {account_id} refresh initiated.", + }, + status=status.HTTP_202_ACCEPTED, + ) + + +class RefreshAccountInfoAPIView(APIView): + """Triggers account info refresh for a single M3U account""" + + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + + @swagger_auto_schema( + operation_description="Triggers a refresh of account information for a specific M3U profile", + responses={202: "Account info refresh initiated", 400: "Profile not found or not XtreamCodes"}, + ) + def post(self, request, profile_id, format=None): + try: + from .models import M3UAccountProfile + profile = M3UAccountProfile.objects.get(id=profile_id) + account = profile.m3u_account + + if account.account_type != M3UAccount.Types.XC: + return Response( + { + "success": False, + "error": "Account info refresh is only available for XtreamCodes accounts", + }, + status=status.HTTP_400_BAD_REQUEST, + ) + + refresh_account_info.delay(profile_id) + return Response( + { + "success": True, + "message": f"Account info refresh initiated for profile {profile.name}.", + }, + status=status.HTTP_202_ACCEPTED, + ) + except M3UAccountProfile.DoesNotExist: + return Response( + { + "success": False, + "error": "Profile not found", + }, + status=status.HTTP_404_NOT_FOUND, + ) + class UserAgentViewSet(viewsets.ModelViewSet): """Handles CRUD operations for User Agents""" + queryset = UserAgent.objects.all() serializer_class = UserAgentSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + class M3UAccountProfileViewSet(viewsets.ModelViewSet): queryset = M3UAccountProfile.objects.all() serializer_class = M3UAccountProfileSerializer - 
permission_classes = [IsAuthenticated] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] def get_queryset(self): - m3u_account_id = self.kwargs['account_id'] + m3u_account_id = self.kwargs["account_id"] return M3UAccountProfile.objects.filter(m3u_account_id=m3u_account_id) def perform_create(self, serializer): # Get the account ID from the URL - account_id = self.kwargs['account_id'] + account_id = self.kwargs["account_id"] # Get the M3UAccount instance for the account_id m3u_account = M3UAccount.objects.get(id=account_id) # Save the 'm3u_account' in the serializer context - serializer.context['m3u_account'] = m3u_account + serializer.context["m3u_account"] = m3u_account # Perform the actual save serializer.save(m3u_account_id=m3u_account) diff --git a/apps/m3u/forms.py b/apps/m3u/forms.py index f6fc7f91..cf6586c3 100644 --- a/apps/m3u/forms.py +++ b/apps/m3u/forms.py @@ -4,6 +4,13 @@ from .models import M3UAccount, M3UFilter import re class M3UAccountForm(forms.ModelForm): + enable_vod = forms.BooleanField( + required=False, + initial=False, + label="Enable VOD Content", + help_text="Parse and import VOD (movies/series) content for XtreamCodes accounts" + ) + class Meta: model = M3UAccount fields = [ @@ -13,8 +20,34 @@ class M3UAccountForm(forms.ModelForm): 'server_group', 'max_streams', 'is_active', + 'enable_vod', ] + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # Set initial value for enable_vod from custom_properties + if self.instance and self.instance.custom_properties: + custom_props = self.instance.custom_properties or {} + self.fields['enable_vod'].initial = custom_props.get('enable_vod', False) + + def save(self, commit=True): + instance = super().save(commit=False) + + # Handle enable_vod field + enable_vod = self.cleaned_data.get('enable_vod', False) + + # Parse existing custom_properties + custom_props = instance.custom_properties or {} + + # Update VOD preference + custom_props['enable_vod'] = enable_vod + instance.custom_properties = custom_props + + if commit: + instance.save() + return instance + def clean_uploaded_file(self): uploaded_file = self.cleaned_data.get('uploaded_file') if uploaded_file: diff --git a/apps/m3u/migrations/0003_create_custom_account.py b/apps/m3u/migrations/0003_create_custom_account.py index 8695f248..cdc40cda 100644 --- a/apps/m3u/migrations/0003_create_custom_account.py +++ b/apps/m3u/migrations/0003_create_custom_account.py @@ -3,6 +3,7 @@ from django.db import migrations from core.models import CoreSettings + def create_custom_account(apps, schema_editor): default_user_agent_id = CoreSettings.get_default_user_agent_id() @@ -18,7 +19,7 @@ def create_custom_account(apps, schema_editor): M3UAccountProfile = apps.get_model("m3u", "M3UAccountProfile") M3UAccountProfile.objects.create( m3u_account=m3u_account, - name=f'{m3u_account.name} Default', + name=f"{m3u_account.name} Default", max_streams=m3u_account.max_streams, is_default=True, is_active=True, @@ -26,10 +27,12 @@ def create_custom_account(apps, schema_editor): replace_pattern="$1", ) + class Migration(migrations.Migration): dependencies = [ - ('m3u', '0002_m3uaccount_locked'), + ("m3u", "0002_m3uaccount_locked"), + ("core", "0004_preload_core_settings"), ] operations = [ diff --git a/apps/m3u/migrations/0005_m3uaccount_custom_properties_and_more.py b/apps/m3u/migrations/0005_m3uaccount_custom_properties_and_more.py index 3728bf7f..7a5f2013 100644 --- 
a/apps/m3u/migrations/0005_m3uaccount_custom_properties_and_more.py +++ b/apps/m3u/migrations/0005_m3uaccount_custom_properties_and_more.py @@ -7,24 +7,29 @@ from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ - ('django_celery_beat', '0019_alter_periodictasks_options'), - ('m3u', '0004_m3uaccount_stream_profile'), + ("django_celery_beat", "0019_alter_periodictasks_options"), + ("m3u", "0004_m3uaccount_stream_profile"), ] operations = [ migrations.AddField( - model_name='m3uaccount', - name='custom_properties', + model_name="m3uaccount", + name="custom_properties", field=models.TextField(blank=True, null=True), ), migrations.AddField( - model_name='m3uaccount', - name='refresh_interval', + model_name="m3uaccount", + name="refresh_interval", field=models.IntegerField(default=24), ), migrations.AddField( - model_name='m3uaccount', - name='refresh_task', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='django_celery_beat.periodictask'), + model_name="m3uaccount", + name="refresh_task", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + to="django_celery_beat.periodictask", + ), ), ] diff --git a/apps/m3u/migrations/0008_m3uaccount_stale_stream_days.py b/apps/m3u/migrations/0008_m3uaccount_stale_stream_days.py new file mode 100644 index 00000000..69a1397d --- /dev/null +++ b/apps/m3u/migrations/0008_m3uaccount_stale_stream_days.py @@ -0,0 +1,18 @@ +# Generated by Django 5.1.6 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('m3u', '0007_remove_m3uaccount_uploaded_file_m3uaccount_file_path'), + ] + + operations = [ + migrations.AddField( + model_name='m3uaccount', + name='stale_stream_days', + field=models.PositiveIntegerField(default=7, help_text='Number of days after which a stream will be removed if not seen in the M3U source.'), + ), + ] diff --git a/apps/m3u/migrations/0009_m3uaccount_account_type_m3uaccount_password_and_more.py b/apps/m3u/migrations/0009_m3uaccount_account_type_m3uaccount_password_and_more.py new file mode 100644 index 00000000..d57f7ccd --- /dev/null +++ b/apps/m3u/migrations/0009_m3uaccount_account_type_m3uaccount_password_and_more.py @@ -0,0 +1,28 @@ +# Generated by Django 5.1.6 on 2025-04-27 12:56 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('m3u', '0008_m3uaccount_stale_stream_days'), + ] + + operations = [ + migrations.AddField( + model_name='m3uaccount', + name='account_type', + field=models.CharField(choices=[('STD', 'Standard'), ('XC', 'Xtream Codes')], default='STD'), + ), + migrations.AddField( + model_name='m3uaccount', + name='password', + field=models.CharField(blank=True, max_length=255, null=True), + ), + migrations.AddField( + model_name='m3uaccount', + name='username', + field=models.CharField(blank=True, max_length=255, null=True), + ), + ] diff --git a/apps/m3u/migrations/0010_add_status_fields_and_remove_auto_now.py b/apps/m3u/migrations/0010_add_status_fields_and_remove_auto_now.py new file mode 100644 index 00000000..e9896041 --- /dev/null +++ b/apps/m3u/migrations/0010_add_status_fields_and_remove_auto_now.py @@ -0,0 +1,28 @@ +# Generated by Django 5.1.6 on 2025-05-04 21:43 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('m3u', '0009_m3uaccount_account_type_m3uaccount_password_and_more'), + ] + + operations = [ + 
migrations.AddField( + model_name='m3uaccount', + name='last_message', + field=models.TextField(blank=True, null=True, help_text="Last status message, including success results or error information"), + ), + migrations.AddField( + model_name='m3uaccount', + name='status', + field=models.CharField(choices=[('idle', 'Idle'), ('fetching', 'Fetching'), ('parsing', 'Parsing'), ('error', 'Error'), ('success', 'Success')], default='idle', max_length=20), + ), + migrations.AlterField( + model_name='m3uaccount', + name='updated_at', + field=models.DateTimeField(blank=True, help_text='Time when this account was last successfully refreshed', null=True), + ), + ] diff --git a/apps/m3u/migrations/0011_alter_m3uaccount_status.py b/apps/m3u/migrations/0011_alter_m3uaccount_status.py new file mode 100644 index 00000000..7812f317 --- /dev/null +++ b/apps/m3u/migrations/0011_alter_m3uaccount_status.py @@ -0,0 +1,18 @@ +# Generated by Django 5.1.6 on 2025-05-15 01:05 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('m3u', '0010_add_status_fields_and_remove_auto_now'), + ] + + operations = [ + migrations.AlterField( + model_name='m3uaccount', + name='status', + field=models.CharField(choices=[('idle', 'Idle'), ('fetching', 'Fetching'), ('parsing', 'Parsing'), ('error', 'Error'), ('success', 'Success'), ('pending_setup', 'Pending Setup'), ('disabled', 'Disabled')], default='idle', max_length=20), + ), + ] diff --git a/apps/m3u/migrations/0012_alter_m3uaccount_refresh_interval.py b/apps/m3u/migrations/0012_alter_m3uaccount_refresh_interval.py new file mode 100644 index 00000000..7045810e --- /dev/null +++ b/apps/m3u/migrations/0012_alter_m3uaccount_refresh_interval.py @@ -0,0 +1,18 @@ +# Generated by Django 5.1.6 on 2025-05-21 19:58 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('m3u', '0011_alter_m3uaccount_status'), + ] + + operations = [ + migrations.AlterField( + model_name='m3uaccount', + name='refresh_interval', + field=models.IntegerField(default=0), + ), + ] diff --git a/apps/m3u/migrations/0013_alter_m3ufilter_filter_type.py b/apps/m3u/migrations/0013_alter_m3ufilter_filter_type.py new file mode 100644 index 00000000..0b0a8a1d --- /dev/null +++ b/apps/m3u/migrations/0013_alter_m3ufilter_filter_type.py @@ -0,0 +1,18 @@ +# Generated by Django 5.1.6 on 2025-07-22 21:16 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('m3u', '0012_alter_m3uaccount_refresh_interval'), + ] + + operations = [ + migrations.AlterField( + model_name='m3ufilter', + name='filter_type', + field=models.CharField(choices=[('group', 'Group'), ('name', 'Stream Name'), ('url', 'Stream URL')], default='group', help_text='Filter based on either group title or stream name.', max_length=50), + ), + ] diff --git a/apps/m3u/migrations/0014_alter_m3ufilter_options_m3ufilter_order.py b/apps/m3u/migrations/0014_alter_m3ufilter_options_m3ufilter_order.py new file mode 100644 index 00000000..3510bfc5 --- /dev/null +++ b/apps/m3u/migrations/0014_alter_m3ufilter_options_m3ufilter_order.py @@ -0,0 +1,22 @@ +# Generated by Django 5.1.6 on 2025-07-31 17:14 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('m3u', '0013_alter_m3ufilter_filter_type'), + ] + + operations = [ + migrations.AlterModelOptions( + name='m3ufilter', + options={'ordering': ['order']}, + ), + migrations.AddField( + 
model_name='m3ufilter', + name='order', + field=models.PositiveIntegerField(default=0), + ), + ] diff --git a/apps/m3u/migrations/0015_alter_m3ufilter_options_m3ufilter_custom_properties.py b/apps/m3u/migrations/0015_alter_m3ufilter_options_m3ufilter_custom_properties.py new file mode 100644 index 00000000..6b62c9a1 --- /dev/null +++ b/apps/m3u/migrations/0015_alter_m3ufilter_options_m3ufilter_custom_properties.py @@ -0,0 +1,22 @@ +# Generated by Django 5.2.4 on 2025-08-02 16:06 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('m3u', '0014_alter_m3ufilter_options_m3ufilter_order'), + ] + + operations = [ + migrations.AlterModelOptions( + name='m3ufilter', + options={}, + ), + migrations.AddField( + model_name='m3ufilter', + name='custom_properties', + field=models.TextField(blank=True, null=True), + ), + ] diff --git a/apps/m3u/migrations/0016_m3uaccount_priority.py b/apps/m3u/migrations/0016_m3uaccount_priority.py new file mode 100644 index 00000000..55e0e95b --- /dev/null +++ b/apps/m3u/migrations/0016_m3uaccount_priority.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-08-20 22:35 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('m3u', '0015_alter_m3ufilter_options_m3ufilter_custom_properties'), + ] + + operations = [ + migrations.AddField( + model_name='m3uaccount', + name='priority', + field=models.PositiveIntegerField(default=0, help_text='Priority for VOD provider selection (higher numbers = higher priority). Used when multiple providers offer the same content.'), + ), + ] diff --git a/apps/m3u/migrations/0017_alter_m3uaccount_custom_properties_and_more.py b/apps/m3u/migrations/0017_alter_m3uaccount_custom_properties_and_more.py new file mode 100644 index 00000000..84cb968b --- /dev/null +++ b/apps/m3u/migrations/0017_alter_m3uaccount_custom_properties_and_more.py @@ -0,0 +1,28 @@ +# Generated by Django 5.2.4 on 2025-09-02 15:19 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('m3u', '0016_m3uaccount_priority'), + ] + + operations = [ + migrations.AlterField( + model_name='m3uaccount', + name='custom_properties', + field=models.JSONField(blank=True, default=dict, null=True), + ), + migrations.AlterField( + model_name='m3uaccount', + name='server_url', + field=models.URLField(blank=True, help_text='The base URL of the M3U server (optional if a file is uploaded)', max_length=1000, null=True), + ), + migrations.AlterField( + model_name='m3ufilter', + name='custom_properties', + field=models.JSONField(blank=True, default=dict, null=True), + ), + ] diff --git a/apps/m3u/migrations/0018_add_profile_custom_properties.py b/apps/m3u/migrations/0018_add_profile_custom_properties.py new file mode 100644 index 00000000..d616c598 --- /dev/null +++ b/apps/m3u/migrations/0018_add_profile_custom_properties.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-09-09 20:57 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('m3u', '0017_alter_m3uaccount_custom_properties_and_more'), + ] + + operations = [ + migrations.AddField( + model_name='m3uaccountprofile', + name='custom_properties', + field=models.JSONField(blank=True, default=dict, help_text='Custom properties for storing account information from provider (e.g., XC account details, expiration dates)', null=True), + ), + ] diff --git a/apps/m3u/models.py b/apps/m3u/models.py index 
25a332c6..b812ad6c 100644 --- a/apps/m3u/models.py +++ b/apps/m3u/models.py @@ -7,73 +7,98 @@ from apps.channels.models import StreamProfile from django_celery_beat.models import PeriodicTask from core.models import CoreSettings, UserAgent -CUSTOM_M3U_ACCOUNT_NAME="custom" +CUSTOM_M3U_ACCOUNT_NAME = "custom" + class M3UAccount(models.Model): + class Types(models.TextChoices): + STADNARD = "STD", "Standard" + XC = "XC", "Xtream Codes" + + class Status(models.TextChoices): + IDLE = "idle", "Idle" + FETCHING = "fetching", "Fetching" + PARSING = "parsing", "Parsing" + ERROR = "error", "Error" + SUCCESS = "success", "Success" + PENDING_SETUP = "pending_setup", "Pending Setup" + DISABLED = "disabled", "Disabled" + """Represents an M3U Account for IPTV streams.""" name = models.CharField( - max_length=255, - unique=True, - help_text="Unique name for this M3U account" + max_length=255, unique=True, help_text="Unique name for this M3U account" ) server_url = models.URLField( + max_length=1000, blank=True, null=True, - help_text="The base URL of the M3U server (optional if a file is uploaded)" - ) - file_path = models.CharField( - max_length=255, - blank=True, - null=True + help_text="The base URL of the M3U server (optional if a file is uploaded)", ) + file_path = models.CharField(max_length=255, blank=True, null=True) server_group = models.ForeignKey( - 'ServerGroup', + "ServerGroup", on_delete=models.SET_NULL, null=True, blank=True, - related_name='m3u_accounts', - help_text="The server group this M3U account belongs to" + related_name="m3u_accounts", + help_text="The server group this M3U account belongs to", ) max_streams = models.PositiveIntegerField( - default=0, - help_text="Maximum number of concurrent streams (0 for unlimited)" + default=0, help_text="Maximum number of concurrent streams (0 for unlimited)" ) is_active = models.BooleanField( - default=True, - help_text="Set to false to deactivate this M3U account" + default=True, help_text="Set to false to deactivate this M3U account" ) created_at = models.DateTimeField( - auto_now_add=True, - help_text="Time when this account was created" + auto_now_add=True, help_text="Time when this account was created" ) updated_at = models.DateTimeField( - auto_now=True, - help_text="Time when this account was last updated" + null=True, + blank=True, + help_text="Time when this account was last successfully refreshed", + ) + status = models.CharField( + max_length=20, choices=Status.choices, default=Status.IDLE + ) + last_message = models.TextField( + null=True, + blank=True, + help_text="Last status message, including success results or error information", ) user_agent = models.ForeignKey( - 'core.UserAgent', + "core.UserAgent", on_delete=models.SET_NULL, null=True, blank=True, - related_name='m3u_accounts', - help_text="The User-Agent associated with this M3U account." 
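# Illustrative sketch (editorial; assumes the model fields introduced above, not
# part of the committed diff): the Status choices and the manually managed
# updated_at let refresh tasks publish progress without implying a successful
# refresh, e.g.
#
#     account.status = M3UAccount.Status.FETCHING
#     account.last_message = "Starting download..."
#     account.save(update_fields=["status", "last_message"])
#
# with updated_at only set once a refresh actually completes successfully.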
+ related_name="m3u_accounts", + help_text="The User-Agent associated with this M3U account.", ) locked = models.BooleanField( - default=False, - help_text="Protected - can't be deleted or modified" + default=False, help_text="Protected - can't be deleted or modified" ) stream_profile = models.ForeignKey( StreamProfile, on_delete=models.SET_NULL, null=True, blank=True, - related_name='m3u_accounts' + related_name="m3u_accounts", ) - custom_properties = models.TextField(null=True, blank=True) - refresh_interval = models.IntegerField(default=24) + account_type = models.CharField(choices=Types.choices, default=Types.STADNARD) + username = models.CharField(max_length=255, null=True, blank=True) + password = models.CharField(max_length=255, null=True, blank=True) + custom_properties = models.JSONField(default=dict, blank=True, null=True) + refresh_interval = models.IntegerField(default=0) refresh_task = models.ForeignKey( PeriodicTask, on_delete=models.SET_NULL, null=True, blank=True ) + stale_stream_days = models.PositiveIntegerField( + default=7, + help_text="Number of days after which a stream will be removed if not seen in the M3U source.", + ) + priority = models.PositiveIntegerField( + default=0, + help_text="Priority for VOD provider selection (higher numbers = higher priority). Used when multiple providers offer the same content.", + ) def __str__(self): return self.name @@ -104,10 +129,21 @@ class M3UAccount(models.Model): def get_user_agent(self): user_agent = self.user_agent if not user_agent: - user_agent = UserAgent.objects.get(id=CoreSettings.get_default_user_agent_id()) + user_agent = UserAgent.objects.get( + id=CoreSettings.get_default_user_agent_id() + ) return user_agent + def save(self, *args, **kwargs): + # Prevent auto_now behavior by handling updated_at manually + if "update_fields" in kwargs and "updated_at" not in kwargs["update_fields"]: + # Don't modify updated_at for regular updates + kwargs.setdefault("update_fields", []) + if "updated_at" in kwargs["update_fields"]: + kwargs["update_fields"].remove("updated_at") + super().save(*args, **kwargs) + # def get_channel_groups(self): # return ChannelGroup.objects.filter(m3u_account__m3u_account=self) @@ -119,35 +155,40 @@ class M3UAccount(models.Model): # """Return all streams linked to this account with enabled ChannelGroups.""" # return self.streams.filter(channel_group__in=ChannelGroup.objects.filter(m3u_account__enabled=True)) + class M3UFilter(models.Model): """Defines filters for M3U accounts based on stream name or group title.""" + FILTER_TYPE_CHOICES = ( - ('group', 'Group Title'), - ('name', 'Stream Name'), + ("group", "Group"), + ("name", "Stream Name"), + ("url", "Stream URL"), ) + m3u_account = models.ForeignKey( M3UAccount, on_delete=models.CASCADE, - related_name='filters', - help_text="The M3U account this filter is applied to." + related_name="filters", + help_text="The M3U account this filter is applied to.", ) filter_type = models.CharField( max_length=50, choices=FILTER_TYPE_CHOICES, - default='group', - help_text="Filter based on either group title or stream name." + default="group", + help_text="Filter based on either group title or stream name.", ) regex_pattern = models.CharField( - max_length=200, - help_text="A regex pattern to match streams or groups." + max_length=200, help_text="A regex pattern to match streams or groups." ) exclude = models.BooleanField( default=True, - help_text="If True, matching items are excluded; if False, only matches are included." 
+ help_text="If True, matching items are excluded; if False, only matches are included.", ) + order = models.PositiveIntegerField(default=0) + custom_properties = models.JSONField(default=dict, blank=True, null=True) def applies_to(self, stream_name, group_name): - target = group_name if self.filter_type == 'group' else stream_name + target = group_name if self.filter_type == "group" else stream_name return bool(re.search(self.regex_pattern, target, re.IGNORECASE)) def clean(self): @@ -157,7 +198,9 @@ class M3UFilter(models.Model): raise ValidationError(f"Invalid regex pattern: {self.regex_pattern}") def __str__(self): - filter_type_display = dict(self.FILTER_TYPE_CHOICES).get(self.filter_type, 'Unknown') + filter_type_display = dict(self.FILTER_TYPE_CHOICES).get( + self.filter_type, "Unknown" + ) exclude_status = "Exclude" if self.exclude else "Include" return f"[{self.m3u_account.name}] {filter_type_display}: {self.regex_pattern} ({exclude_status})" @@ -183,40 +226,35 @@ class M3UFilter(models.Model): class ServerGroup(models.Model): """Represents a logical grouping of servers or channels.""" + name = models.CharField( - max_length=100, - unique=True, - help_text="Unique name for this server group." + max_length=100, unique=True, help_text="Unique name for this server group." ) def __str__(self): return self.name -from django.db import models class M3UAccountProfile(models.Model): """Represents a profile associated with an M3U Account.""" + m3u_account = models.ForeignKey( - 'M3UAccount', + "M3UAccount", on_delete=models.CASCADE, - related_name='profiles', - help_text="The M3U account this profile belongs to." + related_name="profiles", + help_text="The M3U account this profile belongs to.", ) name = models.CharField( - max_length=255, - help_text="Name for the M3U account profile" + max_length=255, help_text="Name for the M3U account profile" ) is_default = models.BooleanField( - default=False, - help_text="Set to false to deactivate this profile" + default=False, help_text="Set to false to deactivate this profile" ) max_streams = models.PositiveIntegerField( - default=0, - help_text="Maximum number of concurrent streams (0 for unlimited)" + default=0, help_text="Maximum number of concurrent streams (0 for unlimited)" ) is_active = models.BooleanField( - default=True, - help_text="Set to false to deactivate this profile" + default=True, help_text="Set to false to deactivate this profile" ) search_pattern = models.CharField( max_length=255, @@ -225,22 +263,95 @@ class M3UAccountProfile(models.Model): max_length=255, ) current_viewers = models.PositiveIntegerField(default=0) + custom_properties = models.JSONField( + default=dict, + blank=True, + null=True, + help_text="Custom properties for storing account information from provider (e.g., XC account details, expiration dates)" + ) class Meta: constraints = [ - models.UniqueConstraint(fields=['m3u_account', 'name'], name='unique_account_name') + models.UniqueConstraint( + fields=["m3u_account", "name"], name="unique_account_name" + ) ] def __str__(self): return f"{self.name} ({self.m3u_account.name})" + def get_account_expiration(self): + """Get account expiration date from custom properties if available""" + if not self.custom_properties: + return None + + user_info = self.custom_properties.get('user_info', {}) + exp_date = user_info.get('exp_date') + + if exp_date: + try: + from datetime import datetime + # XC exp_date is typically a Unix timestamp + if isinstance(exp_date, (int, float)): + return datetime.fromtimestamp(exp_date) + elif 
isinstance(exp_date, str): + # Try to parse as timestamp first, then as ISO date + try: + return datetime.fromtimestamp(float(exp_date)) + except ValueError: + return datetime.fromisoformat(exp_date) + except (ValueError, TypeError): + pass + + return None + + def get_account_status(self): + """Get account status from custom properties if available""" + if not self.custom_properties: + return None + + user_info = self.custom_properties.get('user_info', {}) + return user_info.get('status') + + def get_max_connections(self): + """Get maximum connections from custom properties if available""" + if not self.custom_properties: + return None + + user_info = self.custom_properties.get('user_info', {}) + return user_info.get('max_connections') + + def get_active_connections(self): + """Get active connections from custom properties if available""" + if not self.custom_properties: + return None + + user_info = self.custom_properties.get('user_info', {}) + return user_info.get('active_cons') + + def get_last_refresh(self): + """Get last refresh timestamp from custom properties if available""" + if not self.custom_properties: + return None + + last_refresh = self.custom_properties.get('last_refresh') + if last_refresh: + try: + from datetime import datetime + return datetime.fromisoformat(last_refresh) + except (ValueError, TypeError): + pass + + return None + + @receiver(models.signals.post_save, sender=M3UAccount) def create_profile_for_m3u_account(sender, instance, created, **kwargs): """Automatically create an M3UAccountProfile when M3UAccount is created.""" if created: M3UAccountProfile.objects.create( m3u_account=instance, - name=f'{instance.name} Default', + name=f"{instance.name} Default", max_streams=instance.max_streams, is_default=True, is_active=True, @@ -253,6 +364,5 @@ def create_profile_for_m3u_account(sender, instance, created, **kwargs): is_default=True, ) - profile.max_streams = instance.max_streams profile.save() diff --git a/apps/m3u/serializers.py b/apps/m3u/serializers.py index d79b0117..a607dc07 100644 --- a/apps/m3u/serializers.py +++ b/apps/m3u/serializers.py @@ -1,41 +1,106 @@ -from rest_framework import serializers +from core.utils import validate_flexible_url +from rest_framework import serializers, status from rest_framework.response import Response from .models import M3UAccount, M3UFilter, ServerGroup, M3UAccountProfile from core.models import UserAgent from apps.channels.models import ChannelGroup, ChannelGroupM3UAccount -from apps.channels.serializers import ChannelGroupM3UAccountSerializer, ChannelGroupSerializer +from apps.channels.serializers import ( + ChannelGroupM3UAccountSerializer, +) import logging +import json logger = logging.getLogger(__name__) + class M3UFilterSerializer(serializers.ModelSerializer): """Serializer for M3U Filters""" - channel_groups = ChannelGroupM3UAccountSerializer(source='m3u_account', many=True) class Meta: model = M3UFilter - fields = ['id', 'filter_type', 'regex_pattern', 'exclude', 'channel_groups'] + fields = [ + "id", + "filter_type", + "regex_pattern", + "exclude", + "order", + "custom_properties", + ] -from rest_framework import serializers -from .models import M3UAccountProfile class M3UAccountProfileSerializer(serializers.ModelSerializer): + account = serializers.SerializerMethodField() + + def get_account(self, obj): + """Include basic account information for frontend use""" + return { + 'id': obj.m3u_account.id, + 'name': obj.m3u_account.name, + 'account_type': obj.m3u_account.account_type, + 'is_xtream_codes': 
obj.m3u_account.account_type == 'XC' + } + class Meta: model = M3UAccountProfile - fields = ['id', 'name', 'max_streams', 'is_active', 'is_default', 'current_viewers', 'search_pattern', 'replace_pattern'] - read_only_fields = ['id'] + fields = [ + "id", + "name", + "max_streams", + "is_active", + "is_default", + "current_viewers", + "search_pattern", + "replace_pattern", + "custom_properties", + "account", + ] + read_only_fields = ["id", "account"] + extra_kwargs = { + 'search_pattern': {'required': False, 'allow_blank': True}, + 'replace_pattern': {'required': False, 'allow_blank': True}, + } def create(self, validated_data): - m3u_account = self.context.get('m3u_account') + m3u_account = self.context.get("m3u_account") # Use the m3u_account when creating the profile - validated_data['m3u_account_id'] = m3u_account.id + validated_data["m3u_account_id"] = m3u_account.id return super().create(validated_data) + def validate(self, data): + """Custom validation to handle default profiles""" + # For updates to existing instances + if self.instance and self.instance.is_default: + # For default profiles, search_pattern and replace_pattern are not required + # and we don't want to validate them since they shouldn't be changed + return data + + # For non-default profiles or new profiles, ensure required fields are present + if not data.get('search_pattern'): + raise serializers.ValidationError({ + 'search_pattern': ['This field is required for non-default profiles.'] + }) + if not data.get('replace_pattern'): + raise serializers.ValidationError({ + 'replace_pattern': ['This field is required for non-default profiles.'] + }) + + return data + def update(self, instance, validated_data): if instance.is_default: - raise serializers.ValidationError("Default profiles cannot be modified.") + # For default profiles, only allow updating name and custom_properties (for notes) + allowed_fields = {'name', 'custom_properties'} + + # Remove any fields that aren't allowed for default profiles + disallowed_fields = set(validated_data.keys()) - allowed_fields + if disallowed_fields: + raise serializers.ValidationError( + f"Default profiles can only modify name and notes. " + f"Cannot modify: {', '.join(disallowed_fields)}" + ) + return super().update(instance, validated_data) def destroy(self, request, *args, **kwargs): @@ -43,13 +108,15 @@ class M3UAccountProfileSerializer(serializers.ModelSerializer): if instance.is_default: return Response( {"error": "Default profiles cannot be deleted."}, - status=status.HTTP_400_BAD_REQUEST + status=status.HTTP_400_BAD_REQUEST, ) return super().destroy(request, *args, **kwargs) + class M3UAccountSerializer(serializers.ModelSerializer): """Serializer for M3U Account""" - filters = M3UFilterSerializer(many=True, read_only=True) + + filters = serializers.SerializerMethodField() # Include user_agent as a mandatory field using its primary key. 
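# Example payload shape (editorial; values invented) that the profile
# serializer above is expected to produce, including the read-only "account"
# summary added for the frontend:
#
#     {
#         "id": 3, "name": "Provider Default", "max_streams": 2,
#         "is_active": True, "is_default": True, "current_viewers": 0,
#         "search_pattern": "", "replace_pattern": "",
#         "custom_properties": {"user_info": {"exp_date": 1767225600}},
#         "account": {"id": 1, "name": "Provider", "account_type": "XC",
#                     "is_xtream_codes": True},
#     }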
user_agent = serializers.PrimaryKeyRelatedField( queryset=UserAgent.objects.all(), @@ -57,21 +124,96 @@ class M3UAccountSerializer(serializers.ModelSerializer): allow_null=True, ) profiles = M3UAccountProfileSerializer(many=True, read_only=True) - read_only_fields = ['locked', 'created_at', 'updated_at'] + read_only_fields = ["locked", "created_at", "updated_at"] # channel_groups = serializers.SerializerMethodField() - channel_groups = ChannelGroupM3UAccountSerializer(source='channel_group', many=True, required=False) + channel_groups = ChannelGroupM3UAccountSerializer( + source="channel_group", many=True, required=False + ) + server_url = serializers.CharField( + required=False, + allow_blank=True, + allow_null=True, + validators=[validate_flexible_url], + ) + enable_vod = serializers.BooleanField(required=False, write_only=True) + auto_enable_new_groups_live = serializers.BooleanField(required=False, write_only=True) + auto_enable_new_groups_vod = serializers.BooleanField(required=False, write_only=True) + auto_enable_new_groups_series = serializers.BooleanField(required=False, write_only=True) class Meta: model = M3UAccount fields = [ - 'id', 'name', 'server_url', 'file_path', 'server_group', - 'max_streams', 'is_active', 'created_at', 'updated_at', 'filters', 'user_agent', 'profiles', 'locked', - 'channel_groups', 'refresh_interval' + "id", + "name", + "server_url", + "file_path", + "server_group", + "max_streams", + "is_active", + "created_at", + "updated_at", + "filters", + "user_agent", + "profiles", + "locked", + "channel_groups", + "refresh_interval", + "custom_properties", + "account_type", + "username", + "password", + "stale_stream_days", + "priority", + "status", + "last_message", + "enable_vod", + "auto_enable_new_groups_live", + "auto_enable_new_groups_vod", + "auto_enable_new_groups_series", ] + extra_kwargs = { + "password": { + "required": False, + "allow_blank": True, + }, + } + + def to_representation(self, instance): + data = super().to_representation(instance) + + # Parse custom_properties to get VOD preference and auto_enable_new_groups settings + custom_props = instance.custom_properties or {} + + data["enable_vod"] = custom_props.get("enable_vod", False) + data["auto_enable_new_groups_live"] = custom_props.get("auto_enable_new_groups_live", True) + data["auto_enable_new_groups_vod"] = custom_props.get("auto_enable_new_groups_vod", True) + data["auto_enable_new_groups_series"] = custom_props.get("auto_enable_new_groups_series", True) + return data def update(self, instance, validated_data): + # Handle enable_vod preference and auto_enable_new_groups settings + enable_vod = validated_data.pop("enable_vod", None) + auto_enable_new_groups_live = validated_data.pop("auto_enable_new_groups_live", None) + auto_enable_new_groups_vod = validated_data.pop("auto_enable_new_groups_vod", None) + auto_enable_new_groups_series = validated_data.pop("auto_enable_new_groups_series", None) + + # Get existing custom_properties + custom_props = instance.custom_properties or {} + + # Update preferences + if enable_vod is not None: + custom_props["enable_vod"] = enable_vod + if auto_enable_new_groups_live is not None: + custom_props["auto_enable_new_groups_live"] = auto_enable_new_groups_live + if auto_enable_new_groups_vod is not None: + custom_props["auto_enable_new_groups_vod"] = auto_enable_new_groups_vod + if auto_enable_new_groups_series is not None: + custom_props["auto_enable_new_groups_series"] = auto_enable_new_groups_series + + validated_data["custom_properties"] = 
custom_props + # Pop out channel group memberships so we can handle them manually - channel_group_data = validated_data.pop('channel_group', []) + channel_group_data = validated_data.pop("channel_group", []) # First, update the M3UAccount itself for attr, value in validated_data.items(): @@ -81,13 +223,12 @@ class M3UAccountSerializer(serializers.ModelSerializer): # Prepare a list of memberships to update memberships_to_update = [] for group_data in channel_group_data: - group = group_data.get('channel_group') - enabled = group_data.get('enabled') + group = group_data.get("channel_group") + enabled = group_data.get("enabled") try: membership = ChannelGroupM3UAccount.objects.get( - m3u_account=instance, - channel_group=group + m3u_account=instance, channel_group=group ) membership.enabled = enabled memberships_to_update.append(membership) @@ -96,13 +237,39 @@ class M3UAccountSerializer(serializers.ModelSerializer): # Perform the bulk update if memberships_to_update: - ChannelGroupM3UAccount.objects.bulk_update(memberships_to_update, ['enabled']) + ChannelGroupM3UAccount.objects.bulk_update( + memberships_to_update, ["enabled"] + ) return instance + def create(self, validated_data): + # Handle enable_vod preference and auto_enable_new_groups settings during creation + enable_vod = validated_data.pop("enable_vod", False) + auto_enable_new_groups_live = validated_data.pop("auto_enable_new_groups_live", True) + auto_enable_new_groups_vod = validated_data.pop("auto_enable_new_groups_vod", True) + auto_enable_new_groups_series = validated_data.pop("auto_enable_new_groups_series", True) + + # Parse existing custom_properties or create new + custom_props = validated_data.get("custom_properties", {}) + + # Set preferences (default to True for auto_enable_new_groups) + custom_props["enable_vod"] = enable_vod + custom_props["auto_enable_new_groups_live"] = auto_enable_new_groups_live + custom_props["auto_enable_new_groups_vod"] = auto_enable_new_groups_vod + custom_props["auto_enable_new_groups_series"] = auto_enable_new_groups_series + validated_data["custom_properties"] = custom_props + + return super().create(validated_data) + + def get_filters(self, obj): + filters = obj.filters.order_by("order") + return M3UFilterSerializer(filters, many=True).data + + class ServerGroupSerializer(serializers.ModelSerializer): """Serializer for Server Group""" class Meta: model = ServerGroup - fields = ['id', 'name'] + fields = ["id", "name"] diff --git a/apps/m3u/signals.py b/apps/m3u/signals.py index 6e46a0ff..d014ac92 100644 --- a/apps/m3u/signals.py +++ b/apps/m3u/signals.py @@ -1,10 +1,13 @@ # apps/m3u/signals.py -from django.db.models.signals import post_save, post_delete +from django.db.models.signals import post_save, post_delete, pre_save from django.dispatch import receiver from .models import M3UAccount -from .tasks import refresh_single_m3u_account, refresh_m3u_groups +from .tasks import refresh_single_m3u_account, refresh_m3u_groups, delete_m3u_refresh_task_by_id from django_celery_beat.models import PeriodicTask, IntervalSchedule import json +import logging + +logger = logging.getLogger(__name__) @receiver(post_save, sender=M3UAccount) def refresh_account_on_save(sender, instance, created, **kwargs): @@ -13,7 +16,7 @@ def refresh_account_on_save(sender, instance, created, **kwargs): call a Celery task that fetches & parses that single account if it is active or newly created. 
""" - if created: + if created and instance.account_type != M3UAccount.Types.XC: refresh_m3u_groups.delay(instance.id) @receiver(post_save, sender=M3UAccount) @@ -28,21 +31,17 @@ def create_or_update_refresh_task(sender, instance, **kwargs): period=IntervalSchedule.HOURS ) - if not instance.refresh_task: - refresh_task = PeriodicTask.objects.create( - name=task_name, - interval=interval, - task="apps.m3u.tasks.refresh_single_m3u_account", - kwargs=json.dumps({"account_id": instance.id}), - enabled=instance.refresh_interval != 0, - ) - M3UAccount.objects.filter(id=instance.id).update(refresh_task=refresh_task) - else: - task = instance.refresh_task + # Task should be enabled only if refresh_interval != 0 AND account is active + should_be_enabled = (instance.refresh_interval != 0) and instance.is_active + + # First check if the task already exists to avoid validation errors + try: + task = PeriodicTask.objects.get(name=task_name) + # Task exists, just update it updated_fields = [] - if task.enabled != (instance.refresh_interval != 0): - task.enabled = instance.refresh_interval != 0 + if task.enabled != should_be_enabled: + task.enabled = should_be_enabled updated_fields.append("enabled") if task.interval != interval: @@ -52,11 +51,60 @@ def create_or_update_refresh_task(sender, instance, **kwargs): if updated_fields: task.save(update_fields=updated_fields) + # Ensure instance has the task + if instance.refresh_task_id != task.id: + M3UAccount.objects.filter(id=instance.id).update(refresh_task=task) + + except PeriodicTask.DoesNotExist: + # Create new task if it doesn't exist + refresh_task = PeriodicTask.objects.create( + name=task_name, + interval=interval, + task="apps.m3u.tasks.refresh_single_m3u_account", + kwargs=json.dumps({"account_id": instance.id}), + enabled=should_be_enabled, + ) + M3UAccount.objects.filter(id=instance.id).update(refresh_task=refresh_task) + @receiver(post_delete, sender=M3UAccount) def delete_refresh_task(sender, instance, **kwargs): """ Delete the associated Celery Beat periodic task when a Channel is deleted. """ - if instance.refresh_task: - instance.refresh_task.interval.delete() - instance.refresh_task.delete() + try: + # First try the foreign key relationship to find the task ID + task = None + if instance.refresh_task: + logger.info(f"Found task via foreign key: {instance.refresh_task.id} for M3UAccount {instance.id}") + task = instance.refresh_task + + # Use the helper function to delete the task + if task: + delete_m3u_refresh_task_by_id(instance.id) + else: + # Otherwise use the helper function + delete_m3u_refresh_task_by_id(instance.id) + except Exception as e: + logger.error(f"Error in delete_refresh_task signal handler: {str(e)}", exc_info=True) + +@receiver(pre_save, sender=M3UAccount) +def update_status_on_active_change(sender, instance, **kwargs): + """ + When an M3UAccount's is_active field changes, update the status accordingly. 
+ """ + if instance.pk: # Only for existing records, not new ones + try: + # Get the current record from the database + old_instance = M3UAccount.objects.get(pk=instance.pk) + + # If is_active changed, update the status + if old_instance.is_active != instance.is_active: + if instance.is_active: + # When activating, set status to idle + instance.status = M3UAccount.Status.IDLE + else: + # When deactivating, set status to disabled + instance.status = M3UAccount.Status.DISABLED + except M3UAccount.DoesNotExist: + # New record, will use default status + pass diff --git a/apps/m3u/tasks.py b/apps/m3u/tasks.py index beacaaa2..ed9eb465 100644 --- a/apps/m3u/tasks.py +++ b/apps/m3u/tasks.py @@ -5,6 +5,7 @@ import requests import os import gc import gzip, zipfile +from concurrent.futures import ThreadPoolExecutor, as_completed from celery.app.control import Inspect from celery.result import AsyncResult from celery import shared_task, current_app, group @@ -18,16 +19,25 @@ from channels.layers import get_channel_layer from django.utils import timezone import time import json -from core.utils import RedisClient, acquire_task_lock, release_task_lock -from core.models import CoreSettings +from core.utils import ( + RedisClient, + acquire_task_lock, + release_task_lock, + natural_sort_key, + log_system_event, +) +from core.models import CoreSettings, UserAgent from asgiref.sync import async_to_sync +from core.xtream_codes import Client as XCClient +from core.utils import send_websocket_update +from .utils import normalize_stream_url logger = logging.getLogger(__name__) -BATCH_SIZE = 1000 -SKIP_EXTS = {} +BATCH_SIZE = 1500 # Optimized batch size for threading m3u_dir = os.path.join(settings.MEDIA_ROOT, "cached_m3u") + def fetch_m3u_lines(account, use_cache=False): os.makedirs(m3u_dir, exist_ok=True) file_path = os.path.join(m3u_dir, f"{account.id}.m3u") @@ -35,119 +45,448 @@ def fetch_m3u_lines(account, use_cache=False): """Fetch M3U file lines efficiently.""" if account.server_url: if not use_cache or not os.path.exists(file_path): - user_agent = account.get_user_agent() - headers = {"User-Agent": user_agent.user_agent} - logger.info(f"Fetching from URL {account.server_url}") try: - response = requests.get(account.server_url, headers=headers, stream=True) + # Try to get account-specific user agent first + user_agent_obj = account.get_user_agent() + user_agent = ( + user_agent_obj.user_agent + if user_agent_obj + else "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" + ) + + logger.debug( + f"Using user agent: {user_agent} for M3U account: {account.name}" + ) + headers = {"User-Agent": user_agent} + logger.info(f"Fetching from URL {account.server_url}") + + # Set account status to FETCHING before starting download + account.status = M3UAccount.Status.FETCHING + account.last_message = "Starting download..." 
+ account.save(update_fields=["status", "last_message"]) + + response = requests.get( + account.server_url, headers=headers, stream=True + ) + + # Log the actual response details for debugging + logger.debug(f"HTTP Response: {response.status_code} from {account.server_url}") + logger.debug(f"Content-Type: {response.headers.get('content-type', 'Not specified')}") + logger.debug(f"Content-Length: {response.headers.get('content-length', 'Not specified')}") + logger.debug(f"Response headers: {dict(response.headers)}") + + # Check if we've been redirected to a different URL + if hasattr(response, 'url') and response.url != account.server_url: + logger.warning(f"Request was redirected from {account.server_url} to {response.url}") + + # Check for ANY non-success status code FIRST (before raise_for_status) + if response.status_code < 200 or response.status_code >= 300: + # For error responses, read the content immediately (not streaming) + try: + response_content = response.text[:1000] # Capture up to 1000 characters + logger.error(f"Error response content: {response_content!r}") + except Exception as e: + logger.error(f"Could not read error response content: {e}") + response_content = "Could not read error response content" + + # Provide specific messages for known non-standard codes + if response.status_code == 884: + error_msg = f"Server returned HTTP 884 (authentication/authorization failure) from URL: {account.server_url}. Server message: {response_content}" + elif response.status_code >= 800: + error_msg = f"Server returned non-standard HTTP status {response.status_code} from URL: {account.server_url}. Server message: {response_content}" + elif response.status_code == 404: + error_msg = f"M3U file not found (404) at URL: {account.server_url}. Server message: {response_content}" + elif response.status_code == 403: + error_msg = f"Access forbidden (403) to M3U file at URL: {account.server_url}. Server message: {response_content}" + elif response.status_code == 401: + error_msg = f"Authentication required (401) for M3U file at URL: {account.server_url}. Server message: {response_content}" + elif response.status_code == 500: + error_msg = f"Server error (500) while fetching M3U file from URL: {account.server_url}. Server message: {response_content}" + else: + error_msg = f"HTTP error ({response.status_code}) while fetching M3U file from URL: {account.server_url}. 
Server message: {response_content}" + + logger.error(error_msg) + account.status = M3UAccount.Status.ERROR + account.last_message = error_msg + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account.id, + "downloading", + 100, + status="error", + error=error_msg, + ) + return [], False + + # Only call raise_for_status if we have a success code (this should not raise now) response.raise_for_status() - total_size = int(response.headers.get('Content-Length', 0)) + total_size = int(response.headers.get("Content-Length", 0)) downloaded = 0 start_time = time.time() last_update_time = start_time progress = 0 + temp_content = b"" # Store content temporarily to validate before saving + has_content = False - with open(file_path, 'wb') as file: - send_m3u_update(account.id, "downloading", 0) - for chunk in response.iter_content(chunk_size=8192): - if chunk: - file.write(chunk) + # First, let's collect the content and validate it + send_m3u_update(account.id, "downloading", 0) + for chunk in response.iter_content(chunk_size=8192): + if chunk: + temp_content += chunk + has_content = True - downloaded += len(chunk) - elapsed_time = time.time() - start_time + downloaded += len(chunk) + elapsed_time = time.time() - start_time - # Calculate download speed in KB/s - speed = downloaded / elapsed_time / 1024 # in KB/s + # Calculate download speed in KB/s + speed = downloaded / elapsed_time / 1024 # in KB/s - # Calculate progress percentage - if total_size and total_size > 0: - progress = (downloaded / total_size) * 100 + # Calculate progress percentage + if total_size and total_size > 0: + progress = (downloaded / total_size) * 100 - # Time remaining (in seconds) - time_remaining = (total_size - downloaded) / (speed * 1024) + # Time remaining (in seconds) + time_remaining = ( + (total_size - downloaded) / (speed * 1024) + if speed > 0 + else 0 + ) - current_time = time.time() - if current_time - last_update_time >= 0.5: - last_update_time = current_time - if progress > 0: - send_m3u_update(account.id, "downloading", progress, speed=speed, elapsed_time=elapsed_time, time_remaining=time_remaining) + current_time = time.time() + if current_time - last_update_time >= 0.5: + last_update_time = current_time + if progress > 0: + # Update the account's last_message with detailed progress info + progress_msg = f"Downloading: {progress:.1f}% - {speed:.1f} KB/s - {time_remaining:.1f}s remaining" + account.last_message = progress_msg + account.save(update_fields=["last_message"]) - send_m3u_update(account.id, "downloading", 100) + send_m3u_update( + account.id, + "downloading", + progress, + speed=speed, + elapsed_time=elapsed_time, + time_remaining=time_remaining, + message=progress_msg, + ) + + # Check if we actually received any content + logger.info(f"Download completed. 
Has content: {has_content}, Content length: {len(temp_content)} bytes") + if not has_content or len(temp_content) == 0: + error_msg = f"Server responded successfully (HTTP {response.status_code}) but provided empty M3U file from URL: {account.server_url}" + logger.error(error_msg) + account.status = M3UAccount.Status.ERROR + account.last_message = error_msg + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account.id, + "downloading", + 100, + status="error", + error=error_msg, + ) + return [], False + + # Basic validation: check if content looks like an M3U file + try: + content_str = temp_content.decode('utf-8', errors='ignore') + content_lines = content_str.strip().split('\n') + + # Log first few lines for debugging (be careful not to log too much) + preview_lines = content_lines[:5] + logger.info(f"Content preview (first 5 lines): {preview_lines}") + logger.info(f"Total lines in content: {len(content_lines)}") + + # Check if it's a valid M3U file (should start with #EXTM3U or contain M3U-like content) + is_valid_m3u = False + + # First, check if this looks like an error response disguised as 200 OK + content_lower = content_str.lower() + if any(error_indicator in content_lower for error_indicator in [ + ' dict: """ Parse an EXTINF line from an M3U file. - This function removes the "#EXTINF:" prefix, then splits the remaining - string on the first comma that is not enclosed in quotes. + This function removes the "#EXTINF:" prefix, then extracts all key="value" attributes, + and treats everything after the last attribute as the display name. Returns a dictionary with: - 'attributes': a dict of attribute key/value pairs (e.g. tvg-id, tvg-logo, group-title) - - 'display_name': the text after the comma (the fallback display name) + - 'display_name': the text after the attributes (the fallback display name) - 'name': the value from tvg-name (if present) or the display name otherwise. """ if not line.startswith("#EXTINF:"): return None - content = line[len("#EXTINF:"):].strip() - # Split on the first comma that is not inside quotes. - parts = re.split(r',(?=(?:[^"]*"[^"]*")*[^"]*$)', content, maxsplit=1) - if len(parts) != 2: - return None - attributes_part, display_name = parts[0], parts[1].strip() - attrs = dict(re.findall(r'([^\s]+)=["\']([^"\']+)["\']', attributes_part)) - # Use tvg-name attribute if available; otherwise, use the display name. 
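# For reference (editorial; sample values): both the old parser removed here and
# the new implementation that follows aim to turn a line like
#
#     #EXTINF:-1 tvg-id="news.us" tvg-name="News HD" group-title="News",News HD
#
# into roughly
#
#     {"attributes": {"tvg-id": "news.us", "tvg-name": "News HD",
#                     "group-title": "News"},
#      "display_name": "News HD",
#      "name": "News HD"}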
- name = attrs.get('tvg-name', display_name) - return { - 'attributes': attrs, - 'display_name': display_name, - 'name': name - } + content = line[len("#EXTINF:") :].strip() -import re -import logging + # Single pass: extract all attributes AND track the last attribute position + # This regex matches both key="value" and key='value' patterns + attrs = {} + last_attr_end = 0 -logger = logging.getLogger(__name__) + # Use a single regex that handles both quote types + for match in re.finditer(r'([^\s]+)=(["\'])([^\2]*?)\2', content): + key = match.group(1) + value = match.group(3) + attrs[key] = value + last_attr_end = match.end() + + # Everything after the last attribute (skipping leading comma and whitespace) is the display name + if last_attr_end > 0: + remaining = content[last_attr_end:].strip() + # Remove leading comma if present + if remaining.startswith(','): + remaining = remaining[1:].strip() + display_name = remaining + else: + # No attributes found, try the old comma-split method as fallback + parts = content.split(',', 1) + if len(parts) == 2: + display_name = parts[1].strip() + else: + display_name = content.strip() + + # Use tvg-name attribute if available; otherwise try tvc-guide-title, then fall back to display name. + name = get_case_insensitive_attr(attrs, "tvg-name", None) + if not name: + name = get_case_insensitive_attr(attrs, "tvc-guide-title", None) + if not name: + name = display_name + return {"attributes": attrs, "display_name": display_name, "name": name} -def _matches_filters(stream_name: str, group_name: str, filters): - """Check if a stream or group name matches a precompiled regex filter.""" - compiled_filters = [(re.compile(f.regex_pattern, re.IGNORECASE), f.exclude) for f in filters] - for pattern, exclude in compiled_filters: - target = group_name if f.filter_type == 'group' else stream_name - if pattern.search(target or ''): - return exclude - return False @shared_task def refresh_m3u_accounts(): @@ -162,6 +501,7 @@ def refresh_m3u_accounts(): logger.info(msg) return msg + def check_field_lengths(streams_to_create): for stream in streams_to_create: for field, value in stream.__dict__.items(): @@ -171,79 +511,497 @@ def check_field_lengths(streams_to_create): print("") print("") + @shared_task -def process_groups(account, group_names): - existing_groups = {group.name: group for group in ChannelGroup.objects.filter(name__in=group_names)} +def process_groups(account, groups, scan_start_time=None): + """Process groups and update their relationships with the M3U account. 
+ + Args: + account: M3UAccount instance + groups: Dict of group names to custom properties + scan_start_time: Timestamp when the scan started (for consistent last_seen marking) + """ + # Use scan_start_time if provided, otherwise current time + # This ensures consistency with stream processing and cleanup logic + if scan_start_time is None: + scan_start_time = timezone.now() + + existing_groups = { + group.name: group + for group in ChannelGroup.objects.filter(name__in=groups.keys()) + } logger.info(f"Currently {len(existing_groups)} existing groups") - groups = [] + # Check if we should auto-enable new groups based on account settings + account_custom_props = account.custom_properties or {} + auto_enable_new_groups_live = account_custom_props.get("auto_enable_new_groups_live", True) + + # Separate existing groups from groups that need to be created + existing_group_objs = [] groups_to_create = [] - for group_name in group_names: - logger.info(f"Handling group: {group_name}") + + for group_name, custom_props in groups.items(): if group_name in existing_groups: - groups.append(existing_groups[group_name]) + existing_group_objs.append(existing_groups[group_name]) else: - groups_to_create.append(ChannelGroup( - name=group_name, - )) + groups_to_create.append(ChannelGroup(name=group_name)) + # Create new groups and fetch them back with IDs + newly_created_group_objs = [] if groups_to_create: - logger.info(f"Creating {len(groups_to_create)} groups") - created = ChannelGroup.bulk_create_and_fetch(groups_to_create) - logger.info(f"Created {len(created)} groups") - groups.extend(created) + logger.info(f"Creating {len(groups_to_create)} new groups for account {account.id}") + newly_created_group_objs = list(ChannelGroup.bulk_create_and_fetch(groups_to_create)) + logger.debug(f"Successfully created {len(newly_created_group_objs)} new groups") - relations = [] - for group in groups: - relations.append(ChannelGroupM3UAccount( - channel_group=group, + # Combine all groups + all_group_objs = existing_group_objs + newly_created_group_objs + + # Get existing relationships for this account + existing_relationships = { + rel.channel_group.name: rel + for rel in ChannelGroupM3UAccount.objects.filter( m3u_account=account, - )) + channel_group__name__in=groups.keys() + ).select_related('channel_group') + } - ChannelGroupM3UAccount.objects.bulk_create( - relations, - ignore_conflicts=True + relations_to_create = [] + relations_to_update = [] + + for group in all_group_objs: + custom_props = groups.get(group.name, {}) + + if group.name in existing_relationships: + # Update existing relationship if xc_id has changed (preserve other custom properties) + existing_rel = existing_relationships[group.name] + + # Get existing custom properties (now JSONB, no need to parse) + existing_custom_props = existing_rel.custom_properties or {} + + # Get the new xc_id from groups data + new_xc_id = custom_props.get("xc_id") + existing_xc_id = existing_custom_props.get("xc_id") + + # Only update if xc_id has changed + if new_xc_id != existing_xc_id: + # Merge new xc_id with existing custom properties to preserve user settings + updated_custom_props = existing_custom_props.copy() + if new_xc_id is not None: + updated_custom_props["xc_id"] = new_xc_id + elif "xc_id" in updated_custom_props: + # Remove xc_id if it's no longer provided (e.g., converting from XC to standard) + del updated_custom_props["xc_id"] + + existing_rel.custom_properties = updated_custom_props + existing_rel.last_seen = scan_start_time + existing_rel.is_stale 
= False + relations_to_update.append(existing_rel) + logger.debug(f"Updated xc_id for group '{group.name}' from '{existing_xc_id}' to '{new_xc_id}' - account {account.id}") + else: + # Update last_seen even if xc_id hasn't changed + existing_rel.last_seen = scan_start_time + existing_rel.is_stale = False + relations_to_update.append(existing_rel) + logger.debug(f"xc_id unchanged for group '{group.name}' - account {account.id}") + else: + # Create new relationship - this group is new to this M3U account + # Use the auto_enable setting to determine if it should start enabled + if not auto_enable_new_groups_live: + logger.info(f"Group '{group.name}' is new to account {account.id} - creating relationship but DISABLED (auto_enable_new_groups_live=False)") + + relations_to_create.append( + ChannelGroupM3UAccount( + channel_group=group, + m3u_account=account, + custom_properties=custom_props, + enabled=auto_enable_new_groups_live, + last_seen=scan_start_time, + is_stale=False, + ) + ) + + # Bulk create new relationships + if relations_to_create: + ChannelGroupM3UAccount.objects.bulk_create(relations_to_create, ignore_conflicts=True) + logger.debug(f"Created {len(relations_to_create)} new group relationships for account {account.id}") + + # Bulk update existing relationships + if relations_to_update: + ChannelGroupM3UAccount.objects.bulk_update(relations_to_update, ['custom_properties', 'last_seen', 'is_stale']) + logger.info(f"Updated {len(relations_to_update)} existing group relationships for account {account.id}") + + +def cleanup_stale_group_relationships(account, scan_start_time): + """ + Remove group relationships that haven't been seen since the stale retention period. + This follows the same logic as stream cleanup for consistency. + """ + # Calculate cutoff date for stale group relationships + stale_cutoff = scan_start_time - timezone.timedelta(days=account.stale_stream_days) + logger.info( + f"Removing group relationships not seen since {stale_cutoff} for M3U account {account.id}" ) -@shared_task -def process_m3u_batch(account_id, batch, groups, hash_keys): - """Processes a batch of M3U streams using bulk operations.""" + # Find stale relationships + stale_relationships = ChannelGroupM3UAccount.objects.filter( + m3u_account=account, + last_seen__lt=stale_cutoff + ).select_related('channel_group') + + relations_to_delete = list(stale_relationships) + deleted_count = len(relations_to_delete) + + if deleted_count > 0: + logger.info( + f"Found {deleted_count} stale group relationships for account {account.id}: " + f"{[rel.channel_group.name for rel in relations_to_delete]}" + ) + + # Delete the stale relationships + stale_relationships.delete() + + # Check if any of the deleted relationships left groups with no remaining associations + orphaned_group_ids = [] + for rel in relations_to_delete: + group = rel.channel_group + + # Check if this group has any remaining M3U account relationships + remaining_m3u_relationships = ChannelGroupM3UAccount.objects.filter( + channel_group=group + ).exists() + + # Check if this group has any direct channels (not through M3U accounts) + has_direct_channels = group.related_channels().exists() + + # If no relationships and no direct channels, it's safe to delete + if not remaining_m3u_relationships and not has_direct_channels: + orphaned_group_ids.append(group.id) + logger.debug(f"Group '{group.name}' has no remaining associations and will be deleted") + + # Delete truly orphaned groups + if orphaned_group_ids: + deleted_groups = 
list(ChannelGroup.objects.filter(id__in=orphaned_group_ids).values_list('name', flat=True)) + ChannelGroup.objects.filter(id__in=orphaned_group_ids).delete() + logger.info(f"Deleted {len(orphaned_group_ids)} orphaned groups that had no remaining associations: {deleted_groups}") + else: + logger.debug(f"No stale group relationships found for account {account.id}") + + return deleted_count + + +def collect_xc_streams(account_id, enabled_groups): + """Collect all XC streams in a single API call and filter by enabled groups.""" + account = M3UAccount.objects.get(id=account_id) + all_streams = [] + + # Create a mapping from category_id to group info for filtering + enabled_category_ids = {} + for group_name, props in enabled_groups.items(): + if "xc_id" in props: + enabled_category_ids[str(props["xc_id"])] = { + "name": group_name, + "props": props + } + + try: + with XCClient( + account.server_url, + account.username, + account.password, + account.get_user_agent(), + ) as xc_client: + + # Fetch ALL live streams in a single API call (much more efficient) + logger.info("Fetching ALL live streams from XC provider...") + all_xc_streams = xc_client.get_all_live_streams() # Get all streams without category filter + + if not all_xc_streams: + logger.warning("No live streams returned from XC provider") + return [] + + logger.info(f"Retrieved {len(all_xc_streams)} total live streams from provider") + + # Filter streams based on enabled categories + filtered_count = 0 + for stream in all_xc_streams: + # Get the category_id for this stream + category_id = str(stream.get("category_id", "")) + + # Only include streams from enabled categories + if category_id in enabled_category_ids: + group_info = enabled_category_ids[category_id] + + # Convert XC stream to our standard format with all properties preserved + stream_data = { + "name": stream["name"], + "url": xc_client.get_stream_url(stream["stream_id"]), + "attributes": { + "tvg-id": stream.get("epg_channel_id", ""), + "tvg-logo": stream.get("stream_icon", ""), + "group-title": group_info["name"], + # Preserve all XC stream properties as custom attributes + "stream_id": str(stream.get("stream_id", "")), + "category_id": category_id, + "stream_type": stream.get("stream_type", ""), + "added": stream.get("added", ""), + "is_adult": str(stream.get("is_adult", "0")), + "custom_sid": stream.get("custom_sid", ""), + # Include any other properties that might be present + **{k: str(v) for k, v in stream.items() if k not in [ + "name", "stream_id", "epg_channel_id", "stream_icon", + "category_id", "stream_type", "added", "is_adult", "custom_sid" + ] and v is not None} + } + } + all_streams.append(stream_data) + filtered_count += 1 + + except Exception as e: + logger.error(f"Failed to fetch XC streams: {str(e)}") + return [] + + logger.info(f"Filtered {filtered_count} streams from {len(enabled_category_ids)} enabled categories") + return all_streams + +def process_xc_category_direct(account_id, batch, groups, hash_keys): + from django.db import connections + + # Ensure clean database connections for threading + connections.close_all() + account = M3UAccount.objects.get(id=account_id) streams_to_create = [] streams_to_update = [] stream_hashes = {} - # compiled_filters = [(f.filter_type, re.compile(f.regex_pattern, re.IGNORECASE)) for f in filters] - logger.debug(f"Processing batch of {len(batch)}") + try: + with XCClient( + account.server_url, + account.username, + account.password, + account.get_user_agent(), + ) as xc_client: + # Log the batch details to help with 
debugging + logger.debug(f"Processing XC batch: {batch}") + + for group_name, props in batch.items(): + # Check if we have a valid xc_id for this group + if "xc_id" not in props: + logger.error( + f"Missing xc_id for group {group_name} in batch {batch}" + ) + continue + + # Get actual group ID from the mapping + group_id = groups.get(group_name) + if not group_id: + logger.error(f"Group {group_name} not found in enabled groups") + continue + + try: + logger.debug( + f"Fetching streams for XC category: {group_name} (ID: {props['xc_id']})" + ) + streams = xc_client.get_live_category_streams(props["xc_id"]) + + if not streams: + logger.warning( + f"No streams found for XC category {group_name} (ID: {props['xc_id']})" + ) + continue + + logger.debug( + f"Found {len(streams)} streams for category {group_name}" + ) + + for stream in streams: + name = stream["name"] + url = xc_client.get_stream_url(stream["stream_id"]) + tvg_id = stream.get("epg_channel_id", "") + tvg_logo = stream.get("stream_icon", "") + group_title = group_name + + stream_hash = Stream.generate_hash_key( + name, url, tvg_id, hash_keys, m3u_id=account_id, group=group_title + ) + stream_props = { + "name": name, + "url": url, + "logo_url": tvg_logo, + "tvg_id": tvg_id, + "m3u_account": account, + "channel_group_id": int(group_id), + "stream_hash": stream_hash, + "custom_properties": stream, + "is_stale": False, + } + + if stream_hash not in stream_hashes: + stream_hashes[stream_hash] = stream_props + except Exception as e: + logger.error( + f"Error processing XC category {group_name} (ID: {props['xc_id']}): {str(e)}" + ) + continue + + # Process all found streams + existing_streams = { + s.stream_hash: s + for s in Stream.objects.filter(stream_hash__in=stream_hashes.keys()).select_related('m3u_account').only( + 'id', 'stream_hash', 'name', 'url', 'logo_url', 'tvg_id', 'custom_properties', 'last_seen', 'updated_at', 'm3u_account' + ) + } + + for stream_hash, stream_props in stream_hashes.items(): + if stream_hash in existing_streams: + obj = existing_streams[stream_hash] + # Optimized field comparison for XC streams + changed = ( + obj.name != stream_props["name"] or + obj.url != stream_props["url"] or + obj.logo_url != stream_props["logo_url"] or + obj.tvg_id != stream_props["tvg_id"] or + obj.custom_properties != stream_props["custom_properties"] + ) + + if changed: + for key, value in stream_props.items(): + setattr(obj, key, value) + obj.last_seen = timezone.now() + obj.updated_at = timezone.now() # Update timestamp only for changed streams + obj.is_stale = False + streams_to_update.append(obj) + else: + # Always update last_seen, even if nothing else changed + obj.last_seen = timezone.now() + obj.is_stale = False + # Don't update updated_at for unchanged streams + streams_to_update.append(obj) + + # Remove from existing_streams since we've processed it + del existing_streams[stream_hash] + else: + stream_props["last_seen"] = timezone.now() + stream_props["updated_at"] = ( + timezone.now() + ) # Set initial updated_at for new streams + stream_props["is_stale"] = False + streams_to_create.append(Stream(**stream_props)) + + try: + with transaction.atomic(): + if streams_to_create: + Stream.objects.bulk_create(streams_to_create, ignore_conflicts=True) + + if streams_to_update: + # Simplified bulk update for better performance + Stream.objects.bulk_update( + streams_to_update, + ['name', 'url', 'logo_url', 'tvg_id', 'custom_properties', 'last_seen', 'updated_at', 'is_stale'], + batch_size=150 # Smaller batch size for XC 
processing + ) + + # Update last_seen for any remaining existing streams that weren't processed + if len(existing_streams.keys()) > 0: + Stream.objects.bulk_update(existing_streams.values(), ["last_seen"]) + except Exception as e: + logger.error(f"Bulk operation failed for XC streams: {str(e)}") + + retval = f"Batch processed: {len(streams_to_create)} created, {len(streams_to_update)} updated." + + except Exception as e: + logger.error(f"XC category processing error: {str(e)}") + retval = f"Error processing XC batch: {str(e)}" + finally: + # Clean up database connections for threading + connections.close_all() + + # Aggressive garbage collection + del streams_to_create, streams_to_update, stream_hashes, existing_streams + gc.collect() + + return retval + + +def process_m3u_batch_direct(account_id, batch, groups, hash_keys): + """Processes a batch of M3U streams using bulk operations with thread-safe DB connections.""" + from django.db import connections + + # Ensure clean database connections for threading + connections.close_all() + + account = M3UAccount.objects.get(id=account_id) + + compiled_filters = [ + ( + re.compile( + f.regex_pattern, + ( + re.IGNORECASE + if (f.custom_properties or {}).get( + "case_sensitive", True + ) + == False + else 0 + ), + ), + f, + ) + for f in account.filters.order_by("order") + ] + + streams_to_create = [] + streams_to_update = [] + stream_hashes = {} + + logger.debug(f"Processing batch of {len(batch)} for M3U account {account_id}") + if compiled_filters: + logger.debug(f"Using compiled filters: {[f[1].regex_pattern for f in compiled_filters]}") for stream_info in batch: try: name, url = stream_info["name"], stream_info["url"] - tvg_id, tvg_logo = stream_info["attributes"].get("tvg-id", ""), stream_info["attributes"].get("tvg-logo", "") - group_title = stream_info["attributes"].get("group-title", "Default Group") + + # Validate URL length - maximum of 4096 characters + if url and len(url) > 4096: + logger.warning(f"Skipping stream '{name}': URL too long ({len(url)} characters, max 4096)") + continue + + tvg_id, tvg_logo = get_case_insensitive_attr( + stream_info["attributes"], "tvg-id", "" + ), get_case_insensitive_attr(stream_info["attributes"], "tvg-logo", "") + group_title = get_case_insensitive_attr( + stream_info["attributes"], "group-title", "Default Group" + ) + logger.debug(f"Processing stream: {name} - {url} in group {group_title}") + include = True + for pattern, filter in compiled_filters: + logger.trace(f"Checking filter pattern {pattern}") + target = name + if filter.filter_type == "url": + target = url + elif filter.filter_type == "group": + target = group_title + + if pattern.search(target or ""): + logger.debug( + f"Stream {name} - {url} matches filter pattern {filter.regex_pattern}" + ) + include = not filter.exclude + break + + if not include: + logger.debug(f"Stream excluded by filter, skipping.") + continue # Filter out disabled groups for this account if group_title not in groups: - logger.debug(f"Skipping stream in disabled group: {group_title}") + logger.debug( + f"Skipping stream in disabled or excluded group: {group_title}" + ) continue - # if any(url.lower().endswith(ext) for ext in SKIP_EXTS) or len(url) > 2000: - # continue - - # if _matches_filters(name, group_title, account.filters.all()): - # continue - - # if any(compiled_pattern.search(current_info['name']) for ftype, compiled_pattern in compiled_filters if ftype == 'name'): - # excluded_count += 1 - # current_info = None - # continue - - stream_hash = 
Stream.generate_hash_key(name, url, tvg_id, hash_keys) - # if redis_client.exists(f"m3u_refresh:{stream_hash}"): - # # duplicate already processed by another batch - # continue - - # redis_client.set(f"m3u_refresh:{stream_hash}", "true") + stream_hash = Stream.generate_hash_key(name, url, tvg_id, hash_keys, m3u_id=account_id, group=group_title) stream_props = { "name": name, "url": url, @@ -252,7 +1010,8 @@ def process_m3u_batch(account_id, batch, groups, hash_keys): "m3u_account": account, "channel_group_id": int(groups.get(group_title)), "stream_hash": stream_hash, - "custom_properties": json.dumps(stream_info["attributes"]), + "custom_properties": stream_info["attributes"], + "is_stale": False, } if stream_hash not in stream_hashes: @@ -261,236 +1020,1959 @@ def process_m3u_batch(account_id, batch, groups, hash_keys): logger.error(f"Failed to process stream {name}: {e}") logger.error(json.dumps(stream_info)) - existing_streams = {s.stream_hash: s for s in Stream.objects.filter(stream_hash__in=stream_hashes.keys())} + existing_streams = { + s.stream_hash: s + for s in Stream.objects.filter(stream_hash__in=stream_hashes.keys()).select_related('m3u_account').only( + 'id', 'stream_hash', 'name', 'url', 'logo_url', 'tvg_id', 'custom_properties', 'last_seen', 'updated_at', 'm3u_account' + ) + } for stream_hash, stream_props in stream_hashes.items(): if stream_hash in existing_streams: obj = existing_streams[stream_hash] - existing_attr = {field.name: getattr(obj, field.name) for field in Stream._meta.fields if field != 'channel_group_id'} - changed = any(existing_attr[key] != value for key, value in stream_props.items() if key != 'channel_group_id') + # Optimized field comparison + changed = ( + obj.name != stream_props["name"] or + obj.url != stream_props["url"] or + obj.logo_url != stream_props["logo_url"] or + obj.tvg_id != stream_props["tvg_id"] or + obj.custom_properties != stream_props["custom_properties"] + ) + + # Always update last_seen + obj.last_seen = timezone.now() if changed: - for key, value in stream_props.items(): - setattr(obj, key, value) - obj.last_seen = timezone.now() - streams_to_update.append(obj) - del existing_streams[stream_hash] - else: - existing_streams[stream_hash] = obj + # Only update fields that changed and set updated_at + obj.name = stream_props["name"] + obj.url = stream_props["url"] + obj.logo_url = stream_props["logo_url"] + obj.tvg_id = stream_props["tvg_id"] + obj.custom_properties = stream_props["custom_properties"] + obj.updated_at = timezone.now() + + # Always mark as not stale since we saw it in this refresh + obj.is_stale = False + + streams_to_update.append(obj) else: + # New stream stream_props["last_seen"] = timezone.now() + stream_props["updated_at"] = timezone.now() + stream_props["is_stale"] = False streams_to_create.append(Stream(**stream_props)) try: with transaction.atomic(): if streams_to_create: Stream.objects.bulk_create(streams_to_create, ignore_conflicts=True) - if streams_to_update: - Stream.objects.bulk_update(streams_to_update, { key for key in stream_props.keys() if key not in ["m3u_account", "stream_hash"] and key not in hash_keys}) - # if len(existing_streams.keys()) > 0: - # Stream.objects.bulk_update(existing_streams.values(), ["last_seen"]) - except Exception as e: - logger.error(f"Bulk create failed: {str(e)}") - retval = f"Batch processed: {len(streams_to_create)} created, {len(streams_to_update)} updated." 
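[Editor's aside] The de-duplication above keys every stream on Stream.generate_hash_key(name, url, tvg_id, hash_keys, m3u_id=..., group=...). As a minimal sketch of that idea only (the real method lives on the Stream model and may pick different fields or a different digest; the field names and SHA-256 below are assumptions):

import hashlib

def example_stream_hash(name, url, tvg_id, hash_keys, m3u_id=None, group=None):
    # Illustrative only: build a stable key from whichever fields are configured
    # in hash_keys so that re-imported streams map back to the same row.
    fields = {"name": name, "url": url, "tvg_id": tvg_id, "m3u_id": m3u_id, "group": group}
    raw = "|".join(str(fields.get(key, "")) for key in hash_keys)
    return hashlib.sha256(raw.encode("utf-8")).hexdigest()

Because the hash is computed before the database lookup, a batch only touches rows whose hash already exists; everything else goes through bulk_create.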
+ if streams_to_update: + # Update all streams in a single bulk operation + Stream.objects.bulk_update( + streams_to_update, + ['name', 'url', 'logo_url', 'tvg_id', 'custom_properties', 'last_seen', 'updated_at', 'is_stale'], + batch_size=200 + ) + except Exception as e: + logger.error(f"Bulk operation failed: {str(e)}") + + retval = f"M3U account: {account_id}, Batch processed: {len(streams_to_create)} created, {len(streams_to_update)} updated." # Aggressive garbage collection - del streams_to_create, streams_to_update, stream_hashes, existing_streams - gc.collect() + # del streams_to_create, streams_to_update, stream_hashes, existing_streams + # from core.utils import cleanup_memory + # cleanup_memory(log_usage=True, force_collection=True) + + # Clean up database connections for threading + connections.close_all() return retval -def cleanup_streams(account_id): + +def cleanup_streams(account_id, scan_start_time=None): + # Default to the current time when no scan_start_time is supplied + if scan_start_time is None: + scan_start_time = timezone.now() account = M3UAccount.objects.get(id=account_id, is_active=True) existing_groups = ChannelGroup.objects.filter( - m3u_account__m3u_account=account, - m3u_account__enabled=True, - ).values_list('id', flat=True) - logger.info(f"Found {len(existing_groups)} active groups") - streams = Stream.objects.filter(m3u_account=account) - - streams_to_delete = Stream.objects.filter( - m3u_account=account - ).exclude( - channel_group__in=existing_groups # Exclude products having any of the excluded tags + m3u_accounts__m3u_account=account, + m3u_accounts__enabled=True, + ).values_list("id", flat=True) + logger.info( + f"Found {len(existing_groups)} active groups for M3U account {account_id}" ) - # Delete the filtered products - streams_to_delete.delete() + # Calculate cutoff date for stale streams + stale_cutoff = scan_start_time - timezone.timedelta(days=account.stale_stream_days) + logger.info( + f"Removing streams not seen since {stale_cutoff} for M3U account {account_id}" + ) + + # Delete streams that are not in active groups + streams_to_delete = Stream.objects.filter(m3u_account=account).exclude( + channel_group__in=existing_groups + ) + + # Also delete streams that haven't been seen for longer than stale_stream_days + stale_streams = Stream.objects.filter( + m3u_account=account, last_seen__lt=stale_cutoff + ) + + deleted_count = streams_to_delete.count() + stale_count = stale_streams.count() + + streams_to_delete.delete() + stale_streams.delete() + + total_deleted = deleted_count + stale_count + logger.info( + f"Cleanup for M3U account {account_id} complete: {deleted_count} streams removed due to group filter, {stale_count} removed as stale" + ) + + # Return the total count of deleted streams + return total_deleted - logger.info(f"Cleanup complete") @shared_task -def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False): - if not acquire_task_lock('refresh_m3u_account_groups', account_id): +def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False, scan_start_time=None): + """Refresh M3U groups for an account.
+ + Args: + account_id: ID of the M3U account + use_cache: Whether to use cached M3U file + full_refresh: Whether this is part of a full refresh + scan_start_time: Timestamp when the scan started (for consistent last_seen marking) + """ + if not acquire_task_lock("refresh_m3u_account_groups", account_id): return f"Task already running for account_id={account_id}.", None try: account = M3UAccount.objects.get(id=account_id, is_active=True) except M3UAccount.DoesNotExist: - release_task_lock('refresh_m3u_account_groups', account_id) + release_task_lock("refresh_m3u_account_groups", account_id) return f"M3UAccount with ID={account_id} not found or inactive.", None extinf_data = [] - groups = set(["Default Group"]) + groups = {"Default Group": {}} - for line in fetch_m3u_lines(account, use_cache): - line = line.strip() - if line.startswith("#EXTINF"): - parsed = parse_extinf_line(line) - if parsed: - if "group-title" in parsed["attributes"]: - groups.add(parsed["attributes"]["group-title"]) + if account.account_type == M3UAccount.Types.XC: + # Log detailed information about the account + logger.info( + f"Processing XC account {account_id} with URL: {account.server_url}" + ) + logger.debug( + f"Username: {account.username}, Has password: {'Yes' if account.password else 'No'}" + ) - extinf_data.append(parsed) - elif extinf_data and line.startswith("http"): - # Associate URL with the last EXTINF line - extinf_data[-1]["url"] = line + # Validate required fields + if not account.server_url: + error_msg = "Missing server URL for Xtream Codes account" + logger.error(error_msg) + account.status = M3UAccount.Status.ERROR + account.last_message = error_msg + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account_id, "processing_groups", 100, status="error", error=error_msg + ) + release_task_lock("refresh_m3u_account_groups", account_id) + return error_msg, None + + if not account.username or not account.password: + error_msg = "Missing username or password for Xtream Codes account" + logger.error(error_msg) + account.status = M3UAccount.Status.ERROR + account.last_message = error_msg + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account_id, "processing_groups", 100, status="error", error=error_msg + ) + release_task_lock("refresh_m3u_account_groups", account_id) + return error_msg, None + + try: + # Ensure server URL is properly formatted + server_url = account.server_url.rstrip("/") + if not ( + server_url.startswith("http://") or server_url.startswith("https://") + ): + server_url = f"http://{server_url}" + + # User agent handling - completely rewritten + try: + # Debug the user agent issue + logger.debug(f"Getting user agent for account {account.id}") + + # Use a hardcoded user agent string to avoid any issues with object structure + user_agent_string = ( + "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" + ) + + try: + # Try to get the user agent directly from the database + if account.user_agent_id: + ua_obj = UserAgent.objects.get(id=account.user_agent_id) + if ( + ua_obj + and hasattr(ua_obj, "user_agent") + and ua_obj.user_agent + ): + user_agent_string = ua_obj.user_agent + logger.debug( + f"Using user agent from account: {user_agent_string}" + ) + else: + # Get default user agent from CoreSettings + default_ua_id = CoreSettings.get_default_user_agent_id() + logger.debug( + f"Default user agent ID from settings: {default_ua_id}" + ) + if default_ua_id: + ua_obj = UserAgent.objects.get(id=default_ua_id) + if ( + ua_obj + 
and hasattr(ua_obj, "user_agent") + and ua_obj.user_agent + ): + user_agent_string = ua_obj.user_agent + logger.debug( + f"Using default user agent: {user_agent_string}" + ) + except Exception as e: + logger.warning( + f"Error getting user agent, using fallback: {str(e)}" + ) + + logger.debug(f"Final user agent string: {user_agent_string}") + except Exception as e: + user_agent_string = ( + "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" + ) + logger.warning( + f"Exception in user agent handling, using fallback: {str(e)}" + ) + + logger.info( + f"Creating XCClient with URL: {account.server_url}, Username: {account.username}, User-Agent: {user_agent_string}" + ) + + # Create XCClient with explicit error handling + try: + with XCClient( + account.server_url, account.username, account.password, user_agent_string + ) as xc_client: + logger.info(f"XCClient instance created successfully") + + # Authenticate with detailed error handling + try: + logger.debug(f"Authenticating with XC server {server_url}") + auth_result = xc_client.authenticate() + logger.debug(f"Authentication response: {auth_result}") + + # Queue async profile refresh task to run in background + # This prevents any delay in the main refresh process + try: + logger.info(f"Queueing background profile refresh for account {account.name}") + refresh_account_profiles.delay(account.id) + except Exception as e: + logger.warning(f"Failed to queue profile refresh task: {str(e)}") + # Don't fail the main refresh if profile refresh can't be queued + + except Exception as e: + error_msg = f"Failed to authenticate with XC server: {str(e)}" + logger.error(error_msg) + account.status = M3UAccount.Status.ERROR + account.last_message = error_msg + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account_id, + "processing_groups", + 100, + status="error", + error=error_msg, + ) + release_task_lock("refresh_m3u_account_groups", account_id) + return error_msg, None + + # Get categories with detailed error handling + try: + logger.info(f"Getting live categories from XC server") + xc_categories = xc_client.get_live_categories() + logger.info( + f"Found {len(xc_categories)} categories: {xc_categories}" + ) + + # Validate response + if not isinstance(xc_categories, list): + error_msg = ( + f"Unexpected response from XC server: {xc_categories}" + ) + logger.error(error_msg) + account.status = M3UAccount.Status.ERROR + account.last_message = error_msg + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account_id, + "processing_groups", + 100, + status="error", + error=error_msg, + ) + release_task_lock("refresh_m3u_account_groups", account_id) + return error_msg, None + + if len(xc_categories) == 0: + logger.warning("No categories found in XC server response") + + for category in xc_categories: + cat_name = category.get("category_name", "Unknown Category") + cat_id = category.get("category_id", "0") + logger.info(f"Adding category: {cat_name} (ID: {cat_id})") + groups[cat_name] = { + "xc_id": cat_id, + } + except Exception as e: + error_msg = f"Failed to get categories from XC server: {str(e)}" + logger.error(error_msg) + account.status = M3UAccount.Status.ERROR + account.last_message = error_msg + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account_id, + "processing_groups", + 100, + status="error", + error=error_msg, + ) + release_task_lock("refresh_m3u_account_groups", account_id) + return error_msg, None + + except Exception as e: + error_msg = f"Failed 
to create XC Client: {str(e)}" + logger.error(error_msg) + account.status = M3UAccount.Status.ERROR + account.last_message = error_msg + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account_id, + "processing_groups", + 100, + status="error", + error=error_msg, + ) + release_task_lock("refresh_m3u_account_groups", account_id) + return error_msg, None + except Exception as e: + error_msg = f"Unexpected error occurred in XC Client: {str(e)}" + logger.error(error_msg) + account.status = M3UAccount.Status.ERROR + account.last_message = error_msg + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account_id, "processing_groups", 100, status="error", error=error_msg + ) + release_task_lock("refresh_m3u_account_groups", account_id) + return error_msg, None + else: + # Here's the key change - use the success flag from fetch_m3u_lines + lines, success = fetch_m3u_lines(account, use_cache) + if not success: + # If fetch failed, don't continue processing + release_task_lock("refresh_m3u_account_groups", account_id) + return f"Failed to fetch M3U data for account_id={account_id}.", None + + # Log basic file structure for debugging + logger.debug(f"Processing {len(lines)} lines from M3U file") + + line_count = 0 + extinf_count = 0 + url_count = 0 + valid_stream_count = 0 + problematic_lines = [] + + for line_index, line in enumerate(lines): + line_count += 1 + line = line.strip() + + if line.startswith("#EXTINF"): + extinf_count += 1 + parsed = parse_extinf_line(line) + if parsed: + group_title_attr = get_case_insensitive_attr( + parsed["attributes"], "group-title", "" + ) + if group_title_attr: + group_name = group_title_attr + # Log new groups as they're discovered + if group_name not in groups: + logger.debug( + f"Found new group for M3U account {account_id}: '{group_name}'" + ) + groups[group_name] = {} + + extinf_data.append(parsed) + else: + # Log problematic EXTINF lines + logger.warning( + f"Failed to parse EXTINF at line {line_index+1}: {line[:200]}" + ) + problematic_lines.append((line_index + 1, line[:200])) + + elif extinf_data and (line.startswith("http") or line.startswith("rtsp") or line.startswith("rtp") or line.startswith("udp")): + url_count += 1 + # Normalize UDP URLs only (e.g., remove VLC-specific @ prefix) + normalized_url = normalize_stream_url(line) if line.startswith("udp") else line + # Associate URL with the last EXTINF line + extinf_data[-1]["url"] = normalized_url + valid_stream_count += 1 + + # Periodically log progress for large files + if valid_stream_count % 1000 == 0: + logger.debug( + f"Processed {valid_stream_count} valid streams so far for M3U account: {account_id}" + ) + + # Log summary statistics + logger.info( + f"M3U parsing complete - Lines: {line_count}, EXTINF: {extinf_count}, URLs: {url_count}, Valid streams: {valid_stream_count}" + ) + + if problematic_lines: + logger.warning( + f"Found {len(problematic_lines)} problematic lines during parsing" + ) + for i, (line_num, content) in enumerate( + problematic_lines[:10] + ): # Log max 10 examples + logger.warning(f"Problematic line #{i+1} at line {line_num}: {content}") + if len(problematic_lines) > 10: + logger.warning( + f"... and {len(problematic_lines) - 10} more problematic lines" + ) + + # Log group statistics + logger.info( + f"Found {len(groups)} groups in M3U file: {', '.join(list(groups.keys())[:20])}" + + ("..." 
if len(groups) > 20 else "") + ) + + # Cache processed data + cache_path = os.path.join(m3u_dir, f"{account_id}.json") + with open(cache_path, "w", encoding="utf-8") as f: + json.dump( + { + "extinf_data": extinf_data, + "groups": groups, + }, + f, + ) + logger.debug(f"Cached parsed M3U data to {cache_path}") send_m3u_update(account_id, "processing_groups", 0) - groups = list(groups) - cache_path = os.path.join(m3u_dir, f"{account_id}.json") - with open(cache_path, 'w', encoding='utf-8') as f: - json.dump({ - "extinf_data": extinf_data, - "groups": groups, - }, f) + process_groups(account, groups, scan_start_time) - process_groups(account, groups) - - release_task_lock('refresh_m3u_account_groups', account_id) - - send_m3u_update(account_id, "processing_groups", 100) + release_task_lock("refresh_m3u_account_groups", account_id) if not full_refresh: - channel_layer = get_channel_layer() - async_to_sync(channel_layer.group_send)( - 'updates', - { - 'type': 'update', - "data": {"success": True, "type": "m3u_group_refresh", "account": account_id} - } + # Use update() instead of save() to avoid triggering signals + M3UAccount.objects.filter(id=account_id).update( + status=M3UAccount.Status.PENDING_SETUP, + last_message="M3U groups loaded. Please select groups or refresh M3U to complete setup.", + ) + send_m3u_update( + account_id, + "processing_groups", + 100, + status="pending_setup", + message="M3U groups loaded. Please select groups or refresh M3U to complete setup.", ) return extinf_data, groups + +def delete_m3u_refresh_task_by_id(account_id): + """ + Delete the periodic task associated with an M3U account ID. + Can be called directly or from the post_delete signal. + Returns True if a task was found and deleted, False otherwise. + """ + try: + task = None + task_name = f"m3u_account-refresh-{account_id}" + + # Look for task by name + try: + from django_celery_beat.models import PeriodicTask, IntervalSchedule + + task = PeriodicTask.objects.get(name=task_name) + logger.debug(f"Found task by name: {task.id} for M3UAccount {account_id}") + except PeriodicTask.DoesNotExist: + logger.warning(f"No PeriodicTask found with name {task_name}") + return False + + # Now delete the task and its interval + if task: + # Store interval info before deleting the task + interval_id = None + if hasattr(task, "interval") and task.interval: + interval_id = task.interval.id + + # Count how many TOTAL tasks use this interval (including this one) + tasks_with_same_interval = PeriodicTask.objects.filter( + interval_id=interval_id + ).count() + logger.debug( + f"Interval {interval_id} is used by {tasks_with_same_interval} tasks total" + ) + + # Delete the task first + task_id = task.id + task.delete() + logger.debug(f"Successfully deleted periodic task {task_id}") + + # Now check if we should delete the interval + # We only delete if it was the ONLY task using this interval + if interval_id and tasks_with_same_interval == 1: + try: + interval = IntervalSchedule.objects.get(id=interval_id) + logger.debug( + f"Deleting interval schedule {interval_id} (not shared with other tasks)" + ) + interval.delete() + logger.debug(f"Successfully deleted interval {interval_id}") + except IntervalSchedule.DoesNotExist: + logger.warning(f"Interval {interval_id} no longer exists") + elif interval_id: + logger.debug( + f"Not deleting interval {interval_id} as it's shared with {tasks_with_same_interval-1} other tasks" + ) + + return True + return False + except Exception as e: + logger.error( + f"Error deleting periodic task for 
M3UAccount {account_id}: {str(e)}", + exc_info=True, + ) + return False + + +@shared_task +def sync_auto_channels(account_id, scan_start_time=None): + """ + Automatically create/update/delete channels to match streams in groups with auto_channel_sync enabled. + Preserves existing channel UUIDs to maintain M3U link integrity. + Called after M3U refresh completes successfully. + """ + from apps.channels.models import ( + Channel, + ChannelGroup, + ChannelGroupM3UAccount, + Stream, + ChannelStream, + ) + from apps.epg.models import EPGData + from django.utils import timezone + + try: + account = M3UAccount.objects.get(id=account_id) + logger.info(f"Starting auto channel sync for M3U account {account.name}") + + # Always use scan_start_time as the cutoff for last_seen + if scan_start_time is not None: + if isinstance(scan_start_time, str): + scan_start_time = timezone.datetime.fromisoformat(scan_start_time) + else: + scan_start_time = timezone.now() + + # Get groups with auto sync enabled for this account + auto_sync_groups = ChannelGroupM3UAccount.objects.filter( + m3u_account=account, enabled=True, auto_channel_sync=True + ).select_related("channel_group") + + channels_created = 0 + channels_updated = 0 + channels_deleted = 0 + + for group_relation in auto_sync_groups: + channel_group = group_relation.channel_group + start_number = group_relation.auto_sync_channel_start or 1.0 + + # Get force_dummy_epg, group_override, and regex patterns from group custom_properties + group_custom_props = {} + force_dummy_epg = False # Backward compatibility: legacy option to disable EPG + override_group_id = None + name_regex_pattern = None + name_replace_pattern = None + name_match_regex = None + channel_profile_ids = None + channel_sort_order = None + channel_sort_reverse = False + stream_profile_id = None + custom_logo_id = None + custom_epg_id = None # New option: select specific EPG source (takes priority over force_dummy_epg) + if group_relation.custom_properties: + group_custom_props = group_relation.custom_properties + force_dummy_epg = group_custom_props.get("force_dummy_epg", False) + override_group_id = group_custom_props.get("group_override") + name_regex_pattern = group_custom_props.get("name_regex_pattern") + name_replace_pattern = group_custom_props.get( + "name_replace_pattern" + ) + name_match_regex = group_custom_props.get("name_match_regex") + channel_profile_ids = group_custom_props.get("channel_profile_ids") + custom_epg_id = group_custom_props.get("custom_epg_id") + channel_sort_order = group_custom_props.get("channel_sort_order") + channel_sort_reverse = group_custom_props.get( + "channel_sort_reverse", False + ) + stream_profile_id = group_custom_props.get("stream_profile_id") + custom_logo_id = group_custom_props.get("custom_logo_id") + + # Determine which group to use for created channels + target_group = channel_group + if override_group_id: + try: + target_group = ChannelGroup.objects.get(id=override_group_id) + logger.info( + f"Using override group '{target_group.name}' instead of '{channel_group.name}' for auto-created channels" + ) + except ChannelGroup.DoesNotExist: + logger.warning( + f"Override group with ID {override_group_id} not found, using original group '{channel_group.name}'" + ) + + logger.info( + f"Processing auto sync for group: {channel_group.name} (start: {start_number})" + ) + + # Get all current streams in this group for this M3U account, filter out stale streams + current_streams = Stream.objects.filter( + m3u_account=account, + channel_group=channel_group, + 
last_seen__gte=scan_start_time, + ) + + # --- FILTER STREAMS BY NAME MATCH REGEX IF SPECIFIED --- + if name_match_regex: + try: + current_streams = current_streams.filter( + name__iregex=name_match_regex + ) + except re.error as e: + logger.warning( + f"Invalid name_match_regex '{name_match_regex}' for group '{channel_group.name}': {e}. Skipping name filter." + ) + + # --- APPLY CHANNEL SORT ORDER --- + streams_is_list = False # Track if we converted to list + if channel_sort_order and channel_sort_order != "": + if channel_sort_order == "name": + # Use natural sorting for names to handle numbers correctly + current_streams = list(current_streams) + current_streams.sort( + key=lambda stream: natural_sort_key(stream.name), + reverse=channel_sort_reverse, + ) + streams_is_list = True + elif channel_sort_order == "tvg_id": + order_prefix = "-" if channel_sort_reverse else "" + current_streams = current_streams.order_by(f"{order_prefix}tvg_id") + elif channel_sort_order == "updated_at": + order_prefix = "-" if channel_sort_reverse else "" + current_streams = current_streams.order_by( + f"{order_prefix}updated_at" + ) + else: + logger.warning( + f"Unknown channel_sort_order '{channel_sort_order}' for group '{channel_group.name}'. Using provider order." + ) + order_prefix = "-" if channel_sort_reverse else "" + current_streams = current_streams.order_by(f"{order_prefix}id") + else: + # Provider order (default) - can still be reversed + order_prefix = "-" if channel_sort_reverse else "" + current_streams = current_streams.order_by(f"{order_prefix}id") + + # Get existing auto-created channels for this account (regardless of current group) + # We'll find them by their stream associations instead of just group location + existing_channels = Channel.objects.filter( + auto_created=True, auto_created_by=account + ).select_related("logo", "epg_data") + + # Create mapping of existing channels by their associated stream + # This approach finds channels even if they've been moved to different groups + existing_channel_map = {} + for channel in existing_channels: + # Get streams associated with this channel that belong to our M3U account and original group + channel_streams = ChannelStream.objects.filter( + channel=channel, + stream__m3u_account=account, + stream__channel_group=channel_group, # Match streams from the original group + ).select_related("stream") + + # Map each of our M3U account's streams to this channel + for channel_stream in channel_streams: + if channel_stream.stream: + existing_channel_map[channel_stream.stream.id] = channel + + # Track which streams we've processed + processed_stream_ids = set() + + # Check if we have streams - handle both QuerySet and list cases + has_streams = ( + len(current_streams) > 0 + if streams_is_list + else current_streams.exists() + ) + + if not has_streams: + logger.debug(f"No streams found in group {channel_group.name}") + # Delete all existing auto channels if no streams + channels_to_delete = [ch for ch in existing_channel_map.values()] + if channels_to_delete: + deleted_count = len(channels_to_delete) + Channel.objects.filter( + id__in=[ch.id for ch in channels_to_delete] + ).delete() + channels_deleted += deleted_count + logger.debug( + f"Deleted {deleted_count} auto channels (no streams remaining)" + ) + continue + + # Prepare profiles to assign to new channels + from apps.channels.models import ChannelProfile, ChannelProfileMembership + + if ( + channel_profile_ids + and isinstance(channel_profile_ids, list) + and len(channel_profile_ids) > 0 + ): + 
# Convert all to int (in case they're strings) + try: + profile_ids = [int(pid) for pid in channel_profile_ids] + except Exception: + profile_ids = [] + profiles_to_assign = list( + ChannelProfile.objects.filter(id__in=profile_ids) + ) + else: + profiles_to_assign = list(ChannelProfile.objects.all()) + + # Get stream profile to assign if specified + from core.models import StreamProfile + stream_profile_to_assign = None + if stream_profile_id: + try: + stream_profile_to_assign = StreamProfile.objects.get(id=int(stream_profile_id)) + logger.info( + f"Will assign stream profile '{stream_profile_to_assign.name}' to auto-synced streams in group '{channel_group.name}'" + ) + except (StreamProfile.DoesNotExist, ValueError, TypeError): + logger.warning( + f"Stream profile with ID {stream_profile_id} not found for group '{channel_group.name}', streams will use default profile" + ) + stream_profile_to_assign = None + + # Process each current stream + current_channel_number = start_number + + # Always renumber all existing channels to match current sort order + # This ensures channels are always in the correct sequence + channels_to_renumber = [] + temp_channel_number = start_number + + # Get all channel numbers that are already in use by other channels (not auto-created by this account) + used_numbers = set( + Channel.objects.exclude( + auto_created=True, auto_created_by=account + ).values_list("channel_number", flat=True) + ) + + for stream in current_streams: + if stream.id in existing_channel_map: + channel = existing_channel_map[stream.id] + + # Find next available number starting from temp_channel_number + target_number = temp_channel_number + while target_number in used_numbers: + target_number += 1 + + # Add this number to used_numbers so we don't reuse it in this batch + used_numbers.add(target_number) + + if channel.channel_number != target_number: + channel.channel_number = target_number + channels_to_renumber.append(channel) + logger.debug( + f"Will renumber channel '{channel.name}' to {target_number}" + ) + + temp_channel_number += 1.0 + if temp_channel_number % 1 != 0: # Has decimal + temp_channel_number = int(temp_channel_number) + 1.0 + + # Bulk update channel numbers if any need renumbering + if channels_to_renumber: + Channel.objects.bulk_update(channels_to_renumber, ["channel_number"]) + logger.info( + f"Renumbered {len(channels_to_renumber)} channels to maintain sort order" + ) + + # Reset channel number counter for processing new channels + current_channel_number = start_number + + for stream in current_streams: + processed_stream_ids.add(stream.id) + try: + # Parse custom properties for additional info + stream_custom_props = stream.custom_properties or {} + tvc_guide_stationid = stream_custom_props.get("tvc-guide-stationid") + + # --- REGEX FIND/REPLACE LOGIC --- + original_name = stream.name + new_name = original_name + if name_regex_pattern is not None: + # If replace is None, treat as empty string (remove match) + replace = ( + name_replace_pattern + if name_replace_pattern is not None + else "" + ) + try: + # Convert $1, $2, etc. to \1, \2, etc. for consistency with M3U profiles + safe_replace_pattern = re.sub(r'\$(\d+)', r'\\\1', replace) + new_name = re.sub( + name_regex_pattern, safe_replace_pattern, original_name + ) + except re.error as e: + logger.warning( + f"Regex error for group '{channel_group.name}': {e}. Using original name." 
+ ) + new_name = original_name + + # Check if we already have a channel for this stream + existing_channel = existing_channel_map.get(stream.id) + + if existing_channel: + # Update existing channel if needed (channel number already handled above) + channel_updated = False + + # Use new_name instead of stream.name + if existing_channel.name != new_name: + existing_channel.name = new_name + channel_updated = True + + if existing_channel.tvg_id != stream.tvg_id: + existing_channel.tvg_id = stream.tvg_id + channel_updated = True + + if existing_channel.tvc_guide_stationid != tvc_guide_stationid: + existing_channel.tvc_guide_stationid = tvc_guide_stationid + channel_updated = True + + # Check if channel group needs to be updated (in case override was added/changed) + if existing_channel.channel_group != target_group: + existing_channel.channel_group = target_group + channel_updated = True + logger.info( + f"Moved auto channel '{existing_channel.name}' from '{existing_channel.channel_group.name if existing_channel.channel_group else 'None'}' to '{target_group.name}'" + ) + + # Handle logo updates + current_logo = None + if custom_logo_id: + # Use the custom logo specified in group settings + from apps.channels.models import Logo + try: + current_logo = Logo.objects.get(id=custom_logo_id) + except Logo.DoesNotExist: + logger.warning( + f"Custom logo with ID {custom_logo_id} not found for existing channel, falling back to stream logo" + ) + # Fall back to stream logo if custom logo not found + if stream.logo_url: + current_logo, _ = Logo.objects.get_or_create( + url=stream.logo_url, + defaults={ + "name": stream.name or stream.tvg_id or "Unknown" + }, + ) + elif stream.logo_url: + # No custom logo configured, use stream logo + from apps.channels.models import Logo + + current_logo, _ = Logo.objects.get_or_create( + url=stream.logo_url, + defaults={ + "name": stream.name or stream.tvg_id or "Unknown" + }, + ) + + if existing_channel.logo != current_logo: + existing_channel.logo = current_logo + channel_updated = True + + # Handle EPG data updates + current_epg_data = None + if custom_epg_id: + # Use the custom EPG specified in group settings (e.g., a dummy EPG) + from apps.epg.models import EPGSource + try: + epg_source = EPGSource.objects.get(id=custom_epg_id) + # For dummy EPGs, select the first (and typically only) EPGData entry from this source + if epg_source.source_type == 'dummy': + current_epg_data = EPGData.objects.filter( + epg_source=epg_source + ).first() + if not current_epg_data: + logger.warning( + f"No EPGData found for dummy EPG source {epg_source.name} (ID: {custom_epg_id})" + ) + else: + # For non-dummy sources, try to find existing EPGData by tvg_id + if stream.tvg_id: + current_epg_data = EPGData.objects.filter( + tvg_id=stream.tvg_id, + epg_source=epg_source + ).first() + except EPGSource.DoesNotExist: + logger.warning( + f"Custom EPG source with ID {custom_epg_id} not found for existing channel, falling back to auto-match" + ) + # Fall back to auto-match by tvg_id + if stream.tvg_id and not force_dummy_epg: + current_epg_data = EPGData.objects.filter( + tvg_id=stream.tvg_id + ).first() + elif stream.tvg_id and not force_dummy_epg: + # Auto-match EPG by tvg_id (original behavior) + current_epg_data = EPGData.objects.filter( + tvg_id=stream.tvg_id + ).first() + # If force_dummy_epg is True and no custom_epg_id, current_epg_data stays None + + if existing_channel.epg_data != current_epg_data: + existing_channel.epg_data = current_epg_data + channel_updated = True + + # Handle 
stream profile updates for the channel + if stream_profile_to_assign and existing_channel.stream_profile != stream_profile_to_assign: + existing_channel.stream_profile = stream_profile_to_assign + channel_updated = True + + if channel_updated: + existing_channel.save() + channels_updated += 1 + logger.debug( + f"Updated auto channel: {existing_channel.channel_number} - {existing_channel.name}" + ) + + # Update channel profile memberships for existing channels + current_memberships = set( + ChannelProfileMembership.objects.filter( + channel=existing_channel, enabled=True + ).values_list("channel_profile_id", flat=True) + ) + + target_profile_ids = set( + profile.id for profile in profiles_to_assign + ) + + # Only update if memberships have changed + if current_memberships != target_profile_ids: + # Disable all current memberships + ChannelProfileMembership.objects.filter( + channel=existing_channel + ).update(enabled=False) + + # Enable/create memberships for target profiles + for profile in profiles_to_assign: + membership, created = ( + ChannelProfileMembership.objects.get_or_create( + channel_profile=profile, + channel=existing_channel, + defaults={"enabled": True}, + ) + ) + if not created and not membership.enabled: + membership.enabled = True + membership.save() + + logger.debug( + f"Updated profile memberships for auto channel: {existing_channel.name}" + ) + + else: + # Create new channel + # Find next available channel number + target_number = current_channel_number + while target_number in used_numbers: + target_number += 1 + + # Add this number to used_numbers + used_numbers.add(target_number) + + channel = Channel.objects.create( + channel_number=target_number, + name=new_name, + tvg_id=stream.tvg_id, + tvc_guide_stationid=tvc_guide_stationid, + channel_group=target_group, + user_level=0, + auto_created=True, + auto_created_by=account, + ) + + # Associate the stream with the channel + ChannelStream.objects.create( + channel=channel, stream=stream, order=0 + ) + + # Assign to correct profiles + memberships = [ + ChannelProfileMembership( + channel_profile=profile, channel=channel, enabled=True + ) + for profile in profiles_to_assign + ] + if memberships: + ChannelProfileMembership.objects.bulk_create(memberships) + + # Try to match EPG data + if custom_epg_id: + # Use the custom EPG specified in group settings (e.g., a dummy EPG) + from apps.epg.models import EPGSource + try: + epg_source = EPGSource.objects.get(id=custom_epg_id) + # For dummy EPGs, select the first (and typically only) EPGData entry from this source + if epg_source.source_type == 'dummy': + epg_data = EPGData.objects.filter( + epg_source=epg_source + ).first() + if epg_data: + channel.epg_data = epg_data + channel.save(update_fields=["epg_data"]) + else: + logger.warning( + f"No EPGData found for dummy EPG source {epg_source.name} (ID: {custom_epg_id})" + ) + else: + # For non-dummy sources, try to find existing EPGData by tvg_id + if stream.tvg_id: + epg_data = EPGData.objects.filter( + tvg_id=stream.tvg_id, + epg_source=epg_source + ).first() + if epg_data: + channel.epg_data = epg_data + channel.save(update_fields=["epg_data"]) + except EPGSource.DoesNotExist: + logger.warning( + f"Custom EPG source with ID {custom_epg_id} not found, falling back to auto-match" + ) + # Fall back to auto-match by tvg_id + if stream.tvg_id and not force_dummy_epg: + epg_data = EPGData.objects.filter( + tvg_id=stream.tvg_id + ).first() + if epg_data: + channel.epg_data = epg_data + channel.save(update_fields=["epg_data"]) + elif 
stream.tvg_id and not force_dummy_epg: + # Auto-match EPG by tvg_id (original behavior) + epg_data = EPGData.objects.filter( + tvg_id=stream.tvg_id + ).first() + if epg_data: + channel.epg_data = epg_data + channel.save(update_fields=["epg_data"]) + elif force_dummy_epg: + # Force dummy EPG with no custom EPG selected (set to None) + channel.epg_data = None + channel.save(update_fields=["epg_data"]) + + # Handle logo + if custom_logo_id: + # Use the custom logo specified in group settings + from apps.channels.models import Logo + try: + custom_logo = Logo.objects.get(id=custom_logo_id) + channel.logo = custom_logo + channel.save(update_fields=["logo"]) + except Logo.DoesNotExist: + logger.warning( + f"Custom logo with ID {custom_logo_id} not found, falling back to stream logo" + ) + # Fall back to stream logo if custom logo not found + if stream.logo_url: + logo, _ = Logo.objects.get_or_create( + url=stream.logo_url, + defaults={ + "name": stream.name or stream.tvg_id or "Unknown" + }, + ) + channel.logo = logo + channel.save(update_fields=["logo"]) + elif stream.logo_url: + from apps.channels.models import Logo + + logo, _ = Logo.objects.get_or_create( + url=stream.logo_url, + defaults={ + "name": stream.name or stream.tvg_id or "Unknown" + }, + ) + channel.logo = logo + channel.save(update_fields=["logo"]) + + # Handle stream profile assignment + if stream_profile_to_assign: + channel.stream_profile = stream_profile_to_assign + channel.save(update_fields=['stream_profile']) + channels_created += 1 + logger.debug( + f"Created auto channel: {channel.channel_number} - {channel.name}" + ) + + # Increment channel number for next iteration + current_channel_number += 1.0 + if current_channel_number % 1 != 0: # Has decimal + current_channel_number = int(current_channel_number) + 1.0 + + except Exception as e: + logger.error( + f"Error processing auto channel for stream {stream.name}: {str(e)}" + ) + continue + + # Delete channels for streams that no longer exist + channels_to_delete = [] + for stream_id, channel in existing_channel_map.items(): + if stream_id not in processed_stream_ids: + channels_to_delete.append(channel) + + if channels_to_delete: + deleted_count = len(channels_to_delete) + Channel.objects.filter( + id__in=[ch.id for ch in channels_to_delete] + ).delete() + channels_deleted += deleted_count + logger.debug( + f"Deleted {deleted_count} auto channels for removed streams" + ) + + # Additional cleanup: Remove auto-created channels that no longer have any valid streams + # This handles the case where streams were deleted due to stale retention policy + orphaned_channels = Channel.objects.filter( + auto_created=True, + auto_created_by=account + ).exclude( + # Exclude channels that still have valid stream associations + id__in=ChannelStream.objects.filter( + stream__m3u_account=account, + stream__isnull=False + ).values_list('channel_id', flat=True) + ) + + orphaned_count = orphaned_channels.count() + if orphaned_count > 0: + orphaned_channels.delete() + channels_deleted += orphaned_count + logger.info( + f"Deleted {orphaned_count} orphaned auto channels with no valid streams" + ) + + logger.info( + f"Auto channel sync complete for account {account.name}: {channels_created} created, {channels_updated} updated, {channels_deleted} deleted" + ) + return f"Auto sync: {channels_created} channels created, {channels_updated} updated, {channels_deleted} deleted" + + except Exception as e: + logger.error(f"Error in auto channel sync for account {account_id}: {str(e)}") + return f"Auto sync 
error: {str(e)}" + + +def get_transformed_credentials(account, profile=None): + """ + Get transformed credentials for XtreamCodes API calls. + + Args: + account: M3UAccount instance + profile: M3UAccountProfile instance (optional, if not provided will use primary profile) + + Returns: + tuple: (transformed_url, transformed_username, transformed_password) + """ + import re + import urllib.parse + + # If no profile is provided, find the primary active profile + if profile is None: + try: + from apps.m3u.models import M3UAccountProfile + profile = M3UAccountProfile.objects.filter( + m3u_account=account, + is_active=True + ).first() + if profile: + logger.debug(f"Using primary profile '{profile.name}' for URL transformation") + else: + logger.debug(f"No active profiles found for account {account.name}, using base credentials") + except Exception as e: + logger.warning(f"Could not get primary profile for account {account.name}: {e}") + profile = None + + base_url = account.server_url + base_username = account.username + base_password = account.password # Build a complete URL with credentials (similar to how IPTV URLs are structured) + # Format: http://server.com:port/live/username/password/1234.ts + if base_url and base_username and base_password: + # Remove trailing slash from server URL if present + clean_server_url = base_url.rstrip('/') + + # Build the complete URL with embedded credentials + complete_url = f"{clean_server_url}/live/{base_username}/{base_password}/1234.ts" + logger.debug(f"Built complete URL: {complete_url}") + + # Apply profile-specific transformations if profile is provided + if profile and profile.search_pattern and profile.replace_pattern: + try: + # Handle backreferences in the replacement pattern + safe_replace_pattern = re.sub(r'\$(\d+)', r'\\\1', profile.replace_pattern) + + # Apply transformation to the complete URL + transformed_complete_url = re.sub(profile.search_pattern, safe_replace_pattern, complete_url) + logger.info(f"Transformed complete URL: {complete_url} -> {transformed_complete_url}") + + # Extract components from the transformed URL + # Pattern: http://server.com:port/live/username/password/1234.ts + parsed_url = urllib.parse.urlparse(transformed_complete_url) + path_parts = [part for part in parsed_url.path.split('/') if part] + + if len(path_parts) >= 2: + # Extract username and password from path + transformed_username = path_parts[1] + transformed_password = path_parts[2] + + # Rebuild server URL without the username/password path + transformed_url = f"{parsed_url.scheme}://{parsed_url.netloc}" + if parsed_url.port: + transformed_url = f"{parsed_url.scheme}://{parsed_url.hostname}:{parsed_url.port}" + + logger.debug(f"Extracted transformed credentials:") + logger.debug(f" Server URL: {transformed_url}") + logger.debug(f" Username: {transformed_username}") + logger.debug(f" Password: {transformed_password}") + + return transformed_url, transformed_username, transformed_password + else: + logger.warning(f"Could not extract credentials from transformed URL: {transformed_complete_url}") + return base_url, base_username, base_password + + except Exception as e: + logger.error(f"Error transforming URL for profile {profile.name if profile else 'unknown'}: {e}") + return base_url, base_username, base_password + else: + # No profile or no transformation patterns + return base_url, base_username, base_password + else: + logger.warning(f"Missing credentials for account {account.name}") + return base_url, base_username, base_password + + +@shared_task +def 
refresh_account_profiles(account_id): + """Refresh account information for all active profiles of an XC account. + + This task runs asynchronously in the background after account refresh completes. + It includes rate limiting delays between profile authentications to prevent provider bans. + """ + from django.conf import settings + import time + + try: + account = M3UAccount.objects.get(id=account_id, is_active=True) + + if account.account_type != M3UAccount.Types.XC: + logger.debug(f"Account {account_id} is not XC type, skipping profile refresh") + return f"Account {account_id} is not an XtreamCodes account" + + from apps.m3u.models import M3UAccountProfile + + profiles = M3UAccountProfile.objects.filter( + m3u_account=account, + is_active=True + ) + + if not profiles.exists(): + logger.info(f"No active profiles found for account {account.name}") + return f"No active profiles for account {account_id}" + + # Get user agent for this account + try: + user_agent_string = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" + if account.user_agent_id: + from core.models import UserAgent + ua_obj = UserAgent.objects.get(id=account.user_agent_id) + if ua_obj and hasattr(ua_obj, "user_agent") and ua_obj.user_agent: + user_agent_string = ua_obj.user_agent + except Exception as e: + logger.warning(f"Error getting user agent, using fallback: {str(e)}") + logger.debug(f"Using user agent for profile refresh: {user_agent_string}") + # Get rate limiting delay from settings + profile_delay = getattr(settings, 'XC_PROFILE_REFRESH_DELAY', 2.5) + + profiles_updated = 0 + profiles_failed = 0 + + logger.info(f"Starting background refresh for {profiles.count()} profiles of account {account.name}") + + for idx, profile in enumerate(profiles): + try: + # Add delay between profiles to prevent rate limiting (except for first profile) + if idx > 0: + logger.info(f"Waiting {profile_delay}s before refreshing next profile to avoid rate limiting") + time.sleep(profile_delay) + + # Get transformed credentials for this specific profile + profile_url, profile_username, profile_password = get_transformed_credentials(account, profile) + + # Create a separate XC client for this profile's credentials + with XCClient( + profile_url, + profile_username, + profile_password, + user_agent_string + ) as profile_client: + # Authenticate with this profile's credentials + if profile_client.authenticate(): + # Get account information specific to this profile's credentials + profile_account_info = profile_client.get_account_info() + + # Merge with existing custom_properties if they exist + existing_props = profile.custom_properties or {} + existing_props.update(profile_account_info) + profile.custom_properties = existing_props + profile.save(update_fields=['custom_properties']) + + profiles_updated += 1 + logger.info(f"Updated account information for profile '{profile.name}' ({profiles_updated}/{profiles.count()})") + else: + profiles_failed += 1 + logger.warning(f"Failed to authenticate profile '{profile.name}' with transformed credentials") + + except Exception as profile_error: + profiles_failed += 1 + logger.error(f"Failed to update account information for profile '{profile.name}': {str(profile_error)}") + # Continue with other profiles even if one fails + + result_msg = f"Profile refresh complete for account {account.name}: {profiles_updated} updated, {profiles_failed} failed" + logger.info(result_msg) + return result_msg + + except M3UAccount.DoesNotExist: + error_msg = f"Account {account_id} not found" + 
logger.error(error_msg) + return error_msg + except Exception as e: + error_msg = f"Error refreshing profiles for account {account_id}: {str(e)}" + logger.error(error_msg) + return error_msg + + +@shared_task +def refresh_account_info(profile_id): + """Refresh only the account information for a specific M3U profile.""" + if not acquire_task_lock("refresh_account_info", profile_id): + return f"Account info refresh task already running for profile_id={profile_id}." + + try: + from apps.m3u.models import M3UAccountProfile + import re + + profile = M3UAccountProfile.objects.get(id=profile_id) + account = profile.m3u_account + + if account.account_type != M3UAccount.Types.XC: + release_task_lock("refresh_account_info", profile_id) + return f"Profile {profile_id} belongs to account {account.id} which is not an XtreamCodes account." + + # Get transformed credentials using the helper function + transformed_url, transformed_username, transformed_password = get_transformed_credentials(account, profile) + + # Initialize XtreamCodes client with extracted/transformed credentials + client = XCClient( + transformed_url, + transformed_username, + transformed_password, + account.get_user_agent(), + ) # Authenticate and get account info + auth_result = client.authenticate() + if not auth_result: + error_msg = f"Authentication failed for profile {profile.name} ({profile_id})" + logger.error(error_msg) + + # Send error notification to frontend via websocket + send_websocket_update( + "updates", + "update", + { + "type": "account_info_refresh_error", + "profile_id": profile_id, + "profile_name": profile.name, + "error": "Authentication failed with the provided credentials", + "message": f"Failed to authenticate profile '{profile.name}'. Please check the credentials." + } + ) + + release_task_lock("refresh_account_info", profile_id) + return error_msg + + # Get account information + account_info = client.get_account_info() + + # Update only this specific profile with the new account info + if not profile.custom_properties: + profile.custom_properties = {} + profile.custom_properties.update(account_info) + profile.save() + + # Send success notification to frontend via websocket + send_websocket_update( + "updates", + "update", + { + "type": "account_info_refresh_success", + "profile_id": profile_id, + "profile_name": profile.name, + "message": f"Account information successfully refreshed for profile '{profile.name}'" + } + ) + + release_task_lock("refresh_account_info", profile_id) + return f"Account info refresh completed for profile {profile_id} ({profile.name})." 
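The profile credential handling that refresh_account_info relies on (get_transformed_credentials, defined above) reduces to: embed the base credentials in a full stream-style URL, apply the profile's search/replace regex after converting "$1"-style backreferences to Python's "\1" form, then parse the username, password and server back out of the transformed URL. Below is a minimal standalone sketch of that logic; the server URL, credentials and patterns are illustrative placeholders, and transform_credentials is a hypothetical helper rather than the project function.

# Sketch only, under the assumptions stated above.
import re
from urllib.parse import urlparse

def transform_credentials(server_url, username, password, search_pattern, replace_pattern):
    # Build a complete stream-style URL so the profile pattern can match any part of it.
    complete_url = f"{server_url.rstrip('/')}/live/{username}/{password}/1234.ts"

    # Convert "$1"-style backreferences (as entered in the UI) to Python's "\1" form.
    safe_replace = re.sub(r'\$(\d+)', r'\\\1', replace_pattern)
    transformed = re.sub(search_pattern, safe_replace, complete_url)

    # Pull the pieces back out of the transformed URL:
    # scheme://host:port/live/<username>/<password>/1234.ts
    parsed = urlparse(transformed)
    parts = [p for p in parsed.path.split('/') if p]
    if len(parts) >= 3:
        return f"{parsed.scheme}://{parsed.netloc}", parts[1], parts[2]
    # Fall back to the untouched credentials if the URL no longer has the expected shape.
    return server_url, username, password

# Example: a profile whose replace pattern appends "_backup" to the username.
url, user, pwd = transform_credentials(
    "http://example-provider.tv:8080", "alice", "secret",
    search_pattern=r"/live/([^/]+)/", replace_pattern=r"/live/$1_backup/",
)
# url  -> "http://example-provider.tv:8080"
# user -> "alice_backup"
# pwd  -> "secret"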
+ + except M3UAccountProfile.DoesNotExist: + error_msg = f"Profile {profile_id} not found" + logger.error(error_msg) + + send_websocket_update( + "updates", + "update", + { + "type": "account_refresh_error", + "profile_id": profile_id, + "error": "Profile not found", + "message": f"Profile {profile_id} not found" + } + ) + + release_task_lock("refresh_account_info", profile_id) + return error_msg + except Exception as e: + error_msg = f"Error refreshing account info for profile {profile_id}: {str(e)}" + logger.error(error_msg) + + send_websocket_update( + "updates", + "update", + { + "type": "account_refresh_error", + "profile_id": profile_id, + "error": str(e), + "message": f"Failed to refresh account info: {str(e)}" + } + ) + + release_task_lock("refresh_account_info", profile_id) + return error_msg @shared_task def refresh_single_m3u_account(account_id): """Splits M3U processing into chunks and dispatches them as parallel tasks.""" - if not acquire_task_lock('refresh_single_m3u_account', account_id): + if not acquire_task_lock("refresh_single_m3u_account", account_id): return f"Task already running for account_id={account_id}." - # redis_client = RedisClient.get_client() # Record start time - start_time = time.time() + refresh_start_timestamp = timezone.now() # For the cleanup function + start_time = time.time() # For tracking elapsed time as float + streams_created = 0 + streams_updated = 0 + streams_deleted = 0 try: account = M3UAccount.objects.get(id=account_id, is_active=True) if not account.is_active: - logger.info(f"Account {account_id} is not active, skipping.") + logger.debug(f"Account {account_id} is not active, skipping.") + release_task_lock("refresh_single_m3u_account", account_id) return + # Set status to fetching + account.status = M3UAccount.Status.FETCHING + account.save(update_fields=['status']) + filters = list(account.filters.all()) + + # Check if VOD is enabled for this account + vod_enabled = False + if account.custom_properties: + custom_props = account.custom_properties or {} + vod_enabled = custom_props.get('enable_vod', False) + except M3UAccount.DoesNotExist: - release_task_lock('refresh_single_m3u_account', account_id) - return f"M3UAccount with ID={account_id} not found or inactive." + # The M3U account doesn't exist, so delete the periodic task if it exists + logger.warning( + f"M3U account with ID {account_id} not found, but task was triggered. Cleaning up orphaned task." 
+ ) + + # Call the helper function to delete the task + if delete_m3u_refresh_task_by_id(account_id): + logger.info( + f"Successfully cleaned up orphaned task for M3U account {account_id}" + ) + else: + logger.debug(f"No orphaned task found for M3U account {account_id}") + + release_task_lock("refresh_single_m3u_account", account_id) + return f"M3UAccount with ID={account_id} not found or inactive, task cleaned up" # Fetch M3U lines and handle potential issues - # lines = fetch_m3u_lines(account) # Extracted fetch logic into separate function extinf_data = [] groups = None cache_path = os.path.join(m3u_dir, f"{account_id}.json") if os.path.exists(cache_path): - with open(cache_path, 'r') as file: - data = json.load(file) + try: + with open(cache_path, "r") as file: + data = json.load(file) - extinf_data = data['extinf_data'] - groups = data['groups'] + extinf_data = data["extinf_data"] + groups = data["groups"] + except json.JSONDecodeError as e: + # Handle corrupted JSON file + logger.error( + f"Error parsing cached M3U data for account {account_id}: {str(e)}" + ) + + # Backup the corrupted file for potential analysis + backup_path = f"{cache_path}.corrupted" + try: + os.rename(cache_path, backup_path) + logger.info(f"Renamed corrupted cache file to {backup_path}") + except OSError as rename_err: + logger.warning( + f"Failed to rename corrupted cache file: {str(rename_err)}" + ) + + # Reset the data to empty structures + extinf_data = [] + groups = None + except Exception as e: + logger.error(f"Unexpected error reading cached M3U data: {str(e)}") + extinf_data = [] + groups = None if not extinf_data: try: - extinf_data, groups = refresh_m3u_groups(account_id, full_refresh=True) - if not extinf_data or not groups: - release_task_lock('refresh_single_m3u_account', account_id) - return "Failed to update m3u account, task may already be running" - except: - release_task_lock('refresh_single_m3u_account', account_id) + logger.info(f"Calling refresh_m3u_groups for account {account_id}") + result = refresh_m3u_groups(account_id, full_refresh=True, scan_start_time=refresh_start_timestamp) + logger.trace(f"refresh_m3u_groups result: {result}") + + # Check for completely empty result or missing groups + if not result or result[1] is None: + logger.error( + f"Failed to refresh M3U groups for account {account_id}: {result}" + ) + release_task_lock("refresh_single_m3u_account", account_id) + return "Failed to update m3u account - download failed or other error" + + extinf_data, groups = result + + # XC accounts can have empty extinf_data but valid groups + try: + account = M3UAccount.objects.get(id=account_id) + is_xc_account = account.account_type == M3UAccount.Types.XC + except M3UAccount.DoesNotExist: + is_xc_account = False + + # For XC accounts, empty extinf_data is normal at this stage + if not extinf_data and not is_xc_account: + logger.error(f"No streams found for non-XC account {account_id}") + account.status = M3UAccount.Status.ERROR + account.last_message = "No streams found in M3U source" + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account_id, "parsing", 100, status="error", error="No streams found" + ) + except Exception as e: + logger.error(f"Exception in refresh_m3u_groups: {str(e)}", exc_info=True) + account.status = M3UAccount.Status.ERROR + account.last_message = f"Error refreshing M3U groups: {str(e)}" + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account_id, + "parsing", + 100, + status="error", + error=f"Error 
refreshing M3U groups: {str(e)}", + ) + release_task_lock("refresh_single_m3u_account", account_id) return "Failed to update m3u account" + # Only proceed with parsing if we actually have data and no errors were encountered + # Get account type to handle XC accounts differently + try: + is_xc_account = account.account_type == M3UAccount.Types.XC + except Exception: + is_xc_account = False + + # Modified validation logic for different account types + if (not groups) or (not is_xc_account and not extinf_data): + logger.error(f"No data to process for account {account_id}") + account.status = M3UAccount.Status.ERROR + account.last_message = "No data available for processing" + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account_id, + "parsing", + 100, + status="error", + error="No data available for processing", + ) + release_task_lock("refresh_single_m3u_account", account_id) + return "Failed to update m3u account, no data available" + hash_keys = CoreSettings.get_m3u_hash_key().split(",") - existing_groups = {group.name: group.id for group in ChannelGroup.objects.filter( - m3u_account__m3u_account=account, # Filter by the M3UAccount - m3u_account__enabled=True # Filter by the enabled flag in the join table - )} + existing_groups = { + group.name: group.id + for group in ChannelGroup.objects.filter( + m3u_accounts__m3u_account=account, # Filter by the M3UAccount + m3u_accounts__enabled=True, # Filter by the enabled flag in the join table + ) + } - # Break into batches and process in parallel - batches = [extinf_data[i:i + BATCH_SIZE] for i in range(0, len(extinf_data), BATCH_SIZE)] - task_group = group(process_m3u_batch.s(account_id, batch, existing_groups, hash_keys) for batch in batches) + try: + # Set status to parsing + account.status = M3UAccount.Status.PARSING + account.save(update_fields=["status"]) - total_batches = len(batches) - completed_batches = 0 - logger.debug(f"Dispatched {len(batches)} parallel tasks for account_id={account_id}.") + # Commit any pending transactions before threading + from django.db import transaction + transaction.commit() - # result = task_group.apply_async() - result = task_group.apply_async() + # Initialize stream counters + streams_created = 0 + streams_updated = 0 - while completed_batches < total_batches: - for async_result in result: - if async_result.ready(): # If the task has completed - task_result = async_result.result # The result of the task - logger.debug(f"Task completed with result: {task_result}") - completed_batches += 1 + if account.account_type == M3UAccount.Types.STADNARD: + logger.debug( + f"Processing Standard account ({account_id}) with groups: {existing_groups}" + ) + # Break into batches and process with threading - use global batch size + batches = [ + extinf_data[i : i + BATCH_SIZE] + for i in range(0, len(extinf_data), BATCH_SIZE) + ] - # Calculate progress - progress = int((completed_batches / total_batches) * 100) + logger.info(f"Processing {len(extinf_data)} streams in {len(batches)} thread batches") - # Send progress update via Channels - # Don't send 100% because we want to clean up after - if progress == 100: - progress = 99 + # Use 2 threads for optimal database connection handling + max_workers = min(2, len(batches)) + logger.debug(f"Using {max_workers} threads for processing") - send_m3u_update(account_id, "parsing", progress) + with ThreadPoolExecutor(max_workers=max_workers) as executor: + # Submit batch processing tasks using direct functions (now thread-safe) + future_to_batch = { + 
executor.submit(process_m3u_batch_direct, account_id, batch, existing_groups, hash_keys): i + for i, batch in enumerate(batches) + } - # Optionally remove completed task from the group to prevent processing it again - result.remove(async_result) + completed_batches = 0 + total_batches = len(batches) + + # Process completed batches as they finish + for future in as_completed(future_to_batch): + batch_idx = future_to_batch[future] + try: + result = future.result() + completed_batches += 1 + + # Extract stream counts from result + if isinstance(result, str): + try: + created_match = re.search(r"(\d+) created", result) + updated_match = re.search(r"(\d+) updated", result) + if created_match and updated_match: + created_count = int(created_match.group(1)) + updated_count = int(updated_match.group(1)) + streams_created += created_count + streams_updated += updated_count + except (AttributeError, ValueError): + pass + + # Send progress update + progress = int((completed_batches / total_batches) * 100) + current_elapsed = time.time() - start_time + + if progress > 0: + estimated_total = (current_elapsed / progress) * 100 + time_remaining = max(0, estimated_total - current_elapsed) + else: + time_remaining = 0 + + send_m3u_update( + account_id, + "parsing", + progress, + elapsed_time=current_elapsed, + time_remaining=time_remaining, + streams_processed=streams_created + streams_updated, + ) + + logger.debug(f"Thread batch {completed_batches}/{total_batches} completed") + + except Exception as e: + logger.error(f"Error in thread batch {batch_idx}: {str(e)}") + completed_batches += 1 # Still count it to avoid hanging + + logger.info(f"Thread-based processing completed for account {account_id}") + else: + # For XC accounts, get the groups with their custom properties containing xc_id + logger.debug(f"Processing XC account with groups: {existing_groups}") + + # Get the ChannelGroupM3UAccount entries with their custom_properties + channel_group_relationships = ChannelGroupM3UAccount.objects.filter( + m3u_account=account, enabled=True + ).select_related("channel_group") + + filtered_groups = {} + for rel in channel_group_relationships: + group_name = rel.channel_group.name + group_id = rel.channel_group.id + + # Load the custom properties with the xc_id + custom_props = rel.custom_properties or {} + if "xc_id" in custom_props: + filtered_groups[group_name] = { + "xc_id": custom_props["xc_id"], + "channel_group_id": group_id, + } + logger.debug( + f"Added group {group_name} with xc_id {custom_props['xc_id']}" + ) + else: + logger.warning( + f"No xc_id found in custom properties for group {group_name}" + ) + + logger.info( + f"Filtered {len(filtered_groups)} groups for processing: {filtered_groups}" + ) + + # Collect all XC streams in a single API call and filter by enabled categories + logger.info("Fetching all XC streams from provider and filtering by enabled categories...") + all_xc_streams = collect_xc_streams(account_id, filtered_groups) + + if not all_xc_streams: + logger.warning("No streams collected from XC groups") else: - logger.debug(f"Task is still running.") + # Now batch by stream count (like standard M3U processing) + batches = [ + all_xc_streams[i : i + BATCH_SIZE] + for i in range(0, len(all_xc_streams), BATCH_SIZE) + ] - # Run cleanup - cleanup_streams(account_id) - send_m3u_update(account_id, "parsing", 100) + logger.info(f"Processing {len(all_xc_streams)} XC streams in {len(batches)} batches") - end_time = time.time() + # Use threading for XC stream processing - now with consistent 
batch sizes + max_workers = min(4, len(batches)) + logger.debug(f"Using {max_workers} threads for XC stream processing") - # Calculate elapsed time - elapsed_time = end_time - start_time - account.save(update_fields=['updated_at']) + with ThreadPoolExecutor(max_workers=max_workers) as executor: + # Submit stream batch processing tasks (reuse standard M3U processing) + future_to_batch = { + executor.submit(process_m3u_batch_direct, account_id, batch, existing_groups, hash_keys): i + for i, batch in enumerate(batches) + } - print(f"Function took {elapsed_time} seconds to execute.") + completed_batches = 0 + total_batches = len(batches) + + # Process completed batches as they finish + for future in as_completed(future_to_batch): + batch_idx = future_to_batch[future] + try: + result = future.result() + completed_batches += 1 + + # Extract stream counts from result + if isinstance(result, str): + try: + created_match = re.search(r"(\d+) created", result) + updated_match = re.search(r"(\d+) updated", result) + if created_match and updated_match: + created_count = int(created_match.group(1)) + updated_count = int(updated_match.group(1)) + streams_created += created_count + streams_updated += updated_count + except (AttributeError, ValueError): + pass + + # Send progress update + progress = int((completed_batches / total_batches) * 100) + current_elapsed = time.time() - start_time + + if progress > 0: + estimated_total = (current_elapsed / progress) * 100 + time_remaining = max(0, estimated_total - current_elapsed) + else: + time_remaining = 0 + + send_m3u_update( + account_id, + "parsing", + progress, + elapsed_time=current_elapsed, + time_remaining=time_remaining, + streams_processed=streams_created + streams_updated, + ) + + logger.debug(f"XC thread batch {completed_batches}/{total_batches} completed") + + except Exception as e: + logger.error(f"Error in XC thread batch {batch_idx}: {str(e)}") + completed_batches += 1 # Still count it to avoid hanging + + logger.info(f"XC thread-based processing completed for account {account_id}") + + # Ensure all database transactions are committed before cleanup + logger.info( + f"All thread processing completed, ensuring DB transactions are committed before cleanup" + ) + # Force a simple DB query to ensure connection sync + Stream.objects.filter( + id=-1 + ).exists() # This will never find anything but ensures DB sync + + # Mark streams that weren't seen in this refresh as stale (pending deletion) + stale_stream_count = Stream.objects.filter( + m3u_account=account, + last_seen__lt=refresh_start_timestamp + ).update(is_stale=True) + logger.info(f"Marked {stale_stream_count} streams as stale for account {account_id}") + + # Mark group relationships that weren't seen in this refresh as stale (pending deletion) + stale_group_count = ChannelGroupM3UAccount.objects.filter( + m3u_account=account, + last_seen__lt=refresh_start_timestamp + ).update(is_stale=True) + logger.info(f"Marked {stale_group_count} group relationships as stale for account {account_id}") + + # Now run cleanup + streams_deleted = cleanup_streams(account_id, refresh_start_timestamp) + + # Cleanup stale group relationships (follows same retention policy as streams) + cleanup_stale_group_relationships(account, refresh_start_timestamp) + + # Run auto channel sync after successful refresh + auto_sync_message = "" + try: + sync_result = sync_auto_channels( + account_id, scan_start_time=str(refresh_start_timestamp) + ) + logger.info( + f"Auto channel sync result for account {account_id}: 
{sync_result}" + ) + if sync_result and "created" in sync_result: + auto_sync_message = f" {sync_result}." + except Exception as e: + logger.error( + f"Error running auto channel sync for account {account_id}: {str(e)}" + ) + + # Calculate elapsed time + elapsed_time = time.time() - start_time + + # Calculate total streams processed + streams_processed = streams_created + streams_updated + + # Set status to success and update timestamp BEFORE sending the final update + account.status = M3UAccount.Status.SUCCESS + account.last_message = ( + f"Processing completed in {elapsed_time:.1f} seconds. " + f"Streams: {streams_created} created, {streams_updated} updated, {streams_deleted} removed. " + f"Total processed: {streams_processed}.{auto_sync_message}" + ) + account.updated_at = timezone.now() + account.save(update_fields=["status", "last_message", "updated_at"]) + + # Log system event for M3U refresh + log_system_event( + event_type='m3u_refresh', + account_name=account.name, + elapsed_time=round(elapsed_time, 2), + streams_created=streams_created, + streams_updated=streams_updated, + streams_deleted=streams_deleted, + total_processed=streams_processed, + ) + + # Send final update with complete metrics and explicitly include success status + send_m3u_update( + account_id, + "parsing", + 100, + status="success", # Explicitly set status to success + elapsed_time=elapsed_time, + time_remaining=0, + streams_processed=streams_processed, + streams_created=streams_created, + streams_updated=streams_updated, + streams_deleted=streams_deleted, + message=account.last_message, + ) + + # Trigger VOD refresh if enabled and account is XtreamCodes type + if vod_enabled and account.account_type == M3UAccount.Types.XC: + logger.info(f"VOD is enabled for account {account_id}, triggering VOD refresh") + try: + from apps.vod.tasks import refresh_vod_content + refresh_vod_content.delay(account_id) + logger.info(f"VOD refresh task queued for account {account_id}") + except Exception as e: + logger.error(f"Failed to queue VOD refresh for account {account_id}: {str(e)}") + + except Exception as e: + logger.error(f"Error processing M3U for account {account_id}: {str(e)}") + account.status = M3UAccount.Status.ERROR + account.last_message = f"Error processing M3U: {str(e)}" + account.save(update_fields=["status", "last_message"]) + raise # Re-raise the exception for Celery to handle + + release_task_lock("refresh_single_m3u_account", account_id) # Aggressive garbage collection - del existing_groups, extinf_data, groups, batches - gc.collect() + # Only delete variables if they exist + if 'existing_groups' in locals(): + del existing_groups + if 'extinf_data' in locals(): + del extinf_data + if 'groups' in locals(): + del groups + if 'batches' in locals(): + del batches + + from core.utils import cleanup_memory + + cleanup_memory(log_usage=True, force_collection=True) # Clean up cache file since we've fully processed it if os.path.exists(cache_path): os.remove(cache_path) - release_task_lock('refresh_single_m3u_account', account_id) - - # cursor = 0 - # while True: - # cursor, keys = redis_client.scan(cursor, match=f"m3u_refresh:*", count=BATCH_SIZE) - # if keys: - # redis_client.delete(*keys) # Delete the matching keys - # if cursor == 0: - # break - return f"Dispatched jobs complete." 
+ def send_m3u_update(account_id, action, progress, **kwargs): # Start with the base data dictionary data = { @@ -500,15 +2982,20 @@ def send_m3u_update(account_id, action, progress, **kwargs): "action": action, } + # Add the status and message if not already in kwargs + try: + account = M3UAccount.objects.get(id=account_id) + if account: + if "status" not in kwargs: + data["status"] = account.status + if "message" not in kwargs and account.last_message: + data["message"] = account.last_message + except: + pass # If account can't be retrieved, continue without these fields + # Add the additional key-value pairs from kwargs data.update(kwargs) + send_websocket_update("updates", "update", data, collect_garbage=False) - # Now, send the updated data dictionary - channel_layer = get_channel_layer() - async_to_sync(channel_layer.group_send)( - 'updates', - { - 'type': 'update', - 'data': data - } - ) + # Explicitly clear data reference to help garbage collection + data = None diff --git a/apps/m3u/utils.py b/apps/m3u/utils.py index 784188ba..598ef713 100644 --- a/apps/m3u/utils.py +++ b/apps/m3u/utils.py @@ -1,9 +1,40 @@ # apps/m3u/utils.py import threading +import logging +from django.db import models lock = threading.Lock() # Dictionary to track usage: {m3u_account_id: current_usage} active_streams_map = {} +logger = logging.getLogger(__name__) + + +def normalize_stream_url(url): + """ + Normalize stream URLs for compatibility with FFmpeg. + + Handles VLC-specific syntax like udp://@239.0.0.1:1234 by removing the @ symbol. + FFmpeg doesn't recognize the @ prefix for multicast addresses. + + Args: + url (str): The stream URL to normalize + + Returns: + str: The normalized URL + """ + if not url: + return url + + # Handle VLC-style UDP multicast URLs: udp://@239.0.0.1:1234 -> udp://239.0.0.1:1234 + # The @ symbol in VLC means "listen on all interfaces" but FFmpeg doesn't use this syntax + if url.startswith('udp://@'): + normalized = url.replace('udp://@', 'udp://', 1) + logger.debug(f"Normalized VLC-style UDP URL: {url} -> {normalized}") + return normalized + + # Could add other normalizations here in the future (rtp://@, etc.) + return url + def increment_stream_count(account): with lock: @@ -24,3 +55,64 @@ def decrement_stream_count(account): active_streams_map[account.id] = current_usage account.active_streams = current_usage account.save(update_fields=['active_streams']) + + +def calculate_tuner_count(minimum=1, unlimited_default=10): + """ + Calculate tuner/connection count from active M3U profiles and custom streams. + This is the centralized function used by both HDHR and XtreamCodes APIs. + + Args: + minimum (int): Minimum number to return (default: 1) + unlimited_default (int): Default value when unlimited profiles exist (default: 10) + + Returns: + int: Calculated tuner/connection count + """ + try: + from apps.m3u.models import M3UAccountProfile + from apps.channels.models import Stream + + # Calculate tuner count from active profiles from active M3U accounts (excluding default "custom Default" profile) + profiles = M3UAccountProfile.objects.filter( + is_active=True, + m3u_account__is_active=True, # Only include profiles from enabled M3U accounts + ).exclude(id=1) + + # 1. Check if any profile has unlimited streams (max_streams=0) + has_unlimited = profiles.filter(max_streams=0).exists() + + # 2. 
Calculate tuner count from limited profiles + limited_tuners = 0 + if not has_unlimited: + limited_tuners = ( + profiles.filter(max_streams__gt=0) + .aggregate(total=models.Sum("max_streams")) + .get("total", 0) + or 0 + ) + + # 3. Add custom stream count to tuner count + custom_stream_count = Stream.objects.filter(is_custom=True).count() + logger.debug(f"Found {custom_stream_count} custom streams") + + # 4. Calculate final tuner count + if has_unlimited: + # If there are unlimited profiles, start with unlimited_default plus custom streams + tuner_count = unlimited_default + custom_stream_count + else: + # Otherwise use the limited profile sum plus custom streams + tuner_count = limited_tuners + custom_stream_count + + # 5. Ensure minimum number + tuner_count = max(minimum, tuner_count) + + logger.debug( + f"Calculated tuner count: {tuner_count} (limited profiles: {limited_tuners}, custom streams: {custom_stream_count}, unlimited: {has_unlimited})" + ) + + return tuner_count + + except Exception as e: + logger.error(f"Error calculating tuner count: {e}") + return minimum # Fallback to minimum value diff --git a/apps/m3u/views.py b/apps/m3u/views.py index f69dd6c4..0fab8c10 100644 --- a/apps/m3u/views.py +++ b/apps/m3u/views.py @@ -3,6 +3,7 @@ from django.views import View from django.utils.decorators import method_decorator from django.contrib.auth.decorators import login_required from django.views.decorators.csrf import csrf_exempt +from django.http import JsonResponse from apps.m3u.models import M3UAccount import json diff --git a/apps/output/tests.py b/apps/output/tests.py index e1e857ee..f87c8340 100644 --- a/apps/output/tests.py +++ b/apps/output/tests.py @@ -14,3 +14,26 @@ class OutputM3UTest(TestCase): self.assertEqual(response.status_code, 200) content = response.content.decode() self.assertIn("#EXTM3U", content) + + def test_generate_m3u_response_post_empty_body(self): + """ + Test that a POST request with an empty body returns 200 OK. + """ + url = reverse('output:generate_m3u') + + response = self.client.post(url, data=None, content_type='application/x-www-form-urlencoded') + content = response.content.decode() + + self.assertEqual(response.status_code, 200, "POST with empty body should return 200 OK") + self.assertIn("#EXTM3U", content) + + def test_generate_m3u_response_post_with_body(self): + """ + Test that a POST request with a non-empty body returns 403 Forbidden. 
+ """ + url = reverse('output:generate_m3u') + + response = self.client.post(url, data={'evilstring': 'muhahaha'}) + + self.assertEqual(response.status_code, 403, "POST with body should return 403 Forbidden") + self.assertIn("POST requests with body are not allowed, body is:", response.content.decode()) diff --git a/apps/output/urls.py b/apps/output/urls.py index 92774adb..dc023ed7 100644 --- a/apps/output/urls.py +++ b/apps/output/urls.py @@ -1,16 +1,14 @@ from django.urls import path, re_path, include -from .views import generate_m3u, generate_epg +from .views import m3u_endpoint, epg_endpoint, xc_get, xc_movie_stream, xc_series_stream from core.views import stream_view -app_name = 'output' +app_name = "output" urlpatterns = [ # Allow `/m3u`, `/m3u/`, `/m3u/profile_name`, and `/m3u/profile_name/` - re_path(r'^m3u(?:/(?P[^/]+))?/?$', generate_m3u, name='generate_m3u'), - + re_path(r"^m3u(?:/(?P[^/]+))?/?$", m3u_endpoint, name="m3u_endpoint"), # Allow `/epg`, `/epg/`, `/epg/profile_name`, and `/epg/profile_name/` - re_path(r'^epg(?:/(?P[^/]+))?/?$', generate_epg, name='generate_epg'), - + re_path(r"^epg(?:/(?P[^/]+))?/?$", epg_endpoint, name="epg_endpoint"), # Allow both `/stream/` and `/stream//` - re_path(r'^stream/(?P[0-9a-fA-F\-]+)/?$', stream_view, name='stream'), + re_path(r"^stream/(?P[0-9a-fA-F\-]+)/?$", stream_view, name="stream"), ] diff --git a/apps/output/views.py b/apps/output/views.py index 9429d9ca..47798ee2 100644 --- a/apps/output/views.py +++ b/apps/output/views.py @@ -1,205 +1,3011 @@ -from django.http import HttpResponse +import ipaddress +from django.http import HttpResponse, JsonResponse, Http404, HttpResponseForbidden, StreamingHttpResponse +from rest_framework.response import Response from django.urls import reverse -from apps.channels.models import Channel, ChannelProfile +from apps.channels.models import Channel, ChannelProfile, ChannelGroup +from django.views.decorators.csrf import csrf_exempt +from django.views.decorators.http import require_http_methods from apps.epg.models import ProgramData -from django.utils import timezone +from apps.accounts.models import User +from dispatcharr.utils import network_access_allowed +from django.utils import timezone as django_timezone +from django.shortcuts import get_object_or_404 from datetime import datetime, timedelta -import re import html # Add this import for XML escaping +import json # Add this import for JSON parsing +import time # Add this import for keep-alive delays +from tzlocal import get_localzone +from urllib.parse import urlparse +import base64 +import logging +from django.db.models.functions import Lower +import os +from apps.m3u.utils import calculate_tuner_count +import regex +from core.utils import log_system_event +import hashlib -def generate_m3u(request, profile_name=None): +logger = logging.getLogger(__name__) + +def get_client_identifier(request): + """Get client information including IP, user agent, and a unique hash identifier + + Returns: + tuple: (client_id_hash, client_ip, user_agent) + """ + # Get client IP (handle proxies) + x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR') + if x_forwarded_for: + client_ip = x_forwarded_for.split(',')[0].strip() + else: + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + + # Get user agent + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + + # Create a hash for a shorter cache key + client_str = f"{client_ip}:{user_agent}" + client_id_hash = hashlib.md5(client_str.encode()).hexdigest()[:12] + + return client_id_hash, client_ip, 
user_agent + +def m3u_endpoint(request, profile_name=None, user=None): + logger.debug("m3u_endpoint called: method=%s, profile=%s", request.method, profile_name) + if not network_access_allowed(request, "M3U_EPG"): + # Log blocked M3U download + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + log_system_event( + event_type='m3u_blocked', + profile=profile_name or 'all', + reason='Network access denied', + client_ip=client_ip, + user_agent=user_agent, + ) + return JsonResponse({"error": "Forbidden"}, status=403) + + # Handle HEAD requests efficiently without generating content + if request.method == "HEAD": + logger.debug("Handling HEAD request for M3U") + response = HttpResponse(content_type="audio/x-mpegurl") + response["Content-Disposition"] = 'attachment; filename="channels.m3u"' + return response + + return generate_m3u(request, profile_name, user) + +def epg_endpoint(request, profile_name=None, user=None): + logger.debug("epg_endpoint called: method=%s, profile=%s", request.method, profile_name) + if not network_access_allowed(request, "M3U_EPG"): + # Log blocked EPG download + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + log_system_event( + event_type='epg_blocked', + profile=profile_name or 'all', + reason='Network access denied', + client_ip=client_ip, + user_agent=user_agent, + ) + return JsonResponse({"error": "Forbidden"}, status=403) + + # Handle HEAD requests efficiently without generating content + if request.method == "HEAD": + logger.debug("Handling HEAD request for EPG") + response = HttpResponse(content_type="application/xml") + response["Content-Disposition"] = 'attachment; filename="Dispatcharr.xml"' + response["Cache-Control"] = "no-cache" + return response + + return generate_epg(request, profile_name, user) + +@csrf_exempt +@require_http_methods(["GET", "POST", "HEAD"]) +def generate_m3u(request, profile_name=None, user=None): """ Dynamically generate an M3U file from channels. The stream URL now points to the new stream_view that uses StreamProfile. + Supports both GET and POST methods for compatibility with IPTVSmarters. 
""" - if profile_name is not None: - channel_profile = ChannelProfile.objects.get(name=profile_name) - channels = Channel.objects.filter( - channelprofilemembership__channel_profile=channel_profile, - channelprofilemembership__enabled=True - ).order_by('channel_number') - else: - channels = Channel.objects.order_by('channel_number') + # Check if this is a POST request and the body is not empty (which we don't want to allow) + logger.debug("Generating M3U for profile: %s, user: %s, method: %s", profile_name, user.username if user else "Anonymous", request.method) - m3u_content = "#EXTM3U\n" + # Check cache for recent identical request (helps with double-GET from browsers) + from django.core.cache import cache + cache_params = f"{profile_name or 'all'}:{user.username if user else 'anonymous'}:{request.GET.urlencode()}" + content_cache_key = f"m3u_content:{cache_params}" + + cached_content = cache.get(content_cache_key) + if cached_content: + logger.debug("Serving M3U from cache") + response = HttpResponse(cached_content, content_type="audio/x-mpegurl") + response["Content-Disposition"] = 'attachment; filename="channels.m3u"' + return response + # Check if this is a POST request with data (which we don't want to allow) + if request.method == "POST" and request.body: + if request.body.decode() != '{}': + return HttpResponseForbidden("POST requests with body are not allowed, body is: {}".format(request.body.decode())) + + if user is not None: + if user.user_level == 0: + user_profile_count = user.channel_profiles.count() + + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channels based on user_level + channels = Channel.objects.filter(user_level__lte=user.user_level).order_by("channel_number") + else: + # User has specific limited profiles assigned + filters = { + "channelprofilemembership__enabled": True, + "user_level__lte": user.user_level, + "channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + channels = Channel.objects.filter(**filters).distinct().order_by("channel_number") + else: + channels = Channel.objects.filter(user_level__lte=user.user_level).order_by( + "channel_number" + ) + + else: + if profile_name is not None: + try: + channel_profile = ChannelProfile.objects.get(name=profile_name) + except ChannelProfile.DoesNotExist: + logger.warning("Requested channel profile (%s) during m3u generation does not exist", profile_name) + raise Http404(f"Channel profile '{profile_name}' not found") + channels = Channel.objects.filter( + channelprofilemembership__channel_profile=channel_profile, + channelprofilemembership__enabled=True + ).order_by('channel_number') + else: + channels = Channel.objects.order_by("channel_number") + + # Check if the request wants to use direct logo URLs instead of cache + use_cached_logos = request.GET.get('cachedlogos', 'true').lower() != 'false' + + # Check if direct stream URLs should be used instead of proxy + use_direct_urls = request.GET.get('direct', 'false').lower() == 'true' + + # Get the source to use for tvg-id value + # Options: 'channel_number' (default), 'tvg_id', 'gracenote' + tvg_id_source = request.GET.get('tvg_id_source', 'channel_number').lower() + + # Build EPG URL with query parameters if needed + # Check if this is an XC API request (has username/password in GET params and user is authenticated) + xc_username = request.GET.get('username') + xc_password = request.GET.get('password') + + if user is not None and xc_username 
and xc_password: + # This is an XC API request - use XC-style EPG URL + base_url = build_absolute_uri_with_port(request, '') + epg_url = f"{base_url}/xmltv.php?username={xc_username}&password={xc_password}" + else: + # Regular request - use standard EPG endpoint + epg_base_url = build_absolute_uri_with_port(request, reverse('output:epg_endpoint', args=[profile_name]) if profile_name else reverse('output:epg_endpoint')) + + # Optionally preserve certain query parameters + preserved_params = ['tvg_id_source', 'cachedlogos', 'days'] + query_params = {k: v for k, v in request.GET.items() if k in preserved_params} + if query_params: + from urllib.parse import urlencode + epg_url = f"{epg_base_url}?{urlencode(query_params)}" + else: + epg_url = epg_base_url + + # Add x-tvg-url and url-tvg attribute for EPG URL + m3u_content = f'#EXTM3U x-tvg-url="{epg_url}" url-tvg="{epg_url}"\n' + + # Start building M3U content for channel in channels: group_title = channel.channel_group.name if channel.channel_group else "Default" - tvg_id = channel.channel_number or channel.id + + # Format channel number as integer if it has no decimal component + if channel.channel_number is not None: + if channel.channel_number == int(channel.channel_number): + formatted_channel_number = int(channel.channel_number) + else: + formatted_channel_number = channel.channel_number + else: + formatted_channel_number = "" + + # Determine the tvg-id based on the selected source + if tvg_id_source == 'tvg_id' and channel.tvg_id: + tvg_id = channel.tvg_id + elif tvg_id_source == 'gracenote' and channel.tvc_guide_stationid: + tvg_id = channel.tvc_guide_stationid + else: + # Default to channel number (original behavior) + tvg_id = str(formatted_channel_number) if formatted_channel_number != "" else str(channel.id) + tvg_name = channel.name tvg_logo = "" if channel.logo: - tvg_logo = request.build_absolute_uri(reverse('api:channels:logo-cache', args=[channel.logo.id])) + if use_cached_logos: + # Use cached logo as before + tvg_logo = build_absolute_uri_with_port(request, reverse('api:channels:logo-cache', args=[channel.logo.id])) + else: + # Try to find direct logo URL from channel's streams + direct_logo = channel.logo.url if channel.logo.url.startswith(('http://', 'https://')) else None + # If direct logo found, use it; otherwise fall back to cached version + if direct_logo: + tvg_logo = direct_logo + else: + tvg_logo = build_absolute_uri_with_port(request, reverse('api:channels:logo-cache', args=[channel.logo.id])) - channel_number = channel.channel_number + # create possible gracenote id insertion + tvc_guide_stationid = "" + if channel.tvc_guide_stationid: + tvc_guide_stationid = ( + f'tvc-guide-stationid="{channel.tvc_guide_stationid}" ' + ) extinf_line = ( f'#EXTINF:-1 tvg-id="{tvg_id}" tvg-name="{tvg_name}" tvg-logo="{tvg_logo}" ' - f'tvg-chno="{channel_number}" group-title="{group_title}",{channel.name}\n' + f'tvg-chno="{formatted_channel_number}" {tvc_guide_stationid}group-title="{group_title}",{channel.name}\n' ) - base_url = request.build_absolute_uri('/')[:-1] - stream_url = f"{base_url}/proxy/ts/stream/{channel.uuid}" + # Determine the stream URL based on the direct parameter + if use_direct_urls: + # Try to get the first stream's direct URL + first_stream = channel.streams.order_by('channelstream__order').first() + if first_stream and first_stream.url: + # Use the direct stream URL + stream_url = first_stream.url + else: + # Fall back to proxy URL if no direct URL available + stream_url = 
build_absolute_uri_with_port(request, f"/proxy/ts/stream/{channel.uuid}") + else: + # Standard behavior - use proxy URL + stream_url = build_absolute_uri_with_port(request, f"/proxy/ts/stream/{channel.uuid}") - #stream_url = request.build_absolute_uri(reverse('output:stream', args=[channel.id])) m3u_content += extinf_line + stream_url + "\n" + # Cache the generated content for 2 seconds to handle double-GET requests + cache.set(content_cache_key, m3u_content, 2) + + # Log system event for M3U download (with deduplication based on client) + client_id, client_ip, user_agent = get_client_identifier(request) + event_cache_key = f"m3u_download:{user.username if user else 'anonymous'}:{profile_name or 'all'}:{client_id}" + if not cache.get(event_cache_key): + log_system_event( + event_type='m3u_download', + profile=profile_name or 'all', + user=user.username if user else 'anonymous', + channels=channels.count(), + client_ip=client_ip, + user_agent=user_agent, + ) + cache.set(event_cache_key, True, 2) # Prevent duplicate events for 2 seconds + response = HttpResponse(m3u_content, content_type="audio/x-mpegurl") - response['Content-Disposition'] = 'attachment; filename="channels.m3u"' + response["Content-Disposition"] = 'attachment; filename="channels.m3u"' return response -def generate_dummy_epg(name, channel_id, num_days=7, interval_hours=4): - xml_lines = [] - # Loop through the number of days - for day_offset in range(num_days): - current_day = datetime.now() + timedelta(days=day_offset) +def generate_fallback_programs(channel_id, channel_name, now, num_days, program_length_hours, fallback_title, fallback_description): + """ + Generate dummy programs using custom fallback templates when patterns don't match. - # Loop through each 4-hour interval in the day - for hour in range(0, 24, interval_hours): - start_time = current_day.replace(hour=hour, minute=0, second=0, microsecond=0) - stop_time = start_time + timedelta(hours=interval_hours) + Args: + channel_id: Channel ID for the programs + channel_name: Channel name to use as fallback in templates + now: Current datetime (in UTC) + num_days: Number of days to generate programs for + program_length_hours: Length of each program in hours + fallback_title: Custom fallback title template (empty string if not provided) + fallback_description: Custom fallback description template (empty string if not provided) - # Format the times as per the requested format - start_str = start_time.strftime("%Y%m%d%H%M%S") + " 0000" - stop_str = stop_time.strftime("%Y%m%d%H%M%S") + " 0000" + Returns: + List of program dictionaries + """ + programs = [] - # Create the XML-like programme entry with escaped name - xml_lines.append(f'') - xml_lines.append(f' {html.escape(name)}') - xml_lines.append(f'') + # Use custom fallback title or channel name as default + title = fallback_title if fallback_title else channel_name + + # Use custom fallback description or a simple default message + if fallback_description: + description = fallback_description + else: + description = f"EPG information is currently unavailable for {channel_name}" + + # Create programs for each day + for day in range(num_days): + day_start = now + timedelta(days=day) + + # Create programs with specified length throughout the day + for hour_offset in range(0, 24, program_length_hours): + # Calculate program start and end times + start_time = day_start + timedelta(hours=hour_offset) + end_time = start_time + timedelta(hours=program_length_hours) + + programs.append({ + "channel_id": channel_id, + 
"start_time": start_time, + "end_time": end_time, + "title": title, + "description": description, + }) + + return programs + + +def generate_dummy_programs(channel_id, channel_name, num_days=1, program_length_hours=4, epg_source=None): + """ + Generate dummy EPG programs for channels. + + If epg_source is provided and it's a custom dummy EPG with patterns, + use those patterns to generate programs from the channel title. + Otherwise, generate default dummy programs. + + Args: + channel_id: Channel ID for the programs + channel_name: Channel title/name + num_days: Number of days to generate programs for + program_length_hours: Length of each program in hours + epg_source: Optional EPGSource for custom dummy EPG with patterns + + Returns: + List of program dictionaries + """ + # Get current time rounded to hour + now = django_timezone.now() + now = now.replace(minute=0, second=0, microsecond=0) + + # Check if this is a custom dummy EPG with regex patterns + if epg_source and epg_source.source_type == 'dummy' and epg_source.custom_properties: + custom_programs = generate_custom_dummy_programs( + channel_id, channel_name, now, num_days, + epg_source.custom_properties + ) + # If custom generation succeeded, return those programs + # If it returned empty (pattern didn't match), check for custom fallback templates + if custom_programs: + return custom_programs + else: + logger.info(f"Custom pattern didn't match for '{channel_name}', checking for custom fallback templates") + + # Check if custom fallback templates are provided + custom_props = epg_source.custom_properties + fallback_title = custom_props.get('fallback_title_template', '').strip() + fallback_description = custom_props.get('fallback_description_template', '').strip() + + # If custom fallback templates exist, use them instead of default + if fallback_title or fallback_description: + logger.info(f"Using custom fallback templates for '{channel_name}'") + return generate_fallback_programs( + channel_id, channel_name, now, num_days, + program_length_hours, fallback_title, fallback_description + ) + else: + logger.info(f"No custom fallback templates found, using default dummy EPG") + + # Default humorous program descriptions based on time of day + time_descriptions = { + (0, 4): [ + f"Late Night with {channel_name} - Where insomniacs unite!", + f"The 'Why Am I Still Awake?' Show on {channel_name}", + f"Counting Sheep - A {channel_name} production for the sleepless", + ], + (4, 8): [ + f"Dawn Patrol - Rise and shine with {channel_name}!", + f"Early Bird Special - Coffee not included", + f"Morning Zombies - Before coffee viewing on {channel_name}", + ], + (8, 12): [ + f"Mid-Morning Meetings - Pretend you're paying attention while watching {channel_name}", + f"The 'I Should Be Working' Hour on {channel_name}", + f"Productivity Killer - {channel_name}'s daytime programming", + ], + (12, 16): [ + f"Lunchtime Laziness with {channel_name}", + f"The Afternoon Slump - Brought to you by {channel_name}", + f"Post-Lunch Food Coma Theater on {channel_name}", + ], + (16, 20): [ + f"Rush Hour - {channel_name}'s alternative to traffic", + f"The 'What's For Dinner?' 
Debate on {channel_name}", + f"Evening Escapism - {channel_name}'s remedy for reality", + ], + (20, 24): [ + f"Prime Time Placeholder - {channel_name}'s finest not-programming", + f"The 'Netflix Was Too Complicated' Show on {channel_name}", + f"Family Argument Avoider - Courtesy of {channel_name}", + ], + } + + programs = [] + + # Create programs for each day + for day in range(num_days): + day_start = now + timedelta(days=day) + + # Create programs with specified length throughout the day + for hour_offset in range(0, 24, program_length_hours): + # Calculate program start and end times + start_time = day_start + timedelta(hours=hour_offset) + end_time = start_time + timedelta(hours=program_length_hours) + + # Get the hour for selecting a description + hour = start_time.hour + + # Find the appropriate time slot for description + for time_range, descriptions in time_descriptions.items(): + start_range, end_range = time_range + if start_range <= hour < end_range: + # Pick a description using the sum of the hour and day as seed + # This makes it somewhat random but consistent for the same timeslot + description = descriptions[(hour + day) % len(descriptions)] + break + else: + # Fallback description if somehow no range matches + description = f"Placeholder program for {channel_name} - EPG data went on vacation" + + programs.append({ + "channel_id": channel_id, + "start_time": start_time, + "end_time": end_time, + "title": channel_name, + "description": description, + }) + + return programs + + +def generate_custom_dummy_programs(channel_id, channel_name, now, num_days, custom_properties): + """ + Generate programs using custom dummy EPG regex patterns. + + Extracts information from channel title using regex patterns and generates + programs based on the extracted data. + + TIMEZONE HANDLING: + ------------------ + The timezone parameter specifies the timezone of the event times in your channel + titles using standard timezone names (e.g., 'US/Eastern', 'US/Pacific', 'Europe/London'). + DST (Daylight Saving Time) is handled automatically by pytz. + + Examples: + - Channel: "NHL 01: Bruins VS Maple Leafs @ 8:00PM ET" + - Set timezone = "US/Eastern" + - In October (DST): 8:00PM EDT → 12:00AM UTC (automatically uses UTC-4) + - In January (no DST): 8:00PM EST → 1:00AM UTC (automatically uses UTC-5) + + Args: + channel_id: Channel ID for the programs + channel_name: Channel title to parse + now: Current datetime (in UTC) + num_days: Number of days to generate programs for + custom_properties: Dict with title_pattern, time_pattern, templates, etc. 
+ - timezone: Timezone name (e.g., 'US/Eastern') + + Returns: + List of program dictionaries with start_time/end_time in UTC + """ + import pytz + + logger.info(f"Generating custom dummy programs for channel: {channel_name}") + + # Extract patterns from custom properties + title_pattern = custom_properties.get('title_pattern', '') + time_pattern = custom_properties.get('time_pattern', '') + date_pattern = custom_properties.get('date_pattern', '') + + # Get timezone name (e.g., 'US/Eastern', 'US/Pacific', 'Europe/London') + timezone_value = custom_properties.get('timezone', 'UTC') + output_timezone_value = custom_properties.get('output_timezone', '') # Optional: display times in different timezone + program_duration = custom_properties.get('program_duration', 180) # Minutes + title_template = custom_properties.get('title_template', '') + description_template = custom_properties.get('description_template', '') + + # Templates for upcoming/ended programs + upcoming_title_template = custom_properties.get('upcoming_title_template', '') + upcoming_description_template = custom_properties.get('upcoming_description_template', '') + ended_title_template = custom_properties.get('ended_title_template', '') + ended_description_template = custom_properties.get('ended_description_template', '') + + # Image URL templates + channel_logo_url_template = custom_properties.get('channel_logo_url', '') + program_poster_url_template = custom_properties.get('program_poster_url', '') + + # EPG metadata options + category_string = custom_properties.get('category', '') + # Split comma-separated categories and strip whitespace, filter out empty strings + categories = [cat.strip() for cat in category_string.split(',') if cat.strip()] if category_string else [] + include_date = custom_properties.get('include_date', True) + include_live = custom_properties.get('include_live', False) + include_new = custom_properties.get('include_new', False) + + # Parse timezone name + try: + source_tz = pytz.timezone(timezone_value) + logger.debug(f"Using timezone: {timezone_value} (DST will be handled automatically)") + except pytz.exceptions.UnknownTimeZoneError: + logger.warning(f"Unknown timezone: {timezone_value}, defaulting to UTC") + source_tz = pytz.utc + + # Parse output timezone if provided (for display purposes) + output_tz = None + if output_timezone_value: + try: + output_tz = pytz.timezone(output_timezone_value) + logger.debug(f"Using output timezone for display: {output_timezone_value}") + except pytz.exceptions.UnknownTimeZoneError: + logger.warning(f"Unknown output timezone: {output_timezone_value}, will use source timezone") + output_tz = None + + if not title_pattern: + logger.warning(f"No title_pattern in custom_properties, falling back to default") + return [] # Return empty, will use default + + logger.debug(f"Title pattern from DB: {repr(title_pattern)}") + + # Convert PCRE/JavaScript named groups (?) 
to Python format (?P) + # This handles patterns created with JavaScript regex syntax + # Use negative lookahead to avoid matching lookbehind (?<=) and negative lookbehind (?]+)>', r'(?P<\1>', title_pattern) + logger.debug(f"Converted title pattern: {repr(title_pattern)}") + + # Compile regex patterns using the enhanced regex module + # (supports variable-width lookbehinds like JavaScript) + try: + title_regex = regex.compile(title_pattern) + except Exception as e: + logger.error(f"Invalid title regex pattern after conversion: {e}") + logger.error(f"Pattern was: {repr(title_pattern)}") + return [] + + time_regex = None + if time_pattern: + # Convert PCRE/JavaScript named groups to Python format + # Use negative lookahead to avoid matching lookbehind (?<=) and negative lookbehind (?]+)>', r'(?P<\1>', time_pattern) + logger.debug(f"Converted time pattern: {repr(time_pattern)}") + try: + time_regex = regex.compile(time_pattern) + except Exception as e: + logger.warning(f"Invalid time regex pattern after conversion: {e}") + logger.warning(f"Pattern was: {repr(time_pattern)}") + + # Compile date regex if provided + date_regex = None + if date_pattern: + # Convert PCRE/JavaScript named groups to Python format + # Use negative lookahead to avoid matching lookbehind (?<=) and negative lookbehind (?]+)>', r'(?P<\1>', date_pattern) + logger.debug(f"Converted date pattern: {repr(date_pattern)}") + try: + date_regex = regex.compile(date_pattern) + except Exception as e: + logger.warning(f"Invalid date regex pattern after conversion: {e}") + logger.warning(f"Pattern was: {repr(date_pattern)}") + + # Try to match the channel name with the title pattern + # Use search() instead of match() to match JavaScript behavior where .match() searches anywhere in the string + title_match = title_regex.search(channel_name) + if not title_match: + logger.debug(f"Channel name '{channel_name}' doesn't match title pattern") + return [] # Return empty, will use default + + groups = title_match.groupdict() + logger.debug(f"Title pattern matched. 
Groups: {groups}") + + # Helper function to format template with matched groups + def format_template(template, groups, url_encode=False): + """Replace {groupname} placeholders with matched group values + + Args: + template: Template string with {groupname} placeholders + groups: Dict of group names to values + url_encode: If True, URL encode the group values for safe use in URLs + """ + if not template: + return '' + result = template + for key, value in groups.items(): + if url_encode and value: + # URL encode the value to handle spaces and special characters + from urllib.parse import quote + encoded_value = quote(str(value), safe='') + result = result.replace(f'{{{key}}}', encoded_value) + else: + result = result.replace(f'{{{key}}}', str(value) if value else '') + return result + + # Extract time from title if time pattern exists + time_info = None + time_groups = {} + if time_regex: + time_match = time_regex.search(channel_name) + if time_match: + time_groups = time_match.groupdict() + try: + hour = int(time_groups.get('hour')) + # Handle optional minute group - could be None if not captured + minute_value = time_groups.get('minute') + minute = int(minute_value) if minute_value is not None else 0 + ampm = time_groups.get('ampm') + ampm = ampm.lower() if ampm else None + + # Determine if this is 12-hour or 24-hour format + if ampm in ('am', 'pm'): + # 12-hour format: convert to 24-hour + if ampm == 'pm' and hour != 12: + hour += 12 + elif ampm == 'am' and hour == 12: + hour = 0 + logger.debug(f"Extracted time (12-hour): {hour}:{minute:02d} {ampm}") + else: + # 24-hour format: hour is already in 24-hour format + # Validate that it's actually a 24-hour time (0-23) + if hour > 23: + logger.warning(f"Invalid 24-hour time: {hour}. Must be 0-23.") + hour = hour % 24 # Wrap around just in case + logger.debug(f"Extracted time (24-hour): {hour}:{minute:02d}") + + time_info = {'hour': hour, 'minute': minute} + except (ValueError, TypeError) as e: + logger.warning(f"Error parsing time: {e}") + + # Extract date from title if date pattern exists + date_info = None + date_groups = {} + if date_regex: + date_match = date_regex.search(channel_name) + if date_match: + date_groups = date_match.groupdict() + try: + # Support various date group names: month, day, year + month_str = date_groups.get('month', '') + day_str = date_groups.get('day', '') + year_str = date_groups.get('year', '') + + # Parse day - default to current day if empty or invalid + day = int(day_str) if day_str else now.day + + # Parse year - default to current year if empty or invalid (matches frontend behavior) + year = int(year_str) if year_str else now.year + + # Parse month - can be numeric (1-12) or text (Jan, January, etc.) 
+ month = None + if month_str: + if month_str.isdigit(): + month = int(month_str) + else: + # Try to parse text month names + import calendar + month_str_lower = month_str.lower() + # Check full month names + for i, month_name in enumerate(calendar.month_name): + if month_name.lower() == month_str_lower: + month = i + break + # Check abbreviated month names if not found + if month is None: + for i, month_abbr in enumerate(calendar.month_abbr): + if month_abbr.lower() == month_str_lower: + month = i + break + + # Default to current month if not extracted or invalid + if month is None: + month = now.month + + if month and 1 <= month <= 12 and 1 <= day <= 31: + date_info = {'year': year, 'month': month, 'day': day} + logger.debug(f"Extracted date: {year}-{month:02d}-{day:02d}") + else: + logger.warning(f"Invalid date values: month={month}, day={day}, year={year}") + except (ValueError, TypeError) as e: + logger.warning(f"Error parsing date: {e}") + + # Merge title groups, time groups, and date groups for template formatting + all_groups = {**groups, **time_groups, **date_groups} + + # Add normalized versions of all groups for cleaner URLs + # These remove all non-alphanumeric characters and convert to lowercase + for key, value in list(all_groups.items()): + if value: + # Remove all non-alphanumeric characters (except spaces temporarily) + # then replace spaces with nothing, and convert to lowercase + normalized = regex.sub(r'[^a-zA-Z0-9\s]', '', str(value)) + normalized = regex.sub(r'\s+', '', normalized).lower() + all_groups[f'{key}_normalize'] = normalized + + # Format channel logo URL if template provided (with URL encoding) + channel_logo_url = None + if channel_logo_url_template: + channel_logo_url = format_template(channel_logo_url_template, all_groups, url_encode=True) + logger.debug(f"Formatted channel logo URL: {channel_logo_url}") + + # Format program poster URL if template provided (with URL encoding) + program_poster_url = None + if program_poster_url_template: + program_poster_url = format_template(program_poster_url_template, all_groups, url_encode=True) + logger.debug(f"Formatted program poster URL: {program_poster_url}") + + # Add formatted time strings for better display (handles minutes intelligently) + if time_info: + hour_24 = time_info['hour'] + minute = time_info['minute'] + + # Determine the base date to use for placeholders + # If date was extracted, use it; otherwise use current date + if date_info: + base_date = datetime(date_info['year'], date_info['month'], date_info['day']) + else: + base_date = datetime.now() + + # If output_timezone is specified, convert the display time to that timezone + if output_tz: + # Create a datetime in the source timezone using the base date + temp_date = source_tz.localize(base_date.replace(hour=hour_24, minute=minute, second=0, microsecond=0)) + # Convert to output timezone + temp_date_output = temp_date.astimezone(output_tz) + # Extract converted hour and minute for display + hour_24 = temp_date_output.hour + minute = temp_date_output.minute + logger.debug(f"Converted display time from {source_tz} to {output_tz}: {hour_24}:{minute:02d}") + + # Add date placeholders based on the OUTPUT timezone + # This ensures {date}, {month}, {day}, {year} reflect the converted timezone + all_groups['date'] = temp_date_output.strftime('%Y-%m-%d') + all_groups['month'] = str(temp_date_output.month) + all_groups['day'] = str(temp_date_output.day) + all_groups['year'] = str(temp_date_output.year) + logger.debug(f"Converted date placeholders to 
{output_tz}: {all_groups['date']}") + else: + # No output timezone conversion - use source timezone for date + # Create temp date to get proper date in source timezone using the base date + temp_date_source = source_tz.localize(base_date.replace(hour=hour_24, minute=minute, second=0, microsecond=0)) + all_groups['date'] = temp_date_source.strftime('%Y-%m-%d') + all_groups['month'] = str(temp_date_source.month) + all_groups['day'] = str(temp_date_source.day) + all_groups['year'] = str(temp_date_source.year) + + # Format 24-hour start time string - only include minutes if non-zero + if minute > 0: + all_groups['starttime24'] = f"{hour_24}:{minute:02d}" + else: + all_groups['starttime24'] = f"{hour_24:02d}:00" + + # Convert 24-hour to 12-hour format for {starttime} placeholder + # Note: hour_24 is ALWAYS in 24-hour format at this point (converted earlier if needed) + ampm = 'AM' if hour_24 < 12 else 'PM' + hour_12 = hour_24 + if hour_24 == 0: + hour_12 = 12 + elif hour_24 > 12: + hour_12 = hour_24 - 12 + + # Format 12-hour start time string - only include minutes if non-zero + if minute > 0: + all_groups['starttime'] = f"{hour_12}:{minute:02d} {ampm}" + else: + all_groups['starttime'] = f"{hour_12} {ampm}" + + # Format long version that always includes minutes (e.g., "9:00 PM" instead of "9 PM") + all_groups['starttime_long'] = f"{hour_12}:{minute:02d} {ampm}" + + # Calculate end time based on program duration + # Create a datetime for calculations + temp_start = datetime.now(source_tz).replace(hour=hour_24, minute=minute, second=0, microsecond=0) + temp_end = temp_start + timedelta(minutes=program_duration) + + # Extract end time components (already in correct timezone if output_tz was applied above) + end_hour_24 = temp_end.hour + end_minute = temp_end.minute + + # Format 24-hour end time string - only include minutes if non-zero + if end_minute > 0: + all_groups['endtime24'] = f"{end_hour_24}:{end_minute:02d}" + else: + all_groups['endtime24'] = f"{end_hour_24:02d}:00" + + # Convert 24-hour to 12-hour format for {endtime} placeholder + end_ampm = 'AM' if end_hour_24 < 12 else 'PM' + end_hour_12 = end_hour_24 + if end_hour_24 == 0: + end_hour_12 = 12 + elif end_hour_24 > 12: + end_hour_12 = end_hour_24 - 12 + + # Format 12-hour end time string - only include minutes if non-zero + if end_minute > 0: + all_groups['endtime'] = f"{end_hour_12}:{end_minute:02d} {end_ampm}" + else: + all_groups['endtime'] = f"{end_hour_12} {end_ampm}" + + # Format long version that always includes minutes (e.g., "9:00 PM" instead of "9 PM") + all_groups['endtime_long'] = f"{end_hour_12}:{end_minute:02d} {end_ampm}" + + # Generate programs + programs = [] + + # If we have extracted time AND date, the event happens on a SPECIFIC date + # If we have time but NO date, generate for multiple days (existing behavior) + # All other days and times show "Upcoming" before or "Ended" after + event_happened = False + + # Determine how many iterations we need + if date_info and time_info: + # Specific date extracted - only generate for that one date + iterations = 1 + logger.debug(f"Date extracted, generating single event for specific date") + else: + # No specific date - use num_days (existing behavior) + iterations = num_days + + for day in range(iterations): + # Start from current time (like standard dummy) instead of midnight + # This ensures programs appear in the guide's current viewing window + day_start = now + timedelta(days=day) + day_end = day_start + timedelta(days=1) + + if time_info: + # We have an extracted 
event time - this is when the MAIN event starts + # The extracted time is in the SOURCE timezone (e.g., 8PM ET) + # We need to convert it to UTC for storage + + # Determine which date to use + if date_info: + # Use the extracted date from the channel title + current_date = datetime( + date_info['year'], + date_info['month'], + date_info['day'] + ).date() + logger.debug(f"Using extracted date: {current_date}") + else: + # No date extracted, use day offset from current time in SOURCE timezone + # This ensures we calculate "today" in the event's timezone, not UTC + # For example: 8:30 PM Central (1:30 AM UTC next day) for a 10 PM ET event + # should use today's date in ET, not tomorrow's date in UTC + now_in_source_tz = now.astimezone(source_tz) + current_date = (now_in_source_tz + timedelta(days=day)).date() + logger.debug(f"No date extracted, using day offset in {source_tz}: {current_date}") + + # Create a naive datetime (no timezone info) representing the event in source timezone + event_start_naive = datetime.combine( + current_date, + datetime.min.time().replace( + hour=time_info['hour'], + minute=time_info['minute'] + ) + ) + + # Use pytz to localize the naive datetime to the source timezone + # This automatically handles DST! + try: + event_start_local = source_tz.localize(event_start_naive) + # Convert to UTC + event_start_utc = event_start_local.astimezone(pytz.utc) + logger.debug(f"Converted {event_start_local} to UTC: {event_start_utc}") + except Exception as e: + logger.error(f"Error localizing time to {source_tz}: {e}") + # Fallback: treat as UTC + event_start_utc = django_timezone.make_aware(event_start_naive, pytz.utc) + + event_end_utc = event_start_utc + timedelta(minutes=program_duration) + + # Pre-generate the main event title and description for reuse + if title_template: + main_event_title = format_template(title_template, all_groups) + else: + title_parts = [] + if 'league' in all_groups and all_groups['league']: + title_parts.append(all_groups['league']) + if 'team1' in all_groups and 'team2' in all_groups: + title_parts.append(f"{all_groups['team1']} vs {all_groups['team2']}") + elif 'title' in all_groups and all_groups['title']: + title_parts.append(all_groups['title']) + main_event_title = ' - '.join(title_parts) if title_parts else channel_name + + if description_template: + main_event_description = format_template(description_template, all_groups) + else: + main_event_description = main_event_title + + + + # Determine if this day is before, during, or after the event + # Event only happens on day 0 (first day) + is_event_day = (day == 0) + + if is_event_day and not event_happened: + # This is THE day the event happens + # Fill programs BEFORE the event + current_time = day_start + + while current_time < event_start_utc: + program_start_utc = current_time + program_end_utc = min(current_time + timedelta(minutes=program_duration), event_start_utc) + + # Use custom upcoming templates if provided, otherwise use defaults + if upcoming_title_template: + upcoming_title = format_template(upcoming_title_template, all_groups) + else: + upcoming_title = main_event_title + + if upcoming_description_template: + upcoming_description = format_template(upcoming_description_template, all_groups) + else: + upcoming_description = f"Upcoming: {main_event_description}" + + # Build custom_properties for upcoming programs (only date, no category/live) + program_custom_properties = {} + + # Add date if requested (YYYY-MM-DD format from start time in event timezone) + if include_date: + # 
Convert UTC time to event timezone for date calculation + local_time = program_start_utc.astimezone(source_tz) + date_str = local_time.strftime('%Y-%m-%d') + program_custom_properties['date'] = date_str + + # Add program poster URL if provided + if program_poster_url: + program_custom_properties['icon'] = program_poster_url + + programs.append({ + "channel_id": channel_id, + "start_time": program_start_utc, + "end_time": program_end_utc, + "title": upcoming_title, + "description": upcoming_description, + "custom_properties": program_custom_properties, + "channel_logo_url": channel_logo_url, # Pass channel logo for EPG generation + }) + + current_time += timedelta(minutes=program_duration) + + # Add the MAIN EVENT at the extracted time + # Build custom_properties for main event (includes category and live) + main_event_custom_properties = {} + + # Add categories if provided + if categories: + main_event_custom_properties['categories'] = categories + + # Add date if requested (YYYY-MM-DD format from start time in event timezone) + if include_date: + # Convert UTC time to event timezone for date calculation + local_time = event_start_utc.astimezone(source_tz) + date_str = local_time.strftime('%Y-%m-%d') + main_event_custom_properties['date'] = date_str + + # Add live flag if requested + if include_live: + main_event_custom_properties['live'] = True + + # Add new flag if requested + if include_new: + main_event_custom_properties['new'] = True + + # Add program poster URL if provided + if program_poster_url: + main_event_custom_properties['icon'] = program_poster_url + + programs.append({ + "channel_id": channel_id, + "start_time": event_start_utc, + "end_time": event_end_utc, + "title": main_event_title, + "description": main_event_description, + "custom_properties": main_event_custom_properties, + "channel_logo_url": channel_logo_url, # Pass channel logo for EPG generation + }) + + event_happened = True + + # Fill programs AFTER the event until end of day + current_time = event_end_utc + + while current_time < day_end: + program_start_utc = current_time + program_end_utc = min(current_time + timedelta(minutes=program_duration), day_end) + + # Use custom ended templates if provided, otherwise use defaults + if ended_title_template: + ended_title = format_template(ended_title_template, all_groups) + else: + ended_title = main_event_title + + if ended_description_template: + ended_description = format_template(ended_description_template, all_groups) + else: + ended_description = f"Ended: {main_event_description}" + + # Build custom_properties for ended programs (only date, no category/live) + program_custom_properties = {} + + # Add date if requested (YYYY-MM-DD format from start time in event timezone) + if include_date: + # Convert UTC time to event timezone for date calculation + local_time = program_start_utc.astimezone(source_tz) + date_str = local_time.strftime('%Y-%m-%d') + program_custom_properties['date'] = date_str + + # Add program poster URL if provided + if program_poster_url: + program_custom_properties['icon'] = program_poster_url + + programs.append({ + "channel_id": channel_id, + "start_time": program_start_utc, + "end_time": program_end_utc, + "title": ended_title, + "description": ended_description, + "custom_properties": program_custom_properties, + "channel_logo_url": channel_logo_url, # Pass channel logo for EPG generation + }) + + current_time += timedelta(minutes=program_duration) + else: + # This day is either before the event (future days) or after the event happened + 
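# --- Illustrative aside (not part of the patch) ---------------------------
# A minimal sketch of the localize-then-convert step used for event_start_utc
# above, reproducing the docstring's "8:00PM ET" example. pytz picks EDT or
# EST automatically from the date, so no manual DST offset is needed.
# The concrete dates below are chosen only for illustration.
import pytz
from datetime import datetime

eastern = pytz.timezone('US/Eastern')

oct_naive = datetime(2024, 10, 15, 20, 0)   # 8:00 PM during DST
jan_naive = datetime(2024, 1, 15, 20, 0)    # 8:00 PM outside DST

print(eastern.localize(oct_naive).astimezone(pytz.utc))  # 2024-10-16 00:00:00+00:00 (UTC-4)
print(eastern.localize(jan_naive).astimezone(pytz.utc))  # 2024-01-16 01:00:00+00:00 (UTC-5)
# --------------------------------------------------------------------------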
# Fill entire day with appropriate message + current_time = day_start + + # If event already happened, all programs show "Ended" + # If event hasn't happened yet (shouldn't occur with day 0 logic), show "Upcoming" + is_ended = event_happened + + while current_time < day_end: + program_start_utc = current_time + program_end_utc = min(current_time + timedelta(minutes=program_duration), day_end) + + # Use custom templates based on whether event has ended or is upcoming + if is_ended: + if ended_title_template: + program_title = format_template(ended_title_template, all_groups) + else: + program_title = main_event_title + + if ended_description_template: + program_description = format_template(ended_description_template, all_groups) + else: + program_description = f"Ended: {main_event_description}" + else: + if upcoming_title_template: + program_title = format_template(upcoming_title_template, all_groups) + else: + program_title = main_event_title + + if upcoming_description_template: + program_description = format_template(upcoming_description_template, all_groups) + else: + program_description = f"Upcoming: {main_event_description}" + + # Build custom_properties (only date for upcoming/ended filler programs) + program_custom_properties = {} + + # Add date if requested (YYYY-MM-DD format from start time in event timezone) + if include_date: + # Convert UTC time to event timezone for date calculation + local_time = program_start_utc.astimezone(source_tz) + date_str = local_time.strftime('%Y-%m-%d') + program_custom_properties['date'] = date_str + + # Add program poster URL if provided + if program_poster_url: + program_custom_properties['icon'] = program_poster_url + + programs.append({ + "channel_id": channel_id, + "start_time": program_start_utc, + "end_time": program_end_utc, + "title": program_title, + "description": program_description, + "custom_properties": program_custom_properties, + "channel_logo_url": channel_logo_url, + }) + + current_time += timedelta(minutes=program_duration) + else: + # No extracted time - fill entire day with regular intervals + # day_start and day_end are already in UTC, so no conversion needed + programs_per_day = max(1, int(24 / (program_duration / 60))) + + for program_num in range(programs_per_day): + program_start_utc = day_start + timedelta(minutes=program_num * program_duration) + program_end_utc = program_start_utc + timedelta(minutes=program_duration) + + if title_template: + title = format_template(title_template, all_groups) + else: + title_parts = [] + if 'league' in all_groups and all_groups['league']: + title_parts.append(all_groups['league']) + if 'team1' in all_groups and 'team2' in all_groups: + title_parts.append(f"{all_groups['team1']} vs {all_groups['team2']}") + elif 'title' in all_groups and all_groups['title']: + title_parts.append(all_groups['title']) + title = ' - '.join(title_parts) if title_parts else channel_name + + if description_template: + description = format_template(description_template, all_groups) + else: + description = title + + # Build custom_properties for this program + program_custom_properties = {} + + # Add categories if provided + if categories: + program_custom_properties['categories'] = categories + + # Add date if requested (YYYY-MM-DD format from start time in event timezone) + if include_date: + # Convert UTC time to event timezone for date calculation + local_time = program_start_utc.astimezone(source_tz) + date_str = local_time.strftime('%Y-%m-%d') + program_custom_properties['date'] = date_str + + # Add 
live flag if requested + if include_live: + program_custom_properties['live'] = True + + # Add new flag if requested + if include_new: + program_custom_properties['new'] = True + + # Add program poster URL if provided + if program_poster_url: + program_custom_properties['icon'] = program_poster_url + + programs.append({ + "channel_id": channel_id, + "start_time": program_start_utc, + "end_time": program_end_utc, + "title": title, + "description": description, + "custom_properties": program_custom_properties, + "channel_logo_url": channel_logo_url, # Pass channel logo for EPG generation + }) + + logger.info(f"Generated {len(programs)} custom dummy programs for {channel_name}") + return programs + + +def generate_dummy_epg( + channel_id, channel_name, xml_lines=None, num_days=1, program_length_hours=4 +): + """ + Generate dummy EPG programs for channels without EPG data. + Creates program blocks for a specified number of days. + + Args: + channel_id: The channel ID to use in the program entries + channel_name: The name of the channel to use in program titles + xml_lines: Optional list to append lines to, otherwise returns new list + num_days: Number of days to generate EPG data for (default: 1) + program_length_hours: Length of each program block in hours (default: 4) + + Returns: + List of XML lines for the dummy EPG entries + """ + if xml_lines is None: + xml_lines = [] + + for program in generate_dummy_programs(channel_id, channel_name, num_days=1, program_length_hours=4): + # Format times in XMLTV format + start_str = program['start_time'].strftime("%Y%m%d%H%M%S %z") + stop_str = program['end_time'].strftime("%Y%m%d%H%M%S %z") + + # Create program entry with escaped channel name + xml_lines.append( + f' ' + ) + xml_lines.append(f" {html.escape(program['title'])}") + xml_lines.append(f" {html.escape(program['description'])}") + + # Add custom_properties if present + custom_data = program.get('custom_properties', {}) + + # Categories + if 'categories' in custom_data: + for cat in custom_data['categories']: + xml_lines.append(f" {html.escape(cat)}") + + # Date tag + if 'date' in custom_data: + xml_lines.append(f" {html.escape(custom_data['date'])}") + + # Live tag + if custom_data.get('live', False): + xml_lines.append(f" ") + + # New tag + if custom_data.get('new', False): + xml_lines.append(f" ") + + xml_lines.append(f" ") return xml_lines -def generate_epg(request, profile_name=None): + +def generate_epg(request, profile_name=None, user=None): """ - Dynamically generate an XMLTV (EPG) file using the new EPGData/ProgramData models. + Dynamically generate an XMLTV (EPG) file using streaming response to handle keep-alives. Since the EPG data is stored independently of Channels, we group programmes by their associated EPGData record. - This version does not filter by time, so it includes the entire EPG saved in the DB. + This version filters data based on the 'days' parameter and sends keep-alives during processing. 
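    Illustrative usage note (not part of the original docstring): the handler
    below reads a few optional query parameters, so a request might look like
    the following (the /output/epg path is assumed here for illustration):

        GET /output/epg?days=7                  # only the next 7 days of programmes
        GET /output/epg?days=0                  # 0 (the default) = everything stored
        GET /output/epg?cachedlogos=false       # prefer direct logo URLs over the cache
        GET /output/epg?tvg_id_source=tvg_id    # channel ids from tvg_id instead of channel number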
""" - xml_lines = [] - xml_lines.append('') - xml_lines.append('') + # Check cache for recent identical request (helps with double-GET from browsers) + from django.core.cache import cache + cache_params = f"{profile_name or 'all'}:{user.username if user else 'anonymous'}:{request.GET.urlencode()}" + content_cache_key = f"epg_content:{cache_params}" - if profile_name is not None: - channel_profile = ChannelProfile.objects.get(name=profile_name) - channels = Channel.objects.filter( - channelprofilemembership__channel_profile=channel_profile, - channelprofilemembership__enabled=True + cached_content = cache.get(content_cache_key) + if cached_content: + logger.debug("Serving EPG from cache") + response = HttpResponse(cached_content, content_type="application/xml") + response["Content-Disposition"] = 'attachment; filename="Dispatcharr.xml"' + response["Cache-Control"] = "no-cache" + return response + + def epg_generator(): + """Generator function that yields EPG data with keep-alives during processing""" + # Send initial HTTP headers as comments (these will be ignored by XML parsers but keep connection alive) + + xml_lines = [] + xml_lines.append('') + xml_lines.append( + '' ) - else: - channels = Channel.objects.all() - # Retrieve all active channels - for channel in channels: - channel_id = channel.channel_number or channel.id - display_name = channel.epg_data.name if channel.epg_data else channel.name - xml_lines.append(f' ') - xml_lines.append(f' {html.escape(display_name)}') + # Get channels based on user/profile + if user is not None: + if user.user_level == 0: + user_profile_count = user.channel_profiles.count() - # Add channel logo if available - if channel.logo: - logo_url = request.build_absolute_uri(reverse('api:channels:logo-cache', args=[channel.logo.id])) - xml_lines.append(f' ') - - xml_lines.append(' ') - - for channel in channels: - channel_id = channel.channel_number or channel.id - display_name = channel.epg_data.name if channel.epg_data else channel.name - if not channel.epg_data: - xml_lines = xml_lines + generate_dummy_epg(display_name, channel_id) + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channels based on user_level + channels = Channel.objects.filter(user_level__lte=user.user_level).order_by("channel_number") + else: + # User has specific limited profiles assigned + filters = { + "channelprofilemembership__enabled": True, + "user_level__lte": user.user_level, + "channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + channels = Channel.objects.filter(**filters).distinct().order_by("channel_number") + else: + channels = Channel.objects.filter(user_level__lte=user.user_level).order_by( + "channel_number" + ) else: - programs = channel.epg_data.programs.all() - for prog in programs: - start_str = prog.start_time.strftime("%Y%m%d%H%M%S %z") - stop_str = prog.end_time.strftime("%Y%m%d%H%M%S %z") - xml_lines.append(f' ') - xml_lines.append(f' {html.escape(prog.title)}') + if profile_name is not None: + try: + channel_profile = ChannelProfile.objects.get(name=profile_name) + except ChannelProfile.DoesNotExist: + logger.warning("Requested channel profile (%s) during epg generation does not exist", profile_name) + raise Http404(f"Channel profile '{profile_name}' not found") + channels = Channel.objects.filter( + channelprofilemembership__channel_profile=channel_profile, + channelprofilemembership__enabled=True, + ).order_by("channel_number") + else: + 
channels = Channel.objects.all().order_by("channel_number") - # Add subtitle if available - if prog.sub_title: - xml_lines.append(f' {html.escape(prog.sub_title)}') + # Check if the request wants to use direct logo URLs instead of cache + use_cached_logos = request.GET.get('cachedlogos', 'true').lower() != 'false' - # Add description if available - if prog.description: - xml_lines.append(f' {html.escape(prog.description)}') + # Get the source to use for tvg-id value + # Options: 'channel_number' (default), 'tvg_id', 'gracenote' + tvg_id_source = request.GET.get('tvg_id_source', 'channel_number').lower() - # Process custom properties if available - if prog.custom_properties: - try: - import json - custom_data = json.loads(prog.custom_properties) + # Get the number of days for EPG data + try: + # Default to 0 days (everything) for real EPG if not specified + days_param = request.GET.get('days', '0') + num_days = int(days_param) + # Set reasonable limits + num_days = max(0, min(num_days, 365)) # Between 0 and 365 days + except ValueError: + num_days = 0 # Default to all data if invalid value - # Add categories if available - if 'categories' in custom_data and custom_data['categories']: - for category in custom_data['categories']: - xml_lines.append(f' {html.escape(category)}') + # For dummy EPG, use either the specified value or default to 3 days + dummy_days = num_days if num_days > 0 else 3 - # Handle episode numbering - multiple formats supported - # Standard episode number if available - if 'episode' in custom_data: - xml_lines.append(f' E{custom_data["episode"]}') + # Calculate cutoff date for EPG data filtering (only if days > 0) + now = django_timezone.now() + cutoff_date = now + timedelta(days=num_days) if num_days > 0 else None - # Handle onscreen episode format (like S06E128) - if 'onscreen_episode' in custom_data: - xml_lines.append(f' {html.escape(custom_data["onscreen_episode"])}') + # Build collision-free channel number mapping for XC clients (if user is authenticated) + # XC clients require integer channel numbers, so we need to ensure no conflicts + channel_num_map = {} + if user is not None: + # This is an XC client - build collision-free mapping + used_numbers = set() - # Add season and episode numbers in xmltv_ns format if available - if 'season' in custom_data and 'episode' in custom_data: - season = int(custom_data['season']) - 1 if str(custom_data['season']).isdigit() else 0 - episode = int(custom_data['episode']) - 1 if str(custom_data['episode']).isdigit() else 0 - xml_lines.append(f' {season}.{episode}.') + # First pass: assign integers for channels that already have integer numbers + for channel in channels: + if channel.channel_number == int(channel.channel_number): + num = int(channel.channel_number) + channel_num_map[channel.id] = num + used_numbers.add(num) - # Add rating if available - if 'rating' in custom_data: - rating_system = custom_data.get('rating_system', 'TV Parental Guidelines') - xml_lines.append(f' ') - xml_lines.append(f' {html.escape(custom_data["rating"])}') - xml_lines.append(f' ') + # Second pass: assign integers for channels with float numbers + for channel in channels: + if channel.channel_number != int(channel.channel_number): + candidate = int(channel.channel_number) + while candidate in used_numbers: + candidate += 1 + channel_num_map[channel.id] = candidate + used_numbers.add(candidate) - # Add actors/directors/writers if available - if 'credits' in custom_data: - xml_lines.append(f' ') - for role, people in custom_data['credits'].items(): 
- if isinstance(people, list): - for person in people: - xml_lines.append(f' <{role}>{html.escape(person)}') + # Process channels for the section + for channel in channels: + # For XC clients (user is not None), use collision-free integer mapping + # For regular clients (user is None), use original formatting logic + if user is not None: + # XC client - use collision-free integer + formatted_channel_number = channel_num_map[channel.id] + else: + # Regular client - format channel number as integer if it has no decimal component + if channel.channel_number is not None: + if channel.channel_number == int(channel.channel_number): + formatted_channel_number = int(channel.channel_number) + else: + formatted_channel_number = channel.channel_number + else: + formatted_channel_number = "" + + # Determine the channel ID based on the selected source + if tvg_id_source == 'tvg_id' and channel.tvg_id: + channel_id = channel.tvg_id + elif tvg_id_source == 'gracenote' and channel.tvc_guide_stationid: + channel_id = channel.tvc_guide_stationid + else: + # Default to channel number (original behavior) + channel_id = str(formatted_channel_number) if formatted_channel_number != "" else str(channel.id) + + # Add channel logo if available + tvg_logo = "" + + # Check if this is a custom dummy EPG with channel logo URL template + if channel.epg_data and channel.epg_data.epg_source and channel.epg_data.epg_source.source_type == 'dummy': + epg_source = channel.epg_data.epg_source + if epg_source.custom_properties: + custom_props = epg_source.custom_properties + channel_logo_url_template = custom_props.get('channel_logo_url', '') + + if channel_logo_url_template: + # Determine which name to use for pattern matching (same logic as program generation) + pattern_match_name = channel.name + name_source = custom_props.get('name_source') + + if name_source == 'stream': + stream_index = custom_props.get('stream_index', 1) - 1 + channel_streams = channel.streams.all().order_by('channelstream__order') + + if channel_streams.exists() and 0 <= stream_index < channel_streams.count(): + stream = list(channel_streams)[stream_index] + pattern_match_name = stream.name + + # Try to extract groups from the channel/stream name and build the logo URL + title_pattern = custom_props.get('title_pattern', '') + if title_pattern: + try: + # Convert PCRE/JavaScript named groups to Python format + title_pattern = regex.sub(r'\(\?<(?![=!])([^>]+)>', r'(?P<\1>', title_pattern) + title_regex = regex.compile(title_pattern) + title_match = title_regex.search(pattern_match_name) + + if title_match: + groups = title_match.groupdict() + + # Add normalized versions of all groups for cleaner URLs + for key, value in list(groups.items()): + if value: + # Remove all non-alphanumeric characters and convert to lowercase + normalized = regex.sub(r'[^a-zA-Z0-9\s]', '', str(value)) + normalized = regex.sub(r'\s+', '', normalized).lower() + groups[f'{key}_normalize'] = normalized + + # Format the logo URL template with the matched groups (with URL encoding) + from urllib.parse import quote + for key, value in groups.items(): + if value: + encoded_value = quote(str(value), safe='') + channel_logo_url_template = channel_logo_url_template.replace(f'{{{key}}}', encoded_value) + else: + channel_logo_url_template = channel_logo_url_template.replace(f'{{{key}}}', '') + tvg_logo = channel_logo_url_template + logger.debug(f"Built channel logo URL from template: {tvg_logo}") + except Exception as e: + logger.warning(f"Failed to build channel logo URL for 
{channel.name}: {e}") + + # If no custom dummy logo, use regular logo logic + if not tvg_logo and channel.logo: + if use_cached_logos: + # Use cached logo as before + tvg_logo = build_absolute_uri_with_port(request, reverse('api:channels:logo-cache', args=[channel.logo.id])) + else: + # Try to find direct logo URL from channel's streams + direct_logo = channel.logo.url if channel.logo.url.startswith(('http://', 'https://')) else None + # If direct logo found, use it; otherwise fall back to cached version + if direct_logo: + tvg_logo = direct_logo + else: + tvg_logo = build_absolute_uri_with_port(request, reverse('api:channels:logo-cache', args=[channel.logo.id])) + display_name = channel.name + xml_lines.append(f' ') + xml_lines.append(f' {html.escape(display_name)}') + xml_lines.append(f' ') + xml_lines.append(" ") + + # Send all channel definitions + channel_xml = '\n'.join(xml_lines) + '\n' + yield channel_xml + xml_lines = [] # Clear to save memory + + # Process programs for each channel + for channel in channels: + + # Use the same channel ID determination for program entries + if tvg_id_source == 'tvg_id' and channel.tvg_id: + channel_id = channel.tvg_id + elif tvg_id_source == 'gracenote' and channel.tvc_guide_stationid: + channel_id = channel.tvc_guide_stationid + else: + # For XC clients (user is not None), use collision-free integer mapping + # For regular clients (user is None), use original formatting logic + if user is not None: + # XC client - use collision-free integer from map + formatted_channel_number = channel_num_map[channel.id] + else: + # Regular client - format channel number as before + if channel.channel_number is not None: + if channel.channel_number == int(channel.channel_number): + formatted_channel_number = int(channel.channel_number) + else: + formatted_channel_number = channel.channel_number + else: + formatted_channel_number = "" + # Default to channel number + channel_id = str(formatted_channel_number) if formatted_channel_number != "" else str(channel.id) + + # Use EPG data name for display, but channel name for pattern matching + display_name = channel.epg_data.name if channel.epg_data else channel.name + # For dummy EPG pattern matching, determine which name to use + pattern_match_name = channel.name + + # Check if we should use stream name instead of channel name + if channel.epg_data and channel.epg_data.epg_source: + epg_source = channel.epg_data.epg_source + if epg_source.custom_properties: + custom_props = epg_source.custom_properties + name_source = custom_props.get('name_source') + + if name_source == 'stream': + stream_index = custom_props.get('stream_index', 1) - 1 + channel_streams = channel.streams.all().order_by('channelstream__order') + + if channel_streams.exists() and 0 <= stream_index < channel_streams.count(): + stream = list(channel_streams)[stream_index] + pattern_match_name = stream.name + logger.debug(f"Using stream name for parsing: {pattern_match_name} (stream index: {stream_index})") + else: + logger.warning(f"Stream index {stream_index} not found for channel {channel.name}, falling back to channel name") + + if not channel.epg_data: + # Use the enhanced dummy EPG generation function with defaults + program_length_hours = 4 # Default to 4-hour program blocks + dummy_programs = generate_dummy_programs( + channel_id, pattern_match_name, + num_days=dummy_days, + program_length_hours=program_length_hours, + epg_source=None + ) + + for program in dummy_programs: + # Format times in XMLTV format + start_str = 
program['start_time'].strftime("%Y%m%d%H%M%S %z") + stop_str = program['end_time'].strftime("%Y%m%d%H%M%S %z") + + # Create program entry with escaped channel name + yield f' \n' + yield f" {html.escape(program['title'])}\n" + yield f" {html.escape(program['description'])}\n" + + # Add custom_properties if present + custom_data = program.get('custom_properties', {}) + + # Categories + if 'categories' in custom_data: + for cat in custom_data['categories']: + yield f" {html.escape(cat)}\n" + + # Date tag + if 'date' in custom_data: + yield f" {html.escape(custom_data['date'])}\n" + + # Live tag + if custom_data.get('live', False): + yield f" \n" + + # New tag + if custom_data.get('new', False): + yield f" \n" + + # Icon/poster URL + if 'icon' in custom_data: + yield f" \n" + + yield f" \n" + + else: + # Check if this is a dummy EPG with no programs (generate on-demand) + if channel.epg_data.epg_source and channel.epg_data.epg_source.source_type == 'dummy': + # This is a custom dummy EPG - check if it has programs + if not channel.epg_data.programs.exists(): + # No programs stored, generate on-demand using custom patterns + # Use actual channel name for pattern matching + program_length_hours = 4 + dummy_programs = generate_dummy_programs( + channel_id, pattern_match_name, + num_days=dummy_days, + program_length_hours=program_length_hours, + epg_source=channel.epg_data.epg_source + ) + + for program in dummy_programs: + start_str = program['start_time'].strftime("%Y%m%d%H%M%S %z") + stop_str = program['end_time'].strftime("%Y%m%d%H%M%S %z") + + yield f' \n' + yield f" {html.escape(program['title'])}\n" + yield f" {html.escape(program['description'])}\n" + + # Add custom_properties if present + custom_data = program.get('custom_properties', {}) + + # Categories + if 'categories' in custom_data: + for cat in custom_data['categories']: + yield f" {html.escape(cat)}\n" + + # Date tag + if 'date' in custom_data: + yield f" {html.escape(custom_data['date'])}\n" + + # Live tag + if custom_data.get('live', False): + yield f" \n" + + # New tag + if custom_data.get('new', False): + yield f" \n" + + # Icon/poster URL + if 'icon' in custom_data: + yield f" \n" + + yield f" \n" + + continue # Skip to next channel + + # For real EPG data - filter only if days parameter was specified + if num_days > 0: + programs_qs = channel.epg_data.programs.filter( + start_time__gte=now, + start_time__lt=cutoff_date + ).order_by('id') # Explicit ordering for consistent chunking + else: + # Return all programs if days=0 or not specified + programs_qs = channel.epg_data.programs.all().order_by('id') + + # Process programs in chunks to avoid cursor timeout issues + program_batch = [] + batch_size = 250 + chunk_size = 1000 # Fetch 1000 programs at a time from DB + + # Fetch chunks until no more results (avoids count() query) + offset = 0 + while True: + # Fetch a chunk of programs - this closes the cursor after fetching + program_chunk = list(programs_qs[offset:offset + chunk_size]) + + # Break if no more programs + if not program_chunk: + break + + # Process each program in the chunk + for prog in program_chunk: + start_str = prog.start_time.strftime("%Y%m%d%H%M%S %z") + stop_str = prog.end_time.strftime("%Y%m%d%H%M%S %z") + + program_xml = [f' '] + program_xml.append(f' {html.escape(prog.title)}') + + # Add subtitle if available + if prog.sub_title: + program_xml.append(f" {html.escape(prog.sub_title)}") + + # Add description if available + if prog.description: + program_xml.append(f" {html.escape(prog.description)}") + + 
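# --- Illustrative aside (not part of the patch) ---------------------------
# Condensed sketch of the chunk-and-batch pattern used in this generator:
# slice the queryset in fixed-size chunks so no server-side cursor stays open
# across yields, and flush the accumulated XML lines in batches to keep the
# stream moving. `programs_qs` and `render_programme()` stand in for the real
# queryset and per-programme rendering here.
def stream_programmes(programs_qs, render_programme, chunk_size=1000, batch_size=250):
    batch, offset = [], 0
    while True:
        chunk = list(programs_qs[offset:offset + chunk_size])  # one bounded DB fetch
        if not chunk:
            break
        for prog in chunk:
            batch.extend(render_programme(prog))               # list of XML lines
            if len(batch) >= batch_size:
                yield '\n'.join(batch) + '\n'
                batch = []
        offset += chunk_size
    if batch:
        yield '\n'.join(batch) + '\n'
# --------------------------------------------------------------------------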
# Process custom properties if available + if prog.custom_properties: + custom_data = prog.custom_properties or {} + + # Add categories if available + if "categories" in custom_data and custom_data["categories"]: + for category in custom_data["categories"]: + program_xml.append(f" {html.escape(category)}") + + # Add keywords if available + if "keywords" in custom_data and custom_data["keywords"]: + for keyword in custom_data["keywords"]: + program_xml.append(f" {html.escape(keyword)}") + + # Handle episode numbering - multiple formats supported + # Prioritize onscreen_episode over standalone episode for onscreen system + if "onscreen_episode" in custom_data: + program_xml.append(f' {html.escape(custom_data["onscreen_episode"])}') + elif "episode" in custom_data: + program_xml.append(f' E{custom_data["episode"]}') + + # Handle dd_progid format + if 'dd_progid' in custom_data: + program_xml.append(f' {html.escape(custom_data["dd_progid"])}') + + # Handle external database IDs + for system in ['thetvdb.com', 'themoviedb.org', 'imdb.com']: + if f'{system}_id' in custom_data: + program_xml.append(f' {html.escape(custom_data[f"{system}_id"])}') + + # Add season and episode numbers in xmltv_ns format if available + if "season" in custom_data and "episode" in custom_data: + season = ( + int(custom_data["season"]) - 1 + if str(custom_data["season"]).isdigit() + else 0 + ) + episode = ( + int(custom_data["episode"]) - 1 + if str(custom_data["episode"]).isdigit() + else 0 + ) + program_xml.append(f' {season}.{episode}.') + + # Add language information + if "language" in custom_data: + program_xml.append(f' {html.escape(custom_data["language"])}') + + if "original_language" in custom_data: + program_xml.append(f' {html.escape(custom_data["original_language"])}') + + # Add length information + if "length" in custom_data and isinstance(custom_data["length"], dict): + length_value = custom_data["length"].get("value", "") + length_units = custom_data["length"].get("units", "minutes") + program_xml.append(f' {html.escape(str(length_value))}') + + # Add video information + if "video" in custom_data and isinstance(custom_data["video"], dict): + program_xml.append(" ") + + # Add audio information + if "audio" in custom_data and isinstance(custom_data["audio"], dict): + program_xml.append(" ") + + # Add subtitles information + if "subtitles" in custom_data and isinstance(custom_data["subtitles"], list): + for subtitle in custom_data["subtitles"]: + if isinstance(subtitle, dict): + subtitle_type = subtitle.get("type", "") + type_attr = f' type="{html.escape(subtitle_type)}"' if subtitle_type else "" + program_xml.append(f" ") + if "language" in subtitle: + program_xml.append(f" {html.escape(subtitle['language'])}") + program_xml.append(" ") + + # Add rating if available + if "rating" in custom_data: + rating_system = custom_data.get("rating_system", "TV Parental Guidelines") + program_xml.append(f' ') + program_xml.append(f' {html.escape(custom_data["rating"])}') + program_xml.append(f" ") + + # Add star ratings + if "star_ratings" in custom_data and isinstance(custom_data["star_ratings"], list): + for star_rating in custom_data["star_ratings"]: + if isinstance(star_rating, dict) and "value" in star_rating: + system_attr = f' system="{html.escape(star_rating["system"])}"' if "system" in star_rating else "" + program_xml.append(f" ") + program_xml.append(f" {html.escape(star_rating['value'])}") + program_xml.append(" ") + + # Add reviews + if "reviews" in custom_data and isinstance(custom_data["reviews"], list): 
+ for review in custom_data["reviews"]: + if isinstance(review, dict) and "content" in review: + review_type = review.get("type", "text") + attrs = [f'type="{html.escape(review_type)}"'] + if "source" in review: + attrs.append(f'source="{html.escape(review["source"])}"') + if "reviewer" in review: + attrs.append(f'reviewer="{html.escape(review["reviewer"])}"') + attr_str = " ".join(attrs) + program_xml.append(f' {html.escape(review["content"])}') + + # Add images + if "images" in custom_data and isinstance(custom_data["images"], list): + for image in custom_data["images"]: + if isinstance(image, dict) and "url" in image: + attrs = [] + for attr in ['type', 'size', 'orient', 'system']: + if attr in image: + attrs.append(f'{attr}="{html.escape(image[attr])}"') + attr_str = " " + " ".join(attrs) if attrs else "" + program_xml.append(f' {html.escape(image["url"])}') + + # Add enhanced credits handling + if "credits" in custom_data: + program_xml.append(" ") + credits = custom_data["credits"] + + # Handle different credit types + for role in ['director', 'writer', 'adapter', 'producer', 'composer', 'editor', 'presenter', 'commentator', 'guest']: + if role in credits: + people = credits[role] + if isinstance(people, list): + for person in people: + program_xml.append(f" <{role}>{html.escape(person)}") + else: + program_xml.append(f" <{role}>{html.escape(people)}") + + # Handle actors separately to include role and guest attributes + if "actor" in credits: + actors = credits["actor"] + if isinstance(actors, list): + for actor in actors: + if isinstance(actor, dict): + name = actor.get("name", "") + role_attr = f' role="{html.escape(actor["role"])}"' if "role" in actor else "" + guest_attr = ' guest="yes"' if actor.get("guest") else "" + program_xml.append(f" {html.escape(name)}") + else: + program_xml.append(f" {html.escape(actor)}") + else: + program_xml.append(f" {html.escape(actors)}") + + program_xml.append(" ") + + # Add program date if available (full date, not just year) + if "date" in custom_data: + program_xml.append(f' {html.escape(custom_data["date"])}') + + # Add country if available + if "country" in custom_data: + program_xml.append(f' {html.escape(custom_data["country"])}') + + # Add icon if available + if "icon" in custom_data: + program_xml.append(f' ') + + # Add special flags as proper tags with enhanced handling + if custom_data.get("previously_shown", False): + prev_shown_details = custom_data.get("previously_shown_details", {}) + attrs = [] + if "start" in prev_shown_details: + attrs.append(f'start="{html.escape(prev_shown_details["start"])}"') + if "channel" in prev_shown_details: + attrs.append(f'channel="{html.escape(prev_shown_details["channel"])}"') + attr_str = " " + " ".join(attrs) if attrs else "" + program_xml.append(f" ") + + if custom_data.get("premiere", False): + premiere_text = custom_data.get("premiere_text", "") + if premiere_text: + program_xml.append(f" {html.escape(premiere_text)}") else: - xml_lines.append(f' <{role}>{html.escape(people)}') - xml_lines.append(f' ') + program_xml.append(" ") - # Add program date/year if available - if 'year' in custom_data: - xml_lines.append(f' {html.escape(custom_data["year"])}') + if custom_data.get("last_chance", False): + last_chance_text = custom_data.get("last_chance_text", "") + if last_chance_text: + program_xml.append(f" {html.escape(last_chance_text)}") + else: + program_xml.append(" ") - # Add country if available - if 'country' in custom_data: - xml_lines.append(f' {html.escape(custom_data["country"])}') + if 
custom_data.get("new", False): + program_xml.append(" ") - # Add icon if available - if 'icon' in custom_data: - xml_lines.append(f' ') + if custom_data.get('live', False): + program_xml.append(' ') - # Add special flags as proper tags - if custom_data.get('previously_shown', False): - xml_lines.append(f' ') + program_xml.append(" ") - if custom_data.get('premiere', False): - xml_lines.append(f' ') + # Add to batch + program_batch.extend(program_xml) - if custom_data.get('new', False): - xml_lines.append(f' ') + # Send batch when full or send keep-alive + if len(program_batch) >= batch_size: + batch_xml = '\n'.join(program_batch) + '\n' + yield batch_xml + program_batch = [] - except Exception as e: - xml_lines.append(f' ') + # Move to next chunk + offset += chunk_size - xml_lines.append('
') + # Send remaining programs in batch + if program_batch: + batch_xml = '\n'.join(program_batch) + '\n' + yield batch_xml - xml_lines.append('
') - xml_content = "\n".join(xml_lines) + # Send final closing tag and completion message + yield "\n" - response = HttpResponse(xml_content, content_type="application/xml") - response['Content-Disposition'] = 'attachment; filename="epg.xml"' + # Log system event for EPG download after streaming completes (with deduplication based on client) + client_id, client_ip, user_agent = get_client_identifier(request) + event_cache_key = f"epg_download:{user.username if user else 'anonymous'}:{profile_name or 'all'}:{client_id}" + if not cache.get(event_cache_key): + log_system_event( + event_type='epg_download', + profile=profile_name or 'all', + user=user.username if user else 'anonymous', + channels=channels.count(), + client_ip=client_ip, + user_agent=user_agent, + ) + cache.set(event_cache_key, True, 2) # Prevent duplicate events for 2 seconds + + # Wrapper generator that collects content for caching + def caching_generator(): + collected_content = [] + for chunk in epg_generator(): + collected_content.append(chunk) + yield chunk + # After streaming completes, cache the full content + full_content = ''.join(collected_content) + cache.set(content_cache_key, full_content, 300) + logger.debug("Cached EPG content (%d bytes)", len(full_content)) + + # Return streaming response + response = StreamingHttpResponse( + streaming_content=caching_generator(), + content_type="application/xml" + ) + response["Content-Disposition"] = 'attachment; filename="Dispatcharr.xml"' + response["Cache-Control"] = "no-cache" return response + + +def xc_get_user(request): + username = request.GET.get("username") + password = request.GET.get("password") + + if not username or not password: + return None + + user = get_object_or_404(User, username=username) + custom_properties = user.custom_properties or {} + + if "xc_password" not in custom_properties: + return None + + if custom_properties["xc_password"] != password: + return None + + return user + + +def xc_get_info(request, full=False): + if not network_access_allowed(request, 'XC_API'): + return JsonResponse({'error': 'Forbidden'}, status=403) + + user = xc_get_user(request) + + if user is None: + return JsonResponse({'error': 'Unauthorized'}, status=401) + + raw_host = request.get_host() + if ":" in raw_host: + hostname, port = raw_host.split(":", 1) + else: + hostname = raw_host + port = "443" if request.is_secure() else "80" + + info = { + "user_info": { + "username": request.GET.get("username"), + "password": request.GET.get("password"), + "message": "Dispatcharr XC API", + "auth": 1, + "status": "Active", + "exp_date": str(int(time.time()) + (90 * 24 * 60 * 60)), + "max_connections": str(calculate_tuner_count(minimum=1, unlimited_default=50)), + "allowed_output_formats": [ + "ts", + ], + }, + "server_info": { + "url": hostname, + "server_protocol": request.scheme, + "port": port, + "timezone": get_localzone().key, + "timestamp_now": int(time.time()), + "time_now": datetime.now().strftime("%Y-%m-%d %H:%M:%S"), + "process": True, + }, + } + + if full == True: + info['categories'] = { + "series": [], + "movie": [], + "live": xc_get_live_categories(user), + } + info['available_channels'] = {channel["stream_id"]: channel for channel in xc_get_live_streams(request, user, request.GET.get("category_id"))} + + return info + + +def xc_player_api(request, full=False): + if not network_access_allowed(request, 'XC_API'): + return JsonResponse({'error': 'Forbidden'}, status=403) + + action = request.GET.get("action") + user = xc_get_user(request) + + if user is None: + return 
JsonResponse({'error': 'Unauthorized'}, status=401) + + if action == "get_live_categories": + return JsonResponse(xc_get_live_categories(user), safe=False) + elif action == "get_live_streams": + return JsonResponse(xc_get_live_streams(request, user, request.GET.get("category_id")), safe=False) + elif action == "get_short_epg": + return JsonResponse(xc_get_epg(request, user, short=True), safe=False) + elif action == "get_simple_data_table": + return JsonResponse(xc_get_epg(request, user, short=False), safe=False) + elif action == "get_vod_categories": + return JsonResponse(xc_get_vod_categories(user), safe=False) + elif action == "get_vod_streams": + return JsonResponse(xc_get_vod_streams(request, user, request.GET.get("category_id")), safe=False) + elif action == "get_series_categories": + return JsonResponse(xc_get_series_categories(user), safe=False) + elif action == "get_series": + return JsonResponse(xc_get_series(request, user, request.GET.get("category_id")), safe=False) + elif action == "get_series_info": + return JsonResponse(xc_get_series_info(request, user, request.GET.get("series_id")), safe=False) + elif action == "get_vod_info": + return JsonResponse(xc_get_vod_info(request, user, request.GET.get("vod_id")), safe=False) + else: + # For any other action (including get_account_info or unknown actions), + # return server_info/account_info to match provider behavior + server_info = xc_get_info(request) + return JsonResponse(server_info, safe=False) + + +def xc_panel_api(request): + if not network_access_allowed(request, 'XC_API'): + return JsonResponse({'error': 'Forbidden'}, status=403) + + user = xc_get_user(request) + + if user is None: + return JsonResponse({'error': 'Unauthorized'}, status=401) + + return JsonResponse(xc_get_info(request, True)) + + +def xc_get(request): + if not network_access_allowed(request, 'XC_API'): + # Log blocked M3U download + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + log_system_event( + event_type='m3u_blocked', + user=request.GET.get('username', 'unknown'), + reason='Network access denied (XC API)', + client_ip=client_ip, + user_agent=user_agent, + ) + return JsonResponse({'error': 'Forbidden'}, status=403) + + action = request.GET.get("action") + user = xc_get_user(request) + + if user is None: + # Log blocked M3U download due to invalid credentials + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + log_system_event( + event_type='m3u_blocked', + user=request.GET.get('username', 'unknown'), + reason='Invalid XC credentials', + client_ip=client_ip, + user_agent=user_agent, + ) + return JsonResponse({'error': 'Unauthorized'}, status=401) + + return generate_m3u(request, None, user) + + +def xc_xmltv(request): + if not network_access_allowed(request, 'XC_API'): + # Log blocked EPG download + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + log_system_event( + event_type='epg_blocked', + user=request.GET.get('username', 'unknown'), + reason='Network access denied (XC API)', + client_ip=client_ip, + user_agent=user_agent, + ) + return JsonResponse({'error': 'Forbidden'}, status=403) + + user = xc_get_user(request) + + if user is None: + # Log blocked EPG download due to invalid credentials + from core.utils import 
log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + log_system_event( + event_type='epg_blocked', + user=request.GET.get('username', 'unknown'), + reason='Invalid XC credentials', + client_ip=client_ip, + user_agent=user_agent, + ) + return JsonResponse({'error': 'Unauthorized'}, status=401) + + return generate_epg(request, None, user) + + +def xc_get_live_categories(user): + from django.db.models import Min + response = [] + + if user.user_level == 0: + user_profile_count = user.channel_profiles.count() + + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channel groups + channel_groups = ChannelGroup.objects.filter( + channels__isnull=False, channels__user_level__lte=user.user_level + ).distinct().annotate(min_channel_number=Min('channels__channel_number')).order_by('min_channel_number') + else: + # User has specific limited profiles assigned + filters = { + "channels__channelprofilemembership__enabled": True, + "channels__user_level": 0, + "channels__channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + channel_groups = ChannelGroup.objects.filter(**filters).distinct().annotate(min_channel_number=Min('channels__channel_number')).order_by('min_channel_number') + else: + channel_groups = ChannelGroup.objects.filter( + channels__isnull=False, channels__user_level__lte=user.user_level + ).distinct().annotate(min_channel_number=Min('channels__channel_number')).order_by('min_channel_number') + + for group in channel_groups: + response.append( + { + "category_id": str(group.id), + "category_name": group.name, + "parent_id": 0, + } + ) + + return response + + +def xc_get_live_streams(request, user, category_id=None): + streams = [] + + if user.user_level == 0: + user_profile_count = user.channel_profiles.count() + + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channels based on user_level + filters = {"user_level__lte": user.user_level} + if category_id is not None: + filters["channel_group__id"] = category_id + channels = Channel.objects.filter(**filters).order_by("channel_number") + else: + # User has specific limited profiles assigned + filters = { + "channelprofilemembership__enabled": True, + "user_level__lte": user.user_level, + "channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + if category_id is not None: + filters["channel_group__id"] = category_id + channels = Channel.objects.filter(**filters).distinct().order_by("channel_number") + else: + if not category_id: + channels = Channel.objects.filter(user_level__lte=user.user_level).order_by("channel_number") + else: + channels = Channel.objects.filter( + channel_group__id=category_id, user_level__lte=user.user_level + ).order_by("channel_number") + + # Build collision-free mapping for XC clients (which require integers) + # This ensures channels with float numbers don't conflict with existing integers + channel_num_map = {} # Maps channel.id -> integer channel number for XC + used_numbers = set() # Track all assigned integer channel numbers + + # First pass: assign integers for channels that already have integer numbers + for channel in channels: + if channel.channel_number == int(channel.channel_number): + # Already an integer, use it directly + num = int(channel.channel_number) + channel_num_map[channel.id] = num 
+ used_numbers.add(num) + + # Second pass: assign integers for channels with float numbers + # Find next available number to avoid collisions + for channel in channels: + if channel.channel_number != int(channel.channel_number): + # Has decimal component, need to find available integer + # Start from truncated value and increment until we find an unused number + candidate = int(channel.channel_number) + while candidate in used_numbers: + candidate += 1 + channel_num_map[channel.id] = candidate + used_numbers.add(candidate) + + # Build the streams list with the collision-free channel numbers + for channel in channels: + channel_num_int = channel_num_map[channel.id] + + streams.append( + { + "num": channel_num_int, + "name": channel.name, + "stream_type": "live", + "stream_id": channel.id, + "stream_icon": ( + None + if not channel.logo + else build_absolute_uri_with_port( + request, + reverse("api:channels:logo-cache", args=[channel.logo.id]) + ) + ), + "epg_channel_id": str(channel_num_int), + "added": int(channel.created_at.timestamp()), + "is_adult": 0, + "category_id": str(channel.channel_group.id), + "category_ids": [channel.channel_group.id], + "custom_sid": None, + "tv_archive": 0, + "direct_source": "", + "tv_archive_duration": 0, + } + ) + + return streams + + +def xc_get_epg(request, user, short=False): + channel_id = request.GET.get('stream_id') + if not channel_id: + raise Http404() + + channel = None + if user.user_level < 10: + user_profile_count = user.channel_profiles.count() + + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channels based on user_level + channel = Channel.objects.filter( + id=channel_id, + user_level__lte=user.user_level + ).first() + else: + # User has specific limited profiles assigned + filters = { + "id": channel_id, + "channelprofilemembership__enabled": True, + "user_level__lte": user.user_level, + "channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + channel = Channel.objects.filter(**filters).distinct().first() + + if not channel: + raise Http404() + else: + channel = get_object_or_404(Channel, id=channel_id) + + if not channel: + raise Http404() + + # Calculate the collision-free integer channel number for this channel + # This must match the logic in xc_get_live_streams to ensure consistency + # Get all channels in the same category for collision detection + category_channels = Channel.objects.filter( + channel_group=channel.channel_group + ).order_by("channel_number") + + channel_num_map = {} + used_numbers = set() + + # First pass: assign integers for channels that already have integer numbers + for ch in category_channels: + if ch.channel_number == int(ch.channel_number): + num = int(ch.channel_number) + channel_num_map[ch.id] = num + used_numbers.add(num) + + # Second pass: assign integers for channels with float numbers + for ch in category_channels: + if ch.channel_number != int(ch.channel_number): + candidate = int(ch.channel_number) + while candidate in used_numbers: + candidate += 1 + channel_num_map[ch.id] = candidate + used_numbers.add(candidate) + + # Get the mapped integer for this specific channel + channel_num_int = channel_num_map.get(channel.id, int(channel.channel_number)) + + limit = int(request.GET.get('limit', 4)) + if channel.epg_data: + # Check if this is a dummy EPG that generates on-demand + if channel.epg_data.epg_source and channel.epg_data.epg_source.source_type == 'dummy': + if not 
channel.epg_data.programs.exists(): + # Generate on-demand using custom patterns + programs = generate_dummy_programs( + channel_id=channel_id, + channel_name=channel.name, + epg_source=channel.epg_data.epg_source + ) + else: + # Has stored programs, use them + if short == False: + programs = channel.epg_data.programs.filter( + start_time__gte=django_timezone.now() + ).order_by('start_time') + else: + programs = channel.epg_data.programs.all().order_by('start_time')[:limit] + else: + # Regular EPG with stored programs + if short == False: + programs = channel.epg_data.programs.filter( + start_time__gte=django_timezone.now() + ).order_by('start_time') + else: + programs = channel.epg_data.programs.all().order_by('start_time')[:limit] + else: + # No EPG data assigned, generate default dummy + programs = generate_dummy_programs(channel_id=channel_id, channel_name=channel.name, epg_source=None) + + output = {"epg_listings": []} + + for program in programs: + title = program['title'] if isinstance(program, dict) else program.title + description = program['description'] if isinstance(program, dict) else program.description + + start = program["start_time"] if isinstance(program, dict) else program.start_time + end = program["end_time"] if isinstance(program, dict) else program.end_time + + # For database programs, use actual ID; for generated dummy programs, create synthetic ID + if isinstance(program, dict): + # Generated dummy program - create unique ID from channel + timestamp + program_id = str(abs(hash(f"{channel_id}_{int(start.timestamp())}"))) + else: + # Database program - use actual ID + program_id = str(program.id) + + # epg_id refers to the EPG source/channel mapping in XC panels + # Use the actual EPGData ID when available, otherwise fall back to 0 + epg_id = str(channel.epg_data.id) if channel.epg_data else "0" + + program_output = { + "id": program_id, + "epg_id": epg_id, + "title": base64.b64encode((title or "").encode()).decode(), + "lang": "", + "start": start.strftime("%Y-%m-%d %H:%M:%S"), + "end": end.strftime("%Y-%m-%d %H:%M:%S"), + "description": base64.b64encode((description or "").encode()).decode(), + "channel_id": str(channel_num_int), + "start_timestamp": str(int(start.timestamp())), + "stop_timestamp": str(int(end.timestamp())), + "stream_id": f"{channel_id}", + } + + if short == False: + program_output["now_playing"] = 1 if start <= django_timezone.now() <= end else 0 + program_output["has_archive"] = 0 + + output['epg_listings'].append(program_output) + + return output + + +def xc_get_vod_categories(user): + """Get VOD categories for XtreamCodes API""" + from apps.vod.models import VODCategory, M3UMovieRelation + + response = [] + + # All authenticated users get access to VOD from all active M3U accounts + categories = VODCategory.objects.filter( + category_type='movie', + m3umovierelation__m3u_account__is_active=True + ).distinct().order_by(Lower("name")) + + for category in categories: + response.append({ + "category_id": str(category.id), + "category_name": category.name, + "parent_id": 0, + }) + + return response + + +def xc_get_vod_streams(request, user, category_id=None): + """Get VOD streams (movies) for XtreamCodes API""" + from apps.vod.models import Movie, M3UMovieRelation + from django.db.models import Prefetch + + streams = [] + + # All authenticated users get access to VOD from all active M3U accounts + filters = {"m3u_relations__m3u_account__is_active": True} + + if category_id: + filters["m3u_relations__category_id"] = category_id + + # Optimize with 
prefetch_related to eliminate N+1 queries + # This loads all relations in a single query instead of one per movie + movies = Movie.objects.filter(**filters).select_related('logo').prefetch_related( + Prefetch( + 'm3u_relations', + queryset=M3UMovieRelation.objects.filter( + m3u_account__is_active=True + ).select_related('m3u_account', 'category').order_by('-m3u_account__priority', 'id'), + to_attr='active_relations' + ) + ).distinct() + + for movie in movies: + # Get the first (highest priority) relation from prefetched data + # This avoids the N+1 query problem entirely + if hasattr(movie, 'active_relations') and movie.active_relations: + relation = movie.active_relations[0] + else: + # Fallback - should rarely be needed with proper prefetching + continue + + streams.append({ + "num": movie.id, + "name": movie.name, + "stream_type": "movie", + "stream_id": movie.id, + "stream_icon": ( + None if not movie.logo + else build_absolute_uri_with_port( + request, + reverse("api:vod:vodlogo-cache", args=[movie.logo.id]) + ) + ), + #'stream_icon': movie.logo.url if movie.logo else '', + "rating": movie.rating or "0", + "rating_5based": round(float(movie.rating or 0) / 2, 2) if movie.rating else 0, + "added": str(int(movie.created_at.timestamp())), + "is_adult": 0, + "tmdb_id": movie.tmdb_id or "", + "imdb_id": movie.imdb_id or "", + "trailer": (movie.custom_properties or {}).get('trailer') or "", + "category_id": str(relation.category.id) if relation.category else "0", + "category_ids": [int(relation.category.id)] if relation.category else [], + "container_extension": relation.container_extension or "mp4", + "custom_sid": None, + "direct_source": "", + }) + + return streams + + +def xc_get_series_categories(user): + """Get series categories for XtreamCodes API""" + from apps.vod.models import VODCategory, M3USeriesRelation + + response = [] + + # All authenticated users get access to series from all active M3U accounts + categories = VODCategory.objects.filter( + category_type='series', + m3useriesrelation__m3u_account__is_active=True + ).distinct().order_by(Lower("name")) + + for category in categories: + response.append({ + "category_id": str(category.id), + "category_name": category.name, + "parent_id": 0, + }) + + return response + + +def xc_get_series(request, user, category_id=None): + """Get series list for XtreamCodes API""" + from apps.vod.models import M3USeriesRelation + + series_list = [] + + # All authenticated users get access to series from all active M3U accounts + filters = {"m3u_account__is_active": True} + + if category_id: + filters["category_id"] = category_id + + # Get series relations instead of series directly + series_relations = M3USeriesRelation.objects.filter(**filters).select_related( + 'series', 'series__logo', 'category', 'm3u_account' + ) + + for relation in series_relations: + series = relation.series + series_list.append({ + "num": relation.id, # Use relation ID + "name": series.name, + "series_id": relation.id, # Use relation ID + "cover": ( + None if not series.logo + else build_absolute_uri_with_port( + request, + reverse("api:vod:vodlogo-cache", args=[series.logo.id]) + ) + ), + "plot": series.description or "", + "cast": series.custom_properties.get('cast', '') if series.custom_properties else "", + "director": series.custom_properties.get('director', '') if series.custom_properties else "", + "genre": series.genre or "", + "release_date": series.custom_properties.get('release_date', str(series.year) if series.year else "") if series.custom_properties else 
(str(series.year) if series.year else ""), + "releaseDate": series.custom_properties.get('release_date', str(series.year) if series.year else "") if series.custom_properties else (str(series.year) if series.year else ""), + "last_modified": str(int(relation.updated_at.timestamp())), + "rating": str(series.rating or "0"), + "rating_5based": str(round(float(series.rating or 0) / 2, 2)) if series.rating else "0", + "backdrop_path": series.custom_properties.get('backdrop_path', []) if series.custom_properties else [], + "youtube_trailer": series.custom_properties.get('youtube_trailer', '') if series.custom_properties else "", + "episode_run_time": series.custom_properties.get('episode_run_time', '') if series.custom_properties else "", + "category_id": str(relation.category.id) if relation.category else "0", + "category_ids": [int(relation.category.id)] if relation.category else [], + }) + + return series_list + + +def xc_get_series_info(request, user, series_id): + """Get detailed series information including episodes""" + from apps.vod.models import M3USeriesRelation, M3UEpisodeRelation + + if not series_id: + raise Http404() + + # All authenticated users get access to series from all active M3U accounts + filters = {"id": series_id, "m3u_account__is_active": True} + + try: + series_relation = M3USeriesRelation.objects.select_related('series', 'series__logo').get(**filters) + series = series_relation.series + except M3USeriesRelation.DoesNotExist: + raise Http404() + + # Check if we need to refresh detailed info (similar to vod api_views pattern) + try: + should_refresh = ( + not series_relation.last_episode_refresh or + series_relation.last_episode_refresh < django_timezone.now() - timedelta(hours=24) + ) + + # Check if detailed data has been fetched + custom_props = series_relation.custom_properties or {} + episodes_fetched = custom_props.get('episodes_fetched', False) + detailed_fetched = custom_props.get('detailed_fetched', False) + + # Force refresh if episodes/details have never been fetched or time interval exceeded + if not episodes_fetched or not detailed_fetched or should_refresh: + from apps.vod.tasks import refresh_series_episodes + account = series_relation.m3u_account + if account and account.is_active: + refresh_series_episodes(account, series, series_relation.external_series_id) + # Refresh objects from database after task completion + series.refresh_from_db() + series_relation.refresh_from_db() + + except Exception as e: + logger.error(f"Error refreshing series data for relation {series_relation.id}: {str(e)}") + + # Get unique episodes for this series that have relations from any active M3U account + # We query episodes directly to avoid duplicates when multiple relations exist + # (e.g., same episode in different languages/qualities) + from apps.vod.models import Episode + episodes = Episode.objects.filter( + series=series, + m3u_relations__m3u_account__is_active=True + ).distinct().order_by('season_number', 'episode_number') + + # Group episodes by season + seasons = {} + for episode in episodes: + season_num = episode.season_number or 1 + if season_num not in seasons: + seasons[season_num] = [] + + # Get the highest priority relation for this episode (for container_extension, video/audio/bitrate) + from apps.vod.models import M3UEpisodeRelation + best_relation = M3UEpisodeRelation.objects.filter( + episode=episode, + m3u_account__is_active=True + ).select_related('m3u_account').order_by('-m3u_account__priority', 'id').first() + + video = audio = bitrate = None + 
container_extension = "mp4" + added_timestamp = str(int(episode.created_at.timestamp())) + + if best_relation: + container_extension = best_relation.container_extension or "mp4" + added_timestamp = str(int(best_relation.created_at.timestamp())) + if best_relation.custom_properties: + info = best_relation.custom_properties.get('info') + if info and isinstance(info, dict): + info_info = info.get('info') + if info_info and isinstance(info_info, dict): + video = info_info.get('video', {}) + audio = info_info.get('audio', {}) + bitrate = info_info.get('bitrate', 0) + + if video is None: + video = episode.custom_properties.get('video', {}) if episode.custom_properties else {} + if audio is None: + audio = episode.custom_properties.get('audio', {}) if episode.custom_properties else {} + if bitrate is None: + bitrate = episode.custom_properties.get('bitrate', 0) if episode.custom_properties else 0 + + seasons[season_num].append({ + "id": episode.id, + "season": season_num, + "episode_num": episode.episode_number or 0, + "title": episode.name, + "container_extension": container_extension, + "added": added_timestamp, + "custom_sid": None, + "direct_source": "", + "info": { + "id": int(episode.id), + "name": episode.name, + "overview": episode.description or "", + "crew": str(episode.custom_properties.get('crew', "") if episode.custom_properties else ""), + "directed_by": episode.custom_properties.get('director', '') if episode.custom_properties else "", + "imdb_id": episode.imdb_id or "", + "air_date": f"{episode.air_date}" if episode.air_date else "", + "backdrop_path": episode.custom_properties.get('backdrop_path', []) if episode.custom_properties else [], + "movie_image": episode.custom_properties.get('movie_image', '') if episode.custom_properties else "", + "rating": float(episode.rating or 0), + "release_date": f"{episode.air_date}" if episode.air_date else "", + "duration_secs": (episode.duration_secs or 0), + "duration": format_duration_hms(episode.duration_secs), + "video": video, + "audio": audio, + "bitrate": bitrate, + } + }) + + # Build response using potentially refreshed data + series_data = { + 'name': series.name, + 'description': series.description or '', + 'year': series.year, + 'genre': series.genre or '', + 'rating': series.rating or '0', + 'cast': '', + 'director': '', + 'youtube_trailer': '', + 'episode_run_time': '', + 'backdrop_path': [], + } + + # Add detailed info from custom_properties if available + try: + if series.custom_properties: + custom_data = series.custom_properties + series_data.update({ + 'cast': custom_data.get('cast', ''), + 'director': custom_data.get('director', ''), + 'youtube_trailer': custom_data.get('youtube_trailer', ''), + 'episode_run_time': custom_data.get('episode_run_time', ''), + 'backdrop_path': custom_data.get('backdrop_path', []), + }) + + # Check relation custom_properties for detailed_info + if series_relation.custom_properties and 'detailed_info' in series_relation.custom_properties: + detailed_info = series_relation.custom_properties['detailed_info'] + + # Override with detailed_info values where available + for key in ['name', 'description', 'year', 'genre', 'rating']: + if detailed_info.get(key): + series_data[key] = detailed_info[key] + + # Handle plot vs description + if detailed_info.get('plot'): + series_data['description'] = detailed_info['plot'] + elif detailed_info.get('description'): + series_data['description'] = detailed_info['description'] + + # Update additional fields from detailed info + series_data.update({ + 'cast': 
detailed_info.get('cast', series_data['cast']), + 'director': detailed_info.get('director', series_data['director']), + 'youtube_trailer': detailed_info.get('youtube_trailer', series_data['youtube_trailer']), + 'episode_run_time': detailed_info.get('episode_run_time', series_data['episode_run_time']), + 'backdrop_path': detailed_info.get('backdrop_path', series_data['backdrop_path']), + }) + + except Exception as e: + logger.error(f"Error parsing series custom_properties: {str(e)}") + + seasons_list = [ + {"season_number": int(season_num), "name": f"Season {season_num}"} + for season_num in sorted(seasons.keys(), key=lambda x: int(x)) + ] + + info = { + 'seasons': seasons_list, + "info": { + "name": series_data['name'], + "cover": ( + None if not series.logo + else build_absolute_uri_with_port( + request, + reverse("api:vod:vodlogo-cache", args=[series.logo.id]) + ) + ), + "plot": series_data['description'], + "cast": series_data['cast'], + "director": series_data['director'], + "genre": series_data['genre'], + "release_date": series.custom_properties.get('release_date', str(series.year) if series.year else "") if series.custom_properties else (str(series.year) if series.year else ""), + "releaseDate": series.custom_properties.get('release_date', str(series.year) if series.year else "") if series.custom_properties else (str(series.year) if series.year else ""), + "added": str(int(series_relation.created_at.timestamp())), + "last_modified": str(int(series_relation.updated_at.timestamp())), + "rating": str(series_data['rating']), + "rating_5based": str(round(float(series_data['rating'] or 0) / 2, 2)) if series_data['rating'] else "0", + "backdrop_path": series_data['backdrop_path'], + "youtube_trailer": series_data['youtube_trailer'], + "imdb": str(series.imdb_id) if series.imdb_id else "", + "tmdb": str(series.tmdb_id) if series.tmdb_id else "", + "episode_run_time": str(series_data['episode_run_time']), + "category_id": str(series_relation.category.id) if series_relation.category else "0", + "category_ids": [int(series_relation.category.id)] if series_relation.category else [], + }, + "episodes": dict(seasons) + } + return info + + +def xc_get_vod_info(request, user, vod_id): + """Get detailed VOD (movie) information""" + from apps.vod.models import M3UMovieRelation + from django.utils import timezone + from datetime import timedelta + + if not vod_id: + raise Http404() + + # All authenticated users get access to VOD from all active M3U accounts + filters = {"movie_id": vod_id, "m3u_account__is_active": True} + + try: + # Order by account priority to get the best relation when multiple exist + movie_relation = M3UMovieRelation.objects.select_related('movie', 'movie__logo').filter(**filters).order_by('-m3u_account__priority', 'id').first() + if not movie_relation: + raise Http404() + movie = movie_relation.movie + except (M3UMovieRelation.DoesNotExist, M3UMovieRelation.MultipleObjectsReturned): + raise Http404() + + # Initialize basic movie data first + movie_data = { + 'name': movie.name, + 'description': movie.description or '', + 'year': movie.year, + 'genre': movie.genre or '', + 'rating': movie.rating or 0, + 'tmdb_id': movie.tmdb_id or '', + 'imdb_id': movie.imdb_id or '', + 'director': '', + 'actors': '', + 'country': '', + 'release_date': '', + 'youtube_trailer': '', + 'backdrop_path': [], + 'cover_big': '', + 'bitrate': 0, + 'video': {}, + 'audio': {}, + } + + # Duplicate the provider_info logic for detailed information + try: + # Check if we need to refresh detailed info (same 
logic as provider_info) + should_refresh = ( + not movie_relation.last_advanced_refresh or + movie_relation.last_advanced_refresh < timezone.now() - timedelta(hours=24) + ) + + if should_refresh: + # Trigger refresh of detailed info + from apps.vod.tasks import refresh_movie_advanced_data + refresh_movie_advanced_data(movie_relation.id) + # Refresh objects from database after task completion + movie.refresh_from_db() + movie_relation.refresh_from_db() + + # Add detailed info from custom_properties if available + if movie.custom_properties: + custom_data = movie.custom_properties or {} + + # Extract detailed info + #detailed_info = custom_data.get('detailed_info', {}) + detailed_info = movie_relation.custom_properties.get('detailed_info', {}) + # Update movie_data with detailed info + movie_data.update({ + 'director': custom_data.get('director') or detailed_info.get('director', ''), + 'actors': custom_data.get('actors') or detailed_info.get('actors', ''), + 'country': custom_data.get('country') or detailed_info.get('country', ''), + 'release_date': custom_data.get('release_date') or detailed_info.get('release_date') or detailed_info.get('releasedate', ''), + 'youtube_trailer': custom_data.get('youtube_trailer') or detailed_info.get('youtube_trailer') or detailed_info.get('trailer', ''), + 'backdrop_path': custom_data.get('backdrop_path') or detailed_info.get('backdrop_path', []), + 'cover_big': detailed_info.get('cover_big', ''), + 'bitrate': detailed_info.get('bitrate', 0), + 'video': detailed_info.get('video', {}), + 'audio': detailed_info.get('audio', {}), + }) + + # Override with detailed_info values where available + for key in ['name', 'description', 'year', 'genre', 'rating', 'tmdb_id', 'imdb_id']: + if detailed_info.get(key): + movie_data[key] = detailed_info[key] + + # Handle plot vs description + if detailed_info.get('plot'): + movie_data['description'] = detailed_info['plot'] + elif detailed_info.get('description'): + movie_data['description'] = detailed_info['description'] + + except Exception as e: + logger.error(f"Failed to process movie data: {e}") + + # Transform API response to XtreamCodes format + info = { + "info": { + "name": movie_data.get('name', movie.name), + "o_name": movie_data.get('name', movie.name), + "cover_big": ( + None if not movie.logo + else build_absolute_uri_with_port( + request, + reverse("api:vod:vodlogo-cache", args=[movie.logo.id]) + ) + ), + "movie_image": ( + None if not movie.logo + else build_absolute_uri_with_port( + request, + reverse("api:vod:vodlogo-cache", args=[movie.logo.id]) + ) + ), + 'description': movie_data.get('description', ''), + 'plot': movie_data.get('description', ''), + 'year': movie_data.get('year', ''), + 'release_date': movie_data.get('release_date', ''), + 'genre': movie_data.get('genre', ''), + 'director': movie_data.get('director', ''), + 'actors': movie_data.get('actors', ''), + 'cast': movie_data.get('actors', ''), + 'country': movie_data.get('country', ''), + 'rating': movie_data.get('rating', 0), + 'imdb_id': movie_data.get('imdb_id', ''), + "tmdb_id": movie_data.get('tmdb_id', ''), + 'youtube_trailer': movie_data.get('youtube_trailer', ''), + 'backdrop_path': movie_data.get('backdrop_path', []), + 'cover': movie_data.get('cover_big', ''), + 'bitrate': movie_data.get('bitrate', 0), + 'video': movie_data.get('video', {}), + 'audio': movie_data.get('audio', {}), + }, + "movie_data": { + "stream_id": movie.id, + "name": movie.name, + "added": int(movie_relation.created_at.timestamp()), + "category_id": 
str(movie_relation.category.id) if movie_relation.category else "0",
+            "category_ids": [int(movie_relation.category.id)] if movie_relation.category else [],
+            "container_extension": movie_relation.container_extension or "mp4",
+            "custom_sid": None,
+            "direct_source": "",
+        }
+    }
+
+    return info
+
+
+def xc_movie_stream(request, username, password, stream_id, extension):
+    """Handle XtreamCodes movie streaming requests"""
+    from apps.vod.models import M3UMovieRelation
+
+    user = get_object_or_404(User, username=username)
+
+    custom_properties = user.custom_properties or {}
+
+    if "xc_password" not in custom_properties:
+        return JsonResponse({"error": "Invalid credentials"}, status=401)
+
+    if custom_properties["xc_password"] != password:
+        return JsonResponse({"error": "Invalid credentials"}, status=401)
+
+    # All authenticated users get access to VOD from all active M3U accounts
+    filters = {"movie_id": stream_id, "m3u_account__is_active": True}
+
+    try:
+        # Order by account priority to get the best relation when multiple exist
+        movie_relation = M3UMovieRelation.objects.select_related('movie').filter(**filters).order_by('-m3u_account__priority', 'id').first()
+        if not movie_relation:
+            return JsonResponse({"error": "Movie not found"}, status=404)
+    except (M3UMovieRelation.DoesNotExist, M3UMovieRelation.MultipleObjectsReturned):
+        return JsonResponse({"error": "Movie not found"}, status=404)
+
+    # Redirect to the VOD proxy endpoint
+    from django.http import HttpResponseRedirect
+    from django.urls import reverse
+
+    vod_url = reverse('proxy:vod_proxy:vod_stream', kwargs={
+        'content_type': 'movie',
+        'content_id': movie_relation.movie.uuid
+    })
+
+    return HttpResponseRedirect(vod_url)
+
+
+def xc_series_stream(request, username, password, stream_id, extension):
+    """Handle XtreamCodes series/episode streaming requests"""
+    from apps.vod.models import M3UEpisodeRelation
+
+    user = get_object_or_404(User, username=username)
+
+    custom_properties = user.custom_properties or {}
+
+    if "xc_password" not in custom_properties:
+        return JsonResponse({"error": "Invalid credentials"}, status=401)
+
+    if custom_properties["xc_password"] != password:
+        return JsonResponse({"error": "Invalid credentials"}, status=401)
+
+    # All authenticated users get access to series/episodes from all active M3U accounts
+    filters = {"episode_id": stream_id, "m3u_account__is_active": True}
+
+    try:
+        episode_relation = M3UEpisodeRelation.objects.select_related('episode').filter(**filters).order_by('-m3u_account__priority', 'id').first()
+    except M3UEpisodeRelation.DoesNotExist:
+        return JsonResponse({"error": "Episode not found"}, status=404)
+
+    # filter(...).first() returns None rather than raising, so guard against a missing relation
+    if not episode_relation:
+        return JsonResponse({"error": "Episode not found"}, status=404)
+
+    # Redirect to the VOD proxy endpoint
+    from django.http import HttpResponseRedirect
+    from django.urls import reverse
+
+    vod_url = reverse('proxy:vod_proxy:vod_stream', kwargs={
+        'content_type': 'episode',
+        'content_id': episode_relation.episode.uuid
+    })
+
+    return HttpResponseRedirect(vod_url)
+
+
+def get_host_and_port(request):
+    """
+    Returns (host, port) for building absolute URIs.
+    - Prefers X-Forwarded-Host/X-Forwarded-Port (nginx).
+    - Falls back to Host header.
+    - Returns None for port if using standard ports (80/443) to omit from URLs.
+    - In dev, uses 5656 as a guess if port cannot be determined.
+    """
+    # Determine the scheme first - needed for standard port detection
+    scheme = request.META.get("HTTP_X_FORWARDED_PROTO", request.scheme)
+    standard_port = "443" if scheme == "https" else "80"
+
+    # 1.
Try X-Forwarded-Host (may include port) - set by our nginx + xfh = request.META.get("HTTP_X_FORWARDED_HOST") + if xfh: + if ":" in xfh: + host, port = xfh.split(":", 1) + # Omit standard ports from URLs + if port == standard_port: + return host, None + # Non-standard port in X-Forwarded-Host - return it + # This handles reverse proxies on non-standard ports (e.g., https://example.com:8443) + return host, port + else: + host = xfh + + # Check for X-Forwarded-Port header (if we didn't find a port in X-Forwarded-Host) + port = request.META.get("HTTP_X_FORWARDED_PORT") + if port: + # Omit standard ports from URLs + return host, None if port == standard_port else port + # If X-Forwarded-Proto is set but no valid port, assume standard + if request.META.get("HTTP_X_FORWARDED_PROTO"): + return host, None + + # 2. Try Host header + raw_host = request.get_host() + if ":" in raw_host: + host, port = raw_host.split(":", 1) + # Omit standard ports from URLs + return host, None if port == standard_port else port + else: + host = raw_host + + # 3. Check for X-Forwarded-Port (when Host header has no port but we're behind a reverse proxy) + port = request.META.get("HTTP_X_FORWARDED_PORT") + if port: + # Omit standard ports from URLs + return host, None if port == standard_port else port + + # 4. Check if we're behind a reverse proxy (X-Forwarded-Proto or X-Forwarded-For present) + # If so, assume standard port for the scheme (don't trust SERVER_PORT in this case) + if request.META.get("HTTP_X_FORWARDED_PROTO") or request.META.get("HTTP_X_FORWARDED_FOR"): + return host, None + + # 5. Try SERVER_PORT from META (only if NOT behind reverse proxy) + port = request.META.get("SERVER_PORT") + if port: + # Omit standard ports from URLs + return host, None if port == standard_port else port + + # 6. Dev fallback: guess port 5656 + if os.environ.get("DISPATCHARR_ENV") == "dev" or host in ("localhost", "127.0.0.1"): + return host, "5656" + + # 7. Final fallback: assume standard port for scheme (omit from URL) + return host, None + +def build_absolute_uri_with_port(request, path): + """ + Build an absolute URI with optional port. + Port is omitted from URL if None (standard port for scheme). + """ + host, port = get_host_and_port(request) + scheme = request.META.get("HTTP_X_FORWARDED_PROTO", request.scheme) + + if port: + return f"{scheme}://{host}:{port}{path}" + else: + return f"{scheme}://{host}{path}" + +def format_duration_hms(seconds): + """ + Format a duration in seconds as HH:MM:SS zero-padded string. 
+    """
+    seconds = int(seconds or 0)
+    return f"{seconds//3600:02}:{(seconds%3600)//60:02}:{seconds%60:02}"
diff --git a/apps/plugins/__init__.py b/apps/plugins/__init__.py
new file mode 100644
index 00000000..22c35396
--- /dev/null
+++ b/apps/plugins/__init__.py
@@ -0,0 +1,2 @@
+default_app_config = "apps.plugins.apps.PluginsConfig"
+
diff --git a/apps/plugins/api_urls.py b/apps/plugins/api_urls.py
new file mode 100644
index 00000000..a229a07c
--- /dev/null
+++ b/apps/plugins/api_urls.py
@@ -0,0 +1,22 @@
+from django.urls import path
+from .api_views import (
+    PluginsListAPIView,
+    PluginReloadAPIView,
+    PluginSettingsAPIView,
+    PluginRunAPIView,
+    PluginEnabledAPIView,
+    PluginImportAPIView,
+    PluginDeleteAPIView,
+)
+
+app_name = "plugins"
+
+urlpatterns = [
+    path("plugins/", PluginsListAPIView.as_view(), name="list"),
+    path("plugins/reload/", PluginReloadAPIView.as_view(), name="reload"),
+    path("plugins/import/", PluginImportAPIView.as_view(), name="import"),
+    path("plugins/<str:key>/delete/", PluginDeleteAPIView.as_view(), name="delete"),
+    path("plugins/<str:key>/settings/", PluginSettingsAPIView.as_view(), name="settings"),
+    path("plugins/<str:key>/run/", PluginRunAPIView.as_view(), name="run"),
+    path("plugins/<str:key>/enabled/", PluginEnabledAPIView.as_view(), name="enabled"),
+]
diff --git a/apps/plugins/api_views.py b/apps/plugins/api_views.py
new file mode 100644
index 00000000..0d68fc7d
--- /dev/null
+++ b/apps/plugins/api_views.py
@@ -0,0 +1,306 @@
+import logging
+from rest_framework.views import APIView
+from rest_framework.response import Response
+from rest_framework import status
+from rest_framework.decorators import api_view
+from django.conf import settings
+from django.core.files.uploadedfile import UploadedFile
+import io
+import os
+import zipfile
+import shutil
+import tempfile
+from apps.accounts.permissions import (
+    Authenticated,
+    permission_classes_by_method,
+)
+
+from .loader import PluginManager
+from .models import PluginConfig
+
+logger = logging.getLogger(__name__)
+
+
+class PluginsListAPIView(APIView):
+    def get_permissions(self):
+        try:
+            return [
+                perm() for perm in permission_classes_by_method[self.request.method]
+            ]
+        except KeyError:
+            return [Authenticated()]
+
+    def get(self, request):
+        pm = PluginManager.get()
+        # Ensure registry is up-to-date on each request
+        pm.discover_plugins()
+        return Response({"plugins": pm.list_plugins()})
+
+
+class PluginReloadAPIView(APIView):
+    def get_permissions(self):
+        try:
+            return [
+                perm() for perm in permission_classes_by_method[self.request.method]
+            ]
+        except KeyError:
+            return [Authenticated()]
+
+    def post(self, request):
+        pm = PluginManager.get()
+        pm.discover_plugins()
+        return Response({"success": True, "count": len(pm._registry)})
+
+
+class PluginImportAPIView(APIView):
+    def get_permissions(self):
+        try:
+            return [
+                perm() for perm in permission_classes_by_method[self.request.method]
+            ]
+        except KeyError:
+            return [Authenticated()]
+
+    def post(self, request):
+        file: UploadedFile = request.FILES.get("file")
+        if not file:
+            return Response({"success": False, "error": "Missing 'file' upload"}, status=status.HTTP_400_BAD_REQUEST)
+
+        pm = PluginManager.get()
+        plugins_dir = pm.plugins_dir
+
+        try:
+            zf = zipfile.ZipFile(file)
+        except zipfile.BadZipFile:
+            return Response({"success": False, "error": "Invalid zip file"}, status=status.HTTP_400_BAD_REQUEST)
+
+        # Extract to a temporary directory first to avoid server reload thrash
+        tmp_root = tempfile.mkdtemp(prefix="plugin_import_")
+        try:
+            file_members = [m for m
in zf.infolist() if not m.is_dir()] + if not file_members: + shutil.rmtree(tmp_root, ignore_errors=True) + return Response({"success": False, "error": "Archive is empty"}, status=status.HTTP_400_BAD_REQUEST) + + for member in file_members: + name = member.filename + if not name or name.endswith("/"): + continue + # Normalize and prevent path traversal + norm = os.path.normpath(name) + if norm.startswith("..") or os.path.isabs(norm): + shutil.rmtree(tmp_root, ignore_errors=True) + return Response({"success": False, "error": "Unsafe path in archive"}, status=status.HTTP_400_BAD_REQUEST) + dest_path = os.path.join(tmp_root, norm) + os.makedirs(os.path.dirname(dest_path), exist_ok=True) + with zf.open(member, 'r') as src, open(dest_path, 'wb') as dst: + shutil.copyfileobj(src, dst) + + # Find candidate directory containing plugin.py or __init__.py + candidates = [] + for dirpath, dirnames, filenames in os.walk(tmp_root): + has_pluginpy = "plugin.py" in filenames + has_init = "__init__.py" in filenames + if has_pluginpy or has_init: + depth = len(os.path.relpath(dirpath, tmp_root).split(os.sep)) + candidates.append((0 if has_pluginpy else 1, depth, dirpath)) + if not candidates: + shutil.rmtree(tmp_root, ignore_errors=True) + return Response({"success": False, "error": "Invalid plugin: missing plugin.py or package __init__.py"}, status=status.HTTP_400_BAD_REQUEST) + + candidates.sort() + chosen = candidates[0][2] + # Determine plugin key: prefer chosen folder name; if chosen is tmp_root, use zip base name + base_name = os.path.splitext(getattr(file, "name", "plugin"))[0] + plugin_key = os.path.basename(chosen.rstrip(os.sep)) + if chosen.rstrip(os.sep) == tmp_root.rstrip(os.sep): + plugin_key = base_name + plugin_key = plugin_key.replace(" ", "_").lower() + + final_dir = os.path.join(plugins_dir, plugin_key) + if os.path.exists(final_dir): + # If final dir exists but contains a valid plugin, refuse; otherwise clear it + if os.path.exists(os.path.join(final_dir, "plugin.py")) or os.path.exists(os.path.join(final_dir, "__init__.py")): + shutil.rmtree(tmp_root, ignore_errors=True) + return Response({"success": False, "error": f"Plugin '{plugin_key}' already exists"}, status=status.HTTP_400_BAD_REQUEST) + try: + shutil.rmtree(final_dir) + except Exception: + pass + + # Move chosen directory into final location + if chosen.rstrip(os.sep) == tmp_root.rstrip(os.sep): + # Move all contents into final_dir + os.makedirs(final_dir, exist_ok=True) + for item in os.listdir(tmp_root): + shutil.move(os.path.join(tmp_root, item), os.path.join(final_dir, item)) + else: + shutil.move(chosen, final_dir) + # Cleanup temp + shutil.rmtree(tmp_root, ignore_errors=True) + target_dir = final_dir + finally: + try: + shutil.rmtree(tmp_root, ignore_errors=True) + except Exception: + pass + + # Reload discovery and validate plugin entry + pm.discover_plugins() + plugin = pm._registry.get(plugin_key) + if not plugin: + # Cleanup the copied folder to avoid leaving invalid plugin behind + try: + shutil.rmtree(target_dir, ignore_errors=True) + except Exception: + pass + return Response({"success": False, "error": "Invalid plugin: missing Plugin class in plugin.py or __init__.py"}, status=status.HTTP_400_BAD_REQUEST) + + # Extra validation: ensure Plugin.run exists + instance = getattr(plugin, "instance", None) + run_method = getattr(instance, "run", None) + if not callable(run_method): + try: + shutil.rmtree(target_dir, ignore_errors=True) + except Exception: + pass + return Response({"success": False, "error": "Invalid plugin: 
Plugin class must define a callable run(action, params, context)"}, status=status.HTTP_400_BAD_REQUEST) + + # Find DB config to return enabled/ever_enabled + try: + cfg = PluginConfig.objects.get(key=plugin_key) + enabled = cfg.enabled + ever_enabled = getattr(cfg, "ever_enabled", False) + except PluginConfig.DoesNotExist: + enabled = False + ever_enabled = False + + return Response({ + "success": True, + "plugin": { + "key": plugin.key, + "name": plugin.name, + "version": plugin.version, + "description": plugin.description, + "enabled": enabled, + "ever_enabled": ever_enabled, + "fields": plugin.fields or [], + "actions": plugin.actions or [], + } + }) + + +class PluginSettingsAPIView(APIView): + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + + def post(self, request, key): + pm = PluginManager.get() + data = request.data or {} + settings = data.get("settings", {}) + try: + updated = pm.update_settings(key, settings) + return Response({"success": True, "settings": updated}) + except Exception as e: + return Response({"success": False, "error": str(e)}, status=status.HTTP_400_BAD_REQUEST) + + +class PluginRunAPIView(APIView): + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + + def post(self, request, key): + pm = PluginManager.get() + action = request.data.get("action") + params = request.data.get("params", {}) + if not action: + return Response({"success": False, "error": "Missing 'action'"}, status=status.HTTP_400_BAD_REQUEST) + + # Respect plugin enabled flag + try: + cfg = PluginConfig.objects.get(key=key) + if not cfg.enabled: + return Response({"success": False, "error": "Plugin is disabled"}, status=status.HTTP_403_FORBIDDEN) + except PluginConfig.DoesNotExist: + return Response({"success": False, "error": "Plugin not found"}, status=status.HTTP_404_NOT_FOUND) + + try: + result = pm.run_action(key, action, params) + return Response({"success": True, "result": result}) + except PermissionError as e: + return Response({"success": False, "error": str(e)}, status=status.HTTP_403_FORBIDDEN) + except Exception as e: + logger.exception("Plugin action failed") + return Response({"success": False, "error": str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR) + + +class PluginEnabledAPIView(APIView): + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + + def post(self, request, key): + enabled = request.data.get("enabled") + if enabled is None: + return Response({"success": False, "error": "Missing 'enabled' boolean"}, status=status.HTTP_400_BAD_REQUEST) + try: + cfg = PluginConfig.objects.get(key=key) + cfg.enabled = bool(enabled) + # Mark that this plugin has been enabled at least once + if cfg.enabled and not cfg.ever_enabled: + cfg.ever_enabled = True + cfg.save(update_fields=["enabled", "ever_enabled", "updated_at"]) + return Response({"success": True, "enabled": cfg.enabled, "ever_enabled": cfg.ever_enabled}) + except PluginConfig.DoesNotExist: + return Response({"success": False, "error": "Plugin not found"}, status=status.HTTP_404_NOT_FOUND) + + +class PluginDeleteAPIView(APIView): + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return 
[Authenticated()] + + def delete(self, request, key): + pm = PluginManager.get() + plugins_dir = pm.plugins_dir + target_dir = os.path.join(plugins_dir, key) + # Safety: ensure path inside plugins_dir + abs_plugins = os.path.abspath(plugins_dir) + os.sep + abs_target = os.path.abspath(target_dir) + if not abs_target.startswith(abs_plugins): + return Response({"success": False, "error": "Invalid plugin path"}, status=status.HTTP_400_BAD_REQUEST) + + # Remove files + if os.path.isdir(target_dir): + try: + shutil.rmtree(target_dir) + except Exception as e: + return Response({"success": False, "error": f"Failed to delete plugin files: {e}"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR) + + # Remove DB record + try: + PluginConfig.objects.filter(key=key).delete() + except Exception: + pass + + # Reload registry + pm.discover_plugins() + return Response({"success": True}) diff --git a/apps/plugins/apps.py b/apps/plugins/apps.py new file mode 100644 index 00000000..3ab44cb1 --- /dev/null +++ b/apps/plugins/apps.py @@ -0,0 +1,54 @@ +from django.apps import AppConfig +import os +import sys +from django.db.models.signals import post_migrate + + +class PluginsConfig(AppConfig): + name = "apps.plugins" + verbose_name = "Plugins" + + def ready(self): + """Wire up plugin discovery without hitting the DB during app init. + + - Skip during common management commands that don't need discovery. + - Register post_migrate handler to sync plugin registry to DB after migrations. + - Do an in-memory discovery (no DB) so registry is available early. + """ + try: + # Allow explicit opt-out via env var + if os.environ.get("DISPATCHARR_SKIP_PLUGIN_AUTODISCOVERY", "").lower() in ("1", "true", "yes"): + return + + argv = sys.argv[1:] if len(sys.argv) > 1 else [] + mgmt_cmds_to_skip = { + # Skip immediate discovery for these commands + "makemigrations", "collectstatic", "check", "test", "shell", "showmigrations", + } + if argv and argv[0] in mgmt_cmds_to_skip: + return + + # Run discovery with DB sync after the plugins app has been migrated + def _post_migrate_discover(sender=None, app_config=None, **kwargs): + try: + if app_config and getattr(app_config, 'label', None) != 'plugins': + return + from .loader import PluginManager + PluginManager.get().discover_plugins(sync_db=True) + except Exception: + import logging + logging.getLogger(__name__).exception("Plugin discovery failed in post_migrate") + + post_migrate.connect( + _post_migrate_discover, + dispatch_uid="apps.plugins.post_migrate_discover", + ) + + # Perform non-DB discovery now to populate in-memory registry. 
+ from .loader import PluginManager + PluginManager.get().discover_plugins(sync_db=False) + except Exception: + # Avoid breaking startup due to plugin errors + import logging + + logging.getLogger(__name__).exception("Plugin discovery wiring failed during app ready") diff --git a/apps/plugins/loader.py b/apps/plugins/loader.py new file mode 100644 index 00000000..5422ae7e --- /dev/null +++ b/apps/plugins/loader.py @@ -0,0 +1,254 @@ +import importlib +import json +import logging +import os +import sys +from dataclasses import dataclass, field +from typing import Any, Dict, List, Optional + +from django.db import transaction + +from .models import PluginConfig + +logger = logging.getLogger(__name__) + + +@dataclass +class LoadedPlugin: + key: str + name: str + version: str = "" + description: str = "" + module: Any = None + instance: Any = None + fields: List[Dict[str, Any]] = field(default_factory=list) + actions: List[Dict[str, Any]] = field(default_factory=list) + + +class PluginManager: + """Singleton manager that discovers and runs plugins from /data/plugins.""" + + _instance: Optional["PluginManager"] = None + + @classmethod + def get(cls) -> "PluginManager": + if not cls._instance: + cls._instance = PluginManager() + return cls._instance + + def __init__(self) -> None: + self.plugins_dir = os.environ.get("DISPATCHARR_PLUGINS_DIR", "/data/plugins") + self._registry: Dict[str, LoadedPlugin] = {} + + # Ensure plugins directory exists + os.makedirs(self.plugins_dir, exist_ok=True) + if self.plugins_dir not in sys.path: + sys.path.append(self.plugins_dir) + + def discover_plugins(self, *, sync_db: bool = True) -> Dict[str, LoadedPlugin]: + if sync_db: + logger.info(f"Discovering plugins in {self.plugins_dir}") + else: + logger.debug(f"Discovering plugins (no DB sync) in {self.plugins_dir}") + self._registry.clear() + + try: + for entry in sorted(os.listdir(self.plugins_dir)): + path = os.path.join(self.plugins_dir, entry) + if not os.path.isdir(path): + continue + + plugin_key = entry.replace(" ", "_").lower() + + try: + self._load_plugin(plugin_key, path) + except Exception: + logger.exception(f"Failed to load plugin '{plugin_key}' from {path}") + + logger.info(f"Discovered {len(self._registry)} plugin(s)") + except FileNotFoundError: + logger.warning(f"Plugins directory not found: {self.plugins_dir}") + + # Sync DB records (optional) + if sync_db: + try: + self._sync_db_with_registry() + except Exception: + # Defer sync if database is not ready (e.g., first startup before migrate) + logger.exception("Deferring plugin DB sync; database not ready yet") + return self._registry + + def _load_plugin(self, key: str, path: str): + # Plugin can be a package and/or contain plugin.py. Prefer plugin.py when present. 
+ has_pkg = os.path.exists(os.path.join(path, "__init__.py")) + has_pluginpy = os.path.exists(os.path.join(path, "plugin.py")) + if not (has_pkg or has_pluginpy): + logger.debug(f"Skipping {path}: no plugin.py or package") + return + + candidate_modules = [] + if has_pluginpy: + candidate_modules.append(f"{key}.plugin") + if has_pkg: + candidate_modules.append(key) + + module = None + plugin_cls = None + last_error = None + for module_name in candidate_modules: + try: + logger.debug(f"Importing plugin module {module_name}") + module = importlib.import_module(module_name) + plugin_cls = getattr(module, "Plugin", None) + if plugin_cls is not None: + break + else: + logger.warning(f"Module {module_name} has no Plugin class") + except Exception as e: + last_error = e + logger.exception(f"Error importing module {module_name}") + + if plugin_cls is None: + if last_error: + raise last_error + else: + logger.warning(f"No Plugin class found for {key}; skipping") + return + + instance = plugin_cls() + + name = getattr(instance, "name", key) + version = getattr(instance, "version", "") + description = getattr(instance, "description", "") + fields = getattr(instance, "fields", []) + actions = getattr(instance, "actions", []) + + self._registry[key] = LoadedPlugin( + key=key, + name=name, + version=version, + description=description, + module=module, + instance=instance, + fields=fields, + actions=actions, + ) + + def _sync_db_with_registry(self): + with transaction.atomic(): + for key, lp in self._registry.items(): + obj, _ = PluginConfig.objects.get_or_create( + key=key, + defaults={ + "name": lp.name, + "version": lp.version, + "description": lp.description, + "settings": {}, + }, + ) + # Update meta if changed + changed = False + if obj.name != lp.name: + obj.name = lp.name + changed = True + if obj.version != lp.version: + obj.version = lp.version + changed = True + if obj.description != lp.description: + obj.description = lp.description + changed = True + if changed: + obj.save() + + def list_plugins(self) -> List[Dict[str, Any]]: + from .models import PluginConfig + + plugins: List[Dict[str, Any]] = [] + try: + configs = {c.key: c for c in PluginConfig.objects.all()} + except Exception as e: + # Database might not be migrated yet; fall back to registry only + logger.warning("PluginConfig table unavailable; listing registry only: %s", e) + configs = {} + + # First, include all discovered plugins + for key, lp in self._registry.items(): + conf = configs.get(key) + plugins.append( + { + "key": key, + "name": lp.name, + "version": lp.version, + "description": lp.description, + "enabled": conf.enabled if conf else False, + "ever_enabled": getattr(conf, "ever_enabled", False) if conf else False, + "fields": lp.fields or [], + "settings": (conf.settings if conf else {}), + "actions": lp.actions or [], + "missing": False, + } + ) + + # Then, include any DB-only configs (files missing or failed to load) + discovered_keys = set(self._registry.keys()) + for key, conf in configs.items(): + if key in discovered_keys: + continue + plugins.append( + { + "key": key, + "name": conf.name, + "version": conf.version, + "description": conf.description, + "enabled": conf.enabled, + "ever_enabled": getattr(conf, "ever_enabled", False), + "fields": [], + "settings": conf.settings or {}, + "actions": [], + "missing": True, + } + ) + + return plugins + + def get_plugin(self, key: str) -> Optional[LoadedPlugin]: + return self._registry.get(key) + + def update_settings(self, key: str, settings: Dict[str, Any]) -> Dict[str, 
Any]: + cfg = PluginConfig.objects.get(key=key) + cfg.settings = settings or {} + cfg.save(update_fields=["settings", "updated_at"]) + return cfg.settings + + def run_action(self, key: str, action_id: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: + lp = self.get_plugin(key) + if not lp or not lp.instance: + raise ValueError(f"Plugin '{key}' not found") + + cfg = PluginConfig.objects.get(key=key) + if not cfg.enabled: + raise PermissionError(f"Plugin '{key}' is disabled") + params = params or {} + + # Provide a context object to the plugin + context = { + "settings": cfg.settings or {}, + "logger": logger, + "actions": {a.get("id"): a for a in (lp.actions or [])}, + } + + # Run either via Celery if plugin provides a delayed method, or inline + run_method = getattr(lp.instance, "run", None) + if not callable(run_method): + raise ValueError(f"Plugin '{key}' has no runnable 'run' method") + + try: + result = run_method(action_id, params, context) + except Exception: + logger.exception(f"Plugin '{key}' action '{action_id}' failed") + raise + + # Normalize return + if isinstance(result, dict): + return result + return {"status": "ok", "result": result} diff --git a/apps/plugins/migrations/0001_initial.py b/apps/plugins/migrations/0001_initial.py new file mode 100644 index 00000000..6de1490a --- /dev/null +++ b/apps/plugins/migrations/0001_initial.py @@ -0,0 +1,29 @@ +# Generated by Django 5.2.4 on 2025-09-13 13:51 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ] + + operations = [ + migrations.CreateModel( + name='PluginConfig', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('key', models.CharField(max_length=128, unique=True)), + ('name', models.CharField(max_length=255)), + ('version', models.CharField(blank=True, default='', max_length=64)), + ('description', models.TextField(blank=True, default='')), + ('enabled', models.BooleanField(default=False)), + ('ever_enabled', models.BooleanField(default=False)), + ('settings', models.JSONField(blank=True, default=dict)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ], + ), + ] diff --git a/apps/plugins/migrations/__init__.py b/apps/plugins/migrations/__init__.py new file mode 100644 index 00000000..ade076bd --- /dev/null +++ b/apps/plugins/migrations/__init__.py @@ -0,0 +1 @@ +# This file marks the migrations package for the plugins app. 
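For orientation, the plugin manager above imports either `<plugin_dir>/plugin.py` or the package itself, looks for a `Plugin` class exposing `name`, `version`, `description`, `fields`, and `actions`, and dispatches actions through `run(action_id, params, context)`, where `context` carries the persisted settings, a logger, and the action table. A minimal sketch of a conforming module, assuming a hypothetical `hello_world` plugin directory (the names, field ids, and action ids are illustrative, not part of this patch):

```python
# plugins/hello_world/plugin.py -- hypothetical example, not included in this patch

class Plugin:
    name = "Hello World"
    version = "0.1.0"
    description = "Logs a configurable greeting."

    # Simple dicts describing configurable settings and UI-triggerable actions
    fields = [
        {"id": "greeting", "label": "Greeting", "type": "string", "default": "Hello"},
    ]
    actions = [
        {"id": "say_hello", "label": "Say hello", "description": "Log the greeting."},
    ]

    def run(self, action_id, params, context):
        # context["settings"] holds the values persisted in PluginConfig.settings
        settings = context["settings"]
        logger = context["logger"]
        if action_id == "say_hello":
            greeting = settings.get("greeting", "Hello")
            logger.info("%s from the example plugin", greeting)
            return {"status": "ok", "greeting": greeting}
        return {"status": "error", "message": f"unknown action: {action_id}"}
```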
diff --git a/apps/plugins/models.py b/apps/plugins/models.py new file mode 100644 index 00000000..8ae0b5be --- /dev/null +++ b/apps/plugins/models.py @@ -0,0 +1,19 @@ +from django.db import models + + +class PluginConfig(models.Model): + """Stores discovered plugins and their persisted settings.""" + + key = models.CharField(max_length=128, unique=True) + name = models.CharField(max_length=255) + version = models.CharField(max_length=64, blank=True, default="") + description = models.TextField(blank=True, default="") + enabled = models.BooleanField(default=False) + # Tracks whether this plugin has ever been enabled at least once + ever_enabled = models.BooleanField(default=False) + settings = models.JSONField(default=dict, blank=True) + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + + def __str__(self) -> str: + return f"{self.name} ({self.key})" diff --git a/apps/plugins/serializers.py b/apps/plugins/serializers.py new file mode 100644 index 00000000..cc7b1882 --- /dev/null +++ b/apps/plugins/serializers.py @@ -0,0 +1,28 @@ +from rest_framework import serializers + + +class PluginActionSerializer(serializers.Serializer): + id = serializers.CharField() + label = serializers.CharField() + description = serializers.CharField(required=False, allow_blank=True) + + +class PluginFieldSerializer(serializers.Serializer): + id = serializers.CharField() + label = serializers.CharField() + type = serializers.ChoiceField(choices=["string", "number", "boolean", "select"]) # simple types + default = serializers.JSONField(required=False) + help_text = serializers.CharField(required=False, allow_blank=True) + options = serializers.ListField(child=serializers.DictField(), required=False) + + +class PluginSerializer(serializers.Serializer): + key = serializers.CharField() + name = serializers.CharField() + version = serializers.CharField(allow_blank=True) + description = serializers.CharField(allow_blank=True) + enabled = serializers.BooleanField() + fields = PluginFieldSerializer(many=True) + settings = serializers.JSONField() + actions = PluginActionSerializer(many=True) + diff --git a/apps/proxy/config.py b/apps/proxy/config.py index 9d38532b..3b1ce967 100644 --- a/apps/proxy/config.py +++ b/apps/proxy/config.py @@ -1,4 +1,6 @@ """Shared configuration between proxy types""" +import time +from django.db import connection class BaseConfig: DEFAULT_USER_AGENT = 'VLC/3.0.20 LibVLC/3.0.20' # Will only be used if connection to settings fail @@ -9,8 +11,56 @@ class BaseConfig: CONNECTION_TIMEOUT = 10 # seconds to wait for initial connection MAX_STREAM_SWITCHES = 10 # Maximum number of stream switch attempts before giving up BUFFER_CHUNK_SIZE = 188 * 1361 # ~256KB - # Redis settings - REDIS_CHUNK_TTL = 60 # Number in seconds - Chunks expire after 1 minute + BUFFERING_TIMEOUT = 15 # Seconds to wait for buffering before switching streams + BUFFER_SPEED = 1 # What speed to consider the stream buffering, 1x is normal speed, 2x is double speed, etc.
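The two constants added at the end of this hunk work together: a stream counts as buffering when the transcoder's reported playback speed stays below `BUFFER_SPEED` for longer than `BUFFERING_TIMEOUT` seconds. A hedged sketch of that relationship (the helper name and its arguments are hypothetical; the actual check lives in the stream manager, which is not part of this diff):

```python
# Hypothetical helper showing how BUFFERING_TIMEOUT and BUFFER_SPEED relate.
def is_buffering(reported_speed, seconds_below_threshold,
                 buffer_speed=1.0, buffering_timeout=15):
    """True once playback speed has stayed under the threshold for too long."""
    return reported_speed < buffer_speed and seconds_below_threshold >= buffering_timeout
```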
+ + # Cache for proxy settings (class-level, shared across all instances) + _proxy_settings_cache = None + _proxy_settings_cache_time = 0 + _proxy_settings_cache_ttl = 10 # Cache for 10 seconds + + @classmethod + def get_proxy_settings(cls): + """Get proxy settings from CoreSettings JSON data with fallback to defaults (cached)""" + # Check if cache is still valid + now = time.time() + if cls._proxy_settings_cache is not None and (now - cls._proxy_settings_cache_time) < cls._proxy_settings_cache_ttl: + return cls._proxy_settings_cache + + # Cache miss or expired - fetch from database + try: + from core.models import CoreSettings + settings = CoreSettings.get_proxy_settings() + cls._proxy_settings_cache = settings + cls._proxy_settings_cache_time = now + return settings + + except Exception: + # Return defaults if database query fails + return { + "buffering_timeout": 15, + "buffering_speed": 1.0, + "redis_chunk_ttl": 60, + "channel_shutdown_delay": 0, + "channel_init_grace_period": 5, + } + + finally: + # Always close the connection after reading settings + try: + connection.close() + except Exception: + pass + + @classmethod + def get_redis_chunk_ttl(cls): + """Get Redis chunk TTL from database or default""" + settings = cls.get_proxy_settings() + return settings.get("redis_chunk_ttl", 60) + + @property + def REDIS_CHUNK_TTL(self): + return self.get_redis_chunk_ttl() class HLSConfig(BaseConfig): MIN_SEGMENTS = 12 @@ -32,6 +82,8 @@ class TSConfig(BaseConfig): INITIAL_BEHIND_CHUNKS = 4 # How many chunks behind to start a client (4 chunks = ~1MB) CHUNK_BATCH_SIZE = 5 # How many chunks to fetch in one batch KEEPALIVE_INTERVAL = 0.5 # Seconds between keepalive packets when at buffer head + # Chunk read timeout + CHUNK_TIMEOUT = 5 # Seconds to wait for each chunk read # Streaming settings TARGET_BITRATE = 8000000 # Target bitrate (8 Mbps) @@ -40,25 +92,64 @@ class TSConfig(BaseConfig): # Resource management CLEANUP_INTERVAL = 60 # Check for inactive channels every 60 seconds - CHANNEL_SHUTDOWN_DELAY = 0 # How long to wait after last client before shutdown (seconds) # Client tracking settings - CLIENT_RECORD_TTL = 5 # How long client records persist in Redis (seconds). Client will be considered MIA after this time. + CLIENT_RECORD_TTL = 60 # How long client records persist in Redis (seconds). Client will be considered MIA after this time. 
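Since several tunables now come from `CoreSettings` through `get_proxy_settings()` with a 10-second class-level cache, callers can read them either via the classmethods or via the property wrappers, and both fall back to the hard-coded defaults when the database is unreachable. A brief usage sketch, assuming a configured Django environment:

```python
# Both forms resolve through get_proxy_settings() and its 10-second cache.
from apps.proxy.config import TSConfig

ttl_via_classmethod = TSConfig.get_redis_chunk_ttl()  # what ConfigHelper delegates to later in this patch
ttl_via_property = TSConfig().REDIS_CHUNK_TTL         # property wrapper around the same cached value
```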
CLEANUP_CHECK_INTERVAL = 1 # How often to check for disconnected clients (seconds) - CHANNEL_INIT_GRACE_PERIOD = 5 # How long to wait for first client after initialization (seconds) - CLIENT_HEARTBEAT_INTERVAL = 1 # How often to send client heartbeats (seconds) - GHOST_CLIENT_MULTIPLIER = 5.0 # How many heartbeat intervals before client considered ghost (5 would mean 5 secondsif heartbeat interval is 1) - - # TS packets are 188 bytes - # Make chunk size a multiple of TS packet size for perfect alignment - # ~1MB is ideal for streaming (matches typical media buffer sizes) + CLIENT_HEARTBEAT_INTERVAL = 5 # How often to send client heartbeats (seconds) + GHOST_CLIENT_MULTIPLIER = 6.0 # How many heartbeat intervals before client considered ghost (6 would mean 36 seconds if heartbeat interval is 6) + CLIENT_WAIT_TIMEOUT = 30 # Seconds to wait for client to connect # Stream health and recovery settings MAX_HEALTH_RECOVERY_ATTEMPTS = 2 # Maximum times to attempt recovery for a single stream MAX_RECONNECT_ATTEMPTS = 3 # Maximum reconnects to try before switching streams MIN_STABLE_TIME_BEFORE_RECONNECT = 30 # Minimum seconds a stream must be stable to try reconnect FAILOVER_GRACE_PERIOD = 20 # Extra time (seconds) to allow for stream switching before disconnecting clients + URL_SWITCH_TIMEOUT = 20 # Max time allowed for a stream switch operation + # Database-dependent settings with fallbacks + @classmethod + def get_channel_shutdown_delay(cls): + """Get channel shutdown delay from database or default""" + settings = cls.get_proxy_settings() + return settings.get("channel_shutdown_delay", 0) + + @classmethod + def get_buffering_timeout(cls): + """Get buffering timeout from database or default""" + settings = cls.get_proxy_settings() + return settings.get("buffering_timeout", 15) + + @classmethod + def get_buffering_speed(cls): + """Get buffering speed threshold from database or default""" + settings = cls.get_proxy_settings() + return settings.get("buffering_speed", 1.0) + + @classmethod + def get_channel_init_grace_period(cls): + """Get channel init grace period from database or default""" + settings = cls.get_proxy_settings() + return settings.get("channel_init_grace_period", 5) + + # Dynamic property access for these settings + @property + def CHANNEL_SHUTDOWN_DELAY(self): + return self.get_channel_shutdown_delay() + + @property + def BUFFERING_TIMEOUT(self): + return self.get_buffering_timeout() + + @property + def BUFFERING_SPEED(self): + return self.get_buffering_speed() + + @property + def CHANNEL_INIT_GRACE_PERIOD(self): + return self.get_channel_init_grace_period() + + diff --git a/apps/proxy/tasks.py b/apps/proxy/tasks.py index a4aaf8e5..68843712 100644 --- a/apps/proxy/tasks.py +++ b/apps/proxy/tasks.py @@ -6,8 +6,11 @@ import redis import json import logging import re +import gc # Add import for garbage collection from core.utils import RedisClient from apps.proxy.ts_proxy.channel_status import ChannelStatus +from core.utils import send_websocket_update +from apps.proxy.vod_proxy.connection_manager import get_connection_manager logger = logging.getLogger(__name__) @@ -43,11 +46,27 @@ def fetch_channel_stats(): return # return JsonResponse({'error': str(e)}, status=500) - channel_layer = get_channel_layer() - async_to_sync(channel_layer.group_send)( + send_websocket_update( "updates", + "update", { - "type": "update", - "data": {"success": True, "type": "channel_stats", "stats": json.dumps({'channels': all_channels, 'count': len(all_channels)})} + "success": True, + "type": 
"channel_stats", + "stats": json.dumps({'channels': all_channels, 'count': len(all_channels)}) }, + collect_garbage=True ) + + # Explicitly clean up large data structures + all_channels = None + gc.collect() + +@shared_task +def cleanup_vod_connections(): + """Clean up stale VOD connections""" + try: + connection_manager = get_connection_manager() + connection_manager.cleanup_stale_connections(max_age_seconds=3600) # 1 hour + logger.info("VOD connection cleanup completed") + except Exception as e: + logger.error(f"Error in VOD connection cleanup: {e}", exc_info=True) diff --git a/apps/proxy/ts_proxy/channel_status.py b/apps/proxy/ts_proxy/channel_status.py index dd18d922..8f1d0649 100644 --- a/apps/proxy/ts_proxy/channel_status.py +++ b/apps/proxy/ts_proxy/channel_status.py @@ -264,6 +264,63 @@ class ChannelStatus: 'last_data_age': time.time() - manager.last_data_time } + # Add FFmpeg stream information + video_codec = metadata.get(ChannelMetadataField.VIDEO_CODEC.encode('utf-8')) + if video_codec: + info['video_codec'] = video_codec.decode('utf-8') + + resolution = metadata.get(ChannelMetadataField.RESOLUTION.encode('utf-8')) + if resolution: + info['resolution'] = resolution.decode('utf-8') + + source_fps = metadata.get(ChannelMetadataField.SOURCE_FPS.encode('utf-8')) + if source_fps: + info['source_fps'] = float(source_fps.decode('utf-8')) + + pixel_format = metadata.get(ChannelMetadataField.PIXEL_FORMAT.encode('utf-8')) + if pixel_format: + info['pixel_format'] = pixel_format.decode('utf-8') + + source_bitrate = metadata.get(ChannelMetadataField.SOURCE_BITRATE.encode('utf-8')) + if source_bitrate: + info['source_bitrate'] = float(source_bitrate.decode('utf-8')) + + audio_codec = metadata.get(ChannelMetadataField.AUDIO_CODEC.encode('utf-8')) + if audio_codec: + info['audio_codec'] = audio_codec.decode('utf-8') + + sample_rate = metadata.get(ChannelMetadataField.SAMPLE_RATE.encode('utf-8')) + if sample_rate: + info['sample_rate'] = int(sample_rate.decode('utf-8')) + + audio_channels = metadata.get(ChannelMetadataField.AUDIO_CHANNELS.encode('utf-8')) + if audio_channels: + info['audio_channels'] = audio_channels.decode('utf-8') + + audio_bitrate = metadata.get(ChannelMetadataField.AUDIO_BITRATE.encode('utf-8')) + if audio_bitrate: + info['audio_bitrate'] = float(audio_bitrate.decode('utf-8')) + + # Add FFmpeg performance stats + ffmpeg_speed = metadata.get(ChannelMetadataField.FFMPEG_SPEED.encode('utf-8')) + if ffmpeg_speed: + info['ffmpeg_speed'] = float(ffmpeg_speed.decode('utf-8')) + + ffmpeg_fps = metadata.get(ChannelMetadataField.FFMPEG_FPS.encode('utf-8')) + if ffmpeg_fps: + info['ffmpeg_fps'] = float(ffmpeg_fps.decode('utf-8')) + + actual_fps = metadata.get(ChannelMetadataField.ACTUAL_FPS.encode('utf-8')) + if actual_fps: + info['actual_fps'] = float(actual_fps.decode('utf-8')) + + ffmpeg_bitrate = metadata.get(ChannelMetadataField.FFMPEG_BITRATE.encode('utf-8')) + if ffmpeg_bitrate: + info['ffmpeg_bitrate'] = float(ffmpeg_bitrate.decode('utf-8')) + stream_type = metadata.get(ChannelMetadataField.STREAM_TYPE.encode('utf-8')) + if stream_type: + info['stream_type'] = stream_type.decode('utf-8') + return info @staticmethod @@ -307,16 +364,23 @@ class ChannelStatus: client_count = proxy_server.redis_client.scard(client_set_key) or 0 # Calculate uptime - created_at = float(metadata.get(ChannelMetadataField.INIT_TIME.encode('utf-8'), b'0').decode('utf-8')) + init_time_bytes = metadata.get(ChannelMetadataField.INIT_TIME.encode('utf-8'), b'0') + created_at = 
float(init_time_bytes.decode('utf-8')) uptime = time.time() - created_at if created_at > 0 else 0 + # Safely decode bytes or use defaults + def safe_decode(bytes_value, default="unknown"): + if bytes_value is None: + return default + return bytes_value.decode('utf-8') + # Simplified info info = { 'channel_id': channel_id, - 'state': metadata.get(ChannelMetadataField.STATE.encode('utf-8'), b'unknown').decode('utf-8'), - 'url': metadata.get(ChannelMetadataField.URL.encode('utf-8'), b'').decode('utf-8'), - 'stream_profile': metadata.get(ChannelMetadataField.STREAM_PROFILE.encode('utf-8'), b'').decode('utf-8'), - 'owner': metadata.get(ChannelMetadataField.OWNER.encode('utf-8'), b'unknown').decode('utf-8'), + 'state': safe_decode(metadata.get(ChannelMetadataField.STATE.encode('utf-8'))), + 'url': safe_decode(metadata.get(ChannelMetadataField.URL.encode('utf-8')), ""), + 'stream_profile': safe_decode(metadata.get(ChannelMetadataField.STREAM_PROFILE.encode('utf-8')), ""), + 'owner': safe_decode(metadata.get(ChannelMetadataField.OWNER.encode('utf-8'))), 'buffer_index': int(buffer_index_value.decode('utf-8')) if buffer_index_value else 0, 'client_count': client_count, 'uptime': uptime @@ -376,14 +440,15 @@ class ChannelStatus: # Efficient way - just retrieve the essentials client_info = { 'client_id': client_id_str, - 'user_agent': proxy_server.redis_client.hget(client_key, 'user_agent'), - 'ip_address': proxy_server.redis_client.hget(client_key, 'ip_address').decode('utf-8'), } - if client_info['user_agent']: - client_info['user_agent'] = client_info['user_agent'].decode('utf-8') - else: - client_info['user_agent'] = 'unknown' + # Safely get user_agent and ip_address + user_agent_bytes = proxy_server.redis_client.hget(client_key, 'user_agent') + client_info['user_agent'] = safe_decode(user_agent_bytes) + + ip_address_bytes = proxy_server.redis_client.hget(client_key, 'ip_address') + if ip_address_bytes: + client_info['ip_address'] = safe_decode(ip_address_bytes) # Just get connected_at for client age connected_at_bytes = proxy_server.redis_client.hget(client_key, 'connected_at') @@ -414,7 +479,32 @@ class ChannelStatus: except ValueError: logger.warning(f"Invalid m3u_profile_id format in Redis: {m3u_profile_id_bytes}") + # Add stream info to basic info as well + video_codec = metadata.get(ChannelMetadataField.VIDEO_CODEC.encode('utf-8')) + if video_codec: + info['video_codec'] = video_codec.decode('utf-8') + + resolution = metadata.get(ChannelMetadataField.RESOLUTION.encode('utf-8')) + if resolution: + info['resolution'] = resolution.decode('utf-8') + + source_fps = metadata.get(ChannelMetadataField.SOURCE_FPS.encode('utf-8')) + if source_fps: + info['source_fps'] = float(source_fps.decode('utf-8')) + ffmpeg_speed = metadata.get(ChannelMetadataField.FFMPEG_SPEED.encode('utf-8')) + if ffmpeg_speed: + info['ffmpeg_speed'] = float(ffmpeg_speed.decode('utf-8')) + audio_codec = metadata.get(ChannelMetadataField.AUDIO_CODEC.encode('utf-8')) + if audio_codec: + info['audio_codec'] = audio_codec.decode('utf-8') + audio_channels = metadata.get(ChannelMetadataField.AUDIO_CHANNELS.encode('utf-8')) + if audio_channels: + info['audio_channels'] = audio_channels.decode('utf-8') + stream_type = metadata.get(ChannelMetadataField.STREAM_TYPE.encode('utf-8')) + if stream_type: + info['stream_type'] = stream_type.decode('utf-8') + return info except Exception as e: - logger.error(f"Error getting channel info: {e}") + logger.error(f"Error getting channel info: {e}", exc_info=True) # Added exc_info for better 
debugging return None diff --git a/apps/proxy/ts_proxy/client_manager.py b/apps/proxy/ts_proxy/client_manager.py index 98dbf072..a361bfa1 100644 --- a/apps/proxy/ts_proxy/client_manager.py +++ b/apps/proxy/ts_proxy/client_manager.py @@ -4,13 +4,15 @@ import threading import logging import time import json +import gevent from typing import Set, Optional from apps.proxy.config import TSConfig as Config from redis.exceptions import ConnectionError, TimeoutError -from .constants import EventType +from .constants import EventType, ChannelState, ChannelMetadataField from .config_helper import ConfigHelper from .redis_keys import RedisKeys from .utils import get_logger +from core.utils import send_websocket_update logger = get_logger() @@ -24,6 +26,7 @@ class ClientManager: self.lock = threading.Lock() self.last_active_time = time.time() self.worker_id = worker_id # Store worker ID as instance variable + self._heartbeat_running = True # Flag to control heartbeat thread # STANDARDIZED KEYS: Move client set under channel namespace self.client_set_key = RedisKeys.clients(channel_id) @@ -31,39 +34,78 @@ class ClientManager: self.heartbeat_interval = ConfigHelper.get('CLIENT_HEARTBEAT_INTERVAL', 10) self.last_heartbeat_time = {} + # Get ProxyServer instance for ownership checks + from .server import ProxyServer + self.proxy_server = ProxyServer.get_instance() + # Start heartbeat thread for local clients self._start_heartbeat_thread() self._registered_clients = set() # Track already registered client IDs - def _start_heartbeat_thread(self): - """Start thread to regularly refresh client presence in Redis""" - def heartbeat_task(): - no_clients_count = 0 # Track consecutive empty cycles - max_empty_cycles = 3 # Exit after this many consecutive empty checks + def _trigger_stats_update(self): + """Trigger a channel stats update via WebSocket""" + try: + # Import here to avoid potential import issues + from apps.proxy.ts_proxy.channel_status import ChannelStatus + import redis + from django.conf import settings - logger.debug(f"Started heartbeat thread for channel {self.channel_id} (interval: {self.heartbeat_interval}s)") + # Get all channels from Redis using settings + redis_url = getattr(settings, 'REDIS_URL', 'redis://localhost:6379/0') + redis_client = redis.Redis.from_url(redis_url, decode_responses=True) + all_channels = [] + cursor = 0 while True: + cursor, keys = redis_client.scan(cursor, match="ts_proxy:channel:*:clients", count=100) + for key in keys: + # Extract channel ID from key + parts = key.split(':') + if len(parts) >= 4: + ch_id = parts[2] + channel_info = ChannelStatus.get_basic_channel_info(ch_id) + if channel_info: + all_channels.append(channel_info) + + if cursor == 0: + break + + # Send WebSocket update using existing infrastructure + send_websocket_update( + "updates", + "update", + { + "success": True, + "type": "channel_stats", + "stats": json.dumps({'channels': all_channels, 'count': len(all_channels)}) + } + ) + except Exception as e: + logger.debug(f"Failed to trigger stats update: {e}") + + def _start_heartbeat_thread(self): + """Start thread to regularly refresh client presence in Redis for local clients""" + def heartbeat_task(): + logger.debug(f"Started heartbeat thread for channel {self.channel_id} (interval: {self.heartbeat_interval}s)") + + while self._heartbeat_running: try: - # Wait for the interval - time.sleep(self.heartbeat_interval) + # Wait for the interval, but check stop flag frequently for quick shutdown + # Sleep in 1-second increments to allow faster response 
to stop signal + for _ in range(int(self.heartbeat_interval)): + if not self._heartbeat_running: + break + time.sleep(1) + + # Final check before doing work + if not self._heartbeat_running: + break # Send heartbeat for all local clients with self.lock: - if not self.clients or not self.redis_client: - # No clients left, increment our counter - no_clients_count += 1 - - # If we've seen no clients for several consecutive checks, exit the thread - if no_clients_count >= max_empty_cycles: - logger.info(f"No clients for channel {self.channel_id} after {no_clients_count} consecutive checks, exiting heartbeat thread") - return # This exits the thread - - # Skip this cycle if we have no clients + # Skip this cycle if we have no local clients + if not self.clients: continue - else: - # Reset counter when we see clients - no_clients_count = 0 # IMPROVED GHOST DETECTION: Check for stale clients before sending heartbeats current_time = time.time() @@ -134,11 +176,20 @@ class ClientManager: except Exception as e: logger.error(f"Error in client heartbeat thread: {e}") + logger.debug(f"Heartbeat thread exiting for channel {self.channel_id}") + thread = threading.Thread(target=heartbeat_task, daemon=True) thread.name = f"client-heartbeat-{self.channel_id}" thread.start() logger.debug(f"Started client heartbeat thread for channel {self.channel_id} (interval: {self.heartbeat_interval}s)") + def stop(self): + """Stop the heartbeat thread and cleanup""" + logger.debug(f"Stopping ClientManager for channel {self.channel_id}") + self._heartbeat_running = False + # Give the thread a moment to exit gracefully + # Note: We don't join() here because it's a daemon thread and will exit on its own + def _execute_redis_command(self, command_func): """Execute Redis command with error handling""" if not self.redis_client: @@ -237,6 +288,9 @@ class ClientManager: json.dumps(event_data) ) + # Trigger channel stats update via WebSocket + self._trigger_stats_update() + # Get total clients across all workers total_clients = self.get_total_client_count() logger.info(f"New client connected: {client_id} (local: {len(self.clients)}, total: {total_clients})") @@ -251,6 +305,8 @@ class ClientManager: def remove_client(self, client_id): """Remove a client from this channel and Redis""" + client_ip = None + with self.lock: if client_id in self.clients: self.clients.remove(client_id) @@ -261,6 +317,14 @@ class ClientManager: self.last_active_time = time.time() if self.redis_client: + # Get client IP before removing the data + client_key = f"ts_proxy:channel:{self.channel_id}:clients:{client_id}" + client_data = self.redis_client.hgetall(client_key) + if client_data and b'ip_address' in client_data: + client_ip = client_data[b'ip_address'].decode('utf-8') + elif client_data and 'ip_address' in client_data: + client_ip = client_data['ip_address'] + # Remove from channel's client set self.redis_client.srem(self.client_set_key, client_id) @@ -279,16 +343,33 @@ class ClientManager: self._notify_owner_of_activity() - # Publish client disconnected event - event_data = json.dumps({ - "event": EventType.CLIENT_DISCONNECTED, # Use constant instead of string - "channel_id": self.channel_id, - "client_id": client_id, - "worker_id": self.worker_id or "unknown", - "timestamp": time.time(), - "remaining_clients": remaining - }) - self.redis_client.publish(RedisKeys.events_channel(self.channel_id), event_data) + # Check if we're the owner - if so, handle locally; if not, publish event + am_i_owner = self.proxy_server and 
self.proxy_server.am_i_owner(self.channel_id) + + if am_i_owner: + # We're the owner - handle the disconnect directly + logger.debug(f"Owner handling CLIENT_DISCONNECTED for client {client_id} locally (not publishing)") + if remaining == 0: + # Trigger shutdown check directly via ProxyServer method + logger.debug(f"No clients left - triggering immediate shutdown check") + # Spawn greenlet to avoid blocking + import gevent + gevent.spawn(self.proxy_server.handle_client_disconnect, self.channel_id) + else: + # We're not the owner - publish event so owner can handle it + logger.debug(f"Non-owner publishing CLIENT_DISCONNECTED event for client {client_id} on channel {self.channel_id} from worker {self.worker_id}") + event_data = json.dumps({ + "event": EventType.CLIENT_DISCONNECTED, + "channel_id": self.channel_id, + "client_id": client_id, + "worker_id": self.worker_id or "unknown", + "timestamp": time.time(), + "remaining_clients": remaining + }) + self.redis_client.publish(RedisKeys.events_channel(self.channel_id), event_data) + + # Trigger channel stats update via WebSocket + self._trigger_stats_update() total_clients = self.get_total_client_count() logger.info(f"Client disconnected: {client_id} (local: {len(self.clients)}, total: {total_clients})") diff --git a/apps/proxy/ts_proxy/config_helper.py b/apps/proxy/ts_proxy/config_helper.py index f78ba0b6..d7d33558 100644 --- a/apps/proxy/ts_proxy/config_helper.py +++ b/apps/proxy/ts_proxy/config_helper.py @@ -34,12 +34,12 @@ class ConfigHelper: @staticmethod def channel_shutdown_delay(): """Get channel shutdown delay in seconds""" - return ConfigHelper.get('CHANNEL_SHUTDOWN_DELAY', 5) + return Config.get_channel_shutdown_delay() @staticmethod def initial_behind_chunks(): """Get number of chunks to start behind""" - return ConfigHelper.get('INITIAL_BEHIND_CHUNKS', 10) + return ConfigHelper.get('INITIAL_BEHIND_CHUNKS', 4) @staticmethod def keepalive_interval(): @@ -54,7 +54,7 @@ class ConfigHelper: @staticmethod def redis_chunk_ttl(): """Get Redis chunk TTL in seconds""" - return ConfigHelper.get('REDIS_CHUNK_TTL', 60) + return Config.get_redis_chunk_ttl() @staticmethod def chunk_size(): @@ -75,3 +75,37 @@ class ConfigHelper: def retry_wait_interval(): """Get wait interval between connection retries in seconds""" return ConfigHelper.get('RETRY_WAIT_INTERVAL', 0.5) # Default to 0.5 second + + @staticmethod + def url_switch_timeout(): + """Get URL switch timeout in seconds (max time allowed for a stream switch operation)""" + return ConfigHelper.get('URL_SWITCH_TIMEOUT', 20) # Default to 20 seconds + + @staticmethod + def failover_grace_period(): + """Get extra time (in seconds) to allow for stream switching before disconnecting clients""" + return ConfigHelper.get('FAILOVER_GRACE_PERIOD', 20) # Default to 20 seconds + + @staticmethod + def buffering_timeout(): + """Get buffering timeout in seconds""" + return Config.get_buffering_timeout() + + @staticmethod + def buffering_speed(): + """Get buffering speed threshold""" + return Config.get_buffering_speed() + + @staticmethod + def channel_init_grace_period(): + """Get channel initialization grace period in seconds""" + return Config.get_channel_init_grace_period() + + @staticmethod + def chunk_timeout(): + """ + Get chunk timeout in seconds (used for both socket and HTTP read timeouts). + This controls how long we wait for each chunk before timing out. + Set this higher (e.g., 30s) for slow providers that may have intermittent delays. 
+ """ + return ConfigHelper.get('CHUNK_TIMEOUT', 5) # Default 5 seconds diff --git a/apps/proxy/ts_proxy/constants.py b/apps/proxy/ts_proxy/constants.py index 4827b24b..7baa9e1c 100644 --- a/apps/proxy/ts_proxy/constants.py +++ b/apps/proxy/ts_proxy/constants.py @@ -18,6 +18,7 @@ class ChannelState: ERROR = "error" STOPPING = "stopping" STOPPED = "stopped" + BUFFERING = "buffering" # Event types class EventType: @@ -32,6 +33,8 @@ class EventType: # Stream types class StreamType: HLS = "hls" + RTSP = "rtsp" + UDP = "udp" TS = "ts" UNKNOWN = "unknown" @@ -63,6 +66,33 @@ class ChannelMetadataField: STREAM_SWITCH_TIME = "stream_switch_time" STREAM_SWITCH_REASON = "stream_switch_reason" + # FFmpeg performance metrics + FFMPEG_SPEED = "ffmpeg_speed" + FFMPEG_FPS = "ffmpeg_fps" + ACTUAL_FPS = "actual_fps" + FFMPEG_OUTPUT_BITRATE = "ffmpeg_output_bitrate" + FFMPEG_STATS_UPDATED = "ffmpeg_stats_updated" + + # Video stream info + VIDEO_CODEC = "video_codec" + RESOLUTION = "resolution" + WIDTH = "width" + HEIGHT = "height" + SOURCE_FPS = "source_fps" + PIXEL_FORMAT = "pixel_format" + VIDEO_BITRATE = "video_bitrate" + + # Audio stream info + AUDIO_CODEC = "audio_codec" + SAMPLE_RATE = "sample_rate" + AUDIO_CHANNELS = "audio_channels" + AUDIO_BITRATE = "audio_bitrate" + + # Stream format info + STREAM_TYPE = "stream_type" + # Stream info timestamp + STREAM_INFO_UPDATED = "stream_info_updated" + # Client metadata fields CONNECTED_AT = "connected_at" LAST_ACTIVE = "last_active" diff --git a/apps/proxy/ts_proxy/http_streamer.py b/apps/proxy/ts_proxy/http_streamer.py new file mode 100644 index 00000000..147d2c93 --- /dev/null +++ b/apps/proxy/ts_proxy/http_streamer.py @@ -0,0 +1,138 @@ +""" +HTTP Stream Reader - Thread-based HTTP stream reader that writes to a pipe. +This allows us to use the same fetch_chunk() path for both transcode and HTTP streams. 
+""" + +import threading +import os +import requests +from requests.adapters import HTTPAdapter +from .utils import get_logger + +logger = get_logger() + + +class HTTPStreamReader: + """Thread-based HTTP stream reader that writes to a pipe""" + + def __init__(self, url, user_agent=None, chunk_size=8192): + self.url = url + self.user_agent = user_agent + self.chunk_size = chunk_size + self.session = None + self.response = None + self.thread = None + self.pipe_read = None + self.pipe_write = None + self.running = False + + def start(self): + """Start the HTTP stream reader thread""" + # Create a pipe (works on Windows and Unix) + self.pipe_read, self.pipe_write = os.pipe() + + # Start the reader thread + self.running = True + self.thread = threading.Thread(target=self._read_stream, daemon=True) + self.thread.start() + + logger.info(f"Started HTTP stream reader thread for {self.url}") + return self.pipe_read + + def _read_stream(self): + """Thread worker that reads HTTP stream and writes to pipe""" + try: + # Build headers + headers = {} + if self.user_agent: + headers['User-Agent'] = self.user_agent + + logger.info(f"HTTP reader connecting to {self.url}") + + # Create session + self.session = requests.Session() + + # Disable retries for faster failure detection + adapter = HTTPAdapter(max_retries=0, pool_connections=1, pool_maxsize=1) + self.session.mount('http://', adapter) + self.session.mount('https://', adapter) + + # Stream the URL + self.response = self.session.get( + self.url, + headers=headers, + stream=True, + timeout=(5, 30) # 5s connect, 30s read + ) + + if self.response.status_code != 200: + logger.error(f"HTTP {self.response.status_code} from {self.url}") + return + + logger.info(f"HTTP reader connected successfully, streaming data...") + + # Stream chunks to pipe + chunk_count = 0 + for chunk in self.response.iter_content(chunk_size=self.chunk_size): + if not self.running: + break + + if chunk: + try: + # Write binary data to pipe + os.write(self.pipe_write, chunk) + chunk_count += 1 + + # Log progress periodically + if chunk_count % 1000 == 0: + logger.debug(f"HTTP reader streamed {chunk_count} chunks") + except OSError as e: + logger.error(f"Pipe write error: {e}") + break + + logger.info("HTTP stream ended") + + except requests.exceptions.RequestException as e: + logger.error(f"HTTP reader request error: {e}") + except Exception as e: + logger.error(f"HTTP reader unexpected error: {e}", exc_info=True) + finally: + self.running = False + # Close write end of pipe to signal EOF + try: + if self.pipe_write is not None: + os.close(self.pipe_write) + self.pipe_write = None + except: + pass + + def stop(self): + """Stop the HTTP stream reader""" + logger.info("Stopping HTTP stream reader") + self.running = False + + # Close response + if self.response: + try: + self.response.close() + except: + pass + + # Close session + if self.session: + try: + self.session.close() + except: + pass + + # Close write end of pipe + if self.pipe_write is not None: + try: + os.close(self.pipe_write) + self.pipe_write = None + except: + pass + + # Wait for thread + if self.thread and self.thread.is_alive(): + self.thread.join(timeout=2.0) diff --git a/apps/proxy/ts_proxy/server.py b/apps/proxy/ts_proxy/server.py index cebcc545..db5b3d57 100644 --- a/apps/proxy/ts_proxy/server.py +++ b/apps/proxy/ts_proxy/server.py @@ -19,7 +19,7 @@ import gevent # Add gevent import from typing import Dict, Optional, Set from apps.proxy.config import TSConfig as Config from apps.channels.models import Channel, Stream 
-from core.utils import RedisClient +from core.utils import RedisClient, log_system_event from redis.exceptions import ConnectionError, TimeoutError from .stream_manager import StreamManager from .stream_buffer import StreamBuffer @@ -131,6 +131,8 @@ class ProxyServer: max_retries = 10 base_retry_delay = 1 # Start with 1 second delay max_retry_delay = 30 # Cap at 30 seconds + pubsub_client = None + pubsub = None while True: try: @@ -192,35 +194,11 @@ class ProxyServer: self.redis_client.delete(disconnect_key) elif event_type == EventType.CLIENT_DISCONNECTED: - logger.debug(f"Owner received {EventType.CLIENT_DISCONNECTED} event for channel {channel_id}") - # Check if any clients remain - if channel_id in self.client_managers: - # VERIFY REDIS CLIENT COUNT DIRECTLY - client_set_key = RedisKeys.clients(channel_id) - total = self.redis_client.scard(client_set_key) or 0 - - if total == 0: - logger.debug(f"No clients left after disconnect event - stopping channel {channel_id}") - # Set the disconnect timer for other workers to see - disconnect_key = RedisKeys.last_client_disconnect(channel_id) - self.redis_client.setex(disconnect_key, 60, str(time.time())) - - # Get configured shutdown delay or default - shutdown_delay = getattr(Config, 'CHANNEL_SHUTDOWN_DELAY', 0) - - if shutdown_delay > 0: - logger.info(f"Waiting {shutdown_delay}s before stopping channel...") - gevent.sleep(shutdown_delay) # REPLACE: time.sleep(shutdown_delay) - - # Re-check client count before stopping - total = self.redis_client.scard(client_set_key) or 0 - if total > 0: - logger.info(f"New clients connected during shutdown delay - aborting shutdown") - self.redis_client.delete(disconnect_key) - return - - # Stop the channel directly - self.stop_channel(channel_id) + client_id = data.get("client_id") + worker_id = data.get("worker_id") + logger.debug(f"Owner received {EventType.CLIENT_DISCONNECTED} event for channel {channel_id}, client {client_id} from worker {worker_id}") + # Delegate to dedicated method + self.handle_client_disconnect(channel_id) elif event_type == EventType.STREAM_SWITCH: @@ -339,20 +317,27 @@ class ProxyServer: logger.error(f"Error in event listener: {e}. 
Retrying in {final_delay:.1f}s (attempt {retry_count})") gevent.sleep(final_delay) # REPLACE: time.sleep(final_delay) - # Try to clean up the old connection - try: - if 'pubsub' in locals(): - pubsub.close() - if 'pubsub_client' in locals(): - pubsub_client.close() - except: - pass - except Exception as e: logger.error(f"Error in event listener: {e}") # Add a short delay to prevent rapid retries on persistent errors gevent.sleep(5) # REPLACE: time.sleep(5) + finally: + # Always clean up PubSub connections in all error paths + try: + if pubsub: + pubsub.close() + pubsub = None + except Exception as e: + logger.debug(f"Error closing pubsub: {e}") + + try: + if pubsub_client: + pubsub_client.close() + pubsub_client = None + except Exception as e: + logger.debug(f"Error closing pubsub_client: {e}") + thread = threading.Thread(target=event_listener, daemon=True) thread.name = "redis-event-listener" thread.start() @@ -464,17 +449,54 @@ class ProxyServer: def initialize_channel(self, url, channel_id, user_agent=None, transcode=False, stream_id=None): """Initialize a channel without redundant active key""" try: - # Create buffer and client manager instances - buffer = StreamBuffer(channel_id, redis_client=self.redis_client) - client_manager = ClientManager( - channel_id, - redis_client=self.redis_client, - worker_id=self.worker_id - ) + # IMPROVED: First check if channel is already being initialized by another process + if self.redis_client: + metadata_key = RedisKeys.channel_metadata(channel_id) + if self.redis_client.exists(metadata_key): + metadata = self.redis_client.hgetall(metadata_key) + if b'state' in metadata: + state = metadata[b'state'].decode('utf-8') + active_states = [ChannelState.INITIALIZING, ChannelState.CONNECTING, + ChannelState.WAITING_FOR_CLIENTS, ChannelState.ACTIVE, ChannelState.BUFFERING] + if state in active_states: + logger.info(f"Channel {channel_id} already being initialized with state {state}") + # Create buffer and client manager only if we don't have them + if channel_id not in self.stream_buffers: + self.stream_buffers[channel_id] = StreamBuffer(channel_id, redis_client=self.redis_client) + if channel_id not in self.client_managers: + self.client_managers[channel_id] = ClientManager( + channel_id, + redis_client=self.redis_client, + worker_id=self.worker_id + ) + return True - # Store in local tracking - self.stream_buffers[channel_id] = buffer - self.client_managers[channel_id] = client_manager + # Create buffer and client manager instances (or reuse if they exist) + if channel_id not in self.stream_buffers: + buffer = StreamBuffer(channel_id, redis_client=self.redis_client) + self.stream_buffers[channel_id] = buffer + + if channel_id not in self.client_managers: + client_manager = ClientManager( + channel_id, + redis_client=self.redis_client, + worker_id=self.worker_id + ) + self.client_managers[channel_id] = client_manager + + # IMPROVED: Set initializing state in Redis BEFORE any other operations + if self.redis_client: + # Set early initialization state to prevent race conditions + metadata_key = RedisKeys.channel_metadata(channel_id) + initial_metadata = { + "state": ChannelState.INITIALIZING, + "init_time": str(time.time()), + "owner": self.worker_id + } + if stream_id: + initial_metadata["stream_id"] = str(stream_id) + self.redis_client.hset(metadata_key, mapping=initial_metadata) + logger.info(f"Set early initializing state for channel {channel_id}") # Get channel URL from Redis if available channel_url = url @@ -514,13 +536,15 @@ class ProxyServer: 
logger.info(f"Channel {channel_id} already owned by worker {current_owner}") logger.info(f"This worker ({self.worker_id}) will read from Redis buffer only") - # Create buffer but not stream manager - buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client) - self.stream_buffers[channel_id] = buffer + # Create buffer but not stream manager (only if not already exists) + if channel_id not in self.stream_buffers: + buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client) + self.stream_buffers[channel_id] = buffer - # Create client manager with channel_id and redis_client - client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id) - self.client_managers[channel_id] = client_manager + # Create client manager with channel_id and redis_client (only if not already exists) + if channel_id not in self.client_managers: + client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id) + self.client_managers[channel_id] = client_manager return True @@ -535,13 +559,15 @@ class ProxyServer: # Another worker just acquired ownership logger.info(f"Another worker just acquired ownership of channel {channel_id}") - # Create buffer but not stream manager - buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client) - self.stream_buffers[channel_id] = buffer + # Create buffer but not stream manager (only if not already exists) + if channel_id not in self.stream_buffers: + buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client) + self.stream_buffers[channel_id] = buffer - # Create client manager with channel_id and redis_client - client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id) - self.client_managers[channel_id] = client_manager + # Create client manager with channel_id and redis_client (only if not already exists) + if channel_id not in self.client_managers: + client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id) + self.client_managers[channel_id] = client_manager return True @@ -560,7 +586,7 @@ class ProxyServer: if channel_user_agent: metadata["user_agent"] = channel_user_agent - # CRITICAL FIX: Make sure stream_id is always set in metadata and properly logged + # Make sure stream_id is always set in metadata and properly logged if channel_stream_id: metadata["stream_id"] = str(channel_stream_id) logger.info(f"Storing stream_id {channel_stream_id} in metadata for channel {channel_id}") @@ -596,13 +622,37 @@ class ProxyServer: logger.info(f"Created StreamManager for channel {channel_id} with stream ID {channel_stream_id}") self.stream_managers[channel_id] = stream_manager - # Create client manager with channel_id, redis_client AND worker_id - client_manager = ClientManager( - channel_id=channel_id, - redis_client=self.redis_client, - worker_id=self.worker_id - ) - self.client_managers[channel_id] = client_manager + # Log channel start event + try: + channel_obj = Channel.objects.get(uuid=channel_id) + + # Get stream name if stream_id is available + stream_name = None + if channel_stream_id: + try: + stream_obj = Stream.objects.get(id=channel_stream_id) + stream_name = stream_obj.name + except Exception: + pass + + log_system_event( + 'channel_start', + channel_id=channel_id, + channel_name=channel_obj.name, + stream_name=stream_name, + stream_id=channel_stream_id + ) + except Exception as e: + logger.error(f"Could 
not log channel start event: {e}") + + # Create client manager with channel_id, redis_client AND worker_id (only if not already exists) + if channel_id not in self.client_managers: + client_manager = ClientManager( + channel_id=channel_id, + redis_client=self.redis_client, + worker_id=self.worker_id + ) + self.client_managers[channel_id] = client_manager # Start stream manager thread only for the owner thread = threading.Thread(target=stream_manager.run, daemon=True) @@ -652,8 +702,10 @@ class ProxyServer: state = metadata.get(b'state', b'unknown').decode('utf-8') owner = metadata.get(b'owner', b'').decode('utf-8') - # States that indicate the channel is running properly - valid_states = [ChannelState.ACTIVE, ChannelState.WAITING_FOR_CLIENTS, ChannelState.CONNECTING] + # States that indicate the channel is running properly or shutting down + valid_states = [ChannelState.ACTIVE, ChannelState.WAITING_FOR_CLIENTS, + ChannelState.CONNECTING, ChannelState.BUFFERING, ChannelState.INITIALIZING, + ChannelState.STOPPING] # If the channel is in a valid state, check if the owner is still active if state in valid_states: @@ -666,11 +718,23 @@ class ProxyServer: else: # This is a zombie channel - owner is gone but metadata still exists logger.warning(f"Detected zombie channel {channel_id} - owner {owner} is no longer active") + + # Check if there are any clients connected + client_set_key = RedisKeys.clients(channel_id) + client_count = self.redis_client.scard(client_set_key) or 0 + + if client_count > 0: + logger.warning(f"Zombie channel {channel_id} has {client_count} clients - attempting ownership takeover") + # Could potentially take ownership here in the future + # For now, just clean it up to be safe + else: + logger.warning(f"Zombie channel {channel_id} has no clients - cleaning up") + self._clean_zombie_channel(channel_id, metadata) return False - elif state in [ChannelState.STOPPING, ChannelState.STOPPED, ChannelState.ERROR]: - # These states indicate the channel should be reinitialized - logger.info(f"Channel {channel_id} exists but in terminal state: {state}") + elif state in [ChannelState.STOPPED, ChannelState.ERROR]: + # These terminal states indicate the channel should be cleaned up and reinitialized + logger.info(f"Channel {channel_id} in terminal state {state} - returning False to trigger cleanup") return False else: # Unknown or initializing state, check how long it's been in this state @@ -735,6 +799,44 @@ class ProxyServer: logger.error(f"Error cleaning zombie channel {channel_id}: {e}", exc_info=True) return False + def handle_client_disconnect(self, channel_id): + """ + Handle client disconnect event - check if channel should shut down. + Can be called directly by owner or via PubSub from non-owner workers. 
+ """ + if channel_id not in self.client_managers: + return + + try: + # VERIFY REDIS CLIENT COUNT DIRECTLY + client_set_key = RedisKeys.clients(channel_id) + total = self.redis_client.scard(client_set_key) or 0 + + if total == 0: + logger.debug(f"No clients left after disconnect event - stopping channel {channel_id}") + # Set the disconnect timer for other workers to see + disconnect_key = RedisKeys.last_client_disconnect(channel_id) + self.redis_client.setex(disconnect_key, 60, str(time.time())) + + # Get configured shutdown delay or default + shutdown_delay = ConfigHelper.channel_shutdown_delay() + + if shutdown_delay > 0: + logger.info(f"Waiting {shutdown_delay}s before stopping channel...") + gevent.sleep(shutdown_delay) + + # Re-check client count before stopping + total = self.redis_client.scard(client_set_key) or 0 + if total > 0: + logger.info(f"New clients connected during shutdown delay - aborting shutdown") + self.redis_client.delete(disconnect_key) + return + + # Stop the channel directly + self.stop_channel(channel_id) + except Exception as e: + logger.error(f"Error handling client disconnect for channel {channel_id}: {e}") + def stop_channel(self, channel_id): """Stop a channel with proper ownership handling""" try: @@ -782,6 +884,41 @@ class ProxyServer: self.release_ownership(channel_id) logger.info(f"Released ownership of channel {channel_id}") + # Log channel stop event (after cleanup, before releasing ownership section ends) + try: + channel_obj = Channel.objects.get(uuid=channel_id) + + # Calculate runtime and get total bytes from metadata + runtime = None + total_bytes = None + if self.redis_client: + metadata_key = RedisKeys.channel_metadata(channel_id) + metadata = self.redis_client.hgetall(metadata_key) + if metadata: + # Calculate runtime from init_time + if b'init_time' in metadata: + try: + init_time = float(metadata[b'init_time'].decode('utf-8')) + runtime = round(time.time() - init_time, 2) + except Exception: + pass + # Get total bytes transferred + if b'total_bytes' in metadata: + try: + total_bytes = int(metadata[b'total_bytes'].decode('utf-8')) + except Exception: + pass + + log_system_event( + 'channel_stop', + channel_id=channel_id, + channel_name=channel_obj.name, + runtime=runtime, + total_bytes=total_bytes + ) + except Exception as e: + logger.error(f"Could not log channel stop event: {e}") + # Always clean up local resources - WITH SAFE CHECKS if channel_id in self.stream_managers: del self.stream_managers[channel_id] @@ -809,6 +946,10 @@ class ProxyServer: # Clean up client manager - SAFE CHECK HERE TOO if channel_id in self.client_managers: try: + client_manager = self.client_managers[channel_id] + # Stop the heartbeat thread before deleting + if hasattr(client_manager, 'stop'): + client_manager.stop() del self.client_managers[channel_id] logger.info(f"Removed client manager for channel {channel_id}") except KeyError: @@ -883,6 +1024,15 @@ class ProxyServer: if channel_id in self.client_managers: client_manager = self.client_managers[channel_id] total_clients = client_manager.get_total_client_count() + else: + # This can happen during reconnection attempts or crashes + # Check Redis directly for any connected clients + if self.redis_client: + client_set_key = RedisKeys.clients(channel_id) + total_clients = self.redis_client.scard(client_set_key) or 0 + + if total_clients == 0: + logger.warning(f"Channel {channel_id} is missing client_manager but we're the owner with 0 clients - will trigger cleanup") # Log client count periodically if time.time() % 
30 < 1: # Every ~30 seconds @@ -890,7 +1040,14 @@ class ProxyServer: # If in connecting or waiting_for_clients state, check grace period if channel_state in [ChannelState.CONNECTING, ChannelState.WAITING_FOR_CLIENTS]: - # Get connection ready time from metadata + # Check if channel is already stopping + if self.redis_client: + stop_key = RedisKeys.channel_stopping(channel_id) + if self.redis_client.exists(stop_key): + logger.debug(f"Channel {channel_id} is already stopping - skipping monitor shutdown") + continue + + # Get connection_ready_time from metadata (indicates if channel reached ready state) connection_ready_time = None if metadata and b'connection_ready_time' in metadata: try: @@ -898,17 +1055,60 @@ class ProxyServer: except (ValueError, TypeError): pass - # If still connecting, give it more time - if channel_state == ChannelState.CONNECTING: - logger.debug(f"Channel {channel_id} still connecting - not checking for clients yet") - continue + if total_clients == 0: + # Check if we have a connection_attempt timestamp (set when CONNECTING starts) + connection_attempt_time = None + attempt_key = RedisKeys.connection_attempt(channel_id) + if self.redis_client: + attempt_value = self.redis_client.get(attempt_key) + if attempt_value: + try: + connection_attempt_time = float(attempt_value.decode('utf-8')) + except (ValueError, TypeError): + pass - # If waiting for clients, check grace period - if connection_ready_time: - grace_period = ConfigHelper.get('CHANNEL_INIT_GRACE_PERIOD', 20) + # Also get init time as a fallback + init_time = None + if metadata and b'init_time' in metadata: + try: + init_time = float(metadata[b'init_time'].decode('utf-8')) + except (ValueError, TypeError): + pass + + # Use whichever timestamp we have (prefer connection_attempt as it's more recent) + start_time = connection_attempt_time or init_time + + if start_time: + # Check which timeout to apply based on channel lifecycle + if connection_ready_time: + # Already reached ready - use shutdown_delay + time_since_ready = time.time() - connection_ready_time + shutdown_delay = ConfigHelper.channel_shutdown_delay() + + if time_since_ready > shutdown_delay: + logger.warning( + f"Channel {channel_id} in {channel_state} state with 0 clients for {time_since_ready:.1f}s " + f"(after reaching ready, shutdown_delay: {shutdown_delay}s) - stopping channel" + ) + self.stop_channel(channel_id) + continue + else: + # Never reached ready - use grace_period timeout + time_since_start = time.time() - start_time + connecting_timeout = ConfigHelper.channel_init_grace_period() + + if time_since_start > connecting_timeout: + logger.warning( + f"Channel {channel_id} stuck in {channel_state} state for {time_since_start:.1f}s " + f"with no clients (timeout: {connecting_timeout}s) - stopping channel due to upstream issues" + ) + self.stop_channel(channel_id) + continue + elif connection_ready_time: + # We have clients now, but check grace period for state transition + grace_period = ConfigHelper.channel_init_grace_period() time_since_ready = time.time() - connection_ready_time - # Add this debug log logger.debug(f"GRACE PERIOD CHECK: Channel {channel_id} in {channel_state} state, " f"time_since_ready={time_since_ready:.1f}s, grace_period={grace_period}s, " f"total_clients={total_clients}") @@ -917,16 +1117,9 @@ class ProxyServer: # Still within grace period logger.debug(f"Channel {channel_id} in grace period - {time_since_ready:.1f}s of {grace_period}s elapsed") continue - elif total_clients == 0: - # Grace period expired with no clients 
- logger.info(f"Grace period expired ({time_since_ready:.1f}s > {grace_period}s) with no clients - stopping channel {channel_id}") - self.stop_channel(channel_id) else: - # Grace period expired but we have clients - mark channel as active + # Grace period expired with clients - mark channel as active logger.info(f"Grace period expired with {total_clients} clients - marking channel {channel_id} as active") - old_state = "unknown" - if metadata and b'state' in metadata: - old_state = metadata[b'state'].decode('utf-8') if self.update_channel_state(channel_id, ChannelState.ACTIVE, { "grace_period_ended_at": str(time.time()), "clients_at_activation": str(total_clients) @@ -934,6 +1127,13 @@ class ProxyServer: logger.info(f"Channel {channel_id} activated with {total_clients} clients after grace period") # If active and no clients, start normal shutdown procedure elif channel_state not in [ChannelState.CONNECTING, ChannelState.WAITING_FOR_CLIENTS] and total_clients == 0: + # Check if channel is already stopping + if self.redis_client: + stop_key = RedisKeys.channel_stopping(channel_id) + if self.redis_client.exists(stop_key): + logger.debug(f"Channel {channel_id} is already stopping - skipping monitor shutdown") + continue + # Check if there's a pending no-clients timeout disconnect_key = RedisKeys.last_client_disconnect(channel_id) disconnect_time = None @@ -993,14 +1193,30 @@ class ProxyServer: continue # Check for local client count - if zero, clean up our local resources - if self.client_managers[channel_id].get_client_count() == 0: - # We're not the owner, and we have no local clients - clean up our resources - logger.debug(f"Non-owner cleanup: Channel {channel_id} has no local clients, cleaning up local resources") + if channel_id in self.client_managers: + if self.client_managers[channel_id].get_client_count() == 0: + # We're not the owner, and we have no local clients - clean up our resources + logger.debug(f"Non-owner cleanup: Channel {channel_id} has no local clients, cleaning up local resources") + self._cleanup_local_resources(channel_id) + else: + # This shouldn't happen, but clean up anyway + logger.warning(f"Non-owner cleanup: Channel {channel_id} has no client_manager entry, cleaning up local resources") self._cleanup_local_resources(channel_id) except Exception as e: logger.error(f"Error in cleanup thread: {e}", exc_info=True) + # Periodically check for orphaned channels (every 30 seconds) + if hasattr(self, '_last_orphan_check'): + if time.time() - self._last_orphan_check > 30: + try: + self._check_orphaned_metadata() + self._last_orphan_check = time.time() + except Exception as orphan_error: + logger.error(f"Error checking orphaned metadata: {orphan_error}", exc_info=True) + else: + self._last_orphan_check = time.time() + gevent.sleep(ConfigHelper.cleanup_check_interval()) # REPLACE: time.sleep(ConfigHelper.cleanup_check_interval()) thread = threading.Thread(target=cleanup_task, daemon=True) @@ -1022,10 +1238,6 @@ class ProxyServer: try: channel_id = key.decode('utf-8').split(':')[2] - # Skip channels we already have locally - if channel_id in self.stream_buffers: - continue - # Check if this channel has an owner owner = self.get_channel_owner(channel_id) @@ -1040,13 +1252,84 @@ class ProxyServer: else: # Orphaned channel with no clients - clean it up logger.info(f"Cleaning up orphaned channel {channel_id}") - self._clean_redis_keys(channel_id) + + # If we have it locally, stop it properly to clean up processes + if channel_id in self.stream_managers or channel_id in 
self.client_managers: + logger.info(f"Orphaned channel {channel_id} is local - calling stop_channel") + self.stop_channel(channel_id) + else: + # Just clean up Redis keys for remote channels + self._clean_redis_keys(channel_id) except Exception as e: logger.error(f"Error processing channel key {key}: {e}") except Exception as e: logger.error(f"Error checking orphaned channels: {e}") + def _check_orphaned_metadata(self): + """ + Check for metadata entries that have no owner and no clients. + This catches zombie channels that weren't cleaned up properly. + """ + if not self.redis_client: + return + + try: + # Get all channel metadata keys + channel_pattern = "ts_proxy:channel:*:metadata" + channel_keys = self.redis_client.keys(channel_pattern) + + for key in channel_keys: + try: + channel_id = key.decode('utf-8').split(':')[2] + + # Get metadata first + metadata = self.redis_client.hgetall(key) + if not metadata: + # Empty metadata - clean it up + logger.warning(f"Found empty metadata for channel {channel_id} - cleaning up") + # If we have it locally, stop it properly + if channel_id in self.stream_managers or channel_id in self.client_managers: + self.stop_channel(channel_id) + else: + self._clean_redis_keys(channel_id) + continue + + # Get owner + owner = metadata.get(b'owner', b'').decode('utf-8') if b'owner' in metadata else '' + + # Check if owner is still alive + owner_alive = False + if owner: + owner_heartbeat_key = f"ts_proxy:worker:{owner}:heartbeat" + owner_alive = self.redis_client.exists(owner_heartbeat_key) + + # Check client count + client_set_key = RedisKeys.clients(channel_id) + client_count = self.redis_client.scard(client_set_key) or 0 + + # If no owner and no clients, clean it up + if not owner_alive and client_count == 0: + state = metadata.get(b'state', b'unknown').decode('utf-8') if b'state' in metadata else 'unknown' + logger.warning(f"Found orphaned metadata for channel {channel_id} (state: {state}, owner: {owner}, clients: {client_count}) - cleaning up") + + # If we have it locally, stop it properly to clean up transcode/proxy processes + if channel_id in self.stream_managers or channel_id in self.client_managers: + logger.info(f"Channel {channel_id} is local - calling stop_channel to clean up processes") + self.stop_channel(channel_id) + else: + # Just clean up Redis keys for remote channels + self._clean_redis_keys(channel_id) + elif not owner_alive and client_count > 0: + # Owner is gone but clients remain - just log for now + logger.warning(f"Found orphaned channel {channel_id} with {client_count} clients but no owner - may need ownership takeover") + + except Exception as e: + logger.error(f"Error processing metadata key {key}: {e}", exc_info=True) + + except Exception as e: + logger.error(f"Error checking orphaned metadata: {e}", exc_info=True) + def _clean_redis_keys(self, channel_id): """Clean up all Redis keys for a channel more efficiently""" # Release the channel, stream, and profile keys from the channel diff --git a/apps/proxy/ts_proxy/services/channel_service.py b/apps/proxy/ts_proxy/services/channel_service.py index bd1f2f81..4c4a73ac 100644 --- a/apps/proxy/ts_proxy/services/channel_service.py +++ b/apps/proxy/ts_proxy/services/channel_service.py @@ -6,6 +6,7 @@ This separates business logic from HTTP handling in views. 
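The orphaned-metadata check above treats a channel as a zombie when its recorded owner has no live heartbeat key and its client set is empty. A rough sketch of that predicate against a redis-py client, reusing the ts_proxy key patterns shown above; the helper name is illustrative and not part of the patch:

import redis

def is_orphaned(r: redis.Redis, channel_id: str, owner: str) -> bool:
    """True when the owning worker's heartbeat is gone and no clients remain."""
    owner_alive = bool(owner) and bool(r.exists(f"ts_proxy:worker:{owner}:heartbeat"))
    client_count = r.scard(f"ts_proxy:channel:{channel_id}:clients") or 0
    return not owner_alive and client_count == 0

# Usage sketch: r = redis.Redis(); is_orphaned(r, "abc123", "worker-1")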
import logging import time import json +import re from django.shortcuts import get_object_or_404 from apps.channels.models import Channel, Stream from apps.proxy.config import TSConfig as Config @@ -13,6 +14,8 @@ from ..server import ProxyServer from ..redis_keys import RedisKeys from ..constants import EventType, ChannelState, ChannelMetadataField from ..url_utils import get_stream_info_for_switch +from core.utils import log_system_event +from .log_parsers import LogParserFactory logger = logging.getLogger("ts_proxy") @@ -181,7 +184,7 @@ class ChannelService: old_url = manager.url # Update the stream - success = manager.update_url(new_url, stream_id) + success = manager.update_url(new_url, stream_id, m3u_profile_id) logger.info(f"Stream URL changed from {old_url} to {new_url}, result: {success}") result.update({ @@ -415,6 +418,149 @@ class ChannelService: logger.error(f"Error validating channel state: {e}", exc_info=True) return False, None, None, {"error": f"Exception: {str(e)}"} + @staticmethod + def parse_and_store_stream_info(channel_id, stream_info_line, stream_type="video", stream_id=None): + """ + Parse stream info from FFmpeg/VLC/Streamlink logs and store in Redis/DB. + Uses specialized parsers for each streaming tool. + """ + try: + # Use factory to parse the line based on stream type + parsed_data = LogParserFactory.parse(stream_type, stream_info_line) + + if not parsed_data: + return + + # Update Redis and database with parsed data + ChannelService._update_stream_info_in_redis( + channel_id, + parsed_data.get('video_codec'), + parsed_data.get('resolution'), + parsed_data.get('width'), + parsed_data.get('height'), + parsed_data.get('source_fps'), + parsed_data.get('pixel_format'), + parsed_data.get('video_bitrate'), + parsed_data.get('audio_codec'), + parsed_data.get('sample_rate'), + parsed_data.get('audio_channels'), + parsed_data.get('audio_bitrate'), + parsed_data.get('stream_type') + ) + + if stream_id: + ChannelService._update_stream_stats_in_db( + stream_id, + video_codec=parsed_data.get('video_codec'), + resolution=parsed_data.get('resolution'), + source_fps=parsed_data.get('source_fps'), + pixel_format=parsed_data.get('pixel_format'), + video_bitrate=parsed_data.get('video_bitrate'), + audio_codec=parsed_data.get('audio_codec'), + sample_rate=parsed_data.get('sample_rate'), + audio_channels=parsed_data.get('audio_channels'), + audio_bitrate=parsed_data.get('audio_bitrate'), + stream_type=parsed_data.get('stream_type') + ) + + except Exception as e: + logger.debug(f"Error parsing {stream_type} stream info: {e}") + + @staticmethod + def _update_stream_info_in_redis(channel_id, codec, resolution, width, height, fps, pixel_format, video_bitrate, audio_codec=None, sample_rate=None, channels=None, audio_bitrate=None, input_format=None): + """Update stream info in Redis metadata""" + try: + proxy_server = ProxyServer.get_instance() + if not proxy_server.redis_client: + return False + + metadata_key = RedisKeys.channel_metadata(channel_id) + update_data = { + ChannelMetadataField.STREAM_INFO_UPDATED: str(time.time()) + } + + # Video info + if codec is not None: + update_data[ChannelMetadataField.VIDEO_CODEC] = str(codec) + + if resolution is not None: + update_data[ChannelMetadataField.RESOLUTION] = str(resolution) + + if width is not None: + update_data[ChannelMetadataField.WIDTH] = str(width) + + if height is not None: + update_data[ChannelMetadataField.HEIGHT] = str(height) + + if fps is not None: + update_data[ChannelMetadataField.SOURCE_FPS] = str(round(fps, 2)) + + if 
pixel_format is not None: + update_data[ChannelMetadataField.PIXEL_FORMAT] = str(pixel_format) + + if video_bitrate is not None: + update_data[ChannelMetadataField.VIDEO_BITRATE] = str(round(video_bitrate, 1)) + + # Audio info + if audio_codec is not None: + update_data[ChannelMetadataField.AUDIO_CODEC] = str(audio_codec) + + if sample_rate is not None: + update_data[ChannelMetadataField.SAMPLE_RATE] = str(sample_rate) + + if channels is not None: + update_data[ChannelMetadataField.AUDIO_CHANNELS] = str(channels) + + if audio_bitrate is not None: + update_data[ChannelMetadataField.AUDIO_BITRATE] = str(round(audio_bitrate, 1)) + if input_format is not None: + update_data[ChannelMetadataField.STREAM_TYPE] = str(input_format) + + proxy_server.redis_client.hset(metadata_key, mapping=update_data) + return True + + except Exception as e: + logger.error(f"Error updating stream info in Redis: {e}") + return False + + @staticmethod + def _update_stream_stats_in_db(stream_id, **stats): + """Update stream stats in database""" + from django.db import connection + + try: + from apps.channels.models import Stream + from django.utils import timezone + + stream = Stream.objects.get(id=stream_id) + + # Get existing stats or create new dict + current_stats = stream.stream_stats or {} + + # Update with new stats + for key, value in stats.items(): + if value is not None: + current_stats[key] = value + + # Save updated stats and timestamp + stream.stream_stats = current_stats + stream.stream_stats_updated_at = timezone.now() + stream.save(update_fields=['stream_stats', 'stream_stats_updated_at']) + + logger.debug(f"Updated stream stats in database for stream {stream_id}: {stats}") + return True + + except Exception as e: + logger.error(f"Error updating stream stats in database for stream {stream_id}: {e}") + return False + + finally: + # Always close database connection after update + try: + connection.close() + except Exception: + pass + # Helper methods for Redis operations @staticmethod @@ -470,7 +616,7 @@ class ChannelService: switch_request = { "event": EventType.STREAM_SWITCH, - "channel_id": channel_id, + "channel_id": str(channel_id), "url": new_url, "user_agent": user_agent, "stream_id": stream_id, @@ -483,6 +629,7 @@ class ChannelService: RedisKeys.events_channel(channel_id), json.dumps(switch_request) ) + return True @staticmethod @@ -495,7 +642,7 @@ class ChannelService: stop_request = { "event": EventType.CHANNEL_STOP, - "channel_id": channel_id, + "channel_id": str(channel_id), "requester_worker_id": proxy_server.worker_id, "timestamp": time.time() } @@ -518,7 +665,7 @@ class ChannelService: stop_request = { "event": EventType.CLIENT_STOP, - "channel_id": channel_id, + "channel_id": str(channel_id), "client_id": client_id, "requester_worker_id": proxy_server.worker_id, "timestamp": time.time() diff --git a/apps/proxy/ts_proxy/services/log_parsers.py b/apps/proxy/ts_proxy/services/log_parsers.py new file mode 100644 index 00000000..95ee7a06 --- /dev/null +++ b/apps/proxy/ts_proxy/services/log_parsers.py @@ -0,0 +1,410 @@ +"""Log parsers for FFmpeg, Streamlink, and VLC output.""" +import re +import logging +from abc import ABC, abstractmethod +from typing import Optional, Dict, Any + +logger = logging.getLogger(__name__) + + +class BaseLogParser(ABC): + """Base class for log parsers""" + + # Map of stream_type -> method_name that this parser handles + STREAM_TYPE_METHODS: Dict[str, str] = {} + + @abstractmethod + def can_parse(self, line: str) -> Optional[str]: + """ + Check if this parser can 
handle the line. + Returns the stream_type if it can parse, None otherwise. + e.g., 'video', 'audio', 'vlc_video', 'vlc_audio', 'streamlink' + """ + pass + + @abstractmethod + def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]: + pass + + @abstractmethod + def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]: + pass + + @abstractmethod + def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]: + pass + + +class FFmpegLogParser(BaseLogParser): + """Parser for FFmpeg log output""" + + STREAM_TYPE_METHODS = { + 'input': 'parse_input_format', + 'video': 'parse_video_stream', + 'audio': 'parse_audio_stream' + } + + def can_parse(self, line: str) -> Optional[str]: + """Check if this is an FFmpeg line we can parse""" + lower = line.lower() + + # Input format detection + if lower.startswith('input #'): + return 'input' + + # Stream info (only during input phase, but we'll let stream_manager handle phase tracking) + if 'stream #' in lower: + if 'video:' in lower: + return 'video' + elif 'audio:' in lower: + return 'audio' + + return None + + def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]: + """Parse FFmpeg input format (e.g., mpegts, hls)""" + try: + input_match = re.search(r'Input #\d+,\s*([^,]+)', line) + input_format = input_match.group(1).strip() if input_match else None + + if input_format: + logger.debug(f"Input format info - Format: {input_format}") + return {'stream_type': input_format} + except Exception as e: + logger.debug(f"Error parsing FFmpeg input format: {e}") + + return None + + def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]: + """Parse FFmpeg video stream info""" + try: + result = {} + + # Extract codec, resolution, fps, pixel format, bitrate + codec_match = re.search(r'Video:\s*([a-zA-Z0-9_]+)', line) + if codec_match: + result['video_codec'] = codec_match.group(1) + + resolution_match = re.search(r'\b(\d{3,5})x(\d{3,5})\b', line) + if resolution_match: + width = int(resolution_match.group(1)) + height = int(resolution_match.group(2)) + if 100 <= width <= 10000 and 100 <= height <= 10000: + result['resolution'] = f"{width}x{height}" + result['width'] = width + result['height'] = height + + fps_match = re.search(r'(\d+(?:\.\d+)?)\s*fps', line) + if fps_match: + result['source_fps'] = float(fps_match.group(1)) + + pixel_format_match = re.search(r'Video:\s*[^,]+,\s*([^,(]+)', line) + if pixel_format_match: + pf = pixel_format_match.group(1).strip() + if '(' in pf: + pf = pf.split('(')[0].strip() + result['pixel_format'] = pf + + bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', line) + if bitrate_match: + result['video_bitrate'] = float(bitrate_match.group(1)) + + if result: + logger.info(f"Video stream info - Codec: {result.get('video_codec')}, " + f"Resolution: {result.get('resolution')}, " + f"Source FPS: {result.get('source_fps')}, " + f"Pixel Format: {result.get('pixel_format')}, " + f"Video Bitrate: {result.get('video_bitrate')} kb/s") + return result + + except Exception as e: + logger.debug(f"Error parsing FFmpeg video stream info: {e}") + + return None + + def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]: + """Parse FFmpeg audio stream info""" + try: + result = {} + + codec_match = re.search(r'Audio:\s*([a-zA-Z0-9_]+)', line) + if codec_match: + result['audio_codec'] = codec_match.group(1) + + sample_rate_match = re.search(r'(\d+)\s*Hz', line) + if sample_rate_match: + result['sample_rate'] = int(sample_rate_match.group(1)) + + channel_match = 
re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', line, re.IGNORECASE) + if channel_match: + result['audio_channels'] = channel_match.group(1) + + bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', line) + if bitrate_match: + result['audio_bitrate'] = float(bitrate_match.group(1)) + + if result: + return result + + except Exception as e: + logger.debug(f"Error parsing FFmpeg audio stream info: {e}") + + return None + + +class VLCLogParser(BaseLogParser): + """Parser for VLC log output""" + + STREAM_TYPE_METHODS = { + 'vlc_video': 'parse_video_stream', + 'vlc_audio': 'parse_audio_stream' + } + + def can_parse(self, line: str) -> Optional[str]: + """Check if this is a VLC line we can parse""" + lower = line.lower() + + # VLC TS demux codec detection + if 'ts demux debug' in lower and 'type=' in lower: + if 'video' in lower: + return 'vlc_video' + elif 'audio' in lower: + return 'vlc_audio' + + # VLC decoder output + if 'decoder' in lower and ('channels:' in lower or 'samplerate:' in lower or 'x' in line or 'fps' in lower): + if 'audio' in lower or 'channels:' in lower or 'samplerate:' in lower: + return 'vlc_audio' + else: + return 'vlc_video' + + # VLC transcode output for resolution/FPS + if 'stream_out_transcode' in lower and ('source fps' in lower or ('source ' in lower and 'x' in line)): + return 'vlc_video' + + return None + + def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]: + return None + + def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]: + """Parse VLC TS demux output and decoder info for video""" + try: + lower = line.lower() + result = {} + + # Codec detection from TS demux + video_codec_map = { + ('avc', 'h.264', 'type=0x1b'): "h264", + ('hevc', 'h.265', 'type=0x24'): "hevc", + ('mpeg-2', 'type=0x02'): "mpeg2video", + ('mpeg-4', 'type=0x10'): "mpeg4" + } + + for patterns, codec in video_codec_map.items(): + if any(p in lower for p in patterns): + result['video_codec'] = codec + break + + # Extract FPS from transcode output: "source fps 30/1" + fps_fraction_match = re.search(r'source fps\s+(\d+)/(\d+)', lower) + if fps_fraction_match: + numerator = int(fps_fraction_match.group(1)) + denominator = int(fps_fraction_match.group(2)) + if denominator > 0: + result['source_fps'] = numerator / denominator + + # Extract resolution from transcode output: "source 1280x720" + source_res_match = re.search(r'source\s+(\d{3,4})x(\d{3,4})', lower) + if source_res_match: + width = int(source_res_match.group(1)) + height = int(source_res_match.group(2)) + if 100 <= width <= 10000 and 100 <= height <= 10000: + result['resolution'] = f"{width}x{height}" + result['width'] = width + result['height'] = height + else: + # Fallback: generic resolution pattern + resolution_match = re.search(r'(\d{3,4})x(\d{3,4})', line) + if resolution_match: + width = int(resolution_match.group(1)) + height = int(resolution_match.group(2)) + if 100 <= width <= 10000 and 100 <= height <= 10000: + result['resolution'] = f"{width}x{height}" + result['width'] = width + result['height'] = height + + # Fallback: try to extract FPS from generic format + if 'source_fps' not in result: + fps_match = re.search(r'(\d+\.?\d*)\s*fps', lower) + if fps_match: + result['source_fps'] = float(fps_match.group(1)) + + return result if result else None + + except Exception as e: + logger.debug(f"Error parsing VLC video stream info: {e}") + + return None + + def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]: + """Parse VLC TS demux output and decoder info for audio""" + try: + 
lower = line.lower() + result = {} + + # Codec detection from TS demux + audio_codec_map = { + ('type=0xf', 'adts'): "aac", + ('type=0x03', 'type=0x04'): "mp3", + ('type=0x06', 'type=0x81'): "ac3", + ('type=0x0b', 'lpcm'): "pcm" + } + + for patterns, codec in audio_codec_map.items(): + if any(p in lower for p in patterns): + result['audio_codec'] = codec + break + + # VLC decoder format: "AAC channels: 2 samplerate: 48000" + if 'channels:' in lower: + channels_match = re.search(r'channels:\s*(\d+)', lower) + if channels_match: + num_channels = int(channels_match.group(1)) + # Convert number to name + channel_names = {1: 'mono', 2: 'stereo', 6: '5.1', 8: '7.1'} + result['audio_channels'] = channel_names.get(num_channels, str(num_channels)) + + if 'samplerate:' in lower: + samplerate_match = re.search(r'samplerate:\s*(\d+)', lower) + if samplerate_match: + result['sample_rate'] = int(samplerate_match.group(1)) + + # Try to extract sample rate (Hz format) + sample_rate_match = re.search(r'(\d+)\s*hz', lower) + if sample_rate_match and 'sample_rate' not in result: + result['sample_rate'] = int(sample_rate_match.group(1)) + + # Try to extract channels (word format) + if 'audio_channels' not in result: + channel_match = re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', lower) + if channel_match: + result['audio_channels'] = channel_match.group(1) + + return result if result else None + + except Exception as e: + logger.error(f"[VLC AUDIO PARSER] Error parsing VLC audio stream info: {e}") + + return None + + +class StreamlinkLogParser(BaseLogParser): + """Parser for Streamlink log output""" + + STREAM_TYPE_METHODS = { + 'streamlink': 'parse_video_stream' + } + + def can_parse(self, line: str) -> Optional[str]: + """Check if this is a Streamlink line we can parse""" + lower = line.lower() + + if 'opening stream:' in lower or 'available streams:' in lower: + return 'streamlink' + + return None + + def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]: + return None + + def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]: + """Parse Streamlink quality/resolution""" + try: + quality_match = re.search(r'(\d+p|\d+x\d+)', line) + if quality_match: + quality = quality_match.group(1) + + if 'x' in quality: + resolution = quality + width, height = map(int, quality.split('x')) + else: + resolutions = { + '2160p': ('3840x2160', 3840, 2160), + '1080p': ('1920x1080', 1920, 1080), + '720p': ('1280x720', 1280, 720), + '480p': ('854x480', 854, 480), + '360p': ('640x360', 640, 360) + } + resolution, width, height = resolutions.get(quality, ('1920x1080', 1920, 1080)) + + return { + 'video_codec': 'h264', + 'resolution': resolution, + 'width': width, + 'height': height, + 'pixel_format': 'yuv420p' + } + + except Exception as e: + logger.debug(f"Error parsing Streamlink video info: {e}") + + return None + + def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]: + return None + + +class LogParserFactory: + """Factory to get the appropriate log parser""" + + _parsers = { + 'ffmpeg': FFmpegLogParser(), + 'vlc': VLCLogParser(), + 'streamlink': StreamlinkLogParser() + } + + @classmethod + def _get_parser_and_method(cls, stream_type: str) -> Optional[tuple[BaseLogParser, str]]: + """Determine parser and method from stream_type""" + # Check each parser to see if it handles this stream_type + for parser in cls._parsers.values(): + method_name = parser.STREAM_TYPE_METHODS.get(stream_type) + if method_name: + return (parser, method_name) + + return None + + @classmethod + def 
parse(cls, stream_type: str, line: str) -> Optional[Dict[str, Any]]: + """ + Parse a log line based on stream type. + Returns parsed data or None if parsing fails. + """ + result = cls._get_parser_and_method(stream_type) + if not result: + return None + + parser, method_name = result + method = getattr(parser, method_name, None) + if method: + return method(line) + + return None + + @classmethod + def auto_parse(cls, line: str) -> Optional[tuple[str, Dict[str, Any]]]: + """ + Automatically detect which parser can handle this line and parse it. + Returns (stream_type, parsed_data) or None if no parser can handle it. + """ + # Try each parser to see if it can handle this line + for parser in cls._parsers.values(): + stream_type = parser.can_parse(line) + if stream_type: + # Parser can handle this line, now parse it + parsed_data = cls.parse(stream_type, line) + if parsed_data: + return (stream_type, parsed_data) + + return None diff --git a/apps/proxy/ts_proxy/stream_buffer.py b/apps/proxy/ts_proxy/stream_buffer.py index f0be1c52..85feb5dd 100644 --- a/apps/proxy/ts_proxy/stream_buffer.py +++ b/apps/proxy/ts_proxy/stream_buffer.py @@ -12,6 +12,7 @@ from .config_helper import ConfigHelper from .constants import TS_PACKET_SIZE from .utils import get_logger import gevent.event +import gevent # Make sure this import is at the top logger = get_logger() @@ -236,8 +237,8 @@ class StreamBuffer: timers_cancelled = 0 for timer in list(self.fill_timers): try: - if timer and timer.is_alive(): - timer.cancel() + if timer and not timer.dead: # Changed from timer.is_alive() + timer.kill() # Changed from timer.cancel() timers_cancelled += 1 except Exception as e: logger.error(f"Error canceling timer: {e}") @@ -302,6 +303,14 @@ class StreamBuffer: # Retrieve chunks chunks = self.get_chunks_exact(client_index, chunk_count) + # Check if we got significantly fewer chunks than expected (likely due to expiration) + # Only check if we expected multiple chunks and got none or very few + if chunk_count > 3 and len(chunks) == 0 and chunks_behind > 10: + # Chunks are missing - likely expired from Redis + # Return empty list to signal client should skip forward + logger.debug(f"Chunks missing for client at index {client_index}, buffer at {self.index} ({chunks_behind} behind)") + return [], client_index + # Check total size total_size = sum(len(c) for c in chunks) @@ -315,7 +324,7 @@ class StreamBuffer: additional_size = sum(len(c) for c in more_chunks) if total_size + additional_size <= MAX_SIZE: chunks.extend(more_chunks) - chunk_count += additional + chunk_count += len(more_chunks) # Fixed: count actual additional chunks retrieved return chunks, client_index + chunk_count @@ -325,8 +334,7 @@ class StreamBuffer: if self.stopping: return None - timer = threading.Timer(delay, callback, args=args, kwargs=kwargs) - timer.daemon = True - timer.start() + # Replace threading.Timer with gevent.spawn_later for better compatibility + timer = gevent.spawn_later(delay, callback, *args, **kwargs) self.fill_timers.append(timer) return timer diff --git a/apps/proxy/ts_proxy/stream_generator.py b/apps/proxy/ts_proxy/stream_generator.py index 82060f2f..50404f1d 100644 --- a/apps/proxy/ts_proxy/stream_generator.py +++ b/apps/proxy/ts_proxy/stream_generator.py @@ -8,11 +8,14 @@ import logging import threading import gevent # Add this import at the top of your file from apps.proxy.config import TSConfig as Config +from apps.channels.models import Channel +from core.utils import log_system_event from .server import ProxyServer from .utils 
import create_ts_packet, get_logger from .redis_keys import RedisKeys from .utils import get_logger from .constants import ChannelMetadataField +from .config_helper import ConfigHelper # Add this import logger = get_logger() @@ -51,6 +54,10 @@ class StreamGenerator: self.last_stats_bytes = 0 self.current_rate = 0.0 + # TTL refresh tracking + self.last_ttl_refresh = time.time() + self.ttl_refresh_interval = 3 # Refresh TTL every 3 seconds of active streaming + def generate(self): """ Generator function that produces the stream content for the client. @@ -83,6 +90,20 @@ class StreamGenerator: if not self._setup_streaming(): return + # Log client connect event + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'client_connect', + channel_id=self.channel_id, + channel_name=channel_obj.name, + client_ip=self.client_ip, + client_id=self.client_id, + user_agent=self.client_user_agent[:100] if self.client_user_agent else None + ) + except Exception as e: + logger.error(f"Could not log client connect event: {e}") + # Main streaming loop for chunk in self._stream_data_generator(): yield chunk @@ -95,7 +116,7 @@ class StreamGenerator: def _wait_for_initialization(self): """Wait for channel initialization to complete, sending keepalive packets.""" initialization_start = time.time() - max_init_wait = getattr(Config, 'CLIENT_WAIT_TIMEOUT', 30) + max_init_wait = ConfigHelper.client_wait_timeout() keepalive_interval = 0.5 last_keepalive = 0 proxy_server = ProxyServer.get_instance() @@ -119,9 +140,19 @@ class StreamGenerator: yield create_ts_packet('error', f"Error: {error_message}") return False else: + # Improved logging to track initialization progress + init_time = "unknown" + if b'init_time' in metadata: + try: + init_time_float = float(metadata[b'init_time'].decode('utf-8')) + init_duration = time.time() - init_time_float + init_time = f"{init_duration:.1f}s ago" + except: + pass + # Still initializing - send keepalive if needed if time.time() - last_keepalive >= keepalive_interval: - status_msg = f"Initializing: {state}" + status_msg = f"Initializing: {state} (started {init_time})" keepalive_packet = create_ts_packet('keepalive', status_msg) logger.debug(f"[{self.client_id}] Sending keepalive packet during initialization, state={state}") yield keepalive_packet @@ -156,7 +187,7 @@ class StreamGenerator: return False # Client state tracking - use config for initial position - initial_behind = getattr(Config, 'INITIAL_BEHIND_CHUNKS', 10) + initial_behind = ConfigHelper.initial_behind_chunks() current_buffer_index = buffer.index self.local_index = max(0, current_buffer_index - initial_behind) @@ -193,6 +224,18 @@ class StreamGenerator: self.empty_reads += 1 self.consecutive_empty += 1 + # Check if we're too far behind (chunks expired from Redis) + chunks_behind = self.buffer.index - self.local_index + if chunks_behind > 50: # If more than 50 chunks behind, jump forward + # Calculate new position: stay a few chunks behind current buffer + initial_behind = ConfigHelper.initial_behind_chunks() + new_index = max(self.local_index, self.buffer.index - initial_behind) + + logger.warning(f"[{self.client_id}] Client too far behind ({chunks_behind} chunks), jumping from {self.local_index} to {new_index}") + self.local_index = new_index + self.consecutive_empty = 0 # Reset since we're repositioning + continue # Try again immediately with new position + if self._should_send_keepalive(self.local_index): keepalive_packet = create_ts_packet('keepalive') 
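The catch-up logic above repositions a lagging client just behind the live buffer head instead of letting it chase chunks that have already expired from Redis. A compact sketch of that repositioning rule, assuming the lag threshold of 50 and the INITIAL_BEHIND_CHUNKS default of 10 referenced above; the function name is illustrative:

def catch_up_index(local_index: int, buffer_index: int,
                   initial_behind: int = 10, max_lag: int = 50) -> int:
    """Return the index a lagging client should resume from.

    If the client has fallen more than max_lag chunks behind the buffer
    head, jump forward to initial_behind chunks behind the head;
    otherwise keep the current position.
    """
    if buffer_index - local_index > max_lag:
        return max(local_index, buffer_index - initial_behind)
    return local_index

# Example: 120 chunks behind a head at index 500 -> resume at 490
print(catch_up_index(380, 500))  # 490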
logger.debug(f"[{self.client_id}] Sending keepalive packet while waiting at buffer head") @@ -313,7 +356,20 @@ class StreamGenerator: ChannelMetadataField.STATS_UPDATED_AT: str(current_time) } proxy_server.redis_client.hset(client_key, mapping=stats) - # No need to set expiration as client heartbeat will refresh this key + + # Refresh TTL periodically while actively streaming + # This provides proof-of-life independent of heartbeat thread + if current_time - self.last_ttl_refresh > self.ttl_refresh_interval: + try: + # Refresh TTL on client key + proxy_server.redis_client.expire(client_key, Config.CLIENT_RECORD_TTL) + # Also refresh the client set TTL + client_set_key = f"ts_proxy:channel:{self.channel_id}:clients" + proxy_server.redis_client.expire(client_set_key, Config.CLIENT_RECORD_TTL) + self.last_ttl_refresh = current_time + logger.debug(f"[{self.client_id}] Refreshed client TTL (active streaming)") + except Exception as ttl_error: + logger.debug(f"[{self.client_id}] Failed to refresh TTL: {ttl_error}") except Exception as e: logger.warning(f"[{self.client_id}] Failed to store stats in Redis: {e}") @@ -337,8 +393,8 @@ class StreamGenerator: def _is_timeout(self): """Check if the stream has timed out.""" # Get a more generous timeout for stream switching - stream_timeout = getattr(Config, 'STREAM_TIMEOUT', 10) - failover_grace_period = getattr(Config, 'FAILOVER_GRACE_PERIOD', 20) + stream_timeout = ConfigHelper.stream_timeout() + failover_grace_period = ConfigHelper.failover_grace_period() total_timeout = stream_timeout + failover_grace_period # Disconnect after long inactivity @@ -399,6 +455,22 @@ class StreamGenerator: total_clients = client_manager.get_total_client_count() logger.info(f"[{self.client_id}] Disconnected after {elapsed:.2f}s (local: {local_clients}, total: {total_clients})") + # Log client disconnect event + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'client_disconnect', + channel_id=self.channel_id, + channel_name=channel_obj.name, + client_ip=self.client_ip, + client_id=self.client_id, + user_agent=self.client_user_agent[:100] if self.client_user_agent else None, + duration=round(elapsed, 2), + bytes_sent=self.bytes_sent + ) + except Exception as e: + logger.error(f"Could not log client disconnect event: {e}") + # Schedule channel shutdown if no clients left if not stream_released: # Only if we haven't already released the stream self._schedule_channel_shutdown_if_needed(local_clients) @@ -415,7 +487,7 @@ class StreamGenerator: def delayed_shutdown(): # Use the config setting instead of hardcoded value - shutdown_delay = getattr(Config, 'CHANNEL_SHUTDOWN_DELAY', 5) + shutdown_delay = ConfigHelper.channel_shutdown_delay() # Use ConfigHelper logger.info(f"Waiting {shutdown_delay}s before checking if channel should be stopped") gevent.sleep(shutdown_delay) # Replace time.sleep @@ -436,4 +508,4 @@ def create_stream_generator(channel_id, client_id, client_ip, client_user_agent, Returns a function that can be passed to StreamingHttpResponse. 
""" generator = StreamGenerator(channel_id, client_id, client_ip, client_user_agent, channel_initializing) - return generator.generate + return generator.generate \ No newline at end of file diff --git a/apps/proxy/ts_proxy/stream_manager.py b/apps/proxy/ts_proxy/stream_manager.py index 771ffba8..e7f752d8 100644 --- a/apps/proxy/ts_proxy/stream_manager.py +++ b/apps/proxy/ts_proxy/stream_manager.py @@ -6,13 +6,17 @@ import time import socket import requests import subprocess -import gevent # Add this import +import gevent +import re from typing import Optional, List +from django.db import connection from django.shortcuts import get_object_or_404 +from urllib3.exceptions import ReadTimeoutError from apps.proxy.config import TSConfig as Config from apps.channels.models import Channel, Stream from apps.m3u.models import M3UAccount, M3UAccountProfile from core.models import UserAgent, CoreSettings +from core.utils import log_system_event from .stream_buffer import StreamBuffer from .utils import detect_stream_type, get_logger from .redis_keys import RedisKeys @@ -37,6 +41,12 @@ class StreamManager: self.current_response = None self.current_session = None self.url_switching = False + self.url_switch_start_time = 0 + self.url_switch_timeout = ConfigHelper.url_switch_timeout() + self.buffering = False + self.buffering_timeout = ConfigHelper.buffering_timeout() + self.buffering_speed = ConfigHelper.buffering_speed() + self.buffering_start_time = None # Store worker_id for ownership checks self.worker_id = worker_id @@ -84,22 +94,35 @@ class StreamManager: self.tried_stream_ids.add(self.current_stream_id) logger.info(f"Loaded stream ID {self.current_stream_id} from Redis for channel {buffer.channel_id}") else: - logger.warning(f"No stream_id found in Redis for channel {channel_id}") + logger.warning(f"No stream_id found in Redis for channel {channel_id}. " + f"Stream switching will rely on URL comparison to avoid selecting the same stream.") except Exception as e: logger.warning(f"Error loading stream ID from Redis: {e}") else: - logger.warning(f"Unable to get stream ID for channel {channel_id} - stream switching may not work correctly") + logger.warning(f"Unable to get stream ID for channel {channel_id}. 
" + f"Stream switching will rely on URL comparison to avoid selecting the same stream.") logger.info(f"Initialized stream manager for channel {buffer.channel_id}") # Add this flag for tracking transcoding process status self.transcode_process_active = False + # Track stream command for efficient log parser routing + self.stream_command = None + self.parser_type = None # Will be set when transcode process starts + # Add tracking for data throughput self.bytes_processed = 0 self.last_bytes_update = time.time() self.bytes_update_interval = 5 # Update Redis every 5 seconds + # Add stderr reader thread property + self.stderr_reader_thread = None + self.ffmpeg_input_phase = True # Track if we're still reading input info + + # Add HTTP reader thread property + self.http_reader = None + def _create_session(self): """Create and configure requests session with optimal settings""" session = requests.Session() @@ -124,6 +147,37 @@ class StreamManager: return session + def _wait_for_existing_processes_to_close(self, timeout=5.0): + """Wait for existing processes/connections to fully close before establishing new ones""" + start_time = time.time() + + while time.time() - start_time < timeout: + # Check if transcode process is still running + if self.transcode_process and self.transcode_process.poll() is None: + logger.debug(f"Waiting for existing transcode process to terminate for channel {self.channel_id}") + gevent.sleep(0.1) + continue + + # Check if HTTP connections are still active + if self.current_response or self.current_session: + logger.debug(f"Waiting for existing HTTP connections to close for channel {self.channel_id}") + gevent.sleep(0.1) + continue + + # Check if socket is still active + if self.socket: + logger.debug(f"Waiting for existing socket to close for channel {self.channel_id}") + gevent.sleep(0.1) + continue + + # All processes/connections are closed + logger.debug(f"All existing processes closed for channel {self.channel_id}") + return True + + # Timeout reached + logger.warning(f"Timeout waiting for existing processes to close for channel {self.channel_id} after {timeout}s") + return False + def run(self): """Main execution loop using HTTP streaming with improved connection handling and stream switching""" # Add a stop flag to the class properties @@ -140,16 +194,50 @@ class StreamManager: health_thread = threading.Thread(target=self._monitor_health, daemon=True) health_thread.start() - logger.info(f"Starting stream for URL: {self.url}") + logger.info(f"Starting stream for URL: {self.url} for channel {self.channel_id}") # Main stream switching loop - we'll try different streams if needed while self.running and stream_switch_attempts <= max_stream_switches: + # Check for stuck switching state + if self.url_switching and time.time() - self.url_switch_start_time > self.url_switch_timeout: + logger.warning(f"URL switching state appears stuck for channel {self.channel_id} " + f"({time.time() - self.url_switch_start_time:.1f}s > {self.url_switch_timeout}s timeout). 
" + f"Resetting switching state.") + self._reset_url_switching_state() + + # NEW: Check for health monitor recovery requests + if hasattr(self, 'needs_reconnect') and self.needs_reconnect and not self.url_switching: + logger.info(f"Health monitor requested reconnect for channel {self.channel_id}") + self.needs_reconnect = False + + # Attempt reconnect without changing streams + if self._attempt_reconnect(): + logger.info(f"Health-requested reconnect successful for channel {self.channel_id}") + continue # Go back to main loop + else: + logger.warning(f"Health-requested reconnect failed, will try stream switch for channel {self.channel_id}") + self.needs_stream_switch = True + + if hasattr(self, 'needs_stream_switch') and self.needs_stream_switch and not self.url_switching: + logger.info(f"Health monitor requested stream switch for channel {self.channel_id}") + self.needs_stream_switch = False + + if self._try_next_stream(): + logger.info(f"Health-requested stream switch successful for channel {self.channel_id}") + stream_switch_attempts += 1 + self.retry_count = 0 # Reset retries for new stream + continue # Go back to main loop with new stream + else: + logger.error(f"Health-requested stream switch failed for channel {self.channel_id}") + # Continue with normal flow + # Check stream type before connecting - stream_type = detect_stream_type(self.url) - if self.transcode == False and stream_type == StreamType.HLS: - logger.info(f"Detected HLS stream: {self.url}") - logger.info(f"HLS streams will be handled with FFmpeg for now - future version will support HLS natively") - # Enable transcoding for HLS streams + self.stream_type = detect_stream_type(self.url) + if self.transcode == False and self.stream_type in (StreamType.HLS, StreamType.RTSP, StreamType.UDP): + stream_type_name = "HLS" if self.stream_type == StreamType.HLS else ("RTSP/RTP" if self.stream_type == StreamType.RTSP else "UDP") + logger.info(f"Detected {stream_type_name} stream: {self.url} for channel {self.channel_id}") + logger.info(f"{stream_type_name} streams require FFmpeg for channel {self.channel_id}") + # Enable transcoding for HLS, RTSP/RTP, and UDP streams self.transcode = True # We'll override the stream profile selection with ffmpeg in the transcoding section self.force_ffmpeg = True @@ -157,13 +245,13 @@ class StreamManager: self.retry_count = 0 url_failed = False if self.url_switching: - logger.debug("Skipping connection attempt during URL switch") + logger.debug(f"Skipping connection attempt during URL switch for channel {self.channel_id}") gevent.sleep(0.1) # REPLACE time.sleep(0.1) continue # Connection retry loop for current URL - while self.running and self.retry_count < self.max_retries and not url_failed: + while self.running and self.retry_count < self.max_retries and not url_failed and not self.needs_stream_switch: - logger.info(f"Connection attempt {self.retry_count + 1}/{self.max_retries} for URL: {self.url}") + logger.info(f"Connection attempt {self.retry_count + 1}/{self.max_retries} for URL: {self.url} for channel {self.channel_id}") # Handle connection based on whether we transcode or not connection_result = False @@ -177,6 +265,20 @@ class StreamManager: # Store connection start time to measure success duration connection_start_time = time.time() + # Log reconnection event if this is a retry (not first attempt) + if self.retry_count > 0: + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_reconnect', + channel_id=self.channel_id, + 
channel_name=channel_obj.name, + attempt=self.retry_count + 1, + max_attempts=self.max_retries + ) + except Exception as e: + logger.error(f"Could not log reconnection event: {e}") + # Successfully connected - read stream data until disconnect/error self._process_stream_data() # If we get here, the connection was closed/failed @@ -185,8 +287,12 @@ class StreamManager: # This indicates we had a stable connection for a while before failing connection_duration = time.time() - connection_start_time stable_connection_threshold = 30 # 30 seconds threshold + + if self.needs_stream_switch: + logger.info(f"Stream needs to switch after {connection_duration:.1f} seconds for channel: {self.channel_id}") + break # Exit to switch streams if connection_duration > stable_connection_threshold: - logger.info(f"Stream was stable for {connection_duration:.1f} seconds, resetting switch attempts counter") + logger.info(f"Stream was stable for {connection_duration:.1f} seconds, resetting switch attempts counter for channel: {self.channel_id}") stream_switch_attempts = 0 # Connection failed or ended - decide what to do next @@ -201,42 +307,71 @@ class StreamManager: # If we've reached max retries, mark this URL as failed if self.retry_count >= self.max_retries: url_failed = True - logger.warning(f"Maximum retry attempts ({self.max_retries}) reached for URL: {self.url}") + logger.warning(f"Maximum retry attempts ({self.max_retries}) reached for URL: {self.url} for channel: {self.channel_id}") + + # Log connection error event + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_error', + channel_id=self.channel_id, + channel_name=channel_obj.name, + error_type='connection_failed', + url=self.url[:100] if self.url else None, + attempts=self.max_retries + ) + except Exception as e: + logger.error(f"Could not log connection error event: {e}") else: # Wait with exponential backoff before retrying timeout = min(.25 * self.retry_count, 3) # Cap at 3 seconds - logger.info(f"Reconnecting in {timeout} seconds... (attempt {self.retry_count}/{self.max_retries})") + logger.info(f"Reconnecting in {timeout} seconds... (attempt {self.retry_count}/{self.max_retries}) for channel: {self.channel_id}") gevent.sleep(timeout) # REPLACE time.sleep(timeout) except Exception as e: - logger.error(f"Connection error: {e}", exc_info=True) + logger.error(f"Connection error on channel: {self.channel_id}: {e}", exc_info=True) self.retry_count += 1 self.connected = False if self.retry_count >= self.max_retries: url_failed = True + + # Log connection error event with exception details + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_error', + channel_id=self.channel_id, + channel_name=channel_obj.name, + error_type='connection_exception', + error_message=str(e)[:200], + url=self.url[:100] if self.url else None, + attempts=self.max_retries + ) + except Exception as log_error: + logger.error(f"Could not log connection error event: {log_error}") else: # Wait with exponential backoff before retrying timeout = min(.25 * self.retry_count, 3) # Cap at 3 seconds - logger.info(f"Reconnecting in {timeout} seconds after error... (attempt {self.retry_count}/{self.max_retries})") + logger.info(f"Reconnecting in {timeout} seconds after error... 
(attempt {self.retry_count}/{self.max_retries}) for channel: {self.channel_id}") gevent.sleep(timeout) # REPLACE time.sleep(timeout) # If URL failed and we're still running, try switching to another stream if url_failed and self.running: - logger.info(f"URL {self.url} failed after {self.retry_count} attempts, trying next stream") + logger.info(f"URL {self.url} failed after {self.retry_count} attempts, trying next stream for channel: {self.channel_id}") # Try to switch to next stream switch_result = self._try_next_stream() if switch_result: # Successfully switched to a new stream, continue with the new URL stream_switch_attempts += 1 - logger.info(f"Successfully switched to new URL: {self.url} (switch attempt {stream_switch_attempts}/{max_stream_switches})") + logger.info(f"Successfully switched to new URL: {self.url} (switch attempt {stream_switch_attempts}/{max_stream_switches}) for channel: {self.channel_id}") # Reset retry count for the new stream - important for the loop to work correctly self.retry_count = 0 # Continue outer loop with new URL - DON'T add a break statement here else: # No more streams to try - logger.error(f"Failed to find alternative streams after {stream_switch_attempts} attempts") + logger.error(f"Failed to find alternative streams after {stream_switch_attempts} attempts for channel: {self.channel_id}") break elif not self.running: # Normal shutdown was requested @@ -260,7 +395,7 @@ class StreamManager: # Make sure transcode process is terminated if self.transcode_process_active: - logger.info("Ensuring transcode process is terminated in finally block") + logger.info(f"Ensuring transcode process is terminated in finally block for channel: {self.channel_id}") self._close_socket() # Close all connections @@ -297,7 +432,13 @@ class StreamManager: stop_key = RedisKeys.channel_stopping(self.channel_id) self.buffer.redis_client.setex(stop_key, 60, "true") except Exception as e: - logger.error(f"Failed to update channel state in Redis: {e}") + logger.error(f"Failed to update channel state in Redis: {e} for channel {self.channel_id}", exc_info=True) + + # Close database connection for this thread + try: + connection.close() + except Exception: + pass logger.info(f"Stream manager stopped for channel {self.channel_id}") @@ -305,6 +446,22 @@ class StreamManager: """Establish a connection using transcoding""" try: logger.debug(f"Building transcode command for channel {self.channel_id}") + + # Check if we already have a running transcode process + if self.transcode_process and self.transcode_process.poll() is None: + logger.info(f"Existing transcode process found for channel {self.channel_id}, closing before establishing new connection") + self._close_socket() + + # Wait for the process to fully terminate + if not self._wait_for_existing_processes_to_close(): + logger.error(f"Failed to close existing transcode process for channel {self.channel_id}") + return False + + # Also check for any lingering HTTP connections + if self.current_response or self.current_session: + logger.debug(f"Closing existing HTTP connections before establishing transcode connection for channel {self.channel_id}") + self._close_connection() + channel = get_stream_object(self.channel_id) # Use FFmpeg specifically for HLS streams @@ -312,25 +469,51 @@ class StreamManager: from core.models import StreamProfile try: stream_profile = StreamProfile.objects.get(name='ffmpeg', locked=True) - logger.info("Using FFmpeg stream profile for HLS content") + logger.info("Using FFmpeg stream profile for unsupported proxy 
content (HLS/RTSP/UDP)") except StreamProfile.DoesNotExist: # Fall back to channel's profile if FFmpeg not found stream_profile = channel.get_stream_profile() - logger.warning("FFmpeg profile not found, using channel default profile") + logger.warning(f"FFmpeg profile not found, using channel default profile for channel: {self.channel_id}") else: stream_profile = channel.get_stream_profile() # Build and start transcode command self.transcode_cmd = stream_profile.build_command(self.url, self.user_agent) - logger.debug(f"Starting transcode process: {self.transcode_cmd}") + # Store stream command for efficient log parser routing + self.stream_command = stream_profile.command + # Map actual commands to parser types for direct routing + command_to_parser = { + 'ffmpeg': 'ffmpeg', + 'cvlc': 'vlc', + 'vlc': 'vlc', + 'streamlink': 'streamlink' + } + self.parser_type = command_to_parser.get(self.stream_command.lower()) + if self.parser_type: + logger.debug(f"Using {self.parser_type} parser for log parsing (command: {self.stream_command})") + else: + logger.debug(f"Unknown stream command '{self.stream_command}', will use auto-detection for log parsing") + + # For UDP streams, remove any user_agent parameters from the command + if hasattr(self, 'stream_type') and self.stream_type == StreamType.UDP: + # Filter out any arguments that contain the user_agent value or related headers + self.transcode_cmd = [arg for arg in self.transcode_cmd if self.user_agent not in arg and 'user-agent' not in arg.lower() and 'user_agent' not in arg.lower()] + logger.debug(f"Removed user_agent parameters from UDP stream command for channel: {self.channel_id}") + + logger.debug(f"Starting transcode process: {self.transcode_cmd} for channel: {self.channel_id}") + + # Modified to capture stderr instead of discarding it self.transcode_process = subprocess.Popen( self.transcode_cmd, stdout=subprocess.PIPE, - stderr=subprocess.DEVNULL, # Suppress error logs + stderr=subprocess.PIPE, # Capture stderr instead of discarding it bufsize=188 * 64 # Buffer optimized for TS packets ) + # Start a thread to read stderr + self._start_stderr_reader() + # Set flag that transcoding process is active self.transcode_process_active = True @@ -345,50 +528,393 @@ class StreamManager: return True except Exception as e: - logger.error(f"Error establishing transcode connection: {e}", exc_info=True) + logger.error(f"Error establishing transcode connection for channel: {self.channel_id}: {e}", exc_info=True) self._close_socket() return False - def _establish_http_connection(self): - """Establish a direct HTTP connection to the stream""" - try: - logger.debug(f"Using TS Proxy to connect to stream: {self.url}") - - # Create new session for each connection attempt - session = self._create_session() - self.current_session = session - - # Stream the URL with proper timeout handling - response = session.get( - self.url, - stream=True, - timeout=(10, 60) # 10s connect timeout, 60s read timeout + def _start_stderr_reader(self): + """Start a thread to read stderr from the transcode process""" + if self.transcode_process and self.transcode_process.stderr: + self.stderr_reader_thread = threading.Thread( + target=self._read_stderr, + daemon=True # Use daemon thread so it doesn't block program exit ) - self.current_response = response + self.stderr_reader_thread.start() + logger.debug(f"Started stderr reader thread for channel {self.channel_id}") - if response.status_code == 200: - self.connected = True - self.healthy = True - logger.info(f"Successfully connected 
to stream source") + def _read_stderr(self): + """Read and log ffmpeg stderr output with real-time stats parsing""" + try: + buffer = b"" + last_stats_line = b"" - # Store connection start time for stability tracking - self.connection_start_time = time.time() + # Read byte by byte for immediate detection + while self.transcode_process and self.transcode_process.stderr: + try: + # Read one byte at a time for immediate processing + byte = self.transcode_process.stderr.read(1) + if not byte: + break - # Set channel state to waiting for clients - self._set_waiting_for_clients() + buffer += byte + + # Check for frame= at the start of buffer (new stats line) + if buffer == b"frame=": + # We detected the start of a stats line, read until we get a complete line + # or hit a carriage return (which overwrites the previous stats) + while True: + next_byte = self.transcode_process.stderr.read(1) + if not next_byte: + break + + buffer += next_byte + + # Break on carriage return (stats overwrite) or newline + if next_byte in (b'\r', b'\n'): + break + + # Also break if we have enough data for a typical stats line + if len(buffer) > 200: # Typical stats line length + break + + # Process the stats line immediately + if buffer.strip(): + try: + stats_text = buffer.decode('utf-8', errors='ignore').strip() + if stats_text and "frame=" in stats_text: + self._parse_ffmpeg_stats(stats_text) + self._log_stderr_content(stats_text) + except Exception as e: + logger.debug(f"Error parsing immediate stats line: {e}") + + # Clear buffer after processing + buffer = b"" + continue + + # Handle regular line breaks for non-stats content + elif byte == b'\n': + if buffer.strip(): + line_text = buffer.decode('utf-8', errors='ignore').strip() + if line_text and not line_text.startswith("frame="): + self._log_stderr_content(line_text) + buffer = b"" + + # Handle carriage returns (potential stats overwrite) + elif byte == b'\r': + # Check if this might be a stats line + if b"frame=" in buffer: + try: + stats_text = buffer.decode('utf-8', errors='ignore').strip() + if stats_text and "frame=" in stats_text: + self._parse_ffmpeg_stats(stats_text) + self._log_stderr_content(stats_text) + except Exception as e: + logger.debug(f"Error parsing stats on carriage return: {e}") + elif buffer.strip(): + # Regular content with carriage return + line_text = buffer.decode('utf-8', errors='ignore').strip() + if line_text: + self._log_stderr_content(line_text) + buffer = b"" + + # Prevent buffer from growing too large for non-stats content + elif len(buffer) > 1024 and b"frame=" not in buffer: + # Process whatever we have if it's not a stats line + if buffer.strip(): + line_text = buffer.decode('utf-8', errors='ignore').strip() + if line_text: + self._log_stderr_content(line_text) + buffer = b"" + + except Exception as e: + logger.error(f"Error reading stderr byte: {e}") + break + + # Process any remaining buffer content + if buffer.strip(): + try: + remaining_text = buffer.decode('utf-8', errors='ignore').strip() + if remaining_text: + if "frame=" in remaining_text: + self._parse_ffmpeg_stats(remaining_text) + self._log_stderr_content(remaining_text) + except Exception as e: + logger.debug(f"Error processing remaining buffer: {e}") - return True - else: - logger.error(f"Failed to connect to stream: HTTP {response.status_code}") - self._close_connection() - return False - except requests.exceptions.RequestException as e: - logger.error(f"HTTP request error: {e}") - self._close_connection() - return False except Exception as e: - 
logger.error(f"Error establishing HTTP connection: {e}", exc_info=True) - self._close_connection() + # Catch any other exceptions in the thread to prevent crashes + try: + logger.error(f"Error in stderr reader thread for channel {self.channel_id}: {e}") + except: + pass + + def _log_stderr_content(self, content): + """Log stderr content from FFmpeg with appropriate log levels""" + try: + content = content.strip() + if not content: + return + + # Convert to lowercase for easier matching + content_lower = content.lower() + # Check if we are still in the input phase + if content_lower.startswith('input #') or 'decoder' in content_lower: + self.ffmpeg_input_phase = True + # Track FFmpeg phases - once we see output info, we're past input phase + if content_lower.startswith('output #') or 'encoder' in content_lower: + self.ffmpeg_input_phase = False + + # Route to appropriate parser based on known command type + from .services.log_parsers import LogParserFactory + from .services.channel_service import ChannelService + + parse_result = None + + # If we know the parser type, use direct routing for efficiency + if self.parser_type: + # Get the appropriate parser and check what it can parse + parser = LogParserFactory._parsers.get(self.parser_type) + if parser: + stream_type = parser.can_parse(content) + if stream_type: + # Parser can handle this line, parse it directly + parsed_data = LogParserFactory.parse(stream_type, content) + if parsed_data: + parse_result = (stream_type, parsed_data) + else: + # Unknown command type - use auto-detection as fallback + parse_result = LogParserFactory.auto_parse(content) + + if parse_result: + stream_type, parsed_data = parse_result + # For FFmpeg, only parse during input phase + if stream_type in ['video', 'audio', 'input']: + if self.ffmpeg_input_phase: + ChannelService.parse_and_store_stream_info(self.channel_id, content, stream_type, self.current_stream_id) + else: + # VLC and Streamlink can be parsed anytime + ChannelService.parse_and_store_stream_info(self.channel_id, content, stream_type, self.current_stream_id) + + # Determine log level based on content + if any(keyword in content_lower for keyword in ['error', 'failed', 'cannot', 'invalid', 'corrupt']): + logger.error(f"Stream process error for channel {self.channel_id}: {content}") + elif any(keyword in content_lower for keyword in ['warning', 'deprecated', 'ignoring']): + logger.warning(f"Stream process warning for channel {self.channel_id}: {content}") + elif content.startswith('frame=') or 'fps=' in content or 'speed=' in content: + # Stats lines - log at trace level to avoid spam + logger.trace(f"Stream stats for channel {self.channel_id}: {content}") + elif any(keyword in content_lower for keyword in ['input', 'output', 'stream', 'video', 'audio']): + # Stream info - log at info level + logger.info(f"Stream info for channel {self.channel_id}: {content}") + else: + # Everything else at debug level + logger.debug(f"Stream process output for channel {self.channel_id}: {content}") + + except Exception as e: + logger.error(f"Error logging stderr content for channel {self.channel_id}: {e}") + + def _parse_ffmpeg_stats(self, stats_line): + """Parse FFmpeg stats line and extract speed, fps, and bitrate""" + try: + # Example FFmpeg stats line: + # frame= 1234 fps= 30 q=28.0 size= 2048kB time=00:00:41.33 bitrate= 406.1kbits/s speed=1.02x + + # Extract speed (e.g., "speed=1.02x") + speed_match = re.search(r'speed=\s*([0-9.]+)x?', stats_line) + ffmpeg_speed = float(speed_match.group(1)) if speed_match else 
None + + # Extract fps (e.g., "fps= 30") + fps_match = re.search(r'fps=\s*([0-9.]+)', stats_line) + ffmpeg_fps = float(fps_match.group(1)) if fps_match else None + + # Extract bitrate (e.g., "bitrate= 406.1kbits/s") + bitrate_match = re.search(r'bitrate=\s*([0-9.]+(?:\.[0-9]+)?)\s*([kmg]?)bits/s', stats_line, re.IGNORECASE) + ffmpeg_output_bitrate = None + if bitrate_match: + bitrate_value = float(bitrate_match.group(1)) + unit = bitrate_match.group(2).lower() + # Convert to kbps + if unit == 'm': + bitrate_value *= 1000 + elif unit == 'g': + bitrate_value *= 1000000 + # If no unit or 'k', it's already in kbps + ffmpeg_output_bitrate = bitrate_value + + # Calculate actual FPS + actual_fps = None + if ffmpeg_fps is not None and ffmpeg_speed is not None and ffmpeg_speed > 0: + actual_fps = ffmpeg_fps / ffmpeg_speed + # Store in Redis if we have valid data + if any(x is not None for x in [ffmpeg_speed, ffmpeg_fps, actual_fps, ffmpeg_output_bitrate]): + self._update_ffmpeg_stats_in_redis(ffmpeg_speed, ffmpeg_fps, actual_fps, ffmpeg_output_bitrate) + + # Also save ffmpeg_output_bitrate to database if we have stream_id + if ffmpeg_output_bitrate is not None and self.current_stream_id: + from .services.channel_service import ChannelService + ChannelService._update_stream_stats_in_db( + self.current_stream_id, + ffmpeg_output_bitrate=ffmpeg_output_bitrate + ) + + # Fix the f-string formatting + actual_fps_str = f"{actual_fps:.1f}" if actual_fps is not None else "N/A" + ffmpeg_output_bitrate_str = f"{ffmpeg_output_bitrate:.1f}" if ffmpeg_output_bitrate is not None else "N/A" + # Log the stats + logger.debug(f"FFmpeg stats for channel {self.channel_id}: - Speed: {ffmpeg_speed}x, FFmpeg FPS: {ffmpeg_fps}, " + f"Actual FPS: {actual_fps_str}, " + f"Output Bitrate: {ffmpeg_output_bitrate_str} kbps") + # If we have a valid speed, check for buffering + if ffmpeg_speed is not None and ffmpeg_speed < self.buffering_speed: + if self.buffering: + # Buffering is still ongoing, check for how long + if self.buffering_start_time is None: + self.buffering_start_time = time.time() + else: + buffering_duration = time.time() - self.buffering_start_time + if buffering_duration > self.buffering_timeout: + # Buffering timeout reached, log error and try next stream + logger.error(f"Buffering timeout reached for channel {self.channel_id} after {buffering_duration:.1f} seconds") + # Send next stream request + if self._try_next_stream(): + logger.info(f"Switched to next stream for channel {self.channel_id} after buffering timeout") + # Reset buffering state + self.buffering = False + self.buffering_start_time = None + + # Log failover event + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_failover', + channel_id=self.channel_id, + channel_name=channel_obj.name, + reason='buffering_timeout', + duration=buffering_duration + ) + except Exception as e: + logger.error(f"Could not log failover event: {e}") + else: + logger.error(f"Failed to switch to next stream for channel {self.channel_id} after buffering timeout") + else: + # Buffering just started, set the flag and start timer + self.buffering = True + self.buffering_start_time = time.time() + logger.warning(f"Buffering started for channel {self.channel_id} - speed: {ffmpeg_speed}x") + + # Log system event for buffering + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_buffering', + channel_id=self.channel_id, + channel_name=channel_obj.name, + speed=ffmpeg_speed + ) + except 
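# --- Standalone sketch of the stats parsing above: given a progress line like
# --- "frame= 1234 fps= 30 q=28.0 size= 2048kB time=00:00:41.33 bitrate= 406.1kbits/s speed=1.02x",
# --- extract speed, encoder fps and output bitrate, then derive the source frame rate as
# --- fps / speed. It mirrors the regexes used in _parse_ffmpeg_stats but is an example,
# --- not the shipped implementation.
import re

def parse_stats_line(line: str) -> dict:
    speed_m = re.search(r"speed=\s*([0-9.]+)x?", line)
    fps_m = re.search(r"fps=\s*([0-9.]+)", line)
    bitrate_m = re.search(r"bitrate=\s*([0-9.]+)\s*([kmg]?)bits/s", line, re.IGNORECASE)

    speed = float(speed_m.group(1)) if speed_m else None
    fps = float(fps_m.group(1)) if fps_m else None

    bitrate_kbps = None
    if bitrate_m:
        bitrate_kbps = float(bitrate_m.group(1))
        unit = bitrate_m.group(2).lower()
        if unit == "m":
            bitrate_kbps *= 1000        # Mbit/s -> kbit/s
        elif unit == "g":
            bitrate_kbps *= 1000000     # Gbit/s -> kbit/s

    # ffmpeg reports fps relative to encoding speed; dividing by speed approximates the
    # real-time frame rate of the source.
    actual_fps = fps / speed if fps is not None and speed else None
    return {"speed": speed, "fps": fps, "actual_fps": actual_fps, "bitrate_kbps": bitrate_kbps}

# parse_stats_line("frame= 1234 fps= 30 ... bitrate= 406.1kbits/s speed=1.02x")
# -> {'speed': 1.02, 'fps': 30.0, 'actual_fps': ~29.4, 'bitrate_kbps': 406.1}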
Exception as e: + logger.error(f"Could not log buffering event: {e}") + + # Log buffering warning + logger.debug(f"FFmpeg speed on channel {self.channel_id} is below {self.buffering_speed} ({ffmpeg_speed}x) - buffering detected") + # Set channel state to buffering + if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client: + metadata_key = RedisKeys.channel_metadata(self.channel_id) + self.buffer.redis_client.hset(metadata_key, ChannelMetadataField.STATE, ChannelState.BUFFERING) + elif ffmpeg_speed is not None and ffmpeg_speed >= self.buffering_speed: + # Speed is good, check if we were buffering + if self.buffering: + # Reset buffering state + logger.info(f"Buffering ended for channel {self.channel_id} - speed: {ffmpeg_speed}x") + self.buffering = False + self.buffering_start_time = None + # Set channel state to active if speed is good + if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client: + metadata_key = RedisKeys.channel_metadata(self.channel_id) + self.buffer.redis_client.hset(metadata_key, ChannelMetadataField.STATE, ChannelState.ACTIVE) + + except Exception as e: + logger.debug(f"Error parsing FFmpeg stats: {e}") + + def _update_ffmpeg_stats_in_redis(self, speed, fps, actual_fps, output_bitrate): + """Update FFmpeg performance stats in Redis metadata""" + try: + if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client: + metadata_key = RedisKeys.channel_metadata(self.channel_id) + update_data = { + ChannelMetadataField.FFMPEG_STATS_UPDATED: str(time.time()) + } + + if speed is not None: + update_data[ChannelMetadataField.FFMPEG_SPEED] = str(round(speed, 3)) + + if fps is not None: + update_data[ChannelMetadataField.FFMPEG_FPS] = str(round(fps, 1)) + + if actual_fps is not None: + update_data[ChannelMetadataField.ACTUAL_FPS] = str(round(actual_fps, 1)) + + if output_bitrate is not None: + update_data[ChannelMetadataField.FFMPEG_OUTPUT_BITRATE] = str(round(output_bitrate, 1)) + + self.buffer.redis_client.hset(metadata_key, mapping=update_data) + + except Exception as e: + logger.error(f"Error updating FFmpeg stats in Redis: {e}") + + + def _establish_http_connection(self): + """Establish HTTP connection using thread-based reader (same as transcode path)""" + try: + logger.debug(f"Using HTTP streamer thread to connect to stream: {self.url}") + + # Check if we already have active HTTP connections + if self.current_response or self.current_session: + logger.info(f"Existing HTTP connection found for channel {self.channel_id}, closing before establishing new connection") + self._close_connection() + + # Wait for connections to fully close + if not self._wait_for_existing_processes_to_close(): + logger.error(f"Failed to close existing HTTP connections for channel {self.channel_id}") + return False + + # Also check for any lingering transcode processes + if self.transcode_process and self.transcode_process.poll() is None: + logger.debug(f"Closing existing transcode process before establishing HTTP connection for channel {self.channel_id}") + self._close_socket() + + # Use HTTPStreamReader to fetch stream and pipe to a readable file descriptor + # This allows us to use the same fetch_chunk() path as transcode + from .http_streamer import HTTPStreamReader + + # Create and start the HTTP stream reader + self.http_reader = HTTPStreamReader( + url=self.url, + user_agent=self.user_agent, + chunk_size=self.chunk_size + ) + + # Start the reader thread and get the read end of the pipe + pipe_fd = self.http_reader.start() + + # Wrap the file descriptor in a 
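# --- Compact sketch of the buffering watchdog implemented above: while ffmpeg reports a
# --- processing speed below a threshold, a timer runs; if the slow period outlasts the
# --- configured timeout the stream is considered stalled and a failover is requested.
# --- Threshold, timeout and the failover callback are illustrative, not the real config values.
import time

class BufferingWatchdog:
    def __init__(self, min_speed=0.98, timeout=15.0, on_timeout=None):
        self.min_speed = min_speed
        self.timeout = timeout
        self.on_timeout = on_timeout or (lambda: None)
        self.buffering_since = None

    def observe(self, speed):
        """Feed the latest speed value parsed from an ffmpeg stats line."""
        if speed is None:
            return
        if speed >= self.min_speed:
            self.buffering_since = None          # healthy again, reset the timer
            return
        if self.buffering_since is None:
            self.buffering_since = time.time()   # buffering just started
        elif time.time() - self.buffering_since > self.timeout:
            self.on_timeout()                    # stalled too long: ask for a stream switch
            self.buffering_since = None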
file object (same as transcode stdout) + import os + self.socket = os.fdopen(pipe_fd, 'rb', buffering=0) + self.connected = True + self.healthy = True + + logger.info(f"Successfully started HTTP streamer thread for channel {self.channel_id}") + + # Store connection start time for stability tracking + self.connection_start_time = time.time() + + # Set channel state to waiting for clients + self._set_waiting_for_clients() + + return True + + except Exception as e: + logger.error(f"Error establishing HTTP connection for channel {self.channel_id}: {e}", exc_info=True) + self._close_socket() return False def _update_bytes_processed(self, chunk_size): @@ -416,73 +942,44 @@ class StreamManager: logger.error(f"Error updating bytes processed: {e}") def _process_stream_data(self): - """Process stream data until disconnect or error""" + """Process stream data until disconnect or error - unified path for both transcode and HTTP""" try: - if self.transcode: - # Handle transcoded stream data - while self.running and self.connected: - if self.fetch_chunk(): - self.last_data_time = time.time() - else: - if not self.running: - break - gevent.sleep(0.1) # REPLACE time.sleep(0.1) - else: - # Handle direct HTTP connection - chunk_count = 0 - try: - for chunk in self.current_response.iter_content(chunk_size=self.chunk_size): - # Check if we've been asked to stop - if self.stop_requested or self.url_switching: - break - - if chunk: - # Track chunk size before adding to buffer - chunk_size = len(chunk) - self._update_bytes_processed(chunk_size) - - # Add chunk to buffer with TS packet alignment - success = self.buffer.add_chunk(chunk) - - if success: - self.last_data_time = time.time() - chunk_count += 1 - - # Update last data timestamp in Redis - if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client: - last_data_key = RedisKeys.last_data(self.buffer.channel_id) - self.buffer.redis_client.set(last_data_key, str(time.time()), ex=60) - except (AttributeError, ConnectionError) as e: - if self.stop_requested or self.url_switching: - logger.debug(f"Expected connection error during shutdown/URL switch: {e}") - else: - logger.error(f"Unexpected stream error: {e}") - raise + # Both transcode and HTTP now use the same subprocess/socket approach + # This gives us perfect control: check flags between chunks, timeout just returns False + while self.running and self.connected and not self.stop_requested and not self.needs_stream_switch: + if self.fetch_chunk(): + self.last_data_time = time.time() + else: + # fetch_chunk() returned False - could be timeout, no data, or error + if not self.running: + break + # Brief sleep before retry to avoid tight loop + gevent.sleep(0.1) except Exception as e: - logger.error(f"Error processing stream data: {e}", exc_info=True) + logger.error(f"Error processing stream data for channel {self.channel_id}: {e}", exc_info=True) # If we exit the loop, connection is closed or failed self.connected = False def _close_all_connections(self): """Close all connection resources""" - if self.socket: + if self.socket or self.transcode_process: try: self._close_socket() except Exception as e: - logger.debug(f"Error closing socket: {e}") + logger.debug(f"Error closing socket for channel {self.channel_id}: {e}") if self.current_response: try: self.current_response.close() except Exception as e: - logger.debug(f"Error closing response: {e}") + logger.debug(f"Error closing response for channel {self.channel_id}: {e}") if self.current_session: try: self.current_session.close() except Exception as 
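# --- Sketch of the pipe trick used by the HTTP path above: a background thread downloads
# --- the stream with requests and writes the bytes into an os.pipe(), while the consumer
# --- wraps the read end with os.fdopen(..., 'rb') so HTTP and transcoded sources expose the
# --- same file-like read() interface. HTTPStreamReader's real internals are not shown in
# --- this diff, so treat this as an assumed equivalent, not its actual implementation.
import os
import threading
import requests

def start_http_pipe(url, user_agent, chunk_size=188 * 64):
    read_fd, write_fd = os.pipe()

    def pump():
        try:
            with requests.get(url, headers={"User-Agent": user_agent},
                              stream=True, timeout=(5, 30)) as resp:
                resp.raise_for_status()
                for chunk in resp.iter_content(chunk_size=chunk_size):
                    if chunk:
                        os.write(write_fd, chunk)
        finally:
            os.close(write_fd)   # EOF for the reader when the download ends or fails

    threading.Thread(target=pump, daemon=True).start()
    return os.fdopen(read_fd, "rb", buffering=0)   # same read() semantics as process.stdout

# reader = start_http_pipe("http://example.invalid/stream.ts", "VLC/3.0")  # placeholder URL
# chunk = reader.read(188 * 64)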
e: - logger.debug(f"Error closing session: {e}") + logger.debug(f"Error closing session for channel {self.channel_id}: {e}") # Clear references self.socket = None @@ -509,7 +1006,7 @@ class StreamManager: if timer and timer.is_alive(): timer.cancel() except Exception as e: - logger.error(f"Error canceling buffer check timer: {e}") + logger.error(f"Error canceling buffer check timer for channel {self.channel_id}: {e}") self._buffer_check_timers.clear() @@ -536,16 +1033,17 @@ class StreamManager: # Set running to false to ensure thread exits self.running = False - def update_url(self, new_url, stream_id=None): + def update_url(self, new_url, stream_id=None, m3u_profile_id=None): """Update stream URL and reconnect with proper cleanup for both HTTP and transcode sessions""" if new_url == self.url: logger.info(f"URL unchanged: {new_url}") return False - logger.info(f"Switching stream URL from {self.url} to {new_url}") + logger.info(f"Switching stream URL from {self.url} to {new_url} for channel {self.channel_id}") # Import both models for proper resource management from apps.channels.models import Stream, Channel + from django.db import connection # Update stream profile if we're switching streams if self.current_stream_id and stream_id and self.current_stream_id != stream_id: @@ -554,71 +1052,99 @@ class StreamManager: channel = Channel.objects.get(uuid=self.channel_id) # Get stream to find its profile - new_stream = Stream.objects.get(pk=stream_id) + #new_stream = Stream.objects.get(pk=stream_id) # Use the new method to update the profile and manage connection counts - if new_stream.m3u_account_id: - success = channel.update_stream_profile(new_stream.m3u_account_id) + if m3u_profile_id: + success = channel.update_stream_profile(m3u_profile_id) if success: - logger.debug(f"Updated stream profile for channel {self.channel_id} to use profile from stream {stream_id}") + logger.debug(f"Updated m3u profile for channel {self.channel_id} to use profile from stream {stream_id}") else: logger.warning(f"Failed to update stream profile for channel {self.channel_id}") + except Exception as e: logger.error(f"Error updating stream profile for channel {self.channel_id}: {e}") + finally: + # Always close database connection after profile update + try: + connection.close() + except Exception: + pass + # CRITICAL: Set a flag to prevent immediate reconnection with old URL self.url_switching = True + self.url_switch_start_time = time.time() - # Check which type of connection we're using and close it properly - if self.transcode or self.socket: - logger.debug("Closing transcode process before URL change") - self._close_socket() - else: - logger.debug("Closing HTTP connection before URL change") - self._close_connection() + try: + # Check which type of connection we're using and close it properly + if self.transcode or self.socket: + logger.debug(f"Closing transcode process before URL change for channel {self.channel_id}") + self._close_socket() + else: + logger.debug(f"Closing HTTP connection before URL change for channel {self.channel_id}") + self._close_connection() - # Update URL and reset connection state - old_url = self.url - self.url = new_url - self.connected = False + # Update URL and reset connection state + old_url = self.url + self.url = new_url + self.connected = False - # Update stream ID if provided - if stream_id: - old_stream_id = self.current_stream_id - self.current_stream_id = stream_id - # Add stream ID to tried streams for proper tracking - self.tried_stream_ids.add(stream_id) - 
logger.info(f"Updated stream ID from {old_stream_id} to {stream_id} for channel {self.buffer.channel_id}") + # Update stream ID if provided + if stream_id: + old_stream_id = self.current_stream_id + self.current_stream_id = stream_id + # Add stream ID to tried streams for proper tracking + self.tried_stream_ids.add(stream_id) + logger.info(f"Updated stream ID from {old_stream_id} to {stream_id} for channel {self.channel_id}") - # Reset retry counter to allow immediate reconnect - self.retry_count = 0 + # Reset retry counter to allow immediate reconnect + self.retry_count = 0 - # Also reset buffer position to prevent stale data after URL change - if hasattr(self.buffer, 'reset_buffer_position'): + # Also reset buffer position to prevent stale data after URL change + if hasattr(self.buffer, 'reset_buffer_position'): + try: + self.buffer.reset_buffer_position() + logger.debug("Reset buffer position for clean URL switch") + except Exception as e: + logger.warning(f"Failed to reset buffer position: {e}") + + # Log stream switch event try: - self.buffer.reset_buffer_position() - logger.debug("Reset buffer position for clean URL switch") + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'stream_switch', + channel_id=self.channel_id, + channel_name=channel_obj.name, + new_url=new_url[:100] if new_url else None, + stream_id=stream_id + ) except Exception as e: - logger.warning(f"Failed to reset buffer position: {e}") + logger.error(f"Could not log stream switch event: {e}") - # Done with URL switch - self.url_switching = False - logger.info(f"Stream switch completed for channel {self.buffer.channel_id}") - - return True + return True + except Exception as e: + logger.error(f"Error during URL update for channel {self.channel_id}: {e}", exc_info=True) + return False + finally: + # CRITICAL FIX: Always reset the URL switching flag when done, whether successful or not + self.url_switching = False + logger.info(f"Stream switch completed for channel {self.channel_id}") def should_retry(self) -> bool: """Check if connection retry is allowed""" return self.retry_count < self.max_retries def _monitor_health(self): - """Monitor stream health and attempt recovery if needed""" + """Monitor stream health and set flags for the main loop to handle recovery""" consecutive_unhealthy_checks = 0 - health_recovery_attempts = 0 - reconnect_attempts = 0 - max_health_recovery_attempts = ConfigHelper.get('MAX_HEALTH_RECOVERY_ATTEMPTS', 2) - max_reconnect_attempts = ConfigHelper.get('MAX_RECONNECT_ATTEMPTS', 3) - min_stable_time = ConfigHelper.get('MIN_STABLE_TIME_BEFORE_RECONNECT', 30) # seconds + max_unhealthy_checks = 3 + + # Add flags for the main loop to check + self.needs_reconnect = False + self.needs_stream_switch = False + self.last_health_action_time = 0 + action_cooldown = 30 # Prevent rapid recovery attempts while self.running: try: @@ -627,48 +1153,43 @@ class StreamManager: timeout_threshold = getattr(Config, 'CONNECTION_TIMEOUT', 10) if inactivity_duration > timeout_threshold and self.connected: - # Mark unhealthy if no data for too long if self.healthy: - logger.warning(f"Stream unhealthy - no data for {inactivity_duration:.1f}s") + logger.warning(f"Stream unhealthy for channel {self.channel_id} - no data for {inactivity_duration:.1f}s") self.healthy = False - # Track consecutive unhealthy checks consecutive_unhealthy_checks += 1 - # After several unhealthy checks in a row, try recovery - if consecutive_unhealthy_checks >= 3 and health_recovery_attempts < 
max_health_recovery_attempts: - # Calculate how long the stream was stable before failing + # Only set flags if enough time has passed since last action + if (consecutive_unhealthy_checks >= max_unhealthy_checks and + now - self.last_health_action_time > action_cooldown): + + # Calculate stability to decide on action type connection_start_time = getattr(self, 'connection_start_time', 0) stable_time = self.last_data_time - connection_start_time if connection_start_time > 0 else 0 - if stable_time >= min_stable_time and reconnect_attempts < max_reconnect_attempts: - # Stream was stable for a while, try reconnecting first - logger.warning(f"Stream was stable for {stable_time:.1f}s before failing. " - f"Attempting reconnect {reconnect_attempts + 1}/{max_reconnect_attempts}") - reconnect_attempts += 1 - threading.Thread(target=self._attempt_reconnect, daemon=True).start() + if stable_time >= 30: # Stream was stable, try reconnect first + if not self.needs_reconnect: + logger.info(f"Setting reconnect flag for stable stream (stable for {stable_time:.1f}s) for channel {self.channel_id}") + self.needs_reconnect = True + self.last_health_action_time = now else: - # Stream was not stable long enough, or reconnects failed too many times - # Try switching to another stream - if reconnect_attempts > 0: - logger.warning(f"Reconnect attempts exhausted ({reconnect_attempts}/{max_reconnect_attempts}). " - f"Attempting stream switch recovery") - else: - logger.warning(f"Stream was only stable for {stable_time:.1f}s (<{min_stable_time}s). " - f"Skipping reconnect, attempting stream switch") + # Stream wasn't stable, suggest stream switch + if not self.needs_stream_switch: + logger.info(f"Setting stream switch flag for unstable stream (stable for {stable_time:.1f}s) for channel {self.channel_id}") + self.needs_stream_switch = True + self.last_health_action_time = now + + consecutive_unhealthy_checks = 0 # Reset after setting flag - health_recovery_attempts += 1 - reconnect_attempts = 0 # Reset for next time - threading.Thread(target=self._attempt_health_recovery, daemon=True).start() elif self.connected and not self.healthy: # Auto-recover health when data resumes - logger.info(f"Stream health restored") + logger.info(f"Stream health restored for channel {self.channel_id} - data resumed after {inactivity_duration:.1f}s") self.healthy = True consecutive_unhealthy_checks = 0 - health_recovery_attempts = 0 - reconnect_attempts = 0 + # Clear recovery flags when healthy again + self.needs_reconnect = False + self.needs_stream_switch = False - # If healthy, reset unhealthy counter (but keep other state) if self.healthy: consecutive_unhealthy_checks = 0 @@ -684,39 +1205,65 @@ class StreamManager: # Don't try to reconnect if we're already switching URLs if self.url_switching: - logger.info("URL switching already in progress, skipping reconnect") - return + logger.info(f"URL switching already in progress, skipping reconnect for channel {self.channel_id}") + return False - # Close existing connection - if self.transcode or self.socket: - self._close_socket() - else: - self._close_connection() + # Set a flag to prevent concurrent operations + if hasattr(self, 'reconnecting') and self.reconnecting: + logger.info(f"Reconnect already in progress, skipping for channel {self.channel_id}") + return False - self.connected = False + self.reconnecting = True - # Attempt to establish a new connection using the same URL - connection_result = False try: + # Close existing connection and wait for it to fully terminate + if 
self.transcode or self.socket: + logger.debug(f"Closing transcode process before reconnect for channel {self.channel_id}") + self._close_socket() + else: + logger.debug(f"Closing HTTP connection before reconnect for channel {self.channel_id}") + self._close_connection() + + # Wait for all processes to fully close before attempting reconnect + if not self._wait_for_existing_processes_to_close(): + logger.warning(f"Some processes may still be running during reconnect for channel {self.channel_id}") + + self.connected = False + + # Attempt to establish a new connection using the same URL + connection_result = False if self.transcode: connection_result = self._establish_transcode_connection() else: connection_result = self._establish_http_connection() if connection_result: - # Store connection start time to measure stability self.connection_start_time = time.time() logger.info(f"Reconnect successful for channel {self.channel_id}") + + # Log reconnection event + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_reconnect', + channel_id=self.channel_id, + channel_name=channel_obj.name, + reason='health_monitor' + ) + except Exception as e: + logger.error(f"Could not log reconnection event: {e}") + return True else: logger.warning(f"Reconnect failed for channel {self.channel_id}") return False - except Exception as e: - logger.error(f"Error during reconnect: {e}", exc_info=True) - return False + + finally: + self.reconnecting = False except Exception as e: - logger.error(f"Error in reconnect attempt: {e}", exc_info=True) + logger.error(f"Error in reconnect attempt for channel {self.channel_id}: {e}", exc_info=True) + self.reconnecting = False return False def _attempt_health_recovery(self): @@ -726,7 +1273,7 @@ class StreamManager: # Don't try to switch if we're already in the process of switching URLs if self.url_switching: - logger.info("URL switching already in progress, skipping health recovery") + logger.info(f"URL switching already in progress, skipping health recovery for channel {self.channel_id}") return # Try to switch to next stream @@ -739,7 +1286,7 @@ class StreamManager: return False except Exception as e: - logger.error(f"Error in health recovery attempt: {e}", exc_info=True) + logger.error(f"Error in health recovery attempt for channel {self.channel_id}: {e}", exc_info=True) return False def _close_connection(self): @@ -749,7 +1296,7 @@ class StreamManager: try: self.current_response.close() except Exception as e: - logger.debug(f"Error closing response: {e}") + logger.debug(f"Error closing response for channel {self.channel_id}: {e}") self.current_response = None # Close session if it exists @@ -757,7 +1304,7 @@ class StreamManager: try: self.current_session.close() except Exception as e: - logger.debug(f"Error closing session: {e}") + logger.debug(f"Error closing session for channel {self.channel_id}: {e}") self.current_session = None def _close_socket(self): @@ -765,46 +1312,67 @@ class StreamManager: # First try to use _close_connection for HTTP resources if self.current_response or self.current_session: self._close_connection() - return + + # Stop HTTP reader thread if it exists + if hasattr(self, 'http_reader') and self.http_reader: + try: + logger.debug(f"Stopping HTTP reader thread for channel {self.channel_id}") + self.http_reader.stop() + self.http_reader = None + except Exception as e: + logger.debug(f"Error stopping HTTP reader for channel {self.channel_id}: {e}") # Otherwise handle socket and transcode resources if 
self.socket: try: self.socket.close() except Exception as e: - logger.debug(f"Error closing socket: {e}") + logger.debug(f"Error closing socket for channel {self.channel_id}: {e}") pass - self.socket = None - self.connected = False - - # Enhanced transcode process cleanup with more aggressive termination + # Enhanced transcode process cleanup with immediate termination if self.transcode_process: try: - # First try polite termination - logger.debug(f"Terminating transcode process for channel {self.channel_id}") - self.transcode_process.terminate() + logger.debug(f"Killing transcode process for channel {self.channel_id}") + self.transcode_process.kill() - # Give it a short time to terminate gracefully + # Give it a very short time to die try: - self.transcode_process.wait(timeout=1.0) + self.transcode_process.wait(timeout=0.5) except subprocess.TimeoutExpired: - # If it doesn't terminate quickly, kill it - logger.warning(f"Transcode process didn't terminate within timeout, killing forcefully") - self.transcode_process.kill() - - try: - self.transcode_process.wait(timeout=1.0) - except subprocess.TimeoutExpired: - logger.error(f"Failed to kill transcode process even with force") + logger.error(f"Failed to kill transcode process even with force for channel {self.channel_id}") except Exception as e: - logger.debug(f"Error terminating transcode process: {e}") + logger.debug(f"Error terminating transcode process for channel {self.channel_id}: {e}") # Final attempt: try to kill directly try: self.transcode_process.kill() except Exception as e: - logger.error(f"Final kill attempt failed: {e}") + logger.error(f"Final kill attempt failed for channel {self.channel_id}: {e}") + + # Explicitly close all subprocess pipes to prevent file descriptor leaks + try: + if self.transcode_process.stdin: + self.transcode_process.stdin.close() + if self.transcode_process.stdout: + self.transcode_process.stdout.close() + if self.transcode_process.stderr: + self.transcode_process.stderr.close() + logger.debug(f"Closed all subprocess pipes for channel {self.channel_id}") + except Exception as e: + logger.debug(f"Error closing subprocess pipes for channel {self.channel_id}: {e}") + + # Join stderr reader thread to ensure it's fully terminated + if hasattr(self, 'stderr_reader_thread') and self.stderr_reader_thread and self.stderr_reader_thread.is_alive(): + try: + logger.debug(f"Waiting for stderr reader thread to terminate for channel {self.channel_id}") + self.stderr_reader_thread.join(timeout=2.0) + if self.stderr_reader_thread.is_alive(): + logger.warning(f"Stderr reader thread did not terminate within timeout for channel {self.channel_id}") + except Exception as e: + logger.debug(f"Error joining stderr reader thread for channel {self.channel_id}: {e}") + finally: + self.stderr_reader_thread = None self.transcode_process = None self.transcode_process_active = False # Reset the flag @@ -816,8 +1384,9 @@ class StreamManager: self.buffer.redis_client.delete(transcode_key) logger.debug(f"Cleared transcode active flag for channel {self.channel_id}") except Exception as e: - logger.debug(f"Error clearing transcode flag: {e}") - + logger.debug(f"Error clearing transcode flag for channel {self.channel_id}: {e}") + self.socket = None + self.connected = False # Cancel any remaining buffer check timers for timer in list(self._buffer_check_timers): try: @@ -825,31 +1394,47 @@ class StreamManager: timer.cancel() logger.debug(f"Cancelled buffer check timer during socket close for channel {self.channel_id}") except Exception as 
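# --- Standalone sketch of the cleanup steps performed above when tearing down a transcode
# --- process: kill it, wait briefly, close its pipes explicitly so file descriptors are not
# --- leaked, and join the stderr reader thread. Timeouts are illustrative.
import subprocess

def teardown_process(proc: subprocess.Popen, stderr_thread=None):
    if proc is None:
        return
    try:
        proc.kill()
        try:
            proc.wait(timeout=0.5)
        except subprocess.TimeoutExpired:
            pass  # process is stubborn; the pipes are still closed below
    finally:
        # Closing the pipes releases the file descriptors even if wait() timed out
        for pipe in (proc.stdin, proc.stdout, proc.stderr):
            if pipe:
                try:
                    pipe.close()
                except OSError:
                    pass
        if stderr_thread is not None and stderr_thread.is_alive():
            stderr_thread.join(timeout=2.0)   # give the reader a chance to drain and exit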
e: - logger.debug(f"Error canceling timer during socket close: {e}") + logger.debug(f"Error canceling timer during socket close for channel {self.channel_id}: {e}") self._buffer_check_timers = [] def fetch_chunk(self): - """Fetch data from socket with direct pass-through to buffer""" + """Fetch data from socket with timeout handling""" if not self.connected or not self.socket: return False try: - # Read data chunk - no need to align with TS packet size anymore - try: - # Try to read data chunk - if hasattr(self.socket, 'recv'): - chunk = self.socket.recv(Config.CHUNK_SIZE) # Standard socket - else: - chunk = self.socket.read(Config.CHUNK_SIZE) # SocketIO object + # Set timeout for chunk reads + chunk_timeout = ConfigHelper.chunk_timeout() # Use centralized timeout configuration - except AttributeError: - # Fall back to read() if recv() isn't available - chunk = self.socket.read(Config.CHUNK_SIZE) + try: + # Handle different socket types with timeout + if hasattr(self.socket, 'recv'): + # Standard socket - set timeout + original_timeout = self.socket.gettimeout() + self.socket.settimeout(chunk_timeout) + chunk = self.socket.recv(Config.CHUNK_SIZE) + self.socket.settimeout(original_timeout) # Restore original timeout + else: + # SocketIO object (transcode process stdout) - use select for timeout + import select + ready, _, _ = select.select([self.socket], [], [], chunk_timeout) + + if not ready: + # Timeout occurred + logger.debug(f"Chunk read timeout ({chunk_timeout}s) for channel {self.channel_id}") + return False + + chunk = self.socket.read(Config.CHUNK_SIZE) + + except socket.timeout: + # Socket timeout occurred + logger.debug(f"Socket timeout ({chunk_timeout}s) for channel {self.channel_id}") + return False if not chunk: # Connection closed by server - logger.warning("Server closed connection") + logger.warning(f"Server closed connection for channel {self.channel_id}") self._close_socket() self.connected = False return False @@ -903,7 +1488,17 @@ class StreamManager: # Only update if not already past connecting if not current_state or current_state in [ChannelState.INITIALIZING, ChannelState.CONNECTING]: # NEW CODE: Check if buffer has enough chunks - current_buffer_index = getattr(self.buffer, 'index', 0) + # IMPORTANT: Read from Redis, not local buffer.index, because in multi-worker setup + # each worker has its own StreamBuffer instance with potentially stale local index + buffer_index_key = RedisKeys.buffer_index(channel_id) + current_buffer_index = 0 + try: + redis_index = redis_client.get(buffer_index_key) + if redis_index: + current_buffer_index = int(redis_index) + except Exception as e: + logger.error(f"Error reading buffer index from Redis: {e}") + initial_chunks_needed = ConfigHelper.initial_behind_chunks() if current_buffer_index < initial_chunks_needed: @@ -932,13 +1527,13 @@ class StreamManager: redis_client.hset(metadata_key, mapping=update_data) # Get configured grace period or default - grace_period = ConfigHelper.get('CHANNEL_INIT_GRACE_PERIOD', 20) + grace_period = ConfigHelper.channel_init_grace_period() logger.info(f"STREAM MANAGER: Updated channel {channel_id} state: {current_state or 'None'} -> {ChannelState.WAITING_FOR_CLIENTS} with {current_buffer_index} buffer chunks") logger.info(f"Started initial connection grace period ({grace_period}s) for channel {channel_id}") else: logger.debug(f"Not changing state: channel {channel_id} already in {current_state} state") except Exception as e: - logger.error(f"Error setting waiting for clients state: {e}") + 
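# --- Sketch of the timeout handling used by fetch_chunk() above: plain sockets get a
# --- temporary settimeout(), while pipe-backed file objects (a transcode process's stdout or
# --- the HTTP reader pipe) are polled with select() because they have no timeout of their
# --- own. Returning None on timeout lets the caller re-check its stop/switch flags.
import select
import socket

def read_chunk_with_timeout(source, size, timeout):
    if hasattr(source, "recv"):               # real socket
        previous = source.gettimeout()
        source.settimeout(timeout)
        try:
            return source.recv(size)
        except socket.timeout:
            return None
        finally:
            source.settimeout(previous)
    # File-like object wrapping a pipe fd: poll for readability first
    ready, _, _ = select.select([source], [], [], timeout)
    if not ready:
        return None
    return source.read(size)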
logger.error(f"Error setting waiting for clients state for channel {channel_id}: {e}") def _check_buffer_and_set_state(self): """Check buffer size and set state to waiting_for_clients when ready""" @@ -951,10 +1546,21 @@ class StreamManager: # Clean up completed timers self._buffer_check_timers = [t for t in self._buffer_check_timers if t.is_alive()] - if hasattr(self.buffer, 'index') and hasattr(self.buffer, 'channel_id'): - current_buffer_index = self.buffer.index - initial_chunks_needed = getattr(Config, 'INITIAL_BEHIND_CHUNKS', 10) + if hasattr(self.buffer, 'channel_id') and hasattr(self.buffer, 'redis_client'): channel_id = self.buffer.channel_id + redis_client = self.buffer.redis_client + + # IMPORTANT: Read from Redis, not local buffer.index + buffer_index_key = RedisKeys.buffer_index(channel_id) + current_buffer_index = 0 + try: + redis_index = redis_client.get(buffer_index_key) + if redis_index: + current_buffer_index = int(redis_index) + except Exception as e: + logger.error(f"Error reading buffer index from Redis: {e}") + + initial_chunks_needed = ConfigHelper.initial_behind_chunks() # Use ConfigHelper for consistency if current_buffer_index >= initial_chunks_needed: # We now have enough buffer, call _set_waiting_for_clients again @@ -973,12 +1579,13 @@ class StreamManager: return True # Return value to indicate check was successful except Exception as e: - logger.error(f"Error in buffer check: {e}") + logger.error(f"Error in buffer check for channel {self.channel_id}: {e}") return False def _try_next_stream(self): """ Try to switch to the next available stream for this channel. + Will iterate through multiple alternate streams if needed to find one with a different URL. Returns: bool: True if successfully switched to a new stream, False otherwise @@ -1004,60 +1611,79 @@ class StreamManager: logger.warning(f"All {len(alternate_streams)} alternate streams have been tried for channel {self.channel_id}") return False - # Get the next stream to try - next_stream = untried_streams[0] - stream_id = next_stream['stream_id'] + # IMPROVED: Try multiple streams until we find one with a different URL + for next_stream in untried_streams: + stream_id = next_stream['stream_id'] + profile_id = next_stream['profile_id'] # This is the M3U profile ID we need - # Add to tried streams - self.tried_stream_ids.add(stream_id) + # Add to tried streams + self.tried_stream_ids.add(stream_id) - # Get stream info including URL - logger.info(f"Trying next stream ID {stream_id} for channel {self.channel_id}") - stream_info = get_stream_info_for_switch(self.channel_id, stream_id) + # Get stream info including URL using the profile_id we already have + logger.info(f"Trying next stream ID {stream_id} with profile ID {profile_id} for channel {self.channel_id}") + stream_info = get_stream_info_for_switch(self.channel_id, stream_id) - if 'error' in stream_info or not stream_info.get('url'): - logger.error(f"Error getting info for stream {stream_id}: {stream_info.get('error', 'No URL')}") - return False + if 'error' in stream_info or not stream_info.get('url'): + logger.error(f"Error getting info for stream {stream_id} for channel {self.channel_id}: {stream_info.get('error', 'No URL')}") + continue # Try next stream instead of giving up - # Update URL and user agent - new_url = stream_info['url'] - new_user_agent = stream_info['user_agent'] - new_transcode = stream_info['transcode'] + # Update URL and user agent + new_url = stream_info['url'] + new_user_agent = stream_info['user_agent'] + new_transcode = 
stream_info['transcode'] - logger.info(f"Switching from URL {self.url} to {new_url} for channel {self.channel_id}") + # CRITICAL FIX: Check if the new URL is the same as current URL + # This can happen when current_stream_id is None and we accidentally select the same stream + if new_url == self.url: + logger.warning(f"Stream ID {stream_id} generates the same URL as current stream ({new_url}). " + f"Skipping this stream and trying next alternative.") + continue # Try next stream instead of giving up - # Update stream ID tracking - self.current_stream_id = stream_id + logger.info(f"Switching from URL {self.url} to {new_url} for channel {self.channel_id}") - # Store the new user agent and transcode settings - self.user_agent = new_user_agent - self.transcode = new_transcode + # IMPORTANT: Just update the URL, don't stop the channel or release resources + switch_result = self.update_url(new_url, stream_id, profile_id) + if not switch_result: + logger.error(f"Failed to update URL for stream ID {stream_id} for channel {self.channel_id}") + continue # Try next stream - # Update stream metadata in Redis - if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client: - metadata_key = RedisKeys.channel_metadata(self.channel_id) - self.buffer.redis_client.hset(metadata_key, mapping={ - ChannelMetadataField.URL: new_url, - ChannelMetadataField.USER_AGENT: new_user_agent, - ChannelMetadataField.STREAM_PROFILE: stream_info['stream_profile'], - ChannelMetadataField.M3U_PROFILE: stream_info['m3u_profile_id'], - ChannelMetadataField.STREAM_ID: str(stream_id), - ChannelMetadataField.STREAM_SWITCH_TIME: str(time.time()), - ChannelMetadataField.STREAM_SWITCH_REASON: "max_retries_exceeded" - }) + # Update stream ID tracking + self.current_stream_id = stream_id - # Log the switch - logger.info(f"Stream metadata updated for channel {self.channel_id} to stream ID {stream_id}") + # Store the new user agent and transcode settings + self.user_agent = new_user_agent + self.transcode = new_transcode - # IMPORTANT: Just update the URL, don't stop the channel or release resources - switch_result = self.update_url(new_url, stream_id) - if not switch_result: - logger.error(f"Failed to update URL for stream ID {stream_id}") - return False + # Update stream metadata in Redis - use the profile_id we got from get_alternate_streams + if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client: + metadata_key = RedisKeys.channel_metadata(self.channel_id) + self.buffer.redis_client.hset(metadata_key, mapping={ + ChannelMetadataField.URL: new_url, + ChannelMetadataField.USER_AGENT: new_user_agent, + ChannelMetadataField.STREAM_PROFILE: stream_info['stream_profile'], + ChannelMetadataField.M3U_PROFILE: str(profile_id), # Use the profile_id from get_alternate_streams + ChannelMetadataField.STREAM_ID: str(stream_id), + ChannelMetadataField.STREAM_SWITCH_TIME: str(time.time()), + ChannelMetadataField.STREAM_SWITCH_REASON: "max_retries_exceeded" + }) - logger.info(f"Successfully switched to stream ID {stream_id} with URL {new_url}") - return True + # Log the switch + logger.info(f"Stream metadata updated for channel {self.channel_id} to stream ID {stream_id} with M3U profile {profile_id}") + + logger.info(f"Successfully switched to stream ID {stream_id} with URL {new_url} for channel {self.channel_id}") + return True + + # If we get here, we tried all streams but none worked + logger.error(f"Tried {len(untried_streams)} alternate streams but none were suitable for channel {self.channel_id}") + return False except 
Exception as e: logger.error(f"Error trying next stream for channel {self.channel_id}: {e}", exc_info=True) - return False \ No newline at end of file + return False + + # Add a new helper method to safely reset the URL switching state + def _reset_url_switching_state(self): + """Safely reset the URL switching state if it gets stuck""" + self.url_switching = False + self.url_switch_start_time = 0 + logger.info(f"Reset URL switching state for channel {self.channel_id}") \ No newline at end of file diff --git a/apps/proxy/ts_proxy/url_utils.py b/apps/proxy/ts_proxy/url_utils.py index e3b1c264..8b467b7f 100644 --- a/apps/proxy/ts_proxy/url_utils.py +++ b/apps/proxy/ts_proxy/url_utils.py @@ -8,7 +8,7 @@ from typing import Optional, Tuple, List from django.shortcuts import get_object_or_404 from apps.channels.models import Channel, Stream from apps.m3u.models import M3UAccount, M3UAccountProfile -from core.models import UserAgent, CoreSettings +from core.models import UserAgent, CoreSettings, StreamProfile from .utils import get_logger from uuid import UUID import requests @@ -17,7 +17,6 @@ logger = get_logger() def get_stream_object(id: str): try: - uuid_obj = UUID(id, version=4) logger.info(f"Fetching channel ID {id}") return get_object_or_404(Channel, uuid=id) except: @@ -27,16 +26,100 @@ def get_stream_object(id: str): def generate_stream_url(channel_id: str) -> Tuple[str, str, bool, Optional[int]]: """ - Generate the appropriate stream URL for a channel based on its profile settings. + Generate the appropriate stream URL for a channel or stream based on its profile settings. Args: - channel_id: The UUID of the channel + channel_id: The UUID of the channel or stream hash Returns: Tuple[str, str, bool, Optional[int]]: (stream_url, user_agent, transcode_flag, profile_id) """ try: - channel = get_stream_object(channel_id) + channel_or_stream = get_stream_object(channel_id) + + # Handle direct stream preview (custom streams) + if isinstance(channel_or_stream, Stream): + from core.utils import RedisClient + + stream = channel_or_stream + logger.info(f"Previewing stream directly: {stream.id} ({stream.name})") + + # For custom streams, we need to get the M3U account and profile + m3u_account = stream.m3u_account + if not m3u_account: + logger.error(f"Stream {stream.id} has no M3U account") + return None, None, False, None + + # Get active profiles for this M3U account + m3u_profiles = m3u_account.profiles.filter(is_active=True) + default_profile = next((obj for obj in m3u_profiles if obj.is_default), None) + + if not default_profile: + logger.error(f"No default active profile found for M3U account {m3u_account.id}") + return None, None, False, None + + # Check profiles in order: default first, then others + profiles = [default_profile] + [obj for obj in m3u_profiles if not obj.is_default] + + # Try to find an available profile with connection capacity + redis_client = RedisClient.get_client() + selected_profile = None + + for profile in profiles: + logger.info(profile) + + # Check connection availability + if redis_client: + profile_connections_key = f"profile_connections:{profile.id}" + current_connections = int(redis_client.get(profile_connections_key) or 0) + + # Check if profile has available slots (or unlimited connections) + if profile.max_streams == 0 or current_connections < profile.max_streams: + selected_profile = profile + logger.debug(f"Selected profile {profile.id} with {current_connections}/{profile.max_streams} connections for stream preview") + break + else: + 
logger.debug(f"Profile {profile.id} at max connections: {current_connections}/{profile.max_streams}") + else: + # No Redis available, use first active profile + selected_profile = profile + break + + if not selected_profile: + logger.error(f"No profiles available with connection capacity for M3U account {m3u_account.id}") + return None, None, False, None + + # Get the appropriate user agent + stream_user_agent = m3u_account.get_user_agent().user_agent + if stream_user_agent is None: + stream_user_agent = UserAgent.objects.get(id=CoreSettings.get_default_user_agent_id()) + logger.debug(f"No user agent found for account, using default: {stream_user_agent}") + + # Get stream URL with the selected profile's URL transformation + stream_url = transform_url(stream.url, selected_profile.search_pattern, selected_profile.replace_pattern) + + # Check if the stream has its own stream_profile set, otherwise use default + if stream.stream_profile: + stream_profile = stream.stream_profile + logger.debug(f"Using stream's own stream profile: {stream_profile.name}") + else: + stream_profile = StreamProfile.objects.get( + id=CoreSettings.get_default_stream_profile_id() + ) + logger.debug(f"Using default stream profile: {stream_profile.name}") + + # Check if transcoding is needed + if stream_profile.is_proxy() or stream_profile is None: + transcode = False + else: + transcode = True + + stream_profile_id = stream_profile.id + + return stream_url, stream_user_agent, transcode, stream_profile_id + + # Handle channel preview (existing logic) + channel = channel_or_stream # Get stream and profile for this channel # Note: get_stream now returns 3 values (stream_id, profile_id, error_reason) @@ -126,7 +209,10 @@ def get_stream_info_for_switch(channel_id: str, target_stream_id: Optional[int] dict: Stream information including URL, user agent and transcode flag """ try: + from core.utils import RedisClient + channel = get_object_or_404(Channel, uuid=channel_id) + redis_client = RedisClient.get_client() # Use the target stream if specified, otherwise use current stream if target_stream_id: @@ -135,24 +221,58 @@ def get_stream_info_for_switch(channel_id: str, target_stream_id: Optional[int] # Get the stream object stream = get_object_or_404(Stream, pk=stream_id) - # Find compatible profile for this stream - profiles = M3UAccountProfile.objects.filter(m3u_account=stream.m3u_account) + # Find compatible profile for this stream with connection availability check + m3u_account = stream.m3u_account + if not m3u_account: + return {'error': 'Stream has no M3U account'} - if not profiles.exists(): - # Try to get default profile - default_profile = M3UAccountProfile.objects.filter( - m3u_account=stream.m3u_account, - is_default=True - ).first() + m3u_profiles = m3u_account.profiles.filter(is_active=True) + default_profile = next((obj for obj in m3u_profiles if obj.is_default), None) - if default_profile: - m3u_profile_id = default_profile.id + if not default_profile: + return {'error': 'M3U account has no default profile'} + + # Check profiles in order: default first, then others + profiles = [default_profile] + [obj for obj in m3u_profiles if not obj.is_default] + + selected_profile = None + for profile in profiles: + + # Check connection availability + if redis_client: + profile_connections_key = f"profile_connections:{profile.id}" + current_connections = int(redis_client.get(profile_connections_key) or 0) + + # Check if this channel is already using this profile + channel_using_profile = False + existing_stream_id = 
redis_client.get(f"channel_stream:{channel.id}") + if existing_stream_id: + # Decode bytes to string/int for proper Redis key lookup + existing_stream_id = existing_stream_id.decode('utf-8') + existing_profile_id = redis_client.get(f"stream_profile:{existing_stream_id}") + if existing_profile_id and int(existing_profile_id.decode('utf-8')) == profile.id: + channel_using_profile = True + logger.debug(f"Channel {channel.id} already using profile {profile.id}") + + # Calculate effective connections (subtract 1 if channel already using this profile) + effective_connections = current_connections - (1 if channel_using_profile else 0) + + # Check if profile has available slots + if profile.max_streams == 0 or effective_connections < profile.max_streams: + selected_profile = profile + logger.debug(f"Selected profile {profile.id} with {effective_connections}/{profile.max_streams} effective connections (current: {current_connections}, already using: {channel_using_profile})") + break + else: + logger.debug(f"Profile {profile.id} at max connections: {effective_connections}/{profile.max_streams} (current: {current_connections}, already using: {channel_using_profile})") else: - logger.error(f"No profile found for stream {stream_id}") - return {'error': 'No profile found for stream'} - else: - # Use first available profile - m3u_profile_id = profiles.first().id + # No Redis available, assume first active profile is okay + selected_profile = profile + break + + if not selected_profile: + return {'error': 'No profiles available with connection capacity'} + + m3u_profile_id = selected_profile.id else: stream_id, m3u_profile_id, error_reason = channel.get_stream() if stream_id is None or m3u_profile_id is None: @@ -162,8 +282,15 @@ def get_stream_info_for_switch(channel_id: str, target_stream_id: Optional[int] stream = get_object_or_404(Stream, pk=stream_id) profile = get_object_or_404(M3UAccountProfile, pk=m3u_profile_id) - # Get the user agent from the M3U account + # Check connections left m3u_account = M3UAccount.objects.get(id=profile.m3u_account.id) + #connections_left = get_connections_left(m3u_profile_id) + + #if connections_left <= 0: + #logger.warning(f"No connections left for M3U account {m3u_account.id}") + #return {'error': 'No connections left'} + + # Get the user agent from the M3U account user_agent = m3u_account.get_user_agent().user_agent # Generate URL using the transform function directly @@ -172,7 +299,7 @@ def get_stream_info_for_switch(channel_id: str, target_stream_id: Optional[int] # Get transcode info from the channel's stream profile stream_profile = channel.get_stream_profile() transcode = not (stream_profile.is_proxy() or stream_profile is None) - profile_value = str(stream_profile) + profile_value = stream_profile.id return { 'url': stream_url, @@ -198,15 +325,18 @@ def get_alternate_streams(channel_id: str, current_stream_id: Optional[int] = No List[dict]: List of stream information dictionaries with stream_id and profile_id """ try: + from core.utils import RedisClient + # Get channel object channel = get_stream_object(channel_id) if isinstance(channel, Stream): logger.error(f"Stream is not a channel") return [] + redis_client = RedisClient.get_client() logger.debug(f"Looking for alternate streams for channel {channel_id}, current stream ID: {current_stream_id}") - # Get all assigned streams for this channel using the correct ordering from the channelstream table + # Get all assigned streams for this channel using the correct ordering streams = 
channel.streams.all().order_by('channelstream__order') logger.debug(f"Channel {channel_id} has {streams.count()} total assigned streams") @@ -218,7 +348,6 @@ def get_alternate_streams(channel_id: str, current_stream_id: Optional[int] = No # Process each stream in the user-defined order for stream in streams: - # Log each stream we're checking logger.debug(f"Checking stream ID {stream.id} ({stream.name}) for channel {channel_id}") # Skip the current failing stream @@ -226,44 +355,76 @@ def get_alternate_streams(channel_id: str, current_stream_id: Optional[int] = No logger.debug(f"Skipping current stream ID {current_stream_id}") continue - # Find compatible profiles for this stream + # Find compatible profiles for this stream with connection checking try: - # Check if we can find profiles via m3u_account - profiles = M3UAccountProfile.objects.filter(m3u_account=stream.m3u_account) - if not profiles.exists(): - logger.debug(f"No profiles found via m3u_account for stream {stream.id}") - # Fallback to the default profile of the account - default_profile = M3UAccountProfile.objects.filter( - m3u_account=stream.m3u_account, - is_default=True - ).first() - if default_profile: - profiles = [default_profile] + m3u_account = stream.m3u_account + if not m3u_account: + logger.debug(f"Stream {stream.id} has no M3U account") + continue + if m3u_account.is_active == False: + logger.debug(f"M3U account {m3u_account.id} is inactive, skipping.") + continue + m3u_profiles = m3u_account.profiles.filter(is_active=True) + default_profile = next((obj for obj in m3u_profiles if obj.is_default), None) + + if not default_profile: + logger.debug(f"M3U account {m3u_account.id} has no default profile") + continue + + # Check profiles in order with connection availability + profiles = [default_profile] + [obj for obj in m3u_profiles if not obj.is_default] + + selected_profile = None + for profile in profiles: + # Check connection availability + if redis_client: + profile_connections_key = f"profile_connections:{profile.id}" + current_connections = int(redis_client.get(profile_connections_key) or 0) + + # Check if this channel is already using this profile + channel_using_profile = False + existing_stream_id = redis_client.get(f"channel_stream:{channel.id}") + if existing_stream_id: + # Decode bytes to string/int for proper Redis key lookup + existing_stream_id = existing_stream_id.decode('utf-8') + existing_profile_id = redis_client.get(f"stream_profile:{existing_stream_id}") + if existing_profile_id and int(existing_profile_id.decode('utf-8')) == profile.id: + channel_using_profile = True + logger.debug(f"Channel {channel.id} already using profile {profile.id}") + + # Calculate effective connections (subtract 1 if channel already using this profile) + effective_connections = current_connections - (1 if channel_using_profile else 0) + + # Check if profile has available slots + if profile.max_streams == 0 or effective_connections < profile.max_streams: + selected_profile = profile + logger.debug(f"Found available profile {profile.id} for stream {stream.id}: {effective_connections}/{profile.max_streams} effective (current: {current_connections}, already using: {channel_using_profile})") + break + else: + logger.debug(f"Profile {profile.id} at max connections: {effective_connections}/{profile.max_streams} (current: {current_connections}, already using: {channel_using_profile})") else: - logger.warning(f"No default profile found for m3u_account {stream.m3u_account.id}") - continue - - # Get first compatible profile - 
profile = profiles.first() - if profile: - logger.debug(f"Found compatible profile ID {profile.id} for stream ID {stream.id}") + # No Redis available, assume first active profile is okay + selected_profile = profile + break + if selected_profile: alternate_streams.append({ 'stream_id': stream.id, - 'profile_id': profile.id, + 'profile_id': selected_profile.id, 'name': stream.name }) else: - logger.debug(f"No compatible profile found for stream ID {stream.id}") + logger.debug(f"No available profiles for stream ID {stream.id}") + except Exception as inner_e: logger.error(f"Error finding profiles for stream {stream.id}: {inner_e}") continue if alternate_streams: stream_ids = ', '.join([str(s['stream_id']) for s in alternate_streams]) - logger.info(f"Found {len(alternate_streams)} alternate streams for channel {channel_id}: [{stream_ids}]") + logger.info(f"Found {len(alternate_streams)} alternate streams with available connections for channel {channel_id}: [{stream_ids}]") else: - logger.warning(f"No alternate streams found for channel {channel_id}") + logger.warning(f"No alternate streams with available connections found for channel {channel_id}") return alternate_streams except Exception as e: @@ -274,6 +435,9 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)): """ Validate if a stream URL is accessible without downloading the full content. + Note: UDP/RTP/RTSP streams are automatically considered valid as they cannot + be validated via HTTP methods. + Args: url (str): The URL to validate user_agent (str): User agent to use for the request @@ -282,6 +446,12 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)): Returns: tuple: (is_valid, final_url, status_code, message) """ + # Check if URL uses non-HTTP protocols (UDP/RTP/RTSP) + # These cannot be validated via HTTP methods, so we skip validation + if url.startswith(('udp://', 'rtp://', 'rtsp://')): + logger.info(f"Skipping HTTP validation for non-HTTP protocol: {url}") + return True, url, 200, "Non-HTTP protocol (UDP/RTP/RTSP) - validation skipped" + try: # Create session with proper headers session = requests.Session() @@ -292,16 +462,21 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)): session.headers.update(headers) # Make HEAD request first as it's faster and doesn't download content - head_response = session.head( - url, - timeout=timeout, - allow_redirects=True - ) + head_request_success = True + try: + head_response = session.head( + url, + timeout=timeout, + allow_redirects=True + ) + except requests.exceptions.RequestException as e: + head_request_success = False + logger.warning(f"Request error (HEAD), assuming HEAD not supported: {str(e)}") # If HEAD not supported, server will return 405 or other error - if 200 <= head_response.status_code < 300: + if head_request_success and (200 <= head_response.status_code < 300): # HEAD request successful - return True, head_response.url, head_response.status_code, "Valid (HEAD request)" + return True, url, head_response.status_code, "Valid (HEAD request)" # Try a GET request with stream=True to avoid downloading all content get_response = session.get( @@ -314,7 +489,7 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)): # IMPORTANT: Check status code first before checking content if not (200 <= get_response.status_code < 300): logger.warning(f"Stream validation failed with HTTP status {get_response.status_code}") - return False, get_response.url, get_response.status_code, f"Invalid HTTP status: {get_response.status_code}" + return False, 
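# --- Condensed sketch of the validation flow above: non-HTTP schemes (udp/rtp/rtsp) cannot
# --- be probed with requests, so they are accepted as-is; otherwise a HEAD request is tried
# --- first and a streaming GET is used as the fallback when HEAD is unsupported. The return
# --- shape mirrors the diff, but this helper is illustrative, not the shipped function.
import requests

def quick_validate(url, user_agent=None, timeout=(5, 5)):
    if url.startswith(("udp://", "rtp://", "rtsp://")):
        return True, url, 200, "Non-HTTP protocol - validation skipped"

    session = requests.Session()
    if user_agent:
        session.headers["User-Agent"] = user_agent
    try:
        try:
            head = session.head(url, timeout=timeout, allow_redirects=True)
            if 200 <= head.status_code < 300:
                return True, url, head.status_code, "Valid (HEAD request)"
        except requests.exceptions.RequestException:
            pass  # many stream servers reject HEAD; fall through to GET

        resp = session.get(url, timeout=timeout, allow_redirects=True, stream=True)
        ok = 200 <= resp.status_code < 300
        status = resp.status_code
        resp.close()
        message = "Valid (GET request)" if ok else f"Invalid HTTP status: {status}"
        return ok, url, status, message
    except requests.exceptions.Timeout:
        return False, url, 0, "Timeout connecting to stream"
    finally:
        session.close()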
url, get_response.status_code, f"Invalid HTTP status: {get_response.status_code}" # Only check content if status code is valid try: @@ -368,7 +543,7 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)): get_response.close() # If we have content, consider it valid even with unrecognized content type - return is_valid, get_response.url, get_response.status_code, message + return is_valid, url, get_response.status_code, message except requests.exceptions.Timeout: return False, url, 0, "Timeout connecting to stream" @@ -381,3 +556,47 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)): finally: if 'session' in locals(): session.close() + +def get_connections_left(m3u_profile_id: int) -> int: + """ + Get the number of available connections left for an M3U profile. + + Args: + m3u_profile_id: The ID of the M3U profile + + Returns: + int: Number of connections available (0 if none available) + """ + try: + from core.utils import RedisClient + + # Get the M3U profile + m3u_profile = M3UAccountProfile.objects.get(id=m3u_profile_id) + + # If max_streams is 0, it means unlimited + if m3u_profile.max_streams == 0: + return 999999 # Return a large number to indicate unlimited + + # Get Redis client + redis_client = RedisClient.get_client() + if not redis_client: + logger.warning("Redis not available, assuming connections available") + return max(0, m3u_profile.max_streams - 1) # Conservative estimate + + # Check current connections for this specific profile + profile_connections_key = f"profile_connections:{m3u_profile_id}" + current_connections = int(redis_client.get(profile_connections_key) or 0) + + # Calculate available connections + connections_left = max(0, m3u_profile.max_streams - current_connections) + + logger.debug(f"M3U profile {m3u_profile_id}: {current_connections}/{m3u_profile.max_streams} used, {connections_left} available") + + return connections_left + + except M3UAccountProfile.DoesNotExist: + logger.error(f"M3U profile {m3u_profile_id} not found") + return 0 + except Exception as e: + logger.error(f"Error getting connections left for M3U profile {m3u_profile_id}: {e}") + return 0 diff --git a/apps/proxy/ts_proxy/utils.py b/apps/proxy/ts_proxy/utils.py index b568b804..20a6e140 100644 --- a/apps/proxy/ts_proxy/utils.py +++ b/apps/proxy/ts_proxy/utils.py @@ -7,19 +7,27 @@ logger = logging.getLogger("ts_proxy") def detect_stream_type(url): """ - Detect if stream URL is HLS or TS format. + Detect if stream URL is HLS, RTSP/RTP, UDP, or TS format. Args: url (str): The stream URL to analyze Returns: - str: 'hls' or 'ts' depending on detected format + str: 'hls', 'rtsp', 'udp', or 'ts' depending on detected format """ if not url: return 'unknown' url_lower = url.lower() + # Check for UDP streams (requires FFmpeg) + if url_lower.startswith('udp://'): + return 'udp' + + # Check for RTSP/RTP streams (requires FFmpeg) + if url_lower.startswith('rtsp://') or url_lower.startswith('rtp://'): + return 'rtsp' + # Look for common HLS indicators if (url_lower.endswith('.m3u8') or '.m3u8?' 
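# --- Minimal restatement of the scheme/extension detection added in utils.py above:
# --- udp:// and rtsp:// (or rtp://) sources need FFmpeg, .m3u8 URLs are HLS, and everything
# --- else is treated as a raw MPEG-TS stream. Purely illustrative.
def classify_stream_url(url: str) -> str:
    if not url:
        return "unknown"
    u = url.lower()
    if u.startswith("udp://"):
        return "udp"
    if u.startswith(("rtsp://", "rtp://")):
        return "rtsp"
    if u.endswith(".m3u8") or ".m3u8?" in u:
        return "hls"
    return "ts"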
in url_lower or diff --git a/apps/proxy/ts_proxy/views.py b/apps/proxy/ts_proxy/views.py index 35ca3648..91f254a7 100644 --- a/apps/proxy/ts_proxy/views.py +++ b/apps/proxy/ts_proxy/views.py @@ -3,7 +3,8 @@ import threading import time import random import re -from django.http import StreamingHttpResponse, JsonResponse, HttpResponseRedirect +import pathlib +from django.http import StreamingHttpResponse, JsonResponse, HttpResponseRedirect, HttpResponse from django.views.decorators.csrf import csrf_exempt from django.shortcuts import get_object_or_404 from apps.proxy.config import TSConfig as Config @@ -15,21 +16,39 @@ from .redis_keys import RedisKeys import logging from apps.channels.models import Channel, Stream from apps.m3u.models import M3UAccount, M3UAccountProfile +from apps.accounts.models import User from core.models import UserAgent, CoreSettings, PROXY_PROFILE_NAME from rest_framework.decorators import api_view, permission_classes -from rest_framework.permissions import IsAuthenticated +from rest_framework.response import Response +from apps.accounts.permissions import ( + IsAdmin, + permission_classes_by_method, + permission_classes_by_action, +) from .constants import ChannelState, EventType, StreamType, ChannelMetadataField from .config_helper import ConfigHelper from .services.channel_service import ChannelService -from .url_utils import generate_stream_url, transform_url, get_stream_info_for_switch, get_stream_object, get_alternate_streams +from core.utils import send_websocket_update +from .url_utils import ( + generate_stream_url, + transform_url, + get_stream_info_for_switch, + get_stream_object, + get_alternate_streams, +) from .utils import get_logger from uuid import UUID +import gevent +from dispatcharr.utils import network_access_allowed logger = get_logger() -@api_view(['GET']) +@api_view(["GET"]) def stream_ts(request, channel_id): + if not network_access_allowed(request, "STREAMS"): + return JsonResponse({"error": "Forbidden"}, status=403) + """Stream TS data to client with immediate response and keep-alive packets during initialization""" channel = get_stream_object(channel_id) @@ -43,52 +62,96 @@ def stream_ts(request, channel_id): logger.info(f"[{client_id}] Requested stream for channel {channel_id}") # Extract client user agent early - for header in ['HTTP_USER_AGENT', 'User-Agent', 'user-agent']: - if (header in request.META): + for header in ["HTTP_USER_AGENT", "User-Agent", "user-agent"]: + if header in request.META: client_user_agent = request.META[header] - logger.debug(f"[{client_id}] Client connected with user agent: {client_user_agent}") + logger.debug( + f"[{client_id}] Client connected with user agent: {client_user_agent}" + ) break # Check if we need to reinitialize the channel needs_initialization = True channel_state = None + channel_initializing = False # Get current channel state from Redis if available if proxy_server.redis_client: metadata_key = RedisKeys.channel_metadata(channel_id) if proxy_server.redis_client.exists(metadata_key): metadata = proxy_server.redis_client.hgetall(metadata_key) - state_field = ChannelMetadataField.STATE.encode('utf-8') + state_field = ChannelMetadataField.STATE.encode("utf-8") if state_field in metadata: - channel_state = metadata[state_field].decode('utf-8') + channel_state = metadata[state_field].decode("utf-8") - # Only skip initialization if channel is in a healthy state - valid_states = [ChannelState.ACTIVE, ChannelState.WAITING_FOR_CLIENTS] - if channel_state in valid_states: - # Verify the owner is still 
active - owner_field = ChannelMetadataField.OWNER.encode('utf-8') + # Active/running states - channel is operational, don't reinitialize + if channel_state in [ + ChannelState.ACTIVE, + ChannelState.WAITING_FOR_CLIENTS, + ChannelState.BUFFERING, + ChannelState.INITIALIZING, + ChannelState.CONNECTING, + ChannelState.STOPPING, + ]: + needs_initialization = False + logger.debug( + f"[{client_id}] Channel {channel_id} in state {channel_state}, skipping initialization" + ) + + # Special handling for initializing/connecting states + if channel_state in [ + ChannelState.INITIALIZING, + ChannelState.CONNECTING, + ]: + channel_initializing = True + logger.debug( + f"[{client_id}] Channel {channel_id} is still initializing, client will wait" + ) + # Terminal states - channel needs cleanup before reinitialization + elif channel_state in [ + ChannelState.ERROR, + ChannelState.STOPPED, + ]: + needs_initialization = True + logger.info( + f"[{client_id}] Channel {channel_id} in terminal state {channel_state}, will reinitialize" + ) + # Unknown/empty state - check if owner is alive + else: + owner_field = ChannelMetadataField.OWNER.encode("utf-8") if owner_field in metadata: - owner = metadata[owner_field].decode('utf-8') + owner = metadata[owner_field].decode("utf-8") owner_heartbeat_key = f"ts_proxy:worker:{owner}:heartbeat" if proxy_server.redis_client.exists(owner_heartbeat_key): - # Owner is active and channel is in good state + # Owner is still active with unknown state - don't reinitialize needs_initialization = False - logger.info(f"[{client_id}] Channel {channel_id} in state {channel_state} with active owner {owner}") + logger.debug( + f"[{client_id}] Channel {channel_id} has active owner {owner}, skipping init" + ) + else: + # Owner dead - needs reinitialization + needs_initialization = True + logger.warning( + f"[{client_id}] Channel {channel_id} owner {owner} is dead, will reinitialize" + ) # Start initialization if needed - channel_initializing = False if needs_initialization or not proxy_server.check_if_channel_exists(channel_id): - # Force cleanup of any previous instance - if channel_state in [ChannelState.ERROR, ChannelState.STOPPING, ChannelState.STOPPED]: - logger.warning(f"[{client_id}] Channel {channel_id} in state {channel_state}, forcing cleanup") - proxy_server.stop_channel(channel_id) - - # Initialize the channel (but don't wait for completion) logger.info(f"[{client_id}] Starting channel {channel_id} initialization") + # Force cleanup of any previous instance if in terminal state + if channel_state in [ + ChannelState.ERROR, + ChannelState.STOPPING, + ChannelState.STOPPED, + ]: + logger.warning( + f"[{client_id}] Channel {channel_id} in state {channel_state}, forcing cleanup" + ) + ChannelService.stop_channel(channel_id) - # Use max retry attempts and connection timeout from config - max_retries = ConfigHelper.max_retries() - retry_timeout = ConfigHelper.connection_timeout() + # Use fixed retry interval and timeout + retry_timeout = 3 # 3 seconds total timeout + retry_interval = 0.1 # 100ms between attempts wait_start_time = time.time() stream_url = None @@ -96,70 +159,111 @@ def stream_ts(request, channel_id): transcode = False profile_value = None error_reason = None + attempt = 0 + should_retry = True - # Try to get a stream with configured retries - for attempt in range(max_retries): - stream_url, stream_user_agent, transcode, profile_value = generate_stream_url(channel_id) + # Try to get a stream with fixed interval retries + while should_retry and time.time() - 
wait_start_time < retry_timeout: + attempt += 1 + stream_url, stream_user_agent, transcode, profile_value = ( + generate_stream_url(channel_id) + ) if stream_url is not None: - logger.info(f"[{client_id}] Successfully obtained stream for channel {channel_id}") + logger.info( + f"[{client_id}] Successfully obtained stream for channel {channel_id} after {attempt} attempts" + ) break - # If we failed because there are no streams assigned, don't retry - _, _, error_reason = channel.get_stream() - if error_reason and 'maximum connection limits' not in error_reason: - logger.warning(f"[{client_id}] Can't retry - error not related to connection limits: {error_reason}") + # On first failure, check if the error is retryable + if attempt == 1: + _, _, error_reason = channel.get_stream() + if error_reason and "maximum connection limits" not in error_reason: + logger.warning( + f"[{client_id}] Can't retry - error not related to connection limits: {error_reason}" + ) + should_retry = False + break + + # Check if we have time remaining for another sleep cycle + elapsed_time = time.time() - wait_start_time + remaining_time = retry_timeout - elapsed_time + + # If we don't have enough time for the next sleep interval, break + # but only after we've already made an attempt (the while condition will try one more time) + if remaining_time <= retry_interval: + logger.info( + f"[{client_id}] Insufficient time ({remaining_time:.1f}s) for another sleep cycle, will make one final attempt" + ) break - # Don't exceed the overall connection timeout - if time.time() - wait_start_time > retry_timeout: - logger.warning(f"[{client_id}] Connection wait timeout exceeded ({retry_timeout}s)") - break + # Wait before retrying + logger.info( + f"[{client_id}] Waiting {retry_interval*1000:.0f}ms for a connection to become available (attempt {attempt}, {remaining_time:.1f}s remaining)" + ) + gevent.sleep(retry_interval) + retry_interval += 0.025 # Increase wait time by 25ms for next attempt - # Wait before retrying (using exponential backoff with a cap) - wait_time = min(0.5 * (2 ** attempt), 2.0) # Caps at 2 seconds - logger.info(f"[{client_id}] Waiting {wait_time:.1f}s for a connection to become available (attempt {attempt+1}/{max_retries})") - time.sleep(wait_time) + # Make one final attempt if we still don't have a stream, should retry, and haven't exceeded timeout + if stream_url is None and should_retry and time.time() - wait_start_time < retry_timeout: + attempt += 1 + logger.info( + f"[{client_id}] Making final attempt {attempt} at timeout boundary" + ) + stream_url, stream_user_agent, transcode, profile_value = ( + generate_stream_url(channel_id) + ) + if stream_url is not None: + logger.info( + f"[{client_id}] Successfully obtained stream on final attempt for channel {channel_id}" + ) if stream_url is None: - # Make sure to release any stream locks that might have been acquired - if hasattr(channel, 'streams') and channel.streams.exists(): - for stream in channel.streams.all(): - try: - stream.release_stream() - logger.info(f"[{client_id}] Released stream {stream.id} for channel {channel_id}") - except Exception as e: - logger.error(f"[{client_id}] Error releasing stream: {e}") + # Release the channel's stream lock if one was acquired + # Note: Only call this if get_stream() actually assigned a stream + # In our case, if stream_url is None, no stream was ever assigned, so don't release # Get the specific error message if available wait_duration = f"{int(time.time() - wait_start_time)}s" - error_msg = error_reason if 
error_reason else 'No available streams for this channel' - return JsonResponse({ - 'error': error_msg, - 'waited': wait_duration - }, status=503) # 503 Service Unavailable is appropriate here + error_msg = ( + error_reason + if error_reason + else "No available streams for this channel" + ) + logger.info( + f"[{client_id}] Failed to obtain stream after {attempt} attempts over {wait_duration}: {error_msg}" + ) + return JsonResponse( + {"error": error_msg, "waited": wait_duration}, status=503 + ) # 503 Service Unavailable is appropriate here # Get the stream ID from the channel stream_id, m3u_profile_id, _ = channel.get_stream() - logger.info(f"Channel {channel_id} using stream ID {stream_id}, m3u account profile ID {m3u_profile_id}") + logger.info( + f"Channel {channel_id} using stream ID {stream_id}, m3u account profile ID {m3u_profile_id}" + ) # Generate transcode command if needed stream_profile = channel.get_stream_profile() if stream_profile.is_redirect(): # Validate the stream URL before redirecting - from .url_utils import validate_stream_url, get_alternate_streams, get_stream_info_for_switch + from .url_utils import ( + validate_stream_url, + get_alternate_streams, + get_stream_info_for_switch, + ) # Try initial URL logger.info(f"[{client_id}] Validating redirect URL: {stream_url}") is_valid, final_url, status_code, message = validate_stream_url( - stream_url, - user_agent=stream_user_agent, - timeout=(5, 5) + stream_url, user_agent=stream_user_agent, timeout=(5, 5) ) # If first URL doesn't validate, try alternates if not is_valid: - logger.warning(f"[{client_id}] Primary stream URL failed validation: {message}") + logger.warning( + f"[{client_id}] Primary stream URL failed validation: {message}" + ) # Track tried streams to avoid loops tried_streams = {stream_id} @@ -169,49 +273,80 @@ def stream_ts(request, channel_id): # Try each alternate until one works for alt in alternates: - if alt['stream_id'] in tried_streams: + if alt["stream_id"] in tried_streams: continue - tried_streams.add(alt['stream_id']) + tried_streams.add(alt["stream_id"]) # Get stream info - alt_info = get_stream_info_for_switch(channel_id, alt['stream_id']) - if 'error' in alt_info: - logger.warning(f"[{client_id}] Error getting alternate stream info: {alt_info['error']}") + alt_info = get_stream_info_for_switch( + channel_id, alt["stream_id"] + ) + if "error" in alt_info: + logger.warning( + f"[{client_id}] Error getting alternate stream info: {alt_info['error']}" + ) continue # Validate the alternate URL - logger.info(f"[{client_id}] Trying alternate stream #{alt['stream_id']}: {alt_info['url']}") + logger.info( + f"[{client_id}] Trying alternate stream #{alt['stream_id']}: {alt_info['url']}" + ) is_valid, final_url, status_code, message = validate_stream_url( - alt_info['url'], - user_agent=alt_info['user_agent'], - timeout=(5, 5) + alt_info["url"], + user_agent=alt_info["user_agent"], + timeout=(5, 5), ) if is_valid: - logger.info(f"[{client_id}] Alternate stream #{alt['stream_id']} validated successfully") + logger.info( + f"[{client_id}] Alternate stream #{alt['stream_id']} validated successfully" + ) break else: - logger.warning(f"[{client_id}] Alternate stream #{alt['stream_id']} failed validation: {message}") + logger.warning( + f"[{client_id}] Alternate stream #{alt['stream_id']} failed validation: {message}" + ) # Release stream lock before redirecting channel.release_stream() # Final decision based on validation results if is_valid: - logger.info(f"[{client_id}] Redirecting to validated URL: 
{final_url} ({message})") + logger.info( + f"[{client_id}] Redirecting to validated URL: {final_url} ({message})" + ) + + # For non-HTTP protocols (RTSP/RTP/UDP), we need to manually create the redirect + # because Django's HttpResponseRedirect blocks them for security + if final_url.startswith(('rtsp://', 'rtp://', 'udp://')): + logger.info(f"[{client_id}] Using manual redirect for non-HTTP protocol") + response = HttpResponse(status=301) + response['Location'] = final_url + return response + return HttpResponseRedirect(final_url) else: - logger.error(f"[{client_id}] All available redirect URLs failed validation") - return JsonResponse({ - 'error': 'All available streams failed validation' - }, status=502) # 502 Bad Gateway + logger.error( + f"[{client_id}] All available redirect URLs failed validation" + ) + return JsonResponse( + {"error": "All available streams failed validation"}, status=502 + ) # 502 Bad Gateway # Initialize channel with the stream's user agent (not the client's) success = ChannelService.initialize_channel( - channel_id, stream_url, stream_user_agent, transcode, profile_value, stream_id, m3u_profile_id + channel_id, + stream_url, + stream_user_agent, + transcode, + profile_value, + stream_id, + m3u_profile_id, ) if not success: - return JsonResponse({'error': 'Failed to initialize channel'}, status=500) + return JsonResponse( + {"error": "Failed to initialize channel"}, status=500 + ) # If we're the owner, wait for connection to establish if proxy_server.am_i_owner(channel_id): @@ -222,7 +357,9 @@ def stream_ts(request, channel_id): while not manager.connected: if time.time() - wait_start > timeout: proxy_server.stop_channel(channel_id) - return JsonResponse({'error': 'Connection timeout'}, status=504) + return JsonResponse( + {"error": "Connection timeout"}, status=504 + ) # Check if this manager should keep retrying or stop if not manager.should_retry(): @@ -232,41 +369,68 @@ def stream_ts(request, channel_id): if proxy_server.redis_client: try: - state_bytes = proxy_server.redis_client.hget(metadata_key, ChannelMetadataField.STATE) + state_bytes = proxy_server.redis_client.hget( + metadata_key, ChannelMetadataField.STATE + ) if state_bytes: - current_state = state_bytes.decode('utf-8') - logger.debug(f"[{client_id}] Current state of channel {channel_id}: {current_state}") + current_state = state_bytes.decode("utf-8") + logger.debug( + f"[{client_id}] Current state of channel {channel_id}: {current_state}" + ) except Exception as e: - logger.warning(f"[{client_id}] Error getting channel state: {e}") + logger.warning( + f"[{client_id}] Error getting channel state: {e}" + ) # Allow normal transitional states to continue - if current_state in [ChannelState.INITIALIZING, ChannelState.CONNECTING]: - logger.info(f"[{client_id}] Channel {channel_id} is in {current_state} state, continuing to wait") + if current_state in [ + ChannelState.INITIALIZING, + ChannelState.CONNECTING, + ]: + logger.info( + f"[{client_id}] Channel {channel_id} is in {current_state} state, continuing to wait" + ) # Reset wait timer to allow the transition to complete wait_start = time.time() continue # Check if we're switching URLs - if hasattr(manager, 'url_switching') and manager.url_switching: - logger.info(f"[{client_id}] Stream manager is currently switching URLs for channel {channel_id}") + if ( + hasattr(manager, "url_switching") + and manager.url_switching + ): + logger.info( + f"[{client_id}] Stream manager is currently switching URLs for channel {channel_id}" + ) # Reset wait timer to 
give the switch a chance wait_start = time.time() continue # If we reach here, we've exhausted retries and the channel isn't in a valid transitional state - logger.warning(f"[{client_id}] Channel {channel_id} failed to connect and is not in transitional state") + logger.warning( + f"[{client_id}] Channel {channel_id} failed to connect and is not in transitional state" + ) proxy_server.stop_channel(channel_id) - return JsonResponse({'error': 'Failed to connect'}, status=502) + return JsonResponse( + {"error": "Failed to connect"}, status=502 + ) - time.sleep(0.1) + gevent.sleep( + 0.1 + ) # FIXED: Using gevent.sleep instead of time.sleep logger.info(f"[{client_id}] Successfully initialized channel {channel_id}") channel_initializing = True # Register client - can do this regardless of initialization state # Create local resources if needed - if channel_id not in proxy_server.stream_buffers or channel_id not in proxy_server.client_managers: - logger.debug(f"[{client_id}] Channel {channel_id} exists in Redis but not initialized in this worker - initializing now") + if ( + channel_id not in proxy_server.stream_buffers + or channel_id not in proxy_server.client_managers + ): + logger.debug( + f"[{client_id}] Channel {channel_id} exists in Redis but not initialized in this worker - initializing now" + ) # Get URL from Redis metadata url = None @@ -274,32 +438,54 @@ def stream_ts(request, channel_id): if proxy_server.redis_client: metadata_key = RedisKeys.channel_metadata(channel_id) - url_bytes = proxy_server.redis_client.hget(metadata_key, ChannelMetadataField.URL) - ua_bytes = proxy_server.redis_client.hget(metadata_key, ChannelMetadataField.USER_AGENT) - profile_bytes = proxy_server.redis_client.hget(metadata_key, ChannelMetadataField.STREAM_PROFILE) + url_bytes = proxy_server.redis_client.hget( + metadata_key, ChannelMetadataField.URL + ) + ua_bytes = proxy_server.redis_client.hget( + metadata_key, ChannelMetadataField.USER_AGENT + ) + profile_bytes = proxy_server.redis_client.hget( + metadata_key, ChannelMetadataField.STREAM_PROFILE + ) if url_bytes: - url = url_bytes.decode('utf-8') + url = url_bytes.decode("utf-8") if ua_bytes: - stream_user_agent = ua_bytes.decode('utf-8') + stream_user_agent = ua_bytes.decode("utf-8") # Extract transcode setting from Redis if profile_bytes: - profile_str = profile_bytes.decode('utf-8') - use_transcode = (profile_str == PROXY_PROFILE_NAME or profile_str == 'None') - logger.debug(f"Using profile '{profile_str}' for channel {channel_id}, transcode={use_transcode}") + profile_str = profile_bytes.decode("utf-8") + use_transcode = ( + profile_str == PROXY_PROFILE_NAME or profile_str == "None" + ) + logger.debug( + f"Using profile '{profile_str}' for channel {channel_id}, transcode={use_transcode}" + ) else: # Default settings when profile not found in Redis - profile_str = 'None' # Default profile name - use_transcode = False # Default to direct streaming without transcoding - logger.debug(f"No profile found in Redis for channel {channel_id}, defaulting to transcode={use_transcode}") + profile_str = "None" # Default profile name + use_transcode = ( + False # Default to direct streaming without transcoding + ) + logger.debug( + f"No profile found in Redis for channel {channel_id}, defaulting to transcode={use_transcode}" + ) # Use client_user_agent as fallback if stream_user_agent is None - success = proxy_server.initialize_channel(url, channel_id, stream_user_agent or client_user_agent, use_transcode) + success = proxy_server.initialize_channel( + url, 
channel_id, stream_user_agent or client_user_agent, use_transcode + ) if not success: - logger.error(f"[{client_id}] Failed to initialize channel {channel_id} locally") - return JsonResponse({'error': 'Failed to initialize channel locally'}, status=500) + logger.error( + f"[{client_id}] Failed to initialize channel {channel_id} locally" + ) + return JsonResponse( + {"error": "Failed to initialize channel locally"}, status=500 + ) - logger.info(f"[{client_id}] Successfully initialized channel {channel_id} locally") + logger.info( + f"[{client_id}] Successfully initialized channel {channel_id} locally" + ) # Register client buffer = proxy_server.stream_buffers[channel_id] @@ -314,53 +500,106 @@ def stream_ts(request, channel_id): # Return the StreamingHttpResponse from the main function response = StreamingHttpResponse( - streaming_content=generate(), - content_type='video/mp2t' + streaming_content=generate(), content_type="video/mp2t" ) - response['Cache-Control'] = 'no-cache' + response["Cache-Control"] = "no-cache" return response except Exception as e: logger.error(f"Error in stream_ts: {e}", exc_info=True) - return JsonResponse({'error': str(e)}, status=500) + return JsonResponse({"error": str(e)}, status=500) + + +@api_view(["GET"]) +def stream_xc(request, username, password, channel_id): + user = get_object_or_404(User, username=username) + + extension = pathlib.Path(channel_id).suffix + channel_id = pathlib.Path(channel_id).stem + + custom_properties = user.custom_properties or {} + + if "xc_password" not in custom_properties: + return Response({"error": "Invalid credentials"}, status=401) + + if custom_properties["xc_password"] != password: + return Response({"error": "Invalid credentials"}, status=401) + + print(f"Fetchin channel with ID: {channel_id}") + if user.user_level < 10: + user_profile_count = user.channel_profiles.count() + + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channels based on user_level + filters = { + "id": int(channel_id), + "user_level__lte": user.user_level + } + channel = Channel.objects.filter(**filters).first() + else: + # User has specific limited profiles assigned + filters = { + "id": int(channel_id), + "channelprofilemembership__enabled": True, + "user_level__lte": user.user_level, + "channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + channel = Channel.objects.filter(**filters).distinct().first() + + if not channel: + return JsonResponse({"error": "Not found"}, status=404) + else: + channel = get_object_or_404(Channel, id=channel_id) + + # @TODO: we've got the file 'type' via extension, support this when we support multiple outputs + return stream_ts(request._request, str(channel.uuid)) + @csrf_exempt -@api_view(['POST']) -@permission_classes([IsAuthenticated]) +@api_view(["POST"]) +@permission_classes([IsAdmin]) def change_stream(request, channel_id): """Change stream URL for existing channel with enhanced diagnostics""" proxy_server = ProxyServer.get_instance() try: data = json.loads(request.body) - new_url = data.get('url') - user_agent = data.get('user_agent') - stream_id = data.get('stream_id') + new_url = data.get("url") + user_agent = data.get("user_agent") + stream_id = data.get("stream_id") # If stream_id is provided, get the URL and user_agent from it if stream_id: - logger.info(f"Stream ID {stream_id} provided, looking up stream info for channel {channel_id}") + logger.info( + f"Stream ID {stream_id} provided, 
looking up stream info for channel {channel_id}" + ) stream_info = get_stream_info_for_switch(channel_id, stream_id) - if 'error' in stream_info: - return JsonResponse({ - 'error': stream_info['error'], - 'stream_id': stream_id - }, status=404) + if "error" in stream_info: + return JsonResponse( + {"error": stream_info["error"], "stream_id": stream_id}, status=404 + ) # Use the info from the stream - new_url = stream_info['url'] - user_agent = stream_info['user_agent'] - m3u_profile_id = stream_info.get('m3u_profile_id') + new_url = stream_info["url"] + user_agent = stream_info["user_agent"] + m3u_profile_id = stream_info.get("m3u_profile_id") # Stream ID will be passed to change_stream_url later elif not new_url: - return JsonResponse({'error': 'Either url or stream_id must be provided'}, status=400) + return JsonResponse( + {"error": "Either url or stream_id must be provided"}, status=400 + ) - logger.info(f"Attempting to change stream for channel {channel_id} to {new_url}") + logger.info( + f"Attempting to change stream for channel {channel_id} to {new_url}" + ) # Use the service layer instead of direct implementation # Pass stream_id to ensure proper connection tracking - result = ChannelService.change_stream_url(channel_id, new_url, user_agent, stream_id, m3u_profile_id) + result = ChannelService.change_stream_url( + channel_id, new_url, user_agent, stream_id, m3u_profile_id + ) # Get the stream manager before updating URL stream_manager = proxy_server.stream_managers.get(channel_id) @@ -369,37 +608,43 @@ def change_stream(request, channel_id): if stream_manager: # Reset tried streams when manually switching URL via API stream_manager.tried_stream_ids = set() - logger.debug(f"Reset tried stream IDs for channel {channel_id} during manual stream change") + logger.debug( + f"Reset tried stream IDs for channel {channel_id} during manual stream change" + ) - if result.get('status') == 'error': - return JsonResponse({ - 'error': result.get('message', 'Unknown error'), - 'diagnostics': result.get('diagnostics', {}) - }, status=404) + if result.get("status") == "error": + return JsonResponse( + { + "error": result.get("message", "Unknown error"), + "diagnostics": result.get("diagnostics", {}), + }, + status=404, + ) # Format response based on whether it was a direct update or event-based response_data = { - 'message': 'Stream changed successfully', - 'channel': channel_id, - 'url': new_url, - 'owner': result.get('direct_update', False), - 'worker_id': proxy_server.worker_id + "message": "Stream changed successfully", + "channel": channel_id, + "url": new_url, + "owner": result.get("direct_update", False), + "worker_id": proxy_server.worker_id, } # Include stream_id in response if it was used if stream_id: - response_data['stream_id'] = stream_id + response_data["stream_id"] = stream_id return JsonResponse(response_data) except json.JSONDecodeError: - return JsonResponse({'error': 'Invalid JSON'}, status=400) + return JsonResponse({"error": "Invalid JSON"}, status=400) except Exception as e: logger.error(f"Failed to change stream: {e}", exc_info=True) - return JsonResponse({'error': str(e)}, status=500) + return JsonResponse({"error": str(e)}, status=500) -@api_view(['GET']) -@permission_classes([IsAuthenticated]) + +@api_view(["GET"]) +@permission_classes([IsAdmin]) def channel_status(request, channel_id=None): """ Returns status information about channels with detail level based on request: @@ -411,7 +656,7 @@ def channel_status(request, channel_id=None): try: # Check if Redis is available 
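        # Illustrative note (not part of the diff): the status walk below relies on the
        # Redis layout used throughout this module, roughly:
        #   ts_proxy:channel:<channel_id>:metadata   -> hash of ChannelMetadataField values
        #                                               (state, owner, url, user_agent,
        #                                                stream_profile, stream_id, m3u_profile, ...)
        #   ts_proxy:worker:<worker_id>:heartbeat    -> liveness key for the owning worker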
if not proxy_server.redis_client: - return JsonResponse({'error': 'Redis connection not available'}, status=500) + return JsonResponse({"error": "Redis connection not available"}, status=500) # Handle single channel or all channels if channel_id: @@ -420,7 +665,9 @@ def channel_status(request, channel_id=None): if channel_info: return JsonResponse(channel_info) else: - return JsonResponse({'error': f'Channel {channel_id} not found'}, status=404) + return JsonResponse( + {"error": f"Channel {channel_id} not found"}, status=404 + ) else: # Basic info for all channels channel_pattern = "ts_proxy:channel:*:metadata" @@ -429,9 +676,13 @@ def channel_status(request, channel_id=None): # Extract channel IDs from keys cursor = 0 while True: - cursor, keys = proxy_server.redis_client.scan(cursor, match=channel_pattern) + cursor, keys = proxy_server.redis_client.scan( + cursor, match=channel_pattern + ) for key in keys: - channel_id_match = re.search(r"ts_proxy:channel:(.*):metadata", key.decode('utf-8')) + channel_id_match = re.search( + r"ts_proxy:channel:(.*):metadata", key.decode("utf-8") + ) if channel_id_match: ch_id = channel_id_match.group(1) channel_info = ChannelStatus.get_basic_channel_info(ch_id) @@ -441,15 +692,28 @@ def channel_status(request, channel_id=None): if cursor == 0: break - return JsonResponse({'channels': all_channels, 'count': len(all_channels)}) + # Send WebSocket update with the stats + # Format it the same way the original Celery task did + send_websocket_update( + "updates", + "update", + { + "success": True, + "type": "channel_stats", + "stats": json.dumps({'channels': all_channels, 'count': len(all_channels)}) + } + ) + + return JsonResponse({"channels": all_channels, "count": len(all_channels)}) except Exception as e: logger.error(f"Error in channel_status: {e}", exc_info=True) - return JsonResponse({'error': str(e)}, status=500) + return JsonResponse({"error": str(e)}, status=500) + @csrf_exempt -@api_view(['POST', 'DELETE']) -@permission_classes([IsAuthenticated]) +@api_view(["POST", "DELETE"]) +@permission_classes([IsAdmin]) def stop_channel(request, channel_id): """Stop a channel and release all associated resources using PubSub events""" try: @@ -458,60 +722,70 @@ def stop_channel(request, channel_id): # Use the service layer instead of direct implementation result = ChannelService.stop_channel(channel_id) - if result.get('status') == 'error': - return JsonResponse({'error': result.get('message', 'Unknown error')}, status=404) + if result.get("status") == "error": + return JsonResponse( + {"error": result.get("message", "Unknown error")}, status=404 + ) - return JsonResponse({ - 'message': 'Channel stop request sent', - 'channel_id': channel_id, - 'previous_state': result.get('previous_state') - }) + return JsonResponse( + { + "message": "Channel stop request sent", + "channel_id": channel_id, + "previous_state": result.get("previous_state"), + } + ) except Exception as e: logger.error(f"Failed to stop channel: {e}", exc_info=True) - return JsonResponse({'error': str(e)}, status=500) + return JsonResponse({"error": str(e)}, status=500) + @csrf_exempt -@api_view(['POST']) -@permission_classes([IsAuthenticated]) +@api_view(["POST"]) +@permission_classes([IsAdmin]) def stop_client(request, channel_id): """Stop a specific client connection using existing client management""" try: # Parse request body to get client ID data = json.loads(request.body) - client_id = data.get('client_id') + client_id = data.get("client_id") if not client_id: - return 
JsonResponse({'error': 'No client_id provided'}, status=400) + return JsonResponse({"error": "No client_id provided"}, status=400) # Use the service layer instead of direct implementation result = ChannelService.stop_client(channel_id, client_id) - if result.get('status') == 'error': - return JsonResponse({'error': result.get('message')}, status=404) + if result.get("status") == "error": + return JsonResponse({"error": result.get("message")}, status=404) - return JsonResponse({ - 'message': 'Client stop request processed', - 'channel_id': channel_id, - 'client_id': client_id, - 'locally_processed': result.get('locally_processed', False) - }) + return JsonResponse( + { + "message": "Client stop request processed", + "channel_id": channel_id, + "client_id": client_id, + "locally_processed": result.get("locally_processed", False), + } + ) except json.JSONDecodeError: - return JsonResponse({'error': 'Invalid JSON'}, status=400) + return JsonResponse({"error": "Invalid JSON"}, status=400) except Exception as e: logger.error(f"Failed to stop client: {e}", exc_info=True) - return JsonResponse({'error': str(e)}, status=500) + return JsonResponse({"error": str(e)}, status=500) + @csrf_exempt -@api_view(['POST']) -@permission_classes([IsAuthenticated]) +@api_view(["POST"]) +@permission_classes([IsAdmin]) def next_stream(request, channel_id): """Switch to the next available stream for a channel""" proxy_server = ProxyServer.get_instance() try: - logger.info(f"Request to switch to next stream for channel {channel_id} received") + logger.info( + f"Request to switch to next stream for channel {channel_id} received" + ) # Check if the channel exists channel = get_stream_object(channel_id) @@ -524,29 +798,42 @@ def next_stream(request, channel_id): metadata_key = RedisKeys.channel_metadata(channel_id) if proxy_server.redis_client.exists(metadata_key): # Get current stream ID from Redis - stream_id_bytes = proxy_server.redis_client.hget(metadata_key, ChannelMetadataField.STREAM_ID) + stream_id_bytes = proxy_server.redis_client.hget( + metadata_key, ChannelMetadataField.STREAM_ID + ) if stream_id_bytes: - current_stream_id = int(stream_id_bytes.decode('utf-8')) - logger.info(f"Found current stream ID {current_stream_id} in Redis for channel {channel_id}") + current_stream_id = int(stream_id_bytes.decode("utf-8")) + logger.info( + f"Found current stream ID {current_stream_id} in Redis for channel {channel_id}" + ) # Get M3U profile from Redis if available - profile_id_bytes = proxy_server.redis_client.hget(metadata_key, ChannelMetadataField.M3U_PROFILE) + profile_id_bytes = proxy_server.redis_client.hget( + metadata_key, ChannelMetadataField.M3U_PROFILE + ) if profile_id_bytes: - profile_id = int(profile_id_bytes.decode('utf-8')) - logger.info(f"Found M3U profile ID {profile_id} in Redis for channel {channel_id}") + profile_id = int(profile_id_bytes.decode("utf-8")) + logger.info( + f"Found M3U profile ID {profile_id} in Redis for channel {channel_id}" + ) if not current_stream_id: # Channel is not running - return JsonResponse({'error': 'No current stream found for channel'}, status=404) + return JsonResponse( + {"error": "No current stream found for channel"}, status=404 + ) # Get all streams for this channel in their defined order - streams = list(channel.streams.all().order_by('channelstream__order')) + streams = list(channel.streams.all().order_by("channelstream__order")) if len(streams) <= 1: - return JsonResponse({ - 'error': 'No alternate streams available for this channel', - 'current_stream_id': 
current_stream_id - }, status=404) + return JsonResponse( + { + "error": "No alternate streams available for this channel", + "current_stream_id": current_stream_id, + }, + status=404, + ) # Find the current stream's position in the list current_index = None @@ -556,61 +843,74 @@ def next_stream(request, channel_id): break if current_index is None: - logger.warning(f"Current stream ID {current_stream_id} not found in channel's streams list") + logger.warning( + f"Current stream ID {current_stream_id} not found in channel's streams list" + ) # Fall back to the first stream that's not the current one next_stream = next((s for s in streams if s.id != current_stream_id), None) if not next_stream: - return JsonResponse({ - 'error': 'Could not find current stream in channel list', - 'current_stream_id': current_stream_id - }, status=404) + return JsonResponse( + { + "error": "Could not find current stream in channel list", + "current_stream_id": current_stream_id, + }, + status=404, + ) else: # Get the next stream in the rotation (with wrap-around) next_index = (current_index + 1) % len(streams) next_stream = streams[next_index] next_stream_id = next_stream.id - logger.info(f"Rotating to next stream ID {next_stream_id} for channel {channel_id}") + logger.info( + f"Rotating to next stream ID {next_stream_id} for channel {channel_id}" + ) # Get full stream info including URL for the next stream stream_info = get_stream_info_for_switch(channel_id, next_stream_id) - if 'error' in stream_info: - return JsonResponse({ - 'error': stream_info['error'], - 'current_stream_id': current_stream_id, - 'next_stream_id': next_stream_id - }, status=404) + if "error" in stream_info: + return JsonResponse( + { + "error": stream_info["error"], + "current_stream_id": current_stream_id, + "next_stream_id": next_stream_id, + }, + status=404, + ) # Now use the ChannelService to change the stream URL result = ChannelService.change_stream_url( channel_id, - stream_info['url'], - stream_info['user_agent'], - next_stream_id # Pass the stream_id to be stored in Redis + stream_info["url"], + stream_info["user_agent"], + next_stream_id, # Pass the stream_id to be stored in Redis ) - if result.get('status') == 'error': - return JsonResponse({ - 'error': result.get('message', 'Unknown error'), - 'diagnostics': result.get('diagnostics', {}), - 'current_stream_id': current_stream_id, - 'next_stream_id': next_stream_id - }, status=404) + if result.get("status") == "error": + return JsonResponse( + { + "error": result.get("message", "Unknown error"), + "diagnostics": result.get("diagnostics", {}), + "current_stream_id": current_stream_id, + "next_stream_id": next_stream_id, + }, + status=404, + ) # Format success response response_data = { - 'message': 'Stream switched to next available', - 'channel': channel_id, - 'previous_stream_id': current_stream_id, - 'new_stream_id': next_stream_id, - 'new_url': stream_info['url'], - 'owner': result.get('direct_update', False), - 'worker_id': proxy_server.worker_id + "message": "Stream switched to next available", + "channel": channel_id, + "previous_stream_id": current_stream_id, + "new_stream_id": next_stream_id, + "new_url": stream_info["url"], + "owner": result.get("direct_update", False), + "worker_id": proxy_server.worker_id, } return JsonResponse(response_data) except Exception as e: logger.error(f"Failed to switch to next stream: {e}", exc_info=True) - return JsonResponse({'error': str(e)}, status=500) + return JsonResponse({"error": str(e)}, status=500) diff --git 
a/apps/proxy/urls.py b/apps/proxy/urls.py index 98303990..34c026a9 100644 --- a/apps/proxy/urls.py +++ b/apps/proxy/urls.py @@ -5,4 +5,5 @@ app_name = 'proxy' urlpatterns = [ path('ts/', include('apps.proxy.ts_proxy.urls')), path('hls/', include('apps.proxy.hls_proxy.urls')), + path('vod/', include('apps.proxy.vod_proxy.urls')), ] \ No newline at end of file diff --git a/apps/proxy/vod_proxy/__init__.py b/apps/proxy/vod_proxy/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/apps/proxy/vod_proxy/connection_manager.py b/apps/proxy/vod_proxy/connection_manager.py new file mode 100644 index 00000000..ec0bffa5 --- /dev/null +++ b/apps/proxy/vod_proxy/connection_manager.py @@ -0,0 +1,1449 @@ +""" +VOD Connection Manager - Redis-based connection tracking for VOD streams +""" + +import time +import json +import logging +import threading +import random +import re +import requests +from typing import Optional, Dict, Any +from django.http import StreamingHttpResponse, HttpResponse +from core.utils import RedisClient +from apps.vod.models import Movie, Episode +from apps.m3u.models import M3UAccountProfile + +logger = logging.getLogger("vod_proxy") + + +class PersistentVODConnection: + """Handles a single persistent connection to a VOD provider for a session""" + + def __init__(self, session_id: str, stream_url: str, headers: dict): + self.session_id = session_id + self.stream_url = stream_url + self.base_headers = headers + self.session = None + self.current_response = None + self.content_length = None + self.content_type = 'video/mp4' + self.final_url = None + self.lock = threading.Lock() + self.request_count = 0 # Track number of requests on this connection + self.last_activity = time.time() # Track last activity for cleanup + self.cleanup_timer = None # Timer for delayed cleanup + self.active_streams = 0 # Count of active stream generators + + def _establish_connection(self, range_header=None): + """Establish or re-establish connection to provider""" + try: + if not self.session: + self.session = requests.Session() + + headers = self.base_headers.copy() + + # Validate range header against content length + if range_header and self.content_length: + logger.info(f"[{self.session_id}] Validating range {range_header} against content length {self.content_length}") + validated_range = self._validate_range_header(range_header, int(self.content_length)) + if validated_range is None: + # Range is not satisfiable, but don't raise error - return empty response + logger.warning(f"[{self.session_id}] Range not satisfiable: {range_header} for content length {self.content_length}") + return None + elif validated_range != range_header: + range_header = validated_range + logger.info(f"[{self.session_id}] Adjusted range header: {range_header}") + else: + logger.info(f"[{self.session_id}] Range header validated successfully: {range_header}") + elif range_header: + logger.info(f"[{self.session_id}] Range header provided but no content length available yet: {range_header}") + + if range_header: + headers['Range'] = range_header + logger.info(f"[{self.session_id}] Setting Range header: {range_header}") + + # Track request count for better logging + self.request_count += 1 + if self.request_count == 1: + logger.info(f"[{self.session_id}] Making initial request to provider") + target_url = self.stream_url + allow_redirects = True + else: + logger.info(f"[{self.session_id}] Making range request #{self.request_count} on SAME session (using final URL)") + # Use the final URL from first request to avoid 
redirect chain + target_url = self.final_url if self.final_url else self.stream_url + allow_redirects = False # No need to follow redirects again + logger.info(f"[{self.session_id}] Using cached final URL: {target_url}") + + response = self.session.get( + target_url, + headers=headers, + stream=True, + timeout=(10, 30), + allow_redirects=allow_redirects + ) + response.raise_for_status() + + # Log successful response + if self.request_count == 1: + logger.info(f"[{self.session_id}] Request #{self.request_count} successful: {response.status_code} (followed redirects)") + else: + logger.info(f"[{self.session_id}] Request #{self.request_count} successful: {response.status_code} (direct to final URL)") + + # Capture headers from final URL + if not self.content_length: + # First check if we have a pre-stored content length from HEAD request + try: + import redis + from django.conf import settings + redis_host = getattr(settings, 'REDIS_HOST', 'localhost') + redis_port = int(getattr(settings, 'REDIS_PORT', 6379)) + redis_db = int(getattr(settings, 'REDIS_DB', 0)) + r = redis.StrictRedis(host=redis_host, port=redis_port, db=redis_db, decode_responses=True) + content_length_key = f"vod_content_length:{self.session_id}" + stored_length = r.get(content_length_key) + if stored_length: + self.content_length = stored_length + logger.info(f"[{self.session_id}] *** USING PRE-STORED CONTENT LENGTH: {self.content_length} ***") + else: + # Fallback to response headers + self.content_length = response.headers.get('content-length') + logger.info(f"[{self.session_id}] *** USING RESPONSE CONTENT LENGTH: {self.content_length} ***") + except Exception as e: + logger.error(f"[{self.session_id}] Error checking Redis for content length: {e}") + # Fallback to response headers + self.content_length = response.headers.get('content-length') + + self.content_type = response.headers.get('content-type', 'video/mp4') + self.final_url = response.url + logger.info(f"[{self.session_id}] *** PERSISTENT CONNECTION - Final URL: {self.final_url} ***") + logger.info(f"[{self.session_id}] *** PERSISTENT CONNECTION - Content-Length: {self.content_length} ***") + + self.current_response = response + return response + + except Exception as e: + logger.error(f"[{self.session_id}] Error establishing connection: {e}") + self.cleanup() + raise + + def _validate_range_header(self, range_header, content_length): + """Validate and potentially adjust range header against content length""" + try: + if not range_header or not range_header.startswith('bytes='): + return range_header + + range_part = range_header.replace('bytes=', '') + if '-' not in range_part: + return range_header + + start_str, end_str = range_part.split('-', 1) + + # Parse start byte + if start_str: + start_byte = int(start_str) + if start_byte >= content_length: + # Start is beyond file end - not satisfiable + logger.warning(f"[{self.session_id}] Range start {start_byte} >= content length {content_length} - not satisfiable") + return None + else: + start_byte = 0 + + # Parse end byte + if end_str: + end_byte = int(end_str) + if end_byte >= content_length: + # Adjust end to file end + end_byte = content_length - 1 + logger.info(f"[{self.session_id}] Adjusted range end to {end_byte}") + else: + end_byte = content_length - 1 + + # Ensure start <= end + if start_byte > end_byte: + logger.warning(f"[{self.session_id}] Range start {start_byte} > end {end_byte} - not satisfiable") + return None + + validated_range = f"bytes={start_byte}-{end_byte}" + return validated_range + + 
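            # Worked examples (illustrative) for content_length = 1_000_000:
            #   "bytes=0-"          -> "bytes=0-999999"   (missing end defaults to length - 1)
            #   "bytes=500-2000000" -> "bytes=500-999999" (end clamped to length - 1)
            #   "bytes=1500000-"    -> None               (start past end of file, not satisfiable)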
except (ValueError, IndexError) as e: + logger.warning(f"[{self.session_id}] Could not validate range header {range_header}: {e}") + return range_header + + def get_stream(self, range_header=None): + """Get stream with optional range header - reuses connection for range requests""" + with self.lock: + # Update activity timestamp + self.last_activity = time.time() + + # Cancel any pending cleanup since connection is being reused + self.cancel_cleanup() + + # For range requests, we don't need to close the connection + # We can make a new request on the same session + if range_header: + logger.info(f"[{self.session_id}] Range request on existing connection: {range_header}") + # Close only the response stream, keep the session alive + if self.current_response: + logger.info(f"[{self.session_id}] Closing previous response stream (keeping connection alive)") + self.current_response.close() + self.current_response = None + + # Make new request (reuses connection if session exists) + response = self._establish_connection(range_header) + if response is None: + # Range not satisfiable - return None to indicate this + return None + + return self.current_response + + def cancel_cleanup(self): + """Cancel any pending cleanup - called when connection is reused""" + if self.cleanup_timer: + self.cleanup_timer.cancel() + self.cleanup_timer = None + logger.info(f"[{self.session_id}] Cancelled pending cleanup - connection being reused for new request") + + def increment_active_streams(self): + """Increment the count of active streams""" + with self.lock: + self.active_streams += 1 + logger.debug(f"[{self.session_id}] Active streams incremented to {self.active_streams}") + + def decrement_active_streams(self): + """Decrement the count of active streams""" + with self.lock: + if self.active_streams > 0: + self.active_streams -= 1 + logger.debug(f"[{self.session_id}] Active streams decremented to {self.active_streams}") + else: + logger.warning(f"[{self.session_id}] Attempted to decrement active streams when already at 0") + + def has_active_streams(self) -> bool: + """Check if connection has any active streams""" + with self.lock: + return self.active_streams > 0 + + def schedule_cleanup_if_not_streaming(self, delay_seconds: int = 10): + """Schedule cleanup only if no active streams""" + with self.lock: + if self.active_streams > 0: + logger.info(f"[{self.session_id}] Connection has {self.active_streams} active streams - NOT scheduling cleanup") + return False + + # No active streams, proceed with delayed cleanup + if self.cleanup_timer: + self.cleanup_timer.cancel() + + def delayed_cleanup(): + logger.info(f"[{self.session_id}] Delayed cleanup triggered - checking if connection is still needed") + # Use the singleton VODConnectionManager instance + manager = VODConnectionManager.get_instance() + manager.cleanup_persistent_connection(self.session_id) + + self.cleanup_timer = threading.Timer(delay_seconds, delayed_cleanup) + self.cleanup_timer.start() + logger.info(f"[{self.session_id}] Scheduled cleanup in {delay_seconds} seconds (connection not actively streaming)") + return True + + def get_headers(self): + """Get headers for response""" + return { + 'content_length': self.content_length, + 'content_type': self.content_type, + 'final_url': self.final_url + } + + def cleanup(self): + """Clean up connection resources""" + with self.lock: + # Cancel any pending cleanup timer + if self.cleanup_timer: + self.cleanup_timer.cancel() + self.cleanup_timer = None + logger.debug(f"[{self.session_id}] Cancelled cleanup 
timer during manual cleanup") + + # Clear active streams count + self.active_streams = 0 + + if self.current_response: + self.current_response.close() + self.current_response = None + if self.session: + self.session.close() + self.session = None + logger.info(f"[{self.session_id}] Persistent connection cleaned up") + + +class VODConnectionManager: + """Manages VOD connections using Redis for tracking""" + + _instance = None + _persistent_connections = {} # session_id -> PersistentVODConnection + + @classmethod + def get_instance(cls): + """Get the singleton instance of VODConnectionManager""" + if cls._instance is None: + cls._instance = cls() + return cls._instance + + def __init__(self): + self.redis_client = RedisClient.get_client() + self.connection_ttl = 3600 # 1 hour TTL for connections + self.session_ttl = 1800 # 30 minutes TTL for sessions + + def find_matching_idle_session(self, content_type: str, content_uuid: str, + client_ip: str, user_agent: str, + utc_start=None, utc_end=None, offset=None) -> Optional[str]: + """ + Find an existing session that matches content and client criteria with no active streams + + Args: + content_type: Type of content (movie, episode, series) + content_uuid: UUID of the content + client_ip: Client IP address + user_agent: Client user agent + utc_start: UTC start time for timeshift + utc_end: UTC end time for timeshift + offset: Offset in seconds + + Returns: + Session ID if matching idle session found, None otherwise + """ + if not self.redis_client: + return None + + try: + # Search for sessions with matching content + pattern = "vod_session:*" + cursor = 0 + matching_sessions = [] + + while True: + cursor, keys = self.redis_client.scan(cursor, match=pattern, count=100) + + for key in keys: + try: + session_data = self.redis_client.hgetall(key) + if not session_data: + continue + + # Extract session info + stored_content_type = session_data.get(b'content_type', b'').decode('utf-8') + stored_content_uuid = session_data.get(b'content_uuid', b'').decode('utf-8') + + # Check if content matches + if stored_content_type != content_type or stored_content_uuid != content_uuid: + continue + + # Extract session ID from key + session_id = key.decode('utf-8').replace('vod_session:', '') + + # Check if session has an active persistent connection + persistent_conn = self._persistent_connections.get(session_id) + if not persistent_conn: + # No persistent connection exists, skip + continue + + # Check if connection has no active streams + if persistent_conn.has_active_streams(): + logger.debug(f"[{session_id}] Session has active streams - skipping") + continue + + # Get stored client info for comparison + stored_client_ip = session_data.get(b'client_ip', b'').decode('utf-8') + stored_user_agent = session_data.get(b'user_agent', b'').decode('utf-8') + + # Check timeshift parameters match + stored_utc_start = session_data.get(b'utc_start', b'').decode('utf-8') + stored_utc_end = session_data.get(b'utc_end', b'').decode('utf-8') + stored_offset = session_data.get(b'offset', b'').decode('utf-8') + + current_utc_start = utc_start or "" + current_utc_end = utc_end or "" + current_offset = str(offset) if offset else "" + + # Calculate match score + score = 0 + match_reasons = [] + + # Content already matches (required) + score += 10 + match_reasons.append("content") + + # IP match (high priority) + if stored_client_ip and stored_client_ip == client_ip: + score += 5 + match_reasons.append("ip") + + # User-Agent match (medium priority) + if stored_user_agent and 
stored_user_agent == user_agent: + score += 3 + match_reasons.append("user-agent") + + # Timeshift parameters match (high priority for seeking) + if (stored_utc_start == current_utc_start and + stored_utc_end == current_utc_end and + stored_offset == current_offset): + score += 7 + match_reasons.append("timeshift") + + # Consider it a good match if we have at least content + one other criteria + if score >= 13: # content(10) + ip(5) or content(10) + user-agent(3) + something else + matching_sessions.append({ + 'session_id': session_id, + 'score': score, + 'reasons': match_reasons, + 'last_activity': float(session_data.get(b'last_activity', b'0').decode('utf-8')) + }) + + except Exception as e: + logger.debug(f"Error processing session key {key}: {e}") + continue + + if cursor == 0: + break + + # Sort by score (highest first), then by last activity (most recent first) + matching_sessions.sort(key=lambda x: (x['score'], x['last_activity']), reverse=True) + + if matching_sessions: + best_match = matching_sessions[0] + logger.info(f"Found matching idle session: {best_match['session_id']} " + f"(score: {best_match['score']}, reasons: {', '.join(best_match['reasons'])})") + return best_match['session_id'] + else: + logger.debug(f"No matching idle sessions found for {content_type} {content_uuid}") + return None + + except Exception as e: + logger.error(f"Error finding matching idle session: {e}") + return None + + def _get_connection_key(self, content_type: str, content_uuid: str, client_id: str) -> str: + """Get Redis key for a specific connection""" + return f"vod_proxy:connection:{content_type}:{content_uuid}:{client_id}" + + def _get_profile_connections_key(self, profile_id: int) -> str: + """Get Redis key for tracking connections per profile - STANDARDIZED with TS proxy""" + return f"profile_connections:{profile_id}" + + def _get_content_connections_key(self, content_type: str, content_uuid: str) -> str: + """Get Redis key for tracking connections per content""" + return f"vod_proxy:content:{content_type}:{content_uuid}:connections" + + def create_connection(self, content_type: str, content_uuid: str, content_name: str, + client_id: str, client_ip: str, user_agent: str, + m3u_profile: M3UAccountProfile) -> bool: + """ + Create a new VOD connection with profile limit checking + + Returns: + bool: True if connection was created, False if profile limit exceeded + """ + if not self.redis_client: + logger.error("Redis client not available for VOD connection tracking") + return False + + try: + # Check profile connection limits using standardized key + if not self._check_profile_limits(m3u_profile): + logger.warning(f"Profile {m3u_profile.name} connection limit exceeded") + return False + + connection_key = self._get_connection_key(content_type, content_uuid, client_id) + profile_connections_key = self._get_profile_connections_key(m3u_profile.id) + content_connections_key = self._get_content_connections_key(content_type, content_uuid) + + # Check if connection already exists to prevent duplicate counting + if self.redis_client.exists(connection_key): + logger.info(f"Connection already exists for {client_id} - {content_type} {content_name}") + # Update activity but don't increment profile counter + self.redis_client.hset(connection_key, "last_activity", str(time.time())) + return True + + # Connection data + connection_data = { + "content_type": content_type, + "content_uuid": content_uuid, + "content_name": content_name, + "client_id": client_id, + "client_ip": client_ip, + "user_agent": 
user_agent, + "m3u_profile_id": m3u_profile.id, + "m3u_profile_name": m3u_profile.name, + "connected_at": str(time.time()), + "last_activity": str(time.time()), + "bytes_sent": "0", + "position_seconds": "0", + "last_position_update": str(time.time()) + } + + # Use pipeline for atomic operations + pipe = self.redis_client.pipeline() + + # Store connection data + pipe.hset(connection_key, mapping=connection_data) + pipe.expire(connection_key, self.connection_ttl) + + # Increment profile connections using standardized method + pipe.incr(profile_connections_key) + + # Add to content connections set + pipe.sadd(content_connections_key, client_id) + pipe.expire(content_connections_key, self.connection_ttl) + + # Execute all operations + pipe.execute() + + logger.info(f"Created VOD connection: {client_id} for {content_type} {content_name}") + return True + + except Exception as e: + logger.error(f"Error creating VOD connection: {e}") + return False + + def _check_profile_limits(self, m3u_profile: M3UAccountProfile) -> bool: + """Check if profile has available connection slots""" + if m3u_profile.max_streams == 0: # Unlimited + return True + + try: + profile_connections_key = self._get_profile_connections_key(m3u_profile.id) + current_connections = int(self.redis_client.get(profile_connections_key) or 0) + + return current_connections < m3u_profile.max_streams + + except Exception as e: + logger.error(f"Error checking profile limits: {e}") + return False + + def update_connection_activity(self, content_type: str, content_uuid: str, + client_id: str, bytes_sent: int = 0, + position_seconds: int = 0) -> bool: + """Update connection activity""" + if not self.redis_client: + return False + + try: + connection_key = self._get_connection_key(content_type, content_uuid, client_id) + + update_data = { + "last_activity": str(time.time()) + } + + if bytes_sent > 0: + # Get current bytes and add to it + current_bytes = self.redis_client.hget(connection_key, "bytes_sent") + if current_bytes: + total_bytes = int(current_bytes.decode('utf-8')) + bytes_sent + else: + total_bytes = bytes_sent + update_data["bytes_sent"] = str(total_bytes) + + if position_seconds > 0: + update_data["position_seconds"] = str(position_seconds) + + # Update connection data + self.redis_client.hset(connection_key, mapping=update_data) + self.redis_client.expire(connection_key, self.connection_ttl) + + return True + + except Exception as e: + logger.error(f"Error updating connection activity: {e}") + return False + + def remove_connection(self, content_type: str, content_uuid: str, client_id: str) -> bool: + """Remove a VOD connection""" + if not self.redis_client: + return False + + try: + connection_key = self._get_connection_key(content_type, content_uuid, client_id) + + # Get connection data before removing + connection_data = self.redis_client.hgetall(connection_key) + if not connection_data: + return True # Already removed + + # Get profile ID for cleanup + profile_id = None + if b"m3u_profile_id" in connection_data: + try: + profile_id = int(connection_data[b"m3u_profile_id"].decode('utf-8')) + except ValueError: + pass + + # Use pipeline for atomic cleanup + pipe = self.redis_client.pipeline() + + # Remove connection data + pipe.delete(connection_key) + + # Decrement profile connections using standardized key + if profile_id: + profile_connections_key = self._get_profile_connections_key(profile_id) + current_count = int(self.redis_client.get(profile_connections_key) or 0) + if current_count > 0: + 
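                    # Descriptive note: decrement only while the shared counter is positive so
                    # profile_connections:<profile_id> cannot go negative if the same
                    # connection is removed more than once.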
pipe.decr(profile_connections_key) + + # Remove from content connections set + content_connections_key = self._get_content_connections_key(content_type, content_uuid) + pipe.srem(content_connections_key, client_id) + + # Execute cleanup + pipe.execute() + + logger.info(f"Removed VOD connection: {client_id}") + return True + + except Exception as e: + logger.error(f"Error removing connection: {e}") + return False + + def get_connection_info(self, content_type: str, content_uuid: str, client_id: str) -> Optional[Dict[str, Any]]: + """Get connection information""" + if not self.redis_client: + return None + + try: + connection_key = self._get_connection_key(content_type, content_uuid, client_id) + connection_data = self.redis_client.hgetall(connection_key) + + if not connection_data: + return None + + # Convert bytes to strings and parse numbers + info = {} + for key, value in connection_data.items(): + key_str = key.decode('utf-8') + value_str = value.decode('utf-8') + + # Parse numeric fields + if key_str in ['connected_at', 'last_activity']: + info[key_str] = float(value_str) + elif key_str in ['bytes_sent', 'position_seconds', 'm3u_profile_id']: + info[key_str] = int(value_str) + else: + info[key_str] = value_str + + return info + + except Exception as e: + logger.error(f"Error getting connection info: {e}") + return None + + def get_profile_connections(self, profile_id: int) -> int: + """Get current connection count for a profile using standardized key""" + if not self.redis_client: + return 0 + + try: + profile_connections_key = self._get_profile_connections_key(profile_id) + return int(self.redis_client.get(profile_connections_key) or 0) + + except Exception as e: + logger.error(f"Error getting profile connections: {e}") + return 0 + + def get_content_connections(self, content_type: str, content_uuid: str) -> int: + """Get current connection count for content""" + if not self.redis_client: + return 0 + + try: + content_connections_key = self._get_content_connections_key(content_type, content_uuid) + return self.redis_client.scard(content_connections_key) or 0 + + except Exception as e: + logger.error(f"Error getting content connections: {e}") + return 0 + + def cleanup_stale_connections(self, max_age_seconds: int = 3600): + """Clean up stale connections that haven't been active recently""" + if not self.redis_client: + return + + try: + pattern = "vod_proxy:connection:*" + cursor = 0 + cleaned = 0 + current_time = time.time() + + while True: + cursor, keys = self.redis_client.scan(cursor, match=pattern, count=100) + + for key in keys: + try: + key_str = key.decode('utf-8') + last_activity = self.redis_client.hget(key, "last_activity") + + if last_activity: + last_activity_time = float(last_activity.decode('utf-8')) + if current_time - last_activity_time > max_age_seconds: + # Extract info for cleanup + parts = key_str.split(':') + if len(parts) >= 5: + content_type = parts[2] + content_uuid = parts[3] + client_id = parts[4] + self.remove_connection(content_type, content_uuid, client_id) + cleaned += 1 + except Exception as e: + logger.error(f"Error processing key {key}: {e}") + + if cursor == 0: + break + + if cleaned > 0: + logger.info(f"Cleaned up {cleaned} stale VOD connections") + + except Exception as e: + logger.error(f"Error during connection cleanup: {e}") + + def stream_content(self, content_obj, stream_url, m3u_profile, client_ip, user_agent, request, + utc_start=None, utc_end=None, offset=None, range_header=None): + """ + Stream VOD content with connection tracking and 
timeshift support
+
+ Args:
+ content_obj: Movie or Episode object
+ stream_url: Final stream URL to proxy
+ m3u_profile: M3UAccountProfile instance
+ client_ip: Client IP address
+ user_agent: Client user agent
+ request: Django request object
+ utc_start: UTC start time for timeshift (e.g., '2023-01-01T12:00:00')
+ utc_end: UTC end time for timeshift
+ offset: Offset in seconds for seeking
+ range_header: HTTP Range header for partial content requests
+
+ Returns:
+ StreamingHttpResponse or HttpResponse with error
+ """
+
+ try:
+ # Generate unique client ID
+ client_id = f"vod_{int(time.time() * 1000)}_{random.randint(1000, 9999)}"
+
+ # Determine content type and get content info
+ if hasattr(content_obj, 'episodes'): # Series
+ content_type = 'series'
+ elif hasattr(content_obj, 'series'): # Episode
+ content_type = 'episode'
+ else: # Movie
+ content_type = 'movie'
+
+ content_uuid = str(content_obj.uuid)
+ content_name = getattr(content_obj, 'name', getattr(content_obj, 'title', 'Unknown'))
+
+ # Create connection tracking
+ connection_created = self.create_connection(
+ content_type=content_type,
+ content_uuid=content_uuid,
+ content_name=content_name,
+ client_id=client_id,
+ client_ip=client_ip,
+ user_agent=user_agent,
+ m3u_profile=m3u_profile
+ )
+
+ if not connection_created:
+ logger.error(f"Failed to create connection tracking for {content_type} {content_uuid}")
+ return HttpResponse("Connection limit exceeded", status=503)
+
+ # Modify stream URL for timeshift functionality
+ modified_stream_url = self._apply_timeshift_parameters(
+ stream_url, utc_start, utc_end, offset
+ )
+
+ logger.info(f"[{client_id}] Modified stream URL for timeshift: {modified_stream_url}")
+
+ # Create streaming generator with simplified header handling
+ upstream_response = None
+
+ def stream_generator():
+ nonlocal upstream_response
+ try:
+ logger.info(f"[{client_id}] Starting VOD stream for {content_type} {content_name}")
+
+ # Prepare request headers
+ headers = {}
+ if user_agent:
+ headers['User-Agent'] = user_agent
+
+ # Forward important headers
+ important_headers = [
+ 'authorization', 'x-forwarded-for', 'x-real-ip',
+ 'referer', 'origin', 'accept'
+ ]
+
+ for header_name in important_headers:
+ django_header = f'HTTP_{header_name.upper().replace("-", "_")}'
+ if hasattr(request, 'META') and django_header in request.META:
+ headers[header_name] = request.META[django_header]
+ logger.debug(f"[{client_id}] Forwarded header {header_name}")
+
+ # Add client IP
+ if client_ip:
+ headers['X-Forwarded-For'] = client_ip
+ headers['X-Real-IP'] = client_ip
+
+ # Add Range header if provided for seeking support
+ if range_header:
+ headers['Range'] = range_header
+ logger.info(f"[{client_id}] Added Range header: {range_header}")
+
+ # Make single request to upstream server with automatic redirect following
+ upstream_response = requests.get(modified_stream_url, headers=headers, stream=True, timeout=(10, 30), allow_redirects=True)
+ upstream_response.raise_for_status()
+
+ # Log upstream response info
+ logger.info(f"[{client_id}] Upstream response status: {upstream_response.status_code}")
+ logger.info(f"[{client_id}] Final URL after redirects: {upstream_response.url}")
+ logger.info(f"[{client_id}] Upstream content-type: {upstream_response.headers.get('content-type', 'unknown')}")
+ if 'content-length' in upstream_response.headers:
+ logger.info(f"[{client_id}] Upstream content-length: {upstream_response.headers['content-length']}")
+ if 'content-range' in upstream_response.headers:
+ logger.info(f"[{client_id}] Upstream content-range: {upstream_response.headers['content-range']}")
+
+ bytes_sent = 0
+ chunk_count = 0
+
+ for chunk in upstream_response.iter_content(chunk_size=8192):
+ if chunk:
+ yield chunk
+ bytes_sent += len(chunk)
+ chunk_count += 1
+
+ # Update connection activity every 100 chunks
+ if chunk_count % 100 == 0:
+ self.update_connection_activity(
+ content_type=content_type,
+ content_uuid=content_uuid,
+ client_id=client_id,
+ bytes_sent=len(chunk)
+ )
+
+ logger.info(f"[{client_id}] VOD stream completed: {bytes_sent} bytes sent")
+
+ except requests.RequestException as e:
+ logger.error(f"[{client_id}] Error streaming from source: {e}")
+ yield b"Error: Unable to stream content"
+ except Exception as e:
+ logger.error(f"[{client_id}] Error in stream generator: {e}")
+ finally:
+ # Clean up connection tracking
+ self.remove_connection(content_type, content_uuid, client_id)
+ if
upstream_response: + upstream_response.close() + + # Create streaming response with sensible defaults + response = StreamingHttpResponse( + streaming_content=stream_generator(), + content_type='video/mp4' + ) + + # Set status code based on request type + if range_header: + response.status_code = 206 + logger.info(f"[{client_id}] Set response status to 206 for range request") + else: + response.status_code = 200 + logger.info(f"[{client_id}] Set response status to 200 for full request") + + # Set headers that VLC and other players expect + response['Cache-Control'] = 'no-cache' + response['Pragma'] = 'no-cache' + response['X-Content-Type-Options'] = 'nosniff' + response['Connection'] = 'keep-alive' + response['Accept-Ranges'] = 'bytes' + + # Log the critical headers we're sending to the client + logger.info(f"[{client_id}] Response headers to client - Status: {response.status_code}, Accept-Ranges: {response.get('Accept-Ranges', 'MISSING')}") + if 'Content-Length' in response: + logger.info(f"[{client_id}] Content-Length: {response['Content-Length']}") + if 'Content-Range' in response: + logger.info(f"[{client_id}] Content-Range: {response['Content-Range']}") + if 'Content-Type' in response: + logger.info(f"[{client_id}] Content-Type: {response['Content-Type']}") + + # Critical: Log what VLC needs to see for seeking to work + if response.status_code == 200: + logger.info(f"[{client_id}] VLC SEEKING INFO: Full content response (200). VLC should see Accept-Ranges and Content-Length to enable seeking.") + elif response.status_code == 206: + logger.info(f"[{client_id}] VLC SEEKING INFO: Partial content response (206). This confirms seeking is working if VLC requested a range.") + + return response + + except Exception as e: + logger.error(f"Error in stream_content: {e}", exc_info=True) + return HttpResponse(f"Streaming error: {str(e)}", status=500) + + def stream_content_with_session(self, session_id, content_obj, stream_url, m3u_profile, client_ip, user_agent, request, + utc_start=None, utc_end=None, offset=None, range_header=None): + """ + Stream VOD content with persistent connection per session + + Maintains 1 open connection to provider per session that handles all range requests + dynamically based on client Range headers for seeking functionality. 
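+ If an idle session already exists for the same content and client, the request is attached to
+ that session instead of opening another provider connection.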
+ """ + + try: + # Use session_id as client_id for connection tracking + client_id = session_id + + # Determine content type and get content info + if hasattr(content_obj, 'episodes'): # Series + content_type = 'series' + elif hasattr(content_obj, 'series'): # Episode + content_type = 'episode' + else: # Movie + content_type = 'movie' + + content_uuid = str(content_obj.uuid) + content_name = getattr(content_obj, 'name', getattr(content_obj, 'title', 'Unknown')) + + # Check for existing connection or create new one + persistent_conn = self._persistent_connections.get(session_id) + + # Cancel any pending cleanup timer for this session regardless of new/existing + if persistent_conn: + persistent_conn.cancel_cleanup() + + # If no existing connection, try to find a matching idle session first + if not persistent_conn: + # Look for existing idle sessions that match content and client criteria + matching_session_id = self.find_matching_idle_session( + content_type, content_uuid, client_ip, user_agent, + utc_start, utc_end, offset + ) + + if matching_session_id: + logger.info(f"[{client_id}] Found matching idle session {matching_session_id} - redirecting client") + + # Update the session activity and client info + session_key = f"vod_session:{matching_session_id}" + if self.redis_client: + update_data = { + "last_activity": str(time.time()), + "client_ip": client_ip, # Update in case IP changed + "user_agent": user_agent # Update in case user agent changed + } + self.redis_client.hset(session_key, mapping=update_data) + self.redis_client.expire(session_key, self.session_ttl) + + # Get the existing persistent connection + persistent_conn = self._persistent_connections.get(matching_session_id) + if persistent_conn: + # Update the session_id to use the matching one + client_id = matching_session_id + session_id = matching_session_id + logger.info(f"[{client_id}] Successfully redirected to existing idle session") + else: + logger.warning(f"[{client_id}] Matching session found but no persistent connection - will create new") + + if not persistent_conn: + logger.info(f"[{client_id}] Creating NEW persistent connection for {content_type} {content_name}") + + # Create session in Redis for tracking + session_info = { + "content_type": content_type, + "content_uuid": content_uuid, + "content_name": content_name, + "created_at": str(time.time()), + "last_activity": str(time.time()), + "profile_id": str(m3u_profile.id), + "connection_counted": "True", + "client_ip": client_ip, + "user_agent": user_agent, + "utc_start": utc_start or "", + "utc_end": utc_end or "", + "offset": str(offset) if offset else "" + } + + session_key = f"vod_session:{session_id}" + if self.redis_client: + self.redis_client.hset(session_key, mapping=session_info) + self.redis_client.expire(session_key, self.session_ttl) + + logger.info(f"[{client_id}] Created new session: {session_info}") + + # Apply timeshift parameters to URL + modified_stream_url = self._apply_timeshift_parameters(stream_url, utc_start, utc_end, offset) + logger.info(f"[{client_id}] Modified stream URL for timeshift: {modified_stream_url}") + + # Prepare headers + headers = { + 'User-Agent': user_agent or 'VLC/3.0.21 LibVLC/3.0.21', + 'Accept': '*/*', + 'Connection': 'keep-alive' + } + + # Add any authentication headers from profile + if hasattr(m3u_profile, 'auth_headers') and m3u_profile.auth_headers: + headers.update(m3u_profile.auth_headers) + + # Create persistent connection + persistent_conn = PersistentVODConnection(session_id, modified_stream_url, headers) + 
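# Cache the connection in this worker's in-memory map so later requests for the same session reuse the open provider connection +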
self._persistent_connections[session_id] = persistent_conn + + # Track connection in profile + self.create_connection(content_type, content_uuid, content_name, client_id, client_ip, user_agent, m3u_profile) + else: + logger.info(f"[{client_id}] Using EXISTING persistent connection for {content_type} {content_name}") + # Update session activity + session_key = f"vod_session:{session_id}" + if self.redis_client: + self.redis_client.hset(session_key, "last_activity", str(time.time())) + self.redis_client.expire(session_key, self.session_ttl) + + logger.info(f"[{client_id}] Reusing existing session - no new connection created") + + # Log the incoming Range header for debugging + if range_header: + logger.info(f"[{client_id}] *** CLIENT RANGE REQUEST: {range_header} ***") + + # Parse range for seeking detection + try: + if 'bytes=' in range_header: + range_part = range_header.replace('bytes=', '') + if '-' in range_part: + start_byte, end_byte = range_part.split('-', 1) + if start_byte and int(start_byte) > 0: + start_pos_mb = int(start_byte) / (1024 * 1024) + logger.info(f"[{client_id}] *** VLC SEEKING TO: {start_pos_mb:.1f} MB ***") + else: + logger.info(f"[{client_id}] Range request from start") + except Exception as e: + logger.warning(f"[{client_id}] Could not parse range header: {e}") + else: + logger.info(f"[{client_id}] Full content request (no Range header)") + + # Get stream from persistent connection with current range + upstream_response = persistent_conn.get_stream(range_header) + + # Handle range not satisfiable + if upstream_response is None: + logger.warning(f"[{client_id}] Range not satisfiable - returning 416 error") + return HttpResponse( + "Requested Range Not Satisfiable", + status=416, + headers={ + 'Content-Range': f'bytes */{persistent_conn.content_length}' if persistent_conn.content_length else 'bytes */*' + } + ) + + connection_headers = persistent_conn.get_headers() + + # Ensure any pending cleanup is cancelled before starting stream + persistent_conn.cancel_cleanup() + + # Create streaming generator + def stream_generator(): + decremented = False # Track if we've already decremented the counter + + try: + logger.info(f"[{client_id}] Starting stream from persistent connection") + + # Increment active streams counter + persistent_conn.increment_active_streams() + + bytes_sent = 0 + chunk_count = 0 + + for chunk in upstream_response.iter_content(chunk_size=8192): + if chunk: + yield chunk + bytes_sent += len(chunk) + chunk_count += 1 + + # Update connection activity every 100 chunks + if chunk_count % 100 == 0: + self.update_connection_activity( + content_type=content_type, + content_uuid=content_uuid, + client_id=client_id, + bytes_sent=len(chunk) + ) + + logger.info(f"[{client_id}] Persistent stream completed normally: {bytes_sent} bytes sent") + # Stream completed normally - decrement counter + persistent_conn.decrement_active_streams() + decremented = True + + except GeneratorExit: + # Client disconnected - decrement counter and schedule cleanup only if no active streams + logger.info(f"[{client_id}] Client disconnected - checking if cleanup should be scheduled") + persistent_conn.decrement_active_streams() + decremented = True + scheduled = persistent_conn.schedule_cleanup_if_not_streaming(delay_seconds=10) + if not scheduled: + logger.info(f"[{client_id}] Cleanup not scheduled - connection still has active streams") + + except Exception as e: + logger.error(f"[{client_id}] Error in persistent stream: {e}") + # On error, decrement counter and cleanup the connection 
as it may be corrupted + persistent_conn.decrement_active_streams() + decremented = True + logger.info(f"[{client_id}] Cleaning up persistent connection due to error") + self.cleanup_persistent_connection(session_id) + yield b"Error: Stream interrupted" + + finally: + # Safety net: only decrement if we haven't already + if not decremented: + logger.warning(f"[{client_id}] Stream generator exited without decrement - applying safety net") + persistent_conn.decrement_active_streams() + # This runs regardless of how the generator exits + logger.debug(f"[{client_id}] Stream generator finished") + + # Create streaming response + response = StreamingHttpResponse( + streaming_content=stream_generator(), + content_type=connection_headers['content_type'] + ) + + # Set status code based on range request + if range_header: + response.status_code = 206 + logger.info(f"[{client_id}] Set response status to 206 for range request") + else: + response.status_code = 200 + logger.info(f"[{client_id}] Set response status to 200 for full request") + + # Set headers that VLC expects + response['Cache-Control'] = 'no-cache' + response['Pragma'] = 'no-cache' + response['X-Content-Type-Options'] = 'nosniff' + response['Connection'] = 'keep-alive' + response['Accept-Ranges'] = 'bytes' + + # CRITICAL: Forward Content-Length from persistent connection + if connection_headers['content_length']: + response['Content-Length'] = connection_headers['content_length'] + logger.info(f"[{client_id}] *** FORWARDED Content-Length: {connection_headers['content_length']} *** (VLC seeking enabled)") + else: + logger.warning(f"[{client_id}] *** NO Content-Length available *** (VLC seeking may not work)") + + # Handle range requests - set Content-Range for partial responses + if range_header and connection_headers['content_length']: + try: + if 'bytes=' in range_header: + range_part = range_header.replace('bytes=', '') + if '-' in range_part: + start_byte, end_byte = range_part.split('-', 1) + start = int(start_byte) if start_byte else 0 + end = int(end_byte) if end_byte else int(connection_headers['content_length']) - 1 + total_size = int(connection_headers['content_length']) + + content_range = f"bytes {start}-{end}/{total_size}" + response['Content-Range'] = content_range + logger.info(f"[{client_id}] Set Content-Range: {content_range}") + except Exception as e: + logger.warning(f"[{client_id}] Could not set Content-Range: {e}") + + # Log response headers + logger.info(f"[{client_id}] PERSISTENT Response - Status: {response.status_code}, Content-Length: {response.get('Content-Length', 'MISSING')}") + if 'Content-Range' in response: + logger.info(f"[{client_id}] PERSISTENT Content-Range: {response['Content-Range']}") + + # Log VLC seeking status + if response.status_code == 200: + if connection_headers['content_length']: + logger.info(f"[{client_id}] ✅ PERSISTENT VLC SEEKING: Full response with Content-Length - seeking should work!") + else: + logger.info(f"[{client_id}] ❌ PERSISTENT VLC SEEKING: Full response but no Content-Length - seeking won't work!") + elif response.status_code == 206: + logger.info(f"[{client_id}] ✅ PERSISTENT VLC SEEKING: Partial response - seeking is working!") + + return response + + except Exception as e: + logger.error(f"Error in persistent stream_content_with_session: {e}", exc_info=True) + # Cleanup persistent connection on error + if session_id in self._persistent_connections: + self._persistent_connections[session_id].cleanup() + del self._persistent_connections[session_id] + return 
HttpResponse(f"Streaming error: {str(e)}", status=500) + + def _apply_timeshift_parameters(self, original_url, utc_start=None, utc_end=None, offset=None): + """ + Apply timeshift parameters to the stream URL + + Args: + original_url: Original stream URL + utc_start: UTC start time (ISO format string) + utc_end: UTC end time (ISO format string) + offset: Offset in seconds + + Returns: + Modified URL with timeshift parameters + """ + try: + from urllib.parse import urlparse, parse_qs, urlencode, urlunparse + + parsed_url = urlparse(original_url) + query_params = parse_qs(parsed_url.query) + + logger.debug(f"Original URL: {original_url}") + logger.debug(f"Original query params: {query_params}") + + # Add timeshift parameters if provided + if utc_start: + # Support both utc_start and start parameter names + query_params['utc_start'] = [utc_start] + query_params['start'] = [utc_start] # Some providers use 'start' + logger.info(f"Added utc_start/start parameter: {utc_start}") + + if utc_end: + # Support both utc_end and end parameter names + query_params['utc_end'] = [utc_end] + query_params['end'] = [utc_end] # Some providers use 'end' + logger.info(f"Added utc_end/end parameter: {utc_end}") + + if offset: + try: + # Ensure offset is a valid number + offset_seconds = int(offset) + # Support multiple offset parameter names + query_params['offset'] = [str(offset_seconds)] + query_params['seek'] = [str(offset_seconds)] # Some providers use 'seek' + query_params['t'] = [str(offset_seconds)] # Some providers use 't' + logger.info(f"Added offset/seek/t parameter: {offset_seconds} seconds") + except (ValueError, TypeError): + logger.warning(f"Invalid offset value: {offset}, skipping") + + # Handle special URL patterns for VOD providers + # Some providers embed timeshift info in the path rather than query params + path = parsed_url.path + + # Check if this looks like an IPTV catchup URL pattern + catchup_pattern = r'/(\d{4}-\d{2}-\d{2})/(\d{2}-\d{2}-\d{2})' + if utc_start and re.search(catchup_pattern, path): + # Convert ISO format to provider-specific format if needed + try: + from datetime import datetime + start_dt = datetime.fromisoformat(utc_start.replace('Z', '+00:00')) + date_part = start_dt.strftime('%Y-%m-%d') + time_part = start_dt.strftime('%H-%M-%S') + + # Replace existing date/time in path + path = re.sub(catchup_pattern, f'/{date_part}/{time_part}', path) + logger.info(f"Modified path for catchup: {path}") + except Exception as e: + logger.warning(f"Could not parse timeshift date: {e}") + + # Reconstruct URL with new parameters + new_query = urlencode(query_params, doseq=True) + modified_url = urlunparse(( + parsed_url.scheme, + parsed_url.netloc, + path, # Use potentially modified path + parsed_url.params, + new_query, + parsed_url.fragment + )) + + logger.info(f"Modified URL: {modified_url}") + return modified_url + + except Exception as e: + logger.error(f"Error applying timeshift parameters: {e}") + return original_url + + def cleanup_persistent_connection(self, session_id: str): + """Clean up a specific persistent connection""" + if session_id in self._persistent_connections: + logger.info(f"[{session_id}] Cleaning up persistent connection") + self._persistent_connections[session_id].cleanup() + del self._persistent_connections[session_id] + + # Clean up ALL Redis keys associated with this session + session_key = f"vod_session:{session_id}" + if self.redis_client: + try: + session_data = self.redis_client.hgetall(session_key) + if session_data: + # Get session details for connection 
cleanup + content_type = session_data.get(b'content_type', b'').decode('utf-8') + content_uuid = session_data.get(b'content_uuid', b'').decode('utf-8') + profile_id = session_data.get(b'profile_id') + + # Generate client_id from session_id (matches what's used during streaming) + client_id = session_id + + # Remove individual connection tracking keys created during streaming + if content_type and content_uuid: + logger.info(f"[{session_id}] Cleaning up connection tracking keys") + self.remove_connection(content_type, content_uuid, client_id) + + # Remove from profile connections if counted (additional safety check) + if session_data.get(b'connection_counted') == b'True' and profile_id: + profile_key = self._get_profile_connections_key(int(profile_id.decode('utf-8'))) + current_count = int(self.redis_client.get(profile_key) or 0) + if current_count > 0: + self.redis_client.decr(profile_key) + logger.info(f"[{session_id}] Decremented profile {profile_id.decode('utf-8')} connections") + + # Remove session tracking key + self.redis_client.delete(session_key) + logger.info(f"[{session_id}] Removed session tracking") + + # Clean up any additional session-related keys (pattern cleanup) + try: + # Look for any other keys that might be related to this session + pattern = f"*{session_id}*" + cursor = 0 + session_related_keys = [] + while True: + cursor, keys = self.redis_client.scan(cursor, match=pattern, count=100) + session_related_keys.extend(keys) + if cursor == 0: + break + + if session_related_keys: + # Filter out keys we already deleted + remaining_keys = [k for k in session_related_keys if k.decode('utf-8') != session_key] + if remaining_keys: + self.redis_client.delete(*remaining_keys) + logger.info(f"[{session_id}] Cleaned up {len(remaining_keys)} additional session-related keys") + except Exception as scan_error: + logger.warning(f"[{session_id}] Error during pattern cleanup: {scan_error}") + + except Exception as e: + logger.error(f"[{session_id}] Error cleaning up session: {e}") + + def cleanup_stale_persistent_connections(self, max_age_seconds: int = 1800): + """Clean up stale persistent connections that haven't been used recently""" + current_time = time.time() + stale_sessions = [] + + for session_id, conn in self._persistent_connections.items(): + try: + # Check connection's last activity time first + if hasattr(conn, 'last_activity'): + time_since_last_activity = current_time - conn.last_activity + if time_since_last_activity > max_age_seconds: + logger.info(f"[{session_id}] Connection inactive for {time_since_last_activity:.1f}s (max: {max_age_seconds}s)") + stale_sessions.append(session_id) + continue + + # Fallback to Redis session data if connection doesn't have last_activity + session_key = f"vod_session:{session_id}" + if self.redis_client: + session_data = self.redis_client.hgetall(session_key) + if session_data: + created_at = float(session_data.get(b'created_at', b'0').decode('utf-8')) + if current_time - created_at > max_age_seconds: + logger.info(f"[{session_id}] Session older than {max_age_seconds}s") + stale_sessions.append(session_id) + else: + # Session data missing, connection is stale + logger.info(f"[{session_id}] Session data missing from Redis") + stale_sessions.append(session_id) + + except Exception as e: + logger.error(f"[{session_id}] Error checking session age: {e}") + stale_sessions.append(session_id) + + # Clean up stale connections + for session_id in stale_sessions: + logger.info(f"[{session_id}] Cleaning up stale persistent connection") + 
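# cleanup_persistent_connection also removes the session's Redis keys and releases the profile connection slot +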
self.cleanup_persistent_connection(session_id) + + if stale_sessions: + logger.info(f"Cleaned up {len(stale_sessions)} stale persistent connections") + else: + logger.debug(f"No stale persistent connections found (checked {len(self._persistent_connections)} connections)") + + +# Global instance +_connection_manager = None + +def get_connection_manager() -> VODConnectionManager: + """Get the global VOD connection manager instance""" + global _connection_manager + if _connection_manager is None: + _connection_manager = VODConnectionManager() + return _connection_manager diff --git a/apps/proxy/vod_proxy/multi_worker_connection_manager.py b/apps/proxy/vod_proxy/multi_worker_connection_manager.py new file mode 100644 index 00000000..1534f761 --- /dev/null +++ b/apps/proxy/vod_proxy/multi_worker_connection_manager.py @@ -0,0 +1,1408 @@ +""" +Enhanced VOD Connection Manager with Redis-based connection sharing for multi-worker environments +""" + +import time +import json +import logging +import threading +import random +import re +import requests +import pickle +import base64 +import os +import socket +import mimetypes +from urllib.parse import urlparse +from typing import Optional, Dict, Any +from django.http import StreamingHttpResponse, HttpResponse +from core.utils import RedisClient +from apps.vod.models import Movie, Episode +from apps.m3u.models import M3UAccountProfile + +logger = logging.getLogger("vod_proxy") + + +def get_vod_client_stop_key(client_id): + """Get the Redis key for signaling a VOD client to stop""" + return f"vod_proxy:client:{client_id}:stop" + + +def infer_content_type_from_url(url: str) -> Optional[str]: + """ + Infer MIME type from file extension in URL + + Args: + url: The stream URL + + Returns: + MIME type string or None if cannot be determined + """ + try: + parsed_url = urlparse(url) + path = parsed_url.path + + # Extract file extension + _, ext = os.path.splitext(path) + ext = ext.lower() + + # Common video format mappings + video_mime_types = { + '.mp4': 'video/mp4', + '.mkv': 'video/x-matroska', + '.avi': 'video/x-msvideo', + '.mov': 'video/quicktime', + '.wmv': 'video/x-ms-wmv', + '.flv': 'video/x-flv', + '.webm': 'video/webm', + '.m4v': 'video/x-m4v', + '.3gp': 'video/3gpp', + '.ts': 'video/mp2t', + '.m3u8': 'application/x-mpegURL', + '.mpg': 'video/mpeg', + '.mpeg': 'video/mpeg', + } + + if ext in video_mime_types: + logger.debug(f"Inferred content type '{video_mime_types[ext]}' from extension '{ext}' in URL: {url}") + return video_mime_types[ext] + + # Fallback to mimetypes module + mime_type, _ = mimetypes.guess_type(path) + if mime_type and mime_type.startswith('video/'): + logger.debug(f"Inferred content type '{mime_type}' using mimetypes for URL: {url}") + return mime_type + + logger.debug(f"Could not infer content type from URL: {url}") + return None + + except Exception as e: + logger.warning(f"Error inferring content type from URL '{url}': {e}") + return None + + +class SerializableConnectionState: + """Serializable connection state that can be stored in Redis""" + + def __init__(self, session_id: str, stream_url: str, headers: dict, + content_length: str = None, content_type: str = None, + final_url: str = None, m3u_profile_id: int = None, + # Session metadata fields (previously stored in vod_session key) + content_obj_type: str = None, content_uuid: str = None, + content_name: str = None, client_ip: str = None, + client_user_agent: str = None, utc_start: str = None, + utc_end: str = None, offset: str = None, + worker_id: str = None, 
connection_type: str = "redis_backed"): + self.session_id = session_id + self.stream_url = stream_url + self.headers = headers + self.content_length = content_length + self.content_type = content_type + self.final_url = final_url + self.m3u_profile_id = m3u_profile_id # Store M3U profile ID for connection counting + self.last_activity = time.time() + self.request_count = 0 + self.active_streams = 0 + + # Session metadata (consolidated from vod_session key) + self.content_obj_type = content_obj_type + self.content_uuid = content_uuid + self.content_name = content_name + self.client_ip = client_ip + self.client_user_agent = client_user_agent + self.utc_start = utc_start or "" + self.utc_end = utc_end or "" + self.offset = offset or "" + self.worker_id = worker_id + self.connection_type = connection_type + self.created_at = time.time() + + # Additional tracking fields + self.bytes_sent = 0 + self.position_seconds = 0 + + # Range/seek tracking for position calculation + self.last_seek_byte = 0 + self.last_seek_percentage = 0.0 + self.total_content_size = 0 + self.last_seek_timestamp = 0.0 + + def to_dict(self): + """Convert to dictionary for Redis storage""" + return { + 'session_id': self.session_id or '', + 'stream_url': self.stream_url or '', + 'headers': json.dumps(self.headers or {}), + 'content_length': str(self.content_length) if self.content_length is not None else '', + 'content_type': self.content_type or '', + 'final_url': self.final_url or '', + 'm3u_profile_id': str(self.m3u_profile_id) if self.m3u_profile_id is not None else '', + 'last_activity': str(self.last_activity), + 'request_count': str(self.request_count), + 'active_streams': str(self.active_streams), + # Session metadata + 'content_obj_type': self.content_obj_type or '', + 'content_uuid': self.content_uuid or '', + 'content_name': self.content_name or '', + 'client_ip': self.client_ip or '', + 'client_user_agent': self.client_user_agent or '', + 'utc_start': self.utc_start or '', + 'utc_end': self.utc_end or '', + 'offset': self.offset or '', + 'worker_id': self.worker_id or '', + 'connection_type': self.connection_type or 'redis_backed', + 'created_at': str(self.created_at), + # Additional tracking fields + 'bytes_sent': str(self.bytes_sent), + 'position_seconds': str(self.position_seconds), + # Range/seek tracking + 'last_seek_byte': str(self.last_seek_byte), + 'last_seek_percentage': str(self.last_seek_percentage), + 'total_content_size': str(self.total_content_size), + 'last_seek_timestamp': str(self.last_seek_timestamp) + } + + @classmethod + def from_dict(cls, data: dict): + """Create from dictionary loaded from Redis""" + obj = cls( + session_id=data['session_id'], + stream_url=data['stream_url'], + headers=json.loads(data['headers']) if data['headers'] else {}, + content_length=data.get('content_length') if data.get('content_length') else None, + content_type=data.get('content_type') or None, + final_url=data.get('final_url') if data.get('final_url') else None, + m3u_profile_id=int(data.get('m3u_profile_id')) if data.get('m3u_profile_id') else None, + # Session metadata + content_obj_type=data.get('content_obj_type') or None, + content_uuid=data.get('content_uuid') or None, + content_name=data.get('content_name') or None, + client_ip=data.get('client_ip') or None, + client_user_agent=data.get('client_user_agent') or data.get('user_agent') or None, + utc_start=data.get('utc_start') or '', + utc_end=data.get('utc_end') or '', + offset=data.get('offset') or '', + worker_id=data.get('worker_id') or None, + 
connection_type=data.get('connection_type', 'redis_backed') + ) + obj.last_activity = float(data.get('last_activity', time.time())) + obj.request_count = int(data.get('request_count', 0)) + obj.active_streams = int(data.get('active_streams', 0)) + obj.created_at = float(data.get('created_at', time.time())) + # Additional tracking fields + obj.bytes_sent = int(data.get('bytes_sent', 0)) + obj.position_seconds = int(data.get('position_seconds', 0)) + # Range/seek tracking + obj.last_seek_byte = int(data.get('last_seek_byte', 0)) + obj.last_seek_percentage = float(data.get('last_seek_percentage', 0.0)) + obj.total_content_size = int(data.get('total_content_size', 0)) + obj.last_seek_timestamp = float(data.get('last_seek_timestamp', 0.0)) + return obj + + +class RedisBackedVODConnection: + """Redis-backed VOD connection that can be accessed from any worker""" + + def __init__(self, session_id: str, redis_client=None): + self.session_id = session_id + self.redis_client = redis_client or RedisClient.get_client() + self.connection_key = f"vod_persistent_connection:{session_id}" + self.lock_key = f"vod_connection_lock:{session_id}" + self.local_session = None # Local requests session + self.local_response = None # Local current response + + def _get_connection_state(self) -> Optional[SerializableConnectionState]: + """Get connection state from Redis""" + if not self.redis_client: + return None + + try: + data = self.redis_client.hgetall(self.connection_key) + if not data: + return None + + # Convert bytes keys/values to strings if needed + if isinstance(list(data.keys())[0], bytes): + data = {k.decode('utf-8'): v.decode('utf-8') for k, v in data.items()} + + return SerializableConnectionState.from_dict(data) + except Exception as e: + logger.error(f"[{self.session_id}] Error getting connection state from Redis: {e}") + return None + + def _save_connection_state(self, state: SerializableConnectionState): + """Save connection state to Redis""" + if not self.redis_client: + return False + + try: + data = state.to_dict() + # Log the data being saved for debugging + logger.trace(f"[{self.session_id}] Saving connection state: {data}") + + # Verify all values are valid for Redis + for key, value in data.items(): + if value is None: + logger.error(f"[{self.session_id}] None value found for key '{key}' - this should not happen") + return False + + self.redis_client.hset(self.connection_key, mapping=data) + self.redis_client.expire(self.connection_key, 3600) # 1 hour TTL + return True + except Exception as e: + logger.error(f"[{self.session_id}] Error saving connection state to Redis: {e}") + return False + + def _acquire_lock(self, timeout: int = 10) -> bool: + """Acquire distributed lock for connection operations""" + if not self.redis_client: + return False + + try: + return self.redis_client.set(self.lock_key, "locked", nx=True, ex=timeout) + except Exception as e: + logger.error(f"[{self.session_id}] Error acquiring lock: {e}") + return False + + def _release_lock(self): + """Release distributed lock""" + if not self.redis_client: + return + + try: + self.redis_client.delete(self.lock_key) + except Exception as e: + logger.error(f"[{self.session_id}] Error releasing lock: {e}") + + def create_connection(self, stream_url: str, headers: dict, m3u_profile_id: int = None, + # Session metadata (consolidated from vod_session key) + content_obj_type: str = None, content_uuid: str = None, + content_name: str = None, client_ip: str = None, + client_user_agent: str = None, utc_start: str = None, + utc_end: str = 
None, offset: str = None, + worker_id: str = None) -> bool: + """Create a new connection state in Redis with consolidated session metadata""" + if not self._acquire_lock(): + logger.warning(f"[{self.session_id}] Could not acquire lock for connection creation") + return False + + try: + # Check if connection already exists + existing_state = self._get_connection_state() + if existing_state: + logger.info(f"[{self.session_id}] Connection already exists in Redis") + return True + + # Create new connection state with consolidated session metadata + state = SerializableConnectionState( + session_id=self.session_id, + stream_url=stream_url, + headers=headers, + m3u_profile_id=m3u_profile_id, + # Session metadata + content_obj_type=content_obj_type, + content_uuid=content_uuid, + content_name=content_name, + client_ip=client_ip, + client_user_agent=client_user_agent, + utc_start=utc_start, + utc_end=utc_end, + offset=offset, + worker_id=worker_id + ) + success = self._save_connection_state(state) + + if success: + logger.info(f"[{self.session_id}] Created new connection state in Redis with consolidated session metadata") + + return success + finally: + self._release_lock() + + def get_stream(self, range_header: str = None): + """Get stream with optional range header - works across workers""" + # Get connection state from Redis + state = self._get_connection_state() + if not state: + logger.error(f"[{self.session_id}] No connection state found in Redis") + return None + + # Update activity and increment request count + state.last_activity = time.time() + state.request_count += 1 + + try: + # Create local session if needed + if not self.local_session: + self.local_session = requests.Session() + + # Prepare headers + headers = state.headers.copy() + if range_header: + # Validate range against content length if available + if state.content_length: + validated_range = self._validate_range_header(range_header, int(state.content_length)) + if validated_range is None: + logger.warning(f"[{self.session_id}] Range not satisfiable: {range_header}") + return None + range_header = validated_range + + headers['Range'] = range_header + logger.info(f"[{self.session_id}] Setting Range header: {range_header}") + + # Use final URL if available, otherwise original URL + target_url = state.final_url if state.final_url else state.stream_url + allow_redirects = not state.final_url # Only follow redirects if we don't have final URL + + logger.info(f"[{self.session_id}] Making request #{state.request_count} to {'final' if state.final_url else 'original'} URL") + + # Make request (10s connect, 10s read timeout - keeps lock time reasonable if client disconnects) + response = self.local_session.get( + target_url, + headers=headers, + stream=True, + timeout=(10, 10), + allow_redirects=allow_redirects + ) + response.raise_for_status() + + # Update state with response info on first request + if state.request_count == 1: + if not state.content_length: + # Try to get full file size from Content-Range header first (for range requests) + content_range = response.headers.get('content-range') + if content_range and '/' in content_range: + try: + # Parse "bytes 0-1023/12653476926" to get total size + total_size = content_range.split('/')[-1] + if total_size.isdigit(): + state.content_length = total_size + logger.debug(f"[{self.session_id}] Got full file size from Content-Range: {total_size}") + else: + # Fallback to Content-Length for partial size + state.content_length = response.headers.get('content-length') + except Exception as 
e: + logger.warning(f"[{self.session_id}] Error parsing Content-Range: {e}") + state.content_length = response.headers.get('content-length') + else: + # No Content-Range, use Content-Length (for non-range requests) + state.content_length = response.headers.get('content-length') + + logger.debug(f"[{self.session_id}] Response headers received: {dict(response.headers)}") + + if not state.content_type: # This will be True for None, '', or any falsy value + # Get content type from provider response headers + provider_content_type = (response.headers.get('content-type') or + response.headers.get('Content-Type') or + response.headers.get('CONTENT-TYPE')) + + if provider_content_type: + logger.debug(f"[{self.session_id}] Using provider Content-Type: '{provider_content_type}'") + state.content_type = provider_content_type + else: + # Provider didn't send Content-Type, infer from URL extension + inferred_content_type = infer_content_type_from_url(state.stream_url) + if inferred_content_type: + logger.info(f"[{self.session_id}] Provider missing Content-Type, inferred from URL: '{inferred_content_type}'") + state.content_type = inferred_content_type + else: + logger.debug(f"[{self.session_id}] No Content-Type from provider and could not infer from URL, using default: 'video/mp4'") + state.content_type = 'video/mp4' + else: + logger.debug(f"[{self.session_id}] Content-Type already set in state: {state.content_type}") + if not state.final_url: + state.final_url = response.url + + logger.info(f"[{self.session_id}] Updated connection state: length={state.content_length}, type={state.content_type}") + + # Save updated state + self._save_connection_state(state) + + self.local_response = response + return response + + except Exception as e: + logger.error(f"[{self.session_id}] Error establishing connection: {e}") + self.cleanup() + raise + + def _validate_range_header(self, range_header: str, content_length: int): + """Validate range header against content length""" + try: + if not range_header or not range_header.startswith('bytes='): + return range_header + + range_part = range_header.replace('bytes=', '') + if '-' not in range_part: + return range_header + + start_str, end_str = range_part.split('-', 1) + + # Parse start byte + if start_str: + start_byte = int(start_str) + if start_byte >= content_length: + return None # Not satisfiable + else: + start_byte = 0 + + # Parse end byte + if end_str: + end_byte = int(end_str) + if end_byte >= content_length: + end_byte = content_length - 1 + else: + end_byte = content_length - 1 + + # Ensure start <= end + if start_byte > end_byte: + return None + + return f"bytes={start_byte}-{end_byte}" + + except (ValueError, IndexError) as e: + logger.warning(f"[{self.session_id}] Could not validate range header {range_header}: {e}") + return range_header + + def increment_active_streams(self): + """Increment active streams count in Redis""" + if not self._acquire_lock(): + return False + + try: + state = self._get_connection_state() + if state: + state.active_streams += 1 + state.last_activity = time.time() + self._save_connection_state(state) + logger.debug(f"[{self.session_id}] Active streams incremented to {state.active_streams}") + return True + return False + finally: + self._release_lock() + + def decrement_active_streams(self): + """Decrement active streams count in Redis""" + if not self._acquire_lock(): + return False + + try: + state = self._get_connection_state() + if state and state.active_streams > 0: + state.active_streams -= 1 + state.last_activity = 
time.time() + self._save_connection_state(state) + logger.debug(f"[{self.session_id}] Active streams decremented to {state.active_streams}") + return True + return False + finally: + self._release_lock() + + def has_active_streams(self) -> bool: + """Check if connection has any active streams""" + state = self._get_connection_state() + return state.active_streams > 0 if state else False + + def get_headers(self): + """Get headers for response""" + state = self._get_connection_state() + if state: + return { + 'content_length': state.content_length, + 'content_type': state.content_type or 'video/mp4', + 'final_url': state.final_url + } + return {} + + def get_session_metadata(self): + """Get session metadata from consolidated connection state""" + state = self._get_connection_state() + if state: + return { + 'content_obj_type': state.content_obj_type, + 'content_uuid': state.content_uuid, + 'content_name': state.content_name, + 'client_ip': state.client_ip, + 'client_user_agent': state.client_user_agent, + 'utc_start': state.utc_start, + 'utc_end': state.utc_end, + 'offset': state.offset, + 'worker_id': state.worker_id, + 'connection_type': state.connection_type, + 'created_at': state.created_at, + 'last_activity': state.last_activity, + 'm3u_profile_id': state.m3u_profile_id, + 'bytes_sent': state.bytes_sent, + 'position_seconds': state.position_seconds, + 'active_streams': state.active_streams, + 'request_count': state.request_count, + # Range/seek tracking + 'last_seek_byte': state.last_seek_byte, + 'last_seek_percentage': state.last_seek_percentage, + 'total_content_size': state.total_content_size, + 'last_seek_timestamp': state.last_seek_timestamp + } + return {} + + def cleanup(self, connection_manager=None, current_worker_id=None): + """Smart cleanup based on worker ownership and active streams""" + # Always clean up local resources first + if self.local_response: + self.local_response.close() + self.local_response = None + if self.local_session: + self.local_session.close() + self.local_session = None + + # Get current connection state to check ownership and active streams + state = self._get_connection_state() + + if not state: + logger.info(f"[{self.session_id}] No connection state found - local cleanup only") + return + + # Check if there are active streams + if state.active_streams > 0: + # There are active streams - check ownership + if current_worker_id and state.worker_id == current_worker_id: + logger.info(f"[{self.session_id}] Active streams present ({state.active_streams}) and we own them - local cleanup only") + else: + logger.info(f"[{self.session_id}] Active streams present ({state.active_streams}) but owned by worker {state.worker_id} - local cleanup only") + return + + # No active streams - we can clean up Redis state + if not self.redis_client: + logger.info(f"[{self.session_id}] No Redis client - local cleanup only") + return + + # Acquire lock and do final check before cleanup to prevent race conditions + if not self._acquire_lock(): + logger.warning(f"[{self.session_id}] Could not acquire lock for cleanup - skipping") + return + + try: + # Re-check active streams with lock held to prevent race conditions + current_state = self._get_connection_state() + if not current_state: + logger.info(f"[{self.session_id}] Connection state no longer exists - cleanup already done") + return + + if current_state.active_streams > 0: + logger.info(f"[{self.session_id}] Active streams now present ({current_state.active_streams}) - skipping cleanup") + return + + # Use pipeline for 
atomic cleanup operations + pipe = self.redis_client.pipeline() + + # 1. Remove main connection state (contains consolidated data) + pipe.delete(self.connection_key) + + # 2. Remove distributed lock (will be released below anyway) + pipe.delete(self.lock_key) + + # Execute all cleanup operations + pipe.execute() + + logger.info(f"[{self.session_id}] Cleaned up Redis keys (verified no active streams)") + + # Decrement profile connections if we have the state and connection manager + if state.m3u_profile_id and connection_manager: + connection_manager._decrement_profile_connections(state.m3u_profile_id) + logger.info(f"[{self.session_id}] Profile connection count decremented for profile {state.m3u_profile_id}") + else: + if not state.m3u_profile_id: + logger.warning(f"[{self.session_id}] No profile ID in connection state - cannot decrement profile connections") + elif not connection_manager: + logger.warning(f"[{self.session_id}] No connection manager provided - cannot decrement profile connections") + + except Exception as e: + logger.error(f"[{self.session_id}] Error cleaning up Redis state: {e}") + finally: + # Always release the lock + self._release_lock() + + +# Modify the VODConnectionManager to use Redis-backed connections +class MultiWorkerVODConnectionManager: + """Enhanced VOD Connection Manager that works across multiple uwsgi workers""" + + _instance = None + + @classmethod + def get_instance(cls): + """Get the singleton instance""" + if cls._instance is None: + cls._instance = cls() + return cls._instance + + def __init__(self): + self.redis_client = RedisClient.get_client() + self.connection_ttl = 3600 # 1 hour TTL for connections + self.session_ttl = 1800 # 30 minutes TTL for sessions + self.worker_id = self._get_worker_id() + logger.info(f"MultiWorkerVODConnectionManager initialized for worker {self.worker_id}") + + def _get_worker_id(self): + """Get unique worker ID for this process""" + import os + import socket + try: + # Use combination of hostname and PID for unique worker ID + return f"{socket.gethostname()}-{os.getpid()}" + except: + import random + return f"worker-{random.randint(1000, 9999)}" + + def _get_profile_connections_key(self, profile_id: int) -> str: + """Get Redis key for tracking connections per profile - STANDARDIZED with TS proxy""" + return f"profile_connections:{profile_id}" + + def _check_profile_limits(self, m3u_profile) -> bool: + """Check if profile has available connection slots""" + if m3u_profile.max_streams == 0: # Unlimited + return True + + try: + profile_connections_key = self._get_profile_connections_key(m3u_profile.id) + current_connections = int(self.redis_client.get(profile_connections_key) or 0) + + logger.info(f"[PROFILE-CHECK] Profile {m3u_profile.id} has {current_connections}/{m3u_profile.max_streams} connections") + return current_connections < m3u_profile.max_streams + + except Exception as e: + logger.error(f"Error checking profile limits: {e}") + return False + + def _increment_profile_connections(self, m3u_profile): + """Increment profile connection count""" + try: + profile_connections_key = self._get_profile_connections_key(m3u_profile.id) + new_count = self.redis_client.incr(profile_connections_key) + logger.info(f"[PROFILE-INCR] Profile {m3u_profile.id} connections: {new_count}") + return new_count + except Exception as e: + logger.error(f"Error incrementing profile connections: {e}") + return None + + def _decrement_profile_connections(self, m3u_profile_id: int): + """Decrement profile connection count""" + try: + 
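# The profile_connections:<id> key is shared with the TS proxy, so check the current value and never decrement below zero +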
profile_connections_key = self._get_profile_connections_key(m3u_profile_id) + current_count = int(self.redis_client.get(profile_connections_key) or 0) + if current_count > 0: + new_count = self.redis_client.decr(profile_connections_key) + logger.info(f"[PROFILE-DECR] Profile {m3u_profile_id} connections: {new_count}") + return new_count + else: + logger.warning(f"[PROFILE-DECR] Profile {m3u_profile_id} already at 0 connections") + return 0 + except Exception as e: + logger.error(f"Error decrementing profile connections: {e}") + return None + + def stream_content_with_session(self, session_id, content_obj, stream_url, m3u_profile, + client_ip, client_user_agent, request, + utc_start=None, utc_end=None, offset=None, range_header=None): + """Stream content with Redis-backed persistent connection""" + + # Generate client ID + content_type = "movie" if isinstance(content_obj, Movie) else "episode" + content_uuid = str(content_obj.uuid) + content_name = content_obj.name if hasattr(content_obj, 'name') else str(content_obj) + client_id = session_id + + # Track whether we incremented profile connections (for cleanup on error) + profile_connections_incremented = False + redis_connection = None + + logger.info(f"[{client_id}] Worker {self.worker_id} - Redis-backed streaming request for {content_type} {content_name}") + + try: + # First, try to find an existing idle session that matches our criteria + matching_session_id = self.find_matching_idle_session( + content_type=content_type, + content_uuid=content_uuid, + client_ip=client_ip, + client_user_agent=client_user_agent, + utc_start=utc_start, + utc_end=utc_end, + offset=offset + ) + + # Use matching session if found, otherwise use the provided session_id + if matching_session_id: + logger.info(f"[{client_id}] Worker {self.worker_id} - Found matching idle session: {matching_session_id}") + effective_session_id = matching_session_id + client_id = matching_session_id # Update client_id for logging consistency + + # IMMEDIATELY reserve this session by incrementing active streams to prevent cleanup + temp_connection = RedisBackedVODConnection(effective_session_id, self.redis_client) + if temp_connection.increment_active_streams(): + logger.info(f"[{client_id}] Reserved idle session - incremented active streams") + else: + logger.warning(f"[{client_id}] Failed to reserve idle session - falling back to new session") + effective_session_id = session_id + matching_session_id = None # Clear the match so we create a new connection + else: + logger.info(f"[{client_id}] Worker {self.worker_id} - No matching idle session found, using new session") + effective_session_id = session_id + + # Create Redis-backed connection + redis_connection = RedisBackedVODConnection(effective_session_id, self.redis_client) + + # Check if connection exists, create if not + existing_state = redis_connection._get_connection_state() + if not existing_state: + logger.info(f"[{client_id}] Worker {self.worker_id} - Creating new Redis-backed connection") + + # Check profile limits before creating new connection + if not self._check_profile_limits(m3u_profile): + logger.warning(f"[{client_id}] Profile {m3u_profile.name} connection limit exceeded") + return HttpResponse("Connection limit exceeded for profile", status=429) + + # Apply timeshift parameters + modified_stream_url = self._apply_timeshift_parameters(stream_url, utc_start, utc_end, offset) + + # Prepare headers for provider request + headers = {} + # Use M3U account's user-agent for provider requests, not client's user-agent + 
m3u_user_agent = m3u_profile.m3u_account.get_user_agent() + if m3u_user_agent: + headers['User-Agent'] = m3u_user_agent.user_agent + logger.info(f"[{client_id}] Using M3U account user-agent: {m3u_user_agent.user_agent}") + elif client_user_agent: + # Fallback to client's user-agent if M3U doesn't have one + headers['User-Agent'] = client_user_agent + logger.info(f"[{client_id}] Using client user-agent (M3U fallback): {client_user_agent}") + else: + logger.warning(f"[{client_id}] No user-agent available (neither M3U nor client)") + + # Forward important headers from request + important_headers = ['authorization', 'referer', 'origin', 'accept'] + for header_name in important_headers: + django_header = f'HTTP_{header_name.upper().replace("-", "_")}' + if hasattr(request, 'META') and django_header in request.META: + headers[header_name] = request.META[django_header] + + # Create connection state in Redis with consolidated session metadata + if not redis_connection.create_connection( + stream_url=modified_stream_url, + headers=headers, + m3u_profile_id=m3u_profile.id, + # Session metadata (consolidated from separate vod_session key) + content_obj_type=content_type, + content_uuid=content_uuid, + content_name=content_name, + client_ip=client_ip, + client_user_agent=client_user_agent, + utc_start=utc_start, + utc_end=utc_end, + offset=str(offset) if offset else None, + worker_id=self.worker_id + ): + logger.error(f"[{client_id}] Worker {self.worker_id} - Failed to create Redis connection") + return HttpResponse("Failed to create connection", status=500) + + # Increment profile connections after successful connection creation + self._increment_profile_connections(m3u_profile) + profile_connections_incremented = True + + logger.info(f"[{client_id}] Worker {self.worker_id} - Created consolidated connection with session metadata") + else: + logger.info(f"[{client_id}] Worker {self.worker_id} - Using existing Redis-backed connection") + + # Transfer ownership to current worker and update session activity + if redis_connection._acquire_lock(): + try: + state = redis_connection._get_connection_state() + if state: + old_worker = state.worker_id + state.last_activity = time.time() + state.worker_id = self.worker_id # Transfer ownership to current worker + redis_connection._save_connection_state(state) + + if old_worker != self.worker_id: + logger.info(f"[{client_id}] Ownership transferred from worker {old_worker} to {self.worker_id}") + else: + logger.debug(f"[{client_id}] Worker {self.worker_id} retaining ownership") + finally: + redis_connection._release_lock() + + # Get stream from Redis-backed connection + upstream_response = redis_connection.get_stream(range_header) + + if upstream_response is None: + logger.warning(f"[{client_id}] Worker {self.worker_id} - Range not satisfiable") + return HttpResponse("Requested Range Not Satisfiable", status=416) + + # Get connection headers + connection_headers = redis_connection.get_headers() + + # Create streaming generator + def stream_generator(): + decremented = False + stop_signal_detected = False + try: + logger.info(f"[{client_id}] Worker {self.worker_id} - Starting Redis-backed stream") + + # Increment active streams (unless we already did it for session reuse) + if not matching_session_id: + # New session - increment active streams + redis_connection.increment_active_streams() + else: + # Reused session - we already incremented when reserving the session + logger.debug(f"[{client_id}] Using pre-reserved session - active streams already incremented") + + 
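Aside: the provider-header construction earlier in this method prefers the M3U account's user-agent, falls back to the client's, and copies a short whitelist of headers (authorization, referer, origin, accept) out of Django's request.META. A minimal standalone sketch of that mapping is below; build_upstream_headers is a hypothetical helper written only for illustration and is not part of this patch.
def build_upstream_headers(meta, m3u_user_agent=None, client_user_agent=None):
    """Illustrative only: mirrors the header-selection rules used in the patch above."""
    headers = {}
    # Prefer the M3U account's user-agent; fall back to the client's if none is configured.
    if m3u_user_agent:
        headers['User-Agent'] = m3u_user_agent
    elif client_user_agent:
        headers['User-Agent'] = client_user_agent
    # Django exposes incoming request headers as META['HTTP_<NAME>']; map them back to header names.
    for name in ('authorization', 'referer', 'origin', 'accept'):
        meta_key = 'HTTP_' + name.upper().replace('-', '_')
        if meta_key in meta:
            headers[name] = meta[meta_key]
    return headers
# Example: build_upstream_headers({'HTTP_REFERER': 'https://example.com'}, m3u_user_agent='VLC/3.0')
# returns {'User-Agent': 'VLC/3.0', 'referer': 'https://example.com'}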
bytes_sent = 0 + chunk_count = 0 + + # Get the stop signal key for this client + stop_key = get_vod_client_stop_key(client_id) + + for chunk in upstream_response.iter_content(chunk_size=8192): + if chunk: + yield chunk + bytes_sent += len(chunk) + chunk_count += 1 + + # Check for stop signal every 100 chunks + if chunk_count % 100 == 0: + # Check if stop signal has been set + if self.redis_client and self.redis_client.exists(stop_key): + logger.info(f"[{client_id}] Worker {self.worker_id} - Stop signal detected, terminating stream") + # Delete the stop key + self.redis_client.delete(stop_key) + stop_signal_detected = True + break + + # Update the connection state + logger.debug(f"Client: [{client_id}] Worker: {self.worker_id} sent {chunk_count} chunks for VOD: {content_name}") + if redis_connection._acquire_lock(): + try: + state = redis_connection._get_connection_state() + if state: + state.last_activity = time.time() + # Store cumulative bytes sent in connection state + state.bytes_sent = bytes_sent # Use cumulative bytes_sent, not chunk size + redis_connection._save_connection_state(state) + finally: + redis_connection._release_lock() + + if stop_signal_detected: + logger.info(f"[{client_id}] Worker {self.worker_id} - Stream stopped by signal: {bytes_sent} bytes sent") + else: + logger.info(f"[{client_id}] Worker {self.worker_id} - Redis-backed stream completed: {bytes_sent} bytes sent") + redis_connection.decrement_active_streams() + decremented = True + + # Schedule smart cleanup if no active streams after normal completion + if not redis_connection.has_active_streams(): + def delayed_cleanup(): + time.sleep(1) # Wait 1 second + # Smart cleanup: check active streams and ownership + logger.info(f"[{client_id}] Worker {self.worker_id} - Checking for smart cleanup after normal completion") + redis_connection.cleanup(connection_manager=self, current_worker_id=self.worker_id) + + import threading + cleanup_thread = threading.Thread(target=delayed_cleanup) + cleanup_thread.daemon = True + cleanup_thread.start() + + except GeneratorExit: + logger.info(f"[{client_id}] Worker {self.worker_id} - Client disconnected from Redis-backed stream") + if not decremented: + redis_connection.decrement_active_streams() + decremented = True + + # Schedule smart cleanup if no active streams + if not redis_connection.has_active_streams(): + def delayed_cleanup(): + time.sleep(1) # Wait 1 second + # Smart cleanup: check active streams and ownership + logger.info(f"[{client_id}] Worker {self.worker_id} - Checking for smart cleanup after client disconnect") + redis_connection.cleanup(connection_manager=self, current_worker_id=self.worker_id) + + import threading + cleanup_thread = threading.Thread(target=delayed_cleanup) + cleanup_thread.daemon = True + cleanup_thread.start() + + except Exception as e: + logger.error(f"[{client_id}] Worker {self.worker_id} - Error in Redis-backed stream: {e}") + if not decremented: + redis_connection.decrement_active_streams() + decremented = True + # Smart cleanup on error - immediate cleanup since we're in error state + redis_connection.cleanup(connection_manager=self, current_worker_id=self.worker_id) + yield b"Error: Stream interrupted" + + finally: + if not decremented: + redis_connection.decrement_active_streams() + + # Create streaming response + response = StreamingHttpResponse( + streaming_content=stream_generator(), + content_type=connection_headers.get('content_type', 'video/mp4') + ) + + # Set appropriate status code + response.status_code = 206 if range_header else 
200 + + # Set required headers + response['Cache-Control'] = 'no-cache' + response['Pragma'] = 'no-cache' + response['X-Content-Type-Options'] = 'nosniff' + response['Connection'] = 'keep-alive' + response['X-Worker-ID'] = self.worker_id # Identify which worker served this + + if connection_headers.get('content_length'): + response['Accept-Ranges'] = 'bytes' + + # For range requests, Content-Length should be the partial content size, not full file size + if range_header and 'bytes=' in range_header: + try: + range_part = range_header.replace('bytes=', '') + if '-' in range_part: + start_byte, end_byte = range_part.split('-', 1) + start = int(start_byte) if start_byte else 0 + + # Get the FULL content size from the connection state (from initial request) + state = redis_connection._get_connection_state() + if state and state.content_length: + full_content_size = int(state.content_length) + end = int(end_byte) if end_byte else full_content_size - 1 + + # Calculate partial content size for Content-Length header + partial_content_size = end - start + 1 + response['Content-Length'] = str(partial_content_size) + + # Content-Range should show full file size per HTTP standards + content_range = f"bytes {start}-{end}/{full_content_size}" + response['Content-Range'] = content_range + logger.info(f"[{client_id}] Worker {self.worker_id} - Set Content-Range: {content_range}, Content-Length: {partial_content_size}") + + # Store range information for the VOD stats API to calculate position + if start > 0: + try: + position_percentage = (start / full_content_size) * 100 + current_timestamp = time.time() + + # Update the Redis connection state with seek information + if redis_connection._acquire_lock(): + try: + # Refresh state in case it changed + state = redis_connection._get_connection_state() + if state: + # Store range/seek information for stats API + state.last_seek_byte = start + state.last_seek_percentage = position_percentage + state.total_content_size = full_content_size + state.last_seek_timestamp = current_timestamp + state.last_activity = current_timestamp + redis_connection._save_connection_state(state) + logger.info(f"[{client_id}] *** SEEK INFO STORED *** {position_percentage:.1f}% at byte {start:,}/{full_content_size:,} (timestamp: {current_timestamp})") + finally: + redis_connection._release_lock() + else: + logger.warning(f"[{client_id}] Could not acquire lock to update seek info") + except Exception as pos_e: + logger.error(f"[{client_id}] Error storing seek info: {pos_e}") + else: + # Fallback to partial content size if full size not available + partial_size = int(connection_headers['content_length']) + end = int(end_byte) if end_byte else partial_size - 1 + content_range = f"bytes {start}-{end}/{partial_size}" + response['Content-Range'] = content_range + response['Content-Length'] = str(end - start + 1) + logger.warning(f"[{client_id}] Using partial content size for Content-Range (full size not available): {content_range}") + except Exception as e: + logger.warning(f"[{client_id}] Worker {self.worker_id} - Could not set Content-Range: {e}") + response['Content-Length'] = connection_headers['content_length'] + else: + # For non-range requests, use the full content length + response['Content-Length'] = connection_headers['content_length'] + + logger.info(f"[{client_id}] Worker {self.worker_id} - Redis-backed response ready (status: {response.status_code})") + return response + + except Exception as e: + logger.error(f"[{client_id}] Worker {self.worker_id} - Error in Redis-backed 
stream_content_with_session: {e}", exc_info=True) + + # Decrement profile connections if we incremented them but failed before streaming started + if profile_connections_incremented: + logger.info(f"[{client_id}] Connection error occurred after profile increment - decrementing profile connections") + self._decrement_profile_connections(m3u_profile.id) + + # Also clean up the Redis connection state since we won't be using it + if redis_connection: + try: + redis_connection.cleanup(connection_manager=self, current_worker_id=self.worker_id) + except Exception as cleanup_error: + logger.error(f"[{client_id}] Error during cleanup after connection failure: {cleanup_error}") + + return HttpResponse(f"Streaming error: {str(e)}", status=500) + + def _apply_timeshift_parameters(self, original_url, utc_start=None, utc_end=None, offset=None): + """Apply timeshift parameters to URL""" + if not any([utc_start, utc_end, offset]): + return original_url + + try: + from urllib.parse import urlparse, urlunparse, parse_qs, urlencode + + parsed_url = urlparse(original_url) + query_params = parse_qs(parsed_url.query) + path = parsed_url.path + + logger.info(f"Applying timeshift parameters: utc_start={utc_start}, utc_end={utc_end}, offset={offset}") + + # Add timeshift parameters + if utc_start: + query_params['utc_start'] = [utc_start] + query_params['start'] = [utc_start] + logger.info(f"Added utc_start/start parameter: {utc_start}") + + if utc_end: + query_params['utc_end'] = [utc_end] + query_params['end'] = [utc_end] + logger.info(f"Added utc_end/end parameter: {utc_end}") + + if offset: + try: + offset_seconds = int(offset) + query_params['offset'] = [str(offset_seconds)] + query_params['seek'] = [str(offset_seconds)] + query_params['t'] = [str(offset_seconds)] + logger.info(f"Added offset/seek/t parameter: {offset_seconds}") + except ValueError: + logger.warning(f"Invalid offset value: {offset}") + + # Handle special catchup URL patterns + if utc_start: + try: + from datetime import datetime + import re + + # Parse the UTC start time + start_dt = datetime.fromisoformat(utc_start.replace('Z', '+00:00')) + + # Check for catchup URL patterns like /catchup/YYYY-MM-DD/HH-MM-SS/ + catchup_pattern = r'/catchup/\d{4}-\d{2}-\d{2}/\d{2}-\d{2}-\d{2}/' + if re.search(catchup_pattern, path): + # Replace the date/time in the path + date_part = start_dt.strftime('%Y-%m-%d') + time_part = start_dt.strftime('%H-%M-%S') + + path = re.sub(catchup_pattern, f'/catchup/{date_part}/{time_part}/', path) + logger.info(f"Modified catchup path: {path}") + except Exception as e: + logger.warning(f"Could not parse timeshift date: {e}") + + # Reconstruct URL + new_query = urlencode(query_params, doseq=True) + modified_url = urlunparse(( + parsed_url.scheme, + parsed_url.netloc, + path, + parsed_url.params, + new_query, + parsed_url.fragment + )) + + logger.info(f"Modified URL: {modified_url}") + return modified_url + + except Exception as e: + logger.error(f"Error applying timeshift parameters: {e}") + return original_url + + def cleanup_persistent_connection(self, session_id: str): + """Clean up a specific Redis-backed persistent connection""" + logger.info(f"[{session_id}] Cleaning up Redis-backed persistent connection") + + redis_connection = RedisBackedVODConnection(session_id, self.redis_client) + redis_connection.cleanup(connection_manager=self) + + # The cleanup method now handles all Redis keys including session data + + def cleanup_stale_persistent_connections(self, max_age_seconds: int = 1800): + """Clean up stale 
Redis-backed persistent connections""" + if not self.redis_client: + return + + try: + logger.info(f"Cleaning up Redis-backed connections older than {max_age_seconds} seconds") + + # Find all persistent connection keys + pattern = "vod_persistent_connection:*" + cursor = 0 + cleanup_count = 0 + current_time = time.time() + + while True: + cursor, keys = self.redis_client.scan(cursor, match=pattern, count=100) + + for key in keys: + try: + # Get connection state + data = self.redis_client.hgetall(key) + if not data: + continue + + # Convert bytes to strings if needed + if isinstance(list(data.keys())[0], bytes): + data = {k.decode('utf-8'): v.decode('utf-8') for k, v in data.items()} + + last_activity = float(data.get('last_activity', 0)) + active_streams = int(data.get('active_streams', 0)) + + # Clean up if stale and no active streams + if (current_time - last_activity > max_age_seconds) and active_streams == 0: + session_id = key.decode('utf-8').replace('vod_persistent_connection:', '') + logger.info(f"Cleaning up stale connection: {session_id}") + + # Clean up connection and related keys + redis_connection = RedisBackedVODConnection(session_id, self.redis_client) + redis_connection.cleanup(connection_manager=self) + cleanup_count += 1 + + except Exception as e: + logger.error(f"Error processing connection key {key}: {e}") + continue + + if cursor == 0: + break + + if cleanup_count > 0: + logger.info(f"Cleaned up {cleanup_count} stale Redis-backed connections") + else: + logger.debug("No stale Redis-backed connections found") + + except Exception as e: + logger.error(f"Error during Redis-backed connection cleanup: {e}") + + def create_connection(self, content_type: str, content_uuid: str, content_name: str, + client_id: str, client_ip: str, user_agent: str, + m3u_profile: M3UAccountProfile) -> bool: + """Create connection tracking in Redis (same as original but for Redis-backed connections)""" + if not self.redis_client: + logger.error("Redis client not available for VOD connection tracking") + return False + + try: + # Check profile connection limits + profile_connections_key = f"profile_connections:{m3u_profile.id}" + current_connections = self.redis_client.get(profile_connections_key) + max_connections = getattr(m3u_profile, 'max_connections', 3) # Default to 3 + + if current_connections and int(current_connections) >= max_connections: + logger.warning(f"Profile {m3u_profile.name} connection limit exceeded ({current_connections}/{max_connections})") + return False + + # Create connection tracking + connection_key = f"vod_proxy:connection:{content_type}:{content_uuid}:{client_id}" + content_connections_key = f"vod_proxy:content:{content_type}:{content_uuid}:connections" + + # Check if connection already exists + if self.redis_client.exists(connection_key): + logger.info(f"Connection already exists for {client_id} - {content_type} {content_name}") + self.redis_client.hset(connection_key, "last_activity", str(time.time())) + return True + + # Connection data + connection_data = { + "content_type": content_type, + "content_uuid": content_uuid, + "content_name": content_name, + "client_id": client_id, + "client_ip": client_ip, + "user_agent": user_agent, + "m3u_profile_id": m3u_profile.id, + "m3u_profile_name": m3u_profile.name, + "connected_at": str(time.time()), + "last_activity": str(time.time()), + "bytes_sent": "0", + "position_seconds": "0" + } + + # Use pipeline for atomic operations + pipe = self.redis_client.pipeline() + pipe.hset(connection_key, mapping=connection_data) + 
pipe.expire(connection_key, self.connection_ttl) + pipe.incr(profile_connections_key) + pipe.sadd(content_connections_key, client_id) + pipe.expire(content_connections_key, self.connection_ttl) + pipe.execute() + + logger.info(f"Created Redis-backed VOD connection: {client_id} for {content_type} {content_name}") + return True + + except Exception as e: + logger.error(f"Error creating Redis-backed connection: {e}") + return False + + def remove_connection(self, content_type: str, content_uuid: str, client_id: str): + """Remove connection tracking from Redis""" + if not self.redis_client: + return + + try: + connection_key = f"vod_proxy:connection:{content_type}:{content_uuid}:{client_id}" + content_connections_key = f"vod_proxy:content:{content_type}:{content_uuid}:connections" + + # Get connection data to find profile + connection_data = self.redis_client.hgetall(connection_key) + if connection_data: + # Convert bytes to strings if needed + if isinstance(list(connection_data.keys())[0], bytes): + connection_data = {k.decode('utf-8'): v.decode('utf-8') for k, v in connection_data.items()} + + profile_id = connection_data.get('m3u_profile_id') + if profile_id: + profile_connections_key = f"profile_connections:{profile_id}" + + # Use pipeline for atomic operations + pipe = self.redis_client.pipeline() + pipe.delete(connection_key) + pipe.srem(content_connections_key, client_id) + pipe.decr(profile_connections_key) + pipe.execute() + + logger.info(f"Removed Redis-backed connection: {client_id}") + + except Exception as e: + logger.error(f"Error removing Redis-backed connection: {e}") + + def update_connection_activity(self, content_type: str, content_uuid: str, + client_id: str, bytes_sent: int): + """Update connection activity in Redis""" + if not self.redis_client: + return + + try: + connection_key = f"vod_proxy:connection:{content_type}:{content_uuid}:{client_id}" + pipe = self.redis_client.pipeline() + pipe.hset(connection_key, mapping={ + "last_activity": str(time.time()), + "bytes_sent": str(bytes_sent) + }) + pipe.expire(connection_key, self.connection_ttl) + pipe.execute() + except Exception as e: + logger.error(f"Error updating connection activity: {e}") + + def find_matching_idle_session(self, content_type: str, content_uuid: str, + client_ip: str, client_user_agent: str, + utc_start=None, utc_end=None, offset=None) -> Optional[str]: + """Find existing Redis-backed session that matches criteria using consolidated connection state""" + if not self.redis_client: + return None + + try: + # Search for connections with consolidated session data + pattern = "vod_persistent_connection:*" + cursor = 0 + matching_sessions = [] + + while True: + cursor, keys = self.redis_client.scan(cursor, match=pattern, count=100) + + for key in keys: + try: + connection_data = self.redis_client.hgetall(key) + if not connection_data: + continue + + # Convert bytes keys/values to strings if needed + if isinstance(list(connection_data.keys())[0], bytes): + connection_data = {k.decode('utf-8'): v.decode('utf-8') for k, v in connection_data.items()} + + # Check if content matches (using consolidated data) + stored_content_type = connection_data.get('content_obj_type', '') + stored_content_uuid = connection_data.get('content_uuid', '') + + if stored_content_type != content_type or stored_content_uuid != content_uuid: + continue + + # Extract session ID + session_id = key.decode('utf-8').replace('vod_persistent_connection:', '') + + # Check if Redis-backed connection exists and has no active streams + 
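For context, the stale-connection sweep and the idle-session search above both walk Redis with cursor-based SCAN (in batches of 100) instead of KEYS, decode the hash fields, and treat a connection as reclaimable only when it is past the age limit and has no active streams. A self-contained sketch of that pattern, assuming a plain redis-py client; sweep_stale is a made-up name for illustration, not this module's API.
import time
import redis

def sweep_stale(r, max_age_seconds=1800):
    """Illustrative only: collect keys that look stale, mirroring the checks above."""
    cursor, stale = 0, []
    while True:
        cursor, keys = r.scan(cursor, match='vod_persistent_connection:*', count=100)
        for key in keys:
            data = r.hgetall(key)
            if not data:
                continue
            # redis-py returns bytes unless decode_responses=True was set on the client
            data = {(k.decode() if isinstance(k, bytes) else k): (v.decode() if isinstance(v, bytes) else v)
                    for k, v in data.items()}
            idle_for = time.time() - float(data.get('last_activity', 0))
            if idle_for > max_age_seconds and int(data.get('active_streams', 0)) == 0:
                stale.append(key)
        if cursor == 0:
            break
    return stale
# Usage (assumed local Redis): sweep_stale(redis.Redis(host='localhost', port=6379, db=0))
SCAN keeps the server responsive because it iterates incrementally rather than blocking on a full keyspace scan, which matters when many persistent connections exist.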
redis_connection = RedisBackedVODConnection(session_id, self.redis_client) + if redis_connection.has_active_streams(): + continue + + # Calculate match score + score = 10 # Content match + match_reasons = ["content"] + + # Check other criteria (using consolidated data) + stored_client_ip = connection_data.get('client_ip', '') + stored_user_agent = connection_data.get('client_user_agent', '') or connection_data.get('user_agent', '') + + if stored_client_ip and stored_client_ip == client_ip: + score += 5 + match_reasons.append("ip") + + if stored_user_agent and stored_user_agent == client_user_agent: + score += 3 + match_reasons.append("user-agent") + + # Check timeshift parameters (using consolidated data) + stored_utc_start = connection_data.get('utc_start', '') + stored_utc_end = connection_data.get('utc_end', '') + stored_offset = connection_data.get('offset', '') + + current_utc_start = utc_start or "" + current_utc_end = utc_end or "" + current_offset = str(offset) if offset else "" + + if (stored_utc_start == current_utc_start and + stored_utc_end == current_utc_end and + stored_offset == current_offset): + score += 7 + match_reasons.append("timeshift") + + if score >= 13: # Good match threshold + matching_sessions.append({ + 'session_id': session_id, + 'score': score, + 'reasons': match_reasons, + 'last_activity': float(connection_data.get('last_activity', '0')) + }) + + except Exception as e: + logger.debug(f"Error processing connection key {key}: {e}") + continue + + if cursor == 0: + break + + # Sort by score and last activity + matching_sessions.sort(key=lambda x: (x['score'], x['last_activity']), reverse=True) + + if matching_sessions: + best_match = matching_sessions[0] + logger.info(f"Found matching Redis-backed idle session: {best_match['session_id']} " + f"(score: {best_match['score']}, reasons: {', '.join(best_match['reasons'])})") + return best_match['session_id'] + + return None + + except Exception as e: + logger.error(f"Error finding matching idle session: {e}") + return None + + def get_session_info(self, session_id: str) -> Optional[dict]: + """Get session information from consolidated connection state (compatibility method)""" + if not self.redis_client: + return None + + try: + redis_connection = RedisBackedVODConnection(session_id, self.redis_client) + return redis_connection.get_session_metadata() + except Exception as e: + logger.error(f"Error getting session info for {session_id}: {e}") + return None \ No newline at end of file diff --git a/apps/proxy/vod_proxy/urls.py b/apps/proxy/vod_proxy/urls.py new file mode 100644 index 00000000..f48f70e0 --- /dev/null +++ b/apps/proxy/vod_proxy/urls.py @@ -0,0 +1,27 @@ +from django.urls import path +from . 
import views + +app_name = 'vod_proxy' + +urlpatterns = [ + # Generic VOD streaming with session ID in path (for compatibility) + path('<str:content_type>/<str:content_id>/<str:session_id>', views.VODStreamView.as_view(), name='vod_stream_with_session'), + path('<str:content_type>/<str:content_id>/<str:session_id>/<int:profile_id>/', views.VODStreamView.as_view(), name='vod_stream_with_session_and_profile'), + + # Generic VOD streaming (supports movies, episodes, series) - legacy patterns + path('<str:content_type>/<str:content_id>', views.VODStreamView.as_view(), name='vod_stream'), + path('<str:content_type>/<str:content_id>/<int:profile_id>/', views.VODStreamView.as_view(), name='vod_stream_with_profile'), + + # VOD playlist generation + path('playlist/', views.VODPlaylistView.as_view(), name='vod_playlist'), + path('playlist/<int:profile_id>/', views.VODPlaylistView.as_view(), name='vod_playlist_with_profile'), + + # Position tracking + path('position/<str:content_id>/', views.VODPositionView.as_view(), name='vod_position'), + + # VOD Stats + path('stats/', views.VODStatsView.as_view(), name='vod_stats'), + + # Stop VOD client connection + path('stop_client/', views.stop_vod_client, name='stop_vod_client'), +] diff --git a/apps/proxy/vod_proxy/utils.py b/apps/proxy/vod_proxy/utils.py new file mode 100644 index 00000000..7ccf08b4 --- /dev/null +++ b/apps/proxy/vod_proxy/utils.py @@ -0,0 +1,58 @@ +""" +Utility functions for VOD proxy operations. +""" + +import logging +from django.http import HttpResponse + +logger = logging.getLogger(__name__) + + +def get_client_info(request): + """ + Extract client IP and User-Agent from request. + + Args: + request: Django HttpRequest object + + Returns: + tuple: (client_ip, user_agent) + """ + # Get client IP, checking for proxy headers + client_ip = request.META.get('HTTP_X_FORWARDED_FOR') + if client_ip: + # Take the first IP if there are multiple (comma-separated) + client_ip = client_ip.split(',')[0].strip() + else: + client_ip = request.META.get('HTTP_X_REAL_IP') or request.META.get('REMOTE_ADDR', 'unknown') + + # Get User-Agent + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + + return client_ip, user_agent + + +def create_vod_response(content, content_type='video/mp4', filename=None): + """ + Create a streaming HTTP response for VOD content. + + Args: + content: Content to stream (file-like object or bytes) + content_type: MIME type of the content + filename: Optional filename for Content-Disposition header + + Returns: + HttpResponse: Configured HTTP response for streaming + """ + response = HttpResponse(content, content_type=content_type) + + if filename: + response['Content-Disposition'] = f'attachment; filename="{filename}"' + + # Add headers for streaming + response['Accept-Ranges'] = 'bytes' + response['Cache-Control'] = 'no-cache, no-store, must-revalidate' + response['Pragma'] = 'no-cache' + response['Expires'] = '0' + + return response diff --git a/apps/proxy/vod_proxy/views.py b/apps/proxy/vod_proxy/views.py new file mode 100644 index 00000000..2ec95cc3 --- /dev/null +++ b/apps/proxy/vod_proxy/views.py @@ -0,0 +1,1073 @@ +""" +VOD (Video on Demand) proxy views for handling movie and series streaming. +Supports M3U profiles for authentication and URL transformation.
+""" + +import time +import random +import logging +import requests +from django.http import StreamingHttpResponse, JsonResponse, Http404, HttpResponse +from django.shortcuts import get_object_or_404 +from django.views.decorators.csrf import csrf_exempt +from django.utils.decorators import method_decorator +from django.views import View +from apps.vod.models import Movie, Series, Episode +from apps.m3u.models import M3UAccount, M3UAccountProfile +from apps.proxy.vod_proxy.connection_manager import VODConnectionManager +from apps.proxy.vod_proxy.multi_worker_connection_manager import MultiWorkerVODConnectionManager, infer_content_type_from_url, get_vod_client_stop_key +from .utils import get_client_info, create_vod_response + +logger = logging.getLogger(__name__) + + +@method_decorator(csrf_exempt, name='dispatch') +class VODStreamView(View): + """Handle VOD streaming requests with M3U profile support""" + + def get(self, request, content_type, content_id, session_id=None, profile_id=None): + """ + Stream VOD content (movies or series episodes) with session-based connection reuse + + Args: + content_type: 'movie', 'series', or 'episode' + content_id: ID of the content + session_id: Optional session ID from URL path (for persistent connections) + profile_id: Optional M3U profile ID for authentication + """ + logger.info(f"[VOD-REQUEST] Starting VOD stream request: {content_type}/{content_id}, session: {session_id}, profile: {profile_id}") + logger.info(f"[VOD-REQUEST] Full request path: {request.get_full_path()}") + logger.info(f"[VOD-REQUEST] Request method: {request.method}") + logger.info(f"[VOD-REQUEST] Request headers: {dict(request.headers)}") + + try: + client_ip, client_user_agent = get_client_info(request) + + # Extract timeshift parameters from query string + # Support multiple timeshift parameter formats + utc_start = request.GET.get('utc_start') or request.GET.get('start') or request.GET.get('playliststart') + utc_end = request.GET.get('utc_end') or request.GET.get('end') or request.GET.get('playlistend') + offset = request.GET.get('offset') or request.GET.get('seek') or request.GET.get('t') + + # VLC specific timeshift parameters + if not utc_start and not offset: + # Check for VLC-style timestamp parameters + if 'timestamp' in request.GET: + offset = request.GET.get('timestamp') + elif 'time' in request.GET: + offset = request.GET.get('time') + + # Session ID now comes from URL path parameter + # Remove legacy query parameter extraction since we're using path-based routing + + # Extract Range header for seeking support + range_header = request.META.get('HTTP_RANGE') + + logger.info(f"[VOD-TIMESHIFT] Timeshift params - utc_start: {utc_start}, utc_end: {utc_end}, offset: {offset}") + logger.info(f"[VOD-SESSION] Session ID: {session_id}") + + # Log all query parameters for debugging + if request.GET: + logger.debug(f"[VOD-PARAMS] All query params: {dict(request.GET)}") + + if range_header: + logger.info(f"[VOD-RANGE] Range header: {range_header}") + + # Parse the range to understand what position VLC is seeking to + try: + if 'bytes=' in range_header: + range_part = range_header.replace('bytes=', '') + if '-' in range_part: + start_byte, end_byte = range_part.split('-', 1) + if start_byte: + start_pos_mb = int(start_byte) / (1024 * 1024) + logger.info(f"[VOD-SEEK] Seeking to byte position: {start_byte} (~{start_pos_mb:.1f} MB)") + if int(start_byte) > 0: + logger.info(f"[VOD-SEEK] *** ACTUAL SEEK DETECTED *** Position: {start_pos_mb:.1f} MB") + else: + logger.info(f"[VOD-SEEK] 
Open-ended range request (from start)") + if end_byte: + end_pos_mb = int(end_byte) / (1024 * 1024) + logger.info(f"[VOD-SEEK] End position: {end_byte} bytes (~{end_pos_mb:.1f} MB)") + except Exception as e: + logger.warning(f"[VOD-SEEK] Could not parse range header: {e}") + + # Simple seek detection - track rapid requests + current_time = time.time() + request_key = f"{client_ip}:{content_type}:{content_id}" + + if not hasattr(self.__class__, '_request_times'): + self.__class__._request_times = {} + + if request_key in self.__class__._request_times: + time_diff = current_time - self.__class__._request_times[request_key] + if time_diff < 5.0: + logger.info(f"[VOD-SEEK] Rapid request detected ({time_diff:.1f}s) - likely seeking") + + self.__class__._request_times[request_key] = current_time + else: + logger.info(f"[VOD-RANGE] No Range header - full content request") + + logger.info(f"[VOD-CLIENT] Client info - IP: {client_ip}, User-Agent: {client_user_agent[:50]}...") + + # If no session ID, create one and redirect to path-based URL + if not session_id: + new_session_id = f"vod_{int(time.time() * 1000)}_{random.randint(1000, 9999)}" + logger.info(f"[VOD-SESSION] Creating new session: {new_session_id}") + + # Build redirect URL with session ID in path, preserve query parameters + path_parts = request.path.rstrip('/').split('/') + + # Construct new path: /vod/movie/UUID/SESSION_ID or /vod/movie/UUID/SESSION_ID/PROFILE_ID/ + if profile_id: + new_path = f"{'/'.join(path_parts)}/{new_session_id}/{profile_id}/" + else: + new_path = f"{'/'.join(path_parts)}/{new_session_id}" + + # Preserve any query parameters (except session_id) + query_params = dict(request.GET) + query_params.pop('session_id', None) # Remove if present + + if query_params: + from urllib.parse import urlencode + query_string = urlencode(query_params, doseq=True) + redirect_url = f"{new_path}?{query_string}" + else: + redirect_url = new_path + + logger.info(f"[VOD-SESSION] Redirecting to path-based URL: {redirect_url}") + + return HttpResponse( + status=301, + headers={'Location': redirect_url} + ) + + # Extract preferred M3U account ID and stream ID from query parameters + preferred_m3u_account_id = request.GET.get('m3u_account_id') + preferred_stream_id = request.GET.get('stream_id') + + if preferred_m3u_account_id: + try: + preferred_m3u_account_id = int(preferred_m3u_account_id) + except (ValueError, TypeError): + logger.warning(f"[VOD-PARAM] Invalid m3u_account_id parameter: {preferred_m3u_account_id}") + preferred_m3u_account_id = None + + if preferred_stream_id: + logger.info(f"[VOD-PARAM] Preferred stream ID: {preferred_stream_id}") + + # Get the content object and its relation + content_obj, relation = self._get_content_and_relation(content_type, content_id, preferred_m3u_account_id, preferred_stream_id) + if not content_obj or not relation: + logger.error(f"[VOD-ERROR] Content or relation not found: {content_type} {content_id}") + raise Http404(f"Content not found: {content_type} {content_id}") + + logger.info(f"[VOD-CONTENT] Found content: {getattr(content_obj, 'name', 'Unknown')}") + + # Get M3U account from relation + m3u_account = relation.m3u_account + logger.info(f"[VOD-ACCOUNT] Using M3U account: {m3u_account.name}") + + # Get stream URL from relation + stream_url = self._get_stream_url_from_relation(relation) + logger.info(f"[VOD-CONTENT] Content URL: {stream_url or 'No URL found'}") + + if not stream_url: + logger.error(f"[VOD-ERROR] No stream URL available for {content_type} {content_id}") + return 
HttpResponse("No stream URL available", status=503) + + # Get M3U profile (returns profile and current connection count) + profile_result = self._get_m3u_profile(m3u_account, profile_id, session_id) + + if not profile_result or not profile_result[0]: + logger.error(f"[VOD-ERROR] No suitable M3U profile found for {content_type} {content_id}") + return HttpResponse("No available stream", status=503) + + m3u_profile, current_connections = profile_result + logger.info(f"[VOD-PROFILE] Using M3U profile: {m3u_profile.id} (max_streams: {m3u_profile.max_streams}, current: {current_connections})") + + # Connection tracking is handled by the connection manager + # Transform URL based on profile + final_stream_url = self._transform_url(stream_url, m3u_profile) + logger.info(f"[VOD-URL] Final stream URL: {final_stream_url}") + + # Validate stream URL + if not final_stream_url or not final_stream_url.startswith(('http://', 'https://')): + logger.error(f"[VOD-ERROR] Invalid stream URL: {final_stream_url}") + return HttpResponse("Invalid stream URL", status=500) + + # Get connection manager (Redis-backed for multi-worker support) + connection_manager = MultiWorkerVODConnectionManager.get_instance() + + # Stream the content with session-based connection reuse + logger.info("[VOD-STREAM] Calling connection manager to stream content") + response = connection_manager.stream_content_with_session( + session_id=session_id, + content_obj=content_obj, + stream_url=final_stream_url, + m3u_profile=m3u_profile, + client_ip=client_ip, + client_user_agent=client_user_agent, + request=request, + utc_start=utc_start, + utc_end=utc_end, + offset=offset, + range_header=range_header + ) + + logger.info(f"[VOD-SUCCESS] Stream response created successfully, type: {type(response)}") + return response + + except Exception as e: + logger.error(f"[VOD-EXCEPTION] Error streaming {content_type} {content_id}: {e}", exc_info=True) + return HttpResponse(f"Streaming error: {str(e)}", status=500) + + def head(self, request, content_type, content_id, session_id=None, profile_id=None): + """ + Handle HEAD requests for FUSE filesystem integration + + Returns content length and session URL header for subsequent GET requests + """ + logger.info(f"[VOD-HEAD] HEAD request: {content_type}/{content_id}, session: {session_id}, profile: {profile_id}") + + try: + # Get client info for M3U profile selection + client_ip, client_user_agent = get_client_info(request) + logger.info(f"[VOD-HEAD] Client info - IP: {client_ip}, User-Agent: {client_user_agent[:50] if client_user_agent else 'None'}...") + + # If no session ID, create one (same logic as GET) + if not session_id: + new_session_id = f"vod_{int(time.time() * 1000)}_{random.randint(1000, 9999)}" + logger.info(f"[VOD-HEAD] Creating new session for HEAD: {new_session_id}") + + # Build session URL for response header + path_parts = request.path.rstrip('/').split('/') + if profile_id: + session_url = f"{'/'.join(path_parts)}/{new_session_id}/{profile_id}/" + else: + session_url = f"{'/'.join(path_parts)}/{new_session_id}" + + session_id = new_session_id + else: + # Session already in URL, construct the current session URL + session_url = request.path + logger.info(f"[VOD-HEAD] Using existing session: {session_id}") + + # Extract preferred M3U account ID and stream ID from query parameters + preferred_m3u_account_id = request.GET.get('m3u_account_id') + preferred_stream_id = request.GET.get('stream_id') + + if preferred_m3u_account_id: + try: + preferred_m3u_account_id = int(preferred_m3u_account_id) 
+ except (ValueError, TypeError): + logger.warning(f"[VOD-HEAD] Invalid m3u_account_id parameter: {preferred_m3u_account_id}") + preferred_m3u_account_id = None + + if preferred_stream_id: + logger.info(f"[VOD-HEAD] Preferred stream ID: {preferred_stream_id}") + + # Get content and relation (same as GET) + content_obj, relation = self._get_content_and_relation(content_type, content_id, preferred_m3u_account_id, preferred_stream_id) + if not content_obj or not relation: + logger.error(f"[VOD-HEAD] Content or relation not found: {content_type} {content_id}") + return HttpResponse("Content not found", status=404) + + # Get M3U account and stream URL + m3u_account = relation.m3u_account + stream_url = self._get_stream_url_from_relation(relation) + if not stream_url: + logger.error(f"[VOD-HEAD] No stream URL available for {content_type} {content_id}") + return HttpResponse("No stream URL available", status=503) + + # Get M3U profile (returns profile and current connection count) + profile_result = self._get_m3u_profile(m3u_account, profile_id, session_id) + if not profile_result or not profile_result[0]: + logger.error(f"[VOD-HEAD] No M3U profile found or all profiles at capacity") + return HttpResponse("No available stream", status=503) + + m3u_profile, current_connections = profile_result + + # Transform URL if needed + final_stream_url = self._transform_url(stream_url, m3u_profile) + + # Make a small range GET request to get content length since providers don't support HEAD + # We'll use a tiny range to minimize data transfer but get the headers we need + # Use M3U account's user agent as primary, client user agent as fallback + m3u_user_agent = m3u_account.get_user_agent().user_agent if m3u_account.get_user_agent() else None + headers = { + 'User-Agent': m3u_user_agent or client_user_agent or 'Dispatcharr/1.0', + 'Accept': '*/*', + 'Range': 'bytes=0-1' # Request only first 2 bytes + } + + logger.info(f"[VOD-HEAD] Making small range GET request to provider: {final_stream_url}") + response = requests.get(final_stream_url, headers=headers, timeout=30, allow_redirects=True, stream=True) + + # Check for range support - should be 206 for partial content + if response.status_code == 206: + # Parse Content-Range header to get total file size + content_range = response.headers.get('Content-Range', '') + if content_range: + # Content-Range: bytes 0-1/1234567890 + total_size = content_range.split('/')[-1] + logger.info(f"[VOD-HEAD] Got file size from Content-Range: {total_size}") + else: + logger.warning(f"[VOD-HEAD] No Content-Range header in 206 response") + total_size = response.headers.get('Content-Length', '0') + elif response.status_code == 200: + # Server doesn't support range requests, use Content-Length from full response + total_size = response.headers.get('Content-Length', '0') + logger.info(f"[VOD-HEAD] Server doesn't support ranges, got Content-Length: {total_size}") + else: + logger.error(f"[VOD-HEAD] Provider GET request failed: {response.status_code}") + return HttpResponse("Provider error", status=response.status_code) + + # Close the small range request - we don't need to keep this connection + response.close() + + # Store the total content length in Redis for the persistent connection to use + try: + import redis + from django.conf import settings + redis_host = getattr(settings, 'REDIS_HOST', 'localhost') + redis_port = int(getattr(settings, 'REDIS_PORT', 6379)) + redis_db = int(getattr(settings, 'REDIS_DB', 0)) + r = redis.StrictRedis(host=redis_host, port=redis_port, db=redis_db, 
decode_responses=True) + content_length_key = f"vod_content_length:{session_id}" + r.set(content_length_key, total_size, ex=1800) # Store for 30 minutes + logger.info(f"[VOD-HEAD] Stored total content length {total_size} for session {session_id}") + except Exception as e: + logger.error(f"[VOD-HEAD] Failed to store content length in Redis: {e}") + + # Now create a persistent connection for the session (if one doesn't exist) + # This ensures the FUSE GET requests will reuse the same connection + + connection_manager = MultiWorkerVODConnectionManager.get_instance() + + logger.info(f"[VOD-HEAD] Pre-creating persistent connection for session: {session_id}") + + # We don't actually stream content here, just ensure connection is ready + # The actual GET requests from FUSE will use the persistent connection + + # Use the total_size we extracted from the range response + provider_content_type = response.headers.get('Content-Type') + + if provider_content_type: + content_type_header = provider_content_type + logger.info(f"[VOD-HEAD] Using provider Content-Type: {content_type_header}") + else: + # Provider didn't send Content-Type, infer from URL + inferred_content_type = infer_content_type_from_url(final_stream_url) + if inferred_content_type: + content_type_header = inferred_content_type + logger.info(f"[VOD-HEAD] Provider missing Content-Type, inferred from URL: {content_type_header}") + else: + content_type_header = 'video/mp4' + logger.info(f"[VOD-HEAD] No Content-Type from provider and could not infer from URL, using default: {content_type_header}") + + logger.info(f"[VOD-HEAD] Provider response - Total Size: {total_size}, Type: {content_type_header}") + + # Create response with content length and session URL header + head_response = HttpResponse() + head_response['Content-Length'] = total_size + head_response['Content-Type'] = content_type_header + head_response['Accept-Ranges'] = 'bytes' + + # Custom header with session URL for FUSE + head_response['X-Session-URL'] = session_url + head_response['X-Dispatcharr-Session'] = session_id + + logger.info(f"[VOD-HEAD] Returning HEAD response with session URL: {session_url}") + return head_response + + except Exception as e: + logger.error(f"[VOD-HEAD] Error in HEAD request: {e}", exc_info=True) + return HttpResponse(f"HEAD error: {str(e)}", status=500) + + def _get_content_and_relation(self, content_type, content_id, preferred_m3u_account_id=None, preferred_stream_id=None): + """Get the content object and its M3U relation""" + try: + logger.info(f"[CONTENT-LOOKUP] Looking up {content_type} with UUID {content_id}") + if preferred_m3u_account_id: + logger.info(f"[CONTENT-LOOKUP] Preferred M3U account ID: {preferred_m3u_account_id}") + if preferred_stream_id: + logger.info(f"[CONTENT-LOOKUP] Preferred stream ID: {preferred_stream_id}") + + if content_type == 'movie': + content_obj = get_object_or_404(Movie, uuid=content_id) + logger.info(f"[CONTENT-FOUND] Movie: {content_obj.name} (ID: {content_obj.id})") + + # Filter by preferred stream ID first (most specific) + relations_query = content_obj.m3u_relations.filter(m3u_account__is_active=True) + if preferred_stream_id: + specific_relation = relations_query.filter(stream_id=preferred_stream_id).first() + if specific_relation: + logger.info(f"[STREAM-SELECTED] Using specific stream: {specific_relation.stream_id} from provider: {specific_relation.m3u_account.name}") + return content_obj, specific_relation + else: + logger.warning(f"[STREAM-FALLBACK] Preferred stream ID {preferred_stream_id} not found, 
falling back to account/priority selection") + + # Filter by preferred M3U account if specified + if preferred_m3u_account_id: + specific_relation = relations_query.filter(m3u_account__id=preferred_m3u_account_id).first() + if specific_relation: + logger.info(f"[PROVIDER-SELECTED] Using preferred provider: {specific_relation.m3u_account.name}") + return content_obj, specific_relation + else: + logger.warning(f"[PROVIDER-FALLBACK] Preferred M3U account {preferred_m3u_account_id} not found, using highest priority") + + # Get the highest priority active relation (fallback or default) + relation = relations_query.select_related('m3u_account').order_by('-m3u_account__priority', 'id').first() + + if relation: + logger.info(f"[PROVIDER-SELECTED] Using provider: {relation.m3u_account.name} (priority: {relation.m3u_account.priority})") + + return content_obj, relation + + elif content_type == 'episode': + content_obj = get_object_or_404(Episode, uuid=content_id) + logger.info(f"[CONTENT-FOUND] Episode: {content_obj.name} (ID: {content_obj.id}, Series: {content_obj.series.name})") + + # Filter by preferred stream ID first (most specific) + relations_query = content_obj.m3u_relations.filter(m3u_account__is_active=True) + if preferred_stream_id: + specific_relation = relations_query.filter(stream_id=preferred_stream_id).first() + if specific_relation: + logger.info(f"[STREAM-SELECTED] Using specific stream: {specific_relation.stream_id} from provider: {specific_relation.m3u_account.name}") + return content_obj, specific_relation + else: + logger.warning(f"[STREAM-FALLBACK] Preferred stream ID {preferred_stream_id} not found, falling back to account/priority selection") + + # Filter by preferred M3U account if specified + if preferred_m3u_account_id: + specific_relation = relations_query.filter(m3u_account__id=preferred_m3u_account_id).first() + if specific_relation: + logger.info(f"[PROVIDER-SELECTED] Using preferred provider: {specific_relation.m3u_account.name}") + return content_obj, specific_relation + else: + logger.warning(f"[PROVIDER-FALLBACK] Preferred M3U account {preferred_m3u_account_id} not found, using highest priority") + + # Get the highest priority active relation (fallback or default) + relation = relations_query.select_related('m3u_account').order_by('-m3u_account__priority', 'id').first() + + if relation: + logger.info(f"[PROVIDER-SELECTED] Using provider: {relation.m3u_account.name} (priority: {relation.m3u_account.priority})") + + return content_obj, relation + + elif content_type == 'series': + # For series, get the first episode + series = get_object_or_404(Series, uuid=content_id) + logger.info(f"[CONTENT-FOUND] Series: {series.name} (ID: {series.id})") + episode = series.episodes.first() + if not episode: + logger.error(f"[CONTENT-ERROR] No episodes found for series {series.name}") + return None, None + + logger.info(f"[CONTENT-FOUND] First episode: {episode.name} (ID: {episode.id})") + + # Filter by preferred stream ID first (most specific) + relations_query = episode.m3u_relations.filter(m3u_account__is_active=True) + if preferred_stream_id: + specific_relation = relations_query.filter(stream_id=preferred_stream_id).first() + if specific_relation: + logger.info(f"[STREAM-SELECTED] Using specific stream: {specific_relation.stream_id} from provider: {specific_relation.m3u_account.name}") + return episode, specific_relation + else: + logger.warning(f"[STREAM-FALLBACK] Preferred stream ID {preferred_stream_id} not found, falling back to account/priority selection") + + # Filter 
by preferred M3U account if specified + if preferred_m3u_account_id: + specific_relation = relations_query.filter(m3u_account__id=preferred_m3u_account_id).first() + if specific_relation: + logger.info(f"[PROVIDER-SELECTED] Using preferred provider: {specific_relation.m3u_account.name}") + return episode, specific_relation + else: + logger.warning(f"[PROVIDER-FALLBACK] Preferred M3U account {preferred_m3u_account_id} not found, using highest priority") + + # Get the highest priority active relation (fallback or default) + relation = relations_query.select_related('m3u_account').order_by('-m3u_account__priority', 'id').first() + + if relation: + logger.info(f"[PROVIDER-SELECTED] Using provider: {relation.m3u_account.name} (priority: {relation.m3u_account.priority})") + + return episode, relation + else: + logger.error(f"[CONTENT-ERROR] Invalid content type: {content_type}") + return None, None + + except Exception as e: + logger.error(f"Error getting content object: {e}") + return None, None + + def _get_stream_url_from_relation(self, relation): + """Get stream URL from the M3U relation""" + try: + # Log the relation type and available attributes + logger.info(f"[VOD-URL] Relation type: {type(relation).__name__}") + logger.info(f"[VOD-URL] Account type: {relation.m3u_account.account_type}") + logger.info(f"[VOD-URL] Stream ID: {getattr(relation, 'stream_id', 'N/A')}") + + # First try the get_stream_url method (this should build URLs dynamically) + if hasattr(relation, 'get_stream_url'): + url = relation.get_stream_url() + if url: + logger.info(f"[VOD-URL] Built URL from get_stream_url(): {url}") + return url + else: + logger.warning(f"[VOD-URL] get_stream_url() returned None") + + logger.error(f"[VOD-URL] Relation has no get_stream_url method or it failed") + return None + except Exception as e: + logger.error(f"[VOD-URL] Error getting stream URL from relation: {e}", exc_info=True) + return None + + def _get_m3u_profile(self, m3u_account, profile_id, session_id=None): + """Get appropriate M3U profile for streaming using Redis-based viewer counts + + Args: + m3u_account: M3UAccount instance + profile_id: Optional specific profile ID requested + session_id: Optional session ID to check for existing connections + + Returns: + tuple: (M3UAccountProfile, current_connections) or None if no profile found + """ + try: + from core.utils import RedisClient + redis_client = RedisClient.get_client() + + if not redis_client: + logger.warning("Redis not available, falling back to default profile") + default_profile = M3UAccountProfile.objects.filter( + m3u_account=m3u_account, + is_active=True, + is_default=True + ).first() + return (default_profile, 0) if default_profile else None + + # Check if this session already has an active connection + if session_id: + persistent_connection_key = f"vod_persistent_connection:{session_id}" + connection_data = redis_client.hgetall(persistent_connection_key) + + if connection_data: + # Decode Redis hash data + decoded_data = {} + for k, v in connection_data.items(): + k_str = k.decode('utf-8') if isinstance(k, bytes) else k + v_str = v.decode('utf-8') if isinstance(v, bytes) else v + decoded_data[k_str] = v_str + + existing_profile_id = decoded_data.get('m3u_profile_id') + if existing_profile_id: + try: + existing_profile = M3UAccountProfile.objects.get( + id=int(existing_profile_id), + m3u_account=m3u_account, + is_active=True + ) + # Get current connections for logging + profile_connections_key = f"profile_connections:{existing_profile.id}" + current_connections = 
int(redis_client.get(profile_connections_key) or 0) + + logger.info(f"[PROFILE-SELECTION] Session {session_id} reusing existing profile {existing_profile.id}: {current_connections}/{existing_profile.max_streams} connections") + return (existing_profile, current_connections) + except (M3UAccountProfile.DoesNotExist, ValueError): + logger.warning(f"[PROFILE-SELECTION] Session {session_id} has invalid profile ID {existing_profile_id}, selecting new profile") + except Exception as e: + logger.warning(f"[PROFILE-SELECTION] Error checking existing profile for session {session_id}: {e}") + else: + logger.debug(f"[PROFILE-SELECTION] Session {session_id} exists but has no profile ID stored") # If specific profile requested, try to use it + if profile_id: + try: + profile = M3UAccountProfile.objects.get( + id=profile_id, + m3u_account=m3u_account, + is_active=True + ) + # Check Redis-based current connections + profile_connections_key = f"profile_connections:{profile.id}" + current_connections = int(redis_client.get(profile_connections_key) or 0) + + if profile.max_streams == 0 or current_connections < profile.max_streams: + logger.info(f"[PROFILE-SELECTION] Using requested profile {profile.id}: {current_connections}/{profile.max_streams} connections") + return (profile, current_connections) + else: + logger.warning(f"[PROFILE-SELECTION] Requested profile {profile.id} is at capacity: {current_connections}/{profile.max_streams}") + except M3UAccountProfile.DoesNotExist: + logger.warning(f"[PROFILE-SELECTION] Requested profile {profile_id} not found") + + # Get active profiles ordered by priority (default first) + m3u_profiles = M3UAccountProfile.objects.filter( + m3u_account=m3u_account, + is_active=True + ) + + default_profile = m3u_profiles.filter(is_default=True).first() + if not default_profile: + logger.error(f"[PROFILE-SELECTION] No default profile found for M3U account {m3u_account.id}") + return None + + # Check profiles in order: default first, then others + profiles = [default_profile] + list(m3u_profiles.filter(is_default=False)) + + for profile in profiles: + profile_connections_key = f"profile_connections:{profile.id}" + current_connections = int(redis_client.get(profile_connections_key) or 0) + + # Check if profile has available connection slots + if profile.max_streams == 0 or current_connections < profile.max_streams: + logger.info(f"[PROFILE-SELECTION] Selected profile {profile.id} ({profile.name}): {current_connections}/{profile.max_streams} connections") + return (profile, current_connections) + else: + logger.debug(f"[PROFILE-SELECTION] Profile {profile.id} at capacity: {current_connections}/{profile.max_streams}") + + # All profiles are at capacity - return None to trigger error response + logger.error(f"[PROFILE-SELECTION] All profiles at capacity for M3U account {m3u_account.id}, rejecting request") + return None + + except Exception as e: + logger.error(f"Error getting M3U profile: {e}") + return None + + def _transform_url(self, original_url, m3u_profile): + """Transform URL based on M3U profile settings""" + try: + import re + + if not original_url: + return None + + search_pattern = m3u_profile.search_pattern + replace_pattern = m3u_profile.replace_pattern + safe_replace_pattern = re.sub(r'\$(\d+)', r'\\\1', replace_pattern) + + if search_pattern and replace_pattern: + transformed_url = re.sub(search_pattern, safe_replace_pattern, original_url) + return transformed_url + + return original_url + + except Exception as e: + logger.error(f"Error transforming URL: {e}") + return 
original_url + +@method_decorator(csrf_exempt, name='dispatch') +class VODPlaylistView(View): + """Generate M3U playlists for VOD content""" + + def get(self, request, profile_id=None): + """Generate VOD playlist""" + try: + # Get profile if specified + m3u_profile = None + if profile_id: + try: + m3u_profile = M3UAccountProfile.objects.get( + id=profile_id, + is_active=True + ) + except M3UAccountProfile.DoesNotExist: + return HttpResponse("Profile not found", status=404) + + # Generate playlist content + playlist_content = self._generate_playlist(m3u_profile) + + response = HttpResponse(playlist_content, content_type='application/vnd.apple.mpegurl') + response['Content-Disposition'] = 'attachment; filename="vod_playlist.m3u8"' + return response + + except Exception as e: + logger.error(f"Error generating VOD playlist: {e}") + return HttpResponse("Playlist generation error", status=500) + + def _generate_playlist(self, m3u_profile=None): + """Generate M3U playlist content for VOD""" + lines = ["#EXTM3U"] + + # Add movies + movies = Movie.objects.filter(is_active=True) + if m3u_profile: + movies = movies.filter(m3u_account=m3u_profile.m3u_account) + + for movie in movies: + profile_param = f"?profile={m3u_profile.id}" if m3u_profile else "" + lines.append(f'#EXTINF:-1 tvg-id="{movie.tmdb_id}" group-title="Movies",{movie.title}') + lines.append(f'/proxy/vod/movie/{movie.uuid}/{profile_param}') + + # Add series + series_list = Series.objects.filter(is_active=True) + if m3u_profile: + series_list = series_list.filter(m3u_account=m3u_profile.m3u_account) + + for series in series_list: + for episode in series.episodes.all(): + profile_param = f"?profile={m3u_profile.id}" if m3u_profile else "" + episode_title = f"{series.title} - S{episode.season_number:02d}E{episode.episode_number:02d}" + lines.append(f'#EXTINF:-1 tvg-id="{series.tmdb_id}" group-title="Series",{episode_title}') + lines.append(f'/proxy/vod/episode/{episode.uuid}/{profile_param}') + + return '\n'.join(lines) + + +@method_decorator(csrf_exempt, name='dispatch') +class VODPositionView(View): + """Handle VOD position updates""" + + def post(self, request, content_id): + """Update playback position for VOD content""" + try: + import json + data = json.loads(request.body) + client_id = data.get('client_id') + position = data.get('position', 0) + + # Find the content object + content_obj = None + try: + content_obj = Movie.objects.get(uuid=content_id) + except Movie.DoesNotExist: + try: + content_obj = Episode.objects.get(uuid=content_id) + except Episode.DoesNotExist: + return JsonResponse({'error': 'Content not found'}, status=404) + + # Here you could store the position in a model or cache + # For now, just return success + logger.info(f"Position update for {content_obj.__class__.__name__} {content_id}: {position}s") + + return JsonResponse({ + 'success': True, + 'content_id': str(content_id), + 'position': position + }) + + except Exception as e: + logger.error(f"Error updating VOD position: {e}") + return JsonResponse({'error': str(e)}, status=500) + + +@method_decorator(csrf_exempt, name='dispatch') +class VODStatsView(View): + """Get VOD connection statistics""" + + def get(self, request): + """Get current VOD connection statistics""" + try: + connection_manager = MultiWorkerVODConnectionManager.get_instance() + redis_client = connection_manager.redis_client + + if not redis_client: + return JsonResponse({'error': 'Redis not available'}, status=500) + + # Get all VOD persistent connections (consolidated data) + pattern = 
"vod_persistent_connection:*" + cursor = 0 + connections = [] + current_time = time.time() + + while True: + cursor, keys = redis_client.scan(cursor, match=pattern, count=100) + + for key in keys: + try: + key_str = key.decode('utf-8') if isinstance(key, bytes) else key + connection_data = redis_client.hgetall(key) + + if connection_data: + # Extract session ID from key + session_id = key_str.replace('vod_persistent_connection:', '') + + # Decode Redis hash data + combined_data = {} + for k, v in connection_data.items(): + k_str = k.decode('utf-8') if isinstance(k, bytes) else k + v_str = v.decode('utf-8') if isinstance(v, bytes) else v + combined_data[k_str] = v_str + + # Get content info from the connection data (using correct field names) + content_type = combined_data.get('content_obj_type', 'unknown') + content_uuid = combined_data.get('content_uuid', 'unknown') + client_id = session_id + + # Get content info with enhanced metadata + content_name = "Unknown" + content_metadata = {} + try: + if content_type == 'movie': + content_obj = Movie.objects.select_related('logo').get(uuid=content_uuid) + content_name = content_obj.name + + # Get duration from content object + duration_secs = None + if hasattr(content_obj, 'duration_secs') and content_obj.duration_secs: + duration_secs = content_obj.duration_secs + + # If we don't have duration_secs, try to calculate it from file size and position data + if not duration_secs: + file_size_bytes = int(combined_data.get('total_content_size', 0)) + last_seek_byte = int(combined_data.get('last_seek_byte', 0)) + last_seek_percentage = float(combined_data.get('last_seek_percentage', 0.0)) + + # Calculate position if we have the required data + if file_size_bytes and file_size_bytes > 0 and last_seek_percentage > 0: + # If we know the seek percentage and current time position, we can estimate duration + # But we need to know the current time position in seconds first + # For now, let's use a rough estimate based on file size and typical bitrates + # This is a fallback - ideally duration should be in the database + estimated_duration = 6000 # 100 minutes as default for movies + duration_secs = estimated_duration + + content_metadata = { + 'year': content_obj.year, + 'rating': content_obj.rating, + 'genre': content_obj.genre, + 'duration_secs': duration_secs, + 'description': content_obj.description, + 'logo_url': content_obj.logo.url if content_obj.logo else None, + 'tmdb_id': content_obj.tmdb_id, + 'imdb_id': content_obj.imdb_id + } + elif content_type == 'episode': + content_obj = Episode.objects.select_related('series', 'series__logo').get(uuid=content_uuid) + content_name = f"{content_obj.series.name} - {content_obj.name}" + + # Get duration from content object + duration_secs = None + if hasattr(content_obj, 'duration_secs') and content_obj.duration_secs: + duration_secs = content_obj.duration_secs + + # If we don't have duration_secs, estimate for episodes + if not duration_secs: + estimated_duration = 2400 # 40 minutes as default for episodes + duration_secs = estimated_duration + + content_metadata = { + 'series_name': content_obj.series.name, + 'episode_name': content_obj.name, + 'season_number': content_obj.season_number, + 'episode_number': content_obj.episode_number, + 'air_date': content_obj.air_date.isoformat() if content_obj.air_date else None, + 'rating': content_obj.rating, + 'duration_secs': duration_secs, + 'description': content_obj.description, + 'logo_url': content_obj.series.logo.url if content_obj.series.logo else None, + 
'series_year': content_obj.series.year, + 'series_genre': content_obj.series.genre, + 'tmdb_id': content_obj.tmdb_id, + 'imdb_id': content_obj.imdb_id + } + except: + pass + + # Get M3U profile information + m3u_profile_info = {} + m3u_profile_id = combined_data.get('m3u_profile_id') + if m3u_profile_id: + try: + from apps.m3u.models import M3UAccountProfile + profile = M3UAccountProfile.objects.select_related('m3u_account').get(id=m3u_profile_id) + m3u_profile_info = { + 'profile_name': profile.name, + 'account_name': profile.m3u_account.name, + 'account_id': profile.m3u_account.id, + 'max_streams': profile.m3u_account.max_streams, + 'm3u_profile_id': int(m3u_profile_id) + } + except Exception as e: + logger.warning(f"Could not fetch M3U profile {m3u_profile_id}: {e}") + + # Also try to get profile info from stored data if database lookup fails + if not m3u_profile_info and combined_data.get('m3u_profile_name'): + m3u_profile_info = { + 'profile_name': combined_data.get('m3u_profile_name', 'Unknown Profile'), + 'm3u_profile_id': combined_data.get('m3u_profile_id'), + 'account_name': 'Unknown Account' # We don't store account name directly + } + + # Calculate estimated current position based on seek percentage or last known position + last_known_position = int(combined_data.get('position_seconds', 0)) + last_position_update = combined_data.get('last_position_update') + last_seek_percentage = float(combined_data.get('last_seek_percentage', 0.0)) + last_seek_timestamp = float(combined_data.get('last_seek_timestamp', 0.0)) + estimated_position = last_known_position + + # If we have seek percentage and content duration, calculate position from that + if last_seek_percentage > 0 and content_metadata.get('duration_secs'): + try: + duration_secs = int(content_metadata['duration_secs']) + # Calculate position from seek percentage + seek_position = int((last_seek_percentage / 100) * duration_secs) + + # If we have a recent seek timestamp, add elapsed time since seek + if last_seek_timestamp > 0: + elapsed_since_seek = current_time - last_seek_timestamp + # Add elapsed time but don't exceed content duration + estimated_position = min( + seek_position + int(elapsed_since_seek), + duration_secs + ) + else: + estimated_position = seek_position + except (ValueError, TypeError): + pass + elif last_position_update and content_metadata.get('duration_secs'): + # Fallback: use time-based estimation from position_seconds + try: + update_timestamp = float(last_position_update) + elapsed_since_update = current_time - update_timestamp + # Add elapsed time to last known position, but don't exceed content duration + estimated_position = min( + last_known_position + int(elapsed_since_update), + int(content_metadata['duration_secs']) + ) + except (ValueError, TypeError): + # If timestamp parsing fails, fall back to last known position + estimated_position = last_known_position + + connection_info = { + 'content_type': content_type, + 'content_uuid': content_uuid, + 'content_name': content_name, + 'content_metadata': content_metadata, + 'm3u_profile': m3u_profile_info, + 'client_id': client_id, + 'client_ip': combined_data.get('client_ip', 'Unknown'), + 'user_agent': combined_data.get('client_user_agent', 'Unknown'), + 'connected_at': combined_data.get('created_at'), + 'last_activity': combined_data.get('last_activity'), + 'm3u_profile_id': m3u_profile_id, + 'position_seconds': estimated_position, # Use estimated position + 'last_known_position': last_known_position, # Include raw position for debugging + 
'last_position_update': last_position_update, # Include timestamp for frontend use + 'bytes_sent': int(combined_data.get('bytes_sent', 0)), + # Seek/range information for position calculation and frontend display + 'last_seek_byte': int(combined_data.get('last_seek_byte', 0)), + 'last_seek_percentage': float(combined_data.get('last_seek_percentage', 0.0)), + 'total_content_size': int(combined_data.get('total_content_size', 0)), + 'last_seek_timestamp': float(combined_data.get('last_seek_timestamp', 0.0)) + } + + # Calculate connection duration + duration_calculated = False + if connection_info['connected_at']: + try: + connected_time = float(connection_info['connected_at']) + duration = current_time - connected_time + connection_info['duration'] = int(duration) + duration_calculated = True + except: + pass + + # Fallback: use last_activity if connected_at is not available + if not duration_calculated and connection_info['last_activity']: + try: + last_activity_time = float(connection_info['last_activity']) + # Estimate connection duration using client_id timestamp if available + if connection_info['client_id'].startswith('vod_'): + # Extract timestamp from client_id (format: vod_timestamp_random) + parts = connection_info['client_id'].split('_') + if len(parts) >= 2: + client_start_time = float(parts[1]) / 1000.0 # Convert ms to seconds + duration = current_time - client_start_time + connection_info['duration'] = int(duration) + duration_calculated = True + except: + pass + + # Final fallback + if not duration_calculated: + connection_info['duration'] = 0 + + connections.append(connection_info) + + except Exception as e: + logger.error(f"Error processing connection key {key}: {e}") + + if cursor == 0: + break + + # Group connections by content + content_stats = {} + for conn in connections: + content_key = f"{conn['content_type']}:{conn['content_uuid']}" + if content_key not in content_stats: + content_stats[content_key] = { + 'content_type': conn['content_type'], + 'content_name': conn['content_name'], + 'content_uuid': conn['content_uuid'], + 'content_metadata': conn['content_metadata'], + 'connection_count': 0, + 'connections': [] + } + content_stats[content_key]['connection_count'] += 1 + content_stats[content_key]['connections'].append(conn) + + return JsonResponse({ + 'vod_connections': list(content_stats.values()), + 'total_connections': len(connections), + 'timestamp': current_time + }) + + except Exception as e: + logger.error(f"Error getting VOD stats: {e}") + return JsonResponse({'error': str(e)}, status=500) + + +from rest_framework.decorators import api_view, permission_classes +from apps.accounts.permissions import IsAdmin + + +@csrf_exempt +@api_view(["POST"]) +@permission_classes([IsAdmin]) +def stop_vod_client(request): + """Stop a specific VOD client connection using stop signal mechanism""" + try: + # Parse request body + import json + try: + data = json.loads(request.body) + except json.JSONDecodeError: + return JsonResponse({'error': 'Invalid JSON'}, status=400) + + client_id = data.get('client_id') + if not client_id: + return JsonResponse({'error': 'No client_id provided'}, status=400) + + logger.info(f"Request to stop VOD client: {client_id}") + + # Get Redis client + connection_manager = MultiWorkerVODConnectionManager.get_instance() + redis_client = connection_manager.redis_client + + if not redis_client: + return JsonResponse({'error': 'Redis not available'}, status=500) + + # Check if connection exists + connection_key = f"vod_persistent_connection:{client_id}" 
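For context, a minimal sketch of the consumer side of this stop-signal mechanism, assuming the streaming worker has the same Redis client and the get_vod_client_stop_key() helper in scope (both are already used in this module); the worker loop itself is not part of this hunk, so the generator shown in the comment is hypothetical.

def client_should_stop(redis_client, client_id):
    """Return True once an admin has requested this VOD client to stop."""
    # stop_vod_client() below writes this key with a 60-second TTL,
    # so a missing key simply means "keep streaming".
    return redis_client.exists(get_vod_client_stop_key(client_id)) > 0

# Hypothetical usage inside the worker's streaming generator:
#     for chunk in upstream_response.iter_content(chunk_size=8192):
#         if client_should_stop(redis_client, client_id):
#             break
#         yield chunk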
+ connection_data = redis_client.hgetall(connection_key) + if not connection_data: + logger.warning(f"VOD connection not found: {client_id}") + return JsonResponse({'error': 'Connection not found'}, status=404) + + # Set a stop signal key that the worker will check + stop_key = get_vod_client_stop_key(client_id) + redis_client.setex(stop_key, 60, "true") # 60 second TTL + + logger.info(f"Set stop signal for VOD client: {client_id}") + + return JsonResponse({ + 'message': 'VOD client stop signal sent', + 'client_id': client_id, + 'stop_key': stop_key + }) + + except Exception as e: + logger.error(f"Error stopping VOD client: {e}", exc_info=True) + return JsonResponse({'error': str(e)}, status=500) + + diff --git a/apps/vod/__init__.py b/apps/vod/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/apps/vod/admin.py b/apps/vod/admin.py new file mode 100644 index 00000000..c660f310 --- /dev/null +++ b/apps/vod/admin.py @@ -0,0 +1,67 @@ +from django.contrib import admin +from .models import ( + Series, VODCategory, Movie, Episode, + M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation +) + + +@admin.register(VODCategory) +class VODCategoryAdmin(admin.ModelAdmin): + list_display = ['name', 'category_type', 'created_at'] + list_filter = ['category_type', 'created_at'] + search_fields = ['name'] + + +@admin.register(Series) +class SeriesAdmin(admin.ModelAdmin): + list_display = ['name', 'year', 'genre', 'created_at'] + list_filter = ['year', 'created_at'] + search_fields = ['name', 'description', 'tmdb_id', 'imdb_id'] + readonly_fields = ['uuid', 'created_at', 'updated_at'] + + +@admin.register(Movie) +class MovieAdmin(admin.ModelAdmin): + list_display = ['name', 'year', 'genre', 'duration_secs', 'created_at'] + list_filter = ['year', 'created_at'] + search_fields = ['name', 'description', 'tmdb_id', 'imdb_id'] + readonly_fields = ['uuid', 'created_at', 'updated_at'] + + def get_queryset(self, request): + return super().get_queryset(request).select_related('logo') + + +@admin.register(Episode) +class EpisodeAdmin(admin.ModelAdmin): + list_display = ['name', 'series', 'season_number', 'episode_number', 'duration_secs', 'created_at'] + list_filter = ['series', 'season_number', 'created_at'] + search_fields = ['name', 'description', 'series__name'] + readonly_fields = ['uuid', 'created_at', 'updated_at'] + + def get_queryset(self, request): + return super().get_queryset(request).select_related('series') + + +@admin.register(M3UMovieRelation) +class M3UMovieRelationAdmin(admin.ModelAdmin): + list_display = ['movie', 'm3u_account', 'category', 'stream_id', 'created_at'] + list_filter = ['m3u_account', 'category', 'created_at'] + search_fields = ['movie__name', 'm3u_account__name', 'stream_id'] + readonly_fields = ['created_at', 'updated_at'] + + +@admin.register(M3USeriesRelation) +class M3USeriesRelationAdmin(admin.ModelAdmin): + list_display = ['series', 'm3u_account', 'category', 'external_series_id', 'created_at'] + list_filter = ['m3u_account', 'category', 'created_at'] + search_fields = ['series__name', 'm3u_account__name', 'external_series_id'] + readonly_fields = ['created_at', 'updated_at'] + + +@admin.register(M3UEpisodeRelation) +class M3UEpisodeRelationAdmin(admin.ModelAdmin): + list_display = ['episode', 'm3u_account', 'stream_id', 'created_at'] + list_filter = ['m3u_account', 'created_at'] + search_fields = ['episode__name', 'episode__series__name', 'm3u_account__name', 'stream_id'] + readonly_fields = ['created_at', 'updated_at'] + diff --git a/apps/vod/api_urls.py 
b/apps/vod/api_urls.py new file mode 100644 index 00000000..e897bd28 --- /dev/null +++ b/apps/vod/api_urls.py @@ -0,0 +1,22 @@ +from django.urls import path, include +from rest_framework.routers import DefaultRouter +from .api_views import ( + MovieViewSet, + EpisodeViewSet, + SeriesViewSet, + VODCategoryViewSet, + UnifiedContentViewSet, + VODLogoViewSet, +) + +app_name = 'vod' + +router = DefaultRouter() +router.register(r'movies', MovieViewSet, basename='movie') +router.register(r'episodes', EpisodeViewSet, basename='episode') +router.register(r'series', SeriesViewSet, basename='series') +router.register(r'categories', VODCategoryViewSet, basename='vodcategory') +router.register(r'all', UnifiedContentViewSet, basename='unified-content') +router.register(r'vodlogos', VODLogoViewSet, basename='vodlogo') + +urlpatterns = router.urls diff --git a/apps/vod/api_views.py b/apps/vod/api_views.py new file mode 100644 index 00000000..3bd984e6 --- /dev/null +++ b/apps/vod/api_views.py @@ -0,0 +1,899 @@ +from rest_framework import viewsets, status +from rest_framework.response import Response +from rest_framework.decorators import action +from rest_framework.filters import SearchFilter, OrderingFilter +from rest_framework.pagination import PageNumberPagination +from rest_framework.permissions import AllowAny +from django_filters.rest_framework import DjangoFilterBackend +from django.shortcuts import get_object_or_404 +from django.http import StreamingHttpResponse, HttpResponse, FileResponse +from django.db.models import Q +import django_filters +import logging +import os +import requests +from apps.accounts.permissions import ( + Authenticated, + permission_classes_by_action, +) +from .models import ( + Series, VODCategory, Movie, Episode, VODLogo, + M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation, M3UVODCategoryRelation +) +from .serializers import ( + MovieSerializer, + EpisodeSerializer, + SeriesSerializer, + VODCategorySerializer, + VODLogoSerializer, + M3UMovieRelationSerializer, + M3USeriesRelationSerializer, + M3UEpisodeRelationSerializer +) +from .tasks import refresh_series_episodes, refresh_movie_advanced_data +from django.utils import timezone +from datetime import timedelta + +logger = logging.getLogger(__name__) + + +class VODPagination(PageNumberPagination): + page_size = 20 # Default page size to match frontend default + page_size_query_param = "page_size" # Allow clients to specify page size + max_page_size = 100 # Prevent excessive page sizes for VOD content + + +class MovieFilter(django_filters.FilterSet): + name = django_filters.CharFilter(lookup_expr="icontains") + m3u_account = django_filters.NumberFilter(field_name="m3u_relations__m3u_account__id") + category = django_filters.CharFilter(method='filter_category') + year = django_filters.NumberFilter() + year_gte = django_filters.NumberFilter(field_name="year", lookup_expr="gte") + year_lte = django_filters.NumberFilter(field_name="year", lookup_expr="lte") + + class Meta: + model = Movie + fields = ['name', 'm3u_account', 'category', 'year'] + + def filter_category(self, queryset, name, value): + """Custom category filter that handles 'name|type' format""" + if not value: + return queryset + + # Handle the format 'category_name|category_type' + if '|' in value: + category_name, category_type = value.rsplit('|', 1) + return queryset.filter( + m3u_relations__category__name=category_name, + m3u_relations__category__category_type=category_type + ) + else: + # Fallback: treat as category name only + return 
queryset.filter(m3u_relations__category__name=value) + + +class MovieViewSet(viewsets.ReadOnlyModelViewSet): + """ViewSet for Movie content""" + queryset = Movie.objects.all() + serializer_class = MovieSerializer + pagination_class = VODPagination + + filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter] + filterset_class = MovieFilter + search_fields = ['name', 'description', 'genre'] + ordering_fields = ['name', 'year', 'created_at'] + ordering = ['name'] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + + def get_queryset(self): + # Only return movies that have active M3U relations + return Movie.objects.filter( + m3u_relations__m3u_account__is_active=True + ).distinct().select_related('logo').prefetch_related('m3u_relations__m3u_account') + + @action(detail=True, methods=['get'], url_path='providers') + def get_providers(self, request, pk=None): + """Get all providers (M3U accounts) that have this movie""" + movie = self.get_object() + relations = M3UMovieRelation.objects.filter( + movie=movie, + m3u_account__is_active=True + ).select_related('m3u_account', 'category') + + serializer = M3UMovieRelationSerializer(relations, many=True) + return Response(serializer.data) + + + @action(detail=True, methods=['get'], url_path='provider-info') + def provider_info(self, request, pk=None): + """Get detailed movie information from the original provider, throttled to 24h.""" + movie = self.get_object() + + # Get the highest priority active relation + relation = M3UMovieRelation.objects.filter( + movie=movie, + m3u_account__is_active=True + ).select_related('m3u_account').order_by('-m3u_account__priority', 'id').first() + + if not relation: + return Response( + {'error': 'No active M3U account associated with this movie'}, + status=status.HTTP_400_BAD_REQUEST + ) + + force_refresh = request.query_params.get('force_refresh', 'false').lower() == 'true' + now = timezone.now() + needs_refresh = ( + force_refresh or + not relation.last_advanced_refresh or + (now - relation.last_advanced_refresh).total_seconds() > 86400 + ) + + if needs_refresh: + # Trigger advanced data refresh + logger.debug(f"Refreshing advanced data for movie {movie.id} (relation ID: {relation.id})") + refresh_movie_advanced_data(relation.id, force_refresh=force_refresh) + + # Refresh objects from database after task completion + movie.refresh_from_db() + relation.refresh_from_db() + + # Use refreshed data from database + custom_props = relation.custom_properties or {} + info = custom_props.get('detailed_info', {}) + movie_data = custom_props.get('movie_data', {}) + + # Build response with available data + response_data = { + 'id': movie.id, + 'uuid': movie.uuid, + 'stream_id': relation.stream_id, + 'name': info.get('name', movie.name), + 'o_name': info.get('o_name', ''), + 'description': info.get('description', info.get('plot', movie.description)), + 'plot': info.get('plot', info.get('description', movie.description)), + 'year': movie.year or info.get('year'), + 'release_date': (movie.custom_properties or {}).get('release_date') or info.get('release_date') or info.get('releasedate', ''), + 'genre': movie.genre or info.get('genre', ''), + 'director': (movie.custom_properties or {}).get('director') or info.get('director', ''), + 'actors': (movie.custom_properties or {}).get('actors') or info.get('actors', ''), + 'country': (movie.custom_properties or {}).get('country') or info.get('country', ''), + 'rating': 
movie.rating or info.get('rating', movie.rating or 0), + 'tmdb_id': movie.tmdb_id or info.get('tmdb_id', ''), + 'imdb_id': movie.imdb_id or info.get('imdb_id', ''), + 'youtube_trailer': (movie.custom_properties or {}).get('youtube_trailer') or info.get('youtube_trailer') or info.get('trailer', ''), + 'duration_secs': movie.duration_secs or info.get('duration_secs'), + 'age': info.get('age', ''), + 'backdrop_path': (movie.custom_properties or {}).get('backdrop_path') or info.get('backdrop_path', []), + 'cover': info.get('cover_big', ''), + 'cover_big': info.get('cover_big', ''), + 'movie_image': movie.logo.url if movie.logo else info.get('movie_image', ''), + 'bitrate': info.get('bitrate', 0), + 'video': info.get('video', {}), + 'audio': info.get('audio', {}), + 'container_extension': movie_data.get('container_extension', 'mp4'), + 'direct_source': movie_data.get('direct_source', ''), + 'category_id': movie_data.get('category_id', ''), + 'added': movie_data.get('added', ''), + 'm3u_account': { + 'id': relation.m3u_account.id, + 'name': relation.m3u_account.name, + 'account_type': relation.m3u_account.account_type + } + } + return Response(response_data) + +class EpisodeFilter(django_filters.FilterSet): + name = django_filters.CharFilter(lookup_expr="icontains") + series = django_filters.NumberFilter(field_name="series__id") + m3u_account = django_filters.NumberFilter(field_name="m3u_account__id") + season_number = django_filters.NumberFilter() + episode_number = django_filters.NumberFilter() + + class Meta: + model = Episode + fields = ['name', 'series', 'm3u_account', 'season_number', 'episode_number'] + + +class SeriesFilter(django_filters.FilterSet): + name = django_filters.CharFilter(lookup_expr="icontains") + m3u_account = django_filters.NumberFilter(field_name="m3u_relations__m3u_account__id") + category = django_filters.CharFilter(method='filter_category') + year = django_filters.NumberFilter() + year_gte = django_filters.NumberFilter(field_name="year", lookup_expr="gte") + year_lte = django_filters.NumberFilter(field_name="year", lookup_expr="lte") + + class Meta: + model = Series + fields = ['name', 'm3u_account', 'category', 'year'] + + def filter_category(self, queryset, name, value): + """Custom category filter that handles 'name|type' format""" + if not value: + return queryset + + # Handle the format 'category_name|category_type' + if '|' in value: + category_name, category_type = value.rsplit('|', 1) + return queryset.filter( + m3u_relations__category__name=category_name, + m3u_relations__category__category_type=category_type + ) + else: + # Fallback: treat as category name only + return queryset.filter(m3u_relations__category__name=value) + + +class EpisodeViewSet(viewsets.ReadOnlyModelViewSet): + """ViewSet for Episode content""" + queryset = Episode.objects.all() + serializer_class = EpisodeSerializer + pagination_class = VODPagination + + filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter] + filterset_class = EpisodeFilter + search_fields = ['name', 'description'] + ordering_fields = ['name', 'season_number', 'episode_number', 'created_at'] + ordering = ['series__name', 'season_number', 'episode_number'] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + + def get_queryset(self): + return Episode.objects.select_related( + 'series', 'm3u_account' + ).filter(m3u_account__is_active=True) + + +class SeriesViewSet(viewsets.ReadOnlyModelViewSet): + 
"""ViewSet for Series management""" + queryset = Series.objects.all() + serializer_class = SeriesSerializer + pagination_class = VODPagination + + filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter] + filterset_class = SeriesFilter + search_fields = ['name', 'description', 'genre'] + ordering_fields = ['name', 'year', 'created_at'] + ordering = ['name'] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + + def get_queryset(self): + # Only return series that have active M3U relations + return Series.objects.filter( + m3u_relations__m3u_account__is_active=True + ).distinct().select_related('logo').prefetch_related('episodes', 'm3u_relations__m3u_account') + + @action(detail=True, methods=['get'], url_path='providers') + def get_providers(self, request, pk=None): + """Get all providers (M3U accounts) that have this series""" + series = self.get_object() + relations = M3USeriesRelation.objects.filter( + series=series, + m3u_account__is_active=True + ).select_related('m3u_account', 'category') + + serializer = M3USeriesRelationSerializer(relations, many=True) + return Response(serializer.data) + + @action(detail=True, methods=['get'], url_path='episodes') + def get_episodes(self, request, pk=None): + """Get episodes for this series with provider information""" + series = self.get_object() + episodes = Episode.objects.filter(series=series).prefetch_related( + 'm3u_relations__m3u_account' + ).order_by('season_number', 'episode_number') + + episodes_data = [] + for episode in episodes: + episode_serializer = EpisodeSerializer(episode) + episode_data = episode_serializer.data + + # Add provider information + relations = M3UEpisodeRelation.objects.filter( + episode=episode, + m3u_account__is_active=True + ).select_related('m3u_account') + + episode_data['providers'] = M3UEpisodeRelationSerializer(relations, many=True).data + episodes_data.append(episode_data) + + return Response(episodes_data) + + @action(detail=True, methods=['get'], url_path='provider-info') + def series_info(self, request, pk=None): + """Get detailed series information, refreshing from provider if needed""" + logger.debug(f"SeriesViewSet.series_info called for series ID: {pk}") + series = self.get_object() + logger.debug(f"Retrieved series: {series.name} (ID: {series.id})") + + # Get the highest priority active relation + relation = M3USeriesRelation.objects.filter( + series=series, + m3u_account__is_active=True + ).select_related('m3u_account').order_by('-m3u_account__priority', 'id').first() + + if not relation: + return Response( + {'error': 'No active M3U account associated with this series'}, + status=status.HTTP_400_BAD_REQUEST + ) + + try: + # Check if we should refresh data (optional force refresh parameter) + force_refresh = request.query_params.get('force_refresh', 'false').lower() == 'true' + refresh_interval_hours = int(request.query_params.get("refresh_interval", 24)) # Default to 24 hours + + now = timezone.now() + last_refreshed = relation.last_episode_refresh + + # Check if detailed data has been fetched + custom_props = relation.custom_properties or {} + episodes_fetched = custom_props.get('episodes_fetched', False) + detailed_fetched = custom_props.get('detailed_fetched', False) + + # Force refresh if episodes have never been fetched or if forced + if not episodes_fetched or not detailed_fetched or force_refresh: + force_refresh = True + logger.debug(f"Series {series.id} needs 
detailed/episode refresh, forcing refresh") + elif last_refreshed is None or (now - last_refreshed) > timedelta(hours=refresh_interval_hours): + force_refresh = True + logger.debug(f"Series {series.id} refresh interval exceeded or never refreshed, forcing refresh") + + if force_refresh: + logger.debug(f"Refreshing series {series.id} data from provider") + # Use existing refresh logic with external_series_id + from .tasks import refresh_series_episodes + account = relation.m3u_account + if account and account.is_active: + refresh_series_episodes(account, series, relation.external_series_id) + series.refresh_from_db() # Reload from database after refresh + relation.refresh_from_db() # Reload relation too + + # Return the database data (which should now be fresh) + custom_props = relation.custom_properties or {} + response_data = { + 'id': series.id, + 'series_id': relation.external_series_id, + 'name': series.name, + 'description': series.description, + 'year': series.year, + 'genre': series.genre, + 'rating': series.rating, + 'tmdb_id': series.tmdb_id, + 'imdb_id': series.imdb_id, + 'category_id': relation.category.id if relation.category else None, + 'category_name': relation.category.name if relation.category else None, + 'cover': { + 'id': series.logo.id, + 'url': series.logo.url, + 'name': series.logo.name, + } if series.logo else None, + 'last_refreshed': series.updated_at, + 'custom_properties': series.custom_properties, + 'm3u_account': { + 'id': relation.m3u_account.id, + 'name': relation.m3u_account.name, + 'account_type': relation.m3u_account.account_type + }, + 'episodes_fetched': custom_props.get('episodes_fetched', False), + 'detailed_fetched': custom_props.get('detailed_fetched', False) + } + + # Always include episodes for series info if they've been fetched + include_episodes = request.query_params.get('include_episodes', 'true').lower() == 'true' + if include_episodes and custom_props.get('episodes_fetched', False): + logger.debug(f"Including episodes for series {series.id}") + episodes_by_season = {} + for episode in series.episodes.all().order_by('season_number', 'episode_number'): + season_key = str(episode.season_number or 0) + if season_key not in episodes_by_season: + episodes_by_season[season_key] = [] + + # Get episode relation for additional data + episode_relation = M3UEpisodeRelation.objects.filter( + episode=episode, + m3u_account=relation.m3u_account + ).first() + + episode_data = { + 'id': episode.id, + 'uuid': episode.uuid, + 'name': episode.name, + 'title': episode.name, + 'episode_number': episode.episode_number, + 'season_number': episode.season_number, + 'description': episode.description, + 'air_date': episode.air_date, + 'plot': episode.description, + 'duration_secs': episode.duration_secs, + 'rating': episode.rating, + 'tmdb_id': episode.tmdb_id, + 'imdb_id': episode.imdb_id, + 'movie_image': episode.custom_properties.get('movie_image', '') if episode.custom_properties else '', + 'container_extension': episode_relation.container_extension if episode_relation else 'mp4', + 'type': 'episode', + 'series': { + 'id': series.id, + 'name': series.name + } + } + episodes_by_season[season_key].append(episode_data) + + response_data['episodes'] = episodes_by_season + logger.debug(f"Added {len(episodes_by_season)} seasons of episodes to response") + elif include_episodes: + # Episodes not yet fetched, include empty episodes list + response_data['episodes'] = {} + + logger.debug(f"Returning series info response for series {series.id}") + return 
Response(response_data) + + except Exception as e: + logger.error(f"Error fetching series info for series {pk}: {str(e)}") + return Response( + {'error': f'Failed to fetch series information: {str(e)}'}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR + ) + + +class VODCategoryFilter(django_filters.FilterSet): + name = django_filters.CharFilter(lookup_expr="icontains") + category_type = django_filters.ChoiceFilter(choices=VODCategory.CATEGORY_TYPE_CHOICES) + m3u_account = django_filters.NumberFilter(field_name="m3u_account__id") + + class Meta: + model = VODCategory + fields = ['name', 'category_type', 'm3u_account'] + + +class VODCategoryViewSet(viewsets.ReadOnlyModelViewSet): + """ViewSet for VOD Categories""" + queryset = VODCategory.objects.all() + serializer_class = VODCategorySerializer + + filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter] + filterset_class = VODCategoryFilter + search_fields = ['name'] + ordering = ['name'] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + + def list(self, request, *args, **kwargs): + """Override list to ensure Uncategorized categories and relations exist for all XC accounts with VOD enabled""" + from apps.m3u.models import M3UAccount + + # Ensure Uncategorized categories exist + movie_category, _ = VODCategory.objects.get_or_create( + name="Uncategorized", + category_type="movie", + defaults={} + ) + + series_category, _ = VODCategory.objects.get_or_create( + name="Uncategorized", + category_type="series", + defaults={} + ) + + # Get all active XC accounts with VOD enabled + xc_accounts = M3UAccount.objects.filter( + account_type=M3UAccount.Types.XC, + is_active=True + ) + + for account in xc_accounts: + if account.custom_properties: + custom_props = account.custom_properties or {} + vod_enabled = custom_props.get("enable_vod", False) + + if vod_enabled: + # Ensure relations exist for this account + auto_enable_new = custom_props.get("auto_enable_new_groups_vod", True) + + M3UVODCategoryRelation.objects.get_or_create( + category=movie_category, + m3u_account=account, + defaults={ + 'enabled': auto_enable_new, + 'custom_properties': {} + } + ) + + M3UVODCategoryRelation.objects.get_or_create( + category=series_category, + m3u_account=account, + defaults={ + 'enabled': auto_enable_new, + 'custom_properties': {} + } + ) + + # Now proceed with normal list operation + return super().list(request, *args, **kwargs) + + +class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet): + """ViewSet that combines Movies and Series for unified 'All' view""" + queryset = Movie.objects.none() # Empty queryset, we override list method + serializer_class = MovieSerializer # Default serializer, overridden in list + pagination_class = VODPagination + + filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter] + search_fields = ['name', 'description', 'genre'] + ordering_fields = ['name', 'year', 'created_at'] + ordering = ['name'] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + + def list(self, request, *args, **kwargs): + """Override list to handle unified content properly - database-level approach""" + import logging + from django.db import connection + + logger = logging.getLogger(__name__) + logger.error("=== UnifiedContentViewSet.list() called ===") + + try: + # Get pagination parameters + page_size = 
int(request.query_params.get('page_size', 24)) + page_number = int(request.query_params.get('page', 1)) + + logger.error(f"Page {page_number}, page_size {page_size}") + + # Calculate offset for unified pagination + offset = (page_number - 1) * page_size + + # For high page numbers, use raw SQL for efficiency + # This avoids loading and sorting massive amounts of data in Python + + search = request.query_params.get('search', '') + category = request.query_params.get('category', '') + + # Build WHERE clauses + where_conditions = [ + # Only active content + "movies.id IN (SELECT DISTINCT movie_id FROM vod_m3umovierelation mmr JOIN m3u_m3uaccount ma ON mmr.m3u_account_id = ma.id WHERE ma.is_active = true)", + "series.id IN (SELECT DISTINCT series_id FROM vod_m3useriesrelation msr JOIN m3u_m3uaccount ma ON msr.m3u_account_id = ma.id WHERE ma.is_active = true)" + ] + + params = [] + + if search: + where_conditions[0] += " AND LOWER(movies.name) LIKE %s" + where_conditions[1] += " AND LOWER(series.name) LIKE %s" + search_param = f"%{search.lower()}%" + params.extend([search_param, search_param]) + + if category: + if '|' in category: + cat_name, cat_type = category.rsplit('|', 1) + if cat_type == 'movie': + where_conditions[0] += " AND movies.id IN (SELECT movie_id FROM vod_m3umovierelation mmr JOIN vod_vodcategory c ON mmr.category_id = c.id WHERE c.name = %s)" + where_conditions[1] = "1=0" # Exclude series + params.append(cat_name) + elif cat_type == 'series': + where_conditions[1] += " AND series.id IN (SELECT series_id FROM vod_m3useriesrelation msr JOIN vod_vodcategory c ON msr.category_id = c.id WHERE c.name = %s)" + where_conditions[0] = "1=0" # Exclude movies + params.append(cat_name) + else: + where_conditions[0] += " AND movies.id IN (SELECT movie_id FROM vod_m3umovierelation mmr JOIN vod_vodcategory c ON mmr.category_id = c.id WHERE c.name = %s)" + where_conditions[1] += " AND series.id IN (SELECT series_id FROM vod_m3useriesrelation msr JOIN vod_vodcategory c ON msr.category_id = c.id WHERE c.name = %s)" + params.extend([category, category]) + + # Use UNION ALL with ORDER BY and LIMIT/OFFSET for true unified pagination + # This is much more efficient than Python sorting + sql = f""" + WITH unified_content AS ( + SELECT + movies.id, + movies.uuid, + movies.name, + movies.description, + movies.year, + movies.rating, + movies.genre, + movies.duration_secs as duration, + movies.created_at, + movies.updated_at, + movies.custom_properties, + movies.logo_id, + logo.name as logo_name, + logo.url as logo_url, + 'movie' as content_type + FROM vod_movie movies + LEFT JOIN vod_vodlogo logo ON movies.logo_id = logo.id + WHERE {where_conditions[0]} + + UNION ALL + + SELECT + series.id, + series.uuid, + series.name, + series.description, + series.year, + series.rating, + series.genre, + NULL as duration, + series.created_at, + series.updated_at, + series.custom_properties, + series.logo_id, + logo.name as logo_name, + logo.url as logo_url, + 'series' as content_type + FROM vod_series series + LEFT JOIN vod_vodlogo logo ON series.logo_id = logo.id + WHERE {where_conditions[1]} + ) + SELECT * FROM unified_content + ORDER BY LOWER(name), id + LIMIT %s OFFSET %s + """ + + params.extend([page_size, offset]) + + logger.error(f"Executing SQL with LIMIT {page_size} OFFSET {offset}") + + with connection.cursor() as cursor: + cursor.execute(sql, params) + columns = [col[0] for col in cursor.description] + results = [] + + for row in cursor.fetchall(): + item_dict = dict(zip(columns, row)) + + # Build logo 
object in the format expected by frontend + logo_data = None + if item_dict['logo_id']: + logo_data = { + 'id': item_dict['logo_id'], + 'name': item_dict['logo_name'], + 'url': item_dict['logo_url'], + 'cache_url': f"/api/vod/vodlogos/{item_dict['logo_id']}/cache/", + 'movie_count': 0, # We don't calculate this in raw SQL + 'series_count': 0, # We don't calculate this in raw SQL + 'is_used': True + } + + # Convert to the format expected by frontend + formatted_item = { + 'id': item_dict['id'], + 'uuid': str(item_dict['uuid']), + 'name': item_dict['name'], + 'description': item_dict['description'] or '', + 'year': item_dict['year'], + 'rating': float(item_dict['rating']) if item_dict['rating'] else 0.0, + 'genre': item_dict['genre'] or '', + 'duration': item_dict['duration'], + 'created_at': item_dict['created_at'].isoformat() if item_dict['created_at'] else None, + 'updated_at': item_dict['updated_at'].isoformat() if item_dict['updated_at'] else None, + 'custom_properties': item_dict['custom_properties'] or {}, + 'logo': logo_data, + 'content_type': item_dict['content_type'] + } + results.append(formatted_item) + + logger.error(f"Retrieved {len(results)} results via SQL") + + # Get total count estimate (for pagination info) + # Use a separate efficient count query + count_sql = f""" + SELECT COUNT(*) FROM ( + SELECT 1 FROM vod_movie movies WHERE {where_conditions[0]} + UNION ALL + SELECT 1 FROM vod_series series WHERE {where_conditions[1]} + ) as total_count + """ + + count_params = params[:-2] # Remove LIMIT and OFFSET params + + with connection.cursor() as cursor: + cursor.execute(count_sql, count_params) + total_count = cursor.fetchone()[0] + + response_data = { + 'count': total_count, + 'next': offset + page_size < total_count, + 'previous': page_number > 1, + 'results': results + } + + return Response(response_data) + + except Exception as e: + logger.error(f"Error in UnifiedContentViewSet.list(): {e}") + import traceback + logger.error(traceback.format_exc()) + return Response({'error': str(e)}, status=500) + + +class VODLogoPagination(PageNumberPagination): + page_size = 100 + page_size_query_param = "page_size" + max_page_size = 1000 + + +class VODLogoViewSet(viewsets.ModelViewSet): + """ViewSet for VOD Logo management""" + queryset = VODLogo.objects.all() + serializer_class = VODLogoSerializer + pagination_class = VODLogoPagination + filter_backends = [SearchFilter, OrderingFilter] + search_fields = ['name', 'url'] + ordering_fields = ['name', 'id'] + ordering = ['name'] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + if self.action == 'cache': + return [AllowAny()] + return [Authenticated()] + + def get_queryset(self): + """Optimize queryset with prefetch and add filtering""" + queryset = VODLogo.objects.prefetch_related('movie', 'series').order_by('name') + + # Filter by specific IDs + ids = self.request.query_params.getlist('ids') + if ids: + try: + id_list = [int(id_str) for id_str in ids if id_str.isdigit()] + if id_list: + queryset = queryset.filter(id__in=id_list) + except (ValueError, TypeError): + queryset = VODLogo.objects.none() + + # Filter by usage + used_filter = self.request.query_params.get('used', None) + if used_filter == 'true': + # Return logos that are used by movies OR series + queryset = queryset.filter( + Q(movie__isnull=False) | Q(series__isnull=False) + ).distinct() + elif used_filter == 'false': + # Return logos that are NOT used by either + queryset = queryset.filter( + 
movie__isnull=True, + series__isnull=True + ) + elif used_filter == 'movies': + # Return logos that are used by movies (may also be used by series) + queryset = queryset.filter(movie__isnull=False).distinct() + elif used_filter == 'series': + # Return logos that are used by series (may also be used by movies) + queryset = queryset.filter(series__isnull=False).distinct() + + + # Filter by name + name_query = self.request.query_params.get('name', None) + if name_query: + queryset = queryset.filter(name__icontains=name_query) + + # No pagination mode + if self.request.query_params.get('no_pagination', 'false').lower() == 'true': + self.pagination_class = None + + return queryset + + @action(detail=True, methods=["get"], permission_classes=[AllowAny]) + def cache(self, request, pk=None): + """Streams the VOD logo file, whether it's local or remote.""" + logo = self.get_object() + + if not logo.url: + return HttpResponse(status=404) + + # Check if this is a local file path + if logo.url.startswith('/data/'): + # It's a local file + file_path = logo.url + if not os.path.exists(file_path): + logger.error(f"VOD logo file not found: {file_path}") + return HttpResponse(status=404) + + try: + return FileResponse(open(file_path, 'rb'), content_type='image/png') + except Exception as e: + logger.error(f"Error serving VOD logo file {file_path}: {str(e)}") + return HttpResponse(status=500) + else: + # It's a remote URL - proxy it + try: + response = requests.get(logo.url, stream=True, timeout=10) + response.raise_for_status() + + content_type = response.headers.get('Content-Type', 'image/png') + + return StreamingHttpResponse( + response.iter_content(chunk_size=8192), + content_type=content_type + ) + except requests.exceptions.RequestException as e: + logger.error(f"Error fetching remote VOD logo {logo.url}: {str(e)}") + return HttpResponse(status=404) + + @action(detail=False, methods=["delete"], url_path="bulk-delete") + def bulk_delete(self, request): + """Delete multiple VOD logos at once""" + logo_ids = request.data.get('logo_ids', []) + + if not logo_ids: + return Response( + {"error": "No logo IDs provided"}, + status=status.HTTP_400_BAD_REQUEST + ) + + try: + # Get logos to delete + logos = VODLogo.objects.filter(id__in=logo_ids) + deleted_count = logos.count() + + # Delete them + logos.delete() + + return Response({ + "deleted_count": deleted_count, + "message": f"Successfully deleted {deleted_count} VOD logo(s)" + }) + except Exception as e: + logger.error(f"Error during bulk VOD logo deletion: {str(e)}") + return Response( + {"error": str(e)}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR + ) + + @action(detail=False, methods=["post"]) + def cleanup(self, request): + """Delete all VOD logos that are not used by any movies or series""" + try: + # Find unused logos + unused_logos = VODLogo.objects.filter( + movie__isnull=True, + series__isnull=True + ) + + deleted_count = unused_logos.count() + logo_names = list(unused_logos.values_list('name', flat=True)) + + # Delete them + unused_logos.delete() + + logger.info(f"Cleaned up {deleted_count} unused VOD logos: {logo_names}") + + return Response({ + "deleted_count": deleted_count, + "deleted_logos": logo_names, + "message": f"Successfully deleted {deleted_count} unused VOD logo(s)" + }) + except Exception as e: + logger.error(f"Error during VOD logo cleanup: {str(e)}") + return Response( + {"error": str(e)}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR + ) + diff --git a/apps/vod/apps.py b/apps/vod/apps.py new file mode 100644 index 
00000000..0e2af56d --- /dev/null +++ b/apps/vod/apps.py @@ -0,0 +1,12 @@ +from django.apps import AppConfig + + +class VODConfig(AppConfig): + default_auto_field = 'django.db.models.BigAutoField' + name = 'apps.vod' + verbose_name = 'Video on Demand' + + def ready(self): + """Initialize VOD app when Django is ready""" + # Import models to ensure they're registered + from . import models diff --git a/apps/vod/migrations/0001_initial.py b/apps/vod/migrations/0001_initial.py new file mode 100644 index 00000000..02c6ae2a --- /dev/null +++ b/apps/vod/migrations/0001_initial.py @@ -0,0 +1,201 @@ +# Generated by Django 5.2.4 on 2025-08-28 18:16 + +import django.db.models.deletion +import uuid +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ('dispatcharr_channels', '0024_alter_channelgroupm3uaccount_channel_group'), + ('m3u', '0016_m3uaccount_priority'), + ] + + operations = [ + migrations.CreateModel( + name='Movie', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)), + ('name', models.CharField(max_length=255)), + ('description', models.TextField(blank=True, null=True)), + ('year', models.IntegerField(blank=True, null=True)), + ('rating', models.CharField(blank=True, max_length=10, null=True)), + ('genre', models.CharField(blank=True, max_length=255, null=True)), + ('duration_secs', models.IntegerField(blank=True, help_text='Duration in seconds', null=True)), + ('tmdb_id', models.CharField(blank=True, help_text='TMDB ID for metadata', max_length=50, null=True, unique=True)), + ('imdb_id', models.CharField(blank=True, help_text='IMDB ID for metadata', max_length=50, null=True, unique=True)), + ('custom_properties', models.JSONField(blank=True, help_text='Additional metadata and properties for the movie', null=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('logo', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='movie', to='dispatcharr_channels.logo')), + ], + options={ + 'verbose_name': 'Movie', + 'verbose_name_plural': 'Movies', + 'ordering': ['name'], + }, + ), + migrations.CreateModel( + name='Series', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)), + ('name', models.CharField(max_length=255)), + ('description', models.TextField(blank=True, null=True)), + ('year', models.IntegerField(blank=True, null=True)), + ('rating', models.CharField(blank=True, max_length=10, null=True)), + ('genre', models.CharField(blank=True, max_length=255, null=True)), + ('tmdb_id', models.CharField(blank=True, help_text='TMDB ID for metadata', max_length=50, null=True, unique=True)), + ('imdb_id', models.CharField(blank=True, help_text='IMDB ID for metadata', max_length=50, null=True, unique=True)), + ('custom_properties', models.JSONField(blank=True, help_text='Additional metadata and properties for the series', null=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('logo', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='series', to='dispatcharr_channels.logo')), + ], + options={ + 'verbose_name': 
'Series', + 'verbose_name_plural': 'Series', + 'ordering': ['name'], + }, + ), + migrations.CreateModel( + name='Episode', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)), + ('name', models.CharField(max_length=255)), + ('description', models.TextField(blank=True, null=True)), + ('air_date', models.DateField(blank=True, null=True)), + ('rating', models.CharField(blank=True, max_length=10, null=True)), + ('duration_secs', models.IntegerField(blank=True, help_text='Duration in seconds', null=True)), + ('season_number', models.IntegerField(blank=True, null=True)), + ('episode_number', models.IntegerField(blank=True, null=True)), + ('tmdb_id', models.CharField(blank=True, db_index=True, help_text='TMDB ID for metadata', max_length=50, null=True)), + ('imdb_id', models.CharField(blank=True, db_index=True, help_text='IMDB ID for metadata', max_length=50, null=True)), + ('custom_properties', models.JSONField(blank=True, help_text='Custom properties for this episode', null=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('series', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='episodes', to='vod.series')), + ], + options={ + 'verbose_name': 'Episode', + 'verbose_name_plural': 'Episodes', + 'ordering': ['series__name', 'season_number', 'episode_number'], + }, + ), + migrations.CreateModel( + name='VODCategory', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('name', models.CharField(max_length=255)), + ('category_type', models.CharField(choices=[('movie', 'Movie'), ('series', 'Series')], default='movie', help_text='Type of content this category contains', max_length=10)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ], + options={ + 'verbose_name': 'VOD Category', + 'verbose_name_plural': 'VOD Categories', + 'ordering': ['name'], + 'unique_together': {('name', 'category_type')}, + }, + ), + migrations.CreateModel( + name='M3UVODCategoryRelation', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('enabled', models.BooleanField(default=False, help_text='Set to false to deactivate this category for the M3U account')), + ('custom_properties', models.JSONField(blank=True, help_text='Provider-specific data like quality, language, etc.', null=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('m3u_account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_relations', to='m3u.m3uaccount')), + ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='m3u_relations', to='vod.vodcategory')), + ], + options={ + 'verbose_name': 'M3U VOD Category Relation', + 'verbose_name_plural': 'M3U VOD Category Relations', + }, + ), + migrations.CreateModel( + name='M3USeriesRelation', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('external_series_id', models.CharField(help_text='External series ID from M3U provider', max_length=255)), + ('custom_properties', models.JSONField(blank=True, help_text='Provider-specific data', null=True)), + ('created_at', 
models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('last_episode_refresh', models.DateTimeField(blank=True, help_text='Last time episodes were refreshed', null=True)), + ('m3u_account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='series_relations', to='m3u.m3uaccount')), + ('series', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='m3u_relations', to='vod.series')), + ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='vod.vodcategory')), + ], + options={ + 'verbose_name': 'M3U Series Relation', + 'verbose_name_plural': 'M3U Series Relations', + }, + ), + migrations.CreateModel( + name='M3UMovieRelation', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('stream_id', models.CharField(help_text='External stream ID from M3U provider', max_length=255)), + ('container_extension', models.CharField(blank=True, max_length=10, null=True)), + ('custom_properties', models.JSONField(blank=True, help_text='Provider-specific data like quality, language, etc.', null=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('last_advanced_refresh', models.DateTimeField(blank=True, help_text='Last time advanced data was fetched from provider', null=True)), + ('m3u_account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='movie_relations', to='m3u.m3uaccount')), + ('movie', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='m3u_relations', to='vod.movie')), + ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='vod.vodcategory')), + ], + options={ + 'verbose_name': 'M3U Movie Relation', + 'verbose_name_plural': 'M3U Movie Relations', + }, + ), + migrations.CreateModel( + name='M3UEpisodeRelation', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('stream_id', models.CharField(help_text='External stream ID from M3U provider', max_length=255)), + ('container_extension', models.CharField(blank=True, max_length=10, null=True)), + ('custom_properties', models.JSONField(blank=True, help_text='Provider-specific data like quality, language, etc.', null=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('episode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='m3u_relations', to='vod.episode')), + ('m3u_account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='episode_relations', to='m3u.m3uaccount')), + ], + options={ + 'verbose_name': 'M3U Episode Relation', + 'verbose_name_plural': 'M3U Episode Relations', + 'unique_together': {('m3u_account', 'stream_id')}, + }, + ), + migrations.AddConstraint( + model_name='movie', + constraint=models.UniqueConstraint(condition=models.Q(('tmdb_id__isnull', True), ('imdb_id__isnull', True)), fields=('name', 'year'), name='unique_movie_name_year_no_external_id'), + ), + migrations.AddConstraint( + model_name='series', + constraint=models.UniqueConstraint(condition=models.Q(('tmdb_id__isnull', True), ('imdb_id__isnull', True)), fields=('name', 'year'), name='unique_series_name_year_no_external_id'), + ), + migrations.AlterUniqueTogether( + name='episode', + unique_together={('series', 
'season_number', 'episode_number')}, + ), + migrations.AlterUniqueTogether( + name='m3uvodcategoryrelation', + unique_together={('m3u_account', 'category')}, + ), + migrations.AlterUniqueTogether( + name='m3useriesrelation', + unique_together={('m3u_account', 'external_series_id')}, + ), + migrations.AlterUniqueTogether( + name='m3umovierelation', + unique_together={('m3u_account', 'stream_id')}, + ), + ] diff --git a/apps/vod/migrations/0002_add_last_seen_with_default.py b/apps/vod/migrations/0002_add_last_seen_with_default.py new file mode 100644 index 00000000..6ece988a --- /dev/null +++ b/apps/vod/migrations/0002_add_last_seen_with_default.py @@ -0,0 +1,29 @@ +# Generated by Django 5.2.4 on 2025-09-04 21:12 + +import django.utils.timezone +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('vod', '0001_initial'), + ] + + operations = [ + migrations.AddField( + model_name='m3uepisoderelation', + name='last_seen', + field=models.DateTimeField(default=django.utils.timezone.now, help_text='Last time this relation was seen during VOD scan'), + ), + migrations.AddField( + model_name='m3umovierelation', + name='last_seen', + field=models.DateTimeField(default=django.utils.timezone.now, help_text='Last time this relation was seen during VOD scan'), + ), + migrations.AddField( + model_name='m3useriesrelation', + name='last_seen', + field=models.DateTimeField(default=django.utils.timezone.now, help_text='Last time this relation was seen during VOD scan'), + ), + ] diff --git a/apps/vod/migrations/0003_vodlogo_alter_movie_logo_alter_series_logo.py b/apps/vod/migrations/0003_vodlogo_alter_movie_logo_alter_series_logo.py new file mode 100644 index 00000000..1bd2c418 --- /dev/null +++ b/apps/vod/migrations/0003_vodlogo_alter_movie_logo_alter_series_logo.py @@ -0,0 +1,264 @@ +# Generated by Django 5.2.4 on 2025-11-06 23:01 + +import django.db.models.deletion +from django.db import migrations, models + + +def migrate_vod_logos_forward(apps, schema_editor): + """ + Migrate VOD logos from the Logo table to the new VODLogo table. + This copies all logos referenced by movies or series to VODLogo. + Uses pure SQL for maximum performance. 
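As a point of comparison, the same forward copy could be written against the ORM. The sketch below is only an illustration of what the raw SQL in this migration replaces; it assumes VODLogo.url carries a unique constraint (implied by the ON CONFLICT (url) clause used here).

def migrate_vod_logos_forward_orm(apps, schema_editor):
    # Historical models, as Django data migrations require.
    Logo = apps.get_model('dispatcharr_channels', 'Logo')
    VODLogo = apps.get_model('vod', 'VODLogo')
    Movie = apps.get_model('vod', 'Movie')
    Series = apps.get_model('vod', 'Series')

    # Logos referenced by at least one movie or series.
    used_ids = set(Movie.objects.exclude(logo__isnull=True).values_list('logo_id', flat=True))
    used_ids |= set(Series.objects.exclude(logo__isnull=True).values_list('logo_id', flat=True))
    used_logos = Logo.objects.filter(id__in=used_ids)

    # Copy them into VODLogo, skipping duplicate URLs.
    VODLogo.objects.bulk_create(
        [VODLogo(name=logo.name, url=logo.url) for logo in used_logos],
        ignore_conflicts=True,
    )

    # Re-point Movie/Series foreign keys at the new VODLogo rows via the shared URL.
    old_id_to_url = dict(used_logos.values_list('id', 'url'))
    url_to_new_id = dict(VODLogo.objects.values_list('url', 'id'))
    for model in (Movie, Series):
        for obj in model.objects.exclude(logo__isnull=True):
            new_id = url_to_new_id.get(old_id_to_url.get(obj.logo_id))
            if new_id:
                obj.logo_id = new_id
                obj.save(update_fields=['logo_id'])

Row-by-row saves like this are exactly the per-object overhead that the set-based INSERT ... SELECT and UPDATE ... FROM statements below avoid.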
+ """ + from django.db import connection + + print("\n" + "="*80) + print("Starting VOD logo migration...") + print("="*80) + + with connection.cursor() as cursor: + # Step 1: Copy unique logos from Logo table to VODLogo table + # Only copy logos that are used by movies or series + print("Copying logos to VODLogo table...") + cursor.execute(""" + INSERT INTO vod_vodlogo (name, url) + SELECT DISTINCT l.name, l.url + FROM dispatcharr_channels_logo l + WHERE l.id IN ( + SELECT DISTINCT logo_id FROM vod_movie WHERE logo_id IS NOT NULL + UNION + SELECT DISTINCT logo_id FROM vod_series WHERE logo_id IS NOT NULL + ) + ON CONFLICT (url) DO NOTHING + """) + print(f"Created VODLogo entries") + + # Step 2: Update movies to point to VODLogo IDs using JOIN + print("Updating movie references...") + cursor.execute(""" + UPDATE vod_movie m + SET logo_id = v.id + FROM dispatcharr_channels_logo l + INNER JOIN vod_vodlogo v ON l.url = v.url + WHERE m.logo_id = l.id + AND m.logo_id IS NOT NULL + """) + movie_count = cursor.rowcount + print(f"Updated {movie_count} movies with new VOD logo references") + + # Step 3: Update series to point to VODLogo IDs using JOIN + print("Updating series references...") + cursor.execute(""" + UPDATE vod_series s + SET logo_id = v.id + FROM dispatcharr_channels_logo l + INNER JOIN vod_vodlogo v ON l.url = v.url + WHERE s.logo_id = l.id + AND s.logo_id IS NOT NULL + """) + series_count = cursor.rowcount + print(f"Updated {series_count} series with new VOD logo references") + + print("="*80) + print("VOD logo migration completed successfully!") + print(f"Summary: Updated {movie_count} movies and {series_count} series") + print("="*80 + "\n") + + +def migrate_vod_logos_backward(apps, schema_editor): + """ + Reverse migration - moves VODLogos back to Logo table. + This recreates Logo entries for all VODLogos and updates Movie/Series references. + """ + Logo = apps.get_model('dispatcharr_channels', 'Logo') + VODLogo = apps.get_model('vod', 'VODLogo') + Movie = apps.get_model('vod', 'Movie') + Series = apps.get_model('vod', 'Series') + + print("\n" + "="*80) + print("REVERSE: Moving VOD logos back to Logo table...") + print("="*80) + + # Get all VODLogos + vod_logos = VODLogo.objects.all() + print(f"Found {vod_logos.count()} VOD logos to reverse migrate") + + # Create Logo entries for each VODLogo + logos_to_create = [] + vod_to_logo_mapping = {} # VODLogo ID -> Logo ID + + for vod_logo in vod_logos: + # Check if a Logo with this URL already exists + existing_logo = Logo.objects.filter(url=vod_logo.url).first() + + if existing_logo: + # Logo already exists, just map to it + vod_to_logo_mapping[vod_logo.id] = existing_logo.id + print(f"Logo already exists for URL: {vod_logo.url[:50]}... 
(using existing)") + else: + # Create new Logo entry + new_logo = Logo(name=vod_logo.name, url=vod_logo.url) + logos_to_create.append(new_logo) + + # Bulk create new Logo entries + if logos_to_create: + print(f"Creating {len(logos_to_create)} new Logo entries...") + Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True) + print("Logo entries created") + + # Get the created Logo instances with their IDs + for vod_logo in vod_logos: + if vod_logo.id not in vod_to_logo_mapping: + try: + logo = Logo.objects.get(url=vod_logo.url) + vod_to_logo_mapping[vod_logo.id] = logo.id + except Logo.DoesNotExist: + print(f"Warning: Could not find Logo for URL: {vod_logo.url[:100]}...") + + print(f"Created mapping for {len(vod_to_logo_mapping)} VOD logos -> Logos") + + # Update movies to point back to Logo table + movie_count = 0 + for movie in Movie.objects.exclude(logo__isnull=True): + if movie.logo_id in vod_to_logo_mapping: + movie.logo_id = vod_to_logo_mapping[movie.logo_id] + movie.save(update_fields=['logo_id']) + movie_count += 1 + print(f"Updated {movie_count} movies to use Logo table") + + # Update series to point back to Logo table + series_count = 0 + for series in Series.objects.exclude(logo__isnull=True): + if series.logo_id in vod_to_logo_mapping: + series.logo_id = vod_to_logo_mapping[series.logo_id] + series.save(update_fields=['logo_id']) + series_count += 1 + print(f"Updated {series_count} series to use Logo table") + + # Delete VODLogos (they're now redundant) + vod_logo_count = vod_logos.count() + vod_logos.delete() + print(f"Deleted {vod_logo_count} VOD logos") + + print("="*80) + print("Reverse migration completed!") + print(f"Summary: Created/reused {len(vod_to_logo_mapping)} logos, updated {movie_count} movies and {series_count} series") + print("="*80 + "\n") + + +def cleanup_migrated_logos(apps, schema_editor): + """ + Delete Logo entries that were successfully migrated to VODLogo. + + Uses efficient JOIN-based approach with LEFT JOIN to exclude channel usage. 
+ """ + from django.db import connection + + print("\n" + "="*80) + print("Cleaning up migrated Logo entries...") + print("="*80) + + with connection.cursor() as cursor: + # Single efficient query using JOINs: + # - JOIN with vod_vodlogo to find migrated logos + # - LEFT JOIN with channels to find which aren't used + cursor.execute(""" + DELETE FROM dispatcharr_channels_logo + WHERE id IN ( + SELECT l.id + FROM dispatcharr_channels_logo l + INNER JOIN vod_vodlogo v ON l.url = v.url + LEFT JOIN dispatcharr_channels_channel c ON c.logo_id = l.id + WHERE c.id IS NULL + ) + """) + deleted_count = cursor.rowcount + + print(f"✓ Deleted {deleted_count} migrated Logo entries (not used by channels)") + print("="*80 + "\n") + + +class Migration(migrations.Migration): + + dependencies = [ + ('vod', '0002_add_last_seen_with_default'), + ('dispatcharr_channels', '0013_alter_logo_url'), # Ensure Logo table exists + ] + + operations = [ + # Step 1: Create the VODLogo model + migrations.CreateModel( + name='VODLogo', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('name', models.CharField(max_length=255)), + ('url', models.TextField(unique=True)), + ], + options={ + 'verbose_name': 'VOD Logo', + 'verbose_name_plural': 'VOD Logos', + }, + ), + + # Step 2: Remove foreign key constraints temporarily (so we can change the IDs) + # We need to find and drop the actual constraint names dynamically + migrations.RunSQL( + sql=[ + # Drop movie logo constraint (find it dynamically) + """ + DO $$ + DECLARE + constraint_name text; + BEGIN + SELECT conname INTO constraint_name + FROM pg_constraint + WHERE conrelid = 'vod_movie'::regclass + AND conname LIKE '%logo_id%fk%'; + + IF constraint_name IS NOT NULL THEN + EXECUTE 'ALTER TABLE vod_movie DROP CONSTRAINT ' || constraint_name; + END IF; + END $$; + """, + # Drop series logo constraint (find it dynamically) + """ + DO $$ + DECLARE + constraint_name text; + BEGIN + SELECT conname INTO constraint_name + FROM pg_constraint + WHERE conrelid = 'vod_series'::regclass + AND conname LIKE '%logo_id%fk%'; + + IF constraint_name IS NOT NULL THEN + EXECUTE 'ALTER TABLE vod_series DROP CONSTRAINT ' || constraint_name; + END IF; + END $$; + """, + ], + reverse_sql=[ + # The AlterField operations will recreate the constraints pointing to VODLogo, + # so we don't need to manually recreate them in reverse + migrations.RunSQL.noop, + ], + ), + + # Step 3: Migrate the data (this copies logos and updates references) + migrations.RunPython(migrate_vod_logos_forward, migrate_vod_logos_backward), + + # Step 4: Now we can safely alter the foreign keys to point to VODLogo + migrations.AlterField( + model_name='movie', + name='logo', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='movie', to='vod.vodlogo'), + ), + migrations.AlterField( + model_name='series', + name='logo', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='series', to='vod.vodlogo'), + ), + + # Step 5: Clean up migrated Logo entries + migrations.RunPython(cleanup_migrated_logos, migrations.RunPython.noop), + ] diff --git a/apps/vod/migrations/__init__.py b/apps/vod/migrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/apps/vod/models.py b/apps/vod/models.py new file mode 100644 index 00000000..7067856e --- /dev/null +++ b/apps/vod/models.py @@ -0,0 +1,323 @@ +from django.db import models +from django.db.models 
import Q +from django.utils import timezone +from django.contrib.contenttypes.fields import GenericForeignKey +from django.contrib.contenttypes.models import ContentType +from apps.m3u.models import M3UAccount +import uuid + + +class VODLogo(models.Model): + """Logo model specifically for VOD content (movies and series)""" + name = models.CharField(max_length=255) + url = models.TextField(unique=True) + + def __str__(self): + return self.name + + class Meta: + verbose_name = 'VOD Logo' + verbose_name_plural = 'VOD Logos' + + +class VODCategory(models.Model): + """Categories for organizing VODs (e.g., Action, Comedy, Drama)""" + + CATEGORY_TYPE_CHOICES = [ + ('movie', 'Movie'), + ('series', 'Series'), + ] + + name = models.CharField(max_length=255) + category_type = models.CharField( + max_length=10, + choices=CATEGORY_TYPE_CHOICES, + default='movie', + help_text="Type of content this category contains" + ) + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + + class Meta: + verbose_name = 'VOD Category' + verbose_name_plural = 'VOD Categories' + ordering = ['name'] + unique_together = [('name', 'category_type')] + + @classmethod + def bulk_create_and_fetch(cls, objects, ignore_conflicts=False): + # Perform the bulk create operation + cls.objects.bulk_create(objects, ignore_conflicts=ignore_conflicts) + + # Use the unique fields to fetch the created objects + # Since we have unique_together on ('name', 'category_type'), we need both fields + filter_conditions = [] + for obj in objects: + filter_conditions.append( + Q(name=obj.name, category_type=obj.category_type) + ) + + if filter_conditions: + # Combine all conditions with OR + combined_condition = filter_conditions[0] + for condition in filter_conditions[1:]: + combined_condition |= condition + + created_objects = cls.objects.filter(combined_condition) + else: + created_objects = cls.objects.none() + + return created_objects + + def __str__(self): + return f"{self.name} ({self.get_category_type_display()})" + + +class Series(models.Model): + """Series information for TV shows""" + uuid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True) + name = models.CharField(max_length=255) + description = models.TextField(blank=True, null=True) + year = models.IntegerField(blank=True, null=True) + rating = models.CharField(max_length=10, blank=True, null=True) + genre = models.CharField(max_length=255, blank=True, null=True) + logo = models.ForeignKey(VODLogo, on_delete=models.SET_NULL, null=True, blank=True, related_name='series') + + # Metadata IDs for deduplication - these should be globally unique when present + tmdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="TMDB ID for metadata") + imdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="IMDB ID for metadata") + + # Additional metadata and properties + custom_properties = models.JSONField(blank=True, null=True, help_text='Additional metadata and properties for the series') + + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + + class Meta: + verbose_name = 'Series' + verbose_name_plural = 'Series' + ordering = ['name'] + # Only enforce name+year uniqueness when no external IDs are present + constraints = [ + models.UniqueConstraint( + fields=['name', 'year'], + condition=models.Q(tmdb_id__isnull=True) & models.Q(imdb_id__isnull=True), + name='unique_series_name_year_no_external_id' + ), + ] + + 
def __str__(self): + year_str = f" ({self.year})" if self.year else "" + return f"{self.name}{year_str}" + + +class Movie(models.Model): + """Movie content""" + uuid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True) + name = models.CharField(max_length=255) + description = models.TextField(blank=True, null=True) + year = models.IntegerField(blank=True, null=True) + rating = models.CharField(max_length=10, blank=True, null=True) + genre = models.CharField(max_length=255, blank=True, null=True) + duration_secs = models.IntegerField(blank=True, null=True, help_text="Duration in seconds") + logo = models.ForeignKey(VODLogo, on_delete=models.SET_NULL, null=True, blank=True, related_name='movie') + + # Metadata IDs for deduplication - these should be globally unique when present + tmdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="TMDB ID for metadata") + imdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="IMDB ID for metadata") + + # Additional metadata and properties + custom_properties = models.JSONField(blank=True, null=True, help_text='Additional metadata and properties for the movie') + + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + + class Meta: + verbose_name = 'Movie' + verbose_name_plural = 'Movies' + ordering = ['name'] + # Only enforce name+year uniqueness when no external IDs are present + constraints = [ + models.UniqueConstraint( + fields=['name', 'year'], + condition=models.Q(tmdb_id__isnull=True) & models.Q(imdb_id__isnull=True), + name='unique_movie_name_year_no_external_id' + ), + ] + + def __str__(self): + year_str = f" ({self.year})" if self.year else "" + return f"{self.name}{year_str}" + + +class Episode(models.Model): + """Episode content for TV series""" + uuid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True) + name = models.CharField(max_length=255) + description = models.TextField(blank=True, null=True) + air_date = models.DateField(blank=True, null=True) + rating = models.CharField(max_length=10, blank=True, null=True) + duration_secs = models.IntegerField(blank=True, null=True, help_text="Duration in seconds") + + # Episode specific fields + series = models.ForeignKey(Series, on_delete=models.CASCADE, related_name='episodes') + season_number = models.IntegerField(blank=True, null=True) + episode_number = models.IntegerField(blank=True, null=True) + + # Metadata IDs + tmdb_id = models.CharField(max_length=50, blank=True, null=True, help_text="TMDB ID for metadata", db_index=True) + imdb_id = models.CharField(max_length=50, blank=True, null=True, help_text="IMDB ID for metadata", db_index=True) + + # Custom properties for episode + custom_properties = models.JSONField(blank=True, null=True, help_text="Custom properties for this episode") + + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + + class Meta: + verbose_name = 'Episode' + verbose_name_plural = 'Episodes' + ordering = ['series__name', 'season_number', 'episode_number'] + unique_together = [ + ('series', 'season_number', 'episode_number'), + ] + + def __str__(self): + season_ep = f"S{self.season_number or 0:02d}E{self.episode_number or 0:02d}" + return f"{self.series.name} - {season_ep} - {self.name}" + + +# New relation models to link M3U accounts with VOD content + +class M3USeriesRelation(models.Model): + """Links M3U accounts to Series with provider-specific information""" + 
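+    # A single Series can be offered by multiple M3U accounts; each relation keeps the provider-specific external ID, category, and refresh/last-seen timestamps.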
m3u_account = models.ForeignKey(M3UAccount, on_delete=models.CASCADE, related_name='series_relations') + series = models.ForeignKey(Series, on_delete=models.CASCADE, related_name='m3u_relations') + category = models.ForeignKey(VODCategory, on_delete=models.SET_NULL, null=True, blank=True) + + # Provider-specific fields - renamed to avoid clash with series ForeignKey + external_series_id = models.CharField(max_length=255, help_text="External series ID from M3U provider") + custom_properties = models.JSONField(blank=True, null=True, help_text="Provider-specific data") + + # Timestamps + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + last_episode_refresh = models.DateTimeField(blank=True, null=True, help_text="Last time episodes were refreshed") + last_seen = models.DateTimeField(default=timezone.now, help_text="Last time this relation was seen during VOD scan") + + class Meta: + verbose_name = 'M3U Series Relation' + verbose_name_plural = 'M3U Series Relations' + unique_together = [('m3u_account', 'external_series_id')] + + def __str__(self): + return f"{self.m3u_account.name} - {self.series.name}" + + +class M3UMovieRelation(models.Model): + """Links M3U accounts to Movies with provider-specific information""" + m3u_account = models.ForeignKey(M3UAccount, on_delete=models.CASCADE, related_name='movie_relations') + movie = models.ForeignKey(Movie, on_delete=models.CASCADE, related_name='m3u_relations') + category = models.ForeignKey(VODCategory, on_delete=models.SET_NULL, null=True, blank=True) + + # Streaming information (provider-specific) + stream_id = models.CharField(max_length=255, help_text="External stream ID from M3U provider") + container_extension = models.CharField(max_length=10, blank=True, null=True) + + # Provider-specific data + custom_properties = models.JSONField(blank=True, null=True, help_text="Provider-specific data like quality, language, etc.") + + # Timestamps + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + last_advanced_refresh = models.DateTimeField(blank=True, null=True, help_text="Last time advanced data was fetched from provider") + last_seen = models.DateTimeField(default=timezone.now, help_text="Last time this relation was seen during VOD scan") + + class Meta: + verbose_name = 'M3U Movie Relation' + verbose_name_plural = 'M3U Movie Relations' + unique_together = [('m3u_account', 'stream_id')] + + def __str__(self): + return f"{self.m3u_account.name} - {self.movie.name}" + + def get_stream_url(self): + """Get the full stream URL for this movie from this provider""" + # Build URL dynamically for XtreamCodes accounts + if self.m3u_account.account_type == 'XC': + from core.xtream_codes import Client as XCClient + # Use XC client's URL normalization to handle malformed URLs + # (e.g., URLs with /player_api.php or query parameters) + normalized_url = XCClient(self.m3u_account.server_url, '', '')._normalize_url(self.m3u_account.server_url) + username = self.m3u_account.username + password = self.m3u_account.password + return f"{normalized_url}/movie/{username}/{password}/{self.stream_id}.{self.container_extension or 'mp4'}" + else: + # For other account types, we would need another way to build URLs + return None + + +class M3UEpisodeRelation(models.Model): + """Links M3U accounts to Episodes with provider-specific information""" + m3u_account = models.ForeignKey(M3UAccount, on_delete=models.CASCADE, related_name='episode_relations') + episode = 
models.ForeignKey(Episode, on_delete=models.CASCADE, related_name='m3u_relations') + + # Streaming information (provider-specific) + stream_id = models.CharField(max_length=255, help_text="External stream ID from M3U provider") + container_extension = models.CharField(max_length=10, blank=True, null=True) + + # Provider-specific data + custom_properties = models.JSONField(blank=True, null=True, help_text="Provider-specific data like quality, language, etc.") + + # Timestamps + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + last_seen = models.DateTimeField(default=timezone.now, help_text="Last time this relation was seen during VOD scan") + + class Meta: + verbose_name = 'M3U Episode Relation' + verbose_name_plural = 'M3U Episode Relations' + unique_together = [('m3u_account', 'stream_id')] + + def __str__(self): + return f"{self.m3u_account.name} - {self.episode}" + + def get_stream_url(self): + """Get the full stream URL for this episode from this provider""" + from core.xtream_codes import Client as XtreamCodesClient + + if self.m3u_account.account_type == 'XC': + # For XtreamCodes accounts, build the URL dynamically + # Use XC client's URL normalization to handle malformed URLs + # (e.g., URLs with /player_api.php or query parameters) + normalized_url = XtreamCodesClient(self.m3u_account.server_url, '', '')._normalize_url(self.m3u_account.server_url) + username = self.m3u_account.username + password = self.m3u_account.password + return f"{normalized_url}/series/{username}/{password}/{self.stream_id}.{self.container_extension or 'mp4'}" + else: + # We might support non XC accounts in the future + # For now, return None + return None + +class M3UVODCategoryRelation(models.Model): + """Links M3U accounts to categories with provider-specific information""" + m3u_account = models.ForeignKey(M3UAccount, on_delete=models.CASCADE, related_name='category_relations') + category = models.ForeignKey(VODCategory, on_delete=models.CASCADE, related_name='m3u_relations') + + enabled = models.BooleanField( + default=False, help_text="Set to false to deactivate this category for the M3U account" + ) + + custom_properties = models.JSONField(blank=True, null=True, help_text="Provider-specific data like quality, language, etc.") + + # Timestamps + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + + class Meta: + verbose_name = 'M3U VOD Category Relation' + verbose_name_plural = 'M3U VOD Category Relations' + unique_together = [('m3u_account', 'category')] + + def __str__(self): + return f"{self.m3u_account.name} - {self.category.name}" diff --git a/apps/vod/serializers.py b/apps/vod/serializers.py new file mode 100644 index 00000000..7747cb88 --- /dev/null +++ b/apps/vod/serializers.py @@ -0,0 +1,304 @@ +from rest_framework import serializers +from django.urls import reverse +from .models import ( + Series, VODCategory, Movie, Episode, VODLogo, + M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation, M3UVODCategoryRelation +) +from apps.m3u.serializers import M3UAccountSerializer + + +class VODLogoSerializer(serializers.ModelSerializer): + cache_url = serializers.SerializerMethodField() + movie_count = serializers.SerializerMethodField() + series_count = serializers.SerializerMethodField() + is_used = serializers.SerializerMethodField() + item_names = serializers.SerializerMethodField() + + class Meta: + model = VODLogo + fields = ["id", "name", "url", "cache_url", "movie_count", 
"series_count", "is_used", "item_names"] + + def validate_url(self, value): + """Validate that the URL is unique for creation or update""" + if self.instance and self.instance.url == value: + return value + + if VODLogo.objects.filter(url=value).exists(): + raise serializers.ValidationError("A VOD logo with this URL already exists.") + + return value + + def create(self, validated_data): + """Handle logo creation with proper URL validation""" + return VODLogo.objects.create(**validated_data) + + def update(self, instance, validated_data): + """Handle logo updates""" + for attr, value in validated_data.items(): + setattr(instance, attr, value) + instance.save() + return instance + + def get_cache_url(self, obj): + request = self.context.get("request") + if request: + return request.build_absolute_uri( + reverse("api:vod:vodlogo-cache", args=[obj.id]) + ) + return reverse("api:vod:vodlogo-cache", args=[obj.id]) + + def get_movie_count(self, obj): + """Get the number of movies using this logo""" + return obj.movie.count() if hasattr(obj, 'movie') else 0 + + def get_series_count(self, obj): + """Get the number of series using this logo""" + return obj.series.count() if hasattr(obj, 'series') else 0 + + def get_is_used(self, obj): + """Check if this logo is used by any movies or series""" + return (hasattr(obj, 'movie') and obj.movie.exists()) or (hasattr(obj, 'series') and obj.series.exists()) + + def get_item_names(self, obj): + """Get the list of movies and series using this logo""" + names = [] + + if hasattr(obj, 'movie'): + for movie in obj.movie.all()[:10]: # Limit to 10 items for performance + names.append(f"Movie: {movie.name}") + + if hasattr(obj, 'series'): + for series in obj.series.all()[:10]: # Limit to 10 items for performance + names.append(f"Series: {series.name}") + + return names + + +class M3UVODCategoryRelationSerializer(serializers.ModelSerializer): + category = serializers.IntegerField(source="category.id") + m3u_account = serializers.IntegerField(source="m3u_account.id") + + class Meta: + model = M3UVODCategoryRelation + fields = ["category", "m3u_account", "enabled"] + + +class VODCategorySerializer(serializers.ModelSerializer): + category_type_display = serializers.CharField(source='get_category_type_display', read_only=True) + m3u_accounts = M3UVODCategoryRelationSerializer(many=True, source="m3u_relations", read_only=True) + + class Meta: + model = VODCategory + fields = [ + "id", + "name", + "category_type", + "category_type_display", + "m3u_accounts", + ] + +class SeriesSerializer(serializers.ModelSerializer): + logo = VODLogoSerializer(read_only=True) + episode_count = serializers.SerializerMethodField() + + class Meta: + model = Series + fields = '__all__' + + def get_episode_count(self, obj): + return obj.episodes.count() + + +class MovieSerializer(serializers.ModelSerializer): + logo = VODLogoSerializer(read_only=True) + + class Meta: + model = Movie + fields = '__all__' + + +class EpisodeSerializer(serializers.ModelSerializer): + series = SeriesSerializer(read_only=True) + + class Meta: + model = Episode + fields = '__all__' + + +class M3USeriesRelationSerializer(serializers.ModelSerializer): + series = SeriesSerializer(read_only=True) + category = VODCategorySerializer(read_only=True) + m3u_account = M3UAccountSerializer(read_only=True) + + class Meta: + model = M3USeriesRelation + fields = '__all__' + + +class M3UMovieRelationSerializer(serializers.ModelSerializer): + movie = MovieSerializer(read_only=True) + category = VODCategorySerializer(read_only=True) 
+ m3u_account = M3UAccountSerializer(read_only=True) + quality_info = serializers.SerializerMethodField() + + class Meta: + model = M3UMovieRelation + fields = '__all__' + + def get_quality_info(self, obj): + """Extract quality information from various sources""" + quality_info = {} + + # 1. Check custom_properties first + if obj.custom_properties: + if obj.custom_properties.get('quality'): + quality_info['quality'] = obj.custom_properties['quality'] + return quality_info + elif obj.custom_properties.get('resolution'): + quality_info['resolution'] = obj.custom_properties['resolution'] + return quality_info + + # 2. Try to get detailed info from the movie if available + movie = obj.movie + if hasattr(movie, 'video') and movie.video: + video_data = movie.video + if isinstance(video_data, dict) and 'width' in video_data and 'height' in video_data: + width = video_data['width'] + height = video_data['height'] + quality_info['resolution'] = f"{width}x{height}" + + # Convert to common quality names (prioritize width for ultrawide/cinematic content) + if width >= 3840: + quality_info['quality'] = '4K' + elif width >= 1920: + quality_info['quality'] = '1080p' + elif width >= 1280: + quality_info['quality'] = '720p' + elif width >= 854: + quality_info['quality'] = '480p' + else: + quality_info['quality'] = f"{width}x{height}" + return quality_info + + # 3. Extract from movie name/title + if movie and movie.name: + name = movie.name + if '4K' in name or '2160p' in name: + quality_info['quality'] = '4K' + return quality_info + elif '1080p' in name or 'FHD' in name: + quality_info['quality'] = '1080p' + return quality_info + elif '720p' in name or 'HD' in name: + quality_info['quality'] = '720p' + return quality_info + elif '480p' in name: + quality_info['quality'] = '480p' + return quality_info + + # 4. Try bitrate as last resort + if hasattr(movie, 'bitrate') and movie.bitrate and movie.bitrate > 0: + bitrate = movie.bitrate + if bitrate >= 6000: + quality_info['quality'] = '4K' + elif bitrate >= 3000: + quality_info['quality'] = '1080p' + elif bitrate >= 1500: + quality_info['quality'] = '720p' + else: + quality_info['bitrate'] = f"{round(bitrate/1000)}Mbps" + return quality_info + + # 5. Fallback - no quality info available + return None + + +class M3UEpisodeRelationSerializer(serializers.ModelSerializer): + episode = EpisodeSerializer(read_only=True) + m3u_account = M3UAccountSerializer(read_only=True) + quality_info = serializers.SerializerMethodField() + + class Meta: + model = M3UEpisodeRelation + fields = '__all__' + + def get_quality_info(self, obj): + """Extract quality information from various sources""" + quality_info = {} + + # 1. Check custom_properties first + if obj.custom_properties: + if obj.custom_properties.get('quality'): + quality_info['quality'] = obj.custom_properties['quality'] + return quality_info + elif obj.custom_properties.get('resolution'): + quality_info['resolution'] = obj.custom_properties['resolution'] + return quality_info + + # 2. 
Try to get detailed info from the episode if available + episode = obj.episode + if hasattr(episode, 'video') and episode.video: + video_data = episode.video + if isinstance(video_data, dict) and 'width' in video_data and 'height' in video_data: + width = video_data['width'] + height = video_data['height'] + quality_info['resolution'] = f"{width}x{height}" + + # Convert to common quality names (prioritize width for ultrawide/cinematic content) + if width >= 3840: + quality_info['quality'] = '4K' + elif width >= 1920: + quality_info['quality'] = '1080p' + elif width >= 1280: + quality_info['quality'] = '720p' + elif width >= 854: + quality_info['quality'] = '480p' + else: + quality_info['quality'] = f"{width}x{height}" + return quality_info + + # 3. Extract from episode name/title + if episode and episode.name: + name = episode.name + if '4K' in name or '2160p' in name: + quality_info['quality'] = '4K' + return quality_info + elif '1080p' in name or 'FHD' in name: + quality_info['quality'] = '1080p' + return quality_info + elif '720p' in name or 'HD' in name: + quality_info['quality'] = '720p' + return quality_info + elif '480p' in name: + quality_info['quality'] = '480p' + return quality_info + + # 4. Try bitrate as last resort + if hasattr(episode, 'bitrate') and episode.bitrate and episode.bitrate > 0: + bitrate = episode.bitrate + if bitrate >= 6000: + quality_info['quality'] = '4K' + elif bitrate >= 3000: + quality_info['quality'] = '1080p' + elif bitrate >= 1500: + quality_info['quality'] = '720p' + else: + quality_info['bitrate'] = f"{round(bitrate/1000)}Mbps" + return quality_info + + # 5. Fallback - no quality info available + return None + + +class EnhancedSeriesSerializer(serializers.ModelSerializer): + """Enhanced serializer for series with provider information""" + logo = VODLogoSerializer(read_only=True) + providers = M3USeriesRelationSerializer(source='m3u_relations', many=True, read_only=True) + episode_count = serializers.SerializerMethodField() + + class Meta: + model = Series + fields = '__all__' + + def get_episode_count(self, obj): + return obj.episodes.count() diff --git a/apps/vod/tasks.py b/apps/vod/tasks.py new file mode 100644 index 00000000..0dcd9cfd --- /dev/null +++ b/apps/vod/tasks.py @@ -0,0 +1,2208 @@ +from celery import shared_task, current_app, group +from django.utils import timezone +from django.db import transaction, IntegrityError +from django.db.models import Q +from apps.m3u.models import M3UAccount +from core.xtream_codes import Client as XtreamCodesClient +from .models import ( + VODCategory, Series, Movie, Episode, VODLogo, + M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation, M3UVODCategoryRelation +) +from datetime import datetime +import logging +import json +import re + +logger = logging.getLogger(__name__) + + +@shared_task +def refresh_vod_content(account_id): + """Refresh VOD content for an M3U account with batch processing for improved performance""" + # Import here to avoid circular import + from apps.m3u.tasks import send_m3u_update + + try: + account = M3UAccount.objects.get(id=account_id, is_active=True) + + if account.account_type != M3UAccount.Types.XC: + logger.warning(f"VOD refresh called for non-XC account {account_id}") + return "VOD refresh only available for XtreamCodes accounts" + + logger.info(f"Starting batch VOD refresh for account {account.name}") + start_time = timezone.now() + + # Send start notification + send_m3u_update(account_id, "vod_refresh", 0, status="processing") + + with XtreamCodesClient( + 
account.server_url, + account.username, + account.password, + account.get_user_agent().user_agent + ) as client: + + movie_categories, series_categories = refresh_categories(account.id, client) + + logger.debug("Fetching relations for category filtering") + relations = { rel.category_id: rel for rel in M3UVODCategoryRelation.objects + .filter(m3u_account=account) + .select_related("category", "m3u_account") + } + + # Refresh movies with batch processing (pass scan start time) + refresh_movies(client, account, movie_categories, relations, scan_start_time=start_time) + + # Refresh series with batch processing (pass scan start time) + refresh_series(client, account, series_categories, relations, scan_start_time=start_time) + + end_time = timezone.now() + duration = (end_time - start_time).total_seconds() + + logger.info(f"Batch VOD refresh completed for account {account.name} in {duration:.2f} seconds") + + # Cleanup orphaned VOD content after refresh (scoped to this account only) + logger.info(f"Starting cleanup of orphaned VOD content for account {account.name}") + cleanup_result = cleanup_orphaned_vod_content(account_id=account_id, scan_start_time=start_time) + logger.info(f"VOD cleanup completed: {cleanup_result}") + + # Send completion notification + send_m3u_update(account_id, "vod_refresh", 100, status="success", + message=f"VOD refresh completed in {duration:.2f} seconds") + + return f"Batch VOD refresh completed for account {account.name} in {duration:.2f} seconds" + + except Exception as e: + logger.error(f"Error refreshing VOD for account {account_id}: {str(e)}") + + # Send error notification + send_m3u_update(account_id, "vod_refresh", 100, status="error", + message=f"VOD refresh failed: {str(e)}") + + return f"VOD refresh failed: {str(e)}" + +def refresh_categories(account_id, client=None): + account = M3UAccount.objects.get(id=account_id, is_active=True) + + if not client: + client = XtreamCodesClient( + account.server_url, + account.username, + account.password, + account.get_user_agent().user_agent + ) + logger.info(f"Refreshing VOD categories for account {account.name}") + + # First, get the category list to properly map category IDs and names + logger.info("Fetching movie categories from provider...") + categories_data = client.get_vod_categories() + category_map = batch_create_categories(categories_data, 'movie', account) + + # Create a mapping from provider category IDs to our category objects + movies_category_id_map = {} + for cat_data in categories_data: + cat_name = cat_data.get('category_name', 'Unknown') + provider_cat_id = cat_data.get('category_id') + our_category = category_map.get(cat_name) + if provider_cat_id and our_category: + movies_category_id_map[str(provider_cat_id)] = our_category + + # Get the category list to properly map category IDs and names + logger.info("Fetching series categories from provider...") + categories_data = client.get_series_categories() + category_map = batch_create_categories(categories_data, 'series', account) + + # Create a mapping from provider category IDs to our category objects + series_category_id_map = {} + for cat_data in categories_data: + cat_name = cat_data.get('category_name', 'Unknown') + provider_cat_id = cat_data.get('category_id') + our_category = category_map.get(cat_name) + if provider_cat_id and our_category: + series_category_id_map[str(provider_cat_id)] = our_category + + return movies_category_id_map, series_category_id_map + +def refresh_movies(client, account, categories_by_provider, relations, 
scan_start_time=None): + """Refresh movie content using single API call for all movies""" + logger.info(f"Refreshing movies for account {account.name}") + + # Ensure "Uncategorized" category exists for movies without a category + uncategorized_category, created = VODCategory.objects.get_or_create( + name="Uncategorized", + category_type="movie", + defaults={} + ) + + # Ensure there's a relation for the Uncategorized category + account_custom_props = account.custom_properties or {} + auto_enable_new = account_custom_props.get("auto_enable_new_groups_vod", True) + + uncategorized_relation, rel_created = M3UVODCategoryRelation.objects.get_or_create( + category=uncategorized_category, + m3u_account=account, + defaults={ + 'enabled': auto_enable_new, + 'custom_properties': {} + } + ) + + if created: + logger.info(f"Created 'Uncategorized' category for movies") + if rel_created: + logger.info(f"Created relation for 'Uncategorized' category (enabled={auto_enable_new})") + + # Add uncategorized category to relations dict for easy access + relations[uncategorized_category.id] = uncategorized_relation + + # Add to categories_by_provider with a special key for items without category + categories_by_provider['__uncategorized__'] = uncategorized_category + + # Get all movies in a single API call + logger.info("Fetching all movies from provider...") + all_movies_data = client.get_vod_streams() # No category_id = get all movies + + # Process movies in chunks using the simple approach + chunk_size = 1000 + total_movies = len(all_movies_data) + total_chunks = (total_movies + chunk_size - 1) // chunk_size if total_movies > 0 else 0 + + for i in range(0, total_movies, chunk_size): + chunk = all_movies_data[i:i + chunk_size] + chunk_num = (i // chunk_size) + 1 + + logger.info(f"Processing movie chunk {chunk_num}/{total_chunks} ({len(chunk)} movies)") + process_movie_batch(account, chunk, categories_by_provider, relations, scan_start_time) + + logger.info(f"Completed processing all {total_movies} movies in {total_chunks} chunks") + + +def refresh_series(client, account, categories_by_provider, relations, scan_start_time=None): + """Refresh series content using single API call for all series""" + logger.info(f"Refreshing series for account {account.name}") + + # Ensure "Uncategorized" category exists for series without a category + uncategorized_category, created = VODCategory.objects.get_or_create( + name="Uncategorized", + category_type="series", + defaults={} + ) + + # Ensure there's a relation for the Uncategorized category + account_custom_props = account.custom_properties or {} + auto_enable_new = account_custom_props.get("auto_enable_new_groups_series", True) + + uncategorized_relation, rel_created = M3UVODCategoryRelation.objects.get_or_create( + category=uncategorized_category, + m3u_account=account, + defaults={ + 'enabled': auto_enable_new, + 'custom_properties': {} + } + ) + + if created: + logger.info(f"Created 'Uncategorized' category for series") + if rel_created: + logger.info(f"Created relation for 'Uncategorized' category (enabled={auto_enable_new})") + + # Add uncategorized category to relations dict for easy access + relations[uncategorized_category.id] = uncategorized_relation + + # Add to categories_by_provider with a special key for items without category + categories_by_provider['__uncategorized__'] = uncategorized_category + + # Get all series in a single API call + logger.info("Fetching all series from provider...") + all_series_data = client.get_series() # No category_id = get all 
series + + # Process series in chunks using the simple approach + chunk_size = 1000 + total_series = len(all_series_data) + total_chunks = (total_series + chunk_size - 1) // chunk_size if total_series > 0 else 0 + + for i in range(0, total_series, chunk_size): + chunk = all_series_data[i:i + chunk_size] + chunk_num = (i // chunk_size) + 1 + + logger.info(f"Processing series chunk {chunk_num}/{total_chunks} ({len(chunk)} series)") + process_series_batch(account, chunk, categories_by_provider, relations, scan_start_time) + + logger.info(f"Completed processing all {total_series} series in {total_chunks} chunks") + + +def batch_create_categories(categories_data, category_type, account): + """Create categories in batch and return a mapping""" + category_names = [cat.get('category_name', 'Unknown') for cat in categories_data] + + relations_to_create = [] + + # Get existing categories + logger.debug(f"Starting VOD {category_type} category refresh") + existing_categories = { + cat.name: cat for cat in VODCategory.objects.filter( + name__in=category_names, + category_type=category_type + ) + } + + logger.debug(f"Found {len(existing_categories)} existing categories") + + # Check if we should auto-enable new categories based on account settings + account_custom_props = account.custom_properties or {} + if category_type == 'movie': + auto_enable_new = account_custom_props.get("auto_enable_new_groups_vod", True) + else: # series + auto_enable_new = account_custom_props.get("auto_enable_new_groups_series", True) + + # Create missing categories in batch + new_categories = [] + + for name in category_names: + if name not in existing_categories: + # Always create new categories + new_categories.append(VODCategory(name=name, category_type=category_type)) + else: + # Existing category - create relationship with enabled based on auto_enable setting + # (category exists globally but is new to this account) + relations_to_create.append(M3UVODCategoryRelation( + category=existing_categories[name], + m3u_account=account, + custom_properties={}, + enabled=auto_enable_new, + )) + + logger.debug(f"{len(new_categories)} new categories found") + logger.debug(f"{len(relations_to_create)} existing categories found for account") + + if new_categories: + logger.debug("Creating new categories...") + created_categories = list(VODCategory.bulk_create_and_fetch(new_categories, ignore_conflicts=True)) + + # Create relations for newly created categories with enabled based on auto_enable setting + for cat in created_categories: + if not auto_enable_new: + logger.info(f"New {category_type} category '{cat.name}' created but DISABLED - auto_enable_new_groups is disabled for account {account.id}") + + relations_to_create.append( + M3UVODCategoryRelation( + category=cat, + m3u_account=account, + custom_properties={}, + enabled=auto_enable_new, + ) + ) + + # Convert to dictionary for easy lookup + newly_created = {cat.name: cat for cat in created_categories} + existing_categories.update(newly_created) + + # Create missing relations + logger.debug("Updating category account relations...") + M3UVODCategoryRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True) + + # Delete orphaned category relationships (categories no longer in the M3U source) + # Exclude "Uncategorized" from cleanup as it's a special category we manage + current_category_ids = set(existing_categories[name].id for name in category_names) + existing_relations = M3UVODCategoryRelation.objects.filter( + m3u_account=account, + 
category__category_type=category_type + ).select_related('category') + + relations_to_delete = [ + rel for rel in existing_relations + if rel.category_id not in current_category_ids and rel.category.name != "Uncategorized" + ] + + if relations_to_delete: + M3UVODCategoryRelation.objects.filter( + id__in=[rel.id for rel in relations_to_delete] + ).delete() + logger.info(f"Deleted {len(relations_to_delete)} orphaned {category_type} category relationships for account {account.id}: {[rel.category.name for rel in relations_to_delete]}") + + # Check if any of the deleted relationships left categories with no remaining associations + orphaned_category_ids = [] + for rel in relations_to_delete: + category = rel.category + + # Check if this category has any remaining M3U account relationships + remaining_relationships = M3UVODCategoryRelation.objects.filter( + category=category + ).exists() + + # If no relationships remain, it's safe to delete the category + if not remaining_relationships: + orphaned_category_ids.append(category.id) + logger.debug(f"Category '{category.name}' has no remaining associations and will be deleted") + + # Delete orphaned categories + if orphaned_category_ids: + VODCategory.objects.filter(id__in=orphaned_category_ids).delete() + logger.info(f"Deleted {len(orphaned_category_ids)} orphaned {category_type} categories with no remaining associations") + + # 🔑 Fetch all relations for this account, for all categories + # relations = { rel.id: rel for rel in M3UVODCategoryRelation.objects + # .filter(category__in=existing_categories.values(), m3u_account=account) + # .select_related("category", "m3u_account") + # } + + # Attach relations to category objects + # for rel in relations: + # existing_categories[rel.category.name]['relation'] = { + # "relation_id": rel.id, + # "category_id": rel.category_id, + # "account_id": rel.m3u_account_id, + # } + + + return existing_categories + + + +@shared_task +def process_movie_batch(account, batch, categories, relations, scan_start_time=None): + """Process a batch of movies using simple bulk operations like M3U processing""" + logger.info(f"Processing movie batch of {len(batch)} movies for account {account.name}") + + movies_to_create = [] + movies_to_update = [] + relations_to_create = [] + relations_to_update = [] + movie_keys = {} # For deduplication like M3U stream_hashes + + # Process each movie in the batch + for movie_data in batch: + try: + stream_id = str(movie_data.get('stream_id')) + name = movie_data.get('name', 'Unknown') + + # Get category with proper error handling + category = None + + provider_cat_id = str(movie_data.get('category_id', '')) if movie_data.get('category_id') else None + movie_data['_provider_category_id'] = provider_cat_id + movie_data['_category_id'] = None + + logger.debug(f"Checking for existing provider category ID {provider_cat_id}") + if provider_cat_id in categories: + category = categories[provider_cat_id] + movie_data['_category_id'] = category.id + logger.debug(f"Found category {category.name} (ID: {category.id}) for movie {name}") + + relation = relations.get(category.id, None) + if relation and not relation.enabled: + logger.debug("Skipping disabled category") + continue + else: + # Assign to Uncategorized category if no category_id provided + logger.debug(f"No category ID provided for movie {name}, assigning to 'Uncategorized'") + category = categories.get('__uncategorized__') + if category: + movie_data['_category_id'] = category.id + # Check if uncategorized is disabled + relation = 
relations.get(category.id, None) + if relation and not relation.enabled: + logger.debug("Skipping disabled 'Uncategorized' category") + continue + + # Extract metadata + year = extract_year_from_data(movie_data, 'name') + tmdb_id = movie_data.get('tmdb_id') or movie_data.get('tmdb') + imdb_id = movie_data.get('imdb_id') or movie_data.get('imdb') + + # Clean empty string IDs and zero values (some providers use 0 to indicate no ID) + if tmdb_id == '' or tmdb_id == 0 or tmdb_id == '0': + tmdb_id = None + if imdb_id == '' or imdb_id == 0 or imdb_id == '0': + imdb_id = None + + # Create a unique key for this movie (priority: TMDB > IMDB > name+year) + if tmdb_id: + movie_key = f"tmdb_{tmdb_id}" + elif imdb_id: + movie_key = f"imdb_{imdb_id}" + else: + movie_key = f"name_{name}_{year or 'None'}" + + # Skip duplicates in this batch + if movie_key in movie_keys: + continue + + # Prepare movie properties + description = movie_data.get('description') or movie_data.get('plot') or '' + rating = normalize_rating(movie_data.get('rating') or movie_data.get('vote_average')) + genre = movie_data.get('genre') or movie_data.get('category_name') or '' + duration_secs = extract_duration_from_data(movie_data) + trailer_raw = movie_data.get('trailer') or movie_data.get('youtube_trailer') or '' + trailer = extract_string_from_array_or_string(trailer_raw) if trailer_raw else None + logo_url = movie_data.get('stream_icon') or '' + + movie_props = { + 'name': name, + 'year': year, + 'tmdb_id': tmdb_id, + 'imdb_id': imdb_id, + 'description': description, + 'rating': rating, + 'genre': genre, + 'duration_secs': duration_secs, + 'custom_properties': {'trailer': trailer} if trailer else None, + } + + movie_keys[movie_key] = { + 'props': movie_props, + 'stream_id': stream_id, + 'category': category, + 'movie_data': movie_data, + 'logo_url': logo_url # Keep logo URL for later processing + } + + except Exception as e: + logger.error(f"Error preparing movie {movie_data.get('name', 'Unknown')}: {str(e)}") + + # Collect all logo URLs and create logos in batch + logo_urls = set() + logo_url_to_name = {} # Map logo URLs to movie names + for data in movie_keys.values(): + logo_url = data.get('logo_url') + if logo_url and len(logo_url) <= 500: # Ignore overly long URLs (likely embedded image data) + logo_urls.add(logo_url) + # Map this logo URL to the movie name (use first occurrence if multiple movies share same logo) + if logo_url not in logo_url_to_name: + movie_name = data['props'].get('name', 'Unknown Movie') + logo_url_to_name[logo_url] = movie_name + + # Get existing logos + existing_logos = { + logo.url: logo for logo in VODLogo.objects.filter(url__in=logo_urls) + } if logo_urls else {} + + # Create missing logos + logos_to_create = [] + for logo_url in logo_urls: + if logo_url not in existing_logos: + movie_name = logo_url_to_name.get(logo_url, 'Unknown Movie') + logos_to_create.append(VODLogo(url=logo_url, name=movie_name)) + + if logos_to_create: + try: + VODLogo.objects.bulk_create(logos_to_create, ignore_conflicts=True) + # Refresh existing_logos with newly created ones + new_logo_urls = [logo.url for logo in logos_to_create] + newly_created = { + logo.url: logo for logo in VODLogo.objects.filter(url__in=new_logo_urls) + } + existing_logos.update(newly_created) + logger.info(f"Created {len(newly_created)} new VOD logos for movies") + except Exception as e: + logger.warning(f"Failed to create VOD logos: {e}") + + # Get existing movies based on our keys + existing_movies = {} + + # Query by TMDB IDs + tmdb_keys = [k 
for k in movie_keys.keys() if k.startswith('tmdb_')] + tmdb_ids = [k.replace('tmdb_', '') for k in tmdb_keys] + if tmdb_ids: + for movie in Movie.objects.filter(tmdb_id__in=tmdb_ids): + existing_movies[f"tmdb_{movie.tmdb_id}"] = movie + + # Query by IMDB IDs + imdb_keys = [k for k in movie_keys.keys() if k.startswith('imdb_')] + imdb_ids = [k.replace('imdb_', '') for k in imdb_keys] + if imdb_ids: + for movie in Movie.objects.filter(imdb_id__in=imdb_ids): + existing_movies[f"imdb_{movie.imdb_id}"] = movie + + # Query by name+year for movies without external IDs + name_year_keys = [k for k in movie_keys.keys() if k.startswith('name_')] + if name_year_keys: + for movie in Movie.objects.filter(tmdb_id__isnull=True, imdb_id__isnull=True): + key = f"name_{movie.name}_{movie.year or 'None'}" + if key in name_year_keys: + existing_movies[key] = movie + + # Get existing relations + stream_ids = [data['stream_id'] for data in movie_keys.values()] + existing_relations = { + rel.stream_id: rel for rel in M3UMovieRelation.objects.filter( + m3u_account=account, + stream_id__in=stream_ids + ).select_related('movie') + } + + # Process each movie + for movie_key, data in movie_keys.items(): + movie_props = data['props'] + stream_id = data['stream_id'] + category = data['category'] + movie_data = data['movie_data'] + logo_url = data.get('logo_url') + + if movie_key in existing_movies: + # Update existing movie + movie = existing_movies[movie_key] + updated = False + + for field, value in movie_props.items(): + if field == 'custom_properties': + if value != movie.custom_properties: + movie.custom_properties = value + updated = True + elif getattr(movie, field) != value: + setattr(movie, field, value) + updated = True + + # Handle logo assignment for existing movies + logo_updated = False + if logo_url and len(logo_url) <= 500 and logo_url in existing_logos: + new_logo = existing_logos[logo_url] + if movie.logo != new_logo: + movie._logo_to_update = new_logo + logo_updated = True + elif (not logo_url or len(logo_url) > 500) and movie.logo: + # Clear logo if no logo URL provided or URL is too long + movie._logo_to_update = None + logo_updated = True + + if updated or logo_updated: + movies_to_update.append(movie) + else: + # Create new movie + movie = Movie(**movie_props) + + # Assign logo if available + if logo_url and len(logo_url) <= 500 and logo_url in existing_logos: + movie.logo = existing_logos[logo_url] + + movies_to_create.append(movie) + + # Handle relation + if stream_id in existing_relations: + # Update existing relation + relation = existing_relations[stream_id] + relation.movie = movie + relation.category = category + relation.container_extension = movie_data.get('container_extension', 'mp4') + relation.custom_properties = { + 'basic_data': movie_data, + 'detailed_fetched': False + } + relation.last_seen = scan_start_time or timezone.now() # Mark as seen during this scan + relations_to_update.append(relation) + else: + # Create new relation + relation = M3UMovieRelation( + m3u_account=account, + movie=movie, + category=category, + stream_id=stream_id, + container_extension=movie_data.get('container_extension', 'mp4'), + custom_properties={ + 'basic_data': movie_data, + 'detailed_fetched': False + }, + last_seen=scan_start_time or timezone.now() # Mark as seen during this scan + ) + relations_to_create.append(relation) + + # Execute batch operations + logger.info(f"Executing batch operations: {len(movies_to_create)} movies to create, {len(movies_to_update)} to update") + + try: + with 
transaction.atomic(): + # First, create new movies and get their IDs + created_movies = {} + if movies_to_create: + # Bulk query to check which movies already exist + tmdb_ids = [m.tmdb_id for m in movies_to_create if m.tmdb_id] + imdb_ids = [m.imdb_id for m in movies_to_create if m.imdb_id] + name_year_pairs = [(m.name, m.year) for m in movies_to_create if not m.tmdb_id and not m.imdb_id] + + existing_by_tmdb = {m.tmdb_id: m for m in Movie.objects.filter(tmdb_id__in=tmdb_ids)} if tmdb_ids else {} + existing_by_imdb = {m.imdb_id: m for m in Movie.objects.filter(imdb_id__in=imdb_ids)} if imdb_ids else {} + + existing_by_name_year = {} + if name_year_pairs: + for movie in Movie.objects.filter(tmdb_id__isnull=True, imdb_id__isnull=True): + key = (movie.name, movie.year) + if key in name_year_pairs: + existing_by_name_year[key] = movie + + # Check each movie against the bulk query results + movies_actually_created = [] + for movie in movies_to_create: + existing = None + if movie.tmdb_id and movie.tmdb_id in existing_by_tmdb: + existing = existing_by_tmdb[movie.tmdb_id] + elif movie.imdb_id and movie.imdb_id in existing_by_imdb: + existing = existing_by_imdb[movie.imdb_id] + elif not movie.tmdb_id and not movie.imdb_id: + existing = existing_by_name_year.get((movie.name, movie.year)) + + if existing: + created_movies[id(movie)] = existing + else: + movies_actually_created.append(movie) + created_movies[id(movie)] = movie + + # Bulk create only movies that don't exist + if movies_actually_created: + Movie.objects.bulk_create(movies_actually_created) + + # Update existing movies + if movies_to_update: + # First, update all fields except logo to avoid unsaved related object issues + Movie.objects.bulk_update(movies_to_update, [ + 'description', 'rating', 'genre', 'year', 'tmdb_id', 'imdb_id', + 'duration_secs', 'custom_properties' + ]) + + # Handle logo updates separately to avoid bulk_update issues + for movie in movies_to_update: + if hasattr(movie, '_logo_to_update'): + movie.logo = movie._logo_to_update + movie.save(update_fields=['logo']) + + # Update relations to reference the correct movie objects (with PKs) + for relation in relations_to_create: + if id(relation.movie) in created_movies: + relation.movie = created_movies[id(relation.movie)] + + for relation in relations_to_update: + if id(relation.movie) in created_movies: + relation.movie = created_movies[id(relation.movie)] + + # All movies now have PKs, safe to bulk create/update relations + if relations_to_create: + M3UMovieRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True) + + if relations_to_update: + M3UMovieRelation.objects.bulk_update(relations_to_update, [ + 'movie', 'category', 'container_extension', 'custom_properties', 'last_seen' + ]) + + logger.info("Movie batch processing completed successfully!") + return f"Movie batch processed: {len(movies_to_create)} created, {len(movies_to_update)} updated" + + except Exception as e: + logger.error(f"Movie batch processing failed: {str(e)}") + return f"Movie batch processing failed: {str(e)}" + + +@shared_task +def process_series_batch(account, batch, categories, relations, scan_start_time=None): + """Process a batch of series using simple bulk operations like M3U processing""" + logger.info(f"Processing series batch of {len(batch)} series for account {account.name}") + + series_to_create = [] + series_to_update = [] + relations_to_create = [] + relations_to_update = [] + series_keys = {} # For deduplication like M3U stream_hashes + + # Process each series in the 
batch + for series_data in batch: + try: + series_id = str(series_data.get('series_id')) + name = series_data.get('name', 'Unknown') + + # Get category with proper error handling + category = None + + provider_cat_id = str(series_data.get('category_id', '')) if series_data.get('category_id') else None + series_data['_provider_category_id'] = provider_cat_id + series_data['_category_id'] = None + + if provider_cat_id in categories: + category = categories[provider_cat_id] + series_data['_category_id'] = category.id + logger.debug(f"Found category {category.name} (ID: {category.id}) for series {name}") + relation = relations.get(category.id, None) + + if relation and not relation.enabled: + logger.debug("Skipping disabled category") + continue + else: + # Assign to Uncategorized category if no category_id provided + logger.debug(f"No category ID provided for series {name}, assigning to 'Uncategorized'") + category = categories.get('__uncategorized__') + if category: + series_data['_category_id'] = category.id + # Check if uncategorized is disabled + relation = relations.get(category.id, None) + if relation and not relation.enabled: + logger.debug("Skipping disabled 'Uncategorized' category") + continue + + # Extract metadata + year = extract_year(series_data.get('releaseDate', '')) + if not year and series_data.get('release_date'): + year = extract_year(series_data.get('release_date')) + + tmdb_id = series_data.get('tmdb') or series_data.get('tmdb_id') + imdb_id = series_data.get('imdb') or series_data.get('imdb_id') + + # Clean empty string IDs and zero values (some providers use 0 to indicate no ID) + if tmdb_id == '' or tmdb_id == 0 or tmdb_id == '0': + tmdb_id = None + if imdb_id == '' or imdb_id == 0 or imdb_id == '0': + imdb_id = None + + # Create a unique key for this series (priority: TMDB > IMDB > name+year) + if tmdb_id: + series_key = f"tmdb_{tmdb_id}" + elif imdb_id: + series_key = f"imdb_{imdb_id}" + else: + series_key = f"name_{name}_{year or 'None'}" + + # Skip duplicates in this batch + if series_key in series_keys: + continue + + # Prepare series properties + description = series_data.get('plot', '') + rating = normalize_rating(series_data.get('rating')) + genre = series_data.get('genre', '') + logo_url = series_data.get('cover') or '' + + # Extract additional metadata for custom_properties + additional_metadata = {} + for key in ['backdrop_path', 'poster_path', 'original_name', 'first_air_date', 'last_air_date', + 'episode_run_time', 'status', 'type', 'cast', 'director', 'country', 'language', + 'releaseDate', 'youtube_trailer', 'category_id', 'age', 'seasons']: + value = series_data.get(key) + if value: + # For string-like fields that might be arrays, extract clean strings + if key in ['poster_path', 'youtube_trailer', 'cast', 'director']: + clean_value = extract_string_from_array_or_string(value) + if clean_value: + additional_metadata[key] = clean_value + elif key == 'backdrop_path': + clean_value = extract_string_from_array_or_string(value) + if clean_value: + additional_metadata[key] = [clean_value] + else: + # For other fields, keep as-is if not null/empty + if value is not None and value != '' and value != []: + additional_metadata[key] = value + + series_props = { + 'name': name, + 'year': year, + 'tmdb_id': tmdb_id, + 'imdb_id': imdb_id, + 'description': description, + 'rating': rating, + 'genre': genre, + 'custom_properties': additional_metadata if additional_metadata else None, + } + + series_keys[series_key] = { + 'props': series_props, + 'series_id': series_id, 
+ 'category': category, + 'series_data': series_data, + 'logo_url': logo_url # Keep logo URL for later processing + } + + except Exception as e: + logger.error(f"Error preparing series {series_data.get('name', 'Unknown')}: {str(e)}") + + # Collect all logo URLs and create logos in batch + logo_urls = set() + logo_url_to_name = {} # Map logo URLs to series names + for data in series_keys.values(): + logo_url = data.get('logo_url') + if logo_url and len(logo_url) <= 500: # Ignore overly long URLs (likely embedded image data) + logo_urls.add(logo_url) + # Map this logo URL to the series name (use first occurrence if multiple series share same logo) + if logo_url not in logo_url_to_name: + series_name = data['props'].get('name', 'Unknown Series') + logo_url_to_name[logo_url] = series_name + + # Get existing logos + existing_logos = { + logo.url: logo for logo in VODLogo.objects.filter(url__in=logo_urls) + } if logo_urls else {} + + # Create missing logos + logos_to_create = [] + for logo_url in logo_urls: + if logo_url not in existing_logos: + series_name = logo_url_to_name.get(logo_url, 'Unknown Series') + logos_to_create.append(VODLogo(url=logo_url, name=series_name)) + + if logos_to_create: + try: + VODLogo.objects.bulk_create(logos_to_create, ignore_conflicts=True) + # Refresh existing_logos with newly created ones + new_logo_urls = [logo.url for logo in logos_to_create] + newly_created = { + logo.url: logo for logo in VODLogo.objects.filter(url__in=new_logo_urls) + } + existing_logos.update(newly_created) + logger.info(f"Created {len(newly_created)} new VOD logos for series") + except Exception as e: + logger.warning(f"Failed to create VOD logos: {e}") + + # Get existing series based on our keys - same pattern as movies + existing_series = {} + + # Query by TMDB IDs + tmdb_keys = [k for k in series_keys.keys() if k.startswith('tmdb_')] + tmdb_ids = [k.replace('tmdb_', '') for k in tmdb_keys] + if tmdb_ids: + for series in Series.objects.filter(tmdb_id__in=tmdb_ids): + existing_series[f"tmdb_{series.tmdb_id}"] = series + + # Query by IMDB IDs + imdb_keys = [k for k in series_keys.keys() if k.startswith('imdb_')] + imdb_ids = [k.replace('imdb_', '') for k in imdb_keys] + if imdb_ids: + for series in Series.objects.filter(imdb_id__in=imdb_ids): + existing_series[f"imdb_{series.imdb_id}"] = series + + # Query by name+year for series without external IDs + name_year_keys = [k for k in series_keys.keys() if k.startswith('name_')] + if name_year_keys: + for series in Series.objects.filter(tmdb_id__isnull=True, imdb_id__isnull=True): + key = f"name_{series.name}_{series.year or 'None'}" + if key in name_year_keys: + existing_series[key] = series + + # Get existing relations + series_ids = [data['series_id'] for data in series_keys.values()] + existing_relations = { + rel.external_series_id: rel for rel in M3USeriesRelation.objects.filter( + m3u_account=account, + external_series_id__in=series_ids + ).select_related('series') + } + + # Process each series + for series_key, data in series_keys.items(): + series_props = data['props'] + series_id = data['series_id'] + category = data['category'] + series_data = data['series_data'] + logo_url = data.get('logo_url') + + if series_key in existing_series: + # Update existing series + series = existing_series[series_key] + updated = False + + for field, value in series_props.items(): + if field == 'custom_properties': + if value != series.custom_properties: + series.custom_properties = value + updated = True + elif getattr(series, field) != value: + 
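+ # Generic comparison for the remaining simple fields: only flag the series as
+ # dirty when a value actually changed, so the later bulk_update touches the
+ # minimum set of rows.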
setattr(series, field, value) + updated = True + + # Handle logo assignment for existing series + logo_updated = False + if logo_url and len(logo_url) <= 500 and logo_url in existing_logos: + new_logo = existing_logos[logo_url] + if series.logo != new_logo: + series._logo_to_update = new_logo + logo_updated = True + elif (not logo_url or len(logo_url) > 500) and series.logo: + # Clear logo if no logo URL provided or URL is too long + series._logo_to_update = None + logo_updated = True + + if updated or logo_updated: + series_to_update.append(series) + else: + # Create new series + series = Series(**series_props) + + # Assign logo if available + if logo_url and len(logo_url) <= 500 and logo_url in existing_logos: + series.logo = existing_logos[logo_url] + + series_to_create.append(series) + + # Handle relation + if series_id in existing_relations: + # Update existing relation + relation = existing_relations[series_id] + relation.series = series + relation.category = category + relation.custom_properties = { + 'basic_data': series_data, + 'detailed_fetched': False, + 'episodes_fetched': False + } + relation.last_seen = scan_start_time or timezone.now() # Mark as seen during this scan + relations_to_update.append(relation) + else: + # Create new relation + relation = M3USeriesRelation( + m3u_account=account, + series=series, + category=category, + external_series_id=series_id, + custom_properties={ + 'basic_data': series_data, + 'detailed_fetched': False, + 'episodes_fetched': False + }, + last_seen=scan_start_time or timezone.now() # Mark as seen during this scan + ) + relations_to_create.append(relation) + + # Execute batch operations + logger.info(f"Executing batch operations: {len(series_to_create)} series to create, {len(series_to_update)} to update") + + try: + with transaction.atomic(): + # First, create new series and get their IDs + created_series = {} + if series_to_create: + # Bulk query to check which series already exist + tmdb_ids = [s.tmdb_id for s in series_to_create if s.tmdb_id] + imdb_ids = [s.imdb_id for s in series_to_create if s.imdb_id] + name_year_pairs = [(s.name, s.year) for s in series_to_create if not s.tmdb_id and not s.imdb_id] + + existing_by_tmdb = {s.tmdb_id: s for s in Series.objects.filter(tmdb_id__in=tmdb_ids)} if tmdb_ids else {} + existing_by_imdb = {s.imdb_id: s for s in Series.objects.filter(imdb_id__in=imdb_ids)} if imdb_ids else {} + + existing_by_name_year = {} + if name_year_pairs: + for series in Series.objects.filter(tmdb_id__isnull=True, imdb_id__isnull=True): + key = (series.name, series.year) + if key in name_year_pairs: + existing_by_name_year[key] = series + + # Check each series against the bulk query results + series_actually_created = [] + for series in series_to_create: + existing = None + if series.tmdb_id and series.tmdb_id in existing_by_tmdb: + existing = existing_by_tmdb[series.tmdb_id] + elif series.imdb_id and series.imdb_id in existing_by_imdb: + existing = existing_by_imdb[series.imdb_id] + elif not series.tmdb_id and not series.imdb_id: + existing = existing_by_name_year.get((series.name, series.year)) + + if existing: + created_series[id(series)] = existing + else: + series_actually_created.append(series) + created_series[id(series)] = series + + # Bulk create only series that don't exist + if series_actually_created: + Series.objects.bulk_create(series_actually_created) + + # Update existing series + if series_to_update: + # First, update all fields except logo to avoid unsaved related object issues + 
Series.objects.bulk_update(series_to_update, [ + 'description', 'rating', 'genre', 'year', 'tmdb_id', 'imdb_id', + 'custom_properties' + ]) + + # Handle logo updates separately to avoid bulk_update issues + for series in series_to_update: + if hasattr(series, '_logo_to_update'): + series.logo = series._logo_to_update + series.save(update_fields=['logo']) + + # Update relations to reference the correct series objects (with PKs) + for relation in relations_to_create: + if id(relation.series) in created_series: + relation.series = created_series[id(relation.series)] + + for relation in relations_to_update: + if id(relation.series) in created_series: + relation.series = created_series[id(relation.series)] + + # All series now have PKs, safe to bulk create/update relations + if relations_to_create: + M3USeriesRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True) + + if relations_to_update: + M3USeriesRelation.objects.bulk_update(relations_to_update, [ + 'series', 'category', 'custom_properties', 'last_seen' + ]) + + logger.info("Series batch processing completed successfully!") + return f"Series batch processed: {len(series_to_create)} created, {len(series_to_update)} updated" + + except Exception as e: + logger.error(f"Series batch processing failed: {str(e)}") + return f"Series batch processing failed: {str(e)}" + + +# Helper functions for year and date extraction + +def extract_duration_from_data(movie_data): + """Extract duration in seconds from movie data""" + duration_secs = None + + # Try to extract duration from various possible fields + if movie_data.get('duration_secs'): + duration_secs = int(movie_data.get('duration_secs')) + elif movie_data.get('duration'): + # Handle duration that might be in different formats + duration_str = str(movie_data.get('duration')) + if duration_str.isdigit(): + duration_secs = int(duration_str) * 60 # Assume minutes if just a number + else: + # Try to parse time format like "01:30:00" + try: + time_parts = duration_str.split(':') + if len(time_parts) == 3: + hours, minutes, seconds = map(int, time_parts) + duration_secs = (hours * 3600) + (minutes * 60) + seconds + elif len(time_parts) == 2: + minutes, seconds = map(int, time_parts) + duration_secs = minutes * 60 + seconds + except (ValueError, AttributeError): + pass + + return duration_secs + + +def normalize_rating(rating_value): + """Normalize rating value by converting commas to decimals and validating as float""" + if not rating_value: + return None + + try: + # Convert to string for processing + rating_str = str(rating_value).strip() + + if not rating_str or rating_str == '': + return None + + # Replace comma with decimal point (European format) + rating_str = rating_str.replace(',', '.') + + # Try to convert to float + rating_float = float(rating_str) + + # Return as string to maintain compatibility with existing code + # but ensure it's a valid numeric format + return str(rating_float) + except (ValueError, TypeError, AttributeError): + # If conversion fails, discard the rating + logger.debug(f"Invalid rating value discarded: {rating_value}") + return None + + +def extract_year(date_string): + """Extract year from date string""" + if not date_string: + return None + try: + return int(date_string.split('-')[0]) + except (ValueError, IndexError): + return None + + +def extract_year_from_title(title): + """Extract year from movie title if present""" + if not title: + return None + + # Pattern for (YYYY) format + pattern1 = r'\((\d{4})\)' + # Pattern for - YYYY format + pattern2 = 
r'\s-\s(\d{4})' + # Pattern for YYYY at the end + pattern3 = r'\s(\d{4})$' + + for pattern in [pattern1, pattern2, pattern3]: + match = re.search(pattern, title) + if match: + year = int(match.group(1)) + # Validate year is reasonable (between 1900 and current year + 5) + if 1900 <= year <= 2030: + return year + + return None + + +def extract_year_from_data(data, title_key='name'): + """Extract year from various data sources with fallback options""" + try: + # First try the year field + year = data.get('year') + if year and str(year).strip() and str(year).strip() != '': + try: + year_int = int(year) + if 1900 <= year_int <= 2030: + return year_int + except (ValueError, TypeError): + pass + + # Try releaseDate or release_date fields + for date_field in ['releaseDate', 'release_date']: + date_value = data.get(date_field) + if date_value and isinstance(date_value, str) and date_value.strip(): + # Extract year from date format like "2011-09-19" + try: + year_str = date_value.split('-')[0].strip() + if year_str: + year = int(year_str) + if 1900 <= year <= 2030: + return year + except (ValueError, IndexError): + continue + + # Finally try extracting from title + title = data.get(title_key, '') + if title and title.strip(): + return extract_year_from_title(title) + + except Exception: + # Don't fail processing if year extraction fails + pass + + return None + + +def extract_date_from_data(data): + """Extract date from various data sources with fallback options""" + try: + for date_field in ['air_date', 'releasedate', 'release_date']: + date_value = data.get(date_field) + if date_value and isinstance(date_value, str) and date_value.strip(): + parsed = parse_date(date_value) + if parsed: + return parsed + except Exception: + # Don't fail processing if date extraction fails + pass + return None + + +def parse_date(date_string): + """Parse date string into a datetime object""" + if not date_string: + return None + try: + # Try to parse ISO format first + return datetime.fromisoformat(date_string) + except ValueError: + # Fallback to parsing with strptime for common formats + try: + return datetime.strptime(date_string, '%Y-%m-%d') + except ValueError: + return None # Return None if parsing fails + + +# Episode processing and other advanced features + +def refresh_series_episodes(account, series, external_series_id, episodes_data=None): + """Refresh episodes for a series - only called on-demand""" + try: + if not episodes_data: + # Fetch detailed series info including episodes + with XtreamCodesClient( + account.server_url, + account.username, + account.password, + account.get_user_agent().user_agent + ) as client: + series_info = client.get_series_info(external_series_id) + if series_info: + # Update series with detailed info + info = series_info.get('info', {}) + if info: + # Only update fields if new value is non-empty and either no existing value or existing value is empty + updated = False + if should_update_field(series.description, info.get('plot')): + series.description = extract_string_from_array_or_string(info.get('plot')) + updated = True + normalized_rating = normalize_rating(info.get('rating')) + if normalized_rating and (not series.rating or not str(series.rating).strip()): + series.rating = normalized_rating + updated = True + if should_update_field(series.genre, info.get('genre')): + series.genre = extract_string_from_array_or_string(info.get('genre')) + updated = True + + year = extract_year_from_data(info) + if year and not series.year: + series.year = year + updated = True + + if 
updated: + series.save() + + episodes_data = series_info.get('episodes', {}) + else: + episodes_data = {} + + # Clear existing episodes for this account to handle deletions + Episode.objects.filter( + series=series, + m3u_relations__m3u_account=account + ).delete() + + # Process all episodes in batch + batch_process_episodes(account, series, episodes_data) + + # Update the series relation to mark episodes as fetched + series_relation = M3USeriesRelation.objects.filter( + series=series, + m3u_account=account + ).first() + + if series_relation: + custom_props = series_relation.custom_properties or {} + custom_props['episodes_fetched'] = True + custom_props['detailed_fetched'] = True + series_relation.custom_properties = custom_props + series_relation.last_episode_refresh = timezone.now() + series_relation.save() + + except Exception as e: + logger.error(f"Error refreshing episodes for series {series.name}: {str(e)}") + + +def batch_process_episodes(account, series, episodes_data, scan_start_time=None): + """Process episodes in batches for better performance. + + Note: Multiple streams can represent the same episode (e.g., different languages + or qualities). Each stream has a unique stream_id, but they share the same + season/episode number. We create one Episode record per (series, season, episode) + and multiple M3UEpisodeRelation records pointing to it. + """ + if not episodes_data: + return + + # Flatten episodes data + all_episodes_data = [] + for season_num, season_episodes in episodes_data.items(): + for episode_data in season_episodes: + episode_data['_season_number'] = int(season_num) + all_episodes_data.append(episode_data) + + if not all_episodes_data: + return + + logger.info(f"Batch processing {len(all_episodes_data)} episodes for series {series.name}") + + # Extract episode identifiers + # Note: episode_keys may have duplicates when multiple streams represent same episode + episode_keys = set() # Use set to track unique episode keys + episode_ids = [] + for episode_data in all_episodes_data: + season_num = episode_data['_season_number'] + episode_num = episode_data.get('episode_num', 0) + episode_keys.add((series.id, season_num, episode_num)) + episode_ids.append(str(episode_data.get('id'))) + + # Pre-fetch existing episodes + existing_episodes = {} + for episode in Episode.objects.filter(series=series): + key = (episode.series_id, episode.season_number, episode.episode_number) + existing_episodes[key] = episode + + # Pre-fetch existing episode relations + existing_relations = { + rel.stream_id: rel for rel in M3UEpisodeRelation.objects.filter( + m3u_account=account, + stream_id__in=episode_ids + ).select_related('episode') + } + + # Prepare batch operations + episodes_to_create = [] + episodes_to_update = [] + relations_to_create = [] + relations_to_update = [] + + # Track episodes we're creating in this batch to avoid duplicates + # Key: (series_id, season_number, episode_number) -> Episode object + episodes_pending_creation = {} + + for episode_data in all_episodes_data: + try: + episode_id = str(episode_data.get('id')) + episode_name = episode_data.get('title', 'Unknown Episode') + # Ensure season and episode numbers are integers (API may return strings) + try: + season_number = int(episode_data['_season_number']) + except (ValueError, TypeError) as e: + logger.warning(f"Invalid season_number '{episode_data.get('_season_number')}' for episode '{episode_name}': {e}") + season_number = 0 + try: + episode_number = int(episode_data.get('episode_num', 0)) + except (ValueError, 
TypeError) as e: + logger.warning(f"Invalid episode_num '{episode_data.get('episode_num')}' for episode '{episode_name}': {e}") + episode_number = 0 + info = episode_data.get('info', {}) + + # Extract episode metadata + description = info.get('plot') or info.get('overview', '') if info else '' + rating = normalize_rating(info.get('rating')) if info else None + air_date = extract_date_from_data(info) if info else None + duration_secs = info.get('duration_secs') if info else None + tmdb_id = info.get('tmdb_id') if info else None + imdb_id = info.get('imdb_id') if info else None + + # Prepare custom properties + custom_props = {} + if info: + if info.get('crew'): + custom_props['crew'] = info.get('crew') + if info.get('movie_image'): + movie_image = extract_string_from_array_or_string(info.get('movie_image')) + if movie_image: + custom_props['movie_image'] = movie_image + backdrop = extract_string_from_array_or_string(info.get('backdrop_path')) + if backdrop: + custom_props['backdrop_path'] = [backdrop] + + # Find existing episode - check DB first, then pending creations + episode_key = (series.id, season_number, episode_number) + episode = existing_episodes.get(episode_key) + + # Check if we already have this episode pending creation (multiple streams for same episode) + if not episode and episode_key in episodes_pending_creation: + episode = episodes_pending_creation[episode_key] + logger.debug(f"Reusing pending episode for S{season_number}E{episode_number} (stream_id: {episode_id})") + + if episode: + # Update existing episode + updated = False + if episode_name != episode.name: + episode.name = episode_name + updated = True + if description != episode.description: + episode.description = description + updated = True + if rating != episode.rating: + episode.rating = rating + updated = True + if air_date != episode.air_date: + episode.air_date = air_date + updated = True + if duration_secs != episode.duration_secs: + episode.duration_secs = duration_secs + updated = True + if tmdb_id != episode.tmdb_id: + episode.tmdb_id = tmdb_id + updated = True + if imdb_id != episode.imdb_id: + episode.imdb_id = imdb_id + updated = True + if custom_props != episode.custom_properties: + episode.custom_properties = custom_props if custom_props else None + updated = True + + # Only add to update list if episode has a PK (exists in DB) and isn't already in list + # Episodes pending creation don't have PKs yet and will be created via bulk_create + if updated and episode.pk and episode not in episodes_to_update: + episodes_to_update.append(episode) + else: + # Create new episode + episode = Episode( + series=series, + name=episode_name, + description=description, + air_date=air_date, + rating=rating, + duration_secs=duration_secs, + season_number=season_number, + episode_number=episode_number, + tmdb_id=tmdb_id, + imdb_id=imdb_id, + custom_properties=custom_props if custom_props else None + ) + episodes_to_create.append(episode) + # Track this episode so subsequent streams with same season/episode can reuse it + episodes_pending_creation[episode_key] = episode + + # Handle episode relation + if episode_id in existing_relations: + # Update existing relation + relation = existing_relations[episode_id] + relation.episode = episode + relation.container_extension = episode_data.get('container_extension', 'mp4') + relation.custom_properties = { + 'info': episode_data, + 'season_number': season_number + } + relation.last_seen = scan_start_time or timezone.now() # Mark as seen during this scan + 
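+ # last_seen acts as the scan watermark: cleanup_orphaned_vod_content later deletes
+ # relations that were not touched by this scan, roughly (sketch, ignoring the
+ # optional per-account filter):
+ #   cutoff = (scan_start_time or timezone.now()) - timedelta(days=stale_days)
+ #   M3UEpisodeRelation.objects.filter(last_seen__lt=cutoff).delete()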
relations_to_update.append(relation) + else: + # Create new relation + relation = M3UEpisodeRelation( + m3u_account=account, + episode=episode, + stream_id=episode_id, + container_extension=episode_data.get('container_extension', 'mp4'), + custom_properties={ + 'info': episode_data, + 'season_number': season_number + }, + last_seen=scan_start_time or timezone.now() # Mark as seen during this scan + ) + relations_to_create.append(relation) + + except Exception as e: + logger.error(f"Error preparing episode {episode_data.get('title', 'Unknown')}: {str(e)}") + + # Execute batch operations + with transaction.atomic(): + # Create new episodes - use ignore_conflicts in case of race conditions + if episodes_to_create: + Episode.objects.bulk_create(episodes_to_create, ignore_conflicts=True) + + # Re-fetch the created episodes to get their PKs + # We need to do this because bulk_create with ignore_conflicts doesn't set PKs + created_episode_keys = [ + (ep.series_id, ep.season_number, ep.episode_number) + for ep in episodes_to_create + ] + db_episodes = Episode.objects.filter(series=series) + episode_pk_map = { + (ep.series_id, ep.season_number, ep.episode_number): ep + for ep in db_episodes + } + + # Update relations to point to the actual DB episodes with PKs + for relation in relations_to_create: + ep = relation.episode + key = (ep.series_id, ep.season_number, ep.episode_number) + if key in episode_pk_map: + relation.episode = episode_pk_map[key] + + # Filter out relations with unsaved episodes (no PK) + # This can happen if bulk_create had a conflict and ignore_conflicts=True didn't save the episode + valid_relations_to_create = [] + for relation in relations_to_create: + if relation.episode.pk is not None: + valid_relations_to_create.append(relation) + else: + season_num = relation.episode.season_number + episode_num = relation.episode.episode_number + logger.warning( + f"Skipping relation for episode S{season_num}E{episode_num} " + f"- episode not saved to database" + ) + relations_to_create = valid_relations_to_create + + # Update existing episodes + if episodes_to_update: + Episode.objects.bulk_update(episodes_to_update, [ + 'name', 'description', 'air_date', 'rating', 'duration_secs', + 'tmdb_id', 'imdb_id', 'custom_properties' + ]) + + # Create new episode relations - use ignore_conflicts for stream_id duplicates + if relations_to_create: + M3UEpisodeRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True) + + # Update existing episode relations + if relations_to_update: + M3UEpisodeRelation.objects.bulk_update(relations_to_update, [ + 'episode', 'container_extension', 'custom_properties', 'last_seen' + ]) + + logger.info(f"Batch processed episodes: {len(episodes_to_create)} new, {len(episodes_to_update)} updated, " + f"{len(relations_to_create)} new relations, {len(relations_to_update)} updated relations") + + +@shared_task +def batch_refresh_series_episodes(account_id, series_ids=None): + """ + Batch refresh episodes for multiple series. + If series_ids is None, refresh all series that haven't been refreshed recently. 
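+ Each matching series is refreshed via refresh_series_episodes, which pulls
+ episode data from the provider using the account's XtreamCodes credentials.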
+ """ + try: + account = M3UAccount.objects.get(id=account_id, is_active=True) + + if account.account_type != M3UAccount.Types.XC: + logger.warning(f"Episode refresh called for non-XC account {account_id}") + return "Episode refresh only available for XtreamCodes accounts" + + # Determine which series to refresh + if series_ids: + series_relations = M3USeriesRelation.objects.filter( + m3u_account=account, + series__id__in=series_ids + ).select_related('series') + else: + # Refresh series that haven't been refreshed in the last 24 hours + cutoff_time = timezone.now() - timezone.timedelta(hours=24) + series_relations = M3USeriesRelation.objects.filter( + m3u_account=account, + last_episode_refresh__lt=cutoff_time + ).select_related('series') + + logger.info(f"Batch refreshing episodes for {series_relations.count()} series") + + with XtreamCodesClient( + account.server_url, + account.username, + account.password, + account.get_user_agent().user_agent + ) as client: + + refreshed_count = 0 + for relation in series_relations: + try: + refresh_series_episodes( + account, + relation.series, + relation.external_series_id + ) + refreshed_count += 1 + except Exception as e: + logger.error(f"Error refreshing episodes for series {relation.series.name}: {str(e)}") + + logger.info(f"Batch episode refresh completed for {refreshed_count} series") + return f"Batch episode refresh completed for {refreshed_count} series" + + except Exception as e: + logger.error(f"Error in batch episode refresh for account {account_id}: {str(e)}") + return f"Batch episode refresh failed: {str(e)}" + + +@shared_task +def cleanup_orphaned_vod_content(stale_days=0, scan_start_time=None, account_id=None): + """Clean up VOD content that has no M3U relations or has stale relations""" + from datetime import timedelta + + # Use scan start time as reference, or current time if not provided + reference_time = scan_start_time or timezone.now() + + # Calculate cutoff date for stale relations + cutoff_date = reference_time - timedelta(days=stale_days) + + # Build base query filters + base_filters = {'last_seen__lt': cutoff_date} + if account_id: + base_filters['m3u_account_id'] = account_id + logger.info(f"Cleaning up stale VOD content for account {account_id}") + else: + logger.info("Cleaning up stale VOD content across all accounts") + + # Clean up stale movie relations (haven't been seen in the specified days) + stale_movie_relations = M3UMovieRelation.objects.filter(**base_filters) + stale_movie_count = stale_movie_relations.count() + stale_movie_relations.delete() + + # Clean up stale series relations + stale_series_relations = M3USeriesRelation.objects.filter(**base_filters) + stale_series_count = stale_series_relations.count() + stale_series_relations.delete() + + # Clean up stale episode relations + stale_episode_relations = M3UEpisodeRelation.objects.filter(**base_filters) + stale_episode_count = stale_episode_relations.count() + stale_episode_relations.delete() + + # Clean up movies with no relations (orphaned) + # Safe to delete even during account-specific cleanup because if ANY account + # has a relation, m3u_relations will not be null + orphaned_movies = Movie.objects.filter(m3u_relations__isnull=True) + orphaned_movie_count = orphaned_movies.count() + if orphaned_movie_count > 0: + logger.info(f"Deleting {orphaned_movie_count} orphaned movies with no M3U relations") + orphaned_movies.delete() + + # Clean up series with no relations (orphaned) + orphaned_series = Series.objects.filter(m3u_relations__isnull=True) + 
orphaned_series_count = orphaned_series.count() + if orphaned_series_count > 0: + logger.info(f"Deleting {orphaned_series_count} orphaned series with no M3U relations") + orphaned_series.delete() + + # Episodes will be cleaned up via CASCADE when series are deleted + + result = (f"Cleaned up {stale_movie_count} stale movie relations, " + f"{stale_series_count} stale series relations, " + f"{stale_episode_count} stale episode relations, " + f"{orphaned_movie_count} orphaned movies, and " + f"{orphaned_series_count} orphaned series") + + logger.info(result) + return result + + +def handle_movie_id_conflicts(current_movie, relation, tmdb_id_to_set, imdb_id_to_set): + """ + Handle potential duplicate key conflicts when setting tmdb_id or imdb_id. + + Since this is called when a user is actively accessing movie details, we always + preserve the current movie (user's selection) and merge the existing one into it. + This prevents breaking the user's current viewing experience. + + Returns: + tuple: (movie_to_use, relation_was_updated) + """ + from django.db import IntegrityError + + existing_movie_with_tmdb = None + existing_movie_with_imdb = None + + # Check for existing movies with these IDs + if tmdb_id_to_set: + try: + existing_movie_with_tmdb = Movie.objects.get(tmdb_id=tmdb_id_to_set) + except Movie.DoesNotExist: + pass + + if imdb_id_to_set: + try: + existing_movie_with_imdb = Movie.objects.get(imdb_id=imdb_id_to_set) + except Movie.DoesNotExist: + pass + + # If no conflicts, proceed normally + if not existing_movie_with_tmdb and not existing_movie_with_imdb: + return current_movie, False + + # Determine which existing movie has the conflicting ID (prefer TMDB match) + existing_movie = existing_movie_with_tmdb or existing_movie_with_imdb + + # CRITICAL: Check if the existing movie is actually the same as the current movie + # This can happen if the current movie already has the ID we're trying to set + if existing_movie.id == current_movie.id: + logger.debug(f"Current movie {current_movie.id} already has the target ID, no conflict resolution needed") + return current_movie, False + + logger.info(f"ID conflict detected: Merging existing movie '{existing_movie.name}' (ID: {existing_movie.id}) into current movie '{current_movie.name}' (ID: {current_movie.id}) to preserve user selection") + + # FIRST: Clear the conflicting ID from the existing movie before any merging + if existing_movie_with_tmdb and tmdb_id_to_set: + logger.info(f"Clearing tmdb_id from existing movie {existing_movie.id} to avoid constraint violation") + existing_movie.tmdb_id = None + existing_movie.save(update_fields=['tmdb_id']) + + if existing_movie_with_imdb and imdb_id_to_set: + logger.info(f"Clearing imdb_id from existing movie {existing_movie.id} to avoid constraint violation") + existing_movie.imdb_id = None + existing_movie.save(update_fields=['imdb_id']) + + # THEN: Merge data from existing movie into current movie (now safe to set IDs) + merge_movie_data(source_movie=existing_movie, target_movie=current_movie, + tmdb_id_to_set=tmdb_id_to_set, imdb_id_to_set=imdb_id_to_set) + + # Transfer all relations from existing movie to current movie + existing_relations = existing_movie.m3u_relations.all() + if existing_relations.exists(): + logger.info(f"Transferring {existing_relations.count()} relations from existing movie {existing_movie.id} to current movie {current_movie.id}") + existing_relations.update(movie=current_movie) + + # Now safe to delete the existing movie since all its relations have been transferred + 
logger.info(f"Deleting existing movie {existing_movie.id} '{existing_movie.name}' after merging data and transferring relations") + existing_movie.delete() + + return current_movie, False # No relation update needed since we kept current movie + + +def merge_movie_data(source_movie, target_movie, tmdb_id_to_set=None, imdb_id_to_set=None): + """ + Merge valuable data from source_movie into target_movie. + Only overwrites target fields that are empty/None with non-empty source values. + + Args: + source_movie: Movie to copy data from + target_movie: Movie to copy data to + tmdb_id_to_set: TMDB ID to set on target (overrides source tmdb_id) + imdb_id_to_set: IMDB ID to set on target (overrides source imdb_id) + """ + updated = False + + # Basic fields - only fill if target is empty + if not target_movie.description and source_movie.description: + target_movie.description = source_movie.description + updated = True + logger.debug(f"Merged description from movie {source_movie.id} to {target_movie.id}") + + if not target_movie.year and source_movie.year: + target_movie.year = source_movie.year + updated = True + logger.debug(f"Merged year from movie {source_movie.id} to {target_movie.id}") + + if not target_movie.rating and source_movie.rating: + target_movie.rating = source_movie.rating + updated = True + logger.debug(f"Merged rating from movie {source_movie.id} to {target_movie.id}") + + if not target_movie.genre and source_movie.genre: + target_movie.genre = source_movie.genre + updated = True + logger.debug(f"Merged genre from movie {source_movie.id} to {target_movie.id}") + + if not target_movie.duration_secs and source_movie.duration_secs: + target_movie.duration_secs = source_movie.duration_secs + updated = True + logger.debug(f"Merged duration_secs from movie {source_movie.id} to {target_movie.id}") + + if not target_movie.logo and source_movie.logo: + target_movie.logo = source_movie.logo + updated = True + logger.debug(f"Merged logo from movie {source_movie.id} to {target_movie.id}") + + # Handle external IDs - use the specific IDs we want to set, or fall back to source + if not target_movie.tmdb_id: + if tmdb_id_to_set: + target_movie.tmdb_id = tmdb_id_to_set + updated = True + logger.debug(f"Set tmdb_id {tmdb_id_to_set} on movie {target_movie.id}") + elif source_movie.tmdb_id: + target_movie.tmdb_id = source_movie.tmdb_id + updated = True + logger.debug(f"Merged tmdb_id from movie {source_movie.id} to {target_movie.id}") + + if not target_movie.imdb_id: + if imdb_id_to_set: + target_movie.imdb_id = imdb_id_to_set + updated = True + logger.debug(f"Set imdb_id {imdb_id_to_set} on movie {target_movie.id}") + elif source_movie.imdb_id: + target_movie.imdb_id = source_movie.imdb_id + updated = True + logger.debug(f"Merged imdb_id from movie {source_movie.id} to {target_movie.id}") + + # Merge custom properties + target_props = target_movie.custom_properties or {} + source_props = source_movie.custom_properties or {} + + for key, value in source_props.items(): + if value and not target_props.get(key): + target_props[key] = value + updated = True + logger.debug(f"Merged custom property '{key}' from movie {source_movie.id} to {target_movie.id}") + + if updated: + target_movie.custom_properties = target_props + target_movie.save() + logger.info(f"Successfully merged data from movie {source_movie.id} into {target_movie.id}") + + +def handle_series_id_conflicts(current_series, relation, tmdb_id_to_set, imdb_id_to_set): + """ + Handle potential duplicate key conflicts when setting tmdb_id or 
imdb_id for series. + + Since this is called when a user is actively accessing series details, we always + preserve the current series (user's selection) and merge the existing one into it. + This prevents breaking the user's current viewing experience. + + Returns: + tuple: (series_to_use, relation_was_updated) + """ + from django.db import IntegrityError + + existing_series_with_tmdb = None + existing_series_with_imdb = None + + # Check for existing series with these IDs + if tmdb_id_to_set: + try: + existing_series_with_tmdb = Series.objects.get(tmdb_id=tmdb_id_to_set) + except Series.DoesNotExist: + pass + + if imdb_id_to_set: + try: + existing_series_with_imdb = Series.objects.get(imdb_id=imdb_id_to_set) + except Series.DoesNotExist: + pass + + # If no conflicts, proceed normally + if not existing_series_with_tmdb and not existing_series_with_imdb: + return current_series, False + + # Determine which existing series has the conflicting ID (prefer TMDB match) + existing_series = existing_series_with_tmdb or existing_series_with_imdb + + # CRITICAL: Check if the existing series is actually the same as the current series + # This can happen if the current series already has the ID we're trying to set + if existing_series.id == current_series.id: + logger.debug(f"Current series {current_series.id} already has the target ID, no conflict resolution needed") + return current_series, False + + logger.info(f"ID conflict detected: Merging existing series '{existing_series.name}' (ID: {existing_series.id}) into current series '{current_series.name}' (ID: {current_series.id}) to preserve user selection") + + # FIRST: Clear the conflicting ID from the existing series before any merging + if existing_series_with_tmdb and tmdb_id_to_set: + logger.info(f"Clearing tmdb_id from existing series {existing_series.id} to avoid constraint violation") + existing_series.tmdb_id = None + existing_series.save(update_fields=['tmdb_id']) + + if existing_series_with_imdb and imdb_id_to_set: + logger.info(f"Clearing imdb_id from existing series {existing_series.id} to avoid constraint violation") + existing_series.imdb_id = None + existing_series.save(update_fields=['imdb_id']) + + # THEN: Merge data from existing series into current series (now safe to set IDs) + merge_series_data(source_series=existing_series, target_series=current_series, + tmdb_id_to_set=tmdb_id_to_set, imdb_id_to_set=imdb_id_to_set) + + # Transfer all relations from existing series to current series + existing_relations = existing_series.m3u_relations.all() + if existing_relations.exists(): + logger.info(f"Transferring {existing_relations.count()} relations from existing series {existing_series.id} to current series {current_series.id}") + existing_relations.update(series=current_series) + + # Now safe to delete the existing series since all its relations have been transferred + logger.info(f"Deleting existing series {existing_series.id} '{existing_series.name}' after merging data and transferring relations") + existing_series.delete() + + return current_series, False # No relation update needed since we kept current series + + +def merge_series_data(source_series, target_series, tmdb_id_to_set=None, imdb_id_to_set=None): + """ + Merge valuable data from source_series into target_series. + Only overwrites target fields that are empty/None with non-empty source values. 
+ + Args: + source_series: Series to copy data from + target_series: Series to copy data to + tmdb_id_to_set: TMDB ID to set on target (overrides source tmdb_id) + imdb_id_to_set: IMDB ID to set on target (overrides source imdb_id) + """ + updated = False + + # Basic fields - only fill if target is empty + if not target_series.description and source_series.description: + target_series.description = source_series.description + updated = True + logger.debug(f"Merged description from series {source_series.id} to {target_series.id}") + + if not target_series.year and source_series.year: + target_series.year = source_series.year + updated = True + logger.debug(f"Merged year from series {source_series.id} to {target_series.id}") + + if not target_series.rating and source_series.rating: + target_series.rating = source_series.rating + updated = True + logger.debug(f"Merged rating from series {source_series.id} to {target_series.id}") + + if not target_series.genre and source_series.genre: + target_series.genre = source_series.genre + updated = True + logger.debug(f"Merged genre from series {source_series.id} to {target_series.id}") + + if not target_series.logo and source_series.logo: + target_series.logo = source_series.logo + updated = True + logger.debug(f"Merged logo from series {source_series.id} to {target_series.id}") + + # Handle external IDs - use the specific IDs we want to set, or fall back to source + if not target_series.tmdb_id: + if tmdb_id_to_set: + target_series.tmdb_id = tmdb_id_to_set + updated = True + logger.debug(f"Set tmdb_id {tmdb_id_to_set} on series {target_series.id}") + elif source_series.tmdb_id: + target_series.tmdb_id = source_series.tmdb_id + updated = True + logger.debug(f"Merged tmdb_id from series {source_series.id} to {target_series.id}") + + if not target_series.imdb_id: + if imdb_id_to_set: + target_series.imdb_id = imdb_id_to_set + updated = True + logger.debug(f"Set imdb_id {imdb_id_to_set} on series {target_series.id}") + elif source_series.imdb_id: + target_series.imdb_id = source_series.imdb_id + updated = True + logger.debug(f"Merged imdb_id from series {source_series.id} to {target_series.id}") + + # Merge custom properties + target_props = target_series.custom_properties or {} + source_props = source_series.custom_properties or {} + + for key, value in source_props.items(): + if value and not target_props.get(key): + target_props[key] = value + updated = True + logger.debug(f"Merged custom property '{key}' from series {source_series.id} to {target_series.id}") + + if updated: + target_series.custom_properties = target_props + target_series.save() + logger.info(f"Successfully merged data from series {source_series.id} into {target_series.id}") + + +def is_non_empty_string(value): + """ + Helper function to safely check if a value is a non-empty string. + Returns True only if value is a string and has non-whitespace content. + """ + return isinstance(value, str) and value.strip() + + +def extract_string_from_array_or_string(value): + """ + Helper function to extract a string value from either a string or array. + Returns the first non-null string from an array, or the string itself. + Returns None if no valid string is found. 
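+ Non-string array items are coerced with str() as a last resort.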
+ """ + if isinstance(value, str): + return value.strip() if value.strip() else None + elif isinstance(value, list) and value: + # Find first non-null, non-empty string in the array + for item in value: + if isinstance(item, str) and item.strip(): + return item.strip() + elif item is not None and str(item).strip(): + return str(item).strip() + return None + + +def clean_custom_properties(custom_props): + """ + Remove null, empty, or invalid values from custom_properties dict. + Only keeps properties that have meaningful values. + """ + if not custom_props: + return None + + cleaned = {} + for key, value in custom_props.items(): + # Handle fields that should extract clean strings + if key in ['youtube_trailer', 'actors', 'director', 'cast']: + clean_value = extract_string_from_array_or_string(value) + if clean_value: + cleaned[key] = clean_value + # Handle backdrop_path which should remain as array format + elif key == 'backdrop_path': + clean_value = extract_string_from_array_or_string(value) + if clean_value: + cleaned[key] = [clean_value] + else: + # For other properties, keep them if they're not None and not empty + if value is not None and value != '' and value != []: + # If it's a list with only null values, skip it + if isinstance(value, list) and all(item is None for item in value): + continue + cleaned[key] = value + + return cleaned if cleaned else None + + +def should_update_field(existing_value, new_value): + """ + Helper function to determine if we should update a field. + Returns True if: + - new_value is a non-empty string (or contains one if it's an array) AND + - existing_value is None, empty string, array with null/empty values, or non-string + """ + # Extract actual string values from arrays if needed + new_string = extract_string_from_array_or_string(new_value) + existing_string = extract_string_from_array_or_string(existing_value) + + return new_string is not None and (existing_string is None or not existing_string) + + +@shared_task +def refresh_movie_advanced_data(m3u_movie_relation_id, force_refresh=False): + """ + Fetch advanced movie data from provider and update Movie and M3UMovieRelation. + Only fetch if last_advanced_refresh > 24h ago, unless force_refresh is True. + """ + try: + relation = M3UMovieRelation.objects.select_related('movie', 'm3u_account').get(id=m3u_movie_relation_id) + now = timezone.now() + if not force_refresh and relation.last_advanced_refresh and (now - relation.last_advanced_refresh).total_seconds() < 86400: + return "Advanced data recently fetched, skipping." 
+ + account = relation.m3u_account + movie = relation.movie + + from core.xtream_codes import Client as XtreamCodesClient + + with XtreamCodesClient( + server_url=account.server_url, + username=account.username, + password=account.password, + user_agent=account.get_user_agent().user_agent + ) as client: + vod_info = client.get_vod_info(relation.stream_id) + if vod_info and 'info' in vod_info: + info_raw = vod_info.get('info', {}) + + # Handle case where 'info' might be a list instead of dict + if isinstance(info_raw, list): + # If it's a list, try to use the first item or create empty dict + info = info_raw[0] if info_raw and isinstance(info_raw[0], dict) else {} + logger.warning(f"VOD info for stream {relation.stream_id} returned list instead of dict, using first item") + elif isinstance(info_raw, dict): + info = info_raw + else: + info = {} + logger.warning(f"VOD info for stream {relation.stream_id} returned unexpected type: {type(info_raw)}") + + movie_data_raw = vod_info.get('movie_data', {}) + + # Handle case where 'movie_data' might be a list instead of dict + if isinstance(movie_data_raw, list): + movie_data = movie_data_raw[0] if movie_data_raw and isinstance(movie_data_raw[0], dict) else {} + logger.warning(f"VOD movie_data for stream {relation.stream_id} returned list instead of dict, using first item") + elif isinstance(movie_data_raw, dict): + movie_data = movie_data_raw + else: + movie_data = {} + logger.warning(f"VOD movie_data for stream {relation.stream_id} returned unexpected type: {type(movie_data_raw)}") + + # Update Movie fields if changed + updated = False + custom_props = movie.custom_properties or {} + if info.get('plot') and info.get('plot') != movie.description: + movie.description = info.get('plot') + updated = True + normalized_rating = normalize_rating(info.get('rating')) + if normalized_rating and normalized_rating != movie.rating: + movie.rating = normalized_rating + updated = True + if info.get('genre') and info.get('genre') != movie.genre: + movie.genre = info.get('genre') + updated = True + if info.get('duration_secs'): + duration_secs = int(info.get('duration_secs')) + if duration_secs != movie.duration_secs: + movie.duration_secs = duration_secs + updated = True + # Check for releasedate or release_date + release_date_value = info.get('releasedate') or info.get('release_date') + if release_date_value: + try: + year = int(str(release_date_value).split('-')[0]) + if year != movie.year: + movie.year = year + updated = True + except Exception: + pass + # Handle TMDB/IMDB ID updates with duplicate key protection + tmdb_id_to_set = info.get('tmdb_id') if info.get('tmdb_id') and info.get('tmdb_id') != movie.tmdb_id else None + imdb_id_to_set = info.get('imdb_id') if info.get('imdb_id') and info.get('imdb_id') != movie.imdb_id else None + + logger.debug(f"Movie {movie.id} current IDs: tmdb_id={movie.tmdb_id}, imdb_id={movie.imdb_id}") + logger.debug(f"IDs to set: tmdb_id={tmdb_id_to_set}, imdb_id={imdb_id_to_set}") + + if tmdb_id_to_set or imdb_id_to_set: + # Check for existing movies with these IDs and handle duplicates + updated_movie, relation_updated = handle_movie_id_conflicts( + movie, relation, tmdb_id_to_set, imdb_id_to_set + ) + if relation_updated: + # If the relation was updated to point to a different movie, + # we need to update our reference and continue with that movie + movie = updated_movie + logger.info(f"Relation updated, now working with movie {movie.id}") + else: + # No relation update, safe to set the IDs + if tmdb_id_to_set: + movie.tmdb_id 
= tmdb_id_to_set + updated = True + logger.debug(f"Set tmdb_id {tmdb_id_to_set} on movie {movie.id}") + if imdb_id_to_set: + movie.imdb_id = imdb_id_to_set + updated = True + logger.debug(f"Set imdb_id {imdb_id_to_set} on movie {movie.id}") + # Only update trailer if we have a non-empty value and either no existing value or existing value is empty + if should_update_field(custom_props.get('youtube_trailer'), info.get('trailer')): + custom_props['youtube_trailer'] = extract_string_from_array_or_string(info.get('trailer')) + updated = True + if should_update_field(custom_props.get('youtube_trailer'), info.get('youtube_trailer')): + custom_props['youtube_trailer'] = extract_string_from_array_or_string(info.get('youtube_trailer')) + updated = True + # Only update backdrop_path if we have a non-empty value and either no existing value or existing value is empty + if should_update_field(custom_props.get('backdrop_path'), info.get('backdrop_path')): + backdrop_url = extract_string_from_array_or_string(info.get('backdrop_path')) + custom_props['backdrop_path'] = [backdrop_url] if backdrop_url else None + updated = True + # Only update actors if we have a non-empty value and either no existing value or existing value is empty + if should_update_field(custom_props.get('actors'), info.get('actors')): + custom_props['actors'] = extract_string_from_array_or_string(info.get('actors')) + updated = True + if should_update_field(custom_props.get('actors'), info.get('cast')): + custom_props['actors'] = extract_string_from_array_or_string(info.get('cast')) + updated = True + # Only update director if we have a non-empty value and either no existing value or existing value is empty + if should_update_field(custom_props.get('director'), info.get('director')): + custom_props['director'] = extract_string_from_array_or_string(info.get('director')) + updated = True + if updated: + # Clean custom_properties before saving to remove null/empty values + movie.custom_properties = clean_custom_properties(custom_props) + try: + movie.save() + except Exception as save_error: + # If we still get an integrity error after our conflict resolution, + # log it and try to save without the problematic IDs + logger.error(f"Failed to save movie {movie.id} after conflict resolution: {str(save_error)}") + if 'tmdb_id' in str(save_error) and movie.tmdb_id: + logger.warning(f"Clearing tmdb_id {movie.tmdb_id} from movie {movie.id} due to save error") + movie.tmdb_id = None + if 'imdb_id' in str(save_error) and movie.imdb_id: + logger.warning(f"Clearing imdb_id {movie.imdb_id} from movie {movie.id} due to save error") + movie.imdb_id = None + try: + movie.save() + logger.info(f"Successfully saved movie {movie.id} after clearing problematic IDs") + except Exception as final_error: + logger.error(f"Final save attempt failed for movie {movie.id}: {str(final_error)}") + raise + + # Update relation custom_properties and last_advanced_refresh + relation_custom_props = relation.custom_properties or {} + + # Clean the detailed_info before saving to avoid storing null/empty arrays + cleaned_info = clean_custom_properties(info) if info else None + cleaned_movie_data = clean_custom_properties(movie_data) if movie_data else None + + if cleaned_info: + relation_custom_props['detailed_info'] = cleaned_info + if cleaned_movie_data: + relation_custom_props['movie_data'] = cleaned_movie_data + relation_custom_props['detailed_fetched'] = True + + relation.custom_properties = relation_custom_props + relation.last_advanced_refresh = now + 
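+ # update_fields keeps the write narrow so concurrent changes to other relation
+ # columns are not clobbered by this save.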
relation.save(update_fields=['custom_properties', 'last_advanced_refresh']) + + return "Advanced data refreshed." + except Exception as e: + logger.error(f"Error refreshing advanced movie data for relation {m3u_movie_relation_id}: {str(e)}") + return f"Error: {str(e)}" + + +def validate_logo_reference(obj, obj_type="object"): + """ + Validate that a VOD logo reference exists in the database. + If not, set it to None to prevent foreign key constraint violations. + + Args: + obj: Object with a logo attribute + obj_type: String description of the object type for logging + + Returns: + bool: True if logo was valid or None, False if logo was invalid and cleared + """ + if not hasattr(obj, 'logo') or not obj.logo: + return True + + if not obj.logo.pk: + # Logo doesn't have a primary key, so it's not saved + obj.logo = None + return False + + try: + # Verify the logo exists in the database + VODLogo.objects.get(pk=obj.logo.pk) + return True + except VODLogo.DoesNotExist: + logger.warning(f"VOD Logo with ID {obj.logo.pk} does not exist in database for {obj_type} '{getattr(obj, 'name', 'Unknown')}', setting to None") + obj.logo = None + return False diff --git a/apps/vod/urls.py b/apps/vod/urls.py new file mode 100644 index 00000000..3cea96a5 --- /dev/null +++ b/apps/vod/urls.py @@ -0,0 +1,16 @@ +from django.urls import path, include +from rest_framework.routers import DefaultRouter +from .api_views import MovieViewSet, EpisodeViewSet, SeriesViewSet, VODCategoryViewSet, UnifiedContentViewSet + +app_name = 'vod' + +router = DefaultRouter() +router.register(r'movies', MovieViewSet) +router.register(r'episodes', EpisodeViewSet) +router.register(r'series', SeriesViewSet) +router.register(r'categories', VODCategoryViewSet) +router.register(r'all', UnifiedContentViewSet, basename='unified-content') + +urlpatterns = [ + path('api/', include(router.urls)), +] diff --git a/core/api_urls.py b/core/api_urls.py index e30eb698..75257db1 100644 --- a/core/api_urls.py +++ b/core/api_urls.py @@ -2,15 +2,26 @@ from django.urls import path, include from rest_framework.routers import DefaultRouter -from .api_views import UserAgentViewSet, StreamProfileViewSet, CoreSettingsViewSet, environment, version +from .api_views import ( + UserAgentViewSet, + StreamProfileViewSet, + CoreSettingsViewSet, + environment, + version, + rehash_streams_endpoint, + TimezoneListView, + get_system_events +) router = DefaultRouter() router.register(r'useragents', UserAgentViewSet, basename='useragent') router.register(r'streamprofiles', StreamProfileViewSet, basename='streamprofile') router.register(r'settings', CoreSettingsViewSet, basename='coresettings') -router.register(r'settings', CoreSettingsViewSet, basename='settings') urlpatterns = [ path('settings/env/', environment, name='token_refresh'), path('version/', version, name='version'), + path('rehash-streams/', rehash_streams_endpoint, name='rehash_streams'), + path('timezones/', TimezoneListView.as_view(), name='timezones'), + path('system-events/', get_system_events, name='system_events'), path('', include(router.urls)), ] diff --git a/core/api_views.py b/core/api_views.py index 7f3ecf57..30829174 100644 --- a/core/api_views.py +++ b/core/api_views.py @@ -1,55 +1,260 @@ # core/api_views.py +import json +import ipaddress +import logging from rest_framework import viewsets, status from rest_framework.response import Response +from rest_framework.views import APIView from django.shortcuts import get_object_or_404 -from .models import UserAgent, StreamProfile, CoreSettings -from 
.serializers import UserAgentSerializer, StreamProfileSerializer, CoreSettingsSerializer from rest_framework.permissions import IsAuthenticated -from rest_framework.decorators import api_view, permission_classes +from rest_framework.decorators import api_view, permission_classes, action from drf_yasg.utils import swagger_auto_schema +from drf_yasg import openapi +from .models import ( + UserAgent, + StreamProfile, + CoreSettings, + STREAM_SETTINGS_KEY, + DVR_SETTINGS_KEY, + NETWORK_ACCESS_KEY, + PROXY_SETTINGS_KEY, +) +from .serializers import ( + UserAgentSerializer, + StreamProfileSerializer, + CoreSettingsSerializer, + ProxySettingsSerializer, +) + import socket import requests import os +from core.tasks import rehash_streams +from apps.accounts.permissions import ( + Authenticated, +) +from dispatcharr.utils import get_client_ip + + +logger = logging.getLogger(__name__) + class UserAgentViewSet(viewsets.ModelViewSet): """ API endpoint that allows user agents to be viewed, created, edited, or deleted. """ + queryset = UserAgent.objects.all() serializer_class = UserAgentSerializer + class StreamProfileViewSet(viewsets.ModelViewSet): """ API endpoint that allows stream profiles to be viewed, created, edited, or deleted. """ + queryset = StreamProfile.objects.all() serializer_class = StreamProfileSerializer + class CoreSettingsViewSet(viewsets.ModelViewSet): """ API endpoint for editing core settings. This is treated as a singleton: only one instance should exist. """ + queryset = CoreSettings.objects.all() serializer_class = CoreSettingsSerializer + def update(self, request, *args, **kwargs): + instance = self.get_object() + old_value = instance.value + response = super().update(request, *args, **kwargs) + + # If stream settings changed and m3u_hash_key is different, rehash streams + if instance.key == STREAM_SETTINGS_KEY: + new_value = request.data.get("value", {}) + if isinstance(new_value, dict) and isinstance(old_value, dict): + old_hash = old_value.get("m3u_hash_key", "") + new_hash = new_value.get("m3u_hash_key", "") + if old_hash != new_hash: + hash_keys = new_hash.split(",") if isinstance(new_hash, str) else new_hash + rehash_streams.delay(hash_keys) + + # If DVR settings changed and pre/post offsets are different, reschedule upcoming recordings + if instance.key == DVR_SETTINGS_KEY: + new_value = request.data.get("value", {}) + if isinstance(new_value, dict) and isinstance(old_value, dict): + old_pre = old_value.get("pre_offset_minutes") + new_pre = new_value.get("pre_offset_minutes") + old_post = old_value.get("post_offset_minutes") + new_post = new_value.get("post_offset_minutes") + if old_pre != new_pre or old_post != new_post: + try: + # Prefer async task if Celery is available + from apps.channels.tasks import reschedule_upcoming_recordings_for_offset_change + reschedule_upcoming_recordings_for_offset_change.delay() + except Exception: + # Fallback to synchronous implementation + from apps.channels.tasks import reschedule_upcoming_recordings_for_offset_change_impl + reschedule_upcoming_recordings_for_offset_change_impl() + + return response + + def create(self, request, *args, **kwargs): + response = super().create(request, *args, **kwargs) + # If creating DVR settings with offset values, reschedule upcoming recordings + try: + key = request.data.get("key") + if key == DVR_SETTINGS_KEY: + value = request.data.get("value", {}) + if isinstance(value, dict) and ("pre_offset_minutes" in value or "post_offset_minutes" in value): + try: + from apps.channels.tasks import 
reschedule_upcoming_recordings_for_offset_change + reschedule_upcoming_recordings_for_offset_change.delay() + except Exception: + from apps.channels.tasks import reschedule_upcoming_recordings_for_offset_change_impl + reschedule_upcoming_recordings_for_offset_change_impl() + except Exception: + pass + return response + @action(detail=False, methods=["post"], url_path="check") + def check(self, request, *args, **kwargs): + data = request.data + + if data.get("key") == NETWORK_ACCESS_KEY: + client_ip = ipaddress.ip_address(get_client_ip(request)) + + in_network = {} + invalid = [] + + value = data.get("value", {}) + for key, val in value.items(): + in_network[key] = [] + cidrs = val.split(",") + for cidr in cidrs: + try: + network = ipaddress.ip_network(cidr) + + if client_ip in network: + in_network[key] = [] + break + + in_network[key].append(cidr) + except: + invalid.append(cidr) + + if len(invalid) > 0: + return Response( + { + "error": True, + "message": "Invalid CIDR(s)", + "data": invalid, + }, + status=status.HTTP_200_OK, + ) + + response_data = { + **in_network, + "client_ip": str(client_ip) + } + return Response(response_data, status=status.HTTP_200_OK) + + return Response({}, status=status.HTTP_200_OK) + +class ProxySettingsViewSet(viewsets.ViewSet): + """ + API endpoint for proxy settings stored as JSON in CoreSettings. + """ + serializer_class = ProxySettingsSerializer + + def _get_or_create_settings(self): + """Get or create the proxy settings CoreSettings entry""" + try: + settings_obj = CoreSettings.objects.get(key=PROXY_SETTINGS_KEY) + settings_data = settings_obj.value + except CoreSettings.DoesNotExist: + # Create default settings + settings_data = { + "buffering_timeout": 15, + "buffering_speed": 1.0, + "redis_chunk_ttl": 60, + "channel_shutdown_delay": 0, + "channel_init_grace_period": 5, + } + settings_obj, created = CoreSettings.objects.get_or_create( + key=PROXY_SETTINGS_KEY, + defaults={ + "name": "Proxy Settings", + "value": settings_data + } + ) + return settings_obj, settings_data + + def list(self, request): + """Return proxy settings""" + settings_obj, settings_data = self._get_or_create_settings() + return Response(settings_data) + + def retrieve(self, request, pk=None): + """Return proxy settings regardless of ID""" + settings_obj, settings_data = self._get_or_create_settings() + return Response(settings_data) + + def update(self, request, pk=None): + """Update proxy settings""" + settings_obj, current_data = self._get_or_create_settings() + + serializer = ProxySettingsSerializer(data=request.data) + serializer.is_valid(raise_exception=True) + + # Update the JSON data - store as dict directly + settings_obj.value = serializer.validated_data + settings_obj.save() + + return Response(serializer.validated_data) + + def partial_update(self, request, pk=None): + """Partially update proxy settings""" + settings_obj, current_data = self._get_or_create_settings() + + # Merge current data with new data + updated_data = {**current_data, **request.data} + + serializer = ProxySettingsSerializer(data=updated_data) + serializer.is_valid(raise_exception=True) + + # Update the JSON data - store as dict directly + settings_obj.value = serializer.validated_data + settings_obj.save() + + return Response(serializer.validated_data) + + @action(detail=False, methods=['get', 'patch']) + def settings(self, request): + """Get or update the proxy settings.""" + if request.method == 'GET': + return self.list(request) + elif request.method == 'PATCH': + return 
self.partial_update(request) + + + @swagger_auto_schema( - method='get', + method="get", operation_description="Endpoint for environment details", - responses={200: "Environment variables"} + responses={200: "Environment variables"}, ) -@api_view(['GET']) -@permission_classes([IsAuthenticated]) +@api_view(["GET"]) +@permission_classes([Authenticated]) def environment(request): - - public_ip = None local_ip = None country_code = None country_name = None - # 1) Get the public IP + # 1) Get the public IP from ipify.org API try: r = requests.get("https://api64.ipify.org?format=json", timeout=5) r.raise_for_status() @@ -57,46 +262,214 @@ def environment(request): except requests.RequestException as e: public_ip = f"Error: {e}" - # 2) Get the local IP + # 2) Get the local IP by connecting to a public DNS server try: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - # connect to a “public” address so the OS can determine our local interface + # connect to a "public" address so the OS can determine our local interface s.connect(("8.8.8.8", 80)) local_ip = s.getsockname()[0] s.close() except Exception as e: local_ip = f"Error: {e}" - # 3) If we got a valid public_ip, fetch geo info from ipapi.co + # 3) Get geolocation data from ipapi.co or ip-api.com if public_ip and "Error" not in public_ip: try: - geo = requests.get(f"https://ipapi.co/{public_ip}/json/", timeout=5).json() - # ipapi returns fields like country_code, country_name, etc. - country_code = geo.get("country_code", "") # e.g. "US" - country_name = geo.get("country_name", "") # e.g. "United States" - except requests.RequestException as e: + # Attempt to get geo information from ipapi.co first + r = requests.get(f"https://ipapi.co/{public_ip}/json/", timeout=5) + + if r.status_code == requests.codes.ok: + geo = r.json() + country_code = geo.get("country_code") # e.g. "US" + country_name = geo.get("country_name") # e.g. "United States" + + else: + # If ipapi.co fails, fallback to ip-api.com + # only supports http requests for free tier + r = requests.get("http://ip-api.com/json/", timeout=5) + + if r.status_code == requests.codes.ok: + geo = r.json() + country_code = geo.get("countryCode") # e.g. "US" + country_name = geo.get("country") # e.g. 
"United States" + + else: + raise Exception("Geo lookup failed with both services") + + except Exception as e: + logger.error(f"Error during geo lookup: {e}") country_code = None country_name = None - return Response({ - 'authenticated': True, - 'public_ip': public_ip, - 'local_ip': local_ip, - 'country_code': country_code, - 'country_name': country_name, - 'env_mode': "dev" if os.getenv('DISPATCHARR_ENV') == "dev" else "prod", - }) + # 4) Get environment mode from system environment variable + return Response( + { + "authenticated": True, + "public_ip": public_ip, + "local_ip": local_ip, + "country_code": country_code, + "country_name": country_name, + "env_mode": "dev" if os.getenv("DISPATCHARR_ENV") == "dev" else "prod", + } + ) + @swagger_auto_schema( - method='get', + method="get", operation_description="Get application version information", - responses={200: "Version information"} + responses={200: "Version information"}, ) -@api_view(['GET']) + +@api_view(["GET"]) def version(request): # Import version information from version import __version__, __timestamp__ - return Response({ - 'version': __version__, - 'timestamp': __timestamp__, - }) + + return Response( + { + "version": __version__, + "timestamp": __timestamp__, + } + ) + + +@swagger_auto_schema( + method="post", + operation_description="Trigger rehashing of all streams", + responses={200: "Rehash task started"}, +) +@api_view(["POST"]) +@permission_classes([Authenticated]) +def rehash_streams_endpoint(request): + """Trigger the rehash streams task""" + try: + # Get the current hash keys from settings + hash_key = CoreSettings.get_m3u_hash_key() + hash_keys = hash_key.split(",") if isinstance(hash_key, str) else hash_key + + # Queue the rehash task + task = rehash_streams.delay(hash_keys) + + return Response({ + "success": True, + "message": "Stream rehashing task has been queued", + "task_id": task.id + }, status=status.HTTP_200_OK) + + except Exception as e: + return Response({ + "success": False, + "message": f"Error triggering rehash: {str(e)}" + }, status=status.HTTP_400_BAD_REQUEST) + + except Exception as e: + logger.error(f"Error triggering rehash streams: {e}") + return Response({ + "success": False, + "message": "Failed to trigger rehash task" + }, status=status.HTTP_500_INTERNAL_SERVER_ERROR) + + +# ───────────────────────────── +# Timezone List API +# ───────────────────────────── +class TimezoneListView(APIView): + """ + API endpoint that returns all available timezones supported by pytz. + Returns a list of timezone names grouped by region for easy selection. + This is a general utility endpoint that can be used throughout the application. + """ + + def get_permissions(self): + return [Authenticated()] + + @swagger_auto_schema( + operation_description="Get list of all supported timezones", + responses={200: openapi.Response('List of timezones with grouping by region')} + ) + def get(self, request): + import pytz + + # Get all common timezones (excludes deprecated ones) + all_timezones = sorted(pytz.common_timezones) + + # Group by region for better UX + grouped = {} + for tz in all_timezones: + if '/' in tz: + region = tz.split('/')[0] + if region not in grouped: + grouped[region] = [] + grouped[region].append(tz) + else: + # Handle special zones like UTC, GMT, etc. 
+ if 'Other' not in grouped: + grouped['Other'] = [] + grouped['Other'].append(tz) + + return Response({ + 'timezones': all_timezones, + 'grouped': grouped, + 'count': len(all_timezones) + }) + + +# ───────────────────────────── +# System Events API +# ───────────────────────────── +@api_view(['GET']) +@permission_classes([IsAuthenticated]) +def get_system_events(request): + """ + Get recent system events (channel start/stop, buffering, client connections, etc.) + + Query Parameters: + limit: Number of events to return per page (default: 100, max: 1000) + offset: Number of events to skip (for pagination, default: 0) + event_type: Filter by specific event type (optional) + """ + from core.models import SystemEvent + + try: + # Get pagination params + limit = min(int(request.GET.get('limit', 100)), 1000) + offset = int(request.GET.get('offset', 0)) + + # Start with all events + events = SystemEvent.objects.all() + + # Filter by event_type if provided + event_type = request.GET.get('event_type') + if event_type: + events = events.filter(event_type=event_type) + + # Get total count before applying pagination + total_count = events.count() + + # Apply offset and limit for pagination + events = events[offset:offset + limit] + + # Serialize the data + events_data = [{ + 'id': event.id, + 'event_type': event.event_type, + 'event_type_display': event.get_event_type_display(), + 'timestamp': event.timestamp.isoformat(), + 'channel_id': str(event.channel_id) if event.channel_id else None, + 'channel_name': event.channel_name, + 'details': event.details + } for event in events] + + return Response({ + 'events': events_data, + 'count': len(events_data), + 'total': total_count, + 'offset': offset, + 'limit': limit + }) + + except Exception as e: + logger.error(f"Error fetching system events: {e}") + return Response({ + 'error': 'Failed to fetch system events' + }, status=status.HTTP_500_INTERNAL_SERVER_ERROR) diff --git a/core/apps.py b/core/apps.py index 63c883ca..96ccfb3b 100644 --- a/core/apps.py +++ b/core/apps.py @@ -2,6 +2,19 @@ from django.apps import AppConfig from django.conf import settings import os, logging +# Define TRACE level (5 is below DEBUG which is 10) +TRACE = 5 +logging.addLevelName(TRACE, "TRACE") + +# Add trace method to the Logger class +def trace(self, message, *args, **kwargs): + """Log a message with TRACE level (more detailed than DEBUG)""" + if self.isEnabledFor(TRACE): + self._log(TRACE, message, args, **kwargs) + +# Add the trace method to the Logger class +logging.Logger.trace = trace + class CoreConfig(AppConfig): default_auto_field = 'django.db.models.BigAutoField' name = 'core' diff --git a/core/fixtures/initial_data.json b/core/fixtures/initial_data.json index c037fa78..889f0d24 100644 --- a/core/fixtures/initial_data.json +++ b/core/fixtures/initial_data.json @@ -23,7 +23,7 @@ "model": "core.streamprofile", "pk": 1, "fields": { - "name": "ffmpeg", + "name": "FFmpeg", "command": "ffmpeg", "parameters": "-i {streamUrl} -c:v copy -c:a copy -f mpegts pipe:1", "is_active": true, @@ -34,11 +34,22 @@ "model": "core.streamprofile", "pk": 2, "fields": { - "name": "streamlink", + "name": "Streamlink", "command": "streamlink", "parameters": "{streamUrl} best --stdout", "is_active": true, "user_agent": "1" } + }, + { + "model": "core.streamprofile", + "pk": 3, + "fields": { + "name": "VLC", + "command": "cvlc", + "parameters": "-vv -I dummy --no-video-title-show --http-user-agent {userAgent} {streamUrl} --sout #standard{access=file,mux=ts,dst=-}", + "is_active": true, + 
"user_agent": "1" + } } ] diff --git a/core/management/commands/reset_network_access.py b/core/management/commands/reset_network_access.py new file mode 100644 index 00000000..a31d247c --- /dev/null +++ b/core/management/commands/reset_network_access.py @@ -0,0 +1,13 @@ +# your_app/management/commands/update_column.py + +from django.core.management.base import BaseCommand +from core.models import CoreSettings, NETWORK_ACCESS_KEY + + +class Command(BaseCommand): + help = "Reset network access settings" + + def handle(self, *args, **options): + setting = CoreSettings.objects.get(key=NETWORK_ACCESS_KEY) + setting.value = {} + setting.save() diff --git a/core/migrations/0009_m3u_hash_settings.py b/core/migrations/0009_m3u_hash_settings.py index eab5f141..3c6283fa 100644 --- a/core/migrations/0009_m3u_hash_settings.py +++ b/core/migrations/0009_m3u_hash_settings.py @@ -8,7 +8,7 @@ def preload_core_settings(apps, schema_editor): CoreSettings.objects.create( key=slugify("M3U Hash Key"), name="M3U Hash Key", - value="name,url,tvg_id", + value="url", ) class Migration(migrations.Migration): diff --git a/core/migrations/0013_default_network_access_settings.py b/core/migrations/0013_default_network_access_settings.py new file mode 100644 index 00000000..be53ba05 --- /dev/null +++ b/core/migrations/0013_default_network_access_settings.py @@ -0,0 +1,24 @@ +# Generated by Django 5.1.6 on 2025-03-01 14:01 + +from django.db import migrations +from django.utils.text import slugify + + +def preload_network_access_settings(apps, schema_editor): + CoreSettings = apps.get_model("core", "CoreSettings") + CoreSettings.objects.create( + key=slugify("Network Access"), + name="Network Access", + value="{}", + ) + + +class Migration(migrations.Migration): + + dependencies = [ + ("core", "0012_default_active_m3u_accounts"), + ] + + operations = [ + migrations.RunPython(preload_network_access_settings), + ] diff --git a/core/migrations/0014_default_proxy_settings.py b/core/migrations/0014_default_proxy_settings.py new file mode 100644 index 00000000..f4a61a9e --- /dev/null +++ b/core/migrations/0014_default_proxy_settings.py @@ -0,0 +1,35 @@ +# Generated by Django 5.1.6 on 2025-03-01 14:01 + +import json +from django.db import migrations +from django.utils.text import slugify + + +def preload_proxy_settings(apps, schema_editor): + CoreSettings = apps.get_model("core", "CoreSettings") + + # Default proxy settings + default_proxy_settings = { + "buffering_timeout": 15, + "buffering_speed": 1.0, + "redis_chunk_ttl": 60, + "channel_shutdown_delay": 0, + "channel_init_grace_period": 5, + } + + CoreSettings.objects.create( + key=slugify("Proxy Settings"), + name="Proxy Settings", + value=json.dumps(default_proxy_settings), + ) + + +class Migration(migrations.Migration): + + dependencies = [ + ("core", "0013_default_network_access_settings"), + ] + + operations = [ + migrations.RunPython(preload_proxy_settings), + ] diff --git a/core/migrations/0015_dvr_templates.py b/core/migrations/0015_dvr_templates.py new file mode 100644 index 00000000..130a80d4 --- /dev/null +++ b/core/migrations/0015_dvr_templates.py @@ -0,0 +1,30 @@ +# Generated by Django 5.1.6 on 2025-03-01 14:10 + +from django.db import migrations +from django.utils.text import slugify + + +def add_dvr_defaults(apps, schema_editor): + CoreSettings = apps.get_model("core", "CoreSettings") + + defaults = [ + (slugify("DVR TV Template"), "DVR TV Template", "Recordings/TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv"), + (slugify("DVR Movie Template"), "DVR Movie 
Template", "Recordings/Movies/{title} ({year}).mkv"), + (slugify("DVR TV Fallback Template"), "DVR TV Fallback Template", "Recordings/TV_Shows/{show}/{start}.mkv"), + (slugify("DVR Movie Fallback Template"), "DVR Movie Fallback Template", "Recordings/Movies/{start}.mkv"), + (slugify("DVR Comskip Enabled"), "DVR Comskip Enabled", "false"), + ] + + for key, name, value in defaults: + CoreSettings.objects.get_or_create(key=key, defaults={"name": name, "value": value}) + + +class Migration(migrations.Migration): + + dependencies = [ + ("core", "0014_default_proxy_settings"), + ] + + operations = [ + migrations.RunPython(add_dvr_defaults), + ] diff --git a/core/migrations/0016_update_dvr_template_paths.py b/core/migrations/0016_update_dvr_template_paths.py new file mode 100644 index 00000000..5e729c47 --- /dev/null +++ b/core/migrations/0016_update_dvr_template_paths.py @@ -0,0 +1,61 @@ +# Generated manually to update DVR template paths + +from django.db import migrations +from django.utils.text import slugify + + +def update_dvr_template_paths(apps, schema_editor): + """Remove 'Recordings/' prefix from DVR template paths""" + CoreSettings = apps.get_model("core", "CoreSettings") + + # Define the updates needed + updates = [ + (slugify("DVR TV Template"), "TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv"), + (slugify("DVR Movie Template"), "Movies/{title} ({year}).mkv"), + (slugify("DVR TV Fallback Template"), "TV_Shows/{show}/{start}.mkv"), + (slugify("DVR Movie Fallback Template"), "Movies/{start}.mkv"), + ] + + # Update each setting + for key, new_value in updates: + try: + setting = CoreSettings.objects.get(key=key) + setting.value = new_value + setting.save() + print(f"Updated {setting.name}: {new_value}") + except CoreSettings.DoesNotExist: + print(f"Setting with key '{key}' not found - skipping") + + +def reverse_dvr_template_paths(apps, schema_editor): + """Add back 'Recordings/' prefix to DVR template paths""" + CoreSettings = apps.get_model("core", "CoreSettings") + + # Define the reverse updates (add back Recordings/ prefix) + updates = [ + (slugify("DVR TV Template"), "Recordings/TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv"), + (slugify("DVR Movie Template"), "Recordings/Movies/{title} ({year}).mkv"), + (slugify("DVR TV Fallback Template"), "Recordings/TV_Shows/{show}/{start}.mkv"), + (slugify("DVR Movie Fallback Template"), "Recordings/Movies/{start}.mkv"), + ] + + # Update each setting back to original + for key, original_value in updates: + try: + setting = CoreSettings.objects.get(key=key) + setting.value = original_value + setting.save() + print(f"Reverted {setting.name}: {original_value}") + except CoreSettings.DoesNotExist: + print(f"Setting with key '{key}' not found - skipping") + + +class Migration(migrations.Migration): + + dependencies = [ + ("core", "0015_dvr_templates"), + ] + + operations = [ + migrations.RunPython(update_dvr_template_paths, reverse_dvr_template_paths), + ] \ No newline at end of file diff --git a/core/migrations/0017_systemevent.py b/core/migrations/0017_systemevent.py new file mode 100644 index 00000000..9b97213c --- /dev/null +++ b/core/migrations/0017_systemevent.py @@ -0,0 +1,28 @@ +# Generated by Django 5.2.4 on 2025-11-20 20:47 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0016_update_dvr_template_paths'), + ] + + operations = [ + migrations.CreateModel( + name='SystemEvent', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, 
serialize=False, verbose_name='ID')), + ('event_type', models.CharField(choices=[('channel_start', 'Channel Started'), ('channel_stop', 'Channel Stopped'), ('channel_buffering', 'Channel Buffering'), ('channel_failover', 'Channel Failover'), ('channel_reconnect', 'Channel Reconnected'), ('channel_error', 'Channel Error'), ('client_connect', 'Client Connected'), ('client_disconnect', 'Client Disconnected'), ('recording_start', 'Recording Started'), ('recording_end', 'Recording Ended'), ('stream_switch', 'Stream Switched'), ('m3u_refresh', 'M3U Refreshed'), ('m3u_download', 'M3U Downloaded'), ('epg_refresh', 'EPG Refreshed'), ('epg_download', 'EPG Downloaded')], db_index=True, max_length=50)), + ('timestamp', models.DateTimeField(auto_now_add=True, db_index=True)), + ('channel_id', models.UUIDField(blank=True, db_index=True, null=True)), + ('channel_name', models.CharField(blank=True, max_length=255, null=True)), + ('details', models.JSONField(blank=True, default=dict)), + ], + options={ + 'ordering': ['-timestamp'], + 'indexes': [models.Index(fields=['-timestamp'], name='core_system_timesta_c6c3d1_idx'), models.Index(fields=['event_type', '-timestamp'], name='core_system_event_t_4267d9_idx')], + }, + ), + ] diff --git a/core/migrations/0018_alter_systemevent_event_type.py b/core/migrations/0018_alter_systemevent_event_type.py new file mode 100644 index 00000000..3fe4eecd --- /dev/null +++ b/core/migrations/0018_alter_systemevent_event_type.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-11-21 15:59 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0017_systemevent'), + ] + + operations = [ + migrations.AlterField( + model_name='systemevent', + name='event_type', + field=models.CharField(choices=[('channel_start', 'Channel Started'), ('channel_stop', 'Channel Stopped'), ('channel_buffering', 'Channel Buffering'), ('channel_failover', 'Channel Failover'), ('channel_reconnect', 'Channel Reconnected'), ('channel_error', 'Channel Error'), ('client_connect', 'Client Connected'), ('client_disconnect', 'Client Disconnected'), ('recording_start', 'Recording Started'), ('recording_end', 'Recording Ended'), ('stream_switch', 'Stream Switched'), ('m3u_refresh', 'M3U Refreshed'), ('m3u_download', 'M3U Downloaded'), ('epg_refresh', 'EPG Refreshed'), ('epg_download', 'EPG Downloaded'), ('login_success', 'Login Successful'), ('login_failed', 'Login Failed'), ('logout', 'User Logged Out'), ('m3u_blocked', 'M3U Download Blocked'), ('epg_blocked', 'EPG Download Blocked')], db_index=True, max_length=50), + ), + ] diff --git a/core/migrations/0019_add_vlc_stream_profile.py b/core/migrations/0019_add_vlc_stream_profile.py new file mode 100644 index 00000000..c3f72592 --- /dev/null +++ b/core/migrations/0019_add_vlc_stream_profile.py @@ -0,0 +1,42 @@ +# Generated migration to add VLC stream profile + +from django.db import migrations + +def add_vlc_profile(apps, schema_editor): + StreamProfile = apps.get_model("core", "StreamProfile") + UserAgent = apps.get_model("core", "UserAgent") + + # Check if VLC profile already exists + if not StreamProfile.objects.filter(name="VLC").exists(): + # Get the TiviMate user agent (should be pk=1) + try: + tivimate_ua = UserAgent.objects.get(pk=1) + except UserAgent.DoesNotExist: + # Fallback: get first available user agent + tivimate_ua = UserAgent.objects.first() + if not tivimate_ua: + # No user agents exist, skip creating profile + return + + StreamProfile.objects.create( + name="VLC", + 
command="cvlc", + parameters="-vv -I dummy --no-video-title-show --http-user-agent {userAgent} {streamUrl} --sout #standard{access=file,mux=ts,dst=-}", + is_active=True, + user_agent=tivimate_ua, + locked=True, # Make it read-only like ffmpeg/streamlink + ) + +def remove_vlc_profile(apps, schema_editor): + StreamProfile = apps.get_model("core", "StreamProfile") + StreamProfile.objects.filter(name="VLC").delete() + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0018_alter_systemevent_event_type'), + ] + + operations = [ + migrations.RunPython(add_vlc_profile, remove_vlc_profile), + ] diff --git a/core/migrations/0020_change_coresettings_value_to_jsonfield.py b/core/migrations/0020_change_coresettings_value_to_jsonfield.py new file mode 100644 index 00000000..ac6ad089 --- /dev/null +++ b/core/migrations/0020_change_coresettings_value_to_jsonfield.py @@ -0,0 +1,267 @@ +# Generated migration to change CoreSettings value field to JSONField and consolidate settings + +import json +from django.db import migrations, models + + +def convert_string_to_json(apps, schema_editor): + """Convert existing string values to appropriate JSON types before changing column type""" + CoreSettings = apps.get_model("core", "CoreSettings") + + for setting in CoreSettings.objects.all(): + value = setting.value + + if not value: + # Empty strings become empty string in JSON + setting.value = json.dumps("") + setting.save(update_fields=['value']) + continue + + # Try to parse as JSON if it looks like JSON (objects/arrays) + if value.startswith('{') or value.startswith('['): + try: + parsed = json.loads(value) + # Store as JSON string temporarily (column is still CharField) + setting.value = json.dumps(parsed) + setting.save(update_fields=['value']) + continue + except (json.JSONDecodeError, ValueError): + pass + + # Try to parse as number + try: + # Check if it's an integer + if '.' 
not in value and value.lstrip('-').isdigit(): + setting.value = json.dumps(int(value)) + setting.save(update_fields=['value']) + continue + # Check if it's a float + float_val = float(value) + setting.value = json.dumps(float_val) + setting.save(update_fields=['value']) + continue + except (ValueError, AttributeError): + pass + + # Check for booleans + if value.lower() in ('true', 'false', '1', '0', 'yes', 'no', 'on', 'off'): + bool_val = value.lower() in ('true', '1', 'yes', 'on') + setting.value = json.dumps(bool_val) + setting.save(update_fields=['value']) + continue + + # Default: store as JSON string + setting.value = json.dumps(value) + setting.save(update_fields=['value']) + + +def consolidate_settings(apps, schema_editor): + """Consolidate individual setting rows into grouped JSON objects.""" + CoreSettings = apps.get_model("core", "CoreSettings") + + # Helper to get setting value + def get_value(key, default=None): + try: + obj = CoreSettings.objects.get(key=key) + return obj.value if obj.value is not None else default + except CoreSettings.DoesNotExist: + return default + + # STREAM SETTINGS + stream_settings = { + "default_user_agent": get_value("default-user-agent"), + "default_stream_profile": get_value("default-stream-profile"), + "m3u_hash_key": get_value("m3u-hash-key", ""), + "preferred_region": get_value("preferred-region"), + "auto_import_mapped_files": get_value("auto-import-mapped-files"), + } + CoreSettings.objects.update_or_create( + key="stream_settings", + defaults={"name": "Stream Settings", "value": stream_settings} + ) + + # DVR SETTINGS + dvr_settings = { + "tv_template": get_value("dvr-tv-template", "TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv"), + "movie_template": get_value("dvr-movie-template", "Movies/{title} ({year}).mkv"), + "tv_fallback_dir": get_value("dvr-tv-fallback-dir", "TV_Shows"), + "tv_fallback_template": get_value("dvr-tv-fallback-template", "TV_Shows/{show}/{start}.mkv"), + "movie_fallback_template": get_value("dvr-movie-fallback-template", "Movies/{start}.mkv"), + "comskip_enabled": bool(get_value("dvr-comskip-enabled", False)), + "comskip_custom_path": get_value("dvr-comskip-custom-path", ""), + "pre_offset_minutes": int(get_value("dvr-pre-offset-minutes", 0) or 0), + "post_offset_minutes": int(get_value("dvr-post-offset-minutes", 0) or 0), + "series_rules": get_value("dvr-series-rules", []), + } + CoreSettings.objects.update_or_create( + key="dvr_settings", + defaults={"name": "DVR Settings", "value": dvr_settings} + ) + + # BACKUP SETTINGS - using underscore keys (not dashes) + backup_settings = { + "schedule_enabled": get_value("backup_schedule_enabled") if get_value("backup_schedule_enabled") is not None else True, + "schedule_frequency": get_value("backup_schedule_frequency") or "daily", + "schedule_time": get_value("backup_schedule_time") or "03:00", + "schedule_day_of_week": get_value("backup_schedule_day_of_week") if get_value("backup_schedule_day_of_week") is not None else 0, + "retention_count": get_value("backup_retention_count") if get_value("backup_retention_count") is not None else 3, + "schedule_cron_expression": get_value("backup_schedule_cron_expression") or "", + } + CoreSettings.objects.update_or_create( + key="backup_settings", + defaults={"name": "Backup Settings", "value": backup_settings} + ) + + # SYSTEM SETTINGS + system_settings = { + "time_zone": get_value("system-time-zone", "UTC"), + "max_system_events": int(get_value("max-system-events", 100) or 100), + } + CoreSettings.objects.update_or_create( + 
key="system_settings", + defaults={"name": "System Settings", "value": system_settings} + ) + + # Rename proxy-settings to proxy_settings (if it exists with old name) + try: + old_proxy = CoreSettings.objects.get(key="proxy-settings") + old_proxy.key = "proxy_settings" + old_proxy.save() + except CoreSettings.DoesNotExist: + pass + + # Ensure proxy_settings exists with defaults if not present + proxy_obj, proxy_created = CoreSettings.objects.get_or_create( + key="proxy_settings", + defaults={ + "name": "Proxy Settings", + "value": { + "buffering_timeout": 15, + "buffering_speed": 1.0, + "redis_chunk_ttl": 60, + "channel_shutdown_delay": 0, + "channel_init_grace_period": 5, + } + } + ) + + # Rename network-access to network_access (if it exists with old name) + try: + old_network = CoreSettings.objects.get(key="network-access") + old_network.key = "network_access" + old_network.save() + except CoreSettings.DoesNotExist: + pass + + # Ensure network_access exists with defaults if not present + network_obj, network_created = CoreSettings.objects.get_or_create( + key="network_access", + defaults={ + "name": "Network Access", + "value": {} + } + ) + # Delete old individual setting rows (keep only the new grouped settings) + grouped_keys = ["stream_settings", "dvr_settings", "backup_settings", "system_settings", "proxy_settings", "network_access"] + CoreSettings.objects.exclude(key__in=grouped_keys).delete() + + +def reverse_migration(apps, schema_editor): + """Reverse migration: split grouped settings and convert JSON back to strings""" + CoreSettings = apps.get_model("core", "CoreSettings") + + # Helper to create individual setting + def create_setting(key, name, value): + # Convert value back to string representation for CharField + if isinstance(value, str): + str_value = value + elif isinstance(value, bool): + str_value = "true" if value else "false" + elif isinstance(value, (int, float)): + str_value = str(value) + elif isinstance(value, (dict, list)): + str_value = json.dumps(value) + elif value is None: + str_value = "" + else: + str_value = str(value) + + CoreSettings.objects.update_or_create( + key=key, + defaults={"name": name, "value": str_value} + ) + + # Split stream_settings + try: + stream = CoreSettings.objects.get(key="stream_settings") + if isinstance(stream.value, dict): + create_setting("default_user_agent", "Default User Agent", stream.value.get("default_user_agent")) + create_setting("default_stream_profile", "Default Stream Profile", stream.value.get("default_stream_profile")) + create_setting("stream_hash_key", "Stream Hash Key", stream.value.get("m3u_hash_key", "")) + create_setting("preferred_region", "Preferred Region", stream.value.get("preferred_region")) + create_setting("auto_import_mapped_files", "Auto Import Mapped Files", stream.value.get("auto_import_mapped_files")) + stream.delete() + except CoreSettings.DoesNotExist: + pass + + # Split dvr_settings + try: + dvr = CoreSettings.objects.get(key="dvr_settings") + if isinstance(dvr.value, dict): + create_setting("dvr_tv_template", "DVR TV Template", dvr.value.get("tv_template", "TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv")) + create_setting("dvr_movie_template", "DVR Movie Template", dvr.value.get("movie_template", "Movies/{title} ({year}).mkv")) + create_setting("dvr_tv_fallback_dir", "DVR TV Fallback Dir", dvr.value.get("tv_fallback_dir", "TV_Shows")) + create_setting("dvr_tv_fallback_template", "DVR TV Fallback Template", dvr.value.get("tv_fallback_template", "TV_Shows/{show}/{start}.mkv")) + 
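# For orientation: after the forward consolidation above, the single "dvr_settings"
# row holds one JSON object shaped like the sketch below (the defaults shown are the
# ones used in this migration; an existing install keeps whatever values it had), and
# this reverse path re-expands each key into its own CoreSettings row again.
_dvr_settings_shape_sketch = {
    "tv_template": "TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv",
    "movie_template": "Movies/{title} ({year}).mkv",
    "tv_fallback_dir": "TV_Shows",
    "tv_fallback_template": "TV_Shows/{show}/{start}.mkv",
    "movie_fallback_template": "Movies/{start}.mkv",
    "comskip_enabled": False,
    "comskip_custom_path": "",
    "pre_offset_minutes": 0,
    "post_offset_minutes": 0,
    "series_rules": [],
}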
create_setting("dvr_movie_fallback_template", "DVR Movie Fallback Template", dvr.value.get("movie_fallback_template", "Movies/{start}.mkv")) + create_setting("dvr_comskip_enabled", "DVR Comskip Enabled", dvr.value.get("comskip_enabled", False)) + create_setting("dvr_comskip_custom_path", "DVR Comskip Custom Path", dvr.value.get("comskip_custom_path", "")) + create_setting("dvr_pre_offset_minutes", "DVR Pre Offset Minutes", dvr.value.get("pre_offset_minutes", 0)) + create_setting("dvr_post_offset_minutes", "DVR Post Offset Minutes", dvr.value.get("post_offset_minutes", 0)) + create_setting("dvr_series_rules", "DVR Series Rules", dvr.value.get("series_rules", [])) + dvr.delete() + except CoreSettings.DoesNotExist: + pass + + # Split backup_settings + try: + backup = CoreSettings.objects.get(key="backup_settings") + if isinstance(backup.value, dict): + create_setting("backup_schedule_enabled", "Backup Schedule Enabled", backup.value.get("schedule_enabled", False)) + create_setting("backup_schedule_frequency", "Backup Schedule Frequency", backup.value.get("schedule_frequency", "weekly")) + create_setting("backup_schedule_time", "Backup Schedule Time", backup.value.get("schedule_time", "02:00")) + create_setting("backup_schedule_day_of_week", "Backup Schedule Day of Week", backup.value.get("schedule_day_of_week", 0)) + create_setting("backup_retention_count", "Backup Retention Count", backup.value.get("retention_count", 7)) + create_setting("backup_schedule_cron_expression", "Backup Schedule Cron Expression", backup.value.get("schedule_cron_expression", "")) + backup.delete() + except CoreSettings.DoesNotExist: + pass + + # Split system_settings + try: + system = CoreSettings.objects.get(key="system_settings") + if isinstance(system.value, dict): + create_setting("system_time_zone", "System Time Zone", system.value.get("time_zone", "UTC")) + create_setting("max_system_events", "Max System Events", system.value.get("max_system_events", 100)) + system.delete() + except CoreSettings.DoesNotExist: + pass + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0019_add_vlc_stream_profile'), + ] + + operations = [ + # First, convert all data to valid JSON strings while column is still CharField + migrations.RunPython(convert_string_to_json, migrations.RunPython.noop), + # Then change the field type to JSONField + migrations.AlterField( + model_name='coresettings', + name='value', + field=models.JSONField(blank=True, default=dict), + ), + # Finally, consolidate individual settings into grouped JSON objects + migrations.RunPython(consolidate_settings, reverse_migration), + ] diff --git a/core/models.py b/core/models.py index fe7e9eb5..683acb0d 100644 --- a/core/models.py +++ b/core/models.py @@ -1,26 +1,30 @@ # core/models.py + +from shlex import split as shlex_split + +from django.conf import settings from django.db import models from django.utils.text import slugify +from django.core.exceptions import ValidationError + class UserAgent(models.Model): name = models.CharField( - max_length=512, - unique=True, - help_text="The User-Agent name." + max_length=512, unique=True, help_text="The User-Agent name." ) user_agent = models.CharField( max_length=512, unique=True, - help_text="The complete User-Agent string sent by the client." + help_text="The complete User-Agent string sent by the client.", ) description = models.CharField( max_length=255, blank=True, - help_text="An optional description of the client or device type." 
+ help_text="An optional description of the client or device type.", ) is_active = models.BooleanField( default=True, - help_text="Whether this user agent is currently allowed/recognized." + help_text="Whether this user agent is currently allowed/recognized.", ) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) @@ -28,31 +32,34 @@ class UserAgent(models.Model): def __str__(self): return self.name -PROXY_PROFILE_NAME = 'Proxy' -REDIRECT_PROFILE_NAME = 'Redirect' + +PROXY_PROFILE_NAME = "Proxy" +REDIRECT_PROFILE_NAME = "Redirect" + class StreamProfile(models.Model): name = models.CharField(max_length=255, help_text="Name of the stream profile") command = models.CharField( max_length=255, help_text="Command to execute (e.g., 'yt.sh', 'streamlink', or 'vlc')", - blank=True + blank=True, ) parameters = models.TextField( help_text="Command-line parameters. Use {userAgent} and {streamUrl} as placeholders.", - blank=True + blank=True, ) locked = models.BooleanField( - default=False, - help_text="Protected - can't be deleted or modified" + default=False, help_text="Protected - can't be deleted or modified" + ) + is_active = models.BooleanField( + default=True, help_text="Whether this profile is active" ) - is_active = models.BooleanField(default=True, help_text="Whether this profile is active") user_agent = models.ForeignKey( "UserAgent", on_delete=models.SET_NULL, null=True, blank=True, - help_text="Optional user agent to use. If not set, you can fall back to a default." + help_text="Optional user agent to use. If not set, you can fall back to a default.", ) def __str__(self): @@ -77,7 +84,9 @@ class StreamProfile(models.Model): new_value = new_value.pk if field_name not in allowed_fields and orig_value != new_value: - raise ValidationError(f"Cannot modify {field_name} on a protected profile.") + raise ValidationError( + f"Cannot modify {field_name} on a protected profile." + ) super().save(*args, **kwargs) @@ -90,10 +99,14 @@ class StreamProfile(models.Model): for field_name, new_value in kwargs.items(): if field_name not in allowed_fields: - raise ValidationError(f"Cannot modify {field_name} on a protected profile.") + raise ValidationError( + f"Cannot modify {field_name} on a protected profile." 
+ ) # Ensure user_agent ForeignKey updates correctly - if field_name == "user_agent" and isinstance(new_value, cls._meta.get_field("user_agent").related_model): + if field_name == "user_agent" and isinstance( + new_value, cls._meta.get_field("user_agent").related_model + ): new_value = new_value.pk # Convert object to ID if needed setattr(instance, field_name, new_value) @@ -122,7 +135,8 @@ class StreamProfile(models.Model): # Split the command and iterate through each part to apply replacements cmd = [self.command] + [ - self._replace_in_part(part, replacements) for part in self.parameters.split() + self._replace_in_part(part, replacements) + for part in shlex_split(self.parameters) # use shlex to handle quoted strings ] return cmd @@ -134,11 +148,14 @@ class StreamProfile(models.Model): return part -DEFAULT_USER_AGENT_KEY= slugify("Default User-Agent") -DEFAULT_STREAM_PROFILE_KEY = slugify("Default Stream Profile") -STREAM_HASH_KEY = slugify("M3U Hash Key") -PREFERRED_REGION_KEY = slugify("Preferred Region") -AUTO_IMPORT_MAPPED_FILES = slugify("Auto-Import Mapped Files") +# Setting group keys +STREAM_SETTINGS_KEY = "stream_settings" +DVR_SETTINGS_KEY = "dvr_settings" +BACKUP_SETTINGS_KEY = "backup_settings" +PROXY_SETTINGS_KEY = "proxy_settings" +NETWORK_ACCESS_KEY = "network_access" +SYSTEM_SETTINGS_KEY = "system_settings" + class CoreSettings(models.Model): key = models.CharField( @@ -148,38 +165,208 @@ class CoreSettings(models.Model): name = models.CharField( max_length=255, ) - value = models.CharField( - max_length=255, + value = models.JSONField( + default=dict, + blank=True, ) def __str__(self): return "Core Settings" + # Helper methods to get/set grouped settings + @classmethod + def _get_group(cls, key, defaults=None): + """Get a settings group, returning defaults if not found.""" + try: + return cls.objects.get(key=key).value or (defaults or {}) + except cls.DoesNotExist: + return defaults or {} + + @classmethod + def _update_group(cls, key, name, updates): + """Update specific fields in a settings group.""" + obj, created = cls.objects.get_or_create( + key=key, + defaults={"name": name, "value": {}} + ) + current = obj.value if isinstance(obj.value, dict) else {} + current.update(updates) + obj.value = current + obj.save() + return current + + # Stream Settings + @classmethod + def get_stream_settings(cls): + """Get all stream-related settings.""" + return cls._get_group(STREAM_SETTINGS_KEY, { + "default_user_agent": None, + "default_stream_profile": None, + "m3u_hash_key": "", + "preferred_region": None, + "auto_import_mapped_files": None, + }) + @classmethod def get_default_user_agent_id(cls): - """Retrieve a system profile by name (or return None if not found).""" - return cls.objects.get(key=DEFAULT_USER_AGENT_KEY).value + return cls.get_stream_settings().get("default_user_agent") @classmethod def get_default_stream_profile_id(cls): - return cls.objects.get(key=DEFAULT_STREAM_PROFILE_KEY).value + return cls.get_stream_settings().get("default_stream_profile") @classmethod def get_m3u_hash_key(cls): - return cls.objects.get(key=STREAM_HASH_KEY).value + return cls.get_stream_settings().get("m3u_hash_key", "") @classmethod def get_preferred_region(cls): - """Retrieve the preferred region setting (or return None if not found).""" - try: - return cls.objects.get(key=PREFERRED_REGION_KEY).value - except cls.DoesNotExist: - return None + return cls.get_stream_settings().get("preferred_region") @classmethod def get_auto_import_mapped_files(cls): - """Retrieve the preferred region 
setting (or return None if not found).""" - try: - return cls.objects.get(key=AUTO_IMPORT_MAPPED_FILES).value - except cls.DoesNotExist: - return None + return cls.get_stream_settings().get("auto_import_mapped_files") + + # DVR Settings + @classmethod + def get_dvr_settings(cls): + """Get all DVR-related settings.""" + return cls._get_group(DVR_SETTINGS_KEY, { + "tv_template": "TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv", + "movie_template": "Movies/{title} ({year}).mkv", + "tv_fallback_dir": "TV_Shows", + "tv_fallback_template": "TV_Shows/{show}/{start}.mkv", + "movie_fallback_template": "Movies/{start}.mkv", + "comskip_enabled": False, + "comskip_custom_path": "", + "pre_offset_minutes": 0, + "post_offset_minutes": 0, + "series_rules": [], + }) + + @classmethod + def get_dvr_tv_template(cls): + return cls.get_dvr_settings().get("tv_template", "TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv") + + @classmethod + def get_dvr_movie_template(cls): + return cls.get_dvr_settings().get("movie_template", "Movies/{title} ({year}).mkv") + + @classmethod + def get_dvr_tv_fallback_dir(cls): + return cls.get_dvr_settings().get("tv_fallback_dir", "TV_Shows") + + @classmethod + def get_dvr_tv_fallback_template(cls): + return cls.get_dvr_settings().get("tv_fallback_template", "TV_Shows/{show}/{start}.mkv") + + @classmethod + def get_dvr_movie_fallback_template(cls): + return cls.get_dvr_settings().get("movie_fallback_template", "Movies/{start}.mkv") + + @classmethod + def get_dvr_comskip_enabled(cls): + return bool(cls.get_dvr_settings().get("comskip_enabled", False)) + + @classmethod + def get_dvr_comskip_custom_path(cls): + return cls.get_dvr_settings().get("comskip_custom_path", "") + + @classmethod + def set_dvr_comskip_custom_path(cls, path: str | None): + value = (path or "").strip() + cls._update_group(DVR_SETTINGS_KEY, "DVR Settings", {"comskip_custom_path": value}) + return value + + @classmethod + def get_dvr_pre_offset_minutes(cls): + return int(cls.get_dvr_settings().get("pre_offset_minutes", 0) or 0) + + @classmethod + def get_dvr_post_offset_minutes(cls): + return int(cls.get_dvr_settings().get("post_offset_minutes", 0) or 0) + + @classmethod + def get_dvr_series_rules(cls): + return cls.get_dvr_settings().get("series_rules", []) + + @classmethod + def set_dvr_series_rules(cls, rules): + cls._update_group(DVR_SETTINGS_KEY, "DVR Settings", {"series_rules": rules}) + return rules + + # Proxy Settings + @classmethod + def get_proxy_settings(cls): + """Get proxy settings.""" + return cls._get_group(PROXY_SETTINGS_KEY, { + "buffering_timeout": 15, + "buffering_speed": 1.0, + "redis_chunk_ttl": 60, + "channel_shutdown_delay": 0, + "channel_init_grace_period": 5, + }) + + # System Settings + @classmethod + def get_system_settings(cls): + """Get all system-related settings.""" + return cls._get_group(SYSTEM_SETTINGS_KEY, { + "time_zone": getattr(settings, "TIME_ZONE", "UTC") or "UTC", + "max_system_events": 100, + }) + + @classmethod + def get_system_time_zone(cls): + return cls.get_system_settings().get("time_zone") or getattr(settings, "TIME_ZONE", "UTC") or "UTC" + + @classmethod + def set_system_time_zone(cls, tz_name: str | None): + value = (tz_name or "").strip() or getattr(settings, "TIME_ZONE", "UTC") or "UTC" + cls._update_group(SYSTEM_SETTINGS_KEY, "System Settings", {"time_zone": value}) + return value + + +class SystemEvent(models.Model): + """ + Tracks system events like channel start/stop, buffering, failover, client connections. 
+ Maintains a rolling history based on max_system_events setting. + """ + EVENT_TYPES = [ + ('channel_start', 'Channel Started'), + ('channel_stop', 'Channel Stopped'), + ('channel_buffering', 'Channel Buffering'), + ('channel_failover', 'Channel Failover'), + ('channel_reconnect', 'Channel Reconnected'), + ('channel_error', 'Channel Error'), + ('client_connect', 'Client Connected'), + ('client_disconnect', 'Client Disconnected'), + ('recording_start', 'Recording Started'), + ('recording_end', 'Recording Ended'), + ('stream_switch', 'Stream Switched'), + ('m3u_refresh', 'M3U Refreshed'), + ('m3u_download', 'M3U Downloaded'), + ('epg_refresh', 'EPG Refreshed'), + ('epg_download', 'EPG Downloaded'), + ('login_success', 'Login Successful'), + ('login_failed', 'Login Failed'), + ('logout', 'User Logged Out'), + ('m3u_blocked', 'M3U Download Blocked'), + ('epg_blocked', 'EPG Download Blocked'), + ] + + event_type = models.CharField(max_length=50, choices=EVENT_TYPES, db_index=True) + timestamp = models.DateTimeField(auto_now_add=True, db_index=True) + channel_id = models.UUIDField(null=True, blank=True, db_index=True) + channel_name = models.CharField(max_length=255, null=True, blank=True) + details = models.JSONField(default=dict, blank=True) + + class Meta: + ordering = ['-timestamp'] + indexes = [ + models.Index(fields=['-timestamp']), + models.Index(fields=['event_type', '-timestamp']), + ] + + def __str__(self): + return f"{self.event_type} - {self.channel_name or 'N/A'} @ {self.timestamp}" diff --git a/core/serializers.py b/core/serializers.py index c80ad630..b2bd8ecc 100644 --- a/core/serializers.py +++ b/core/serializers.py @@ -1,19 +1,100 @@ # core/serializers.py +import json +import ipaddress from rest_framework import serializers -from .models import UserAgent, StreamProfile, CoreSettings +from .models import CoreSettings, UserAgent, StreamProfile, NETWORK_ACCESS_KEY + class UserAgentSerializer(serializers.ModelSerializer): class Meta: model = UserAgent - fields = ['id', 'name', 'user_agent', 'description', 'is_active', 'created_at', 'updated_at'] + fields = [ + "id", + "name", + "user_agent", + "description", + "is_active", + "created_at", + "updated_at", + ] + class StreamProfileSerializer(serializers.ModelSerializer): class Meta: model = StreamProfile - fields = ['id', 'name', 'command', 'parameters', 'is_active', 'user_agent', 'locked'] + fields = [ + "id", + "name", + "command", + "parameters", + "is_active", + "user_agent", + "locked", + ] + class CoreSettingsSerializer(serializers.ModelSerializer): class Meta: model = CoreSettings - fields = '__all__' + fields = "__all__" + + def update(self, instance, validated_data): + if instance.key == NETWORK_ACCESS_KEY: + errors = False + invalid = {} + value = validated_data.get("value") + for key, val in value.items(): + cidrs = val.split(",") + for cidr in cidrs: + try: + ipaddress.ip_network(cidr) + except: + errors = True + if key not in invalid: + invalid[key] = [] + invalid[key].append(cidr) + + if errors: + # Perform CIDR validation + raise serializers.ValidationError( + { + "message": "Invalid CIDRs", + "value": invalid, + } + ) + + return super().update(instance, validated_data) + +class ProxySettingsSerializer(serializers.Serializer): + """Serializer for proxy settings stored as JSON in CoreSettings""" + buffering_timeout = serializers.IntegerField(min_value=0, max_value=300) + buffering_speed = serializers.FloatField(min_value=0.1, max_value=10.0) + redis_chunk_ttl = serializers.IntegerField(min_value=10, max_value=3600) + 
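# (Aside on how these fields are exercised: the ProxySettingsViewSet shown earlier
#  merges the stored JSON with an incoming PATCH payload and re-validates the merged
#  dict through this serializer before writing it back. A rough sketch of that flow
#  with a made-up payload, not an actual request cycle:
#      stored = CoreSettings.get_proxy_settings()        # e.g. the defaults
#      merged = {**stored, **{"buffering_timeout": 30}}  # hypothetical PATCH body
#      sketch = ProxySettingsSerializer(data=merged)
#      sketch.is_valid(raise_exception=True)             # all five bounds re-checked
#      new_value = sketch.validated_data                 # value saved to CoreSettings)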
channel_shutdown_delay = serializers.IntegerField(min_value=0, max_value=300) + channel_init_grace_period = serializers.IntegerField(min_value=0, max_value=60) + + def validate_buffering_timeout(self, value): + if value < 0 or value > 300: + raise serializers.ValidationError("Buffering timeout must be between 0 and 300 seconds") + return value + + def validate_buffering_speed(self, value): + if value < 0.1 or value > 10.0: + raise serializers.ValidationError("Buffering speed must be between 0.1 and 10.0") + return value + + def validate_redis_chunk_ttl(self, value): + if value < 10 or value > 3600: + raise serializers.ValidationError("Redis chunk TTL must be between 10 and 3600 seconds") + return value + + def validate_channel_shutdown_delay(self, value): + if value < 0 or value > 300: + raise serializers.ValidationError("Channel shutdown delay must be between 0 and 300 seconds") + return value + + def validate_channel_init_grace_period(self, value): + if value < 0 or value > 60: + raise serializers.ValidationError("Channel init grace period must be between 0 and 60 seconds") + return value diff --git a/core/tasks.py b/core/tasks.py index 83682a69..207e7c5e 100644 --- a/core/tasks.py +++ b/core/tasks.py @@ -1,29 +1,31 @@ -# yourapp/tasks.py from celery import shared_task from channels.layers import get_channel_layer from asgiref.sync import async_to_sync -import redis import json import logging import re import time import os -from core.utils import RedisClient +from core.utils import RedisClient, send_websocket_update, acquire_task_lock, release_task_lock from apps.proxy.ts_proxy.channel_status import ChannelStatus from apps.m3u.models import M3UAccount from apps.epg.models import EPGSource from apps.m3u.tasks import refresh_single_m3u_account from apps.epg.tasks import refresh_epg_data from .models import CoreSettings +from apps.channels.models import Stream, ChannelStream +from django.db import transaction logger = logging.getLogger(__name__) EPG_WATCH_DIR = '/data/epgs' M3U_WATCH_DIR = '/data/m3us' +LOGO_WATCH_DIR = '/data/logos' MIN_AGE_SECONDS = 6 STARTUP_SKIP_AGE = 30 REDIS_PREFIX = "processed_file:" REDIS_TTL = 60 * 60 * 24 * 3 # expire keys after 3 days (optional) +SUPPORTED_LOGO_FORMATS = ['.jpg', '.jpeg', '.png', '.gif', '.webp', '.bmp', '.svg'] # Store the last known value to compare with new data last_known_data = {} @@ -31,11 +33,8 @@ last_known_data = {} _last_log_times = {} # Don't repeat similar log messages more often than this (in seconds) LOG_THROTTLE_SECONDS = 300 # 5 minutes - -@shared_task -def beat_periodic_task(): - fetch_channel_stats() - scan_and_process_files() +# Track if this is the first scan since startup +_first_scan_completed = False def throttled_log(logger_method, message, key=None, *args, **kwargs): """Only log messages with the same key once per throttle period""" @@ -48,19 +47,21 @@ def throttled_log(logger_method, message, key=None, *args, **kwargs): logger_method(message, *args, **kwargs) _last_log_times[key] = now +@shared_task +def beat_periodic_task(): + fetch_channel_stats() + scan_and_process_files() + @shared_task def scan_and_process_files(): + global _first_scan_completed redis_client = RedisClient.get_client() now = time.time() - # Add debug logging for the auto-import setting - auto_import_value = CoreSettings.get_auto_import_mapped_files() - logger.debug(f"Auto-import mapped files setting value: '{auto_import_value}' (type: {type(auto_import_value).__name__})") - # Check if directories exist - dirs_exist = all(os.path.exists(d) for d in 
[M3U_WATCH_DIR, EPG_WATCH_DIR]) + dirs_exist = all(os.path.exists(d) for d in [M3U_WATCH_DIR, EPG_WATCH_DIR, LOGO_WATCH_DIR]) if not dirs_exist: - throttled_log(logger.warning, f"Watch directories missing: M3U ({os.path.exists(M3U_WATCH_DIR)}), EPG ({os.path.exists(EPG_WATCH_DIR)})", "watch_dirs_missing") + throttled_log(logger.warning, f"Watch directories missing: M3U ({os.path.exists(M3U_WATCH_DIR)}), EPG ({os.path.exists(EPG_WATCH_DIR)}), LOGO ({os.path.exists(LOGO_WATCH_DIR)})", "watch_dirs_missing") # Process M3U files m3u_files = [f for f in os.listdir(M3U_WATCH_DIR) @@ -82,7 +83,11 @@ def scan_and_process_files(): # Check if this file is already in the database existing_m3u = M3UAccount.objects.filter(file_path=filepath).exists() if existing_m3u: - logger.debug(f"Skipping {filename}: Already exists in database") + # Use trace level if not first scan + if _first_scan_completed: + logger.trace(f"Skipping {filename}: Already exists in database") + else: + logger.debug(f"Skipping {filename}: Already exists in database") redis_client.set(redis_key, mtime, ex=REDIS_TTL) m3u_skipped += 1 continue @@ -98,7 +103,11 @@ def scan_and_process_files(): # Skip if we've already processed this mtime if stored_mtime and float(stored_mtime) >= mtime: - logger.debug(f"Skipping {filename}: Already processed this version") + # Use trace level if not first scan + if _first_scan_completed: + logger.trace(f"Skipping {filename}: Already processed this version") + else: + logger.debug(f"Skipping {filename}: Already processed this version") m3u_skipped += 1 continue @@ -109,11 +118,26 @@ def scan_and_process_files(): redis_client.set(redis_key, mtime, ex=REDIS_TTL) + # More descriptive creation logging that includes active status + if created: + if m3u_account.is_active: + logger.info(f"Created new M3U account '{filename}' (active)") + else: + logger.info(f"Created new M3U account '{filename}' (inactive due to auto-import setting)") + if not m3u_account.is_active: - logger.debug(f"Skipping {filename}: M3U account is inactive") + # Use trace level if not first scan + if _first_scan_completed: + logger.trace(f"Skipping {filename}: M3U account is inactive") + else: + logger.debug(f"Skipping {filename}: M3U account is inactive") m3u_skipped += 1 continue + # Log update for existing files (we've already logged creation above) + if not created: + logger.info(f"Detected update to existing M3U file: {filename}") + logger.info(f"Queueing refresh for M3U file: {filename}") refresh_single_m3u_account.delay(m3u_account.id) m3u_processed += 1 @@ -127,12 +151,12 @@ def scan_and_process_files(): }, ) - logger.debug(f"M3U processing complete: {m3u_processed} processed, {m3u_skipped} skipped, {len(m3u_files)} total") + logger.trace(f"M3U processing complete: {m3u_processed} processed, {m3u_skipped} skipped, {len(m3u_files)} total") # Process EPG files try: epg_files = os.listdir(EPG_WATCH_DIR) - logger.debug(f"Found {len(epg_files)} files in EPG directory") + logger.trace(f"Found {len(epg_files)} files in EPG directory") except Exception as e: logger.error(f"Error listing EPG directory: {e}") epg_files = [] @@ -145,12 +169,20 @@ def scan_and_process_files(): filepath = os.path.join(EPG_WATCH_DIR, filename) if not os.path.isfile(filepath): - logger.debug(f"Skipping {filename}: Not a file") + # Use trace level if not first scan + if _first_scan_completed: + logger.trace(f"Skipping {filename}: Not a file") + else: + logger.debug(f"Skipping {filename}: Not a file") epg_skipped += 1 continue - if not filename.endswith('.xml') and 
not filename.endswith('.gz'): - logger.debug(f"Skipping {filename}: Not an XML or GZ file") + if not filename.endswith('.xml') and not filename.endswith('.gz') and not filename.endswith('.zip'): + # Use trace level if not first scan + if _first_scan_completed: + logger.trace(f"Skipping {filename}: Not an XML, GZ or zip file") + else: + logger.debug(f"Skipping {filename}: Not an XML, GZ or zip file") epg_skipped += 1 continue @@ -164,7 +196,11 @@ def scan_and_process_files(): # Check if this file is already in the database existing_epg = EPGSource.objects.filter(file_path=filepath).exists() if existing_epg: - logger.debug(f"Skipping {filename}: Already exists in database") + # Use trace level if not first scan + if _first_scan_completed: + logger.trace(f"Skipping {filename}: Already exists in database") + else: + logger.debug(f"Skipping {filename}: Already exists in database") redis_client.set(redis_key, mtime, ex=REDIS_TTL) epg_skipped += 1 continue @@ -174,13 +210,21 @@ def scan_and_process_files(): # File too new — probably still being written if age < MIN_AGE_SECONDS: - logger.debug(f"Skipping {filename}: Too new, possibly still being written (age={age}s)") + # Use trace level if not first scan + if _first_scan_completed: + logger.trace(f"Skipping {filename}: Too new, possibly still being written (age={age}s)") + else: + logger.debug(f"Skipping {filename}: Too new, possibly still being written (age={age}s)") epg_skipped += 1 continue # Skip if we've already processed this mtime if stored_mtime and float(stored_mtime) >= mtime: - logger.debug(f"Skipping {filename}: Already processed this version") + # Use trace level if not first scan + if _first_scan_completed: + logger.trace(f"Skipping {filename}: Already processed this version") + else: + logger.debug(f"Skipping {filename}: Already processed this version") epg_skipped += 1 continue @@ -191,17 +235,28 @@ def scan_and_process_files(): "is_active": CoreSettings.get_auto_import_mapped_files() in [True, "true", "True"], }) - # Add debug logging for created sources - if created: - logger.info(f"Created new EPG source '{filename}'") - redis_client.set(redis_key, mtime, ex=REDIS_TTL) + # More descriptive creation logging that includes active status + if created: + if epg_source.is_active: + logger.info(f"Created new EPG source '{filename}' (active)") + else: + logger.info(f"Created new EPG source '{filename}' (inactive due to auto-import setting)") + if not epg_source.is_active: - logger.debug(f"Skipping {filename}: EPG source is marked as inactive") + # Use trace level if not first scan + if _first_scan_completed: + logger.trace(f"Skipping {filename}: EPG source is marked as inactive") + else: + logger.debug(f"Skipping {filename}: EPG source is marked as inactive") epg_skipped += 1 continue + # Log update for existing files (we've already logged creation above) + if not created: + logger.info(f"Detected update to existing EPG file: {filename}") + logger.info(f"Queueing refresh for EPG file: {filename}") refresh_epg_data.delay(epg_source.id) # Trigger Celery task epg_processed += 1 @@ -211,7 +266,130 @@ def scan_and_process_files(): epg_errors += 1 continue - logger.debug(f"EPG processing complete: {epg_processed} processed, {epg_skipped} skipped, {epg_errors} errors") + logger.trace(f"EPG processing complete: {epg_processed} processed, {epg_skipped} skipped, {epg_errors} errors") + + # Process Logo files (including subdirectories) + try: + logo_files = [] + if os.path.exists(LOGO_WATCH_DIR): + for root, dirs, files in 
os.walk(LOGO_WATCH_DIR): + for filename in files: + logo_files.append(os.path.join(root, filename)) + logger.trace(f"Found {len(logo_files)} files in LOGO directory (including subdirectories)") + except Exception as e: + logger.error(f"Error listing LOGO directory: {e}") + logo_files = [] + + logo_processed = 0 + logo_skipped = 0 + logo_errors = 0 + + for filepath in logo_files: + filename = os.path.basename(filepath) + + if not os.path.isfile(filepath): + if _first_scan_completed: + logger.trace(f"Skipping {filename}: Not a file") + else: + logger.debug(f"Skipping {filename}: Not a file") + logo_skipped += 1 + continue + + # Check if file has supported logo extension + file_ext = os.path.splitext(filename)[1].lower() + if file_ext not in SUPPORTED_LOGO_FORMATS: + if _first_scan_completed: + logger.trace(f"Skipping {filename}: Not a supported logo format") + else: + logger.debug(f"Skipping {filename}: Not a supported logo format") + logo_skipped += 1 + continue + + mtime = os.path.getmtime(filepath) + age = now - mtime + redis_key = REDIS_PREFIX + filepath + stored_mtime = redis_client.get(redis_key) + + # Check if logo already exists in database + if not stored_mtime and age > STARTUP_SKIP_AGE: + from apps.channels.models import Logo + existing_logo = Logo.objects.filter(url=filepath).exists() + if existing_logo: + if _first_scan_completed: + logger.trace(f"Skipping {filename}: Already exists in database") + else: + logger.debug(f"Skipping {filename}: Already exists in database") + redis_client.set(redis_key, mtime, ex=REDIS_TTL) + logo_skipped += 1 + continue + else: + logger.debug(f"Processing {filename} despite age: Not found in database") + + # File too new — probably still being written + if age < MIN_AGE_SECONDS: + if _first_scan_completed: + logger.trace(f"Skipping {filename}: Too new, possibly still being written (age={age}s)") + else: + logger.debug(f"Skipping {filename}: Too new, possibly still being written (age={age}s)") + logo_skipped += 1 + continue + + # Skip if we've already processed this mtime + if stored_mtime and float(stored_mtime) >= mtime: + if _first_scan_completed: + logger.trace(f"Skipping {filename}: Already processed this version") + else: + logger.debug(f"Skipping {filename}: Already processed this version") + logo_skipped += 1 + continue + + try: + from apps.channels.models import Logo + + # Create logo entry with just the filename (without extension) as name + logo_name = os.path.splitext(filename)[0] + + logo, created = Logo.objects.get_or_create( + url=filepath, + defaults={ + "name": logo_name, + } + ) + + redis_client.set(redis_key, mtime, ex=REDIS_TTL) + + if created: + logger.info(f"Created new logo entry: {logo_name}") + else: + logger.debug(f"Logo entry already exists: {logo_name}") + + logo_processed += 1 + + except Exception as e: + logger.error(f"Error processing logo file {filename}: {str(e)}", exc_info=True) + logo_errors += 1 + continue + + logger.trace(f"LOGO processing complete: {logo_processed} processed, {logo_skipped} skipped, {logo_errors} errors") + + # Send summary websocket update for logo processing + if logo_processed > 0 or logo_errors > 0: + send_websocket_update( + "updates", + "update", + { + "success": True, + "type": "logo_processing_summary", + "processed": logo_processed, + "skipped": logo_skipped, + "errors": logo_errors, + "total_files": len(logo_files), + "message": f"Logo processing complete: {logo_processed} processed, {logo_skipped} skipped, {logo_errors} errors" + } + ) + + # Mark that the first scan is complete + 
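# --- Illustrative sketch, not part of the patch: the per-file skip logic repeated above for
# --- M3U, EPG and logo files boils down to this mtime-in-Redis dedup check. The helper name
# --- `should_process` is hypothetical; prefix/ttl mirror REDIS_PREFIX and REDIS_TTL in core/tasks.py.
import os

def should_process(redis_client, filepath, prefix="processed_file:", ttl=60 * 60 * 24 * 3):
    mtime = os.path.getmtime(filepath)
    key = prefix + filepath
    stored = redis_client.get(key)
    if stored and float(stored) >= mtime:
        return False  # this exact version of the file was already handled
    redis_client.set(key, mtime, ex=ttl)  # remember it for ~3 days
    return True  # new or updated file, worth queueing a refresh
# --- end of sketch ---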
_first_scan_completed = True def fetch_channel_stats(): redis_client = RedisClient.get_client() @@ -236,16 +414,264 @@ def fetch_channel_stats(): if cursor == 0: break + send_websocket_update( + "updates", + "update", + { + "success": True, + "type": "channel_stats", + "stats": json.dumps({'channels': all_channels, 'count': len(all_channels)}) + }, + collect_garbage=True + ) + + # Explicitly clean up large data structures + all_channels = None + except Exception as e: logger.error(f"Error in channel_status: {e}", exc_info=True) return - # return JsonResponse({'error': str(e)}, status=500) - channel_layer = get_channel_layer() - async_to_sync(channel_layer.group_send)( - "updates", - { - "type": "update", - "data": {"success": True, "type": "channel_stats", "stats": json.dumps({'channels': all_channels, 'count': len(all_channels)})} - }, - ) +@shared_task +def rehash_streams(keys): + """ + Regenerate stream hashes for all streams based on current hash key configuration. + This task checks for and blocks M3U refresh tasks to prevent conflicts. + """ + from apps.channels.models import Stream + from apps.m3u.models import M3UAccount + + logger.info("Starting stream rehash process") + + # Get all M3U account IDs for locking + m3u_account_ids = list(M3UAccount.objects.filter(is_active=True).values_list('id', flat=True)) + + # Check if any M3U refresh tasks are currently running + blocked_accounts = [] + for account_id in m3u_account_ids: + if not acquire_task_lock('refresh_single_m3u_account', account_id): + blocked_accounts.append(account_id) + + if blocked_accounts: + # Release any locks we did acquire + for account_id in m3u_account_ids: + if account_id not in blocked_accounts: + release_task_lock('refresh_single_m3u_account', account_id) + + logger.warning(f"Rehash blocked: M3U refresh tasks running for accounts: {blocked_accounts}") + + # Send WebSocket notification to inform user + send_websocket_update( + 'updates', + 'update', + { + "success": False, + "type": "stream_rehash", + "action": "blocked", + "blocked_accounts": len(blocked_accounts), + "total_accounts": len(m3u_account_ids), + "message": f"Stream rehash blocked: M3U refresh tasks are currently running for {len(blocked_accounts)} accounts. Please try again later." 
+ } + ) + + return f"Rehash blocked: M3U refresh tasks running for {len(blocked_accounts)} accounts" + + acquired_locks = m3u_account_ids.copy() + + try: + batch_size = 1000 + queryset = Stream.objects.all() + + # Track statistics + total_processed = 0 + duplicates_merged = 0 + hash_keys = {} + + total_records = queryset.count() + logger.info(f"Starting rehash of {total_records} streams with keys: {keys}") + + # Send initial WebSocket update + send_websocket_update( + 'updates', + 'update', + { + "success": True, + "type": "stream_rehash", + "action": "starting", + "progress": 0, + "total_records": total_records, + "message": f"Starting rehash of {total_records} streams" + } + ) + + for start in range(0, total_records, batch_size): + batch_processed = 0 + batch_duplicates = 0 + + with transaction.atomic(): + batch = queryset[start:start + batch_size] + + for obj in batch: + # Generate new hash + group_name = obj.channel_group.name if obj.channel_group else None + new_hash = Stream.generate_hash_key(obj.name, obj.url, obj.tvg_id, keys, m3u_id=obj.m3u_account_id, group=group_name) + + # Check if this hash already exists in our tracking dict or in database + if new_hash in hash_keys: + # Found duplicate in current batch - merge the streams + existing_stream_id = hash_keys[new_hash] + existing_stream = Stream.objects.get(id=existing_stream_id) + + # Move any channel relationships from duplicate to existing stream + # Handle potential unique constraint violations + for channel_stream in ChannelStream.objects.filter(stream_id=obj.id): + # Check if this channel already has a relationship with the target stream + existing_relationship = ChannelStream.objects.filter( + channel_id=channel_stream.channel_id, + stream_id=existing_stream_id + ).first() + + if existing_relationship: + # Relationship already exists, just delete the duplicate + channel_stream.delete() + else: + # Safe to update the relationship + channel_stream.stream_id = existing_stream_id + channel_stream.save() + + # Update the existing stream with the most recent data + if obj.updated_at > existing_stream.updated_at: + existing_stream.name = obj.name + existing_stream.url = obj.url + existing_stream.logo_url = obj.logo_url + existing_stream.tvg_id = obj.tvg_id + existing_stream.m3u_account = obj.m3u_account + existing_stream.channel_group = obj.channel_group + existing_stream.custom_properties = obj.custom_properties + existing_stream.last_seen = obj.last_seen + existing_stream.updated_at = obj.updated_at + existing_stream.save() + + # Delete the duplicate + obj.delete() + batch_duplicates += 1 + else: + # Check if hash already exists in database (from previous batches or existing data) + existing_stream = Stream.objects.filter(stream_hash=new_hash).exclude(id=obj.id).first() + if existing_stream: + # Found duplicate in database - merge the streams + # Move any channel relationships from duplicate to existing stream + # Handle potential unique constraint violations + for channel_stream in ChannelStream.objects.filter(stream_id=obj.id): + # Check if this channel already has a relationship with the target stream + existing_relationship = ChannelStream.objects.filter( + channel_id=channel_stream.channel_id, + stream_id=existing_stream.id + ).first() + + if existing_relationship: + # Relationship already exists, just delete the duplicate + channel_stream.delete() + else: + # Safe to update the relationship + channel_stream.stream_id = existing_stream.id + channel_stream.save() + + # Update the existing stream with the most recent data + 
if obj.updated_at > existing_stream.updated_at: + existing_stream.name = obj.name + existing_stream.url = obj.url + existing_stream.logo_url = obj.logo_url + existing_stream.tvg_id = obj.tvg_id + existing_stream.m3u_account = obj.m3u_account + existing_stream.channel_group = obj.channel_group + existing_stream.custom_properties = obj.custom_properties + existing_stream.last_seen = obj.last_seen + existing_stream.updated_at = obj.updated_at + existing_stream.save() + + # Delete the duplicate + obj.delete() + batch_duplicates += 1 + hash_keys[new_hash] = existing_stream.id + else: + # Update hash for this stream + obj.stream_hash = new_hash + obj.save(update_fields=['stream_hash']) + hash_keys[new_hash] = obj.id + + batch_processed += 1 + + total_processed += batch_processed + duplicates_merged += batch_duplicates + + # Calculate progress percentage + progress_percent = int((total_processed / total_records) * 100) + current_batch = start // batch_size + 1 + total_batches = (total_records // batch_size) + 1 + + # Send progress update via WebSocket + send_websocket_update( + 'updates', + 'update', + { + "success": True, + "type": "stream_rehash", + "action": "processing", + "progress": progress_percent, + "batch": current_batch, + "total_batches": total_batches, + "processed": total_processed, + "duplicates_merged": duplicates_merged, + "message": f"Processed batch {current_batch}/{total_batches}: {batch_processed} streams, {batch_duplicates} duplicates merged" + } + ) + + logger.info(f"Rehashed batch {current_batch}/{total_batches}: " + f"{batch_processed} processed, {batch_duplicates} duplicates merged") + + logger.info(f"Rehashing complete: {total_processed} streams processed, " + f"{duplicates_merged} duplicates merged") + + # Send completion update via WebSocket + send_websocket_update( + 'updates', + 'update', + { + "success": True, + "type": "stream_rehash", + "action": "completed", + "progress": 100, + "total_processed": total_processed, + "duplicates_merged": duplicates_merged, + "final_count": total_processed - duplicates_merged, + "message": f"Rehashing complete: {total_processed} streams processed, {duplicates_merged} duplicates merged" + }, + collect_garbage=True # Force garbage collection after completion + ) + + logger.info("Stream rehash completed successfully") + return f"Successfully rehashed {total_processed} streams" + + except Exception as e: + logger.error(f"Error during stream rehash: {e}") + raise + finally: + # Always release all acquired M3U locks + for account_id in acquired_locks: + release_task_lock('refresh_single_m3u_account', account_id) + logger.info(f"Released M3U task locks for {len(acquired_locks)} accounts") + + +@shared_task +def cleanup_vod_persistent_connections(): + """Clean up stale VOD persistent connections""" + try: + from apps.proxy.vod_proxy.connection_manager import VODConnectionManager + + # Clean up connections older than 30 minutes + VODConnectionManager.cleanup_stale_persistent_connections(max_age_seconds=1800) + logger.info("VOD persistent connection cleanup completed") + + except Exception as e: + logger.error(f"Error during VOD persistent connection cleanup: {e}") diff --git a/core/utils.py b/core/utils.py index 3a5d84f4..e3d6c389 100644 --- a/core/utils.py +++ b/core/utils.py @@ -3,11 +3,14 @@ import logging import time import os import threading +import re from django.conf import settings from redis.exceptions import ConnectionError, TimeoutError from django.core.cache import cache from asgiref.sync import async_to_sync from 
channels.layers import get_channel_layer +from django.core.validators import URLValidator +from django.core.exceptions import ValidationError import gc logger = logging.getLogger(__name__) @@ -15,6 +18,29 @@ logger = logging.getLogger(__name__) # Import the command detector from .command_utils import is_management_command +def natural_sort_key(text): + """ + Convert a string into a list of string and number chunks for natural sorting. + "PPV 10" becomes ['PPV ', 10] so it sorts correctly with "PPV 2". + + This function enables natural/alphanumeric sorting where numbers within strings + are treated as actual numbers rather than strings. + + Args: + text (str): The text to convert for sorting + + Returns: + list: A list of strings and integers for proper sorting + + Example: + >>> sorted(['PPV 1', 'PPV 10', 'PPV 2'], key=natural_sort_key) + ['PPV 1', 'PPV 2', 'PPV 10'] + """ + def convert(chunk): + return int(chunk) if chunk.isdigit() else chunk.lower() + + return [convert(c) for c in re.split('([0-9]+)', text)] + class RedisClient: _client = None _pubsub_client = None @@ -52,6 +78,43 @@ class RedisClient: # Validate connection with ping client.ping() client.flushdb() + + # Disable persistence on first connection - improves performance + # Only try to disable if not in a read-only environment + try: + client.config_set('save', '') # Disable RDB snapshots + client.config_set('appendonly', 'no') # Disable AOF logging + + # Set optimal memory settings with environment variable support + # Get max memory from environment or use a larger default (512MB instead of 256MB) + #max_memory = os.environ.get('REDIS_MAX_MEMORY', '512mb') + #eviction_policy = os.environ.get('REDIS_EVICTION_POLICY', 'allkeys-lru') + + # Apply memory settings + #client.config_set('maxmemory-policy', eviction_policy) + #client.config_set('maxmemory', max_memory) + + #logger.info(f"Redis configured with maxmemory={max_memory}, policy={eviction_policy}") + + # Disable protected mode when in debug mode + if os.environ.get('DISPATCHARR_DEBUG', '').lower() == 'true': + client.config_set('protected-mode', 'no') # Disable protected mode in debug + logger.warning("Redis protected mode disabled for debug environment") + + logger.trace("Redis persistence disabled for better performance") + except redis.exceptions.ResponseError as e: + # Improve error handling for Redis configuration errors + if "OOM" in str(e): + logger.error(f"Redis OOM during configuration: {e}") + # Try to increase maxmemory as an emergency measure + try: + client.config_set('maxmemory', '768mb') + logger.warning("Applied emergency Redis memory increase to 768MB") + except: + pass + else: + logger.error(f"Redis configuration error: {e}") + logger.info(f"Connected to Redis at {redis_host}:{redis_port}/{redis_db}") cls._client = client @@ -151,12 +214,226 @@ def release_task_lock(task_name, id): # Remove the lock redis_client.delete(lock_id) -def send_websocket_event(event, success, data): +def send_websocket_update(group_name, event_type, data, collect_garbage=False): + """ + Standardized function to send WebSocket updates with proper memory management. + + Args: + group_name: The WebSocket group to send to (e.g. 'updates') + event_type: The type of message (e.g. 
'update') + data: The data to send + collect_garbage: Whether to force garbage collection after sending + """ channel_layer = get_channel_layer() - async_to_sync(channel_layer.group_send)( - 'updates', - { - 'type': 'update', - "data": {"success": True, "type": "epg_channels"} - } - ) + try: + async_to_sync(channel_layer.group_send)( + group_name, + { + 'type': event_type, + 'data': data + } + ) + except Exception as e: + logger.warning(f"Failed to send WebSocket update: {e}") + finally: + # Explicitly release references to help garbage collection + channel_layer = None + + # Force garbage collection if requested + if collect_garbage: + gc.collect() + +def send_websocket_event(event, success, data): + """Send a standardized event payload to the 'updates' WebSocket group.""" + data_payload = {"success": success, "type": event} + if data: + # Make a copy to avoid modifying the original + data_payload.update(data) + + # Use the standardized function + send_websocket_update('updates', 'update', data_payload) + + # Help garbage collection by clearing references + data_payload = None + +# Add memory monitoring utilities +def get_memory_usage(): + """Returns current memory usage in MB""" + import psutil + process = psutil.Process(os.getpid()) + return process.memory_info().rss / (1024 * 1024) + +def monitor_memory_usage(func): + """Decorator to monitor memory usage before and after function execution""" + def wrapper(*args, **kwargs): + import gc + # Force garbage collection before measuring + gc.collect() + + # Get initial memory usage + start_mem = get_memory_usage() + logger.debug(f"Memory usage before {func.__name__}: {start_mem:.2f} MB") + + # Call the original function + result = func(*args, **kwargs) + + # Force garbage collection before measuring again + gc.collect() + + # Get final memory usage + end_mem = get_memory_usage() + logger.debug(f"Memory usage after {func.__name__}: {end_mem:.2f} MB (Change: {end_mem - start_mem:.2f} MB)") + + return result + return wrapper + +def cleanup_memory(log_usage=False, force_collection=True): + """ + Comprehensive memory cleanup function to reduce memory footprint + + Args: + log_usage: Whether to log memory usage before and after cleanup + force_collection: Whether to force garbage collection + """ + logger.trace("Starting Django memory cleanup") + # Skip logging if log level is not set to debug or more verbose (like trace) + current_log_level = logger.getEffectiveLevel() + if not current_log_level <= logging.DEBUG: + log_usage = False + if log_usage: + try: + import psutil + process = psutil.Process() + before_mem = process.memory_info().rss / (1024 * 1024) + logger.debug(f"Memory before cleanup: {before_mem:.2f} MB") + except (ImportError, Exception) as e: + logger.debug(f"Error getting memory usage: {e}") + + # Clear any object caches from Django ORM + from django.db import connection, reset_queries + reset_queries() + + # Force garbage collection + if force_collection: + # Run full collection + gc.collect(generation=2) + # Clear cyclic references + gc.collect(generation=0) + + if log_usage: + try: + import psutil + process = psutil.Process() + after_mem = process.memory_info().rss / (1024 * 1024) + logger.debug(f"Memory after cleanup: {after_mem:.2f} MB (change: {after_mem-before_mem:.2f} MB)") + except (ImportError, Exception): + pass + logger.trace("Django memory cleanup complete") + +def is_protected_path(file_path): + """ + Determine if a file path is in a protected directory that shouldn't be deleted.
+ + Args: + file_path (str): The file path to check + + Returns: + bool: True if the path is protected, False otherwise + """ + if not file_path: + return False + + # List of protected directory prefixes + protected_dirs = [ + '/data/epgs', # EPG files mapped from host + '/data/uploads', # User uploaded files + '/data/m3us' # M3U files mapped from host + ] + + # Check if the path starts with any protected directory + for protected_dir in protected_dirs: + if file_path.startswith(protected_dir): + return True + + return False + +def validate_flexible_url(value): + """ + Custom URL validator that accepts URLs with hostnames that aren't FQDNs. + This allows URLs like "http://hostname/" which + Django's standard URLValidator rejects. + """ + if not value: + return # Allow empty values since the field is nullable + + # Create a standard Django URL validator + url_validator = URLValidator() + + try: + # First try the standard validation + url_validator(value) + except ValidationError as e: + # If standard validation fails, check if it's a non-FQDN hostname + import re + + # More flexible pattern for non-FQDN hostnames with paths + # Matches: http://hostname, https://hostname/, http://hostname:port/path/to/file.xml, rtp://192.168.2.1, rtsp://192.168.178.1, udp://239.0.0.1:1234 + # Also matches FQDNs for rtsp/rtp/udp protocols: rtsp://FQDN/path?query=value + # Also supports authentication: rtsp://user:pass@hostname/path + non_fqdn_pattern = r'^(rts?p|https?|udp)://([a-zA-Z0-9_\-\.]+:[^\s@]+@)?([a-zA-Z0-9]([a-zA-Z0-9\-\.]{0,61}[a-zA-Z0-9])?|[0-9.]+)?(\:[0-9]+)?(/[^\s]*)?$' + non_fqdn_match = re.match(non_fqdn_pattern, value) + + if non_fqdn_match: + return # Accept non-FQDN hostnames and rtsp/rtp/udp URLs with optional authentication + + # If it doesn't match our flexible patterns, raise the original error + raise ValidationError("Enter a valid URL.") + + +def log_system_event(event_type, channel_id=None, channel_name=None, **details): + """ + Log a system event and maintain the configured max history. 
+ + Args: + event_type: Type of event (e.g., 'channel_start', 'client_connect') + channel_id: Optional UUID of the channel + channel_name: Optional name of the channel + **details: Additional details to store in the event (stored as JSON) + + Example: + log_system_event('channel_start', channel_id=uuid, channel_name='CNN', + stream_url='http://...', user='admin') + """ + from core.models import SystemEvent, CoreSettings + + try: + # Create the event + SystemEvent.objects.create( + event_type=event_type, + channel_id=channel_id, + channel_name=channel_name, + details=details + ) + + # Get max events from settings (default 100) + try: + from .models import CoreSettings + system_settings = CoreSettings.objects.filter(key='system_settings').first() + if system_settings and isinstance(system_settings.value, dict): + max_events = int(system_settings.value.get('max_system_events', 100)) + else: + max_events = 100 + except Exception: + max_events = 100 + + # Delete old events beyond the limit (keep it efficient with a single query) + total_count = SystemEvent.objects.count() + if total_count > max_events: + # Get the ID of the event at the cutoff point + cutoff_event = SystemEvent.objects.values_list('id', flat=True)[max_events] + # Delete all events with ID less than cutoff (older events) + SystemEvent.objects.filter(id__lt=cutoff_event).delete() + + except Exception as e: + # Don't let event logging break the main application + logger.error(f"Failed to log system event {event_type}: {e}") diff --git a/core/views.py b/core/views.py index 397783fb..5806d63c 100644 --- a/core/views.py +++ b/core/views.py @@ -1,5 +1,6 @@ # core/views.py import os +from shlex import split as shlex_split import sys import subprocess import logging @@ -37,7 +38,9 @@ def stream_view(request, channel_uuid): """ try: redis_host = getattr(settings, "REDIS_HOST", "localhost") - redis_client = redis.Redis(host=settings.REDIS_HOST, port=6379, db=int(getattr(settings, "REDIS_DB", "0"))) + redis_port = int(getattr(settings, "REDIS_PORT", 6379)) + redis_db = int(getattr(settings, "REDIS_DB", "0")) + redis_client = redis.Redis(host=redis_host, port=redis_port, db=redis_db) # Retrieve the channel by the provided stream_id. channel = Channel.objects.get(uuid=channel_uuid) @@ -73,7 +76,6 @@ def stream_view(request, channel_uuid): default_profile = next((obj for obj in m3u_profiles if obj.is_default), None) profiles = [obj for obj in m3u_profiles if not obj.is_default] - # -- Loop through profiles and pick the first active one -- for profile in [default_profile] + profiles: logger.debug(f'Checking profile {profile.name}...') @@ -130,7 +132,7 @@ def stream_view(request, channel_uuid): stream_profile = channel.stream_profile if not stream_profile: logger.error("No stream profile set for channel ID=%s, using default", channel.id) - stream_profile = StreamProfile.objects.get(id=CoreSettings.objects.get(key="default-stream-profile").value) + stream_profile = StreamProfile.objects.get(id=CoreSettings.get_default_stream_profile_id()) logger.debug("Stream profile used: %s", stream_profile.name) @@ -143,7 +145,7 @@ def stream_view(request, channel_uuid): logger.debug("Formatted parameters: %s", parameters) # Build the final command. 
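# Illustrative sketch, not part of the patch: the hunk just below swaps str.split() for
# shlex.split() when turning the profile's parameter string into an argv list, so quoted
# values containing spaces stay intact. The sample string here is made up for illustration.
import shlex
sample = '-user_agent "VLC/3.0 LibVLC" -i http://example.com/stream'
assert sample.split()[1] == '"VLC/3.0'             # naive split breaks the quoted value apart
assert shlex.split(sample)[1] == 'VLC/3.0 LibVLC'  # shlex keeps it as a single argument
# End of sketch; the patched line follows.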
- cmd = [stream_profile.command] + parameters.split() + cmd = [stream_profile.command] + shlex_split(parameters) logger.debug("Executing command: %s", cmd) try: @@ -174,7 +176,7 @@ def stream_view(request, channel_uuid): persistent_lock.release() logger.debug("Persistent lock released for channel ID=%s", channel.id) - return StreamingHttpResponse( - stream_generator(process, stream, persistent_lock), - content_type="video/MP2T" - ) + return StreamingHttpResponse( + stream_generator(process, stream, persistent_lock), + content_type="video/MP2T" + ) diff --git a/core/xtream_codes.py b/core/xtream_codes.py new file mode 100644 index 00000000..9b56197a --- /dev/null +++ b/core/xtream_codes.py @@ -0,0 +1,465 @@ +import requests +import logging +import traceback +import json + +logger = logging.getLogger(__name__) + +class Client: + """Xtream Codes API Client with robust error handling""" + + def __init__(self, server_url, username, password, user_agent=None): + self.server_url = self._normalize_url(server_url) + self.username = username + self.password = password + self.user_agent = user_agent + + # Fix: Properly handle all possible user_agent input types + if user_agent: + if isinstance(user_agent, str): + user_agent_string = user_agent + elif hasattr(user_agent, 'user_agent'): + user_agent_string = user_agent.user_agent + else: + logger.warning(f"Unexpected user_agent type: {type(user_agent)}, using default") + user_agent_string = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)' + else: + user_agent_string = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)' + + # Create persistent session + self.session = requests.Session() + self.session.headers.update({'User-Agent': user_agent_string}) + + # Configure connection pooling + adapter = requests.adapters.HTTPAdapter( + pool_connections=1, + pool_maxsize=2, + max_retries=3, + pool_block=False + ) + self.session.mount('http://', adapter) + self.session.mount('https://', adapter) + + self.server_info = None + + def _normalize_url(self, url): + """Normalize server URL by removing trailing slashes and paths""" + if not url: + raise ValueError("Server URL cannot be empty") + + url = url.rstrip('/') + # Remove any path after domain - we'll construct proper API URLs + # Split by protocol first to preserve it + if '://' in url: + protocol, rest = url.split('://', 1) + domain = rest.split('/', 1)[0] + return f"{protocol}://{domain}" + return url + + def _make_request(self, endpoint, params=None): + """Make request with detailed error handling""" + try: + url = f"{self.server_url}/{endpoint}" + logger.debug(f"XC API Request: {url} with params: {params}") + + response = self.session.get(url, params=params, timeout=30) + response.raise_for_status() + + # Check if response is empty + if not response.content: + error_msg = f"XC API returned empty response from {url}" + logger.error(error_msg) + raise ValueError(error_msg) + + # Check for common blocking responses before trying to parse JSON + response_text = response.text.strip() + if response_text.lower() in ['blocked', 'forbidden', 'access denied', 'unauthorized']: + error_msg = f"XC API request blocked by server from {url}. Response: {response_text}" + logger.error(error_msg) + logger.error(f"This may indicate IP blocking, User-Agent filtering, or rate limiting") + raise ValueError(error_msg) + + try: + data = response.json() + except requests.exceptions.JSONDecodeError as json_err: + error_msg = f"XC API returned invalid JSON from {url}. 
Response: {response.text[:1000]}" + logger.error(error_msg) + logger.error(f"JSON decode error: {str(json_err)}") + + # Check if it looks like an HTML error page + if response_text.startswith('<'): + logger.error("Response appears to be HTML - server may be returning an error page") + + raise ValueError(error_msg) + + # Check for XC-specific error responses + if isinstance(data, dict) and data.get('user_info') is None and 'error' in data: + error_msg = f"XC API Error: {data.get('error', 'Unknown error')}" + logger.error(error_msg) + raise ValueError(error_msg) + + return data + except requests.RequestException as e: + error_msg = f"XC API Request failed: {str(e)}" + logger.error(error_msg) + logger.error(f"Request details: URL={url}, Params={params}") + raise + except ValueError as e: + # This could be from JSON parsing or our explicit raises + logger.error(f"XC API Invalid response: {str(e)}") + raise + except Exception as e: + logger.error(f"XC API Unexpected error: {str(e)}") + logger.error(traceback.format_exc()) + raise + + def authenticate(self): + """Authenticate and validate server response""" + try: + endpoint = "player_api.php" + params = { + 'username': self.username, + 'password': self.password + } + + self.server_info = self._make_request(endpoint, params) + + if not self.server_info or not self.server_info.get('user_info'): + error_msg = "Authentication failed: Invalid response from server" + logger.error(f"{error_msg}. Response: {self.server_info}") + raise ValueError(error_msg) + + logger.info(f"XC Authentication successful for user {self.username}") + return self.server_info + except Exception as e: + logger.error(f"XC Authentication failed: {str(e)}") + logger.error(traceback.format_exc()) + raise + + def get_account_info(self): + """Get account information from the last authentication response""" + if not self.server_info: + raise ValueError("Not authenticated. 
Call authenticate() first.") + + from datetime import datetime + + # Extract relevant account information + user_info = self.server_info.get('user_info', {}) + server_info = self.server_info.get('server_info', {}) + + account_info = { + 'last_refresh': datetime.utcnow().isoformat() + 'Z', # Explicit UTC with Z suffix + 'auth_timestamp': datetime.utcnow().timestamp(), + 'user_info': { + 'username': user_info.get('username'), + 'password': user_info.get('password'), + 'message': user_info.get('message'), + 'auth': user_info.get('auth'), + 'status': user_info.get('status'), + 'exp_date': user_info.get('exp_date'), + 'is_trial': user_info.get('is_trial'), + 'active_cons': user_info.get('active_cons'), + 'created_at': user_info.get('created_at'), + 'max_connections': user_info.get('max_connections'), + 'allowed_output_formats': user_info.get('allowed_output_formats', []) + }, + 'server_info': { + 'url': server_info.get('url'), + 'port': server_info.get('port'), + 'https_port': server_info.get('https_port'), + 'server_protocol': server_info.get('server_protocol'), + 'rtmp_port': server_info.get('rtmp_port'), + 'timezone': server_info.get('timezone'), + 'timestamp_now': server_info.get('timestamp_now'), + 'time_now': server_info.get('time_now') + } + } + + return account_info + + def get_live_categories(self): + """Get live TV categories""" + try: + if not self.server_info: + self.authenticate() + + endpoint = "player_api.php" + params = { + 'username': self.username, + 'password': self.password, + 'action': 'get_live_categories' + } + + categories = self._make_request(endpoint, params) + + if not isinstance(categories, list): + error_msg = f"Invalid categories response: {categories}" + logger.error(error_msg) + raise ValueError(error_msg) + + logger.info(f"Successfully retrieved {len(categories)} live categories") + logger.debug(f"Categories: {json.dumps(categories[:5])}...") + return categories + except Exception as e: + logger.error(f"Failed to get live categories: {str(e)}") + logger.error(traceback.format_exc()) + raise + + def get_live_category_streams(self, category_id): + """Get streams for a specific category""" + try: + if not self.server_info: + self.authenticate() + + endpoint = "player_api.php" + params = { + 'username': self.username, + 'password': self.password, + 'action': 'get_live_streams', + 'category_id': category_id + } + + streams = self._make_request(endpoint, params) + + if not isinstance(streams, list): + error_msg = f"Invalid streams response for category {category_id}: {streams}" + logger.error(error_msg) + raise ValueError(error_msg) + + logger.info(f"Successfully retrieved {len(streams)} streams for category {category_id}") + return streams + except Exception as e: + logger.error(f"Failed to get streams for category {category_id}: {str(e)}") + logger.error(traceback.format_exc()) + raise + + def get_all_live_streams(self): + """Get all live streams (no category filter)""" + try: + if not self.server_info: + self.authenticate() + + endpoint = "player_api.php" + params = { + 'username': self.username, + 'password': self.password, + 'action': 'get_live_streams' + # No category_id = get all streams + } + + streams = self._make_request(endpoint, params) + + if not isinstance(streams, list): + error_msg = f"Invalid streams response for all live streams: {streams}" + logger.error(error_msg) + raise ValueError(error_msg) + + logger.info(f"Successfully retrieved {len(streams)} total live streams") + return streams + except Exception as e: + logger.error(f"Failed to get all 
live streams: {str(e)}") + logger.error(traceback.format_exc()) + raise + + def get_stream_url(self, stream_id): + """Get the playback URL for a stream""" + return f"{self.server_url}/live/{self.username}/{self.password}/{stream_id}.ts" + + def get_episode_stream_url(self, stream_id, container_extension='mp4'): + """Get the playback URL for an episode stream""" + return f"{self.server_url}/series/{self.username}/{self.password}/{stream_id}.{container_extension}" + + def get_vod_stream_url(self, stream_id, container_extension='mp4'): + """Get the playback URL for a VOD stream""" + return f"{self.server_url}/movie/{self.username}/{self.password}/{stream_id}.{container_extension}" + + def get_vod_categories(self): + """Get VOD categories""" + try: + if not self.server_info: + self.authenticate() + + endpoint = "player_api.php" + params = { + 'username': self.username, + 'password': self.password, + 'action': 'get_vod_categories' + } + + categories = self._make_request(endpoint, params) + + if not isinstance(categories, list): + error_msg = f"Invalid VOD categories response: {categories}" + logger.error(error_msg) + raise ValueError(error_msg) + + logger.info(f"Successfully retrieved {len(categories)} VOD categories") + return categories + except Exception as e: + logger.error(f"Failed to get VOD categories: {str(e)}") + logger.error(traceback.format_exc()) + raise + + def get_vod_streams(self, category_id=None): + """Get VOD streams for a specific category""" + try: + if not self.server_info: + self.authenticate() + + endpoint = "player_api.php" + params = { + 'username': self.username, + 'password': self.password, + 'action': 'get_vod_streams' + } + + if category_id: + params['category_id'] = category_id + + streams = self._make_request(endpoint, params) + + if not isinstance(streams, list): + error_msg = f"Invalid VOD streams response for category {category_id}: {streams}" + logger.error(error_msg) + raise ValueError(error_msg) + + logger.info(f"Successfully retrieved {len(streams)} VOD streams for category {category_id}") + return streams + except Exception as e: + logger.error(f"Failed to get VOD streams for category {category_id}: {str(e)}") + logger.error(traceback.format_exc()) + raise + + def get_vod_info(self, vod_id): + """Get detailed information for a specific VOD""" + try: + if not self.server_info: + self.authenticate() + + endpoint = "player_api.php" + params = { + 'username': self.username, + 'password': self.password, + 'action': 'get_vod_info', + 'vod_id': vod_id + } + + vod_info = self._make_request(endpoint, params) + + if not isinstance(vod_info, dict): + error_msg = f"Invalid VOD info response for vod_id {vod_id}: {vod_info}" + logger.error(error_msg) + raise ValueError(error_msg) + + logger.info(f"Successfully retrieved VOD info for vod_id {vod_id}") + return vod_info + except Exception as e: + logger.error(f"Failed to get VOD info for vod_id {vod_id}: {str(e)}") + logger.error(traceback.format_exc()) + raise + + def get_series_categories(self): + """Get series categories""" + try: + if not self.server_info: + self.authenticate() + + endpoint = "player_api.php" + params = { + 'username': self.username, + 'password': self.password, + 'action': 'get_series_categories' + } + + categories = self._make_request(endpoint, params) + + if not isinstance(categories, list): + error_msg = f"Invalid series categories response: {categories}" + logger.error(error_msg) + raise ValueError(error_msg) + + logger.info(f"Successfully retrieved {len(categories)} series categories") + return 
categories + except Exception as e: + logger.error(f"Failed to get series categories: {str(e)}") + logger.error(traceback.format_exc()) + raise + + def get_series(self, category_id=None): + """Get series for a specific category""" + try: + if not self.server_info: + self.authenticate() + + endpoint = "player_api.php" + params = { + 'username': self.username, + 'password': self.password, + 'action': 'get_series' + } + + if category_id: + params['category_id'] = category_id + + series = self._make_request(endpoint, params) + + if not isinstance(series, list): + error_msg = f"Invalid series response for category {category_id}: {series}" + logger.error(error_msg) + raise ValueError(error_msg) + + logger.info(f"Successfully retrieved {len(series)} series for category {category_id}") + return series + except Exception as e: + logger.error(f"Failed to get series for category {category_id}: {str(e)}") + logger.error(traceback.format_exc()) + raise + + def get_series_info(self, series_id): + """Get detailed information for a specific series including episodes""" + try: + if not self.server_info: + self.authenticate() + + endpoint = "player_api.php" + params = { + 'username': self.username, + 'password': self.password, + 'action': 'get_series_info', + 'series_id': series_id + } + + series_info = self._make_request(endpoint, params) + + if not isinstance(series_info, dict): + error_msg = f"Invalid series info response for series_id {series_id}: {series_info}" + logger.error(error_msg) + raise ValueError(error_msg) + + logger.info(f"Successfully retrieved series info for series_id {series_id}") + return series_info + except Exception as e: + logger.error(f"Failed to get series info for series_id {series_id}: {str(e)}") + logger.error(traceback.format_exc()) + raise + + def close(self): + """Close the session and cleanup resources""" + if hasattr(self, 'session') and self.session: + try: + self.session.close() + except Exception as e: + logger.debug(f"Error closing XC session: {e}") + + def __enter__(self): + """Enter the context manager""" + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """Exit the context manager and cleanup resources""" + self.close() + return False # Don't suppress exceptions + + def __del__(self): + """Ensure session is closed when object is destroyed""" + self.close() diff --git a/debian_install.sh b/debian_install.sh old mode 100644 new mode 100755 index 0e41139e..bda506b1 --- a/debian_install.sh +++ b/debian_install.sh @@ -1,208 +1,243 @@ #!/usr/bin/env bash -set -e +set -euo pipefail +IFS=$'\n\t' + +# Root check +if [[ $EUID -ne 0 ]]; then + echo "[ERROR] This script must be run as root." >&2 + exit 1 +fi + +trap 'echo -e "\n[ERROR] Line $LINENO failed. Exiting." >&2; exit 1' ERR ############################################################################## # 0) Warning / Disclaimer ############################################################################## -echo "**************************************************************" -echo "WARNING: While we do not anticipate any problems, we disclaim all" -echo "responsibility for anything that happens to your machine." -echo "" -echo "This script is intended for **Debian-based operating systems only**." -echo "Running it on other distributions WILL cause unexpected issues." -echo "" -echo "This script is **NOT RECOMMENDED** for use on your primary machine." -echo "For safety and best results, we strongly advise running this inside a" -echo "clean virtual machine (VM) or LXC container environment." 
-echo "" -echo "Additionally, there is NO SUPPORT for this method; Docker is the only" -echo "officially supported way to run Dispatcharr." -echo "**************************************************************" -echo "" -echo "If you wish to proceed, type \"I understand\" and press Enter." -read user_input - -if [ "$user_input" != "I understand" ]; then - echo "Exiting script..." - exit 1 -fi - +show_disclaimer() { + echo "**************************************************************" + echo "WARNING: While we do not anticipate any problems, we disclaim all" + echo "responsibility for anything that happens to your machine." + echo "" + echo "This script is intended for **Debian-based operating systems only**." + echo "Running it on other distributions WILL cause unexpected issues." + echo "" + echo "This script is **NOT RECOMMENDED** for use on your primary machine." + echo "For safety and best results, we strongly advise running this inside a" + echo "clean virtual machine (VM) or LXC container environment." + echo "" + echo "Additionally, there is NO SUPPORT for this method; Docker is the only" + echo "officially supported way to run Dispatcharr." + echo "**************************************************************" + echo "" + echo "If you wish to proceed, type \"I understand\" and press Enter." + read user_input + if [ "$user_input" != "I understand" ]; then + echo "Exiting script..." + exit 1 + fi +} ############################################################################## # 1) Configuration ############################################################################## -# Linux user/group under which Dispatcharr processes will run -DISPATCH_USER="dispatcharr" -DISPATCH_GROUP="dispatcharr" - -# Where Dispatcharr source code should live -APP_DIR="/opt/dispatcharr" - -# Git branch to clone (e.g., "main" or "dev") -DISPATCH_BRANCH="dev" - -# PostgreSQL settings -POSTGRES_DB="dispatcharr" -POSTGRES_USER="dispatch" -POSTGRES_PASSWORD="secret" - -# The port on which Nginx will listen for HTTP -NGINX_HTTP_PORT="9191" - -# The TCP port for Daphné (Django Channels) -WEBSOCKET_PORT="8001" - -# Directory inside /run/ for our socket; full path becomes /run/dispatcharr/dispatcharr.sock -GUNICORN_RUNTIME_DIR="dispatcharr" -GUNICORN_SOCKET="/run/${GUNICORN_RUNTIME_DIR}/dispatcharr.sock" +configure_variables() { + DISPATCH_USER="dispatcharr" + DISPATCH_GROUP="dispatcharr" + APP_DIR="/opt/dispatcharr" + DISPATCH_BRANCH="main" + POSTGRES_DB="dispatcharr" + POSTGRES_USER="dispatch" + POSTGRES_PASSWORD="secret" + NGINX_HTTP_PORT="9191" + WEBSOCKET_PORT="8001" + GUNICORN_RUNTIME_DIR="dispatcharr" + GUNICORN_SOCKET="/run/${GUNICORN_RUNTIME_DIR}/dispatcharr.sock" + PYTHON_BIN=$(command -v python3) + SYSTEMD_DIR="/etc/systemd/system" + NGINX_SITE="/etc/nginx/sites-available/dispatcharr" +} ############################################################################## # 2) Install System Packages ############################################################################## -echo ">>> Installing system packages..." -apt-get update -apt-get install -y \ - git \ - curl \ - wget \ - build-essential \ - gcc \ - libpcre3-dev \ - libpq-dev \ - python3-dev \ - python3-venv \ - python3-pip \ - nginx \ - redis-server \ - postgresql \ - postgresql-contrib \ - ffmpeg \ - procps \ - streamlink +install_packages() { + echo ">>> Installing system packages..." 
+ apt-get update + declare -a packages=( + git curl wget build-essential gcc libpq-dev + python3-dev python3-venv python3-pip nginx redis-server + postgresql postgresql-contrib ffmpeg procps streamlink + sudo + ) + apt-get install -y --no-install-recommends "${packages[@]}" -# Node.js setup (v23.x from NodeSource) - adjust version if needed -if ! command -v node >/dev/null 2>&1; then - echo ">>> Installing Node.js..." - curl -sL https://deb.nodesource.com/setup_23.x | bash - - apt-get install -y nodejs -fi + if ! command -v node >/dev/null 2>&1; then + echo ">>> Installing Node.js..." + curl -sL https://deb.nodesource.com/setup_24.x | bash - + apt-get install -y nodejs + fi -# Start & enable PostgreSQL and Redis -systemctl enable postgresql redis-server -systemctl start postgresql redis-server + systemctl enable --now postgresql redis-server +} ############################################################################## -# 3) Create Dispatcharr User/Group +# 3) Create User/Group ############################################################################## -if ! getent group "${DISPATCH_GROUP}" >/dev/null; then - echo ">>> Creating group: ${DISPATCH_GROUP}" - groupadd "${DISPATCH_GROUP}" -fi - -if ! id -u "${DISPATCH_USER}" >/dev/null; then - echo ">>> Creating user: ${DISPATCH_USER}" - useradd -m -g "${DISPATCH_GROUP}" -s /bin/bash "${DISPATCH_USER}" -fi +create_dispatcharr_user() { + if ! getent group "$DISPATCH_GROUP" >/dev/null; then + groupadd "$DISPATCH_GROUP" + fi + if ! id -u "$DISPATCH_USER" >/dev/null; then + useradd -m -g "$DISPATCH_GROUP" -s /bin/bash "$DISPATCH_USER" + fi +} ############################################################################## -# 4) Configure PostgreSQL Database +# 4) PostgreSQL Setup ############################################################################## -echo ">>> Configuring PostgreSQL..." -su - postgres -c "psql -tc \"SELECT 1 FROM pg_database WHERE datname='${POSTGRES_DB}'\"" | grep -q 1 || \ - su - postgres -c "psql -c \"CREATE DATABASE ${POSTGRES_DB};\"" +setup_postgresql() { + echo ">>> Checking PostgreSQL database and user..." -su - postgres -c "psql -tc \"SELECT 1 FROM pg_roles WHERE rolname='${POSTGRES_USER}'\"" | grep -q 1 || \ - su - postgres -c "psql -c \"CREATE USER ${POSTGRES_USER} WITH PASSWORD '${POSTGRES_PASSWORD}';\"" + db_exists=$(sudo -u postgres psql -tAc "SELECT 1 FROM pg_database WHERE datname='$POSTGRES_DB'") + if [[ "$db_exists" != "1" ]]; then + echo ">>> Creating database '${POSTGRES_DB}'..." + sudo -u postgres createdb "$POSTGRES_DB" + else + echo ">>> Database '${POSTGRES_DB}' already exists, skipping creation." + fi -su - postgres -c "psql -c \"GRANT ALL PRIVILEGES ON DATABASE ${POSTGRES_DB} TO ${POSTGRES_USER};\"" -su - postgres -c "psql -c \"ALTER DATABASE ${POSTGRES_DB} OWNER TO ${POSTGRES_USER};\"" -su - postgres -c "psql -d ${POSTGRES_DB} -c \"ALTER SCHEMA public OWNER TO ${POSTGRES_USER};\"" + user_exists=$(sudo -u postgres psql -tAc "SELECT 1 FROM pg_roles WHERE rolname='$POSTGRES_USER'") + if [[ "$user_exists" != "1" ]]; then + echo ">>> Creating user '${POSTGRES_USER}'..." + sudo -u postgres psql -c "CREATE USER $POSTGRES_USER WITH PASSWORD '$POSTGRES_PASSWORD';" + else + echo ">>> User '${POSTGRES_USER}' already exists, skipping creation." + fi + + echo ">>> Granting privileges..." 
+ sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE $POSTGRES_DB TO $POSTGRES_USER;" + sudo -u postgres psql -c "ALTER DATABASE $POSTGRES_DB OWNER TO $POSTGRES_USER;" + sudo -u postgres psql -d "$POSTGRES_DB" -c "ALTER SCHEMA public OWNER TO $POSTGRES_USER;" +} ############################################################################## -# 5) Clone or Update Dispatcharr Code +# 5) Clone Dispatcharr Repository ############################################################################## -echo ">>> Installing or updating Dispatcharr in ${APP_DIR} ..." +clone_dispatcharr_repo() { + echo ">>> Installing or updating Dispatcharr in ${APP_DIR} ..." + + if [ ! -d "$APP_DIR" ]; then + mkdir -p "$APP_DIR" + chown "$DISPATCH_USER:$DISPATCH_GROUP" "$APP_DIR" + fi -if [ ! -d "${APP_DIR}" ]; then - echo ">>> Cloning repository for the first time..." - mkdir -p "${APP_DIR}" - chown "${DISPATCH_USER}:${DISPATCH_GROUP}" "${APP_DIR}" - su - "${DISPATCH_USER}" -c "git clone -b ${DISPATCH_BRANCH} https://github.com/Dispatcharr/Dispatcharr.git ${APP_DIR}" -else - echo ">>> Updating existing repository..." - su - "${DISPATCH_USER}" <>> Updating existing Dispatcharr repo..." + su - "$DISPATCH_USER" <>> Cloning Dispatcharr repo into ${APP_DIR}..." + rm -rf "$APP_DIR"/* + chown "$DISPATCH_USER:$DISPATCH_GROUP" "$APP_DIR" + su - "$DISPATCH_USER" -c "git clone -b $DISPATCH_BRANCH https://github.com/Dispatcharr/Dispatcharr.git $APP_DIR" + fi +} ############################################################################## -# 6) Create Python Virtual Environment & Install Python Dependencies +# 6) Setup Python Environment ############################################################################## -echo ">>> Setting up Python virtual environment..." -su - "${DISPATCH_USER}" <>> Setting up Python virtual environment..." + su - "$DISPATCH_USER" <>> Linking ffmpeg into the virtual environment..." -ln -sf /usr/bin/ffmpeg ${APP_DIR}/env/bin/ffmpeg + ln -sf /usr/bin/ffmpeg "$APP_DIR/env/bin/ffmpeg" +} ############################################################################## -# 7) Build Frontend (React) +# 7) Build Frontend ############################################################################## -echo ">>> Building frontend..." -su - "${DISPATCH_USER}" <>> Building frontend..." + su - "$DISPATCH_USER" <>> Running Django migrations & collectstatic..." -su - "${DISPATCH_USER}" <>> Running Django migrations & collectstatic..." + su - "$DISPATCH_USER" </etc/systemd/system/dispatcharr.service +configure_services() { + echo ">>> Creating systemd service files..." 
+ + # Gunicorn + cat <<EOF >${SYSTEMD_DIR}/dispatcharr.service [Unit] Description=Gunicorn for Dispatcharr After=network.target postgresql.service redis-server.service @@ -211,36 +246,31 @@ After=network.target postgresql.service redis-server.service User=${DISPATCH_USER} Group=${DISPATCH_GROUP} WorkingDirectory=${APP_DIR} - RuntimeDirectory=${GUNICORN_RUNTIME_DIR} RuntimeDirectoryMode=0775 - -# Update PATH to include both the virtualenv and system binaries (for ffmpeg) Environment="PATH=${APP_DIR}/env/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin" Environment="POSTGRES_DB=${POSTGRES_DB}" Environment="POSTGRES_USER=${POSTGRES_USER}" Environment="POSTGRES_PASSWORD=${POSTGRES_PASSWORD}" Environment="POSTGRES_HOST=localhost" - +ExecStartPre=/usr/bin/bash -c 'until pg_isready -h localhost -U ${POSTGRES_USER}; do sleep 1; done' ExecStart=${APP_DIR}/env/bin/gunicorn \\ --workers=4 \\ --worker-class=gevent \\ --timeout=300 \\ --bind unix:${GUNICORN_SOCKET} \\ dispatcharr.wsgi:application - Restart=always KillMode=mixed - +SyslogIdentifier=dispatcharr +StandardOutput=journal +StandardError=journal [Install] WantedBy=multi-user.target EOF -############################################################################## -# 10) Create Systemd Service for Celery -############################################################################## - -cat <<EOF >/etc/systemd/system/dispatcharr-celery.service + # Celery + cat <<EOF >${SYSTEMD_DIR}/dispatcharr-celery.service [Unit] Description=Celery Worker for Dispatcharr After=network.target redis-server.service @@ -256,21 +286,18 @@ Environment="POSTGRES_USER=${POSTGRES_USER}" Environment="POSTGRES_PASSWORD=${POSTGRES_PASSWORD}" Environment="POSTGRES_HOST=localhost" Environment="CELERY_BROKER_URL=redis://localhost:6379/0" - ExecStart=${APP_DIR}/env/bin/celery -A dispatcharr worker -l info - Restart=always KillMode=mixed - +SyslogIdentifier=dispatcharr-celery +StandardOutput=journal +StandardError=journal [Install] WantedBy=multi-user.target EOF -############################################################################## -# 11) Create Systemd Service for Celery Beat (Optional) -############################################################################## - -cat <<EOF >/etc/systemd/system/dispatcharr-celerybeat.service + # Celery Beat + cat <<EOF >${SYSTEMD_DIR}/dispatcharr-celerybeat.service [Unit] Description=Celery Beat Scheduler for Dispatcharr After=network.target redis-server.service @@ -286,23 +313,20 @@ Environment="POSTGRES_USER=${POSTGRES_USER}" Environment="POSTGRES_PASSWORD=${POSTGRES_PASSWORD}" Environment="POSTGRES_HOST=localhost" Environment="CELERY_BROKER_URL=redis://localhost:6379/0" - ExecStart=${APP_DIR}/env/bin/celery -A dispatcharr beat -l info - Restart=always KillMode=mixed - +SyslogIdentifier=dispatcharr-celerybeat +StandardOutput=journal +StandardError=journal [Install] WantedBy=multi-user.target EOF -############################################################################## -# 12) Create Systemd Service for Daphné (WebSockets / Channels) -############################################################################## - -cat <<EOF >/etc/systemd/system/dispatcharr-daphne.service + # Daphne + cat <<EOF >${SYSTEMD_DIR}/dispatcharr-daphne.service [Unit] -Description=Daphne for Dispatcharr (ASGI) +Description=Daphne for Dispatcharr (ASGI/WebSockets) After=network.target Requires=dispatcharr.service @@ -315,47 +339,33 @@ Environment="POSTGRES_DB=${POSTGRES_DB}" Environment="POSTGRES_USER=${POSTGRES_USER}" Environment="POSTGRES_PASSWORD=${POSTGRES_PASSWORD}"
Environment="POSTGRES_HOST=localhost" - ExecStart=${APP_DIR}/env/bin/daphne -b 0.0.0.0 -p ${WEBSOCKET_PORT} dispatcharr.asgi:application - Restart=always KillMode=mixed - +SyslogIdentifier=dispatcharr-daphne +StandardOutput=journal +StandardError=journal [Install] WantedBy=multi-user.target EOF -############################################################################## -# 13) Configure Nginx -############################################################################## - -echo ">>> Configuring Nginx at /etc/nginx/sites-available/dispatcharr.conf ..." -cat </etc/nginx/sites-available/dispatcharr.conf + echo ">>> Creating Nginx config..." + cat </etc/nginx/sites-available/dispatcharr.conf server { listen ${NGINX_HTTP_PORT}; - - # Proxy to Gunicorn socket for main HTTP traffic location / { include proxy_params; proxy_pass http://unix:${GUNICORN_SOCKET}; } - - # Serve Django static files location /static/ { alias ${APP_DIR}/static/; } - - # Serve React build assets location /assets/ { alias ${APP_DIR}/frontend/dist/assets/; } - - # Serve media files if any location /media/ { alias ${APP_DIR}/media/; } - - # WebSockets for Daphné location /ws/ { proxy_pass http://127.0.0.1:${WEBSOCKET_PORT}; proxy_http_version 1.1; @@ -368,46 +378,67 @@ server { } EOF -ln -sf /etc/nginx/sites-available/dispatcharr.conf /etc/nginx/sites-enabled/dispatcharr.conf - -# Remove default site if it exists -if [ -f /etc/nginx/sites-enabled/default ]; then - rm -f /etc/nginx/sites-enabled/default -fi - -echo ">>> Testing Nginx config..." -nginx -t - -echo ">>> Restarting Nginx..." -systemctl restart nginx -systemctl enable nginx + ln -sf /etc/nginx/sites-available/dispatcharr.conf /etc/nginx/sites-enabled/dispatcharr.conf + [ -f /etc/nginx/sites-enabled/default ] && rm /etc/nginx/sites-enabled/default + nginx -t + systemctl restart nginx + systemctl enable nginx +} ############################################################################## -# 14) Start & Enable Services +# 11) Start Services ############################################################################## -echo ">>> Enabling systemd services..." -systemctl daemon-reload -systemctl enable dispatcharr -systemctl enable dispatcharr-celery -systemctl enable dispatcharr-celerybeat -systemctl enable dispatcharr-daphne - -echo ">>> Restarting / Starting services..." -systemctl restart dispatcharr -systemctl restart dispatcharr-celery -systemctl restart dispatcharr-celerybeat -systemctl restart dispatcharr-daphne +start_services() { + echo ">>> Enabling and starting services..." + systemctl daemon-reexec + systemctl daemon-reload + systemctl enable --now dispatcharr dispatcharr-celery dispatcharr-celerybeat dispatcharr-daphne +} ############################################################################## -# Done! +# 12) Summary ############################################################################## -echo "=================================================" -echo "Dispatcharr installation (or update) complete!" -echo "Nginx is listening on port ${NGINX_HTTP_PORT}." -echo "Gunicorn socket: ${GUNICORN_SOCKET}." -echo "WebSockets on port ${WEBSOCKET_PORT} (path /ws/)." -echo "You can check logs via 'sudo journalctl -u dispatcharr -f', etc." -echo "Visit http://:${NGINX_HTTP_PORT} in your browser." 
-echo "=================================================" +show_summary() { + server_ip=$(ip route get 1 | awk '{print $7; exit}') + cat < 500: # Only log if using more than 500MB + print(f"High memory usage detected in {task_name}: {mem:.2f} MB") + except (ImportError, Exception): + pass + +@app.on_after_configure.connect +def setup_celery_logging(**kwargs): + # Use our directly determined log level + log_level = effective_log_level + print(f"Celery configuring loggers with level: {log_level}") + + # Get the specific loggers that output potentially noisy messages + for logger_name in ['celery.app.trace', 'celery.beat', 'celery.worker.strategy', 'celery.beat.Scheduler', 'celery.pool']: + logger = logging.getLogger(logger_name) + + # Remove any existing filters first (in case this runs multiple times) + for filter in logger.filters[:]: + if hasattr(filter, '__class__') and filter.__class__.__name__ == 'SuppressFilter': + logger.removeFilter(filter) + + # Add filtering for both INFO and DEBUG levels - only TRACE will show full logging + if log_level not in ['TRACE']: + # Add a custom filter to completely filter out the repetitive messages + class SuppressFilter(logging.Filter): + def filter(self, record): + # Return False to completely suppress these specific patterns + if ( + "succeeded in" in getattr(record, 'msg', '') or + "Scheduler: Sending due task" in getattr(record, 'msg', '') or + "received" in getattr(record, 'msg', '') or + (logger_name == 'celery.pool' and "Apply" in getattr(record, 'msg', '')) + ): + return False # Don't log these messages at all + return True # Log all other messages + + # Add the filter to each logger + logger.addFilter(SuppressFilter()) + + # Set all Celery loggers to the configured level + # This ensures they respect TRACE/DEBUG when set + try: + numeric_level = getattr(logging, log_level) + logger.setLevel(numeric_level) + except (AttributeError, TypeError): + # If the log level string is invalid, default to DEBUG + logger.setLevel(logging.DEBUG) diff --git a/dispatcharr/consumers.py b/dispatcharr/consumers.py index 8d92c4fa..4e21bdae 100644 --- a/dispatcharr/consumers.py +++ b/dispatcharr/consumers.py @@ -6,12 +6,41 @@ logger = logging.getLogger(__name__) class MyWebSocketConsumer(AsyncWebsocketConsumer): async def connect(self): - await self.accept() self.room_name = "updates" - await self.channel_layer.group_add(self.room_name, self.channel_name) + + user = self.scope["user"] + if not user.is_authenticated: + await self.close() + return + + try: + await self.accept() + await self.channel_layer.group_add(self.room_name, self.channel_name) + # Send a connection confirmation to the client with consistent format + await self.send(text_data=json.dumps({ + 'type': 'connection_established', + 'data': { + 'success': True, + 'message': 'WebSocket connection established successfully' + } + })) + except Exception as e: + import logging + logger = logging.getLogger(__name__) + logger.error(f"Error in WebSocket connect: {str(e)}") + # If an error occurs during connection, attempt to close + try: + await self.close(code=1011) # Internal server error + except: + pass async def disconnect(self, close_code): - await self.channel_layer.group_discard(self.room_name, self.channel_name) + try: + await self.channel_layer.group_discard(self.room_name, self.channel_name) + except Exception as e: + import logging + logger = logging.getLogger(__name__) + logger.error(f"Error in WebSocket disconnect: {str(e)}") async def receive(self, text_data): data = json.loads(text_data) diff 
--git a/dispatcharr/jwt_ws_auth.py b/dispatcharr/jwt_ws_auth.py new file mode 100644 index 00000000..b478cd6f --- /dev/null +++ b/dispatcharr/jwt_ws_auth.py @@ -0,0 +1,47 @@ +from urllib.parse import parse_qs +from channels.middleware import BaseMiddleware +from channels.db import database_sync_to_async +from rest_framework_simplejwt.tokens import UntypedToken +from django.contrib.auth.models import AnonymousUser +from django.contrib.auth import get_user_model +from rest_framework_simplejwt.exceptions import InvalidToken, TokenError +from rest_framework_simplejwt.authentication import JWTAuthentication +import logging + +logger = logging.getLogger(__name__) +User = get_user_model() + +@database_sync_to_async +def get_user(validated_token): + try: + jwt_auth = JWTAuthentication() + user = jwt_auth.get_user(validated_token) + return user + except User.DoesNotExist: + logger.warning(f"User from token does not exist. User ID: {validated_token.get('user_id', 'unknown')}") + return AnonymousUser() + except Exception as e: + logger.error(f"Error getting user from token: {str(e)}") + return AnonymousUser() + +class JWTAuthMiddleware(BaseMiddleware): + async def __call__(self, scope, receive, send): + try: + # Extract the token from the query string + query_string = parse_qs(scope["query_string"].decode()) + token = query_string.get("token", [None])[0] + + if token is not None: + try: + validated_token = JWTAuthentication().get_validated_token(token) + scope["user"] = await get_user(validated_token) + except (InvalidToken, TokenError) as e: + logger.warning(f"Invalid token: {str(e)}") + scope["user"] = AnonymousUser() + else: + scope["user"] = AnonymousUser() + except Exception as e: + logger.error(f"Error in JWT authentication: {str(e)}") + scope["user"] = AnonymousUser() + + return await super().__call__(scope, receive, send) diff --git a/dispatcharr/persistent_lock.py b/dispatcharr/persistent_lock.py index 360c9b5d..27d480be 100644 --- a/dispatcharr/persistent_lock.py +++ b/dispatcharr/persistent_lock.py @@ -73,8 +73,12 @@ class PersistentLock: # Example usage (for testing purposes only): if __name__ == "__main__": - # Connect to Redis on localhost; adjust connection parameters as needed. - client = redis.Redis(host="localhost", port=6379, db=0) + import os + # Connect to Redis using environment variables; adjust connection parameters as needed. 
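To mint a token the middleware will accept, the simplest route is the rest_framework_simplejwt API the project already uses; run this inside the Django environment (for example `python manage.py shell`). The username is only an example:

    # Sketch: issue a JWT for a user and build the WebSocket URL the middleware expects.
    from django.contrib.auth import get_user_model
    from rest_framework_simplejwt.tokens import RefreshToken

    User = get_user_model()
    user = User.objects.get(username="admin")              # any existing account
    access_token = str(RefreshToken.for_user(user).access_token)

    print(f"ws://localhost:9191/ws/?token={access_token}")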
+ redis_host = os.environ.get("REDIS_HOST", "localhost") + redis_port = int(os.environ.get("REDIS_PORT", 6379)) + redis_db = int(os.environ.get("REDIS_DB", 0)) + client = redis.Redis(host=redis_host, port=redis_port, db=redis_db) lock = PersistentLock(client, "lock:example_account", lock_timeout=120) if lock.acquire(): diff --git a/dispatcharr/settings.py b/dispatcharr/settings.py index 96bda89b..1a9a1a44 100644 --- a/dispatcharr/settings.py +++ b/dispatcharr/settings.py @@ -4,69 +4,105 @@ from datetime import timedelta BASE_DIR = Path(__file__).resolve().parent.parent -SECRET_KEY = 'REPLACE_ME_WITH_A_REAL_SECRET' +SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY") REDIS_HOST = os.environ.get("REDIS_HOST", "localhost") +REDIS_PORT = int(os.environ.get("REDIS_PORT", 6379)) REDIS_DB = os.environ.get("REDIS_DB", "0") # Set DEBUG to True for development, False for production -if os.environ.get('DISPATCHARR_DEBUG', 'False').lower() == 'true': +if os.environ.get("DISPATCHARR_DEBUG", "False").lower() == "true": DEBUG = True else: DEBUG = False ALLOWED_HOSTS = ["*"] +SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https") INSTALLED_APPS = [ - 'apps.api', - 'apps.accounts', - 'apps.channels.apps.ChannelsConfig', - 'apps.dashboard', - 'apps.epg', - 'apps.hdhr', - 'apps.m3u', - 'apps.output', - 'apps.proxy.apps.ProxyConfig', - 'apps.proxy.ts_proxy', - 'core', - 'daphne', - 'drf_yasg', - 'channels', - 'django.contrib.admin', - 'django.contrib.auth', - 'django.contrib.contenttypes', - 'django.contrib.sessions', - 'django.contrib.messages', - 'django.contrib.staticfiles', - 'rest_framework', - 'corsheaders', - 'django_filters', - 'django_celery_beat', + "apps.api", + "apps.accounts", + "apps.channels.apps.ChannelsConfig", + "apps.dashboard", + "apps.epg", + "apps.hdhr", + "apps.m3u", + "apps.output", + "apps.proxy.apps.ProxyConfig", + "apps.proxy.ts_proxy", + "apps.vod.apps.VODConfig", + "core", + "daphne", + "drf_yasg", + "channels", + "django.contrib.admin", + "django.contrib.auth", + "django.contrib.contenttypes", + "django.contrib.sessions", + "django.contrib.messages", + "django.contrib.staticfiles", + "rest_framework", + "corsheaders", + "django_filters", + "django_celery_beat", + "apps.plugins", ] +# EPG Processing optimization settings +EPG_BATCH_SIZE = 1000 # Number of records to process in a batch +EPG_MEMORY_LIMIT = 512 # Memory limit in MB before forcing garbage collection +EPG_ENABLE_MEMORY_MONITORING = True # Whether to monitor memory usage during processing + +# XtreamCodes Rate Limiting Settings +# Delay between profile authentications when refreshing multiple profiles +# This prevents providers from temporarily banning users with many profiles +XC_PROFILE_REFRESH_DELAY = float(os.environ.get('XC_PROFILE_REFRESH_DELAY', '2.5')) # seconds between profile refreshes + +# Database optimization settings +DATABASE_STATEMENT_TIMEOUT = 300 # Seconds before timing out long-running queries +DATABASE_CONN_MAX_AGE = ( + 60 # Connection max age in seconds, helps with frequent reconnects +) + +# Disable atomic requests for performance-sensitive views +ATOMIC_REQUESTS = False + +# Cache settings - add caching for EPG operations +CACHES = { + "default": { + "BACKEND": "django.core.cache.backends.locmem.LocMemCache", + "LOCATION": "dispatcharr-epg-cache", + "TIMEOUT": 3600, # 1 hour cache timeout + "OPTIONS": { + "MAX_ENTRIES": 10000, + "CULL_FREQUENCY": 3, # Purge 1/3 of entries when max is reached + }, + } +} + +# Timeouts for external connections +REQUESTS_TIMEOUT = 30 # Seconds for external API 
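The new locmem cache is reachable through Django's normal cache API, which is presumably how the EPG code takes advantage of the timeout and culling settings above; the key name and parse function in this sketch are illustrative placeholders, not taken from the app:

    # Sketch: using the "default" cache configured above (run inside the Django project).
    from django.core.cache import cache

    def parse_epg_source(source_id: int) -> list:
        # Placeholder for the real (expensive) EPG parse.
        return []

    key = "epg:source:42:parsed"                  # hypothetical key naming
    programmes = cache.get(key)
    if programmes is None:
        programmes = parse_epg_source(42)
        cache.set(key, programmes, timeout=3600)  # matches the configured 1-hour TIMEOUT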
requests + MIDDLEWARE = [ - 'django.middleware.security.SecurityMiddleware', - 'django.contrib.sessions.middleware.SessionMiddleware', - 'django.middleware.common.CommonMiddleware', - 'django.middleware.csrf.CsrfViewMiddleware', - 'django.contrib.auth.middleware.AuthenticationMiddleware', - 'django.contrib.messages.middleware.MessageMiddleware', - 'django.middleware.clickjacking.XFrameOptionsMiddleware', - 'corsheaders.middleware.CorsMiddleware', + "django.middleware.security.SecurityMiddleware", + "django.contrib.sessions.middleware.SessionMiddleware", + "django.middleware.common.CommonMiddleware", + "django.middleware.csrf.CsrfViewMiddleware", + "django.contrib.auth.middleware.AuthenticationMiddleware", + "django.contrib.messages.middleware.MessageMiddleware", + "django.middleware.clickjacking.XFrameOptionsMiddleware", + "corsheaders.middleware.CorsMiddleware", ] -ROOT_URLCONF = 'dispatcharr.urls' +ROOT_URLCONF = "dispatcharr.urls" TEMPLATES = [ { - 'BACKEND': 'django.template.backends.django.DjangoTemplates', - 'DIRS': [ - os.path.join(BASE_DIR, 'frontend/dist'), - BASE_DIR / "templates" - ], - 'APP_DIRS': True, - 'OPTIONS': { - 'context_processors': [ + "BACKEND": "django.template.backends.django.DjangoTemplates", + "DIRS": [os.path.join(BASE_DIR, "frontend/dist"), BASE_DIR / "templates"], + "APP_DIRS": True, + "OPTIONS": { + "context_processors": [ "django.template.context_processors.debug", "django.template.context_processors.request", "django.contrib.auth.context_processors.auth", @@ -76,88 +112,87 @@ TEMPLATES = [ }, ] -WSGI_APPLICATION = 'dispatcharr.wsgi.application' -ASGI_APPLICATION = 'dispatcharr.asgi.application' +WSGI_APPLICATION = "dispatcharr.wsgi.application" +ASGI_APPLICATION = "dispatcharr.asgi.application" CHANNEL_LAYERS = { "default": { "BACKEND": "channels_redis.core.RedisChannelLayer", "CONFIG": { - "hosts": [(REDIS_HOST, 6379, REDIS_DB)], # Ensure Redis is running + "hosts": [(REDIS_HOST, REDIS_PORT, REDIS_DB)], # Ensure Redis is running }, }, } -if os.getenv('DB_ENGINE', None) == 'sqlite': +if os.getenv("DB_ENGINE", None) == "sqlite": DATABASES = { - 'default': { - 'ENGINE': 'django.db.backends.sqlite3', - 'NAME': '/data/dispatcharr.db', + "default": { + "ENGINE": "django.db.backends.sqlite3", + "NAME": "/data/dispatcharr.db", } } else: DATABASES = { - 'default': { - 'ENGINE': 'django.db.backends.postgresql', - 'NAME': os.environ.get('POSTGRES_DB', 'dispatcharr'), - 'USER': os.environ.get('POSTGRES_USER', 'dispatch'), - 'PASSWORD': os.environ.get('POSTGRES_PASSWORD', 'secret'), - 'HOST': os.environ.get('POSTGRES_HOST', 'localhost'), - 'PORT': int(os.environ.get('POSTGRES_PORT', 5432)), + "default": { + "ENGINE": "django.db.backends.postgresql", + "NAME": os.environ.get("POSTGRES_DB", "dispatcharr"), + "USER": os.environ.get("POSTGRES_USER", "dispatch"), + "PASSWORD": os.environ.get("POSTGRES_PASSWORD", "secret"), + "HOST": os.environ.get("POSTGRES_HOST", "localhost"), + "PORT": int(os.environ.get("POSTGRES_PORT", 5432)), + "CONN_MAX_AGE": DATABASE_CONN_MAX_AGE, } } AUTH_PASSWORD_VALIDATORS = [ { - 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', + "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", }, ] REST_FRAMEWORK = { - 'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema', - 'DEFAULT_RENDERER_CLASSES': [ - 'rest_framework.renderers.JSONRenderer', - 'rest_framework.renderers.BrowsableAPIRenderer', + "DEFAULT_SCHEMA_CLASS": "rest_framework.schemas.coreapi.AutoSchema", + 
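The Redis channel layer configured above is what lets any part of the application broadcast to the "updates" group that MyWebSocketConsumer joins. A minimal sketch of that broadcast; the message "type" must correspond to a handler method on the consumer (assumed here as `update`, which is not shown in this diff):

    # Sketch: push an event to every connected WebSocket client via the channel layer.
    from asgiref.sync import async_to_sync
    from channels.layers import get_channel_layer

    layer = get_channel_layer()
    async_to_sync(layer.group_send)(
        "updates",                                   # group joined in MyWebSocketConsumer.connect
        {"type": "update", "data": {"success": True, "message": "hello"}},
    )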
"DEFAULT_RENDERER_CLASSES": [ + "rest_framework.renderers.JSONRenderer", + "rest_framework.renderers.BrowsableAPIRenderer", ], - 'DEFAULT_AUTHENTICATION_CLASSES': [ - 'rest_framework_simplejwt.authentication.JWTAuthentication', + "DEFAULT_AUTHENTICATION_CLASSES": [ + "rest_framework_simplejwt.authentication.JWTAuthentication", ], - 'DEFAULT_FILTER_BACKENDS': ['django_filters.rest_framework.DjangoFilterBackend'], + "DEFAULT_FILTER_BACKENDS": ["django_filters.rest_framework.DjangoFilterBackend"], } SWAGGER_SETTINGS = { - 'SECURITY_DEFINITIONS': { - 'Bearer': { - 'type': 'apiKey', - 'name': 'Authorization', - 'in': 'header' - } - } + "SECURITY_DEFINITIONS": { + "Bearer": {"type": "apiKey", "name": "Authorization", "in": "header"} + } } -LANGUAGE_CODE = 'en-us' -TIME_ZONE = 'UTC' +LANGUAGE_CODE = "en-us" +TIME_ZONE = "UTC" USE_I18N = True USE_TZ = True -STATIC_URL = '/static/' -STATIC_ROOT = BASE_DIR / 'static' # Directory where static files will be collected +STATIC_URL = "/static/" +STATIC_ROOT = BASE_DIR / "static" # Directory where static files will be collected # Adjust STATICFILES_DIRS to include the paths to the directories that contain your static files. STATICFILES_DIRS = [ - os.path.join(BASE_DIR, 'frontend/dist'), # React build static files + os.path.join(BASE_DIR, "frontend/dist"), # React build static files ] -DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' -AUTH_USER_MODEL = 'accounts.User' +DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField" +AUTH_USER_MODEL = "accounts.User" -CELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL', 'redis://localhost:6379/0') -CELERY_RESULT_BACKEND = CELERY_BROKER_URL +# Build default Redis URL from components for Celery +_default_redis_url = f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}" +CELERY_BROKER_URL = os.environ.get("CELERY_BROKER_URL", _default_redis_url) +CELERY_RESULT_BACKEND = os.environ.get("CELERY_RESULT_BACKEND", CELERY_BROKER_URL) # Configure Redis key prefix CELERY_RESULT_BACKEND_TRANSPORT_OPTIONS = { - 'global_keyprefix': 'celery-tasks:', # Set the Redis key prefix for Celery + "global_keyprefix": "celery-tasks:", # Set the Redis key prefix for Celery } # Set TTL (Time-to-Live) for task results (in seconds) @@ -165,43 +200,59 @@ CELERY_RESULT_EXPIRES = 3600 # 1 hour TTL for task results # Optionally, set visibility timeout for task retries (if using Redis) CELERY_BROKER_TRANSPORT_OPTIONS = { - 'visibility_timeout': 3600, # Time in seconds that a task remains invisible during retries + "visibility_timeout": 3600, # Time in seconds that a task remains invisible during retries } -CELERY_ACCEPT_CONTENT = ['json'] -CELERY_TASK_SERIALIZER = 'json' +CELERY_ACCEPT_CONTENT = ["json"] +CELERY_TASK_SERIALIZER = "json" CELERY_BEAT_SCHEDULER = "django_celery_beat.schedulers.DatabaseScheduler" CELERY_BEAT_SCHEDULE = { - 'fetch-channel-statuses': { - 'task': 'core.tasks.beat_periodic_task', - 'schedule': 2.0, + # Explicitly disable the old fetch-channel-statuses task + # This ensures it gets disabled when DatabaseScheduler syncs + "fetch-channel-statuses": { + "task": "apps.proxy.tasks.fetch_channel_stats", + "schedule": 2.0, # Original schedule (doesn't matter since disabled) + "enabled": False, # Explicitly disabled + }, + # Keep the file scanning task + "scan-files": { + "task": "core.tasks.scan_and_process_files", # Direct task call + "schedule": 20.0, # Every 20 seconds + }, + "maintain-recurring-recordings": { + "task": "apps.channels.tasks.maintain_recurring_recordings", + "schedule": 3600.0, # Once an hour ensure recurring 
schedules stay ahead }, } -MEDIA_ROOT = BASE_DIR / 'media' -MEDIA_URL = '/media/' +MEDIA_ROOT = BASE_DIR / "media" +MEDIA_URL = "/media/" +# Backup settings +BACKUP_ROOT = os.environ.get("BACKUP_ROOT", "/data/backups") +BACKUP_DATA_DIRS = [ + os.environ.get("LOGOS_DIR", "/data/logos"), + os.environ.get("UPLOADS_DIR", "/data/uploads"), + os.environ.get("PLUGINS_DIR", "/data/plugins"), +] SERVER_IP = "127.0.0.1" CORS_ALLOW_ALL_ORIGINS = True CORS_ALLOW_CREDENTIALS = True -CSRF_TRUSTED_ORIGINS = [ - 'http://*', - 'https://*' -] +CSRF_TRUSTED_ORIGINS = ["http://*", "https://*"] APPEND_SLASH = True SIMPLE_JWT = { - 'ACCESS_TOKEN_LIFETIME': timedelta(minutes=30), - 'REFRESH_TOKEN_LIFETIME': timedelta(days=1), - 'ROTATE_REFRESH_TOKENS': False, # Optional: Whether to rotate refresh tokens - 'BLACKLIST_AFTER_ROTATION': True, # Optional: Whether to blacklist refresh tokens + "ACCESS_TOKEN_LIFETIME": timedelta(minutes=30), + "REFRESH_TOKEN_LIFETIME": timedelta(days=1), + "ROTATE_REFRESH_TOKENS": False, # Optional: Whether to rotate refresh tokens + "BLACKLIST_AFTER_ROTATION": True, # Optional: Whether to blacklist refresh tokens } # Redis connection settings -REDIS_URL = 'redis://localhost:6379/0' +REDIS_URL = os.environ.get("REDIS_URL", f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}") REDIS_SOCKET_TIMEOUT = 60 # Socket timeout in seconds REDIS_SOCKET_CONNECT_TIMEOUT = 5 # Connection timeout in seconds REDIS_HEALTH_CHECK_INTERVAL = 15 # Health check every 15 seconds @@ -212,23 +263,114 @@ REDIS_RETRY_INTERVAL = 1 # Initial retry interval in seconds # Proxy Settings PROXY_SETTINGS = { - 'HLS': { - 'DEFAULT_URL': '', # Default HLS stream URL if needed - 'BUFFER_SIZE': 1000, - 'USER_AGENT': 'VLC/3.0.20 LibVLC/3.0.20', - 'CHUNK_SIZE': 8192, - 'CLIENT_POLL_INTERVAL': 0.1, - 'MAX_RETRIES': 3, - 'MIN_SEGMENTS': 12, - 'MAX_SEGMENTS': 16, - 'WINDOW_SIZE': 12, - 'INITIAL_SEGMENTS': 3, + "HLS": { + "DEFAULT_URL": "", # Default HLS stream URL if needed + "BUFFER_SIZE": 1000, + "USER_AGENT": "VLC/3.0.20 LibVLC/3.0.20", + "CHUNK_SIZE": 8192, + "CLIENT_POLL_INTERVAL": 0.1, + "MAX_RETRIES": 3, + "MIN_SEGMENTS": 12, + "MAX_SEGMENTS": 16, + "WINDOW_SIZE": 12, + "INITIAL_SEGMENTS": 3, + }, + "TS": { + "DEFAULT_URL": "", # Default TS stream URL if needed + "BUFFER_SIZE": 1000, + "RECONNECT_DELAY": 5, + "USER_AGENT": "VLC/3.0.20 LibVLC/3.0.20", + }, +} + +# Map log level names to their numeric values +LOG_LEVEL_MAP = { + "TRACE": 5, + "DEBUG": 10, + "INFO": 20, + "WARNING": 30, + "ERROR": 40, + "CRITICAL": 50, +} + +# Get log level from environment variable, default to INFO if not set +# Add debugging output to see exactly what's being detected +env_log_level = os.environ.get("DISPATCHARR_LOG_LEVEL", "") +print(f"Environment DISPATCHARR_LOG_LEVEL detected as: '{env_log_level}'") + +if not env_log_level: + print("No DISPATCHARR_LOG_LEVEL found in environment, using default INFO") + LOG_LEVEL_NAME = "INFO" +else: + LOG_LEVEL_NAME = env_log_level.upper() + print(f"Setting log level to: {LOG_LEVEL_NAME}") + +LOG_LEVEL = LOG_LEVEL_MAP.get(LOG_LEVEL_NAME, 20) # Default to INFO (20) if invalid + +# Add this to your existing LOGGING configuration or create one if it doesn't exist +LOGGING = { + "version": 1, + "disable_existing_loggers": False, + "formatters": { + "verbose": { + "format": "{asctime} {levelname} {name} {message}", + "style": "{", + }, + }, + "handlers": { + "console": { + "class": "logging.StreamHandler", + "formatter": "verbose", + "level": 5, # Always allow TRACE level messages through the handler + }, + }, + 
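LOG_LEVEL_MAP introduces a TRACE level at numeric value 5, one notch below DEBUG, and the console handler is pinned to 5 so TRACE records can pass through. The standard library does not know that name by itself, so it has to be registered somewhere in the project (not shown in this diff); a minimal sketch of the mechanism:

    # Sketch: registering and using a custom TRACE level (value 5, as in LOG_LEVEL_MAP).
    import logging

    TRACE = 5
    logging.addLevelName(TRACE, "TRACE")

    logging.basicConfig(level=TRACE, format="{asctime} {levelname} {name} {message}", style="{")
    logger = logging.getLogger("apps.proxy")
    logger.log(TRACE, "only visible when the effective level is TRACE")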
"loggers": { + "core.tasks": { + "handlers": ["console"], + "level": LOG_LEVEL, # Use environment-configured level + "propagate": False, # Don't propagate to root logger to avoid duplicate logs + }, + "core.utils": { + "handlers": ["console"], + "level": LOG_LEVEL, + "propagate": False, + }, + "apps.proxy": { + "handlers": ["console"], + "level": LOG_LEVEL, # Use environment-configured level + "propagate": False, # Don't propagate to root logger + }, + # Add parent logger for all app modules + "apps": { + "handlers": ["console"], + "level": LOG_LEVEL, + "propagate": False, + }, + # Celery loggers to capture task execution messages + "celery": { + "handlers": ["console"], + "level": LOG_LEVEL, # Use configured log level for Celery logs + "propagate": False, + }, + "celery.task": { + "handlers": ["console"], + "level": LOG_LEVEL, # Use configured log level for task-specific logs + "propagate": False, + }, + "celery.worker": { + "handlers": ["console"], + "level": LOG_LEVEL, # Use configured log level for worker logs + "propagate": False, + }, + "celery.beat": { + "handlers": ["console"], + "level": LOG_LEVEL, # Use configured log level for scheduler logs + "propagate": False, + }, + # Add any other loggers you need to capture TRACE logs from + }, + "root": { + "handlers": ["console"], + "level": LOG_LEVEL, # Use user-configured level instead of hardcoded 'INFO' }, - 'TS': { - 'DEFAULT_URL': '', # Default TS stream URL if needed - 'BUFFER_SIZE': 1000, - 'RECONNECT_DELAY': 5, - 'USER_AGENT': 'VLC/3.0.20 LibVLC/3.0.20', - 'REDIS_CHUNK_TTL': 60, # How long to keep chunks in Redis (seconds) - } } diff --git a/dispatcharr/urls.py b/dispatcharr/urls.py index f0de138e..890d0c2d 100644 --- a/dispatcharr/urls.py +++ b/dispatcharr/urls.py @@ -7,13 +7,15 @@ from rest_framework import permissions from drf_yasg.views import get_schema_view from drf_yasg import openapi from .routing import websocket_urlpatterns - +from apps.output.views import xc_player_api, xc_panel_api, xc_get, xc_xmltv +from apps.proxy.ts_proxy.views import stream_xc +from apps.output.views import xc_movie_stream, xc_series_stream # Define schema_view for Swagger schema_view = get_schema_view( openapi.Info( title="Dispatcharr API", - default_version='v1', + default_version="v1", description="API documentation for Dispatcharr", terms_of_service="https://www.google.com/policies/terms/", contact=openapi.Contact(email="contact@dispatcharr.local"), @@ -25,38 +27,57 @@ schema_view = get_schema_view( urlpatterns = [ # API Routes - path('api/', include(('apps.api.urls', 'api'), namespace='api')), - path('api', RedirectView.as_view(url='/api/', permanent=True)), - + path("api/", include(("apps.api.urls", "api"), namespace="api")), + path("api", RedirectView.as_view(url="/api/", permanent=True)), # Admin - path('admin', RedirectView.as_view(url='/admin/', permanent=True)), - path('admin/', admin.site.urls), - + path("admin", RedirectView.as_view(url="/admin/", permanent=True)), + path("admin/", admin.site.urls), # Outputs - path('output', RedirectView.as_view(url='/output/', permanent=True)), - path('output/', include(('apps.output.urls', 'output'), namespace='output')), - + path("output", RedirectView.as_view(url="/output/", permanent=True)), + path("output/", include(("apps.output.urls", "output"), namespace="output")), # HDHR - path('hdhr', RedirectView.as_view(url='/hdhr/', permanent=True)), - path('hdhr/', include(('apps.hdhr.urls', 'hdhr'), namespace='hdhr')), - + path("hdhr", RedirectView.as_view(url="/hdhr/", permanent=True)), + 
path("hdhr/", include(("apps.hdhr.urls", "hdhr"), namespace="hdhr")), # Add proxy apps - Move these before the catch-all - path('proxy/', include(('apps.proxy.urls', 'proxy'), namespace='proxy')), - path('proxy', RedirectView.as_view(url='/proxy/', permanent=True)), - - # Swagger UI - path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'), + path("proxy/", include(("apps.proxy.urls", "proxy"), namespace="proxy")), + path("proxy", RedirectView.as_view(url="/proxy/", permanent=True)), + # xc + re_path("player_api.php", xc_player_api, name="xc_player_api"), + re_path("panel_api.php", xc_panel_api, name="xc_panel_api"), + re_path("get.php", xc_get, name="xc_get"), + re_path("xmltv.php", xc_xmltv, name="xc_xmltv"), + path( + "live///", + stream_xc, + name="xc_live_stream_endpoint", + ), + path( + "//", + stream_xc, + name="xc_stream_endpoint", + ), + # XC VOD endpoints + path( + "movie///.", + xc_movie_stream, + name="xc_movie_stream", + ), + path( + "series///.", + xc_series_stream, + name="xc_series_stream", + ), + re_path(r"^swagger/?$", schema_view.with_ui("swagger", cache_timeout=0), name="schema-swagger-ui"), # ReDoc UI - path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'), - + path("redoc/", schema_view.with_ui("redoc", cache_timeout=0), name="schema-redoc"), # Optionally, serve the raw Swagger JSON - path('swagger.json', schema_view.without_ui(cache_timeout=0), name='schema-json'), + path("swagger.json", schema_view.without_ui(cache_timeout=0), name="schema-json"), + # VOD proxy is now handled by the main proxy URLs above # Catch-all routes should always be last - path('', TemplateView.as_view(template_name='index.html')), # React entry point - path('', TemplateView.as_view(template_name='index.html')), - + path("", TemplateView.as_view(template_name="index.html")), # React entry point + path("", TemplateView.as_view(template_name="index.html")), ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) urlpatterns += websocket_urlpatterns diff --git a/dispatcharr/utils.py b/dispatcharr/utils.py index e6392c6c..e588bcaa 100644 --- a/dispatcharr/utils.py +++ b/dispatcharr/utils.py @@ -1,23 +1,61 @@ # dispatcharr/utils.py +import json +import ipaddress from django.http import JsonResponse from django.core.exceptions import ValidationError +from core.models import CoreSettings, NETWORK_ACCESS_KEY + def json_error_response(message, status=400): """Return a standardized error JSON response.""" - return JsonResponse({'success': False, 'error': message}, status=status) + return JsonResponse({"success": False, "error": message}, status=status) + def json_success_response(data=None, status=200): """Return a standardized success JSON response.""" - response = {'success': True} + response = {"success": True} if data is not None: response.update(data) return JsonResponse(response, status=status) + def validate_logo_file(file): """Validate uploaded logo file size and MIME type.""" - valid_mime_types = ['image/jpeg', 'image/png', 'image/gif'] + valid_mime_types = ["image/jpeg", "image/png", "image/gif", "image/webp", "image/svg+xml"] if file.content_type not in valid_mime_types: - raise ValidationError('Unsupported file type. Allowed types: JPEG, PNG, GIF.') - if file.size > 2 * 1024 * 1024: - raise ValidationError('File too large. Max 2MB.') + raise ValidationError("Unsupported file type. Allowed types: JPEG, PNG, GIF, WebP, SVG.") + if file.size > 5 * 1024 * 1024: # 5MB + raise ValidationError("File too large. 
Max 5MB.") + +def get_client_ip(request): + x_forwarded_for = request.META.get("HTTP_X_REAL_IP") + if x_forwarded_for: + # X-Forwarded-For can be a comma-separated list of IPs + ip = x_forwarded_for.split(",")[0].strip() + else: + ip = request.META.get("REMOTE_ADDR") + return ip + + +def network_access_allowed(request, settings_key): + try: + network_access = CoreSettings.objects.get(key=NETWORK_ACCESS_KEY).value + except CoreSettings.DoesNotExist: + network_access = {} + + cidrs = ( + network_access[settings_key].split(",") + if settings_key in network_access + else ["0.0.0.0/0", "::/0"] + ) + + network_allowed = False + client_ip = ipaddress.ip_address(get_client_ip(request)) + for cidr in cidrs: + network = ipaddress.ip_network(cidr) + if client_ip in network: + network_allowed = True + break + + return network_allowed diff --git a/docker/DispatcharrBase b/docker/DispatcharrBase new file mode 100644 index 00000000..149bfffb --- /dev/null +++ b/docker/DispatcharrBase @@ -0,0 +1,63 @@ +FROM lscr.io/linuxserver/ffmpeg:latest + +ENV DEBIAN_FRONTEND=noninteractive +ENV VIRTUAL_ENV=/dispatcharrpy +ENV PATH="$VIRTUAL_ENV/bin:$PATH" + +# --- Install Python 3.13 and build dependencies --- +# Note: Hardware acceleration (VA-API, VDPAU, NVENC) already included in base ffmpeg image +RUN apt-get update && apt-get install --no-install-recommends -y \ + ca-certificates software-properties-common gnupg2 curl wget \ + && add-apt-repository ppa:deadsnakes/ppa \ + && apt-get update \ + && apt-get install --no-install-recommends -y \ + python3.13 python3.13-dev python3.13-venv libpython3.13 \ + python-is-python3 python3-pip \ + libpcre3 libpcre3-dev libpq-dev procps pciutils \ + nginx streamlink comskip \ + vlc-bin vlc-plugin-base \ + build-essential gcc g++ gfortran libopenblas-dev libopenblas0 ninja-build + +# --- Create Python virtual environment --- +RUN python3.13 -m venv $VIRTUAL_ENV && $VIRTUAL_ENV/bin/pip install --upgrade pip + +# --- Install Python dependencies --- +COPY requirements.txt /tmp/requirements.txt +RUN $VIRTUAL_ENV/bin/pip install --no-cache-dir -r /tmp/requirements.txt && \ + rm /tmp/requirements.txt + +# --- Build legacy NumPy wheel for old hardware (store for runtime switching) --- +RUN $VIRTUAL_ENV/bin/pip install --no-cache-dir build && \ + cd /tmp && \ + $VIRTUAL_ENV/bin/pip download --no-binary numpy --no-deps numpy && \ + tar -xzf numpy-*.tar.gz && \ + cd numpy-*/ && \ + $VIRTUAL_ENV/bin/python -m build --wheel -Csetup-args=-Dcpu-baseline="none" -Csetup-args=-Dcpu-dispatch="none" && \ + mv dist/*.whl /opt/ && \ + cd / && rm -rf /tmp/numpy-* /tmp/*.tar.gz && \ + $VIRTUAL_ENV/bin/pip uninstall -y build + +# --- Clean up build dependencies to reduce image size --- +RUN apt-get remove -y build-essential gcc g++ gfortran libopenblas-dev libpcre3-dev python3.13-dev ninja-build && \ + apt-get autoremove -y --purge && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* /root/.cache /tmp/* + +# --- Set up Redis 7.x --- +RUN curl -fsSL https://packages.redis.io/gpg | gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg && \ + echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | \ + tee /etc/apt/sources.list.d/redis.list && \ + apt-get update && apt-get install -y redis-server && \ + apt-get clean && rm -rf /var/lib/apt/lists/* + +# --- Set up PostgreSQL 17.x --- +RUN curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor -o /usr/share/keyrings/postgresql-keyring.gpg && \ + echo "deb 
[signed-by=/usr/share/keyrings/postgresql-keyring.gpg] http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" | \ + tee /etc/apt/sources.list.d/pgdg.list && \ + apt-get update && apt-get install -y postgresql-17 postgresql-contrib-17 && \ + apt-get clean && rm -rf /var/lib/apt/lists/* + +# Create render group for hardware acceleration support with GID 109 +RUN groupadd -r -g 109 render || true + +ENTRYPOINT ["/app/docker/entrypoint.sh"] diff --git a/docker/Dockerfile b/docker/Dockerfile index 26b54975..bfb35c11 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,35 +1,43 @@ -FROM python:3.13-slim AS builder +# Define base image build arguments (must be before any FROM) +ARG REPO_OWNER=dispatcharr +ARG REPO_NAME=dispatcharr +ARG BASE_TAG=base + +# --- Build frontend --- + +FROM node:24 AS frontend-builder + +WORKDIR /app/frontend +COPY ./frontend /app/frontend +# remove any node_modules that may have been copied from the host (x86) +RUN rm -rf node_modules || true; \ + npm install --no-audit --progress=false; +RUN npm run build; \ + rm -rf node_modules .cache + +# --- Redeclare build arguments for the next stage --- +ARG REPO_OWNER +ARG REPO_NAME +ARG BASE_TAG + +# --- Final image based on the dynamic base --- +FROM ghcr.io/${REPO_OWNER}/${REPO_NAME}:${BASE_TAG} AS final +ENV VIRTUAL_ENV=/dispatcharrpy +ENV PATH="$VIRTUAL_ENV/bin:$PATH" +WORKDIR /app + +# Copy application code +COPY . /app +# Copy nginx configuration +COPY ./docker/nginx.conf /etc/nginx/sites-enabled/default +# Clean out existing frontend folder +RUN rm -rf /app/frontend +# Copy built frontend assets +COPY --from=frontend-builder /app/frontend/dist /app/frontend/dist -# Define build arguments with default values -ARG BRANCH=main -# This will be overridden by the GitHub Actions workflow -# when building the Docker image for production. -ARG REPO_URL=https://github.com/Dispatcharr/Dispatcharr # Add timestamp argument ARG TIMESTAMP -ENV PATH="/dispatcharrpy/bin:$PATH" \ - VIRTUAL_ENV=/dispatcharrpy \ - DJANGO_SETTINGS_MODULE=dispatcharr.settings \ - PYTHONUNBUFFERED=1 \ - DISPATCHARR_BUILD=1 - -RUN apt-get update && \ - apt-get install -y --no-install-recommends \ - build-essential \ - curl \ - gcc \ - git \ - libpcre3 \ - libpcre3-dev \ - python3-dev \ - wget && \ - python -m pip install virtualenv && \ - virtualenv /dispatcharrpy && \ - git clone -b ${BRANCH} ${REPO_URL} /app && \ - cd /app && \ - rm -rf .git - # Update version.py with build timestamp if provided RUN if [ -n "$TIMESTAMP" ]; then \ echo "Updating timestamp to ${TIMESTAMP} in version.py" && \ @@ -37,72 +45,4 @@ RUN if [ -n "$TIMESTAMP" ]; then \ cat /app/version.py; \ fi -RUN cd /app && \ - pip install --no-cache-dir -r requirements.txt - -# Use a dedicated Node.js stage for frontend building -FROM node:20 AS frontend-builder -WORKDIR /app/frontend -COPY --from=builder /app /app -RUN corepack enable && corepack prepare yarn@stable --activate && \ - yarn install && \ - yarn build && \ - find . -maxdepth 1 ! -name '.' ! 
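The base image keeps a second NumPy wheel, built with the CPU baseline and dispatch disabled, in /opt so the entrypoint can swap it in when USE_LEGACY_NUMPY=true. To see which build is currently active, the same "baseline" check the entrypoint greps for can be done directly in Python:

    # Sketch: inspect numpy.show_config() for CPU baseline info, as the entrypoint does.
    import io
    import contextlib
    import numpy

    buf = io.StringIO()
    with contextlib.redirect_stdout(buf):
        numpy.show_config()

    if "baseline" in buf.getvalue().lower():
        print("optimized NumPy (CPU baseline enabled)")
    else:
        print("legacy NumPy build (no CPU baseline)")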
-name 'dist' -exec rm -rf '{}' \; - -FROM python:3.13-slim - -ENV PATH="/dispatcharrpy/bin:$PATH" \ - VIRTUAL_ENV=/dispatcharrpy \ - DJANGO_SETTINGS_MODULE=dispatcharr.settings \ - PYTHONUNBUFFERED=1 - -# Copy the virtual environment and application from the builder stage -COPY --from=builder /dispatcharrpy /dispatcharrpy -COPY --from=builder /app /app -COPY --from=frontend-builder /app/frontend/dist /app/frontend/dist - -# Run collectstatic after frontend assets are copied -RUN cd /app && python manage.py collectstatic --noinput - -# Install base dependencies with memory optimization -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ - curl \ - ffmpeg \ - libpcre3 \ - libpq-dev \ - nginx \ - procps \ - streamlink \ - wget \ - gnupg2 \ - lsb-release && \ - cp /app/docker/nginx.conf /etc/nginx/sites-enabled/default && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* - -# Set up Redis repository in a separate step -RUN curl -fsSL https://packages.redis.io/gpg | gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg && \ - chmod 644 /usr/share/keyrings/redis-archive-keyring.gpg && \ - echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | tee /etc/apt/sources.list.d/redis.list && \ - apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y redis-server && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* - -# Set up PostgreSQL repository and install in a separate step -RUN echo "=== setting up postgres ====" && \ - sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' && \ - wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor -o /usr/share/keyrings/postgresql-keyring.gpg && \ - echo "deb [signed-by=/usr/share/keyrings/postgresql-keyring.gpg] http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list && \ - apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y postgresql-14 postgresql-contrib-14 && \ - mkdir -p /data && \ - apt-get remove -y gnupg2 lsb-release && \ - apt-get clean && \ - apt-get autoremove -y && \ - rm -rf /tmp/* /var/lib/apt/lists/* /var/tmp/* - -WORKDIR /app - -CMD ["/app/docker/entrypoint.sh"] +ENTRYPOINT ["/app/docker/entrypoint.sh"] \ No newline at end of file diff --git a/docker/build-dev.sh b/docker/build-dev.sh index b02c314e..61640814 100755 --- a/docker/build-dev.sh +++ b/docker/build-dev.sh @@ -1,11 +1,65 @@ -#!/bin/bash -docker build --build-arg BRANCH=dev -t dispatcharr/dispatcharr:dev -f Dockerfile .. +#!/bin/bash +set -e + +# Default values +VERSION=$(python3 -c "import sys; sys.path.append('..'); import version; print(version.__version__)") +REGISTRY="dispatcharr" # Registry or private repo to push to +IMAGE="dispatcharr" # Image that we're building +BRANCH="dev" +ARCH="" # Architectures to build for, e.g. 
linux/amd64,linux/arm64 +PUSH=false + +usage() { + cat <<- EOF + To test locally: + ./build-dev.sh + + To build and push to registry: + ./build-dev.sh -p + + To build and push to a private registry: + ./build-dev.sh -p -r myregistry:5000 + + To build for -both- x86_64 and arm_64: + ./build-dev.sh -p -a linux/amd64,linux/arm64 + + Do it all: + ./build-dev.sh -p -r myregistry:5000 -a linux/amd64,linux/arm64 +EOF +exit 0 +} + +# Parse options +while getopts "pr:a:b:i:h" opt; do + case $opt in + r) REGISTRY="$OPTARG" ;; + a) ARCH="--platform $OPTARG" ;; + b) BRANCH="$OPTARG" ;; + i) IMAGE="$OPTARG" ;; + p) PUSH=true ;; + h) usage ;; + \?) echo "Invalid option: -$OPTARG" >&2; exit 1 ;; + esac +done + +BUILD_ARGS="BRANCH=$BRANCH" + +echo docker build --build-arg $BUILD_ARGS $ARCH -t $IMAGE +docker build -f Dockerfile --build-arg $BUILD_ARGS $ARCH -t $IMAGE .. +docker tag $IMAGE $IMAGE:$BRANCH +docker tag $IMAGE $IMAGE:$VERSION + +if [ -z "$PUSH" ]; then + echo "Please run 'docker push -t $IMAGE:dev -t $IMAGE:${VERSION}' when ready" +else + for TAG in latest "$VERSION" "$BRANCH"; do + docker tag "$IMAGE" "$REGISTRY/$IMAGE:$TAG" + docker push -q "$REGISTRY/$IMAGE:$TAG" + done + echo "Images pushed successfully." +fi + + + -# Get version information -VERSION=$(python -c "import sys; sys.path.append('..'); import version; print(version.__version__)") -# Build with version tag -docker build --build-arg BRANCH=dev \ - -t dispatcharr/dispatcharr:dev \ - -t dispatcharr/dispatcharr:${VERSION} \ - -f Dockerfile .. diff --git a/docker/comskip.ini b/docker/comskip.ini new file mode 100644 index 00000000..5dc94fd0 --- /dev/null +++ b/docker/comskip.ini @@ -0,0 +1,6 @@ +; Minimal default comskip config +edl_out=1 +output_edl=1 +verbose=0 +thread_count=0 + diff --git a/docker/docker-compose.aio.yml b/docker/docker-compose.aio.yml index 77b9bec1..2b1fd2ae 100644 --- a/docker/docker-compose.aio.yml +++ b/docker/docker-compose.aio.yml @@ -13,6 +13,33 @@ services: - DISPATCHARR_ENV=aio - REDIS_HOST=localhost - CELERY_BROKER_URL=redis://localhost:6379/0 + - DISPATCHARR_LOG_LEVEL=info + # Legacy CPU Support (Optional) + # Uncomment to enable legacy NumPy build for older CPUs (circa 2009) + # that lack support for newer baseline CPU features + #- USE_LEGACY_NUMPY=true + # Process Priority Configuration (Optional) + # Lower values = higher priority. Range: -20 (highest) to 19 (lowest) + # Negative values require cap_add: SYS_NICE (uncomment below) + #- UWSGI_NICE_LEVEL=-5 # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority) + #- CELERY_NICE_LEVEL=5 # Celery/EPG/Background tasks (default: 5, low priority) + # + # Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0) + #cap_add: + # - SYS_NICE + # Optional for hardware acceleration + #devices: + # - /dev/dri:/dev/dri # For Intel/AMD GPU acceleration (VA-API) + # Uncomment the following lines for NVIDIA GPU support + # NVidia GPU support (requires NVIDIA Container Toolkit) + #deploy: + # resources: + # reservations: + # devices: + # - driver: nvidia + # count: all + # capabilities: [gpu] + volumes: dispatcharr_data: diff --git a/docker/docker-compose.debug.yml b/docker/docker-compose.debug.yml index 40a87bfe..c576cfd1 100644 --- a/docker/docker-compose.debug.yml +++ b/docker/docker-compose.debug.yml @@ -3,7 +3,7 @@ services: # build: # context: .. 
# dockerfile: docker/Dockerfile.dev - image: dispatcharr/dispatcharr + image: ghcr.io/dispatcharr/dispatcharr:base container_name: dispatcharr_debug ports: - 5656:5656 # API port @@ -17,3 +17,17 @@ services: - DISPATCHARR_DEBUG=true - REDIS_HOST=localhost - CELERY_BROKER_URL=redis://localhost:6379/0 + - DISPATCHARR_LOG_LEVEL=trace + # Legacy CPU Support (Optional) + # Uncomment to enable legacy NumPy build for older CPUs (circa 2009) + # that lack support for newer baseline CPU features + #- USE_LEGACY_NUMPY=true + # Process Priority Configuration (Optional) + # Lower values = higher priority. Range: -20 (highest) to 19 (lowest) + # Negative values require cap_add: SYS_NICE (uncomment below) + #- UWSGI_NICE_LEVEL=-5 # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority) + #- CELERY_NICE_LEVEL=5 # Celery/EPG/Background tasks (default: 5, low priority) + # + # Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0) + #cap_add: + # - SYS_NICE diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml index 3b6f53df..b20c3296 100644 --- a/docker/docker-compose.dev.yml +++ b/docker/docker-compose.dev.yml @@ -3,7 +3,7 @@ services: # build: # context: .. # dockerfile: docker/Dockerfile.dev - image: ghcr.io/dispatcharr/dispatcharr:dev + image: ghcr.io/dispatcharr/dispatcharr:base container_name: dispatcharr_dev ports: - 5656:5656 @@ -11,11 +11,25 @@ services: - 8001:8001 volumes: - ../:/app - # - ./data/db:/data + - ./data:/data environment: - DISPATCHARR_ENV=dev - REDIS_HOST=localhost - CELERY_BROKER_URL=redis://localhost:6379/0 + - DISPATCHARR_LOG_LEVEL=debug + # Legacy CPU Support (Optional) + # Uncomment to enable legacy NumPy build for older CPUs (circa 2009) + # that lack support for newer baseline CPU features + #- USE_LEGACY_NUMPY=true + # Process Priority Configuration (Optional) + # Lower values = higher priority. Range: -20 (highest) to 19 (lowest) + # Negative values require cap_add: SYS_NICE (uncomment below) + #- UWSGI_NICE_LEVEL=-5 # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority) + #- CELERY_NICE_LEVEL=5 # Celery/EPG/Background tasks (default: 5, low priority) + # + # Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0) + #cap_add: + # - SYS_NICE pgadmin: image: dpage/pgadmin4 diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index e6a06603..e4093e4b 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -1,9 +1,11 @@ services: web: - image: dispatcharr/dispatcharr:alpha-v1 + image: ghcr.io/dispatcharr/dispatcharr:latest container_name: dispatcharr_web ports: - 9191:9191 + volumes: + - ./data:/data depends_on: - db - redis @@ -14,9 +16,38 @@ services: - POSTGRES_PASSWORD=secret - REDIS_HOST=redis - CELERY_BROKER_URL=redis://redis:6379/0 + - DISPATCHARR_LOG_LEVEL=info + # Legacy CPU Support (Optional) + # Uncomment to enable legacy NumPy build for older CPUs (circa 2009) + # that lack support for newer baseline CPU features + #- USE_LEGACY_NUMPY=true + # Process Priority Configuration (Optional) + # Lower values = higher priority. 
Range: -20 (highest) to 19 (lowest) + # Negative values require cap_add: SYS_NICE (uncomment below) + #- UWSGI_NICE_LEVEL=-5 # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority) + #- CELERY_NICE_LEVEL=5 # Celery/EPG/Background tasks (default: 5, low priority) + # + # Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0) + #cap_add: + # - SYS_NICE + # Optional for hardware acceleration + #group_add: + # - video + # #- render # Uncomment if your GPU requires it + #devices: + # - /dev/dri:/dev/dri # For Intel/AMD GPU acceleration (VA-API) + # Uncomment the following lines for NVIDIA GPU support + # NVidia GPU support (requires NVIDIA Container Toolkit) + #deploy: + # resources: + # reservations: + # devices: + # - driver: nvidia + # count: all + # capabilities: [gpu] celery: - image: dispatcharr/dispatcharr:alpha-v1 + image: ghcr.io/dispatcharr/dispatcharr:latest container_name: dispatcharr_celery depends_on: - db @@ -35,7 +66,7 @@ services: command: > bash -c " cd /app && - celery -A dispatcharr worker -l info + nice -n 5 celery -A dispatcharr worker -l info " db: diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index d2afb3a3..a50f2f49 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -27,17 +27,62 @@ echo_with_timestamp() { echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" } +# --- NumPy version switching for legacy hardware --- +if [ "$USE_LEGACY_NUMPY" = "true" ]; then + # Check if NumPy was compiled with baseline support + if /dispatcharrpy/bin/python -c "import numpy; numpy.show_config()" 2>&1 | grep -qi "baseline"; then + echo_with_timestamp "🔧 Switching to legacy NumPy (no CPU baseline)..." + /dispatcharrpy/bin/pip install --no-cache-dir --force-reinstall --no-deps /opt/numpy-*.whl + echo_with_timestamp "✅ Legacy NumPy installed" + else + echo_with_timestamp "✅ Legacy NumPy (no baseline) already installed, skipping reinstallation" + fi +fi + # Set PostgreSQL environment variables export POSTGRES_DB=${POSTGRES_DB:-dispatcharr} export POSTGRES_USER=${POSTGRES_USER:-dispatch} export POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-secret} export POSTGRES_HOST=${POSTGRES_HOST:-localhost} export POSTGRES_PORT=${POSTGRES_PORT:-5432} - +export PG_VERSION=$(ls /usr/lib/postgresql/ | sort -V | tail -n 1) +export PG_BINDIR="/usr/lib/postgresql/${PG_VERSION}/bin" export REDIS_HOST=${REDIS_HOST:-localhost} export REDIS_DB=${REDIS_DB:-0} export DISPATCHARR_PORT=${DISPATCHARR_PORT:-9191} +export LIBVA_DRIVERS_PATH='/usr/local/lib/x86_64-linux-gnu/dri' +export LD_LIBRARY_PATH='/usr/local/lib' +export SECRET_FILE="/data/jwt" +# Ensure Django secret key exists or generate a new one +if [ ! -f "$SECRET_FILE" ]; then + echo "Generating new Django secret key..." 
+ old_umask=$(umask) + umask 077 + tmpfile="$(mktemp "${SECRET_FILE}.XXXXXX")" || { echo "mktemp failed"; exit 1; } + python3 - <<'PY' >"$tmpfile" || { echo "secret generation failed"; rm -f "$tmpfile"; exit 1; } +import secrets +print(secrets.token_urlsafe(64)) +PY + mv -f "$tmpfile" "$SECRET_FILE" || { echo "move failed"; rm -f "$tmpfile"; exit 1; } + umask $old_umask +fi +export DJANGO_SECRET_KEY="$(cat "$SECRET_FILE")" +# Process priority configuration +# UWSGI_NICE_LEVEL: Absolute nice value for uWSGI/streaming (default: 0 = normal priority) +# CELERY_NICE_LEVEL: Absolute nice value for Celery/background tasks (default: 5 = low priority) +# Note: The script will automatically calculate the relative offset for Celery since it's spawned by uWSGI +export UWSGI_NICE_LEVEL=${UWSGI_NICE_LEVEL:-0} +CELERY_NICE_ABSOLUTE=${CELERY_NICE_LEVEL:-5} + +# Calculate relative nice value for Celery (since nice is relative to parent process) +# Celery is spawned by uWSGI, so we need to add the offset to reach the desired absolute value +export CELERY_NICE_LEVEL=$((CELERY_NICE_ABSOLUTE - UWSGI_NICE_LEVEL)) + +# Set LIBVA_DRIVER_NAME if user has specified it +if [ -v LIBVA_DRIVER_NAME ]; then + export LIBVA_DRIVER_NAME +fi # Extract version information from version.py export DISPATCHARR_VERSION=$(python -c "import sys; sys.path.append('/app'); import version; print(version.__version__)") export DISPATCHARR_TIMESTAMP=$(python -c "import sys; sys.path.append('/app'); import version; print(version.__timestamp__ or '')") @@ -48,53 +93,84 @@ if [ -n "$DISPATCHARR_TIMESTAMP" ]; then else echo "📦 Dispatcharr version: ${DISPATCHARR_VERSION}" fi +export DISPATCHARR_LOG_LEVEL +# Set log level with default if not provided +DISPATCHARR_LOG_LEVEL=${DISPATCHARR_LOG_LEVEL:-INFO} +# Convert to uppercase +DISPATCHARR_LOG_LEVEL=${DISPATCHARR_LOG_LEVEL^^} + + +echo "Environment DISPATCHARR_LOG_LEVEL set to: '${DISPATCHARR_LOG_LEVEL}'" + +# Also make the log level available in /etc/environment for all login shells +#grep -q "DISPATCHARR_LOG_LEVEL" /etc/environment || echo "DISPATCHARR_LOG_LEVEL=${DISPATCHARR_LOG_LEVEL}" >> /etc/environment # READ-ONLY - don't let users change these export POSTGRES_DIR=/data/db # Global variables, stored so other users inherit them if [[ ! 
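The reason CELERY_NICE_LEVEL gets converted to an offset is that `nice` (and Python's os.nice) is relative to the parent's niceness, and Celery is spawned by uWSGI, which may itself already be reniced. A small sketch of the arithmetic, using the example values from the compose comments:

    # Sketch: why the entrypoint computes a relative nice value for Celery.
    UWSGI_NICE_LEVEL = -5        # example: high-priority streaming
    CELERY_NICE_ABSOLUTE = 5     # desired absolute niceness for background tasks

    relative = CELERY_NICE_ABSOLUTE - UWSGI_NICE_LEVEL   # 10, as computed above
    print(f"a child inheriting niceness {UWSGI_NICE_LEVEL} must be reniced by +{relative} "
          f"to end up at {CELERY_NICE_ABSOLUTE}")
    # In the Celery child process: os.nice(relative)  ->  resulting niceness == 5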
-f /etc/profile.d/dispatcharr.sh ]]; then - echo "export PATH=$PATH" >> /etc/profile.d/dispatcharr.sh - echo "export VIRTUAL_ENV=$VIRTUAL_ENV" >> /etc/profile.d/dispatcharr.sh - echo "export DJANGO_SETTINGS_MODULE=$DJANGO_SETTINGS_MODULE" >> /etc/profile.d/dispatcharr.sh - echo "export PYTHONUNBUFFERED=$PYTHONUNBUFFERED" >> /etc/profile.d/dispatcharr.sh - echo "export POSTGRES_DB=$POSTGRES_DB" >> /etc/profile.d/dispatcharr.sh - echo "export POSTGRES_USER=$POSTGRES_USER" >> /etc/profile.d/dispatcharr.sh - echo "export POSTGRES_PASSWORD=$POSTGRES_PASSWORD" >> /etc/profile.d/dispatcharr.sh - echo "export POSTGRES_HOST=$POSTGRES_HOST" >> /etc/profile.d/dispatcharr.sh - echo "export POSTGRES_PORT=$POSTGRES_PORT" >> /etc/profile.d/dispatcharr.sh - echo "export DISPATCHARR_ENV=$DISPATCHARR_ENV" >> /etc/profile.d/dispatcharr.sh - echo "export DISPATCHARR_DEBUG=$DISPATCHARR_DEBUG" >> /etc/profile.d/dispatcharr.sh - echo "export REDIS_HOST=$REDIS_HOST" >> /etc/profile.d/dispatcharr.sh - echo "export REDIS_DB=$REDIS_DB" >> /etc/profile.d/dispatcharr.sh - echo "export POSTGRES_DIR=$POSTGRES_DIR" >> /etc/profile.d/dispatcharr.sh - echo "export DISPATCHARR_PORT=$DISPATCHARR_PORT" >> /etc/profile.d/dispatcharr.sh - echo "export DISPATCHARR_VERSION=$DISPATCHARR_VERSION" >> /etc/profile.d/dispatcharr.sh - echo "export DISPATCHARR_TIMESTAMP=$DISPATCHARR_TIMESTAMP" >> /etc/profile.d/dispatcharr.sh + # Define all variables to process + variables=( + PATH VIRTUAL_ENV DJANGO_SETTINGS_MODULE PYTHONUNBUFFERED PYTHONDONTWRITEBYTECODE + POSTGRES_DB POSTGRES_USER POSTGRES_PASSWORD POSTGRES_HOST POSTGRES_PORT + DISPATCHARR_ENV DISPATCHARR_DEBUG DISPATCHARR_LOG_LEVEL + REDIS_HOST REDIS_DB POSTGRES_DIR DISPATCHARR_PORT + DISPATCHARR_VERSION DISPATCHARR_TIMESTAMP LIBVA_DRIVERS_PATH LIBVA_DRIVER_NAME LD_LIBRARY_PATH + CELERY_NICE_LEVEL UWSGI_NICE_LEVEL DJANGO_SECRET_KEY + ) + + # Process each variable for both profile.d and environment + for var in "${variables[@]}"; do + # Check if the variable is set in the environment + if [ -n "${!var+x}" ]; then + # Add to profile.d + echo "export ${var}=${!var}" >> /etc/profile.d/dispatcharr.sh + # Add to /etc/environment if not already there + grep -q "^${var}=" /etc/environment || echo "${var}=${!var}" >> /etc/environment + else + echo "Warning: Environment variable $var is not set" + fi + done fi chmod +x /etc/profile.d/dispatcharr.sh -pip install django-filter +# Ensure root's .bashrc sources the profile.d scripts for interactive non-login shells +if ! grep -q "profile.d/dispatcharr.sh" /root/.bashrc 2>/dev/null; then + cat >> /root/.bashrc << 'EOF' + +# Source Dispatcharr environment variables +if [ -f /etc/profile.d/dispatcharr.sh ]; then + . /etc/profile.d/dispatcharr.sh +fi +EOF +fi # Run init scripts -echo "Starting init process..." +echo "Starting user setup..." . /app/docker/init/01-user-setup.sh +echo "Setting up PostgreSQL..." . /app/docker/init/02-postgres.sh +echo "Starting init process..." . /app/docker/init/03-init-dispatcharr.sh # Start PostgreSQL echo "Starting Postgres..." 
-su - postgres -c "/usr/lib/postgresql/14/bin/pg_ctl -D ${POSTGRES_DIR} start -w -t 300 -o '-c port=${POSTGRES_PORT}'" +su - postgres -c "$PG_BINDIR/pg_ctl -D ${POSTGRES_DIR} start -w -t 300 -o '-c port=${POSTGRES_PORT}'" # Wait for PostgreSQL to be ready -until su - postgres -c "/usr/lib/postgresql/14/bin/pg_isready -h ${POSTGRES_HOST} -p ${POSTGRES_PORT}" >/dev/null 2>&1; do +until su - postgres -c "$PG_BINDIR/pg_isready -h ${POSTGRES_HOST} -p ${POSTGRES_PORT}" >/dev/null 2>&1; do echo_with_timestamp "Waiting for PostgreSQL to be ready..." sleep 1 done -postgres_pid=$(su - postgres -c "/usr/lib/postgresql/14/bin/pg_ctl -D ${POSTGRES_DIR} status" | sed -n 's/.*PID: \([0-9]\+\).*/\1/p') +postgres_pid=$(su - postgres -c "$PG_BINDIR/pg_ctl -D ${POSTGRES_DIR} status" | sed -n 's/.*PID: \([0-9]\+\).*/\1/p') echo "✅ Postgres started with PID $postgres_pid" pids+=("$postgres_pid") +# Ensure database encoding is UTF8 +. /app/docker/init/02-postgres.sh +ensure_utf8_encoding + if [[ "$DISPATCHARR_ENV" = "dev" ]]; then . /app/docker/init/99-init-dev.sh echo "Starting frontend dev environment" @@ -110,9 +186,9 @@ else pids+=("$nginx_pid") fi -cd /app -python manage.py migrate --noinput -python manage.py collectstatic --noinput +# Run Django commands as non-root user to prevent permission issues +su - $POSTGRES_USER -c "cd /app && python manage.py migrate --noinput" +su - $POSTGRES_USER -c "cd /app && python manage.py collectstatic --noinput" # Select proper uwsgi config based on environment if [ "$DISPATCHARR_ENV" = "dev" ] && [ "$DISPATCHARR_DEBUG" != "true" ]; then @@ -126,9 +202,20 @@ else uwsgi_file="/app/docker/uwsgi.ini" fi -su - $POSTGRES_USER -c "cd /app && uwsgi --ini $uwsgi_file &" -uwsgi_pid=$(pgrep uwsgi | sort | head -n1) -echo "✅ uwsgi started with PID $uwsgi_pid" +# Set base uwsgi args +uwsgi_args="--ini $uwsgi_file" + +# Conditionally disable logging if not in debug mode +if [ "$DISPATCHARR_DEBUG" != "true" ]; then + uwsgi_args+=" --disable-logging" +fi + +# Launch uwsgi with configurable nice level (default: 0 for normal priority) +# Users can override via UWSGI_NICE_LEVEL environment variable in docker-compose +# Start with nice as root, then use setpriv to drop privileges to dispatch user +# This preserves both the nice value and environment variables +nice -n $UWSGI_NICE_LEVEL su - "$POSTGRES_USER" -c "cd /app && exec /dispatcharrpy/bin/uwsgi $uwsgi_args" & uwsgi_pid=$! +echo "✅ uwsgi started with PID $uwsgi_pid (nice $UWSGI_NICE_LEVEL)" pids+=("$uwsgi_pid") # sed -i 's/protected-mode yes/protected-mode no/g' /etc/redis/redis.conf @@ -162,9 +249,18 @@ pids+=("$uwsgi_pid") # echo "✅ celery beat started with PID $beat_pid" # pids+=("$beat_pid") + +# Wait for services to fully initialize before checking hardware +echo "⏳ Waiting for services to fully initialize before hardware check..." +sleep 5 + +# Run hardware check +echo "🔍 Running hardware acceleration check..." +. /app/docker/init/04-check-hwaccel.sh + # Wait for at least one process to exit and log the process that exited first if [ ${#pids[@]} -gt 0 ]; then - echo "⏳ Waiting for processes to exit..." + echo "⏳ Dispatcharr is running. Monitoring processes..." 
while kill -0 "${pids[@]}" 2>/dev/null; do sleep 1 # Wait for a second before checking again done diff --git a/docker/init/01-user-setup.sh b/docker/init/01-user-setup.sh index 5be17671..d7041265 100644 --- a/docker/init/01-user-setup.sh +++ b/docker/init/01-user-setup.sh @@ -4,10 +4,21 @@ export PUID=${PUID:-1000} export PGID=${PGID:-1000} -# Create group if it doesn't exist -if ! getent group "$PGID" >/dev/null 2>&1; then +# Check if group with PGID exists +if getent group "$PGID" >/dev/null 2>&1; then + # Group exists, check if it's named 'dispatch' + existing_group=$(getent group "$PGID" | cut -d: -f1) + if [ "$existing_group" != "dispatch" ]; then + # Rename the existing group to 'dispatch' + groupmod -n "dispatch" "$existing_group" + echo "Group $existing_group with GID $PGID renamed to dispatch" + fi +else + # Group doesn't exist, create it groupadd -g "$PGID" dispatch + echo "Group dispatch with GID $PGID created" fi + # Create user if it doesn't exist if ! getent passwd $PUID > /dev/null 2>&1; then useradd -u $PUID -g $PGID -m $POSTGRES_USER @@ -18,5 +29,62 @@ else fi fi +# Get the GID of /dev/dri/renderD128 on the host (must be mounted into container) +if [ -e "/dev/dri/renderD128" ]; then + HOST_RENDER_GID=$(stat -c '%g' /dev/dri/renderD128) + + # Check if this GID belongs to the video group + VIDEO_GID=$(getent group video 2>/dev/null | cut -d: -f3) + + if [ "$HOST_RENDER_GID" = "$VIDEO_GID" ]; then + echo "RenderD128 GID ($HOST_RENDER_GID) matches video group GID. Using video group for GPU access." + # Make sure POSTGRES_USER is in video group + if ! id -nG "$POSTGRES_USER" | grep -qw "video"; then + usermod -a -G video "$POSTGRES_USER" + echo "Added user $POSTGRES_USER to video group for GPU access" + fi + else + # We need to ensure render group exists with correct GID + if getent group render >/dev/null; then + CURRENT_RENDER_GID=$(getent group render | cut -d: -f3) + if [ "$CURRENT_RENDER_GID" != "$HOST_RENDER_GID" ]; then + # Check if another group already has the target GID + if getent group "$HOST_RENDER_GID" >/dev/null 2>&1; then + EXISTING_GROUP=$(getent group "$HOST_RENDER_GID" | cut -d: -f1) + echo "Warning: Cannot change render group GID to $HOST_RENDER_GID as it's already used by group '$EXISTING_GROUP'" + # Add user to the existing group with the target GID to ensure device access + if ! id -nG "$POSTGRES_USER" | grep -qw "$EXISTING_GROUP"; then + usermod -a -G "$EXISTING_GROUP" "$POSTGRES_USER" || echo "Warning: Failed to add user to $EXISTING_GROUP group" + echo "Added user $POSTGRES_USER to $EXISTING_GROUP group for GPU access" + fi + else + echo "Changing render group GID from $CURRENT_RENDER_GID to $HOST_RENDER_GID" + groupmod -g "$HOST_RENDER_GID" render || echo "Warning: Failed to change render group GID. Continuing anyway..." + fi + fi + else + echo "Creating render group with GID $HOST_RENDER_GID" + groupadd -g "$HOST_RENDER_GID" render + fi + + # Make sure POSTGRES_USER is in render group + if ! id -nG "$POSTGRES_USER" | grep -qw "render"; then + usermod -a -G render "$POSTGRES_USER" + echo "Added user $POSTGRES_USER to render group for GPU access" + fi + fi +else + echo "Warning: /dev/dri/renderD128 not found. GPU acceleration may not be available." +fi + +# Always add user to video group for hardware acceleration if it exists +# (some systems use video group for general GPU access) +if getent group video >/dev/null 2>&1; then + if ! 
id -nG "$POSTGRES_USER" | grep -qw "video"; then + usermod -a -G video "$POSTGRES_USER" + echo "Added user $POSTGRES_USER to video group for hardware acceleration access" + fi +fi + # Run nginx as specified user -sed -i 's/user www-data;/user dispatch;/g' /etc/nginx/nginx.conf +sed -i "s/user www-data;/user $POSTGRES_USER;/g" /etc/nginx/nginx.conf diff --git a/docker/init/02-postgres.sh b/docker/init/02-postgres.sh index 69a81dd4..e36dd744 100644 --- a/docker/init/02-postgres.sh +++ b/docker/init/02-postgres.sh @@ -1,5 +1,4 @@ #!/bin/bash - # Temporary migration from postgres in /data to $POSTGRES_DIR. Can likely remove # some time in the future. if [ -e "/data/postgresql.conf" ]; then @@ -27,6 +26,66 @@ if [ -e "/data/postgresql.conf" ]; then echo "Migration completed successfully." fi +PG_VERSION_FILE="${POSTGRES_DIR}/PG_VERSION" + +# Detect current version from data directory, if present +if [ -f "$PG_VERSION_FILE" ]; then + CURRENT_VERSION=$(cat "$PG_VERSION_FILE") +else + CURRENT_VERSION="" +fi + +# Only run upgrade if current version is set and not the target +if [ -n "$CURRENT_VERSION" ] && [ "$CURRENT_VERSION" != "$PG_VERSION" ]; then + echo "Detected PostgreSQL data directory version $CURRENT_VERSION, upgrading to $PG_VERSION..." + # Set binary paths for upgrade if needed + OLD_BINDIR="/usr/lib/postgresql/${CURRENT_VERSION}/bin" + NEW_BINDIR="/usr/lib/postgresql/${PG_VERSION}/bin" + PG_INSTALLED_BY_SCRIPT=0 + if [ ! -d "$OLD_BINDIR" ]; then + echo "PostgreSQL binaries for version $CURRENT_VERSION not found. Installing..." + apt update && apt install -y postgresql-$CURRENT_VERSION postgresql-contrib-$CURRENT_VERSION + if [ $? -ne 0 ]; then + echo "Failed to install PostgreSQL version $CURRENT_VERSION. Exiting." + exit 1 + fi + PG_INSTALLED_BY_SCRIPT=1 + fi + + # Prepare new data directory + NEW_POSTGRES_DIR="${POSTGRES_DIR}_$PG_VERSION" + + # Remove new data directory if it already exists (from a failed/partial upgrade) + if [ -d "$NEW_POSTGRES_DIR" ]; then + echo "Warning: $NEW_POSTGRES_DIR already exists. Removing it to avoid upgrade issues." + rm -rf "$NEW_POSTGRES_DIR" + fi + + mkdir -p "$NEW_POSTGRES_DIR" + chown -R postgres:postgres "$NEW_POSTGRES_DIR" + chmod 700 "$NEW_POSTGRES_DIR" + + # Initialize new data directory + echo "Initializing new PostgreSQL data directory at $NEW_POSTGRES_DIR..." + su - postgres -c "$NEW_BINDIR/initdb -D $NEW_POSTGRES_DIR" + echo "Running pg_upgrade from $OLD_BINDIR to $NEW_BINDIR..." + # Run pg_upgrade + su - postgres -c "$NEW_BINDIR/pg_upgrade -b $OLD_BINDIR -B $NEW_BINDIR -d $POSTGRES_DIR -D $NEW_POSTGRES_DIR" + + # Move old data directory for backup, move new into place + mv "$POSTGRES_DIR" "${POSTGRES_DIR}_backup_${CURRENT_VERSION}_$(date +%s)" + mv "$NEW_POSTGRES_DIR" "$POSTGRES_DIR" + + echo "Upgrade complete. Old data directory backed up." + + # Uninstall PostgreSQL if we installed it just for upgrade + if [ "$PG_INSTALLED_BY_SCRIPT" -eq 1 ]; then + echo "Uninstalling temporary PostgreSQL $CURRENT_VERSION packages..." + apt remove -y postgresql-$CURRENT_VERSION postgresql-contrib-$CURRENT_VERSION + apt autoremove -y + fi +fi + # Initialize PostgreSQL database if [ -z "$(ls -A $POSTGRES_DIR)" ]; then echo "Initializing PostgreSQL database..." 
@@ -35,29 +94,28 @@ if [ -z "$(ls -A $POSTGRES_DIR)" ]; then chmod 700 $POSTGRES_DIR # Initialize PostgreSQL - su - postgres -c "/usr/lib/postgresql/14/bin/initdb -D ${POSTGRES_DIR}" + su - postgres -c "$PG_BINDIR/initdb -D ${POSTGRES_DIR}" # Configure PostgreSQL echo "host all all 0.0.0.0/0 md5" >> "${POSTGRES_DIR}/pg_hba.conf" echo "listen_addresses='*'" >> "${POSTGRES_DIR}/postgresql.conf" # Start PostgreSQL echo "Starting Postgres..." - su - postgres -c "/usr/lib/postgresql/14/bin/pg_ctl -D ${POSTGRES_DIR} start -w -t 300 -o '-c port=${POSTGRES_PORT}'" + su - postgres -c "$PG_BINDIR/pg_ctl -D ${POSTGRES_DIR} start -w -t 300 -o '-c port=${POSTGRES_PORT}'" # Wait for PostgreSQL to be ready - until su - postgres -c "/usr/lib/postgresql/14/bin/pg_isready -h ${POSTGRES_HOST} -p ${POSTGRES_PORT}" >/dev/null 2>&1; do + until su - postgres -c "$PG_BINDIR/pg_isready -h ${POSTGRES_HOST} -p ${POSTGRES_PORT}" >/dev/null 2>&1; do echo "Waiting for PostgreSQL to be ready..." sleep 1 done - postgres_pid=$(su - postgres -c "/usr/lib/postgresql/14/bin/pg_ctl -D ${POSTGRES_DIR} status" | sed -n 's/.*PID: \([0-9]\+\).*/\1/p') + postgres_pid=$(su - postgres -c "$PG_BINDIR/pg_ctl -D ${POSTGRES_DIR} status" | sed -n 's/.*PID: \([0-9]\+\).*/\1/p') # Setup database if needed if ! su - postgres -c "psql -p ${POSTGRES_PORT} -tAc \"SELECT 1 FROM pg_database WHERE datname = '$POSTGRES_DB';\"" | grep -q 1; then # Create PostgreSQL database echo "Creating PostgreSQL database..." - su - postgres -c "createdb -p ${POSTGRES_PORT} ${POSTGRES_DB}" - - # Create user, set ownership, and grant privileges + su - postgres -c "createdb -p ${POSTGRES_PORT} --encoding=UTF8 ${POSTGRES_DB}" + # Create user, set ownership, and grant privileges echo "Creating PostgreSQL user..." su - postgres -c "psql -p ${POSTGRES_PORT} -d ${POSTGRES_DB}" < $DUMP_FILE" + # Drop and recreate database with UTF8 encoding using template0 + su - postgres -c "dropdb -p ${POSTGRES_PORT} $POSTGRES_DB" + # Recreate database with UTF8 encoding + su - postgres -c "createdb -p ${POSTGRES_PORT} --encoding=UTF8 --template=template0 ${POSTGRES_DB}" + + + # Restore data + su - postgres -c "psql -p ${POSTGRES_PORT} -d $POSTGRES_DB < $DUMP_FILE" + #configure_db + + + rm -f "$DUMP_FILE" + echo "Database $POSTGRES_DB converted to UTF8 and permissions set." + fi +} + + diff --git a/docker/init/03-init-dispatcharr.sh b/docker/init/03-init-dispatcharr.sh index b9c3c63b..0c317017 100644 --- a/docker/init/03-init-dispatcharr.sh +++ b/docker/init/03-init-dispatcharr.sh @@ -1,24 +1,80 @@ #!/bin/bash -mkdir -p /data/logos -mkdir -p /data/recordings -mkdir -p /data/uploads/m3us -mkdir -p /data/uploads/epgs -mkdir -p /data/m3us -mkdir -p /data/epgs -mkdir -p /app/logo_cache -mkdir -p /app/media +# Define directories that need to exist and be owned by PUID:PGID +DATA_DIRS=( + "/data/logos" + "/data/recordings" + "/data/uploads/m3us" + "/data/uploads/epgs" + "/data/m3us" + "/data/epgs" + "/data/plugins" + "/data/models" +) +APP_DIRS=( + "/app/logo_cache" + "/app/media" + "/app/static" +) + +# Create all directories +for dir in "${DATA_DIRS[@]}" "${APP_DIRS[@]}"; do + mkdir -p "$dir" +done + +# Ensure /app itself is owned by PUID:PGID (needed for uwsgi socket creation) +if [ "$(id -u)" = "0" ] && [ -d "/app" ]; then + if [ "$(stat -c '%u:%g' /app)" != "$PUID:$PGID" ]; then + echo "Fixing ownership for /app (non-recursive)" + chown $PUID:$PGID /app + fi +fi +# Configure nginx port +if ! 
[[ "$DISPATCHARR_PORT" =~ ^[0-9]+$ ]]; then + echo "⚠️ Warning: DISPATCHARR_PORT is not a valid integer, using default port 9191" + DISPATCHARR_PORT=9191 +fi sed -i "s/NGINX_PORT/${DISPATCHARR_PORT}/g" /etc/nginx/sites-enabled/default +# Configure nginx based on IPv6 availability +if ip -6 addr show | grep -q "inet6"; then + echo "✅ IPv6 is available, enabling IPv6 in nginx" +else + echo "⚠️ IPv6 not available, disabling IPv6 in nginx" + sed -i '/listen \[::\]:/d' /etc/nginx/sites-enabled/default +fi + # NOTE: mac doesn't run as root, so only manage permissions # if this script is running as root if [ "$(id -u)" = "0" ]; then - # Needs to own ALL of /data except db, we handle that below - chown -R $PUID:$PGID /data - chown -R $PUID:$PGID /app + # Fix data directories (non-recursive to avoid touching user files) + for dir in "${DATA_DIRS[@]}"; do + if [ -d "$dir" ] && [ "$(stat -c '%u:%g' "$dir")" != "$PUID:$PGID" ]; then + echo "Fixing ownership for $dir" + chown $PUID:$PGID "$dir" + fi + done + + # Fix app directories (recursive since they're managed by the app) + for dir in "${APP_DIRS[@]}"; do + if [ -d "$dir" ] && [ "$(stat -c '%u:%g' "$dir")" != "$PUID:$PGID" ]; then + echo "Fixing ownership for $dir (recursive)" + chown -R $PUID:$PGID "$dir" + fi + done + + # Database permissions + if [ -d /data/db ] && [ "$(stat -c '%u' /data/db)" != "$(id -u postgres)" ]; then + echo "Fixing ownership for /data/db" + chown -R postgres:postgres /data/db + fi + + # Fix /data directory ownership (non-recursive) + if [ -d "/data" ] && [ "$(stat -c '%u:%g' /data)" != "$PUID:$PGID" ]; then + echo "Fixing ownership for /data (non-recursive)" + chown $PUID:$PGID /data + fi - # Permissions - chown -R postgres:postgres /data/db chmod +x /data -fi +fi \ No newline at end of file diff --git a/docker/init/04-check-hwaccel.sh b/docker/init/04-check-hwaccel.sh new file mode 100644 index 00000000..f23cdd9a --- /dev/null +++ b/docker/init/04-check-hwaccel.sh @@ -0,0 +1,715 @@ +#!/bin/bash + +echo "🔍 Checking for GPU acceleration devices..." + +# Helper function for device access checks +check_dev() { + local dev=$1 + if [ -e "$dev" ]; then + if [ -r "$dev" ] && [ -w "$dev" ]; then + echo "✅ Device $dev is accessible." + else + echo "⚠️ Device $dev exists but is not accessible. Check permissions or container runtime options." + fi + else + echo "ℹ️ Device $dev does not exist." 
+ fi +} + +# Initialize device detection flags +ANY_GPU_DEVICES_FOUND=false +DRI_DEVICES_FOUND=false +NVIDIA_FOUND=false +NVIDIA_GPU_IN_LSPCI=false +INTEL_GPU_IN_LSPCI=false +AMD_GPU_IN_LSPCI=false + +# Check for all GPU types in hardware via lspci +if command -v lspci >/dev/null 2>&1; then + # Check for NVIDIA GPUs + if lspci | grep -i "NVIDIA" | grep -i "VGA\|3D\|Display" >/dev/null; then + NVIDIA_GPU_IN_LSPCI=true + NVIDIA_MODEL=$(lspci | grep -i "NVIDIA" | grep -i "VGA\|3D\|Display" | head -1 | sed -E 's/.*: (.*) \[.*/\1/' | sed 's/Corporation //') + fi + + # Check for Intel GPUs - making sure it's not already detected as NVIDIA + if lspci | grep -i "Intel" | grep -v "NVIDIA" | grep -i "VGA\|3D\|Display" >/dev/null; then + INTEL_GPU_IN_LSPCI=true + INTEL_MODEL=$(lspci | grep -i "Intel" | grep -v "NVIDIA" | grep -i "VGA\|3D\|Display" | head -1 | sed -E 's/.*: (.*) \[.*/\1/' | sed 's/Corporation //') + fi + + # Check for AMD GPUs - making sure it's not already detected as NVIDIA or Intel + if lspci | grep -i "AMD\|ATI\|Advanced Micro Devices" | grep -v "NVIDIA\|Intel" | grep -i "VGA\|3D\|Display" >/dev/null; then + AMD_GPU_IN_LSPCI=true + AMD_MODEL=$(lspci | grep -i "AMD\|ATI\|Advanced Micro Devices" | grep -v "NVIDIA\|Intel" | grep -i "VGA\|3D\|Display" | head -1 | sed -E 's/.*: (.*) \[.*/\1/' | sed 's/Corporation //' | sed 's/Technologies //') + fi + + # Display detected GPU hardware + if [ "$NVIDIA_GPU_IN_LSPCI" = true ]; then + echo "🔍 Hardware detection: NVIDIA GPU ($NVIDIA_MODEL)" + fi + if [ "$INTEL_GPU_IN_LSPCI" = true ]; then + echo "🔍 Hardware detection: Intel GPU ($INTEL_MODEL)" + fi + if [ "$AMD_GPU_IN_LSPCI" = true ]; then + echo "🔍 Hardware detection: AMD GPU ($AMD_MODEL)" + fi +fi + +# Silently check for any GPU devices first +for dev in /dev/dri/renderD* /dev/dri/card* /dev/nvidia*; do + if [ -e "$dev" ]; then + ANY_GPU_DEVICES_FOUND=true + break + fi +done + +# Only if devices might exist, show detailed checks +if [ "$ANY_GPU_DEVICES_FOUND" = true ]; then + # Check Intel/AMD VAAPI devices + echo "🔍 Checking for VAAPI device nodes (Intel/AMD)..." + for dev in /dev/dri/renderD* /dev/dri/card*; do + if [ -e "$dev" ]; then + DRI_DEVICES_FOUND=true + check_dev "$dev" + fi + done + + # Check NVIDIA device nodes + echo "🔍 Checking for NVIDIA device nodes..." + for dev in /dev/nvidia*; do + if [ -e "$dev" ]; then + NVIDIA_FOUND=true + check_dev "$dev" + fi + done + + # Show GPU device availability messages + if [ "$NVIDIA_FOUND" = false ] && [ "$NVIDIA_GPU_IN_LSPCI" = true ]; then + echo "⚠️ No NVIDIA device nodes available despite hardware detection." + echo " You may be able to use VAAPI for hardware acceleration, but NVENC/CUDA won't be available." + echo " For optimal performance, configure proper NVIDIA container runtime." + elif [ "$NVIDIA_FOUND" = false ]; then + echo "ℹ️ No NVIDIA device nodes found under /dev." + fi + + # Check for Intel/AMD GPUs that might not be fully accessible + if [ "$DRI_DEVICES_FOUND" = false ] && [ "$INTEL_GPU_IN_LSPCI" = true ]; then + echo "⚠️ Intel GPU detected in hardware but no DRI devices found." + echo " Hardware acceleration will not be available." + echo " Make sure /dev/dri/ devices are properly mapped to the container." + elif [ "$DRI_DEVICES_FOUND" = false ] && [ "$AMD_GPU_IN_LSPCI" = true ]; then + echo "⚠️ AMD GPU detected in hardware but no DRI devices found." + echo " Hardware acceleration will not be available." + echo " Make sure /dev/dri/ devices are properly mapped to the container." 
+ fi +else + # No GPU devices found, skip the detailed checks + echo "❌ No GPU acceleration devices detected in this container." + echo "ℹ️ Checking for potential configuration issues..." + + # Check if the host might have GPUs that aren't passed to the container + if command -v lspci >/dev/null 2>&1; then + if lspci | grep -i "VGA\|3D\|Display" | grep -i "NVIDIA\|Intel\|AMD" >/dev/null; then + echo "⚠️ Host system appears to have GPU hardware, but no devices are accessible to the container." + echo " - For NVIDIA GPUs: Ensure NVIDIA Container Runtime is configured properly" + echo " - For Intel/AMD GPUs: Verify that /dev/dri/ devices are passed to the container" + echo " - Check your Docker run command or docker-compose.yml for proper device mapping" + else + echo "ℹ️ No GPU hardware detected on the host system. CPU-only transcoding will be used." + fi + else + echo "ℹ️ Unable to check host GPU hardware (lspci not available). CPU-only transcoding will be used." + fi + + echo "📋 ==================================================" + echo "✅ GPU detection script complete. No GPUs available for hardware acceleration." + # Don't exit the container - just return from this script + return 0 2>/dev/null || true +fi + +# Check group membership for GPU access - context-aware based on hardware +echo "🔍 Checking user group memberships and device access..." +VIDEO_GID=$(getent group video | cut -d: -f3) +RENDER_GID=$(getent group render | cut -d: -f3) +NVIDIA_CONTAINER_TOOLKIT_FOUND=false +NVIDIA_ENV_MISMATCH=false + +# Improved device access check function +check_user_device_access() { + local device=$1 + local user=$2 + if [ -e "$device" ];then + if su -c "test -r $device && test -w $device" - $user 2>/dev/null; then + echo "✅ User $user has full access to $device" + return 0 + else + echo "⚠️ User $user cannot access $device (permission denied)" + return 1 + fi + else + # Device doesn't exist, no need to report here + return 2 + fi +} + +# Direct device access verification for DRI (Intel/AMD) +echo "🔍 Verifying if $POSTGRES_USER has direct access to GPU devices..." +HAS_DRI_ACCESS=false +DRI_ACCESS_COUNT=0 +DRI_DEVICE_COUNT=0 + +for dev in /dev/dri/renderD* /dev/dri/card*; do + if [ -e "$dev" ]; then + DRI_DEVICE_COUNT=$((DRI_DEVICE_COUNT + 1)) + if check_user_device_access "$dev" "$POSTGRES_USER"; then + DRI_ACCESS_COUNT=$((DRI_ACCESS_COUNT + 1)) + HAS_DRI_ACCESS=true + fi + fi +done + +# Direct device access verification for NVIDIA +HAS_NVIDIA_ACCESS=false +NVIDIA_ACCESS_COUNT=0 +NVIDIA_DEVICE_COUNT=0 + +for dev in /dev/nvidia*; do + if [ -e "$dev" ]; then + NVIDIA_DEVICE_COUNT=$((NVIDIA_DEVICE_COUNT + 1)) + if check_user_device_access "$dev" "$POSTGRES_USER"; then + NVIDIA_ACCESS_COUNT=$((NVIDIA_ACCESS_COUNT + 1)) + HAS_NVIDIA_ACCESS=true + fi + fi +done + +# Summary of device access +if [ $DRI_DEVICE_COUNT -gt 0 ]; then + if [ $DRI_ACCESS_COUNT -eq $DRI_DEVICE_COUNT ]; then + echo "✅ User $POSTGRES_USER has access to all DRI devices ($DRI_ACCESS_COUNT/$DRI_DEVICE_COUNT)" + echo " VAAPI hardware acceleration should work properly." + else + echo "⚠️ User $POSTGRES_USER has limited access to DRI devices ($DRI_ACCESS_COUNT/$DRI_DEVICE_COUNT)" + echo " VAAPI hardware acceleration may not work properly." + echo " Consider adding $POSTGRES_USER to the 'video' and/or 'render' groups." 
+ fi +fi + +if [ $NVIDIA_DEVICE_COUNT -gt 0 ]; then + if [ $NVIDIA_ACCESS_COUNT -eq $NVIDIA_DEVICE_COUNT ]; then + echo "✅ User $POSTGRES_USER has access to all NVIDIA devices ($NVIDIA_ACCESS_COUNT/$NVIDIA_DEVICE_COUNT)" + echo " NVIDIA hardware acceleration should work properly." + else + echo "⚠️ User $POSTGRES_USER has limited access to NVIDIA devices ($NVIDIA_ACCESS_COUNT/$NVIDIA_DEVICE_COUNT)" + echo " NVIDIA hardware acceleration may not work properly." + if [ "$NVIDIA_CONTAINER_TOOLKIT_FOUND" = false ]; then + echo " Consider adding $POSTGRES_USER to the 'video' group or use NVIDIA Container Toolkit." + fi + fi +fi + +# Check for traditional group memberships (as additional information) +USER_IN_VIDEO_GROUP=false +USER_IN_RENDER_GROUP=false + +if [ -n "$VIDEO_GID" ]; then + if id -nG "$POSTGRES_USER" 2>/dev/null | grep -qw "video"; then + USER_IN_VIDEO_GROUP=true + echo "ℹ️ User $POSTGRES_USER is in the 'video' group (GID $VIDEO_GID)." + fi +fi + +if [ -n "$RENDER_GID" ]; then + if id -nG "$POSTGRES_USER" 2>/dev/null | grep -qw "render"; then + USER_IN_RENDER_GROUP=true + echo "ℹ️ User $POSTGRES_USER is in the 'render' group (GID $RENDER_GID)." + fi +fi + +# Check if NVIDIA Container Toolkit is present through environment or CLI tool +# IMPORTANT: Only mark as found if both env vars AND actual NVIDIA devices exist +if [ "$NVIDIA_FOUND" = true ] && command -v nvidia-container-cli >/dev/null 2>&1; then + NVIDIA_CONTAINER_TOOLKIT_FOUND=true +# Check for environment variables set by NVIDIA Container Runtime, but only if NVIDIA hardware exists +elif [ "$NVIDIA_FOUND" = true ] && [ -n "$NVIDIA_VISIBLE_DEVICES" ] && [ -n "$NVIDIA_DRIVER_CAPABILITIES" ]; then + NVIDIA_CONTAINER_TOOLKIT_FOUND=true + echo "✅ NVIDIA Container Toolkit detected (via environment variables)." + echo " The container is properly configured with Docker Compose's 'driver: nvidia' syntax." +elif [ -n "$NVIDIA_VISIBLE_DEVICES" ] && [ -n "$NVIDIA_DRIVER_CAPABILITIES" ] && [ "$NVIDIA_FOUND" = false ]; then + NVIDIA_ENV_MISMATCH=true +fi + +# Removed duplicate video group checks here - consolidated into the earlier checks that include GID + +# Check NVIDIA Container Toolkit support +echo "🔍 Checking NVIDIA container runtime support..." + +# More reliable detection of NVIDIA Container Runtime +NVIDIA_RUNTIME_ACTIVE=false + +# Method 1: Check for nvidia-container-cli tool +if command -v nvidia-container-cli >/dev/null 2>&1; then + NVIDIA_RUNTIME_ACTIVE=true + echo "✅ NVIDIA Container Runtime detected (nvidia-container-cli found)." + + if nvidia-container-cli info >/dev/null 2>&1; then + echo "✅ NVIDIA container runtime is functional." + else + echo "⚠️ nvidia-container-cli found, but 'info' command failed. Runtime may be misconfigured." + fi +fi + +# Method 2: Check for NVIDIA Container Runtime specific files +if [ -e "/dev/.nv" ] || [ -e "/.nv" ] || [ -e "/.nvidia-container-runtime" ]; then + NVIDIA_RUNTIME_ACTIVE=true + echo "✅ NVIDIA Container Runtime files detected." +fi + +# Method 3: Check cgroup information for NVIDIA +if grep -q "nvidia" /proc/self/cgroup 2>/dev/null; then + NVIDIA_RUNTIME_ACTIVE=true + echo "✅ NVIDIA Container Runtime cgroups detected." +fi + +# Final verdict based on hardware AND runtime with improved messaging +if [ "$NVIDIA_FOUND" = true ] && ([ "$NVIDIA_RUNTIME_ACTIVE" = true ] || [ "$NVIDIA_CONTAINER_TOOLKIT_FOUND" = true ]); then + echo "✅ NVIDIA Container Runtime is properly configured with hardware access." 
+elif [ "$NVIDIA_FOUND" = true ] && [ "$NVIDIA_RUNTIME_ACTIVE" = false ] && [ "$NVIDIA_CONTAINER_TOOLKIT_FOUND" = false ]; then + echo "ℹ️ NVIDIA devices accessible via direct passthrough instead of Container Runtime." + echo " This works but consider using the 'deploy: resources: reservations: devices:' method in docker-compose." +elif [ "$NVIDIA_FOUND" = false ] && [ "$NVIDIA_RUNTIME_ACTIVE" = true ]; then + echo "⚠️ NVIDIA Container Runtime appears to be configured, but no NVIDIA devices found." + echo " Check that your host has NVIDIA drivers installed and GPUs are properly passed to the container." +elif [ "$DRI_DEVICES_FOUND" = true ] && [ "$NVIDIA_GPU_IN_LSPCI" = true ]; then + echo "ℹ️ Limited GPU access: Only DRI devices available for NVIDIA hardware." + echo " VAAPI acceleration may work but NVENC/CUDA won't be available." + echo " For full NVIDIA capabilities, configure the NVIDIA Container Runtime." +elif [ "$DRI_DEVICES_FOUND" = true ]; then + echo "ℹ️ Using Intel/AMD GPU hardware for acceleration via VAAPI." +else + echo "⚠️ No GPU acceleration devices detected. CPU-only transcoding will be used." +fi + +# Run nvidia-smi if available +if command -v nvidia-smi >/dev/null 2>&1; then + echo "🔍 Running nvidia-smi to verify GPU visibility..." + if nvidia-smi >/dev/null 2>&1; then + echo "✅ nvidia-smi successful - GPU is accessible to container!" + echo " This confirms hardware acceleration should be available to FFmpeg." + else + echo "⚠️ nvidia-smi command failed. GPU may not be properly mapped into container." + fi +else + echo "ℹ️ nvidia-smi not installed or not in PATH." +fi + +# Show relevant environment variables with contextual suggestions +echo "🔍 Checking GPU-related environment variables..." + +# Set flags based on device detection +DRI_DEVICES_FOUND=false +for dev in /dev/dri/renderD* /dev/dri/card*; do + if [ -e "$dev" ];then + DRI_DEVICES_FOUND=true + break + fi +done + +# Give contextual suggestions based on detected hardware +if [ "$DRI_DEVICES_FOUND" = true ]; then + # Detect Intel/AMD GPU model - skip this if we already detected GPUs earlier + if [ "$NVIDIA_GPU_IN_LSPCI" = false ] && [ "$INTEL_GPU_IN_LSPCI" = false ] && [ "$AMD_GPU_IN_LSPCI" = false ] && command -v lspci >/dev/null 2>&1; then + GPU_INFO=$(lspci -nn | grep -i "VGA\|Display" | head -1) + if [ -n "$GPU_INFO" ]; then + echo "🔍 Detected GPU: $GPU_INFO" + # Extract model for cleaner display in summary + GPU_MODEL=$(echo "$GPU_INFO" | sed -E 's/.*: (.*) \[.*/\1/' | sed 's/Corporation //' | sed 's/Technologies //') + fi + else + # Use already detected GPU model info + if [ "$NVIDIA_GPU_IN_LSPCI" = true ]; then + GPU_MODEL=$NVIDIA_MODEL + elif [ "$INTEL_GPU_IN_LSPCI" = true ]; then + GPU_MODEL=$INTEL_MODEL + elif [ "$AMD_GPU_IN_LSPCI" = true ]; then + GPU_MODEL=$AMD_MODEL + fi + fi + + if [ -n "$GPU_MODEL" ]; then + echo "🔍 GPU model: $GPU_MODEL" + fi + # Check for LIBVA_DRIVER_NAME environment variable + if [ -n "$LIBVA_DRIVER_NAME" ]; then + echo "ℹ️ LIBVA_DRIVER_NAME is set to '$LIBVA_DRIVER_NAME'" + echo " Note: If you experience issues with hardware acceleration, try removing this" + echo " environment variable to let the system auto-detect the appropriate driver." + else + # Check if we can detect the GPU type + if command -v lspci >/dev/null 2>&1; then + echo "ℹ️ VAAPI driver auto-detection is usually reliable. Settings below only needed if you experience issues." 
+ + # Create variables to store recommended driver and supported methods + INTEL_RECOMMENDED_DRIVER="" + INTEL_SUPPORTS_QSV=false + + # Use the Intel model information we already captured + if [ "$INTEL_GPU_IN_LSPCI" = true ] && [ -n "$INTEL_MODEL" ]; then + # Check for newer Intel generations that use iHD + if echo "$INTEL_MODEL" | grep -q -E "Arc|Xe|Alchemist|Tiger|Alder|Raptor|Meteor|Gen1[2-9]"; then + echo "💡 Detected Intel GPU that supports iHD (e.g. Gen12+/Arc/Xe)" + echo " Recommended: LIBVA_DRIVER_NAME=iHD" + echo " Note: Only set this environment variable if hardware acceleration doesn't work by default" + INTEL_RECOMMENDED_DRIVER="iHD" + INTEL_SUPPORTS_QSV=true + elif echo "$INTEL_MODEL" | grep -q -E "Coffee|Whiskey|Comet|Gen11"; then + echo "💡 Detected Intel GPU that supports both i965 and iHD (e.g. Gen9.5/Gen11)" + echo " Preferred: LIBVA_DRIVER_NAME=iHD" + echo " Recommended: Try i965 only if iHD has compatibility issues" + echo " Note: Only set this environment variable if hardware acceleration doesn't work by default" + INTEL_RECOMMENDED_DRIVER="iHD" + INTEL_SUPPORTS_QSV=true + elif echo "$INTEL_MODEL" | grep -q -E "Haswell|Broadwell|Skylake|Kaby"; then + echo "💡 Detected Intel GPU that supports i965 (e.g. Gen9 and below)" + echo " Recommended: Set LIBVA_DRIVER_NAME=i965" + echo " Note: Only set this environment variable if hardware acceleration doesn't work by default" + INTEL_RECOMMENDED_DRIVER="i965" + # Older Intel GPUs support QSV through i965 driver but with more limitations + INTEL_SUPPORTS_QSV=false + else + # Generic Intel case - we're not fully confident in our recommendation + echo "💡 Unable to definitively identify Intel GPU generation" + echo " Try auto-detection first (no environment variable)" + echo " If issues occur: Try LIBVA_DRIVER_NAME=iHD first (newer GPUs)" + echo " If that fails: Try LIBVA_DRIVER_NAME=i965 (older GPUs)" + INTEL_RECOMMENDED_DRIVER="unknown" # Mark as unknown rather than assuming + INTEL_SUPPORTS_QSV="maybe" # Mark as maybe instead of assuming true + fi + elif [ "$AMD_GPU_IN_LSPCI" = true ]; then + echo "💡 If auto-detection fails: Set LIBVA_DRIVER_NAME=radeonsi for AMD GPUs" + echo " Note: Only set this environment variable if hardware acceleration doesn't work by default" + else + echo "ℹ️ Common VAAPI driver options if auto-detection fails:" + echo " - For modern Intel GPUs (Gen12+/Arc/Xe): LIBVA_DRIVER_NAME=iHD" + echo " - For older Intel GPUs: LIBVA_DRIVER_NAME=i965" + echo " - For AMD GPUs: LIBVA_DRIVER_NAME=radeonsi" + echo " Note: Only set these environment variables if hardware acceleration doesn't work by default" + fi + else + echo "ℹ️ Intel/AMD GPU detected. Auto-detection should work in most cases." + echo " If VAAPI doesn't work, you may need to set LIBVA_DRIVER_NAME manually." + fi + fi +fi + +# Check FFmpeg hardware acceleration support +echo "🔍 Checking FFmpeg hardware acceleration capabilities..." 
+if command -v ffmpeg >/dev/null 2>&1; then + HWACCEL=$(ffmpeg -hide_banner -hwaccels 2>/dev/null | grep -v "Hardware acceleration methods:" || echo "None found") + + # Initialize variables to store compatible and missing methods + COMPATIBLE_METHODS="" + MISSING_METHODS="" + + # Format the list of hardware acceleration methods in a more readable way + echo "🔍 Available FFmpeg hardware acceleration methods:" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + + # Process the list into a more readable format with relevance indicators + if [ -n "$HWACCEL" ] && [ "$HWACCEL" != "None found" ]; then + # First, show methods compatible with detected hardware + echo " 📌 Compatible with your hardware:" + COMPATIBLE_FOUND=false + + for method in $HWACCEL; do + # Skip if it's just the header line or empty + if [ "$method" = "Hardware" ] || [ -z "$method" ]; then + continue + fi + + # Check if this method is relevant to detected hardware + IS_COMPATIBLE=false + DESCRIPTION="" + + if [ "$NVIDIA_FOUND" = true ] && [[ "$method" =~ ^(cuda|cuvid|nvenc|nvdec)$ ]]; then + IS_COMPATIBLE=true + DESCRIPTION="NVIDIA GPU acceleration" + elif [ "$INTEL_GPU_IN_LSPCI" = true ] && [ "$method" = "qsv" ] && [ "$INTEL_SUPPORTS_QSV" = true ]; then + IS_COMPATIBLE=true + DESCRIPTION="Intel QuickSync acceleration" + elif [ "$method" = "vaapi" ] && (([ "$INTEL_GPU_IN_LSPCI" = true ] || [ "$AMD_GPU_IN_LSPCI" = true ]) && [ "$DRI_DEVICES_FOUND" = true ]); then + IS_COMPATIBLE=true + if [ "$INTEL_GPU_IN_LSPCI" = true ]; then + DESCRIPTION="Intel VAAPI acceleration" + else + DESCRIPTION="AMD VAAPI acceleration" + fi + fi + + # Display compatible methods and store for summary + if [ "$IS_COMPATIBLE" = true ]; then + COMPATIBLE_FOUND=true + COMPATIBLE_METHODS="$COMPATIBLE_METHODS $method" + echo " ✅ $method - $DESCRIPTION" + fi + done + + if [ "$COMPATIBLE_FOUND" = false ]; then + echo " ❌ No compatible acceleration methods found for your hardware" + fi + + # Then show all other available methods + echo " 📌 Other available methods (not compatible with detected hardware):" + OTHER_FOUND=false + + for method in $HWACCEL; do + # Skip if it's just the header line or empty + if [ "$method" = "Hardware" ] || [ -z "$method" ]; then + continue + fi + + # Check if this method is relevant to detected hardware + IS_COMPATIBLE=false + + if [ "$NVIDIA_FOUND" = true ] && [[ "$method" =~ ^(cuda|cuvid|nvenc|nvdec)$ ]]; then + IS_COMPATIBLE=true + elif [ "$INTEL_GPU_IN_LSPCI" = true ] && [ "$method" = "qsv" ] && [ "$INTEL_SUPPORTS_QSV" = true ]; then + IS_COMPATIBLE=true + elif [ "$method" = "vaapi" ] && (([ "$INTEL_GPU_IN_LSPCI" = true ] || [ "$AMD_GPU_IN_LSPCI" = true ]) && [ "$DRI_DEVICES_FOUND" = true ]); then + IS_COMPATIBLE=true + fi + + # Display other methods that aren't compatible + if [ "$IS_COMPATIBLE" = false ]; then + OTHER_FOUND=true + echo " ℹ️ $method" + fi + done + + if [ "$OTHER_FOUND" = false ]; then + echo " None" + fi + + # Show expected methods that are missing + echo " 📌 Missing methods that should be available for your hardware:" + MISSING_FOUND=false + + # Check for NVIDIA methods if NVIDIA GPU is detected + if [ "$NVIDIA_FOUND" = true ]; then + EXPECTED_NVIDIA="cuda" # cuvid nvenc nvdec" keeping these in case future support is added + for method in $EXPECTED_NVIDIA; do + if ! 
echo "$HWACCEL" | grep -q "$method"; then + MISSING_FOUND=true + MISSING_METHODS="$MISSING_METHODS $method" + echo " ⚠️ $method - NVIDIA acceleration (missing but should be available)" + fi + done + fi + + # Check for Intel methods if Intel GPU is detected + if [ "$INTEL_GPU_IN_LSPCI" = true ] && [ "$DRI_DEVICES_FOUND" = true ]; then + if [ "$INTEL_SUPPORTS_QSV" = true ] && ! echo "$HWACCEL" | grep -q "qsv"; then + MISSING_FOUND=true + MISSING_METHODS="$MISSING_METHODS qsv" + echo " ⚠️ qsv - Intel QuickSync acceleration (missing but should be available)" + fi + + if ! echo "$HWACCEL" | grep -q "vaapi"; then + MISSING_FOUND=true + MISSING_METHODS="$MISSING_METHODS vaapi" + echo " ⚠️ vaapi - Intel VAAPI acceleration (missing but should be available)" + fi + fi + + # Check for AMD methods if AMD GPU is detected + if [ "$AMD_GPU_IN_LSPCI" = true ] && [ "$DRI_DEVICES_FOUND" = true ]; then + if ! echo "$HWACCEL" | grep -q "vaapi"; then + MISSING_FOUND=true + MISSING_METHODS="$MISSING_METHODS vaapi" + echo " ⚠️ vaapi - AMD VAAPI acceleration (missing but should be available)" + fi + fi + + if [ "$MISSING_FOUND" = false ]; then + echo " None - All expected methods are available" + fi + else + echo " ❌ No hardware acceleration methods found" + fi + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + + # Show hardware-appropriate method summary using the already gathered information + if [ -n "$COMPATIBLE_METHODS" ]; then + echo "✅ Hardware-appropriate acceleration methods available:$COMPATIBLE_METHODS" + fi + + # Show missing expected methods + if [ -n "$MISSING_METHODS" ]; then + echo "⚠️ Expected acceleration methods not found:$MISSING_METHODS" + echo " This might indicate missing libraries or improper driver configuration." + fi + + # Display specific cases of interest (simplify using previously captured information) + if [ "$NVIDIA_FOUND" = true ] && ! echo "$COMPATIBLE_METHODS" | grep -q "cuda\|nvenc\|cuvid"; then + echo "⚠️ NVIDIA GPU detected but no NVIDIA acceleration methods available." + echo " Ensure ffmpeg is built with NVIDIA support and required libraries are installed." + fi + + if (([ "$INTEL_GPU_IN_LSPCI" = true ] || [ "$AMD_GPU_IN_LSPCI" = true ]) && + [ "$DRI_DEVICES_FOUND" = true ] && ! echo "$COMPATIBLE_METHODS" | grep -q "vaapi"); then + echo "⚠️ Intel/AMD GPU detected but VAAPI acceleration not available." + echo " Ensure ffmpeg is built with VAAPI support and proper drivers are installed." + fi +else + echo "⚠️ FFmpeg not found in PATH." +fi + +# Provide a final summary of the hardware acceleration setup +echo "📋 ===================== SUMMARY =====================" + +# Identify which GPU type is active and working +if [ "$NVIDIA_FOUND" = true ] && (nvidia-smi >/dev/null 2>&1 || [ -n "$NVIDIA_VISIBLE_DEVICES" ]); then + if [ -n "$NVIDIA_MODEL" ]; then + echo "🔰 NVIDIA GPU: $NVIDIA_MODEL" + else + echo "🔰 NVIDIA GPU: ACTIVE (model detection unavailable)" + echo "ℹ️ Note: GPU model information couldn't be retrieved, but devices are present." + echo " This may be due to missing nvidia-smi tool or container limitations." 
+ fi + + if [ "$NVIDIA_CONTAINER_TOOLKIT_FOUND" = true ]; then + echo "✅ NVIDIA Container Toolkit: CONFIGURED CORRECTLY" + elif [ -n "$NVIDIA_VISIBLE_DEVICES" ] && [ -n "$NVIDIA_DRIVER_CAPABILITIES" ]; then + echo "✅ NVIDIA Docker configuration: USING MODERN DEPLOYMENT" + else + echo "⚠️ NVIDIA setup method: DIRECT DEVICE MAPPING (functional but not optimal)" + fi + + # Add device accessibility status + if [ $NVIDIA_DEVICE_COUNT -gt 0 ]; then + if [ $NVIDIA_ACCESS_COUNT -eq $NVIDIA_DEVICE_COUNT ]; then + echo "✅ Device access: ALL NVIDIA DEVICES ACCESSIBLE ($NVIDIA_ACCESS_COUNT/$NVIDIA_DEVICE_COUNT)" + else + echo "⚠️ Device access: LIMITED NVIDIA DEVICE ACCESS ($NVIDIA_ACCESS_COUNT/$NVIDIA_DEVICE_COUNT)" + echo " Some hardware acceleration features may not work properly." + fi + fi + + # Display FFmpeg NVIDIA acceleration methods in more detail + if echo "$COMPATIBLE_METHODS" | grep -q "cuda\|nvenc\|cuvid"; then + echo "✅ FFmpeg NVIDIA acceleration: AVAILABLE" + + # Show detailed breakdown of available NVIDIA methods + NVIDIA_METHODS=$(echo "$COMPATIBLE_METHODS" | grep -o '\(cuda\|cuvid\|nvenc\|nvdec\)') + echo " Available NVIDIA methods: $NVIDIA_METHODS" + echo " Recommended for: Video transcoding with NVIDIA GPUs" + else + echo "⚠️ FFmpeg NVIDIA acceleration: NOT DETECTED" + if [ -n "$MISSING_METHODS" ]; then + echo " Missing methods that should be available: $MISSING_METHODS" + fi + fi +elif [ "$NVIDIA_GPU_IN_LSPCI" = true ] && [ "$DRI_DEVICES_FOUND" = true ]; then + # NVIDIA through DRI only (suboptimal but possible) + if [ -n "$NVIDIA_MODEL" ]; then + echo "🔰 NVIDIA GPU: $NVIDIA_MODEL (SUBOPTIMALLY CONFIGURED)" + else + echo "🔰 NVIDIA GPU: DETECTED BUT SUBOPTIMALLY CONFIGURED" + fi + echo "⚠️ Your NVIDIA GPU is only accessible through DRI devices" + echo " - VAAPI acceleration may work for some tasks" + echo " - NVENC/CUDA acceleration is NOT available" + + # Add device accessibility status + if [ $DRI_DEVICE_COUNT -gt 0 ]; then + if [ $DRI_ACCESS_COUNT -eq $DRI_DEVICE_COUNT ]; then + echo "✅ Device access: ALL DRI DEVICES ACCESSIBLE ($DRI_ACCESS_COUNT/$DRI_DEVICE_COUNT)" + echo " VAAPI acceleration should work properly." + else + echo "⚠️ Device access: LIMITED DRI DEVICE ACCESS ($DRI_ACCESS_COUNT/$DRI_DEVICE_COUNT)" + echo " VAAPI acceleration may not work properly." + fi + fi + + echo "💡 RECOMMENDATION: Use the proper NVIDIA container configuration:" + echo " deploy:" + echo " resources:" + echo " reservations:" + echo " devices:" + echo " - driver: nvidia" + echo " count: all" + echo " capabilities: [gpu]" + + if echo "$COMPATIBLE_METHODS" | grep -q "vaapi"; then + echo "✅ FFmpeg VAAPI acceleration: AVAILABLE (limited without NVENC)" + echo " VAAPI can be used for transcoding, but NVENC/CUDA would be more efficient" + else + echo "⚠️ FFmpeg VAAPI acceleration: NOT DETECTED" + fi +elif [ "$DRI_DEVICES_FOUND" = true ]; then + # Intel/AMD detection with model if available + if [ -n "$GPU_MODEL" ]; then + echo "🔰 GPU: $GPU_MODEL" + elif [ -n "$LIBVA_DRIVER_NAME" ]; then + echo "🔰 ${LIBVA_DRIVER_NAME^^} GPU: ACTIVE" + else + echo "🔰 INTEL/AMD GPU: ACTIVE (model detection unavailable)" + echo "ℹ️ Note: Basic GPU drivers appear to be loaded (device nodes exist), but" + echo " couldn't identify specific model. This doesn't necessarily indicate a problem." 
+ fi + + # Add device accessibility status + if [ $DRI_DEVICE_COUNT -gt 0 ]; then + if [ $DRI_ACCESS_COUNT -eq $DRI_DEVICE_COUNT ]; then + echo "✅ Device access: ALL DRI DEVICES ACCESSIBLE ($DRI_ACCESS_COUNT/$DRI_DEVICE_COUNT)" + echo " VAAPI hardware acceleration should work properly." + else + echo "⚠️ Device access: LIMITED DRI DEVICE ACCESS ($DRI_ACCESS_COUNT/$DRI_DEVICE_COUNT)" + echo " VAAPI hardware acceleration may not work properly." + fi + fi + + # Display FFmpeg VAAPI acceleration method with more details + if echo "$COMPATIBLE_METHODS" | grep -q "vaapi"; then + echo "✅ FFmpeg VAAPI acceleration: AVAILABLE" + + # Add recommended usage information + echo " Recommended for: General video transcoding with Intel/AMD GPUs" + + # Add recommended driver information for Intel GPUs + if [ "$INTEL_GPU_IN_LSPCI" = true ] && [ -n "$INTEL_RECOMMENDED_DRIVER" ]; then + if [ "$INTEL_RECOMMENDED_DRIVER" = "unknown" ]; then + echo "ℹ️ Uncertain about recommended VAAPI driver for this Intel GPU" + echo " Auto-detection should work, but if issues occur try iHD or i965" + else + echo "ℹ️ Recommended VAAPI driver for this Intel GPU: $INTEL_RECOMMENDED_DRIVER" + fi + + if [ "$INTEL_SUPPORTS_QSV" = true ] && echo "$COMPATIBLE_METHODS" | grep -q "qsv"; then + echo "✅ QSV acceleration: AVAILABLE" + echo " Recommended for: Intel-specific optimized transcoding" + echo " Works best with: $INTEL_RECOMMENDED_DRIVER driver" + elif [ "$INTEL_SUPPORTS_QSV" = true ]; then + echo "ℹ️ QSV acceleration: NOT DETECTED (may be available with proper configuration)" + echo " Your Intel GPU supports QSV but it's not available in FFmpeg" + echo " Check if FFmpeg is built with QSV support" + elif [ "$INTEL_SUPPORTS_QSV" = "maybe" ]; then + echo "ℹ️ QSV acceleration: MAY BE AVAILABLE (depends on exact GPU model)" + fi + elif [ "$AMD_GPU_IN_LSPCI" = true ]; then + echo "ℹ️ Recommended VAAPI driver for AMD GPUs: radeonsi" + fi + else + echo "⚠️ FFmpeg VAAPI acceleration: NOT DETECTED" + if [ -n "$MISSING_METHODS" ]; then + echo " Missing methods that should be available: $MISSING_METHODS" + fi + fi +else + echo "❌ NO GPU ACCELERATION DETECTED" + echo "⚠️ Hardware acceleration is unavailable or misconfigured" +fi + +echo "📋 ==================================================" +echo "✅ GPU detection script complete." 
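The check script above only reports what the container can already see; the GPU still has to be handed to the container from docker-compose. A minimal sketch of the two approaches the script describes -- /dev/dri mapping for Intel/AMD VAAPI and the NVIDIA Container Runtime's device reservation for NVENC/CUDA. The service name and image reference are assumptions, and a real deployment would keep only the variant matching its hardware:

    # docker-compose.yml -- illustrative sketch only; keep whichever variant matches your GPU
    services:
      dispatcharr:
        image: ghcr.io/dispatcharr/dispatcharr:latest   # assumed image reference; adjust to your setup
        # Intel/AMD: expose the DRI render/card nodes that 04-check-hwaccel.sh probes
        devices:
          - /dev/dri:/dev/dri
        # NVIDIA: request the GPU through the NVIDIA Container Runtime (the method the script recommends)
        deploy:
          resources:
            reservations:
              devices:
                - driver: nvidia
                  count: all
                  capabilities: [gpu]

With either variant in place, 01-user-setup.sh matches the render/video group IDs inside the container at startup so the service user can open the devices.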
diff --git a/docker/nginx.conf b/docker/nginx.conf index b440f773..406d587c 100644 --- a/docker/nginx.conf +++ b/docker/nginx.conf @@ -3,19 +3,24 @@ proxy_cache_path /app/logo_cache levels=1:2 keys_zone=logo_cache:10m server { listen NGINX_PORT; + listen [::]:NGINX_PORT; proxy_connect_timeout 75; proxy_send_timeout 300; proxy_read_timeout 300; - client_max_body_size 128M; # Allow file uploads up to 128MB + client_max_body_size 0; + + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Host $host:$server_port; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Port $server_port; # Serve Django via uWSGI location / { include uwsgi_params; uwsgi_pass unix:/app/uwsgi.sock; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Host $host; } location /assets/ { @@ -30,6 +35,13 @@ server { root /data; } + # Internal location for X-Accel-Redirect backup downloads + # Django handles auth, nginx serves the file directly + location /protected-backups/ { + internal; + alias /data/backups/; + } + location /api/logos/(?\d+)/cache/ { proxy_pass http://127.0.0.1:5656; proxy_cache logo_cache; @@ -55,11 +67,6 @@ server { location /hdhr { include uwsgi_params; uwsgi_pass unix:/app/uwsgi.sock; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Host $host:$server_port; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header Host $host; } # Serve FFmpeg streams efficiently @@ -78,9 +85,6 @@ server { proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "Upgrade"; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Host $host; } # Route TS proxy requests to the dedicated instance @@ -94,8 +98,5 @@ server { proxy_read_timeout 300s; proxy_send_timeout 300s; client_max_body_size 0; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Host $host; } } diff --git a/docker/uwsgi.debug.ini b/docker/uwsgi.debug.ini index 957f4f4c..69c040f2 100644 --- a/docker/uwsgi.debug.ini +++ b/docker/uwsgi.debug.ini @@ -7,9 +7,10 @@ exec-before = python /app/scripts/wait_for_redis.py ; Start Redis first attach-daemon = redis-server -; Then start other services -attach-daemon = celery -A dispatcharr worker -l info -attach-daemon = celery -A dispatcharr beat -l info +; Then start other services with configurable nice level (default: 5 for low priority) +; Users can override via CELERY_NICE_LEVEL environment variable in docker-compose +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr worker --autoscale=6,1 +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr beat attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application attach-daemon = cd /app/frontend && npm run dev @@ -46,6 +47,7 @@ thunder-lock = true log-4xx = true log-5xx = true disable-logging = false +log-buffering = 1024 # Add buffer size limit for logging ; Longer timeouts for debugging sessions harakiri = 3600 @@ -58,9 +60,6 @@ ignore-sigpipe = true ignore-write-errors = true disable-write-exception = true -# Explicitly disable for-server option that confuses debugpy -for-server = false - # Debugging settings py-autoreload = 1 honour-stdin = true @@ 
-78,4 +77,10 @@ env = DEBUGPY_LOG_DIR=/app/debugpy_logs env = WAIT_FOR_DEBUGGER=false env = DEBUG_TIMEOUT=30 - +# Enable console logging (stdout) +log-master = true +# Enable strftime formatting for timestamps +logformat-strftime = true +log-date = %%Y-%%m-%%d %%H:%%M:%%S,000 +# Use the environment variable in log format - ensure consistent formatting with other files +log-format = %(ftime) $(DISPATCHARR_LOG_LEVEL) uwsgi.requests Worker ID: %(wid) %(method) %(status) %(uri) %(msecs)ms \ No newline at end of file diff --git a/docker/uwsgi.dev.ini b/docker/uwsgi.dev.ini index 191423bf..e476e216 100644 --- a/docker/uwsgi.dev.ini +++ b/docker/uwsgi.dev.ini @@ -1,15 +1,18 @@ [uwsgi] -; exec-before = python manage.py collectstatic --noinput -; exec-before = python manage.py migrate --noinput +; Remove file creation commands since we're not logging to files anymore +; exec-pre = mkdir -p /data/logs +; exec-pre = touch /data/logs/uwsgi-dev.log +; exec-pre = chmod 666 /data/logs/uwsgi-dev.log ; First run Redis availability check script once exec-pre = python /app/scripts/wait_for_redis.py ; Start Redis first attach-daemon = redis-server -; Then start other services -attach-daemon = celery -A dispatcharr worker -l info --concurrency=4 -attach-daemon = celery -A dispatcharr beat -l info +; Then start other services with configurable nice level (default: 5 for low priority) +; Users can override via CELERY_NICE_LEVEL environment variable in docker-compose +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr worker --autoscale=6,1 +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr beat attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application attach-daemon = cd /app/frontend && npm run dev @@ -47,3 +50,13 @@ thunder-lock = true log-4xx = true log-5xx = true disable-logging = false + +# Logging configuration - development mode +# Enable console logging (stdout) +log-master = true +# Enable strftime formatting for timestamps +logformat-strftime = true +log-date = %%Y-%%m-%%d %%H:%%M:%%S,000 +# Use formatted time with environment variable for log level +log-format = %(ftime) $(DISPATCHARR_LOG_LEVEL) uwsgi.requests Worker ID: %(wid) %(method) %(status) %(uri) %(msecs)ms +log-buffering = 1024 # Add buffer size limit for logging \ No newline at end of file diff --git a/docker/uwsgi.ini b/docker/uwsgi.ini index 326f4b5d..920bac48 100644 --- a/docker/uwsgi.ini +++ b/docker/uwsgi.ini @@ -1,15 +1,18 @@ [uwsgi] -; exec-before = python manage.py collectstatic --noinput -; exec-before = python manage.py migrate --noinput +; Remove file creation commands since we're not logging to files anymore +; exec-pre = mkdir -p /data/logs +; exec-pre = touch /data/logs/uwsgi.log +; exec-pre = chmod 666 /data/logs/uwsgi.log ; First run Redis availability check script once exec-pre = python /app/scripts/wait_for_redis.py ; Start Redis first attach-daemon = redis-server -; Then start other services -attach-daemon = celery -A dispatcharr worker -l error --concurrency=4 -attach-daemon = celery -A dispatcharr beat -l error +; Then start other services with configurable nice level (default: 5 for low priority) +; Users can override via CELERY_NICE_LEVEL environment variable in docker-compose +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr worker --autoscale=6,1 +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr beat attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application # Core settings @@ -18,17 +21,15 @@ module = 
dispatcharr.wsgi:application virtualenv = /dispatcharrpy master = true env = DJANGO_SETTINGS_MODULE=dispatcharr.settings +env = USE_NGINX_ACCEL=true socket = /app/uwsgi.sock chmod-socket = 777 vacuum = true die-on-term = true static-map = /static=/app/static -# Worker management (Optimize for I/O bound tasks) +# Worker management workers = 4 -threads = 4 -enable-threads = true -thread-stacksize=512 # Optimize for streaming http = 0.0.0.0:5656 @@ -36,14 +37,26 @@ http-keepalive = 1 buffer-size = 65536 # Increase buffer for large payloads post-buffering = 4096 # Reduce buffering for real-time streaming http-timeout = 600 # Prevent disconnects from long streams +socket-timeout = 600 # Prevent write timeouts when client buffers lazy-apps = true # Improve memory efficiency # Async mode (use gevent for high concurrency) -gevent = 100 -async = 100 +gevent = 400 # Each unused greenlet costs ~2-4KB of memory +# Higher values have minimal performance impact when idle, but provide capacity for traffic spikes +# If memory usage becomes an issue, reduce this value # Performance tuning thunder-lock = true log-4xx = true log-5xx = true disable-logging = false + +# Logging configuration +# Enable console logging (stdout) +log-master = true +# Enable strftime formatting for timestamps +logformat-strftime = true +log-date = %%Y-%%m-%%d %%H:%%M:%%S,000 +# Use formatted time with environment variable for log level +log-format = %(ftime) $(DISPATCHARR_LOG_LEVEL) uwsgi.requests Worker ID: %(wid) %(method) %(status) %(uri) %(msecs)ms +log-buffering = 1024 # Add buffer size limit for logging diff --git a/fixtures.json b/fixtures.json index 2d42f84e..3c31f926 100644 --- a/fixtures.json +++ b/fixtures.json @@ -36,7 +36,7 @@ "model": "core.streamprofile", "pk": 1, "fields": { - "profile_name": "ffmpeg", + "profile_name": "FFmpeg", "command": "ffmpeg", "parameters": "-i {streamUrl} -c:a copy -c:v copy -f mpegts pipe:1", "is_active": true, @@ -46,13 +46,23 @@ { "model": "core.streamprofile", "fields": { - "profile_name": "streamlink", + "profile_name": "Streamlink", "command": "streamlink", "parameters": "{streamUrl} best --stdout", "is_active": true, "user_agent": "1" } }, + { + "model": "core.streamprofile", + "fields": { + "profile_name": "VLC", + "command": "cvlc", + "parameters": "-vv -I dummy --no-video-title-show --http-user-agent {userAgent} {streamUrl} --sout #standard{access=file,mux=ts,dst=-}", + "is_active": true, + "user_agent": "1" + } + }, { "model": "core.coresettings", "fields": { diff --git a/frontend/package-lock.json b/frontend/package-lock.json index d8da7f76..ed9e6010 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -1,39 +1,35 @@ { - "name": "vite", + "name": "frontend", "version": "0.0.0", "lockfileVersion": 3, "requires": true, "packages": { "": { - "name": "vite", + "name": "frontend", "version": "0.0.0", "dependencies": { "@dnd-kit/core": "^6.3.1", "@dnd-kit/modifiers": "^9.0.0", "@dnd-kit/sortable": "^10.0.0", "@dnd-kit/utilities": "^3.2.2", - "@mantine/charts": "^7.17.2", - "@mantine/core": "^7.17.2", - "@mantine/dates": "^7.17.2", - "@mantine/dropzone": "^7.17.2", - "@mantine/form": "^7.17.3", - "@mantine/hooks": "^7.17.2", - "@mantine/notifications": "^7.17.2", - "@tabler/icons-react": "^3.31.0", + "@hookform/resolvers": "^5.2.2", + "@mantine/charts": "~8.0.1", + "@mantine/core": "~8.0.1", + "@mantine/dates": "~8.0.1", + "@mantine/dropzone": "~8.0.1", + "@mantine/form": "~8.0.1", + "@mantine/hooks": "~8.0.1", + "@mantine/notifications": "~8.0.1", 
"@tanstack/react-table": "^8.21.2", - "allotment": "^1.20.3", - "axios": "^1.8.2", - "clsx": "^2.1.1", + "allotment": "^1.20.4", "dayjs": "^1.11.13", - "formik": "^2.4.6", "hls.js": "^1.5.20", - "lucide-react": "^0.479.0", - "mantine-react-table": "^2.0.0-beta.9", + "lucide-react": "^0.511.0", "mpegts.js": "^1.8.0", - "prettier": "^3.5.3", - "react": "^19.0.0", - "react-dom": "^19.0.0", + "react": "^19.1.0", + "react-dom": "^19.1.0", "react-draggable": "^4.4.6", + "react-hook-form": "^7.70.0", "react-pro-sidebar": "^1.1.0", "react-router-dom": "^7.3.0", "react-virtualized": "^9.22.6", @@ -46,84 +42,151 @@ }, "devDependencies": { "@eslint/js": "^9.21.0", - "@types/react": "^19.0.10", - "@types/react-dom": "^19.0.4", - "@vitejs/plugin-react-swc": "^3.8.0", - "eslint": "^9.21.0", + "@swc/core": "npm:@swc/wasm@1.13.20", + "@swc/wasm": "^1.13.20", + "@testing-library/dom": "^10.4.1", + "@testing-library/jest-dom": "^6.8.0", + "@testing-library/react": "^16.3.0", + "@testing-library/user-event": "^14.6.1", + "@types/react": "^19.1.0", + "@types/react-dom": "^19.1.0", + "@vitejs/plugin-react-swc": "^4.1.0", + "eslint": "^9.27.0", "eslint-plugin-react-hooks": "^5.1.0", "eslint-plugin-react-refresh": "^0.4.19", "globals": "^15.15.0", - "vite": "^6.2.0" + "jsdom": "^27.0.0", + "prettier": "^3.5.3", + "vite": "^7.1.7", + "vitest": "^3.2.4" } }, - "node_modules/@babel/code-frame": { - "version": "7.26.2", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.26.2.tgz", - "integrity": "sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==", + "node_modules/@acemir/cssom": { + "version": "0.9.29", + "resolved": "https://registry.npmjs.org/@acemir/cssom/-/cssom-0.9.29.tgz", + "integrity": "sha512-G90x0VW+9nW4dFajtjCoT+NM0scAfH9Mb08IcjgFHYbfiL/lU04dTF9JuVOi3/OH+DJCQdcIseSXkdCB9Ky6JA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@adobe/css-tools": { + "version": "4.4.4", + "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.4.4.tgz", + "integrity": "sha512-Elp+iwUx5rN5+Y8xLt5/GRoG20WGoDCQ/1Fb+1LiGtvwbDavuSk0jhD/eZdckHAuzcDzccnkv+rEjyWfRx18gg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@asamuzakjp/css-color": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-4.1.1.tgz", + "integrity": "sha512-B0Hv6G3gWGMn0xKJ0txEi/jM5iFpT3MfDxmhZFb4W047GvytCf1DHQ1D69W3zHI4yWe2aTZAA0JnbMZ7Xc8DuQ==", + "dev": true, "license": "MIT", "dependencies": { - "@babel/helper-validator-identifier": "^7.25.9", + "@csstools/css-calc": "^2.1.4", + "@csstools/css-color-parser": "^3.1.0", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "lru-cache": "^11.2.4" + } + }, + "node_modules/@asamuzakjp/dom-selector": { + "version": "6.7.6", + "resolved": "https://registry.npmjs.org/@asamuzakjp/dom-selector/-/dom-selector-6.7.6.tgz", + "integrity": "sha512-hBaJER6A9MpdG3WgdlOolHmbOYvSk46y7IQN/1+iqiCuUu6iWdQrs9DGKF8ocqsEqWujWf/V7b7vaDgiUmIvUg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@asamuzakjp/nwsapi": "^2.3.9", + "bidi-js": "^1.0.3", + "css-tree": "^3.1.0", + "is-potential-custom-element-name": "^1.0.1", + "lru-cache": "^11.2.4" + } + }, + "node_modules/@asamuzakjp/nwsapi": { + "version": "2.3.9", + "resolved": "https://registry.npmjs.org/@asamuzakjp/nwsapi/-/nwsapi-2.3.9.tgz", + "integrity": "sha512-n8GuYSrI9bF7FFZ/SjhwevlHc8xaVlb/7HmHelnc/PZXBD2ZR49NnN9sMMuDdEGPeeRQ5d0hqlSlEpgCX3Wl0Q==", + "dev": true, + "license": "MIT" + }, + 
"node_modules/@babel/code-frame": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.27.1", "js-tokens": "^4.0.0", - "picocolors": "^1.0.0" + "picocolors": "^1.1.1" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/generator": { - "version": "7.26.10", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.26.10.tgz", - "integrity": "sha512-rRHT8siFIXQrAYOYqZQVsAr8vJ+cBNqcVAY6m5V8/4QqzaPl+zDBe6cLEPRDuNOUf3ww8RfJVlOyQMoSI+5Ang==", + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.5.tgz", + "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==", "license": "MIT", "dependencies": { - "@babel/parser": "^7.26.10", - "@babel/types": "^7.26.10", - "@jridgewell/gen-mapping": "^0.3.5", - "@jridgewell/trace-mapping": "^0.3.25", + "@babel/parser": "^7.28.5", + "@babel/types": "^7.28.5", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", "jsesc": "^3.0.2" }, "engines": { "node": ">=6.9.0" } }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, "node_modules/@babel/helper-module-imports": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.25.9.tgz", - "integrity": "sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz", + "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==", "license": "MIT", "dependencies": { - "@babel/traverse": "^7.25.9", - "@babel/types": "^7.25.9" + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-string-parser": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz", - "integrity": "sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz", - "integrity": "sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==", + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", "license": 
"MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/parser": { - "version": "7.26.10", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.10.tgz", - "integrity": "sha512-6aQR2zGE/QFi8JpDLjUZEPYOs7+mhKXm86VaKFiLP35JQwQb6bwUE+XbvkH0EptsYhbNBSUGaUBLKqxH1xSgsA==", + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz", + "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==", "license": "MIT", "dependencies": { - "@babel/types": "^7.26.10" + "@babel/types": "^7.28.5" }, "bin": { "parser": "bin/babel-parser.js" @@ -133,71 +196,194 @@ } }, "node_modules/@babel/runtime": { - "version": "7.26.10", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.26.10.tgz", - "integrity": "sha512-2WJMeRQPHKSPemqk/awGrAiuFfzBmOIPXKizAsVhWH9YJqLZ0H+HS4c8loHGgW6utJ3E/ejXQUsiGaQy2NZ9Fw==", + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.4.tgz", + "integrity": "sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==", "license": "MIT", - "dependencies": { - "regenerator-runtime": "^0.14.0" - }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/template": { - "version": "7.26.9", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.26.9.tgz", - "integrity": "sha512-qyRplbeIpNZhmzOysF/wFMuP9sctmh2cFzRAZOn1YapxBsE1i9bJIY586R/WBLfLcmcBlM8ROBiQURnnNy+zfA==", + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", + "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", "license": "MIT", "dependencies": { - "@babel/code-frame": "^7.26.2", - "@babel/parser": "^7.26.9", - "@babel/types": "^7.26.9" + "@babel/code-frame": "^7.27.1", + "@babel/parser": "^7.27.2", + "@babel/types": "^7.27.1" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.26.10", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.26.10.tgz", - "integrity": "sha512-k8NuDrxr0WrPH5Aupqb2LCVURP/S0vBEn5mK6iH+GIYob66U5EtoZvcdudR2jQ4cmTwhEwW1DLB+Yyas9zjF6A==", + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.5.tgz", + "integrity": "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==", "license": "MIT", "dependencies": { - "@babel/code-frame": "^7.26.2", - "@babel/generator": "^7.26.10", - "@babel/parser": "^7.26.10", - "@babel/template": "^7.26.9", - "@babel/types": "^7.26.10", - "debug": "^4.3.1", - "globals": "^11.1.0" + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.5", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.28.5", + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.5", + "debug": "^4.3.1" }, "engines": { "node": ">=6.9.0" } }, - "node_modules/@babel/traverse/node_modules/globals": { - "version": "11.12.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", - "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, "node_modules/@babel/types": { - "version": "7.26.10", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.26.10.tgz", - "integrity": "sha512-emqcG3vHrpxUKTrxcblR36dcrcoRDvKmnL/dCL6ZsHaShW80qxCAcNhzQZrpeM765VzEos+xOi4s+r4IXzTwdQ==", + "version": "7.28.5", 
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz", + "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==", "license": "MIT", "dependencies": { - "@babel/helper-string-parser": "^7.25.9", - "@babel/helper-validator-identifier": "^7.25.9" + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" }, "engines": { "node": ">=6.9.0" } }, + "node_modules/@csstools/color-helpers": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@csstools/color-helpers/-/color-helpers-5.1.0.tgz", + "integrity": "sha512-S11EXWJyy0Mz5SYvRmY8nJYTFFd1LCNV+7cXyAgQtOOuzb4EsgfqDufL+9esx72/eLhsRdGZwaldu/h+E4t4BA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + } + }, + "node_modules/@csstools/css-calc": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@csstools/css-calc/-/css-calc-2.1.4.tgz", + "integrity": "sha512-3N8oaj+0juUw/1H3YwmDDJXCgTB1gKU6Hc/bB502u9zR0q2vd786XJH9QfrKIEgFlZmhZiq6epXl4rHqhzsIgQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/css-color-parser": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@csstools/css-color-parser/-/css-color-parser-3.1.0.tgz", + "integrity": "sha512-nbtKwh3a6xNVIp/VRuXV64yTKnb1IjTAEEh3irzS+HkKjAOYLTGNb9pmVNntZ8iVBHcWDA2Dof0QtPgFI1BaTA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "@csstools/color-helpers": "^5.1.0", + "@csstools/css-calc": "^2.1.4" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/css-parser-algorithms": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/@csstools/css-parser-algorithms/-/css-parser-algorithms-3.0.5.tgz", + "integrity": "sha512-DaDeUkXZKjdGhgYaHNJTV9pV7Y9B3b644jCLs9Upc3VeNGg6LWARAT6O+Q+/COo+2gg/bM5rhpMAtf70WqfBdQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/css-syntax-patches-for-csstree": { + "version": "1.0.21", + "resolved": "https://registry.npmjs.org/@csstools/css-syntax-patches-for-csstree/-/css-syntax-patches-for-csstree-1.0.21.tgz", + "integrity": "sha512-plP8N8zKfEZ26figX4Nvajx8DuzfuRpLTqglQ5d0chfnt35Qt3X+m6ASZ+rG0D0kxe/upDVNwSIVJP5n4FuNfw==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": 
">=18" + } + }, + "node_modules/@csstools/css-tokenizer": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@csstools/css-tokenizer/-/css-tokenizer-3.0.4.tgz", + "integrity": "sha512-Vd/9EVDiu6PPJt9yAh6roZP6El1xHrdvIVGjyBsHR0RYwNHgL7FJPyIIW4fANJNG6FtyZfvlRPpFI4ZM/lubvw==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + } + }, "node_modules/@dnd-kit/accessibility": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/@dnd-kit/accessibility/-/accessibility-3.1.1.tgz", @@ -304,9 +490,9 @@ "license": "MIT" }, "node_modules/@emotion/is-prop-valid": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/@emotion/is-prop-valid/-/is-prop-valid-1.3.1.tgz", - "integrity": "sha512-/ACwoqx7XQi9knQs/G0qKvv5teDMhD7bXYns9N/wM8ah8iNb8jZ2uNO0YOgiq2o2poIvVtJS2YALasQuMSQ7Kw==", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@emotion/is-prop-valid/-/is-prop-valid-1.4.0.tgz", + "integrity": "sha512-QgD4fyscGcbbKwJmqNvUMSE02OsHUa+lAWKdEUIJKgqe5IwRSKd7+KhibEWdaKwgjLj0DRSHA9biAIqGBk05lw==", "license": "MIT", "dependencies": { "@emotion/memoize": "^0.9.0" @@ -362,9 +548,9 @@ "license": "MIT" }, "node_modules/@emotion/styled": { - "version": "11.14.0", - "resolved": "https://registry.npmjs.org/@emotion/styled/-/styled-11.14.0.tgz", - "integrity": "sha512-XxfOnXFffatap2IyCeJyNov3kiDQWoR08gPUQxvbL7fxKryGBKUZUkG6Hz48DZwVrJSVh9sJboyV1Ds4OW6SgA==", + "version": "11.14.1", + "resolved": "https://registry.npmjs.org/@emotion/styled/-/styled-11.14.1.tgz", + "integrity": "sha512-qEEJt42DuToa3gurlH4Qqc1kVpNq8wO8cJtDzU46TjlzWjDlsVyevtYCRijVq3SrHsROS+gVQ8Fnea108GnKzw==", "license": "MIT", "dependencies": { "@babel/runtime": "^7.18.3", @@ -411,10 +597,282 @@ "integrity": "sha512-snKqtPW01tN0ui7yu9rGv69aJXr/a/Ywvl11sUjNtEcRc+ng/mQriFL0wLXMef74iHa/EkftbDzU9F8iFbH+zg==", "license": "MIT" }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.2.tgz", + "integrity": "sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.2.tgz", + "integrity": "sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.2.tgz", + "integrity": "sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.2.tgz", + "integrity": "sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A==", + 
"cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.2.tgz", + "integrity": "sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.2.tgz", + "integrity": "sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.2.tgz", + "integrity": "sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.2.tgz", + "integrity": "sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.2.tgz", + "integrity": "sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.2.tgz", + "integrity": "sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.2.tgz", + "integrity": "sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.2.tgz", + "integrity": "sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.27.2", + "resolved": 
"https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.2.tgz", + "integrity": "sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.2.tgz", + "integrity": "sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.2.tgz", + "integrity": "sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.2.tgz", + "integrity": "sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, "node_modules/@esbuild/linux-x64": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.1.tgz", - "integrity": "sha512-xbfUhu/gnvSEg+EGovRc+kjBAkrvtk38RlerAzQxvMzlB4fXpCFCeUAYzJvrnhFtdeyVCDANSjJvOvGYoeKzFA==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.2.tgz", + "integrity": "sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA==", "cpu": [ "x64" ], @@ -428,10 +886,163 @@ "node": ">=18" } }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.2.tgz", + "integrity": "sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.2.tgz", + "integrity": "sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.2.tgz", + "integrity": "sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.2.tgz", + 
"integrity": "sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.2.tgz", + "integrity": "sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.2.tgz", + "integrity": "sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.2.tgz", + "integrity": "sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.2.tgz", + "integrity": "sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.2.tgz", + "integrity": "sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, "node_modules/@eslint-community/eslint-utils": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.5.0.tgz", - "integrity": "sha512-RoV8Xs9eNwiDvhv7M+xcL4PWyRyIXRY/FLp3buU4h1EYfdF7unWUy3dOjPqb3C7rMUewIcqwW850PgS8h1o1yg==", + "version": "4.9.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.0.tgz", + "integrity": "sha512-ayVFHdtZ+hsq1t2Dy24wCmGXGe4q9Gu3smhLYALJrr473ZH27MsnSL+LKUlimp4BWJqMDMLmPpx/Q9R3OAlL4g==", "dev": true, "license": "MIT", "dependencies": { @@ -461,9 +1072,9 @@ } }, "node_modules/@eslint-community/regexpp": { - "version": "4.12.1", - "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.1.tgz", - "integrity": "sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==", + "version": "4.12.2", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz", + "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==", "dev": true, "license": "MIT", "engines": { @@ -471,13 +1082,13 @@ } }, "node_modules/@eslint/config-array": { - 
"version": "0.19.2", - "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.19.2.tgz", - "integrity": "sha512-GNKqxfHG2ySmJOBSHg7LxeUx4xpuCoFjacmlCoYWEbaPXLwvfIjixRI12xCQZeULksQb23uiA8F40w5TojpV7w==", + "version": "0.21.1", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.1.tgz", + "integrity": "sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA==", "dev": true, "license": "Apache-2.0", "dependencies": { - "@eslint/object-schema": "^2.1.6", + "@eslint/object-schema": "^2.1.7", "debug": "^4.3.1", "minimatch": "^3.1.2" }, @@ -486,19 +1097,22 @@ } }, "node_modules/@eslint/config-helpers": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.1.0.tgz", - "integrity": "sha512-kLrdPDJE1ckPo94kmPPf9Hfd0DU0Jw6oKYrhe+pwSC0iTUInmTa+w6fw8sGgcfkFJGNdWOUeOaDM4quW4a7OkA==", + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.4.2.tgz", + "integrity": "sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw==", "dev": true, "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^0.17.0" + }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, "node_modules/@eslint/core": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.12.0.tgz", - "integrity": "sha512-cmrR6pytBuSMTaBweKoGMwu3EiHiEC+DoyupPmlZ0HxBJBtIxwe+j/E4XPIKNx+Q74c8lXKPwYawBf5glsTkHg==", + "version": "0.17.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.17.0.tgz", + "integrity": "sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ==", "dev": true, "license": "Apache-2.0", "dependencies": { @@ -509,9 +1123,9 @@ } }, "node_modules/@eslint/eslintrc": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.0.tgz", - "integrity": "sha512-yaVPAiNAalnCZedKLdR21GOGILMLKPyqSLWaAjQFvYA2i/ciDi8ArYVr69Anohb6cH2Ukhqti4aFnYyPm8wdwQ==", + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.3.tgz", + "integrity": "sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ==", "dev": true, "license": "MIT", "dependencies": { @@ -521,7 +1135,7 @@ "globals": "^14.0.0", "ignore": "^5.2.0", "import-fresh": "^3.2.1", - "js-yaml": "^4.1.0", + "js-yaml": "^4.1.1", "minimatch": "^3.1.2", "strip-json-comments": "^3.1.1" }, @@ -546,19 +1160,22 @@ } }, "node_modules/@eslint/js": { - "version": "9.22.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.22.0.tgz", - "integrity": "sha512-vLFajx9o8d1/oL2ZkpMYbkLv8nDB6yaIwFNt7nI4+I80U/z03SxmfOMsLbvWr3p7C+Wnoh//aOu2pQW8cS0HCQ==", + "version": "9.39.2", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.39.2.tgz", + "integrity": "sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA==", "dev": true, "license": "MIT", "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" } }, "node_modules/@eslint/object-schema": { - "version": "2.1.6", - "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.6.tgz", - "integrity": "sha512-RBMg5FRL0I0gs51M/guSAj5/e14VQ4tpZnQNWwuDT66P14I43ItmPfIZRhO9fUVIPOAQXU47atlywZ/czoqFPA==", + "version": "2.1.7", + "resolved": 
"https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.7.tgz", + "integrity": "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==", "dev": true, "license": "Apache-2.0", "engines": { @@ -566,13 +1183,13 @@ } }, "node_modules/@eslint/plugin-kit": { - "version": "0.2.7", - "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.2.7.tgz", - "integrity": "sha512-JubJ5B2pJ4k4yGxaNLdbjrnk9d/iDz6/q8wOilpIowd6PJPgaxCuHBnBszq7Ce2TyMrywm5r4PnKm6V3iiZF+g==", + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.4.1.tgz", + "integrity": "sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA==", "dev": true, "license": "Apache-2.0", "dependencies": { - "@eslint/core": "^0.12.0", + "@eslint/core": "^0.17.0", "levn": "^0.4.1" }, "engines": { @@ -580,22 +1197,22 @@ } }, "node_modules/@floating-ui/core": { - "version": "1.6.9", - "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.6.9.tgz", - "integrity": "sha512-uMXCuQ3BItDUbAMhIXw7UPXRfAlOAvZzdK9BWpE60MCn+Svt3aLn9jsPTi/WNGlRUu2uI0v5S7JiIUsbsvh3fw==", + "version": "1.7.3", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.7.3.tgz", + "integrity": "sha512-sGnvb5dmrJaKEZ+LDIpguvdX3bDlEllmv4/ClQ9awcmCZrlx5jQyyMWFM5kBI+EyNOCDDiKk8il0zeuX3Zlg/w==", "license": "MIT", "dependencies": { - "@floating-ui/utils": "^0.2.9" + "@floating-ui/utils": "^0.2.10" } }, "node_modules/@floating-ui/dom": { - "version": "1.6.13", - "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.13.tgz", - "integrity": "sha512-umqzocjDgNRGTuO7Q8CU32dkHkECqI8ZdMZ5Swb6QAM0t5rnlrN3lGo1hdpscRd3WS8T6DKYK4ephgIH9iRh3w==", + "version": "1.7.4", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.7.4.tgz", + "integrity": "sha512-OOchDgh4F2CchOX94cRVqhvy7b3AFb+/rQXyswmzmGakRfkMgoWVjfnLWkRirfLEfuD4ysVW16eXzwt3jHIzKA==", "license": "MIT", "dependencies": { - "@floating-ui/core": "^1.6.0", - "@floating-ui/utils": "^0.2.9" + "@floating-ui/core": "^1.7.3", + "@floating-ui/utils": "^0.2.10" } }, "node_modules/@floating-ui/react": { @@ -614,12 +1231,12 @@ } }, "node_modules/@floating-ui/react-dom": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.2.tgz", - "integrity": "sha512-06okr5cgPzMNBy+Ycse2A6udMi4bqwW/zgBF/rwjcNqWkyr82Mcg8b0vjX8OJpZFy/FKjJmw6wV7t44kK6kW7A==", + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.6.tgz", + "integrity": "sha512-4JX6rEatQEvlmgU80wZyq9RT96HZJa88q8hp0pBd+LrczeDI4o6uA2M+uvxngVHo4Ihr8uibXxH6+70zhAFrVw==", "license": "MIT", "dependencies": { - "@floating-ui/dom": "^1.0.0" + "@floating-ui/dom": "^1.7.4" }, "peerDependencies": { "react": ">=16.8.0", @@ -627,11 +1244,23 @@ } }, "node_modules/@floating-ui/utils": { - "version": "0.2.9", - "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.9.tgz", - "integrity": "sha512-MDWhGtE+eHw5JW7lq4qhc5yRLS11ERl1c7Z6Xd0a58DozHES6EnNNwUWbMiG4J9Cgj053Bhk8zvlhFYKVhULwg==", + "version": "0.2.10", + "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.10.tgz", + "integrity": "sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ==", "license": "MIT" }, + "node_modules/@hookform/resolvers": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/@hookform/resolvers/-/resolvers-5.2.2.tgz", + "integrity": 
"sha512-A/IxlMLShx3KjV/HeTcTfaMxdwy690+L/ZADoeaTltLx+CVuzkeVIPuybK3jrRfw7YZnmdKsVVHAlEPIAEUNlA==", + "license": "MIT", + "dependencies": { + "@standard-schema/utils": "^0.3.0" + }, + "peerDependencies": { + "react-hook-form": "^7.55.0" + } + }, "node_modules/@humanfs/core": { "version": "0.19.1", "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", @@ -643,33 +1272,19 @@ } }, "node_modules/@humanfs/node": { - "version": "0.16.6", - "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.6.tgz", - "integrity": "sha512-YuI2ZHQL78Q5HbhDiBA1X4LmYdXCKCMQIfw0pw7piHJwyREFebJUvrQN4cMssyES6x+vfUbx1CIpaQUKYdQZOw==", + "version": "0.16.7", + "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.7.tgz", + "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==", "dev": true, "license": "Apache-2.0", "dependencies": { "@humanfs/core": "^0.19.1", - "@humanwhocodes/retry": "^0.3.0" + "@humanwhocodes/retry": "^0.4.0" }, "engines": { "node": ">=18.18.0" } }, - "node_modules/@humanfs/node/node_modules/@humanwhocodes/retry": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.3.1.tgz", - "integrity": "sha512-JBxkERygn7Bv/GbN5Rv8Ul6LVknS+5Bp6RgDC/O8gEBU/yeH5Ui5C/OlWrTb6qct7LjjfT6Re2NxB0ln0yYybA==", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": ">=18.18" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/nzakas" - } - }, "node_modules/@humanwhocodes/module-importer": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", @@ -685,9 +1300,9 @@ } }, "node_modules/@humanwhocodes/retry": { - "version": "0.4.2", - "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.2.tgz", - "integrity": "sha512-xeO57FpIu4p1Ri3Jq/EXq4ClRm86dVF2z/+kvFnyqVYRavTZmaFaUBbWCOuuTh0o/g7DSsk6kc2vrS4Vl5oPOQ==", + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz", + "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", "dev": true, "license": "Apache-2.0", "engines": { @@ -699,17 +1314,13 @@ } }, "node_modules/@jridgewell/gen-mapping": { - "version": "0.3.8", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz", - "integrity": "sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==", + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", "license": "MIT", "dependencies": { - "@jridgewell/set-array": "^1.2.1", - "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/sourcemap-codec": "^1.5.0", "@jridgewell/trace-mapping": "^0.3.24" - }, - "engines": { - "node": ">=6.0.0" } }, "node_modules/@jridgewell/resolve-uri": { @@ -721,25 +1332,16 @@ "node": ">=6.0.0" } }, - "node_modules/@jridgewell/set-array": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", - "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", - "license": "MIT", - "engines": { - "node": ">=6.0.0" - } - }, "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.5.0", - "resolved": 
"https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", - "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", "license": "MIT" }, "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.25", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", - "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", "license": "MIT", "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", @@ -753,72 +1355,72 @@ "license": "Apache-2.0" }, "node_modules/@mantine/charts": { - "version": "7.17.2", - "resolved": "https://registry.npmjs.org/@mantine/charts/-/charts-7.17.2.tgz", - "integrity": "sha512-ckB23pIqRjzysUz2EiWZD9AVyf7t0r7o7zfJbl01nzOezFgYq5RGeRoxvpcsfBC+YoSbB/43rjNcXtYhtA7QzA==", + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@mantine/charts/-/charts-8.0.2.tgz", + "integrity": "sha512-hVS1+CT+7e3+ZbU1xx7Nyx/5ZBSxzS+68SKeVLeOZPGl9Wx35CY1oLn0n53vQPWV2WFKd0u0Bq3d1iuaDpkzGA==", "license": "MIT", "peerDependencies": { - "@mantine/core": "7.17.2", - "@mantine/hooks": "7.17.2", + "@mantine/core": "8.0.2", + "@mantine/hooks": "8.0.2", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x", "recharts": "^2.13.3" } }, "node_modules/@mantine/core": { - "version": "7.17.2", - "resolved": "https://registry.npmjs.org/@mantine/core/-/core-7.17.2.tgz", - "integrity": "sha512-R6MYhitJ0JEgrhadd31Nw9FhRaQwDHjXUs5YIlitKH/fTOz9gKSxKjzmNng3bEBQCcbEDOkZj3FRcBgTUh/F0Q==", + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@mantine/core/-/core-8.0.2.tgz", + "integrity": "sha512-2Ps7bRTeTbRwAKTCL9xdflPz0pwOlTq6ohyTbDZMCADqecf09GHI7GiX+HJatqbPZ2t8jK0fN1b48YhjJaxTqg==", "license": "MIT", "dependencies": { "@floating-ui/react": "^0.26.28", "clsx": "^2.1.1", "react-number-format": "^5.4.3", "react-remove-scroll": "^2.6.2", - "react-textarea-autosize": "8.5.6", + "react-textarea-autosize": "8.5.9", "type-fest": "^4.27.0" }, "peerDependencies": { - "@mantine/hooks": "7.17.2", + "@mantine/hooks": "8.0.2", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } }, "node_modules/@mantine/dates": { - "version": "7.17.2", - "resolved": "https://registry.npmjs.org/@mantine/dates/-/dates-7.17.2.tgz", - "integrity": "sha512-7bB992j8f+uEi280jab0/8i5yfsN/3oSrMDFwatZ+7XSDUwiP0YFib/FVX0pNSSqdFpbXhUmsZEECX71QtHw+Q==", + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@mantine/dates/-/dates-8.0.2.tgz", + "integrity": "sha512-V1xU00gECfykA4UFln8ulPsPHvaTncsg9zUbzCwqwEAYlZFG3Nnj5eBzzpV3IN1LNDPEVGb1gAOM6jZ+fi2uRQ==", "license": "MIT", "dependencies": { "clsx": "^2.1.1" }, "peerDependencies": { - "@mantine/core": "7.17.2", - "@mantine/hooks": "7.17.2", + "@mantine/core": "8.0.2", + "@mantine/hooks": "8.0.2", "dayjs": ">=1.0.0", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } }, "node_modules/@mantine/dropzone": { - "version": "7.17.2", - "resolved": "https://registry.npmjs.org/@mantine/dropzone/-/dropzone-7.17.2.tgz", - "integrity": 
"sha512-NMQ1SDmnW0sf3GO6p1r/VIcg/xWqlRmfnWCr00/bGRbBEGbyaUwL3LSn+KYBJdY+3/jNGvGa+xflWDvnby5tzw==", + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@mantine/dropzone/-/dropzone-8.0.2.tgz", + "integrity": "sha512-dWsz99QjWOQy7wDx4zzvBrPQ6l3201kg0iugk2Dm+MmN9mlboychz/LIZzoCGsodtQRLAsoTlN2zOqhsiggRfw==", "license": "MIT", "dependencies": { - "react-dropzone-esm": "15.2.0" + "react-dropzone": "14.3.8" }, "peerDependencies": { - "@mantine/core": "7.17.2", - "@mantine/hooks": "7.17.2", + "@mantine/core": "8.0.2", + "@mantine/hooks": "8.0.2", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } }, "node_modules/@mantine/form": { - "version": "7.17.3", - "resolved": "https://registry.npmjs.org/@mantine/form/-/form-7.17.3.tgz", - "integrity": "sha512-ktERldD8f9lrjjz6wIbwMnNbAZq8XEWPx4K5WuFyjXaK0PI8D+gsXIGKMtA5rVrAUFHCWCdbK3yLgtjJNki8ew==", + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@mantine/form/-/form-8.0.2.tgz", + "integrity": "sha512-vSp9BfrhC9o7RMRYMaND2UAflXO4i6c5F1qPkiM2FID6ye2RJxW8YHaGa3kA0VfBbhDw9sFBbl8p7ttE4RPzcw==", "license": "MIT", "dependencies": { "fast-deep-equal": "^3.1.3", @@ -829,34 +1431,34 @@ } }, "node_modules/@mantine/hooks": { - "version": "7.17.2", - "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-7.17.2.tgz", - "integrity": "sha512-tbErVcGZu0E4dSmE6N0k6Tv1y9R3SQmmQgwqorcc+guEgKMdamc36lucZGlJnSGUmGj+WLUgELkEQ0asdfYBDA==", + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-8.0.2.tgz", + "integrity": "sha512-0jpEdC0KIAZ54D5kd9rJudrEm6vkvnrL9yYHnkuNbxokXSzDdYA/wpHnKR5WW+u6fW4JF6A6A7gN1vXKeC9MSw==", "license": "MIT", "peerDependencies": { "react": "^18.x || ^19.x" } }, "node_modules/@mantine/notifications": { - "version": "7.17.2", - "resolved": "https://registry.npmjs.org/@mantine/notifications/-/notifications-7.17.2.tgz", - "integrity": "sha512-vg0L8cmihz0ODg4WJ9MAyK06WPt/6g67ksIUFxd4F8RfdJbIMLTsNG9yWoSfuhtXenUg717KaA917IWLjDSaqw==", + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@mantine/notifications/-/notifications-8.0.2.tgz", + "integrity": "sha512-whSuoCCZxQF3VM40sumCte9tA79to8OCV/vv0z8PeVTj/eKlaTR+P9LKigO9ovhuNELrvvO3Rxcnno5aMBz0oA==", "license": "MIT", "dependencies": { - "@mantine/store": "7.17.2", + "@mantine/store": "8.0.2", "react-transition-group": "4.4.5" }, "peerDependencies": { - "@mantine/core": "7.17.2", - "@mantine/hooks": "7.17.2", + "@mantine/core": "8.0.2", + "@mantine/hooks": "8.0.2", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } }, "node_modules/@mantine/store": { - "version": "7.17.2", - "resolved": "https://registry.npmjs.org/@mantine/store/-/store-7.17.2.tgz", - "integrity": "sha512-UoMUYQK/z58hMueCkpDIXc49gPgrVO/zcpb0k+B7MFU51EIUiFzHLxLFBmWrgCAM6rzJORqN8JjyCd/PB9j4aw==", + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@mantine/store/-/store-8.0.2.tgz", + "integrity": "sha512-/LuizGWAXjVnLLZ55f0QYotiqb8GlHpIb4KRf4LqRkbsA6UAZEVb6beuk0vI2Azf6vfuh7sTHu1xVC5zI6C+Cw==", "license": "MIT", "peerDependencies": { "react": "^18.x || ^19.x" @@ -872,10 +1474,227 @@ "url": "https://opencollective.com/popperjs" } }, + "node_modules/@rolldown/pluginutils": { + "version": "1.0.0-beta.47", + "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.47.tgz", + "integrity": "sha512-8QagwMH3kNCuzD8EWL8R2YPW5e4OrHNSAHRFDdmFqEwEaD/KcNKjVoumo+gP2vW5eKB2UPbM6vTYiGZX0ixLnw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.53.5", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.53.5.tgz", + "integrity": "sha512-iDGS/h7D8t7tvZ1t6+WPK04KD0MwzLZrG0se1hzBjSi5fyxlsiggoJHwh18PCFNn7tG43OWb6pdZ6Y+rMlmyNQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.53.5.tgz", + "integrity": "sha512-wrSAViWvZHBMMlWk6EJhvg8/rjxzyEhEdgfMMjREHEq11EtJ6IP6yfcCH57YAEca2Oe3FNCE9DSTgU70EIGmVw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.53.5.tgz", + "integrity": "sha512-S87zZPBmRO6u1YXQLwpveZm4JfPpAa6oHBX7/ghSiGH3rz/KDgAu1rKdGutV+WUI6tKDMbaBJomhnT30Y2t4VQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.53.5.tgz", + "integrity": "sha512-YTbnsAaHo6VrAczISxgpTva8EkfQus0VPEVJCEaboHtZRIb6h6j0BNxRBOwnDciFTZLDPW5r+ZBmhL/+YpTZgA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.53.5.tgz", + "integrity": "sha512-1T8eY2J8rKJWzaznV7zedfdhD1BqVs1iqILhmHDq/bqCUZsrMt+j8VCTHhP0vdfbHK3e1IQ7VYx3jlKqwlf+vw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.53.5.tgz", + "integrity": "sha512-sHTiuXyBJApxRn+VFMaw1U+Qsz4kcNlxQ742snICYPrY+DDL8/ZbaC4DVIB7vgZmp3jiDaKA0WpBdP0aqPJoBQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.53.5.tgz", + "integrity": "sha512-dV3T9MyAf0w8zPVLVBptVlzaXxka6xg1f16VAQmjg+4KMSTWDvhimI/Y6mp8oHwNrmnmVl9XxJ/w/mO4uIQONA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.53.5.tgz", + "integrity": "sha512-wIGYC1x/hyjP+KAu9+ewDI+fi5XSNiUi9Bvg6KGAh2TsNMA3tSEs+Sh6jJ/r4BV/bx/CyWu2ue9kDnIdRyafcQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.53.5.tgz", + "integrity": "sha512-Y+qVA0D9d0y2FRNiG9oM3Hut/DgODZbU9I8pLLPwAsU0tUKZ49cyV1tzmB/qRbSzGvY8lpgGkJuMyuhH7Ma+Vg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ 
+ "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.53.5.tgz", + "integrity": "sha512-juaC4bEgJsyFVfqhtGLz8mbopaWD+WeSOYr5E16y+1of6KQjc0BpwZLuxkClqY1i8sco+MdyoXPNiCkQou09+g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.53.5.tgz", + "integrity": "sha512-rIEC0hZ17A42iXtHX+EPJVL/CakHo+tT7W0pbzdAGuWOt2jxDFh7A/lRhsNHBcqL4T36+UiAgwO8pbmn3dE8wA==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.53.5.tgz", + "integrity": "sha512-T7l409NhUE552RcAOcmJHj3xyZ2h7vMWzcwQI0hvn5tqHh3oSoclf9WgTl+0QqffWFG8MEVZZP1/OBglKZx52Q==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.53.5.tgz", + "integrity": "sha512-7OK5/GhxbnrMcxIFoYfhV/TkknarkYC1hqUw1wU2xUN3TVRLNT5FmBv4KkheSG2xZ6IEbRAhTooTV2+R5Tk0lQ==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.53.5.tgz", + "integrity": "sha512-GwuDBE/PsXaTa76lO5eLJTyr2k8QkPipAyOrs4V/KJufHCZBJ495VCGJol35grx9xryk4V+2zd3Ri+3v7NPh+w==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.53.5.tgz", + "integrity": "sha512-IAE1Ziyr1qNfnmiQLHBURAD+eh/zH1pIeJjeShleII7Vj8kyEm2PF77o+lf3WTHDpNJcu4IXJxNO0Zluro8bOw==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.35.0.tgz", - "integrity": "sha512-Pim1T8rXOri+0HmV4CdKSGrqcBWX0d1HoPnQ0uw0bdp1aP5SdQVNBy8LjYncvnLgu3fnnCt17xjWGd4cqh8/hA==", + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.53.5.tgz", + "integrity": "sha512-Pg6E+oP7GvZ4XwgRJBuSXZjcqpIW3yCBhK4BcsANvb47qMvAbCjR6E+1a/U2WXz1JJxp9/4Dno3/iSJLcm5auw==", "cpu": [ "x64" ], @@ -887,9 +1706,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.35.0.tgz", - "integrity": "sha512-QysqXzYiDvQWfUiTm8XmJNO2zm9yC9P/2Gkrwg2dH9cxotQzunBHYr6jk4SujCTqnfGxduOmQcI7c2ryuW8XVg==", + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.53.5.tgz", + "integrity": 
"sha512-txGtluxDKTxaMDzUduGP0wdfng24y1rygUMnmlUJ88fzCCULCLn7oE5kb2+tRB+MWq1QDZT6ObT5RrR8HFRKqg==", "cpu": [ "x64" ], @@ -900,137 +1719,96 @@ "linux" ] }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.53.5.tgz", + "integrity": "sha512-3DFiLPnTxiOQV993fMc+KO8zXHTcIjgaInrqlG8zDp1TlhYl6WgrOHuJkJQ6M8zHEcntSJsUp1XFZSY8C1DYbg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.53.5.tgz", + "integrity": "sha512-nggc/wPpNTgjGg75hu+Q/3i32R00Lq1B6N1DO7MCU340MRKL3WZJMjA9U4K4gzy3dkZPXm9E1Nc81FItBVGRlA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.53.5.tgz", + "integrity": "sha512-U/54pTbdQpPLBdEzCT6NBCFAfSZMvmjr0twhnD9f4EIvlm9wy3jjQ38yQj1AGznrNO65EWQMgm/QUjuIVrYF9w==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.53.5.tgz", + "integrity": "sha512-2NqKgZSuLH9SXBBV2dWNRCZmocgSOx8OJSdpRaEcRlIfX8YrKxUT6z0F1NpvDVhOsl190UFTRh2F2WDWWCYp3A==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.53.5.tgz", + "integrity": "sha512-JRpZUhCfhZ4keB5v0fe02gQJy05GqboPOaxvjugW04RLSYYoB/9t2lx2u/tMs/Na/1NXfY8QYjgRljRpN+MjTQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@standard-schema/utils": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/@standard-schema/utils/-/utils-0.3.0.tgz", + "integrity": "sha512-e7Mew686owMaPJVNNLs55PUvgz371nKgwsc4vxE49zsODpJEnxgxRo2y/OKrqueavXgZNMDVj3DdHFlaSAeU8g==", + "license": "MIT" + }, "node_modules/@swc/core": { - "version": "1.11.8", - "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.11.8.tgz", - "integrity": "sha512-UAL+EULxrc0J73flwYHfu29mO8CONpDJiQv1QPDXsyCvDUcEhqAqUROVTgC+wtJCFFqMQdyr4stAA5/s0KSOmA==", - "dev": true, - "hasInstallScript": true, - "license": "Apache-2.0", - "dependencies": { - "@swc/counter": "^0.1.3", - "@swc/types": "^0.1.19" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/swc" - }, - "optionalDependencies": { - "@swc/core-darwin-arm64": "1.11.8", - "@swc/core-darwin-x64": "1.11.8", - "@swc/core-linux-arm-gnueabihf": "1.11.8", - "@swc/core-linux-arm64-gnu": "1.11.8", - "@swc/core-linux-arm64-musl": "1.11.8", - "@swc/core-linux-x64-gnu": "1.11.8", - "@swc/core-linux-x64-musl": "1.11.8", - "@swc/core-win32-arm64-msvc": "1.11.8", - "@swc/core-win32-ia32-msvc": "1.11.8", - "@swc/core-win32-x64-msvc": "1.11.8" - }, - "peerDependencies": { - "@swc/helpers": "*" - }, - "peerDependenciesMeta": { - "@swc/helpers": { - "optional": true 
- } - } - }, - "node_modules/@swc/core-linux-x64-gnu": { - "version": "1.11.8", - "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.11.8.tgz", - "integrity": "sha512-r72onUEIU1iJi9EUws3R28pztQ/eM3EshNpsPRBfuLwKy+qn3et55vXOyDhIjGCUph5Eg2Yn8H3h6MTxDdLd+w==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "Apache-2.0 AND MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=10" - } - }, - "node_modules/@swc/core-linux-x64-musl": { - "version": "1.11.8", - "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.11.8.tgz", - "integrity": "sha512-294k8cLpO103++f4ZUEDr3vnBeUfPitW6G0a3qeVZuoXFhFgaW7ANZIWknUc14WiLOMfMecphJAEiy9C8OeYSw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "Apache-2.0 AND MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=10" - } - }, - "node_modules/@swc/counter": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", - "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==", + "name": "@swc/wasm", + "version": "1.13.20", + "resolved": "https://registry.npmjs.org/@swc/wasm/-/wasm-1.13.20.tgz", + "integrity": "sha512-NJzN+QrbdwXeVTfTYiHkqv13zleOCQA52NXBOrwKvjxWJQecRqakjUhUP2z8lqs7eWVthko4Cilqs+VeBrwo3Q==", "dev": true, "license": "Apache-2.0" }, - "node_modules/@swc/types": { - "version": "0.1.19", - "resolved": "https://registry.npmjs.org/@swc/types/-/types-0.1.19.tgz", - "integrity": "sha512-WkAZaAfj44kh/UFdAQcrMP1I0nwRqpt27u+08LMBYMqmQfwwMofYoMh/48NGkMMRfC4ynpfwRbJuu8ErfNloeA==", + "node_modules/@swc/wasm": { + "version": "1.15.7", + "resolved": "https://registry.npmjs.org/@swc/wasm/-/wasm-1.15.7.tgz", + "integrity": "sha512-m1Cslgkp7gFIUB2ZiIUHMoUskwxOAi9uaf27inoKb7Oc8MkMjt+eNTeSyeGckkwRtMQiybKYTGGnA5imxSsedQ==", "dev": true, - "license": "Apache-2.0", - "dependencies": { - "@swc/counter": "^0.1.3" - } - }, - "node_modules/@tabler/icons": { - "version": "3.31.0", - "resolved": "https://registry.npmjs.org/@tabler/icons/-/icons-3.31.0.tgz", - "integrity": "sha512-dblAdeKY3+GA1U+Q9eziZ0ooVlZMHsE8dqP0RkwvRtEsAULoKOYaCUOcJ4oW1DjWegdxk++UAt2SlQVnmeHv+g==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/codecalm" - } - }, - "node_modules/@tabler/icons-react": { - "version": "3.31.0", - "resolved": "https://registry.npmjs.org/@tabler/icons-react/-/icons-react-3.31.0.tgz", - "integrity": "sha512-2rrCM5y/VnaVKnORpDdAua9SEGuJKVqPtWxeQ/vUVsgaUx30LDgBZph7/lterXxDY1IKR6NO//HDhWiifXTi3w==", - "license": "MIT", - "dependencies": { - "@tabler/icons": "3.31.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/codecalm" - }, - "peerDependencies": { - "react": ">= 16" - } - }, - "node_modules/@tanstack/match-sorter-utils": { - "version": "8.19.4", - "resolved": "https://registry.npmjs.org/@tanstack/match-sorter-utils/-/match-sorter-utils-8.19.4.tgz", - "integrity": "sha512-Wo1iKt2b9OT7d+YGhvEPD3DXvPv2etTusIMhMUoG7fbhmxcXCtIjJDEygy91Y2JFlwGyjqiBPRozme7UD8hoqg==", - "license": "MIT", - "dependencies": { - "remove-accents": "0.5.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/tannerlinsley" - } + "license": "Apache-2.0" }, "node_modules/@tanstack/react-table": { "version": "8.21.3", @@ -1052,23 +1830,6 @@ "react-dom": ">=16.8" } }, - "node_modules/@tanstack/react-virtual": { - "version": "3.11.2", - "resolved": 
"https://registry.npmjs.org/@tanstack/react-virtual/-/react-virtual-3.11.2.tgz", - "integrity": "sha512-OuFzMXPF4+xZgx8UzJha0AieuMihhhaWG0tCqpp6tDzlFwOmNBPYMuLOtMJ1Tr4pXLHmgjcWhG6RlknY2oNTdQ==", - "license": "MIT", - "dependencies": { - "@tanstack/virtual-core": "3.11.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/tannerlinsley" - }, - "peerDependencies": { - "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", - "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" - } - }, "node_modules/@tanstack/table-core": { "version": "8.21.3", "resolved": "https://registry.npmjs.org/@tanstack/table-core/-/table-core-8.21.3.tgz", @@ -1082,26 +1843,117 @@ "url": "https://github.com/sponsors/tannerlinsley" } }, - "node_modules/@tanstack/virtual-core": { - "version": "3.11.2", - "resolved": "https://registry.npmjs.org/@tanstack/virtual-core/-/virtual-core-3.11.2.tgz", - "integrity": "sha512-vTtpNt7mKCiZ1pwU9hfKPhpdVO2sVzFQsxoVBGtOSHxlrRRzYr8iQ2TlwbAcRYCcEiZ9ECAM8kBzH0v2+VzfKw==", + "node_modules/@testing-library/dom": { + "version": "10.4.1", + "resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-10.4.1.tgz", + "integrity": "sha512-o4PXJQidqJl82ckFaXUeoAW+XysPLauYI43Abki5hABd853iMhitooc6znOnczgbTYmEP6U6/y1ZyKAIsvMKGg==", + "dev": true, "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/tannerlinsley" + "dependencies": { + "@babel/code-frame": "^7.10.4", + "@babel/runtime": "^7.12.5", + "@types/aria-query": "^5.0.1", + "aria-query": "5.3.0", + "dom-accessibility-api": "^0.5.9", + "lz-string": "^1.5.0", + "picocolors": "1.1.1", + "pretty-format": "^27.0.2" + }, + "engines": { + "node": ">=18" } }, - "node_modules/@types/cookie": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.6.0.tgz", - "integrity": "sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA==", + "node_modules/@testing-library/jest-dom": { + "version": "6.9.1", + "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-6.9.1.tgz", + "integrity": "sha512-zIcONa+hVtVSSep9UT3jZ5rizo2BsxgyDYU7WFD5eICBE7no3881HGeb/QkGfsJs6JTkY1aQhT7rIPC7e+0nnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@adobe/css-tools": "^4.4.0", + "aria-query": "^5.0.0", + "css.escape": "^1.5.1", + "dom-accessibility-api": "^0.6.3", + "picocolors": "^1.1.1", + "redent": "^3.0.0" + }, + "engines": { + "node": ">=14", + "npm": ">=6", + "yarn": ">=1" + } + }, + "node_modules/@testing-library/jest-dom/node_modules/dom-accessibility-api": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.6.3.tgz", + "integrity": "sha512-7ZgogeTnjuHbo+ct10G9Ffp0mif17idi0IyWNVA/wcwcm7NPOD/WEHVP3n7n3MhXqxoIYm8d6MuZohYWIZ4T3w==", + "dev": true, "license": "MIT" }, + "node_modules/@testing-library/react": { + "version": "16.3.1", + "resolved": "https://registry.npmjs.org/@testing-library/react/-/react-16.3.1.tgz", + "integrity": "sha512-gr4KtAWqIOQoucWYD/f6ki+j5chXfcPc74Col/6poTyqTmn7zRmodWahWRCp8tYd+GMqBonw6hstNzqjbs6gjw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.12.5" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@testing-library/dom": "^10.0.0", + "@types/react": "^18.0.0 || ^19.0.0", + "@types/react-dom": "^18.0.0 || ^19.0.0", + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + 
}, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@testing-library/user-event": { + "version": "14.6.1", + "resolved": "https://registry.npmjs.org/@testing-library/user-event/-/user-event-14.6.1.tgz", + "integrity": "sha512-vq7fv0rnt+QTXgPxr5Hjc210p6YKq2kmdziLgnsZGgLJ9e6VAShx1pACLuRjd/AS/sr7phAR58OIIpf0LlmQNw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12", + "npm": ">=6" + }, + "peerDependencies": { + "@testing-library/dom": ">=7.21.4" + } + }, + "node_modules/@types/aria-query": { + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/@types/aria-query/-/aria-query-5.0.4.tgz", + "integrity": "sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/chai": { + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.3.tgz", + "integrity": "sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/deep-eql": "*", + "assertion-error": "^2.0.1" + } + }, "node_modules/@types/d3-array": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.1.tgz", - "integrity": "sha512-Y2Jn2idRrLzUfAKV2LyRImR+y4oa2AntrgID95SHJxuMUrkNXmanDSed71sRNZysveJVt1hLLemQZIady0FpEg==", + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.2.tgz", + "integrity": "sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw==", "license": "MIT" }, "node_modules/@types/d3-color": { @@ -1161,22 +2013,19 @@ "integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==", "license": "MIT" }, - "node_modules/@types/estree": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", - "integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==", + "node_modules/@types/deep-eql": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", + "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", "dev": true, "license": "MIT" }, - "node_modules/@types/hoist-non-react-statics": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/@types/hoist-non-react-statics/-/hoist-non-react-statics-3.3.6.tgz", - "integrity": "sha512-lPByRJUer/iN/xa4qpyL0qmL11DqNW81iU/IG1S3uvRUq4oKagz8VCxZjiWkumgt66YT3vOdDgZ0o32sGKtCEw==", - "license": "MIT", - "dependencies": { - "@types/react": "*", - "hoist-non-react-statics": "^3.3.0" - } + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" }, "node_modules/@types/json-schema": { "version": "7.0.15", @@ -1192,28 +2041,29 @@ "license": "MIT" }, "node_modules/@types/react": { - "version": "19.0.10", - "resolved": "https://registry.npmjs.org/@types/react/-/react-19.0.10.tgz", - "integrity": "sha512-JuRQ9KXLEjaUNjTWpzuR231Z2WpIwczOkBEIvbHNCzQefFIT0L8IqE6NV6ULLyC1SI/i234JnDoMkfg+RjQj2g==", + "version": "19.2.7", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.7.tgz", + "integrity": 
"sha512-MWtvHrGZLFttgeEj28VXHxpmwYbor/ATPYbBfSFZEIRK0ecCFLl2Qo55z52Hss+UV9CRN7trSeq1zbgx7YDWWg==", + "devOptional": true, "license": "MIT", "dependencies": { - "csstype": "^3.0.2" + "csstype": "^3.2.2" } }, "node_modules/@types/react-dom": { - "version": "19.0.4", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.0.4.tgz", - "integrity": "sha512-4fSQ8vWFkg+TGhePfUzVmat3eC14TXYSsiiDSLI0dVLsrm9gZFABjPy/Qu6TKgl1tq1Bu1yDsuQgY3A3DOjCcg==", + "version": "19.2.3", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.2.3.tgz", + "integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==", "dev": true, "license": "MIT", "peerDependencies": { - "@types/react": "^19.0.0" + "@types/react": "^19.2.0" } }, "node_modules/@videojs/http-streaming": { - "version": "3.17.0", - "resolved": "https://registry.npmjs.org/@videojs/http-streaming/-/http-streaming-3.17.0.tgz", - "integrity": "sha512-Ch1P3tvvIEezeZXyK11UfWgp4cWKX4vIhZ30baN/lRinqdbakZ5hiAI3pGjRy3d+q/Epyc8Csz5xMdKNNGYpcw==", + "version": "3.17.2", + "resolved": "https://registry.npmjs.org/@videojs/http-streaming/-/http-streaming-3.17.2.tgz", + "integrity": "sha512-VBQ3W4wnKnVKb/limLdtSD2rAd5cmHN70xoMf4OmuDd0t2kfJX04G+sfw6u2j8oOm2BXYM9E1f4acHruqKnM1g==", "license": "Apache-2.0", "dependencies": { "@babel/runtime": "^7.12.5", @@ -1259,31 +2109,150 @@ } }, "node_modules/@vitejs/plugin-react-swc": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@vitejs/plugin-react-swc/-/plugin-react-swc-3.8.0.tgz", - "integrity": "sha512-T4sHPvS+DIqDP51ifPqa9XIRAz/kIvIi8oXcnOZZgHmMotgmmdxe/DD5tMFlt5nuIRzT0/QuiwmKlH0503Aapw==", + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react-swc/-/plugin-react-swc-4.2.2.tgz", + "integrity": "sha512-x+rE6tsxq/gxrEJN3Nv3dIV60lFflPj94c90b+NNo6n1QV1QQUTLoL0MpaOVasUZ0zqVBn7ead1B5ecx1JAGfA==", "dev": true, "license": "MIT", "dependencies": { - "@swc/core": "^1.10.15" + "@rolldown/pluginutils": "1.0.0-beta.47", + "@swc/core": "^1.13.5" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" }, "peerDependencies": { - "vite": "^4 || ^5 || ^6" + "vite": "^4 || ^5 || ^6 || ^7" + } + }, + "node_modules/@vitest/expect": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-3.2.4.tgz", + "integrity": "sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/chai": "^5.2.2", + "@vitest/spy": "3.2.4", + "@vitest/utils": "3.2.4", + "chai": "^5.2.0", + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/mocker": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-3.2.4.tgz", + "integrity": "sha512-46ryTE9RZO/rfDd7pEqFl7etuyzekzEhUbTW3BvmeO/BcCMEgq59BKhek3dXDWgAj4oMK6OZi+vRr1wPW6qjEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/spy": "3.2.4", + "estree-walker": "^3.0.3", + "magic-string": "^0.30.17" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "msw": "^2.4.9", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" + }, + "peerDependenciesMeta": { + "msw": { + "optional": true + }, + "vite": { + "optional": true + } + } + }, + "node_modules/@vitest/pretty-format": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-3.2.4.tgz", + "integrity": 
"sha512-IVNZik8IVRJRTr9fxlitMKeJeXFFFN0JaB9PHPGQ8NKQbGpfjlTx9zO4RefN8gp7eqjNy8nyK3NZmBzOPeIxtA==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/runner": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-3.2.4.tgz", + "integrity": "sha512-oukfKT9Mk41LreEW09vt45f8wx7DordoWUZMYdY/cyAk7w5TWkTRCNZYF7sX7n2wB7jyGAl74OxgwhPgKaqDMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/utils": "3.2.4", + "pathe": "^2.0.3", + "strip-literal": "^3.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/snapshot": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-3.2.4.tgz", + "integrity": "sha512-dEYtS7qQP2CjU27QBC5oUOxLE/v5eLkGqPE0ZKEIDGMs4vKWe7IjgLOeauHsR0D5YuuycGRO5oSRXnwnmA78fQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "3.2.4", + "magic-string": "^0.30.17", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-3.2.4.tgz", + "integrity": "sha512-vAfasCOe6AIK70iP5UD11Ac4siNUNJ9i/9PZ3NKx07sG6sUxeag1LWdNrMWeKKYBLlzuK+Gn65Yd5nyL6ds+nw==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyspy": "^4.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-3.2.4.tgz", + "integrity": "sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "3.2.4", + "loupe": "^3.1.4", + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" } }, "node_modules/@xmldom/xmldom": { - "version": "0.8.10", - "resolved": "https://registry.npmjs.org/@xmldom/xmldom/-/xmldom-0.8.10.tgz", - "integrity": "sha512-2WALfTl4xo2SkGCYRt6rDTFfk9R1czmBvUQy12gK2KuRKIpWEhcbbzy8EZXtz/jkRqHX8bFEc6FC1HjX4TUWYw==", + "version": "0.8.11", + "resolved": "https://registry.npmjs.org/@xmldom/xmldom/-/xmldom-0.8.11.tgz", + "integrity": "sha512-cQzWCtO6C8TQiYl1ruKNn2U6Ao4o4WBBcbL61yJl84x+j5sOWWFU9X7DpND8XZG3daDppSsigMdfAIl2upQBRw==", "license": "MIT", "engines": { "node": ">=10.0.0" } }, "node_modules/acorn": { - "version": "8.14.1", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.1.tgz", - "integrity": "sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg==", + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", "dev": true, "license": "MIT", "bin": { @@ -1315,6 +2284,16 @@ "pkcs7": "^1.0.4" } }, + "node_modules/agent-base": { + "version": "7.1.4", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", + "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, "node_modules/ajv": { "version": "6.12.6", "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", @@ -1333,9 +2312,9 @@ } }, "node_modules/allotment": { - 
"version": "1.20.3", - "resolved": "https://registry.npmjs.org/allotment/-/allotment-1.20.3.tgz", - "integrity": "sha512-JCnklt7j0OsyDjD7A9AdT6wqJ3FSoo1ASV6w02Am02lo6NwO25yhG1DcWW8ueBV38ppXQmvrXBXuzX7iVkq6Tw==", + "version": "1.20.4", + "resolved": "https://registry.npmjs.org/allotment/-/allotment-1.20.4.tgz", + "integrity": "sha512-LMM5Xe5nLePFOLAlW/5k3ARqznYGUyNekV4xJrfDKn1jimW3nlZE6hT/Tu0T8s0VgAkr9s2P7+uM0WvJKn5DAw==", "license": "MIT", "dependencies": { "classnames": "^2.3.0", @@ -1363,6 +2342,16 @@ "react-dom": "16.8.0 - 18" } }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/ansi-styles": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", @@ -1386,21 +2375,33 @@ "dev": true, "license": "Python-2.0" }, - "node_modules/asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", - "license": "MIT" - }, - "node_modules/axios": { - "version": "1.8.3", - "resolved": "https://registry.npmjs.org/axios/-/axios-1.8.3.tgz", - "integrity": "sha512-iP4DebzoNlP/YN2dpwCgb8zoCmhtkajzS48JvwmkSkXvPI3DHc7m+XYL5tGnSlJtR6nImXZmdCuN5aP8dh1d8A==", - "license": "MIT", + "node_modules/aria-query": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.0.tgz", + "integrity": "sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==", + "dev": true, + "license": "Apache-2.0", "dependencies": { - "follow-redirects": "^1.15.6", - "form-data": "^4.0.0", - "proxy-from-env": "^1.1.0" + "dequal": "^2.0.3" + } + }, + "node_modules/assertion-error": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/attr-accept": { + "version": "2.2.5", + "resolved": "https://registry.npmjs.org/attr-accept/-/attr-accept-2.2.5.tgz", + "integrity": "sha512-0bDNnY/u6pPwHDMoF0FieU354oBi0a8rD9FcsLwzcGWbc8KS8KPIi7y+s13OlVY+gMWc/9xEMUgNE6Qm8ZllYQ==", + "license": "MIT", + "engines": { + "node": ">=4" } }, "node_modules/babel-plugin-macros": { @@ -1425,10 +2426,20 @@ "dev": true, "license": "MIT" }, + "node_modules/bidi-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/bidi-js/-/bidi-js-1.0.3.tgz", + "integrity": "sha512-RKshQI1R3YQ+n9YJz2QQ147P66ELpa1FQEg20Dk8oW9t2KgLbpDLLp9aGZ7y8WHSshDknG0bknqGw5/tyCs5tw==", + "dev": true, + "license": "MIT", + "dependencies": { + "require-from-string": "^2.0.2" + } + }, "node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", "dev": 
true, "license": "MIT", "dependencies": { @@ -1436,17 +2447,14 @@ "concat-map": "0.0.1" } }, - "node_modules/call-bind-apply-helpers": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", - "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "node_modules/cac": { + "version": "6.7.14", + "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", + "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", + "dev": true, "license": "MIT", - "dependencies": { - "es-errors": "^1.3.0", - "function-bind": "^1.1.2" - }, "engines": { - "node": ">= 0.4" + "node": ">=8" } }, "node_modules/callsites": { @@ -1458,6 +2466,23 @@ "node": ">=6" } }, + "node_modules/chai": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/chai/-/chai-5.3.3.tgz", + "integrity": "sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==", + "dev": true, + "license": "MIT", + "dependencies": { + "assertion-error": "^2.0.1", + "check-error": "^2.1.1", + "deep-eql": "^5.0.1", + "loupe": "^3.1.0", + "pathval": "^2.0.0" + }, + "engines": { + "node": ">=18" + } + }, "node_modules/chalk": { "version": "4.1.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", @@ -1475,6 +2500,16 @@ "url": "https://github.com/chalk/chalk?sponsor=1" } }, + "node_modules/check-error": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.1.tgz", + "integrity": "sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 16" + } + }, "node_modules/classnames": { "version": "2.5.1", "resolved": "https://registry.npmjs.org/classnames/-/classnames-2.5.1.tgz", @@ -1510,18 +2545,6 @@ "dev": true, "license": "MIT" }, - "node_modules/combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "license": "MIT", - "dependencies": { - "delayed-stream": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, "node_modules/concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", @@ -1536,12 +2559,16 @@ "license": "MIT" }, "node_modules/cookie": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.0.2.tgz", - "integrity": "sha512-9Kr/j4O16ISv8zBBhJoi4bXOYNTkFLOqSL3UDB0njXxCXNezjeyVrJyGOWtgfs/q2km1gwBcfH8q1yEGoMYunA==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.1.1.tgz", + "integrity": "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ==", "license": "MIT", "engines": { "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" } }, "node_modules/cosmiconfig": { @@ -1584,10 +2611,46 @@ "node": ">= 8" } }, + "node_modules/css-tree": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-3.1.0.tgz", + "integrity": "sha512-0eW44TGN5SQXU1mWSkKwFstI/22X2bG1nYzZTYMAWjylYURhse752YgbE4Cx46AC+bAvI+/dYTPRk1LqSUnu6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "mdn-data": "2.12.2", + "source-map-js": "^1.0.1" + }, + "engines": { + 
"node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0" + } + }, + "node_modules/css.escape": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/css.escape/-/css.escape-1.5.1.tgz", + "integrity": "sha512-YUifsXXuknHlUsmlgyY0PKzgPOr7/FjCePfHNt0jxm83wHZi44VDMQ7/fGNkjY3/jV1MC+1CmZbaHzugyeRtpg==", + "dev": true, + "license": "MIT" + }, + "node_modules/cssstyle": { + "version": "5.3.5", + "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-5.3.5.tgz", + "integrity": "sha512-GlsEptulso7Jg0VaOZ8BXQi3AkYM5BOJKEO/rjMidSCq70FkIC5y0eawrCXeYzxgt3OCf4Ls+eoxN+/05vN0Ag==", + "dev": true, + "license": "MIT", + "dependencies": { + "@asamuzakjp/css-color": "^4.1.1", + "@csstools/css-syntax-patches-for-csstree": "^1.0.21", + "css-tree": "^3.1.0" + }, + "engines": { + "node": ">=20" + } + }, "node_modules/csstype": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", - "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==", + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", "license": "MIT" }, "node_modules/d3-array": { @@ -1711,16 +2774,30 @@ "node": ">=12" } }, + "node_modules/data-urls": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-6.0.0.tgz", + "integrity": "sha512-BnBS08aLUM+DKamupXs3w2tJJoqU+AkaE/+6vQxi/G/DPmIZFJJp9Dkb1kM03AZx8ADehDUZgsNxju3mPXZYIA==", + "dev": true, + "license": "MIT", + "dependencies": { + "whatwg-mimetype": "^4.0.0", + "whatwg-url": "^15.0.0" + }, + "engines": { + "node": ">=20" + } + }, "node_modules/dayjs": { - "version": "1.11.13", - "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.13.tgz", - "integrity": "sha512-oaMBel6gjolK862uaPQOVTA7q3TZhuSvuMQAAglQDOWYO9A91IrAOUJEyKVlqJlHE0vq5p5UXxzdPfMH/x6xNg==", + "version": "1.11.19", + "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.19.tgz", + "integrity": "sha512-t5EcLVS6QPBNqM2z8fakk/NKel+Xzshgt8FFKAn+qwlD1pzZWxh0nVCrvFK7ZDb6XucZeF9z8C7CBWTRIVApAw==", "license": "MIT" }, "node_modules/debug": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", - "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", "license": "MIT", "dependencies": { "ms": "^2.1.3" @@ -1734,12 +2811,29 @@ } } }, + "node_modules/decimal.js": { + "version": "10.6.0", + "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.6.0.tgz", + "integrity": "sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg==", + "dev": true, + "license": "MIT" + }, "node_modules/decimal.js-light": { "version": "2.5.1", "resolved": "https://registry.npmjs.org/decimal.js-light/-/decimal.js-light-2.5.1.tgz", "integrity": "sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg==", "license": "MIT" }, + "node_modules/deep-eql": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", + "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", + "dev": true, + "license": "MIT", + "engines": 
{ + "node": ">=6" + } + }, "node_modules/deep-is": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", @@ -1747,22 +2841,14 @@ "dev": true, "license": "MIT" }, - "node_modules/deepmerge": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-2.2.1.tgz", - "integrity": "sha512-R9hc1Xa/NOBi9WRVUWg19rl1UB7Tt4kuPd+thNJgFZoxXsTz7ncaPaeIm+40oSGuP33DfMb4sZt1QIGiJzC4EA==", + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "dev": true, "license": "MIT", "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", - "license": "MIT", - "engines": { - "node": ">=0.4.0" + "node": ">=6" } }, "node_modules/detect-node-es": { @@ -1771,6 +2857,13 @@ "integrity": "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==", "license": "MIT" }, + "node_modules/dom-accessibility-api": { + "version": "0.5.16", + "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.5.16.tgz", + "integrity": "sha512-X7BJ2yElsnOJ30pZF4uIIDfBEVgF4XEBxL9Bxhy6dnrm5hkzqmsWHGTiHqRiITNhMyFLyAiWndIJP7Z1NTteDg==", + "dev": true, + "license": "MIT" + }, "node_modules/dom-helpers": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/dom-helpers/-/dom-helpers-5.2.1.tgz", @@ -1786,73 +2879,34 @@ "resolved": "https://registry.npmjs.org/dom-walk/-/dom-walk-0.1.2.tgz", "integrity": "sha512-6QvTW9mrGeIegrFXdtQi9pk7O/nSK6lSdXW2eqUspN5LWD7UTji2Fqw5V2YLjBpHEoU9Xl/eUWNpDeZvoyOv2w==" }, - "node_modules/dunder-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", - "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", - "license": "MIT", - "dependencies": { - "call-bind-apply-helpers": "^1.0.1", - "es-errors": "^1.3.0", - "gopd": "^1.2.0" - }, + "node_modules/entities": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz", + "integrity": "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==", + "dev": true, + "license": "BSD-2-Clause", "engines": { - "node": ">= 0.4" + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" } }, "node_modules/error-ex": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", - "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.4.tgz", + "integrity": "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==", "license": "MIT", "dependencies": { "is-arrayish": "^0.2.1" } }, - "node_modules/es-define-property": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", - "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", - "license": "MIT", - "engines": { - "node": ">= 
0.4" - } - }, - "node_modules/es-errors": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", - "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-object-atoms": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", - "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", - "license": "MIT", - "dependencies": { - "es-errors": "^1.3.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-set-tostringtag": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", - "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", - "license": "MIT", - "dependencies": { - "es-errors": "^1.3.0", - "get-intrinsic": "^1.2.6", - "has-tostringtag": "^1.0.2", - "hasown": "^2.0.2" - }, - "engines": { - "node": ">= 0.4" - } + "node_modules/es-module-lexer": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", + "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", + "dev": true, + "license": "MIT" }, "node_modules/es6-promise": { "version": "4.2.8", @@ -1861,9 +2915,9 @@ "license": "MIT" }, "node_modules/esbuild": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.1.tgz", - "integrity": "sha512-BGO5LtrGC7vxnqucAe/rmvKdJllfGaYWdyABvyMoXQlfYMb2bbRuReWR5tEGE//4LcNJj9XrkovTqNYRFZHAMQ==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.2.tgz", + "integrity": "sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw==", "dev": true, "hasInstallScript": true, "license": "MIT", @@ -1874,31 +2928,32 @@ "node": ">=18" }, "optionalDependencies": { - "@esbuild/aix-ppc64": "0.25.1", - "@esbuild/android-arm": "0.25.1", - "@esbuild/android-arm64": "0.25.1", - "@esbuild/android-x64": "0.25.1", - "@esbuild/darwin-arm64": "0.25.1", - "@esbuild/darwin-x64": "0.25.1", - "@esbuild/freebsd-arm64": "0.25.1", - "@esbuild/freebsd-x64": "0.25.1", - "@esbuild/linux-arm": "0.25.1", - "@esbuild/linux-arm64": "0.25.1", - "@esbuild/linux-ia32": "0.25.1", - "@esbuild/linux-loong64": "0.25.1", - "@esbuild/linux-mips64el": "0.25.1", - "@esbuild/linux-ppc64": "0.25.1", - "@esbuild/linux-riscv64": "0.25.1", - "@esbuild/linux-s390x": "0.25.1", - "@esbuild/linux-x64": "0.25.1", - "@esbuild/netbsd-arm64": "0.25.1", - "@esbuild/netbsd-x64": "0.25.1", - "@esbuild/openbsd-arm64": "0.25.1", - "@esbuild/openbsd-x64": "0.25.1", - "@esbuild/sunos-x64": "0.25.1", - "@esbuild/win32-arm64": "0.25.1", - "@esbuild/win32-ia32": "0.25.1", - "@esbuild/win32-x64": "0.25.1" + "@esbuild/aix-ppc64": "0.27.2", + "@esbuild/android-arm": "0.27.2", + "@esbuild/android-arm64": "0.27.2", + "@esbuild/android-x64": "0.27.2", + "@esbuild/darwin-arm64": "0.27.2", + "@esbuild/darwin-x64": "0.27.2", + "@esbuild/freebsd-arm64": "0.27.2", + "@esbuild/freebsd-x64": "0.27.2", + "@esbuild/linux-arm": "0.27.2", + "@esbuild/linux-arm64": "0.27.2", + "@esbuild/linux-ia32": "0.27.2", + "@esbuild/linux-loong64": "0.27.2", + "@esbuild/linux-mips64el": "0.27.2", + "@esbuild/linux-ppc64": "0.27.2", + "@esbuild/linux-riscv64": "0.27.2", 
+ "@esbuild/linux-s390x": "0.27.2", + "@esbuild/linux-x64": "0.27.2", + "@esbuild/netbsd-arm64": "0.27.2", + "@esbuild/netbsd-x64": "0.27.2", + "@esbuild/openbsd-arm64": "0.27.2", + "@esbuild/openbsd-x64": "0.27.2", + "@esbuild/openharmony-arm64": "0.27.2", + "@esbuild/sunos-x64": "0.27.2", + "@esbuild/win32-arm64": "0.27.2", + "@esbuild/win32-ia32": "0.27.2", + "@esbuild/win32-x64": "0.27.2" } }, "node_modules/escape-string-regexp": { @@ -1914,33 +2969,32 @@ } }, "node_modules/eslint": { - "version": "9.22.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.22.0.tgz", - "integrity": "sha512-9V/QURhsRN40xuHXWjV64yvrzMjcz7ZyNoF2jJFmy9j/SLk0u1OLSZgXi28MrXjymnjEGSR80WCdab3RGMDveQ==", + "version": "9.39.2", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.39.2.tgz", + "integrity": "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==", "dev": true, "license": "MIT", "dependencies": { - "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/eslint-utils": "^4.8.0", "@eslint-community/regexpp": "^4.12.1", - "@eslint/config-array": "^0.19.2", - "@eslint/config-helpers": "^0.1.0", - "@eslint/core": "^0.12.0", - "@eslint/eslintrc": "^3.3.0", - "@eslint/js": "9.22.0", - "@eslint/plugin-kit": "^0.2.7", + "@eslint/config-array": "^0.21.1", + "@eslint/config-helpers": "^0.4.2", + "@eslint/core": "^0.17.0", + "@eslint/eslintrc": "^3.3.1", + "@eslint/js": "9.39.2", + "@eslint/plugin-kit": "^0.4.1", "@humanfs/node": "^0.16.6", "@humanwhocodes/module-importer": "^1.0.1", "@humanwhocodes/retry": "^0.4.2", "@types/estree": "^1.0.6", - "@types/json-schema": "^7.0.15", "ajv": "^6.12.4", "chalk": "^4.0.0", "cross-spawn": "^7.0.6", "debug": "^4.3.2", "escape-string-regexp": "^4.0.0", - "eslint-scope": "^8.3.0", - "eslint-visitor-keys": "^4.2.0", - "espree": "^10.3.0", + "eslint-scope": "^8.4.0", + "eslint-visitor-keys": "^4.2.1", + "espree": "^10.4.0", "esquery": "^1.5.0", "esutils": "^2.0.2", "fast-deep-equal": "^3.1.3", @@ -1988,9 +3042,9 @@ } }, "node_modules/eslint-plugin-react-refresh": { - "version": "0.4.19", - "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.19.tgz", - "integrity": "sha512-eyy8pcr/YxSYjBoqIFSrlbn9i/xvxUFa8CjzAYo9cFjgGXqq1hyjihcpZvxRLalpaWmueWR81xn7vuKmAFijDQ==", + "version": "0.4.26", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.26.tgz", + "integrity": "sha512-1RETEylht2O6FM/MvgnyvT+8K21wLqDNg4qD51Zj3guhjt433XbnnkVttHMyaVyAFD03QSV4LPS5iE3VQmO7XQ==", "dev": true, "license": "MIT", "peerDependencies": { @@ -1998,9 +3052,9 @@ } }, "node_modules/eslint-scope": { - "version": "8.3.0", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.3.0.tgz", - "integrity": "sha512-pUNxi75F8MJ/GdeKtVLSbYg4ZI34J6C0C7sbL4YOp2exGwen7ZsuBqKzUhXd0qMQ362yET3z+uPwKeg/0C2XCQ==", + "version": "8.4.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz", + "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==", "dev": true, "license": "BSD-2-Clause", "dependencies": { @@ -2015,9 +3069,9 @@ } }, "node_modules/eslint-visitor-keys": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz", - "integrity": "sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw==", + "version": "4.2.1", + "resolved": 
"https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", "dev": true, "license": "Apache-2.0", "engines": { @@ -2028,15 +3082,15 @@ } }, "node_modules/espree": { - "version": "10.3.0", - "resolved": "https://registry.npmjs.org/espree/-/espree-10.3.0.tgz", - "integrity": "sha512-0QYC8b24HWY8zjRnDTL6RiHfDbAWn63qb4LMj1Z4b076A4une81+z03Kg7l7mn/48PUTqoLptSXez8oknU8Clg==", + "version": "10.4.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz", + "integrity": "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==", "dev": true, "license": "BSD-2-Clause", "dependencies": { - "acorn": "^8.14.0", + "acorn": "^8.15.0", "acorn-jsx": "^5.3.2", - "eslint-visitor-keys": "^4.2.0" + "eslint-visitor-keys": "^4.2.1" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -2081,6 +3135,16 @@ "node": ">=4.0" } }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, "node_modules/esutils": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", @@ -2097,6 +3161,16 @@ "integrity": "sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA==", "license": "MIT" }, + "node_modules/expect-type": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.3.0.tgz", + "integrity": "sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.0.0" + } + }, "node_modules/fast-deep-equal": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", @@ -2104,9 +3178,9 @@ "license": "MIT" }, "node_modules/fast-equals": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/fast-equals/-/fast-equals-5.2.2.tgz", - "integrity": "sha512-V7/RktU11J3I36Nwq2JnZEM7tNm17eBJz+u25qdxBZeCKiX6BkVSZQjwWIr+IobgnZy+ag73tTZgZi7tr0LrBw==", + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/fast-equals/-/fast-equals-5.4.0.tgz", + "integrity": "sha512-jt2DW/aNFNwke7AUd+Z+e6pz39KO5rzdbbFCg2sGafS4mk13MI7Z8O5z9cADNn5lhGODIgLwug6TZO2ctf7kcw==", "license": "MIT", "engines": { "node": ">=6.0.0" @@ -2126,6 +3200,24 @@ "dev": true, "license": "MIT" }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, "node_modules/file-entry-cache": { "version": "8.0.0", "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", @@ -2139,6 +3231,18 @@ "node": ">=16.0.0" } }, + "node_modules/file-selector": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/file-selector/-/file-selector-2.1.2.tgz", + "integrity": 
"sha512-QgXo+mXTe8ljeqUFaX3QVHc5osSItJ/Km+xpocx0aSqWGMSCf6qYs/VnzZgS864Pjn5iceMRFigeAV7AfTlaig==", + "license": "MIT", + "dependencies": { + "tslib": "^2.7.0" + }, + "engines": { + "node": ">= 12" + } + }, "node_modules/find-root": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/find-root/-/find-root-1.1.0.tgz", @@ -2183,64 +3287,19 @@ "dev": true, "license": "ISC" }, - "node_modules/follow-redirects": { - "version": "1.15.9", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.9.tgz", - "integrity": "sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==", - "funding": [ - { - "type": "individual", - "url": "https://github.com/sponsors/RubenVerborgh" - } - ], + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, "license": "MIT", - "engines": { - "node": ">=4.0" - }, - "peerDependenciesMeta": { - "debug": { - "optional": true - } - } - }, - "node_modules/form-data": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.2.tgz", - "integrity": "sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w==", - "license": "MIT", - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.8", - "es-set-tostringtag": "^2.1.0", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/formik": { - "version": "2.4.6", - "resolved": "https://registry.npmjs.org/formik/-/formik-2.4.6.tgz", - "integrity": "sha512-A+2EI7U7aG296q2TLGvNapDNTZp1khVt5Vk0Q/fyfSROss0V/V6+txt2aJnwEos44IxTCW/LYAi/zgWzlevj+g==", - "funding": [ - { - "type": "individual", - "url": "https://opencollective.com/formik" - } + "optional": true, + "os": [ + "darwin" ], - "license": "Apache-2.0", - "dependencies": { - "@types/hoist-non-react-statics": "^3.3.1", - "deepmerge": "^2.1.1", - "hoist-non-react-statics": "^3.3.0", - "lodash": "^4.17.21", - "lodash-es": "^4.17.21", - "react-fast-compare": "^2.0.1", - "tiny-warning": "^1.0.2", - "tslib": "^2.0.0" - }, - "peerDependencies": { - "react": ">=16.8.0" + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" } }, "node_modules/function-bind": { @@ -2252,30 +3311,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/get-intrinsic": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", - "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", - "license": "MIT", - "dependencies": { - "call-bind-apply-helpers": "^1.0.2", - "es-define-property": "^1.0.1", - "es-errors": "^1.3.0", - "es-object-atoms": "^1.1.1", - "function-bind": "^1.1.2", - "get-proto": "^1.0.1", - "gopd": "^1.2.0", - "has-symbols": "^1.1.0", - "hasown": "^2.0.2", - "math-intrinsics": "^1.1.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/get-nonce": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/get-nonce/-/get-nonce-1.0.1.tgz", @@ -2285,19 +3320,6 @@ "node": ">=6" } }, - "node_modules/get-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", - "integrity": 
"sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", - "license": "MIT", - "dependencies": { - "dunder-proto": "^1.0.1", - "es-object-atoms": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - } - }, "node_modules/glob-parent": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", @@ -2334,18 +3356,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/gopd": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", - "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/has-flag": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", @@ -2356,33 +3366,6 @@ "node": ">=8" } }, - "node_modules/has-symbols": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", - "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-tostringtag": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", - "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", - "license": "MIT", - "dependencies": { - "has-symbols": "^1.0.3" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/hasown": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", @@ -2396,9 +3379,9 @@ } }, "node_modules/hls.js": { - "version": "1.5.20", - "resolved": "https://registry.npmjs.org/hls.js/-/hls.js-1.5.20.tgz", - "integrity": "sha512-uu0VXUK52JhihhnN/MVVo1lvqNNuhoxkonqgO3IpjvQiGpJBdIXMGkofjQb/j9zvV7a1SW8U9g1FslWx/1HOiQ==", + "version": "1.6.15", + "resolved": "https://registry.npmjs.org/hls.js/-/hls.js-1.6.15.tgz", + "integrity": "sha512-E3a5VwgXimGHwpRGV+WxRTKeSp2DW5DI5MWv34ulL3t5UNmyJWCQ1KmLEHbYzcfThfXG8amBL+fCYPneGHC4VA==", "license": "Apache-2.0" }, "node_modules/hoist-non-react-statics": { @@ -2410,11 +3393,59 @@ "react-is": "^16.7.0" } }, - "node_modules/hoist-non-react-statics/node_modules/react-is": { - "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", - "license": "MIT" + "node_modules/html-encoding-sniffer": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-4.0.0.tgz", + "integrity": "sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "whatwg-encoding": "^3.1.1" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/http-proxy-agent": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", + "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.0", + "debug": 
"^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/https-proxy-agent": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "dev": true, + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } }, "node_modules/ignore": { "version": "5.3.2", @@ -2452,6 +3483,16 @@ "node": ">=0.8.19" } }, + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/internmap": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz", @@ -2511,6 +3552,13 @@ "node": ">=0.10.0" } }, + "node_modules/is-potential-custom-element-name": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz", + "integrity": "sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==", + "dev": true, + "license": "MIT" + }, "node_modules/isexe": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", @@ -2525,9 +3573,9 @@ "license": "MIT" }, "node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, "license": "MIT", "dependencies": { @@ -2537,6 +3585,46 @@ "js-yaml": "bin/js-yaml.js" } }, + "node_modules/jsdom": { + "version": "27.3.0", + "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-27.3.0.tgz", + "integrity": "sha512-GtldT42B8+jefDUC4yUKAvsaOrH7PDHmZxZXNgF2xMmymjUbRYJvpAybZAKEmXDGTM0mCsz8duOa4vTm5AY2Kg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@acemir/cssom": "^0.9.28", + "@asamuzakjp/dom-selector": "^6.7.6", + "cssstyle": "^5.3.4", + "data-urls": "^6.0.0", + "decimal.js": "^10.6.0", + "html-encoding-sniffer": "^4.0.0", + "http-proxy-agent": "^7.0.2", + "https-proxy-agent": "^7.0.6", + "is-potential-custom-element-name": "^1.0.1", + "parse5": "^8.0.0", + "saxes": "^6.0.0", + "symbol-tree": "^3.2.4", + "tough-cookie": "^6.0.0", + "w3c-xmlserializer": "^5.0.0", + "webidl-conversions": "^8.0.0", + "whatwg-encoding": "^3.1.1", + "whatwg-mimetype": "^4.0.0", + "whatwg-url": "^15.1.0", + "ws": "^8.18.3", + "xml-name-validator": "^5.0.0" + }, + "engines": { + "node": "^20.19.0 || ^22.12.0 || >=24.0.0" + }, + "peerDependencies": { + "canvas": "^3.0.0" + }, + "peerDependenciesMeta": { + "canvas": { + "optional": 
true + } + } + }, "node_modules/jsesc": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", @@ -2637,12 +3725,6 @@ "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", "license": "MIT" }, - "node_modules/lodash-es": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz", - "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==", - "license": "MIT" - }, "node_modules/lodash.clamp": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/lodash.clamp/-/lodash.clamp-4.0.3.tgz", @@ -2681,15 +3763,42 @@ "loose-envify": "cli.js" } }, + "node_modules/loupe": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.2.1.tgz", + "integrity": "sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/lru-cache": { + "version": "11.2.4", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.4.tgz", + "integrity": "sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": "20 || >=22" + } + }, "node_modules/lucide-react": { - "version": "0.479.0", - "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.479.0.tgz", - "integrity": "sha512-aBhNnveRhorBOK7uA4gDjgaf+YlHMdMhQ/3cupk6exM10hWlEU+2QtWYOfhXhjAsmdb6LeKR+NZnow4UxRRiTQ==", + "version": "0.511.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.511.0.tgz", + "integrity": "sha512-VK5a2ydJ7xm8GvBeKLS9mu1pVK6ucef9780JVUjw6bAjJL/QXnd4Y0p7SPeOUMC27YhzNCZvm5d/QX0Tp3rc0w==", "license": "ISC", "peerDependencies": { "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, + "node_modules/lz-string": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/lz-string/-/lz-string-1.5.0.tgz", + "integrity": "sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==", + "dev": true, + "license": "MIT", + "bin": { + "lz-string": "bin/bin.js" + } + }, "node_modules/m3u8-parser": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/m3u8-parser/-/m3u8-parser-7.2.0.tgz", @@ -2701,75 +3810,22 @@ "global": "^4.4.0" } }, - "node_modules/mantine-react-table": { - "version": "2.0.0-beta.9", - "resolved": "https://registry.npmjs.org/mantine-react-table/-/mantine-react-table-2.0.0-beta.9.tgz", - "integrity": "sha512-ZdfcwebWaPERoDvAuk43VYcBCzamohARVclnbuepT0PHZ0wRcDPMBR+zgaocL+pFy8EXUGwvWTOKNh25ITpjNQ==", + "node_modules/magic-string": { + "version": "0.30.21", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", + "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", + "dev": true, "license": "MIT", "dependencies": { - "@tanstack/match-sorter-utils": "8.19.4", - "@tanstack/react-table": "8.20.5", - "@tanstack/react-virtual": "3.11.2" - }, - "engines": { - "node": ">=16" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/kevinvandy" - }, - "peerDependencies": { - "@mantine/core": "^7.9", - "@mantine/dates": "^7.9", - "@mantine/hooks": "^7.9", - "@tabler/icons-react": ">=2.23.0", - "clsx": ">=2", - "dayjs": ">=1.11", - "react": ">=18.0", - "react-dom": ">=18.0" + "@jridgewell/sourcemap-codec": "^1.5.5" } }, - 
"node_modules/mantine-react-table/node_modules/@tanstack/react-table": { - "version": "8.20.5", - "resolved": "https://registry.npmjs.org/@tanstack/react-table/-/react-table-8.20.5.tgz", - "integrity": "sha512-WEHopKw3znbUZ61s9i0+i9g8drmDo6asTWbrQh8Us63DAk/M0FkmIqERew6P71HI75ksZ2Pxyuf4vvKh9rAkiA==", - "license": "MIT", - "dependencies": { - "@tanstack/table-core": "8.20.5" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/tannerlinsley" - }, - "peerDependencies": { - "react": ">=16.8", - "react-dom": ">=16.8" - } - }, - "node_modules/mantine-react-table/node_modules/@tanstack/table-core": { - "version": "8.20.5", - "resolved": "https://registry.npmjs.org/@tanstack/table-core/-/table-core-8.20.5.tgz", - "integrity": "sha512-P9dF7XbibHph2PFRz8gfBKEXEY/HJPOhym8CHmjF8y3q5mWpKx9xtZapXQUWCgkqvsK0R46Azuz+VaxD4Xl+Tg==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/tannerlinsley" - } - }, - "node_modules/math-intrinsics": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", - "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - } + "node_modules/mdn-data": { + "version": "2.12.2", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.12.2.tgz", + "integrity": "sha512-IEn+pegP1aManZuckezWCO+XZQDplx1366JoVhTpMpBB1sPey/SbveZQUosKiKiGYjg1wH4pMlNgXbCiYgihQA==", + "dev": true, + "license": "CC0-1.0" }, "node_modules/memoize-one": { "version": "5.2.1", @@ -2777,35 +3833,25 @@ "integrity": "sha512-zYiwtZUcYyXKo/np96AGZAckk+FWWsUdJ3cHGGmld7+AhvcWmQyGCYUh1hc4Q/pkOhb65dQR/pqCyK0cOaHz4Q==", "license": "MIT" }, - "node_modules/mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "license": "MIT", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, "node_modules/min-document": { - "version": "2.19.0", - "resolved": "https://registry.npmjs.org/min-document/-/min-document-2.19.0.tgz", - "integrity": "sha512-9Wy1B3m3f66bPPmU5hdA4DR4PB2OfDU/+GS3yAB7IQozE3tqXaVv2zOjgla7MEGSRv95+ILmOuvhLkOK6wJtCQ==", + "version": "2.19.2", + "resolved": "https://registry.npmjs.org/min-document/-/min-document-2.19.2.tgz", + "integrity": "sha512-8S5I8db/uZN8r9HSLFVWPdJCvYOejMcEC82VIzNUc6Zkklf/d1gg2psfE79/vyhWOj4+J8MtwmoOz3TmvaGu5A==", + "license": "MIT", "dependencies": { "dom-walk": "^0.1.0" } }, + "node_modules/min-indent": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz", + "integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, "node_modules/minimatch": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", @@ -2868,9 +3914,9 @@ } }, "node_modules/nanoid": { - "version": 
"3.3.9", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.9.tgz", - "integrity": "sha512-SppoicMGpZvbF1l3z4x7No3OlIjP7QJvC9XR7AhZr1kL133KHnKPztkKDc+Ir4aJ/1VhTySrtKhrsycmrMQfvg==", + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", "dev": true, "funding": [ { @@ -2982,6 +4028,19 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/parse5": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-8.0.0.tgz", + "integrity": "sha512-9m4m5GSgXjL4AjumKzq1Fgfp3Z8rsvjRNbnkVwfu2ImRqE5D0LnY2QfDen18FSY9C573YU5XxSapdHZTZ2WolA==", + "dev": true, + "license": "MIT", + "dependencies": { + "entities": "^6.0.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, "node_modules/path-exists": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", @@ -3017,12 +4076,42 @@ "node": ">=8" } }, + "node_modules/pathe": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", + "dev": true, + "license": "MIT" + }, + "node_modules/pathval": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.1.tgz", + "integrity": "sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14.16" + } + }, "node_modules/picocolors": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", "license": "ISC" }, + "node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, "node_modules/pkcs7": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/pkcs7/-/pkcs7-1.0.4.tgz", @@ -3036,9 +4125,9 @@ } }, "node_modules/postcss": { - "version": "8.5.3", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.3.tgz", - "integrity": "sha512-dle9A3yYxlBSrt8Fu+IpjGT8SY8hN0mlaA6GY8t0P5PjIOZemULz/E2Bnm/2dcUOena75OTNkHI76uZBNUUq3A==", + "version": "8.5.6", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", + "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", "dev": true, "funding": [ { @@ -3056,7 +4145,7 @@ ], "license": "MIT", "dependencies": { - "nanoid": "^3.3.8", + "nanoid": "^3.3.11", "picocolors": "^1.1.1", "source-map-js": "^1.2.1" }, @@ -3075,9 +4164,10 @@ } }, "node_modules/prettier": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.5.3.tgz", - "integrity": "sha512-QQtaxnoDJeAkDvDKWCLiwIXkTgRhwYDEQCghU9Z6q03iyek/rxRh/2lC3HB7P8sWT2xC/y5JDctPLBIGzHKbhw==", + "version": "3.7.4", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.7.4.tgz", + "integrity": "sha512-v6UNi1+3hSlVvv8fSaoUbggEM5VErKmmpGA7Pl3HF8V6uKY7rvClBOJlH6yNwQtfTueNkGVpOv/mtWL9L4bgRA==", + 
"dev": true, "license": "MIT", "bin": { "prettier": "bin/prettier.cjs" @@ -3089,6 +4179,41 @@ "url": "https://github.com/prettier/prettier?sponsor=1" } }, + "node_modules/pretty-format": { + "version": "27.5.1", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-27.5.1.tgz", + "integrity": "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1", + "ansi-styles": "^5.0.0", + "react-is": "^17.0.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/pretty-format/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/pretty-format/node_modules/react-is": { + "version": "17.0.2", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", + "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==", + "dev": true, + "license": "MIT" + }, "node_modules/process": { "version": "0.11.10", "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", @@ -3109,24 +4234,12 @@ "react-is": "^16.13.1" } }, - "node_modules/prop-types/node_modules/react-is": { - "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", - "license": "MIT" - }, "node_modules/property-expr": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/property-expr/-/property-expr-2.0.6.tgz", "integrity": "sha512-SVtmxhRE/CGkn3eZY1T6pC8Nln6Fr/lu1mKSgRud0eC73whjGfoAogbn78LkD8aFL0zz3bAFerKSnOl7NlErBA==", "license": "MIT" }, - "node_modules/proxy-from-env": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", - "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", - "license": "MIT" - }, "node_modules/punycode": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", @@ -3138,33 +4251,33 @@ } }, "node_modules/react": { - "version": "19.0.0", - "resolved": "https://registry.npmjs.org/react/-/react-19.0.0.tgz", - "integrity": "sha512-V8AVnmPIICiWpGfm6GLzCR/W5FXLchHop40W4nXBmdlEceh16rCN8O8LNWm5bh5XUX91fh7KpA+W0TgMKmgTpQ==", + "version": "19.2.3", + "resolved": "https://registry.npmjs.org/react/-/react-19.2.3.tgz", + "integrity": "sha512-Ku/hhYbVjOQnXDZFv2+RibmLFGwFdeeKHFcOTlrt7xplBnya5OGn/hIRDsqDiSUcfORsDC7MPxwork8jBwsIWA==", "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/react-dom": { - "version": "19.0.0", - "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.0.0.tgz", - "integrity": "sha512-4GV5sHFG0e/0AD4X+ySy6UJd3jVl1iNsNHdpad0qhABJ11twS3TTBnseqsKurKcsNqCEFeGL3uLpVChpIO3QfQ==", + "version": "19.2.3", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.3.tgz", + "integrity": "sha512-yELu4WmLPw5Mr/lmeEpox5rw3RETacE++JgHqQzd2dg+YbJuat3jH4ingc+WPZhxaoFzdv9y33G+F7Nl5O0GBg==", "license": "MIT", "dependencies": { - "scheduler": "^0.25.0" + "scheduler": 
"^0.27.0" }, "peerDependencies": { - "react": "^19.0.0" + "react": "^19.2.3" } }, "node_modules/react-draggable": { - "version": "4.4.6", - "resolved": "https://registry.npmjs.org/react-draggable/-/react-draggable-4.4.6.tgz", - "integrity": "sha512-LtY5Xw1zTPqHkVmtM3X8MUOxNDOUhv/khTgBgrUvwaS064bwVvxT+q5El0uUFNx5IEPKXuRejr7UqLwBIg5pdw==", + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/react-draggable/-/react-draggable-4.5.0.tgz", + "integrity": "sha512-VC+HBLEZ0XJxnOxVAZsdRi8rD04Iz3SiiKOoYzamjylUcju/hP9np/aZdLHf/7WOD268WMoNJMvYfB5yAK45cw==", "license": "MIT", "dependencies": { - "clsx": "^1.1.1", + "clsx": "^2.1.1", "prop-types": "^15.8.1" }, "peerDependencies": { @@ -3172,21 +4285,14 @@ "react-dom": ">= 16.3.0" } }, - "node_modules/react-draggable/node_modules/clsx": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/clsx/-/clsx-1.2.1.tgz", - "integrity": "sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/react-dropzone-esm": { - "version": "15.2.0", - "resolved": "https://registry.npmjs.org/react-dropzone-esm/-/react-dropzone-esm-15.2.0.tgz", - "integrity": "sha512-pPwR8xWVL+tFLnbAb8KVH5f6Vtl397tck8dINkZ1cPMxHWH+l9dFmIgRWgbh7V7jbjIcuKXCsVrXbhQz68+dVA==", + "node_modules/react-dropzone": { + "version": "14.3.8", + "resolved": "https://registry.npmjs.org/react-dropzone/-/react-dropzone-14.3.8.tgz", + "integrity": "sha512-sBgODnq+lcA4P296DY4wacOZz3JFpD99fp+hb//iBO2HHnyeZU3FwWyXJ6salNpqQdsZrgMrotuko/BdJMV8Ug==", "license": "MIT", "dependencies": { + "attr-accept": "^2.2.4", + "file-selector": "^2.1.0", "prop-types": "^15.8.1" }, "engines": { @@ -3196,16 +4302,26 @@ "react": ">= 16.8 || 18.0.0" } }, - "node_modules/react-fast-compare": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-2.0.4.tgz", - "integrity": "sha512-suNP+J1VU1MWFKcyt7RtjiSWUjvidmQSlqu+eHslq+342xCbGTYmC0mEhPCOHxlW0CywylOC1u2DFAT+bv4dBw==", - "license": "MIT" + "node_modules/react-hook-form": { + "version": "7.70.0", + "resolved": "https://registry.npmjs.org/react-hook-form/-/react-hook-form-7.70.0.tgz", + "integrity": "sha512-COOMajS4FI3Wuwrs3GPpi/Jeef/5W1DRR84Yl5/ShlT3dKVFUfoGiEZ/QE6Uw8P4T2/CLJdcTVYKvWBMQTEpvw==", + "license": "MIT", + "engines": { + "node": ">=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/react-hook-form" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17 || ^18 || ^19" + } }, "node_modules/react-is": { - "version": "18.3.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", - "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", "license": "MIT" }, "node_modules/react-lifecycles-compat": { @@ -3215,9 +4331,9 @@ "license": "MIT" }, "node_modules/react-number-format": { - "version": "5.4.3", - "resolved": "https://registry.npmjs.org/react-number-format/-/react-number-format-5.4.3.tgz", - "integrity": "sha512-VCY5hFg/soBighAoGcdE+GagkJq0230qN6jcS5sp8wQX1qy1fYN/RX7/BXkrs0oyzzwqR8/+eSUrqXbGeywdUQ==", + "version": "5.4.4", + "resolved": "https://registry.npmjs.org/react-number-format/-/react-number-format-5.4.4.tgz", + "integrity": 
"sha512-wOmoNZoOpvMminhifQYiYSTCLUDOiUbBunrMrMjA+dV52sY+vck1S4UhR6PkgnoCquvvMSeJjErXZ4qSaWCliA==", "license": "MIT", "peerDependencies": { "react": "^0.14 || ^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", @@ -3241,9 +4357,9 @@ } }, "node_modules/react-remove-scroll": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.6.3.tgz", - "integrity": "sha512-pnAi91oOk8g8ABQKGF5/M9qxmmOPxaAnopyTHYfqYEwJhyFrbbBtHuSgtKEoH0jpcxx5o3hXqH1mNd9/Oi+8iQ==", + "version": "2.7.2", + "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.7.2.tgz", + "integrity": "sha512-Iqb9NjCCTt6Hf+vOdNIZGdTiH1QSqr27H/Ek9sv/a97gfueI/5h1s3yRi1nngzMUaOOToin5dI1dXKdXiF+u0Q==", "license": "MIT", "dependencies": { "react-remove-scroll-bar": "^2.3.7", @@ -3288,15 +4404,13 @@ } }, "node_modules/react-router": { - "version": "7.3.0", - "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.3.0.tgz", - "integrity": "sha512-466f2W7HIWaNXTKM5nHTqNxLrHTyXybm7R0eBlVSt0k/u55tTCDO194OIx/NrYD4TS5SXKTNekXfT37kMKUjgw==", + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.11.0.tgz", + "integrity": "sha512-uI4JkMmjbWCZc01WVP2cH7ZfSzH91JAZUDd7/nIprDgWxBV1TkkmLToFh7EbMTcMak8URFRa2YoBL/W8GWnCTQ==", "license": "MIT", "dependencies": { - "@types/cookie": "^0.6.0", "cookie": "^1.0.1", - "set-cookie-parser": "^2.6.0", - "turbo-stream": "2.4.0" + "set-cookie-parser": "^2.6.0" }, "engines": { "node": ">=20.0.0" @@ -3312,12 +4426,12 @@ } }, "node_modules/react-router-dom": { - "version": "7.3.0", - "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.3.0.tgz", - "integrity": "sha512-z7Q5FTiHGgQfEurX/FBinkOXhWREJIAB2RiU24lvcBa82PxUpwqvs/PAXb9lJyPjTs2jrl6UkLvCZVGJPeNuuQ==", + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.11.0.tgz", + "integrity": "sha512-e49Ir/kMGRzFOOrYQBdoitq3ULigw4lKbAyKusnvtDu2t4dBX4AGYPrzNvorXmVuOyeakai6FUPW5MmibvVG8g==", "license": "MIT", "dependencies": { - "react-router": "7.3.0" + "react-router": "7.11.0" }, "engines": { "node": ">=20.0.0" @@ -3365,9 +4479,9 @@ } }, "node_modules/react-textarea-autosize": { - "version": "8.5.6", - "resolved": "https://registry.npmjs.org/react-textarea-autosize/-/react-textarea-autosize-8.5.6.tgz", - "integrity": "sha512-aT3ioKXMa8f6zHYGebhbdMD2L00tKeRX1zuVuDx9YQK/JLLRSaSxq3ugECEmUB9z2kvk6bFSIoRHLkkUv0RJiw==", + "version": "8.5.9", + "resolved": "https://registry.npmjs.org/react-textarea-autosize/-/react-textarea-autosize-8.5.9.tgz", + "integrity": "sha512-U1DGlIQN5AwgjTyOEnI1oCcMuEr1pv1qOtklB2l4nyMGbHzWrI0eFsYK0zos2YWqAolJyG0IWJaqWmWj5ETh0A==", "license": "MIT", "dependencies": { "@babel/runtime": "^7.20.13", @@ -3452,9 +4566,9 @@ } }, "node_modules/recharts": { - "version": "2.15.1", - "resolved": "https://registry.npmjs.org/recharts/-/recharts-2.15.1.tgz", - "integrity": "sha512-v8PUTUlyiDe56qUj82w/EDVuzEFXwEHp9/xOowGAZwfLjB9uAy3GllQVIYMWF6nU+qibx85WF75zD7AjqoT54Q==", + "version": "2.15.4", + "resolved": "https://registry.npmjs.org/recharts/-/recharts-2.15.4.tgz", + "integrity": "sha512-UT/q6fwS3c1dHbXv2uFgYJ9BMFHu3fwnd7AYZaEQhXuYQ4hgsxLvsUXzGdKeZrW5xopzDCvuA2N41WJ88I7zIw==", "license": "MIT", "dependencies": { "clsx": "^2.0.0", @@ -3489,25 +4603,43 @@ "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", "license": "MIT" }, - "node_modules/regenerator-runtime": { - "version": "0.14.1", - "resolved": 
"https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", - "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==", + "node_modules/recharts/node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", "license": "MIT" }, - "node_modules/remove-accents": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/remove-accents/-/remove-accents-0.5.0.tgz", - "integrity": "sha512-8g3/Otx1eJaVD12e31UbJj1YzdtVvzH85HV7t+9MJYk/u3XmkOUJ5Ys9wQrf9PCPK8+xn4ymzqYCiZl6QWKn+A==", - "license": "MIT" - }, - "node_modules/resolve": { - "version": "1.22.10", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz", - "integrity": "sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==", + "node_modules/redent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz", + "integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==", + "dev": true, "license": "MIT", "dependencies": { - "is-core-module": "^2.16.0", + "indent-string": "^4.0.0", + "strip-indent": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/resolve": { + "version": "1.22.11", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", + "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.1", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" }, @@ -3531,13 +4663,13 @@ } }, "node_modules/rollup": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.35.0.tgz", - "integrity": "sha512-kg6oI4g+vc41vePJyO6dHt/yl0Rz3Thv0kJeVQ3D1kS3E5XSuKbPc29G4IpT/Kv1KQwgHVcN+HtyS+HYLNSvQg==", + "version": "4.53.5", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.53.5.tgz", + "integrity": "sha512-iTNAbFSlRpcHeeWu73ywU/8KuU/LZmNCSxp6fjQkJBD3ivUb8tpDrXhIxEzA05HlYMEwmtaUnb3RP+YNv162OQ==", "dev": true, "license": "MIT", "dependencies": { - "@types/estree": "1.0.6" + "@types/estree": "1.0.8" }, "bin": { "rollup": "dist/bin/rollup" @@ -3547,38 +4679,61 @@ "npm": ">=8.0.0" }, "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.35.0", - "@rollup/rollup-android-arm64": "4.35.0", - "@rollup/rollup-darwin-arm64": "4.35.0", - "@rollup/rollup-darwin-x64": "4.35.0", - "@rollup/rollup-freebsd-arm64": "4.35.0", - "@rollup/rollup-freebsd-x64": "4.35.0", - "@rollup/rollup-linux-arm-gnueabihf": "4.35.0", - "@rollup/rollup-linux-arm-musleabihf": "4.35.0", - "@rollup/rollup-linux-arm64-gnu": "4.35.0", - "@rollup/rollup-linux-arm64-musl": "4.35.0", - "@rollup/rollup-linux-loongarch64-gnu": "4.35.0", - "@rollup/rollup-linux-powerpc64le-gnu": "4.35.0", - "@rollup/rollup-linux-riscv64-gnu": "4.35.0", - "@rollup/rollup-linux-s390x-gnu": "4.35.0", - "@rollup/rollup-linux-x64-gnu": "4.35.0", - 
"@rollup/rollup-linux-x64-musl": "4.35.0", - "@rollup/rollup-win32-arm64-msvc": "4.35.0", - "@rollup/rollup-win32-ia32-msvc": "4.35.0", - "@rollup/rollup-win32-x64-msvc": "4.35.0", + "@rollup/rollup-android-arm-eabi": "4.53.5", + "@rollup/rollup-android-arm64": "4.53.5", + "@rollup/rollup-darwin-arm64": "4.53.5", + "@rollup/rollup-darwin-x64": "4.53.5", + "@rollup/rollup-freebsd-arm64": "4.53.5", + "@rollup/rollup-freebsd-x64": "4.53.5", + "@rollup/rollup-linux-arm-gnueabihf": "4.53.5", + "@rollup/rollup-linux-arm-musleabihf": "4.53.5", + "@rollup/rollup-linux-arm64-gnu": "4.53.5", + "@rollup/rollup-linux-arm64-musl": "4.53.5", + "@rollup/rollup-linux-loong64-gnu": "4.53.5", + "@rollup/rollup-linux-ppc64-gnu": "4.53.5", + "@rollup/rollup-linux-riscv64-gnu": "4.53.5", + "@rollup/rollup-linux-riscv64-musl": "4.53.5", + "@rollup/rollup-linux-s390x-gnu": "4.53.5", + "@rollup/rollup-linux-x64-gnu": "4.53.5", + "@rollup/rollup-linux-x64-musl": "4.53.5", + "@rollup/rollup-openharmony-arm64": "4.53.5", + "@rollup/rollup-win32-arm64-msvc": "4.53.5", + "@rollup/rollup-win32-ia32-msvc": "4.53.5", + "@rollup/rollup-win32-x64-gnu": "4.53.5", + "@rollup/rollup-win32-x64-msvc": "4.53.5", "fsevents": "~2.3.2" } }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true, + "license": "MIT" + }, + "node_modules/saxes": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/saxes/-/saxes-6.0.0.tgz", + "integrity": "sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==", + "dev": true, + "license": "ISC", + "dependencies": { + "xmlchars": "^2.2.0" + }, + "engines": { + "node": ">=v12.22.7" + } + }, "node_modules/scheduler": { - "version": "0.25.0", - "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.25.0.tgz", - "integrity": "sha512-xFVuu11jh+xcO7JOAGJNOXld8/TcEHK/4CituBUeUb5hqxJLj9YuemAEuvm9gQ/+pgXYfbQuqAkiYu+u7YEsNA==", + "version": "0.27.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz", + "integrity": "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==", "license": "MIT" }, "node_modules/set-cookie-parser": { - "version": "2.7.1", - "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.1.tgz", - "integrity": "sha512-IOc8uWeOZgnb3ptbCURJWNjWUPcO3ZnTTdzsurqERrP6nPyv+paC55vJM0LpOlT2ne+Ix+9+CRG1MNLlyZ4GjQ==", + "version": "2.7.2", + "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.2.tgz", + "integrity": "sha512-oeM1lpU/UvhTxw+g3cIfxXHyJRc/uidd3yK1P242gzHds0udQBYzs3y8j4gCCW+ZJ7ad0yctld8RYO+bdurlvw==", "license": "MIT" }, "node_modules/shebang-command": { @@ -3604,6 +4759,13 @@ "node": ">=8" } }, + "node_modules/siginfo": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", + "dev": true, + "license": "ISC" + }, "node_modules/source-map": { "version": "0.5.7", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", @@ -3623,6 +4785,33 @@ "node": ">=0.10.0" } }, + "node_modules/stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": 
"sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", + "dev": true, + "license": "MIT" + }, + "node_modules/std-env": { + "version": "3.10.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.10.0.tgz", + "integrity": "sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==", + "dev": true, + "license": "MIT" + }, + "node_modules/strip-indent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz", + "integrity": "sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "min-indent": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/strip-json-comments": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", @@ -3636,6 +4825,26 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/strip-literal": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-3.1.0.tgz", + "integrity": "sha512-8r3mkIM/2+PpjHoOtiAW8Rg3jJLHaV7xPwG+YRGrv6FP0wwk/toTpATxWYOW0BKdWwl82VT2tFYi5DlROa0Mxg==", + "dev": true, + "license": "MIT", + "dependencies": { + "js-tokens": "^9.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/strip-literal/node_modules/js-tokens": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.1.tgz", + "integrity": "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==", + "dev": true, + "license": "MIT" + }, "node_modules/stylis": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.2.0.tgz", @@ -3667,10 +4876,17 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/symbol-tree": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz", + "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==", + "dev": true, + "license": "MIT" + }, "node_modules/tabbable": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/tabbable/-/tabbable-6.2.0.tgz", - "integrity": "sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew==", + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/tabbable/-/tabbable-6.3.0.tgz", + "integrity": "sha512-EIHvdY5bPLuWForiR/AN2Bxngzpuwn1is4asboytXtpTgsArc+WmSJKVLlhdh71u7jFcryDqB2A8lQvj78MkyQ==", "license": "MIT" }, "node_modules/tiny-case": { @@ -3685,10 +4901,85 @@ "integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==", "license": "MIT" }, - "node_modules/tiny-warning": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", - "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==", + "node_modules/tinybench": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", + "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyexec": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.2.tgz", + "integrity": 
"sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinypool": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.1.1.tgz", + "integrity": "sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.0.0 || >=20.0.0" + } + }, + "node_modules/tinyrainbow": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-2.0.0.tgz", + "integrity": "sha512-op4nsTR47R6p0vMUUoYl/a+ljLFVtlfaXkLQmqfLR1qHma1h/ysYk4hEXZ880bf2CYgTskvTa/e196Vd5dDQXw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tinyspy": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-4.0.4.tgz", + "integrity": "sha512-azl+t0z7pw/z958Gy9svOTuzqIk6xq+NSheJzn5MMWtWTFywIacg2wUlzKFGtt3cthx0r2SxMK0yzJOR0IES7Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tldts": { + "version": "7.0.19", + "resolved": "https://registry.npmjs.org/tldts/-/tldts-7.0.19.tgz", + "integrity": "sha512-8PWx8tvC4jDB39BQw1m4x8y5MH1BcQ5xHeL2n7UVFulMPH/3Q0uiamahFJ3lXA0zO2SUyRXuVVbWSDmstlt9YA==", + "dev": true, + "license": "MIT", + "dependencies": { + "tldts-core": "^7.0.19" + }, + "bin": { + "tldts": "bin/cli.js" + } + }, + "node_modules/tldts-core": { + "version": "7.0.19", + "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-7.0.19.tgz", + "integrity": "sha512-lJX2dEWx0SGH4O6p+7FPwYmJ/bu1JbcGJ8RLaG9b7liIgZ85itUVEPbMtWRVrde/0fnDPEPHW10ZsKW3kVsE9A==", + "dev": true, "license": "MIT" }, "node_modules/toposort": { @@ -3697,18 +4988,38 @@ "integrity": "sha512-0a5EOkAUp8D4moMi2W8ZF8jcga7BgZd91O/yabJCFY8az+XSzeGyTKs0Aoo897iV1Nj6guFq8orWDS96z91oGg==", "license": "MIT" }, + "node_modules/tough-cookie": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-6.0.0.tgz", + "integrity": "sha512-kXuRi1mtaKMrsLUxz3sQYvVl37B0Ns6MzfrtV5DvJceE9bPyspOqk9xxv7XbZWcfLWbFmm997vl83qUWVJA64w==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "tldts": "^7.0.5" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/tr46": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-6.0.0.tgz", + "integrity": "sha512-bLVMLPtstlZ4iMQHpFHTR7GAGj2jxi8Dg0s2h2MafAE4uSWF98FC/3MomU51iQAMf8/qDUbKWf5GxuvvVcXEhw==", + "dev": true, + "license": "MIT", + "dependencies": { + "punycode": "^2.3.1" + }, + "engines": { + "node": ">=20" + } + }, "node_modules/tslib": { "version": "2.8.1", "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", "license": "0BSD" }, - "node_modules/turbo-stream": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/turbo-stream/-/turbo-stream-2.4.0.tgz", - "integrity": 
"sha512-FHncC10WpBd2eOmGwpmQsWLDoK4cqsA/UT/GqNoaKOQnT8uzhtCbg3EoUDMvqpOSAI0S26mr0rkjzbOO6S3v1g==", - "license": "ISC" - }, "node_modules/type-check": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", @@ -3723,9 +5034,9 @@ } }, "node_modules/type-fest": { - "version": "4.37.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.37.0.tgz", - "integrity": "sha512-S/5/0kFftkq27FPNye0XM1e2NsnoD/3FS+pBmbjmmtLT6I+i344KoOf7pvXreaFsDamWeaJX55nczA1m5PsBDg==", + "version": "4.41.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", + "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", "license": "(MIT OR CC0-1.0)", "engines": { "node": ">=16" @@ -3780,9 +5091,9 @@ } }, "node_modules/use-isomorphic-layout-effect": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/use-isomorphic-layout-effect/-/use-isomorphic-layout-effect-1.2.0.tgz", - "integrity": "sha512-q6ayo8DWoPZT0VdG4u3D3uxcgONP3Mevx2i2b0434cwWBoL+aelL1DzkXI6w3PhTZzUeR2kaVlZn70iCiseP6w==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/use-isomorphic-layout-effect/-/use-isomorphic-layout-effect-1.2.1.tgz", + "integrity": "sha512-tpZZ+EX0gaghDAiFR37hj5MgY6ZN55kLiPkJsKxBMZ6GZdOSPJXiOzPM984oPYZ5AnehYx5WQp1+ME8I/P/pRA==", "license": "MIT", "peerDependencies": { "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" @@ -3855,13 +5166,13 @@ } }, "node_modules/video.js": { - "version": "8.22.0", - "resolved": "https://registry.npmjs.org/video.js/-/video.js-8.22.0.tgz", - "integrity": "sha512-xge2kpjsvC0zgFJ1cqt+wTqsi21+huFswlonPFh7qiplypsb4FN/D2Rz6bWdG/S9eQaPHfWHsarmJL/7D3DHoA==", + "version": "8.23.4", + "resolved": "https://registry.npmjs.org/video.js/-/video.js-8.23.4.tgz", + "integrity": "sha512-qI0VTlYmKzEqRsz1Nppdfcaww4RSxZAq77z2oNSl3cNg2h6do5C8Ffl0KqWQ1OpD8desWXsCrde7tKJ9gGTEyQ==", "license": "Apache-2.0", "dependencies": { "@babel/runtime": "^7.12.5", - "@videojs/http-streaming": "^3.17.0", + "@videojs/http-streaming": "^3.17.2", "@videojs/vhs-utils": "^4.1.1", "@videojs/xhr": "2.7.0", "aes-decrypter": "^4.0.2", @@ -3906,21 +5217,24 @@ } }, "node_modules/vite": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/vite/-/vite-6.2.1.tgz", - "integrity": "sha512-n2GnqDb6XPhlt9B8olZPrgMD/es/Nd1RdChF6CBD/fHW6pUyUTt2sQW2fPRX5GiD9XEa6+8A6A4f2vT6pSsE7Q==", + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.0.tgz", + "integrity": "sha512-dZwN5L1VlUBewiP6H9s2+B3e3Jg96D0vzN+Ry73sOefebhYr9f94wwkMNN/9ouoU8pV1BqA1d1zGk8928cx0rg==", "dev": true, "license": "MIT", "dependencies": { - "esbuild": "^0.25.0", - "postcss": "^8.5.3", - "rollup": "^4.30.1" + "esbuild": "^0.27.0", + "fdir": "^6.5.0", + "picomatch": "^4.0.3", + "postcss": "^8.5.6", + "rollup": "^4.43.0", + "tinyglobby": "^0.2.15" }, "bin": { "vite": "bin/vite.js" }, "engines": { - "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + "node": "^20.19.0 || >=22.12.0" }, "funding": { "url": "https://github.com/vitejs/vite?sponsor=1" @@ -3929,14 +5243,14 @@ "fsevents": "~2.3.3" }, "peerDependencies": { - "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", + "@types/node": "^20.19.0 || >=22.12.0", "jiti": ">=1.21.0", - "less": "*", + "less": "^4.0.0", "lightningcss": "^1.21.0", - "sass": "*", - "sass-embedded": "*", - "stylus": "*", - "sugarss": "*", + "sass": "^1.70.0", + "sass-embedded": "^1.70.0", + "stylus": ">=0.54.8", + "sugarss": "^5.0.0", "terser": "^5.16.0", "tsx": "^4.8.1", "yaml": "^2.4.2" @@ 
-3977,11 +5291,167 @@ } } }, + "node_modules/vite-node": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-3.2.4.tgz", + "integrity": "sha512-EbKSKh+bh1E1IFxeO0pg1n4dvoOTt0UDiXMd/qn++r98+jPO1xtJilvXldeuQ8giIB5IkpjCgMleHMNEsGH6pg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cac": "^6.7.14", + "debug": "^4.4.1", + "es-module-lexer": "^1.7.0", + "pathe": "^2.0.3", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" + }, + "bin": { + "vite-node": "vite-node.mjs" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/vitest": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-3.2.4.tgz", + "integrity": "sha512-LUCP5ev3GURDysTWiP47wRRUpLKMOfPh+yKTx3kVIEiu5KOMeqzpnYNsKyOoVrULivR8tLcks4+lga33Whn90A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/chai": "^5.2.2", + "@vitest/expect": "3.2.4", + "@vitest/mocker": "3.2.4", + "@vitest/pretty-format": "^3.2.4", + "@vitest/runner": "3.2.4", + "@vitest/snapshot": "3.2.4", + "@vitest/spy": "3.2.4", + "@vitest/utils": "3.2.4", + "chai": "^5.2.0", + "debug": "^4.4.1", + "expect-type": "^1.2.1", + "magic-string": "^0.30.17", + "pathe": "^2.0.3", + "picomatch": "^4.0.2", + "std-env": "^3.9.0", + "tinybench": "^2.9.0", + "tinyexec": "^0.3.2", + "tinyglobby": "^0.2.14", + "tinypool": "^1.1.1", + "tinyrainbow": "^2.0.0", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0", + "vite-node": "3.2.4", + "why-is-node-running": "^2.3.0" + }, + "bin": { + "vitest": "vitest.mjs" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@types/debug": "^4.1.12", + "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", + "@vitest/browser": "3.2.4", + "@vitest/ui": "3.2.4", + "happy-dom": "*", + "jsdom": "*" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + "@types/debug": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + } + } + }, + "node_modules/w3c-xmlserializer": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/w3c-xmlserializer/-/w3c-xmlserializer-5.0.0.tgz", + "integrity": "sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "xml-name-validator": "^5.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/webidl-conversions": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-8.0.0.tgz", + "integrity": "sha512-n4W4YFyz5JzOfQeA8oN7dUYpR+MBP3PIUsn2jLjWXwK5ASUzt0Jc/A5sAUZoCYFJRGF0FBKJ+1JjN43rNdsQzA==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=20" + } + }, "node_modules/webworkify-webpack": { "version": "2.1.5", "resolved": "git+ssh://git@github.com/xqq/webworkify-webpack.git#24d1e719b4a6cac37a518b2bb10fe124527ef4ef", "license": "MIT" }, + "node_modules/whatwg-encoding": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-3.1.1.tgz", + "integrity": "sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==", + "dev": true, + "license": "MIT", + "dependencies": { 
+ "iconv-lite": "0.6.3" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/whatwg-mimetype": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz", + "integrity": "sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/whatwg-url": { + "version": "15.1.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-15.1.0.tgz", + "integrity": "sha512-2ytDk0kiEj/yu90JOAp44PVPUkO9+jVhyf+SybKlRHSDlvOOZhdPIrr7xTH64l4WixO2cP+wQIcgujkGBPPz6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "tr46": "^6.0.0", + "webidl-conversions": "^8.0.0" + }, + "engines": { + "node": ">=20" + } + }, "node_modules/which": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", @@ -3998,6 +5468,23 @@ "node": ">= 8" } }, + "node_modules/why-is-node-running": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", + "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", + "dev": true, + "license": "MIT", + "dependencies": { + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/word-wrap": { "version": "1.2.5", "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", @@ -4008,10 +5495,49 @@ "node": ">=0.10.0" } }, + "node_modules/ws": { + "version": "8.18.3", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz", + "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/xml-name-validator": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-5.0.0.tgz", + "integrity": "sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18" + } + }, + "node_modules/xmlchars": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/xmlchars/-/xmlchars-2.2.0.tgz", + "integrity": "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==", + "dev": true, + "license": "MIT" + }, "node_modules/yaml": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.7.0.tgz", - "integrity": "sha512-+hSoy/QHluxmC9kCIJyL/uyFmLmc+e5CFR5Wa+bpIhIj85LVb9ZH2nVnqrHoSvKogwODv0ClqZkmiSSaIH5LTA==", + "version": "2.8.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.8.2.tgz", + "integrity": "sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A==", "dev": true, "license": "ISC", "optional": true, @@ -4020,7 +5546,10 @@ "yaml": "bin.mjs" }, "engines": { - "node": ">= 14" + "node": ">= 14.6" + }, + "funding": { + "url": "https://github.com/sponsors/eemeli" } }, "node_modules/yocto-queue": { @@ -4037,9 +5566,9 @@ } }, "node_modules/yup": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/yup/-/yup-1.6.1.tgz", - 
"integrity": "sha512-JED8pB50qbA4FOkDol0bYF/p60qSEDQqBD0/qeIrUCG1KbPBIQ776fCUNb9ldbPcSTxA69g/47XTo4TqWiuXOA==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/yup/-/yup-1.7.1.tgz", + "integrity": "sha512-GKHFX2nXul2/4Dtfxhozv701jLQHdf6J34YDh2cEkpqoo8le5Mg6/LrdseVLrFarmFygZTlfIhHx/QKfb/QWXw==", "license": "MIT", "dependencies": { "property-expr": "^2.0.5", @@ -4061,9 +5590,9 @@ } }, "node_modules/zustand": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/zustand/-/zustand-5.0.3.tgz", - "integrity": "sha512-14fwWQtU3pH4dE0dOpdMiWjddcH+QzKIgk1cl8epwSE7yag43k/AD/m4L6+K7DytAOr9gGBe3/EXj9g7cdostg==", + "version": "5.0.9", + "resolved": "https://registry.npmjs.org/zustand/-/zustand-5.0.9.tgz", + "integrity": "sha512-ALBtUj0AfjJt3uNRQoL1tL2tMvj6Gp/6e39dnfT6uzpelGru8v1tPOGBzayOWbPJvujM8JojDk3E1LxeFisBNg==", "license": "MIT", "engines": { "node": ">=12.20.0" diff --git a/frontend/package.json b/frontend/package.json index 3b287d79..7b2d5927 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -1,5 +1,5 @@ { - "name": "vite", + "name": "frontend", "private": true, "version": "0.0.0", "type": "module", @@ -7,34 +7,32 @@ "dev": "vite --host", "build": "vite build", "lint": "eslint .", - "preview": "vite preview" + "preview": "vite preview", + "test": "vitest --run", + "test:watch": "vitest" }, "dependencies": { "@dnd-kit/core": "^6.3.1", "@dnd-kit/modifiers": "^9.0.0", "@dnd-kit/sortable": "^10.0.0", "@dnd-kit/utilities": "^3.2.2", - "@mantine/charts": "^7.17.2", - "@mantine/core": "^7.17.2", - "@mantine/dates": "^7.17.2", - "@mantine/dropzone": "^7.17.2", - "@mantine/form": "^7.17.3", - "@mantine/hooks": "^7.17.2", - "@mantine/notifications": "^7.17.2", - "@tabler/icons-react": "^3.31.0", + "@mantine/charts": "~8.0.1", + "@mantine/core": "~8.0.1", + "@mantine/dates": "~8.0.1", + "@mantine/dropzone": "~8.0.1", + "@mantine/form": "~8.0.1", + "@mantine/hooks": "~8.0.1", + "@mantine/notifications": "~8.0.1", + "@hookform/resolvers": "^5.2.2", "@tanstack/react-table": "^8.21.2", - "allotment": "^1.20.3", - "axios": "^1.8.2", - "clsx": "^2.1.1", + "allotment": "^1.20.4", "dayjs": "^1.11.13", - "formik": "^2.4.6", "hls.js": "^1.5.20", - "lucide-react": "^0.479.0", - "mantine-react-table": "^2.0.0-beta.9", + "react-hook-form": "^7.70.0", + "lucide-react": "^0.511.0", "mpegts.js": "^1.8.0", - "prettier": "^3.5.3", - "react": "^19.0.0", - "react-dom": "^19.0.0", + "react": "^19.1.0", + "react-dom": "^19.1.0", "react-draggable": "^4.4.6", "react-pro-sidebar": "^1.1.0", "react-router-dom": "^7.3.0", @@ -48,13 +46,30 @@ }, "devDependencies": { "@eslint/js": "^9.21.0", - "@types/react": "^19.0.10", - "@types/react-dom": "^19.0.4", - "@vitejs/plugin-react-swc": "^3.8.0", - "eslint": "^9.21.0", + "@swc/core": "npm:@swc/wasm@1.13.20", + "@swc/wasm": "^1.13.20", + "@testing-library/dom": "^10.4.1", + "@testing-library/jest-dom": "^6.8.0", + "@testing-library/react": "^16.3.0", + "@testing-library/user-event": "^14.6.1", + "@types/react": "^19.1.0", + "@types/react-dom": "^19.1.0", + "@vitejs/plugin-react-swc": "^4.1.0", + "eslint": "^9.27.0", "eslint-plugin-react-hooks": "^5.1.0", "eslint-plugin-react-refresh": "^0.4.19", "globals": "^15.15.0", - "vite": "^6.2.0" + "jsdom": "^27.0.0", + "prettier": "^3.5.3", + "vite": "^7.1.7", + "vitest": "^3.2.4" + }, + "resolutions": { + "vite": "7.1.7", + "react": "19.1.0", + "react-dom": "19.1.0" + }, + "overrides": { + "js-yaml": "^4.1.1" } } diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx index e13c5af8..f22d408f 100644 --- 
a/frontend/src/App.jsx +++ b/frontend/src/App.jsx @@ -14,13 +14,16 @@ import Guide from './pages/Guide'; import Stats from './pages/Stats'; import DVR from './pages/DVR'; import Settings from './pages/Settings'; +import PluginsPage from './pages/Plugins'; +import Users from './pages/Users'; +import LogosPage from './pages/Logos'; +import VODsPage from './pages/VODs'; import useAuthStore from './store/auth'; import FloatingVideo from './components/FloatingVideo'; import { WebsocketProvider } from './WebSocket'; import { Box, AppShell, MantineProvider } from '@mantine/core'; import '@mantine/core/styles.css'; // Ensure Mantine global styles load import '@mantine/notifications/styles.css'; -import 'mantine-react-table/styles.css'; import '@mantine/dropzone/styles.css'; import '@mantine/dates/styles.css'; import './index.css'; @@ -57,6 +60,12 @@ const App = () => { } } catch (error) { console.error('Error checking superuser status:', error); + // If authentication error, redirect to login + if (error.status === 401) { + localStorage.removeItem('token'); + localStorage.removeItem('refreshToken'); + window.location.href = '/login'; + } } } checkSuperuser(); @@ -65,16 +74,22 @@ const App = () => { // Authentication check useEffect(() => { const checkAuth = async () => { - const loggedIn = await initializeAuth(); - if (loggedIn) { - await initData(); - setIsAuthenticated(true); - } else { + try { + const loggedIn = await initializeAuth(); + if (loggedIn) { + await initData(); + // Logos are now loaded at the end of initData, no need for background loading + } else { + await logout(); + } + } catch (error) { + console.error('Auth check failed:', error); await logout(); } }; + checkAuth(); - }, [initializeAuth, initData, setIsAuthenticated, logout]); + }, [initializeAuth, initData, logout]); return ( { height: 0, }} navbar={{ - width: open ? drawerWidth : miniDrawerWidth, + width: isAuthenticated + ? open + ? 
drawerWidth + : miniDrawerWidth + : 0, }} > - + {isAuthenticated && ( + + )} { } /> } /> } /> + } /> + } /> } /> + } /> + } /> ) : ( } /> diff --git a/frontend/src/WebSocket.jsx b/frontend/src/WebSocket.jsx index f538ee29..87d80953 100644 --- a/frontend/src/WebSocket.jsx +++ b/frontend/src/WebSocket.jsx @@ -5,162 +5,906 @@ import React, { createContext, useContext, useMemo, + useCallback, } from 'react'; -import useStreamsStore from './store/streams'; import { notifications } from '@mantine/notifications'; import useChannelsStore from './store/channels'; +import useLogosStore from './store/logos'; import usePlaylistsStore from './store/playlists'; import useEPGsStore from './store/epgs'; -import { Box, Button, Stack } from '@mantine/core'; +import { Box, Button, Stack, Alert, Group } from '@mantine/core'; import API from './api'; +import useSettingsStore from './store/settings'; +import useAuthStore from './store/auth'; -export const WebsocketContext = createContext([false, () => { }, null]); +export const WebsocketContext = createContext([false, () => {}, null]); export const WebsocketProvider = ({ children }) => { const [isReady, setIsReady] = useState(false); const [val, setVal] = useState(null); + const ws = useRef(null); + const reconnectTimerRef = useRef(null); + const [reconnectAttempts, setReconnectAttempts] = useState(0); + const [connectionError, setConnectionError] = useState(null); + const maxReconnectAttempts = 5; + const initialBackoffDelay = 1000; // 1 second initial delay + const env_mode = useSettingsStore((s) => s.environment.env_mode); + const accessToken = useAuthStore((s) => s.accessToken); + const isAuthenticated = useAuthStore((s) => s.isAuthenticated); + + const epgs = useEPGsStore((s) => s.epgs); + const updateEPG = useEPGsStore((s) => s.updateEPG); + const updateEPGProgress = useEPGsStore((s) => s.updateEPGProgress); + + const updatePlaylist = usePlaylistsStore((s) => s.updatePlaylist); + + // Calculate reconnection delay with exponential backoff + const getReconnectDelay = useCallback(() => { + return Math.min( + initialBackoffDelay * Math.pow(1.5, reconnectAttempts), + 30000 + ); // max 30 seconds + }, [reconnectAttempts]); + + // Clear any existing reconnect timers + const clearReconnectTimer = useCallback(() => { + if (reconnectTimerRef.current) { + clearTimeout(reconnectTimerRef.current); + reconnectTimerRef.current = null; + } + }, []); + + // Function to get WebSocket URL that works with both HTTP and HTTPS + const getWebSocketUrl = useCallback(() => { + const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:'; + const host = window.location.hostname; + const appPort = window.location.port; + + // In development mode, connect directly to the WebSocket server on port 8001 + if (env_mode === 'dev') { + return `${protocol}//${host}:8001/ws/?token=${accessToken}`; + } else { + // In production mode, use the same port as the main application + // This allows nginx to handle the WebSocket forwarding + return appPort + ? 
`${protocol}//${host}:${appPort}/ws/?token=${accessToken}` + : `${protocol}//${host}/ws/?token=${accessToken}`; + } + }, [env_mode, accessToken]); + + // Function to handle websocket connection + const connectWebSocket = useCallback(() => { + // Clear any existing timers to avoid multiple reconnection attempts + clearReconnectTimer(); + + // Clear old websocket if exists + if (ws.current) { + // Remove event handlers to prevent duplicate events + ws.current.onclose = null; + ws.current.onerror = null; + ws.current.onopen = null; + ws.current.onmessage = null; + + try { + ws.current.close(); + } catch (e) { + console.warn('Error closing existing WebSocket:', e); + } + } + + try { + console.log( + `Attempting WebSocket connection (attempt ${reconnectAttempts + 1}/${maxReconnectAttempts})...` + ); + + // Use the function to get the correct WebSocket URL + const wsUrl = getWebSocketUrl(); + console.log(`Connecting to WebSocket at: ${wsUrl}`); + + // Create new WebSocket connection + const socket = new WebSocket(wsUrl); + + socket.onopen = () => { + console.log('WebSocket connected successfully'); + setIsReady(true); + setConnectionError(null); + setReconnectAttempts(0); + }; + + socket.onerror = (error) => { + console.error('WebSocket connection error:', error); + + // Don't show error notification on initial page load, + // only show it after a connection was established then lost + if (reconnectAttempts > 0 || isReady) { + setConnectionError('Failed to connect to WebSocket server.'); + } else { + console.log('Initial connection attempt failed, will retry...'); + } + }; + + socket.onclose = (event) => { + console.warn('WebSocket connection closed', event); + setIsReady(false); + + // Only attempt reconnect if we haven't reached max attempts + if (reconnectAttempts < maxReconnectAttempts) { + const delay = getReconnectDelay(); + setConnectionError( + `Connection lost. Reconnecting in ${Math.ceil(delay / 1000)} seconds...` + ); + console.log( + `Scheduling reconnect in ${delay}ms (attempt ${reconnectAttempts + 1}/${maxReconnectAttempts})...` + ); + + // Store timer reference so we can cancel it if needed + reconnectTimerRef.current = setTimeout(() => { + setReconnectAttempts((prev) => prev + 1); + connectWebSocket(); + }, delay); + } else { + setConnectionError( + 'Maximum reconnection attempts reached. Please reload the page.' + ); + console.error( + 'Maximum reconnection attempts reached. WebSocket connection failed.' 
+ ); + } + }; + + // Message handler + socket.onmessage = async (event) => { + try { + const parsedEvent = JSON.parse(event.data); + + // Handle connection_established event + if (parsedEvent.type === 'connection_established') { + console.log( + 'WebSocket connection established:', + parsedEvent.data?.message + ); + // Don't need to do anything else for this event type + return; + } + + // Handle standard message format for other event types + switch (parsedEvent.data?.type) { + case 'comskip_status': { + const rid = parsedEvent.data.recording_id; + const id = `comskip-${rid}`; + const status = parsedEvent.data.status; + const title = parsedEvent.data.title || 'Recording'; + if (status === 'started') { + notifications.show({ + id, + title: 'Removing commercials', + message: `Processing ${title}...`, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + loading: true, + }); + } else if (status === 'completed') { + notifications.update({ + id, + title: 'Commercials removed', + message: `${title} — kept ${parsedEvent.data.segments_kept} segments`, + color: 'green.5', + loading: false, + autoClose: 4000, + }); + try { + await useChannelsStore.getState().fetchRecordings(); + } catch {} + } else if (status === 'skipped') { + notifications.update({ + id, + title: 'No commercials to remove', + message: parsedEvent.data.reason || '', + color: 'teal', + loading: false, + autoClose: 3000, + }); + try { + await useChannelsStore.getState().fetchRecordings(); + } catch {} + } else if (status === 'error') { + notifications.update({ + id, + title: 'Comskip failed', + message: parsedEvent.data.reason || 'Unknown error', + color: 'red', + loading: false, + autoClose: 6000, + }); + try { + await useChannelsStore.getState().fetchRecordings(); + } catch {} + } + break; + } + case 'epg_file': + fetchEPGs(); + notifications.show({ + title: 'EPG File Detected', + message: `Processing ${parsedEvent.data.filename}`, + }); + break; + + case 'm3u_file': + fetchPlaylists(); + notifications.show({ + title: 'M3U File Detected', + message: `Processing ${parsedEvent.data.filename}`, + }); + break; + + case 'm3u_refresh': + // Update the store with progress information + setRefreshProgress(parsedEvent.data); + + // Update the playlist status whenever we receive a status update + // Not just when progress is 100% or status is pending_setup + if (parsedEvent.data.status && parsedEvent.data.account) { + // Get fresh playlists from store to avoid stale state from React render cycle + const currentPlaylists = usePlaylistsStore.getState().playlists; + const isArray = Array.isArray(currentPlaylists); + const playlist = isArray + ? 
currentPlaylists.find( + (p) => p.id === parsedEvent.data.account + ) + : currentPlaylists[parsedEvent.data.account]; + + if (playlist) { + // When we receive a "success" status with 100% progress, this is a completed refresh + // So we should also update the updated_at timestamp + const updateData = { + ...playlist, + status: parsedEvent.data.status, + last_message: + parsedEvent.data.message || playlist.last_message, + }; + + // Update the timestamp when we complete a successful refresh + if ( + parsedEvent.data.status === 'success' && + parsedEvent.data.progress === 100 + ) { + updateData.updated_at = new Date().toISOString(); + // Log successful completion for debugging + console.log( + 'M3U refresh completed successfully:', + updateData + ); + fetchPlaylists(); // Refresh playlists to ensure UI is up-to-date + fetchChannelProfiles(); // Ensure channel profiles are updated + } + + updatePlaylist(updateData); + } else { + // Playlist not in store yet - this happens when backend sends websocket + // updates immediately after creating the playlist, before the API response + // returns. The frontend will receive a 'playlist_created' event shortly + // which will trigger a fetchPlaylists() to sync the store. + console.log( + `Received update for playlist ID ${parsedEvent.data.account} not yet in store. ` + + `Waiting for playlist_created event to sync...` + ); + } + } + break; + + case 'channel_stats': + setChannelStats(JSON.parse(parsedEvent.data.stats)); + break; + + case 'epg_channels': + notifications.show({ + message: 'EPG channels updated!', + color: 'green.5', + }); + + // If source_id is provided, update that specific EPG's status + if (parsedEvent.data.source_id) { + const epg = epgs[parsedEvent.data.source_id]; + if (epg) { + updateEPG({ + ...epg, + status: 'success', + }); + } + } + + fetchEPGData(); + break; + + case 'epg_match': + notifications.show({ + message: parsedEvent.data.message || 'EPG match is complete!', + color: 'green.5', + }); + + // Check if we have associations data and use the more efficient batch API + if ( + parsedEvent.data.associations && + parsedEvent.data.associations.length > 0 + ) { + API.batchSetEPG(parsedEvent.data.associations); + } + break; + + case 'epg_matching_progress': { + const progress = parsedEvent.data; + const id = 'epg-matching-progress'; + + if (progress.stage === 'starting') { + notifications.show({ + id, + title: 'EPG Matching in Progress', + message: `Starting to match ${progress.total} channels...`, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + loading: true, + }); + } else if (progress.stage === 'matching') { + let message = `Matched ${progress.matched} of ${progress.total} channels`; + if (progress.remaining > 0) { + message += ` (${progress.remaining} remaining)`; + } + if (progress.current_channel) { + message += `\nCurrently processing: ${progress.current_channel}`; + } + + notifications.update({ + id, + title: 'EPG Matching in Progress', + message, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + loading: true, + }); + } else if (progress.stage === 'completed') { + notifications.update({ + id, + title: 'EPG Matching Complete', + message: `Successfully matched ${progress.matched} of ${progress.total} channels (${progress.progress_percent}%)`, + color: progress.matched > 0 ? 
'green.5' : 'orange', + loading: false, + autoClose: 6000, + }); + } + break; + } + + case 'epg_logo_setting_progress': { + const progress = parsedEvent.data; + const id = 'epg-logo-setting-progress'; + + if (progress.status === 'running' && progress.progress === 0) { + // Initial message + notifications.show({ + id, + title: 'Setting Logos from EPG', + message: `Processing ${progress.total} channels...`, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + loading: true, + }); + } else if (progress.status === 'running') { + // Progress update + let message = `Processed ${progress.progress} of ${progress.total} channels`; + if (progress.updated_count !== undefined) { + message += ` (${progress.updated_count} updated)`; + } + if (progress.created_logos_count !== undefined) { + message += `, created ${progress.created_logos_count} logos`; + } + + notifications.update({ + id, + title: 'Setting Logos from EPG', + message, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + loading: true, + }); + } else if (progress.status === 'completed') { + notifications.update({ + id, + title: 'Logo Setting Complete', + message: `Successfully updated ${progress.updated_count || 0} channel logos${progress.created_logos_count ? `, created ${progress.created_logos_count} new logos` : ''}`, + color: progress.updated_count > 0 ? 'green.5' : 'orange', + loading: false, + autoClose: 6000, + }); + // Refresh channels data and logos + try { + await API.requeryChannels(); + await useChannelsStore.getState().fetchChannels(); + + // Get updated channel data and extract logo IDs to load + const channels = useChannelsStore.getState().channels; + const logoIds = Object.values(channels) + .filter((channel) => channel.logo_id) + .map((channel) => channel.logo_id); + + // Fetch the specific logos that were just assigned + if (logoIds.length > 0) { + await useLogosStore.getState().fetchLogosByIds(logoIds); + } + } catch (e) { + console.warn( + 'Failed to refresh channels after logo setting:', + e + ); + } + } + break; + } + + case 'epg_name_setting_progress': { + const progress = parsedEvent.data; + const id = 'epg-name-setting-progress'; + + if (progress.status === 'running' && progress.progress === 0) { + // Initial message + notifications.show({ + id, + title: 'Setting Names from EPG', + message: `Processing ${progress.total} channels...`, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + loading: true, + }); + } else if (progress.status === 'running') { + // Progress update + let message = `Processed ${progress.progress} of ${progress.total} channels`; + if (progress.updated_count !== undefined) { + message += ` (${progress.updated_count} updated)`; + } + + notifications.update({ + id, + title: 'Setting Names from EPG', + message, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + loading: true, + }); + } else if (progress.status === 'completed') { + notifications.update({ + id, + title: 'Name Setting Complete', + message: `Successfully updated ${progress.updated_count || 0} channel names from EPG data`, + color: progress.updated_count > 0 ? 
'green.5' : 'orange', + loading: false, + autoClose: 6000, + }); + // Refresh channels data + try { + await API.requeryChannels(); + await useChannelsStore.getState().fetchChannels(); + } catch (e) { + console.warn( + 'Failed to refresh channels after name setting:', + e + ); + } + } + break; + } + + case 'm3u_profile_test': + setProfilePreview( + parsedEvent.data.search_preview, + parsedEvent.data.result + ); + break; + + case 'recording_updated': + try { + await useChannelsStore.getState().fetchRecordings(); + } catch (e) { + console.warn('Failed to refresh recordings on update:', e); + } + break; + + case 'recordings_refreshed': + try { + await useChannelsStore.getState().fetchRecordings(); + } catch (e) { + console.warn('Failed to refresh recordings on refreshed:', e); + } + break; + + case 'recording_started': + notifications.show({ + title: 'Recording started!', + message: `Started recording channel ${parsedEvent.data.channel}`, + }); + try { + await useChannelsStore.getState().fetchRecordings(); + } catch (e) { + console.warn('Failed to refresh recordings on start:', e); + } + break; + + case 'recording_ended': + notifications.show({ + title: 'Recording finished!', + message: `Stopped recording channel ${parsedEvent.data.channel}`, + }); + try { + await useChannelsStore.getState().fetchRecordings(); + } catch (e) { + console.warn('Failed to refresh recordings on end:', e); + } + break; + + case 'epg_fetch_error': + notifications.show({ + title: 'EPG Source Error', + message: parsedEvent.data.message, + color: 'orange.5', + autoClose: 8000, + }); + + // Update EPG status in store + if (parsedEvent.data.source_id) { + const epg = epgs[parsedEvent.data.source_id]; + if (epg) { + updateEPG({ + ...epg, + status: 'error', + last_message: parsedEvent.data.message, + }); + } + } + break; + + case 'epg_refresh': + // If we have source/account info, check if EPG exists before processing + if (parsedEvent.data.source || parsedEvent.data.account) { + const sourceId = + parsedEvent.data.source || parsedEvent.data.account; + const epg = epgs[sourceId]; + + // Only update progress if the EPG still exists in the store + // This prevents crashes when receiving updates for deleted EPGs + if (epg) { + // Update the store with progress information + updateEPGProgress(parsedEvent.data); + } else { + // EPG was deleted, ignore this update + console.debug( + `Ignoring EPG refresh update for deleted EPG ${sourceId}` + ); + break; + } + + if (epg) { + // Check for any indication of an error (either via status or error field) + const hasError = + parsedEvent.data.status === 'error' || + !!parsedEvent.data.error || + (parsedEvent.data.message && + parsedEvent.data.message.toLowerCase().includes('error')); + + if (hasError) { + // Handle error state + const errorMessage = + parsedEvent.data.error || + parsedEvent.data.message || + 'Unknown error occurred'; + + updateEPG({ + ...epg, + status: 'error', + last_message: errorMessage, + }); + + // Show notification for the error + notifications.show({ + title: 'EPG Refresh Error', + message: errorMessage, + color: 'red.5', + }); + } + // Update status on completion only if no errors + else if (parsedEvent.data.progress === 100) { + updateEPG({ + ...epg, + status: parsedEvent.data.status || 'success', + last_message: + parsedEvent.data.message || epg.last_message, + // Use the timestamp from the backend if provided + ...(parsedEvent.data.updated_at && { + updated_at: parsedEvent.data.updated_at, + }), + }); + + // Only show success notification if we've finished 
parsing programs and had no errors + if (parsedEvent.data.action === 'parsing_programs') { + notifications.show({ + title: 'EPG Processing Complete', + message: 'EPG data has been updated successfully', + color: 'green.5', + }); + + fetchEPGData(); + } + } + } + } + break; + + case 'epg_sources_changed': + // A plugin or backend process signaled that the EPG sources changed + try { + await fetchEPGs(); + } catch (e) { + console.warn( + 'Failed to refresh EPG sources after change notification:', + e + ); + } + break; + + case 'epg_data_created': + // A new EPG data entry was created (e.g., for a dummy EPG) + // Fetch EPG data so the channel form can immediately assign it + try { + await fetchEPGData(); + } catch (e) { + console.warn('Failed to refresh EPG data after creation:', e); + } + break; + + case 'stream_rehash': + // Handle stream rehash progress updates + if (parsedEvent.data.action === 'starting') { + notifications.show({ + id: 'stream-rehash-progress', // Persistent ID + title: 'Stream Rehash Started', + message: parsedEvent.data.message, + color: 'blue.5', + autoClose: false, // Don't auto-close + withCloseButton: false, // No close button during processing + loading: true, // Show loading indicator + }); + } else if (parsedEvent.data.action === 'processing') { + // Update the existing notification + notifications.update({ + id: 'stream-rehash-progress', + title: 'Stream Rehash in Progress', + message: `${parsedEvent.data.progress}% complete - ${parsedEvent.data.processed} streams processed, ${parsedEvent.data.duplicates_merged} duplicates merged`, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + loading: true, + }); + } else if (parsedEvent.data.action === 'completed') { + // Update to completion state + notifications.update({ + id: 'stream-rehash-progress', + title: 'Stream Rehash Complete', + message: `Processed ${parsedEvent.data.total_processed} streams, merged ${parsedEvent.data.duplicates_merged} duplicates. 
Final count: ${parsedEvent.data.final_count}`, + color: 'green.5', + autoClose: 8000, // Auto-close after completion + withCloseButton: true, // Allow manual close + loading: false, // Remove loading indicator + }); + } else if (parsedEvent.data.action === 'blocked') { + // Handle blocked rehash attempt + notifications.show({ + title: 'Stream Rehash Blocked', + message: parsedEvent.data.message, + color: 'orange.5', + autoClose: 8000, + }); + } + break; + + case 'logo_processing_summary': + notifications.show({ + title: 'Logo Processing Summary', + message: `${parsedEvent.data.message}`, + color: 'blue', + autoClose: 5000, + }); + fetchLogos(); + break; + + case 'account_info_refresh_success': + notifications.show({ + title: 'Account Info Refreshed', + message: `Successfully updated account information for ${parsedEvent.data.profile_name}`, + color: 'green', + autoClose: 4000, + }); + // Trigger refresh of playlists to update the UI + fetchPlaylists(); + break; + + case 'account_info_refresh_error': + notifications.show({ + title: 'Account Info Refresh Failed', + message: + parsedEvent.data.error || + 'Failed to refresh account information', + color: 'red', + autoClose: 8000, + }); + break; + + case 'channels_created': + // General notification for channel creation + notifications.show({ + title: 'Channels Created', + message: `Successfully created ${parsedEvent.data.count || 'multiple'} channel(s)`, + color: 'green', + autoClose: 4000, + }); + + // Refresh the channels table to show new channels + try { + await API.requeryChannels(); + await useChannelsStore.getState().fetchChannels(); + await fetchChannelProfiles(); + console.log('Channels refreshed after bulk creation'); + } catch (error) { + console.error( + 'Error refreshing channels after creation:', + error + ); + } + + break; + + case 'playlist_created': + // Backend signals that a new playlist has been created and we should refresh + console.log( + 'Playlist created event received, refreshing playlists...' + ); + fetchPlaylists(); + break; + + case 'bulk_channel_creation_progress': { + // Handle progress updates with persistent notifications like stream rehash + const data = parsedEvent.data; + + if (data.status === 'starting') { + notifications.show({ + id: 'bulk-channel-creation-progress', // Persistent ID + title: 'Bulk Channel Creation Started', + message: data.message || 'Starting bulk channel creation...', + color: 'blue.5', + autoClose: false, // Don't auto-close + withCloseButton: false, // No close button during processing + loading: true, // Show loading indicator + }); + } else if ( + data.status === 'processing' || + data.status === 'creating_logos' || + data.status === 'creating_channels' + ) { + // Calculate progress percentage + const progressPercent = + data.total > 0 + ? 
Math.round((data.progress / data.total) * 100) + : 0; + + // Update the existing notification with progress + notifications.update({ + id: 'bulk-channel-creation-progress', + title: 'Bulk Channel Creation in Progress', + message: `${progressPercent}% complete - ${data.message}`, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + loading: true, + }); + } else if (data.status === 'completed') { + // Hide the progress notification since channels_created will show success + notifications.hide('bulk-channel-creation-progress'); + } else if (data.status === 'failed') { + // Update to error state + notifications.update({ + id: 'bulk-channel-creation-progress', + title: 'Bulk Channel Creation Failed', + message: + data.error || + 'An error occurred during bulk channel creation', + color: 'red.5', + autoClose: 12000, // Auto-close after longer delay for errors + withCloseButton: true, // Allow manual close + loading: false, // Remove loading indicator + }); + } + + // Pass through to individual components for any additional handling + setVal(parsedEvent); + break; + } + + default: + console.error( + `Unknown websocket event type: ${parsedEvent.data?.type}` + ); + break; + } + } catch (error) { + console.error( + 'Error processing WebSocket message:', + error, + event.data + ); + } + }; + + ws.current = socket; + } catch (error) { + console.error('Error creating WebSocket connection:', error); + setConnectionError(`WebSocket error: ${error.message}`); + + // Schedule a reconnect if we haven't reached max attempts + if (reconnectAttempts < maxReconnectAttempts) { + const delay = getReconnectDelay(); + reconnectTimerRef.current = setTimeout(() => { + setReconnectAttempts((prev) => prev + 1); + connectWebSocket(); + }, delay); + } + } + }, [ + reconnectAttempts, + clearReconnectTimer, + getReconnectDelay, + getWebSocketUrl, + isReady, + ]); + + // Initial connection and cleanup + useEffect(() => { + // Only attempt to connect if the user is authenticated + if (isAuthenticated && accessToken) { + connectWebSocket(); + } else if (ws.current) { + // Close the connection if the user logs out + clearReconnectTimer(); + console.log('Closing WebSocket connection due to logout'); + ws.current.onclose = null; + ws.current.close(); + ws.current = null; + setIsReady(false); + } + + return () => { + clearReconnectTimer(); // Clear any pending reconnect timers + + if (ws.current) { + console.log('Closing WebSocket connection due to component unmount'); + ws.current.onclose = null; // Remove handlers to avoid reconnection + ws.current.close(); + ws.current = null; + } + }; + }, [connectWebSocket, clearReconnectTimer, isAuthenticated, accessToken]); const setChannelStats = useChannelsStore((s) => s.setChannelStats); - const fetchChannelGroups = useChannelsStore((s) => s.fetchChannelGroups); const fetchPlaylists = usePlaylistsStore((s) => s.fetchPlaylists); const setRefreshProgress = usePlaylistsStore((s) => s.setRefreshProgress); const setProfilePreview = usePlaylistsStore((s) => s.setProfilePreview); const fetchEPGData = useEPGsStore((s) => s.fetchEPGData); const fetchEPGs = useEPGsStore((s) => s.fetchEPGs); - - const ws = useRef(null); - - useEffect(() => { - let wsUrl = `${window.location.host}/ws/`; - if (import.meta.env.DEV) { - wsUrl = `${window.location.hostname}:8001/ws/`; - } - - if (window.location.protocol.match(/https/)) { - wsUrl = `wss://${wsUrl}`; - } else { - wsUrl = `ws://${wsUrl}`; - } - - const socket = new WebSocket(wsUrl); - - socket.onopen = () => { - console.log('websocket 
connected'); - setIsReady(true); - }; - - // Reconnection logic - socket.onclose = () => { - setIsReady(false); - setTimeout(() => { - const reconnectWs = new WebSocket(wsUrl); - reconnectWs.onopen = () => setIsReady(true); - }, 3000); // Attempt to reconnect every 3 seconds - }; - - socket.onmessage = async (event) => { - event = JSON.parse(event.data); - switch (event.data.type) { - case 'epg_file': - fetchEPGs(); - notifications.show({ - title: 'EPG File Detected', - message: `Processing ${event.data.filename}`, - }); - break; - - case 'm3u_file': - fetchPlaylists(); - notifications.show({ - title: 'M3U File Detected', - message: `Processing ${event.data.filename}`, - }); - break; - - case 'm3u_group_refresh': - fetchChannelGroups(); - fetchPlaylists(); - - notifications.show({ - title: 'Group processing finished!', - autoClose: 5000, - message: ( - - Refresh M3U or filter out groups to pull in streams. - - - ), - color: 'green.5', - }); - break; - - case 'm3u_refresh': - setRefreshProgress(event.data); - break; - - case 'channel_stats': - setChannelStats(JSON.parse(event.data.stats)); - break; - - case 'epg_channels': - notifications.show({ - message: 'EPG channels updated!', - color: 'green.5', - }); - fetchEPGData(); - break; - - case 'epg_match': - notifications.show({ - message: event.data.message || 'EPG match is complete!', - color: 'green.5', - }); - - // Check if we have associations data and use the more efficient batch API - if (event.data.associations && event.data.associations.length > 0) { - API.batchSetEPG(event.data.associations); - } - break; - - case 'm3u_profile_test': - setProfilePreview(event.data.search_preview, event.data.result); - break; - - case 'recording_started': - notifications.show({ - title: 'Recording started!', - message: `Started recording channel ${event.data.channel}`, - }); - break; - - case 'recording_ended': - notifications.show({ - title: 'Recording finished!', - message: `Stopped recording channel ${event.data.channel}`, - }); - break; - - default: - console.error(`Unknown websocket event type: ${event.type}`); - break; - } - }; - - ws.current = socket; - - return () => { - socket.close(); - }; - }, []); + const fetchLogos = useLogosStore((s) => s.fetchAllLogos); + const fetchChannelProfiles = useChannelsStore((s) => s.fetchChannelProfiles); const ret = useMemo(() => { return [isReady, ws.current?.send.bind(ws.current), val]; @@ -168,6 +912,51 @@ export const WebsocketProvider = ({ children }) => { return ( + {connectionError && + !isReady && + reconnectAttempts >= maxReconnectAttempts && ( + + {connectionError} + + + )} + {connectionError && + !isReady && + reconnectAttempts < maxReconnectAttempts && + reconnectAttempts > 0 && ( + + {connectionError} + + )} {children} ); diff --git a/frontend/src/api.js b/frontend/src/api.js index 3cec6e38..c33ff1ee 100644 --- a/frontend/src/api.js +++ b/frontend/src/api.js @@ -1,6 +1,7 @@ // src/api.js (updated) import useAuthStore from './store/auth'; import useChannelsStore from './store/channels'; +import useLogosStore from './store/logos'; import useUserAgentsStore from './store/userAgents'; import usePlaylistsStore from './store/playlists'; import useEPGsStore from './store/epgs'; @@ -9,6 +10,7 @@ import useStreamProfilesStore from './store/streamProfiles'; import useSettingsStore from './store/settings'; import { notifications } from '@mantine/notifications'; import useChannelsTableStore from './store/channelsTable'; +import useUsersStore from './store/users'; // If needed, you can set a base host or 
keep it empty if relative requests const host = import.meta.env.DEV @@ -103,14 +105,13 @@ export default class API { static async fetchSuperUser() { try { - const response = await request( - `${host}/api/accounts/initialize-superuser/`, - { auth: false } - ); - - return response; - } catch (e) { - errorNotification('Failed to fetch superuser', e); + return await request(`${host}/api/accounts/initialize-superuser/`, { + auth: false, + method: 'GET', + }); + } catch (error) { + console.error('Error checking superuser status:', error); + throw error; } } @@ -150,16 +151,26 @@ export default class API { } static async refreshToken(refresh) { - return await request(`${host}/api/accounts/token/refresh/`, { - auth: false, - method: 'POST', - body: { refresh }, - }); + try { + return await request(`${host}/api/accounts/token/refresh/`, { + auth: false, + method: 'POST', + body: { refresh }, + }); + } catch (error) { + // If user does not exist or token is invalid, clear tokens + if (error.status === 401 || error.message?.includes('does not exist')) { + localStorage.removeItem('token'); + localStorage.removeItem('refreshToken'); + window.location.href = '/login'; // Redirect to login + } + throw error; + } } static async logout() { return await request(`${host}/api/accounts/auth/logout/`, { - auth: false, + auth: true, // Send JWT token so backend can identify the user method: 'POST', }); } @@ -240,7 +251,17 @@ export default class API { }); if (response.id) { - useChannelsStore.getState().addChannelGroup(response); + // Add association flags for new groups + const processedGroup = { + ...response, + hasChannels: false, + hasM3UAccounts: false, + canEdit: true, + canDelete: true, + }; + useChannelsStore.getState().addChannelGroup(processedGroup); + // Refresh channel groups to update the UI + useChannelsStore.getState().fetchChannelGroups(); } return response; @@ -267,17 +288,71 @@ export default class API { } } + static async deleteChannelGroup(id) { + try { + await request(`${host}/api/channels/groups/${id}/`, { + method: 'DELETE', + }); + + // Remove from store after successful deletion + useChannelsStore.getState().removeChannelGroup(id); + + return true; + } catch (e) { + errorNotification('Failed to delete channel group', e); + throw e; + } + } + + static async cleanupUnusedChannelGroups() { + try { + const response = await request(`${host}/api/channels/groups/cleanup/`, { + method: 'POST', + }); + + // Refresh channel groups to update the UI + useChannelsStore.getState().fetchChannelGroups(); + + return response; + } catch (e) { + errorNotification('Failed to cleanup unused channel groups', e); + throw e; + } + } + static async addChannel(channel) { try { let body = null; + // Prepare a copy to safely mutate + const channelData = { ...channel }; + + // Remove channel_number if empty, null, or not a valid number + if ( + channelData.channel_number === '' || + channelData.channel_number === null || + channelData.channel_number === undefined || + (typeof channelData.channel_number === 'string' && channelData.channel_number.trim() === '') + ) { + delete channelData.channel_number; + } + + // Add channel profile IDs based on current selection + const selectedProfileId = useChannelsStore.getState().selectedProfileId; + if (selectedProfileId && selectedProfileId !== '0') { + // Specific profile selected - add only to that profile + channelData.channel_profile_ids = [parseInt(selectedProfileId)]; + } + // If selectedProfileId is '0' or not set, don't include channel_profile_ids + // which will 
trigger the backend's default behavior of adding to all profiles + if (channel.logo_file) { // Must send FormData for file upload body = new FormData(); - for (const prop in channel) { - body.append(prop, channel[prop]); + for (const prop in channelData) { + body.append(prop, channelData[prop]); } } else { - body = { ...channel }; + body = { ...channelData }; delete body.logo_file; } @@ -347,6 +422,11 @@ export default class API { payload.tvg_id = null; } + // Ensure tvc_guide_stationid is included properly (not as empty string) + if (payload.tvc_guide_stationid === '') { + payload.tvc_guide_stationid = null; + } + // Handle channel_number properly if (payload.channel_number === '') { payload.channel_number = null; @@ -354,8 +434,8 @@ export default class API { payload.channel_number !== null && payload.channel_number !== undefined ) { - const parsedNumber = parseInt(payload.channel_number, 10); - payload.channel_number = isNaN(parsedNumber) ? null : parsedNumber; + // Ensure channel_number is explicitly treated as a float + payload.channel_number = parseFloat(payload.channel_number); } const response = await request( @@ -373,6 +453,57 @@ export default class API { } } + static async updateChannels(ids, values) { + const body = []; + for (const id of ids) { + body.push({ + id: id, + ...values, + }); + } + + try { + const response = await request( + `${host}/api/channels/channels/edit/bulk/`, + { + method: 'PATCH', + body, + } + ); + + // Show success notification + if (response.message) { + notifications.show({ + title: 'Channels Updated', + message: response.message, + color: 'green', + autoClose: 4000, + }); + } + + return response; + } catch (e) { + errorNotification('Failed to update channels', e); + } + } + + // Bulk update with per-channel payloads (e.g., regex renames) + static async bulkUpdateChannels(updates) { + try { + const response = await request( + `${host}/api/channels/channels/edit/bulk/`, + { + method: 'PATCH', + body: updates, + } + ); + + return response; + } catch (e) { + errorNotification('Failed to update channels', e); + } + } + static async setChannelEPG(channelId, epgDataId) { try { const response = await request( @@ -403,16 +534,82 @@ export default class API { } } - static async assignChannelNumbers(channelIds) { + static async setChannelNamesFromEpg(channelIds) { + try { + const response = await request( + `${host}/api/channels/channels/set-names-from-epg/`, + { + method: 'POST', + body: { channel_ids: channelIds }, + } + ); + + notifications.show({ + title: 'Task Started', + message: response.message, + color: 'blue', + }); + + return response; + } catch (e) { + errorNotification('Failed to start EPG name setting task', e); + throw e; + } + } + + static async setChannelLogosFromEpg(channelIds) { + try { + const response = await request( + `${host}/api/channels/channels/set-logos-from-epg/`, + { + method: 'POST', + body: { channel_ids: channelIds }, + } + ); + + notifications.show({ + title: 'Task Started', + message: response.message, + color: 'blue', + }); + + return response; + } catch (e) { + errorNotification('Failed to start EPG logo setting task', e); + throw e; + } + } + + static async setChannelTvgIdsFromEpg(channelIds) { + try { + const response = await request( + `${host}/api/channels/channels/set-tvg-ids-from-epg/`, + { + method: 'POST', + body: { channel_ids: channelIds }, + } + ); + + notifications.show({ + title: 'Task Started', + message: response.message, + color: 'blue', + }); + + return response; + } catch (e) { + errorNotification('Failed to 
start EPG TVG-ID setting task', e); + throw e; + } + } + + static async assignChannelNumbers(channelIds, startingNum = 1) { try { const response = await request(`${host}/api/channels/channels/assign/`, { method: 'POST', - body: { channel_order: channelIds }, + body: { channel_ids: channelIds, starting_number: startingNum }, }); - // Optionally refesh the channel list in Zustand - // await useChannelsStore.getState().fetchChannels(); - return response; } catch (e) { errorNotification('Failed to assign channel #s', e); @@ -439,23 +636,32 @@ export default class API { } } - static async createChannelsFromStreams(values) { + static async createChannelsFromStreamsAsync(streamIds, channelProfileIds = null, startingChannelNumber = null) { try { + const requestBody = { + stream_ids: streamIds, + }; + + if (channelProfileIds !== null) { + requestBody.channel_profile_ids = channelProfileIds; + } + + if (startingChannelNumber !== null) { + requestBody.starting_channel_number = startingChannelNumber; + } + const response = await request( `${host}/api/channels/channels/from-stream/bulk/`, { method: 'POST', - body: values, + body: requestBody, } ); - if (response.created.length > 0) { - useChannelsStore.getState().addChannels(response.created); - } - return response; } catch (e) { - errorNotification('Failed to create channels', e); + errorNotification('Failed to start bulk channel creation task', e); + throw e; } } @@ -654,6 +860,31 @@ export default class API { } } + static async updateM3UGroupSettings( + playlistId, + groupSettings = [], + categorySettings = [] + ) { + try { + const response = await request( + `${host}/api/m3u/accounts/${playlistId}/group-settings/`, + { + method: 'PATCH', + body: { + group_settings: groupSettings, + category_settings: categorySettings, + }, + } + ); + // Fetch the updated playlist and update the store + const updatedPlaylist = await API.getPlaylist(playlistId); + usePlaylistsStore.getState().updatePlaylist(updatedPlaylist); + return response; + } catch (e) { + errorNotification('Failed to update M3U group settings', e); + } + } + static async addPlaylist(values) { try { let body = null; @@ -685,13 +916,11 @@ export default class API { const response = await request(`${host}/api/m3u/refresh/${id}/`, { method: 'POST', }); - return response; } catch (e) { errorNotification('Failed to refresh M3U account', e); } } - static async refreshAllPlaylist() { try { const response = await request(`${host}/api/m3u/refresh/`, { @@ -703,6 +932,19 @@ export default class API { errorNotification('Failed to refresh all M3U accounts', e); } } + static async refreshVODContent(accountId) { + try { + const response = await request( + `${host}/api/m3u/accounts/${accountId}/refresh-vod/`, + { + method: 'POST', + } + ); + return response; + } catch (e) { + errorNotification('Failed to refresh VOD content', e); + } + } static async deletePlaylist(id) { try { @@ -719,10 +961,26 @@ export default class API { } } - static async updatePlaylist(values) { + static async updatePlaylist(values, isToggle = false) { const { id, ...payload } = values; try { + // If this is just toggling the active state, make a simpler request + if ( + isToggle && + 'is_active' in payload && + Object.keys(payload).length === 1 + ) { + const response = await request(`${host}/api/m3u/accounts/${id}/`, { + method: 'PATCH', + body: { is_active: payload.is_active }, + }); + + usePlaylistsStore.getState().updatePlaylist(response); + return response; + } + + // Original implementation for full updates let body = null; if 
(payload.file) { delete payload.server_url; @@ -803,10 +1061,38 @@ export default class API { } } - static async updateEPG(values) { + static async updateEPG(values, isToggle = false) { + // Validate that values is an object + if (!values || typeof values !== 'object') { + console.error('updateEPG called with invalid values:', values); + return; + } + const { id, ...payload } = values; + // Validate that we have an ID and payload is an object + if (!id || typeof payload !== 'object') { + console.error('updateEPG: invalid id or payload', { id, payload }); + return; + } + try { + // If this is just toggling the active state, make a simpler request + if ( + isToggle && + 'is_active' in payload && + Object.keys(payload).length === 1 + ) { + const response = await request(`${host}/api/epg/sources/${id}/`, { + method: 'PATCH', + body: { is_active: payload.is_active }, + }); + + useEPGsStore.getState().updateEPG(response); + return response; + } + + // Original implementation for full updates let body = null; if (payload.files) { body = new FormData(); @@ -862,6 +1148,21 @@ export default class API { } } + static async getTimezones() { + try { + const response = await request(`${host}/api/core/timezones/`); + return response; + } catch (e) { + errorNotification('Failed to retrieve timezones', e); + // Return fallback data instead of throwing + return { + timezones: ['UTC', 'US/Eastern', 'US/Central', 'US/Mountain', 'US/Pacific'], + grouped: {}, + count: 5 + }; + } + } + static async getStreamProfiles() { try { const response = await request(`${host}/api/core/streamprofiles/`); @@ -979,6 +1280,64 @@ export default class API { } } + static async refreshAccountInfo(profileId) { + try { + const response = await request(`${host}/api/m3u/refresh-account-info/${profileId}/`, { + method: 'POST', + }); + return response; + } catch (e) { + // If it's a structured error response, return it instead of throwing + if (e.body && typeof e.body === 'object') { + return e.body; + } + errorNotification(`Failed to refresh account info for profile ${profileId}`, e); + throw e; + } + } + + static async addM3UFilter(accountId, values) { + try { + const response = await request( + `${host}/api/m3u/accounts/${accountId}/filters/`, + { + method: 'POST', + body: values, + } + ); + + return response; + } catch (e) { + errorNotification(`Failed to add profile to account ${accountId}`, e); + } + } + + static async deleteM3UFilter(accountId, id) { + try { + await request(`${host}/api/m3u/accounts/${accountId}/filters/${id}/`, { + method: 'DELETE', + }); + } catch (e) { + errorNotification(`Failed to delete profile for account ${accountId}`, e); + } + } + + static async updateM3UFilter(accountId, filterId, values) { + const { id, ...payload } = values; + + try { + await request( + `${host}/api/m3u/accounts/${accountId}/filters/${filterId}/`, + { + method: 'PUT', + body: payload, + } + ); + } catch (e) { + errorNotification(`Failed to update profile for account ${accountId}`, e); + } + } + static async getSettings() { try { const response = await request(`${host}/api/core/settings/`); @@ -999,6 +1358,183 @@ export default class API { } } + // Backup API (async with Celery task polling) + static async listBackups() { + try { + const response = await request(`${host}/api/backups/`); + return response || []; + } catch (e) { + errorNotification('Failed to load backups', e); + throw e; + } + } + + static async getBackupStatus(taskId, token = null) { + try { + let url = `${host}/api/backups/status/${taskId}/`; + if (token) { + url += 
`?token=${encodeURIComponent(token)}`; + } + const response = await request(url, { auth: !token }); + return response; + } catch (e) { + throw e; + } + } + + static async waitForBackupTask(taskId, onProgress, token = null) { + const pollInterval = 2000; // Poll every 2 seconds + const maxAttempts = 300; // Max 10 minutes (300 * 2s) + + for (let attempt = 0; attempt < maxAttempts; attempt++) { + try { + const status = await API.getBackupStatus(taskId, token); + + if (onProgress) { + onProgress(status); + } + + if (status.state === 'completed') { + return status.result; + } else if (status.state === 'failed') { + throw new Error(status.error || 'Task failed'); + } + } catch (e) { + throw e; + } + + // Wait before next poll + await new Promise((resolve) => setTimeout(resolve, pollInterval)); + } + + throw new Error('Task timed out'); + } + + static async createBackup(onProgress) { + try { + // Start the backup task + const response = await request(`${host}/api/backups/create/`, { + method: 'POST', + }); + + // Wait for the task to complete using token for auth + const result = await API.waitForBackupTask(response.task_id, onProgress, response.task_token); + return result; + } catch (e) { + errorNotification('Failed to create backup', e); + throw e; + } + } + + static async uploadBackup(file) { + try { + const formData = new FormData(); + formData.append('file', file); + + const response = await request( + `${host}/api/backups/upload/`, + { + method: 'POST', + body: formData, + } + ); + return response; + } catch (e) { + errorNotification('Failed to upload backup', e); + throw e; + } + } + + static async deleteBackup(filename) { + try { + const encodedFilename = encodeURIComponent(filename); + await request(`${host}/api/backups/${encodedFilename}/delete/`, { + method: 'DELETE', + }); + } catch (e) { + errorNotification('Failed to delete backup', e); + throw e; + } + } + + static async getDownloadToken(filename) { + // Get a download token from the server + try { + const response = await request(`${host}/api/backups/${encodeURIComponent(filename)}/download-token/`); + return response.token; + } catch (e) { + throw e; + } + } + + static async downloadBackup(filename) { + try { + // Get a download token first (requires auth) + const token = await API.getDownloadToken(filename); + const encodedFilename = encodeURIComponent(filename); + + // Build the download URL with token + const downloadUrl = `${host}/api/backups/${encodedFilename}/download/?token=${encodeURIComponent(token)}`; + + // Use direct browser navigation instead of fetch to avoid CORS issues + const link = document.createElement('a'); + link.href = downloadUrl; + link.download = filename; + document.body.appendChild(link); + link.click(); + document.body.removeChild(link); + + return { filename }; + } catch (e) { + errorNotification('Failed to download backup', e); + throw e; + } + } + + static async restoreBackup(filename, onProgress) { + try { + // Start the restore task + const encodedFilename = encodeURIComponent(filename); + const response = await request( + `${host}/api/backups/${encodedFilename}/restore/`, + { + method: 'POST', + } + ); + + // Wait for the task to complete using token for auth + // Token-based auth allows status polling even after DB restore invalidates user sessions + const result = await API.waitForBackupTask(response.task_id, onProgress, response.task_token); + return result; + } catch (e) { + errorNotification('Failed to restore backup', e); + throw e; + } + } + + static async getBackupSchedule() { + try { 
+ const response = await request(`${host}/api/backups/schedule/`); + return response; + } catch (e) { + errorNotification('Failed to get backup schedule', e); + throw e; + } + } + + static async updateBackupSchedule(settings) { + try { + const response = await request(`${host}/api/backups/schedule/update/`, { + method: 'PUT', + body: settings, + }); + return response; + } catch (e) { + errorNotification('Failed to update backup schedule', e); + throw e; + } + } + static async getVersion() { try { const response = await request(`${host}/api/core/version/`, { @@ -1011,6 +1547,109 @@ export default class API { } } + // Plugins API + static async getPlugins() { + try { + const response = await request(`${host}/api/plugins/plugins/`); + return response.plugins || []; + } catch (e) { + errorNotification('Failed to retrieve plugins', e); + } + } + + static async reloadPlugins() { + try { + const response = await request(`${host}/api/plugins/plugins/reload/`, { + method: 'POST', + }); + return response; + } catch (e) { + errorNotification('Failed to reload plugins', e); + } + } + + static async importPlugin(file) { + try { + const form = new FormData(); + form.append('file', file); + const response = await request(`${host}/api/plugins/plugins/import/`, { + method: 'POST', + body: form, + }); + return response; + } catch (e) { + // Show only the concise error message for plugin import + const msg = (e?.body && (e.body.error || e.body.detail)) || e?.message || 'Failed to import plugin'; + notifications.show({ title: 'Import failed', message: msg, color: 'red' }); + throw e; + } + } + + static async deletePlugin(key) { + try { + const response = await request(`${host}/api/plugins/plugins/${key}/delete/`, { + method: 'DELETE', + }); + return response; + } catch (e) { + errorNotification('Failed to delete plugin', e); + } + } + + static async updatePluginSettings(key, settings) { + try { + const response = await request( + `${host}/api/plugins/plugins/${key}/settings/`, + { + method: 'POST', + body: { settings }, + } + ); + return response?.settings || {}; + } catch (e) { + errorNotification('Failed to update plugin settings', e); + } + } + + static async runPluginAction(key, action, params = {}) { + try { + const response = await request(`${host}/api/plugins/plugins/${key}/run/`, { + method: 'POST', + body: { action, params }, + }); + return response; + } catch (e) { + errorNotification('Failed to run plugin action', e); + } + } + + static async setPluginEnabled(key, enabled) { + try { + const response = await request(`${host}/api/plugins/plugins/${key}/enabled/`, { + method: 'POST', + body: { enabled }, + }); + return response; + } catch (e) { + errorNotification('Failed to update plugin enabled state', e); + } + } + + static async checkSetting(values) { + const { id, ...payload } = values; + + try { + const response = await request(`${host}/api/core/settings/check/`, { + method: 'POST', + body: payload, + }); + + return response; + } catch (e) { + errorNotification('Failed to update settings', e); + } + } + static async updateSetting(values) { const { id, ...payload } = values; @@ -1028,6 +1667,19 @@ export default class API { } } + static async createSetting(values) { + try { + const response = await request(`${host}/api/core/settings/`, { + method: 'POST', + body: values, + }); + useSettingsStore.getState().updateSetting(response); + return response; + } catch (e) { + errorNotification('Failed to create setting', e); + } + } + static async getChannelStats(uuid = null) { try { const response = 
await request(`${host}/proxy/ts/status`); @@ -1038,6 +1690,29 @@ export default class API { } } + static async getVODStats() { + try { + const response = await request(`${host}/proxy/vod/stats/`); + + return response; + } catch (e) { + errorNotification('Failed to retrieve VOD stats', e); + } + } + + static async stopVODClient(clientId) { + try { + const response = await request(`${host}/proxy/vod/stop_client/`, { + method: 'POST', + body: { client_id: clientId }, + }); + + return response; + } catch (e) { + errorNotification('Failed to stop VOD client', e); + } + } + static async stopChannel(id) { try { const response = await request(`${host}/proxy/ts/stop/${id}`, { @@ -1066,12 +1741,18 @@ export default class API { } } - static async matchEpg() { + static async matchEpg(channelIds = null) { try { + const requestBody = channelIds ? { channel_ids: channelIds } : {}; + const response = await request( `${host}/api/channels/channels/match-epg/`, { method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(requestBody), } ); @@ -1081,9 +1762,42 @@ export default class API { } } - static async getLogos() { + static async matchChannelEpg(channelId) { try { - const response = await request(`${host}/api/channels/logos/`); + const response = await request( + `${host}/api/channels/channels/${channelId}/match-epg/`, + { + method: 'POST', + } + ); + + // Update the channel in the store with the refreshed data if provided + if (response.channel) { + useChannelsStore.getState().updateChannel(response.channel); + } + + return response; + } catch (e) { + errorNotification('Failed to run EPG auto-match for channel', e); + } + } + + static async fetchActiveChannelStats() { + try { + const response = await request(`${host}/proxy/ts/status`); + return response; + } catch (e) { + errorNotification('Failed to fetch active channel stats', e); + throw e; + } + } + + static async getLogos(params = {}) { + try { + const queryParams = new URLSearchParams(params); + const response = await request( + `${host}/api/channels/logos/?${queryParams.toString()}` + ); return response; } catch (e) { @@ -1091,21 +1805,277 @@ export default class API { } } - static async uploadLogo(file) { + static async getLogosByIds(logoIds) { + try { + if (!logoIds || logoIds.length === 0) return []; + + const params = new URLSearchParams(); + logoIds.forEach(id => params.append('ids', id)); + // Disable pagination for ID-based queries to get all matching logos + params.append('no_pagination', 'true'); + + const response = await request( + `${host}/api/channels/logos/?${params.toString()}` + ); + + return response; + } catch (e) { + errorNotification('Failed to retrieve logos by IDs', e); + return []; + } + } + + static async fetchLogos() { + try { + const response = await this.getLogos(); + useLogosStore.getState().setLogos(response); + return response; + } catch (e) { + errorNotification('Failed to fetch logos', e); + } + } + + static async fetchUsedLogos() { + try { + const response = await useLogosStore.getState().fetchUsedLogos(); + return response; + } catch (e) { + errorNotification('Failed to fetch used logos', e); + } + } + + static async fetchLogosByIds(logoIds) { + try { + const response = await useLogosStore.getState().fetchLogosByIds(logoIds); + return response; + } catch (e) { + errorNotification('Failed to fetch logos by IDs', e); + } + } + + static async uploadLogo(file, name = null) { try { const formData = new FormData(); formData.append('file', file); - const response = await 
request(`${host}/api/channels/logos/upload/`, { + // Add custom name if provided + if (name && name.trim()) { + formData.append('name', name.trim()); + } + + // Add timeout handling for file uploads + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), 30000); // 30 second timeout + + const response = await fetch(`${host}/api/channels/logos/upload/`, { + method: 'POST', + body: formData, + headers: { + Authorization: `Bearer ${await API.getAuthToken()}`, + }, + signal: controller.signal, + }); + + clearTimeout(timeoutId); + + if (!response.ok) { + const error = new Error(`HTTP error! Status: ${response.status}`); + let errorBody = await response.text(); + + try { + errorBody = JSON.parse(errorBody); + } catch (e) { + // If parsing fails, leave errorBody as the raw text + } + + error.status = response.status; + error.response = response; + error.body = errorBody; + throw error; + } + + const result = await response.json(); + useLogosStore.getState().addLogo(result); + return result; + } catch (e) { + if (e.name === 'AbortError') { + const timeoutError = new Error('Upload timed out. Please try again.'); + timeoutError.code = 'NETWORK_ERROR'; + throw timeoutError; + } + errorNotification('Failed to upload logo', e); + throw e; + } + } + + static async createLogo(values) { + try { + // Use FormData for logo creation to match backend expectations + const formData = new FormData(); + for (const [key, value] of Object.entries(values)) { + if (value !== null && value !== undefined) { + formData.append(key, value); + } + } + + const response = await request(`${host}/api/channels/logos/`, { method: 'POST', body: formData, }); - useChannelsStore.getState().addLogo(response); + useLogosStore.getState().addLogo(response); return response; } catch (e) { - errorNotification('Failed to upload logo', e); + errorNotification('Failed to create logo', e); + } + } + + static async updateLogo(id, values) { + try { + const response = await request(`${host}/api/channels/logos/${id}/`, { + method: 'PUT', + body: values, // This will be converted to JSON in the request function + }); + + useLogosStore.getState().updateLogo(response); + + return response; + } catch (e) { + errorNotification('Failed to update logo', e); + } + } + + static async deleteLogo(id, deleteFile = false) { + try { + const params = new URLSearchParams(); + if (deleteFile) { + params.append('delete_file', 'true'); + } + + const url = `${host}/api/channels/logos/${id}/?${params.toString()}`; + await request(url, { + method: 'DELETE', + }); + + useLogosStore.getState().removeLogo(id); + + return true; + } catch (e) { + errorNotification('Failed to delete logo', e); + } + } + + static async deleteLogos(ids, deleteFiles = false) { + try { + const body = { logo_ids: ids }; + if (deleteFiles) { + body.delete_files = true; + } + + await request(`${host}/api/channels/logos/bulk-delete/`, { + method: 'DELETE', + body: body, + }); + + // Remove multiple logos from store + ids.forEach((id) => { + useLogosStore.getState().removeLogo(id); + }); + + return true; + } catch (e) { + errorNotification('Failed to delete logos', e); + } + } + + static async cleanupUnusedLogos(deleteFiles = false) { + try { + const body = {}; + if (deleteFiles) { + body.delete_files = true; + } + + const response = await request(`${host}/api/channels/logos/cleanup/`, { + method: 'POST', + body: body, + }); + + return response; + } catch (e) { + errorNotification('Failed to cleanup unused logos', e); + throw e; + } + } + + // VOD Logo 
Methods + static async getVODLogos(params = {}) { + try { + // Transform usage filter to match backend expectations + const apiParams = { ...params }; + if (apiParams.usage === 'used') { + apiParams.used = 'true'; + delete apiParams.usage; + } else if (apiParams.usage === 'unused') { + apiParams.used = 'false'; + delete apiParams.usage; + } else if (apiParams.usage === 'movies') { + apiParams.used = 'movies'; + delete apiParams.usage; + } else if (apiParams.usage === 'series') { + apiParams.used = 'series'; + delete apiParams.usage; + } + + const queryParams = new URLSearchParams(apiParams); + const response = await request( + `${host}/api/vod/vodlogos/?${queryParams.toString()}` + ); + + return response; + } catch (e) { + errorNotification('Failed to retrieve VOD logos', e); + throw e; + } + } + + static async deleteVODLogo(id) { + try { + await request(`${host}/api/vod/vodlogos/${id}/`, { + method: 'DELETE', + }); + + return true; + } catch (e) { + errorNotification('Failed to delete VOD logo', e); + throw e; + } + } + + static async deleteVODLogos(ids) { + try { + await request(`${host}/api/vod/vodlogos/bulk-delete/`, { + method: 'DELETE', + body: { logo_ids: ids }, + }); + + return true; + } catch (e) { + errorNotification('Failed to delete VOD logos', e); + throw e; + } + } + + static async cleanupUnusedVODLogos() { + try { + const response = await request(`${host}/api/vod/vodlogos/cleanup/`, { + method: 'POST', + }); + + return response; + } catch (e) { + errorNotification('Failed to cleanup unused VOD logos', e); + throw e; } } @@ -1151,6 +2121,24 @@ export default class API { } } + static async duplicateChannelProfile(id, name) { + try { + const response = await request( + `${host}/api/channels/profiles/${id}/duplicate/`, + { + method: 'POST', + body: { name }, + } + ); + + useChannelsStore.getState().addProfile(response); + + return response; + } catch (e) { + errorNotification(`Failed to duplicate channel profile ${id}`, e); + } + } + static async deleteChannelProfile(id) { try { await request(`${host}/api/channels/profiles/${id}/`, { @@ -1232,24 +2220,191 @@ export default class API { } } - static async deleteRecording(id) { + static async updateRecording(id, values) { try { - await request(`${host}/api/channels/recordings/${id}/`, { + const response = await request(`${host}/api/channels/recordings/${id}/`, { + method: 'PATCH', + body: values, + }); + useChannelsStore.getState().fetchRecordings(); + return response; + } catch (e) { + errorNotification(`Failed to update recording ${id}`, e); + } + } + + static async getComskipConfig() { + try { + return await request(`${host}/api/channels/dvr/comskip-config/`); + } catch (e) { + errorNotification('Failed to retrieve comskip configuration', e); + } + } + + static async uploadComskipIni(file) { + try { + const formData = new FormData(); + formData.append('file', file); + return await request(`${host}/api/channels/dvr/comskip-config/`, { + method: 'POST', + body: formData, + }); + } catch (e) { + errorNotification('Failed to upload comskip.ini', e); + } + } + + static async listRecurringRules() { + try { + const response = await request(`${host}/api/channels/recurring-rules/`); + return response; + } catch (e) { + errorNotification('Failed to retrieve recurring DVR rules', e); + } + } + + static async createRecurringRule(payload) { + try { + const response = await request(`${host}/api/channels/recurring-rules/`, { + method: 'POST', + body: payload, + }); + return response; + } catch (e) { + errorNotification('Failed to create 
recurring DVR rule', e); + } + } + + static async updateRecurringRule(ruleId, payload) { + try { + const response = await request(`${host}/api/channels/recurring-rules/${ruleId}/`, { + method: 'PATCH', + body: payload, + }); + return response; + } catch (e) { + errorNotification(`Failed to update recurring rule ${ruleId}`, e); + } + } + + static async deleteRecurringRule(ruleId) { + try { + await request(`${host}/api/channels/recurring-rules/${ruleId}/`, { method: 'DELETE', }); + } catch (e) { + errorNotification(`Failed to delete recurring rule ${ruleId}`, e); + } + } - useChannelsStore.getState().fetchRecordings(); + static async deleteRecording(id) { + try { + await request(`${host}/api/channels/recordings/${id}/`, { method: 'DELETE' }); + // Optimistically remove locally for instant UI update + try { useChannelsStore.getState().removeRecording(id); } catch {} } catch (e) { errorNotification(`Failed to delete recording ${id}`, e); } } + static async runComskip(recordingId) { + try { + const resp = await request(`${host}/api/channels/recordings/${recordingId}/comskip/`, { + method: 'POST', + }); + // Refresh recordings list to reflect comskip status when done later + // This endpoint just queues the task; the websocket/refresh will update eventually + return resp; + } catch (e) { + errorNotification('Failed to run comskip', e); + throw e; + } + } + + // DVR Series Rules + static async listSeriesRules() { + try { + const resp = await request(`${host}/api/channels/series-rules/`); + return resp?.rules || []; + } catch (e) { + errorNotification('Failed to load series rules', e); + return []; + } + } + + static async createSeriesRule(values) { + try { + const resp = await request(`${host}/api/channels/series-rules/`, { + method: 'POST', + body: values, + }); + notifications.show({ title: 'Series rule saved' }); + return resp; + } catch (e) { + errorNotification('Failed to save series rule', e); + throw e; + } + } + + static async deleteSeriesRule(tvgId) { + try { + const encodedTvgId = encodeURIComponent(tvgId); + await request(`${host}/api/channels/series-rules/${encodedTvgId}/`, { method: 'DELETE' }); + notifications.show({ title: 'Series rule removed' }); + } catch (e) { + errorNotification('Failed to remove series rule', e); + throw e; + } + } + + static async deleteAllUpcomingRecordings() { + try { + const resp = await request(`${host}/api/channels/recordings/bulk-delete-upcoming/`, { + method: 'POST', + }); + notifications.show({ title: `Removed ${resp.removed || 0} upcoming` }); + useChannelsStore.getState().fetchRecordings(); + return resp; + } catch (e) { + errorNotification('Failed to delete upcoming recordings', e); + throw e; + } + } + + static async evaluateSeriesRules(tvgId = null) { + try { + await request(`${host}/api/channels/series-rules/evaluate/`, { + method: 'POST', + body: tvgId ? 
{ tvg_id: tvgId } : {}, + }); + } catch (e) { + errorNotification('Failed to evaluate series rules', e); + } + } + + static async bulkRemoveSeriesRecordings({ tvg_id, title = null, scope = 'title' }) { + try { + const resp = await request(`${host}/api/channels/series-rules/bulk-remove/`, { + method: 'POST', + body: { tvg_id, title, scope }, + }); + notifications.show({ title: `Removed ${resp.removed || 0} scheduled` }); + return resp; + } catch (e) { + errorNotification('Failed to bulk-remove scheduled recordings', e); + throw e; + } + } + static async switchStream(channelId, streamId) { try { - const response = await request(`${host}/proxy/ts/change_stream/${channelId}`, { - method: 'POST', - body: { stream_id: streamId }, - }); + const response = await request( + `${host}/proxy/ts/change_stream/${channelId}`, + { + method: 'POST', + body: { stream_id: streamId }, + } + ); return response; } catch (e) { @@ -1260,10 +2415,13 @@ export default class API { static async nextStream(channelId, streamId) { try { - const response = await request(`${host}/proxy/ts/next_stream/${channelId}`, { - method: 'POST', - body: { stream_id: streamId }, - }); + const response = await request( + `${host}/proxy/ts/next_stream/${channelId}`, + { + method: 'POST', + body: { stream_id: streamId }, + } + ); return response; } catch (e) { @@ -1284,9 +2442,15 @@ export default class API { // If successful, requery channels to update UI if (response.success) { + // Build message based on whether EPG sources need refreshing + let message = `Updated ${response.channels_updated} channel${response.channels_updated !== 1 ? 's' : ''}`; + if (response.programs_refreshed > 0) { + message += `, refreshing ${response.programs_refreshed} EPG source${response.programs_refreshed !== 1 ? 's' : ''}`; + } + notifications.show({ title: 'EPG Association', - message: `Updated ${response.channels_updated} channels, refreshing ${response.programs_refreshed} EPG sources.`, + message: message, color: 'blue', }); @@ -1301,4 +2465,267 @@ export default class API { errorNotification('Failed to update channel EPGs', e); } } + + static async getChannel(id) { + try { + const response = await request( + `${host}/api/channels/channels/${id}/?include_streams=true` + ); + return response; + } catch (e) { + errorNotification('Failed to fetch channel details', e); + return null; + } + } + + static async me() { + return await request(`${host}/api/accounts/users/me/`); + } + + static async getUsers() { + try { + const response = await request(`${host}/api/accounts/users/`); + return response; + } catch (e) { + errorNotification('Failed to fetch users', e); + } + } + + static async createUser(body) { + try { + const response = await request(`${host}/api/accounts/users/`, { + method: 'POST', + body, + }); + + useUsersStore.getState().addUser(response); + + return response; + } catch (e) { + errorNotification('Failed to fetch users', e); + } + } + + static async updateUser(id, body) { + try { + const response = await request(`${host}/api/accounts/users/${id}/`, { + method: 'PATCH', + body, + }); + + useUsersStore.getState().updateUser(response); + + return response; + } catch (e) { + errorNotification('Failed to fetch users', e); + } + } + + static async deleteUser(id) { + try { + await request(`${host}/api/accounts/users/${id}/`, { + method: 'DELETE', + }); + + useUsersStore.getState().removeUser(id); + } catch (e) { + errorNotification('Failed to delete user', e); + } + } + + static async rehashStreams() { + try { + const response = await 
request(`${host}/api/core/rehash-streams/`, { + method: 'POST', + }); + + return response; + } catch (e) { + errorNotification('Failed to trigger stream rehash', e); + } + } + + static async getStreamsByIds(ids) { + try { + // Use POST for large ID lists to avoid URL length limitations + if (ids.length > 50) { + const response = await request( + `${host}/api/channels/streams/by-ids/`, + { + method: 'POST', + body: { ids }, + } + ); + return response; + } else { + // Use GET for small ID lists for backward compatibility + const params = new URLSearchParams(); + params.append('ids', ids.join(',')); + const response = await request( + `${host}/api/channels/streams/?${params.toString()}` + ); + return response.results || response; + } + } catch (e) { + errorNotification('Failed to retrieve streams by IDs', e); + throw e; // Re-throw to allow proper error handling in calling code + } + } + + // VOD Methods + static async getMovies(params = new URLSearchParams()) { + try { + const response = await request( + `${host}/api/vod/movies/?${params.toString()}` + ); + return response; + } catch (e) { + // Don't show error notification for "Invalid page" errors as they're handled gracefully + const isInvalidPage = e.body?.detail?.includes('Invalid page') || + e.message?.includes('Invalid page'); + + if (!isInvalidPage) { + errorNotification('Failed to retrieve movies', e); + } + throw e; + } + } + + static async getSeries(params = new URLSearchParams()) { + try { + const response = await request( + `${host}/api/vod/series/?${params.toString()}` + ); + return response; + } catch (e) { + // Don't show error notification for "Invalid page" errors as they're handled gracefully + const isInvalidPage = e.body?.detail?.includes('Invalid page') || + e.message?.includes('Invalid page'); + + if (!isInvalidPage) { + errorNotification('Failed to retrieve series', e); + } + throw e; + } + } + + static async getAllContent(params = new URLSearchParams()) { + try { + console.log('Calling getAllContent with URL:', `${host}/api/vod/all/?${params.toString()}`); + const response = await request( + `${host}/api/vod/all/?${params.toString()}` + ); + console.log('getAllContent raw response:', response); + return response; + } catch (e) { + console.error('getAllContent error:', e); + console.error('Error status:', e.status); + console.error('Error body:', e.body); + console.error('Error message:', e.message); + + // Don't show error notification for "Invalid page" errors as they're handled gracefully + const isInvalidPage = e.body?.detail?.includes('Invalid page') || + e.message?.includes('Invalid page'); + + if (!isInvalidPage) { + errorNotification('Failed to retrieve content', e); + } + throw e; + } + } + + static async getMovieDetails(movieId) { + try { + const response = await request(`${host}/api/vod/movies/${movieId}/`); + return response; + } catch (e) { + errorNotification('Failed to retrieve movie details', e); + } + } + + static async getMovieProviderInfo(movieId) { + try { + const response = await request( + `${host}/api/vod/movies/${movieId}/provider-info/` + ); + return response; + } catch (e) { + errorNotification('Failed to retrieve movie provider info', e); + } + } + + static async getMovieProviders(movieId) { + try { + const response = await request( + `${host}/api/vod/movies/${movieId}/providers/` + ); + return response; + } catch (e) { + errorNotification('Failed to retrieve movie providers', e); + } + } + + static async getSeriesProviders(seriesId) { + try { + const response = await request( + 
`${host}/api/vod/series/${seriesId}/providers/` + ); + return response; + } catch (e) { + errorNotification('Failed to retrieve series providers', e); + } + } + + static async getVODCategories() { + try { + const response = await request(`${host}/api/vod/categories/`); + return response; + } catch (e) { + errorNotification('Failed to retrieve VOD categories', e); + } + } + + static async getSeriesInfo(seriesId) { + try { + // Call the provider-info endpoint that includes episodes + const response = await request( + `${host}/api/vod/series/${seriesId}/provider-info/?include_episodes=true` + ); + return response; + } catch (e) { + errorNotification('Failed to retrieve series info', e); + } + } + + static async updateVODPosition(vodUuid, clientId, position) { + try { + const response = await request( + `${host}/proxy/vod/stream/${vodUuid}/position/`, + { + method: 'POST', + body: { client_id: clientId, position }, + } + ); + return response; + } catch (e) { + errorNotification('Failed to update playback position', e); + } + } + + static async getSystemEvents(limit = 100, offset = 0, eventType = null) { + try { + const params = new URLSearchParams(); + params.append('limit', limit); + params.append('offset', offset); + if (eventType) { + params.append('event_type', eventType); + } + const response = await request( + `${host}/api/core/system-events/?${params.toString()}` + ); + return response; + } catch (e) { + errorNotification('Failed to retrieve system events', e); + } + } } diff --git a/frontend/src/components/ConfirmationDialog.jsx b/frontend/src/components/ConfirmationDialog.jsx new file mode 100644 index 00000000..94fb169c --- /dev/null +++ b/frontend/src/components/ConfirmationDialog.jsx @@ -0,0 +1,115 @@ +import { Modal, Group, Text, Button, Checkbox, Box } from '@mantine/core'; +import React, { useState } from 'react'; +import useWarningsStore from '../store/warnings'; + +/** + * A reusable confirmation dialog with option to suppress future warnings + * + * @param {Object} props - Component props + * @param {boolean} props.opened - Whether the dialog is visible + * @param {Function} props.onClose - Function to call when closing without confirming + * @param {Function} props.onConfirm - Function to call when confirming the action + * @param {string} props.title - Dialog title + * @param {string} props.message - Dialog message + * @param {string} props.confirmLabel - Text for the confirm button + * @param {string} props.cancelLabel - Text for the cancel button + * @param {string} props.actionKey - Unique key for this type of action (used for suppression) + * @param {Function} props.onSuppressChange - Called when "don't show again" option changes + * @param {string} [props.size='md'] - Size of the modal + * @param {boolean} [props.loading=false] - Whether the confirm button should show loading state + */ +const ConfirmationDialog = ({ + opened, + onClose, + onConfirm, + title = 'Confirm Action', + message = 'Are you sure you want to proceed?', + confirmLabel = 'Confirm', + cancelLabel = 'Cancel', + actionKey, + onSuppressChange, + size = 'md', + zIndex = 1000, + showDeleteFileOption = false, + deleteFileLabel = 'Also delete files from disk', + loading = false, +}) => { + const suppressWarning = useWarningsStore((s) => s.suppressWarning); + const isWarningSuppressed = useWarningsStore((s) => s.isWarningSuppressed); + const [suppressChecked, setSuppressChecked] = useState( + isWarningSuppressed(actionKey) + ); + const [deleteFiles, setDeleteFiles] = useState(false); + + const 
handleToggleSuppress = (e) => { + setSuppressChecked(e.currentTarget.checked); + if (onSuppressChange) { + onSuppressChange(e.currentTarget.checked); + } + }; + + const handleConfirm = () => { + if (suppressChecked) { + suppressWarning(actionKey); + } + if (showDeleteFileOption) { + onConfirm(deleteFiles); + } else { + onConfirm(); + } + setDeleteFiles(false); // Reset for next time + }; + + const handleClose = () => { + setDeleteFiles(false); // Reset for next time + onClose(); + }; + + return ( + + {message} + + {actionKey && ( + + )} + + {showDeleteFileOption && ( + setDeleteFiles(event.currentTarget.checked)} + label={deleteFileLabel} + mb="md" + /> + )} + + + + + + + ); +}; + +export default ConfirmationDialog; diff --git a/frontend/src/components/ErrorBoundary.jsx b/frontend/src/components/ErrorBoundary.jsx new file mode 100644 index 00000000..60c4ba38 --- /dev/null +++ b/frontend/src/components/ErrorBoundary.jsx @@ -0,0 +1,18 @@ +import React from 'react'; + +class ErrorBoundary extends React.Component { + state = { hasError: false }; + + static getDerivedStateFromError(error) { + return { hasError: true }; + } + + render() { + if (this.state.hasError) { + return
<div>Something went wrong</div>
; + } + return this.props.children; + } +} + +export default ErrorBoundary; \ No newline at end of file diff --git a/frontend/src/components/Field.jsx b/frontend/src/components/Field.jsx new file mode 100644 index 00000000..1293bf7b --- /dev/null +++ b/frontend/src/components/Field.jsx @@ -0,0 +1,47 @@ +import { NumberInput, Select, Switch, TextInput } from '@mantine/core'; +import React from 'react'; + +export const Field = ({ field, value, onChange }) => { + const common = { label: field.label, description: field.help_text }; + const effective = value ?? field.default; + switch (field.type) { + case 'boolean': + return ( + onChange(field.id, e.currentTarget.checked)} + label={field.label} + description={field.help_text} + /> + ); + case 'number': + return ( + onChange(field.id, v)} + {...common} + /> + ); + case 'select': + return ( + - - - - - - - - - - - - - - - - - - - - - - ); -}; - -export default ProxyManager; \ No newline at end of file diff --git a/frontend/src/components/RecordingSynopsis.jsx b/frontend/src/components/RecordingSynopsis.jsx new file mode 100644 index 00000000..bf668afe --- /dev/null +++ b/frontend/src/components/RecordingSynopsis.jsx @@ -0,0 +1,26 @@ +import { Text, } from '@mantine/core'; + +// Short preview that triggers the details modal when clicked +const RecordingSynopsis = ({ description, onOpen }) => { + const truncated = description?.length > 140; + const preview = truncated + ? `${description.slice(0, 140).trim()}...` + : description; + + if (!description) return null; + + return ( + onOpen?.()} + style={{ cursor: 'pointer' }} + > + {preview} + + ); +}; + +export default RecordingSynopsis; \ No newline at end of file diff --git a/frontend/src/components/SeriesModal.jsx b/frontend/src/components/SeriesModal.jsx new file mode 100644 index 00000000..05023712 --- /dev/null +++ b/frontend/src/components/SeriesModal.jsx @@ -0,0 +1,950 @@ +import React, { useState, useEffect } from 'react'; +import { + Box, + Button, + Flex, + Group, + Image, + Text, + Title, + Select, + Badge, + Loader, + Stack, + ActionIcon, + Modal, + Tabs, + Table, + Divider, +} from '@mantine/core'; +import { Play, Copy } from 'lucide-react'; +import { notifications } from '@mantine/notifications'; +import { copyToClipboard } from '../utils'; +import useVODStore from '../store/useVODStore'; +import useVideoStore from '../store/useVideoStore'; +import useSettingsStore from '../store/settings'; + +const imdbUrl = (imdb_id) => + imdb_id ? `https://www.imdb.com/title/${imdb_id}` : ''; +const tmdbUrl = (tmdb_id, type = 'movie') => + tmdb_id ? `https://www.themoviedb.org/${type}/${tmdb_id}` : ''; +const formatDuration = (seconds) => { + if (!seconds) return ''; + const hours = Math.floor(seconds / 3600); + const mins = Math.floor((seconds % 3600) / 60); + const secs = seconds % 60; + return hours > 0 ? `${hours}h ${mins}m` : `${mins}m ${secs}s`; +}; + +const formatStreamLabel = (relation) => { + // Create a label for the stream that includes provider name and stream-specific info + const provider = relation.m3u_account.name; + const streamId = relation.stream_id; + + // Try to extract quality info - prioritizing the new quality_info field from backend + let qualityInfo = ''; + + // 1. 
Check the new quality_info field from backend (PRIMARY) + if (relation.quality_info) { + if (relation.quality_info.quality) { + qualityInfo = ` - ${relation.quality_info.quality}`; + } else if (relation.quality_info.resolution) { + qualityInfo = ` - ${relation.quality_info.resolution}`; + } else if (relation.quality_info.bitrate) { + qualityInfo = ` - ${relation.quality_info.bitrate}`; + } + } + + // 2. Fallback: Check custom_properties detailed info structure + if (qualityInfo === '' && relation.custom_properties) { + const props = relation.custom_properties; + + // Check detailed_info structure (where the real data is!) + if (qualityInfo === '' && props.detailed_info) { + const detailedInfo = props.detailed_info; + + // Extract from video resolution + if ( + detailedInfo.video && + detailedInfo.video.width && + detailedInfo.video.height + ) { + const width = detailedInfo.video.width; + const height = detailedInfo.video.height; + + // Prioritize width for quality detection (handles ultrawide/cinematic aspect ratios) + if (width >= 3840) { + qualityInfo = ' - 4K'; + } else if (width >= 1920) { + qualityInfo = ' - 1080p'; + } else if (width >= 1280) { + qualityInfo = ' - 720p'; + } else if (width >= 854) { + qualityInfo = ' - 480p'; + } else { + qualityInfo = ` - ${width}x${height}`; + } + } + + // Extract from movie name in detailed_info + if (qualityInfo === '' && detailedInfo.name) { + const name = detailedInfo.name; + if (name.includes('4K') || name.includes('2160p')) { + qualityInfo = ' - 4K'; + } else if (name.includes('1080p') || name.includes('FHD')) { + qualityInfo = ' - 1080p'; + } else if (name.includes('720p') || name.includes('HD')) { + qualityInfo = ' - 720p'; + } else if (name.includes('480p')) { + qualityInfo = ' - 480p'; + } + } + } + } + + // 3. Final fallback: Check stream name for quality markers + if (qualityInfo === '' && relation.stream_name) { + const streamName = relation.stream_name; + if (streamName.includes('4K') || streamName.includes('2160p')) { + qualityInfo = ' - 4K'; + } else if (streamName.includes('1080p') || streamName.includes('FHD')) { + qualityInfo = ' - 1080p'; + } else if (streamName.includes('720p') || streamName.includes('HD')) { + qualityInfo = ' - 720p'; + } else if (streamName.includes('480p')) { + qualityInfo = ' - 480p'; + } + } + + return `${provider}${qualityInfo}${streamId ? 
` (Stream ${streamId})` : ''}`; +}; + +const SeriesModal = ({ series, opened, onClose }) => { + const { fetchSeriesInfo, fetchSeriesProviders } = useVODStore(); + const showVideo = useVideoStore((s) => s.showVideo); + const env_mode = useSettingsStore((s) => s.environment.env_mode); + const [detailedSeries, setDetailedSeries] = useState(null); + const [loadingDetails, setLoadingDetails] = useState(false); + const [activeTab, setActiveTab] = useState(null); + const [expandedEpisode, setExpandedEpisode] = useState(null); + const [trailerModalOpened, setTrailerModalOpened] = useState(false); + const [trailerUrl, setTrailerUrl] = useState(''); + const [providers, setProviders] = useState([]); + const [selectedProvider, setSelectedProvider] = useState(null); + const [loadingProviders, setLoadingProviders] = useState(false); + + useEffect(() => { + if (opened && series) { + // Fetch detailed series info which now includes episodes + setLoadingDetails(true); + fetchSeriesInfo(series.id) + .then((details) => { + setDetailedSeries(details); + // Check if episodes were fetched + if (!details.episodes_fetched) { + // Episodes not yet fetched, may need to wait for background fetch + } + }) + .catch((error) => { + console.warn( + 'Failed to fetch series details, using basic info:', + error + ); + setDetailedSeries(series); // Fallback to basic data + }) + .finally(() => { + setLoadingDetails(false); + }); + + // Fetch available providers + setLoadingProviders(true); + fetchSeriesProviders(series.id) + .then((providersData) => { + setProviders(providersData); + // Set the first provider as default if none selected + if (providersData.length > 0 && !selectedProvider) { + setSelectedProvider(providersData[0]); + } + }) + .catch((error) => { + console.error('Failed to fetch series providers:', error); + setProviders([]); + }) + .finally(() => { + setLoadingProviders(false); + }); + } + }, [opened, series, fetchSeriesInfo, fetchSeriesProviders, selectedProvider]); + + useEffect(() => { + if (!opened) { + setDetailedSeries(null); + setLoadingDetails(false); + setProviders([]); + setSelectedProvider(null); + setLoadingProviders(false); + } + }, [opened]); + + // Get episodes from the store based on the series ID + const seriesEpisodes = React.useMemo(() => { + if (!detailedSeries) return []; + + // Try to get episodes from the fetched data + if (detailedSeries.episodesList) { + return detailedSeries.episodesList.sort((a, b) => { + if (a.season_number !== b.season_number) { + return (a.season_number || 0) - (b.season_number || 0); + } + return (a.episode_number || 0) - (b.episode_number || 0); + }); + } + + // If no episodes in detailed series, return empty array + return []; + }, [detailedSeries]); + + // Group episodes by season + const episodesBySeason = React.useMemo(() => { + const grouped = {}; + seriesEpisodes.forEach((episode) => { + const season = episode.season_number || 1; + if (!grouped[season]) { + grouped[season] = []; + } + grouped[season].push(episode); + }); + return grouped; + }, [seriesEpisodes]); + + // Get available seasons sorted + const seasons = React.useMemo(() => { + return Object.keys(episodesBySeason) + .map(Number) + .sort((a, b) => a - b); + }, [episodesBySeason]); + + // Update active tab when seasons change or modal opens + React.useEffect(() => { + if (seasons.length > 0) { + if ( + !activeTab || + !seasons.includes(parseInt(activeTab.replace('season-', ''))) + ) { + setActiveTab(`season-${seasons[0]}`); + } + } + }, [seasons, activeTab]); + + // Reset tab when modal closes + 
React.useEffect(() => { + if (!opened) { + setActiveTab(null); + } + }, [opened]); + + const handlePlayEpisode = (episode) => { + let streamUrl = `/proxy/vod/episode/${episode.uuid}`; + + // Add selected provider as query parameter if available + if (selectedProvider) { + // Use stream_id for most specific selection, fallback to account_id + if (selectedProvider.stream_id) { + streamUrl += `?stream_id=${encodeURIComponent(selectedProvider.stream_id)}`; + } else { + streamUrl += `?m3u_account_id=${selectedProvider.m3u_account.id}`; + } + } + + if (env_mode === 'dev') { + streamUrl = `${window.location.protocol}//${window.location.hostname}:5656${streamUrl}`; + } else { + streamUrl = `${window.location.origin}${streamUrl}`; + } + showVideo(streamUrl, 'vod', episode); + }; + + const getEpisodeStreamUrl = (episode) => { + let streamUrl = `/proxy/vod/episode/${episode.uuid}`; + + // Add selected provider as query parameter if available + if (selectedProvider) { + // Use stream_id for most specific selection, fallback to account_id + if (selectedProvider.stream_id) { + streamUrl += `?stream_id=${encodeURIComponent(selectedProvider.stream_id)}`; + } else { + streamUrl += `?m3u_account_id=${selectedProvider.m3u_account.id}`; + } + } + + if (env_mode === 'dev') { + streamUrl = `${window.location.protocol}//${window.location.hostname}:5656${streamUrl}`; + } else { + streamUrl = `${window.location.origin}${streamUrl}`; + } + return streamUrl; + }; + + const handleCopyEpisodeLink = async (episode) => { + const streamUrl = getEpisodeStreamUrl(episode); + const success = await copyToClipboard(streamUrl); + notifications.show({ + title: success ? 'Link Copied!' : 'Copy Failed', + message: success + ? 'Episode link copied to clipboard' + : 'Failed to copy link to clipboard', + color: success ? 'green' : 'red', + }); + }; + + const handleEpisodeRowClick = (episode) => { + setExpandedEpisode(expandedEpisode === episode.id ? null : episode.id); + }; + + // Helper to get embeddable YouTube URL + const getEmbedUrl = (url) => { + if (!url) return ''; + // Accepts full YouTube URLs or just IDs + const match = url.match(/(?:youtube\.com\/watch\?v=|youtu\.be\/)([\w-]+)/); + const videoId = match ? match[1] : url; + return `https://www.youtube.com/embed/${videoId}`; + }; + + if (!series) return null; + + // Use detailed data if available, otherwise use basic series data + const displaySeries = detailedSeries || series; + + return ( + <> + + + {/* Backdrop image as background */} + {displaySeries.backdrop_path && + displaySeries.backdrop_path.length > 0 && ( + <> + {`${displaySeries.name} + {/* Overlay for readability */} + + + )} + + {/* Modal content above backdrop */} + + + {loadingDetails && ( + + + + Loading series details and episodes... + + + )} + + {/* Series poster and basic info */} + + {displaySeries.series_image || displaySeries.logo?.url ? 
( + + {displaySeries.name} + + ) : ( + + + + )} + + + {displaySeries.name} + + {/* Original name if different */} + {displaySeries.o_name && + displaySeries.o_name !== displaySeries.name && ( + + Original: {displaySeries.o_name} + + )} + + + {displaySeries.year && ( + {displaySeries.year} + )} + {displaySeries.rating && ( + {displaySeries.rating} + )} + {displaySeries.age && ( + {displaySeries.age} + )} + Series + {displaySeries.episode_count && ( + + {displaySeries.episode_count} episodes + + )} + {/* imdb_id and tmdb_id badges */} + {displaySeries.imdb_id && ( + + IMDb + + )} + {displaySeries.tmdb_id && ( + + TMDb + + )} + + + {/* Release date */} + {displaySeries.release_date && ( + + Release Date:{' '} + {displaySeries.release_date} + + )} + + {displaySeries.genre && ( + + Genre: {displaySeries.genre} + + )} + + {displaySeries.director && ( + + Director: {displaySeries.director} + + )} + + {displaySeries.cast && ( + + Cast: {displaySeries.cast} + + )} + + {displaySeries.country && ( + + Country: {displaySeries.country} + + )} + + {/* Description */} + {displaySeries.description && ( + + + Description + + {displaySeries.description} + + )} + + {/* Watch Trailer button if available */} + {displaySeries.youtube_trailer && ( + + )} + + + + {/* Provider Information */} + + + Stream Selection + {loadingProviders && ( + + )} + + {providers.length === 0 && + !loadingProviders && + displaySeries.m3u_account ? ( + + + {displaySeries.m3u_account.name} + + + ) : providers.length === 1 ? ( + + + {providers[0].m3u_account.name} + + {providers[0].stream_id && ( + + Stream {providers[0].stream_id} + + )} + + ) : providers.length > 1 ? ( + setEventsRefreshInterval(parseInt(value))} + data={[ + { value: '0', label: 'Manual' }, + { value: '5', label: '5s' }, + { value: '10', label: '10s' }, + { value: '30', label: '30s' }, + { value: '60', label: '1m' }, + ]} + style={{ width: 120 }} + /> + + + )} + setIsExpanded(!isExpanded)} + > + + + + + + {isExpanded && ( + <> + {totalEvents > eventsLimit && ( + + + Showing {offset + 1}- + {Math.min(offset + eventsLimit, totalEvents)} of {totalEvents} + + + + )} + + {events.length === 0 ? ( + + No events recorded yet + + ) : ( + events.map((event) => ( + + + + + {getEventIcon(event.event_type)} + + + + + {event.event_type_display || event.event_type} + + {event.channel_name && ( + + {event.channel_name} + + )} + + {event.details && + Object.keys(event.details).length > 0 && ( + + {Object.entries(event.details) + .filter( + ([key]) => + !['stream_url', 'new_url'].includes(key) + ) + .map(([key, value]) => `${key}: ${value}`) + .join(', ')} + + )} + + + + {dayjs(event.timestamp).format(`${dateFormat} HH:mm:ss`)} + + + + )) + )} + + + )} + + ); +}; + +export default SystemEvents; diff --git a/frontend/src/components/VODModal.jsx b/frontend/src/components/VODModal.jsx new file mode 100644 index 00000000..7df90ec0 --- /dev/null +++ b/frontend/src/components/VODModal.jsx @@ -0,0 +1,716 @@ +import React, { useState, useEffect } from 'react'; +import { + Box, + Button, + Flex, + Group, + Image, + Text, + Title, + Select, + Badge, + Loader, + Stack, + Modal, +} from '@mantine/core'; +import { Play, Copy } from 'lucide-react'; +import { notifications } from '@mantine/notifications'; +import { copyToClipboard } from '../utils'; +import useVODStore from '../store/useVODStore'; +import useVideoStore from '../store/useVideoStore'; +import useSettingsStore from '../store/settings'; + +const imdbUrl = (imdb_id) => + imdb_id ? 
`https://www.imdb.com/title/${imdb_id}` : ''; +const tmdbUrl = (tmdb_id, type = 'movie') => + tmdb_id ? `https://www.themoviedb.org/${type}/${tmdb_id}` : ''; +const formatDuration = (seconds) => { + if (!seconds) return ''; + const hours = Math.floor(seconds / 3600); + const mins = Math.floor((seconds % 3600) / 60); + const secs = seconds % 60; + return hours > 0 ? `${hours}h ${mins}m` : `${mins}m ${secs}s`; +}; + +const formatStreamLabel = (relation) => { + // Create a label for the stream that includes provider name and stream-specific info + const provider = relation.m3u_account.name; + const streamId = relation.stream_id; + + // Try to extract quality info - prioritizing the new quality_info field from backend + let qualityInfo = ''; + + // 1. Check the new quality_info field from backend (PRIMARY) + if (relation.quality_info) { + if (relation.quality_info.quality) { + qualityInfo = ` - ${relation.quality_info.quality}`; + } else if (relation.quality_info.resolution) { + qualityInfo = ` - ${relation.quality_info.resolution}`; + } else if (relation.quality_info.bitrate) { + qualityInfo = ` - ${relation.quality_info.bitrate}`; + } + } + + // 2. Fallback: Check custom_properties detailed info structure + if (qualityInfo === '' && relation.custom_properties) { + const props = relation.custom_properties; + + // Check detailed_info structure (where the real data is!) + if (qualityInfo === '' && props.detailed_info) { + const detailedInfo = props.detailed_info; + + // Extract from video resolution + if ( + detailedInfo.video && + detailedInfo.video.width && + detailedInfo.video.height + ) { + const width = detailedInfo.video.width; + const height = detailedInfo.video.height; + + // Prioritize width for quality detection (handles ultrawide/cinematic aspect ratios) + if (width >= 3840) { + qualityInfo = ' - 4K'; + } else if (width >= 1920) { + qualityInfo = ' - 1080p'; + } else if (width >= 1280) { + qualityInfo = ' - 720p'; + } else if (width >= 854) { + qualityInfo = ' - 480p'; + } else { + qualityInfo = ` - ${width}x${height}`; + } + } + + // Extract from movie name in detailed_info + if (qualityInfo === '' && detailedInfo.name) { + const name = detailedInfo.name; + if (name.includes('4K') || name.includes('2160p')) { + qualityInfo = ' - 4K'; + } else if (name.includes('1080p') || name.includes('FHD')) { + qualityInfo = ' - 1080p'; + } else if (name.includes('720p') || name.includes('HD')) { + qualityInfo = ' - 720p'; + } else if (name.includes('480p')) { + qualityInfo = ' - 480p'; + } + } + } + } + + // 3. Final fallback: Check stream name for quality markers + if (qualityInfo === '' && relation.stream_name) { + const streamName = relation.stream_name; + if (streamName.includes('4K') || streamName.includes('2160p')) { + qualityInfo = ' - 4K'; + } else if (streamName.includes('1080p') || streamName.includes('FHD')) { + qualityInfo = ' - 1080p'; + } else if (streamName.includes('720p') || streamName.includes('HD')) { + qualityInfo = ' - 720p'; + } else if (streamName.includes('480p')) { + qualityInfo = ' - 480p'; + } + } + + return `${provider}${qualityInfo}${streamId ? ` (Stream ${streamId})` : ''}`; +}; + +const getTechnicalDetails = (selectedProvider, defaultVOD) => { + let source = defaultVOD; // Default fallback + + // If a provider is selected, try to get technical details from various locations + if (selectedProvider) { + // 1. 
First try the movie/episode relation content + const content = selectedProvider.movie || selectedProvider.episode; + + if (content && (content.bitrate || content.video || content.audio)) { + source = content; + } + // 2. Try technical details directly on the relation object + else if ( + selectedProvider.bitrate || + selectedProvider.video || + selectedProvider.audio + ) { + source = selectedProvider; + } + // 3. Try to extract from custom_properties detailed_info (where quality data is stored) + else if (selectedProvider.custom_properties?.detailed_info) { + const detailedInfo = selectedProvider.custom_properties.detailed_info; + + // Create a synthetic source from detailed_info + const syntheticSource = { + bitrate: detailedInfo.bitrate || null, + video: detailedInfo.video || null, + audio: detailedInfo.audio || null, + }; + + if ( + syntheticSource.bitrate || + syntheticSource.video || + syntheticSource.audio + ) { + source = syntheticSource; + } + } + } + + return { + bitrate: source?.bitrate, + video: source?.video, + audio: source?.audio, + }; +}; + +const VODModal = ({ vod, opened, onClose }) => { + const [detailedVOD, setDetailedVOD] = useState(null); + const [loadingDetails, setLoadingDetails] = useState(false); + const [trailerModalOpened, setTrailerModalOpened] = useState(false); + const [trailerUrl, setTrailerUrl] = useState(''); + const [providers, setProviders] = useState([]); + const [selectedProvider, setSelectedProvider] = useState(null); + const [loadingProviders, setLoadingProviders] = useState(false); + const { fetchMovieDetailsFromProvider, fetchMovieProviders } = useVODStore(); + const showVideo = useVideoStore((s) => s.showVideo); + const env_mode = useSettingsStore((s) => s.environment.env_mode); + + useEffect(() => { + if (opened && vod) { + // Fetch detailed VOD info if not already loaded + if (!detailedVOD) { + setLoadingDetails(true); + fetchMovieDetailsFromProvider(vod.id) + .then((details) => { + setDetailedVOD(details); + }) + .catch((error) => { + console.warn( + 'Failed to fetch provider details, using basic info:', + error + ); + setDetailedVOD(vod); // Fallback to basic data + }) + .finally(() => { + setLoadingDetails(false); + }); + } + + // Fetch available providers + setLoadingProviders(true); + fetchMovieProviders(vod.id) + .then((providersData) => { + setProviders(providersData); + // Set the first provider as default if none selected + if (providersData.length > 0 && !selectedProvider) { + setSelectedProvider(providersData[0]); + } + }) + .catch((error) => { + console.error('Failed to fetch providers:', error); + setProviders([]); + }) + .finally(() => { + setLoadingProviders(false); + }); + } + }, [ + opened, + vod, + detailedVOD, + fetchMovieDetailsFromProvider, + fetchMovieProviders, + selectedProvider, + ]); + + useEffect(() => { + if (!opened) { + setDetailedVOD(null); + setLoadingDetails(false); + setTrailerModalOpened(false); + setTrailerUrl(''); + setProviders([]); + setSelectedProvider(null); + setLoadingProviders(false); + } + }, [opened]); + + const getStreamUrl = () => { + const vodToPlay = detailedVOD || vod; + if (!vodToPlay) return null; + + let streamUrl = `/proxy/vod/movie/${vod.uuid}`; + + // Add selected provider as query parameter if available + if (selectedProvider) { + // Use stream_id for most specific selection, fallback to account_id + if (selectedProvider.stream_id) { + streamUrl += `?stream_id=${encodeURIComponent(selectedProvider.stream_id)}`; + } else { + streamUrl += `?m3u_account_id=${selectedProvider.m3u_account.id}`; 
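        // Illustrative result with hypothetical IDs: "/proxy/vod/movie/<uuid>?stream_id=42"
        // when the relation exposes a stream_id, otherwise "/proxy/vod/movie/<uuid>?m3u_account_id=7".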
+ } + } + + if (env_mode === 'dev') { + streamUrl = `${window.location.protocol}//${window.location.hostname}:5656${streamUrl}`; + } else { + streamUrl = `${window.location.origin}${streamUrl}`; + } + return streamUrl; + }; + + const handlePlayVOD = () => { + const streamUrl = getStreamUrl(); + if (!streamUrl) return; + const vodToPlay = detailedVOD || vod; + showVideo(streamUrl, 'vod', vodToPlay); + }; + + const handleCopyLink = async () => { + const streamUrl = getStreamUrl(); + if (!streamUrl) return; + const success = await copyToClipboard(streamUrl); + notifications.show({ + title: success ? 'Link Copied!' : 'Copy Failed', + message: success + ? 'Stream link copied to clipboard' + : 'Failed to copy link to clipboard', + color: success ? 'green' : 'red', + }); + }; + + // Helper to get embeddable YouTube URL + const getEmbedUrl = (url) => { + if (!url) return ''; + // Accepts full YouTube URLs or just IDs + const match = url.match(/(?:youtube\.com\/watch\?v=|youtu\.be\/)([\w-]+)/); + const videoId = match ? match[1] : url; + return `https://www.youtube.com/embed/${videoId}`; + }; + + if (!vod) return null; + + // Use detailed data if available, otherwise use basic vod data + const displayVOD = detailedVOD || vod; + + return ( + <> + + + {/* Backdrop image as background */} + {displayVOD.backdrop_path && displayVOD.backdrop_path.length > 0 && ( + <> + {`${displayVOD.name} + {/* Overlay for readability */} + + + )} + {/* Modal content above backdrop */} + + + {loadingDetails && ( + + + + Loading additional details... + + + )} + + {/* Movie poster and basic info */} + + {/* Use movie_image or logo */} + {displayVOD.movie_image || displayVOD.logo?.url ? ( + + {displayVOD.name} + + ) : ( + + + + )} + + + {displayVOD.name} + + {/* Original name if different */} + {displayVOD.o_name && + displayVOD.o_name !== displayVOD.name && ( + + Original: {displayVOD.o_name} + + )} + + + {displayVOD.year && ( + {displayVOD.year} + )} + {displayVOD.duration_secs && ( + + {formatDuration(displayVOD.duration_secs)} + + )} + {displayVOD.rating && ( + {displayVOD.rating} + )} + {displayVOD.age && ( + {displayVOD.age} + )} + Movie + {/* imdb_id and tmdb_id badges */} + {displayVOD.imdb_id && ( + + IMDb + + )} + {displayVOD.tmdb_id && ( + + TMDb + + )} + + + {/* Release date */} + {displayVOD.release_date && ( + + Release Date: {displayVOD.release_date} + + )} + + {displayVOD.genre && ( + + Genre: {displayVOD.genre} + + )} + + {displayVOD.director && ( + + Director: {displayVOD.director} + + )} + + {displayVOD.actors && ( + + Cast: {displayVOD.actors} + + )} + + {displayVOD.country && ( + + Country: {displayVOD.country} + + )} + + {/* Description */} + {displayVOD.description && ( + + + Description + + {displayVOD.description} + + )} + + {/* Play and Watch Trailer buttons */} + + + {displayVOD.youtube_trailer && ( + + )} + + + + + + {/* Provider Information & Play Button Row */} + + {/* Provider Selection */} + {providers.length > 0 && ( + + + Stream Selection + {loadingProviders && ( + + )} + + {providers.length === 1 ? ( + + + {providers[0].m3u_account.name} + + + ) : ( + + handleScheduleChange('frequency', value) + } + data={[ + { value: 'daily', label: 'Daily' }, + { value: 'weekly', label: 'Weekly' }, + ]} + disabled={!schedule.enabled} + /> + {schedule.frequency === 'weekly' && ( + { + const minute = displayTime + ? 
displayTime.split(':')[1] + : '00'; + handleTimeChange12h(`${value}:${minute}`, null); + }} + data={Array.from({ length: 12 }, (_, i) => ({ + value: String(i + 1), + label: String(i + 1), + }))} + disabled={!schedule.enabled} + searchable + /> + handleTimeChange12h(null, value)} + data={[ + { value: 'AM', label: 'AM' }, + { value: 'PM', label: 'PM' }, + ]} + disabled={!schedule.enabled} + /> + + ) : ( + <> + { + const hour = schedule.time + ? schedule.time.split(':')[0] + : '00'; + handleTimeChange24h(`${hour}:${value}`); + }} + data={Array.from({ length: 60 }, (_, i) => ({ + value: String(i).padStart(2, '0'), + label: String(i).padStart(2, '0'), + }))} + disabled={!schedule.enabled} + searchable + /> + + )} + + + + handleScheduleChange('retention_count', value || 0) + } + min={0} + disabled={!schedule.enabled} + /> + + + + )} + + {/* Timezone info - only show in simple mode */} + {!advancedMode && schedule.enabled && schedule.time && ( + + System Timezone: {userTimezone} • Backup will run at{' '} + {schedule.time} {userTimezone} + + )} + + )} + + + {/* Backups List */} + + + + + + + + + + + + + + + + + + + + {loading ? ( + + + + ) : backups.length === 0 ? ( + + No backups found. Create one to get started. + + ) : ( +
+ +
+ )} +
+
+
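      {/* Dialogs below: an upload modal for importing a backup file, plus ConfirmationDialog
          prompts (with suppressible warnings) for restoring and deleting backups. */}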
+ + { + setUploadModalOpen(false); + setUploadFile(null); + }} + title="Upload Backup" + > + + + + + + + + + + { + setRestoreConfirmOpen(false); + setSelectedBackup(null); + }} + onConfirm={handleRestoreConfirm} + title="Restore Backup" + message={`Are you sure you want to restore from "${selectedBackup?.name}"? This will replace all current data with the backup data. This action cannot be undone.`} + confirmLabel="Restore" + cancelLabel="Cancel" + actionKey="restore-backup" + onSuppressChange={suppressWarning} + loading={restoring} + /> + + { + setDeleteConfirmOpen(false); + setSelectedBackup(null); + }} + onConfirm={handleDeleteConfirm} + title="Delete Backup" + message={`Are you sure you want to delete "${selectedBackup?.name}"? This action cannot be undone.`} + confirmLabel="Delete" + cancelLabel="Cancel" + actionKey="delete-backup" + onSuppressChange={suppressWarning} + loading={deleting} + /> + + ); +} diff --git a/frontend/src/components/cards/PluginCard.jsx b/frontend/src/components/cards/PluginCard.jsx new file mode 100644 index 00000000..78725781 --- /dev/null +++ b/frontend/src/components/cards/PluginCard.jsx @@ -0,0 +1,258 @@ +import React, { useState } from 'react'; +import { showNotification } from '../../utils/notificationUtils.js'; +import { Field } from '../Field.jsx'; +import { + ActionIcon, + Button, + Card, + Divider, + Group, + Stack, + Switch, + Text, +} from '@mantine/core'; +import { Trash2 } from 'lucide-react'; +import { getConfirmationDetails } from '../../utils/cards/PluginCardUtils.js'; + +const PluginFieldList = ({ plugin, settings, updateField }) => { + return plugin.fields.map((f) => ( + + )); +}; + +const PluginActionList = ({ plugin, enabled, running, handlePluginRun }) => { + return plugin.actions.map((action) => ( + +
+ {action.label} + {action.description && ( + + {action.description} + + )} +
+ +
+ )); +}; + +const PluginActionStatus = ({ running, lastResult }) => { + return ( + <> + {running && ( + + Running action… please wait + + )} + {!running && lastResult?.file && ( + + Output: {lastResult.file} + + )} + {!running && lastResult?.error && ( + + Error: {String(lastResult.error)} + + )} + + ); +}; + +const PluginCard = ({ + plugin, + onSaveSettings, + onRunAction, + onToggleEnabled, + onRequireTrust, + onRequestDelete, + onRequestConfirm, +}) => { + const [settings, setSettings] = useState(plugin.settings || {}); + const [saving, setSaving] = useState(false); + const [running, setRunning] = useState(false); + const [enabled, setEnabled] = useState(!!plugin.enabled); + const [lastResult, setLastResult] = useState(null); + + // Keep local enabled state in sync with props (e.g., after import + enable) + React.useEffect(() => { + setEnabled(!!plugin.enabled); + }, [plugin.enabled]); + // Sync settings if plugin changes identity + React.useEffect(() => { + setSettings(plugin.settings || {}); + }, [plugin.key]); + + const updateField = (id, val) => { + setSettings((prev) => ({ ...prev, [id]: val })); + }; + + const save = async () => { + setSaving(true); + try { + await onSaveSettings(plugin.key, settings); + showNotification({ + title: 'Saved', + message: `${plugin.name} settings updated`, + color: 'green', + }); + } finally { + setSaving(false); + } + }; + + const missing = plugin.missing; + + const handleEnableChange = () => { + return async (e) => { + const next = e.currentTarget.checked; + if (next && !plugin.ever_enabled && onRequireTrust) { + const ok = await onRequireTrust(plugin); + if (!ok) { + // Revert + setEnabled(false); + return; + } + } + setEnabled(next); + const resp = await onToggleEnabled(plugin.key, next); + if (next && resp?.ever_enabled) { + plugin.ever_enabled = true; + } + }; + }; + + const handlePluginRun = async (a) => { + setRunning(true); + setLastResult(null); + try { + // Determine if confirmation is required from action metadata or fallback field + const { requireConfirm, confirmTitle, confirmMessage } = + getConfirmationDetails(a, plugin, settings); + + if (requireConfirm) { + const confirmed = await onRequestConfirm(confirmTitle, confirmMessage); + + if (!confirmed) { + // User canceled, abort the action + return; + } + } + + // Save settings before running to ensure backend uses latest values + try { + await onSaveSettings(plugin.key, settings); + } catch (e) { + /* ignore, run anyway */ + } + const resp = await onRunAction(plugin.key, a.id); + if (resp?.success) { + setLastResult(resp.result || {}); + const msg = resp.result?.message || 'Plugin action completed'; + showNotification({ + title: plugin.name, + message: msg, + color: 'green', + }); + } else { + const err = resp?.error || 'Unknown error'; + setLastResult({ error: err }); + showNotification({ + title: `${plugin.name} error`, + message: String(err), + color: 'red', + }); + } + } finally { + setRunning(false); + } + }; + + return ( + + +
+ {plugin.name} + + {plugin.description} + +
+ + onRequestDelete && onRequestDelete(plugin)} + > + + + + v{plugin.version || '1.0.0'} + + + +
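        {/* Card body: a warning when the plugin files are missing on disk; otherwise the
            plugin's settings fields, then its actions and the status of the last run. */}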
+ + {missing && ( + + Missing plugin files. Re-import or delete this entry. + + )} + + {!missing && plugin.fields && plugin.fields.length > 0 && ( + + + + + + + )} + + {!missing && plugin.actions && plugin.actions.length > 0 && ( + <> + + + + + + + )} +
+ ); +}; + +export default PluginCard; \ No newline at end of file diff --git a/frontend/src/components/cards/RecordingCard.jsx b/frontend/src/components/cards/RecordingCard.jsx new file mode 100644 index 00000000..6f90e0f5 --- /dev/null +++ b/frontend/src/components/cards/RecordingCard.jsx @@ -0,0 +1,422 @@ +import useChannelsStore from '../../store/channels.jsx'; +import useSettingsStore from '../../store/settings.jsx'; +import useVideoStore from '../../store/useVideoStore.jsx'; +import { useDateTimeFormat, useTimeHelpers } from '../../utils/dateTimeUtils.js'; +import { notifications } from '@mantine/notifications'; +import React from 'react'; +import { + ActionIcon, + Badge, + Box, + Button, + Card, + Center, + Flex, + Group, + Image, + Modal, + Stack, + Text, + Tooltip, +} from '@mantine/core'; +import { AlertTriangle, SquareX } from 'lucide-react'; +import RecordingSynopsis from '../RecordingSynopsis'; +import { + deleteRecordingById, + deleteSeriesAndRule, + getPosterUrl, + getRecordingUrl, + getSeasonLabel, + getSeriesInfo, + getShowVideoUrl, + removeRecording, + runComSkip, +} from './../../utils/cards/RecordingCardUtils.js'; + +const RecordingCard = ({ recording, onOpenDetails, onOpenRecurring }) => { + const channels = useChannelsStore((s) => s.channels); + const env_mode = useSettingsStore((s) => s.environment.env_mode); + const showVideo = useVideoStore((s) => s.showVideo); + const fetchRecordings = useChannelsStore((s) => s.fetchRecordings); + const { toUserTime, userNow } = useTimeHelpers(); + const [timeformat, dateformat] = useDateTimeFormat(); + + const channel = channels?.[recording.channel]; + + const customProps = recording.custom_properties || {}; + const program = customProps.program || {}; + const recordingName = program.title || 'Custom Recording'; + const subTitle = program.sub_title || ''; + const description = program.description || customProps.description || ''; + const isRecurringRule = customProps?.rule?.type === 'recurring'; + + // Poster or channel logo + const posterUrl = getPosterUrl( + customProps.poster_logo_id, customProps, channel?.logo?.cache_url, env_mode); + + const start = toUserTime(recording.start_time); + const end = toUserTime(recording.end_time); + const now = userNow(); + const status = customProps.status; + const isTimeActive = now.isAfter(start) && now.isBefore(end); + const isInterrupted = status === 'interrupted'; + const isInProgress = isTimeActive; // Show as recording by time, regardless of status glitches + const isUpcoming = now.isBefore(start); + const isSeriesGroup = Boolean( + recording._group_count && recording._group_count > 1 + ); + // Season/Episode display if present + const season = customProps.season ?? program?.custom_properties?.season; + const episode = customProps.episode ?? program?.custom_properties?.episode; + const onscreen = + customProps.onscreen_episode ?? 
+ program?.custom_properties?.onscreen_episode; + const seLabel = getSeasonLabel(season, episode, onscreen); + + const handleWatchLive = () => { + if (!channel) return; + showVideo(getShowVideoUrl(channel, env_mode), 'live'); + }; + + const handleWatchRecording = () => { + // Only enable if backend provides a playable file URL in custom properties + const fileUrl = getRecordingUrl(customProps, env_mode); + if (!fileUrl) return; + + showVideo(fileUrl, 'vod', { + name: recordingName, + logo: { url: posterUrl }, + }); + }; + + const handleRunComskip = async (e) => { + e?.stopPropagation?.(); + try { + await runComSkip(recording); + notifications.show({ + title: 'Removing commercials', + message: 'Queued comskip for this recording', + color: 'blue.5', + autoClose: 2000, + }); + } catch (error) { + console.error('Failed to queue comskip for recording', error); + } + }; + + // Cancel handling for series groups + const [cancelOpen, setCancelOpen] = React.useState(false); + const [busy, setBusy] = React.useState(false); + const handleCancelClick = (e) => { + e.stopPropagation(); + if (isRecurringRule) { + onOpenRecurring?.(recording, true); + return; + } + if (isSeriesGroup) { + setCancelOpen(true); + } else { + removeRecording(recording.id); + } + }; + + const seriesInfo = getSeriesInfo(customProps); + + const removeUpcomingOnly = async () => { + try { + setBusy(true); + await deleteRecordingById(recording.id); + } finally { + setBusy(false); + setCancelOpen(false); + try { + await fetchRecordings(); + } catch (error) { + console.error('Failed to refresh recordings', error); + } + } + }; + + const removeSeriesAndRule = async () => { + try { + setBusy(true); + await deleteSeriesAndRule(seriesInfo); + } finally { + setBusy(false); + setCancelOpen(false); + try { + await fetchRecordings(); + } catch (error) { + console.error( + 'Failed to refresh recordings after series removal', + error + ); + } + } + }; + + const handleOnMainCardClick = () => { + if (isRecurringRule) { + onOpenRecurring?.(recording, false); + } else { + onOpenDetails?.(recording); + } + } + + const WatchLive = () => { + return ; + } + + const WatchRecording = () => { + return + + ; + } + + const MainCard = ( + + + + + {isInterrupted + ? 'Interrupted' + : isInProgress + ? 'Recording' + : isUpcoming + ? 'Scheduled' + : 'Completed'} + + {isInterrupted && } + + + + {recordingName} + + {isSeriesGroup && ( + + Series + + )} + {isRecurringRule && ( + + Recurring + + )} + {seLabel && !isSeriesGroup && ( + + {seLabel} + + )} + + + + +
+ + e.stopPropagation()} + onClick={handleCancelClick} + > + + + +
+
+ + + {recordingName} + + {!isSeriesGroup && subTitle && ( + + + Episode + + + {subTitle} + + + )} + + + Channel + + + {channel ? `${channel.channel_number} • ${channel.name}` : '—'} + + + + + + {isSeriesGroup ? 'Next recording' : 'Time'} + + + {start.format(`${dateformat}, YYYY ${timeformat}`)} – {end.format(timeformat)} + + + + {!isSeriesGroup && description && ( + onOpenDetails?.(recording)} + /> + )} + + {isInterrupted && customProps.interrupted_reason && ( + + {customProps.interrupted_reason} + + )} + + + {isInProgress && } + + {!isUpcoming && } + {!isUpcoming && + customProps?.status === 'completed' && + (!customProps?.comskip || + customProps?.comskip?.status !== 'completed') && ( + + )} + + + + {/* If this card is a grouped upcoming series, show count */} + {recording._group_count > 1 && ( + + Next of {recording._group_count} + + )} +
+ ); + if (!isSeriesGroup) return MainCard; + + // Stacked look for series groups: render two shadow layers behind the main card + return ( + + setCancelOpen(false)} + title="Cancel Series" + centered + size="md" + zIndex={9999} + > + + This is a series rule. What would you like to cancel? + + + + + + + + + {MainCard} + + ); +}; + +export default RecordingCard; \ No newline at end of file diff --git a/frontend/src/components/cards/SeriesCard.jsx b/frontend/src/components/cards/SeriesCard.jsx new file mode 100644 index 00000000..f010cb44 --- /dev/null +++ b/frontend/src/components/cards/SeriesCard.jsx @@ -0,0 +1,85 @@ +import { + Badge, + Box, + Card, + CardSection, + Group, + Image, + Stack, + Text, +} from '@mantine/core'; +import {Calendar, Play, Star} from "lucide-react"; +import React from "react"; + +const SeriesCard = ({ series, onClick }) => { + return ( + onClick(series)} + > + + + {series.logo?.url ? ( + {series.name} + ) : ( + + + + )} + {/* Add Series badge in the same position as Movie badge */} + + Series + + + + + + {series.name} + + + {series.year && ( + + + + {series.year} + + + )} + {series.rating && ( + + + + {series.rating} + + + )} + + + {series.genre && ( + + {series.genre} + + )} + + + ); +}; + +export default SeriesCard; \ No newline at end of file diff --git a/frontend/src/components/cards/StreamConnectionCard.jsx b/frontend/src/components/cards/StreamConnectionCard.jsx new file mode 100644 index 00000000..62d6e62f --- /dev/null +++ b/frontend/src/components/cards/StreamConnectionCard.jsx @@ -0,0 +1,613 @@ +import { useLocation } from 'react-router-dom'; +import React, { useEffect, useMemo, useState } from 'react'; +import useLocalStorage from '../../hooks/useLocalStorage.jsx'; +import usePlaylistsStore from '../../store/playlists.jsx'; +import useSettingsStore from '../../store/settings.jsx'; +import { + ActionIcon, + Badge, + Box, + Card, + Center, + Flex, + Group, + Select, + Stack, + Text, + Tooltip, +} from '@mantine/core'; +import { + Gauge, + HardDriveDownload, + HardDriveUpload, + SquareX, + Timer, + Users, + Video, +} from 'lucide-react'; +import { toFriendlyDuration } from '../../utils/dateTimeUtils.js'; +import { CustomTable, useTable } from '../tables/CustomTable/index.jsx'; +import { TableHelper } from '../../helpers/index.jsx'; +import logo from '../../images/logo.png'; +import { formatBytes, formatSpeed } from '../../utils/networkUtils.js'; +import { showNotification } from '../../utils/notificationUtils.js'; +import { + connectedAccessor, + durationAccessor, + getBufferingSpeedThreshold, + getChannelStreams, + getLogoUrl, + getM3uAccountsMap, + getMatchingStreamByUrl, + getSelectedStream, + getStartDate, + getStreamOptions, + getStreamsByIds, + switchStream, +} from '../../utils/cards/StreamConnectionCardUtils.js'; + +// Create a separate component for each channel card to properly handle the hook +const StreamConnectionCard = ({ + channel, + clients, + stopClient, + stopChannel, + logos, + channelsByUUID, +}) => { + const location = useLocation(); + const [availableStreams, setAvailableStreams] = useState([]); + const [isLoadingStreams, setIsLoadingStreams] = useState(false); + const [activeStreamId, setActiveStreamId] = useState(null); + const [currentM3UProfile, setCurrentM3UProfile] = useState(null); // Add state for current M3U profile + const [data, setData] = useState([]); + const [previewedStream, setPreviewedStream] = useState(null); + + // Get M3U account data from the playlists store + const m3uAccounts = usePlaylistsStore((s) => 
s.playlists); + // Get settings for speed threshold + const settings = useSettingsStore((s) => s.settings); + + // Get Date-format from localStorage + const [dateFormatSetting] = useLocalStorage('date-format', 'mdy'); + const dateFormat = dateFormatSetting === 'mdy' ? 'MM/DD' : 'DD/MM'; + const [tableSize] = useLocalStorage('table-size', 'default'); + + // Create a map of M3U account IDs to names for quick lookup + const m3uAccountsMap = useMemo(() => { + return getM3uAccountsMap(m3uAccounts); + }, [m3uAccounts]); + + // Update M3U profile information when channel data changes + useEffect(() => { + // If the channel data includes M3U profile information, update our state + if (channel.m3u_profile || channel.m3u_profile_name) { + setCurrentM3UProfile({ + name: + channel.m3u_profile?.name || + channel.m3u_profile_name || + 'Default M3U', + }); + } + }, [channel.m3u_profile, channel.m3u_profile_name, channel.stream_id]); + + // Fetch available streams for this channel + useEffect(() => { + const fetchStreams = async () => { + setIsLoadingStreams(true); + try { + // Get channel ID from UUID + const channelId = channelsByUUID[channel.channel_id]; + if (channelId) { + const streamData = await getChannelStreams(channelId); + + // Use streams in the order returned by the API without sorting + setAvailableStreams(streamData); + + // If we have a channel URL, try to find the matching stream + if (channel.url && streamData.length > 0) { + // Try to find matching stream based on URL + const matchingStream = getMatchingStreamByUrl( + streamData, + channel.url + ); + + if (matchingStream) { + setActiveStreamId(matchingStream.id.toString()); + + // If the stream has M3U profile info, save it + if (matchingStream.m3u_profile) { + setCurrentM3UProfile(matchingStream.m3u_profile); + } + } + } + } + } catch (error) { + console.error('Error fetching streams:', error); + } finally { + setIsLoadingStreams(false); + } + }; + + fetchStreams(); + }, [channel.channel_id, channel.url, channelsByUUID]); + + useEffect(() => { + setData( + clients + .filter((client) => client.channel.channel_id === channel.channel_id) + .map((client) => ({ + id: client.client_id, + ...client, + })) + ); + }, [clients, channel.channel_id]); + + const renderHeaderCell = (header) => { + switch (header.id) { + default: + return ( + + + {header.column.columnDef.header} + + + ); + } + }; + + const renderBodyCell = ({ cell, row }) => { + switch (cell.column.id) { + case 'actions': + return ( + +
+ + + stopClient( + row.original.channel.uuid, + row.original.client_id + ) + } + > + + + +
+
+ ); + } + }; + + const checkStreamsAfterChange = (streamId) => { + return async () => { + try { + const channelId = channelsByUUID[channel.channel_id]; + if (channelId) { + const updatedStreamData = await getChannelStreams(channelId); + console.log('Channel streams after switch:', updatedStreamData); + + // Update current stream information with fresh data + const updatedStream = getSelectedStream(updatedStreamData, streamId); + if (updatedStream?.m3u_profile) { + setCurrentM3UProfile(updatedStream.m3u_profile); + } + } + } catch (error) { + console.error('Error checking streams after switch:', error); + } + }; + }; + + // Handle stream switching + const handleStreamChange = async (streamId) => { + try { + console.log('Switching to stream ID:', streamId); + // Find the selected stream in availableStreams for debugging + const selectedStream = getSelectedStream(availableStreams, streamId); + console.log('Selected stream details:', selectedStream); + + // Make sure we're passing the correct ID to the API + const response = await switchStream(channel, streamId); + console.log('Stream switch API response:', response); + + // Update the local active stream ID immediately + setActiveStreamId(streamId); + + // Update M3U profile information if available in the response + if (response?.m3u_profile) { + setCurrentM3UProfile(response.m3u_profile); + } else if (selectedStream && selectedStream.m3u_profile) { + // Fallback to the profile from the selected stream + setCurrentM3UProfile(selectedStream.m3u_profile); + } + + // Show detailed notification with stream name + showNotification({ + title: 'Stream switching', + message: `Switching to "${selectedStream?.name}" for ${channel.name}`, + color: 'blue.5', + }); + + // After a short delay, fetch streams again to confirm the switch + setTimeout(checkStreamsAfterChange(streamId), 2000); + } catch (error) { + console.error('Stream switch error:', error); + showNotification({ + title: 'Error switching stream', + message: error.toString(), + color: 'red.5', + }); + } + }; + + const clientsColumns = useMemo( + () => [ + { + id: 'expand', + size: 20, + }, + { + header: 'IP Address', + accessorKey: 'ip_address', + }, + // Updated Connected column with tooltip + { + id: 'connected', + header: 'Connected', + accessorFn: connectedAccessor(dateFormat), + cell: ({ cell }) => ( + + {cell.getValue()} + + ), + }, + // Update Duration column with tooltip showing exact seconds + { + id: 'duration', + header: 'Duration', + accessorFn: durationAccessor(), + cell: ({ cell, row }) => { + const exactDuration = + row.original.connected_since || row.original.connection_duration; + return ( + + {cell.getValue()} + + ); + }, + }, + { + id: 'actions', + header: 'Actions', + size: tableSize == 'compact' ? 
75 : 100, + }, + ], + [] + ); + + const channelClientsTable = useTable({ + ...TableHelper.defaultProperties, + columns: clientsColumns, + data, + allRowIds: data.map((client) => client.id), + tableCellProps: () => ({ + padding: 4, + borderColor: '#444', + color: '#E0E0E0', + fontSize: '0.85rem', + }), + headerCellRenderFns: { + ip_address: renderHeaderCell, + connected: renderHeaderCell, + duration: renderHeaderCell, + actions: renderHeaderCell, + }, + bodyCellRenderFns: { + actions: renderBodyCell, + }, + getExpandedRowHeight: (row) => { + return 20 + 28 * row.original.streams.length; + }, + expandedRowRenderer: ({ row }) => { + return ( + + + + User Agent: + + {row.original.user_agent || 'Unknown'} + + + ); + }, + mantineExpandButtonProps: ({ row, table }) => ({ + size: 'xs', + style: { + transform: row.getIsExpanded() ? 'rotate(180deg)' : 'rotate(-90deg)', + transition: 'transform 0.2s', + }, + }), + displayColumnDefOptions: { + 'mrt-row-expand': { + size: 15, + header: '', + }, + 'mrt-row-actions': { + size: 74, + }, + }, + }); + + // Get logo URL from the logos object if available + const logoUrl = getLogoUrl(channel.logo_id, logos, previewedStream); + + useEffect(() => { + let isMounted = true; + // Only fetch if we have a stream_id and NO channel.name + if (!channel.name && channel.stream_id) { + getStreamsByIds(channel.stream_id).then((streams) => { + if (isMounted && streams && streams.length > 0) { + setPreviewedStream(streams[0]); + } + }); + } + return () => { + isMounted = false; + }; + }, [channel.name, channel.stream_id]); + + const channelName = + channel.name || previewedStream?.name || 'Unnamed Channel'; + const uptime = channel.uptime || 0; + const bitrates = channel.bitrates || []; + const totalBytes = channel.total_bytes || 0; + const clientCount = channel.client_count || 0; + const avgBitrate = channel.avg_bitrate || '0 Kbps'; + const streamProfileName = channel.stream_profile?.name || 'Unknown Profile'; + + // Use currentM3UProfile if available, otherwise fall back to channel data + const m3uProfileName = + currentM3UProfile?.name || + channel.m3u_profile?.name || + channel.m3u_profile_name || + 'Unknown M3U Profile'; + + // Create select options for available streams + const streamOptions = getStreamOptions(availableStreams, m3uAccountsMap); + + if (location.pathname !== '/stats') { + return <>; + } + + // Safety check - if channel doesn't have required data, don't render + if (!channel || !channel.channel_id) { + return null; + } + + return ( + + + + + channel logo + + + + + +
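              {/* Card header: the uptime badge and stop-channel control sit next to the logo;
                  channel name, M3U profile, and stream selection details follow below. */}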
+ + {toFriendlyDuration(uptime, 'seconds')} +
+
+
+
+ + stopChannel(channel.channel_id)} + > + + + +
+
+
+ + + + {channelName} + + + + + + + + + {/* Display M3U profile information */} + + + + + {m3uProfileName} + + + + + {/* Add stream selection dropdown */} + {availableStreams.length > 0 && ( + + { + return { + label: USER_LEVEL_LABELS[value], + value: `${value}`, + }; + })} + value={watch('user_level')} + onChange={(value) => { + setValue('user_level', value); + }} + error={errors.user_level?.message} + />
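For reference, a plausible sketch of how the stream-switch dropdown options above could be assembled from the channel's available streams and the M3U account map. The helper name and the assumption that each stream record carries an m3u_account id are illustrative only; the real getStreamOptions in StreamConnectionCardUtils.js may differ.

// Builds { value, label } pairs for the stream-switch <Select>, labelling each stream
// with its owning M3U account name when the account is known.
const buildStreamOptions = (availableStreams, m3uAccountsMap) =>
  availableStreams.map((stream) => ({
    value: stream.id.toString(),
    label: m3uAccountsMap[stream.m3u_account]
      ? `${stream.name} (${m3uAccountsMap[stream.m3u_account]})`
      : stream.name,
  }));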
@@ -531,7 +656,16 @@ const Channel = ({ channel = null, isOpen, onClose }) => { { + setLogoPopoverOpened(opened); + // Load all logos when popover is opened + if (opened) { + console.log( + 'Popover opened, calling ensureLogosLoaded...' + ); + ensureLogosLoaded(); + } + }} // position="bottom-start" withArrow > @@ -539,10 +673,31 @@ const Channel = ({ channel = null, isOpen, onClose }) => { + Logo + {watch('epg_data_id') && ( + + )} + + } readOnly - value={logos[formik.values.logo_id]?.name || 'Default'} - onClick={() => setLogoPopoverOpened(true)} + value={channelLogos[watch('logo_id')]?.name || 'Default'} + onClick={() => { + console.log( + 'Logo input clicked, setting popover opened to true' + ); + setLogoPopoverOpened(true); + }} size="xs" /> @@ -558,77 +713,104 @@ const Channel = ({ channel = null, isOpen, onClose }) => { mb="xs" size="xs" /> + {logosLoading && ( + + Loading... + + )} - - {({ index, style }) => ( -
-
- { - formik.setFieldValue( - 'logo_id', - filteredLogos[index].id - ); - }} - /> -
-
- )} -
+ {filteredLogos.length === 0 ? ( +
+ + {logoFilter + ? 'No logos match your filter' + : 'No logos available'} + +
+ ) : ( + + {({ index, style }) => ( +
{ + setValue('logo_id', filteredLogos[index].id); + setLogoPopoverOpened(false); + }} + onMouseEnter={(e) => { + e.currentTarget.style.backgroundColor = + 'rgb(68, 68, 68)'; + }} + onMouseLeave={(e) => { + e.currentTarget.style.backgroundColor = + 'transparent'; + }} + > +
+ {filteredLogos[index].name { + // Fallback to default logo if image fails to load + if (e.target.src !== logo) { + e.target.src = logo; + } + }} + /> + + {filteredLogos[index].name || 'Default'} + +
+
+ )} +
+ )}
- + + + - - - - OR - - - - - - Upload Logo - console.log('rejected files', files)} - maxSize={5 * 1024 ** 2} - > - - - Drag images here or click to select files - - - - -
-
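The inline Dropzone upload removed above appears to be superseded by a separate logo modal later in this form (setLogoModalOpen / handleLogoSuccess). For reference, a minimal sketch of the on-demand loading pattern behind ensureLogosLoaded() in the popover above, using hypothetical names (useLazyLogos, fetchLogos); the real useChannelLogoSelection hook may differ.

import { useCallback, useRef, useState } from 'react';

// Fetches the logo list at most once, the first time ensureLogosLoaded() is called
// (e.g. when the logo popover opens), and exposes the result keyed by logo id.
function useLazyLogos(fetchLogos) {
  const requestedRef = useRef(false);
  const [logos, setLogos] = useState({});
  const [isLoading, setIsLoading] = useState(false);

  const ensureLogosLoaded = useCallback(async () => {
    if (requestedRef.current) return; // already requested: nothing to do
    requestedRef.current = true;
    setIsLoading(true);
    try {
      const list = await fetchLogos(); // hypothetical fetcher returning [{ id, name, cache_url }, ...]
      setLogos(Object.fromEntries(list.map((l) => [l.id, l])));
    } finally {
      setIsLoading(false);
    }
  }, [fetchLogos]);

  return { logos, ensureLogosLoaded, isLoading };
}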
+ @@ -638,25 +820,45 @@ const Channel = ({ channel = null, isOpen, onClose }) => { id="channel_number" name="channel_number" label="Channel # (blank to auto-assign)" - value={formik.values.channel_number} - onChange={(value) => - formik.setFieldValue('channel_number', value) - } - error={ - formik.errors.channel_number - ? formik.touched.channel_number - : '' - } + value={watch('channel_number')} + onChange={(value) => setValue('channel_number', value)} + error={errors.channel_number?.message} size="xs" + step={0.1} // Add step prop to allow decimal inputs + precision={1} // Specify decimal precision /> + TVG-ID + {watch('epg_data_id') && ( + + )} + + } + {...register('tvg_id')} + error={errors.tvg_id?.message} + size="xs" + /> + + @@ -676,20 +878,44 @@ const Channel = ({ channel = null, isOpen, onClose }) => { + } readOnly - value={ - formik.values.epg_data_id - ? tvgsById[formik.values.epg_data_id].name - : 'Dummy' - } + value={(() => { + const tvg = tvgsById[watch('epg_data_id')]; + const epgSource = tvg && epgs[tvg.epg_source]; + const tvgLabel = tvg ? tvg.name || tvg.id : ''; + if (epgSource && tvgLabel) { + return `${epgSource.name} - ${tvgLabel}`; + } else if (tvgLabel) { + return tvgLabel; + } else { + return 'Dummy'; + } + })()} onClick={() => setEpgPopoverOpened(true)} size="xs" rightSection={ @@ -699,7 +925,7 @@ const Channel = ({ channel = null, isOpen, onClose }) => { color="white" onClick={(e) => { e.stopPropagation(); - formik.setFieldValue('epg_data_id', null); + setValue('epg_data_id', null); }} title="Create new group" size="small" @@ -735,6 +961,7 @@ const Channel = ({ channel = null, isOpen, onClose }) => { } mb="xs" size="xs" + autoFocus /> @@ -743,7 +970,7 @@ const Channel = ({ channel = null, isOpen, onClose }) => { height={200} // Set max height for visible items itemCount={filteredTvgs.length} itemSize={40} // Adjust row height for each item - width="100%" + style={{ width: '100%' }} ref={listRef} > {({ index, style }) => ( @@ -752,22 +979,29 @@ const Channel = ({ channel = null, isOpen, onClose }) => { key={filteredTvgs[index].id} variant="subtle" color="gray" - fullWidth + style={{ width: '100%' }} justify="left" size="xs" onClick={() => { if (filteredTvgs[index].id == '0') { - formik.setFieldValue('epg_data_id', null); + setValue('epg_data_id', null); } else { - formik.setFieldValue( - 'epg_data_id', - filteredTvgs[index].id - ); + setValue('epg_data_id', filteredTvgs[index].id); + // Also update selectedEPG to match the EPG source of the selected tvg + if (filteredTvgs[index].epg_source) { + setSelectedEPG( + `${filteredTvgs[index].epg_source}` + ); + } } setEpgPopoverOpened(false); }} > - {filteredTvgs[index].tvg_id} + {filteredTvgs[index].name && + filteredTvgs[index].tvg_id + ? 
`${filteredTvgs[index].name} (${filteredTvgs[index].tvg_id})` + : filteredTvgs[index].name || + filteredTvgs[index].tvg_id} )} @@ -778,25 +1012,15 @@ const Channel = ({ channel = null, isOpen, onClose }) => { - {/* - - Active Streams - - - - - Available Streams - - - */} - @@ -804,10 +1028,16 @@ const Channel = ({ channel = null, isOpen, onClose }) => { setChannelGroupModalOpen(false)} + onClose={handleChannelGroupModalClose} + /> + + setLogoModalOpen(false)} + onSuccess={handleLogoSuccess} /> ); }; -export default Channel; +export default ChannelForm; diff --git a/frontend/src/components/forms/ChannelBatch.jsx b/frontend/src/components/forms/ChannelBatch.jsx new file mode 100644 index 00000000..14dd22f1 --- /dev/null +++ b/frontend/src/components/forms/ChannelBatch.jsx @@ -0,0 +1,1142 @@ +import React, { useState, useEffect, useMemo, useRef } from 'react'; +import useChannelsStore from '../../store/channels'; +import API from '../../api'; +import useStreamProfilesStore from '../../store/streamProfiles'; +import useEPGsStore from '../../store/epgs'; +import ChannelGroupForm from './ChannelGroup'; +import { + Box, + Button, + Modal, + TextInput, + Text, + Group, + ActionIcon, + Flex, + Select, + Stack, + useMantineTheme, + Popover, + ScrollArea, + Tooltip, + UnstyledButton, + Center, + Divider, + Checkbox, + Paper, +} from '@mantine/core'; +import { ListOrdered, SquarePlus, SquareX, X } from 'lucide-react'; +import { FixedSizeList as List } from 'react-window'; +import { useForm } from '@mantine/form'; +import { notifications } from '@mantine/notifications'; +import { USER_LEVELS, USER_LEVEL_LABELS } from '../../constants'; +import { useChannelLogoSelection } from '../../hooks/useSmartLogos'; +import LazyLogo from '../LazyLogo'; +import logo from '../../images/logo.png'; +import ConfirmationDialog from '../ConfirmationDialog'; +import useWarningsStore from '../../store/warnings'; + +const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { + const theme = useMantineTheme(); + + const groupListRef = useRef(null); + const logoListRef = useRef(null); + + const channelGroups = useChannelsStore((s) => s.channelGroups); + const { + logos: channelLogos, + ensureLogosLoaded, + isLoading: logosLoading, + } = useChannelLogoSelection(); + + useEffect(() => { + ensureLogosLoaded(); + }, [ensureLogosLoaded]); + + const streamProfiles = useStreamProfilesStore((s) => s.profiles); + const epgs = useEPGsStore((s) => s.epgs); + const tvgs = useEPGsStore((s) => s.tvgs); + const fetchEPGs = useEPGsStore((s) => s.fetchEPGs); + + const [channelGroupModelOpen, setChannelGroupModalOpen] = useState(false); + const [selectedChannelGroup, setSelectedChannelGroup] = useState('-1'); + const [selectedLogoId, setSelectedLogoId] = useState('-1'); + const [isSubmitting, setIsSubmitting] = useState(false); + const [regexFind, setRegexFind] = useState(''); + const [regexReplace, setRegexReplace] = useState(''); + const [selectedDummyEpgId, setSelectedDummyEpgId] = useState(null); + + const [groupPopoverOpened, setGroupPopoverOpened] = useState(false); + const [groupFilter, setGroupFilter] = useState(''); + const groupOptions = Object.values(channelGroups); + + const [logoPopoverOpened, setLogoPopoverOpened] = useState(false); + const [logoFilter, setLogoFilter] = useState(''); + // Confirmation dialog states + const [confirmSetNamesOpen, setConfirmSetNamesOpen] = useState(false); + const [confirmSetLogosOpen, setConfirmSetLogosOpen] = useState(false); + const [confirmSetTvgIdsOpen, setConfirmSetTvgIdsOpen] = 
useState(false); + const [confirmBatchUpdateOpen, setConfirmBatchUpdateOpen] = useState(false); + const [settingNames, setSettingNames] = useState(false); + const [settingLogos, setSettingLogos] = useState(false); + const [settingTvgIds, setSettingTvgIds] = useState(false); + const isWarningSuppressed = useWarningsStore((s) => s.isWarningSuppressed); + const suppressWarning = useWarningsStore((s) => s.suppressWarning); + + // Fetch EPG sources when modal opens + useEffect(() => { + if (isOpen) { + fetchEPGs(); + } + }, [isOpen, fetchEPGs]); + + // Get dummy EPG sources + const dummyEpgSources = useMemo(() => { + return Object.values(epgs).filter((epg) => epg.source_type === 'dummy'); + }, [epgs]); + + const form = useForm({ + mode: 'uncontrolled', + initialValues: { + channel_group: '(no change)', + logo: '(no change)', + stream_profile_id: '-1', + user_level: '-1', + }, + }); + + // Build confirmation message based on selected changes + const getConfirmationMessage = () => { + const changes = []; + const values = form.getValues(); + + // Check for regex name changes + if (regexFind.trim().length > 0) { + changes.push( + `• Name Change: Apply regex find "${regexFind}" replace with "${regexReplace || ''}"` + ); + } + + // Check channel group + if (selectedChannelGroup && selectedChannelGroup !== '-1') { + const groupName = channelGroups[selectedChannelGroup]?.name || 'Unknown'; + changes.push(`• Channel Group: ${groupName}`); + } + + // Check logo + if (selectedLogoId && selectedLogoId !== '-1') { + if (selectedLogoId === '0') { + changes.push(`• Logo: Use Default`); + } else { + const logoName = channelLogos[selectedLogoId]?.name || 'Selected Logo'; + changes.push(`• Logo: ${logoName}`); + } + } + + // Check stream profile + if (values.stream_profile_id && values.stream_profile_id !== '-1') { + if (values.stream_profile_id === '0') { + changes.push(`• Stream Profile: Use Default`); + } else { + const profile = streamProfiles.find( + (p) => `${p.id}` === `${values.stream_profile_id}` + ); + const profileName = profile?.name || 'Selected Profile'; + changes.push(`• Stream Profile: ${profileName}`); + } + } + + // Check user level + if (values.user_level && values.user_level !== '-1') { + const userLevelLabel = + USER_LEVEL_LABELS[values.user_level] || values.user_level; + changes.push(`• User Level: ${userLevelLabel}`); + } + + // Check dummy EPG + if (selectedDummyEpgId) { + if (selectedDummyEpgId === 'clear') { + changes.push(`• EPG: Clear Assignment (use default dummy)`); + } else { + const epgName = epgs[selectedDummyEpgId]?.name || 'Selected EPG'; + changes.push(`• Dummy EPG: ${epgName}`); + } + } + + return changes; + }; + + const handleSubmit = () => { + const changes = getConfirmationMessage(); + + // If no changes detected, show notification + if (changes.length === 0) { + notifications.show({ + title: 'No Changes', + message: 'Please select at least one field to update.', + color: 'orange', + }); + return; + } + + // Skip warning if suppressed + if (isWarningSuppressed('batch-update-channels')) { + return onSubmit(); + } + + setConfirmBatchUpdateOpen(true); + }; + + const onSubmit = async () => { + setConfirmBatchUpdateOpen(false); + setIsSubmitting(true); + + const values = { + ...form.getValues(), + }; // Handle channel group ID - convert to integer if it exists + if (selectedChannelGroup && selectedChannelGroup !== '-1') { + values.channel_group_id = parseInt(selectedChannelGroup); + } else { + delete values.channel_group_id; + } + + if (selectedLogoId && selectedLogoId !== 
'-1') { + if (selectedLogoId === '0') { + values.logo_id = null; + } else { + values.logo_id = parseInt(selectedLogoId); + } + } + delete values.logo; + + // Handle stream profile ID - convert special values + if (!values.stream_profile_id || values.stream_profile_id === '-1') { + delete values.stream_profile_id; + } else if ( + values.stream_profile_id === '0' || + values.stream_profile_id === 0 + ) { + values.stream_profile_id = null; // Convert "use default" to null + } + + if (values.user_level == '-1') { + delete values.user_level; + } + + // Remove the channel_group field from form values as we use channel_group_id + delete values.channel_group; + + try { + const applyRegex = regexFind.trim().length > 0; + + // First, handle standard field updates (name, group, logo, etc.) + if (applyRegex) { + // Build per-channel updates to apply unique names via regex + let flags = 'g'; + let re; + try { + re = new RegExp(regexFind, flags); + } catch (e) { + console.error('Invalid regex:', e); + setIsSubmitting(false); + return; + } + + const channelsMap = useChannelsStore.getState().channels; + const updates = channelIds.map((id) => { + const ch = channelsMap[id]; + const currentName = ch?.name ?? ''; + const newName = currentName.replace(re, regexReplace ?? ''); + const update = { id }; + if (newName !== currentName && newName.trim().length > 0) { + update.name = newName; + } + // Merge base values (group/profile/user_level) if present + Object.assign(update, values); + return update; + }); + + await API.bulkUpdateChannels(updates); + } else if (Object.keys(values).length > 0) { + await API.updateChannels(channelIds, values); + } + + // Then, handle EPG assignment if a dummy EPG was selected + if (selectedDummyEpgId) { + if (selectedDummyEpgId === 'clear') { + // Clear EPG assignments + const associations = channelIds.map((id) => ({ + channel_id: id, + epg_data_id: null, + })); + await API.batchSetEPG(associations); + } else { + // Assign the selected dummy EPG + const selectedEpg = epgs[selectedDummyEpgId]; + if (selectedEpg && selectedEpg.epg_data_count > 0) { + // Convert to number for comparison since Select returns string + const epgSourceId = parseInt(selectedDummyEpgId, 10); + + // Check if we already have EPG data loaded in the store + let epgData = tvgs.find((data) => data.epg_source === epgSourceId); + + // If not in store, fetch it + if (!epgData) { + const epgDataList = await API.getEPGData(); + epgData = epgDataList.find( + (data) => data.epg_source === epgSourceId + ); + } + + if (epgData) { + const associations = channelIds.map((id) => ({ + channel_id: id, + epg_data_id: epgData.id, + })); + await API.batchSetEPG(associations); + } + } + } + } + + // Refresh both the channels table data and the main channels store + await Promise.all([ + API.requeryChannels(), + useChannelsStore.getState().fetchChannels(), + ]); + onClose(); + } catch (error) { + console.error('Failed to update channels:', error); + } finally { + setIsSubmitting(false); + } + }; + + const handleSetNamesFromEpg = async () => { + if (!channelIds || channelIds.length === 0) { + notifications.show({ + title: 'No Channels Selected', + message: 'No channels to update.', + color: 'orange', + }); + return; + } + + // Skip warning if suppressed + if (isWarningSuppressed('batch-set-names-from-epg')) { + return executeSetNamesFromEpg(); + } + + setConfirmSetNamesOpen(true); + }; + + const executeSetNamesFromEpg = async () => { + setSettingNames(true); + try { + // Start the backend task + await 
API.setChannelNamesFromEpg(channelIds); + + // The task will send WebSocket updates for progress + // Just show that it started successfully + notifications.show({ + title: 'Task Started', + message: `Started setting names from EPG for ${channelIds.length} channels. Progress will be shown in notifications.`, + color: 'blue', + }); + + // Close the modal since the task is now running in background + onClose(); + } catch (error) { + console.error('Failed to start EPG name setting task:', error); + notifications.show({ + title: 'Error', + message: 'Failed to start EPG name setting task.', + color: 'red', + }); + } finally { + setSettingNames(false); + setConfirmSetNamesOpen(false); + } + }; + + const handleSetLogosFromEpg = async () => { + if (!channelIds || channelIds.length === 0) { + notifications.show({ + title: 'No Channels Selected', + message: 'No channels to update.', + color: 'orange', + }); + return; + } + + // Skip warning if suppressed + if (isWarningSuppressed('batch-set-logos-from-epg')) { + return executeSetLogosFromEpg(); + } + + setConfirmSetLogosOpen(true); + }; + + const executeSetLogosFromEpg = async () => { + setSettingLogos(true); + try { + // Start the backend task + await API.setChannelLogosFromEpg(channelIds); + + // The task will send WebSocket updates for progress + // Just show that it started successfully + notifications.show({ + title: 'Task Started', + message: `Started setting logos from EPG for ${channelIds.length} channels. Progress will be shown in notifications.`, + color: 'blue', + }); + + // Close the modal since the task is now running in background + onClose(); + } catch (error) { + console.error('Failed to start EPG logo setting task:', error); + notifications.show({ + title: 'Error', + message: 'Failed to start EPG logo setting task.', + color: 'red', + }); + } finally { + setSettingLogos(false); + setConfirmSetLogosOpen(false); + } + }; + + const handleSetTvgIdsFromEpg = async () => { + if (!channelIds || channelIds.length === 0) { + notifications.show({ + title: 'No Channels Selected', + message: 'No channels to update.', + color: 'orange', + }); + return; + } + + // Skip warning if suppressed + if (isWarningSuppressed('batch-set-tvg-ids-from-epg')) { + return executeSetTvgIdsFromEpg(); + } + + setConfirmSetTvgIdsOpen(true); + }; + + const executeSetTvgIdsFromEpg = async () => { + setSettingTvgIds(true); + try { + // Start the backend task + await API.setChannelTvgIdsFromEpg(channelIds); + + // The task will send WebSocket updates for progress + // Just show that it started successfully + notifications.show({ + title: 'Task Started', + message: `Started setting TVG-IDs from EPG for ${channelIds.length} channels. 
Progress will be shown in notifications.`, + color: 'blue', + }); + + // Close the modal since the task is now running in background + onClose(); + } catch (error) { + console.error('Failed to start EPG TVG-ID setting task:', error); + notifications.show({ + title: 'Error', + message: 'Failed to start EPG TVG-ID setting task.', + color: 'red', + }); + } finally { + setSettingTvgIds(false); + setConfirmSetTvgIdsOpen(false); + } + }; + + // useEffect(() => { + // // const sameStreamProfile = channels.every( + // // (channel) => channel.stream_profile_id == channels[0].stream_profile_id + // // ); + // // const sameChannelGroup = channels.every( + // // (channel) => channel.channel_group_id == channels[0].channel_group_id + // // ); + // // const sameUserLevel = channels.every( + // // (channel) => channel.user_level == channels[0].user_level + // // ); + // // form.setValues({ + // // ...(sameStreamProfile && { + // // stream_profile_id: `${channels[0].stream_profile_id}`, + // // }), + // // ...(sameChannelGroup && { + // // channel_group_id: `${channels[0].channel_group_id}`, + // // }), + // // ...(sameUserLevel && { + // // user_level: `${channels[0].user_level}`, + // // }), + // // }); + // }, [channelIds, streamProfiles, channelGroups]); + + const handleChannelGroupModalClose = (newGroup) => { + setChannelGroupModalOpen(false); + + if (newGroup && newGroup.id) { + setSelectedChannelGroup(newGroup.id); + form.setValues({ + channel_group: `${newGroup.name}`, + }); + } + }; + const filteredGroups = [ + { id: '-1', name: '(no change)' }, + ...groupOptions.filter((group) => + group.name.toLowerCase().includes(groupFilter.toLowerCase()) + ), + ]; + + const logoOptions = useMemo(() => { + return [ + { id: '-1', name: '(no change)' }, + { id: '0', name: 'Use Default', isDefault: true }, + ...Object.values(channelLogos), + ]; + }, [channelLogos]); + + const filteredLogos = logoOptions.filter((logo) => + logo.name.toLowerCase().includes(logoFilter.toLowerCase()) + ); + + if (!isOpen) { + return <>; + } + + return ( + <> + + + Channels + + } + styles={{ hannontent: { '--mantine-color-body': '#27272A' } }} + > +
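        {/* Batch edit form: regex-based rename (find/replace), EPG operations (set names,
            logos, and TVG-IDs from EPG; assign or clear a dummy EPG), then the shared
            channel group, logo, stream profile, and user level fields. */}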
+ + + + + + Channel Name + + + + setRegexFind(e.currentTarget.value)} + style={{ flex: 1 }} + /> + setRegexReplace(e.currentTarget.value)} + style={{ flex: 1 }} + /> + + + + + + + + EPG Operations + + + + + + + + + + + Assign Dummy EPG + + ({ + value: `${option.id}`, + label: option.name, + })) + )} + size="xs" + /> + + ({ + value: e.id.toString(), + label: e.name, + }))} + onChange={handleImportFromTemplate} + clearable + searchable + /> + + + )} + + {/* Basic Settings */} + + + {/* Pattern Configuration */} + + + + Define regex patterns to extract information from channel titles or + stream names. Use named capture groups like + (?<groupname>pattern). + + +
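As a concrete illustration of the named-capture-group syntax described above (the pattern and the sample title are hypothetical):

// Example only: pull a region, channel name, and quality marker out of a title.
const pattern = /^(?<region>[A-Z]+):\s*(?<name>.+?)\s*(?<quality>HD|FHD|4K)?$/;
const match = 'US: ESPN 2 HD'.match(pattern);
if (match) {
  console.log(match.groups.region);  // "US"
  console.log(match.groups.name);    // "ESPN 2"
  console.log(match.groups.quality); // "HD"
}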