diff --git a/.dockerignore b/.dockerignore index 5073af60..c79ca7b4 100755 --- a/.dockerignore +++ b/.dockerignore @@ -11,6 +11,10 @@ **/.toolstarget **/.vs **/.vscode +**/.history +**/media +**/models +**/static **/*.*proj.user **/*.dbmdl **/*.jfm @@ -26,3 +30,4 @@ **/values.dev.yaml LICENSE README.md +data/ diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index d36be10c..47f12f7d 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -1,7 +1,7 @@ name: Bug Report description: I have an issue with Dispatcharr title: "[Bug]: " -labels: ["Bug", "Triage"] +labels: ["Triage"] type: "Bug" projects: [] assignees: [] diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index bf7db830..77a03df7 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -1,7 +1,7 @@ name: Feature request description: I want to suggest a new feature for Dispatcharr title: "[Feature]: " -labels: ["Feature Request"] +labels: ["Triage"] type: "Feature" projects: [] assignees: [] diff --git a/.github/workflows/base-image.yml b/.github/workflows/base-image.yml index 1da33d4f..f926d892 100644 --- a/.github/workflows/base-image.yml +++ b/.github/workflows/base-image.yml @@ -2,42 +2,37 @@ name: Base Image Build on: push: - branches: [ main, dev ] + branches: [main, dev] paths: - 'docker/DispatcharrBase' - '.github/workflows/base-image.yml' - 'requirements.txt' pull_request: - branches: [ main, dev ] + branches: [main, dev] paths: - 'docker/DispatcharrBase' - '.github/workflows/base-image.yml' - 'requirements.txt' - workflow_dispatch: # Allow manual triggering + workflow_dispatch: # Allow manual triggering permissions: - contents: write # For managing releases and pushing tags - packages: write # For publishing to GitHub Container Registry + contents: write # For managing releases and pushing tags + packages: write # For publishing to GitHub Container Registry jobs: - build-base-image: - runs-on: ubuntu-latest + prepare: + runs-on: ubuntu-24.04 + outputs: + repo_owner: ${{ steps.meta.outputs.repo_owner }} + repo_name: ${{ steps.meta.outputs.repo_name }} + branch_tag: ${{ steps.meta.outputs.branch_tag }} + timestamp: ${{ steps.timestamp.outputs.timestamp }} steps: - uses: actions/checkout@v3 with: fetch-depth: 0 token: ${{ secrets.GITHUB_TOKEN }} - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - - name: Login to GitHub Container Registry - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Generate timestamp for build id: timestamp run: | @@ -66,13 +61,111 @@ jobs: echo "branch_tag=base-${BRANCH}" >> $GITHUB_OUTPUT fi + docker: + needs: [prepare] + strategy: + fail-fast: false + matrix: + platform: [amd64, arm64] + include: + - platform: amd64 + runner: ubuntu-24.04 + - platform: arm64 + runner: ubuntu-24.04-arm + runs-on: ${{ matrix.runner }} + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Configure Git + run: | + git config user.name "GitHub Actions" + git config user.email "actions@github.com" + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Login to Docker Hub + 
uses: docker/login-action@v2 + with: + registry: docker.io + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Build and push Docker base image uses: docker/build-push-action@v4 with: context: . file: ./docker/DispatcharrBase - push: true - platforms: linux/amd64,linux/arm64 + push: ${{ github.event_name != 'pull_request' }} + platforms: linux/${{ matrix.platform }} tags: | - ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:base - ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:base-${{ steps.timestamp.outputs.timestamp }} + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }} + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }} + build-args: | + REPO_OWNER=${{ needs.prepare.outputs.repo_owner }} + REPO_NAME=${{ needs.prepare.outputs.repo_name }} + BRANCH=${{ github.ref_name }} + REPO_URL=https://github.com/${{ github.repository }} + TIMESTAMP=${{ needs.prepare.outputs.timestamp }} + + create-manifest: + needs: [prepare, docker] + runs-on: ubuntu-24.04 + if: ${{ github.event_name != 'pull_request' }} + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + registry: docker.io + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Create multi-arch manifest tags + run: | + set -euo pipefail + OWNER=${{ needs.prepare.outputs.repo_owner }} + REPO=${{ needs.prepare.outputs.repo_name }} + BRANCH_TAG=${{ needs.prepare.outputs.branch_tag }} + TIMESTAMP=${{ needs.prepare.outputs.timestamp }} + + echo "Creating multi-arch manifest for ${OWNER}/${REPO}" + + # GitHub Container Registry manifests + # branch tag (e.g. base or base-dev) + docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \ + ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-arm64 + + # branch + timestamp tag + docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \ + ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-arm64 + + # Docker Hub manifests + # branch tag (e.g. 
base or base-dev) + docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \ + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-arm64 + + # branch + timestamp tag + docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \ + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-arm64 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4a60ac49..5da4118c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,19 +2,84 @@ name: CI Pipeline on: push: - branches: [ dev ] + branches: [dev] pull_request: - branches: [ dev ] - workflow_dispatch: # Allow manual triggering + branches: [dev] + workflow_dispatch: -# Add explicit permissions for the workflow permissions: - contents: write # For managing releases and pushing tags - packages: write # For publishing to GitHub Container Registry + contents: write + packages: write jobs: - build: - runs-on: ubuntu-latest + prepare: + runs-on: ubuntu-24.04 + # compute a single timestamp, version, and repo metadata for the entire workflow + outputs: + repo_owner: ${{ steps.meta.outputs.repo_owner }} + repo_name: ${{ steps.meta.outputs.repo_name }} + branch_tag: ${{ steps.meta.outputs.branch_tag }} + version: ${{ steps.version.outputs.version }} + timestamp: ${{ steps.timestamp.outputs.timestamp }} + + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Generate timestamp for build + id: timestamp + run: | + TIMESTAMP=$(date -u +'%Y%m%d%H%M%S') + echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT + + - name: Extract version info + id: version + run: | + VERSION=$(python -c "import version; print(version.__version__)") + echo "version=${VERSION}" >> $GITHUB_OUTPUT + + - name: Set repository and image metadata + id: meta + run: | + REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]') + echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT + + REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]') + echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT + + if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then + echo "branch_tag=latest" >> $GITHUB_OUTPUT + echo "is_main=true" >> $GITHUB_OUTPUT + elif [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then + echo "branch_tag=dev" >> $GITHUB_OUTPUT + echo "is_main=false" >> $GITHUB_OUTPUT + else + BRANCH=$(echo "${{ github.ref }}" | sed 's/refs\/heads\///' | sed 's/[^a-zA-Z0-9]/-/g') + echo "branch_tag=${BRANCH}" >> $GITHUB_OUTPUT + echo "is_main=false" >> $GITHUB_OUTPUT + fi + + if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then + echo "is_fork=true" >> $GITHUB_OUTPUT + else + echo "is_fork=false" >> $GITHUB_OUTPUT + fi + + docker: + needs: [prepare] + strategy: + fail-fast: false + matrix: + platform: [amd64, arm64] + include: + - platform: amd64 + runner: ubuntu-24.04 + - platform: arm64 + runner: ubuntu-24.04-arm + runs-on: ${{ matrix.runner }} + # no per-job outputs here; shared metadata comes from the `prepare` job steps: - uses: actions/checkout@v3 with: @@ -45,66 +110,85 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Generate timestamp for build - id: timestamp - run: | - 
TIMESTAMP=$(date -u +'%Y%m%d%H%M%S') - echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + registry: docker.io + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Extract version info - id: version - run: | - VERSION=$(python -c "import version; print(version.__version__)") - echo "version=${VERSION}" >> $GITHUB_OUTPUT - echo "sha_short=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT - - - name: Set repository and image metadata - id: meta - run: | - # Get lowercase repository owner - REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]') - echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT - - # Get repository name - REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]') - echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT - - # Determine branch name - if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then - echo "branch_tag=latest" >> $GITHUB_OUTPUT - echo "is_main=true" >> $GITHUB_OUTPUT - elif [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then - echo "branch_tag=dev" >> $GITHUB_OUTPUT - echo "is_main=false" >> $GITHUB_OUTPUT - else - # For other branches, use the branch name - BRANCH=$(echo "${{ github.ref }}" | sed 's/refs\/heads\///' | sed 's/[^a-zA-Z0-9]/-/g') - echo "branch_tag=${BRANCH}" >> $GITHUB_OUTPUT - echo "is_main=false" >> $GITHUB_OUTPUT - fi - - # Determine if this is from a fork - if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then - echo "is_fork=true" >> $GITHUB_OUTPUT - else - echo "is_fork=false" >> $GITHUB_OUTPUT - fi + # use metadata from the prepare job - name: Build and push Docker image uses: docker/build-push-action@v4 with: context: . push: ${{ github.event_name != 'pull_request' }} - platforms: linux/amd64,linux/arm64 + # Build only the platform for this matrix job to avoid running amd64 + # stages under qemu on an arm64 runner (and vice-versa). This makes + # the matrix runner's platform the one built by buildx. 
+ platforms: linux/${{ matrix.platform }} + # push arch-specific tags from each matrix job (they will be combined + # into a multi-arch manifest in a follow-up job) tags: | - ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.meta.outputs.branch_tag }} - ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.version.outputs.version }}-${{ steps.timestamp.outputs.timestamp }} - ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.version.outputs.sha_short }} + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }} + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }} build-args: | - REPO_OWNER=${{ steps.meta.outputs.repo_owner }} - REPO_NAME=${{ steps.meta.outputs.repo_name }} + REPO_OWNER=${{ needs.prepare.outputs.repo_owner }} + REPO_NAME=${{ needs.prepare.outputs.repo_name }} BASE_TAG=base BRANCH=${{ github.ref_name }} REPO_URL=https://github.com/${{ github.repository }} - TIMESTAMP=${{ steps.timestamp.outputs.timestamp }} + TIMESTAMP=${{ needs.prepare.outputs.timestamp }} file: ./docker/Dockerfile + + create-manifest: + # wait for prepare and all matrix builds to finish + needs: [prepare, docker] + runs-on: ubuntu-24.04 + if: ${{ github.event_name != 'pull_request' }} + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + registry: docker.io + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Create multi-arch manifest tags + run: | + set -euo pipefail + OWNER=${{ needs.prepare.outputs.repo_owner }} + REPO=${{ needs.prepare.outputs.repo_name }} + BRANCH_TAG=${{ needs.prepare.outputs.branch_tag }} + VERSION=${{ needs.prepare.outputs.version }} + TIMESTAMP=${{ needs.prepare.outputs.timestamp }} + + echo "Creating multi-arch manifest for ${OWNER}/${REPO}" + + # branch tag (e.g. 
latest or dev) + docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \ + ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-arm64 + + # version + timestamp tag + docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP} \ + ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP}-arm64 + + # also create Docker Hub manifests using the same username + docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \ + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-arm64 + + docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP} \ + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP}-arm64 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 52c2ada2..27356c9a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -15,16 +15,21 @@ on: # Add explicit permissions for the workflow permissions: - contents: write # For managing releases and pushing tags - packages: write # For publishing to GitHub Container Registry + contents: write # For managing releases and pushing tags + packages: write # For publishing to GitHub Container Registry jobs: - release: - runs-on: ubuntu-latest + prepare: + runs-on: ubuntu-24.04 + outputs: + new_version: ${{ steps.update_version.outputs.new_version }} + repo_owner: ${{ steps.meta.outputs.repo_owner }} + repo_name: ${{ steps.meta.outputs.repo_name }} steps: - uses: actions/checkout@v3 with: fetch-depth: 0 + token: ${{ secrets.GITHUB_TOKEN }} - name: Configure Git run: | @@ -38,14 +43,45 @@ jobs: NEW_VERSION=$(python -c "import version; print(f'{version.__version__}')") echo "new_version=${NEW_VERSION}" >> $GITHUB_OUTPUT - - name: Set lowercase repo owner - id: repo_owner + - name: Set repository metadata + id: meta run: | REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]') - echo "lowercase=${REPO_OWNER}" >> $GITHUB_OUTPUT + echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT - - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]') + echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT + + - name: Commit and Tag + run: | + git add version.py + git commit -m "Release v${{ steps.update_version.outputs.new_version }}" + git tag -a "v${{ steps.update_version.outputs.new_version }}" -m "Release v${{ steps.update_version.outputs.new_version }}" + git push origin main --tags + + docker: + needs: [prepare] + strategy: + fail-fast: false + matrix: + platform: [amd64, arm64] + include: + - platform: amd64 + runner: ubuntu-24.04 + - platform: arm64 + runner: ubuntu-24.04-arm + runs-on: ${{ matrix.runner }} + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + token: ${{ secrets.GITHUB_TOKEN }} + ref: main + + - name: Configure Git + run: | + git config user.name "GitHub Actions" + git config user.email "actions@github.com" - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 @@ -57,36 +93,88 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Commit and Tag - run: | - git add version.py - git commit -m 
"Release v${{ steps.update_version.outputs.new_version }}" - git tag -a "v${{ steps.update_version.outputs.new_version }}" -m "Release v${{ steps.update_version.outputs.new_version }}" - git push origin main --tags + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + registry: docker.io + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Build and Push Release Image + - name: Build and push Docker image uses: docker/build-push-action@v4 with: context: . push: true - platforms: linux/amd64,linux/arm64, #linux/arm/v7 # Multi-arch support for releases + platforms: linux/${{ matrix.platform }} tags: | - ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest - ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }} - ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest-amd64 - ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest-arm64 - ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }}-amd64 - ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }}-arm64 + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:latest-${{ matrix.platform }} + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.new_version }}-${{ matrix.platform }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:latest-${{ matrix.platform }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.new_version }}-${{ matrix.platform }} build-args: | + REPO_OWNER=${{ needs.prepare.outputs.repo_owner }} + REPO_NAME=${{ needs.prepare.outputs.repo_name }} BRANCH=${{ github.ref_name }} REPO_URL=https://github.com/${{ github.repository }} file: ./docker/Dockerfile + create-manifest: + needs: [prepare, docker] + runs-on: ubuntu-24.04 + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + registry: docker.io + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Create multi-arch manifest tags + run: | + set -euo pipefail + OWNER=${{ needs.prepare.outputs.repo_owner }} + REPO=${{ needs.prepare.outputs.repo_name }} + VERSION=${{ needs.prepare.outputs.new_version }} + + echo "Creating multi-arch manifest for ${OWNER}/${REPO}" + + # GitHub Container Registry manifests + # latest tag + docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:latest \ + ghcr.io/${OWNER}/${REPO}:latest-amd64 ghcr.io/${OWNER}/${REPO}:latest-arm64 + + # version tag + docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${VERSION} \ + ghcr.io/${OWNER}/${REPO}:${VERSION}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-arm64 + + # Docker Hub manifests + # latest tag + docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest \ + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest-arm64 + + # version tag + docker buildx imagetools create --tag docker.io/${{ 
secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION} \ + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-arm64 + + create-release: + needs: [prepare, create-manifest] + runs-on: ubuntu-24.04 + steps: - name: Create GitHub Release uses: softprops/action-gh-release@v1 with: - tag_name: v${{ steps.update_version.outputs.new_version }} - name: Release v${{ steps.update_version.outputs.new_version }} + tag_name: v${{ needs.prepare.outputs.new_version }} + name: Release v${{ needs.prepare.outputs.new_version }} draft: false prerelease: false token: ${{ secrets.GITHUB_TOKEN }} diff --git a/Plugins.md b/Plugins.md new file mode 100644 index 00000000..62ea0d87 --- /dev/null +++ b/Plugins.md @@ -0,0 +1,286 @@ +# Dispatcharr Plugins + +This document explains how to build, install, and use Python plugins in Dispatcharr. It covers discovery, the plugin interface, settings, actions, how to access application APIs, and examples. + +--- + +## Quick Start + +1) Create a folder under `/app/data/plugins/my_plugin/` (host path `data/plugins/my_plugin/` in the repo). + +2) Add a `plugin.py` file exporting a `Plugin` class: + +``` +# /app/data/plugins/my_plugin/plugin.py +class Plugin: + name = "My Plugin" + version = "0.1.0" + description = "Does something useful" + + # Settings fields rendered by the UI and persisted by the backend + fields = [ + {"id": "enabled", "label": "Enabled", "type": "boolean", "default": True}, + {"id": "limit", "label": "Item limit", "type": "number", "default": 5}, + {"id": "mode", "label": "Mode", "type": "select", "default": "safe", + "options": [ + {"value": "safe", "label": "Safe"}, + {"value": "fast", "label": "Fast"}, + ]}, + {"id": "note", "label": "Note", "type": "string", "default": ""}, + ] + + # Actions appear as buttons. Clicking one calls run(action, params, context) + actions = [ + {"id": "do_work", "label": "Do Work", "description": "Process items"}, + ] + + def run(self, action: str, params: dict, context: dict): + settings = context.get("settings", {}) + logger = context.get("logger") + + if action == "do_work": + limit = int(settings.get("limit", 5)) + mode = settings.get("mode", "safe") + logger.info(f"My Plugin running with limit={limit}, mode={mode}") + # Do a small amount of work here. Schedule Celery tasks for heavy work. + return {"status": "ok", "processed": limit, "mode": mode} + + return {"status": "error", "message": f"Unknown action {action}"} +``` + +3) Open the Plugins page in the UI, click the refresh icon to reload discovery, then configure and run your plugin. + +--- + +## Where Plugins Live + +- Default directory: `/app/data/plugins` inside the container. +- Override with env var: `DISPATCHARR_PLUGINS_DIR`. +- Each plugin is a directory containing either: + - `plugin.py` exporting a `Plugin` class, or + - a Python package (`__init__.py`) exporting a `Plugin` class. + +The directory name (lowercased, spaces as `_`) is used as the registry key and module import path (e.g. `my_plugin.plugin`). + +--- + +## Discovery & Lifecycle + +- Discovery runs at server startup and on-demand when: + - Fetching the plugins list from the UI + - Hitting `POST /api/plugins/plugins/reload/` +- The loader imports each plugin module and instantiates `Plugin()`. +- Metadata (name, version, description) and a per-plugin settings JSON are stored in the DB. 
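For orientation, here is a minimal, hypothetical sketch of the discovery contract described above (directory name lowercased with spaces as `_`, then `plugin.py` or a package `__init__.py` loaded and `Plugin()` instantiated). It is not the project's loader — the real one lives in `apps/plugins/loader.py` (listed under "Backend code" below) and additionally persists metadata and per-plugin settings to the DB — just an illustration of what a plugin folder has to satisfy:

```python
# Hypothetical sketch of the discovery convention, assuming a layout of
# <plugins_dir>/<plugin_name>/plugin.py (or __init__.py) exporting a Plugin
# class. The real loader (apps/plugins/loader.py) also stores metadata and
# settings in the DB; this only demonstrates the import contract.
import importlib.util
import os

PLUGINS_DIR = os.environ.get("DISPATCHARR_PLUGINS_DIR", "/app/data/plugins")


def discover_plugins(plugins_dir: str = PLUGINS_DIR) -> dict:
    plugins = {}
    if not os.path.isdir(plugins_dir):
        return plugins
    for entry in sorted(os.listdir(plugins_dir)):
        path = os.path.join(plugins_dir, entry)
        if not os.path.isdir(path):
            continue
        # Directory name, lowercased with spaces as underscores, is the registry key.
        key = entry.lower().replace(" ", "_")
        module_file = os.path.join(path, "plugin.py")
        if not os.path.exists(module_file):
            module_file = os.path.join(path, "__init__.py")
        if not os.path.exists(module_file):
            continue
        spec = importlib.util.spec_from_file_location(f"{key}.plugin", module_file)
        module = importlib.util.module_from_spec(spec)
        try:
            spec.loader.exec_module(module)
            plugins[key] = module.Plugin()
        except Exception as exc:  # a broken plugin should not abort discovery
            print(f"Skipping plugin '{key}': {exc}")
    return plugins


if __name__ == "__main__":
    for key, plugin in discover_plugins().items():
        print(f"{key}: {plugin.name} v{plugin.version}")
```

Running a check like this against a plugin folder before zipping it up is a quick way to catch import errors ahead of the UI's reload step.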
+ +Backend code: +- Loader: `apps/plugins/loader.py` +- API Views: `apps/plugins/api_views.py` +- API URLs: `apps/plugins/api_urls.py` +- Model: `apps/plugins/models.py` (stores `enabled` flag and `settings` per plugin) + +--- + +## Plugin Interface + +Export a `Plugin` class. Supported attributes and behavior: + +- `name` (str): Human-readable name. +- `version` (str): Semantic version string. +- `description` (str): Short description. +- `fields` (list): Settings schema used by the UI to render controls. +- `actions` (list): Available actions; the UI renders a Run button for each. +- `run(action, params, context)` (callable): Invoked when a user clicks an action. + +### Settings Schema +Supported field `type`s: +- `boolean` +- `number` +- `string` +- `select` (requires `options`: `[{"value": ..., "label": ...}, ...]`) + +Common field keys: +- `id` (str): Settings key. +- `label` (str): Label shown in the UI. +- `type` (str): One of above. +- `default` (any): Default value used until saved. +- `help_text` (str, optional): Shown under the control. +- `options` (list, for select): List of `{value, label}`. + +The UI automatically renders settings and persists them. The backend stores settings in `PluginConfig.settings`. + +Read settings in `run` via `context["settings"]`. + +### Actions +Each action is a dict: +- `id` (str): Unique action id. +- `label` (str): Button label. +- `description` (str, optional): Helper text. + +Clicking an action calls your plugin’s `run(action, params, context)` and shows a notification with the result or error. + +### Action Confirmation (Modal) +Developers can request a confirmation modal per action using the `confirm` key on the action. Options: + +- Boolean: `confirm: true` will show a default confirmation modal. +- Object: `confirm: { required: true, title: '...', message: '...' }` to customize the modal title and message. + +Example: +``` +actions = [ + { + "id": "danger_run", + "label": "Do Something Risky", + "description": "Runs a job that affects many records.", + "confirm": { "required": true, "title": "Proceed?", "message": "This will modify many records." }, + } +] +``` + +--- + +## Accessing Dispatcharr APIs from Plugins + +Plugins are server-side Python code running within the Django application. You can: + +- Import models and run queries/updates: + ``` + from apps.m3u.models import M3UAccount + from apps.epg.models import EPGSource + from apps.channels.models import Channel + from core.models import CoreSettings + ``` + +- Dispatch Celery tasks for heavy work (recommended): + ``` + from apps.m3u.tasks import refresh_m3u_accounts # apps/m3u/tasks.py + from apps.epg.tasks import refresh_all_epg_data # apps/epg/tasks.py + + refresh_m3u_accounts.delay() + refresh_all_epg_data.delay() + ``` + +- Send WebSocket updates: + ``` + from core.utils import send_websocket_update + send_websocket_update('updates', 'update', {"type": "plugin", "plugin": "my_plugin", "message": "Done"}) + ``` + +- Use transactions: + ``` + from django.db import transaction + with transaction.atomic(): + # bulk updates here + ... + ``` + +- Log via provided context or standard logging: + ``` + def run(self, action, params, context): + logger = context.get("logger") # already configured + logger.info("running action %s", action) + ``` + +Prefer Celery tasks (`.delay()`) to keep `run` fast and non-blocking. 
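Putting the pieces above together, a hedged sketch of a `run` method that validates UI-supplied params, queues the heavy work via Celery, and pushes a WebSocket update might look like the following. The task and helper imports are the ones listed above; the `refresh_sources` action id, the `only_epg` param, and the `notify` settings field are illustrative placeholders, not part of any shipped plugin:

```python
def run(self, action: str, params: dict, context: dict):
    logger = context.get("logger")
    settings = context.get("settings", {})

    if action != "refresh_sources":
        return {"status": "error", "message": f"Unknown action {action}"}

    # Validate/sanitize params coming from the UI before using them.
    only_epg = bool(params.get("only_epg", False))

    # Queue heavy work via Celery so run() returns quickly.
    from apps.epg.tasks import refresh_all_epg_data
    refresh_all_epg_data.delay()
    if not only_epg:
        from apps.m3u.tasks import refresh_m3u_accounts
        refresh_m3u_accounts.delay()

    # Optionally push a WebSocket update so the UI reflects the queued work.
    if settings.get("notify", True):  # "notify" is a hypothetical settings field
        from core.utils import send_websocket_update
        send_websocket_update(
            "updates",
            "update",
            {"type": "plugin", "plugin": "my_plugin", "message": "Refresh queued"},
        )

    logger.info("refresh_sources queued (only_epg=%s)", only_epg)
    return {"status": "queued", "only_epg": only_epg}
```

The important property is that `run` itself only validates and enqueues; everything slow happens in the Celery workers.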
+ +--- + +## REST Endpoints (for UI and tooling) + +- List plugins: `GET /api/plugins/plugins/` + - Response: `{ "plugins": [{ key, name, version, description, enabled, fields, settings, actions }, ...] }` +- Reload discovery: `POST /api/plugins/plugins/reload/` +- Import plugin: `POST /api/plugins/plugins/import/` with form-data file field `file` +- Update settings: `POST /api/plugins/plugins//settings/` with `{"settings": {...}}` +- Run action: `POST /api/plugins/plugins//run/` with `{"action": "id", "params": {...}}` +- Enable/disable: `POST /api/plugins/plugins//enabled/` with `{"enabled": true|false}` + +Notes: +- When disabled, a plugin cannot run actions; backend returns HTTP 403. + +--- + +## Importing Plugins + +- In the UI, click the Import button on the Plugins page and upload a `.zip` containing a plugin folder. +- The archive should contain either `plugin.py` or a Python package (`__init__.py`). +- On success, the UI shows the plugin name/description and lets you enable it immediately (plugins are disabled by default). + +--- + +## Enabling / Disabling Plugins + +- Each plugin has a persisted `enabled` flag (default: disabled) and `ever_enabled` flag in the DB (`apps/plugins/models.py`). +- New plugins are disabled by default and require an explicit enable. +- The first time a plugin is enabled, the UI shows a trust warning modal explaining that plugins can run arbitrary server-side code. +- The Plugins page shows a toggle in the card header. Turning it off dims the card and disables the Run button. +- Backend enforcement: Attempts to run an action for a disabled plugin return HTTP 403. + +--- + +## Example: Refresh All Sources Plugin + +Path: `data/plugins/refresh_all/plugin.py` + +``` +class Plugin: + name = "Refresh All Sources" + version = "1.0.0" + description = "Force refresh all M3U accounts and EPG sources." + + fields = [ + {"id": "confirm", "label": "Require confirmation", "type": "boolean", "default": True, + "help_text": "If enabled, the UI should ask before running."} + ] + + actions = [ + {"id": "refresh_all", "label": "Refresh All M3Us and EPGs", + "description": "Queues background refresh for all active M3U accounts and EPG sources."} + ] + + def run(self, action: str, params: dict, context: dict): + if action == "refresh_all": + from apps.m3u.tasks import refresh_m3u_accounts + from apps.epg.tasks import refresh_all_epg_data + refresh_m3u_accounts.delay() + refresh_all_epg_data.delay() + return {"status": "queued", "message": "Refresh jobs queued"} + return {"status": "error", "message": f"Unknown action: {action}"} +``` + +--- + +## Best Practices + +- Keep `run` short and schedule heavy operations via Celery tasks. +- Validate and sanitize `params` received from the UI. +- Use database transactions for bulk or related updates. +- Log actionable messages for troubleshooting. +- Only write files under `/data` or `/app/data` paths. +- Treat plugins as trusted code: they run with full app permissions. + +--- + +## Troubleshooting + +- Plugin not listed: ensure the folder exists and contains `plugin.py` with a `Plugin` class. +- Import errors: the folder name is the import name; avoid spaces or exotic characters. +- No confirmation: include a boolean field with `id: "confirm"` and set it to true or default true. +- HTTP 403 on run: the plugin is disabled; enable it from the toggle or via the `enabled/` endpoint. + +--- + +## Contributing + +- Keep dependencies minimal. Vendoring small helpers into the plugin folder is acceptable. 
+- Use the existing task and model APIs where possible; propose extensions if you need new capabilities. + +--- + +## Internals Reference + +- Loader: `apps/plugins/loader.py` +- API Views: `apps/plugins/api_views.py` +- API URLs: `apps/plugins/api_urls.py` +- Model: `apps/plugins/models.py` +- Frontend page: `frontend/src/pages/Plugins.jsx` +- Sidebar entry: `frontend/src/components/Sidebar.jsx` diff --git a/README.md b/README.md index 5216663f..9b359e25 100644 --- a/README.md +++ b/README.md @@ -22,6 +22,7 @@ Dispatcharr has officially entered **BETA**, bringing powerful new features and πŸ“Š **Real-Time Stats Dashboard** β€” Live insights into stream health and client activity\ 🧠 **EPG Auto-Match** β€” Match program data to channels automatically\ βš™οΈ **Streamlink + FFmpeg Support** β€” Flexible backend options for streaming and recording\ +🎬 **VOD Management** β€” Full Video on Demand support with movies and TV series\ 🧼 **UI & UX Enhancements** β€” Smoother, faster, more responsive interface\ πŸ› **Output Compatibility** β€” HDHomeRun, M3U, and XMLTV EPG support for Plex, Jellyfin, and more @@ -31,6 +32,7 @@ Dispatcharr has officially entered **BETA**, bringing powerful new features and βœ… **Full IPTV Control** β€” Import, organize, proxy, and monitor IPTV streams on your own terms\ βœ… **Smart Playlist Handling** β€” M3U import, filtering, grouping, and failover support\ +βœ… **VOD Content Management** β€” Organize movies and TV series with metadata and streaming\ βœ… **Reliable EPG Integration** β€” Match and manage TV guide data with ease\ βœ… **Clean & Responsive Interface** β€” Modern design that gets out of your way\ βœ… **Fully Self-Hosted** β€” Total control, zero reliance on third-party services diff --git a/apps/accounts/api_urls.py b/apps/accounts/api_urls.py index e1518105..dda3832c 100644 --- a/apps/accounts/api_urls.py +++ b/apps/accounts/api_urls.py @@ -1,41 +1,39 @@ from django.urls import path, include from rest_framework.routers import DefaultRouter from .api_views import ( - AuthViewSet, UserViewSet, GroupViewSet, - list_permissions, initialize_superuser + AuthViewSet, + UserViewSet, + GroupViewSet, + TokenObtainPairView, + TokenRefreshView, + list_permissions, + initialize_superuser, ) from rest_framework_simplejwt import views as jwt_views -app_name = 'accounts' +app_name = "accounts" # πŸ”Ή Register ViewSets with a Router router = DefaultRouter() -router.register(r'users', UserViewSet, basename='user') -router.register(r'groups', GroupViewSet, basename='group') +router.register(r"users", UserViewSet, basename="user") +router.register(r"groups", GroupViewSet, basename="group") # πŸ”Ή Custom Authentication Endpoints -auth_view = AuthViewSet.as_view({ - 'post': 'login' -}) +auth_view = AuthViewSet.as_view({"post": "login"}) -logout_view = AuthViewSet.as_view({ - 'post': 'logout' -}) +logout_view = AuthViewSet.as_view({"post": "logout"}) # πŸ”Ή Define API URL patterns urlpatterns = [ # Authentication - path('auth/login/', auth_view, name='user-login'), - path('auth/logout/', logout_view, name='user-logout'), - + path("auth/login/", auth_view, name="user-login"), + path("auth/logout/", logout_view, name="user-logout"), # Superuser API - path('initialize-superuser/', initialize_superuser, name='initialize_superuser'), - + path("initialize-superuser/", initialize_superuser, name="initialize_superuser"), # Permissions API - path('permissions/', list_permissions, name='list-permissions'), - - path('token/', jwt_views.TokenObtainPairView.as_view(), 
name='token_obtain_pair'), - path('token/refresh/', jwt_views.TokenRefreshView.as_view(), name='token_refresh'), + path("permissions/", list_permissions, name="list-permissions"), + path("token/", TokenObtainPairView.as_view(), name="token_obtain_pair"), + path("token/refresh/", TokenRefreshView.as_view(), name="token_refresh"), ] # πŸ”Ή Include ViewSet routes diff --git a/apps/accounts/api_views.py b/apps/accounts/api_views.py index 27d844df..bf87c2ab 100644 --- a/apps/accounts/api_views.py +++ b/apps/accounts/api_views.py @@ -2,16 +2,52 @@ from django.contrib.auth import authenticate, login, logout from django.contrib.auth.models import Group, Permission from django.http import JsonResponse, HttpResponse from django.views.decorators.csrf import csrf_exempt -from rest_framework.decorators import api_view, permission_classes -from rest_framework.permissions import IsAuthenticated, AllowAny +from rest_framework.decorators import api_view, permission_classes, action from rest_framework.response import Response -from rest_framework import viewsets +from rest_framework import viewsets, status from drf_yasg.utils import swagger_auto_schema from drf_yasg import openapi import json +from .permissions import IsAdmin, Authenticated +from dispatcharr.utils import network_access_allowed from .models import User from .serializers import UserSerializer, GroupSerializer, PermissionSerializer +from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView + + +class TokenObtainPairView(TokenObtainPairView): + def post(self, request, *args, **kwargs): + # Custom logic here + if not network_access_allowed(request, "UI"): + return Response({"error": "Forbidden"}, status=status.HTTP_403_FORBIDDEN) + + # Get the response from the parent class first + response = super().post(request, *args, **kwargs) + + # If login was successful, update last_login + if response.status_code == 200: + username = request.data.get("username") + if username: + from django.utils import timezone + try: + user = User.objects.get(username=username) + user.last_login = timezone.now() + user.save(update_fields=['last_login']) + except User.DoesNotExist: + pass # User doesn't exist, but login somehow succeeded + + return response + + +class TokenRefreshView(TokenRefreshView): + def post(self, request, *args, **kwargs): + # Custom logic here + if not network_access_allowed(request, "UI"): + return Response({"error": "Unauthorized"}, status=status.HTTP_403_FORBIDDEN) + + return super().post(request, *args, **kwargs) + @csrf_exempt # In production, consider CSRF protection strategies or ensure this endpoint is only accessible when no superuser exists. 
def initialize_superuser(request): @@ -26,15 +62,20 @@ def initialize_superuser(request): password = data.get("password") email = data.get("email", "") if not username or not password: - return JsonResponse({"error": "Username and password are required."}, status=400) + return JsonResponse( + {"error": "Username and password are required."}, status=400 + ) # Create the superuser - User.objects.create_superuser(username=username, password=password, email=email) + User.objects.create_superuser( + username=username, password=password, email=email, user_level=10 + ) return JsonResponse({"superuser_exists": True}) except Exception as e: return JsonResponse({"error": str(e)}, status=500) # For GET requests, indicate no superuser exists return JsonResponse({"superuser_exists": False}) + # πŸ”Ή 1) Authentication APIs class AuthViewSet(viewsets.ViewSet): """Handles user login and logout""" @@ -43,36 +84,45 @@ class AuthViewSet(viewsets.ViewSet): operation_description="Authenticate and log in a user", request_body=openapi.Schema( type=openapi.TYPE_OBJECT, - required=['username', 'password'], + required=["username", "password"], properties={ - 'username': openapi.Schema(type=openapi.TYPE_STRING), - 'password': openapi.Schema(type=openapi.TYPE_STRING, format=openapi.FORMAT_PASSWORD) + "username": openapi.Schema(type=openapi.TYPE_STRING), + "password": openapi.Schema( + type=openapi.TYPE_STRING, format=openapi.FORMAT_PASSWORD + ), }, ), responses={200: "Login successful", 400: "Invalid credentials"}, ) def login(self, request): """Logs in a user and returns user details""" - username = request.data.get('username') - password = request.data.get('password') + username = request.data.get("username") + password = request.data.get("password") user = authenticate(request, username=username, password=password) if user: login(request, user) - return Response({ - "message": "Login successful", - "user": { - "id": user.id, - "username": user.username, - "email": user.email, - "groups": list(user.groups.values_list('name', flat=True)) + # Update last_login timestamp + from django.utils import timezone + user.last_login = timezone.now() + user.save(update_fields=['last_login']) + + return Response( + { + "message": "Login successful", + "user": { + "id": user.id, + "username": user.username, + "email": user.email, + "groups": list(user.groups.values_list("name", flat=True)), + }, } - }) + ) return Response({"error": "Invalid credentials"}, status=400) @swagger_auto_schema( operation_description="Log out the current user", - responses={200: "Logout successful"} + responses={200: "Logout successful"}, ) def logout(self, request): """Logs out the authenticated user""" @@ -83,13 +133,19 @@ class AuthViewSet(viewsets.ViewSet): # πŸ”Ή 2) User Management APIs class UserViewSet(viewsets.ModelViewSet): """Handles CRUD operations for Users""" - queryset = User.objects.all() + + queryset = User.objects.all().prefetch_related('channel_profiles') serializer_class = UserSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + if self.action == "me": + return [Authenticated()] + + return [IsAdmin()] @swagger_auto_schema( operation_description="Retrieve a list of users", - responses={200: UserSerializer(many=True)} + responses={200: UserSerializer(many=True)}, ) def list(self, request, *args, **kwargs): return super().list(request, *args, **kwargs) @@ -110,17 +166,28 @@ class UserViewSet(viewsets.ModelViewSet): def destroy(self, request, *args, **kwargs): return super().destroy(request, *args, 
**kwargs) + @swagger_auto_schema( + method="get", + operation_description="Get active user information", + ) + @action(detail=False, methods=["get"], url_path="me") + def me(self, request): + user = request.user + serializer = UserSerializer(user) + return Response(serializer.data) + # πŸ”Ή 3) Group Management APIs class GroupViewSet(viewsets.ModelViewSet): """Handles CRUD operations for Groups""" + queryset = Group.objects.all() serializer_class = GroupSerializer - permission_classes = [IsAuthenticated] + permission_classes = [Authenticated] @swagger_auto_schema( operation_description="Retrieve a list of groups", - responses={200: GroupSerializer(many=True)} + responses={200: GroupSerializer(many=True)}, ) def list(self, request, *args, **kwargs): return super().list(request, *args, **kwargs) @@ -144,12 +211,12 @@ class GroupViewSet(viewsets.ModelViewSet): # πŸ”Ή 4) Permissions List API @swagger_auto_schema( - method='get', + method="get", operation_description="Retrieve a list of all permissions", - responses={200: PermissionSerializer(many=True)} + responses={200: PermissionSerializer(many=True)}, ) -@api_view(['GET']) -@permission_classes([IsAuthenticated]) +@api_view(["GET"]) +@permission_classes([Authenticated]) def list_permissions(request): """Returns a list of all available permissions""" permissions = Permission.objects.all() diff --git a/apps/accounts/apps.py b/apps/accounts/apps.py index fe284bd6..603ea847 100644 --- a/apps/accounts/apps.py +++ b/apps/accounts/apps.py @@ -1,6 +1,7 @@ from django.apps import AppConfig + class AccountsConfig(AppConfig): - default_auto_field = 'django.db.models.BigAutoField' - name = 'apps.accounts' + default_auto_field = "django.db.models.BigAutoField" + name = "apps.accounts" verbose_name = "Accounts & Authentication" diff --git a/apps/accounts/migrations/0002_remove_user_channel_groups_user_channel_profiles_and_more.py b/apps/accounts/migrations/0002_remove_user_channel_groups_user_channel_profiles_and_more.py new file mode 100644 index 00000000..2a095773 --- /dev/null +++ b/apps/accounts/migrations/0002_remove_user_channel_groups_user_channel_profiles_and_more.py @@ -0,0 +1,43 @@ +# Generated by Django 5.1.6 on 2025-05-18 15:47 + +from django.db import migrations, models + + +def set_user_level_to_10(apps, schema_editor): + User = apps.get_model("accounts", "User") + User.objects.update(user_level=10) + + +class Migration(migrations.Migration): + + dependencies = [ + ("accounts", "0001_initial"), + ("dispatcharr_channels", "0021_channel_user_level"), + ] + + operations = [ + migrations.RemoveField( + model_name="user", + name="channel_groups", + ), + migrations.AddField( + model_name="user", + name="channel_profiles", + field=models.ManyToManyField( + blank=True, + related_name="users", + to="dispatcharr_channels.channelprofile", + ), + ), + migrations.AddField( + model_name="user", + name="user_level", + field=models.IntegerField(default=0), + ), + migrations.AddField( + model_name="user", + name="custom_properties", + field=models.TextField(blank=True, null=True), + ), + migrations.RunPython(set_user_level_to_10), + ] diff --git a/apps/accounts/migrations/0003_alter_user_custom_properties.py b/apps/accounts/migrations/0003_alter_user_custom_properties.py new file mode 100644 index 00000000..20411f75 --- /dev/null +++ b/apps/accounts/migrations/0003_alter_user_custom_properties.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-09-02 14:30 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + 
dependencies = [ + ('accounts', '0002_remove_user_channel_groups_user_channel_profiles_and_more'), + ] + + operations = [ + migrations.AlterField( + model_name='user', + name='custom_properties', + field=models.JSONField(blank=True, default=dict, null=True), + ), + ] diff --git a/apps/accounts/models.py b/apps/accounts/models.py index 5b24549f..da5e36bc 100644 --- a/apps/accounts/models.py +++ b/apps/accounts/models.py @@ -2,17 +2,26 @@ from django.db import models from django.contrib.auth.models import AbstractUser, Permission + class User(AbstractUser): """ Custom user model for Dispatcharr. Inherits from Django's AbstractUser to add additional fields if needed. """ + + class UserLevel(models.IntegerChoices): + STREAMER = 0, "Streamer" + STANDARD = 1, "Standard User" + ADMIN = 10, "Admin" + avatar_config = models.JSONField(default=dict, blank=True, null=True) - channel_groups = models.ManyToManyField( - 'dispatcharr_channels.ChannelGroup', # Updated reference to renamed model + channel_profiles = models.ManyToManyField( + "dispatcharr_channels.ChannelProfile", blank=True, - related_name="users" + related_name="users", ) + user_level = models.IntegerField(default=UserLevel.STREAMER) + custom_properties = models.JSONField(default=dict, blank=True, null=True) def __str__(self): return self.username diff --git a/apps/accounts/permissions.py b/apps/accounts/permissions.py new file mode 100644 index 00000000..62673038 --- /dev/null +++ b/apps/accounts/permissions.py @@ -0,0 +1,56 @@ +from rest_framework.permissions import IsAuthenticated +from .models import User +from dispatcharr.utils import network_access_allowed + + +class Authenticated(IsAuthenticated): + def has_permission(self, request, view): + is_authenticated = super().has_permission(request, view) + network_allowed = network_access_allowed(request, "UI") + + return is_authenticated and network_allowed + + +class IsStandardUser(Authenticated): + def has_permission(self, request, view): + if not super().has_permission(request, view): + return False + + return request.user and request.user.user_level >= User.UserLevel.STANDARD + + +class IsAdmin(Authenticated): + def has_permission(self, request, view): + if not super().has_permission(request, view): + return False + + return request.user.user_level >= 10 + + +class IsOwnerOfObject(Authenticated): + def has_object_permission(self, request, view, obj): + if not super().has_permission(request, view): + return False + + is_admin = IsAdmin().has_permission(request, view) + is_owner = request.user in obj.users.all() + + return is_admin or is_owner + + +permission_classes_by_action = { + "list": [IsStandardUser], + "create": [IsAdmin], + "retrieve": [IsStandardUser], + "update": [IsAdmin], + "partial_update": [IsAdmin], + "destroy": [IsAdmin], +} + +permission_classes_by_method = { + "GET": [IsStandardUser], + "POST": [IsAdmin], + "PATCH": [IsAdmin], + "PUT": [IsAdmin], + "DELETE": [IsAdmin], +} diff --git a/apps/accounts/serializers.py b/apps/accounts/serializers.py index 2346946e..865d29af 100644 --- a/apps/accounts/serializers.py +++ b/apps/accounts/serializers.py @@ -1,13 +1,14 @@ from rest_framework import serializers from django.contrib.auth.models import Group, Permission from .models import User +from apps.channels.models import ChannelProfile # πŸ”Ή Fix for Permission serialization class PermissionSerializer(serializers.ModelSerializer): class Meta: model = Permission - fields = ['id', 'name', 'codename'] + fields = ["id", "name", "codename"] # πŸ”Ή Fix for Group serialization @@ 
-18,15 +19,61 @@ class GroupSerializer(serializers.ModelSerializer): class Meta: model = Group - fields = ['id', 'name', 'permissions'] + fields = ["id", "name", "permissions"] # πŸ”Ή Fix for User serialization class UserSerializer(serializers.ModelSerializer): - groups = serializers.SlugRelatedField( - many=True, queryset=Group.objects.all(), slug_field="name" - ) # βœ… Fix ManyToMany `_meta` error + password = serializers.CharField(write_only=True) + channel_profiles = serializers.PrimaryKeyRelatedField( + queryset=ChannelProfile.objects.all(), many=True, required=False + ) class Meta: model = User - fields = ['id', 'username', 'email', 'groups'] + fields = [ + "id", + "username", + "email", + "user_level", + "password", + "channel_profiles", + "custom_properties", + "avatar_config", + "is_active", + "is_staff", + "is_superuser", + "last_login", + "date_joined", + "first_name", + "last_name", + ] + + def create(self, validated_data): + channel_profiles = validated_data.pop("channel_profiles", []) + + user = User(**validated_data) + user.set_password(validated_data["password"]) + user.is_active = True + user.save() + + user.channel_profiles.set(channel_profiles) + + return user + + def update(self, instance, validated_data): + password = validated_data.pop("password", None) + channel_profiles = validated_data.pop("channel_profiles", None) + + for attr, value in validated_data.items(): + setattr(instance, attr, value) + + if password: + instance.set_password(password) + + instance.save() + + if channel_profiles is not None: + instance.channel_profiles.set(channel_profiles) + + return instance diff --git a/apps/accounts/signals.py b/apps/accounts/signals.py index 3bd1e246..dfc4f425 100644 --- a/apps/accounts/signals.py +++ b/apps/accounts/signals.py @@ -5,6 +5,7 @@ from django.db.models.signals import post_save from django.dispatch import receiver from .models import User + @receiver(post_save, sender=User) def handle_new_user(sender, instance, created, **kwargs): if created: diff --git a/apps/api/urls.py b/apps/api/urls.py index a2810f06..7d9edb52 100644 --- a/apps/api/urls.py +++ b/apps/api/urls.py @@ -1,11 +1,10 @@ -from django.urls import path, include +from django.urls import path, include, re_path from drf_yasg.views import get_schema_view from drf_yasg import openapi from rest_framework.permissions import AllowAny app_name = 'api' -# Configure Swagger Schema schema_view = get_schema_view( openapi.Info( title="Dispatcharr API", @@ -26,6 +25,8 @@ urlpatterns = [ path('hdhr/', include(('apps.hdhr.api_urls', 'hdhr'), namespace='hdhr')), path('m3u/', include(('apps.m3u.api_urls', 'm3u'), namespace='m3u')), path('core/', include(('core.api_urls', 'core'), namespace='core')), + path('plugins/', include(('apps.plugins.api_urls', 'plugins'), namespace='plugins')), + path('vod/', include(('apps.vod.api_urls', 'vod'), namespace='vod')), # path('output/', include(('apps.output.api_urls', 'output'), namespace='output')), #path('player/', include(('apps.player.api_urls', 'player'), namespace='player')), #path('settings/', include(('apps.settings.api_urls', 'settings'), namespace='settings')), @@ -34,7 +35,7 @@ urlpatterns = [ # Swagger Documentation api_urls - path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'), + re_path(r'^swagger/?$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'), path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'), path('swagger.json', schema_view.without_ui(cache_timeout=0), 
name='schema-json'), ] diff --git a/apps/channels/api_urls.py b/apps/channels/api_urls.py index 4246373e..7999abd9 100644 --- a/apps/channels/api_urls.py +++ b/apps/channels/api_urls.py @@ -6,12 +6,21 @@ from .api_views import ( ChannelGroupViewSet, BulkDeleteStreamsAPIView, BulkDeleteChannelsAPIView, + BulkDeleteLogosAPIView, + CleanupUnusedLogosAPIView, LogoViewSet, ChannelProfileViewSet, UpdateChannelMembershipAPIView, BulkUpdateChannelMembershipAPIView, RecordingViewSet, + RecurringRecordingRuleViewSet, GetChannelStreamsAPIView, + SeriesRulesAPIView, + DeleteSeriesRuleAPIView, + EvaluateSeriesRulesAPIView, + BulkRemoveSeriesRecordingsAPIView, + BulkDeleteUpcomingRecordingsAPIView, + ComskipConfigAPIView, ) app_name = 'channels' # for DRF routing @@ -23,14 +32,24 @@ router.register(r'channels', ChannelViewSet, basename='channel') router.register(r'logos', LogoViewSet, basename='logo') router.register(r'profiles', ChannelProfileViewSet, basename='profile') router.register(r'recordings', RecordingViewSet, basename='recording') +router.register(r'recurring-rules', RecurringRecordingRuleViewSet, basename='recurring-rule') urlpatterns = [ # Bulk delete is a single APIView, not a ViewSet path('streams/bulk-delete/', BulkDeleteStreamsAPIView.as_view(), name='bulk_delete_streams'), path('channels/bulk-delete/', BulkDeleteChannelsAPIView.as_view(), name='bulk_delete_channels'), + path('logos/bulk-delete/', BulkDeleteLogosAPIView.as_view(), name='bulk_delete_logos'), + path('logos/cleanup/', CleanupUnusedLogosAPIView.as_view(), name='cleanup_unused_logos'), path('channels//streams/', GetChannelStreamsAPIView.as_view(), name='get_channel_streams'), path('profiles//channels//', UpdateChannelMembershipAPIView.as_view(), name='update_channel_membership'), path('profiles//channels/bulk-update/', BulkUpdateChannelMembershipAPIView.as_view(), name='bulk_update_channel_membership'), + # DVR series rules (order matters: specific routes before catch-all slug) + path('series-rules/', SeriesRulesAPIView.as_view(), name='series_rules'), + path('series-rules/evaluate/', EvaluateSeriesRulesAPIView.as_view(), name='evaluate_series_rules'), + path('series-rules/bulk-remove/', BulkRemoveSeriesRecordingsAPIView.as_view(), name='bulk_remove_series_recordings'), + path('series-rules//', DeleteSeriesRuleAPIView.as_view(), name='delete_series_rule'), + path('recordings/bulk-delete-upcoming/', BulkDeleteUpcomingRecordingsAPIView.as_view(), name='bulk_delete_upcoming_recordings'), + path('dvr/comskip-config/', ComskipConfigAPIView.as_view(), name='comskip_config'), ] urlpatterns += router.urls diff --git a/apps/channels/api_views.py b/apps/channels/api_views.py index 890dd247..fc5ea114 100644 --- a/apps/channels/api_views.py +++ b/apps/channels/api_views.py @@ -1,57 +1,117 @@ from rest_framework import viewsets, status from rest_framework.response import Response from rest_framework.views import APIView -from rest_framework.permissions import IsAuthenticated, AllowAny +from rest_framework.permissions import AllowAny from rest_framework.decorators import action -from rest_framework.parsers import MultiPartParser, FormParser +from rest_framework.parsers import MultiPartParser, FormParser, JSONParser from drf_yasg.utils import swagger_auto_schema from drf_yasg import openapi from django.shortcuts import get_object_or_404, get_list_or_404 from django.db import transaction -import os, json, requests +import os, json, requests, logging +from apps.accounts.permissions import ( + Authenticated, + IsAdmin, + IsOwnerOfObject, + 
permission_classes_by_action, + permission_classes_by_method, +) -from .models import Stream, Channel, ChannelGroup, Logo, ChannelProfile, ChannelProfileMembership, Recording -from .serializers import StreamSerializer, ChannelSerializer, ChannelGroupSerializer, LogoSerializer, ChannelProfileMembershipSerializer, BulkChannelProfileMembershipSerializer, ChannelProfileSerializer, RecordingSerializer -from .tasks import match_epg_channels +from core.models import UserAgent, CoreSettings +from core.utils import RedisClient + +from .models import ( + Stream, + Channel, + ChannelGroup, + Logo, + ChannelProfile, + ChannelProfileMembership, + Recording, + RecurringRecordingRule, +) +from .serializers import ( + StreamSerializer, + ChannelSerializer, + ChannelGroupSerializer, + LogoSerializer, + ChannelProfileMembershipSerializer, + BulkChannelProfileMembershipSerializer, + ChannelProfileSerializer, + RecordingSerializer, + RecurringRecordingRuleSerializer, +) +from .tasks import ( + match_epg_channels, + evaluate_series_rules, + evaluate_series_rules_impl, + match_single_channel_epg, + match_selected_channels_epg, + sync_recurring_rule_impl, + purge_recurring_rule_impl, +) import django_filters from django_filters.rest_framework import DjangoFilterBackend from rest_framework.filters import SearchFilter, OrderingFilter from apps.epg.models import EPGData +from apps.vod.models import Movie, Series from django.db.models import Q from django.http import StreamingHttpResponse, FileResponse, Http404 +from django.utils import timezone import mimetypes +from django.conf import settings from rest_framework.pagination import PageNumberPagination + +logger = logging.getLogger(__name__) + + class OrInFilter(django_filters.Filter): """ Custom filter that handles the OR condition instead of AND. 
""" + def filter(self, queryset, value): if value: # Create a Q object for each value and combine them with OR query = Q() - for val in value.split(','): + for val in value.split(","): query |= Q(**{self.field_name: val}) return queryset.filter(query) return queryset + class StreamPagination(PageNumberPagination): - page_size = 25 # Default page size - page_size_query_param = 'page_size' # Allow clients to specify page size + page_size = 50 # Default page size to match frontend default + page_size_query_param = "page_size" # Allow clients to specify page size max_page_size = 10000 # Prevent excessive page sizes + class StreamFilter(django_filters.FilterSet): - name = django_filters.CharFilter(lookup_expr='icontains') - channel_group_name = OrInFilter(field_name="channel_group__name", lookup_expr="icontains") + name = django_filters.CharFilter(lookup_expr="icontains") + channel_group_name = OrInFilter( + field_name="channel_group__name", lookup_expr="icontains" + ) m3u_account = django_filters.NumberFilter(field_name="m3u_account__id") - m3u_account_name = django_filters.CharFilter(field_name="m3u_account__name", lookup_expr="icontains") - m3u_account_is_active = django_filters.BooleanFilter(field_name="m3u_account__is_active") + m3u_account_name = django_filters.CharFilter( + field_name="m3u_account__name", lookup_expr="icontains" + ) + m3u_account_is_active = django_filters.BooleanFilter( + field_name="m3u_account__is_active" + ) class Meta: model = Stream - fields = ['name', 'channel_group_name', 'm3u_account', 'm3u_account_name', 'm3u_account_is_active'] + fields = [ + "name", + "channel_group_name", + "m3u_account", + "m3u_account_name", + "m3u_account_is_active", + ] + # ───────────────────────────────────────────────────────── # 1) Stream API (CRUD) @@ -59,46 +119,51 @@ class StreamFilter(django_filters.FilterSet): class StreamViewSet(viewsets.ModelViewSet): queryset = Stream.objects.all() serializer_class = StreamSerializer - permission_classes = [IsAuthenticated] pagination_class = StreamPagination filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter] filterset_class = StreamFilter - search_fields = ['name', 'channel_group__name'] - ordering_fields = ['name', 'channel_group__name'] - ordering = ['-name'] + search_fields = ["name", "channel_group__name"] + ordering_fields = ["name", "channel_group__name"] + ordering = ["-name"] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] def get_queryset(self): qs = super().get_queryset() # Exclude streams from inactive M3U accounts qs = qs.exclude(m3u_account__is_active=False) - assigned = self.request.query_params.get('assigned') + assigned = self.request.query_params.get("assigned") if assigned is not None: qs = qs.filter(channels__id=assigned) - unassigned = self.request.query_params.get('unassigned') - if unassigned == '1': + unassigned = self.request.query_params.get("unassigned") + if unassigned == "1": qs = qs.filter(channels__isnull=True) - channel_group = self.request.query_params.get('channel_group') + channel_group = self.request.query_params.get("channel_group") if channel_group: - group_names = channel_group.split(',') + group_names = channel_group.split(",") qs = qs.filter(channel_group__name__in=group_names) return qs def list(self, request, *args, **kwargs): - ids = request.query_params.get('ids', None) + ids = request.query_params.get("ids", None) if ids: - ids = ids.split(',') + ids = ids.split(",") 
streams = get_list_or_404(Stream, id__in=ids) serializer = self.get_serializer(streams, many=True) return Response(serializer.data) return super().list(request, *args, **kwargs) - @action(detail=False, methods=['get'], url_path='ids') + @action(detail=False, methods=["get"], url_path="ids") def get_ids(self, request, *args, **kwargs): # Get the filtered queryset queryset = self.get_queryset() @@ -107,109 +172,433 @@ class StreamViewSet(viewsets.ModelViewSet): queryset = self.filter_queryset(queryset) # Return only the IDs from the queryset - stream_ids = queryset.values_list('id', flat=True) + stream_ids = queryset.values_list("id", flat=True) # Return the response with the list of IDs return Response(list(stream_ids)) - @action(detail=False, methods=['get'], url_path='groups') + @action(detail=False, methods=["get"], url_path="groups") def get_groups(self, request, *args, **kwargs): # Get unique ChannelGroup names that are linked to streams - group_names = ChannelGroup.objects.filter(streams__isnull=False).order_by('name').values_list('name', flat=True).distinct() + group_names = ( + ChannelGroup.objects.filter(streams__isnull=False) + .order_by("name") + .values_list("name", flat=True) + .distinct() + ) # Return the response with the list of unique group names return Response(list(group_names)) + @swagger_auto_schema( + method="post", + operation_description="Retrieve streams by a list of IDs using POST to avoid URL length limitations", + request_body=openapi.Schema( + type=openapi.TYPE_OBJECT, + required=["ids"], + properties={ + "ids": openapi.Schema( + type=openapi.TYPE_ARRAY, + items=openapi.Items(type=openapi.TYPE_INTEGER), + description="List of stream IDs to retrieve" + ), + }, + ), + responses={200: StreamSerializer(many=True)}, + ) + @action(detail=False, methods=["post"], url_path="by-ids") + def get_by_ids(self, request, *args, **kwargs): + ids = request.data.get("ids", []) + if not isinstance(ids, list): + return Response( + {"error": "ids must be a list of integers"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + streams = Stream.objects.filter(id__in=ids) + serializer = self.get_serializer(streams, many=True) + return Response(serializer.data) + + # ───────────────────────────────────────────────────────── # 2) Channel Group Management (CRUD) # ───────────────────────────────────────────────────────── class ChannelGroupViewSet(viewsets.ModelViewSet): queryset = ChannelGroup.objects.all() serializer_class = ChannelGroupSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + + def get_queryset(self): + """Add annotation for association counts""" + from django.db.models import Count + return ChannelGroup.objects.annotate( + channel_count=Count('channels', distinct=True), + m3u_account_count=Count('m3u_accounts', distinct=True) + ) + + def update(self, request, *args, **kwargs): + """Override update to check M3U associations""" + instance = self.get_object() + + # Check if group has M3U account associations + if hasattr(instance, 'm3u_account') and instance.m3u_account.exists(): + return Response( + {"error": "Cannot edit group with M3U account associations"}, + status=status.HTTP_400_BAD_REQUEST + ) + + return super().update(request, *args, **kwargs) + + def partial_update(self, request, *args, **kwargs): + """Override partial_update to check M3U associations""" + instance = self.get_object() + + # Check if group has M3U 
account associations + if hasattr(instance, 'm3u_account') and instance.m3u_account.exists(): + return Response( + {"error": "Cannot edit group with M3U account associations"}, + status=status.HTTP_400_BAD_REQUEST + ) + + return super().partial_update(request, *args, **kwargs) + + @swagger_auto_schema( + method="post", + operation_description="Delete all channel groups that have no associations (no channels or M3U accounts)", + responses={200: "Cleanup completed"}, + ) + @action(detail=False, methods=["post"], url_path="cleanup") + def cleanup_unused_groups(self, request): + """Delete all channel groups with no channels or M3U account associations""" + from django.db.models import Count + + # Find groups with no channels and no M3U account associations + unused_groups = ChannelGroup.objects.annotate( + channel_count=Count('channels', distinct=True), + m3u_account_count=Count('m3u_accounts', distinct=True) + ).filter( + channel_count=0, + m3u_account_count=0 + ) + + deleted_count = unused_groups.count() + group_names = list(unused_groups.values_list('name', flat=True)) + + # Delete the unused groups + unused_groups.delete() + + return Response({ + "message": f"Successfully deleted {deleted_count} unused channel groups", + "deleted_count": deleted_count, + "deleted_groups": group_names + }) + + def destroy(self, request, *args, **kwargs): + """Override destroy to check for associations before deletion""" + instance = self.get_object() + + # Check if group has associated channels + if instance.channels.exists(): + return Response( + {"error": "Cannot delete group with associated channels"}, + status=status.HTTP_400_BAD_REQUEST + ) + + # Check if group has M3U account associations + if hasattr(instance, 'm3u_account') and instance.m3u_account.exists(): + return Response( + {"error": "Cannot delete group with M3U account associations"}, + status=status.HTTP_400_BAD_REQUEST + ) + + return super().destroy(request, *args, **kwargs) # ───────────────────────────────────────────────────────── # 3) Channel Management (CRUD) # ───────────────────────────────────────────────────────── class ChannelPagination(PageNumberPagination): - page_size = 25 # Default page size - page_size_query_param = 'page_size' # Allow clients to specify page size + page_size = 50 # Default page size to match frontend default + page_size_query_param = "page_size" # Allow clients to specify page size max_page_size = 10000 # Prevent excessive page sizes - def paginate_queryset(self, queryset, request, view=None): if not request.query_params.get(self.page_query_param): return None # disables pagination, returns full queryset return super().paginate_queryset(queryset, request, view) + +class EPGFilter(django_filters.Filter): + """ + Filter channels by EPG source name or null (unlinked). 
+ """ + def filter(self, queryset, value): + if not value: + return queryset + + # Split comma-separated values + values = [v.strip() for v in value.split(',')] + query = Q() + + for val in values: + if val == 'null': + # Filter for channels with no EPG data + query |= Q(epg_data__isnull=True) + else: + # Filter for channels with specific EPG source name + query |= Q(epg_data__epg_source__name__icontains=val) + + return queryset.filter(query) + + class ChannelFilter(django_filters.FilterSet): - name = django_filters.CharFilter(lookup_expr='icontains') - channel_group_name = OrInFilter(field_name="channel_group__name", lookup_expr="icontains") + name = django_filters.CharFilter(lookup_expr="icontains") + channel_group = OrInFilter( + field_name="channel_group__name", lookup_expr="icontains" + ) + epg = EPGFilter() class Meta: model = Channel - fields = ['name', 'channel_group_name',] + fields = [ + "name", + "channel_group", + "epg", + ] + class ChannelViewSet(viewsets.ModelViewSet): queryset = Channel.objects.all() serializer_class = ChannelSerializer - permission_classes = [IsAuthenticated] pagination_class = ChannelPagination filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter] filterset_class = ChannelFilter - search_fields = ['name', 'channel_group__name'] - ordering_fields = ['channel_number', 'name', 'channel_group__name'] - ordering = ['-channel_number'] + search_fields = ["name", "channel_group__name"] + ordering_fields = ["channel_number", "name", "channel_group__name"] + ordering = ["-channel_number"] + + def get_permissions(self): + if self.action in [ + "edit_bulk", + "assign", + "from_stream", + "from_stream_bulk", + "match_epg", + "set_epg", + "batch_set_epg", + ]: + return [IsAdmin()] + + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] def get_queryset(self): - qs = super().get_queryset().select_related( - 'channel_group', - 'logo', - 'epg_data', - 'stream_profile', - ).prefetch_related('streams') + qs = ( + super() + .get_queryset() + .select_related( + "channel_group", + "logo", + "epg_data", + "stream_profile", + ) + .prefetch_related("streams") + ) - channel_group = self.request.query_params.get('channel_group') + channel_group = self.request.query_params.get("channel_group") if channel_group: - group_names = channel_group.split(',') + group_names = channel_group.split(",") qs = qs.filter(channel_group__name__in=group_names) + if self.request.user.user_level < 10: + qs = qs.filter(user_level__lte=self.request.user.user_level) + return qs def get_serializer_context(self): context = super().get_serializer_context() - include_streams = self.request.query_params.get('include_streams', 'false') == 'true' - context['include_streams'] = include_streams + include_streams = ( + self.request.query_params.get("include_streams", "false") == "true" + ) + context["include_streams"] = include_streams return context - @action(detail=False, methods=['patch'], url_path='edit/bulk') + @action(detail=False, methods=["patch"], url_path="edit/bulk") def edit_bulk(self, request): - data_list = request.data - if not isinstance(data_list, list): - return Response({"error": "Expected a list of channel objects objects"}, status=status.HTTP_400_BAD_REQUEST) + """ + Bulk edit channels. + Expects a list of channels with their updates. 
+ """ + data = request.data + if not isinstance(data, list): + return Response( + {"error": "Expected a list of channel updates"}, + status=status.HTTP_400_BAD_REQUEST, + ) updated_channels = [] - try: - with transaction.atomic(): - for item in data_list: - channel = Channel.objects.id(id=item.pop('id')) - for key, value in item.items(): - setattr(channel, key, value) + errors = [] - channel.save(update_fields=item.keys()) - updated_channels.append(channel) - except Exception as e: - logger.error("Error during bulk channel edit", e) - return Response({"error": e}, status=500) + for channel_data in data: + channel_id = channel_data.get("id") + if not channel_id: + errors.append({"error": "Channel ID is required"}) + continue - response_data = ChannelSerializer(updated_channels, many=True).data + try: + channel = Channel.objects.get(id=channel_id) - return Response(response_data, status=status.HTTP_200_OK) + # Handle channel_group_id properly - convert string to integer if needed + if 'channel_group_id' in channel_data: + group_id = channel_data['channel_group_id'] + if group_id is not None: + try: + channel_data['channel_group_id'] = int(group_id) + except (ValueError, TypeError): + channel_data['channel_group_id'] = None - @action(detail=False, methods=['get'], url_path='ids') + # Use the serializer to validate and update + serializer = ChannelSerializer( + channel, data=channel_data, partial=True + ) + + if serializer.is_valid(): + updated_channel = serializer.save() + updated_channels.append(updated_channel) + else: + errors.append({ + "channel_id": channel_id, + "errors": serializer.errors + }) + + except Channel.DoesNotExist: + errors.append({ + "channel_id": channel_id, + "error": "Channel not found" + }) + except Exception as e: + errors.append({ + "channel_id": channel_id, + "error": str(e) + }) + + if errors: + return Response( + {"errors": errors, "updated_count": len(updated_channels)}, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Serialize the updated channels for response + serialized_channels = ChannelSerializer(updated_channels, many=True).data + + return Response({ + "message": f"Successfully updated {len(updated_channels)} channels", + "channels": serialized_channels + }) + + @action(detail=False, methods=["post"], url_path="set-names-from-epg") + def set_names_from_epg(self, request): + """ + Trigger a Celery task to set channel names from EPG data + """ + from .tasks import set_channels_names_from_epg + + data = request.data + channel_ids = data.get("channel_ids", []) + + if not channel_ids: + return Response( + {"error": "channel_ids is required"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + if not isinstance(channel_ids, list): + return Response( + {"error": "channel_ids must be a list"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Start the Celery task + task = set_channels_names_from_epg.delay(channel_ids) + + return Response({ + "message": f"Started EPG name setting task for {len(channel_ids)} channels", + "task_id": task.id, + "channel_count": len(channel_ids) + }) + + @action(detail=False, methods=["post"], url_path="set-logos-from-epg") + def set_logos_from_epg(self, request): + """ + Trigger a Celery task to set channel logos from EPG data + """ + from .tasks import set_channels_logos_from_epg + + data = request.data + channel_ids = data.get("channel_ids", []) + + if not channel_ids: + return Response( + {"error": "channel_ids is required"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + if not isinstance(channel_ids, list): + return Response( + {"error": 
"channel_ids must be a list"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Start the Celery task + task = set_channels_logos_from_epg.delay(channel_ids) + + return Response({ + "message": f"Started EPG logo setting task for {len(channel_ids)} channels", + "task_id": task.id, + "channel_count": len(channel_ids) + }) + + @action(detail=False, methods=["post"], url_path="set-tvg-ids-from-epg") + def set_tvg_ids_from_epg(self, request): + """ + Trigger a Celery task to set channel TVG-IDs from EPG data + """ + from .tasks import set_channels_tvg_ids_from_epg + + data = request.data + channel_ids = data.get("channel_ids", []) + + if not channel_ids: + return Response( + {"error": "channel_ids is required"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + if not isinstance(channel_ids, list): + return Response( + {"error": "channel_ids must be a list"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Start the Celery task + task = set_channels_tvg_ids_from_epg.delay(channel_ids) + + return Response({ + "message": f"Started EPG TVG-ID setting task for {len(channel_ids)} channels", + "task_id": task.id, + "channel_count": len(channel_ids) + }) + + @action(detail=False, methods=["get"], url_path="ids") def get_ids(self, request, *args, **kwargs): # Get the filtered queryset queryset = self.get_queryset() @@ -218,35 +607,38 @@ class ChannelViewSet(viewsets.ModelViewSet): queryset = self.filter_queryset(queryset) # Return only the IDs from the queryset - channel_ids = queryset.values_list('id', flat=True) + channel_ids = queryset.values_list("id", flat=True) # Return the response with the list of IDs return Response(list(channel_ids)) @swagger_auto_schema( - method='post', + method="post", operation_description="Auto-assign channel_number in bulk by an ordered list of channel IDs.", request_body=openapi.Schema( type=openapi.TYPE_OBJECT, required=["channel_ids"], properties={ - "starting_number": openapi.Schema(type=openapi.TYPE_NUMBER, description="Starting channel number to assign (can be decimal)"), + "starting_number": openapi.Schema( + type=openapi.TYPE_NUMBER, + description="Starting channel number to assign (can be decimal)", + ), "channel_ids": openapi.Schema( type=openapi.TYPE_ARRAY, items=openapi.Items(type=openapi.TYPE_INTEGER), - description="Channel IDs to assign" - ) - } + description="Channel IDs to assign", + ), + }, ), - responses={200: "Channels have been auto-assigned!"} + responses={200: "Channels have been auto-assigned!"}, ) - @action(detail=False, methods=['post'], url_path='assign') + @action(detail=False, methods=["post"], url_path="assign") def assign(self, request): with transaction.atomic(): - channel_ids = request.data.get('channel_ids', []) + channel_ids = request.data.get("channel_ids", []) # Ensure starting_number is processed as a float try: - channel_num = float(request.data.get('starting_number', 1)) + channel_num = float(request.data.get("starting_number", 1)) except (ValueError, TypeError): channel_num = 1.0 @@ -254,14 +646,18 @@ class ChannelViewSet(viewsets.ModelViewSet): Channel.objects.filter(id=channel_id).update(channel_number=channel_num) channel_num = channel_num + 1 - return Response({"message": "Channels have been auto-assigned!"}, status=status.HTTP_200_OK) + return Response( + {"message": "Channels have been auto-assigned!"}, status=status.HTTP_200_OK + ) @swagger_auto_schema( - method='post', + method="post", operation_description=( "Create a new channel from an existing stream. 
" "If 'channel_number' is provided, it will be used (if available); " - "otherwise, the next available channel number is assigned." + "otherwise, the next available channel number is assigned. " + "If 'channel_profile_ids' is provided, the channel will only be added to those profiles. " + "Accepts either a single ID or an array of IDs." ), request_body=openapi.Schema( type=openapi.TYPE_OBJECT, @@ -272,72 +668,93 @@ class ChannelViewSet(viewsets.ModelViewSet): ), "channel_number": openapi.Schema( type=openapi.TYPE_NUMBER, - description="(Optional) Desired channel number. Must not be in use." + description="(Optional) Desired channel number. Must not be in use.", ), "name": openapi.Schema( type=openapi.TYPE_STRING, description="Desired channel name" - ) - } + ), + "channel_profile_ids": openapi.Schema( + type=openapi.TYPE_ARRAY, + items=openapi.Items(type=openapi.TYPE_INTEGER), + description="(Optional) Channel profile ID(s) to add the channel to. Can be a single ID or array of IDs. If not provided, channel is added to all profiles." + ), + }, ), - responses={201: ChannelSerializer()} + responses={201: ChannelSerializer()}, ) - @action(detail=False, methods=['post'], url_path='from-stream') + @action(detail=False, methods=["post"], url_path="from-stream") def from_stream(self, request): - stream_id = request.data.get('stream_id') + stream_id = request.data.get("stream_id") if not stream_id: - return Response({"error": "Missing stream_id"}, status=status.HTTP_400_BAD_REQUEST) + return Response( + {"error": "Missing stream_id"}, status=status.HTTP_400_BAD_REQUEST + ) stream = get_object_or_404(Stream, pk=stream_id) channel_group = stream.channel_group - name = request.data.get('name') + name = request.data.get("name") + + if name is None: name = stream.name # Check if client provided a channel_number; if not, auto-assign one. - stream_custom_props = json.loads(stream.custom_properties) if stream.custom_properties else {} - - channel_number = None - if 'tvg-chno' in stream_custom_props: - channel_number = float(stream_custom_props['tvg-chno']) - elif 'channel-number' in stream_custom_props: - channel_number = float(stream_custom_props['channel-number']) + stream_custom_props = stream.custom_properties or {} + channel_number = request.data.get("channel_number") if channel_number is None: - provided_number = request.data.get('channel_number') - if provided_number is None: - channel_number = Channel.get_next_available_channel_number() - else: - try: - channel_number = float(provided_number) - except ValueError: - return Response({"error": "channel_number must be an integer."}, status=status.HTTP_400_BAD_REQUEST) - # If the provided number is already used, return an error. - if Channel.objects.filter(channel_number=channel_number).exists(): - return Response( - {"error": f"Channel number {channel_number} is already in use. 
Please choose a different number."}, - status=status.HTTP_400_BAD_REQUEST - ) - #Get the tvc_guide_stationid from custom properties if it exists + # Channel number not provided by client, check stream properties or auto-assign + if "tvg-chno" in stream_custom_props: + channel_number = float(stream_custom_props["tvg-chno"]) + elif "channel-number" in stream_custom_props: + channel_number = float(stream_custom_props["channel-number"]) + elif "num" in stream_custom_props: + channel_number = float(stream_custom_props["num"]) + elif channel_number == 0: + # Special case: 0 means ignore provider numbers and auto-assign + channel_number = None + + if channel_number is None: + # Still None, auto-assign the next available channel number + channel_number = Channel.get_next_available_channel_number() + + + try: + channel_number = float(channel_number) + except ValueError: + return Response( + {"error": "channel_number must be an integer."}, + status=status.HTTP_400_BAD_REQUEST, + ) + # If the provided number is already used, return an error. + if Channel.objects.filter(channel_number=channel_number).exists(): + channel_number = Channel.get_next_available_channel_number(channel_number) + # Get the tvc_guide_stationid from custom properties if it exists tvc_guide_stationid = None - if 'tvc-guide-stationid' in stream_custom_props: - tvc_guide_stationid = stream_custom_props['tvc-guide-stationid'] - - + if "tvc-guide-stationid" in stream_custom_props: + tvc_guide_stationid = stream_custom_props["tvc-guide-stationid"] channel_data = { - 'channel_number': channel_number, - 'name': name, - 'tvg_id': stream.tvg_id, - 'tvc_guide_stationid': tvc_guide_stationid, - 'channel_group_id': channel_group.id, - 'streams': [stream_id], + "channel_number": channel_number, + "name": name, + "tvg_id": stream.tvg_id, + "tvc_guide_stationid": tvc_guide_stationid, + "streams": [stream_id], } + # Only add channel_group_id if the stream has a channel group + if channel_group: + channel_data["channel_group_id"] = channel_group.id + if stream.logo_url: - logo, _ = Logo.objects.get_or_create(url=stream.logo_url, defaults={ - "name": stream.name or stream.tvg_id - }) - channel_data["logo_id"] = logo.id + # Import validation function + from apps.channels.tasks import validate_logo_url + validated_logo_url = validate_logo_url(stream.logo_url) + if validated_logo_url: + logo, _ = Logo.objects.get_or_create( + url=validated_logo_url, defaults={"name": stream.name or stream.tvg_id} + ) + channel_data["logo_id"] = logo.id # Attempt to find existing EPGs with the same tvg-id epgs = EPGData.objects.filter(tvg_id=stream.tvg_id) @@ -346,274 +763,289 @@ class ChannelViewSet(viewsets.ModelViewSet): serializer = self.get_serializer(data=channel_data) serializer.is_valid(raise_exception=True) - channel = serializer.save() - channel.streams.add(stream) + + with transaction.atomic(): + channel = serializer.save() + channel.streams.add(stream) + + # Handle channel profile membership + channel_profile_ids = request.data.get("channel_profile_ids") + if channel_profile_ids is not None: + # Normalize single ID to array + if not isinstance(channel_profile_ids, list): + channel_profile_ids = [channel_profile_ids] + + if channel_profile_ids: + # Add channel only to the specified profiles + try: + channel_profiles = ChannelProfile.objects.filter(id__in=channel_profile_ids) + if len(channel_profiles) != len(channel_profile_ids): + missing_ids = set(channel_profile_ids) - set(channel_profiles.values_list('id', flat=True)) + return Response( + {"error": 
f"Channel profiles with IDs {list(missing_ids)} not found"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + ChannelProfileMembership.objects.bulk_create([ + ChannelProfileMembership( + channel_profile=profile, + channel=channel, + enabled=True + ) + for profile in channel_profiles + ]) + except Exception as e: + return Response( + {"error": f"Error creating profile memberships: {str(e)}"}, + status=status.HTTP_400_BAD_REQUEST, + ) + else: + # Default behavior: add to all profiles + profiles = ChannelProfile.objects.all() + ChannelProfileMembership.objects.bulk_create([ + ChannelProfileMembership(channel_profile=profile, channel=channel, enabled=True) + for profile in profiles + ]) + + # Send WebSocket notification for single channel creation + from core.utils import send_websocket_update + send_websocket_update('updates', 'update', { + 'type': 'channels_created', + 'count': 1, + 'channel_id': channel.id, + 'channel_name': channel.name, + 'channel_number': channel.channel_number + }) + return Response(serializer.data, status=status.HTTP_201_CREATED) @swagger_auto_schema( - method='post', + method="post", operation_description=( - "Bulk create channels from existing streams. For each object, if 'channel_number' is provided, " - "it is used (if available); otherwise, the next available number is auto-assigned. " - "Each object must include 'stream_id' and 'name'." + "Asynchronously bulk create channels from stream IDs. " + "Returns a task ID to track progress via WebSocket. " + "This is the recommended approach for large bulk operations." ), request_body=openapi.Schema( - type=openapi.TYPE_ARRAY, - items=openapi.Schema( - type=openapi.TYPE_OBJECT, - required=["stream_id"], - properties={ - "stream_id": openapi.Schema( - type=openapi.TYPE_INTEGER, description="ID of the stream to link" - ), - "channel_number": openapi.Schema( - type=openapi.TYPE_NUMBER, - description="(Optional) Desired channel number. Must not be in use." - ), - "name": openapi.Schema( - type=openapi.TYPE_STRING, description="Desired channel name" - ) - } - ) + type=openapi.TYPE_OBJECT, + required=["stream_ids"], + properties={ + "stream_ids": openapi.Schema( + type=openapi.TYPE_ARRAY, + items=openapi.Items(type=openapi.TYPE_INTEGER), + description="List of stream IDs to create channels from" + ), + "channel_profile_ids": openapi.Schema( + type=openapi.TYPE_ARRAY, + items=openapi.Items(type=openapi.TYPE_INTEGER), + description="(Optional) Channel profile ID(s) to add the channels to. If not provided, channels are added to all profiles." + ), + "starting_channel_number": openapi.Schema( + type=openapi.TYPE_INTEGER, + description="(Optional) Starting channel number mode: null=use provider numbers, 0=lowest available, other=start from specified number" + ), + }, ), - responses={201: "Bulk channels created"} + responses={202: "Task started successfully"}, ) - @action(detail=False, methods=['post'], url_path='from-stream/bulk') + @action(detail=False, methods=["post"], url_path="from-stream/bulk") def from_stream_bulk(self, request): - data_list = request.data - if not isinstance(data_list, list): - return Response({"error": "Expected a list of channel objects"}, status=status.HTTP_400_BAD_REQUEST) + from .tasks import bulk_create_channels_from_streams - created_channels = [] - errors = [] + stream_ids = request.data.get("stream_ids", []) + channel_profile_ids = request.data.get("channel_profile_ids") + starting_channel_number = request.data.get("starting_channel_number") - # Gather current used numbers once. 
- used_numbers = set(Channel.objects.all().values_list('channel_number', flat=True)) - next_number = 1 + if not stream_ids: + return Response( + {"error": "stream_ids is required and cannot be empty"}, + status=status.HTTP_400_BAD_REQUEST, + ) - def get_auto_number(): - nonlocal next_number - while next_number in used_numbers: - next_number += 1 - used_numbers.add(next_number) - return next_number + if not isinstance(stream_ids, list): + return Response( + {"error": "stream_ids must be a list of integers"}, + status=status.HTTP_400_BAD_REQUEST, + ) - logos_to_create = [] - channels_to_create = [] - streams_map = [] - logo_map = [] - for item in data_list: - stream_id = item.get('stream_id') - if not all([stream_id]): - errors.append({"item": item, "error": "Missing required fields: stream_id and name are required."}) - continue + # Normalize channel_profile_ids to array if single ID provided + if channel_profile_ids is not None: + if not isinstance(channel_profile_ids, list): + channel_profile_ids = [channel_profile_ids] - try: - stream = get_object_or_404(Stream, pk=stream_id) - except Exception as e: - errors.append({"item": item, "error": str(e)}) - continue + # Start the async task + task = bulk_create_channels_from_streams.delay(stream_ids, channel_profile_ids, starting_channel_number) - name = item.get('name') - if name is None: - name = stream.name - - channel_group = stream.channel_group - - stream_custom_props = json.loads(stream.custom_properties) if stream.custom_properties else {} - - channel_number = None - if 'tvg-chno' in stream_custom_props: - channel_number = float(stream_custom_props['tvg-chno']) - elif 'channel-number' in stream_custom_props: - channel_number = float(stream_custom_props['channel-number']) - # Get the tvc_guide_stationid from custom properties if it exists - tvc_guide_stationid = None - if 'tvc-guide-stationid' in stream_custom_props: - tvc_guide_stationid = stream_custom_props['tvc-guide-stationid'] - - # Determine channel number: if provided, use it (if free); else auto assign. 
- if channel_number is None: - provided_number = item.get('channel_number') - if provided_number is None: - channel_number = get_auto_number() - else: - try: - channel_number = float(provided_number) - except ValueError: - errors.append({"item": item, "error": "channel_number must be an integer."}) - continue - if channel_number in used_numbers or Channel.objects.filter(channel_number=channel_number).exists(): - errors.append({"item": item, "error": f"Channel number {channel_number} is already in use."}) - continue - used_numbers.add(channel_number) - - channel_data = { - "channel_number": channel_number, - "name": name, - 'tvc_guide_stationid': tvc_guide_stationid, - "tvg_id": stream.tvg_id, - "channel_group_id": channel_group.id, - } - - # Attempt to find existing EPGs with the same tvg-id - epgs = EPGData.objects.filter(tvg_id=stream.tvg_id) - if epgs: - channel_data["epg_data_id"] = epgs.first().id - - serializer = self.get_serializer(data=channel_data) - if serializer.is_valid(): - validated_data = serializer.validated_data - channel = Channel(**validated_data) - channels_to_create.append(channel) - - streams_map.append([stream_id]) - if stream.logo_url: - logos_to_create.append(Logo( - url=stream.logo_url, - name=stream.name or stream.tvg_id, - )) - logo_map.append(stream.logo_url) - else: - logo_map.append(None) - - # channel = serializer.save() - # channel.streams.add(stream) - # created_channels.append(serializer.data) - else: - errors.append({"item": item, "error": serializer.errors}) - - if logos_to_create: - Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True) - - channel_logos = {logo.url: logo for logo in Logo.objects.filter(url__in=[url for url in logo_map if url is not None])} - - profiles = ChannelProfile.objects.all() - channel_profile_memberships = [] - if channels_to_create: - with transaction.atomic(): - created_channels = Channel.objects.bulk_create(channels_to_create) - - update = [] - for channel, stream_ids, logo_url in zip(created_channels, streams_map, logo_map): - if logo_url: - channel.logo = channel_logos[logo_url] - update.append(channel) - channel_profile_memberships = channel_profile_memberships + [ - ChannelProfileMembership(channel_profile=profile, channel=channel) - for profile in profiles - ] - - ChannelProfileMembership.objects.bulk_create(channel_profile_memberships) - Channel.objects.bulk_update(update, ['logo']) - - for channel, stream_ids in zip(created_channels, streams_map): - channel.streams.set(stream_ids) - - response_data = {"created": ChannelSerializer(created_channels, many=True).data} - if errors: - response_data["errors"] = errors - - return Response(response_data, status=status.HTTP_201_CREATED) + return Response({ + "task_id": task.id, + "message": f"Bulk channel creation task started for {len(stream_ids)} streams", + "stream_count": len(stream_ids), + "status": "started" + }, status=status.HTTP_202_ACCEPTED) # ───────────────────────────────────────────────────────── # 6) EPG Fuzzy Matching # ───────────────────────────────────────────────────────── @swagger_auto_schema( - method='post', - operation_description="Kick off a Celery task that tries to fuzzy-match channels with EPG data.", - responses={202: "EPG matching task initiated"} + method="post", + operation_description="Kick off a Celery task that tries to fuzzy-match channels with EPG data. 
If channel_ids are provided, only those channels will be processed.", + request_body=openapi.Schema( + type=openapi.TYPE_OBJECT, + properties={ + 'channel_ids': openapi.Schema( + type=openapi.TYPE_ARRAY, + items=openapi.Schema(type=openapi.TYPE_INTEGER), + description='List of channel IDs to process. If empty or not provided, all channels without EPG will be processed.' + ) + } + ), + responses={202: "EPG matching task initiated"}, ) - @action(detail=False, methods=['post'], url_path='match-epg') + @action(detail=False, methods=["post"], url_path="match-epg") def match_epg(self, request): - match_epg_channels.delay() - return Response({"message": "EPG matching task initiated."}, status=status.HTTP_202_ACCEPTED) + # Get channel IDs from request body if provided + channel_ids = request.data.get('channel_ids', []) + + if channel_ids: + # Process only selected channels + from .tasks import match_selected_channels_epg + match_selected_channels_epg.delay(channel_ids) + message = f"EPG matching task initiated for {len(channel_ids)} selected channel(s)." + else: + # Process all channels without EPG (original behavior) + match_epg_channels.delay() + message = "EPG matching task initiated for all channels without EPG." + + return Response( + {"message": message}, status=status.HTTP_202_ACCEPTED + ) + + @swagger_auto_schema( + method="post", + operation_description="Try to auto-match this specific channel with EPG data.", + responses={200: "EPG matching completed", 202: "EPG matching task initiated"}, + ) + @action(detail=True, methods=["post"], url_path="match-epg") + def match_channel_epg(self, request, pk=None): + channel = self.get_object() + + # Import the matching logic + from apps.channels.tasks import match_single_channel_epg + + try: + # Try to match this specific channel - call synchronously for immediate response + result = match_single_channel_epg.apply_async(args=[channel.id]).get(timeout=30) + + # Refresh the channel from DB to get any updates + channel.refresh_from_db() + + return Response({ + "message": result.get("message", "Channel matching completed"), + "matched": result.get("matched", False), + "channel": self.get_serializer(channel).data + }) + except Exception as e: + return Response({"error": str(e)}, status=400) # ───────────────────────────────────────────────────────── # 7) Set EPG and Refresh # ───────────────────────────────────────────────────────── @swagger_auto_schema( - method='post', + method="post", operation_description="Set EPG data for a channel and refresh program data", request_body=openapi.Schema( type=openapi.TYPE_OBJECT, - required=['epg_data_id'], + required=["epg_data_id"], properties={ - 'epg_data_id': openapi.Schema( + "epg_data_id": openapi.Schema( type=openapi.TYPE_INTEGER, description="EPG data ID to link" ) - } + }, ), - responses={200: "EPG data linked and refresh triggered"} + responses={200: "EPG data linked and refresh triggered"}, ) - @action(detail=True, methods=['post'], url_path='set-epg') + @action(detail=True, methods=["post"], url_path="set-epg") def set_epg(self, request, pk=None): channel = self.get_object() - epg_data_id = request.data.get('epg_data_id') + epg_data_id = request.data.get("epg_data_id") # Handle removing EPG link - if epg_data_id in (None, '', '0', 0): + if epg_data_id in (None, "", "0", 0): channel.epg_data = None - channel.save(update_fields=['epg_data']) - return Response({"message": f"EPG data removed from channel {channel.name}"}) + channel.save(update_fields=["epg_data"]) + return Response( + {"message": f"EPG data 
removed from channel {channel.name}"} + ) try: # Get the EPG data object from apps.epg.models import EPGData + epg_data = EPGData.objects.get(pk=epg_data_id) # Set the EPG data and save channel.epg_data = epg_data - channel.save(update_fields=['epg_data']) + channel.save(update_fields=["epg_data"]) - # Explicitly trigger program refresh for this EPG - from apps.epg.tasks import parse_programs_for_tvg_id - task_result = parse_programs_for_tvg_id.delay(epg_data.id) + # Only trigger program refresh for non-dummy EPG sources + status_message = None + if epg_data.epg_source.source_type != 'dummy': + # Explicitly trigger program refresh for this EPG + from apps.epg.tasks import parse_programs_for_tvg_id - # Prepare response with task status info - status_message = "EPG refresh queued" - if task_result.result == "Task already running": - status_message = "EPG refresh already in progress" + task_result = parse_programs_for_tvg_id.delay(epg_data.id) - return Response({ - "message": f"EPG data set to {epg_data.tvg_id} for channel {channel.name}. {status_message}.", - "channel": self.get_serializer(channel).data, - "task_status": status_message - }) + # Prepare response with task status info + status_message = "EPG refresh queued" + if task_result.result == "Task already running": + status_message = "EPG refresh already in progress" + + # Build response message + message = f"EPG data set to {epg_data.tvg_id} for channel {channel.name}" + if status_message: + message += f". {status_message}" + + return Response( + { + "message": message, + "channel": self.get_serializer(channel).data, + "task_status": status_message, + } + ) except Exception as e: return Response({"error": str(e)}, status=400) @swagger_auto_schema( - method='post', + method="post", operation_description="Associate multiple channels with EPG data without triggering a full refresh", request_body=openapi.Schema( type=openapi.TYPE_OBJECT, properties={ - 'associations': openapi.Schema( + "associations": openapi.Schema( type=openapi.TYPE_ARRAY, items=openapi.Schema( type=openapi.TYPE_OBJECT, properties={ - 'channel_id': openapi.Schema(type=openapi.TYPE_INTEGER), - 'epg_data_id': openapi.Schema(type=openapi.TYPE_INTEGER) - } - ) + "channel_id": openapi.Schema(type=openapi.TYPE_INTEGER), + "epg_data_id": openapi.Schema(type=openapi.TYPE_INTEGER), + }, + ), ) - } + }, ), - responses={200: "EPG data linked for multiple channels"} + responses={200: "EPG data linked for multiple channels"}, ) - @action(detail=False, methods=['post'], url_path='batch-set-epg') + @action(detail=False, methods=["post"], url_path="batch-set-epg") def batch_set_epg(self, request): """Efficiently associate multiple channels with EPG data at once.""" - associations = request.data.get('associations', []) + associations = request.data.get("associations", []) channels_updated = 0 programs_refreshed = 0 unique_epg_ids = set() for assoc in associations: - channel_id = assoc.get('channel_id') - epg_data_id = assoc.get('epg_data_id') + channel_id = assoc.get("channel_id") + epg_data_id = assoc.get("epg_data_id") if not channel_id: continue @@ -624,7 +1056,7 @@ class ChannelViewSet(viewsets.ModelViewSet): # Set the EPG data channel.epg_data_id = epg_data_id - channel.save(update_fields=['epg_data']) + channel.save(update_fields=["epg_data"]) channels_updated += 1 # Track unique EPG data IDs @@ -634,27 +1066,44 @@ class ChannelViewSet(viewsets.ModelViewSet): except Channel.DoesNotExist: logger.error(f"Channel with ID {channel_id} not found") except Exception as e: - 
logger.error(f"Error setting EPG data for channel {channel_id}: {str(e)}") + logger.error( + f"Error setting EPG data for channel {channel_id}: {str(e)}" + ) - # Trigger program refresh for unique EPG data IDs + # Trigger program refresh for unique EPG data IDs (skip dummy EPGs) from apps.epg.tasks import parse_programs_for_tvg_id + from apps.epg.models import EPGData + for epg_id in unique_epg_ids: - parse_programs_for_tvg_id.delay(epg_id) - programs_refreshed += 1 + try: + epg_data = EPGData.objects.select_related('epg_source').get(id=epg_id) + # Only refresh non-dummy EPG sources + if epg_data.epg_source.source_type != 'dummy': + parse_programs_for_tvg_id.delay(epg_id) + programs_refreshed += 1 + except EPGData.DoesNotExist: + logger.error(f"EPGData with ID {epg_id} not found") + return Response( + { + "success": True, + "channels_updated": channels_updated, + "programs_refreshed": programs_refreshed, + } + ) - return Response({ - 'success': True, - 'channels_updated': channels_updated, - 'programs_refreshed': programs_refreshed - }) - # ───────────────────────────────────────────────────────── # 4) Bulk Delete Streams # ───────────────────────────────────────────────────────── class BulkDeleteStreamsAPIView(APIView): - permission_classes = [IsAuthenticated] + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] @swagger_auto_schema( operation_description="Bulk delete streams by ID", @@ -665,23 +1114,32 @@ class BulkDeleteStreamsAPIView(APIView): "stream_ids": openapi.Schema( type=openapi.TYPE_ARRAY, items=openapi.Items(type=openapi.TYPE_INTEGER), - description="Stream IDs to delete" + description="Stream IDs to delete", ) }, ), - responses={204: "Streams deleted"} + responses={204: "Streams deleted"}, ) def delete(self, request, *args, **kwargs): - stream_ids = request.data.get('stream_ids', []) + stream_ids = request.data.get("stream_ids", []) Stream.objects.filter(id__in=stream_ids).delete() - return Response({"message": "Streams deleted successfully!"}, status=status.HTTP_204_NO_CONTENT) + return Response( + {"message": "Streams deleted successfully!"}, + status=status.HTTP_204_NO_CONTENT, + ) # ───────────────────────────────────────────────────────── # 5) Bulk Delete Channels # ───────────────────────────────────────────────────────── class BulkDeleteChannelsAPIView(APIView): - permission_classes = [IsAuthenticated] + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] @swagger_auto_schema( operation_description="Bulk delete channels by ID", @@ -692,44 +1150,323 @@ class BulkDeleteChannelsAPIView(APIView): "channel_ids": openapi.Schema( type=openapi.TYPE_ARRAY, items=openapi.Items(type=openapi.TYPE_INTEGER), - description="Channel IDs to delete" + description="Channel IDs to delete", ) }, ), - responses={204: "Channels deleted"} + responses={204: "Channels deleted"}, ) def delete(self, request): - channel_ids = request.data.get('channel_ids', []) + channel_ids = request.data.get("channel_ids", []) Channel.objects.filter(id__in=channel_ids).delete() - return Response({"message": "Channels deleted"}, status=status.HTTP_204_NO_CONTENT) + return Response( + {"message": "Channels deleted"}, status=status.HTTP_204_NO_CONTENT + ) + + +# ───────────────────────────────────────────────────────── +# 6) Bulk Delete Logos +# 
───────────────────────────────────────────────────────── +class BulkDeleteLogosAPIView(APIView): + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + + @swagger_auto_schema( + operation_description="Bulk delete logos by ID", + request_body=openapi.Schema( + type=openapi.TYPE_OBJECT, + required=["logo_ids"], + properties={ + "logo_ids": openapi.Schema( + type=openapi.TYPE_ARRAY, + items=openapi.Items(type=openapi.TYPE_INTEGER), + description="Logo IDs to delete", + ) + }, + ), + responses={204: "Logos deleted"}, + ) + def delete(self, request): + logo_ids = request.data.get("logo_ids", []) + delete_files = request.data.get("delete_files", False) + + # Get logos and their usage info before deletion + logos_to_delete = Logo.objects.filter(id__in=logo_ids) + total_channels_affected = 0 + local_files_deleted = 0 + + for logo in logos_to_delete: + # Handle file deletion for local files + if delete_files and logo.url and logo.url.startswith('/data/logos'): + try: + if os.path.exists(logo.url): + os.remove(logo.url) + local_files_deleted += 1 + logger.info(f"Deleted local logo file: {logo.url}") + except Exception as e: + logger.error(f"Failed to delete logo file {logo.url}: {str(e)}") + return Response( + {"error": f"Failed to delete logo file {logo.url}: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR + ) + + if logo.channels.exists(): + channel_count = logo.channels.count() + total_channels_affected += channel_count + # Remove logo from channels + logo.channels.update(logo=None) + logger.info(f"Removed logo {logo.name} from {channel_count} channels before deletion") + + # Delete logos + deleted_count = logos_to_delete.delete()[0] + + message = f"Successfully deleted {deleted_count} logos" + if total_channels_affected > 0: + message += f" and removed them from {total_channels_affected} channels" + if local_files_deleted > 0: + message += f" and deleted {local_files_deleted} local files" + + return Response( + {"message": message}, + status=status.HTTP_204_NO_CONTENT + ) + + +class CleanupUnusedLogosAPIView(APIView): + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + + @swagger_auto_schema( + operation_description="Delete all channel logos that are not used by any channels", + request_body=openapi.Schema( + type=openapi.TYPE_OBJECT, + properties={ + "delete_files": openapi.Schema( + type=openapi.TYPE_BOOLEAN, + description="Whether to delete local logo files from disk", + default=False + ) + }, + ), + responses={200: "Cleanup completed"}, + ) + def post(self, request): + """Delete all channel logos with no channel associations""" + delete_files = request.data.get("delete_files", False) + + # Find logos that are not used by any channels + unused_logos = Logo.objects.filter(channels__isnull=True) + deleted_count = unused_logos.count() + logo_names = list(unused_logos.values_list('name', flat=True)) + local_files_deleted = 0 + + # Handle file deletion for local files if requested + if delete_files: + for logo in unused_logos: + if logo.url and logo.url.startswith('/data/logos'): + try: + if os.path.exists(logo.url): + os.remove(logo.url) + local_files_deleted += 1 + logger.info(f"Deleted local logo file: {logo.url}") + except Exception as e: + logger.error(f"Failed to delete logo file {logo.url}: {str(e)}") + return Response( + {"error": f"Failed to delete 
logo file {logo.url}: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR + ) + + # Delete the unused logos + unused_logos.delete() + + message = f"Successfully deleted {deleted_count} unused logos" + if local_files_deleted > 0: + message += f" and deleted {local_files_deleted} local files" + + return Response({ + "message": message, + "deleted_count": deleted_count, + "deleted_logos": logo_names, + "local_files_deleted": local_files_deleted + }) + + +class LogoPagination(PageNumberPagination): + page_size = 50 # Default page size to match frontend default + page_size_query_param = "page_size" # Allow clients to specify page size + max_page_size = 1000 # Prevent excessive page sizes + + def paginate_queryset(self, queryset, request, view=None): + # Check if pagination should be disabled for specific requests + if request.query_params.get('no_pagination') == 'true': + return None # disables pagination, returns full queryset + + return super().paginate_queryset(queryset, request, view) + class LogoViewSet(viewsets.ModelViewSet): - permission_classes = [IsAuthenticated] queryset = Logo.objects.all() serializer_class = LogoSerializer - parser_classes = (MultiPartParser, FormParser) + pagination_class = LogoPagination + parser_classes = (MultiPartParser, FormParser, JSONParser) - @action(detail=False, methods=['post']) + def get_permissions(self): + if self.action in ["upload"]: + return [IsAdmin()] + + if self.action in ["cache"]: + return [AllowAny()] + + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + + def get_queryset(self): + """Optimize queryset with prefetch and add filtering""" + # Start with basic prefetch for channels + queryset = Logo.objects.prefetch_related('channels').order_by('name') + + # Filter by specific IDs + ids = self.request.query_params.getlist('ids') + if ids: + try: + # Convert string IDs to integers and filter + id_list = [int(id_str) for id_str in ids if id_str.isdigit()] + if id_list: + queryset = queryset.filter(id__in=id_list) + except (ValueError, TypeError): + pass # Invalid IDs, return empty queryset + queryset = Logo.objects.none() + + # Filter by usage + used_filter = self.request.query_params.get('used', None) + if used_filter == 'true': + # Logo is used if it has any channels + queryset = queryset.filter(channels__isnull=False).distinct() + elif used_filter == 'false': + # Logo is unused if it has no channels + queryset = queryset.filter(channels__isnull=True) + + # Filter by name + name_filter = self.request.query_params.get('name', None) + if name_filter: + queryset = queryset.filter(name__icontains=name_filter) + + return queryset + + def create(self, request, *args, **kwargs): + """Create a new logo entry""" + serializer = self.get_serializer(data=request.data) + if serializer.is_valid(): + logo = serializer.save() + return Response(self.get_serializer(logo).data, status=status.HTTP_201_CREATED) + return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) + + def update(self, request, *args, **kwargs): + """Update an existing logo""" + partial = kwargs.pop('partial', False) + instance = self.get_object() + serializer = self.get_serializer(instance, data=request.data, partial=partial) + if serializer.is_valid(): + logo = serializer.save() + return Response(self.get_serializer(logo).data) + return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) + + def destroy(self, request, *args, **kwargs): + """Delete a logo and remove it from any channels 
using it""" + logo = self.get_object() + delete_file = request.query_params.get('delete_file', 'false').lower() == 'true' + + # Check if it's a local file that should be deleted + if delete_file and logo.url and logo.url.startswith('/data/logos'): + try: + if os.path.exists(logo.url): + os.remove(logo.url) + logger.info(f"Deleted local logo file: {logo.url}") + except Exception as e: + logger.error(f"Failed to delete logo file {logo.url}: {str(e)}") + return Response( + {"error": f"Failed to delete logo file: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR + ) + + # Instead of preventing deletion, remove the logo from channels + if logo.channels.exists(): + channel_count = logo.channels.count() + logo.channels.update(logo=None) + logger.info(f"Removed logo {logo.name} from {channel_count} channels before deletion") + + return super().destroy(request, *args, **kwargs) + + @action(detail=False, methods=["post"]) def upload(self, request): - if 'file' not in request.FILES: - return Response({'error': 'No file uploaded'}, status=status.HTTP_400_BAD_REQUEST) + if "file" not in request.FILES: + return Response( + {"error": "No file uploaded"}, status=status.HTTP_400_BAD_REQUEST + ) + + file = request.FILES["file"] + + # Validate file + try: + from dispatcharr.utils import validate_logo_file + validate_logo_file(file) + except Exception as e: + return Response( + {"error": str(e)}, status=status.HTTP_400_BAD_REQUEST + ) - file = request.FILES['file'] file_name = file.name - file_path = os.path.join('/data/logos', file_name) + file_path = os.path.join("/data/logos", file_name) os.makedirs(os.path.dirname(file_path), exist_ok=True) - with open(file_path, 'wb+') as destination: + with open(file_path, "wb+") as destination: for chunk in file.chunks(): destination.write(chunk) - logo, _ = Logo.objects.get_or_create(url=file_path, defaults={ - "name": file_name, - }) + # Mark file as processed in Redis to prevent file scanner notifications + try: + redis_client = RedisClient.get_client() + if redis_client: + # Use the same key format as the file scanner + redis_key = f"processed_file:{file_path}" + # Store the actual file modification time to match the file scanner's expectation + file_mtime = os.path.getmtime(file_path) + redis_client.setex(redis_key, 60 * 60 * 24 * 3, str(file_mtime)) # 3 day TTL + logger.debug(f"Marked uploaded logo file as processed in Redis: {file_path} (mtime: {file_mtime})") + except Exception as e: + logger.warning(f"Failed to mark logo file as processed in Redis: {e}") - return Response({'id': logo.id, 'name': logo.name, 'url': logo.url}, status=status.HTTP_201_CREATED) + # Get custom name from request data, fallback to filename + custom_name = request.data.get('name', '').strip() + logo_name = custom_name if custom_name else file_name - @action(detail=True, methods=['get'], permission_classes=[AllowAny]) + logo, _ = Logo.objects.get_or_create( + url=file_path, + defaults={ + "name": logo_name, + }, + ) + + # Use get_serializer to ensure proper context + serializer = self.get_serializer(logo) + return Response( + serializer.data, + status=status.HTTP_201_CREATED, + ) + + @action(detail=True, methods=["get"], permission_classes=[AllowAny]) def cache(self, request, pk=None): """Streams the logo file, whether it's local or remote.""" logo = self.get_object() @@ -742,19 +1479,38 @@ class LogoViewSet(viewsets.ModelViewSet): # Get proper mime type (first item of the tuple) content_type, _ = mimetypes.guess_type(logo_url) if not content_type: - content_type = 'image/jpeg' 
# Default to a common image type + content_type = "image/jpeg" # Default to a common image type # Use context manager and set Content-Disposition to inline - response = StreamingHttpResponse(open(logo_url, "rb"), content_type=content_type) - response['Content-Disposition'] = 'inline; filename="{}"'.format(os.path.basename(logo_url)) + response = StreamingHttpResponse( + open(logo_url, "rb"), content_type=content_type + ) + response["Content-Disposition"] = 'inline; filename="{}"'.format( + os.path.basename(logo_url) + ) return response else: # Remote image try: - remote_response = requests.get(logo_url, stream=True) + # Get the default user agent + try: + default_user_agent_id = CoreSettings.get_default_user_agent_id() + user_agent_obj = UserAgent.objects.get(id=int(default_user_agent_id)) + user_agent = user_agent_obj.user_agent + except (CoreSettings.DoesNotExist, UserAgent.DoesNotExist, ValueError): + # Fallback to hardcoded if default not found + user_agent = 'Dispatcharr/1.0' + + # Add proper timeouts to prevent hanging + remote_response = requests.get( + logo_url, + stream=True, + timeout=(3, 5), # (connect_timeout, read_timeout) + headers={'User-Agent': user_agent} + ) if remote_response.status_code == 200: # Try to get content type from response headers first - content_type = remote_response.headers.get('Content-Type') + content_type = remote_response.headers.get("Content-Type") # If no content type in headers or it's empty, guess based on URL if not content_type: @@ -762,43 +1518,104 @@ class LogoViewSet(viewsets.ModelViewSet): # If still no content type, default to common image type if not content_type: - content_type = 'image/jpeg' + content_type = "image/jpeg" - response = StreamingHttpResponse(remote_response.iter_content(chunk_size=8192), content_type=content_type) - response['Content-Disposition'] = 'inline; filename="{}"'.format(os.path.basename(logo_url)) + response = StreamingHttpResponse( + remote_response.iter_content(chunk_size=8192), + content_type=content_type, + ) + response["Content-Disposition"] = 'inline; filename="{}"'.format( + os.path.basename(logo_url) + ) return response raise Http404("Remote image not found") - except requests.RequestException: + except requests.exceptions.Timeout: + logger.warning(f"Timeout fetching logo from {logo_url}") + raise Http404("Logo request timed out") + except requests.exceptions.ConnectionError: + logger.warning(f"Connection error fetching logo from {logo_url}") + raise Http404("Unable to connect to logo server") + except requests.RequestException as e: + logger.warning(f"Error fetching logo from {logo_url}: {e}") raise Http404("Error fetching remote image") + class ChannelProfileViewSet(viewsets.ModelViewSet): queryset = ChannelProfile.objects.all() serializer_class = ChannelProfileSerializer - permission_classes = [IsAuthenticated] + + def get_queryset(self): + user = self.request.user + + # If user_level is 10, return all ChannelProfiles + if hasattr(user, "user_level") and user.user_level == 10: + return ChannelProfile.objects.all() + + # Otherwise, return only ChannelProfiles related to the user + return self.request.user.channel_profiles.all() + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + class GetChannelStreamsAPIView(APIView): + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + def get(self, 
request, channel_id): channel = get_object_or_404(Channel, id=channel_id) # Order the streams by channelstream__order to match the order in the channel view - streams = channel.streams.all().order_by('channelstream__order') + streams = channel.streams.all().order_by("channelstream__order") serializer = StreamSerializer(streams, many=True) return Response(serializer.data) + class UpdateChannelMembershipAPIView(APIView): + permission_classes = [IsOwnerOfObject] + def patch(self, request, profile_id, channel_id): """Enable or disable a channel for a specific group""" channel_profile = get_object_or_404(ChannelProfile, id=profile_id) channel = get_object_or_404(Channel, id=channel_id) - membership = get_object_or_404(ChannelProfileMembership, channel_profile=channel_profile, channel=channel) + try: + membership = ChannelProfileMembership.objects.get( + channel_profile=channel_profile, channel=channel + ) + except ChannelProfileMembership.DoesNotExist: + # Create the membership if it does not exist (for custom channels) + membership = ChannelProfileMembership.objects.create( + channel_profile=channel_profile, + channel=channel, + enabled=False # Default to False, will be updated below + ) - serializer = ChannelProfileMembershipSerializer(membership, data=request.data, partial=True) + serializer = ChannelProfileMembershipSerializer( + membership, data=request.data, partial=True + ) if serializer.is_valid(): serializer.save() return Response(serializer.data, status=status.HTTP_200_OK) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) + class BulkUpdateChannelMembershipAPIView(APIView): + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + def patch(self, request, profile_id): """Bulk enable or disable channels for a specific profile""" # Get the channel profile @@ -808,30 +1625,404 @@ class BulkUpdateChannelMembershipAPIView(APIView): serializer = BulkChannelProfileMembershipSerializer(data=request.data) if serializer.is_valid(): - updates = serializer.validated_data['channels'] - channel_ids = [entry['channel_id'] for entry in updates] - + updates = serializer.validated_data["channels"] + channel_ids = [entry["channel_id"] for entry in updates] memberships = ChannelProfileMembership.objects.filter( - channel_profile=channel_profile, - channel_id__in=channel_ids + channel_profile=channel_profile, channel_id__in=channel_ids ) membership_dict = {m.channel.id: m for m in memberships} for entry in updates: - channel_id = entry['channel_id'] - enabled_status = entry['enabled'] + channel_id = entry["channel_id"] + enabled_status = entry["enabled"] if channel_id in membership_dict: membership_dict[channel_id].enabled = enabled_status - ChannelProfileMembership.objects.bulk_update(memberships, ['enabled']) + ChannelProfileMembership.objects.bulk_update(memberships, ["enabled"]) return Response({"status": "success"}, status=status.HTTP_200_OK) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) + +class RecurringRecordingRuleViewSet(viewsets.ModelViewSet): + queryset = RecurringRecordingRule.objects.all().select_related("channel") + serializer_class = RecurringRecordingRuleSerializer + + def get_permissions(self): + return [IsAdmin()] + + def perform_create(self, serializer): + rule = serializer.save() + try: + sync_recurring_rule_impl(rule.id, drop_existing=True) + except Exception as err: + logger.warning(f"Failed to initialize recurring rule 
{rule.id}: {err}") + return rule + + def perform_update(self, serializer): + rule = serializer.save() + try: + if rule.enabled: + sync_recurring_rule_impl(rule.id, drop_existing=True) + else: + purge_recurring_rule_impl(rule.id) + except Exception as err: + logger.warning(f"Failed to resync recurring rule {rule.id}: {err}") + return rule + + def perform_destroy(self, instance): + rule_id = instance.id + super().perform_destroy(instance) + try: + purge_recurring_rule_impl(rule_id) + except Exception as err: + logger.warning(f"Failed to purge recordings for rule {rule_id}: {err}") + + class RecordingViewSet(viewsets.ModelViewSet): queryset = Recording.objects.all() serializer_class = RecordingSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + # Allow unauthenticated playback of recording files (like other streaming endpoints) + if getattr(self, 'action', None) == 'file': + return [AllowAny()] + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + + @action(detail=True, methods=["post"], url_path="comskip") + def comskip(self, request, pk=None): + """Trigger comskip processing for this recording.""" + from .tasks import comskip_process_recording + rec = get_object_or_404(Recording, pk=pk) + try: + comskip_process_recording.delay(rec.id) + return Response({"success": True, "queued": True}) + except Exception as e: + return Response({"success": False, "error": str(e)}, status=400) + + @action(detail=True, methods=["get"], url_path="file") + def file(self, request, pk=None): + """Stream a recorded file with HTTP Range support for seeking.""" + recording = get_object_or_404(Recording, pk=pk) + cp = recording.custom_properties or {} + file_path = cp.get("file_path") + file_name = cp.get("file_name") or "recording" + + if not file_path or not os.path.exists(file_path): + raise Http404("Recording file not found") + + # Guess content type + ext = os.path.splitext(file_path)[1].lower() + if ext == ".mp4": + content_type = "video/mp4" + elif ext == ".mkv": + content_type = "video/x-matroska" + else: + content_type = mimetypes.guess_type(file_path)[0] or "application/octet-stream" + + file_size = os.path.getsize(file_path) + range_header = request.META.get("HTTP_RANGE", "").strip() + + def file_iterator(path, start=0, end=None, chunk_size=8192): + with open(path, "rb") as f: + f.seek(start) + remaining = (end - start + 1) if end is not None else None + while True: + if remaining is not None and remaining <= 0: + break + bytes_to_read = min(chunk_size, remaining) if remaining is not None else chunk_size + data = f.read(bytes_to_read) + if not data: + break + if remaining is not None: + remaining -= len(data) + yield data + + if range_header and range_header.startswith("bytes="): + # Parse Range header + try: + range_spec = range_header.split("=", 1)[1] + start_str, end_str = range_spec.split("-", 1) + start = int(start_str) if start_str else 0 + end = int(end_str) if end_str else file_size - 1 + start = max(0, start) + end = min(file_size - 1, end) + length = end - start + 1 + + resp = StreamingHttpResponse( + file_iterator(file_path, start, end), + status=206, + content_type=content_type, + ) + resp["Content-Range"] = f"bytes {start}-{end}/{file_size}" + resp["Content-Length"] = str(length) + resp["Accept-Ranges"] = "bytes" + resp["Content-Disposition"] = f"inline; filename=\"{file_name}\"" + return resp + except Exception: + # Fall back to full file if parsing fails + pass + + # Full file response 
+ response = FileResponse(open(file_path, "rb"), content_type=content_type) + response["Content-Length"] = str(file_size) + response["Accept-Ranges"] = "bytes" + response["Content-Disposition"] = f"inline; filename=\"{file_name}\"" + return response + + def destroy(self, request, *args, **kwargs): + """Delete the Recording and ensure any active DVR client connection is closed. + + Also removes the associated file(s) from disk if present. + """ + instance = self.get_object() + + # Attempt to close the DVR client connection for this channel if active + try: + channel_uuid = str(instance.channel.uuid) + # Lazy imports to avoid module overhead if proxy isn't used + from core.utils import RedisClient + from apps.proxy.ts_proxy.redis_keys import RedisKeys + from apps.proxy.ts_proxy.services.channel_service import ChannelService + + r = RedisClient.get_client() + if r: + client_set_key = RedisKeys.clients(channel_uuid) + client_ids = r.smembers(client_set_key) or [] + stopped = 0 + for raw_id in client_ids: + try: + cid = raw_id.decode("utf-8") if isinstance(raw_id, (bytes, bytearray)) else str(raw_id) + meta_key = RedisKeys.client_metadata(channel_uuid, cid) + ua = r.hget(meta_key, "user_agent") + ua_s = ua.decode("utf-8") if isinstance(ua, (bytes, bytearray)) else (ua or "") + # Identify DVR recording client by its user agent + if ua_s and "Dispatcharr-DVR" in ua_s: + try: + ChannelService.stop_client(channel_uuid, cid) + stopped += 1 + except Exception as inner_e: + logger.debug(f"Failed to stop DVR client {cid} for channel {channel_uuid}: {inner_e}") + except Exception as inner: + logger.debug(f"Error while checking client metadata: {inner}") + if stopped: + logger.info(f"Stopped {stopped} DVR client(s) for channel {channel_uuid} due to recording cancellation") + # If no clients remain after stopping DVR clients, proactively stop the channel + try: + remaining = r.scard(client_set_key) or 0 + except Exception: + remaining = 0 + if remaining == 0: + try: + ChannelService.stop_channel(channel_uuid) + logger.info(f"Stopped channel {channel_uuid} (no clients remain)") + except Exception as sc_e: + logger.debug(f"Unable to stop channel {channel_uuid}: {sc_e}") + except Exception as e: + logger.debug(f"Unable to stop DVR clients for cancelled recording: {e}") + + # Capture paths before deletion + cp = instance.custom_properties or {} + file_path = cp.get("file_path") + temp_ts_path = cp.get("_temp_file_path") + + # Perform DB delete first, then try to remove files + response = super().destroy(request, *args, **kwargs) + + # Notify frontends to refresh recordings + try: + from core.utils import send_websocket_update + send_websocket_update('updates', 'update', {"success": True, "type": "recordings_refreshed"}) + except Exception: + pass + + library_dir = '/data' + allowed_roots = ['/data/', library_dir.rstrip('/') + '/'] + + def _safe_remove(path: str): + if not path or not isinstance(path, str): + return + try: + if any(path.startswith(root) for root in allowed_roots) and os.path.exists(path): + os.remove(path) + logger.info(f"Deleted recording artifact: {path}") + except Exception as ex: + logger.warning(f"Failed to delete recording artifact {path}: {ex}") + + _safe_remove(file_path) + _safe_remove(temp_ts_path) + + return response + + +class ComskipConfigAPIView(APIView): + """Upload or inspect the custom comskip.ini used by DVR processing.""" + + parser_classes = [MultiPartParser, FormParser] + + def get_permissions(self): + return [IsAdmin()] + + def get(self, request): + path = 
CoreSettings.get_dvr_comskip_custom_path() + exists = bool(path and os.path.exists(path)) + return Response({"path": path, "exists": exists}) + + def post(self, request): + uploaded = request.FILES.get("file") or request.FILES.get("comskip_ini") + if not uploaded: + return Response({"error": "No file provided"}, status=status.HTTP_400_BAD_REQUEST) + + name = (uploaded.name or "").lower() + if not name.endswith(".ini"): + return Response({"error": "Only .ini files are allowed"}, status=status.HTTP_400_BAD_REQUEST) + + if uploaded.size and uploaded.size > 1024 * 1024: + return Response({"error": "File too large (limit 1MB)"}, status=status.HTTP_400_BAD_REQUEST) + + dest_dir = os.path.join(settings.MEDIA_ROOT, "comskip") + os.makedirs(dest_dir, exist_ok=True) + dest_path = os.path.join(dest_dir, "comskip.ini") + + try: + with open(dest_path, "wb") as dest: + for chunk in uploaded.chunks(): + dest.write(chunk) + except Exception as e: + logger.error(f"Failed to save uploaded comskip.ini: {e}") + return Response({"error": "Unable to save file"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR) + + # Persist path setting so DVR processing picks it up immediately + CoreSettings.set_dvr_comskip_custom_path(dest_path) + + return Response({"success": True, "path": dest_path, "exists": os.path.exists(dest_path)}) + + +class BulkDeleteUpcomingRecordingsAPIView(APIView): + """Delete all upcoming (future) recordings.""" + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_method[self.request.method]] + except KeyError: + return [Authenticated()] + + def post(self, request): + now = timezone.now() + qs = Recording.objects.filter(start_time__gt=now) + removed = qs.count() + qs.delete() + try: + from core.utils import send_websocket_update + send_websocket_update('updates', 'update', {"success": True, "type": "recordings_refreshed", "removed": removed}) + except Exception: + pass + return Response({"success": True, "removed": removed}) + + +class SeriesRulesAPIView(APIView): + """Manage DVR series recording rules (list/add).""" + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_method[self.request.method]] + except KeyError: + return [Authenticated()] + + def get(self, request): + return Response({"rules": CoreSettings.get_dvr_series_rules()}) + + def post(self, request): + data = request.data or {} + tvg_id = str(data.get("tvg_id") or "").strip() + mode = (data.get("mode") or "all").lower() + title = data.get("title") or "" + if mode not in ("all", "new"): + return Response({"error": "mode must be 'all' or 'new'"}, status=status.HTTP_400_BAD_REQUEST) + if not tvg_id: + return Response({"error": "tvg_id is required"}, status=status.HTTP_400_BAD_REQUEST) + rules = CoreSettings.get_dvr_series_rules() + # Upsert by tvg_id + existing = next((r for r in rules if str(r.get("tvg_id")) == tvg_id), None) + if existing: + existing.update({"mode": mode, "title": title}) + else: + rules.append({"tvg_id": tvg_id, "mode": mode, "title": title}) + CoreSettings.set_dvr_series_rules(rules) + # Evaluate immediately for this tvg_id (async) + try: + evaluate_series_rules.delay(tvg_id) + except Exception: + pass + return Response({"success": True, "rules": rules}) + + +class DeleteSeriesRuleAPIView(APIView): + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_method[self.request.method]] + except KeyError: + return [Authenticated()] + + def delete(self, request, tvg_id): + tvg_id = str(tvg_id) + rules = [r for r in 
CoreSettings.get_dvr_series_rules() if str(r.get("tvg_id")) != tvg_id] + CoreSettings.set_dvr_series_rules(rules) + return Response({"success": True, "rules": rules}) + + +class EvaluateSeriesRulesAPIView(APIView): + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_method[self.request.method]] + except KeyError: + return [Authenticated()] + + def post(self, request): + tvg_id = request.data.get("tvg_id") + # Run synchronously so UI sees results immediately + result = evaluate_series_rules_impl(str(tvg_id)) if tvg_id else evaluate_series_rules_impl() + return Response({"success": True, **result}) + + +class BulkRemoveSeriesRecordingsAPIView(APIView): + """Bulk remove scheduled recordings for a series rule. + + POST body: + - tvg_id: required (EPG channel id) + - title: optional (series title) + - scope: 'title' (default) or 'channel' + """ + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_method[self.request.method]] + except KeyError: + return [Authenticated()] + + def post(self, request): + from django.utils import timezone + tvg_id = str(request.data.get("tvg_id") or "").strip() + title = request.data.get("title") + scope = (request.data.get("scope") or "title").lower() + if not tvg_id: + return Response({"error": "tvg_id is required"}, status=status.HTTP_400_BAD_REQUEST) + + qs = Recording.objects.filter( + start_time__gte=timezone.now(), + custom_properties__program__tvg_id=tvg_id, + ) + if scope == "title" and title: + qs = qs.filter(custom_properties__program__title=title) + + count = qs.count() + qs.delete() + try: + from core.utils import send_websocket_update + send_websocket_update('updates', 'update', {"success": True, "type": "recordings_refreshed", "removed": count}) + except Exception: + pass + return Response({"success": True, "removed": count}) diff --git a/apps/channels/migrations/0021_channel_user_level.py b/apps/channels/migrations/0021_channel_user_level.py new file mode 100644 index 00000000..2aa55eeb --- /dev/null +++ b/apps/channels/migrations/0021_channel_user_level.py @@ -0,0 +1,18 @@ +# Generated by Django 5.1.6 on 2025-05-18 14:31 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0020_alter_channel_channel_number'), + ] + + operations = [ + migrations.AddField( + model_name='channel', + name='user_level', + field=models.IntegerField(default=0), + ), + ] diff --git a/apps/channels/migrations/0022_channel_auto_created_channel_auto_created_by_and_more.py b/apps/channels/migrations/0022_channel_auto_created_channel_auto_created_by_and_more.py new file mode 100644 index 00000000..b1450c09 --- /dev/null +++ b/apps/channels/migrations/0022_channel_auto_created_channel_auto_created_by_and_more.py @@ -0,0 +1,35 @@ +# Generated by Django 5.1.6 on 2025-07-13 23:08 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0021_channel_user_level'), + ('m3u', '0012_alter_m3uaccount_refresh_interval'), + ] + + operations = [ + migrations.AddField( + model_name='channel', + name='auto_created', + field=models.BooleanField(default=False, help_text='Whether this channel was automatically created via M3U auto channel sync'), + ), + migrations.AddField( + model_name='channel', + name='auto_created_by', + field=models.ForeignKey(blank=True, help_text='The M3U account that auto-created this channel', null=True, 
on_delete=django.db.models.deletion.SET_NULL, related_name='auto_created_channels', to='m3u.m3uaccount'), + ), + migrations.AddField( + model_name='channelgroupm3uaccount', + name='auto_channel_sync', + field=models.BooleanField(default=False, help_text='Automatically create/delete channels to match streams in this group'), + ), + migrations.AddField( + model_name='channelgroupm3uaccount', + name='auto_sync_channel_start', + field=models.FloatField(blank=True, help_text='Starting channel number for auto-created channels in this group', null=True), + ), + ] diff --git a/apps/channels/migrations/0023_stream_stream_stats_stream_stream_stats_updated_at.py b/apps/channels/migrations/0023_stream_stream_stats_stream_stream_stats_updated_at.py new file mode 100644 index 00000000..1b0fdbe8 --- /dev/null +++ b/apps/channels/migrations/0023_stream_stream_stats_stream_stream_stats_updated_at.py @@ -0,0 +1,23 @@ +# Generated by Django 5.1.6 on 2025-07-29 02:39 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0022_channel_auto_created_channel_auto_created_by_and_more'), + ] + + operations = [ + migrations.AddField( + model_name='stream', + name='stream_stats', + field=models.JSONField(blank=True, help_text='JSON object containing stream statistics like video codec, resolution, etc.', null=True), + ), + migrations.AddField( + model_name='stream', + name='stream_stats_updated_at', + field=models.DateTimeField(blank=True, db_index=True, help_text='When stream statistics were last updated', null=True), + ), + ] diff --git a/apps/channels/migrations/0024_alter_channelgroupm3uaccount_channel_group.py b/apps/channels/migrations/0024_alter_channelgroupm3uaccount_channel_group.py new file mode 100644 index 00000000..7ee5544c --- /dev/null +++ b/apps/channels/migrations/0024_alter_channelgroupm3uaccount_channel_group.py @@ -0,0 +1,19 @@ +# Generated by Django 5.2.4 on 2025-08-22 20:14 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0023_stream_stream_stats_stream_stream_stats_updated_at'), + ] + + operations = [ + migrations.AlterField( + model_name='channelgroupm3uaccount', + name='channel_group', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='m3u_accounts', to='dispatcharr_channels.channelgroup'), + ), + ] diff --git a/apps/channels/migrations/0025_alter_channelgroupm3uaccount_custom_properties_and_more.py b/apps/channels/migrations/0025_alter_channelgroupm3uaccount_custom_properties_and_more.py new file mode 100644 index 00000000..980682cb --- /dev/null +++ b/apps/channels/migrations/0025_alter_channelgroupm3uaccount_custom_properties_and_more.py @@ -0,0 +1,28 @@ +# Generated by Django 5.2.4 on 2025-09-02 14:30 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0024_alter_channelgroupm3uaccount_channel_group'), + ] + + operations = [ + migrations.AlterField( + model_name='channelgroupm3uaccount', + name='custom_properties', + field=models.JSONField(blank=True, default=dict, null=True), + ), + migrations.AlterField( + model_name='recording', + name='custom_properties', + field=models.JSONField(blank=True, default=dict, null=True), + ), + migrations.AlterField( + model_name='stream', + name='custom_properties', + field=models.JSONField(blank=True, default=dict, null=True), + ), + ] 
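# --- Illustrative aside (editor's sketch, not part of the patch) ---
# The TextField -> JSONField conversions in the migration above are what enable the JSON
# path lookups used by the DVR views earlier in this diff (e.g. BulkRemoveSeriesRecordingsAPIView
# filters on custom_properties__program__tvg_id). A minimal sketch under that assumption;
# the helper name upcoming_recordings_for_series is hypothetical, for illustration only.
from django.utils import timezone
from apps.channels.models import Recording


def upcoming_recordings_for_series(tvg_id, title=None):
    """Return future recordings whose embedded program metadata matches a series rule."""
    qs = Recording.objects.filter(
        start_time__gte=timezone.now(),
        # JSONField double-underscore path lookup into custom_properties["program"]["tvg_id"]
        custom_properties__program__tvg_id=str(tvg_id),
    )
    if title:
        qs = qs.filter(custom_properties__program__title=title)
    return qs
# --- end aside ---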
diff --git a/apps/channels/migrations/0026_recurringrecordingrule.py b/apps/channels/migrations/0026_recurringrecordingrule.py new file mode 100644 index 00000000..1b8cfdb8 --- /dev/null +++ b/apps/channels/migrations/0026_recurringrecordingrule.py @@ -0,0 +1,31 @@ +# Generated by Django 5.0.14 on 2025-09-18 14:56 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0025_alter_channelgroupm3uaccount_custom_properties_and_more'), + ] + + operations = [ + migrations.CreateModel( + name='RecurringRecordingRule', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('days_of_week', models.JSONField(default=list)), + ('start_time', models.TimeField()), + ('end_time', models.TimeField()), + ('enabled', models.BooleanField(default=True)), + ('name', models.CharField(blank=True, max_length=255)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('channel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recurring_rules', to='dispatcharr_channels.channel')), + ], + options={ + 'ordering': ['channel', 'start_time'], + }, + ), + ] diff --git a/apps/channels/migrations/0027_recurringrecordingrule_end_date_and_more.py b/apps/channels/migrations/0027_recurringrecordingrule_end_date_and_more.py new file mode 100644 index 00000000..8cdb9868 --- /dev/null +++ b/apps/channels/migrations/0027_recurringrecordingrule_end_date_and_more.py @@ -0,0 +1,23 @@ +# Generated by Django 5.2.4 on 2025-10-05 20:50 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0026_recurringrecordingrule'), + ] + + operations = [ + migrations.AddField( + model_name='recurringrecordingrule', + name='end_date', + field=models.DateField(blank=True, null=True), + ), + migrations.AddField( + model_name='recurringrecordingrule', + name='start_date', + field=models.DateField(blank=True, null=True), + ), + ] diff --git a/apps/channels/migrations/0028_channel_created_at_channel_updated_at.py b/apps/channels/migrations/0028_channel_created_at_channel_updated_at.py new file mode 100644 index 00000000..08c426b1 --- /dev/null +++ b/apps/channels/migrations/0028_channel_created_at_channel_updated_at.py @@ -0,0 +1,25 @@ +# Generated by Django 5.2.4 on 2025-10-06 22:55 + +import django.utils.timezone +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0027_recurringrecordingrule_end_date_and_more'), + ] + + operations = [ + migrations.AddField( + model_name='channel', + name='created_at', + field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, help_text='Timestamp when this channel was created'), + preserve_default=False, + ), + migrations.AddField( + model_name='channel', + name='updated_at', + field=models.DateTimeField(auto_now=True, help_text='Timestamp when this channel was last updated'), + ), + ] diff --git a/apps/channels/migrations/0029_backfill_custom_stream_hashes.py b/apps/channels/migrations/0029_backfill_custom_stream_hashes.py new file mode 100644 index 00000000..3e270be2 --- /dev/null +++ b/apps/channels/migrations/0029_backfill_custom_stream_hashes.py @@ -0,0 +1,54 @@ +# Generated migration to backfill stream_hash for existing custom streams + +from django.db import 
migrations +import hashlib + + +def backfill_custom_stream_hashes(apps, schema_editor): + """ + Generate stream_hash for all custom streams that don't have one. + Uses stream ID to create a stable hash that won't change when name/url is edited. + """ + Stream = apps.get_model('dispatcharr_channels', 'Stream') + + custom_streams_without_hash = Stream.objects.filter( + is_custom=True, + stream_hash__isnull=True + ) + + updated_count = 0 + for stream in custom_streams_without_hash: + # Generate a stable hash using the stream's ID + # This ensures the hash never changes even if name/url is edited + unique_string = f"custom_stream_{stream.id}" + stream.stream_hash = hashlib.sha256(unique_string.encode()).hexdigest() + stream.save(update_fields=['stream_hash']) + updated_count += 1 + + if updated_count > 0: + print(f"Backfilled stream_hash for {updated_count} custom streams") + else: + print("No custom streams needed stream_hash backfill") + + +def reverse_backfill(apps, schema_editor): + """ + Reverse migration - clear stream_hash for custom streams. + Note: This will break preview functionality for custom streams. + """ + Stream = apps.get_model('dispatcharr_channels', 'Stream') + + custom_streams = Stream.objects.filter(is_custom=True) + count = custom_streams.update(stream_hash=None) + print(f"Cleared stream_hash for {count} custom streams") + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0028_channel_created_at_channel_updated_at'), + ] + + operations = [ + migrations.RunPython(backfill_custom_stream_hashes, reverse_backfill), + ] diff --git a/apps/channels/migrations/0030_alter_stream_url.py b/apps/channels/migrations/0030_alter_stream_url.py new file mode 100644 index 00000000..203e411a --- /dev/null +++ b/apps/channels/migrations/0030_alter_stream_url.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-10-28 20:00 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0029_backfill_custom_stream_hashes'), + ] + + operations = [ + migrations.AlterField( + model_name='stream', + name='url', + field=models.URLField(blank=True, max_length=4096, null=True), + ), + ] diff --git a/apps/channels/models.py b/apps/channels/models.py index 191eb45e..3dfb392b 100644 --- a/apps/channels/models.py +++ b/apps/channels/models.py @@ -9,12 +9,14 @@ from datetime import datetime import hashlib import json from apps.epg.models import EPGData +from apps.accounts.models import User logger = logging.getLogger(__name__) # If you have an M3UAccount model in apps.m3u, you can still import it: from apps.m3u.models import M3UAccount + # Add fallback functions if Redis isn't available def get_total_viewers(channel_id): """Get viewer count from Redis or return 0 if Redis isn't available""" @@ -25,6 +27,7 @@ def get_total_viewers(channel_id): except Exception: return 0 + class ChannelGroup(models.Model): name = models.TextField(unique=True, db_index=True) @@ -45,12 +48,14 @@ class ChannelGroup(models.Model): return created_objects + class Stream(models.Model): """ Represents a single stream (e.g. from an M3U source or custom URL). 
""" + name = models.CharField(max_length=255, default="Default Stream") - url = models.URLField(max_length=2000, blank=True, null=True) + url = models.URLField(max_length=4096, blank=True, null=True) m3u_account = models.ForeignKey( M3UAccount, on_delete=models.CASCADE, @@ -60,7 +65,7 @@ class Stream(models.Model): ) logo_url = models.TextField(blank=True, null=True) tvg_id = models.CharField(max_length=255, blank=True, null=True) - local_file = models.FileField(upload_to='uploads/', blank=True, null=True) + local_file = models.FileField(upload_to="uploads/", blank=True, null=True) current_viewers = models.PositiveIntegerField(default=0) updated_at = models.DateTimeField(auto_now=True) channel_group = models.ForeignKey( @@ -68,18 +73,18 @@ class Stream(models.Model): on_delete=models.SET_NULL, null=True, blank=True, - related_name='streams' + related_name="streams", ) stream_profile = models.ForeignKey( StreamProfile, null=True, blank=True, on_delete=models.SET_NULL, - related_name='streams' + related_name="streams", ) is_custom = models.BooleanField( default=False, - help_text="Whether this is a user-created stream or from an M3U account" + help_text="Whether this is a user-created stream or from an M3U account", ) stream_hash = models.CharField( max_length=255, @@ -89,30 +94,43 @@ class Stream(models.Model): db_index=True, ) last_seen = models.DateTimeField(db_index=True, default=datetime.now) - custom_properties = models.TextField(null=True, blank=True) + custom_properties = models.JSONField(default=dict, blank=True, null=True) + + # Stream statistics fields + stream_stats = models.JSONField( + null=True, + blank=True, + help_text="JSON object containing stream statistics like video codec, resolution, etc." + ) + stream_stats_updated_at = models.DateTimeField( + null=True, + blank=True, + help_text="When stream statistics were last updated", + db_index=True + ) class Meta: # If you use m3u_account, you might do unique_together = ('name','url','m3u_account') verbose_name = "Stream" verbose_name_plural = "Streams" - ordering = ['-updated_at'] + ordering = ["-updated_at"] def __str__(self): return self.name or self.url or f"Stream ID {self.id}" @classmethod - def generate_hash_key(cls, name, url, tvg_id, keys=None): + def generate_hash_key(cls, name, url, tvg_id, keys=None, m3u_id=None): if keys is None: keys = CoreSettings.get_m3u_hash_key().split(",") - stream_parts = { - "name": name, "url": url, "tvg_id": tvg_id - } + stream_parts = {"name": name, "url": url, "tvg_id": tvg_id, "m3u_id": m3u_id} hash_parts = {key: stream_parts[key] for key in keys if key in stream_parts} # Serialize and hash the dictionary - serialized_obj = json.dumps(hash_parts, sort_keys=True) # sort_keys ensures consistent ordering + serialized_obj = json.dumps( + hash_parts, sort_keys=True + ) # sort_keys ensures consistent ordering hash_object = hashlib.sha256(serialized_obj.encode()) return hash_object.hexdigest() @@ -128,13 +146,23 @@ class Stream(models.Model): return stream, False # False means it was updated, not created except cls.DoesNotExist: # If it doesn't exist, create a new object with the given hash - fields_to_update['stream_hash'] = hash_value # Make sure the hash field is set + fields_to_update["stream_hash"] = ( + hash_value # Make sure the hash field is set + ) stream = cls.objects.create(**fields_to_update) return stream, True # True means it was created - # @TODO: honor stream's stream profile def get_stream_profile(self): - stream_profile = 
StreamProfile.objects.get(id=CoreSettings.get_default_stream_profile_id()) + """ + Get the stream profile for this stream. + Uses the stream's own profile if set, otherwise returns the default. + """ + if self.stream_profile: + return self.stream_profile + + stream_profile = StreamProfile.objects.get( + id=CoreSettings.get_default_stream_profile_id() + ) return stream_profile @@ -152,7 +180,9 @@ class Stream(models.Model): m3u_account = self.m3u_account m3u_profiles = m3u_account.profiles.all() default_profile = next((obj for obj in m3u_profiles if obj.is_default), None) - profiles = [default_profile] + [obj for obj in m3u_profiles if not obj.is_default] + profiles = [default_profile] + [ + obj for obj in m3u_profiles if not obj.is_default + ] for profile in profiles: logger.info(profile) @@ -167,13 +197,19 @@ class Stream(models.Model): if profile.max_streams == 0 or current_connections < profile.max_streams: # Start a new stream redis_client.set(f"channel_stream:{self.id}", self.id) - redis_client.set(f"stream_profile:{self.id}", profile.id) # Store only the matched profile + redis_client.set( + f"stream_profile:{self.id}", profile.id + ) # Store only the matched profile # Increment connection count for profiles with limits if profile.max_streams > 0: redis_client.incr(profile_connections_key) - return self.id, profile.id, None # Return newly assigned stream and matched profile + return ( + self.id, + profile.id, + None, + ) # Return newly assigned stream and matched profile # 4. No available streams return None, None, None @@ -194,7 +230,9 @@ class Stream(models.Model): redis_client.delete(f"stream_profile:{stream_id}") # Remove profile association profile_id = int(profile_id) - logger.debug(f"Found profile ID {profile_id} associated with stream {stream_id}") + logger.debug( + f"Found profile ID {profile_id} associated with stream {stream_id}" + ) profile_connections_key = f"profile_connections:{profile_id}" @@ -203,6 +241,7 @@ class Stream(models.Model): if current_count > 0: redis_client.decr(profile_connections_key) + class ChannelManager(models.Manager): def active(self): return self.all() @@ -212,38 +251,35 @@ class Channel(models.Model): channel_number = models.FloatField(db_index=True) name = models.CharField(max_length=255) logo = models.ForeignKey( - 'Logo', + "Logo", on_delete=models.SET_NULL, null=True, blank=True, - related_name='channels', + related_name="channels", ) # M2M to Stream now in the same file streams = models.ManyToManyField( - Stream, - blank=True, - through='ChannelStream', - related_name='channels' + Stream, blank=True, through="ChannelStream", related_name="channels" ) channel_group = models.ForeignKey( - 'ChannelGroup', + "ChannelGroup", on_delete=models.SET_NULL, null=True, blank=True, - related_name='channels', - help_text="Channel group this channel belongs to." 
+ related_name="channels", + help_text="Channel group this channel belongs to.", ) tvg_id = models.CharField(max_length=255, blank=True, null=True) tvc_guide_stationid = models.CharField(max_length=255, blank=True, null=True) - + epg_data = models.ForeignKey( EPGData, on_delete=models.SET_NULL, null=True, blank=True, - related_name='channels' + related_name="channels", ) stream_profile = models.ForeignKey( @@ -251,16 +287,41 @@ class Channel(models.Model): on_delete=models.SET_NULL, null=True, blank=True, - related_name='channels' + related_name="channels", ) - uuid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True, db_index=True) + uuid = models.UUIDField( + default=uuid.uuid4, editable=False, unique=True, db_index=True + ) + + user_level = models.IntegerField(default=0) + + auto_created = models.BooleanField( + default=False, + help_text="Whether this channel was automatically created via M3U auto channel sync" + ) + auto_created_by = models.ForeignKey( + "m3u.M3UAccount", + on_delete=models.SET_NULL, + null=True, + blank=True, + related_name="auto_created_channels", + help_text="The M3U account that auto-created this channel" + ) + + created_at = models.DateTimeField( + auto_now_add=True, + help_text="Timestamp when this channel was created" + ) + updated_at = models.DateTimeField( + auto_now=True, + help_text="Timestamp when this channel was last updated" + ) def clean(self): # Enforce unique channel_number within a given group existing = Channel.objects.filter( - channel_number=self.channel_number, - channel_group=self.channel_group + channel_number=self.channel_number, channel_group=self.channel_group ).exclude(id=self.id) if existing.exists(): raise ValidationError( @@ -272,7 +333,7 @@ class Channel(models.Model): @classmethod def get_next_available_channel_number(cls, starting_from=1): - used_numbers = set(cls.objects.all().values_list('channel_number', flat=True)) + used_numbers = set(cls.objects.all().values_list("channel_number", flat=True)) n = starting_from while n in used_numbers: n += 1 @@ -282,7 +343,9 @@ class Channel(models.Model): def get_stream_profile(self): stream_profile = self.stream_profile if not stream_profile: - stream_profile = StreamProfile.objects.get(id=CoreSettings.get_default_stream_profile_id()) + stream_profile = StreamProfile.objects.get( + id=CoreSettings.get_default_stream_profile_id() + ) return stream_profile @@ -312,44 +375,55 @@ class Channel(models.Model): profile_id = int(profile_id_bytes) return stream_id, profile_id, None except (ValueError, TypeError): - logger.debug(f"Invalid profile ID retrieved from Redis: {profile_id_bytes}") + logger.debug( + f"Invalid profile ID retrieved from Redis: {profile_id_bytes}" + ) except (ValueError, TypeError): - logger.debug(f"Invalid stream ID retrieved from Redis: {stream_id_bytes}") + logger.debug( + f"Invalid stream ID retrieved from Redis: {stream_id_bytes}" + ) # No existing active stream, attempt to assign a new one has_streams_but_maxed_out = False has_active_profiles = False # Iterate through channel streams and their profiles - for stream in self.streams.all().order_by('channelstream__order'): + for stream in self.streams.all().order_by("channelstream__order"): # Retrieve the M3U account associated with the stream. 
m3u_account = stream.m3u_account if not m3u_account: logger.debug(f"Stream {stream.id} has no M3U account") continue - - m3u_profiles = m3u_account.profiles.all() - default_profile = next((obj for obj in m3u_profiles if obj.is_default), None) - - if not default_profile: - logger.debug(f"M3U account {m3u_account.id} has no default profile") + if m3u_account.is_active == False: + logger.debug(f"M3U account {m3u_account.id} is inactive, skipping.") continue - profiles = [default_profile] + [obj for obj in m3u_profiles if not obj.is_default] + m3u_profiles = m3u_account.profiles.filter(is_active=True) + default_profile = next( + (obj for obj in m3u_profiles if obj.is_default), None + ) + + if not default_profile: + logger.debug(f"M3U account {m3u_account.id} has no active default profile") + continue + + profiles = [default_profile] + [ + obj for obj in m3u_profiles if not obj.is_default + ] for profile in profiles: - # Skip inactive profiles - if not profile.is_active: - logger.debug(f"Skipping inactive profile {profile.id}") - continue - has_active_profiles = True profile_connections_key = f"profile_connections:{profile.id}" - current_connections = int(redis_client.get(profile_connections_key) or 0) + current_connections = int( + redis_client.get(profile_connections_key) or 0 + ) # Check if profile has available slots (or unlimited connections) - if profile.max_streams == 0 or current_connections < profile.max_streams: + if ( + profile.max_streams == 0 + or current_connections < profile.max_streams + ): # Start a new stream redis_client.set(f"channel_stream:{self.id}", stream.id) redis_client.set(f"stream_profile:{stream.id}", profile.id) @@ -358,17 +432,23 @@ class Channel(models.Model): if profile.max_streams > 0: redis_client.incr(profile_connections_key) - return stream.id, profile.id, None # Return newly assigned stream and matched profile + return ( + stream.id, + profile.id, + None, + ) # Return newly assigned stream and matched profile else: # This profile is at max connections has_streams_but_maxed_out = True - logger.debug(f"Profile {profile.id} at max connections: {current_connections}/{profile.max_streams}") + logger.debug( + f"Profile {profile.id} at max connections: {current_connections}/{profile.max_streams}" + ) # No available streams - determine specific reason if has_streams_but_maxed_out: - error_reason = "All M3U profiles have reached maximum connection limits" + error_reason = "All active M3U profiles have reached maximum connection limits" elif has_active_profiles: - error_reason = "No compatible profile found for any assigned stream" + error_reason = "No compatible active profile found for any assigned stream" else: error_reason = "No active profiles found for any assigned stream" @@ -388,7 +468,9 @@ class Channel(models.Model): redis_client.delete(f"channel_stream:{self.id}") # Remove active stream stream_id = int(stream_id) - logger.debug(f"Found stream ID {stream_id} associated with channel stream {self.id}") + logger.debug( + f"Found stream ID {stream_id} associated with channel stream {self.id}" + ) # Get the matched profile for cleanup profile_id = redis_client.get(f"stream_profile:{stream_id}") @@ -399,7 +481,9 @@ class Channel(models.Model): redis_client.delete(f"stream_profile:{stream_id}") # Remove profile association profile_id = int(profile_id) - logger.debug(f"Found profile ID {profile_id} associated with stream {stream_id}") + logger.debug( + f"Found profile ID {profile_id} associated with stream {stream_id}" + ) profile_connections_key = 
f"profile_connections:{profile_id}" @@ -452,20 +536,26 @@ class Channel(models.Model): # Increment connection count for new profile new_profile_connections_key = f"profile_connections:{new_profile_id}" redis_client.incr(new_profile_connections_key) - logger.info(f"Updated stream {stream_id} profile from {current_profile_id} to {new_profile_id}") + logger.info( + f"Updated stream {stream_id} profile from {current_profile_id} to {new_profile_id}" + ) return True class ChannelProfile(models.Model): name = models.CharField(max_length=100, unique=True) + class ChannelProfileMembership(models.Model): channel_profile = models.ForeignKey(ChannelProfile, on_delete=models.CASCADE) channel = models.ForeignKey(Channel, on_delete=models.CASCADE) - enabled = models.BooleanField(default=True) # Track if the channel is enabled for this group + enabled = models.BooleanField( + default=True + ) # Track if the channel is enabled for this group class Meta: - unique_together = ('channel_profile', 'channel') + unique_together = ("channel_profile", "channel") + class ChannelStream(models.Model): channel = models.ForeignKey(Channel, on_delete=models.CASCADE) @@ -473,27 +563,35 @@ class ChannelStream(models.Model): order = models.PositiveIntegerField(default=0) # Ordering field class Meta: - ordering = ['order'] # Ensure streams are retrieved in order + ordering = ["order"] # Ensure streams are retrieved in order constraints = [ - models.UniqueConstraint(fields=['channel', 'stream'], name='unique_channel_stream') + models.UniqueConstraint( + fields=["channel", "stream"], name="unique_channel_stream" + ) ] + class ChannelGroupM3UAccount(models.Model): channel_group = models.ForeignKey( - ChannelGroup, - on_delete=models.CASCADE, - related_name='m3u_account' + ChannelGroup, on_delete=models.CASCADE, related_name="m3u_accounts" ) m3u_account = models.ForeignKey( - M3UAccount, - on_delete=models.CASCADE, - related_name='channel_group' + M3UAccount, on_delete=models.CASCADE, related_name="channel_group" ) - custom_properties = models.TextField(null=True, blank=True) + custom_properties = models.JSONField(default=dict, blank=True, null=True) enabled = models.BooleanField(default=True) + auto_channel_sync = models.BooleanField( + default=False, + help_text='Automatically create/delete channels to match streams in this group' + ) + auto_sync_channel_start = models.FloatField( + null=True, + blank=True, + help_text='Starting channel number for auto-created channels in this group' + ) class Meta: - unique_together = ('channel_group', 'm3u_account') + unique_together = ("channel_group", "m3u_account") def __str__(self): return f"{self.channel_group.name} - {self.m3u_account.name} (Enabled: {self.enabled})" @@ -506,12 +604,47 @@ class Logo(models.Model): def __str__(self): return self.name + class Recording(models.Model): - channel = models.ForeignKey("Channel", on_delete=models.CASCADE, related_name="recordings") + channel = models.ForeignKey( + "Channel", on_delete=models.CASCADE, related_name="recordings" + ) start_time = models.DateTimeField() end_time = models.DateTimeField() task_id = models.CharField(max_length=255, null=True, blank=True) - custom_properties = models.TextField(null=True, blank=True) + custom_properties = models.JSONField(default=dict, blank=True, null=True) def __str__(self): return f"{self.channel.name} - {self.start_time} to {self.end_time}" + + +class RecurringRecordingRule(models.Model): + """Rule describing a recurring manual DVR schedule.""" + + channel = models.ForeignKey( + "Channel", + 
on_delete=models.CASCADE, + related_name="recurring_rules", + ) + days_of_week = models.JSONField(default=list) + start_time = models.TimeField() + end_time = models.TimeField() + enabled = models.BooleanField(default=True) + name = models.CharField(max_length=255, blank=True) + start_date = models.DateField(null=True, blank=True) + end_date = models.DateField(null=True, blank=True) + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + + class Meta: + ordering = ["channel", "start_time"] + + def __str__(self): + channel_name = getattr(self.channel, "name", str(self.channel_id)) + return f"Recurring rule for {channel_name}" + + def cleaned_days(self): + try: + return sorted({int(d) for d in (self.days_of_week or []) if 0 <= int(d) <= 6}) + except Exception: + return [] diff --git a/apps/channels/serializers.py b/apps/channels/serializers.py index 5423037f..62c9650d 100644 --- a/apps/channels/serializers.py +++ b/apps/channels/serializers.py @@ -1,108 +1,225 @@ +import json +from datetime import datetime + from rest_framework import serializers -from .models import Stream, Channel, ChannelGroup, ChannelStream, ChannelGroupM3UAccount, Logo, ChannelProfile, ChannelProfileMembership, Recording +from .models import ( + Stream, + Channel, + ChannelGroup, + ChannelStream, + ChannelGroupM3UAccount, + Logo, + ChannelProfile, + ChannelProfileMembership, + Recording, + RecurringRecordingRule, +) from apps.epg.serializers import EPGDataSerializer from core.models import StreamProfile from apps.epg.models import EPGData from django.urls import reverse from rest_framework import serializers from django.utils import timezone +from core.utils import validate_flexible_url + class LogoSerializer(serializers.ModelSerializer): cache_url = serializers.SerializerMethodField() + channel_count = serializers.SerializerMethodField() + is_used = serializers.SerializerMethodField() + channel_names = serializers.SerializerMethodField() class Meta: model = Logo - fields = ['id', 'name', 'url', 'cache_url'] + fields = ["id", "name", "url", "cache_url", "channel_count", "is_used", "channel_names"] + + def validate_url(self, value): + """Validate that the URL is unique for creation or update""" + if self.instance and self.instance.url == value: + return value + + if Logo.objects.filter(url=value).exists(): + raise serializers.ValidationError("A logo with this URL already exists.") + + return value + + def create(self, validated_data): + """Handle logo creation with proper URL validation""" + return Logo.objects.create(**validated_data) + + def update(self, instance, validated_data): + """Handle logo updates""" + for attr, value in validated_data.items(): + setattr(instance, attr, value) + instance.save() + return instance def get_cache_url(self, obj): # return f"/api/channels/logos/{obj.id}/cache/" - request = self.context.get('request') + request = self.context.get("request") if request: - return request.build_absolute_uri(reverse('api:channels:logo-cache', args=[obj.id])) - return reverse('api:channels:logo-cache', args=[obj.id]) + return request.build_absolute_uri( + reverse("api:channels:logo-cache", args=[obj.id]) + ) + return reverse("api:channels:logo-cache", args=[obj.id]) + + def get_channel_count(self, obj): + """Get the number of channels using this logo""" + return obj.channels.count() + + def get_is_used(self, obj): + """Check if this logo is used by any channels""" + return obj.channels.exists() + + def get_channel_names(self, obj): + """Get the names of 
channels using this logo (limited to first 5)""" + names = [] + + # Get channel names + channels = obj.channels.all()[:5] + for channel in channels: + names.append(f"Channel: {channel.name}") + + # Calculate total count for "more" message + total_count = self.get_channel_count(obj) + if total_count > 5: + names.append(f"...and {total_count - 5} more") + + return names + # # Stream # class StreamSerializer(serializers.ModelSerializer): + url = serializers.CharField( + required=False, + allow_blank=True, + allow_null=True, + validators=[validate_flexible_url] + ) stream_profile_id = serializers.PrimaryKeyRelatedField( queryset=StreamProfile.objects.all(), - source='stream_profile', + source="stream_profile", allow_null=True, - required=False + required=False, ) - read_only_fields = ['is_custom', 'm3u_account', 'stream_hash'] + read_only_fields = ["is_custom", "m3u_account", "stream_hash"] class Meta: model = Stream fields = [ - 'id', - 'name', - 'url', - 'm3u_account', # Uncomment if using M3U fields - 'logo_url', - 'tvg_id', - 'local_file', - 'current_viewers', - 'updated_at', - 'last_seen', - 'stream_profile_id', - 'is_custom', - 'channel_group', - 'stream_hash', + "id", + "name", + "url", + "m3u_account", # Uncomment if using M3U fields + "logo_url", + "tvg_id", + "local_file", + "current_viewers", + "updated_at", + "last_seen", + "stream_profile_id", + "is_custom", + "channel_group", + "stream_hash", + "stream_stats", + "stream_stats_updated_at", ] def get_fields(self): fields = super().get_fields() # Unable to edit specific properties if this stream was created from an M3U account - if self.instance and getattr(self.instance, 'm3u_account', None) and not self.instance.is_custom: - fields['id'].read_only = True - fields['name'].read_only = True - fields['url'].read_only = True - fields['m3u_account'].read_only = True - fields['tvg_id'].read_only = True - fields['channel_group'].read_only = True - + if ( + self.instance + and getattr(self.instance, "m3u_account", None) + and not self.instance.is_custom + ): + fields["id"].read_only = True + fields["name"].read_only = True + fields["url"].read_only = True + fields["m3u_account"].read_only = True + fields["tvg_id"].read_only = True + fields["channel_group"].read_only = True return fields +class ChannelGroupM3UAccountSerializer(serializers.ModelSerializer): + m3u_accounts = serializers.IntegerField(source="m3u_accounts.id", read_only=True) + enabled = serializers.BooleanField() + auto_channel_sync = serializers.BooleanField(default=False) + auto_sync_channel_start = serializers.FloatField(allow_null=True, required=False) + custom_properties = serializers.JSONField(required=False) + + class Meta: + model = ChannelGroupM3UAccount + fields = ["m3u_accounts", "channel_group", "enabled", "auto_channel_sync", "auto_sync_channel_start", "custom_properties"] + + def to_representation(self, instance): + data = super().to_representation(instance) + + custom_props = instance.custom_properties or {} + + return data + + def to_internal_value(self, data): + # Accept both dict and JSON string for custom_properties (for backward compatibility) + val = data.get("custom_properties") + if isinstance(val, str): + try: + data["custom_properties"] = json.loads(val) + except Exception: + pass + + return super().to_internal_value(data) + # # Channel Group # class ChannelGroupSerializer(serializers.ModelSerializer): + channel_count = serializers.IntegerField(read_only=True) + m3u_account_count = serializers.IntegerField(read_only=True) + m3u_accounts = 
ChannelGroupM3UAccountSerializer( + many=True, + read_only=True + ) + class Meta: model = ChannelGroup - fields = ['id', 'name'] + fields = ["id", "name", "channel_count", "m3u_account_count", "m3u_accounts"] + class ChannelProfileSerializer(serializers.ModelSerializer): channels = serializers.SerializerMethodField() class Meta: model = ChannelProfile - fields = ['id', 'name', 'channels'] + fields = ["id", "name", "channels"] def get_channels(self, obj): - memberships = ChannelProfileMembership.objects.filter(channel_profile=obj, enabled=True) - return [ - membership.channel.id - for membership in memberships - ] + memberships = ChannelProfileMembership.objects.filter( + channel_profile=obj, enabled=True + ) + return [membership.channel.id for membership in memberships] + class ChannelProfileMembershipSerializer(serializers.ModelSerializer): class Meta: model = ChannelProfileMembership - fields = ['channel', 'enabled'] + fields = ["channel", "enabled"] + class ChanneProfilelMembershipUpdateSerializer(serializers.Serializer): channel_id = serializers.IntegerField() # Ensure channel_id is an integer enabled = serializers.BooleanField() + class BulkChannelProfileMembershipSerializer(serializers.Serializer): channels = serializers.ListField( child=ChanneProfilelMembershipUpdateSerializer(), # Use the nested serializer - allow_empty=False + allow_empty=False, ) def validate_channels(self, value): @@ -110,6 +227,7 @@ class BulkChannelProfileMembershipSerializer(serializers.Serializer): raise serializers.ValidationError("At least one channel must be provided.") return value + # # Channel # @@ -119,14 +237,10 @@ class ChannelSerializer(serializers.ModelSerializer): channel_number = serializers.FloatField( allow_null=True, required=False, - error_messages={ - 'invalid': 'Channel number must be a valid decimal number.' 
- } + error_messages={"invalid": "Channel number must be a valid decimal number."}, ) channel_group_id = serializers.PrimaryKeyRelatedField( - queryset=ChannelGroup.objects.all(), - source="channel_group", - required=False + queryset=ChannelGroup.objects.all(), source="channel_group", required=False ) epg_data_id = serializers.PrimaryKeyRelatedField( queryset=EPGData.objects.all(), @@ -137,41 +251,49 @@ class ChannelSerializer(serializers.ModelSerializer): stream_profile_id = serializers.PrimaryKeyRelatedField( queryset=StreamProfile.objects.all(), - source='stream_profile', - allow_null=True, - required=False - ) - - streams = serializers.PrimaryKeyRelatedField(queryset=Stream.objects.all(), many=True, required=False) - - logo_id = serializers.PrimaryKeyRelatedField( - queryset=Logo.objects.all(), - source='logo', + source="stream_profile", allow_null=True, required=False, ) + streams = serializers.PrimaryKeyRelatedField( + queryset=Stream.objects.all(), many=True, required=False + ) + + logo_id = serializers.PrimaryKeyRelatedField( + queryset=Logo.objects.all(), + source="logo", + allow_null=True, + required=False, + ) + + auto_created_by_name = serializers.SerializerMethodField() + class Meta: model = Channel fields = [ - 'id', - 'channel_number', - 'name', - 'channel_group_id', - 'tvg_id', - 'tvc_guide_stationid', - 'epg_data_id', - 'streams', - 'stream_profile_id', - 'uuid', - 'logo_id', + "id", + "channel_number", + "name", + "channel_group_id", + "tvg_id", + "tvc_guide_stationid", + "epg_data_id", + "streams", + "stream_profile_id", + "uuid", + "logo_id", + "user_level", + "auto_created", + "auto_created_by", + "auto_created_by_name", ] def to_representation(self, instance): - include_streams = self.context.get('include_streams', False) + include_streams = self.context.get("include_streams", False) if include_streams: - self.fields['streams'] = serializers.SerializerMethodField() + self.fields["streams"] = serializers.SerializerMethodField() return super().to_representation(instance) @@ -180,22 +302,28 @@ class ChannelSerializer(serializers.ModelSerializer): def get_streams(self, obj): """Retrieve ordered stream IDs for GET requests.""" - return StreamSerializer(obj.streams.all().order_by('channelstream__order'), many=True).data + return StreamSerializer( + obj.streams.all().order_by("channelstream__order"), many=True + ).data def create(self, validated_data): - streams = validated_data.pop('streams', []) - channel_number = validated_data.pop('channel_number', Channel.get_next_available_channel_number()) + streams = validated_data.pop("streams", []) + channel_number = validated_data.pop( + "channel_number", Channel.get_next_available_channel_number() + ) validated_data["channel_number"] = channel_number channel = Channel.objects.create(**validated_data) # Add streams in the specified order for index, stream in enumerate(streams): - ChannelStream.objects.create(channel=channel, stream_id=stream.id, order=index) + ChannelStream.objects.create( + channel=channel, stream_id=stream.id, order=index + ) return channel def update(self, instance, validated_data): - streams = validated_data.pop('streams', None) + streams = validated_data.pop("streams", None) # Update standard fields for attr, value in validated_data.items(): @@ -206,8 +334,7 @@ class ChannelSerializer(serializers.ModelSerializer): if streams is not None: # Normalize stream IDs normalized_ids = [ - stream.id if hasattr(stream, "id") else stream - for stream in streams + stream.id if hasattr(stream, "id") else stream for stream 
in streams ] print(normalized_ids) @@ -234,9 +361,7 @@ class ChannelSerializer(serializers.ModelSerializer): cs.save(update_fields=["order"]) else: ChannelStream.objects.create( - channel=instance, - stream_id=stream_id, - order=order + channel=instance, stream_id=stream_id, order=order ) return instance @@ -250,34 +375,71 @@ class ChannelSerializer(serializers.ModelSerializer): # Ensure it's processed as a float return float(value) except (ValueError, TypeError): - raise serializers.ValidationError("Channel number must be a valid decimal number.") + raise serializers.ValidationError( + "Channel number must be a valid decimal number." + ) def validate_stream_profile(self, value): """Handle special case where empty/0 values mean 'use default' (null)""" - if value == '0' or value == 0 or value == '' or value is None: + if value == "0" or value == 0 or value == "" or value is None: return None return value # PrimaryKeyRelatedField will handle the conversion to object -class ChannelGroupM3UAccountSerializer(serializers.ModelSerializer): - enabled = serializers.BooleanField() - - class Meta: - model = ChannelGroupM3UAccount - fields = ['id', 'channel_group', 'enabled'] - - # Optionally, if you only need the id of the ChannelGroup, you can customize it like this: - # channel_group = serializers.PrimaryKeyRelatedField(queryset=ChannelGroup.objects.all()) + def get_auto_created_by_name(self, obj): + """Get the name of the M3U account that auto-created this channel.""" + if obj.auto_created_by: + return obj.auto_created_by.name + return None class RecordingSerializer(serializers.ModelSerializer): class Meta: model = Recording - fields = '__all__' - read_only_fields = ['task_id'] + fields = "__all__" + read_only_fields = ["task_id"] def validate(self, data): - start_time = data.get('start_time') - end_time = data.get('end_time') + from core.models import CoreSettings + start_time = data.get("start_time") + end_time = data.get("end_time") + + if start_time and timezone.is_naive(start_time): + start_time = timezone.make_aware(start_time, timezone.get_current_timezone()) + data["start_time"] = start_time + if end_time and timezone.is_naive(end_time): + end_time = timezone.make_aware(end_time, timezone.get_current_timezone()) + data["end_time"] = end_time + + # If this is an EPG-based recording (program provided), apply global pre/post offsets + try: + cp = data.get("custom_properties") or {} + is_epg_based = isinstance(cp, dict) and isinstance(cp.get("program"), (dict,)) + except Exception: + is_epg_based = False + + if is_epg_based and start_time and end_time: + try: + pre_min = int(CoreSettings.get_dvr_pre_offset_minutes()) + except Exception: + pre_min = 0 + try: + post_min = int(CoreSettings.get_dvr_post_offset_minutes()) + except Exception: + post_min = 0 + from datetime import timedelta + try: + if pre_min and pre_min > 0: + start_time = start_time - timedelta(minutes=pre_min) + except Exception: + pass + try: + if post_min and post_min > 0: + end_time = end_time + timedelta(minutes=post_min) + except Exception: + pass + # write back adjusted times so scheduling uses them + data["start_time"] = start_time + data["end_time"] = end_time now = timezone.now() # timezone-aware current time @@ -286,8 +448,61 @@ class RecordingSerializer(serializers.ModelSerializer): if start_time < now: # Optional: Adjust start_time if it's in the past but end_time is in the future - data['start_time'] = now # or: timezone.now() + timedelta(seconds=1) - if end_time <= data['start_time']: + data["start_time"] = now # 
or: timezone.now() + timedelta(seconds=1) + if end_time <= data["start_time"]: raise serializers.ValidationError("End time must be after start time.") return data + + +class RecurringRecordingRuleSerializer(serializers.ModelSerializer): + class Meta: + model = RecurringRecordingRule + fields = "__all__" + read_only_fields = ["created_at", "updated_at"] + + def validate_days_of_week(self, value): + if not value: + raise serializers.ValidationError("Select at least one day of the week") + cleaned = [] + for entry in value: + try: + iv = int(entry) + except (TypeError, ValueError): + raise serializers.ValidationError("Days of week must be integers 0-6") + if iv < 0 or iv > 6: + raise serializers.ValidationError("Days of week must be between 0 (Monday) and 6 (Sunday)") + cleaned.append(iv) + return sorted(set(cleaned)) + + def validate(self, attrs): + start = attrs.get("start_time") or getattr(self.instance, "start_time", None) + end = attrs.get("end_time") or getattr(self.instance, "end_time", None) + start_date = attrs.get("start_date") if "start_date" in attrs else getattr(self.instance, "start_date", None) + end_date = attrs.get("end_date") if "end_date" in attrs else getattr(self.instance, "end_date", None) + if start_date is None: + existing_start = getattr(self.instance, "start_date", None) + if existing_start is None: + raise serializers.ValidationError("Start date is required") + if start_date and end_date and end_date < start_date: + raise serializers.ValidationError("End date must be on or after start date") + if end_date is None: + existing_end = getattr(self.instance, "end_date", None) + if existing_end is None: + raise serializers.ValidationError("End date is required") + if start and end and start_date and end_date: + start_dt = datetime.combine(start_date, start) + end_dt = datetime.combine(end_date, end) + if end_dt <= start_dt: + raise serializers.ValidationError("End datetime must be after start datetime") + elif start and end and end == start: + raise serializers.ValidationError("End time must be different from start time") + # Normalize empty strings to None for dates + if attrs.get("end_date") == "": + attrs["end_date"] = None + if attrs.get("start_date") == "": + attrs["start_date"] = None + return super().validate(attrs) + + def create(self, validated_data): + return super().create(validated_data) diff --git a/apps/channels/signals.py b/apps/channels/signals.py index f98c1c97..27b361ba 100644 --- a/apps/channels/signals.py +++ b/apps/channels/signals.py @@ -8,7 +8,7 @@ from .models import Channel, Stream, ChannelProfile, ChannelProfileMembership, R from apps.m3u.models import M3UAccount from apps.epg.tasks import parse_programs_for_tvg_id import logging, requests, time -from .tasks import run_recording +from .tasks import run_recording, prefetch_recording_artwork from django.utils.timezone import now, is_aware, make_aware from datetime import timedelta @@ -45,6 +45,20 @@ def set_default_m3u_account(sender, instance, **kwargs): else: raise ValueError("No default M3UAccount found.") +@receiver(post_save, sender=Stream) +def generate_custom_stream_hash(sender, instance, created, **kwargs): + """ + Generate a stable stream_hash for custom streams after creation. + Uses the stream's ID to ensure the hash never changes even if name/url is edited. 
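+    For example, a custom stream with ID 42 would hash the string "custom_stream_42" with SHA-256.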
+ """ + if instance.is_custom and not instance.stream_hash and created: + import hashlib + # Use stream ID for a stable, unique hash that never changes + unique_string = f"custom_stream_{instance.id}" + instance.stream_hash = hashlib.sha256(unique_string.encode()).hexdigest() + # Use update to avoid triggering signals again + Stream.objects.filter(id=instance.id).update(stream_hash=instance.stream_hash) + @receiver(post_save, sender=Channel) def refresh_epg_programs(sender, instance, created, **kwargs): """ @@ -62,15 +76,6 @@ def refresh_epg_programs(sender, instance, created, **kwargs): logger.info(f"New channel {instance.id} ({instance.name}) created with EPG data, refreshing program data") parse_programs_for_tvg_id.delay(instance.epg_data.id) -@receiver(post_save, sender=Channel) -def add_new_channel_to_groups(sender, instance, created, **kwargs): - if created: - profiles = ChannelProfile.objects.all() - ChannelProfileMembership.objects.bulk_create([ - ChannelProfileMembership(channel_profile=profile, channel=instance) - for profile in profiles - ]) - @receiver(post_save, sender=ChannelProfile) def create_profile_memberships(sender, instance, created, **kwargs): if created: @@ -82,8 +87,9 @@ def create_profile_memberships(sender, instance, created, **kwargs): def schedule_recording_task(instance): eta = instance.start_time + # Pass recording_id first so task can persist metadata to the correct row task = run_recording.apply_async( - args=[instance.channel_id, str(instance.start_time), str(instance.end_time)], + args=[instance.id, instance.channel_id, str(instance.start_time), str(instance.end_time)], eta=eta ) return task.id @@ -132,6 +138,11 @@ def schedule_task_on_save(sender, instance, created, **kwargs): instance.save(update_fields=['task_id']) else: print("Start time is in the past. Not scheduling.") + # Kick off poster/artwork prefetch to enrich Upcoming cards + try: + prefetch_recording_artwork.apply_async(args=[instance.id], countdown=1) + except Exception as e: + print("Error scheduling artwork prefetch:", e) except Exception as e: import traceback print("Error in post_save signal:", e) diff --git a/apps/channels/tasks.py b/apps/channels/tasks.py index 6217a4ca..3943cf16 100755 --- a/apps/channels/tasks.py +++ b/apps/channels/tasks.py @@ -1,16 +1,20 @@ # apps/channels/tasks.py import logging import os +import select import re import requests import time import json import subprocess -from datetime import datetime +import signal +from zoneinfo import ZoneInfo +from datetime import datetime, timedelta import gc from celery import shared_task from django.utils.text import slugify +from rapidfuzz import fuzz from apps.channels.models import Channel from apps.epg.models import EPGData @@ -22,9 +26,108 @@ from asgiref.sync import async_to_sync from asgiref.sync import async_to_sync from channels.layers import get_channel_layer import tempfile +from urllib.parse import quote logger = logging.getLogger(__name__) +# PostgreSQL btree index has a limit of ~2704 bytes (1/3 of 8KB page size) +# We use 2000 as a safe maximum to account for multibyte characters +def validate_logo_url(logo_url, max_length=2000): + """ + Fast validation for logo URLs during bulk creation. + Returns None if URL is too long (would exceed PostgreSQL btree index limit), + original URL otherwise. + + PostgreSQL btree indexes have a maximum size of ~2704 bytes. URLs longer than + this cannot be indexed and would cause database errors. These are typically + base64-encoded images embedded in URLs. 
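+    For example, a multi-kilobyte base64 data URL is skipped (returns None), while a normal http(s) logo URL is returned unchanged.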
+ """ + if logo_url and len(logo_url) > max_length: + logger.warning(f"Logo URL too long ({len(logo_url)} > {max_length}), skipping: {logo_url[:100]}...") + return None + return logo_url + +def send_epg_matching_progress(total_channels, matched_channels, current_channel_name="", stage="matching"): + """ + Send EPG matching progress via WebSocket + """ + try: + channel_layer = get_channel_layer() + if channel_layer: + progress_data = { + 'type': 'epg_matching_progress', + 'total': total_channels, + 'matched': len(matched_channels) if isinstance(matched_channels, list) else matched_channels, + 'remaining': total_channels - (len(matched_channels) if isinstance(matched_channels, list) else matched_channels), + 'current_channel': current_channel_name, + 'stage': stage, + 'progress_percent': round((len(matched_channels) if isinstance(matched_channels, list) else matched_channels) / total_channels * 100, 1) if total_channels > 0 else 0 + } + + async_to_sync(channel_layer.group_send)( + "updates", + { + "type": "update", + "data": { + "type": "epg_matching_progress", + **progress_data + } + } + ) + except Exception as e: + logger.warning(f"Failed to send EPG matching progress: {e}") + +# Lazy loading for ML models - only imported/loaded when needed +_ml_model_cache = { + 'sentence_transformer': None +} + +def get_sentence_transformer(): + """Lazy load the sentence transformer model only when needed""" + if _ml_model_cache['sentence_transformer'] is None: + try: + from sentence_transformers import SentenceTransformer + from sentence_transformers import util + + model_name = "sentence-transformers/all-MiniLM-L6-v2" + cache_dir = "/data/models" + + # Check environment variable to disable downloads + disable_downloads = os.environ.get('DISABLE_ML_DOWNLOADS', 'false').lower() == 'true' + + if disable_downloads: + # Check if model exists before attempting to load + hf_model_path = os.path.join(cache_dir, f"models--{model_name.replace('/', '--')}") + if not os.path.exists(hf_model_path): + logger.warning("ML model not found and downloads disabled (DISABLE_ML_DOWNLOADS=true). Skipping ML matching.") + return None, None + + # Ensure cache directory exists + os.makedirs(cache_dir, exist_ok=True) + + # Let sentence-transformers handle all cache detection and management + logger.info(f"Loading sentence transformer model (cache: {cache_dir})") + _ml_model_cache['sentence_transformer'] = SentenceTransformer( + model_name, + cache_folder=cache_dir + ) + + return _ml_model_cache['sentence_transformer'], util + except ImportError: + logger.warning("sentence-transformers not available - ML-enhanced matching disabled") + return None, None + except Exception as e: + logger.error(f"Failed to load sentence transformer: {e}") + return None, None + else: + from sentence_transformers import util + return _ml_model_cache['sentence_transformer'], util + +# ML matching thresholds (same as original script) +BEST_FUZZY_THRESHOLD = 85 +LOWER_FUZZY_THRESHOLD = 40 +EMBED_SIM_THRESHOLD = 0.65 + # Words we remove to help with fuzzy + embedding matching COMMON_EXTRANEOUS_WORDS = [ "tv", "channel", "network", "television", @@ -47,138 +150,367 @@ def normalize_name(name: str) -> str: norm = name.lower() norm = re.sub(r"\[.*?\]", "", norm) + + # Extract and preserve important call signs from parentheses before removing them + # This captures call signs like (KVLY), (KING), (KARE), etc. 
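+    # e.g. "NBC 11 (KVLY) - Fargo" keeps "kvly" in the normalized result instead of dropping it with the parentheses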
+ call_sign_match = re.search(r"\(([A-Z]{3,5})\)", name) + preserved_call_sign = "" + if call_sign_match: + preserved_call_sign = " " + call_sign_match.group(1).lower() + + # Now remove all parentheses content norm = re.sub(r"\(.*?\)", "", norm) + + # Add back the preserved call sign + norm = norm + preserved_call_sign + norm = re.sub(r"[^\w\s]", "", norm) tokens = norm.split() tokens = [t for t in tokens if t not in COMMON_EXTRANEOUS_WORDS] norm = " ".join(tokens).strip() return norm +def match_channels_to_epg(channels_data, epg_data, region_code=None, use_ml=True, send_progress=True): + """ + EPG matching logic that finds the best EPG matches for channels using + multiple matching strategies including fuzzy matching and ML models. + + Automatically uses conservative thresholds for bulk matching (multiple channels) + to avoid bad matches that create user cleanup work, and aggressive thresholds + for single channel matching where users specifically requested a match attempt. + """ + channels_to_update = [] + matched_channels = [] + total_channels = len(channels_data) + + # Send initial progress + if send_progress: + send_epg_matching_progress(total_channels, 0, stage="starting") + + # Try to get ML models if requested (but don't load yet - lazy loading) + st_model, util = None, None + epg_embeddings = None + ml_available = use_ml + + # Automatically determine matching strategy based on number of channels + is_bulk_matching = len(channels_data) > 1 + + # Adjust matching thresholds based on operation type + if is_bulk_matching: + # Conservative thresholds for bulk matching to avoid creating cleanup work + FUZZY_HIGH_CONFIDENCE = 90 # Only very high fuzzy scores + FUZZY_MEDIUM_CONFIDENCE = 70 # Higher threshold for ML enhancement + ML_HIGH_CONFIDENCE = 0.75 # Higher ML confidence required + ML_LAST_RESORT = 0.65 # More conservative last resort + FUZZY_LAST_RESORT_MIN = 50 # Higher fuzzy minimum for last resort + logger.info(f"Using conservative thresholds for bulk matching ({total_channels} channels)") + else: + # More aggressive thresholds for single channel matching (user requested specific match) + FUZZY_HIGH_CONFIDENCE = 85 # Original threshold + FUZZY_MEDIUM_CONFIDENCE = 40 # Original threshold + ML_HIGH_CONFIDENCE = 0.65 # Original threshold + ML_LAST_RESORT = 0.50 # Original desperate threshold + FUZZY_LAST_RESORT_MIN = 20 # Original minimum + logger.info("Using aggressive thresholds for single channel matching") # Process each channel + for index, chan in enumerate(channels_data): + normalized_tvg_id = chan.get("tvg_id", "") + fallback_name = chan["tvg_id"].strip() if chan["tvg_id"] else chan["name"] + + # Send progress update every 5 channels or for the first few + if send_progress and (index < 5 or index % 5 == 0 or index == total_channels - 1): + send_epg_matching_progress( + total_channels, + len(matched_channels), + current_channel_name=chan["name"][:50], # Truncate long names + stage="matching" + ) + normalized_tvg_id = chan.get("tvg_id", "") + fallback_name = chan["tvg_id"].strip() if chan["tvg_id"] else chan["name"] + + # Step 1: Exact TVG ID match + epg_by_tvg_id = next((epg for epg in epg_data if epg["tvg_id"] == normalized_tvg_id), None) + if normalized_tvg_id and epg_by_tvg_id: + chan["epg_data_id"] = epg_by_tvg_id["id"] + channels_to_update.append(chan) + matched_channels.append((chan['id'], fallback_name, epg_by_tvg_id["tvg_id"])) + logger.info(f"Channel {chan['id']} '{fallback_name}' => EPG found by exact tvg_id={epg_by_tvg_id['tvg_id']}") + continue + + # Step 2: 
Secondary TVG ID check (legacy compatibility) + if chan["tvg_id"]: + epg_match = [epg["id"] for epg in epg_data if epg["tvg_id"] == chan["tvg_id"]] + if epg_match: + chan["epg_data_id"] = epg_match[0] + channels_to_update.append(chan) + matched_channels.append((chan['id'], fallback_name, chan["tvg_id"])) + logger.info(f"Channel {chan['id']} '{chan['name']}' => EPG found by secondary tvg_id={chan['tvg_id']}") + continue + + # Step 2.5: Exact Gracenote ID match + normalized_gracenote_id = chan.get("gracenote_id", "") + if normalized_gracenote_id: + epg_by_gracenote_id = next((epg for epg in epg_data if epg["tvg_id"] == normalized_gracenote_id), None) + if epg_by_gracenote_id: + chan["epg_data_id"] = epg_by_gracenote_id["id"] + channels_to_update.append(chan) + matched_channels.append((chan['id'], fallback_name, f"gracenote:{epg_by_gracenote_id['tvg_id']}")) + logger.info(f"Channel {chan['id']} '{fallback_name}' => EPG found by exact gracenote_id={normalized_gracenote_id}") + continue + + # Step 3: Name-based fuzzy matching + if not chan["norm_chan"]: + logger.debug(f"Channel {chan['id']} '{chan['name']}' => empty after normalization, skipping") + continue + + best_score = 0 + best_epg = None + + # Debug: show what we're matching against + logger.debug(f"Fuzzy matching '{chan['norm_chan']}' against EPG entries...") + + # Find best fuzzy match + for row in epg_data: + if not row.get("norm_name"): + continue + + base_score = fuzz.ratio(chan["norm_chan"], row["norm_name"]) + bonus = 0 + + # Apply region-based bonus/penalty + if region_code and row.get("tvg_id"): + combined_text = row["tvg_id"].lower() + " " + row["name"].lower() + dot_regions = re.findall(r'\.([a-z]{2})', combined_text) + + if dot_regions: + if region_code in dot_regions: + bonus = 15 # Bigger bonus for matching region + else: + bonus = -15 # Penalty for different region + elif region_code in combined_text: + bonus = 10 + + score = base_score + bonus + + # Debug the best few matches + if score > 50: # Only show decent matches + logger.debug(f" EPG '{row['name']}' (norm: '{row['norm_name']}') => score: {score} (base: {base_score}, bonus: {bonus})") + + if score > best_score: + best_score = score + best_epg = row + + # Log the best score we found + if best_epg: + logger.info(f"Channel {chan['id']} '{chan['name']}' => best match: '{best_epg['name']}' (score: {best_score})") + else: + logger.debug(f"Channel {chan['id']} '{chan['name']}' => no EPG entries with valid norm_name found") + continue + + # High confidence match - accept immediately + if best_score >= FUZZY_HIGH_CONFIDENCE: + chan["epg_data_id"] = best_epg["id"] + channels_to_update.append(chan) + matched_channels.append((chan['id'], chan['name'], best_epg["tvg_id"])) + logger.info(f"Channel {chan['id']} '{chan['name']}' => matched tvg_id={best_epg['tvg_id']} (score={best_score})") + + # Medium confidence - use ML if available (lazy load models here) + elif best_score >= FUZZY_MEDIUM_CONFIDENCE and ml_available: + # Lazy load ML models only when we actually need them + if st_model is None: + st_model, util = get_sentence_transformer() + + # Lazy generate embeddings only when we actually need them + if epg_embeddings is None and st_model and any(row.get("norm_name") for row in epg_data): + try: + logger.info("Generating embeddings for EPG data using ML model (lazy loading)") + epg_embeddings = st_model.encode( + [row["norm_name"] for row in epg_data if row.get("norm_name")], + convert_to_tensor=True + ) + except Exception as e: + logger.warning(f"Failed to generate 
embeddings: {e}") + epg_embeddings = None + + if epg_embeddings is not None and st_model: + try: + # Generate embedding for this channel + chan_embedding = st_model.encode(chan["norm_chan"], convert_to_tensor=True) + + # Calculate similarity with all EPG embeddings + sim_scores = util.cos_sim(chan_embedding, epg_embeddings)[0] + top_index = int(sim_scores.argmax()) + top_value = float(sim_scores[top_index]) + + if top_value >= ML_HIGH_CONFIDENCE: + # Find the EPG entry that corresponds to this embedding index + epg_with_names = [epg for epg in epg_data if epg.get("norm_name")] + matched_epg = epg_with_names[top_index] + + chan["epg_data_id"] = matched_epg["id"] + channels_to_update.append(chan) + matched_channels.append((chan['id'], chan['name'], matched_epg["tvg_id"])) + logger.info(f"Channel {chan['id']} '{chan['name']}' => matched EPG tvg_id={matched_epg['tvg_id']} (fuzzy={best_score}, ML-sim={top_value:.2f})") + else: + logger.info(f"Channel {chan['id']} '{chan['name']}' => fuzzy={best_score}, ML-sim={top_value:.2f} < {ML_HIGH_CONFIDENCE}, trying last resort...") + + # Last resort: try ML with very low fuzzy threshold + if top_value >= ML_LAST_RESORT: # Dynamic last resort threshold + epg_with_names = [epg for epg in epg_data if epg.get("norm_name")] + matched_epg = epg_with_names[top_index] + + chan["epg_data_id"] = matched_epg["id"] + channels_to_update.append(chan) + matched_channels.append((chan['id'], chan['name'], matched_epg["tvg_id"])) + logger.info(f"Channel {chan['id']} '{chan['name']}' => LAST RESORT match EPG tvg_id={matched_epg['tvg_id']} (fuzzy={best_score}, ML-sim={top_value:.2f})") + else: + logger.info(f"Channel {chan['id']} '{chan['name']}' => even last resort ML-sim {top_value:.2f} < {ML_LAST_RESORT}, skipping") + + except Exception as e: + logger.warning(f"ML matching failed for channel {chan['id']}: {e}") + # Fall back to non-ML decision + logger.info(f"Channel {chan['id']} '{chan['name']}' => fuzzy score {best_score} below threshold, skipping") + + # Last resort: Try ML matching even with very low fuzzy scores + elif best_score >= FUZZY_LAST_RESORT_MIN and ml_available: + # Lazy load ML models for last resort attempts + if st_model is None: + st_model, util = get_sentence_transformer() + + # Lazy generate embeddings for last resort attempts + if epg_embeddings is None and st_model and any(row.get("norm_name") for row in epg_data): + try: + logger.info("Generating embeddings for EPG data using ML model (last resort lazy loading)") + epg_embeddings = st_model.encode( + [row["norm_name"] for row in epg_data if row.get("norm_name")], + convert_to_tensor=True + ) + except Exception as e: + logger.warning(f"Failed to generate embeddings for last resort: {e}") + epg_embeddings = None + + if epg_embeddings is not None and st_model: + try: + logger.info(f"Channel {chan['id']} '{chan['name']}' => trying ML as last resort (fuzzy={best_score})") + # Generate embedding for this channel + chan_embedding = st_model.encode(chan["norm_chan"], convert_to_tensor=True) + + # Calculate similarity with all EPG embeddings + sim_scores = util.cos_sim(chan_embedding, epg_embeddings)[0] + top_index = int(sim_scores.argmax()) + top_value = float(sim_scores[top_index]) + + if top_value >= ML_LAST_RESORT: # Dynamic threshold for desperate attempts + # Find the EPG entry that corresponds to this embedding index + epg_with_names = [epg for epg in epg_data if epg.get("norm_name")] + matched_epg = epg_with_names[top_index] + + chan["epg_data_id"] = matched_epg["id"] + 
channels_to_update.append(chan) + matched_channels.append((chan['id'], chan['name'], matched_epg["tvg_id"])) + logger.info(f"Channel {chan['id']} '{chan['name']}' => DESPERATE LAST RESORT match EPG tvg_id={matched_epg['tvg_id']} (fuzzy={best_score}, ML-sim={top_value:.2f})") + else: + logger.info(f"Channel {chan['id']} '{chan['name']}' => desperate last resort ML-sim {top_value:.2f} < {ML_LAST_RESORT}, giving up") + except Exception as e: + logger.warning(f"Last resort ML matching failed for channel {chan['id']}: {e}") + logger.info(f"Channel {chan['id']} '{chan['name']}' => best fuzzy score={best_score} < {FUZZY_MEDIUM_CONFIDENCE}, giving up") + else: + # No ML available or very low fuzzy score + logger.info(f"Channel {chan['id']} '{chan['name']}' => best fuzzy score={best_score} < {FUZZY_MEDIUM_CONFIDENCE}, no ML fallback available") + + # Clean up ML models from memory after matching (infrequent operation) + if _ml_model_cache['sentence_transformer'] is not None: + logger.info("Cleaning up ML models from memory") + _ml_model_cache['sentence_transformer'] = None + gc.collect() + + # Send final progress update + if send_progress: + send_epg_matching_progress( + total_channels, + len(matched_channels), + stage="completed" + ) + + return { + "channels_to_update": channels_to_update, + "matched_channels": matched_channels + } + @shared_task def match_epg_channels(): """ - Goes through all Channels and tries to find a matching EPGData row by: - 1) If channel.tvg_id is valid in EPGData, skip. - 2) If channel has a tvg_id but not found in EPGData, attempt direct EPGData lookup. - 3) Otherwise, perform name-based fuzzy matching with optional region-based bonus. - 4) If a match is found, we set channel.tvg_id - 5) Summarize and log results. + Uses integrated EPG matching instead of external script. + Provides the same functionality with better performance and maintainability. 
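+    Matching runs in order of confidence: exact tvg_id, gracenote station ID,
+    fuzzy name matching, then ML embedding similarity as a fallback.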
""" try: - logger.info("Starting EPG matching logic...") + logger.info("Starting integrated EPG matching...") - # Attempt to retrieve a "preferred-region" if configured + # Get region preference try: region_obj = CoreSettings.objects.get(key="preferred-region") region_code = region_obj.value.strip().lower() except CoreSettings.DoesNotExist: region_code = None - matched_channels = [] - channels_to_update = [] - # Get channels that don't have EPG data assigned channels_without_epg = Channel.objects.filter(epg_data__isnull=True) logger.info(f"Found {channels_without_epg.count()} channels without EPG data") - channels_json = [] + channels_data = [] for channel in channels_without_epg: - # Normalize TVG ID - strip whitespace and convert to lowercase normalized_tvg_id = channel.tvg_id.strip().lower() if channel.tvg_id else "" - if normalized_tvg_id: - logger.info(f"Processing channel {channel.id} '{channel.name}' with TVG ID='{normalized_tvg_id}'") - - channels_json.append({ + normalized_gracenote_id = channel.tvc_guide_stationid.strip().lower() if channel.tvc_guide_stationid else "" + channels_data.append({ "id": channel.id, "name": channel.name, - "tvg_id": normalized_tvg_id, # Use normalized TVG ID - "original_tvg_id": channel.tvg_id, # Keep original for reference + "tvg_id": normalized_tvg_id, + "original_tvg_id": channel.tvg_id, + "gracenote_id": normalized_gracenote_id, + "original_gracenote_id": channel.tvc_guide_stationid, "fallback_name": normalized_tvg_id if normalized_tvg_id else channel.name, - "norm_chan": normalize_name(normalized_tvg_id if normalized_tvg_id else channel.name) + "norm_chan": normalize_name(channel.name) # Always use channel name for fuzzy matching! }) - # Similarly normalize EPG data TVG IDs - epg_json = [] + # Get all EPG data + epg_data = [] for epg in EPGData.objects.all(): normalized_tvg_id = epg.tvg_id.strip().lower() if epg.tvg_id else "" - epg_json.append({ + epg_data.append({ 'id': epg.id, - 'tvg_id': normalized_tvg_id, # Use normalized TVG ID - 'original_tvg_id': epg.tvg_id, # Keep original for reference + 'tvg_id': normalized_tvg_id, + 'original_tvg_id': epg.tvg_id, 'name': epg.name, 'norm_name': normalize_name(epg.name), 'epg_source_id': epg.epg_source.id if epg.epg_source else None, }) - # Log available EPG data TVG IDs for debugging - unique_epg_tvg_ids = set(e['tvg_id'] for e in epg_json if e['tvg_id']) - logger.info(f"Available EPG TVG IDs: {', '.join(sorted(unique_epg_tvg_ids))}") + logger.info(f"Processing {len(channels_data)} channels against {len(epg_data)} EPG entries") - payload = { - "channels": channels_json, - "epg_data": epg_json, - "region_code": region_code, - } - - with tempfile.NamedTemporaryFile(delete=False) as temp_file: - temp_file.write(json.dumps(payload).encode('utf-8')) - temp_file_path = temp_file.name - - # After writing to the file but before subprocess - # Explicitly delete the large data structures - del payload - gc.collect() - - process = subprocess.Popen( - ['python', '/app/scripts/epg_match.py', temp_file_path], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True - ) - - # Log stderr in real-time - for line in iter(process.stderr.readline, ''): - if line: - logger.info(line.strip()) - - process.stderr.close() - stdout, stderr = process.communicate() - - os.remove(temp_file_path) - - if process.returncode != 0: - return f"Failed to process EPG matching: {stderr}" - - result = json.loads(stdout) - # This returns lists of dicts, not model objects + # Run EPG matching with progress updates - automatically uses 
conservative thresholds for bulk operations + result = match_channels_to_epg(channels_data, epg_data, region_code, use_ml=True, send_progress=True) channels_to_update_dicts = result["channels_to_update"] matched_channels = result["matched_channels"] - # Explicitly clean up large objects - del stdout, result - gc.collect() - - # Convert your dict-based 'channels_to_update' into real Channel objects + # Update channels in database if channels_to_update_dicts: - # Extract IDs of the channels that need updates channel_ids = [d["id"] for d in channels_to_update_dicts] - - # Fetch them from DB channels_qs = Channel.objects.filter(id__in=channel_ids) channels_list = list(channels_qs) - # Build a map from channel_id -> epg_data_id (or whatever fields you need) - epg_mapping = { - d["id"]: d["epg_data_id"] for d in channels_to_update_dicts - } + # Create mapping from channel_id to epg_data_id + epg_mapping = {d["id"]: d["epg_data_id"] for d in channels_to_update_dicts} - # Populate each Channel object with the updated epg_data_id + # Update each channel with matched EPG data for channel_obj in channels_list: - # The script sets 'epg_data_id' in the returned dict - # We either assign directly, or fetch the EPGData instance if needed. - channel_obj.epg_data_id = epg_mapping.get(channel_obj.id) + epg_data_id = epg_mapping.get(channel_obj.id) + if epg_data_id: + try: + epg_data_obj = EPGData.objects.get(id=epg_data_id) + channel_obj.epg_data = epg_data_obj + except EPGData.DoesNotExist: + logger.error(f"EPG data {epg_data_id} not found for channel {channel_obj.id}") - # Now we have real model objects, so bulk_update will work + # Bulk update all channels Channel.objects.bulk_update(channels_list, ["epg_data"]) total_matched = len(matched_channels) @@ -189,9 +521,9 @@ def match_epg_channels(): else: logger.info("No new channels were matched.") - logger.info("Finished EPG matching logic.") + logger.info("Finished integrated EPG matching.") - # Send update with additional information for refreshing UI + # Send WebSocket update channel_layer = get_channel_layer() associations = [ {"channel_id": chan["id"], "epg_data_id": chan["epg_data_id"]} @@ -205,32 +537,890 @@ def match_epg_channels(): "data": { "success": True, "type": "epg_match", - "refresh_channels": True, # Flag to tell frontend to refresh channels + "refresh_channels": True, "matches_count": total_matched, "message": f"EPG matching complete: {total_matched} channel(s) matched", - "associations": associations # Add the associations data + "associations": associations } } ) return f"Done. Matched {total_matched} channel(s)." + finally: - # Final cleanup + # Clean up ML models from memory after bulk matching + if _ml_model_cache['sentence_transformer'] is not None: + logger.info("Cleaning up ML models from memory") + _ml_model_cache['sentence_transformer'] = None + + # Memory cleanup gc.collect() - # Use our standardized cleanup function for more thorough memory management from core.utils import cleanup_memory cleanup_memory(log_usage=True, force_collection=True) @shared_task -def run_recording(channel_id, start_time_str, end_time_str): +def match_selected_channels_epg(channel_ids): + """ + Match EPG data for only the specified selected channels. + Uses the same integrated EPG matching logic but processes only selected channels. 
+ """ + try: + logger.info(f"Starting integrated EPG matching for {len(channel_ids)} selected channels...") + + # Get region preference + try: + region_obj = CoreSettings.objects.get(key="preferred-region") + region_code = region_obj.value.strip().lower() + except CoreSettings.DoesNotExist: + region_code = None + + # Get only the specified channels that don't have EPG data assigned + channels_without_epg = Channel.objects.filter( + id__in=channel_ids, + epg_data__isnull=True + ) + logger.info(f"Found {channels_without_epg.count()} selected channels without EPG data") + + if not channels_without_epg.exists(): + logger.info("No selected channels need EPG matching.") + + # Send WebSocket update + channel_layer = get_channel_layer() + async_to_sync(channel_layer.group_send)( + 'updates', + { + 'type': 'update', + "data": { + "success": True, + "type": "epg_match", + "refresh_channels": True, + "matches_count": 0, + "message": "No selected channels need EPG matching", + "associations": [] + } + } + ) + return "No selected channels needed EPG matching." + + channels_data = [] + for channel in channels_without_epg: + normalized_tvg_id = channel.tvg_id.strip().lower() if channel.tvg_id else "" + normalized_gracenote_id = channel.tvc_guide_stationid.strip().lower() if channel.tvc_guide_stationid else "" + channels_data.append({ + "id": channel.id, + "name": channel.name, + "tvg_id": normalized_tvg_id, + "original_tvg_id": channel.tvg_id, + "gracenote_id": normalized_gracenote_id, + "original_gracenote_id": channel.tvc_guide_stationid, + "fallback_name": normalized_tvg_id if normalized_tvg_id else channel.name, + "norm_chan": normalize_name(channel.name) + }) + + # Get all EPG data + epg_data = [] + for epg in EPGData.objects.all(): + normalized_tvg_id = epg.tvg_id.strip().lower() if epg.tvg_id else "" + epg_data.append({ + 'id': epg.id, + 'tvg_id': normalized_tvg_id, + 'original_tvg_id': epg.tvg_id, + 'name': epg.name, + 'norm_name': normalize_name(epg.name), + 'epg_source_id': epg.epg_source.id if epg.epg_source else None, + }) + + logger.info(f"Processing {len(channels_data)} selected channels against {len(epg_data)} EPG entries") + + # Run EPG matching with progress updates - automatically uses appropriate thresholds + result = match_channels_to_epg(channels_data, epg_data, region_code, use_ml=True, send_progress=True) + channels_to_update_dicts = result["channels_to_update"] + matched_channels = result["matched_channels"] + + # Update channels in database + if channels_to_update_dicts: + channel_ids_to_update = [d["id"] for d in channels_to_update_dicts] + channels_qs = Channel.objects.filter(id__in=channel_ids_to_update) + channels_list = list(channels_qs) + + # Create mapping from channel_id to epg_data_id + epg_mapping = {d["id"]: d["epg_data_id"] for d in channels_to_update_dicts} + + # Update each channel with matched EPG data + for channel_obj in channels_list: + epg_data_id = epg_mapping.get(channel_obj.id) + if epg_data_id: + try: + epg_data_obj = EPGData.objects.get(id=epg_data_id) + channel_obj.epg_data = epg_data_obj + except EPGData.DoesNotExist: + logger.error(f"EPG data {epg_data_id} not found for channel {channel_obj.id}") + + # Bulk update all channels + Channel.objects.bulk_update(channels_list, ["epg_data"]) + + total_matched = len(matched_channels) + if total_matched: + logger.info(f"Selected Channel Match Summary: {total_matched} channel(s) matched.") + for (cid, cname, tvg) in matched_channels: + logger.info(f" - Channel ID={cid}, Name='{cname}' => tvg_id='{tvg}'") + else: 
+ logger.info("No selected channels were matched.") + + logger.info("Finished integrated EPG matching for selected channels.") + + # Send WebSocket update + channel_layer = get_channel_layer() + associations = [ + {"channel_id": chan["id"], "epg_data_id": chan["epg_data_id"]} + for chan in channels_to_update_dicts + ] + + async_to_sync(channel_layer.group_send)( + 'updates', + { + 'type': 'update', + "data": { + "success": True, + "type": "epg_match", + "refresh_channels": True, + "matches_count": total_matched, + "message": f"EPG matching complete: {total_matched} selected channel(s) matched", + "associations": associations + } + } + ) + + return f"Done. Matched {total_matched} selected channel(s)." + + finally: + # Clean up ML models from memory after bulk matching + if _ml_model_cache['sentence_transformer'] is not None: + logger.info("Cleaning up ML models from memory") + _ml_model_cache['sentence_transformer'] = None + + # Memory cleanup + gc.collect() + from core.utils import cleanup_memory + cleanup_memory(log_usage=True, force_collection=True) + + +@shared_task +def match_single_channel_epg(channel_id): + """ + Try to match a single channel with EPG data using the integrated matching logic + that includes both fuzzy and ML-enhanced matching. Returns a dict with match status and message. + """ + try: + from apps.channels.models import Channel + from apps.epg.models import EPGData + + logger.info(f"Starting integrated single channel EPG matching for channel ID {channel_id}") + + # Get the channel + try: + channel = Channel.objects.get(id=channel_id) + except Channel.DoesNotExist: + return {"matched": False, "message": "Channel not found"} + + # If channel already has EPG data, skip + if channel.epg_data: + return {"matched": False, "message": f"Channel '{channel.name}' already has EPG data assigned"} + + # Prepare single channel data for matching (same format as bulk matching) + normalized_tvg_id = channel.tvg_id.strip().lower() if channel.tvg_id else "" + normalized_gracenote_id = channel.tvc_guide_stationid.strip().lower() if channel.tvc_guide_stationid else "" + channel_data = { + "id": channel.id, + "name": channel.name, + "tvg_id": normalized_tvg_id, + "original_tvg_id": channel.tvg_id, + "gracenote_id": normalized_gracenote_id, + "original_gracenote_id": channel.tvc_guide_stationid, + "fallback_name": normalized_tvg_id if normalized_tvg_id else channel.name, + "norm_chan": normalize_name(channel.name) # Always use channel name for fuzzy matching! 
+ } + + logger.info(f"Channel data prepared: name='{channel.name}', tvg_id='{normalized_tvg_id}', gracenote_id='{normalized_gracenote_id}', norm_chan='{channel_data['norm_chan']}'") + + # Debug: Test what the normalization does to preserve call signs + test_name = "NBC 11 (KVLY) - Fargo" # Example for testing + test_normalized = normalize_name(test_name) + logger.debug(f"DEBUG normalization example: '{test_name}' β†’ '{test_normalized}' (call sign preserved)") + + # Get all EPG data for matching - must include norm_name field + epg_data_list = [] + for epg in EPGData.objects.filter(name__isnull=False).exclude(name=''): + normalized_epg_tvg_id = epg.tvg_id.strip().lower() if epg.tvg_id else "" + epg_data_list.append({ + 'id': epg.id, + 'tvg_id': normalized_epg_tvg_id, + 'original_tvg_id': epg.tvg_id, + 'name': epg.name, + 'norm_name': normalize_name(epg.name), + 'epg_source_id': epg.epg_source.id if epg.epg_source else None, + }) + + if not epg_data_list: + return {"matched": False, "message": "No EPG data available for matching"} + + logger.info(f"Matching single channel '{channel.name}' against {len(epg_data_list)} EPG entries") + + # Send progress for single channel matching + send_epg_matching_progress(1, 0, current_channel_name=channel.name, stage="matching") + + # Use the EPG matching function - automatically uses aggressive thresholds for single channel + result = match_channels_to_epg([channel_data], epg_data_list, send_progress=False) + channels_to_update = result.get("channels_to_update", []) + matched_channels = result.get("matched_channels", []) + + if channels_to_update: + # Find our channel in the results + channel_match = None + for update in channels_to_update: + if update["id"] == channel.id: + channel_match = update + break + + if channel_match: + # Apply the match to the channel + try: + epg_data = EPGData.objects.get(id=channel_match['epg_data_id']) + channel.epg_data = epg_data + channel.save(update_fields=["epg_data"]) + + # Find match details from matched_channels for better reporting + match_details = None + for match_info in matched_channels: + if match_info[0] == channel.id: # matched_channels format: (channel_id, channel_name, epg_info) + match_details = match_info + break + + success_msg = f"Channel '{channel.name}' matched with EPG '{epg_data.name}'" + if match_details: + success_msg += f" (matched via: {match_details[2]})" + + logger.info(success_msg) + + # Send completion progress for single channel + send_epg_matching_progress(1, 1, current_channel_name=channel.name, stage="completed") + + # Clean up ML models from memory after single channel matching + if _ml_model_cache['sentence_transformer'] is not None: + logger.info("Cleaning up ML models from memory") + _ml_model_cache['sentence_transformer'] = None + gc.collect() + + return { + "matched": True, + "message": success_msg, + "epg_name": epg_data.name, + "epg_id": epg_data.id + } + except EPGData.DoesNotExist: + return {"matched": False, "message": "Matched EPG data not found"} + + # No match found + # Send completion progress for single channel (failed) + send_epg_matching_progress(1, 0, current_channel_name=channel.name, stage="completed") + + # Clean up ML models from memory after single channel matching + if _ml_model_cache['sentence_transformer'] is not None: + logger.info("Cleaning up ML models from memory") + _ml_model_cache['sentence_transformer'] = None + gc.collect() + + return { + "matched": False, + "message": f"No suitable EPG match found for channel '{channel.name}'" + } + + except Exception 
as e: + logger.error(f"Error in integrated single channel EPG matching: {e}", exc_info=True) + + # Clean up ML models from memory even on error + if _ml_model_cache['sentence_transformer'] is not None: + logger.info("Cleaning up ML models from memory after error") + _ml_model_cache['sentence_transformer'] = None + gc.collect() + + return {"matched": False, "message": f"Error during matching: {str(e)}"} + + +def evaluate_series_rules_impl(tvg_id: str | None = None): + """Synchronous implementation of series rule evaluation; returns details for debugging.""" + from django.utils import timezone + from apps.channels.models import Recording, Channel + from apps.epg.models import EPGData, ProgramData + + rules = CoreSettings.get_dvr_series_rules() + result = {"scheduled": 0, "details": []} + if not isinstance(rules, list) or not rules: + return result + + # Optionally filter for tvg_id + if tvg_id: + rules = [r for r in rules if str(r.get("tvg_id")) == str(tvg_id)] + if not rules: + result["details"].append({"tvg_id": tvg_id, "status": "no_rule"}) + return result + + now = timezone.now() + horizon = now + timedelta(days=7) + + # Preload existing recordings' program ids to avoid duplicates + existing_program_ids = set() + for rec in Recording.objects.all().only("custom_properties"): + try: + pid = rec.custom_properties.get("program", {}).get("id") if rec.custom_properties else None + if pid is not None: + # Normalize to string for consistent comparisons + existing_program_ids.add(str(pid)) + except Exception: + continue + + for rule in rules: + rv_tvg = str(rule.get("tvg_id") or "").strip() + mode = (rule.get("mode") or "all").lower() + series_title = (rule.get("title") or "").strip() + norm_series = normalize_name(series_title) if series_title else None + if not rv_tvg: + result["details"].append({"tvg_id": rv_tvg, "status": "invalid_rule"}) + continue + + epg = EPGData.objects.filter(tvg_id=rv_tvg).first() + if not epg: + result["details"].append({"tvg_id": rv_tvg, "status": "no_epg_match"}) + continue + + programs_qs = ProgramData.objects.filter( + epg=epg, + start_time__gte=now, + start_time__lte=horizon, + ) + if series_title: + programs_qs = programs_qs.filter(title__iexact=series_title) + programs = list(programs_qs.order_by("start_time")) + # Fallback: if no direct matches and we have a title, try normalized comparison in Python + if series_title and not programs: + all_progs = ProgramData.objects.filter( + epg=epg, + start_time__gte=now, + start_time__lte=horizon, + ).only("id", "title", "start_time", "end_time", "custom_properties", "tvg_id") + programs = [p for p in all_progs if normalize_name(p.title) == norm_series] + + channel = Channel.objects.filter(epg_data=epg).order_by("channel_number").first() + if not channel: + result["details"].append({"tvg_id": rv_tvg, "status": "no_channel_for_epg"}) + continue + + # + # Many providers list multiple future airings of the same episode + # (e.g., prime-time and a late-night repeat). Previously we scheduled + # a recording for each airing which shows up as duplicates in the DVR. + # + # To avoid that, we collapse programs to the earliest airing per + # unique episode using the best identifier available: + # - season+episode from ProgramData.custom_properties + # - onscreen_episode (e.g., S08E03) + # - sub_title (episode name), scoped by tvg_id+series title + # If none of the above exist, we fall back to keeping each program + # (usually movies or specials without episode identifiers). 
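+        #   e.g. season=8, episode=3 of "Nova" on tvg_id "pbs.us" collapses to the key
+        #   "pbs.us|nova|s8e3", so only the earliest airing of that episode is scheduled.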
+ # + def _episode_key(p: "ProgramData"): + try: + props = p.custom_properties or {} + season = props.get("season") + episode = props.get("episode") + onscreen = props.get("onscreen_episode") + except Exception: + season = episode = onscreen = None + base = f"{p.tvg_id or ''}|{(p.title or '').strip().lower()}" # series scope + if season is not None and episode is not None: + return f"{base}|s{season}e{episode}" + if onscreen: + return f"{base}|{str(onscreen).strip().lower()}" + if p.sub_title: + return f"{base}|{p.sub_title.strip().lower()}" + # No reliable episode identity; use the program id to avoid over-merging + return f"id:{p.id}" + + # Optionally filter to only brand-new episodes before grouping + if mode == "new": + filtered = [] + for p in programs: + try: + if (p.custom_properties or {}).get("new"): + filtered.append(p) + except Exception: + pass + programs = filtered + + # Pick the earliest airing for each episode key + earliest_by_key = {} + for p in programs: + k = _episode_key(p) + cur = earliest_by_key.get(k) + if cur is None or p.start_time < cur.start_time: + earliest_by_key[k] = p + + unique_programs = list(earliest_by_key.values()) + + created_here = 0 + for prog in unique_programs: + try: + # Skip if already scheduled by program id + if str(prog.id) in existing_program_ids: + continue + # Extra guard: skip if a recording exists for the same channel + timeslot + try: + from django.db.models import Q + if Recording.objects.filter( + channel=channel, + start_time=prog.start_time, + end_time=prog.end_time, + ).filter(Q(custom_properties__program__id=prog.id) | Q(custom_properties__program__title=prog.title)).exists(): + continue + except Exception: + continue # already scheduled/recorded + + # Apply global DVR pre/post offsets (in minutes) + try: + pre_min = int(CoreSettings.get_dvr_pre_offset_minutes()) + except Exception: + pre_min = 0 + try: + post_min = int(CoreSettings.get_dvr_post_offset_minutes()) + except Exception: + post_min = 0 + + adj_start = prog.start_time + adj_end = prog.end_time + try: + if pre_min and pre_min > 0: + adj_start = adj_start - timedelta(minutes=pre_min) + except Exception: + pass + try: + if post_min and post_min > 0: + adj_end = adj_end + timedelta(minutes=post_min) + except Exception: + pass + + rec = Recording.objects.create( + channel=channel, + start_time=adj_start, + end_time=adj_end, + custom_properties={ + "program": { + "id": prog.id, + "tvg_id": prog.tvg_id, + "title": prog.title, + "sub_title": prog.sub_title, + "description": prog.description, + "start_time": prog.start_time.isoformat(), + "end_time": prog.end_time.isoformat(), + } + }, + ) + existing_program_ids.add(str(prog.id)) + created_here += 1 + try: + prefetch_recording_artwork.apply_async(args=[rec.id], countdown=1) + except Exception: + pass + except Exception as e: + result["details"].append({"tvg_id": rv_tvg, "status": "error", "error": str(e)}) + continue + result["scheduled"] += created_here + result["details"].append({"tvg_id": rv_tvg, "title": series_title, "status": "ok", "created": created_here}) + + # Notify frontend to refresh + try: + channel_layer = get_channel_layer() + async_to_sync(channel_layer.group_send)( + 'updates', + {'type': 'update', 'data': {"success": True, "type": "recordings_refreshed", "scheduled": result["scheduled"]}}, + ) + except Exception: + pass + + return result + + +@shared_task +def evaluate_series_rules(tvg_id: str | None = None): + return evaluate_series_rules_impl(tvg_id) + + +def 
reschedule_upcoming_recordings_for_offset_change_impl(): + """Recalculate start/end for all future EPG-based recordings using current DVR offsets. + + Only recordings that have not yet started (start_time > now) and that were + scheduled from EPG data (custom_properties.program present) are updated. + """ + from django.utils import timezone + from django.utils.dateparse import parse_datetime + from apps.channels.models import Recording + + now = timezone.now() + + try: + pre_min = int(CoreSettings.get_dvr_pre_offset_minutes()) + except Exception: + pre_min = 0 + try: + post_min = int(CoreSettings.get_dvr_post_offset_minutes()) + except Exception: + post_min = 0 + + changed = 0 + scanned = 0 + + for rec in Recording.objects.filter(start_time__gt=now).iterator(): + scanned += 1 + try: + cp = rec.custom_properties or {} + program = cp.get("program") if isinstance(cp, dict) else None + if not isinstance(program, dict): + continue + base_start = program.get("start_time") + base_end = program.get("end_time") + if not base_start or not base_end: + continue + start_dt = parse_datetime(str(base_start)) + end_dt = parse_datetime(str(base_end)) + if start_dt is None or end_dt is None: + continue + + adj_start = start_dt + adj_end = end_dt + try: + if pre_min and pre_min > 0: + adj_start = adj_start - timedelta(minutes=pre_min) + except Exception: + pass + try: + if post_min and post_min > 0: + adj_end = adj_end + timedelta(minutes=post_min) + except Exception: + pass + + if rec.start_time != adj_start or rec.end_time != adj_end: + rec.start_time = adj_start + rec.end_time = adj_end + rec.save(update_fields=["start_time", "end_time"]) + changed += 1 + except Exception: + continue + + # Notify frontend to refresh + try: + channel_layer = get_channel_layer() + async_to_sync(channel_layer.group_send)( + 'updates', + {'type': 'update', 'data': {"success": True, "type": "recordings_refreshed", "rescheduled": changed}}, + ) + except Exception: + pass + + return {"changed": changed, "scanned": scanned, "pre": pre_min, "post": post_min} + + +@shared_task +def reschedule_upcoming_recordings_for_offset_change(): + return reschedule_upcoming_recordings_for_offset_change_impl() + + +def _notify_recordings_refresh(): + try: + from core.utils import send_websocket_update + send_websocket_update('updates', 'update', {"success": True, "type": "recordings_refreshed"}) + except Exception: + pass + + +def purge_recurring_rule_impl(rule_id: int) -> int: + """Remove all future recordings created by a recurring rule.""" + from django.utils import timezone + from .models import Recording + + now = timezone.now() + try: + removed, _ = Recording.objects.filter( + start_time__gte=now, + custom_properties__rule__id=rule_id, + ).delete() + except Exception: + removed = 0 + if removed: + _notify_recordings_refresh() + return removed + + +def sync_recurring_rule_impl(rule_id: int, drop_existing: bool = True, horizon_days: int = 14) -> int: + """Ensure recordings exist for a recurring rule within the scheduling horizon.""" + from django.utils import timezone + from .models import RecurringRecordingRule, Recording + + rule = RecurringRecordingRule.objects.filter(pk=rule_id).select_related("channel").first() + now = timezone.now() + removed = 0 + if drop_existing: + removed = purge_recurring_rule_impl(rule_id) + + if not rule or not rule.enabled: + return 0 + + days = rule.cleaned_days() + if not days: + return 0 + + tz_name = CoreSettings.get_system_time_zone() + try: + tz = ZoneInfo(tz_name) + except Exception: + 
logger.warning("Invalid or unsupported time zone '%s'; falling back to Server default", tz_name) + tz = timezone.get_current_timezone() + start_limit = rule.start_date or now.date() + end_limit = rule.end_date + horizon = now + timedelta(days=horizon_days) + start_window = max(start_limit, now.date()) + if drop_existing and end_limit: + end_window = end_limit + else: + end_window = horizon.date() + if end_limit and end_limit < end_window: + end_window = end_limit + if end_window < start_window: + return 0 + total_created = 0 + + for offset in range((end_window - start_window).days + 1): + target_date = start_window + timedelta(days=offset) + if target_date.weekday() not in days: + continue + if end_limit and target_date > end_limit: + continue + try: + start_dt = timezone.make_aware(datetime.combine(target_date, rule.start_time), tz) + end_dt = timezone.make_aware(datetime.combine(target_date, rule.end_time), tz) + except Exception: + continue + if end_dt <= start_dt: + end_dt = end_dt + timedelta(days=1) + if start_dt <= now: + continue + exists = Recording.objects.filter( + channel=rule.channel, + start_time=start_dt, + custom_properties__rule__id=rule.id, + ).exists() + if exists: + continue + description = rule.name or f"Recurring recording for {rule.channel.name}" + cp = { + "rule": { + "type": "recurring", + "id": rule.id, + "days_of_week": days, + "name": rule.name or "", + }, + "status": "scheduled", + "description": description, + "program": { + "title": rule.name or rule.channel.name, + "description": description, + "start_time": start_dt.isoformat(), + "end_time": end_dt.isoformat(), + }, + } + try: + Recording.objects.create( + channel=rule.channel, + start_time=start_dt, + end_time=end_dt, + custom_properties=cp, + ) + total_created += 1 + except Exception as err: + logger.warning(f"Failed to create recurring recording for rule {rule.id}: {err}") + + if removed or total_created: + _notify_recordings_refresh() + + return total_created + + +@shared_task +def rebuild_recurring_rule(rule_id: int, horizon_days: int = 14): + return sync_recurring_rule_impl(rule_id, drop_existing=True, horizon_days=horizon_days) + + +@shared_task +def maintain_recurring_recordings(): + from .models import RecurringRecordingRule + + total = 0 + for rule_id in RecurringRecordingRule.objects.filter(enabled=True).values_list("id", flat=True): + try: + total += sync_recurring_rule_impl(rule_id, drop_existing=False) + except Exception as err: + logger.warning(f"Recurring rule maintenance failed for {rule_id}: {err}") + return total + + +@shared_task +def purge_recurring_rule(rule_id: int): + return purge_recurring_rule_impl(rule_id) + +@shared_task +def _safe_name(s): + try: + import re + s = s or "" + # Remove forbidden filename characters and normalize spaces + s = re.sub(r'[\\/:*?"<>|]+', '', s) + s = s.strip() + return s + except Exception: + return s or "" + + +def _parse_epg_tv_movie_info(program): + """Return tuple (is_movie, season, episode, year, sub_title) from EPG ProgramData if available.""" + is_movie = False + season = None + episode = None + year = None + sub_title = program.get('sub_title') if isinstance(program, dict) else None + try: + from apps.epg.models import ProgramData + prog_id = program.get('id') if isinstance(program, dict) else None + epg_program = ProgramData.objects.filter(id=prog_id).only('custom_properties').first() if prog_id else None + if epg_program and epg_program.custom_properties: + cp = epg_program.custom_properties + # Determine categories + cats = [c.lower() for c 
in (cp.get('categories') or []) if isinstance(c, str)] + is_movie = 'movie' in cats or 'film' in cats + season = cp.get('season') + episode = cp.get('episode') + onscreen = cp.get('onscreen_episode') + if (season is None or episode is None) and isinstance(onscreen, str): + import re as _re + m = _re.search(r'[sS](\d+)[eE](\d+)', onscreen) + if m: + season = season or int(m.group(1)) + episode = episode or int(m.group(2)) + d = cp.get('date') + if d: + year = str(d)[:4] + except Exception: + pass + return is_movie, season, episode, year, sub_title + + +def _build_output_paths(channel, program, start_time, end_time): + """ + Build (final_path, temp_ts_path, final_filename) using DVR templates. + """ + from core.models import CoreSettings + # Root for DVR recordings: fixed to /data/recordings inside the container + library_root = '/data/recordings' + + is_movie, season, episode, year, sub_title = _parse_epg_tv_movie_info(program) + show = _safe_name(program.get('title') if isinstance(program, dict) else channel.name) + title = _safe_name(program.get('title') if isinstance(program, dict) else channel.name) + sub_title = _safe_name(sub_title) + season = int(season) if season is not None else 0 + episode = int(episode) if episode is not None else 0 + year = year or str(start_time.year) + + values = { + 'show': show, + 'title': title, + 'sub_title': sub_title, + 'season': season, + 'episode': episode, + 'year': year, + 'channel': _safe_name(channel.name), + 'start': start_time.strftime('%Y%m%d_%H%M%S'), + 'end': end_time.strftime('%Y%m%d_%H%M%S'), + } + + template = CoreSettings.get_dvr_movie_template() if is_movie else CoreSettings.get_dvr_tv_template() + # Build relative path from templates with smart fallbacks + rel_path = None + if not is_movie and (season == 0 or episode == 0): + # TV fallback template when S/E are missing + try: + tv_fb = CoreSettings.get_dvr_tv_fallback_template() + rel_path = tv_fb.format(**values) + except Exception: + # Older setting support + try: + fallback_root = CoreSettings.get_dvr_tv_fallback_dir() + except Exception: + fallback_root = "TV_Shows" + rel_path = f"{fallback_root}/{show}/{values['start']}.mkv" + if not rel_path: + try: + rel_path = template.format(**values) + except Exception: + rel_path = None + # Movie-specific fallback if formatting failed or title missing + if is_movie and not rel_path: + try: + m_fb = CoreSettings.get_dvr_movie_fallback_template() + rel_path = m_fb.format(**values) + except Exception: + rel_path = f"Movies/{values['start']}.mkv" + # As a last resort for TV + if not is_movie and not rel_path: + rel_path = f"TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv" + # Keep any leading folder like 'Recordings/' from the template so users can + # structure their library under /data as desired. 
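+    # Illustrative example only (the actual layout depends entirely on the configured
+    # DVR template): with the built-in last-resort TV pattern above and values such as
+    # show="My Show", season=1, episode=2, rel_path becomes
+    # "TV_Shows/My Show/S01E02.mkv", which lands under
+    # /data/recordings/TV_Shows/My Show/S01E02.mkv.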
+ if not rel_path.lower().endswith('.mkv'): + rel_path = f"{rel_path}.mkv" + + # Normalize path (strip ./) + if rel_path.startswith('./'): + rel_path = rel_path[2:] + final_path = rel_path if rel_path.startswith('/') else os.path.join(library_root, rel_path) + final_path = os.path.normpath(final_path) + # Ensure directory exists + os.makedirs(os.path.dirname(final_path), exist_ok=True) + + # Derive temp TS path in same directory + base_no_ext = os.path.splitext(os.path.basename(final_path))[0] + temp_ts_path = os.path.join(os.path.dirname(final_path), f"{base_no_ext}.ts") + return final_path, temp_ts_path, os.path.basename(final_path) + + +@shared_task +def run_recording(recording_id, channel_id, start_time_str, end_time_str): + """ + Execute a scheduled recording for the given channel/recording. + + Enhancements: + - Accepts recording_id so we can persist metadata back to the Recording row + - Persists basic file info (name/path) to Recording.custom_properties + - Attempts to capture stream stats from TS proxy (codec, resolution, fps, etc.) + - Attempts to capture a poster (via program.custom_properties) and store a Logo reference + """ channel = Channel.objects.get(id=channel_id) start_time = datetime.fromisoformat(start_time_str) end_time = datetime.fromisoformat(end_time_str) duration_seconds = int((end_time - start_time).total_seconds()) - filename = f'{slugify(channel.name)}-{start_time.strftime("%Y-%m-%d_%H-%M-%S")}.mp4' + # Build output paths from templates + # We need program info; will refine after we load Recording cp below + filename = None + final_path = None + temp_ts_path = None channel_layer = get_channel_layer() @@ -243,21 +1433,389 @@ def run_recording(channel_id, start_time_str, end_time_str): ) logger.info(f"Starting recording for channel {channel.name}") - with requests.get(f"http://localhost:5656/proxy/ts/stream/{channel.uuid}", headers={ - 'User-Agent': 'Dispatcharr-DVR', - }, stream=True) as response: - # Raise an exception for bad responses (4xx, 5xx) - response.raise_for_status() - # Open the file in write-binary mode - with open(f"/data/recordings/{filename}", 'wb') as file: - start_time = time.time() # Start the timer - for chunk in response.iter_content(chunk_size=8192): # 8KB chunks - if time.time() - start_time > duration_seconds: - print(f"Timeout reached: {duration_seconds} seconds") + # Try to resolve the Recording row up front + recording_obj = None + try: + from .models import Recording, Logo + recording_obj = Recording.objects.get(id=recording_id) + # Prime custom_properties with file info/status + cp = recording_obj.custom_properties or {} + cp.update({ + "status": "recording", + "started_at": str(datetime.now()), + }) + # Provide a predictable playback URL for the frontend + cp["file_url"] = f"/api/channels/recordings/{recording_id}/file/" + cp["output_file_url"] = cp["file_url"] + + # Determine program info (may include id for deeper details) + program = cp.get("program") or {} + final_path, temp_ts_path, filename = _build_output_paths(channel, program, start_time, end_time) + cp["file_name"] = filename + cp["file_path"] = final_path + cp["_temp_file_path"] = temp_ts_path + + # Resolve poster the same way VODs do: + # 1) Prefer image(s) from EPG Program custom_properties (images/icon) + # 2) Otherwise reuse an existing VOD logo matching title (Movie/Series) + # 3) Otherwise save any direct poster URL from provided program fields + program = (cp.get("program") or {}) if isinstance(cp, dict) else {} + + def pick_best_image_from_epg_props(epg_props): + 
try: + images = epg_props.get("images") or [] + if not isinstance(images, list): + return None + # Prefer poster/cover and larger sizes + size_order = {"xxl": 6, "xl": 5, "l": 4, "m": 3, "s": 2, "xs": 1} + def score(img): + t = (img.get("type") or "").lower() + size = (img.get("size") or "").lower() + return ( + 2 if t in ("poster", "cover") else 1, + size_order.get(size, 0) + ) + best = None + for im in images: + if not isinstance(im, dict): + continue + url = im.get("url") + if not url: + continue + if best is None or score(im) > score(best): + best = im + return best.get("url") if best else None + except Exception: + return None + + poster_logo_id = None + poster_url = None + + # Try EPG Program custom_properties by ID + try: + from apps.epg.models import ProgramData + prog_id = program.get("id") + if prog_id: + epg_program = ProgramData.objects.filter(id=prog_id).only("custom_properties").first() + if epg_program and epg_program.custom_properties: + epg_props = epg_program.custom_properties or {} + poster_url = pick_best_image_from_epg_props(epg_props) + if not poster_url: + icon = epg_props.get("icon") + if isinstance(icon, str) and icon: + poster_url = icon + except Exception as e: + logger.debug(f"EPG image lookup failed: {e}") + + # Fallback: reuse VOD Logo by matching title + if not poster_url and not poster_logo_id: + try: + from apps.vod.models import Movie, Series + title = program.get("title") or channel.name + vod_logo = None + movie = Movie.objects.filter(name__iexact=title).select_related("logo").first() + if movie and movie.logo: + vod_logo = movie.logo + if not vod_logo: + series = Series.objects.filter(name__iexact=title).select_related("logo").first() + if series and series.logo: + vod_logo = series.logo + if vod_logo: + poster_logo_id = vod_logo.id + except Exception as e: + logger.debug(f"VOD logo fallback failed: {e}") + + # External metadata lookups (TMDB/OMDb) when EPG/VOD didn't provide an image + if not poster_url and not poster_logo_id: + try: + tmdb_key = os.environ.get('TMDB_API_KEY') + omdb_key = os.environ.get('OMDB_API_KEY') + title = (program.get('title') or channel.name or '').strip() + year = None + imdb_id = None + + # Try to derive year and imdb from EPG program custom_properties + try: + from apps.epg.models import ProgramData + prog_id = program.get('id') + epg_program = ProgramData.objects.filter(id=prog_id).only('custom_properties').first() if prog_id else None + if epg_program and epg_program.custom_properties: + d = epg_program.custom_properties.get('date') + if d and len(str(d)) >= 4: + year = str(d)[:4] + imdb_id = epg_program.custom_properties.get('imdb.com_id') or imdb_id + except Exception: + pass + + # TMDB: by IMDb ID + if not poster_url and tmdb_key and imdb_id: + try: + url = f"https://api.themoviedb.org/3/find/{quote(imdb_id)}?api_key={tmdb_key}&external_source=imdb_id" + resp = requests.get(url, timeout=5) + if resp.ok: + data = resp.json() or {} + picks = [] + for k in ('movie_results', 'tv_results', 'tv_episode_results', 'tv_season_results'): + lst = data.get(k) or [] + picks.extend(lst) + poster_path = None + for item in picks: + if item.get('poster_path'): + poster_path = item['poster_path'] + break + if poster_path: + poster_url = f"https://image.tmdb.org/t/p/w780{poster_path}" + except Exception: + pass + + # TMDB: by title (and year if available) + if not poster_url and tmdb_key and title: + try: + q = quote(title) + extra = f"&year={year}" if year else "" + url = 
f"https://api.themoviedb.org/3/search/multi?api_key={tmdb_key}&query={q}{extra}" + resp = requests.get(url, timeout=5) + if resp.ok: + data = resp.json() or {} + results = data.get('results') or [] + results.sort(key=lambda x: float(x.get('popularity') or 0), reverse=True) + for item in results: + if item.get('poster_path'): + poster_url = f"https://image.tmdb.org/t/p/w780{item['poster_path']}" + break + except Exception: + pass + + # OMDb fallback + if not poster_url and omdb_key: + try: + if imdb_id: + url = f"https://www.omdbapi.com/?apikey={omdb_key}&i={quote(imdb_id)}" + elif title: + yy = f"&y={year}" if year else "" + url = f"https://www.omdbapi.com/?apikey={omdb_key}&t={quote(title)}{yy}" + else: + url = None + if url: + resp = requests.get(url, timeout=5) + if resp.ok: + data = resp.json() or {} + p = data.get('Poster') + if p and p != 'N/A': + poster_url = p + except Exception: + pass + except Exception as e: + logger.debug(f"External poster lookup failed: {e}") + + # Keyless fallback providers (no API keys required) + if not poster_url and not poster_logo_id: + try: + title = (program.get('title') or channel.name or '').strip() + if title: + # 1) TVMaze (TV shows) - singlesearch by title + try: + url = f"https://api.tvmaze.com/singlesearch/shows?q={quote(title)}" + resp = requests.get(url, timeout=5) + if resp.ok: + data = resp.json() or {} + img = (data.get('image') or {}) + p = img.get('original') or img.get('medium') + if p: + poster_url = p + except Exception: + pass + + # 2) iTunes Search API (movies or tv shows) + if not poster_url: + try: + for media in ('movie', 'tvShow'): + url = f"https://itunes.apple.com/search?term={quote(title)}&media={media}&limit=1" + resp = requests.get(url, timeout=5) + if resp.ok: + data = resp.json() or {} + results = data.get('results') or [] + if results: + art = results[0].get('artworkUrl100') + if art: + # Scale up to 600x600 by convention + poster_url = art.replace('100x100', '600x600') + break + except Exception: + pass + except Exception as e: + logger.debug(f"Keyless poster lookup failed: {e}") + + # Last: check direct fields on provided program object + if not poster_url and not poster_logo_id: + for key in ("poster", "cover", "cover_big", "image", "icon"): + val = program.get(key) + if isinstance(val, dict): + candidate = val.get("url") + if candidate: + poster_url = candidate + break + elif isinstance(val, str) and val: + poster_url = val break - # Write the chunk to the file - file.write(chunk) + + # Create or assign Logo + if not poster_logo_id and poster_url and len(poster_url) <= 1000: + try: + logo, _ = Logo.objects.get_or_create(url=poster_url, defaults={"name": program.get("title") or channel.name}) + poster_logo_id = logo.id + except Exception as e: + logger.debug(f"Unable to persist poster to Logo: {e}") + + if poster_logo_id: + cp["poster_logo_id"] = poster_logo_id + if poster_url and "poster_url" not in cp: + cp["poster_url"] = poster_url + + # Ensure destination exists so it's visible immediately + try: + os.makedirs(os.path.dirname(final_path), exist_ok=True) + if not os.path.exists(final_path): + open(final_path, 'ab').close() + except Exception: + pass + + recording_obj.custom_properties = cp + recording_obj.save(update_fields=["custom_properties"]) + except Exception as e: + logger.debug(f"Unable to prime Recording metadata: {e}") + interrupted = False + interrupted_reason = None + bytes_written = 0 + + from requests.exceptions import ReadTimeout, ConnectionError as ReqConnectionError, ChunkedEncodingError + + # 
Determine internal base URL(s) for TS streaming + # Prefer explicit override, then try common ports for debug and docker + explicit = os.environ.get('DISPATCHARR_INTERNAL_TS_BASE_URL') + is_dev = (os.environ.get('DISPATCHARR_ENV', '').lower() == 'dev') or \ + (os.environ.get('DISPATCHARR_DEBUG', '').lower() == 'true') or \ + (os.environ.get('REDIS_HOST', 'redis') in ('localhost', '127.0.0.1')) + candidates = [] + if explicit: + candidates.append(explicit) + if is_dev: + # Debug container typically exposes API on 5656 + candidates.extend(['http://127.0.0.1:5656', 'http://127.0.0.1:9191']) + # Docker service name fallback + candidates.append(os.environ.get('DISPATCHARR_INTERNAL_API_BASE', 'http://web:9191')) + # Last-resort localhost ports + candidates.extend(['http://localhost:5656', 'http://localhost:9191']) + + chosen_base = None + last_error = None + bytes_written = 0 + interrupted = False + interrupted_reason = None + + # We'll attempt each base until we receive some data + for base in candidates: + try: + test_url = f"{base.rstrip('/')}/proxy/ts/stream/{channel.uuid}" + logger.info(f"DVR: trying TS base {base} -> {test_url}") + + with requests.get( + test_url, + headers={ + 'User-Agent': 'Dispatcharr-DVR', + }, + stream=True, + timeout=(10, 15), + ) as response: + response.raise_for_status() + + # Open the file and start copying; if we get any data within a short window, accept this base + got_any_data = False + test_window = 3.0 # seconds to detect first bytes + window_start = time.time() + + with open(temp_ts_path, 'wb') as file: + started_at = time.time() + for chunk in response.iter_content(chunk_size=8192): + if not chunk: + # keep-alives may be empty; continue + if not got_any_data and (time.time() - window_start) > test_window: + break + continue + # We have data + got_any_data = True + chosen_base = base + # Fall through to full recording loop using this same response/connection + file.write(chunk) + bytes_written += len(chunk) + elapsed = time.time() - started_at + if elapsed > duration_seconds: + break + # Continue draining the stream + for chunk2 in response.iter_content(chunk_size=8192): + if not chunk2: + continue + file.write(chunk2) + bytes_written += len(chunk2) + elapsed = time.time() - started_at + if elapsed > duration_seconds: + break + break # exit outer for-loop once we switched to full drain + + # If we wrote any bytes, treat as success and stop trying candidates + if bytes_written > 0: + logger.info(f"DVR: selected TS base {base}; wrote initial {bytes_written} bytes") + break + else: + last_error = f"no_data_from_{base}" + logger.warning(f"DVR: no data received from {base} within {test_window}s, trying next base") + # Clean up empty temp file + try: + if os.path.exists(temp_ts_path) and os.path.getsize(temp_ts_path) == 0: + os.remove(temp_ts_path) + except Exception: + pass + except Exception as e: + last_error = str(e) + logger.warning(f"DVR: attempt failed for base {base}: {e}") + + if chosen_base is None and bytes_written == 0: + interrupted = True + interrupted_reason = f"no_stream_data: {last_error or 'all_bases_failed'}" + else: + # If we ended before reaching planned duration, record reason + actual_elapsed = 0 + try: + actual_elapsed = os.path.getsize(temp_ts_path) and (duration_seconds) # Best effort; we streamed until duration or disconnect above + except Exception: + pass + # We cannot compute accurate elapsed here; fine to leave as is + pass + + # If no bytes were written at all, mark detail + if bytes_written == 0 and not interrupted: + interrupted 
= True + interrupted_reason = f"no_stream_data: {last_error or 'unknown'}" + + # Update DB status immediately so the UI reflects the change on the event below + try: + if recording_obj is None: + from .models import Recording + recording_obj = Recording.objects.get(id=recording_id) + cp_now = recording_obj.custom_properties or {} + cp_now.update({ + "status": "interrupted" if interrupted else "completed", + "ended_at": str(datetime.now()), + "file_name": filename or cp_now.get("file_name"), + "file_path": final_path or cp_now.get("file_path"), + }) + if interrupted and interrupted_reason: + cp_now["interrupted_reason"] = interrupted_reason + recording_obj.custom_properties = cp_now + recording_obj.save(update_fields=["custom_properties"]) + except Exception as e: + logger.debug(f"Failed to update immediate recording status: {e}") async_to_sync(channel_layer.group_send)( "updates", @@ -266,6 +1824,1213 @@ def run_recording(channel_id, start_time_str, end_time_str): "data": {"success": True, "type": "recording_ended", "channel": channel.name} }, ) - # After the loop, the file and response are closed automatically. logger.info(f"Finished recording for channel {channel.name}") + + # Remux TS to MKV container + remux_success = False + try: + if temp_ts_path and os.path.exists(temp_ts_path): + subprocess.run([ + "ffmpeg", "-y", "-i", temp_ts_path, "-c", "copy", final_path + ], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + remux_success = os.path.exists(final_path) + # Clean up temp file on success + if remux_success: + try: + os.remove(temp_ts_path) + except Exception: + pass + except Exception as e: + logger.warning(f"MKV remux failed: {e}") + + # Persist final metadata to Recording (status, ended_at, and stream stats if available) + try: + if recording_obj is None: + from .models import Recording + recording_obj = Recording.objects.get(id=recording_id) + + cp = recording_obj.custom_properties or {} + cp.update({ + "ended_at": str(datetime.now()), + }) + if interrupted: + cp["status"] = "interrupted" + if interrupted_reason: + cp["interrupted_reason"] = interrupted_reason + else: + cp["status"] = "completed" + cp["bytes_written"] = bytes_written + cp["remux_success"] = remux_success + + # Try to get stream stats from TS proxy Redis metadata + try: + from core.utils import RedisClient + from apps.proxy.ts_proxy.redis_keys import RedisKeys + from apps.proxy.ts_proxy.constants import ChannelMetadataField + + r = RedisClient.get_client() + if r is not None: + metadata_key = RedisKeys.channel_metadata(str(channel.uuid)) + md = r.hgetall(metadata_key) + if md: + def _gv(bkey): + return md.get(bkey.encode('utf-8')) + + def _d(bkey, cast=str): + v = _gv(bkey) + try: + if v is None: + return None + s = v.decode('utf-8') + return cast(s) if cast is not str else s + except Exception: + return None + + stream_info = {} + # Video fields + for key, caster in [ + (ChannelMetadataField.VIDEO_CODEC, str), + (ChannelMetadataField.RESOLUTION, str), + (ChannelMetadataField.WIDTH, float), + (ChannelMetadataField.HEIGHT, float), + (ChannelMetadataField.SOURCE_FPS, float), + (ChannelMetadataField.PIXEL_FORMAT, str), + (ChannelMetadataField.VIDEO_BITRATE, float), + ]: + val = _d(key, caster) + if val is not None: + stream_info[key] = val + + # Audio fields + for key, caster in [ + (ChannelMetadataField.AUDIO_CODEC, str), + (ChannelMetadataField.SAMPLE_RATE, float), + (ChannelMetadataField.AUDIO_CHANNELS, str), + (ChannelMetadataField.AUDIO_BITRATE, float), + ]: + val = _d(key, caster) + if val is not 
None: + stream_info[key] = val + + if stream_info: + cp["stream_info"] = stream_info + except Exception as e: + logger.debug(f"Unable to capture stream stats for recording: {e}") + + # Removed: local thumbnail generation. We rely on EPG/VOD/TMDB/OMDb/keyless providers only. + + recording_obj.custom_properties = cp + recording_obj.save(update_fields=["custom_properties"]) + except Exception as e: + logger.debug(f"Unable to finalize Recording metadata: {e}") + + # Optionally run comskip post-process + try: + from core.models import CoreSettings + if CoreSettings.get_dvr_comskip_enabled(): + comskip_process_recording.delay(recording_id) + except Exception: + pass + + +@shared_task +def recover_recordings_on_startup(): + """ + On service startup, reschedule or resume recordings to handle server restarts. + - For recordings whose window includes 'now': mark interrupted and start a new recording for the remainder. + - For future recordings: ensure a task is scheduled at start_time. + Uses a Redis lock to ensure only one worker runs this recovery. + """ + try: + from django.utils import timezone + from .models import Recording + from core.utils import RedisClient + from .signals import schedule_recording_task + + redis = RedisClient.get_client() + if redis: + lock_key = "dvr:recover_lock" + # Set lock with 60s TTL; only first winner proceeds + if not redis.set(lock_key, "1", ex=60, nx=True): + return "Recovery already in progress" + + now = timezone.now() + + # Resume in-window recordings + active = Recording.objects.filter(start_time__lte=now, end_time__gt=now) + for rec in active: + try: + cp = rec.custom_properties or {} + # Mark interrupted due to restart; will flip to 'recording' when task starts + cp["status"] = "interrupted" + cp["interrupted_reason"] = "server_restarted" + rec.custom_properties = cp + rec.save(update_fields=["custom_properties"]) + + # Start recording for remaining window + run_recording.apply_async( + args=[rec.id, rec.channel_id, str(now), str(rec.end_time)], eta=now + ) + except Exception as e: + logger.warning(f"Failed to resume recording {rec.id}: {e}") + + # Ensure future recordings are scheduled + upcoming = Recording.objects.filter(start_time__gt=now, end_time__gt=now) + for rec in upcoming: + try: + # Schedule task at start_time + task_id = schedule_recording_task(rec) + if task_id: + rec.task_id = task_id + rec.save(update_fields=["task_id"]) + except Exception as e: + logger.warning(f"Failed to schedule recording {rec.id}: {e}") + + return "Recovery complete" + except Exception as e: + logger.error(f"Error during DVR recovery: {e}") + return f"Error: {e}" + +@shared_task +def comskip_process_recording(recording_id: int): + """Run comskip on the MKV to remove commercials and replace the file in place. + Safe to call even if comskip is not installed; stores status in custom_properties.comskip. 
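+
+    Overall flow (as implemented below): run comskip to produce an .edl file next to the
+    recording, derive the segments to keep between the flagged commercial blocks, extract
+    each segment with an ffmpeg stream copy, concatenate the pieces, and replace the
+    original file in place. Progress is broadcast to the frontend as comskip_status events.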
+ """ + import shutil + from django.db import DatabaseError + from .models import Recording + # Helper to broadcast status over websocket + def _ws(status: str, extra: dict | None = None): + try: + from core.utils import send_websocket_update + payload = {"success": True, "type": "comskip_status", "status": status, "recording_id": recording_id} + if extra: + payload.update(extra) + send_websocket_update('updates', 'update', payload) + except Exception: + pass + + try: + rec = Recording.objects.get(id=recording_id) + except Recording.DoesNotExist: + return "not_found" + + cp = rec.custom_properties.copy() if isinstance(rec.custom_properties, dict) else {} + + def _persist_custom_properties(): + """Persist updated custom_properties without raising if the row disappeared.""" + try: + updated = Recording.objects.filter(pk=recording_id).update(custom_properties=cp) + if not updated: + logger.warning( + "Recording %s vanished before comskip status could be saved", + recording_id, + ) + return False + except DatabaseError as db_err: + logger.warning( + "Failed to persist comskip status for recording %s: %s", + recording_id, + db_err, + ) + return False + except Exception as unexpected: + logger.warning( + "Unexpected error while saving comskip status for recording %s: %s", + recording_id, + unexpected, + ) + return False + return True + file_path = (cp or {}).get("file_path") + if not file_path or not os.path.exists(file_path): + return "no_file" + + if isinstance(cp.get("comskip"), dict) and cp["comskip"].get("status") == "completed": + return "already_processed" + + comskip_bin = shutil.which("comskip") + if not comskip_bin: + cp["comskip"] = {"status": "skipped", "reason": "comskip_not_installed"} + _persist_custom_properties() + _ws('skipped', {"reason": "comskip_not_installed"}) + return "comskip_missing" + + base, _ = os.path.splitext(file_path) + edl_path = f"{base}.edl" + + # Notify start + _ws('started', {"title": (cp.get('program') or {}).get('title') or os.path.basename(file_path)}) + + try: + cmd = [comskip_bin, "--output", os.path.dirname(file_path)] + # Prefer user-specified INI, fall back to known defaults + ini_candidates = [] + try: + custom_ini = CoreSettings.get_dvr_comskip_custom_path() + if custom_ini: + ini_candidates.append(custom_ini) + except Exception as ini_err: + logger.debug(f"Unable to load custom comskip.ini path: {ini_err}") + ini_candidates.extend(["/etc/comskip/comskip.ini", "/app/docker/comskip.ini"]) + selected_ini = None + for ini_path in ini_candidates: + if ini_path and os.path.exists(ini_path): + selected_ini = ini_path + cmd.extend([f"--ini={ini_path}"]) + break + cmd.append(file_path) + subprocess.run( + cmd, + check=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) + except subprocess.CalledProcessError as e: + stderr_tail = (e.stderr or "").strip().splitlines() + stderr_tail = stderr_tail[-5:] if stderr_tail else [] + detail = { + "status": "error", + "reason": "comskip_failed", + "returncode": e.returncode, + } + if e.returncode and e.returncode < 0: + try: + detail["signal"] = signal.Signals(-e.returncode).name + except Exception: + detail["signal"] = f"signal_{-e.returncode}" + if stderr_tail: + detail["stderr"] = "\n".join(stderr_tail) + if selected_ini: + detail["ini_path"] = selected_ini + cp["comskip"] = detail + _persist_custom_properties() + _ws('error', {"reason": "comskip_failed", "returncode": e.returncode}) + return "comskip_failed" + except Exception as e: + cp["comskip"] = {"status": "error", "reason": 
f"comskip_failed: {e}"} + _persist_custom_properties() + _ws('error', {"reason": str(e)}) + return "comskip_failed" + + if not os.path.exists(edl_path): + cp["comskip"] = {"status": "error", "reason": "edl_not_found"} + _persist_custom_properties() + _ws('error', {"reason": "edl_not_found"}) + return "no_edl" + + # Duration via ffprobe + def _ffprobe_duration(path): + try: + p = subprocess.run([ + "ffprobe", "-v", "error", "-show_entries", "format=duration", + "-of", "default=noprint_wrappers=1:nokey=1", path + ], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=True) + return float(p.stdout.strip()) + except Exception: + return None + + duration = _ffprobe_duration(file_path) + if duration is None: + cp["comskip"] = {"status": "error", "reason": "duration_unknown"} + _persist_custom_properties() + _ws('error', {"reason": "duration_unknown"}) + return "no_duration" + + commercials = [] + try: + with open(edl_path, "r") as f: + for line in f: + parts = line.strip().split() + if len(parts) >= 2: + try: + s = float(parts[0]); e = float(parts[1]) + commercials.append((max(0.0, s), min(duration, e))) + except Exception: + pass + except Exception: + pass + + commercials.sort() + keep = [] + cur = 0.0 + for s, e in commercials: + if s > cur: + keep.append((cur, max(cur, s))) + cur = max(cur, e) + if cur < duration: + keep.append((cur, duration)) + + if not commercials or sum((e - s) for s, e in commercials) <= 0.5: + cp["comskip"] = { + "status": "completed", + "skipped": True, + "edl": os.path.basename(edl_path), + } + if selected_ini: + cp["comskip"]["ini_path"] = selected_ini + _persist_custom_properties() + _ws('skipped', {"reason": "no_commercials", "commercials": 0}) + return "no_commercials" + + workdir = os.path.dirname(file_path) + parts = [] + try: + for idx, (s, e) in enumerate(keep): + seg = os.path.join(workdir, f"segment_{idx:03d}.mkv") + dur = max(0.0, e - s) + if dur <= 0.01: + continue + subprocess.run([ + "ffmpeg", "-y", "-ss", f"{s:.3f}", "-i", file_path, "-t", f"{dur:.3f}", + "-c", "copy", "-avoid_negative_ts", "1", seg + ], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + parts.append(seg) + + if not parts: + raise RuntimeError("no_parts") + + list_path = os.path.join(workdir, "concat_list.txt") + with open(list_path, "w") as lf: + for pth in parts: + escaped = pth.replace("'", "'\\''") + lf.write(f"file '{escaped}'\n") + + output_path = os.path.join(workdir, f"{os.path.splitext(os.path.basename(file_path))[0]}.cut.mkv") + subprocess.run([ + "ffmpeg", "-y", "-f", "concat", "-safe", "0", "-i", list_path, "-c", "copy", output_path + ], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + try: + os.replace(output_path, file_path) + except Exception: + shutil.copy(output_path, file_path) + + try: + os.remove(list_path) + except Exception: + pass + for pth in parts: + try: os.remove(pth) + except Exception: pass + + cp["comskip"] = { + "status": "completed", + "edl": os.path.basename(edl_path), + "segments_kept": len(parts), + "commercials": len(commercials), + } + if selected_ini: + cp["comskip"]["ini_path"] = selected_ini + _persist_custom_properties() + _ws('completed', {"commercials": len(commercials), "segments_kept": len(parts)}) + return "ok" + except Exception as e: + cp["comskip"] = {"status": "error", "reason": str(e)} + _persist_custom_properties() + _ws('error', {"reason": str(e)}) + return f"error:{e}" +def _resolve_poster_for_program(channel_name, program): + """Internal helper that attempts to resolve a poster URL and/or Logo 
id. + Returns (poster_logo_id, poster_url) where either may be None. + """ + poster_logo_id = None + poster_url = None + + # Try EPG Program images first + try: + from apps.epg.models import ProgramData + prog_id = program.get("id") if isinstance(program, dict) else None + if prog_id: + epg_program = ProgramData.objects.filter(id=prog_id).only("custom_properties").first() + if epg_program and epg_program.custom_properties: + epg_props = epg_program.custom_properties or {} + + def pick_best_image_from_epg_props(epg_props): + images = epg_props.get("images") or [] + if not isinstance(images, list): + return None + size_order = {"xxl": 6, "xl": 5, "l": 4, "m": 3, "s": 2, "xs": 1} + def score(img): + t = (img.get("type") or "").lower() + size = (img.get("size") or "").lower() + return (2 if t in ("poster", "cover") else 1, size_order.get(size, 0)) + best = None + for im in images: + if not isinstance(im, dict): + continue + url = im.get("url") + if not url: + continue + if best is None or score(im) > score(best): + best = im + return best.get("url") if best else None + + poster_url = pick_best_image_from_epg_props(epg_props) + if not poster_url: + icon = epg_props.get("icon") + if isinstance(icon, str) and icon: + poster_url = icon + except Exception: + pass + + # VOD logo fallback by title + if not poster_url and not poster_logo_id: + try: + from apps.vod.models import Movie, Series + title = (program.get("title") if isinstance(program, dict) else None) or channel_name + vod_logo = None + movie = Movie.objects.filter(name__iexact=title).select_related("logo").first() + if movie and movie.logo: + vod_logo = movie.logo + if not vod_logo: + series = Series.objects.filter(name__iexact=title).select_related("logo").first() + if series and series.logo: + vod_logo = series.logo + if vod_logo: + poster_logo_id = vod_logo.id + except Exception: + pass + + # Keyless providers (TVMaze & iTunes) + if not poster_url and not poster_logo_id: + try: + title = (program.get('title') if isinstance(program, dict) else None) or channel_name + if title: + # TVMaze + try: + url = f"https://api.tvmaze.com/singlesearch/shows?q={quote(title)}" + resp = requests.get(url, timeout=5) + if resp.ok: + data = resp.json() or {} + img = (data.get('image') or {}) + p = img.get('original') or img.get('medium') + if p: + poster_url = p + except Exception: + pass + # iTunes + if not poster_url: + try: + for media in ('movie', 'tvShow'): + url = f"https://itunes.apple.com/search?term={quote(title)}&media={media}&limit=1" + resp = requests.get(url, timeout=5) + if resp.ok: + data = resp.json() or {} + results = data.get('results') or [] + if results: + art = results[0].get('artworkUrl100') + if art: + poster_url = art.replace('100x100', '600x600') + break + except Exception: + pass + except Exception: + pass + + # Fallback: search existing Logo entries by name if we still have nothing + if not poster_logo_id and not poster_url: + try: + from .models import Logo + title = (program.get("title") if isinstance(program, dict) else None) or channel_name + existing = Logo.objects.filter(name__iexact=title).first() + if existing: + poster_logo_id = existing.id + poster_url = existing.url + except Exception: + pass + + # Save to Logo if URL available + if not poster_logo_id and poster_url and len(poster_url) <= 1000: + try: + from .models import Logo + logo, _ = Logo.objects.get_or_create(url=poster_url, defaults={"name": (program.get("title") if isinstance(program, dict) else None) or channel_name}) + poster_logo_id = logo.id + except 
Exception: + pass + + return poster_logo_id, poster_url + + +@shared_task +def prefetch_recording_artwork(recording_id): + """Prefetch poster info for a scheduled recording so the UI can show art in Upcoming.""" + try: + from .models import Recording + rec = Recording.objects.get(id=recording_id) + cp = rec.custom_properties or {} + program = cp.get("program") or {} + poster_logo_id, poster_url = _resolve_poster_for_program(rec.channel.name, program) + updated = False + if poster_logo_id and cp.get("poster_logo_id") != poster_logo_id: + cp["poster_logo_id"] = poster_logo_id + updated = True + if poster_url and cp.get("poster_url") != poster_url: + cp["poster_url"] = poster_url + updated = True + # Enrich with rating if available from ProgramData.custom_properties + try: + from apps.epg.models import ProgramData + prog_id = program.get("id") if isinstance(program, dict) else None + if prog_id: + epg_program = ProgramData.objects.filter(id=prog_id).only("custom_properties").first() + if epg_program and isinstance(epg_program.custom_properties, dict): + rating_val = epg_program.custom_properties.get("rating") + rating_sys = epg_program.custom_properties.get("rating_system") + season_val = epg_program.custom_properties.get("season") + episode_val = epg_program.custom_properties.get("episode") + onscreen = epg_program.custom_properties.get("onscreen_episode") + if rating_val and cp.get("rating") != rating_val: + cp["rating"] = rating_val + updated = True + if rating_sys and cp.get("rating_system") != rating_sys: + cp["rating_system"] = rating_sys + updated = True + if season_val is not None and cp.get("season") != season_val: + cp["season"] = season_val + updated = True + if episode_val is not None and cp.get("episode") != episode_val: + cp["episode"] = episode_val + updated = True + if onscreen and cp.get("onscreen_episode") != onscreen: + cp["onscreen_episode"] = onscreen + updated = True + except Exception: + pass + + if updated: + rec.custom_properties = cp + rec.save(update_fields=["custom_properties"]) + try: + from core.utils import send_websocket_update + send_websocket_update('updates', 'update', {"success": True, "type": "recording_updated", "recording_id": rec.id}) + except Exception: + pass + return "ok" + except Exception as e: + logger.debug(f"prefetch_recording_artwork failed: {e}") + return f"error: {e}" + + +@shared_task(bind=True) +def bulk_create_channels_from_streams(self, stream_ids, channel_profile_ids=None, starting_channel_number=None): + """ + Asynchronously create channels from a list of stream IDs. + Provides progress updates via WebSocket. 
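+    Channel numbers that are already in use are skipped automatically whenever a
+    number is auto-assigned (see starting_channel_number below).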
+ + Args: + stream_ids: List of stream IDs to create channels from + channel_profile_ids: Optional list of channel profile IDs to assign channels to + starting_channel_number: Optional starting channel number behavior: + - None: Use provider channel numbers, then auto-assign from 1 + - 0: Start with lowest available number and increment by 1 + - Other number: Use as starting number for auto-assignment + """ + from apps.channels.models import Stream, Channel, ChannelGroup, ChannelProfile, ChannelProfileMembership, Logo + from apps.epg.models import EPGData + from django.db import transaction + from django.shortcuts import get_object_or_404 + from core.utils import send_websocket_update + + task_id = self.request.id + total_streams = len(stream_ids) + created_channels = [] + errors = [] + + try: + # Send initial progress update + send_websocket_update('updates', 'update', { + 'type': 'bulk_channel_creation_progress', + 'task_id': task_id, + 'progress': 0, + 'total': total_streams, + 'status': 'starting', + 'message': f'Starting bulk creation of {total_streams} channels...' + }) + + # Gather current used numbers once + used_numbers = set(Channel.objects.all().values_list("channel_number", flat=True)) + + # Initialize next_number based on starting_channel_number mode + if starting_channel_number is None: + # Mode 1: Use provider numbers when available, auto-assign when not + next_number = 1 + elif starting_channel_number == 0: + # Mode 2: Start from lowest available number + next_number = 1 + else: + # Mode 3: Start from specified number + next_number = starting_channel_number + + def get_auto_number(): + nonlocal next_number + while next_number in used_numbers: + next_number += 1 + used_numbers.add(next_number) + return next_number + + logos_to_create = [] + channels_to_create = [] + streams_map = [] + logo_map = [] + profile_map = [] + + # Process streams in batches to avoid memory issues + batch_size = 100 + processed = 0 + + for i in range(0, total_streams, batch_size): + batch_stream_ids = stream_ids[i:i + batch_size] + # Fetch streams and preserve the order from batch_stream_ids + batch_streams_dict = {stream.id: stream for stream in Stream.objects.filter(id__in=batch_stream_ids)} + batch_streams = [batch_streams_dict[stream_id] for stream_id in batch_stream_ids if stream_id in batch_streams_dict] + + # Send progress update + send_websocket_update('updates', 'update', { + 'type': 'bulk_channel_creation_progress', + 'task_id': task_id, + 'progress': processed, + 'total': total_streams, + 'status': 'processing', + 'message': f'Processing streams {processed + 1}-{min(processed + batch_size, total_streams)} of {total_streams}...' 
+ }) + + for stream in batch_streams: + try: + name = stream.name + channel_group = stream.channel_group + stream_custom_props = stream.custom_properties or {} + + # Determine channel number based on starting_channel_number mode + channel_number = None + + if starting_channel_number is None: + # Mode 1: Use provider numbers when available + if "tvg-chno" in stream_custom_props: + channel_number = float(stream_custom_props["tvg-chno"]) + elif "channel-number" in stream_custom_props: + channel_number = float(stream_custom_props["channel-number"]) + elif "num" in stream_custom_props: + channel_number = float(stream_custom_props["num"]) + + # For modes 2 and 3 (starting_channel_number == 0 or specific number), + # ignore provider numbers and use sequential assignment + + # Get TVC guide station ID + tvc_guide_stationid = None + if "tvc-guide-stationid" in stream_custom_props: + tvc_guide_stationid = stream_custom_props["tvc-guide-stationid"] + + # Check if the determined/provider number is available + if channel_number is not None and ( + channel_number in used_numbers + or Channel.objects.filter(channel_number=channel_number).exists() + ): + # Provider number is taken, use auto-assignment + channel_number = get_auto_number() + elif channel_number is not None: + # Provider number is available, use it + used_numbers.add(channel_number) + else: + # No provider number or ignoring provider numbers, use auto-assignment + channel_number = get_auto_number() + + channel_data = { + "channel_number": channel_number, + "name": name, + "tvc_guide_stationid": tvc_guide_stationid, + "tvg_id": stream.tvg_id, + } + + # Only add channel_group_id if the stream has a channel group + if channel_group: + channel_data["channel_group_id"] = channel_group.id + + # Attempt to find existing EPGs with the same tvg-id + epgs = EPGData.objects.filter(tvg_id=stream.tvg_id) + if epgs: + channel_data["epg_data_id"] = epgs.first().id + + channel = Channel(**channel_data) + channels_to_create.append(channel) + streams_map.append([stream.id]) + + # Store profile IDs for this channel + profile_map.append(channel_profile_ids) + + # Handle logo - validate URL length to avoid PostgreSQL btree index errors + validated_logo_url = validate_logo_url(stream.logo_url) if stream.logo_url else None + if validated_logo_url: + logos_to_create.append( + Logo( + url=validated_logo_url, + name=stream.name or stream.tvg_id, + ) + ) + logo_map.append(validated_logo_url) + else: + logo_map.append(None) + + processed += 1 + + except Exception as e: + errors.append({ + 'stream_id': stream.id if 'stream' in locals() else 'unknown', + 'error': str(e) + }) + processed += 1 + + # Create logos first + if logos_to_create: + send_websocket_update('updates', 'update', { + 'type': 'bulk_channel_creation_progress', + 'task_id': task_id, + 'progress': processed, + 'total': total_streams, + 'status': 'creating_logos', + 'message': f'Creating {len(logos_to_create)} logos...' + }) + Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True) + + # Get logo objects for association + channel_logos = { + logo.url: logo + for logo in Logo.objects.filter( + url__in=[url for url in logo_map if url is not None] + ) + } + + # Create channels in database + if channels_to_create: + send_websocket_update('updates', 'update', { + 'type': 'bulk_channel_creation_progress', + 'task_id': task_id, + 'progress': processed, + 'total': total_streams, + 'status': 'creating_channels', + 'message': f'Creating {len(channels_to_create)} channels in database...' 
+ }) + + with transaction.atomic(): + created_channels = Channel.objects.bulk_create(channels_to_create) + + # Update channels with logos and create stream associations + update = [] + channel_stream_associations = [] + channel_profile_memberships = [] + + for channel, stream_ids, logo_url, profile_ids in zip( + created_channels, streams_map, logo_map, profile_map + ): + if logo_url: + channel.logo = channel_logos[logo_url] + update.append(channel) + + # Create stream associations + for stream_id in stream_ids: + from apps.channels.models import ChannelStream + channel_stream_associations.append( + ChannelStream(channel=channel, stream_id=stream_id, order=0) + ) + + # Handle channel profile membership + if profile_ids: + try: + specific_profiles = ChannelProfile.objects.filter(id__in=profile_ids) + channel_profile_memberships.extend([ + ChannelProfileMembership( + channel_profile=profile, + channel=channel, + enabled=True + ) + for profile in specific_profiles + ]) + except Exception as e: + errors.append({ + 'channel_id': channel.id, + 'error': f'Failed to add to profiles: {str(e)}' + }) + else: + # Add to all profiles by default + all_profiles = ChannelProfile.objects.all() + channel_profile_memberships.extend([ + ChannelProfileMembership( + channel_profile=profile, + channel=channel, + enabled=True + ) + for profile in all_profiles + ]) + + # Bulk update channels with logos + if update: + Channel.objects.bulk_update(update, ["logo"]) + + # Bulk create channel-stream associations + if channel_stream_associations: + from apps.channels.models import ChannelStream + ChannelStream.objects.bulk_create(channel_stream_associations, ignore_conflicts=True) + + # Bulk create profile memberships + if channel_profile_memberships: + ChannelProfileMembership.objects.bulk_create(channel_profile_memberships, ignore_conflicts=True) + + # Send completion update + send_websocket_update('updates', 'update', { + 'type': 'bulk_channel_creation_progress', + 'task_id': task_id, + 'progress': total_streams, + 'total': total_streams, + 'status': 'completed', + 'message': f'Successfully created {len(created_channels)} channels', + 'created_count': len(created_channels), + 'error_count': len(errors), + 'errors': errors[:10] # Send first 10 errors only + }) + + # Send general channel update notification + send_websocket_update('updates', 'update', { + 'type': 'channels_created', + 'count': len(created_channels) + }) + + return { + 'status': 'completed', + 'created_count': len(created_channels), + 'error_count': len(errors), + 'errors': errors + } + + except Exception as e: + logger.error(f"Bulk channel creation failed: {e}") + send_websocket_update('updates', 'update', { + 'type': 'bulk_channel_creation_progress', + 'task_id': task_id, + 'progress': 0, + 'total': total_streams, + 'status': 'failed', + 'message': f'Task failed: {str(e)}', + 'error': str(e) + }) + raise + + +@shared_task(bind=True) +def set_channels_names_from_epg(self, channel_ids): + """ + Celery task to set channel names from EPG data for multiple channels + """ + from core.utils import send_websocket_update + + task_id = self.request.id + total_channels = len(channel_ids) + updated_count = 0 + errors = [] + + try: + logger.info(f"Starting EPG name setting task for {total_channels} channels") + + # Send initial progress + send_websocket_update('updates', 'update', { + 'type': 'epg_name_setting_progress', + 'task_id': task_id, + 'progress': 0, + 'total': total_channels, + 'status': 'running', + 'message': 'Starting EPG name setting...' 
+ }) + + batch_size = 100 + for i in range(0, total_channels, batch_size): + batch_ids = channel_ids[i:i + batch_size] + batch_updates = [] + + # Get channels and their EPG data + channels = Channel.objects.filter(id__in=batch_ids).select_related('epg_data') + + for channel in channels: + try: + if channel.epg_data and channel.epg_data.name: + if channel.name != channel.epg_data.name: + channel.name = channel.epg_data.name + batch_updates.append(channel) + updated_count += 1 + except Exception as e: + errors.append(f"Channel {channel.id}: {str(e)}") + logger.error(f"Error processing channel {channel.id}: {e}") + + # Bulk update the batch + if batch_updates: + Channel.objects.bulk_update(batch_updates, ['name']) + + # Send progress update + progress = min(i + batch_size, total_channels) + send_websocket_update('updates', 'update', { + 'type': 'epg_name_setting_progress', + 'task_id': task_id, + 'progress': progress, + 'total': total_channels, + 'status': 'running', + 'message': f'Updated {updated_count} channel names...', + 'updated_count': updated_count + }) + + # Send completion notification + send_websocket_update('updates', 'update', { + 'type': 'epg_name_setting_progress', + 'task_id': task_id, + 'progress': total_channels, + 'total': total_channels, + 'status': 'completed', + 'message': f'Successfully updated {updated_count} channel names from EPG data', + 'updated_count': updated_count, + 'error_count': len(errors), + 'errors': errors + }) + + logger.info(f"EPG name setting task completed. Updated {updated_count} channels") + return { + 'status': 'completed', + 'updated_count': updated_count, + 'error_count': len(errors), + 'errors': errors + } + + except Exception as e: + logger.error(f"EPG name setting task failed: {e}") + send_websocket_update('updates', 'update', { + 'type': 'epg_name_setting_progress', + 'task_id': task_id, + 'progress': 0, + 'total': total_channels, + 'status': 'failed', + 'message': f'Task failed: {str(e)}', + 'error': str(e) + }) + raise + + +@shared_task(bind=True) +def set_channels_logos_from_epg(self, channel_ids): + """ + Celery task to set channel logos from EPG data for multiple channels + Creates logos from EPG icon URLs if they don't exist + """ + from .models import Logo + from core.utils import send_websocket_update + import requests + from urllib.parse import urlparse + + task_id = self.request.id + total_channels = len(channel_ids) + updated_count = 0 + created_logos_count = 0 + errors = [] + + try: + logger.info(f"Starting EPG logo setting task for {total_channels} channels") + + # Send initial progress + send_websocket_update('updates', 'update', { + 'type': 'epg_logo_setting_progress', + 'task_id': task_id, + 'progress': 0, + 'total': total_channels, + 'status': 'running', + 'message': 'Starting EPG logo setting...' 
+ }) + + batch_size = 50 # Smaller batch for logo processing + for i in range(0, total_channels, batch_size): + batch_ids = channel_ids[i:i + batch_size] + batch_updates = [] + + # Get channels and their EPG data + channels = Channel.objects.filter(id__in=batch_ids).select_related('epg_data', 'logo') + + for channel in channels: + try: + if channel.epg_data and channel.epg_data.icon_url: + icon_url = channel.epg_data.icon_url.strip() + + # Try to find existing logo with this URL + try: + logo = Logo.objects.get(url=icon_url) + except Logo.DoesNotExist: + # Create new logo from EPG icon URL + try: + # Generate a name for the logo + logo_name = channel.epg_data.name or f"Logo for {channel.epg_data.tvg_id}" + + # Create the logo record + logo = Logo.objects.create( + name=logo_name, + url=icon_url + ) + created_logos_count += 1 + logger.info(f"Created new logo from EPG: {logo_name} - {icon_url}") + + except Exception as create_error: + errors.append(f"Channel {channel.id}: Failed to create logo from {icon_url}: {str(create_error)}") + logger.error(f"Failed to create logo for channel {channel.id}: {create_error}") + continue + + # Update channel logo if different + if channel.logo != logo: + channel.logo = logo + batch_updates.append(channel) + updated_count += 1 + + except Exception as e: + errors.append(f"Channel {channel.id}: {str(e)}") + logger.error(f"Error processing channel {channel.id}: {e}") + + # Bulk update the batch + if batch_updates: + Channel.objects.bulk_update(batch_updates, ['logo']) + + # Send progress update + progress = min(i + batch_size, total_channels) + send_websocket_update('updates', 'update', { + 'type': 'epg_logo_setting_progress', + 'task_id': task_id, + 'progress': progress, + 'total': total_channels, + 'status': 'running', + 'message': f'Updated {updated_count} channel logos, created {created_logos_count} new logos...', + 'updated_count': updated_count, + 'created_logos_count': created_logos_count + }) + + # Send completion notification + send_websocket_update('updates', 'update', { + 'type': 'epg_logo_setting_progress', + 'task_id': task_id, + 'progress': total_channels, + 'total': total_channels, + 'status': 'completed', + 'message': f'Successfully updated {updated_count} channel logos and created {created_logos_count} new logos from EPG data', + 'updated_count': updated_count, + 'created_logos_count': created_logos_count, + 'error_count': len(errors), + 'errors': errors + }) + + logger.info(f"EPG logo setting task completed. 
Updated {updated_count} channels, created {created_logos_count} logos") + return { + 'status': 'completed', + 'updated_count': updated_count, + 'created_logos_count': created_logos_count, + 'error_count': len(errors), + 'errors': errors + } + + except Exception as e: + logger.error(f"EPG logo setting task failed: {e}") + send_websocket_update('updates', 'update', { + 'type': 'epg_logo_setting_progress', + 'task_id': task_id, + 'progress': 0, + 'total': total_channels, + 'status': 'failed', + 'message': f'Task failed: {str(e)}', + 'error': str(e) + }) + raise + + +@shared_task(bind=True) +def set_channels_tvg_ids_from_epg(self, channel_ids): + """ + Celery task to set channel TVG-IDs from EPG data for multiple channels + """ + from core.utils import send_websocket_update + + task_id = self.request.id + total_channels = len(channel_ids) + updated_count = 0 + errors = [] + + try: + logger.info(f"Starting EPG TVG-ID setting task for {total_channels} channels") + + # Send initial progress + send_websocket_update('updates', 'update', { + 'type': 'epg_tvg_id_setting_progress', + 'task_id': task_id, + 'progress': 0, + 'total': total_channels, + 'status': 'running', + 'message': 'Starting EPG TVG-ID setting...' + }) + + batch_size = 100 + for i in range(0, total_channels, batch_size): + batch_ids = channel_ids[i:i + batch_size] + batch_updates = [] + + # Get channels and their EPG data + channels = Channel.objects.filter(id__in=batch_ids).select_related('epg_data') + + for channel in channels: + try: + if channel.epg_data and channel.epg_data.tvg_id: + if channel.tvg_id != channel.epg_data.tvg_id: + channel.tvg_id = channel.epg_data.tvg_id + batch_updates.append(channel) + updated_count += 1 + except Exception as e: + errors.append(f"Channel {channel.id}: {str(e)}") + logger.error(f"Error processing channel {channel.id}: {e}") + + # Bulk update the batch + if batch_updates: + Channel.objects.bulk_update(batch_updates, ['tvg_id']) + + # Send progress update + progress = min(i + batch_size, total_channels) + send_websocket_update('updates', 'update', { + 'type': 'epg_tvg_id_setting_progress', + 'task_id': task_id, + 'progress': progress, + 'total': total_channels, + 'status': 'running', + 'message': f'Updated {updated_count} channel TVG-IDs...', + 'updated_count': updated_count + }) + + # Send completion notification + send_websocket_update('updates', 'update', { + 'type': 'epg_tvg_id_setting_progress', + 'task_id': task_id, + 'progress': total_channels, + 'total': total_channels, + 'status': 'completed', + 'message': f'Successfully updated {updated_count} channel TVG-IDs from EPG data', + 'updated_count': updated_count, + 'error_count': len(errors), + 'errors': errors + }) + + logger.info(f"EPG TVG-ID setting task completed. 
Updated {updated_count} channels") + return { + 'status': 'completed', + 'updated_count': updated_count, + 'error_count': len(errors), + 'errors': errors + } + + except Exception as e: + logger.error(f"EPG TVG-ID setting task failed: {e}") + send_websocket_update('updates', 'update', { + 'type': 'epg_tvg_id_setting_progress', + 'task_id': task_id, + 'progress': 0, + 'total': total_channels, + 'status': 'failed', + 'message': f'Task failed: {str(e)}', + 'error': str(e) + }) + raise diff --git a/apps/channels/tests/__init__.py b/apps/channels/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/apps/channels/tests/test_recurring_rules.py b/apps/channels/tests/test_recurring_rules.py new file mode 100644 index 00000000..982ecb93 --- /dev/null +++ b/apps/channels/tests/test_recurring_rules.py @@ -0,0 +1,40 @@ +from datetime import datetime, timedelta +from django.test import TestCase +from django.utils import timezone + +from apps.channels.models import Channel, RecurringRecordingRule, Recording +from apps.channels.tasks import sync_recurring_rule_impl, purge_recurring_rule_impl + + +class RecurringRecordingRuleTasksTests(TestCase): + def test_sync_recurring_rule_creates_and_purges_recordings(self): + now = timezone.now() + channel = Channel.objects.create(channel_number=1, name='Test Channel') + + start_time = (now + timedelta(minutes=15)).time().replace(second=0, microsecond=0) + end_time = (now + timedelta(minutes=75)).time().replace(second=0, microsecond=0) + + rule = RecurringRecordingRule.objects.create( + channel=channel, + days_of_week=[now.weekday()], + start_time=start_time, + end_time=end_time, + ) + + created = sync_recurring_rule_impl(rule.id, drop_existing=True, horizon_days=1) + self.assertEqual(created, 1) + + recording = Recording.objects.filter(custom_properties__rule__id=rule.id).first() + self.assertIsNotNone(recording) + self.assertEqual(recording.channel, channel) + self.assertEqual(recording.custom_properties.get('rule', {}).get('id'), rule.id) + + expected_start = timezone.make_aware( + datetime.combine(recording.start_time.date(), start_time), + timezone.get_current_timezone(), + ) + self.assertLess(abs((recording.start_time - expected_start).total_seconds()), 60) + + removed = purge_recurring_rule_impl(rule.id) + self.assertEqual(removed, 1) + self.assertFalse(Recording.objects.filter(custom_properties__rule__id=rule.id).exists()) diff --git a/apps/epg/api_views.py b/apps/epg/api_views.py index 240e2dcb..2fc5a743 100644 --- a/apps/epg/api_views.py +++ b/apps/epg/api_views.py @@ -2,18 +2,27 @@ import logging, os from rest_framework import viewsets, status from rest_framework.response import Response from rest_framework.views import APIView -from rest_framework.permissions import IsAuthenticated from rest_framework.decorators import action from drf_yasg.utils import swagger_auto_schema from drf_yasg import openapi from django.utils import timezone from datetime import timedelta from .models import EPGSource, ProgramData, EPGData # Added ProgramData -from .serializers import ProgramDataSerializer, EPGSourceSerializer, EPGDataSerializer # Updated serializer +from .serializers import ( + ProgramDataSerializer, + EPGSourceSerializer, + EPGDataSerializer, +) # Updated serializer from .tasks import refresh_epg_data +from apps.accounts.permissions import ( + Authenticated, + permission_classes_by_action, + permission_classes_by_method, +) logger = logging.getLogger(__name__) + # ───────────────────────────── # 1) EPG Source API (CRUD) # 
───────────────────────────── @@ -21,30 +30,38 @@ class EPGSourceViewSet(viewsets.ModelViewSet): """ API endpoint that allows EPG sources to be viewed or edited. """ + queryset = EPGSource.objects.all() serializer_class = EPGSourceSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] def list(self, request, *args, **kwargs): logger.debug("Listing all EPG sources.") return super().list(request, *args, **kwargs) - @action(detail=False, methods=['post']) + @action(detail=False, methods=["post"]) def upload(self, request): - if 'file' not in request.FILES: - return Response({'error': 'No file uploaded'}, status=status.HTTP_400_BAD_REQUEST) + if "file" not in request.FILES: + return Response( + {"error": "No file uploaded"}, status=status.HTTP_400_BAD_REQUEST + ) - file = request.FILES['file'] + file = request.FILES["file"] file_name = file.name - file_path = os.path.join('/data/uploads/epgs', file_name) + file_path = os.path.join("/data/uploads/epgs", file_name) os.makedirs(os.path.dirname(file_path), exist_ok=True) - with open(file_path, 'wb+') as destination: + with open(file_path, "wb+") as destination: for chunk in file.chunks(): destination.write(chunk) new_obj_data = request.data.copy() - new_obj_data['file_path'] = file_path + new_obj_data["file_path"] = file_path serializer = self.get_serializer(data=new_obj_data) serializer.is_valid(raise_exception=True) @@ -57,70 +74,111 @@ class EPGSourceViewSet(viewsets.ModelViewSet): instance = self.get_object() # Check if we're toggling is_active - if 'is_active' in request.data and instance.is_active != request.data['is_active']: + if ( + "is_active" in request.data + and instance.is_active != request.data["is_active"] + ): # Set appropriate status based on new is_active value - if request.data['is_active']: - request.data['status'] = 'idle' + if request.data["is_active"]: + request.data["status"] = "idle" else: - request.data['status'] = 'disabled' + request.data["status"] = "disabled" # Continue with regular partial update return super().partial_update(request, *args, **kwargs) + # ───────────────────────────── # 2) Program API (CRUD) # ───────────────────────────── class ProgramViewSet(viewsets.ModelViewSet): """Handles CRUD operations for EPG programs""" + queryset = ProgramData.objects.all() serializer_class = ProgramDataSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] def list(self, request, *args, **kwargs): logger.debug("Listing all EPG programs.") return super().list(request, *args, **kwargs) + # ───────────────────────────── # 3) EPG Grid View # ───────────────────────────── class EPGGridAPIView(APIView): """Returns all programs airing in the next 24 hours including currently running ones and recent ones""" + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + @swagger_auto_schema( operation_description="Retrieve programs from the previous hour, currently running and upcoming for the next 24 hours", - responses={200: ProgramDataSerializer(many=True)} + responses={200: ProgramDataSerializer(many=True)}, ) def get(self, request, format=None): # Use current time instead of midnight now = timezone.now() one_hour_ago = 
now - timedelta(hours=1) twenty_four_hours_later = now + timedelta(hours=24) - logger.debug(f"EPGGridAPIView: Querying programs between {one_hour_ago} and {twenty_four_hours_later}.") + logger.debug( + f"EPGGridAPIView: Querying programs between {one_hour_ago} and {twenty_four_hours_later}." + ) # Use select_related to prefetch EPGData and include programs from the last hour - programs = ProgramData.objects.select_related('epg').filter( + programs = ProgramData.objects.select_related("epg").filter( # Programs that end after one hour ago (includes recently ended programs) end_time__gt=one_hour_ago, # AND start before the end time window - start_time__lt=twenty_four_hours_later + start_time__lt=twenty_four_hours_later, ) count = programs.count() - logger.debug(f"EPGGridAPIView: Found {count} program(s), including recently ended, currently running, and upcoming shows.") + logger.debug( + f"EPGGridAPIView: Found {count} program(s), including recently ended, currently running, and upcoming shows." + ) - # Generate dummy programs for channels that have no EPG data + # Generate dummy programs for channels that have no EPG data OR dummy EPG sources from apps.channels.models import Channel + from apps.epg.models import EPGSource from django.db.models import Q - # Get channels with no EPG data + # Get channels with no EPG data at all (standard dummy) channels_without_epg = Channel.objects.filter(Q(epg_data__isnull=True)) - channels_count = channels_without_epg.count() - # Log more detailed information about channels missing EPG data - if channels_count > 0: + # Get channels with custom dummy EPG sources (generate on-demand with patterns) + channels_with_custom_dummy = Channel.objects.filter( + epg_data__epg_source__source_type='dummy' + ).distinct() + + # Log what we found + without_count = channels_without_epg.count() + custom_count = channels_with_custom_dummy.count() + + if without_count > 0: channel_names = [f"{ch.name} (ID: {ch.id})" for ch in channels_without_epg] - logger.warning(f"EPGGridAPIView: Missing EPG data for these channels: {', '.join(channel_names)}") + logger.debug( + f"EPGGridAPIView: Channels needing standard dummy EPG: {', '.join(channel_names)}" + ) - logger.debug(f"EPGGridAPIView: Found {channels_count} channels with no EPG data.") + if custom_count > 0: + channel_names = [f"{ch.name} (ID: {ch.id})" for ch in channels_with_custom_dummy] + logger.debug( + f"EPGGridAPIView: Channels needing custom dummy EPG: {', '.join(channel_names)}" + ) + + logger.debug( + f"EPGGridAPIView: Found {without_count} channels needing standard dummy, {custom_count} needing custom dummy EPG." + ) # Serialize the regular programs serialized_programs = ProgramDataSerializer(programs, many=True).data @@ -130,43 +188,122 @@ class EPGGridAPIView(APIView): (0, 4): [ "Late Night with {channel} - Where insomniacs unite!", "The 'Why Am I Still Awake?' 
Show on {channel}", - "Counting Sheep - A {channel} production for the sleepless" + "Counting Sheep - A {channel} production for the sleepless", ], (4, 8): [ "Dawn Patrol - Rise and shine with {channel}!", "Early Bird Special - Coffee not included", - "Morning Zombies - Before coffee viewing on {channel}" + "Morning Zombies - Before coffee viewing on {channel}", ], (8, 12): [ "Mid-Morning Meetings - Pretend you're paying attention while watching {channel}", "The 'I Should Be Working' Hour on {channel}", - "Productivity Killer - {channel}'s daytime programming" + "Productivity Killer - {channel}'s daytime programming", ], (12, 16): [ "Lunchtime Laziness with {channel}", "The Afternoon Slump - Brought to you by {channel}", - "Post-Lunch Food Coma Theater on {channel}" + "Post-Lunch Food Coma Theater on {channel}", ], (16, 20): [ "Rush Hour - {channel}'s alternative to traffic", "The 'What's For Dinner?' Debate on {channel}", - "Evening Escapism - {channel}'s remedy for reality" + "Evening Escapism - {channel}'s remedy for reality", ], (20, 24): [ "Prime Time Placeholder - {channel}'s finest not-programming", "The 'Netflix Was Too Complicated' Show on {channel}", - "Family Argument Avoider - Courtesy of {channel}" - ] + "Family Argument Avoider - Courtesy of {channel}", + ], } # Generate and append dummy programs dummy_programs = [] - for channel in channels_without_epg: - # Use the channel UUID as tvg_id for dummy programs to match in the guide + + # Import the function from output.views + from apps.output.views import generate_dummy_programs as gen_dummy_progs + + # Handle channels with CUSTOM dummy EPG sources (with patterns) + for channel in channels_with_custom_dummy: + # For dummy EPGs, ALWAYS use channel UUID to ensure unique programs per channel + # This prevents multiple channels assigned to the same dummy EPG from showing identical data + # Each channel gets its own unique program data even if they share the same EPG source dummy_tvg_id = str(channel.uuid) try: - # Create programs every 4 hours for the next 24 hours + # Get the custom dummy EPG source + epg_source = channel.epg_data.epg_source if channel.epg_data else None + + logger.debug(f"Generating custom dummy programs for channel: {channel.name} (ID: {channel.id})") + + # Determine which name to parse based on custom properties + name_to_parse = channel.name + if epg_source and epg_source.custom_properties: + custom_props = epg_source.custom_properties + name_source = custom_props.get('name_source') + + if name_source == 'stream': + # Get the stream index (1-based from user, convert to 0-based) + stream_index = custom_props.get('stream_index', 1) - 1 + + # Get streams ordered by channelstream order + channel_streams = channel.streams.all().order_by('channelstream__order') + + if channel_streams.exists() and 0 <= stream_index < channel_streams.count(): + stream = list(channel_streams)[stream_index] + name_to_parse = stream.name + logger.debug(f"Using stream name for parsing: {name_to_parse} (stream index: {stream_index})") + else: + logger.warning(f"Stream index {stream_index} not found for channel {channel.name}, falling back to channel name") + elif name_source == 'channel': + logger.debug(f"Using channel name for parsing: {name_to_parse}") + + # Generate programs using custom patterns from the dummy EPG source + # Use the same tvg_id that will be set in the program data + generated = gen_dummy_progs( + channel_id=dummy_tvg_id, + channel_name=name_to_parse, + num_days=1, + program_length_hours=4, + epg_source=epg_source + ) + 
+ # Custom dummy should always return data (either from patterns or fallback) + if generated: + logger.debug(f"Generated {len(generated)} custom dummy programs for {channel.name}") + # Convert generated programs to API format + for program in generated: + dummy_program = { + "id": f"dummy-custom-{channel.id}-{program['start_time'].hour}", + "epg": {"tvg_id": dummy_tvg_id, "name": channel.name}, + "start_time": program['start_time'].isoformat(), + "end_time": program['end_time'].isoformat(), + "title": program['title'], + "description": program['description'], + "tvg_id": dummy_tvg_id, + "sub_title": None, + "custom_properties": None, + } + dummy_programs.append(dummy_program) + else: + logger.warning(f"No programs generated for custom dummy EPG channel: {channel.name}") + + except Exception as e: + logger.error( + f"Error creating custom dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}" + ) + + # Handle channels with NO EPG data (standard dummy with humorous descriptions) + for channel in channels_without_epg: + # For channels with no EPG, use UUID to ensure uniqueness (matches frontend logic) + # The frontend uses: tvgRecord?.tvg_id ?? channel.uuid + # Since there's no EPG data, it will fall back to UUID + dummy_tvg_id = str(channel.uuid) + + try: + logger.debug(f"Generating standard dummy programs for channel: {channel.name} (ID: {channel.id})") + + # Create programs every 4 hours for the next 24 hours with humorous descriptions for hour_offset in range(0, 24, 4): # Use timedelta for time arithmetic instead of replace() to avoid hour overflow start_time = now + timedelta(hours=hour_offset) @@ -184,7 +321,9 @@ class EPGGridAPIView(APIView): if start_range <= hour < end_range: # Pick a description using the sum of the hour and day as seed # This makes it somewhat random but consistent for the same timeslot - description = descriptions[(hour + day) % len(descriptions)].format(channel=channel.name) + description = descriptions[ + (hour + day) % len(descriptions) + ].format(channel=channel.name) break else: # Fallback description if somehow no range matches @@ -192,29 +331,31 @@ class EPGGridAPIView(APIView): # Create a dummy program in the same format as regular programs dummy_program = { - 'id': f"dummy-{channel.id}-{hour_offset}", # Create a unique ID - 'epg': { - 'tvg_id': dummy_tvg_id, - 'name': channel.name - }, - 'start_time': start_time.isoformat(), - 'end_time': end_time.isoformat(), - 'title': f"{channel.name}", - 'description': description, - 'tvg_id': dummy_tvg_id, - 'sub_title': None, - 'custom_properties': None + "id": f"dummy-standard-{channel.id}-{hour_offset}", + "epg": {"tvg_id": dummy_tvg_id, "name": channel.name}, + "start_time": start_time.isoformat(), + "end_time": end_time.isoformat(), + "title": f"{channel.name}", + "description": description, + "tvg_id": dummy_tvg_id, + "sub_title": None, + "custom_properties": None, } dummy_programs.append(dummy_program) except Exception as e: - logger.error(f"Error creating dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}") + logger.error( + f"Error creating standard dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}" + ) # Combine regular and dummy programs all_programs = list(serialized_programs) + dummy_programs - logger.debug(f"EPGGridAPIView: Returning {len(all_programs)} total programs (including {len(dummy_programs)} dummy programs).") + logger.debug( + f"EPGGridAPIView: Returning {len(all_programs)} total programs (including {len(dummy_programs)} dummy 
programs)." + ) + + return Response({"data": all_programs}, status=status.HTTP_200_OK) - return Response({'data': all_programs}, status=status.HTTP_200_OK) # ───────────────────────────── # 4) EPG Import View @@ -222,15 +363,41 @@ class EPGGridAPIView(APIView): class EPGImportAPIView(APIView): """Triggers an EPG data refresh""" + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + @swagger_auto_schema( operation_description="Triggers an EPG data import", - responses={202: "EPG data import initiated"} + responses={202: "EPG data import initiated"}, ) def post(self, request, format=None): logger.info("EPGImportAPIView: Received request to import EPG data.") - refresh_epg_data.delay(request.data.get('id', None)) # Trigger Celery task + epg_id = request.data.get("id", None) + + # Check if this is a dummy EPG source + try: + from .models import EPGSource + epg_source = EPGSource.objects.get(id=epg_id) + if epg_source.source_type == 'dummy': + logger.info(f"EPGImportAPIView: Skipping refresh for dummy EPG source {epg_id}") + return Response( + {"success": False, "message": "Dummy EPG sources do not require refreshing."}, + status=status.HTTP_400_BAD_REQUEST, + ) + except EPGSource.DoesNotExist: + pass # Let the task handle the missing source + + refresh_epg_data.delay(epg_id) # Trigger Celery task logger.info("EPGImportAPIView: Task dispatched to refresh EPG data.") - return Response({'success': True, 'message': 'EPG data import initiated.'}, status=status.HTTP_202_ACCEPTED) + return Response( + {"success": True, "message": "EPG data import initiated."}, + status=status.HTTP_202_ACCEPTED, + ) # ───────────────────────────── @@ -240,6 +407,13 @@ class EPGDataViewSet(viewsets.ReadOnlyModelViewSet): """ API endpoint that allows EPGData objects to be viewed. 
""" + queryset = EPGData.objects.all() serializer_class = EPGDataSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + diff --git a/apps/epg/migrations/0015_alter_programdata_custom_properties.py b/apps/epg/migrations/0015_alter_programdata_custom_properties.py new file mode 100644 index 00000000..f33aa97f --- /dev/null +++ b/apps/epg/migrations/0015_alter_programdata_custom_properties.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-09-02 14:30 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0014_epgsource_extracted_file_path'), + ] + + operations = [ + migrations.AlterField( + model_name='programdata', + name='custom_properties', + field=models.JSONField(blank=True, default=dict, null=True), + ), + ] diff --git a/apps/epg/migrations/0016_epgdata_icon_url.py b/apps/epg/migrations/0016_epgdata_icon_url.py new file mode 100644 index 00000000..b934b024 --- /dev/null +++ b/apps/epg/migrations/0016_epgdata_icon_url.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-09-16 22:01 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0015_alter_programdata_custom_properties'), + ] + + operations = [ + migrations.AddField( + model_name='epgdata', + name='icon_url', + field=models.URLField(blank=True, max_length=500, null=True), + ), + ] diff --git a/apps/epg/migrations/0017_alter_epgsource_url.py b/apps/epg/migrations/0017_alter_epgsource_url.py new file mode 100644 index 00000000..dcb55e20 --- /dev/null +++ b/apps/epg/migrations/0017_alter_epgsource_url.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-09-24 21:07 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0016_epgdata_icon_url'), + ] + + operations = [ + migrations.AlterField( + model_name='epgsource', + name='url', + field=models.URLField(blank=True, max_length=1000, null=True), + ), + ] diff --git a/apps/epg/migrations/0018_epgsource_custom_properties_and_more.py b/apps/epg/migrations/0018_epgsource_custom_properties_and_more.py new file mode 100644 index 00000000..70ebb214 --- /dev/null +++ b/apps/epg/migrations/0018_epgsource_custom_properties_and_more.py @@ -0,0 +1,23 @@ +# Generated by Django 5.2.4 on 2025-10-17 17:02 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0017_alter_epgsource_url'), + ] + + operations = [ + migrations.AddField( + model_name='epgsource', + name='custom_properties', + field=models.JSONField(blank=True, default=dict, help_text='Custom properties for dummy EPG configuration (regex patterns, timezone, duration, etc.)', null=True), + ), + migrations.AlterField( + model_name='epgsource', + name='source_type', + field=models.CharField(choices=[('xmltv', 'XMLTV URL'), ('schedules_direct', 'Schedules Direct API'), ('dummy', 'Custom Dummy EPG')], max_length=20), + ), + ] diff --git a/apps/epg/migrations/0019_alter_programdata_sub_title.py b/apps/epg/migrations/0019_alter_programdata_sub_title.py new file mode 100644 index 00000000..5a53627c --- /dev/null +++ b/apps/epg/migrations/0019_alter_programdata_sub_title.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-10-22 21:59 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + 
dependencies = [ + ('epg', '0018_epgsource_custom_properties_and_more'), + ] + + operations = [ + migrations.AlterField( + model_name='programdata', + name='sub_title', + field=models.TextField(blank=True, null=True), + ), + ] diff --git a/apps/epg/migrations/0020_migrate_time_to_starttime_placeholders.py b/apps/epg/migrations/0020_migrate_time_to_starttime_placeholders.py new file mode 100644 index 00000000..8f53bb0a --- /dev/null +++ b/apps/epg/migrations/0020_migrate_time_to_starttime_placeholders.py @@ -0,0 +1,119 @@ +# Generated migration to replace {time} placeholders with {starttime} + +import re +from django.db import migrations + + +def migrate_time_placeholders(apps, schema_editor): + """ + Replace {time} with {starttime} and {time24} with {starttime24} + in all dummy EPG source custom_properties templates. + """ + EPGSource = apps.get_model('epg', 'EPGSource') + + # Fields that contain templates with placeholders + template_fields = [ + 'title_template', + 'description_template', + 'upcoming_title_template', + 'upcoming_description_template', + 'ended_title_template', + 'ended_description_template', + 'channel_logo_url', + 'program_poster_url', + ] + + # Get all dummy EPG sources + dummy_sources = EPGSource.objects.filter(source_type='dummy') + + updated_count = 0 + for source in dummy_sources: + if not source.custom_properties: + continue + + modified = False + custom_props = source.custom_properties.copy() + + for field in template_fields: + if field in custom_props and custom_props[field]: + original_value = custom_props[field] + + # Replace {time24} first (before {time}) to avoid double replacement + # e.g., {time24} shouldn't become {starttime24} via {time} -> {starttime} + new_value = original_value + new_value = re.sub(r'\{time24\}', '{starttime24}', new_value) + new_value = re.sub(r'\{time\}', '{starttime}', new_value) + + if new_value != original_value: + custom_props[field] = new_value + modified = True + + if modified: + source.custom_properties = custom_props + source.save(update_fields=['custom_properties']) + updated_count += 1 + + if updated_count > 0: + print(f"Migration complete: Updated {updated_count} dummy EPG source(s) with new placeholder names.") + else: + print("No dummy EPG sources needed placeholder updates.") + + +def reverse_migration(apps, schema_editor): + """ + Reverse the migration by replacing {starttime} back to {time}. 
+ """ + EPGSource = apps.get_model('epg', 'EPGSource') + + template_fields = [ + 'title_template', + 'description_template', + 'upcoming_title_template', + 'upcoming_description_template', + 'ended_title_template', + 'ended_description_template', + 'channel_logo_url', + 'program_poster_url', + ] + + dummy_sources = EPGSource.objects.filter(source_type='dummy') + + updated_count = 0 + for source in dummy_sources: + if not source.custom_properties: + continue + + modified = False + custom_props = source.custom_properties.copy() + + for field in template_fields: + if field in custom_props and custom_props[field]: + original_value = custom_props[field] + + # Reverse the replacements + new_value = original_value + new_value = re.sub(r'\{starttime24\}', '{time24}', new_value) + new_value = re.sub(r'\{starttime\}', '{time}', new_value) + + if new_value != original_value: + custom_props[field] = new_value + modified = True + + if modified: + source.custom_properties = custom_props + source.save(update_fields=['custom_properties']) + updated_count += 1 + + if updated_count > 0: + print(f"Reverse migration complete: Reverted {updated_count} dummy EPG source(s) to old placeholder names.") + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0019_alter_programdata_sub_title'), + ] + + operations = [ + migrations.RunPython(migrate_time_placeholders, reverse_migration), + ] diff --git a/apps/epg/models.py b/apps/epg/models.py index 8abfb26f..e5f3847b 100644 --- a/apps/epg/models.py +++ b/apps/epg/models.py @@ -8,6 +8,7 @@ class EPGSource(models.Model): SOURCE_TYPE_CHOICES = [ ('xmltv', 'XMLTV URL'), ('schedules_direct', 'Schedules Direct API'), + ('dummy', 'Custom Dummy EPG'), ] STATUS_IDLE = 'idle' @@ -28,7 +29,7 @@ class EPGSource(models.Model): name = models.CharField(max_length=255, unique=True) source_type = models.CharField(max_length=20, choices=SOURCE_TYPE_CHOICES) - url = models.URLField(blank=True, null=True) # For XMLTV + url = models.URLField(max_length=1000, blank=True, null=True) # For XMLTV api_key = models.CharField(max_length=255, blank=True, null=True) # For Schedules Direct is_active = models.BooleanField(default=True) file_path = models.CharField(max_length=1024, blank=True, null=True) @@ -38,6 +39,12 @@ class EPGSource(models.Model): refresh_task = models.ForeignKey( PeriodicTask, on_delete=models.SET_NULL, null=True, blank=True ) + custom_properties = models.JSONField( + default=dict, + blank=True, + null=True, + help_text="Custom properties for dummy EPG configuration (regex patterns, timezone, duration, etc.)" + ) status = models.CharField( max_length=20, choices=STATUS_CHOICES, @@ -127,6 +134,7 @@ class EPGData(models.Model): # and a name (which might simply be the tvg_id if no real channel exists). 
tvg_id = models.CharField(max_length=255, null=True, blank=True, db_index=True) name = models.CharField(max_length=255) + icon_url = models.URLField(max_length=500, null=True, blank=True) epg_source = models.ForeignKey( EPGSource, on_delete=models.CASCADE, @@ -147,10 +155,10 @@ class ProgramData(models.Model): start_time = models.DateTimeField() end_time = models.DateTimeField() title = models.CharField(max_length=255) - sub_title = models.CharField(max_length=255, blank=True, null=True) + sub_title = models.TextField(blank=True, null=True) description = models.TextField(blank=True, null=True) tvg_id = models.CharField(max_length=255, null=True, blank=True) - custom_properties = models.TextField(null=True, blank=True) + custom_properties = models.JSONField(default=dict, blank=True, null=True) def __str__(self): return f"{self.title} ({self.start_time} - {self.end_time})" diff --git a/apps/epg/serializers.py b/apps/epg/serializers.py index 09390237..bfb750fc 100644 --- a/apps/epg/serializers.py +++ b/apps/epg/serializers.py @@ -1,10 +1,17 @@ +from core.utils import validate_flexible_url from rest_framework import serializers from .models import EPGSource, EPGData, ProgramData from apps.channels.models import Channel class EPGSourceSerializer(serializers.ModelSerializer): - epg_data_ids = serializers.SerializerMethodField() + epg_data_count = serializers.SerializerMethodField() read_only_fields = ['created_at', 'updated_at'] + url = serializers.CharField( + required=False, + allow_blank=True, + allow_null=True, + validators=[validate_flexible_url] + ) class Meta: model = EPGSource @@ -21,11 +28,13 @@ class EPGSourceSerializer(serializers.ModelSerializer): 'last_message', 'created_at', 'updated_at', - 'epg_data_ids' + 'custom_properties', + 'epg_data_count' ] - def get_epg_data_ids(self, obj): - return list(obj.epgs.values_list('id', flat=True)) + def get_epg_data_count(self, obj): + """Return the count of EPG data entries instead of all IDs to prevent large payloads""" + return obj.epgs.count() class ProgramDataSerializer(serializers.ModelSerializer): class Meta: @@ -45,5 +54,6 @@ class EPGDataSerializer(serializers.ModelSerializer): 'id', 'tvg_id', 'name', + 'icon_url', 'epg_source', ] diff --git a/apps/epg/signals.py b/apps/epg/signals.py index e8a004cb..e41d3aaf 100644 --- a/apps/epg/signals.py +++ b/apps/epg/signals.py @@ -1,9 +1,9 @@ from django.db.models.signals import post_save, post_delete, pre_save from django.dispatch import receiver -from .models import EPGSource +from .models import EPGSource, EPGData from .tasks import refresh_epg_data, delete_epg_refresh_task_by_id from django_celery_beat.models import PeriodicTask, IntervalSchedule -from core.utils import is_protected_path +from core.utils import is_protected_path, send_websocket_update import json import logging import os @@ -12,15 +12,77 @@ logger = logging.getLogger(__name__) @receiver(post_save, sender=EPGSource) def trigger_refresh_on_new_epg_source(sender, instance, created, **kwargs): - # Trigger refresh only if the source is newly created and active - if created and instance.is_active: + # Trigger refresh only if the source is newly created, active, and not a dummy EPG + if created and instance.is_active and instance.source_type != 'dummy': refresh_epg_data.delay(instance.id) +@receiver(post_save, sender=EPGSource) +def create_dummy_epg_data(sender, instance, created, **kwargs): + """ + Automatically create EPGData for dummy EPG sources when they are created. 
+ This allows channels to be assigned to dummy EPGs immediately without + requiring a refresh first. + """ + if instance.source_type == 'dummy': + # Ensure dummy EPGs always have idle status and no status message + if instance.status != EPGSource.STATUS_IDLE or instance.last_message: + instance.status = EPGSource.STATUS_IDLE + instance.last_message = None + instance.save(update_fields=['status', 'last_message']) + + # Create a URL-friendly tvg_id from the dummy EPG name + # Replace spaces and special characters with underscores + friendly_tvg_id = instance.name.replace(' ', '_').replace('-', '_') + # Remove any characters that aren't alphanumeric or underscores + friendly_tvg_id = ''.join(c for c in friendly_tvg_id if c.isalnum() or c == '_') + # Convert to lowercase for consistency + friendly_tvg_id = friendly_tvg_id.lower() + # Prefix with 'dummy_' to make it clear this is a dummy EPG + friendly_tvg_id = f"dummy_{friendly_tvg_id}" + + # Create or update the EPGData record + epg_data, data_created = EPGData.objects.get_or_create( + tvg_id=friendly_tvg_id, + epg_source=instance, + defaults={ + 'name': instance.name, + 'icon_url': None + } + ) + + # Update name if it changed and record already existed + if not data_created and epg_data.name != instance.name: + epg_data.name = instance.name + epg_data.save(update_fields=['name']) + + if data_created: + logger.info(f"Auto-created EPGData for dummy EPG source: {instance.name} (ID: {instance.id})") + + # Send websocket update to notify frontend that EPG data has been created + # This allows the channel form to immediately show the new dummy EPG without refreshing + send_websocket_update('updates', 'update', { + 'type': 'epg_data_created', + 'source_id': instance.id, + 'source_name': instance.name, + 'epg_data_id': epg_data.id + }) + else: + logger.debug(f"EPGData already exists for dummy EPG source: {instance.name} (ID: {instance.id})") + @receiver(post_save, sender=EPGSource) def create_or_update_refresh_task(sender, instance, **kwargs): """ Create or update a Celery Beat periodic task when an EPGSource is created/updated. + Skip creating tasks for dummy EPG sources as they don't need refreshing. """ + # Skip task creation for dummy EPGs + if instance.source_type == 'dummy': + # If there's an existing task, disable it + if instance.refresh_task: + instance.refresh_task.enabled = False + instance.refresh_task.save(update_fields=['enabled']) + return + task_name = f"epg_source-refresh-{instance.id}" interval, _ = IntervalSchedule.objects.get_or_create( every=int(instance.refresh_interval), @@ -80,7 +142,14 @@ def delete_refresh_task(sender, instance, **kwargs): def update_status_on_active_change(sender, instance, **kwargs): """ When an EPGSource's is_active field changes, update the status accordingly. + For dummy EPGs, always ensure status is idle and no status message. """ + # Dummy EPGs should always be idle with no status message + if instance.source_type == 'dummy': + instance.status = EPGSource.STATUS_IDLE + instance.last_message = None + return + if instance.pk: # Only for existing records, not new ones try: # Get the current record from the database diff --git a/apps/epg/tasks.py b/apps/epg/tasks.py index d3062171..b6350686 100644 --- a/apps/epg/tasks.py +++ b/apps/epg/tasks.py @@ -29,6 +29,25 @@ from core.utils import acquire_task_lock, release_task_lock, send_websocket_upda logger = logging.getLogger(__name__) +def validate_icon_url_fast(icon_url, max_length=None): + """ + Fast validation for icon URLs during parsing. 
+ Returns None if URL is too long, original URL otherwise. + If max_length is None, gets it dynamically from the EPGData model field. + """ + if max_length is None: + # Get max_length dynamically from the model field + max_length = EPGData._meta.get_field('icon_url').max_length + + if icon_url and len(icon_url) > max_length: + logger.warning(f"Icon URL too long ({len(icon_url)} > {max_length}), skipping: {icon_url[:100]}...") + return None + return icon_url + + +MAX_EXTRACT_CHUNK_SIZE = 65536 # 64kb (base2) + + def send_epg_update(source_id, action, progress, **kwargs): """Send WebSocket update about EPG download/parsing progress""" # Start with the base data dictionary @@ -114,8 +133,9 @@ def delete_epg_refresh_task_by_id(epg_id): @shared_task def refresh_all_epg_data(): logger.info("Starting refresh_epg_data task.") - active_sources = EPGSource.objects.filter(is_active=True) - logger.debug(f"Found {active_sources.count()} active EPGSource(s).") + # Exclude dummy EPG sources from refresh - they don't need refreshing + active_sources = EPGSource.objects.filter(is_active=True).exclude(source_type='dummy') + logger.debug(f"Found {active_sources.count()} active EPGSource(s) (excluding dummy EPGs).") for source in active_sources: refresh_epg_data(source.id) @@ -161,6 +181,13 @@ def refresh_epg_data(source_id): gc.collect() return + # Skip refresh for dummy EPG sources - they don't need refreshing + if source.source_type == 'dummy': + logger.info(f"Skipping refresh for dummy EPG source {source.name} (ID: {source_id})") + release_task_lock('refresh_epg_data', source_id) + gc.collect() + return + # Continue with the normal processing... logger.info(f"Processing EPGSource: {source.name} (type: {source.source_type})") if source.source_type == 'xmltv': @@ -186,6 +213,12 @@ def refresh_epg_data(source_id): fetch_schedules_direct(source) source.save(update_fields=['updated_at']) + # After successful EPG refresh, evaluate DVR series rules to schedule new episodes + try: + from apps.channels.tasks import evaluate_series_rules + evaluate_series_rules.delay() + except Exception: + pass except Exception as e: logger.error(f"Error in refresh_epg_data for source {source_id}: {e}", exc_info=True) try: @@ -641,7 +674,11 @@ def extract_compressed_file(file_path, output_path=None, delete_original=False): # Reset file pointer and extract the content gz_file.seek(0) with open(extracted_path, 'wb') as out_file: - out_file.write(gz_file.read()) + while True: + chunk = gz_file.read(MAX_EXTRACT_CHUNK_SIZE) + if not chunk or len(chunk) == 0: + break + out_file.write(chunk) except Exception as e: logger.error(f"Error extracting GZIP file: {e}", exc_info=True) return None @@ -685,9 +722,13 @@ def extract_compressed_file(file_path, output_path=None, delete_original=False): return None # Extract the first XML file - xml_content = zip_file.read(xml_files[0]) with open(extracted_path, 'wb') as out_file: - out_file.write(xml_content) + with zip_file.open(xml_files[0], "r") as xml_file: + while True: + chunk = xml_file.read(MAX_EXTRACT_CHUNK_SIZE) + if not chunk or len(chunk) == 0: + break + out_file.write(chunk) logger.info(f"Successfully extracted zip file to: {extracted_path}") @@ -815,6 +856,7 @@ def parse_channels_only(source): processed_channels = 0 batch_size = 500 # Process in batches to limit memory usage progress = 0 # Initialize progress variable here + icon_url_max_length = EPGData._meta.get_field('icon_url').max_length # Get max length for icon_url field # Track memory at key points if process: @@ -843,7 +885,7 @@ 
def parse_channels_only(source): # Change iterparse to look for both channel and programme elements logger.debug(f"Creating iterparse context for channels and programmes") - channel_parser = etree.iterparse(source_file, events=('end',), tag=('channel', 'programme'), remove_blank_text=True) + channel_parser = etree.iterparse(source_file, events=('end',), tag=('channel', 'programme'), remove_blank_text=True, recover=True) if process: logger.debug(f"[parse_channels_only] Memory after creating iterparse: {process.memory_info().rss / 1024 / 1024:.2f} MB") @@ -857,10 +899,15 @@ def parse_channels_only(source): tvg_id = elem.get('id', '').strip() if tvg_id: display_name = None + icon_url = None for child in elem: - if child.tag == 'display-name' and child.text: + if display_name is None and child.tag == 'display-name' and child.text: display_name = child.text.strip() - break + elif child.tag == 'icon': + raw_icon_url = child.get('src', '').strip() + icon_url = validate_icon_url_fast(raw_icon_url, icon_url_max_length) + if display_name and icon_url: + break # No need to continue if we have both if not display_name: display_name = tvg_id @@ -878,17 +925,24 @@ def parse_channels_only(source): epgs_to_create.append(EPGData( tvg_id=tvg_id, name=display_name, + icon_url=icon_url, epg_source=source, )) logger.debug(f"[parse_channels_only] Added new channel to epgs_to_create 1: {tvg_id} - {display_name}") processed_channels += 1 continue - # We use the cached object to check if the name has changed + # We use the cached object to check if the name or icon_url has changed epg_obj = existing_epgs[tvg_id] + needs_update = False if epg_obj.name != display_name: - # Only update if the name actually changed epg_obj.name = display_name + needs_update = True + if epg_obj.icon_url != icon_url: + epg_obj.icon_url = icon_url + needs_update = True + + if needs_update: epgs_to_update.append(epg_obj) logger.debug(f"[parse_channels_only] Added channel to update to epgs_to_update: {tvg_id} - {display_name}") else: @@ -899,6 +953,7 @@ def parse_channels_only(source): epgs_to_create.append(EPGData( tvg_id=tvg_id, name=display_name, + icon_url=icon_url, epg_source=source, )) logger.debug(f"[parse_channels_only] Added new channel to epgs_to_create 2: {tvg_id} - {display_name}") @@ -921,7 +976,7 @@ def parse_channels_only(source): logger.info(f"[parse_channels_only] Bulk updating {len(epgs_to_update)} EPG entries") if process: logger.info(f"[parse_channels_only] Memory before bulk_update: {process.memory_info().rss / 1024 / 1024:.2f} MB") - EPGData.objects.bulk_update(epgs_to_update, ["name"]) + EPGData.objects.bulk_update(epgs_to_update, ["name", "icon_url"]) if process: logger.info(f"[parse_channels_only] Memory after bulk_update: {process.memory_info().rss / 1024 / 1024:.2f} MB") epgs_to_update = [] @@ -988,7 +1043,7 @@ def parse_channels_only(source): logger.debug(f"[parse_channels_only] Created final batch of {len(epgs_to_create)} EPG entries") if epgs_to_update: - EPGData.objects.bulk_update(epgs_to_update, ["name"]) + EPGData.objects.bulk_update(epgs_to_update, ["name", "icon_url"]) logger.debug(f"[parse_channels_only] Updated final batch of {len(epgs_to_update)} EPG entries") if process: logger.debug(f"[parse_channels_only] Memory after final batch creation: {process.memory_info().rss / 1024 / 1024:.2f} MB") @@ -1102,6 +1157,12 @@ def parse_programs_for_tvg_id(epg_id): epg = EPGData.objects.get(id=epg_id) epg_source = epg.epg_source + # Skip program parsing for dummy EPG sources - they don't have program data files + 
if epg_source.source_type == 'dummy': + logger.info(f"Skipping program parsing for dummy EPG source {epg_source.name} (ID: {epg_id})") + release_task_lock('parse_epg_programs', epg_id) + return + if not Channel.objects.filter(epg_data=epg).exists(): logger.info(f"No channels matched to EPG {epg.tvg_id}") release_task_lock('parse_epg_programs', epg_id) @@ -1195,7 +1256,7 @@ def parse_programs_for_tvg_id(epg_id): source_file = open(file_path, 'rb') # Stream parse the file using lxml's iterparse - program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True) + program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True, recover=True) for _, elem in program_parser: if elem.get('channel') == epg.tvg_id: @@ -1224,10 +1285,7 @@ def parse_programs_for_tvg_id(epg_id): if custom_props: logger.trace(f"Number of custom properties: {len(custom_props)}") - try: - custom_properties_json = json.dumps(custom_props) - except Exception as e: - logger.error(f"Error serializing custom properties to JSON: {e}", exc_info=True) + custom_properties_json = custom_props programs_to_create.append(ProgramData( epg=epg, @@ -1612,6 +1670,11 @@ def extract_custom_properties(prog): if categories: custom_props['categories'] = categories + # Extract keywords (new) + keywords = [kw.text.strip() for kw in prog.findall('keyword') if kw.text and kw.text.strip()] + if keywords: + custom_props['keywords'] = keywords + # Extract episode numbers for ep_num in prog.findall('episode-num'): system = ep_num.get('system', '') @@ -1637,6 +1700,9 @@ def extract_custom_properties(prog): elif system == 'dd_progid' and ep_num.text: # Store the dd_progid format custom_props['dd_progid'] = ep_num.text.strip() + # Add support for other systems like thetvdb.com, themoviedb.org, imdb.com + elif system in ['thetvdb.com', 'themoviedb.org', 'imdb.com'] and ep_num.text: + custom_props[f'{system}_id'] = ep_num.text.strip() # Extract ratings more efficiently rating_elem = prog.find('rating') @@ -1647,37 +1713,172 @@ def extract_custom_properties(prog): if rating_elem.get('system'): custom_props['rating_system'] = rating_elem.get('system') + # Extract star ratings (new) + star_ratings = [] + for star_rating in prog.findall('star-rating'): + value_elem = star_rating.find('value') + if value_elem is not None and value_elem.text: + rating_data = {'value': value_elem.text.strip()} + if star_rating.get('system'): + rating_data['system'] = star_rating.get('system') + star_ratings.append(rating_data) + if star_ratings: + custom_props['star_ratings'] = star_ratings + # Extract credits more efficiently credits_elem = prog.find('credits') if credits_elem is not None: credits = {} - for credit_type in ['director', 'actor', 'writer', 'presenter', 'producer']: - names = [e.text.strip() for e in credits_elem.findall(credit_type) if e.text and e.text.strip()] - if names: - credits[credit_type] = names + for credit_type in ['director', 'actor', 'writer', 'adapter', 'producer', 'composer', 'editor', 'presenter', 'commentator', 'guest']: + if credit_type == 'actor': + # Handle actors with roles and guest status + actors = [] + for actor_elem in credits_elem.findall('actor'): + if actor_elem.text and actor_elem.text.strip(): + actor_data = {'name': actor_elem.text.strip()} + if actor_elem.get('role'): + actor_data['role'] = actor_elem.get('role') + if actor_elem.get('guest') == 'yes': + actor_data['guest'] = True + actors.append(actor_data) + if actors: + credits['actor'] = actors + else: 
+ names = [e.text.strip() for e in credits_elem.findall(credit_type) if e.text and e.text.strip()] + if names: + credits[credit_type] = names if credits: custom_props['credits'] = credits # Extract other common program metadata date_elem = prog.find('date') if date_elem is not None and date_elem.text: - custom_props['year'] = date_elem.text.strip()[:4] # Just the year part + custom_props['date'] = date_elem.text.strip() country_elem = prog.find('country') if country_elem is not None and country_elem.text: custom_props['country'] = country_elem.text.strip() + # Extract language information (new) + language_elem = prog.find('language') + if language_elem is not None and language_elem.text: + custom_props['language'] = language_elem.text.strip() + + orig_language_elem = prog.find('orig-language') + if orig_language_elem is not None and orig_language_elem.text: + custom_props['original_language'] = orig_language_elem.text.strip() + + # Extract length (new) + length_elem = prog.find('length') + if length_elem is not None and length_elem.text: + try: + length_value = int(length_elem.text.strip()) + length_units = length_elem.get('units', 'minutes') + custom_props['length'] = {'value': length_value, 'units': length_units} + except ValueError: + pass + + # Extract video information (new) + video_elem = prog.find('video') + if video_elem is not None: + video_info = {} + for video_attr in ['present', 'colour', 'aspect', 'quality']: + attr_elem = video_elem.find(video_attr) + if attr_elem is not None and attr_elem.text: + video_info[video_attr] = attr_elem.text.strip() + if video_info: + custom_props['video'] = video_info + + # Extract audio information (new) + audio_elem = prog.find('audio') + if audio_elem is not None: + audio_info = {} + for audio_attr in ['present', 'stereo']: + attr_elem = audio_elem.find(audio_attr) + if attr_elem is not None and attr_elem.text: + audio_info[audio_attr] = attr_elem.text.strip() + if audio_info: + custom_props['audio'] = audio_info + + # Extract subtitles information (new) + subtitles = [] + for subtitle_elem in prog.findall('subtitles'): + subtitle_data = {} + if subtitle_elem.get('type'): + subtitle_data['type'] = subtitle_elem.get('type') + lang_elem = subtitle_elem.find('language') + if lang_elem is not None and lang_elem.text: + subtitle_data['language'] = lang_elem.text.strip() + if subtitle_data: + subtitles.append(subtitle_data) + + if subtitles: + custom_props['subtitles'] = subtitles + + # Extract reviews (new) + reviews = [] + for review_elem in prog.findall('review'): + if review_elem.text and review_elem.text.strip(): + review_data = {'content': review_elem.text.strip()} + if review_elem.get('type'): + review_data['type'] = review_elem.get('type') + if review_elem.get('source'): + review_data['source'] = review_elem.get('source') + if review_elem.get('reviewer'): + review_data['reviewer'] = review_elem.get('reviewer') + reviews.append(review_data) + if reviews: + custom_props['reviews'] = reviews + + # Extract images (new) + images = [] + for image_elem in prog.findall('image'): + if image_elem.text and image_elem.text.strip(): + image_data = {'url': image_elem.text.strip()} + for attr in ['type', 'size', 'orient', 'system']: + if image_elem.get(attr): + image_data[attr] = image_elem.get(attr) + images.append(image_data) + if images: + custom_props['images'] = images + icon_elem = prog.find('icon') if icon_elem is not None and icon_elem.get('src'): custom_props['icon'] = icon_elem.get('src') - # Simpler approach for boolean flags - for kw in 
['previously-shown', 'premiere', 'new', 'live']: + # Simpler approach for boolean flags - expanded list + for kw in ['previously-shown', 'premiere', 'new', 'live', 'last-chance']: if prog.find(kw) is not None: custom_props[kw.replace('-', '_')] = True + # Extract premiere and last-chance text content if available + premiere_elem = prog.find('premiere') + if premiere_elem is not None: + custom_props['premiere'] = True + if premiere_elem.text and premiere_elem.text.strip(): + custom_props['premiere_text'] = premiere_elem.text.strip() + + last_chance_elem = prog.find('last-chance') + if last_chance_elem is not None: + custom_props['last_chance'] = True + if last_chance_elem.text and last_chance_elem.text.strip(): + custom_props['last_chance_text'] = last_chance_elem.text.strip() + + # Extract previously-shown details + prev_shown_elem = prog.find('previously-shown') + if prev_shown_elem is not None: + custom_props['previously_shown'] = True + prev_shown_data = {} + if prev_shown_elem.get('start'): + prev_shown_data['start'] = prev_shown_elem.get('start') + if prev_shown_elem.get('channel'): + prev_shown_data['channel'] = prev_shown_elem.get('channel') + if prev_shown_data: + custom_props['previously_shown_details'] = prev_shown_data + return custom_props + def clear_element(elem): """Clear an XML element and its parent to free memory.""" try: @@ -1756,3 +1957,20 @@ def detect_file_format(file_path=None, content=None): # If we reach here, we couldn't reliably determine the format return format_type, is_compressed, file_extension + + +def generate_dummy_epg(source): + """ + DEPRECATED: This function is no longer used. + + Dummy EPG programs are now generated on-demand when they are requested + (during XMLTV export or EPG grid display), rather than being pre-generated + and stored in the database. + + See: apps/output/views.py - generate_custom_dummy_programs() + + This function remains for backward compatibility but should not be called. + """ + logger.warning(f"generate_dummy_epg() called for {source.name} but this function is deprecated. 
" + f"Dummy EPG programs are now generated on-demand.") + return True diff --git a/apps/hdhr/api_views.py b/apps/hdhr/api_views.py index 0d7b77e0..8f1609d4 100644 --- a/apps/hdhr/api_views.py +++ b/apps/hdhr/api_views.py @@ -1,7 +1,7 @@ from rest_framework import viewsets, status from rest_framework.response import Response from rest_framework.views import APIView -from rest_framework.permissions import IsAuthenticated +from apps.accounts.permissions import Authenticated, permission_classes_by_action from django.http import JsonResponse, HttpResponseForbidden, HttpResponse import logging from drf_yasg.utils import swagger_auto_schema @@ -17,22 +17,30 @@ from django.views import View from django.utils.decorators import method_decorator from django.contrib.auth.decorators import login_required from django.views.decorators.csrf import csrf_exempt -from apps.m3u.models import M3UAccountProfile + # Configure logger logger = logging.getLogger(__name__) + @login_required def hdhr_dashboard_view(request): """Render the HDHR management page.""" hdhr_devices = HDHRDevice.objects.all() return render(request, "hdhr/hdhr.html", {"hdhr_devices": hdhr_devices}) + # πŸ”Ή 1) HDHomeRun Device API class HDHRDeviceViewSet(viewsets.ModelViewSet): """Handles CRUD operations for HDHomeRun devices""" + queryset = HDHRDevice.objects.all() serializer_class = HDHRDeviceSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] # πŸ”Ή 2) Discover API @@ -41,48 +49,19 @@ class DiscoverAPIView(APIView): @swagger_auto_schema( operation_description="Retrieve HDHomeRun device discovery information", - responses={200: openapi.Response("HDHR Discovery JSON")} + responses={200: openapi.Response("HDHR Discovery JSON")}, ) def get(self, request, profile=None): uri_parts = ["hdhr"] if profile is not None: uri_parts.append(profile) - base_url = request.build_absolute_uri(f'/{"/".join(uri_parts)}/').rstrip('/') + base_url = request.build_absolute_uri(f'/{"/".join(uri_parts)}/').rstrip("/") device = HDHRDevice.objects.first() - # Calculate tuner count from active profiles from active M3U accounts (excluding default "custom Default" profile) - profiles = M3UAccountProfile.objects.filter( - is_active=True, - m3u_account__is_active=True # Only include profiles from enabled M3U accounts - ).exclude(id=1) - - # 1. Check if any profile has unlimited streams (max_streams=0) - has_unlimited = profiles.filter(max_streams=0).exists() - - # 2. Calculate tuner count from limited profiles - limited_tuners = 0 - if not has_unlimited: - limited_tuners = profiles.filter(max_streams__gt=0).aggregate( - total=models.Sum('max_streams') - ).get('total', 0) or 0 - - # 3. Add custom stream count to tuner count - custom_stream_count = Stream.objects.filter(is_custom=True).count() - logger.debug(f"Found {custom_stream_count} custom streams") - - # 4. Calculate final tuner count - if has_unlimited: - # If there are unlimited profiles, start with 10 plus custom streams - tuner_count = 10 + custom_stream_count - else: - # Otherwise use the limited profile sum plus custom streams - tuner_count = limited_tuners + custom_stream_count - - # 5. 
Ensure minimum of 1 tuners - tuner_count = max(1, tuner_count) - - logger.debug(f"Calculated tuner count: {tuner_count} (limited profiles: {limited_tuners}, custom streams: {custom_stream_count}, unlimited: {has_unlimited})") + # Calculate tuner count using centralized function + from apps.m3u.utils import calculate_tuner_count + tuner_count = calculate_tuner_count(minimum=1, unlimited_default=10) # Create a unique DeviceID for the HDHomeRun device based on profile ID or a default value device_ID = "12345678" # Default DeviceID @@ -123,17 +102,17 @@ class LineupAPIView(APIView): @swagger_auto_schema( operation_description="Retrieve the available channel lineup", - responses={200: openapi.Response("Channel Lineup JSON")} + responses={200: openapi.Response("Channel Lineup JSON")}, ) def get(self, request, profile=None): if profile is not None: channel_profile = ChannelProfile.objects.get(name=profile) channels = Channel.objects.filter( channelprofilemembership__channel_profile=channel_profile, - channelprofilemembership__enabled=True - ).order_by('channel_number') + channelprofilemembership__enabled=True, + ).order_by("channel_number") else: - channels = Channel.objects.all().order_by('channel_number') + channels = Channel.objects.all().order_by("channel_number") lineup = [] for ch in channels: @@ -146,13 +125,15 @@ class LineupAPIView(APIView): else: formatted_channel_number = "" - lineup.append({ - "GuideNumber": formatted_channel_number, - "GuideName": ch.name, - "URL": request.build_absolute_uri(f"/proxy/ts/stream/{ch.uuid}"), - "Guide_ID": formatted_channel_number, - "Station": formatted_channel_number, - }) + lineup.append( + { + "GuideNumber": formatted_channel_number, + "GuideName": ch.name, + "URL": request.build_absolute_uri(f"/proxy/ts/stream/{ch.uuid}"), + "Guide_ID": formatted_channel_number, + "Station": formatted_channel_number, + } + ) return JsonResponse(lineup, safe=False) @@ -162,14 +143,14 @@ class LineupStatusAPIView(APIView): @swagger_auto_schema( operation_description="Retrieve the HDHomeRun lineup status", - responses={200: openapi.Response("Lineup Status JSON")} + responses={200: openapi.Response("Lineup Status JSON")}, ) def get(self, request, profile=None): data = { "ScanInProgress": 0, "ScanPossible": 0, "Source": "Cable", - "SourceList": ["Cable"] + "SourceList": ["Cable"], } return JsonResponse(data) @@ -180,10 +161,10 @@ class HDHRDeviceXMLAPIView(APIView): @swagger_auto_schema( operation_description="Retrieve the HDHomeRun device XML configuration", - responses={200: openapi.Response("HDHR Device XML")} + responses={200: openapi.Response("HDHR Device XML")}, ) def get(self, request): - base_url = request.build_absolute_uri('/hdhr/').rstrip('/') + base_url = request.build_absolute_uri("/hdhr/").rstrip("/") xml_response = f""" diff --git a/apps/hdhr/views.py b/apps/hdhr/views.py index 048eb340..40823259 100644 --- a/apps/hdhr/views.py +++ b/apps/hdhr/views.py @@ -1,7 +1,7 @@ from rest_framework import viewsets, status from rest_framework.response import Response from rest_framework.views import APIView -from rest_framework.permissions import IsAuthenticated +from apps.accounts.permissions import Authenticated, permission_classes_by_action from django.http import JsonResponse, HttpResponseForbidden, HttpResponse from drf_yasg.utils import swagger_auto_schema from drf_yasg import openapi @@ -16,18 +16,26 @@ from django.utils.decorators import method_decorator from django.contrib.auth.decorators import login_required from django.views.decorators.csrf import 
csrf_exempt + @login_required def hdhr_dashboard_view(request): """Render the HDHR management page.""" hdhr_devices = HDHRDevice.objects.all() return render(request, "hdhr/hdhr.html", {"hdhr_devices": hdhr_devices}) + # πŸ”Ή 1) HDHomeRun Device API class HDHRDeviceViewSet(viewsets.ModelViewSet): """Handles CRUD operations for HDHomeRun devices""" + queryset = HDHRDevice.objects.all() serializer_class = HDHRDeviceSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] # πŸ”Ή 2) Discover API @@ -36,10 +44,10 @@ class DiscoverAPIView(APIView): @swagger_auto_schema( operation_description="Retrieve HDHomeRun device discovery information", - responses={200: openapi.Response("HDHR Discovery JSON")} + responses={200: openapi.Response("HDHR Discovery JSON")}, ) def get(self, request): - base_url = request.build_absolute_uri('/hdhr/').rstrip('/') + base_url = request.build_absolute_uri("/hdhr/").rstrip("/") device = HDHRDevice.objects.first() if not device: @@ -75,15 +83,15 @@ class LineupAPIView(APIView): @swagger_auto_schema( operation_description="Retrieve the available channel lineup", - responses={200: openapi.Response("Channel Lineup JSON")} + responses={200: openapi.Response("Channel Lineup JSON")}, ) def get(self, request): - channels = Channel.objects.all().order_by('channel_number') + channels = Channel.objects.all().order_by("channel_number") lineup = [ { "GuideNumber": str(ch.channel_number), "GuideName": ch.name, - "URL": request.build_absolute_uri(f"/proxy/ts/stream/{ch.uuid}") + "URL": request.build_absolute_uri(f"/proxy/ts/stream/{ch.uuid}"), } for ch in channels ] @@ -96,14 +104,14 @@ class LineupStatusAPIView(APIView): @swagger_auto_schema( operation_description="Retrieve the HDHomeRun lineup status", - responses={200: openapi.Response("Lineup Status JSON")} + responses={200: openapi.Response("Lineup Status JSON")}, ) def get(self, request): data = { "ScanInProgress": 0, "ScanPossible": 0, "Source": "Cable", - "SourceList": ["Cable"] + "SourceList": ["Cable"], } return JsonResponse(data) @@ -114,10 +122,10 @@ class HDHRDeviceXMLAPIView(APIView): @swagger_auto_schema( operation_description="Retrieve the HDHomeRun device XML configuration", - responses={200: openapi.Response("HDHR Device XML")} + responses={200: openapi.Response("HDHR Device XML")}, ) def get(self, request): - base_url = request.build_absolute_uri('/hdhr/').rstrip('/') + base_url = request.build_absolute_uri("/hdhr/").rstrip("/") xml_response = f""" diff --git a/apps/m3u/admin.py b/apps/m3u/admin.py index d4d6885b..c9b9ad0d 100644 --- a/apps/m3u/admin.py +++ b/apps/m3u/admin.py @@ -1,6 +1,8 @@ from django.contrib import admin from django.utils.html import format_html -from .models import M3UAccount, M3UFilter, ServerGroup, UserAgent +from .models import M3UAccount, M3UFilter, ServerGroup, UserAgent, M3UAccountProfile +import json + class M3UFilterInline(admin.TabularInline): model = M3UFilter @@ -8,50 +10,181 @@ class M3UFilterInline(admin.TabularInline): verbose_name = "M3U Filter" verbose_name_plural = "M3U Filters" + @admin.register(M3UAccount) class M3UAccountAdmin(admin.ModelAdmin): - list_display = ('name', 'server_url', 'server_group', 'max_streams', 'is_active', 'user_agent_display', 'uploaded_file_link', 'created_at', 'updated_at') - list_filter = ('is_active', 'server_group') - search_fields = ('name', 'server_url', 'server_group__name') + list_display = ( + 
"name", + "server_url", + "server_group", + "max_streams", + "priority", + "is_active", + "user_agent_display", + "uploaded_file_link", + "created_at", + "updated_at", + ) + list_filter = ("is_active", "server_group") + search_fields = ("name", "server_url", "server_group__name") inlines = [M3UFilterInline] - actions = ['activate_accounts', 'deactivate_accounts'] + actions = ["activate_accounts", "deactivate_accounts"] # Handle both ForeignKey and ManyToManyField cases for UserAgent def user_agent_display(self, obj): - if hasattr(obj, 'user_agent'): # ForeignKey case + if hasattr(obj, "user_agent"): # ForeignKey case return obj.user_agent.user_agent if obj.user_agent else "None" - elif hasattr(obj, 'user_agents'): # ManyToManyField case + elif hasattr(obj, "user_agents"): # ManyToManyField case return ", ".join([ua.user_agent for ua in obj.user_agents.all()]) or "None" return "None" + user_agent_display.short_description = "User Agent(s)" + def vod_enabled_display(self, obj): + """Display whether VOD is enabled for this account""" + if obj.custom_properties: + custom_props = obj.custom_properties or {} + return "Yes" if custom_props.get('enable_vod', False) else "No" + return "No" + vod_enabled_display.short_description = "VOD Enabled" + vod_enabled_display.boolean = True + def uploaded_file_link(self, obj): if obj.uploaded_file: - return format_html("Download M3U", obj.uploaded_file.url) + return format_html( + "Download M3U", obj.uploaded_file.url + ) return "No file uploaded" + uploaded_file_link.short_description = "Uploaded File" - @admin.action(description='Activate selected accounts') + @admin.action(description="Activate selected accounts") def activate_accounts(self, request, queryset): queryset.update(is_active=True) - @admin.action(description='Deactivate selected accounts') + @admin.action(description="Deactivate selected accounts") def deactivate_accounts(self, request, queryset): queryset.update(is_active=False) # Add ManyToManyField for Django Admin (if applicable) - if hasattr(M3UAccount, 'user_agents'): - filter_horizontal = ('user_agents',) # Only for ManyToManyField + if hasattr(M3UAccount, "user_agents"): + filter_horizontal = ("user_agents",) # Only for ManyToManyField + @admin.register(M3UFilter) class M3UFilterAdmin(admin.ModelAdmin): - list_display = ('m3u_account', 'filter_type', 'regex_pattern', 'exclude') - list_filter = ('filter_type', 'exclude') - search_fields = ('regex_pattern',) - ordering = ('m3u_account',) + list_display = ("m3u_account", "filter_type", "regex_pattern", "exclude") + list_filter = ("filter_type", "exclude") + search_fields = ("regex_pattern",) + ordering = ("m3u_account",) + @admin.register(ServerGroup) class ServerGroupAdmin(admin.ModelAdmin): - list_display = ('name',) - search_fields = ('name',) + list_display = ("name",) + search_fields = ("name",) + +@admin.register(M3UAccountProfile) +class M3UAccountProfileAdmin(admin.ModelAdmin): + list_display = ( + "name", + "m3u_account", + "is_default", + "is_active", + "max_streams", + "current_viewers", + "account_status_display", + "account_expiration_display", + "last_refresh_display", + ) + list_filter = ("is_active", "is_default", "m3u_account__account_type") + search_fields = ("name", "m3u_account__name") + readonly_fields = ("account_info_display",) + + def account_status_display(self, obj): + """Display account status from custom properties""" + status = obj.get_account_status() + if status: + # Create colored status display + color_map = { + 'Active': 'green', + 'Expired': 'red', + 
'Disabled': 'red', + 'Banned': 'red', + } + color = color_map.get(status, 'black') + return format_html( + '{}', + color, + status + ) + return "Unknown" + account_status_display.short_description = "Account Status" + + def account_expiration_display(self, obj): + """Display account expiration from custom properties""" + expiration = obj.get_account_expiration() + if expiration: + from datetime import datetime + if expiration < datetime.now(): + return format_html( + '{}', + expiration.strftime('%Y-%m-%d %H:%M') + ) + else: + return format_html( + '{}', + expiration.strftime('%Y-%m-%d %H:%M') + ) + return "Unknown" + account_expiration_display.short_description = "Expires" + + def last_refresh_display(self, obj): + """Display last refresh time from custom properties""" + last_refresh = obj.get_last_refresh() + if last_refresh: + return last_refresh.strftime('%Y-%m-%d %H:%M:%S') + return "Never" + last_refresh_display.short_description = "Last Refresh" + + def account_info_display(self, obj): + """Display formatted account information from custom properties""" + if not obj.custom_properties: + return "No account information available" + + html_parts = [] + + # User Info + user_info = obj.custom_properties.get('user_info', {}) + if user_info: + html_parts.append("

<h4>User Information:</h4>") + html_parts.append("<ul>") + for key, value in user_info.items(): + if key == 'exp_date' and value: + try: + from datetime import datetime + exp_date = datetime.fromtimestamp(float(value)) + value = exp_date.strftime('%Y-%m-%d %H:%M:%S') + except (ValueError, TypeError): + pass + html_parts.append(f"<li>{key}: {value}</li>") + html_parts.append("</ul>")
+ + # Server Info + server_info = obj.custom_properties.get('server_info', {}) + if server_info: + html_parts.append("<h4>Server Information:</h4>") + html_parts.append("<ul>") + for key, value in server_info.items(): + html_parts.append(f"<li>{key}: {value}</li>") + html_parts.append("</ul>")
+ + # Last Refresh + last_refresh = obj.custom_properties.get('last_refresh') + if last_refresh: + html_parts.append(f"<strong>Last Refresh:</strong> {last_refresh}
") + + return format_html(''.join(html_parts)) if html_parts else "No account information available" + + account_info_display.short_description = "Account Information" diff --git a/apps/m3u/api_urls.py b/apps/m3u/api_urls.py index 41fc2fbc..6a80a1fe 100644 --- a/apps/m3u/api_urls.py +++ b/apps/m3u/api_urls.py @@ -1,18 +1,44 @@ from django.urls import path, include from rest_framework.routers import DefaultRouter -from .api_views import M3UAccountViewSet, M3UFilterViewSet, ServerGroupViewSet, RefreshM3UAPIView, RefreshSingleM3UAPIView, UserAgentViewSet, M3UAccountProfileViewSet +from .api_views import ( + M3UAccountViewSet, + M3UFilterViewSet, + ServerGroupViewSet, + RefreshM3UAPIView, + RefreshSingleM3UAPIView, + RefreshAccountInfoAPIView, + UserAgentViewSet, + M3UAccountProfileViewSet, +) -app_name = 'm3u' +app_name = "m3u" router = DefaultRouter() -router.register(r'accounts', M3UAccountViewSet, basename='m3u-account') -router.register(r'accounts\/(?P\d+)\/profiles', M3UAccountProfileViewSet, basename='m3u-account-profiles') -router.register(r'filters', M3UFilterViewSet, basename='m3u-filter') -router.register(r'server-groups', ServerGroupViewSet, basename='server-group') +router.register(r"accounts", M3UAccountViewSet, basename="m3u-account") +router.register( + r"accounts\/(?P\d+)\/profiles", + M3UAccountProfileViewSet, + basename="m3u-account-profiles", +) +router.register( + r"accounts\/(?P\d+)\/filters", + M3UFilterViewSet, + basename="m3u-filters", +) +router.register(r"server-groups", ServerGroupViewSet, basename="server-group") urlpatterns = [ - path('refresh/', RefreshM3UAPIView.as_view(), name='m3u_refresh'), - path('refresh//', RefreshSingleM3UAPIView.as_view(), name='m3u_refresh_single'), + path("refresh/", RefreshM3UAPIView.as_view(), name="m3u_refresh"), + path( + "refresh//", + RefreshSingleM3UAPIView.as_view(), + name="m3u_refresh_single", + ), + path( + "refresh-account-info//", + RefreshAccountInfoAPIView.as_view(), + name="m3u_refresh_account_info", + ), ] urlpatterns += router.urls diff --git a/apps/m3u/api_views.py b/apps/m3u/api_views.py index daac92b1..878ae7c6 100644 --- a/apps/m3u/api_views.py +++ b/apps/m3u/api_views.py @@ -1,7 +1,11 @@ from rest_framework import viewsets, status from rest_framework.response import Response from rest_framework.views import APIView -from rest_framework.permissions import IsAuthenticated +from apps.accounts.permissions import ( + Authenticated, + permission_classes_by_action, + permission_classes_by_method, +) from drf_yasg.utils import swagger_auto_schema from drf_yasg import openapi from django.shortcuts import get_object_or_404 @@ -11,13 +15,14 @@ import os from rest_framework.decorators import action from django.conf import settings from .tasks import refresh_m3u_groups +import json -# Import all models, including UserAgent. from .models import M3UAccount, M3UFilter, ServerGroup, M3UAccountProfile from core.models import UserAgent from apps.channels.models import ChannelGroupM3UAccount from core.serializers import UserAgentSerializer -# Import all serializers, including the UserAgentSerializer. 
+from apps.vod.models import M3UVODCategoryRelation + from .serializers import ( M3UAccountSerializer, M3UFilterSerializer, @@ -25,80 +30,112 @@ from .serializers import ( M3UAccountProfileSerializer, ) -from .tasks import refresh_single_m3u_account, refresh_m3u_accounts -from django.core.files.storage import default_storage -from django.core.files.base import ContentFile +from .tasks import refresh_single_m3u_account, refresh_m3u_accounts, refresh_account_info +import json + class M3UAccountViewSet(viewsets.ModelViewSet): """Handles CRUD operations for M3U accounts""" - queryset = M3UAccount.objects.prefetch_related('channel_group') + + queryset = M3UAccount.objects.prefetch_related("channel_group") serializer_class = M3UAccountSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] def create(self, request, *args, **kwargs): # Handle file upload first, if any file_path = None - if 'file' in request.FILES: - file = request.FILES['file'] + if "file" in request.FILES: + file = request.FILES["file"] file_name = file.name - file_path = os.path.join('/data/uploads/m3us', file_name) + file_path = os.path.join("/data/uploads/m3us", file_name) os.makedirs(os.path.dirname(file_path), exist_ok=True) - with open(file_path, 'wb+') as destination: + with open(file_path, "wb+") as destination: for chunk in file.chunks(): destination.write(chunk) # Add file_path to the request data so it's available during creation request.data._mutable = True # Allow modification of the request data - request.data['file_path'] = file_path # Include the file path if a file was uploaded - + request.data["file_path"] = ( + file_path # Include the file path if a file was uploaded + ) + # Handle the user_agent field - convert "null" string to None - if 'user_agent' in request.data and request.data['user_agent'] == 'null': - request.data['user_agent'] = None - + if "user_agent" in request.data and request.data["user_agent"] == "null": + request.data["user_agent"] = None + # Handle server_url appropriately - if 'server_url' in request.data and not request.data['server_url']: - request.data.pop('server_url') - + if "server_url" in request.data and not request.data["server_url"]: + request.data.pop("server_url") + request.data._mutable = False # Make the request data immutable again # Now call super().create() to create the instance response = super().create(request, *args, **kwargs) - print(response.data.get('account_type')) - if response.data.get('account_type') == M3UAccount.Types.XC: - refresh_m3u_groups(response.data.get('id')) + account_type = response.data.get("account_type") + account_id = response.data.get("id") + + # Notify frontend that a new playlist was created + from core.utils import send_websocket_update + send_websocket_update('updates', 'update', { + 'type': 'playlist_created', + 'playlist_id': account_id + }) + + if account_type == M3UAccount.Types.XC: + refresh_m3u_groups(account_id) + + # Check if VOD is enabled + enable_vod = request.data.get("enable_vod", False) + if enable_vod: + from apps.vod.tasks import refresh_categories + + refresh_categories(account_id) # After the instance is created, return the response return response def update(self, request, *args, **kwargs): instance = self.get_object() + old_vod_enabled = False + + # Check current VOD setting + if instance.custom_properties: + custom_props = instance.custom_properties or {} + old_vod_enabled 
= custom_props.get("enable_vod", False) # Handle file upload first, if any file_path = None - if 'file' in request.FILES: - file = request.FILES['file'] + if "file" in request.FILES: + file = request.FILES["file"] file_name = file.name - file_path = os.path.join('/data/uploads/m3us', file_name) + file_path = os.path.join("/data/uploads/m3us", file_name) os.makedirs(os.path.dirname(file_path), exist_ok=True) - with open(file_path, 'wb+') as destination: + with open(file_path, "wb+") as destination: for chunk in file.chunks(): destination.write(chunk) # Add file_path to the request data so it's available during creation request.data._mutable = True # Allow modification of the request data - request.data['file_path'] = file_path # Include the file path if a file was uploaded - + request.data["file_path"] = ( + file_path # Include the file path if a file was uploaded + ) + # Handle the user_agent field - convert "null" string to None - if 'user_agent' in request.data and request.data['user_agent'] == 'null': - request.data['user_agent'] = None - + if "user_agent" in request.data and request.data["user_agent"] == "null": + request.data["user_agent"] = None + # Handle server_url appropriately - if 'server_url' in request.data and not request.data['server_url']: - request.data.pop('server_url') - + if "server_url" in request.data and not request.data["server_url"]: + request.data.pop("server_url") + request.data._mutable = False # Make the request data immutable again if instance.file_path and os.path.exists(instance.file_path): @@ -107,6 +144,18 @@ class M3UAccountViewSet(viewsets.ModelViewSet): # Now call super().update() to update the instance response = super().update(request, *args, **kwargs) + # Check if VOD setting changed and trigger refresh if needed + new_vod_enabled = request.data.get("enable_vod", old_vod_enabled) + + if ( + instance.account_type == M3UAccount.Types.XC + and not old_vod_enabled + and new_vod_enabled + ): + from apps.vod.tasks import refresh_vod_content + + refresh_vod_content.delay(instance.id) + # After the instance is updated, return the response return response @@ -115,75 +164,281 @@ class M3UAccountViewSet(viewsets.ModelViewSet): instance = self.get_object() # Check if we're toggling is_active - if 'is_active' in request.data and instance.is_active != request.data['is_active']: + if ( + "is_active" in request.data + and instance.is_active != request.data["is_active"] + ): # Set appropriate status based on new is_active value - if request.data['is_active']: - request.data['status'] = M3UAccount.Status.IDLE + if request.data["is_active"]: + request.data["status"] = M3UAccount.Status.IDLE else: - request.data['status'] = M3UAccount.Status.DISABLED + request.data["status"] = M3UAccount.Status.DISABLED # Continue with regular partial update return super().partial_update(request, *args, **kwargs) + @action(detail=True, methods=["post"], url_path="refresh-vod") + def refresh_vod(self, request, pk=None): + """Trigger VOD content refresh for XtreamCodes accounts""" + account = self.get_object() + + if account.account_type != M3UAccount.Types.XC: + return Response( + {"error": "VOD refresh is only available for XtreamCodes accounts"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Check if VOD is enabled + vod_enabled = False + if account.custom_properties: + custom_props = account.custom_properties or {} + vod_enabled = custom_props.get("enable_vod", False) + + if not vod_enabled: + return Response( + {"error": "VOD is not enabled for this account"}, + 
status=status.HTTP_400_BAD_REQUEST, + ) + + try: + from apps.vod.tasks import refresh_vod_content + + refresh_vod_content.delay(account.id) + return Response( + {"message": f"VOD refresh initiated for account {account.name}"}, + status=status.HTTP_202_ACCEPTED, + ) + except Exception as e: + return Response( + {"error": f"Failed to initiate VOD refresh: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + @action(detail=True, methods=["patch"], url_path="group-settings") + def update_group_settings(self, request, pk=None): + """Update auto channel sync settings for M3U account groups""" + account = self.get_object() + group_settings = request.data.get("group_settings", []) + category_settings = request.data.get("category_settings", []) + + try: + for setting in group_settings: + group_id = setting.get("channel_group") + enabled = setting.get("enabled", True) + auto_sync = setting.get("auto_channel_sync", False) + sync_start = setting.get("auto_sync_channel_start") + custom_properties = setting.get("custom_properties", {}) + + if group_id: + ChannelGroupM3UAccount.objects.update_or_create( + channel_group_id=group_id, + m3u_account=account, + defaults={ + "enabled": enabled, + "auto_channel_sync": auto_sync, + "auto_sync_channel_start": sync_start, + "custom_properties": custom_properties, + }, + ) + + for setting in category_settings: + category_id = setting.get("id") + enabled = setting.get("enabled", True) + custom_properties = setting.get("custom_properties", {}) + + if category_id: + M3UVODCategoryRelation.objects.update_or_create( + category_id=category_id, + m3u_account=account, + defaults={ + "enabled": enabled, + "custom_properties": custom_properties, + }, + ) + + return Response({"message": "Group settings updated successfully"}) + + except Exception as e: + return Response( + {"error": f"Failed to update group settings: {str(e)}"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + class M3UFilterViewSet(viewsets.ModelViewSet): - """Handles CRUD operations for M3U filters""" queryset = M3UFilter.objects.all() serializer_class = M3UFilterSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + + def get_queryset(self): + m3u_account_id = self.kwargs["account_id"] + return M3UFilter.objects.filter(m3u_account_id=m3u_account_id) + + def perform_create(self, serializer): + # Get the account ID from the URL + account_id = self.kwargs["account_id"] + + # # Get the M3UAccount instance for the account_id + # m3u_account = M3UAccount.objects.get(id=account_id) + + # Save the 'm3u_account' in the serializer context + serializer.context["m3u_account"] = account_id + + # Perform the actual save + serializer.save(m3u_account_id=account_id) + class ServerGroupViewSet(viewsets.ModelViewSet): """Handles CRUD operations for Server Groups""" + queryset = ServerGroup.objects.all() serializer_class = ServerGroupSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + class RefreshM3UAPIView(APIView): """Triggers refresh for all active M3U accounts""" + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + @swagger_auto_schema( operation_description="Triggers a refresh of all 
active M3U accounts", - responses={202: "M3U refresh initiated"} + responses={202: "M3U refresh initiated"}, ) def post(self, request, format=None): refresh_m3u_accounts.delay() - return Response({'success': True, 'message': 'M3U refresh initiated.'}, status=status.HTTP_202_ACCEPTED) + return Response( + {"success": True, "message": "M3U refresh initiated."}, + status=status.HTTP_202_ACCEPTED, + ) + class RefreshSingleM3UAPIView(APIView): """Triggers refresh for a single M3U account""" + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + @swagger_auto_schema( operation_description="Triggers a refresh of a single M3U account", - responses={202: "M3U account refresh initiated"} + responses={202: "M3U account refresh initiated"}, ) def post(self, request, account_id, format=None): refresh_single_m3u_account.delay(account_id) - return Response({'success': True, 'message': f'M3U account {account_id} refresh initiated.'}, - status=status.HTTP_202_ACCEPTED) + return Response( + { + "success": True, + "message": f"M3U account {account_id} refresh initiated.", + }, + status=status.HTTP_202_ACCEPTED, + ) + + +class RefreshAccountInfoAPIView(APIView): + """Triggers account info refresh for a single M3U account""" + + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + + @swagger_auto_schema( + operation_description="Triggers a refresh of account information for a specific M3U profile", + responses={202: "Account info refresh initiated", 400: "Profile not found or not XtreamCodes"}, + ) + def post(self, request, profile_id, format=None): + try: + from .models import M3UAccountProfile + profile = M3UAccountProfile.objects.get(id=profile_id) + account = profile.m3u_account + + if account.account_type != M3UAccount.Types.XC: + return Response( + { + "success": False, + "error": "Account info refresh is only available for XtreamCodes accounts", + }, + status=status.HTTP_400_BAD_REQUEST, + ) + + refresh_account_info.delay(profile_id) + return Response( + { + "success": True, + "message": f"Account info refresh initiated for profile {profile.name}.", + }, + status=status.HTTP_202_ACCEPTED, + ) + except M3UAccountProfile.DoesNotExist: + return Response( + { + "success": False, + "error": "Profile not found", + }, + status=status.HTTP_404_NOT_FOUND, + ) + class UserAgentViewSet(viewsets.ModelViewSet): """Handles CRUD operations for User Agents""" + queryset = UserAgent.objects.all() serializer_class = UserAgentSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + class M3UAccountProfileViewSet(viewsets.ModelViewSet): queryset = M3UAccountProfile.objects.all() serializer_class = M3UAccountProfileSerializer - permission_classes = [IsAuthenticated] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] def get_queryset(self): - m3u_account_id = self.kwargs['account_id'] + m3u_account_id = self.kwargs["account_id"] return M3UAccountProfile.objects.filter(m3u_account_id=m3u_account_id) def perform_create(self, serializer): # Get the account ID from the URL - account_id = self.kwargs['account_id'] + account_id = 
self.kwargs["account_id"] # Get the M3UAccount instance for the account_id m3u_account = M3UAccount.objects.get(id=account_id) # Save the 'm3u_account' in the serializer context - serializer.context['m3u_account'] = m3u_account + serializer.context["m3u_account"] = m3u_account # Perform the actual save serializer.save(m3u_account_id=m3u_account) diff --git a/apps/m3u/forms.py b/apps/m3u/forms.py index f6fc7f91..cf6586c3 100644 --- a/apps/m3u/forms.py +++ b/apps/m3u/forms.py @@ -4,6 +4,13 @@ from .models import M3UAccount, M3UFilter import re class M3UAccountForm(forms.ModelForm): + enable_vod = forms.BooleanField( + required=False, + initial=False, + label="Enable VOD Content", + help_text="Parse and import VOD (movies/series) content for XtreamCodes accounts" + ) + class Meta: model = M3UAccount fields = [ @@ -13,8 +20,34 @@ class M3UAccountForm(forms.ModelForm): 'server_group', 'max_streams', 'is_active', + 'enable_vod', ] + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # Set initial value for enable_vod from custom_properties + if self.instance and self.instance.custom_properties: + custom_props = self.instance.custom_properties or {} + self.fields['enable_vod'].initial = custom_props.get('enable_vod', False) + + def save(self, commit=True): + instance = super().save(commit=False) + + # Handle enable_vod field + enable_vod = self.cleaned_data.get('enable_vod', False) + + # Parse existing custom_properties + custom_props = instance.custom_properties or {} + + # Update VOD preference + custom_props['enable_vod'] = enable_vod + instance.custom_properties = custom_props + + if commit: + instance.save() + return instance + def clean_uploaded_file(self): uploaded_file = self.cleaned_data.get('uploaded_file') if uploaded_file: diff --git a/apps/m3u/migrations/0003_create_custom_account.py b/apps/m3u/migrations/0003_create_custom_account.py index 8695f248..cdc40cda 100644 --- a/apps/m3u/migrations/0003_create_custom_account.py +++ b/apps/m3u/migrations/0003_create_custom_account.py @@ -3,6 +3,7 @@ from django.db import migrations from core.models import CoreSettings + def create_custom_account(apps, schema_editor): default_user_agent_id = CoreSettings.get_default_user_agent_id() @@ -18,7 +19,7 @@ def create_custom_account(apps, schema_editor): M3UAccountProfile = apps.get_model("m3u", "M3UAccountProfile") M3UAccountProfile.objects.create( m3u_account=m3u_account, - name=f'{m3u_account.name} Default', + name=f"{m3u_account.name} Default", max_streams=m3u_account.max_streams, is_default=True, is_active=True, @@ -26,10 +27,12 @@ def create_custom_account(apps, schema_editor): replace_pattern="$1", ) + class Migration(migrations.Migration): dependencies = [ - ('m3u', '0002_m3uaccount_locked'), + ("m3u", "0002_m3uaccount_locked"), + ("core", "0004_preload_core_settings"), ] operations = [ diff --git a/apps/m3u/migrations/0005_m3uaccount_custom_properties_and_more.py b/apps/m3u/migrations/0005_m3uaccount_custom_properties_and_more.py index 3728bf7f..7a5f2013 100644 --- a/apps/m3u/migrations/0005_m3uaccount_custom_properties_and_more.py +++ b/apps/m3u/migrations/0005_m3uaccount_custom_properties_and_more.py @@ -7,24 +7,29 @@ from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ - ('django_celery_beat', '0019_alter_periodictasks_options'), - ('m3u', '0004_m3uaccount_stream_profile'), + ("django_celery_beat", "0019_alter_periodictasks_options"), + ("m3u", "0004_m3uaccount_stream_profile"), ] operations = [ migrations.AddField( - 
model_name='m3uaccount', - name='custom_properties', + model_name="m3uaccount", + name="custom_properties", field=models.TextField(blank=True, null=True), ), migrations.AddField( - model_name='m3uaccount', - name='refresh_interval', + model_name="m3uaccount", + name="refresh_interval", field=models.IntegerField(default=24), ), migrations.AddField( - model_name='m3uaccount', - name='refresh_task', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='django_celery_beat.periodictask'), + model_name="m3uaccount", + name="refresh_task", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + to="django_celery_beat.periodictask", + ), ), ] diff --git a/apps/m3u/migrations/0013_alter_m3ufilter_filter_type.py b/apps/m3u/migrations/0013_alter_m3ufilter_filter_type.py new file mode 100644 index 00000000..0b0a8a1d --- /dev/null +++ b/apps/m3u/migrations/0013_alter_m3ufilter_filter_type.py @@ -0,0 +1,18 @@ +# Generated by Django 5.1.6 on 2025-07-22 21:16 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('m3u', '0012_alter_m3uaccount_refresh_interval'), + ] + + operations = [ + migrations.AlterField( + model_name='m3ufilter', + name='filter_type', + field=models.CharField(choices=[('group', 'Group'), ('name', 'Stream Name'), ('url', 'Stream URL')], default='group', help_text='Filter based on either group title or stream name.', max_length=50), + ), + ] diff --git a/apps/m3u/migrations/0014_alter_m3ufilter_options_m3ufilter_order.py b/apps/m3u/migrations/0014_alter_m3ufilter_options_m3ufilter_order.py new file mode 100644 index 00000000..3510bfc5 --- /dev/null +++ b/apps/m3u/migrations/0014_alter_m3ufilter_options_m3ufilter_order.py @@ -0,0 +1,22 @@ +# Generated by Django 5.1.6 on 2025-07-31 17:14 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('m3u', '0013_alter_m3ufilter_filter_type'), + ] + + operations = [ + migrations.AlterModelOptions( + name='m3ufilter', + options={'ordering': ['order']}, + ), + migrations.AddField( + model_name='m3ufilter', + name='order', + field=models.PositiveIntegerField(default=0), + ), + ] diff --git a/apps/m3u/migrations/0015_alter_m3ufilter_options_m3ufilter_custom_properties.py b/apps/m3u/migrations/0015_alter_m3ufilter_options_m3ufilter_custom_properties.py new file mode 100644 index 00000000..6b62c9a1 --- /dev/null +++ b/apps/m3u/migrations/0015_alter_m3ufilter_options_m3ufilter_custom_properties.py @@ -0,0 +1,22 @@ +# Generated by Django 5.2.4 on 2025-08-02 16:06 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('m3u', '0014_alter_m3ufilter_options_m3ufilter_order'), + ] + + operations = [ + migrations.AlterModelOptions( + name='m3ufilter', + options={}, + ), + migrations.AddField( + model_name='m3ufilter', + name='custom_properties', + field=models.TextField(blank=True, null=True), + ), + ] diff --git a/apps/m3u/migrations/0016_m3uaccount_priority.py b/apps/m3u/migrations/0016_m3uaccount_priority.py new file mode 100644 index 00000000..55e0e95b --- /dev/null +++ b/apps/m3u/migrations/0016_m3uaccount_priority.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-08-20 22:35 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('m3u', '0015_alter_m3ufilter_options_m3ufilter_custom_properties'), + ] + + operations = [ + 
migrations.AddField( + model_name='m3uaccount', + name='priority', + field=models.PositiveIntegerField(default=0, help_text='Priority for VOD provider selection (higher numbers = higher priority). Used when multiple providers offer the same content.'), + ), + ] diff --git a/apps/m3u/migrations/0017_alter_m3uaccount_custom_properties_and_more.py b/apps/m3u/migrations/0017_alter_m3uaccount_custom_properties_and_more.py new file mode 100644 index 00000000..84cb968b --- /dev/null +++ b/apps/m3u/migrations/0017_alter_m3uaccount_custom_properties_and_more.py @@ -0,0 +1,28 @@ +# Generated by Django 5.2.4 on 2025-09-02 15:19 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('m3u', '0016_m3uaccount_priority'), + ] + + operations = [ + migrations.AlterField( + model_name='m3uaccount', + name='custom_properties', + field=models.JSONField(blank=True, default=dict, null=True), + ), + migrations.AlterField( + model_name='m3uaccount', + name='server_url', + field=models.URLField(blank=True, help_text='The base URL of the M3U server (optional if a file is uploaded)', max_length=1000, null=True), + ), + migrations.AlterField( + model_name='m3ufilter', + name='custom_properties', + field=models.JSONField(blank=True, default=dict, null=True), + ), + ] diff --git a/apps/m3u/migrations/0018_add_profile_custom_properties.py b/apps/m3u/migrations/0018_add_profile_custom_properties.py new file mode 100644 index 00000000..d616c598 --- /dev/null +++ b/apps/m3u/migrations/0018_add_profile_custom_properties.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-09-09 20:57 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('m3u', '0017_alter_m3uaccount_custom_properties_and_more'), + ] + + operations = [ + migrations.AddField( + model_name='m3uaccountprofile', + name='custom_properties', + field=models.JSONField(blank=True, default=dict, help_text='Custom properties for storing account information from provider (e.g., XC account details, expiration dates)', null=True), + ), + ] diff --git a/apps/m3u/models.py b/apps/m3u/models.py index a297fd18..b812ad6c 100644 --- a/apps/m3u/models.py +++ b/apps/m3u/models.py @@ -7,7 +7,8 @@ from apps.channels.models import StreamProfile from django_celery_beat.models import PeriodicTask from core.models import CoreSettings, UserAgent -CUSTOM_M3U_ACCOUNT_NAME="custom" +CUSTOM_M3U_ACCOUNT_NAME = "custom" + class M3UAccount(models.Model): class Types(models.TextChoices): @@ -25,84 +26,78 @@ class M3UAccount(models.Model): """Represents an M3U Account for IPTV streams.""" name = models.CharField( - max_length=255, - unique=True, - help_text="Unique name for this M3U account" + max_length=255, unique=True, help_text="Unique name for this M3U account" ) server_url = models.URLField( + max_length=1000, blank=True, null=True, - help_text="The base URL of the M3U server (optional if a file is uploaded)" - ) - file_path = models.CharField( - max_length=255, - blank=True, - null=True + help_text="The base URL of the M3U server (optional if a file is uploaded)", ) + file_path = models.CharField(max_length=255, blank=True, null=True) server_group = models.ForeignKey( - 'ServerGroup', + "ServerGroup", on_delete=models.SET_NULL, null=True, blank=True, - related_name='m3u_accounts', - help_text="The server group this M3U account belongs to" + related_name="m3u_accounts", + help_text="The server group this M3U account belongs to", ) max_streams = models.PositiveIntegerField( 
- default=0, - help_text="Maximum number of concurrent streams (0 for unlimited)" + default=0, help_text="Maximum number of concurrent streams (0 for unlimited)" ) is_active = models.BooleanField( - default=True, - help_text="Set to false to deactivate this M3U account" + default=True, help_text="Set to false to deactivate this M3U account" ) created_at = models.DateTimeField( - auto_now_add=True, - help_text="Time when this account was created" + auto_now_add=True, help_text="Time when this account was created" ) updated_at = models.DateTimeField( - null=True, blank=True, - help_text="Time when this account was last successfully refreshed" + null=True, + blank=True, + help_text="Time when this account was last successfully refreshed", ) status = models.CharField( - max_length=20, - choices=Status.choices, - default=Status.IDLE + max_length=20, choices=Status.choices, default=Status.IDLE ) last_message = models.TextField( null=True, blank=True, - help_text="Last status message, including success results or error information" + help_text="Last status message, including success results or error information", ) user_agent = models.ForeignKey( - 'core.UserAgent', + "core.UserAgent", on_delete=models.SET_NULL, null=True, blank=True, - related_name='m3u_accounts', - help_text="The User-Agent associated with this M3U account." + related_name="m3u_accounts", + help_text="The User-Agent associated with this M3U account.", ) locked = models.BooleanField( - default=False, - help_text="Protected - can't be deleted or modified" + default=False, help_text="Protected - can't be deleted or modified" ) stream_profile = models.ForeignKey( StreamProfile, on_delete=models.SET_NULL, null=True, blank=True, - related_name='m3u_accounts' + related_name="m3u_accounts", ) account_type = models.CharField(choices=Types.choices, default=Types.STADNARD) username = models.CharField(max_length=255, null=True, blank=True) password = models.CharField(max_length=255, null=True, blank=True) - custom_properties = models.TextField(null=True, blank=True) + custom_properties = models.JSONField(default=dict, blank=True, null=True) refresh_interval = models.IntegerField(default=0) refresh_task = models.ForeignKey( PeriodicTask, on_delete=models.SET_NULL, null=True, blank=True ) stale_stream_days = models.PositiveIntegerField( default=7, - help_text="Number of days after which a stream will be removed if not seen in the M3U source." + help_text="Number of days after which a stream will be removed if not seen in the M3U source.", + ) + priority = models.PositiveIntegerField( + default=0, + help_text="Priority for VOD provider selection (higher numbers = higher priority). 
Used when multiple providers offer the same content.", ) def __str__(self): @@ -134,17 +129,19 @@ class M3UAccount(models.Model): def get_user_agent(self): user_agent = self.user_agent if not user_agent: - user_agent = UserAgent.objects.get(id=CoreSettings.get_default_user_agent_id()) + user_agent = UserAgent.objects.get( + id=CoreSettings.get_default_user_agent_id() + ) return user_agent def save(self, *args, **kwargs): # Prevent auto_now behavior by handling updated_at manually - if 'update_fields' in kwargs and 'updated_at' not in kwargs['update_fields']: + if "update_fields" in kwargs and "updated_at" not in kwargs["update_fields"]: # Don't modify updated_at for regular updates - kwargs.setdefault('update_fields', []) - if 'updated_at' in kwargs['update_fields']: - kwargs['update_fields'].remove('updated_at') + kwargs.setdefault("update_fields", []) + if "updated_at" in kwargs["update_fields"]: + kwargs["update_fields"].remove("updated_at") super().save(*args, **kwargs) # def get_channel_groups(self): @@ -158,35 +155,40 @@ class M3UAccount(models.Model): # """Return all streams linked to this account with enabled ChannelGroups.""" # return self.streams.filter(channel_group__in=ChannelGroup.objects.filter(m3u_account__enabled=True)) + class M3UFilter(models.Model): """Defines filters for M3U accounts based on stream name or group title.""" + FILTER_TYPE_CHOICES = ( - ('group', 'Group Title'), - ('name', 'Stream Name'), + ("group", "Group"), + ("name", "Stream Name"), + ("url", "Stream URL"), ) + m3u_account = models.ForeignKey( M3UAccount, on_delete=models.CASCADE, - related_name='filters', - help_text="The M3U account this filter is applied to." + related_name="filters", + help_text="The M3U account this filter is applied to.", ) filter_type = models.CharField( max_length=50, choices=FILTER_TYPE_CHOICES, - default='group', - help_text="Filter based on either group title or stream name." + default="group", + help_text="Filter based on either group title or stream name.", ) regex_pattern = models.CharField( - max_length=200, - help_text="A regex pattern to match streams or groups." + max_length=200, help_text="A regex pattern to match streams or groups." ) exclude = models.BooleanField( default=True, - help_text="If True, matching items are excluded; if False, only matches are included." + help_text="If True, matching items are excluded; if False, only matches are included.", ) + order = models.PositiveIntegerField(default=0) + custom_properties = models.JSONField(default=dict, blank=True, null=True) def applies_to(self, stream_name, group_name): - target = group_name if self.filter_type == 'group' else stream_name + target = group_name if self.filter_type == "group" else stream_name return bool(re.search(self.regex_pattern, target, re.IGNORECASE)) def clean(self): @@ -196,7 +198,9 @@ class M3UFilter(models.Model): raise ValidationError(f"Invalid regex pattern: {self.regex_pattern}") def __str__(self): - filter_type_display = dict(self.FILTER_TYPE_CHOICES).get(self.filter_type, 'Unknown') + filter_type_display = dict(self.FILTER_TYPE_CHOICES).get( + self.filter_type, "Unknown" + ) exclude_status = "Exclude" if self.exclude else "Include" return f"[{self.m3u_account.name}] {filter_type_display}: {self.regex_pattern} ({exclude_status})" @@ -222,40 +226,35 @@ class M3UFilter(models.Model): class ServerGroup(models.Model): """Represents a logical grouping of servers or channels.""" + name = models.CharField( - max_length=100, - unique=True, - help_text="Unique name for this server group." 
+ max_length=100, unique=True, help_text="Unique name for this server group." ) def __str__(self): return self.name -from django.db import models class M3UAccountProfile(models.Model): """Represents a profile associated with an M3U Account.""" + m3u_account = models.ForeignKey( - 'M3UAccount', + "M3UAccount", on_delete=models.CASCADE, - related_name='profiles', - help_text="The M3U account this profile belongs to." + related_name="profiles", + help_text="The M3U account this profile belongs to.", ) name = models.CharField( - max_length=255, - help_text="Name for the M3U account profile" + max_length=255, help_text="Name for the M3U account profile" ) is_default = models.BooleanField( - default=False, - help_text="Set to false to deactivate this profile" + default=False, help_text="Set to false to deactivate this profile" ) max_streams = models.PositiveIntegerField( - default=0, - help_text="Maximum number of concurrent streams (0 for unlimited)" + default=0, help_text="Maximum number of concurrent streams (0 for unlimited)" ) is_active = models.BooleanField( - default=True, - help_text="Set to false to deactivate this profile" + default=True, help_text="Set to false to deactivate this profile" ) search_pattern = models.CharField( max_length=255, @@ -264,22 +263,95 @@ class M3UAccountProfile(models.Model): max_length=255, ) current_viewers = models.PositiveIntegerField(default=0) + custom_properties = models.JSONField( + default=dict, + blank=True, + null=True, + help_text="Custom properties for storing account information from provider (e.g., XC account details, expiration dates)" + ) class Meta: constraints = [ - models.UniqueConstraint(fields=['m3u_account', 'name'], name='unique_account_name') + models.UniqueConstraint( + fields=["m3u_account", "name"], name="unique_account_name" + ) ] def __str__(self): return f"{self.name} ({self.m3u_account.name})" + def get_account_expiration(self): + """Get account expiration date from custom properties if available""" + if not self.custom_properties: + return None + + user_info = self.custom_properties.get('user_info', {}) + exp_date = user_info.get('exp_date') + + if exp_date: + try: + from datetime import datetime + # XC exp_date is typically a Unix timestamp + if isinstance(exp_date, (int, float)): + return datetime.fromtimestamp(exp_date) + elif isinstance(exp_date, str): + # Try to parse as timestamp first, then as ISO date + try: + return datetime.fromtimestamp(float(exp_date)) + except ValueError: + return datetime.fromisoformat(exp_date) + except (ValueError, TypeError): + pass + + return None + + def get_account_status(self): + """Get account status from custom properties if available""" + if not self.custom_properties: + return None + + user_info = self.custom_properties.get('user_info', {}) + return user_info.get('status') + + def get_max_connections(self): + """Get maximum connections from custom properties if available""" + if not self.custom_properties: + return None + + user_info = self.custom_properties.get('user_info', {}) + return user_info.get('max_connections') + + def get_active_connections(self): + """Get active connections from custom properties if available""" + if not self.custom_properties: + return None + + user_info = self.custom_properties.get('user_info', {}) + return user_info.get('active_cons') + + def get_last_refresh(self): + """Get last refresh timestamp from custom properties if available""" + if not self.custom_properties: + return None + + last_refresh = self.custom_properties.get('last_refresh') + if 
last_refresh: + try: + from datetime import datetime + return datetime.fromisoformat(last_refresh) + except (ValueError, TypeError): + pass + + return None + + @receiver(models.signals.post_save, sender=M3UAccount) def create_profile_for_m3u_account(sender, instance, created, **kwargs): """Automatically create an M3UAccountProfile when M3UAccount is created.""" if created: M3UAccountProfile.objects.create( m3u_account=instance, - name=f'{instance.name} Default', + name=f"{instance.name} Default", max_streams=instance.max_streams, is_default=True, is_active=True, @@ -292,6 +364,5 @@ def create_profile_for_m3u_account(sender, instance, created, **kwargs): is_default=True, ) - profile.max_streams = instance.max_streams profile.save() diff --git a/apps/m3u/serializers.py b/apps/m3u/serializers.py index 038af628..a607dc07 100644 --- a/apps/m3u/serializers.py +++ b/apps/m3u/serializers.py @@ -1,41 +1,106 @@ -from rest_framework import serializers +from core.utils import validate_flexible_url +from rest_framework import serializers, status from rest_framework.response import Response from .models import M3UAccount, M3UFilter, ServerGroup, M3UAccountProfile from core.models import UserAgent from apps.channels.models import ChannelGroup, ChannelGroupM3UAccount -from apps.channels.serializers import ChannelGroupM3UAccountSerializer, ChannelGroupSerializer +from apps.channels.serializers import ( + ChannelGroupM3UAccountSerializer, +) import logging +import json logger = logging.getLogger(__name__) + class M3UFilterSerializer(serializers.ModelSerializer): """Serializer for M3U Filters""" - channel_groups = ChannelGroupM3UAccountSerializer(source='m3u_account', many=True) class Meta: model = M3UFilter - fields = ['id', 'filter_type', 'regex_pattern', 'exclude', 'channel_groups'] + fields = [ + "id", + "filter_type", + "regex_pattern", + "exclude", + "order", + "custom_properties", + ] -from rest_framework import serializers -from .models import M3UAccountProfile class M3UAccountProfileSerializer(serializers.ModelSerializer): + account = serializers.SerializerMethodField() + + def get_account(self, obj): + """Include basic account information for frontend use""" + return { + 'id': obj.m3u_account.id, + 'name': obj.m3u_account.name, + 'account_type': obj.m3u_account.account_type, + 'is_xtream_codes': obj.m3u_account.account_type == 'XC' + } + class Meta: model = M3UAccountProfile - fields = ['id', 'name', 'max_streams', 'is_active', 'is_default', 'current_viewers', 'search_pattern', 'replace_pattern'] - read_only_fields = ['id'] + fields = [ + "id", + "name", + "max_streams", + "is_active", + "is_default", + "current_viewers", + "search_pattern", + "replace_pattern", + "custom_properties", + "account", + ] + read_only_fields = ["id", "account"] + extra_kwargs = { + 'search_pattern': {'required': False, 'allow_blank': True}, + 'replace_pattern': {'required': False, 'allow_blank': True}, + } def create(self, validated_data): - m3u_account = self.context.get('m3u_account') + m3u_account = self.context.get("m3u_account") # Use the m3u_account when creating the profile - validated_data['m3u_account_id'] = m3u_account.id + validated_data["m3u_account_id"] = m3u_account.id return super().create(validated_data) + def validate(self, data): + """Custom validation to handle default profiles""" + # For updates to existing instances + if self.instance and self.instance.is_default: + # For default profiles, search_pattern and replace_pattern are not required + # and we don't want to validate them since they shouldn't 
be changed + return data + + # For non-default profiles or new profiles, ensure required fields are present + if not data.get('search_pattern'): + raise serializers.ValidationError({ + 'search_pattern': ['This field is required for non-default profiles.'] + }) + if not data.get('replace_pattern'): + raise serializers.ValidationError({ + 'replace_pattern': ['This field is required for non-default profiles.'] + }) + + return data + def update(self, instance, validated_data): if instance.is_default: - raise serializers.ValidationError("Default profiles cannot be modified.") + # For default profiles, only allow updating name and custom_properties (for notes) + allowed_fields = {'name', 'custom_properties'} + + # Remove any fields that aren't allowed for default profiles + disallowed_fields = set(validated_data.keys()) - allowed_fields + if disallowed_fields: + raise serializers.ValidationError( + f"Default profiles can only modify name and notes. " + f"Cannot modify: {', '.join(disallowed_fields)}" + ) + return super().update(instance, validated_data) def destroy(self, request, *args, **kwargs): @@ -43,13 +108,15 @@ class M3UAccountProfileSerializer(serializers.ModelSerializer): if instance.is_default: return Response( {"error": "Default profiles cannot be deleted."}, - status=status.HTTP_400_BAD_REQUEST + status=status.HTTP_400_BAD_REQUEST, ) return super().destroy(request, *args, **kwargs) + class M3UAccountSerializer(serializers.ModelSerializer): """Serializer for M3U Account""" - filters = M3UFilterSerializer(many=True, read_only=True) + + filters = serializers.SerializerMethodField() # Include user_agent as a mandatory field using its primary key. user_agent = serializers.PrimaryKeyRelatedField( queryset=UserAgent.objects.all(), @@ -57,28 +124,96 @@ class M3UAccountSerializer(serializers.ModelSerializer): allow_null=True, ) profiles = M3UAccountProfileSerializer(many=True, read_only=True) - read_only_fields = ['locked', 'created_at', 'updated_at'] + read_only_fields = ["locked", "created_at", "updated_at"] # channel_groups = serializers.SerializerMethodField() - channel_groups = ChannelGroupM3UAccountSerializer(source='channel_group', many=True, required=False) + channel_groups = ChannelGroupM3UAccountSerializer( + source="channel_group", many=True, required=False + ) + server_url = serializers.CharField( + required=False, + allow_blank=True, + allow_null=True, + validators=[validate_flexible_url], + ) + enable_vod = serializers.BooleanField(required=False, write_only=True) + auto_enable_new_groups_live = serializers.BooleanField(required=False, write_only=True) + auto_enable_new_groups_vod = serializers.BooleanField(required=False, write_only=True) + auto_enable_new_groups_series = serializers.BooleanField(required=False, write_only=True) class Meta: model = M3UAccount fields = [ - 'id', 'name', 'server_url', 'file_path', 'server_group', - 'max_streams', 'is_active', 'created_at', 'updated_at', 'filters', 'user_agent', 'profiles', 'locked', - 'channel_groups', 'refresh_interval', 'custom_properties', 'account_type', 'username', 'password', 'stale_stream_days', - 'status', 'last_message', + "id", + "name", + "server_url", + "file_path", + "server_group", + "max_streams", + "is_active", + "created_at", + "updated_at", + "filters", + "user_agent", + "profiles", + "locked", + "channel_groups", + "refresh_interval", + "custom_properties", + "account_type", + "username", + "password", + "stale_stream_days", + "priority", + "status", + "last_message", + "enable_vod", + 
"auto_enable_new_groups_live", + "auto_enable_new_groups_vod", + "auto_enable_new_groups_series", ] extra_kwargs = { - 'password': { - 'required': False, - 'allow_blank': True, + "password": { + "required": False, + "allow_blank": True, }, } + def to_representation(self, instance): + data = super().to_representation(instance) + + # Parse custom_properties to get VOD preference and auto_enable_new_groups settings + custom_props = instance.custom_properties or {} + + data["enable_vod"] = custom_props.get("enable_vod", False) + data["auto_enable_new_groups_live"] = custom_props.get("auto_enable_new_groups_live", True) + data["auto_enable_new_groups_vod"] = custom_props.get("auto_enable_new_groups_vod", True) + data["auto_enable_new_groups_series"] = custom_props.get("auto_enable_new_groups_series", True) + return data + def update(self, instance, validated_data): + # Handle enable_vod preference and auto_enable_new_groups settings + enable_vod = validated_data.pop("enable_vod", None) + auto_enable_new_groups_live = validated_data.pop("auto_enable_new_groups_live", None) + auto_enable_new_groups_vod = validated_data.pop("auto_enable_new_groups_vod", None) + auto_enable_new_groups_series = validated_data.pop("auto_enable_new_groups_series", None) + + # Get existing custom_properties + custom_props = instance.custom_properties or {} + + # Update preferences + if enable_vod is not None: + custom_props["enable_vod"] = enable_vod + if auto_enable_new_groups_live is not None: + custom_props["auto_enable_new_groups_live"] = auto_enable_new_groups_live + if auto_enable_new_groups_vod is not None: + custom_props["auto_enable_new_groups_vod"] = auto_enable_new_groups_vod + if auto_enable_new_groups_series is not None: + custom_props["auto_enable_new_groups_series"] = auto_enable_new_groups_series + + validated_data["custom_properties"] = custom_props + # Pop out channel group memberships so we can handle them manually - channel_group_data = validated_data.pop('channel_group', []) + channel_group_data = validated_data.pop("channel_group", []) # First, update the M3UAccount itself for attr, value in validated_data.items(): @@ -88,13 +223,12 @@ class M3UAccountSerializer(serializers.ModelSerializer): # Prepare a list of memberships to update memberships_to_update = [] for group_data in channel_group_data: - group = group_data.get('channel_group') - enabled = group_data.get('enabled') + group = group_data.get("channel_group") + enabled = group_data.get("enabled") try: membership = ChannelGroupM3UAccount.objects.get( - m3u_account=instance, - channel_group=group + m3u_account=instance, channel_group=group ) membership.enabled = enabled memberships_to_update.append(membership) @@ -103,13 +237,39 @@ class M3UAccountSerializer(serializers.ModelSerializer): # Perform the bulk update if memberships_to_update: - ChannelGroupM3UAccount.objects.bulk_update(memberships_to_update, ['enabled']) + ChannelGroupM3UAccount.objects.bulk_update( + memberships_to_update, ["enabled"] + ) return instance + def create(self, validated_data): + # Handle enable_vod preference and auto_enable_new_groups settings during creation + enable_vod = validated_data.pop("enable_vod", False) + auto_enable_new_groups_live = validated_data.pop("auto_enable_new_groups_live", True) + auto_enable_new_groups_vod = validated_data.pop("auto_enable_new_groups_vod", True) + auto_enable_new_groups_series = validated_data.pop("auto_enable_new_groups_series", True) + + # Parse existing custom_properties or create new + custom_props = 
validated_data.get("custom_properties", {}) + + # Set preferences (default to True for auto_enable_new_groups) + custom_props["enable_vod"] = enable_vod + custom_props["auto_enable_new_groups_live"] = auto_enable_new_groups_live + custom_props["auto_enable_new_groups_vod"] = auto_enable_new_groups_vod + custom_props["auto_enable_new_groups_series"] = auto_enable_new_groups_series + validated_data["custom_properties"] = custom_props + + return super().create(validated_data) + + def get_filters(self, obj): + filters = obj.filters.order_by("order") + return M3UFilterSerializer(filters, many=True).data + + class ServerGroupSerializer(serializers.ModelSerializer): """Serializer for Server Group""" class Meta: model = ServerGroup - fields = ['id', 'name'] + fields = ["id", "name"] diff --git a/apps/m3u/tasks.py b/apps/m3u/tasks.py index ce46a2ec..15479379 100644 --- a/apps/m3u/tasks.py +++ b/apps/m3u/tasks.py @@ -5,6 +5,7 @@ import requests import os import gc import gzip, zipfile +from concurrent.futures import ThreadPoolExecutor, as_completed from celery.app.control import Inspect from celery.result import AsyncResult from celery import shared_task, current_app, group @@ -18,17 +19,24 @@ from channels.layers import get_channel_layer from django.utils import timezone import time import json -from core.utils import RedisClient, acquire_task_lock, release_task_lock +from core.utils import ( + RedisClient, + acquire_task_lock, + release_task_lock, + natural_sort_key, +) from core.models import CoreSettings, UserAgent from asgiref.sync import async_to_sync from core.xtream_codes import Client as XCClient from core.utils import send_websocket_update +from .utils import normalize_stream_url logger = logging.getLogger(__name__) -BATCH_SIZE = 1000 +BATCH_SIZE = 1500 # Optimized batch size for threading m3u_dir = os.path.join(settings.MEDIA_ROOT, "cached_m3u") + def fetch_m3u_lines(account, use_cache=False): os.makedirs(m3u_dir, exist_ok=True) file_path = os.path.join(m3u_dir, f"{account.id}.m3u") @@ -39,119 +47,364 @@ def fetch_m3u_lines(account, use_cache=False): try: # Try to get account-specific user agent first user_agent_obj = account.get_user_agent() - user_agent = user_agent_obj.user_agent if user_agent_obj else "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" + user_agent = ( + user_agent_obj.user_agent + if user_agent_obj + else "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" + ) - logger.debug(f"Using user agent: {user_agent} for M3U account: {account.name}") + logger.debug( + f"Using user agent: {user_agent} for M3U account: {account.name}" + ) headers = {"User-Agent": user_agent} logger.info(f"Fetching from URL {account.server_url}") # Set account status to FETCHING before starting download account.status = M3UAccount.Status.FETCHING account.last_message = "Starting download..." 
- account.save(update_fields=['status', 'last_message']) + account.save(update_fields=["status", "last_message"]) - response = requests.get(account.server_url, headers=headers, stream=True) + response = requests.get( + account.server_url, headers=headers, stream=True + ) + + # Log the actual response details for debugging + logger.debug(f"HTTP Response: {response.status_code} from {account.server_url}") + logger.debug(f"Content-Type: {response.headers.get('content-type', 'Not specified')}") + logger.debug(f"Content-Length: {response.headers.get('content-length', 'Not specified')}") + logger.debug(f"Response headers: {dict(response.headers)}") + + # Check if we've been redirected to a different URL + if hasattr(response, 'url') and response.url != account.server_url: + logger.warning(f"Request was redirected from {account.server_url} to {response.url}") + + # Check for ANY non-success status code FIRST (before raise_for_status) + if response.status_code < 200 or response.status_code >= 300: + # For error responses, read the content immediately (not streaming) + try: + response_content = response.text[:1000] # Capture up to 1000 characters + logger.error(f"Error response content: {response_content!r}") + except Exception as e: + logger.error(f"Could not read error response content: {e}") + response_content = "Could not read error response content" + + # Provide specific messages for known non-standard codes + if response.status_code == 884: + error_msg = f"Server returned HTTP 884 (authentication/authorization failure) from URL: {account.server_url}. Server message: {response_content}" + elif response.status_code >= 800: + error_msg = f"Server returned non-standard HTTP status {response.status_code} from URL: {account.server_url}. Server message: {response_content}" + elif response.status_code == 404: + error_msg = f"M3U file not found (404) at URL: {account.server_url}. Server message: {response_content}" + elif response.status_code == 403: + error_msg = f"Access forbidden (403) to M3U file at URL: {account.server_url}. Server message: {response_content}" + elif response.status_code == 401: + error_msg = f"Authentication required (401) for M3U file at URL: {account.server_url}. Server message: {response_content}" + elif response.status_code == 500: + error_msg = f"Server error (500) while fetching M3U file from URL: {account.server_url}. Server message: {response_content}" + else: + error_msg = f"HTTP error ({response.status_code}) while fetching M3U file from URL: {account.server_url}. 
Server message: {response_content}" + + logger.error(error_msg) + account.status = M3UAccount.Status.ERROR + account.last_message = error_msg + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account.id, + "downloading", + 100, + status="error", + error=error_msg, + ) + return [], False + + # Only call raise_for_status if we have a success code (this should not raise now) response.raise_for_status() - total_size = int(response.headers.get('Content-Length', 0)) + total_size = int(response.headers.get("Content-Length", 0)) downloaded = 0 start_time = time.time() last_update_time = start_time progress = 0 + temp_content = b"" # Store content temporarily to validate before saving + has_content = False - with open(file_path, 'wb') as file: - send_m3u_update(account.id, "downloading", 0) - for chunk in response.iter_content(chunk_size=8192): - if chunk: - file.write(chunk) + # First, let's collect the content and validate it + send_m3u_update(account.id, "downloading", 0) + for chunk in response.iter_content(chunk_size=8192): + if chunk: + temp_content += chunk + has_content = True - downloaded += len(chunk) - elapsed_time = time.time() - start_time + downloaded += len(chunk) + elapsed_time = time.time() - start_time - # Calculate download speed in KB/s - speed = downloaded / elapsed_time / 1024 # in KB/s + # Calculate download speed in KB/s + speed = downloaded / elapsed_time / 1024 # in KB/s - # Calculate progress percentage - if total_size and total_size > 0: - progress = (downloaded / total_size) * 100 + # Calculate progress percentage + if total_size and total_size > 0: + progress = (downloaded / total_size) * 100 - # Time remaining (in seconds) - time_remaining = (total_size - downloaded) / (speed * 1024) if speed > 0 else 0 + # Time remaining (in seconds) + time_remaining = ( + (total_size - downloaded) / (speed * 1024) + if speed > 0 + else 0 + ) - current_time = time.time() - if current_time - last_update_time >= 0.5: - last_update_time = current_time - if progress > 0: - # Update the account's last_message with detailed progress info - progress_msg = f"Downloading: {progress:.1f}% - {speed:.1f} KB/s - {time_remaining:.1f}s remaining" - account.last_message = progress_msg - account.save(update_fields=['last_message']) + current_time = time.time() + if current_time - last_update_time >= 0.5: + last_update_time = current_time + if progress > 0: + # Update the account's last_message with detailed progress info + progress_msg = f"Downloading: {progress:.1f}% - {speed:.1f} KB/s - {time_remaining:.1f}s remaining" + account.last_message = progress_msg + account.save(update_fields=["last_message"]) - send_m3u_update(account.id, "downloading", progress, - speed=speed, - elapsed_time=elapsed_time, - time_remaining=time_remaining, - message=progress_msg) + send_m3u_update( + account.id, + "downloading", + progress, + speed=speed, + elapsed_time=elapsed_time, + time_remaining=time_remaining, + message=progress_msg, + ) + + # Check if we actually received any content + logger.info(f"Download completed. 
Has content: {has_content}, Content length: {len(temp_content)} bytes") + if not has_content or len(temp_content) == 0: + error_msg = f"Server responded successfully (HTTP {response.status_code}) but provided empty M3U file from URL: {account.server_url}" + logger.error(error_msg) + account.status = M3UAccount.Status.ERROR + account.last_message = error_msg + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account.id, + "downloading", + 100, + status="error", + error=error_msg, + ) + return [], False + + # Basic validation: check if content looks like an M3U file + try: + content_str = temp_content.decode('utf-8', errors='ignore') + content_lines = content_str.strip().split('\n') + + # Log first few lines for debugging (be careful not to log too much) + preview_lines = content_lines[:5] + logger.info(f"Content preview (first 5 lines): {preview_lines}") + logger.info(f"Total lines in content: {len(content_lines)}") + + # Check if it's a valid M3U file (should start with #EXTM3U or contain M3U-like content) + is_valid_m3u = False + + # First, check if this looks like an error response disguised as 200 OK + content_lower = content_str.lower() + if any(error_indicator in content_lower for error_indicator in [ + ' dict: """ Parse an EXTINF line from an M3U file. - This function removes the "#EXTINF:" prefix, then splits the remaining - string on the first comma that is not enclosed in quotes. + This function removes the "#EXTINF:" prefix, then extracts all key="value" attributes, + and treats everything after the last attribute as the display name. Returns a dictionary with: - 'attributes': a dict of attribute key/value pairs (e.g. tvg-id, tvg-logo, group-title) - - 'display_name': the text after the comma (the fallback display name) + - 'display_name': the text after the attributes (the fallback display name) - 'name': the value from tvg-name (if present) or the display name otherwise. """ if not line.startswith("#EXTINF:"): return None - content = line[len("#EXTINF:"):].strip() - # Split on the first comma that is not inside quotes. - parts = re.split(r',(?=(?:[^"]*"[^"]*")*[^"]*$)', content, maxsplit=1) - if len(parts) != 2: - return None - attributes_part, display_name = parts[0], parts[1].strip() - attrs = dict(re.findall(r'([^\s]+)=["\']([^"\']+)["\']', attributes_part)) - # Use tvg-name attribute if available; otherwise, use the display name. 
- name = attrs.get('tvg-name', display_name) - return { - 'attributes': attrs, - 'display_name': display_name, - 'name': name - } + content = line[len("#EXTINF:") :].strip() + + # Single pass: extract all attributes AND track the last attribute position + # This regex matches both key="value" and key='value' patterns + attrs = {} + last_attr_end = 0 + + # Use a single regex that handles both quote types + for match in re.finditer(r'([^\s]+)=(["\'])([^\2]*?)\2', content): + key = match.group(1) + value = match.group(3) + attrs[key] = value + last_attr_end = match.end() + + # Everything after the last attribute (skipping leading comma and whitespace) is the display name + if last_attr_end > 0: + remaining = content[last_attr_end:].strip() + # Remove leading comma if present + if remaining.startswith(','): + remaining = remaining[1:].strip() + display_name = remaining + else: + # No attributes found, try the old comma-split method as fallback + parts = content.split(',', 1) + if len(parts) == 2: + display_name = parts[1].strip() + else: + display_name = content.strip() + + # Use tvg-name attribute if available; otherwise try tvc-guide-title, then fall back to display name. + name = get_case_insensitive_attr(attrs, "tvg-name", None) + if not name: + name = get_case_insensitive_attr(attrs, "tvc-guide-title", None) + if not name: + name = display_name + return {"attributes": attrs, "display_name": display_name, "name": name} -def _matches_filters(stream_name: str, group_name: str, filters): - """Check if a stream or group name matches a precompiled regex filter.""" - compiled_filters = [(re.compile(f.regex_pattern, re.IGNORECASE), f.exclude) for f in filters] - for pattern, exclude in compiled_filters: - target = group_name if f.filter_type == 'group' else stream_name - if pattern.search(target or ''): - return exclude - return False @shared_task def refresh_m3u_accounts(): @@ -222,6 +500,7 @@ def refresh_m3u_accounts(): logger.info(msg) return msg + def check_field_lengths(streams_to_create): for stream in streams_to_create: for field, value in stream.__dict__.items(): @@ -231,46 +510,233 @@ def check_field_lengths(streams_to_create): print("") print("") + @shared_task def process_groups(account, groups): - existing_groups = {group.name: group for group in ChannelGroup.objects.filter(name__in=groups.keys())} + existing_groups = { + group.name: group + for group in ChannelGroup.objects.filter(name__in=groups.keys()) + } logger.info(f"Currently {len(existing_groups)} existing groups") - group_objs = [] + # Check if we should auto-enable new groups based on account settings + account_custom_props = account.custom_properties or {} + auto_enable_new_groups_live = account_custom_props.get("auto_enable_new_groups_live", True) + + # Separate existing groups from groups that need to be created + existing_group_objs = [] groups_to_create = [] + for group_name, custom_props in groups.items(): - logger.debug(f"Handling group for M3U account {account.id}: {group_name}") - if (group_name not in existing_groups): - groups_to_create.append(ChannelGroup( - name=group_name, - )) + if group_name in existing_groups: + existing_group_objs.append(existing_groups[group_name]) else: - group_objs.append(existing_groups[group_name]) + groups_to_create.append(ChannelGroup(name=group_name)) + # Create new groups and fetch them back with IDs + newly_created_group_objs = [] if groups_to_create: - logger.debug(f"Creating {len(groups_to_create)} groups") - created = ChannelGroup.bulk_create_and_fetch(groups_to_create) - 
logger.debug(f"Created {len(created)} groups") - group_objs.extend(created) + logger.info(f"Creating {len(groups_to_create)} new groups for account {account.id}") + newly_created_group_objs = list(ChannelGroup.bulk_create_and_fetch(groups_to_create)) + logger.debug(f"Successfully created {len(newly_created_group_objs)} new groups") - relations = [] - for group in group_objs: - # Ensure we include the xc_id in the custom_properties - custom_props = groups.get(group.name, {}) - relations.append(ChannelGroupM3UAccount( - channel_group=group, + # Combine all groups + all_group_objs = existing_group_objs + newly_created_group_objs + + # Get existing relationships for this account + existing_relationships = { + rel.channel_group.name: rel + for rel in ChannelGroupM3UAccount.objects.filter( m3u_account=account, - custom_properties=json.dumps(custom_props), - enabled=True, # Default to enabled - )) + channel_group__name__in=groups.keys() + ).select_related('channel_group') + } - ChannelGroupM3UAccount.objects.bulk_create( - relations, - ignore_conflicts=True - ) + # Get ALL existing relationships for this account to identify orphaned ones + all_existing_relationships = { + rel.channel_group.name: rel + for rel in ChannelGroupM3UAccount.objects.filter( + m3u_account=account + ).select_related('channel_group') + } + + relations_to_create = [] + relations_to_update = [] + relations_to_delete = [] + + # Find orphaned relationships (groups that no longer exist in the source) + current_group_names = set(groups.keys()) + for group_name, rel in all_existing_relationships.items(): + if group_name not in current_group_names: + relations_to_delete.append(rel) + logger.debug(f"Marking relationship for deletion: group '{group_name}' no longer exists in source for account {account.id}") + + for group in all_group_objs: + custom_props = groups.get(group.name, {}) + + if group.name in existing_relationships: + # Update existing relationship if xc_id has changed (preserve other custom properties) + existing_rel = existing_relationships[group.name] + + # Get existing custom properties (now JSONB, no need to parse) + existing_custom_props = existing_rel.custom_properties or {} + + # Get the new xc_id from groups data + new_xc_id = custom_props.get("xc_id") + existing_xc_id = existing_custom_props.get("xc_id") + + # Only update if xc_id has changed + if new_xc_id != existing_xc_id: + # Merge new xc_id with existing custom properties to preserve user settings + updated_custom_props = existing_custom_props.copy() + if new_xc_id is not None: + updated_custom_props["xc_id"] = new_xc_id + elif "xc_id" in updated_custom_props: + # Remove xc_id if it's no longer provided (e.g., converting from XC to standard) + del updated_custom_props["xc_id"] + + existing_rel.custom_properties = updated_custom_props + relations_to_update.append(existing_rel) + logger.debug(f"Updated xc_id for group '{group.name}' from '{existing_xc_id}' to '{new_xc_id}' - account {account.id}") + else: + logger.debug(f"xc_id unchanged for group '{group.name}' - account {account.id}") + else: + # Create new relationship - this group is new to this M3U account + # Use the auto_enable setting to determine if it should start enabled + if not auto_enable_new_groups_live: + logger.info(f"Group '{group.name}' is new to account {account.id} - creating relationship but DISABLED (auto_enable_new_groups_live=False)") + + relations_to_create.append( + ChannelGroupM3UAccount( + channel_group=group, + m3u_account=account, + custom_properties=custom_props, + 
enabled=auto_enable_new_groups_live, + ) + ) + + # Bulk create new relationships + if relations_to_create: + ChannelGroupM3UAccount.objects.bulk_create(relations_to_create, ignore_conflicts=True) + logger.debug(f"Created {len(relations_to_create)} new group relationships for account {account.id}") + + # Bulk update existing relationships + if relations_to_update: + ChannelGroupM3UAccount.objects.bulk_update(relations_to_update, ['custom_properties']) + logger.info(f"Updated {len(relations_to_update)} existing group relationships with new xc_id values for account {account.id}") + + # Delete orphaned relationships + if relations_to_delete: + ChannelGroupM3UAccount.objects.filter( + id__in=[rel.id for rel in relations_to_delete] + ).delete() + logger.info(f"Deleted {len(relations_to_delete)} orphaned group relationships for account {account.id}: {[rel.channel_group.name for rel in relations_to_delete]}") + + # Check if any of the deleted relationships left groups with no remaining associations + orphaned_group_ids = [] + for rel in relations_to_delete: + group = rel.channel_group + + # Check if this group has any remaining M3U account relationships + remaining_m3u_relationships = ChannelGroupM3UAccount.objects.filter( + channel_group=group + ).exists() + + # Check if this group has any direct channels (not through M3U accounts) + has_direct_channels = group.related_channels().exists() + + # If no relationships and no direct channels, it's safe to delete + if not remaining_m3u_relationships and not has_direct_channels: + orphaned_group_ids.append(group.id) + logger.debug(f"Group '{group.name}' has no remaining associations and will be deleted") + + # Delete truly orphaned groups + if orphaned_group_ids: + deleted_groups = list(ChannelGroup.objects.filter(id__in=orphaned_group_ids).values_list('name', flat=True)) + ChannelGroup.objects.filter(id__in=orphaned_group_ids).delete() + logger.info(f"Deleted {len(orphaned_group_ids)} orphaned groups that had no remaining associations: {deleted_groups}") + + +def collect_xc_streams(account_id, enabled_groups): + """Collect all XC streams in a single API call and filter by enabled groups.""" + account = M3UAccount.objects.get(id=account_id) + all_streams = [] + + # Create a mapping from category_id to group info for filtering + enabled_category_ids = {} + for group_name, props in enabled_groups.items(): + if "xc_id" in props: + enabled_category_ids[str(props["xc_id"])] = { + "name": group_name, + "props": props + } + + try: + with XCClient( + account.server_url, + account.username, + account.password, + account.get_user_agent(), + ) as xc_client: + + # Fetch ALL live streams in a single API call (much more efficient) + logger.info("Fetching ALL live streams from XC provider...") + all_xc_streams = xc_client.get_all_live_streams() # Get all streams without category filter + + if not all_xc_streams: + logger.warning("No live streams returned from XC provider") + return [] + + logger.info(f"Retrieved {len(all_xc_streams)} total live streams from provider") + + # Filter streams based on enabled categories + filtered_count = 0 + for stream in all_xc_streams: + # Get the category_id for this stream + category_id = str(stream.get("category_id", "")) + + # Only include streams from enabled categories + if category_id in enabled_category_ids: + group_info = enabled_category_ids[category_id] + + # Convert XC stream to our standard format with all properties preserved + stream_data = { + "name": stream["name"], + "url": 
xc_client.get_stream_url(stream["stream_id"]), + "attributes": { + "tvg-id": stream.get("epg_channel_id", ""), + "tvg-logo": stream.get("stream_icon", ""), + "group-title": group_info["name"], + # Preserve all XC stream properties as custom attributes + "stream_id": str(stream.get("stream_id", "")), + "category_id": category_id, + "stream_type": stream.get("stream_type", ""), + "added": stream.get("added", ""), + "is_adult": str(stream.get("is_adult", "0")), + "custom_sid": stream.get("custom_sid", ""), + # Include any other properties that might be present + **{k: str(v) for k, v in stream.items() if k not in [ + "name", "stream_id", "epg_channel_id", "stream_icon", + "category_id", "stream_type", "added", "is_adult", "custom_sid" + ] and v is not None} + } + } + all_streams.append(stream_data) + filtered_count += 1 + + except Exception as e: + logger.error(f"Failed to fetch XC streams: {str(e)}") + return [] + + logger.info(f"Filtered {filtered_count} streams from {len(enabled_category_ids)} enabled categories") + return all_streams + +def process_xc_category_direct(account_id, batch, groups, hash_keys): + from django.db import connections + + # Ensure clean database connections for threading + connections.close_all() -@shared_task -def process_xc_category(account_id, batch, groups, hash_keys): account = M3UAccount.objects.get(id=account_id) streams_to_create = [] @@ -278,66 +744,93 @@ def process_xc_category(account_id, batch, groups, hash_keys): stream_hashes = {} try: - xc_client = XCClient(account.server_url, account.username, account.password, account.get_user_agent()) + with XCClient( + account.server_url, + account.username, + account.password, + account.get_user_agent(), + ) as xc_client: + # Log the batch details to help with debugging + logger.debug(f"Processing XC batch: {batch}") - # Log the batch details to help with debugging - logger.debug(f"Processing XC batch: {batch}") - - for group_name, props in batch.items(): - # Check if we have a valid xc_id for this group - if 'xc_id' not in props: - logger.error(f"Missing xc_id for group {group_name} in batch {batch}") - continue - - # Get actual group ID from the mapping - group_id = groups.get(group_name) - if not group_id: - logger.error(f"Group {group_name} not found in enabled groups") - continue - - try: - logger.debug(f"Fetching streams for XC category: {group_name} (ID: {props['xc_id']})") - streams = xc_client.get_live_category_streams(props['xc_id']) - - if not streams: - logger.warning(f"No streams found for XC category {group_name} (ID: {props['xc_id']})") + for group_name, props in batch.items(): + # Check if we have a valid xc_id for this group + if "xc_id" not in props: + logger.error( + f"Missing xc_id for group {group_name} in batch {batch}" + ) continue - logger.debug(f"Found {len(streams)} streams for category {group_name}") + # Get actual group ID from the mapping + group_id = groups.get(group_name) + if not group_id: + logger.error(f"Group {group_name} not found in enabled groups") + continue - for stream in streams: - name = stream["name"] - url = xc_client.get_stream_url(stream["stream_id"]) - tvg_id = stream.get("epg_channel_id", "") - tvg_logo = stream.get("stream_icon", "") - group_title = group_name + try: + logger.debug( + f"Fetching streams for XC category: {group_name} (ID: {props['xc_id']})" + ) + streams = xc_client.get_live_category_streams(props["xc_id"]) - stream_hash = Stream.generate_hash_key(name, url, tvg_id, hash_keys) - stream_props = { - "name": name, - "url": url, - "logo_url": tvg_logo, 
- "tvg_id": tvg_id, - "m3u_account": account, - "channel_group_id": int(group_id), - "stream_hash": stream_hash, - "custom_properties": json.dumps(stream), - } + if not streams: + logger.warning( + f"No streams found for XC category {group_name} (ID: {props['xc_id']})" + ) + continue - if stream_hash not in stream_hashes: - stream_hashes[stream_hash] = stream_props - except Exception as e: - logger.error(f"Error processing XC category {group_name} (ID: {props['xc_id']}): {str(e)}") - continue + logger.debug( + f"Found {len(streams)} streams for category {group_name}" + ) + + for stream in streams: + name = stream["name"] + url = xc_client.get_stream_url(stream["stream_id"]) + tvg_id = stream.get("epg_channel_id", "") + tvg_logo = stream.get("stream_icon", "") + group_title = group_name + + stream_hash = Stream.generate_hash_key( + name, url, tvg_id, hash_keys, m3u_id=account_id + ) + stream_props = { + "name": name, + "url": url, + "logo_url": tvg_logo, + "tvg_id": tvg_id, + "m3u_account": account, + "channel_group_id": int(group_id), + "stream_hash": stream_hash, + "custom_properties": stream, + } + + if stream_hash not in stream_hashes: + stream_hashes[stream_hash] = stream_props + except Exception as e: + logger.error( + f"Error processing XC category {group_name} (ID: {props['xc_id']}): {str(e)}" + ) + continue # Process all found streams - existing_streams = {s.stream_hash: s for s in Stream.objects.filter(stream_hash__in=stream_hashes.keys())} + existing_streams = { + s.stream_hash: s + for s in Stream.objects.filter(stream_hash__in=stream_hashes.keys()).select_related('m3u_account').only( + 'id', 'stream_hash', 'name', 'url', 'logo_url', 'tvg_id', 'custom_properties', 'last_seen', 'updated_at', 'm3u_account' + ) + } for stream_hash, stream_props in stream_hashes.items(): if stream_hash in existing_streams: obj = existing_streams[stream_hash] - existing_attr = {field.name: getattr(obj, field.name) for field in Stream._meta.fields if field != 'channel_group_id'} - changed = any(existing_attr[key] != value for key, value in stream_props.items() if key != 'channel_group_id') + # Optimized field comparison for XC streams + changed = ( + obj.name != stream_props["name"] or + obj.url != stream_props["url"] or + obj.logo_url != stream_props["logo_url"] or + obj.tvg_id != stream_props["tvg_id"] or + obj.custom_properties != stream_props["custom_properties"] + ) if changed: for key, value in stream_props.items(): @@ -345,49 +838,48 @@ def process_xc_category(account_id, batch, groups, hash_keys): obj.last_seen = timezone.now() obj.updated_at = timezone.now() # Update timestamp only for changed streams streams_to_update.append(obj) - del existing_streams[stream_hash] else: # Always update last_seen, even if nothing else changed obj.last_seen = timezone.now() # Don't update updated_at for unchanged streams streams_to_update.append(obj) - existing_streams[stream_hash] = obj + + # Remove from existing_streams since we've processed it + del existing_streams[stream_hash] else: stream_props["last_seen"] = timezone.now() - stream_props["updated_at"] = timezone.now() # Set initial updated_at for new streams + stream_props["updated_at"] = ( + timezone.now() + ) # Set initial updated_at for new streams streams_to_create.append(Stream(**stream_props)) try: with transaction.atomic(): if streams_to_create: Stream.objects.bulk_create(streams_to_create, ignore_conflicts=True) + if streams_to_update: - # We need to split the bulk update to correctly handle updated_at - # First, get the subset of streams that 
have content changes - changed_streams = [s for s in streams_to_update if hasattr(s, 'updated_at') and s.updated_at] - unchanged_streams = [s for s in streams_to_update if not hasattr(s, 'updated_at') or not s.updated_at] - - # Update changed streams with all fields including updated_at - if changed_streams: - Stream.objects.bulk_update( - changed_streams, - {key for key in stream_props.keys() if key not in ["m3u_account", "stream_hash"] and key not in hash_keys} | {"last_seen", "updated_at"} - ) - - # Update unchanged streams with only last_seen - if unchanged_streams: - Stream.objects.bulk_update(unchanged_streams, ["last_seen"]) + # Simplified bulk update for better performance + Stream.objects.bulk_update( + streams_to_update, + ['name', 'url', 'logo_url', 'tvg_id', 'custom_properties', 'last_seen', 'updated_at'], + batch_size=150 # Smaller batch size for XC processing + ) + # Update last_seen for any remaining existing streams that weren't processed if len(existing_streams.keys()) > 0: Stream.objects.bulk_update(existing_streams.values(), ["last_seen"]) except Exception as e: - logger.error(f"Bulk create failed for XC streams: {str(e)}") + logger.error(f"Bulk operation failed for XC streams: {str(e)}") retval = f"Batch processed: {len(streams_to_create)} created, {len(streams_to_update)} updated." except Exception as e: logger.error(f"XC category processing error: {str(e)}") retval = f"Error processing XC batch: {str(e)}" + finally: + # Clean up database connections for threading + connections.close_all() # Aggressive garbage collection del streams_to_create, streams_to_update, stream_hashes, existing_streams @@ -395,29 +887,85 @@ def process_xc_category(account_id, batch, groups, hash_keys): return retval -@shared_task -def process_m3u_batch(account_id, batch, groups, hash_keys): - """Processes a batch of M3U streams using bulk operations.""" + +def process_m3u_batch_direct(account_id, batch, groups, hash_keys): + """Processes a batch of M3U streams using bulk operations with thread-safe DB connections.""" + from django.db import connections + + # Ensure clean database connections for threading + connections.close_all() + account = M3UAccount.objects.get(id=account_id) + compiled_filters = [ + ( + re.compile( + f.regex_pattern, + ( + re.IGNORECASE + if (f.custom_properties or {}).get( + "case_sensitive", True + ) + == False + else 0 + ), + ), + f, + ) + for f in account.filters.order_by("order") + ] + streams_to_create = [] streams_to_update = [] stream_hashes = {} - # compiled_filters = [(f.filter_type, re.compile(f.regex_pattern, re.IGNORECASE)) for f in filters] logger.debug(f"Processing batch of {len(batch)} for M3U account {account_id}") + if compiled_filters: + logger.debug(f"Using compiled filters: {[f[1].regex_pattern for f in compiled_filters]}") for stream_info in batch: try: name, url = stream_info["name"], stream_info["url"] - tvg_id, tvg_logo = stream_info["attributes"].get("tvg-id", ""), stream_info["attributes"].get("tvg-logo", "") - group_title = stream_info["attributes"].get("group-title", "Default Group") + + # Validate URL length - maximum of 4096 characters + if url and len(url) > 4096: + logger.warning(f"Skipping stream '{name}': URL too long ({len(url)} characters, max 4096)") + continue + + tvg_id, tvg_logo = get_case_insensitive_attr( + stream_info["attributes"], "tvg-id", "" + ), get_case_insensitive_attr(stream_info["attributes"], "tvg-logo", "") + group_title = get_case_insensitive_attr( + stream_info["attributes"], "group-title", "Default Group" + ) + 
logger.debug(f"Processing stream: {name} - {url} in group {group_title}") + include = True + for pattern, filter in compiled_filters: + logger.trace(f"Checking filter pattern {pattern}") + target = name + if filter.filter_type == "url": + target = url + elif filter.filter_type == "group": + target = group_title + + if pattern.search(target or ""): + logger.debug( + f"Stream {name} - {url} matches filter pattern {filter.regex_pattern}" + ) + include = not filter.exclude + break + + if not include: + logger.debug(f"Stream excluded by filter, skipping.") + continue # Filter out disabled groups for this account if group_title not in groups: - logger.debug(f"Skipping stream in disabled group: {group_title}") + logger.debug( + f"Skipping stream in disabled or excluded group: {group_title}" + ) continue - stream_hash = Stream.generate_hash_key(name, url, tvg_id, hash_keys) + stream_hash = Stream.generate_hash_key(name, url, tvg_id, hash_keys, m3u_id=account_id) stream_props = { "name": name, "url": url, @@ -426,7 +974,7 @@ def process_m3u_batch(account_id, batch, groups, hash_keys): "m3u_account": account, "channel_group_id": int(groups.get(group_title)), "stream_hash": stream_hash, - "custom_properties": json.dumps(stream_info["attributes"]), + "custom_properties": stream_info["attributes"], } if stream_hash not in stream_hashes: @@ -435,90 +983,96 @@ def process_m3u_batch(account_id, batch, groups, hash_keys): logger.error(f"Failed to process stream {name}: {e}") logger.error(json.dumps(stream_info)) - existing_streams = {s.stream_hash: s for s in Stream.objects.filter(stream_hash__in=stream_hashes.keys())} + existing_streams = { + s.stream_hash: s + for s in Stream.objects.filter(stream_hash__in=stream_hashes.keys()).select_related('m3u_account').only( + 'id', 'stream_hash', 'name', 'url', 'logo_url', 'tvg_id', 'custom_properties', 'last_seen', 'updated_at', 'm3u_account' + ) + } for stream_hash, stream_props in stream_hashes.items(): if stream_hash in existing_streams: obj = existing_streams[stream_hash] - existing_attr = {field.name: getattr(obj, field.name) for field in Stream._meta.fields if field != 'channel_group_id'} - changed = any(existing_attr[key] != value for key, value in stream_props.items() if key != 'channel_group_id') + # Optimized field comparison + changed = ( + obj.name != stream_props["name"] or + obj.url != stream_props["url"] or + obj.logo_url != stream_props["logo_url"] or + obj.tvg_id != stream_props["tvg_id"] or + obj.custom_properties != stream_props["custom_properties"] + ) + + # Always update last_seen + obj.last_seen = timezone.now() if changed: - for key, value in stream_props.items(): - setattr(obj, key, value) - obj.last_seen = timezone.now() - obj.updated_at = timezone.now() # Update timestamp only for changed streams - streams_to_update.append(obj) - del existing_streams[stream_hash] - else: - # Always update last_seen, even if nothing else changed - obj.last_seen = timezone.now() - # Don't update updated_at for unchanged streams - streams_to_update.append(obj) - existing_streams[stream_hash] = obj + # Only update fields that changed and set updated_at + obj.name = stream_props["name"] + obj.url = stream_props["url"] + obj.logo_url = stream_props["logo_url"] + obj.tvg_id = stream_props["tvg_id"] + obj.custom_properties = stream_props["custom_properties"] + obj.updated_at = timezone.now() + + streams_to_update.append(obj) else: + # New stream stream_props["last_seen"] = timezone.now() - stream_props["updated_at"] = timezone.now() # Set initial updated_at for 
new streams + stream_props["updated_at"] = timezone.now() streams_to_create.append(Stream(**stream_props)) try: with transaction.atomic(): if streams_to_create: Stream.objects.bulk_create(streams_to_create, ignore_conflicts=True) + if streams_to_update: - # We need to split the bulk update to correctly handle updated_at - # First, get the subset of streams that have content changes - changed_streams = [s for s in streams_to_update if hasattr(s, 'updated_at') and s.updated_at] - unchanged_streams = [s for s in streams_to_update if not hasattr(s, 'updated_at') or not s.updated_at] - - # Update changed streams with all fields including updated_at - if changed_streams: - Stream.objects.bulk_update( - changed_streams, - {key for key in stream_props.keys() if key not in ["m3u_account", "stream_hash"] and key not in hash_keys} | {"last_seen", "updated_at"} - ) - - # Update unchanged streams with only last_seen - if unchanged_streams: - Stream.objects.bulk_update(unchanged_streams, ["last_seen"]) - - if len(existing_streams.keys()) > 0: - Stream.objects.bulk_update(existing_streams.values(), ["last_seen"]) + # Update all streams in a single bulk operation + Stream.objects.bulk_update( + streams_to_update, + ['name', 'url', 'logo_url', 'tvg_id', 'custom_properties', 'last_seen', 'updated_at'], + batch_size=200 + ) except Exception as e: - logger.error(f"Bulk create failed: {str(e)}") + logger.error(f"Bulk operation failed: {str(e)}") retval = f"M3U account: {account_id}, Batch processed: {len(streams_to_create)} created, {len(streams_to_update)} updated." # Aggressive garbage collection - #del streams_to_create, streams_to_update, stream_hashes, existing_streams - #from core.utils import cleanup_memory - #cleanup_memory(log_usage=True, force_collection=True) + # del streams_to_create, streams_to_update, stream_hashes, existing_streams + # from core.utils import cleanup_memory + # cleanup_memory(log_usage=True, force_collection=True) + + # Clean up database connections for threading + connections.close_all() return retval + def cleanup_streams(account_id, scan_start_time=timezone.now): account = M3UAccount.objects.get(id=account_id, is_active=True) existing_groups = ChannelGroup.objects.filter( - m3u_account__m3u_account=account, - m3u_account__enabled=True, - ).values_list('id', flat=True) - logger.info(f"Found {len(existing_groups)} active groups for M3U account {account_id}") + m3u_accounts__m3u_account=account, + m3u_accounts__enabled=True, + ).values_list("id", flat=True) + logger.info( + f"Found {len(existing_groups)} active groups for M3U account {account_id}" + ) # Calculate cutoff date for stale streams stale_cutoff = scan_start_time - timezone.timedelta(days=account.stale_stream_days) - logger.info(f"Removing streams not seen since {stale_cutoff} for M3U account {account_id}") + logger.info( + f"Removing streams not seen since {stale_cutoff} for M3U account {account_id}" + ) # Delete streams that are not in active groups - streams_to_delete = Stream.objects.filter( - m3u_account=account - ).exclude( + streams_to_delete = Stream.objects.filter(m3u_account=account).exclude( channel_group__in=existing_groups ) # Also delete streams that haven't been seen for longer than stale_stream_days stale_streams = Stream.objects.filter( - m3u_account=account, - last_seen__lt=stale_cutoff + m3u_account=account, last_seen__lt=stale_cutoff ) deleted_count = streams_to_delete.count() @@ -528,20 +1082,23 @@ def cleanup_streams(account_id, scan_start_time=timezone.now): stale_streams.delete() total_deleted = 
deleted_count + stale_count - logger.info(f"Cleanup for M3U account {account_id} complete: {deleted_count} streams removed due to group filter, {stale_count} removed as stale") + logger.info( + f"Cleanup for M3U account {account_id} complete: {deleted_count} streams removed due to group filter, {stale_count} removed as stale" + ) # Return the total count of deleted streams return total_deleted + @shared_task def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False): - if not acquire_task_lock('refresh_m3u_account_groups', account_id): + if not acquire_task_lock("refresh_m3u_account_groups", account_id): return f"Task already running for account_id={account_id}.", None try: account = M3UAccount.objects.get(id=account_id, is_active=True) except M3UAccount.DoesNotExist: - release_task_lock('refresh_m3u_account_groups', account_id) + release_task_lock("refresh_m3u_account_groups", account_id) return f"M3UAccount with ID={account_id} not found or inactive.", None extinf_data = [] @@ -549,8 +1106,12 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False): if account.account_type == M3UAccount.Types.XC: # Log detailed information about the account - logger.info(f"Processing XC account {account_id} with URL: {account.server_url}") - logger.debug(f"Username: {account.username}, Has password: {'Yes' if account.password else 'No'}") + logger.info( + f"Processing XC account {account_id} with URL: {account.server_url}" + ) + logger.debug( + f"Username: {account.username}, Has password: {'Yes' if account.password else 'No'}" + ) # Validate required fields if not account.server_url: @@ -558,9 +1119,11 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False): logger.error(error_msg) account.status = M3UAccount.Status.ERROR account.last_message = error_msg - account.save(update_fields=['status', 'last_message']) - send_m3u_update(account_id, "processing_groups", 100, status="error", error=error_msg) - release_task_lock('refresh_m3u_account_groups', account_id) + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account_id, "processing_groups", 100, status="error", error=error_msg + ) + release_task_lock("refresh_m3u_account_groups", account_id) return error_msg, None if not account.username or not account.password: @@ -568,15 +1131,19 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False): logger.error(error_msg) account.status = M3UAccount.Status.ERROR account.last_message = error_msg - account.save(update_fields=['status', 'last_message']) - send_m3u_update(account_id, "processing_groups", 100, status="error", error=error_msg) - release_task_lock('refresh_m3u_account_groups', account_id) + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account_id, "processing_groups", 100, status="error", error=error_msg + ) + release_task_lock("refresh_m3u_account_groups", account_id) return error_msg, None try: # Ensure server URL is properly formatted - server_url = account.server_url.rstrip('/') - if not (server_url.startswith('http://') or server_url.startswith('https://')): + server_url = account.server_url.rstrip("/") + if not ( + server_url.startswith("http://") or server_url.startswith("https://") + ): server_url = f"http://{server_url}" # User agent handling - completely rewritten @@ -585,114 +1152,219 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False): logger.debug(f"Getting user agent for account {account.id}") # Use a hardcoded user agent string to avoid any 
issues with object structure - user_agent_string = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" + user_agent_string = ( + "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" + ) try: # Try to get the user agent directly from the database if account.user_agent_id: ua_obj = UserAgent.objects.get(id=account.user_agent_id) - if ua_obj and hasattr(ua_obj, 'user_agent') and ua_obj.user_agent: + if ( + ua_obj + and hasattr(ua_obj, "user_agent") + and ua_obj.user_agent + ): user_agent_string = ua_obj.user_agent - logger.debug(f"Using user agent from account: {user_agent_string}") + logger.debug( + f"Using user agent from account: {user_agent_string}" + ) else: # Get default user agent from CoreSettings default_ua_id = CoreSettings.get_default_user_agent_id() - logger.debug(f"Default user agent ID from settings: {default_ua_id}") + logger.debug( + f"Default user agent ID from settings: {default_ua_id}" + ) if default_ua_id: ua_obj = UserAgent.objects.get(id=default_ua_id) - if ua_obj and hasattr(ua_obj, 'user_agent') and ua_obj.user_agent: + if ( + ua_obj + and hasattr(ua_obj, "user_agent") + and ua_obj.user_agent + ): user_agent_string = ua_obj.user_agent - logger.debug(f"Using default user agent: {user_agent_string}") + logger.debug( + f"Using default user agent: {user_agent_string}" + ) except Exception as e: - logger.warning(f"Error getting user agent, using fallback: {str(e)}") + logger.warning( + f"Error getting user agent, using fallback: {str(e)}" + ) logger.debug(f"Final user agent string: {user_agent_string}") except Exception as e: - user_agent_string = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" - logger.warning(f"Exception in user agent handling, using fallback: {str(e)}") + user_agent_string = ( + "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" + ) + logger.warning( + f"Exception in user agent handling, using fallback: {str(e)}" + ) - logger.info(f"Creating XCClient with URL: {server_url}, Username: {account.username}, User-Agent: {user_agent_string}") + logger.info( + f"Creating XCClient with URL: {account.server_url}, Username: {account.username}, User-Agent: {user_agent_string}" + ) # Create XCClient with explicit error handling try: - xc_client = XCClient(server_url, account.username, account.password, user_agent_string) - logger.info(f"XCClient instance created successfully") + with XCClient( + account.server_url, account.username, account.password, user_agent_string + ) as xc_client: + logger.info(f"XCClient instance created successfully") + + # Authenticate with detailed error handling + try: + logger.debug(f"Authenticating with XC server {server_url}") + auth_result = xc_client.authenticate() + logger.debug(f"Authentication response: {auth_result}") + + # Save account information to all active profiles + try: + from apps.m3u.models import M3UAccountProfile + + profiles = M3UAccountProfile.objects.filter( + m3u_account=account, + is_active=True + ) + + # Update each profile with account information using its own transformed credentials + for profile in profiles: + try: + # Get transformed credentials for this specific profile + profile_url, profile_username, profile_password = get_transformed_credentials(account, profile) + + # Create a separate XC client for this profile's credentials + with XCClient( + profile_url, + profile_username, + profile_password, + user_agent_string + ) as profile_client: + # Authenticate with this profile's credentials + if profile_client.authenticate(): + # Get account information 
specific to this profile's credentials + profile_account_info = profile_client.get_account_info() + + # Merge with existing custom_properties if they exist + existing_props = profile.custom_properties or {} + existing_props.update(profile_account_info) + profile.custom_properties = existing_props + profile.save(update_fields=['custom_properties']) + + logger.info(f"Updated account information for profile '{profile.name}' with transformed credentials") + else: + logger.warning(f"Failed to authenticate profile '{profile.name}' with transformed credentials") + + except Exception as profile_error: + logger.error(f"Failed to update account information for profile '{profile.name}': {str(profile_error)}") + # Continue with other profiles even if one fails + + logger.info(f"Processed account information for {profiles.count()} profiles for account {account.name}") + + except Exception as save_error: + logger.warning(f"Failed to process profile account information: {str(save_error)}") + # Don't fail the whole process if saving account info fails + + except Exception as e: + error_msg = f"Failed to authenticate with XC server: {str(e)}" + logger.error(error_msg) + account.status = M3UAccount.Status.ERROR + account.last_message = error_msg + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account_id, + "processing_groups", + 100, + status="error", + error=error_msg, + ) + release_task_lock("refresh_m3u_account_groups", account_id) + return error_msg, None + + # Get categories with detailed error handling + try: + logger.info(f"Getting live categories from XC server") + xc_categories = xc_client.get_live_categories() + logger.info( + f"Found {len(xc_categories)} categories: {xc_categories}" + ) + + # Validate response + if not isinstance(xc_categories, list): + error_msg = ( + f"Unexpected response from XC server: {xc_categories}" + ) + logger.error(error_msg) + account.status = M3UAccount.Status.ERROR + account.last_message = error_msg + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account_id, + "processing_groups", + 100, + status="error", + error=error_msg, + ) + release_task_lock("refresh_m3u_account_groups", account_id) + return error_msg, None + + if len(xc_categories) == 0: + logger.warning("No categories found in XC server response") + + for category in xc_categories: + cat_name = category.get("category_name", "Unknown Category") + cat_id = category.get("category_id", "0") + logger.info(f"Adding category: {cat_name} (ID: {cat_id})") + groups[cat_name] = { + "xc_id": cat_id, + } + except Exception as e: + error_msg = f"Failed to get categories from XC server: {str(e)}" + logger.error(error_msg) + account.status = M3UAccount.Status.ERROR + account.last_message = error_msg + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account_id, + "processing_groups", + 100, + status="error", + error=error_msg, + ) + release_task_lock("refresh_m3u_account_groups", account_id) + return error_msg, None + except Exception as e: - error_msg = f"Failed to create XCClient: {str(e)}" + error_msg = f"Failed to create XC Client: {str(e)}" logger.error(error_msg) account.status = M3UAccount.Status.ERROR account.last_message = error_msg - account.save(update_fields=['status', 'last_message']) - send_m3u_update(account_id, "processing_groups", 100, status="error", error=error_msg) - release_task_lock('refresh_m3u_account_groups', account_id) - return error_msg, None - - # Authenticate with detailed error handling - try: - 
logger.debug(f"Authenticating with XC server {server_url}") - auth_result = xc_client.authenticate() - logger.debug(f"Authentication response: {auth_result}") - except Exception as e: - error_msg = f"Failed to authenticate with XC server: {str(e)}" - logger.error(error_msg) - account.status = M3UAccount.Status.ERROR - account.last_message = error_msg - account.save(update_fields=['status', 'last_message']) - send_m3u_update(account_id, "processing_groups", 100, status="error", error=error_msg) - release_task_lock('refresh_m3u_account_groups', account_id) - return error_msg, None - - # Get categories with detailed error handling - try: - logger.info(f"Getting live categories from XC server") - xc_categories = xc_client.get_live_categories() - logger.info(f"Found {len(xc_categories)} categories: {xc_categories}") - - # Validate response - if not isinstance(xc_categories, list): - error_msg = f"Unexpected response from XC server: {xc_categories}" - logger.error(error_msg) - account.status = M3UAccount.Status.ERROR - account.last_message = error_msg - account.save(update_fields=['status', 'last_message']) - send_m3u_update(account_id, "processing_groups", 100, status="error", error=error_msg) - release_task_lock('refresh_m3u_account_groups', account_id) - return error_msg, None - - if len(xc_categories) == 0: - logger.warning("No categories found in XC server response") - - for category in xc_categories: - cat_name = category.get("category_name", "Unknown Category") - cat_id = category.get("category_id", "0") - logger.info(f"Adding category: {cat_name} (ID: {cat_id})") - groups[cat_name] = { - "xc_id": cat_id, - } - except Exception as e: - error_msg = f"Failed to get categories from XC server: {str(e)}" - logger.error(error_msg) - account.status = M3UAccount.Status.ERROR - account.last_message = error_msg - account.save(update_fields=['status', 'last_message']) - send_m3u_update(account_id, "processing_groups", 100, status="error", error=error_msg) - release_task_lock('refresh_m3u_account_groups', account_id) + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account_id, + "processing_groups", + 100, + status="error", + error=error_msg, + ) + release_task_lock("refresh_m3u_account_groups", account_id) return error_msg, None except Exception as e: - error_msg = f"Unexpected error in XC processing: {str(e)}" + error_msg = f"Unexpected error occurred in XC Client: {str(e)}" logger.error(error_msg) account.status = M3UAccount.Status.ERROR account.last_message = error_msg - account.save(update_fields=['status', 'last_message']) - send_m3u_update(account_id, "processing_groups", 100, status="error", error=error_msg) - release_task_lock('refresh_m3u_account_groups', account_id) + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account_id, "processing_groups", 100, status="error", error=error_msg + ) + release_task_lock("refresh_m3u_account_groups", account_id) return error_msg, None else: # Here's the key change - use the success flag from fetch_m3u_lines lines, success = fetch_m3u_lines(account, use_cache) if not success: # If fetch failed, don't continue processing - release_task_lock('refresh_m3u_account_groups', account_id) + release_task_lock("refresh_m3u_account_groups", account_id) return f"Failed to fetch M3U data for account_id={account_id}.", None # Log basic file structure for debugging @@ -712,70 +1384,99 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False): extinf_count += 1 parsed = parse_extinf_line(line) if 
parsed: - if "group-title" in parsed["attributes"]: - group_name = parsed["attributes"]["group-title"] + group_title_attr = get_case_insensitive_attr( + parsed["attributes"], "group-title", "" + ) + if group_title_attr: + group_name = group_title_attr # Log new groups as they're discovered if group_name not in groups: - logger.debug(f"Found new group for M3U account {account_id}: '{group_name}'") + logger.debug( + f"Found new group for M3U account {account_id}: '{group_name}'" + ) groups[group_name] = {} extinf_data.append(parsed) else: # Log problematic EXTINF lines - logger.warning(f"Failed to parse EXTINF at line {line_index+1}: {line[:200]}") - problematic_lines.append((line_index+1, line[:200])) + logger.warning( + f"Failed to parse EXTINF at line {line_index+1}: {line[:200]}" + ) + problematic_lines.append((line_index + 1, line[:200])) - elif extinf_data and line.startswith("http"): + elif extinf_data and (line.startswith("http") or line.startswith("rtsp") or line.startswith("rtp") or line.startswith("udp")): url_count += 1 + # Normalize UDP URLs only (e.g., remove VLC-specific @ prefix) + normalized_url = normalize_stream_url(line) if line.startswith("udp") else line # Associate URL with the last EXTINF line - extinf_data[-1]["url"] = line + extinf_data[-1]["url"] = normalized_url valid_stream_count += 1 # Periodically log progress for large files if valid_stream_count % 1000 == 0: - logger.debug(f"Processed {valid_stream_count} valid streams so far for M3U account: {account_id}") + logger.debug( + f"Processed {valid_stream_count} valid streams so far for M3U account: {account_id}" + ) # Log summary statistics - logger.info(f"M3U parsing complete - Lines: {line_count}, EXTINF: {extinf_count}, URLs: {url_count}, Valid streams: {valid_stream_count}") + logger.info( + f"M3U parsing complete - Lines: {line_count}, EXTINF: {extinf_count}, URLs: {url_count}, Valid streams: {valid_stream_count}" + ) if problematic_lines: - logger.warning(f"Found {len(problematic_lines)} problematic lines during parsing") - for i, (line_num, content) in enumerate(problematic_lines[:10]): # Log max 10 examples + logger.warning( + f"Found {len(problematic_lines)} problematic lines during parsing" + ) + for i, (line_num, content) in enumerate( + problematic_lines[:10] + ): # Log max 10 examples logger.warning(f"Problematic line #{i+1} at line {line_num}: {content}") if len(problematic_lines) > 10: - logger.warning(f"... and {len(problematic_lines) - 10} more problematic lines") + logger.warning( + f"... and {len(problematic_lines) - 10} more problematic lines" + ) # Log group statistics - logger.info(f"Found {len(groups)} groups in M3U file: {', '.join(list(groups.keys())[:20])}" + - ("..." if len(groups) > 20 else "")) + logger.info( + f"Found {len(groups)} groups in M3U file: {', '.join(list(groups.keys())[:20])}" + + ("..." 
if len(groups) > 20 else "") + ) # Cache processed data cache_path = os.path.join(m3u_dir, f"{account_id}.json") - with open(cache_path, 'w', encoding='utf-8') as f: - json.dump({ - "extinf_data": extinf_data, - "groups": groups, - }, f) + with open(cache_path, "w", encoding="utf-8") as f: + json.dump( + { + "extinf_data": extinf_data, + "groups": groups, + }, + f, + ) logger.debug(f"Cached parsed M3U data to {cache_path}") send_m3u_update(account_id, "processing_groups", 0) process_groups(account, groups) - release_task_lock('refresh_m3u_account_groups', account_id) - - + release_task_lock("refresh_m3u_account_groups", account_id) if not full_refresh: # Use update() instead of save() to avoid triggering signals M3UAccount.objects.filter(id=account_id).update( status=M3UAccount.Status.PENDING_SETUP, - last_message="M3U groups loaded. Please select groups or refresh M3U to complete setup." + last_message="M3U groups loaded. Please select groups or refresh M3U to complete setup.", + ) + send_m3u_update( + account_id, + "processing_groups", + 100, + status="pending_setup", + message="M3U groups loaded. Please select groups or refresh M3U to complete setup.", ) - send_m3u_update(account_id, "processing_groups", 100, status="pending_setup", message="M3U groups loaded. Please select groups or refresh M3U to complete setup.") return extinf_data, groups + def delete_m3u_refresh_task_by_id(account_id): """ Delete the periodic task associated with an M3U account ID. @@ -789,6 +1490,7 @@ def delete_m3u_refresh_task_by_id(account_id): # Look for task by name try: from django_celery_beat.models import PeriodicTask, IntervalSchedule + task = PeriodicTask.objects.get(name=task_name) logger.debug(f"Found task by name: {task.id} for M3UAccount {account_id}") except PeriodicTask.DoesNotExist: @@ -799,12 +1501,16 @@ def delete_m3u_refresh_task_by_id(account_id): if task: # Store interval info before deleting the task interval_id = None - if hasattr(task, 'interval') and task.interval: + if hasattr(task, "interval") and task.interval: interval_id = task.interval.id # Count how many TOTAL tasks use this interval (including this one) - tasks_with_same_interval = PeriodicTask.objects.filter(interval_id=interval_id).count() - logger.debug(f"Interval {interval_id} is used by {tasks_with_same_interval} tasks total") + tasks_with_same_interval = PeriodicTask.objects.filter( + interval_id=interval_id + ).count() + logger.debug( + f"Interval {interval_id} is used by {tasks_with_same_interval} tasks total" + ) # Delete the task first task_id = task.id @@ -816,24 +1522,863 @@ def delete_m3u_refresh_task_by_id(account_id): if interval_id and tasks_with_same_interval == 1: try: interval = IntervalSchedule.objects.get(id=interval_id) - logger.debug(f"Deleting interval schedule {interval_id} (not shared with other tasks)") + logger.debug( + f"Deleting interval schedule {interval_id} (not shared with other tasks)" + ) interval.delete() logger.debug(f"Successfully deleted interval {interval_id}") except IntervalSchedule.DoesNotExist: logger.warning(f"Interval {interval_id} no longer exists") elif interval_id: - logger.debug(f"Not deleting interval {interval_id} as it's shared with {tasks_with_same_interval-1} other tasks") + logger.debug( + f"Not deleting interval {interval_id} as it's shared with {tasks_with_same_interval-1} other tasks" + ) return True return False except Exception as e: - logger.error(f"Error deleting periodic task for M3UAccount {account_id}: {str(e)}", exc_info=True) + logger.error( + f"Error deleting 
periodic task for M3UAccount {account_id}: {str(e)}", + exc_info=True, + ) return False + +@shared_task +def sync_auto_channels(account_id, scan_start_time=None): + """ + Automatically create/update/delete channels to match streams in groups with auto_channel_sync enabled. + Preserves existing channel UUIDs to maintain M3U link integrity. + Called after M3U refresh completes successfully. + """ + from apps.channels.models import ( + Channel, + ChannelGroup, + ChannelGroupM3UAccount, + Stream, + ChannelStream, + ) + from apps.epg.models import EPGData + from django.utils import timezone + + try: + account = M3UAccount.objects.get(id=account_id) + logger.info(f"Starting auto channel sync for M3U account {account.name}") + + # Always use scan_start_time as the cutoff for last_seen + if scan_start_time is not None: + if isinstance(scan_start_time, str): + scan_start_time = timezone.datetime.fromisoformat(scan_start_time) + else: + scan_start_time = timezone.now() + + # Get groups with auto sync enabled for this account + auto_sync_groups = ChannelGroupM3UAccount.objects.filter( + m3u_account=account, enabled=True, auto_channel_sync=True + ).select_related("channel_group") + + channels_created = 0 + channels_updated = 0 + channels_deleted = 0 + + for group_relation in auto_sync_groups: + channel_group = group_relation.channel_group + start_number = group_relation.auto_sync_channel_start or 1.0 + + # Get force_dummy_epg, group_override, and regex patterns from group custom_properties + group_custom_props = {} + force_dummy_epg = False # Backward compatibility: legacy option to disable EPG + override_group_id = None + name_regex_pattern = None + name_replace_pattern = None + name_match_regex = None + channel_profile_ids = None + channel_sort_order = None + channel_sort_reverse = False + stream_profile_id = None + custom_logo_id = None + custom_epg_id = None # New option: select specific EPG source (takes priority over force_dummy_epg) + if group_relation.custom_properties: + group_custom_props = group_relation.custom_properties + force_dummy_epg = group_custom_props.get("force_dummy_epg", False) + override_group_id = group_custom_props.get("group_override") + name_regex_pattern = group_custom_props.get("name_regex_pattern") + name_replace_pattern = group_custom_props.get( + "name_replace_pattern" + ) + name_match_regex = group_custom_props.get("name_match_regex") + channel_profile_ids = group_custom_props.get("channel_profile_ids") + custom_epg_id = group_custom_props.get("custom_epg_id") + channel_sort_order = group_custom_props.get("channel_sort_order") + channel_sort_reverse = group_custom_props.get( + "channel_sort_reverse", False + ) + stream_profile_id = group_custom_props.get("stream_profile_id") + custom_logo_id = group_custom_props.get("custom_logo_id") + + # Determine which group to use for created channels + target_group = channel_group + if override_group_id: + try: + target_group = ChannelGroup.objects.get(id=override_group_id) + logger.info( + f"Using override group '{target_group.name}' instead of '{channel_group.name}' for auto-created channels" + ) + except ChannelGroup.DoesNotExist: + logger.warning( + f"Override group with ID {override_group_id} not found, using original group '{channel_group.name}'" + ) + + logger.info( + f"Processing auto sync for group: {channel_group.name} (start: {start_number})" + ) + + # Get all current streams in this group for this M3U account, filter out stale streams + current_streams = Stream.objects.filter( + m3u_account=account, + 
channel_group=channel_group, + last_seen__gte=scan_start_time, + ) + + # --- FILTER STREAMS BY NAME MATCH REGEX IF SPECIFIED --- + if name_match_regex: + try: + current_streams = current_streams.filter( + name__iregex=name_match_regex + ) + except re.error as e: + logger.warning( + f"Invalid name_match_regex '{name_match_regex}' for group '{channel_group.name}': {e}. Skipping name filter." + ) + + # --- APPLY CHANNEL SORT ORDER --- + streams_is_list = False # Track if we converted to list + if channel_sort_order and channel_sort_order != "": + if channel_sort_order == "name": + # Use natural sorting for names to handle numbers correctly + current_streams = list(current_streams) + current_streams.sort( + key=lambda stream: natural_sort_key(stream.name), + reverse=channel_sort_reverse, + ) + streams_is_list = True + elif channel_sort_order == "tvg_id": + order_prefix = "-" if channel_sort_reverse else "" + current_streams = current_streams.order_by(f"{order_prefix}tvg_id") + elif channel_sort_order == "updated_at": + order_prefix = "-" if channel_sort_reverse else "" + current_streams = current_streams.order_by( + f"{order_prefix}updated_at" + ) + else: + logger.warning( + f"Unknown channel_sort_order '{channel_sort_order}' for group '{channel_group.name}'. Using provider order." + ) + order_prefix = "-" if channel_sort_reverse else "" + current_streams = current_streams.order_by(f"{order_prefix}id") + else: + # Provider order (default) - can still be reversed + order_prefix = "-" if channel_sort_reverse else "" + current_streams = current_streams.order_by(f"{order_prefix}id") + + # Get existing auto-created channels for this account (regardless of current group) + # We'll find them by their stream associations instead of just group location + existing_channels = Channel.objects.filter( + auto_created=True, auto_created_by=account + ).select_related("logo", "epg_data") + + # Create mapping of existing channels by their associated stream + # This approach finds channels even if they've been moved to different groups + existing_channel_map = {} + for channel in existing_channels: + # Get streams associated with this channel that belong to our M3U account and original group + channel_streams = ChannelStream.objects.filter( + channel=channel, + stream__m3u_account=account, + stream__channel_group=channel_group, # Match streams from the original group + ).select_related("stream") + + # Map each of our M3U account's streams to this channel + for channel_stream in channel_streams: + if channel_stream.stream: + existing_channel_map[channel_stream.stream.id] = channel + + # Track which streams we've processed + processed_stream_ids = set() + + # Check if we have streams - handle both QuerySet and list cases + has_streams = ( + len(current_streams) > 0 + if streams_is_list + else current_streams.exists() + ) + + if not has_streams: + logger.debug(f"No streams found in group {channel_group.name}") + # Delete all existing auto channels if no streams + channels_to_delete = [ch for ch in existing_channel_map.values()] + if channels_to_delete: + deleted_count = len(channels_to_delete) + Channel.objects.filter( + id__in=[ch.id for ch in channels_to_delete] + ).delete() + channels_deleted += deleted_count + logger.debug( + f"Deleted {deleted_count} auto channels (no streams remaining)" + ) + continue + + # Prepare profiles to assign to new channels + from apps.channels.models import ChannelProfile, ChannelProfileMembership + + if ( + channel_profile_ids + and isinstance(channel_profile_ids, list) + and 
len(channel_profile_ids) > 0 + ): + # Convert all to int (in case they're strings) + try: + profile_ids = [int(pid) for pid in channel_profile_ids] + except Exception: + profile_ids = [] + profiles_to_assign = list( + ChannelProfile.objects.filter(id__in=profile_ids) + ) + else: + profiles_to_assign = list(ChannelProfile.objects.all()) + + # Get stream profile to assign if specified + from core.models import StreamProfile + stream_profile_to_assign = None + if stream_profile_id: + try: + stream_profile_to_assign = StreamProfile.objects.get(id=int(stream_profile_id)) + logger.info( + f"Will assign stream profile '{stream_profile_to_assign.name}' to auto-synced streams in group '{channel_group.name}'" + ) + except (StreamProfile.DoesNotExist, ValueError, TypeError): + logger.warning( + f"Stream profile with ID {stream_profile_id} not found for group '{channel_group.name}', streams will use default profile" + ) + stream_profile_to_assign = None + + # Process each current stream + current_channel_number = start_number + + # Always renumber all existing channels to match current sort order + # This ensures channels are always in the correct sequence + channels_to_renumber = [] + temp_channel_number = start_number + + # Get all channel numbers that are already in use by other channels (not auto-created by this account) + used_numbers = set( + Channel.objects.exclude( + auto_created=True, auto_created_by=account + ).values_list("channel_number", flat=True) + ) + + for stream in current_streams: + if stream.id in existing_channel_map: + channel = existing_channel_map[stream.id] + + # Find next available number starting from temp_channel_number + target_number = temp_channel_number + while target_number in used_numbers: + target_number += 1 + + # Add this number to used_numbers so we don't reuse it in this batch + used_numbers.add(target_number) + + if channel.channel_number != target_number: + channel.channel_number = target_number + channels_to_renumber.append(channel) + logger.debug( + f"Will renumber channel '{channel.name}' to {target_number}" + ) + + temp_channel_number += 1.0 + if temp_channel_number % 1 != 0: # Has decimal + temp_channel_number = int(temp_channel_number) + 1.0 + + # Bulk update channel numbers if any need renumbering + if channels_to_renumber: + Channel.objects.bulk_update(channels_to_renumber, ["channel_number"]) + logger.info( + f"Renumbered {len(channels_to_renumber)} channels to maintain sort order" + ) + + # Reset channel number counter for processing new channels + current_channel_number = start_number + + for stream in current_streams: + processed_stream_ids.add(stream.id) + try: + # Parse custom properties for additional info + stream_custom_props = stream.custom_properties or {} + tvc_guide_stationid = stream_custom_props.get("tvc-guide-stationid") + + # --- REGEX FIND/REPLACE LOGIC --- + original_name = stream.name + new_name = original_name + if name_regex_pattern is not None: + # If replace is None, treat as empty string (remove match) + replace = ( + name_replace_pattern + if name_replace_pattern is not None + else "" + ) + try: + # Convert $1, $2, etc. to \1, \2, etc. for consistency with M3U profiles + safe_replace_pattern = re.sub(r'\$(\d+)', r'\\\1', replace) + new_name = re.sub( + name_regex_pattern, safe_replace_pattern, original_name + ) + except re.error as e: + logger.warning( + f"Regex error for group '{channel_group.name}': {e}. Using original name." 
+ ) + new_name = original_name + + # Check if we already have a channel for this stream + existing_channel = existing_channel_map.get(stream.id) + + if existing_channel: + # Update existing channel if needed (channel number already handled above) + channel_updated = False + + # Use new_name instead of stream.name + if existing_channel.name != new_name: + existing_channel.name = new_name + channel_updated = True + + if existing_channel.tvg_id != stream.tvg_id: + existing_channel.tvg_id = stream.tvg_id + channel_updated = True + + if existing_channel.tvc_guide_stationid != tvc_guide_stationid: + existing_channel.tvc_guide_stationid = tvc_guide_stationid + channel_updated = True + + # Check if channel group needs to be updated (in case override was added/changed) + if existing_channel.channel_group != target_group: + existing_channel.channel_group = target_group + channel_updated = True + logger.info( + f"Moved auto channel '{existing_channel.name}' from '{existing_channel.channel_group.name if existing_channel.channel_group else 'None'}' to '{target_group.name}'" + ) + + # Handle logo updates + current_logo = None + if custom_logo_id: + # Use the custom logo specified in group settings + from apps.channels.models import Logo + try: + current_logo = Logo.objects.get(id=custom_logo_id) + except Logo.DoesNotExist: + logger.warning( + f"Custom logo with ID {custom_logo_id} not found for existing channel, falling back to stream logo" + ) + # Fall back to stream logo if custom logo not found + if stream.logo_url: + current_logo, _ = Logo.objects.get_or_create( + url=stream.logo_url, + defaults={ + "name": stream.name or stream.tvg_id or "Unknown" + }, + ) + elif stream.logo_url: + # No custom logo configured, use stream logo + from apps.channels.models import Logo + + current_logo, _ = Logo.objects.get_or_create( + url=stream.logo_url, + defaults={ + "name": stream.name or stream.tvg_id or "Unknown" + }, + ) + + if existing_channel.logo != current_logo: + existing_channel.logo = current_logo + channel_updated = True + + # Handle EPG data updates + current_epg_data = None + if custom_epg_id: + # Use the custom EPG specified in group settings (e.g., a dummy EPG) + from apps.epg.models import EPGSource + try: + epg_source = EPGSource.objects.get(id=custom_epg_id) + # For dummy EPGs, select the first (and typically only) EPGData entry from this source + if epg_source.source_type == 'dummy': + current_epg_data = EPGData.objects.filter( + epg_source=epg_source + ).first() + if not current_epg_data: + logger.warning( + f"No EPGData found for dummy EPG source {epg_source.name} (ID: {custom_epg_id})" + ) + else: + # For non-dummy sources, try to find existing EPGData by tvg_id + if stream.tvg_id: + current_epg_data = EPGData.objects.filter( + tvg_id=stream.tvg_id, + epg_source=epg_source + ).first() + except EPGSource.DoesNotExist: + logger.warning( + f"Custom EPG source with ID {custom_epg_id} not found for existing channel, falling back to auto-match" + ) + # Fall back to auto-match by tvg_id + if stream.tvg_id and not force_dummy_epg: + current_epg_data = EPGData.objects.filter( + tvg_id=stream.tvg_id + ).first() + elif stream.tvg_id and not force_dummy_epg: + # Auto-match EPG by tvg_id (original behavior) + current_epg_data = EPGData.objects.filter( + tvg_id=stream.tvg_id + ).first() + # If force_dummy_epg is True and no custom_epg_id, current_epg_data stays None + + if existing_channel.epg_data != current_epg_data: + existing_channel.epg_data = current_epg_data + channel_updated = True + + # Handle 
stream profile updates for the channel + if stream_profile_to_assign and existing_channel.stream_profile != stream_profile_to_assign: + existing_channel.stream_profile = stream_profile_to_assign + channel_updated = True + + if channel_updated: + existing_channel.save() + channels_updated += 1 + logger.debug( + f"Updated auto channel: {existing_channel.channel_number} - {existing_channel.name}" + ) + + # Update channel profile memberships for existing channels + current_memberships = set( + ChannelProfileMembership.objects.filter( + channel=existing_channel, enabled=True + ).values_list("channel_profile_id", flat=True) + ) + + target_profile_ids = set( + profile.id for profile in profiles_to_assign + ) + + # Only update if memberships have changed + if current_memberships != target_profile_ids: + # Disable all current memberships + ChannelProfileMembership.objects.filter( + channel=existing_channel + ).update(enabled=False) + + # Enable/create memberships for target profiles + for profile in profiles_to_assign: + membership, created = ( + ChannelProfileMembership.objects.get_or_create( + channel_profile=profile, + channel=existing_channel, + defaults={"enabled": True}, + ) + ) + if not created and not membership.enabled: + membership.enabled = True + membership.save() + + logger.debug( + f"Updated profile memberships for auto channel: {existing_channel.name}" + ) + + else: + # Create new channel + # Find next available channel number + target_number = current_channel_number + while target_number in used_numbers: + target_number += 1 + + # Add this number to used_numbers + used_numbers.add(target_number) + + channel = Channel.objects.create( + channel_number=target_number, + name=new_name, + tvg_id=stream.tvg_id, + tvc_guide_stationid=tvc_guide_stationid, + channel_group=target_group, + user_level=0, + auto_created=True, + auto_created_by=account, + ) + + # Associate the stream with the channel + ChannelStream.objects.create( + channel=channel, stream=stream, order=0 + ) + + # Assign to correct profiles + memberships = [ + ChannelProfileMembership( + channel_profile=profile, channel=channel, enabled=True + ) + for profile in profiles_to_assign + ] + if memberships: + ChannelProfileMembership.objects.bulk_create(memberships) + + # Try to match EPG data + if custom_epg_id: + # Use the custom EPG specified in group settings (e.g., a dummy EPG) + from apps.epg.models import EPGSource + try: + epg_source = EPGSource.objects.get(id=custom_epg_id) + # For dummy EPGs, select the first (and typically only) EPGData entry from this source + if epg_source.source_type == 'dummy': + epg_data = EPGData.objects.filter( + epg_source=epg_source + ).first() + if epg_data: + channel.epg_data = epg_data + channel.save(update_fields=["epg_data"]) + else: + logger.warning( + f"No EPGData found for dummy EPG source {epg_source.name} (ID: {custom_epg_id})" + ) + else: + # For non-dummy sources, try to find existing EPGData by tvg_id + if stream.tvg_id: + epg_data = EPGData.objects.filter( + tvg_id=stream.tvg_id, + epg_source=epg_source + ).first() + if epg_data: + channel.epg_data = epg_data + channel.save(update_fields=["epg_data"]) + except EPGSource.DoesNotExist: + logger.warning( + f"Custom EPG source with ID {custom_epg_id} not found, falling back to auto-match" + ) + # Fall back to auto-match by tvg_id + if stream.tvg_id and not force_dummy_epg: + epg_data = EPGData.objects.filter( + tvg_id=stream.tvg_id + ).first() + if epg_data: + channel.epg_data = epg_data + channel.save(update_fields=["epg_data"]) + elif 
stream.tvg_id and not force_dummy_epg: + # Auto-match EPG by tvg_id (original behavior) + epg_data = EPGData.objects.filter( + tvg_id=stream.tvg_id + ).first() + if epg_data: + channel.epg_data = epg_data + channel.save(update_fields=["epg_data"]) + elif force_dummy_epg: + # Force dummy EPG with no custom EPG selected (set to None) + channel.epg_data = None + channel.save(update_fields=["epg_data"]) + + # Handle logo + if custom_logo_id: + # Use the custom logo specified in group settings + from apps.channels.models import Logo + try: + custom_logo = Logo.objects.get(id=custom_logo_id) + channel.logo = custom_logo + channel.save(update_fields=["logo"]) + except Logo.DoesNotExist: + logger.warning( + f"Custom logo with ID {custom_logo_id} not found, falling back to stream logo" + ) + # Fall back to stream logo if custom logo not found + if stream.logo_url: + logo, _ = Logo.objects.get_or_create( + url=stream.logo_url, + defaults={ + "name": stream.name or stream.tvg_id or "Unknown" + }, + ) + channel.logo = logo + channel.save(update_fields=["logo"]) + elif stream.logo_url: + from apps.channels.models import Logo + + logo, _ = Logo.objects.get_or_create( + url=stream.logo_url, + defaults={ + "name": stream.name or stream.tvg_id or "Unknown" + }, + ) + channel.logo = logo + channel.save(update_fields=["logo"]) + + # Handle stream profile assignment + if stream_profile_to_assign: + channel.stream_profile = stream_profile_to_assign + channel.save(update_fields=['stream_profile']) + channels_created += 1 + logger.debug( + f"Created auto channel: {channel.channel_number} - {channel.name}" + ) + + # Increment channel number for next iteration + current_channel_number += 1.0 + if current_channel_number % 1 != 0: # Has decimal + current_channel_number = int(current_channel_number) + 1.0 + + except Exception as e: + logger.error( + f"Error processing auto channel for stream {stream.name}: {str(e)}" + ) + continue + + # Delete channels for streams that no longer exist + channels_to_delete = [] + for stream_id, channel in existing_channel_map.items(): + if stream_id not in processed_stream_ids: + channels_to_delete.append(channel) + + if channels_to_delete: + deleted_count = len(channels_to_delete) + Channel.objects.filter( + id__in=[ch.id for ch in channels_to_delete] + ).delete() + channels_deleted += deleted_count + logger.debug( + f"Deleted {deleted_count} auto channels for removed streams" + ) + + # Additional cleanup: Remove auto-created channels that no longer have any valid streams + # This handles the case where streams were deleted due to stale retention policy + orphaned_channels = Channel.objects.filter( + auto_created=True, + auto_created_by=account + ).exclude( + # Exclude channels that still have valid stream associations + id__in=ChannelStream.objects.filter( + stream__m3u_account=account, + stream__isnull=False + ).values_list('channel_id', flat=True) + ) + + orphaned_count = orphaned_channels.count() + if orphaned_count > 0: + orphaned_channels.delete() + channels_deleted += orphaned_count + logger.info( + f"Deleted {orphaned_count} orphaned auto channels with no valid streams" + ) + + logger.info( + f"Auto channel sync complete for account {account.name}: {channels_created} created, {channels_updated} updated, {channels_deleted} deleted" + ) + return f"Auto sync: {channels_created} channels created, {channels_updated} updated, {channels_deleted} deleted" + + except Exception as e: + logger.error(f"Error in auto channel sync for account {account_id}: {str(e)}") + return f"Auto sync 
error: {str(e)}" + + +def get_transformed_credentials(account, profile=None): + """ + Get transformed credentials for XtreamCodes API calls. + + Args: + account: M3UAccount instance + profile: M3UAccountProfile instance (optional, if not provided will use primary profile) + + Returns: + tuple: (transformed_url, transformed_username, transformed_password) + """ + import re + import urllib.parse + + # If no profile is provided, find the primary active profile + if profile is None: + try: + from apps.m3u.models import M3UAccountProfile + profile = M3UAccountProfile.objects.filter( + m3u_account=account, + is_active=True + ).first() + if profile: + logger.debug(f"Using primary profile '{profile.name}' for URL transformation") + else: + logger.debug(f"No active profiles found for account {account.name}, using base credentials") + except Exception as e: + logger.warning(f"Could not get primary profile for account {account.name}: {e}") + profile = None + + base_url = account.server_url + base_username = account.username + base_password = account.password # Build a complete URL with credentials (similar to how IPTV URLs are structured) + # Format: http://server.com:port/live/username/password/1234.ts + if base_url and base_username and base_password: + # Remove trailing slash from server URL if present + clean_server_url = base_url.rstrip('/') + + # Build the complete URL with embedded credentials + complete_url = f"{clean_server_url}/live/{base_username}/{base_password}/1234.ts" + logger.debug(f"Built complete URL: {complete_url}") + + # Apply profile-specific transformations if profile is provided + if profile and profile.search_pattern and profile.replace_pattern: + try: + # Handle backreferences in the replacement pattern + safe_replace_pattern = re.sub(r'\$(\d+)', r'\\\1', profile.replace_pattern) + + # Apply transformation to the complete URL + transformed_complete_url = re.sub(profile.search_pattern, safe_replace_pattern, complete_url) + logger.info(f"Transformed complete URL: {complete_url} -> {transformed_complete_url}") + + # Extract components from the transformed URL + # Pattern: http://server.com:port/live/username/password/1234.ts + parsed_url = urllib.parse.urlparse(transformed_complete_url) + path_parts = [part for part in parsed_url.path.split('/') if part] + + if len(path_parts) >= 2: + # Extract username and password from path + transformed_username = path_parts[1] + transformed_password = path_parts[2] + + # Rebuild server URL without the username/password path + transformed_url = f"{parsed_url.scheme}://{parsed_url.netloc}" + if parsed_url.port: + transformed_url = f"{parsed_url.scheme}://{parsed_url.hostname}:{parsed_url.port}" + + logger.debug(f"Extracted transformed credentials:") + logger.debug(f" Server URL: {transformed_url}") + logger.debug(f" Username: {transformed_username}") + logger.debug(f" Password: {transformed_password}") + + return transformed_url, transformed_username, transformed_password + else: + logger.warning(f"Could not extract credentials from transformed URL: {transformed_complete_url}") + return base_url, base_username, base_password + + except Exception as e: + logger.error(f"Error transforming URL for profile {profile.name if profile else 'unknown'}: {e}") + return base_url, base_username, base_password + else: + # No profile or no transformation patterns + return base_url, base_username, base_password + else: + logger.warning(f"Missing credentials for account {account.name}") + return base_url, base_username, base_password + + +@shared_task +def 
refresh_account_info(profile_id): + """Refresh only the account information for a specific M3U profile.""" + if not acquire_task_lock("refresh_account_info", profile_id): + return f"Account info refresh task already running for profile_id={profile_id}." + + try: + from apps.m3u.models import M3UAccountProfile + import re + + profile = M3UAccountProfile.objects.get(id=profile_id) + account = profile.m3u_account + + if account.account_type != M3UAccount.Types.XC: + release_task_lock("refresh_account_info", profile_id) + return f"Profile {profile_id} belongs to account {account.id} which is not an XtreamCodes account." + + # Get transformed credentials using the helper function + transformed_url, transformed_username, transformed_password = get_transformed_credentials(account, profile) + + # Initialize XtreamCodes client with extracted/transformed credentials + client = XCClient( + transformed_url, + transformed_username, + transformed_password, + account.get_user_agent(), + ) # Authenticate and get account info + auth_result = client.authenticate() + if not auth_result: + error_msg = f"Authentication failed for profile {profile.name} ({profile_id})" + logger.error(error_msg) + + # Send error notification to frontend via websocket + send_websocket_update( + "updates", + "update", + { + "type": "account_info_refresh_error", + "profile_id": profile_id, + "profile_name": profile.name, + "error": "Authentication failed with the provided credentials", + "message": f"Failed to authenticate profile '{profile.name}'. Please check the credentials." + } + ) + + release_task_lock("refresh_account_info", profile_id) + return error_msg + + # Get account information + account_info = client.get_account_info() + + # Update only this specific profile with the new account info + if not profile.custom_properties: + profile.custom_properties = {} + profile.custom_properties.update(account_info) + profile.save() + + # Send success notification to frontend via websocket + send_websocket_update( + "updates", + "update", + { + "type": "account_info_refresh_success", + "profile_id": profile_id, + "profile_name": profile.name, + "message": f"Account information successfully refreshed for profile '{profile.name}'" + } + ) + + release_task_lock("refresh_account_info", profile_id) + return f"Account info refresh completed for profile {profile_id} ({profile.name})." + + except M3UAccountProfile.DoesNotExist: + error_msg = f"Profile {profile_id} not found" + logger.error(error_msg) + + send_websocket_update( + "updates", + "update", + { + "type": "account_refresh_error", + "profile_id": profile_id, + "error": "Profile not found", + "message": f"Profile {profile_id} not found" + } + ) + + release_task_lock("refresh_account_info", profile_id) + return error_msg + except Exception as e: + error_msg = f"Error refreshing account info for profile {profile_id}: {str(e)}" + logger.error(error_msg) + + send_websocket_update( + "updates", + "update", + { + "type": "account_refresh_error", + "profile_id": profile_id, + "error": str(e), + "message": f"Failed to refresh account info: {str(e)}" + } + ) + + release_task_lock("refresh_account_info", profile_id) + return error_msg @shared_task def refresh_single_m3u_account(account_id): """Splits M3U processing into chunks and dispatches them as parallel tasks.""" - if not acquire_task_lock('refresh_single_m3u_account', account_id): + if not acquire_task_lock("refresh_single_m3u_account", account_id): return f"Task already running for account_id={account_id}." 
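# Aside (a sketch, not part of this changeset): the refresh tasks above acquire a
# task lock up front and then call release_task_lock() on every return/except
# branch. Assuming acquire_task_lock()/release_task_lock() behave exactly as they
# are used in this module (boolean acquire, explicit release keyed by task name
# and object id), the same guarantee could be expressed once with a context
# manager so the release cannot be skipped by an early exit.
from contextlib import contextmanager

@contextmanager
def task_lock(task_name, object_id, acquire, release):
    """Yield True if the lock was taken; always release it on the way out."""
    acquired = acquire(task_name, object_id)
    try:
        yield acquired
    finally:
        if acquired:
            release(task_name, object_id)

# Hypothetical usage inside a task body:
# with task_lock("refresh_single_m3u_account", account_id,
#                acquire_task_lock, release_task_lock) as locked:
#     if not locked:
#         return f"Task already running for account_id={account_id}."
#     ...  # refresh work; the lock is released even if this raises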
# Record start time @@ -847,7 +2392,7 @@ def refresh_single_m3u_account(account_id): account = M3UAccount.objects.get(id=account_id, is_active=True) if not account.is_active: logger.debug(f"Account {account_id} is not active, skipping.") - release_task_lock('refresh_single_m3u_account', account_id) + release_task_lock("refresh_single_m3u_account", account_id) return # Set status to fetching @@ -855,17 +2400,28 @@ def refresh_single_m3u_account(account_id): account.save(update_fields=['status']) filters = list(account.filters.all()) + + # Check if VOD is enabled for this account + vod_enabled = False + if account.custom_properties: + custom_props = account.custom_properties or {} + vod_enabled = custom_props.get('enable_vod', False) + except M3UAccount.DoesNotExist: # The M3U account doesn't exist, so delete the periodic task if it exists - logger.warning(f"M3U account with ID {account_id} not found, but task was triggered. Cleaning up orphaned task.") + logger.warning( + f"M3U account with ID {account_id} not found, but task was triggered. Cleaning up orphaned task." + ) # Call the helper function to delete the task if delete_m3u_refresh_task_by_id(account_id): - logger.info(f"Successfully cleaned up orphaned task for M3U account {account_id}") + logger.info( + f"Successfully cleaned up orphaned task for M3U account {account_id}" + ) else: logger.debug(f"No orphaned task found for M3U account {account_id}") - release_task_lock('refresh_single_m3u_account', account_id) + release_task_lock("refresh_single_m3u_account", account_id) return f"M3UAccount with ID={account_id} not found or inactive, task cleaned up" # Fetch M3U lines and handle potential issues @@ -875,14 +2431,16 @@ def refresh_single_m3u_account(account_id): cache_path = os.path.join(m3u_dir, f"{account_id}.json") if os.path.exists(cache_path): try: - with open(cache_path, 'r') as file: + with open(cache_path, "r") as file: data = json.load(file) - extinf_data = data['extinf_data'] - groups = data['groups'] + extinf_data = data["extinf_data"] + groups = data["groups"] except json.JSONDecodeError as e: # Handle corrupted JSON file - logger.error(f"Error parsing cached M3U data for account {account_id}: {str(e)}") + logger.error( + f"Error parsing cached M3U data for account {account_id}: {str(e)}" + ) # Backup the corrupted file for potential analysis backup_path = f"{cache_path}.corrupted" @@ -890,7 +2448,9 @@ def refresh_single_m3u_account(account_id): os.rename(cache_path, backup_path) logger.info(f"Renamed corrupted cache file to {backup_path}") except OSError as rename_err: - logger.warning(f"Failed to rename corrupted cache file: {str(rename_err)}") + logger.warning( + f"Failed to rename corrupted cache file: {str(rename_err)}" + ) # Reset the data to empty structures extinf_data = [] @@ -908,8 +2468,10 @@ def refresh_single_m3u_account(account_id): # Check for completely empty result or missing groups if not result or result[1] is None: - logger.error(f"Failed to refresh M3U groups for account {account_id}: {result}") - release_task_lock('refresh_single_m3u_account', account_id) + logger.error( + f"Failed to refresh M3U groups for account {account_id}: {result}" + ) + release_task_lock("refresh_single_m3u_account", account_id) return "Failed to update m3u account - download failed or other error" extinf_data, groups = result @@ -926,15 +2488,23 @@ def refresh_single_m3u_account(account_id): logger.error(f"No streams found for non-XC account {account_id}") account.status = M3UAccount.Status.ERROR account.last_message = "No 
streams found in M3U source" - account.save(update_fields=['status', 'last_message']) - send_m3u_update(account_id, "parsing", 100, status="error", error="No streams found") + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account_id, "parsing", 100, status="error", error="No streams found" + ) except Exception as e: logger.error(f"Exception in refresh_m3u_groups: {str(e)}", exc_info=True) account.status = M3UAccount.Status.ERROR account.last_message = f"Error refreshing M3U groups: {str(e)}" - account.save(update_fields=['status', 'last_message']) - send_m3u_update(account_id, "parsing", 100, status="error", error=f"Error refreshing M3U groups: {str(e)}") - release_task_lock('refresh_single_m3u_account', account_id) + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account_id, + "parsing", + 100, + status="error", + error=f"Error refreshing M3U groups: {str(e)}", + ) + release_task_lock("refresh_single_m3u_account", account_id) return "Failed to update m3u account" # Only proceed with parsing if we actually have data and no errors were encountered @@ -949,37 +2519,120 @@ def refresh_single_m3u_account(account_id): logger.error(f"No data to process for account {account_id}") account.status = M3UAccount.Status.ERROR account.last_message = "No data available for processing" - account.save(update_fields=['status', 'last_message']) - send_m3u_update(account_id, "parsing", 100, status="error", error="No data available for processing") - release_task_lock('refresh_single_m3u_account', account_id) + account.save(update_fields=["status", "last_message"]) + send_m3u_update( + account_id, + "parsing", + 100, + status="error", + error="No data available for processing", + ) + release_task_lock("refresh_single_m3u_account", account_id) return "Failed to update m3u account, no data available" hash_keys = CoreSettings.get_m3u_hash_key().split(",") - existing_groups = {group.name: group.id for group in ChannelGroup.objects.filter( - m3u_account__m3u_account=account, # Filter by the M3UAccount - m3u_account__enabled=True # Filter by the enabled flag in the join table - )} + existing_groups = { + group.name: group.id + for group in ChannelGroup.objects.filter( + m3u_accounts__m3u_account=account, # Filter by the M3UAccount + m3u_accounts__enabled=True, # Filter by the enabled flag in the join table + ) + } try: # Set status to parsing account.status = M3UAccount.Status.PARSING - account.save(update_fields=['status']) + account.save(update_fields=["status"]) + + # Commit any pending transactions before threading + from django.db import transaction + transaction.commit() + + # Initialize stream counters + streams_created = 0 + streams_updated = 0 if account.account_type == M3UAccount.Types.STADNARD: - logger.debug(f"Processing Standard account ({account_id}) with groups: {existing_groups}") - # Break into batches and process in parallel - batches = [extinf_data[i:i + BATCH_SIZE] for i in range(0, len(extinf_data), BATCH_SIZE)] - task_group = group(process_m3u_batch.s(account_id, batch, existing_groups, hash_keys) for batch in batches) + logger.debug( + f"Processing Standard account ({account_id}) with groups: {existing_groups}" + ) + # Break into batches and process with threading - use global batch size + batches = [ + extinf_data[i : i + BATCH_SIZE] + for i in range(0, len(extinf_data), BATCH_SIZE) + ] + + logger.info(f"Processing {len(extinf_data)} streams in {len(batches)} thread batches") + + # Use 2 threads for optimal database connection 
handling + max_workers = min(2, len(batches)) + logger.debug(f"Using {max_workers} threads for processing") + + with ThreadPoolExecutor(max_workers=max_workers) as executor: + # Submit batch processing tasks using direct functions (now thread-safe) + future_to_batch = { + executor.submit(process_m3u_batch_direct, account_id, batch, existing_groups, hash_keys): i + for i, batch in enumerate(batches) + } + + completed_batches = 0 + total_batches = len(batches) + + # Process completed batches as they finish + for future in as_completed(future_to_batch): + batch_idx = future_to_batch[future] + try: + result = future.result() + completed_batches += 1 + + # Extract stream counts from result + if isinstance(result, str): + try: + created_match = re.search(r"(\d+) created", result) + updated_match = re.search(r"(\d+) updated", result) + if created_match and updated_match: + created_count = int(created_match.group(1)) + updated_count = int(updated_match.group(1)) + streams_created += created_count + streams_updated += updated_count + except (AttributeError, ValueError): + pass + + # Send progress update + progress = int((completed_batches / total_batches) * 100) + current_elapsed = time.time() - start_time + + if progress > 0: + estimated_total = (current_elapsed / progress) * 100 + time_remaining = max(0, estimated_total - current_elapsed) + else: + time_remaining = 0 + + send_m3u_update( + account_id, + "parsing", + progress, + elapsed_time=current_elapsed, + time_remaining=time_remaining, + streams_processed=streams_created + streams_updated, + ) + + logger.debug(f"Thread batch {completed_batches}/{total_batches} completed") + + except Exception as e: + logger.error(f"Error in thread batch {batch_idx}: {str(e)}") + completed_batches += 1 # Still count it to avoid hanging + + logger.info(f"Thread-based processing completed for account {account_id}") else: # For XC accounts, get the groups with their custom properties containing xc_id logger.debug(f"Processing XC account with groups: {existing_groups}") # Get the ChannelGroupM3UAccount entries with their custom_properties channel_group_relationships = ChannelGroupM3UAccount.objects.filter( - m3u_account=account, - enabled=True - ).select_related('channel_group') + m3u_account=account, enabled=True + ).select_related("channel_group") filtered_groups = {} for rel in channel_group_relationships: @@ -987,115 +2640,143 @@ def refresh_single_m3u_account(account_id): group_id = rel.channel_group.id # Load the custom properties with the xc_id - try: - custom_props = json.loads(rel.custom_properties) if rel.custom_properties else {} - if 'xc_id' in custom_props: - filtered_groups[group_name] = { - 'xc_id': custom_props['xc_id'], - 'channel_group_id': group_id - } - logger.debug(f"Added group {group_name} with xc_id {custom_props['xc_id']}") - else: - logger.warning(f"No xc_id found in custom properties for group {group_name}") - except (json.JSONDecodeError, KeyError) as e: - logger.error(f"Error parsing custom properties for group {group_name}: {str(e)}") - - logger.info(f"Filtered {len(filtered_groups)} groups for processing: {filtered_groups}") - - # Batch the groups - filtered_groups_list = list(filtered_groups.items()) - batches = [ - dict(filtered_groups_list[i:i + 2]) - for i in range(0, len(filtered_groups_list), 2) - ] - - logger.info(f"Created {len(batches)} batches for XC processing") - task_group = group(process_xc_category.s(account_id, batch, existing_groups, hash_keys) for batch in batches) - - total_batches = len(batches) - 
completed_batches = 0 - streams_processed = 0 # Track total streams processed - logger.debug(f"Dispatched {len(batches)} parallel tasks for account_id={account_id}.") - - # result = task_group.apply_async() - result = task_group.apply_async() - - # Wait for all tasks to complete and collect their result IDs - completed_task_ids = set() - while completed_batches < total_batches: - for async_result in result: - if async_result.ready() and async_result.id not in completed_task_ids: # If the task has completed and we haven't counted it - task_result = async_result.result # The result of the task - logger.debug(f"Task completed with result: {task_result}") - - # Extract stream counts from result string if available - if isinstance(task_result, str): - try: - created_match = re.search(r"(\d+) created", task_result) - updated_match = re.search(r"(\d+) updated", task_result) - - if created_match and updated_match: - created_count = int(created_match.group(1)) - updated_count = int(updated_match.group(1)) - streams_processed += created_count + updated_count - streams_created += created_count - streams_updated += updated_count - except (AttributeError, ValueError): - pass - - completed_batches += 1 - completed_task_ids.add(async_result.id) # Mark this task as processed - - # Calculate progress - progress = int((completed_batches / total_batches) * 100) - - # Calculate elapsed time and estimated remaining time - current_elapsed = time.time() - start_time - if progress > 0: - estimated_total = (current_elapsed / progress) * 100 - time_remaining = max(0, estimated_total - current_elapsed) - else: - time_remaining = 0 - - # Send progress update via Channels - # Don't send 100% because we want to clean up after - if progress == 100: - progress = 99 - - send_m3u_update( - account_id, - "parsing", - progress, - elapsed_time=current_elapsed, - time_remaining=time_remaining, - streams_processed=streams_processed + custom_props = rel.custom_properties or {} + if "xc_id" in custom_props: + filtered_groups[group_name] = { + "xc_id": custom_props["xc_id"], + "channel_group_id": group_id, + } + logger.debug( + f"Added group {group_name} with xc_id {custom_props['xc_id']}" + ) + else: + logger.warning( + f"No xc_id found in custom properties for group {group_name}" ) - # Optionally remove completed task from the group to prevent processing it again - result.remove(async_result) - else: - logger.trace(f"Task is still running.") + logger.info( + f"Filtered {len(filtered_groups)} groups for processing: {filtered_groups}" + ) + + # Collect all XC streams in a single API call and filter by enabled categories + logger.info("Fetching all XC streams from provider and filtering by enabled categories...") + all_xc_streams = collect_xc_streams(account_id, filtered_groups) + + if not all_xc_streams: + logger.warning("No streams collected from XC groups") + else: + # Now batch by stream count (like standard M3U processing) + batches = [ + all_xc_streams[i : i + BATCH_SIZE] + for i in range(0, len(all_xc_streams), BATCH_SIZE) + ] + + logger.info(f"Processing {len(all_xc_streams)} XC streams in {len(batches)} batches") + + # Use threading for XC stream processing - now with consistent batch sizes + max_workers = min(4, len(batches)) + logger.debug(f"Using {max_workers} threads for XC stream processing") + + with ThreadPoolExecutor(max_workers=max_workers) as executor: + # Submit stream batch processing tasks (reuse standard M3U processing) + future_to_batch = { + executor.submit(process_m3u_batch_direct, account_id, batch, 
existing_groups, hash_keys): i + for i, batch in enumerate(batches) + } + + completed_batches = 0 + total_batches = len(batches) + + # Process completed batches as they finish + for future in as_completed(future_to_batch): + batch_idx = future_to_batch[future] + try: + result = future.result() + completed_batches += 1 + + # Extract stream counts from result + if isinstance(result, str): + try: + created_match = re.search(r"(\d+) created", result) + updated_match = re.search(r"(\d+) updated", result) + if created_match and updated_match: + created_count = int(created_match.group(1)) + updated_count = int(updated_match.group(1)) + streams_created += created_count + streams_updated += updated_count + except (AttributeError, ValueError): + pass + + # Send progress update + progress = int((completed_batches / total_batches) * 100) + current_elapsed = time.time() - start_time + + if progress > 0: + estimated_total = (current_elapsed / progress) * 100 + time_remaining = max(0, estimated_total - current_elapsed) + else: + time_remaining = 0 + + send_m3u_update( + account_id, + "parsing", + progress, + elapsed_time=current_elapsed, + time_remaining=time_remaining, + streams_processed=streams_created + streams_updated, + ) + + logger.debug(f"XC thread batch {completed_batches}/{total_batches} completed") + + except Exception as e: + logger.error(f"Error in XC thread batch {batch_idx}: {str(e)}") + completed_batches += 1 # Still count it to avoid hanging + + logger.info(f"XC thread-based processing completed for account {account_id}") # Ensure all database transactions are committed before cleanup - logger.info(f"All {total_batches} tasks completed, ensuring DB transactions are committed before cleanup") + logger.info( + f"All thread processing completed, ensuring DB transactions are committed before cleanup" + ) # Force a simple DB query to ensure connection sync - Stream.objects.filter(id=-1).exists() # This will never find anything but ensures DB sync + Stream.objects.filter( + id=-1 + ).exists() # This will never find anything but ensures DB sync # Now run cleanup streams_deleted = cleanup_streams(account_id, refresh_start_timestamp) + # Run auto channel sync after successful refresh + auto_sync_message = "" + try: + sync_result = sync_auto_channels( + account_id, scan_start_time=str(refresh_start_timestamp) + ) + logger.info( + f"Auto channel sync result for account {account_id}: {sync_result}" + ) + if sync_result and "created" in sync_result: + auto_sync_message = f" {sync_result}." + except Exception as e: + logger.error( + f"Error running auto channel sync for account {account_id}: {str(e)}" + ) + # Calculate elapsed time elapsed_time = time.time() - start_time + # Calculate total streams processed + streams_processed = streams_created + streams_updated + # Set status to success and update timestamp BEFORE sending the final update account.status = M3UAccount.Status.SUCCESS account.last_message = ( f"Processing completed in {elapsed_time:.1f} seconds. " f"Streams: {streams_created} created, {streams_updated} updated, {streams_deleted} removed. " - f"Total processed: {streams_processed}." 
+ f"Total processed: {streams_processed}.{auto_sync_message}" ) account.updated_at = timezone.now() - account.save(update_fields=['status', 'last_message', 'updated_at']) + account.save(update_fields=["status", "last_message", "updated_at"]) # Send final update with complete metrics and explicitly include success status send_m3u_update( @@ -1109,21 +2790,41 @@ def refresh_single_m3u_account(account_id): streams_created=streams_created, streams_updated=streams_updated, streams_deleted=streams_deleted, - message=account.last_message + message=account.last_message, ) + # Trigger VOD refresh if enabled and account is XtreamCodes type + if vod_enabled and account.account_type == M3UAccount.Types.XC: + logger.info(f"VOD is enabled for account {account_id}, triggering VOD refresh") + try: + from apps.vod.tasks import refresh_vod_content + refresh_vod_content.delay(account_id) + logger.info(f"VOD refresh task queued for account {account_id}") + except Exception as e: + logger.error(f"Failed to queue VOD refresh for account {account_id}: {str(e)}") + except Exception as e: logger.error(f"Error processing M3U for account {account_id}: {str(e)}") account.status = M3UAccount.Status.ERROR account.last_message = f"Error processing M3U: {str(e)}" - account.save(update_fields=['status', 'last_message']) + account.save(update_fields=["status", "last_message"]) raise # Re-raise the exception for Celery to handle - release_task_lock('refresh_single_m3u_account', account_id) + release_task_lock("refresh_single_m3u_account", account_id) # Aggressive garbage collection - del existing_groups, extinf_data, groups, batches + # Only delete variables if they exist + if 'existing_groups' in locals(): + del existing_groups + if 'extinf_data' in locals(): + del extinf_data + if 'groups' in locals(): + del groups + if 'batches' in locals(): + del batches + from core.utils import cleanup_memory + cleanup_memory(log_usage=True, force_collection=True) # Clean up cache file since we've fully processed it @@ -1132,6 +2833,7 @@ def refresh_single_m3u_account(account_id): return f"Dispatched jobs complete." + def send_m3u_update(account_id, action, progress, **kwargs): # Start with the base data dictionary data = { @@ -1154,11 +2856,7 @@ def send_m3u_update(account_id, action, progress, **kwargs): # Add the additional key-value pairs from kwargs data.update(kwargs) - - # Use the standardized function with memory management - # Enable garbage collection for certain operations - collect_garbage = action == "parsing" and progress % 25 == 0 - send_websocket_update('updates', 'update', data, collect_garbage=collect_garbage) + send_websocket_update("updates", "update", data, collect_garbage=False) # Explicitly clear data reference to help garbage collection data = None diff --git a/apps/m3u/utils.py b/apps/m3u/utils.py index 784188ba..598ef713 100644 --- a/apps/m3u/utils.py +++ b/apps/m3u/utils.py @@ -1,9 +1,40 @@ # apps/m3u/utils.py import threading +import logging +from django.db import models lock = threading.Lock() # Dictionary to track usage: {m3u_account_id: current_usage} active_streams_map = {} +logger = logging.getLogger(__name__) + + +def normalize_stream_url(url): + """ + Normalize stream URLs for compatibility with FFmpeg. + + Handles VLC-specific syntax like udp://@239.0.0.1:1234 by removing the @ symbol. + FFmpeg doesn't recognize the @ prefix for multicast addresses. 
+ + Args: + url (str): The stream URL to normalize + + Returns: + str: The normalized URL + """ + if not url: + return url + + # Handle VLC-style UDP multicast URLs: udp://@239.0.0.1:1234 -> udp://239.0.0.1:1234 + # The @ symbol in VLC means "listen on all interfaces" but FFmpeg doesn't use this syntax + if url.startswith('udp://@'): + normalized = url.replace('udp://@', 'udp://', 1) + logger.debug(f"Normalized VLC-style UDP URL: {url} -> {normalized}") + return normalized + + # Could add other normalizations here in the future (rtp://@, etc.) + return url + def increment_stream_count(account): with lock: @@ -24,3 +55,64 @@ def decrement_stream_count(account): active_streams_map[account.id] = current_usage account.active_streams = current_usage account.save(update_fields=['active_streams']) + + +def calculate_tuner_count(minimum=1, unlimited_default=10): + """ + Calculate tuner/connection count from active M3U profiles and custom streams. + This is the centralized function used by both HDHR and XtreamCodes APIs. + + Args: + minimum (int): Minimum number to return (default: 1) + unlimited_default (int): Default value when unlimited profiles exist (default: 10) + + Returns: + int: Calculated tuner/connection count + """ + try: + from apps.m3u.models import M3UAccountProfile + from apps.channels.models import Stream + + # Calculate tuner count from active profiles from active M3U accounts (excluding default "custom Default" profile) + profiles = M3UAccountProfile.objects.filter( + is_active=True, + m3u_account__is_active=True, # Only include profiles from enabled M3U accounts + ).exclude(id=1) + + # 1. Check if any profile has unlimited streams (max_streams=0) + has_unlimited = profiles.filter(max_streams=0).exists() + + # 2. Calculate tuner count from limited profiles + limited_tuners = 0 + if not has_unlimited: + limited_tuners = ( + profiles.filter(max_streams__gt=0) + .aggregate(total=models.Sum("max_streams")) + .get("total", 0) + or 0 + ) + + # 3. Add custom stream count to tuner count + custom_stream_count = Stream.objects.filter(is_custom=True).count() + logger.debug(f"Found {custom_stream_count} custom streams") + + # 4. Calculate final tuner count + if has_unlimited: + # If there are unlimited profiles, start with unlimited_default plus custom streams + tuner_count = unlimited_default + custom_stream_count + else: + # Otherwise use the limited profile sum plus custom streams + tuner_count = limited_tuners + custom_stream_count + + # 5. 
Ensure minimum number + tuner_count = max(minimum, tuner_count) + + logger.debug( + f"Calculated tuner count: {tuner_count} (limited profiles: {limited_tuners}, custom streams: {custom_stream_count}, unlimited: {has_unlimited})" + ) + + return tuner_count + + except Exception as e: + logger.error(f"Error calculating tuner count: {e}") + return minimum # Fallback to minimum value diff --git a/apps/m3u/views.py b/apps/m3u/views.py index f69dd6c4..0fab8c10 100644 --- a/apps/m3u/views.py +++ b/apps/m3u/views.py @@ -3,6 +3,7 @@ from django.views import View from django.utils.decorators import method_decorator from django.contrib.auth.decorators import login_required from django.views.decorators.csrf import csrf_exempt +from django.http import JsonResponse from apps.m3u.models import M3UAccount import json diff --git a/apps/output/urls.py b/apps/output/urls.py index 92774adb..dc023ed7 100644 --- a/apps/output/urls.py +++ b/apps/output/urls.py @@ -1,16 +1,14 @@ from django.urls import path, re_path, include -from .views import generate_m3u, generate_epg +from .views import m3u_endpoint, epg_endpoint, xc_get, xc_movie_stream, xc_series_stream from core.views import stream_view -app_name = 'output' +app_name = "output" urlpatterns = [ # Allow `/m3u`, `/m3u/`, `/m3u/profile_name`, and `/m3u/profile_name/` - re_path(r'^m3u(?:/(?P[^/]+))?/?$', generate_m3u, name='generate_m3u'), - + re_path(r"^m3u(?:/(?P[^/]+))?/?$", m3u_endpoint, name="m3u_endpoint"), # Allow `/epg`, `/epg/`, `/epg/profile_name`, and `/epg/profile_name/` - re_path(r'^epg(?:/(?P[^/]+))?/?$', generate_epg, name='generate_epg'), - + re_path(r"^epg(?:/(?P[^/]+))?/?$", epg_endpoint, name="epg_endpoint"), # Allow both `/stream/` and `/stream//` - re_path(r'^stream/(?P[0-9a-fA-F\-]+)/?$', stream_view, name='stream'), + re_path(r"^stream/(?P[0-9a-fA-F\-]+)/?$", stream_view, name="stream"), ] diff --git a/apps/output/views.py b/apps/output/views.py index ff02560c..df18b349 100644 --- a/apps/output/views.py +++ b/apps/output/views.py @@ -1,36 +1,95 @@ -from django.http import HttpResponse, HttpResponseForbidden +import ipaddress +from django.http import HttpResponse, JsonResponse, Http404, HttpResponseForbidden, StreamingHttpResponse +from rest_framework.response import Response from django.urls import reverse +from apps.channels.models import Channel, ChannelProfile, ChannelGroup from django.views.decorators.csrf import csrf_exempt from django.views.decorators.http import require_http_methods -from apps.channels.models import Channel, ChannelProfile from apps.epg.models import ProgramData -from django.utils import timezone -from django.views.decorators.csrf import csrf_exempt +from apps.accounts.models import User +from core.models import CoreSettings, NETWORK_ACCESS +from dispatcharr.utils import network_access_allowed +from django.utils import timezone as django_timezone +from django.shortcuts import get_object_or_404 from datetime import datetime, timedelta -import re import html # Add this import for XML escaping +import json # Add this import for JSON parsing +import time # Add this import for keep-alive delays +from tzlocal import get_localzone +from urllib.parse import urlparse +import base64 +import logging +from django.db.models.functions import Lower +import os +from apps.m3u.utils import calculate_tuner_count +import regex + +logger = logging.getLogger(__name__) + +def m3u_endpoint(request, profile_name=None, user=None): + if not network_access_allowed(request, "M3U_EPG"): + return JsonResponse({"error": "Forbidden"}, status=403) + 
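# Illustrative only: one way an allow-list gate like network_access_allowed()
# can be built with the stdlib ipaddress module (imported at the top of this
# file). This is an assumption for illustration, not the implementation in
# dispatcharr.utils -- the real helper also takes a settings key such as "M3U_EPG".
import ipaddress

def client_in_allowed_networks(client_ip, allowed_cidrs):
    """Return True if client_ip falls inside any of the allowed CIDR ranges."""
    try:
        addr = ipaddress.ip_address(client_ip)
    except ValueError:
        return False
    return any(addr in ipaddress.ip_network(cidr, strict=False) for cidr in allowed_cidrs)

# Example: allow LAN clients only.
# client_in_allowed_networks("192.168.1.50", ["192.168.0.0/16", "10.0.0.0/8"])  -> True
# client_in_allowed_networks("203.0.113.7", ["192.168.0.0/16", "10.0.0.0/8"])   -> False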
+ return generate_m3u(request, profile_name, user) + +def epg_endpoint(request, profile_name=None, user=None): + if not network_access_allowed(request, "M3U_EPG"): + return JsonResponse({"error": "Forbidden"}, status=403) + + return generate_epg(request, profile_name, user) @csrf_exempt @require_http_methods(["GET", "POST"]) -def generate_m3u(request, profile_name=None): +def generate_m3u(request, profile_name=None, user=None): """ Dynamically generate an M3U file from channels. The stream URL now points to the new stream_view that uses StreamProfile. Supports both GET and POST methods for compatibility with IPTVSmarters. """ # Check if this is a POST request and the body is not empty (which we don't want to allow) + logger.debug("Generating M3U for profile: %s, user: %s", profile_name, user.username if user else "Anonymous") + # Check if this is a POST request with data (which we don't want to allow) if request.method == "POST" and request.body: if request.body.decode() != '{}': return HttpResponseForbidden("POST requests with body are not allowed, body is: {}".format(request.body.decode())) - if profile_name is not None: - channel_profile = ChannelProfile.objects.get(name=profile_name) - channels = Channel.objects.filter( - channelprofilemembership__channel_profile=channel_profile, - channelprofilemembership__enabled=True - ).order_by('channel_number') + if user is not None: + if user.user_level == 0: + user_profile_count = user.channel_profiles.count() + + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channels based on user_level + channels = Channel.objects.filter(user_level__lte=user.user_level).order_by("channel_number") + else: + # User has specific limited profiles assigned + filters = { + "channelprofilemembership__enabled": True, + "user_level__lte": user.user_level, + "channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + channels = Channel.objects.filter(**filters).distinct().order_by("channel_number") + else: + channels = Channel.objects.filter(user_level__lte=user.user_level).order_by( + "channel_number" + ) + else: - channels = Channel.objects.order_by('channel_number') + if profile_name is not None: + channel_profile = ChannelProfile.objects.get(name=profile_name) + channels = Channel.objects.filter( + channelprofilemembership__channel_profile=channel_profile, + channelprofilemembership__enabled=True + ).order_by('channel_number') + else: + if profile_name is not None: + channel_profile = ChannelProfile.objects.get(name=profile_name) + channels = Channel.objects.filter( + channelprofilemembership__channel_profile=channel_profile, + channelprofilemembership__enabled=True, + ).order_by("channel_number") + else: + channels = Channel.objects.order_by("channel_number") # Check if the request wants to use direct logo URLs instead of cache use_cached_logos = request.GET.get('cachedlogos', 'true').lower() != 'false' @@ -42,7 +101,22 @@ def generate_m3u(request, profile_name=None): # Options: 'channel_number' (default), 'tvg_id', 'gracenote' tvg_id_source = request.GET.get('tvg_id_source', 'channel_number').lower() - m3u_content = "#EXTM3U\n" + # Build EPG URL with query parameters if needed + epg_base_url = build_absolute_uri_with_port(request, reverse('output:epg_endpoint', args=[profile_name]) if profile_name else reverse('output:epg_endpoint')) + + # Optionally preserve certain query parameters + preserved_params = ['tvg_id_source', 'cachedlogos', 'days'] + 
query_params = {k: v for k, v in request.GET.items() if k in preserved_params} + if query_params: + from urllib.parse import urlencode + epg_url = f"{epg_base_url}?{urlencode(query_params)}" + else: + epg_url = epg_base_url + + # Add x-tvg-url and url-tvg attribute for EPG URL + m3u_content = f'#EXTM3U x-tvg-url="{epg_url}" url-tvg="{epg_url}"\n' + + # Start building M3U content for channel in channels: group_title = channel.channel_group.name if channel.channel_group else "Default" @@ -70,7 +144,7 @@ def generate_m3u(request, profile_name=None): if channel.logo: if use_cached_logos: # Use cached logo as before - tvg_logo = request.build_absolute_uri(reverse('api:channels:logo-cache', args=[channel.logo.id])) + tvg_logo = build_absolute_uri_with_port(request, reverse('api:channels:logo-cache', args=[channel.logo.id])) else: # Try to find direct logo URL from channel's streams direct_logo = channel.logo.url if channel.logo.url.startswith(('http://', 'https://')) else None @@ -78,12 +152,14 @@ def generate_m3u(request, profile_name=None): if direct_logo: tvg_logo = direct_logo else: - tvg_logo = request.build_absolute_uri(reverse('api:channels:logo-cache', args=[channel.logo.id])) + tvg_logo = build_absolute_uri_with_port(request, reverse('api:channels:logo-cache', args=[channel.logo.id])) # create possible gracenote id insertion tvc_guide_stationid = "" if channel.tvc_guide_stationid: - tvc_guide_stationid = f'tvc-guide-stationid="{channel.tvc_guide_stationid}" ' + tvc_guide_stationid = ( + f'tvc-guide-stationid="{channel.tvc_guide_stationid}" ' + ) extinf_line = ( f'#EXTINF:-1 tvg-id="{tvg_id}" tvg-name="{tvg_name}" tvg-logo="{tvg_logo}" ' @@ -93,7 +169,7 @@ def generate_m3u(request, profile_name=None): # Determine the stream URL based on the direct parameter if use_direct_urls: # Try to get the first stream's direct URL - first_stream = channel.streams.first() + first_stream = channel.streams.order_by('channelstream__order').first() if first_stream and first_stream.url: # Use the direct stream URL stream_url = first_stream.url @@ -109,65 +185,144 @@ def generate_m3u(request, profile_name=None): m3u_content += extinf_line + stream_url + "\n" response = HttpResponse(m3u_content, content_type="audio/x-mpegurl") - response['Content-Disposition'] = 'attachment; filename="channels.m3u"' + response["Content-Disposition"] = 'attachment; filename="channels.m3u"' return response -def generate_dummy_epg(channel_id, channel_name, xml_lines=None, num_days=1, program_length_hours=4): + +def generate_fallback_programs(channel_id, channel_name, now, num_days, program_length_hours, fallback_title, fallback_description): """ - Generate dummy EPG programs for channels without EPG data. - Creates program blocks for a specified number of days. + Generate dummy programs using custom fallback templates when patterns don't match. 
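# Sketch of the header line built just below: selected query parameters from
# the M3U request are re-encoded onto the advertised EPG URL so clients that
# honour x-tvg-url/url-tvg fetch the guide with the same options. The host
# name and parameter values here are made up for illustration.
from urllib.parse import urlencode

preserved = {"tvg_id_source": "tvg_id", "days": "7"}  # subset of request.GET
epg_url = "http://dispatcharr.example/epg" + (f"?{urlencode(preserved)}" if preserved else "")
m3u_header = f'#EXTM3U x-tvg-url="{epg_url}" url-tvg="{epg_url}"'
# -> #EXTM3U x-tvg-url="http://dispatcharr.example/epg?tvg_id_source=tvg_id&days=7" ...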
Args: - channel_id: The channel ID to use in the program entries - channel_name: The name of the channel to use in program titles - xml_lines: Optional list to append lines to, otherwise returns new list - num_days: Number of days to generate EPG data for (default: 1) - program_length_hours: Length of each program block in hours (default: 4) + channel_id: Channel ID for the programs + channel_name: Channel name to use as fallback in templates + now: Current datetime (in UTC) + num_days: Number of days to generate programs for + program_length_hours: Length of each program in hours + fallback_title: Custom fallback title template (empty string if not provided) + fallback_description: Custom fallback description template (empty string if not provided) Returns: - List of XML lines for the dummy EPG entries + List of program dictionaries """ - if xml_lines is None: - xml_lines = [] + programs = [] + # Use custom fallback title or channel name as default + title = fallback_title if fallback_title else channel_name + + # Use custom fallback description or a simple default message + if fallback_description: + description = fallback_description + else: + description = f"EPG information is currently unavailable for {channel_name}" + + # Create programs for each day + for day in range(num_days): + day_start = now + timedelta(days=day) + + # Create programs with specified length throughout the day + for hour_offset in range(0, 24, program_length_hours): + # Calculate program start and end times + start_time = day_start + timedelta(hours=hour_offset) + end_time = start_time + timedelta(hours=program_length_hours) + + programs.append({ + "channel_id": channel_id, + "start_time": start_time, + "end_time": end_time, + "title": title, + "description": description, + }) + + return programs + + +def generate_dummy_programs(channel_id, channel_name, num_days=1, program_length_hours=4, epg_source=None): + """ + Generate dummy EPG programs for channels. + + If epg_source is provided and it's a custom dummy EPG with patterns, + use those patterns to generate programs from the channel title. + Otherwise, generate default dummy programs. 
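# Example, with hypothetical values, of what generate_fallback_programs()
# above returns: one dict per block, spaced program_length_hours apart for
# each requested day. An empty fallback title falls back to the channel name.
now = django_timezone.now().replace(minute=0, second=0, microsecond=0)
programs = generate_fallback_programs(
    channel_id="101",
    channel_name="NHL 01",
    now=now,
    num_days=1,
    program_length_hours=4,
    fallback_title="",
    fallback_description="No guide data available",
)
# programs[0] == {"channel_id": "101", "start_time": now, "end_time": now + 4h,
#                 "title": "NHL 01", "description": "No guide data available"}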
+ + Args: + channel_id: Channel ID for the programs + channel_name: Channel title/name + num_days: Number of days to generate programs for + program_length_hours: Length of each program in hours + epg_source: Optional EPGSource for custom dummy EPG with patterns + + Returns: + List of program dictionaries + """ # Get current time rounded to hour - now = timezone.now() + now = django_timezone.now() now = now.replace(minute=0, second=0, microsecond=0) - # Humorous program descriptions based on time of day + # Check if this is a custom dummy EPG with regex patterns + if epg_source and epg_source.source_type == 'dummy' and epg_source.custom_properties: + custom_programs = generate_custom_dummy_programs( + channel_id, channel_name, now, num_days, + epg_source.custom_properties + ) + # If custom generation succeeded, return those programs + # If it returned empty (pattern didn't match), check for custom fallback templates + if custom_programs: + return custom_programs + else: + logger.info(f"Custom pattern didn't match for '{channel_name}', checking for custom fallback templates") + + # Check if custom fallback templates are provided + custom_props = epg_source.custom_properties + fallback_title = custom_props.get('fallback_title_template', '').strip() + fallback_description = custom_props.get('fallback_description_template', '').strip() + + # If custom fallback templates exist, use them instead of default + if fallback_title or fallback_description: + logger.info(f"Using custom fallback templates for '{channel_name}'") + return generate_fallback_programs( + channel_id, channel_name, now, num_days, + program_length_hours, fallback_title, fallback_description + ) + else: + logger.info(f"No custom fallback templates found, using default dummy EPG") + + # Default humorous program descriptions based on time of day time_descriptions = { (0, 4): [ f"Late Night with {channel_name} - Where insomniacs unite!", f"The 'Why Am I Still Awake?' Show on {channel_name}", - f"Counting Sheep - A {channel_name} production for the sleepless" + f"Counting Sheep - A {channel_name} production for the sleepless", ], (4, 8): [ f"Dawn Patrol - Rise and shine with {channel_name}!", f"Early Bird Special - Coffee not included", - f"Morning Zombies - Before coffee viewing on {channel_name}" + f"Morning Zombies - Before coffee viewing on {channel_name}", ], (8, 12): [ f"Mid-Morning Meetings - Pretend you're paying attention while watching {channel_name}", f"The 'I Should Be Working' Hour on {channel_name}", - f"Productivity Killer - {channel_name}'s daytime programming" + f"Productivity Killer - {channel_name}'s daytime programming", ], (12, 16): [ f"Lunchtime Laziness with {channel_name}", f"The Afternoon Slump - Brought to you by {channel_name}", - f"Post-Lunch Food Coma Theater on {channel_name}" + f"Post-Lunch Food Coma Theater on {channel_name}", ], (16, 20): [ f"Rush Hour - {channel_name}'s alternative to traffic", f"The 'What's For Dinner?' 
Debate on {channel_name}", - f"Evening Escapism - {channel_name}'s remedy for reality" + f"Evening Escapism - {channel_name}'s remedy for reality", ], (20, 24): [ f"Prime Time Placeholder - {channel_name}'s finest not-programming", f"The 'Netflix Was Too Complicated' Show on {channel_name}", - f"Family Argument Avoider - Courtesy of {channel_name}" - ] + f"Family Argument Avoider - Courtesy of {channel_name}", + ], } + programs = [] + # Create programs for each day for day in range(num_days): day_start = now + timedelta(days=day) @@ -193,111 +348,851 @@ def generate_dummy_epg(channel_id, channel_name, xml_lines=None, num_days=1, pro # Fallback description if somehow no range matches description = f"Placeholder program for {channel_name} - EPG data went on vacation" - # Format times in XMLTV format - start_str = start_time.strftime("%Y%m%d%H%M%S %z") - stop_str = end_time.strftime("%Y%m%d%H%M%S %z") + programs.append({ + "channel_id": channel_id, + "start_time": start_time, + "end_time": end_time, + "title": channel_name, + "description": description, + }) - # Create program entry with escaped channel name - xml_lines.append(f' ') - xml_lines.append(f' {html.escape(channel_name)}') - xml_lines.append(f' {html.escape(description)}') - xml_lines.append(f' ') + return programs + + +def generate_custom_dummy_programs(channel_id, channel_name, now, num_days, custom_properties): + """ + Generate programs using custom dummy EPG regex patterns. + + Extracts information from channel title using regex patterns and generates + programs based on the extracted data. + + TIMEZONE HANDLING: + ------------------ + The timezone parameter specifies the timezone of the event times in your channel + titles using standard timezone names (e.g., 'US/Eastern', 'US/Pacific', 'Europe/London'). + DST (Daylight Saving Time) is handled automatically by pytz. + + Examples: + - Channel: "NHL 01: Bruins VS Maple Leafs @ 8:00PM ET" + - Set timezone = "US/Eastern" + - In October (DST): 8:00PM EDT β†’ 12:00AM UTC (automatically uses UTC-4) + - In January (no DST): 8:00PM EST β†’ 1:00AM UTC (automatically uses UTC-5) + + Args: + channel_id: Channel ID for the programs + channel_name: Channel title to parse + now: Current datetime (in UTC) + num_days: Number of days to generate programs for + custom_properties: Dict with title_pattern, time_pattern, templates, etc. 
+ - timezone: Timezone name (e.g., 'US/Eastern') + + Returns: + List of program dictionaries with start_time/end_time in UTC + """ + import pytz + + logger.info(f"Generating custom dummy programs for channel: {channel_name}") + + # Extract patterns from custom properties + title_pattern = custom_properties.get('title_pattern', '') + time_pattern = custom_properties.get('time_pattern', '') + date_pattern = custom_properties.get('date_pattern', '') + + # Get timezone name (e.g., 'US/Eastern', 'US/Pacific', 'Europe/London') + timezone_value = custom_properties.get('timezone', 'UTC') + output_timezone_value = custom_properties.get('output_timezone', '') # Optional: display times in different timezone + program_duration = custom_properties.get('program_duration', 180) # Minutes + title_template = custom_properties.get('title_template', '') + description_template = custom_properties.get('description_template', '') + + # Templates for upcoming/ended programs + upcoming_title_template = custom_properties.get('upcoming_title_template', '') + upcoming_description_template = custom_properties.get('upcoming_description_template', '') + ended_title_template = custom_properties.get('ended_title_template', '') + ended_description_template = custom_properties.get('ended_description_template', '') + + # Image URL templates + channel_logo_url_template = custom_properties.get('channel_logo_url', '') + program_poster_url_template = custom_properties.get('program_poster_url', '') + + # EPG metadata options + category_string = custom_properties.get('category', '') + # Split comma-separated categories and strip whitespace, filter out empty strings + categories = [cat.strip() for cat in category_string.split(',') if cat.strip()] if category_string else [] + include_date = custom_properties.get('include_date', True) + include_live = custom_properties.get('include_live', False) + include_new = custom_properties.get('include_new', False) + + # Parse timezone name + try: + source_tz = pytz.timezone(timezone_value) + logger.debug(f"Using timezone: {timezone_value} (DST will be handled automatically)") + except pytz.exceptions.UnknownTimeZoneError: + logger.warning(f"Unknown timezone: {timezone_value}, defaulting to UTC") + source_tz = pytz.utc + + # Parse output timezone if provided (for display purposes) + output_tz = None + if output_timezone_value: + try: + output_tz = pytz.timezone(output_timezone_value) + logger.debug(f"Using output timezone for display: {output_timezone_value}") + except pytz.exceptions.UnknownTimeZoneError: + logger.warning(f"Unknown output timezone: {output_timezone_value}, will use source timezone") + output_tz = None + + if not title_pattern: + logger.warning(f"No title_pattern in custom_properties, falling back to default") + return [] # Return empty, will use default + + logger.debug(f"Title pattern from DB: {repr(title_pattern)}") + + # Convert PCRE/JavaScript named groups (?) 
to Python format (?P) + # This handles patterns created with JavaScript regex syntax + # Use negative lookahead to avoid matching lookbehind (?<=) and negative lookbehind (?]+)>', r'(?P<\1>', title_pattern) + logger.debug(f"Converted title pattern: {repr(title_pattern)}") + + # Compile regex patterns using the enhanced regex module + # (supports variable-width lookbehinds like JavaScript) + try: + title_regex = regex.compile(title_pattern) + except Exception as e: + logger.error(f"Invalid title regex pattern after conversion: {e}") + logger.error(f"Pattern was: {repr(title_pattern)}") + return [] + + time_regex = None + if time_pattern: + # Convert PCRE/JavaScript named groups to Python format + # Use negative lookahead to avoid matching lookbehind (?<=) and negative lookbehind (?]+)>', r'(?P<\1>', time_pattern) + logger.debug(f"Converted time pattern: {repr(time_pattern)}") + try: + time_regex = regex.compile(time_pattern) + except Exception as e: + logger.warning(f"Invalid time regex pattern after conversion: {e}") + logger.warning(f"Pattern was: {repr(time_pattern)}") + + # Compile date regex if provided + date_regex = None + if date_pattern: + # Convert PCRE/JavaScript named groups to Python format + # Use negative lookahead to avoid matching lookbehind (?<=) and negative lookbehind (?]+)>', r'(?P<\1>', date_pattern) + logger.debug(f"Converted date pattern: {repr(date_pattern)}") + try: + date_regex = regex.compile(date_pattern) + except Exception as e: + logger.warning(f"Invalid date regex pattern after conversion: {e}") + logger.warning(f"Pattern was: {repr(date_pattern)}") + + # Try to match the channel name with the title pattern + # Use search() instead of match() to match JavaScript behavior where .match() searches anywhere in the string + title_match = title_regex.search(channel_name) + if not title_match: + logger.debug(f"Channel name '{channel_name}' doesn't match title pattern") + return [] # Return empty, will use default + + groups = title_match.groupdict() + logger.debug(f"Title pattern matched. 
Groups: {groups}") + + # Helper function to format template with matched groups + def format_template(template, groups, url_encode=False): + """Replace {groupname} placeholders with matched group values + + Args: + template: Template string with {groupname} placeholders + groups: Dict of group names to values + url_encode: If True, URL encode the group values for safe use in URLs + """ + if not template: + return '' + result = template + for key, value in groups.items(): + if url_encode and value: + # URL encode the value to handle spaces and special characters + from urllib.parse import quote + encoded_value = quote(str(value), safe='') + result = result.replace(f'{{{key}}}', encoded_value) + else: + result = result.replace(f'{{{key}}}', str(value) if value else '') + return result + + # Extract time from title if time pattern exists + time_info = None + time_groups = {} + if time_regex: + time_match = time_regex.search(channel_name) + if time_match: + time_groups = time_match.groupdict() + try: + hour = int(time_groups.get('hour')) + # Handle optional minute group - could be None if not captured + minute_value = time_groups.get('minute') + minute = int(minute_value) if minute_value is not None else 0 + ampm = time_groups.get('ampm') + ampm = ampm.lower() if ampm else None + + # Determine if this is 12-hour or 24-hour format + if ampm in ('am', 'pm'): + # 12-hour format: convert to 24-hour + if ampm == 'pm' and hour != 12: + hour += 12 + elif ampm == 'am' and hour == 12: + hour = 0 + logger.debug(f"Extracted time (12-hour): {hour}:{minute:02d} {ampm}") + else: + # 24-hour format: hour is already in 24-hour format + # Validate that it's actually a 24-hour time (0-23) + if hour > 23: + logger.warning(f"Invalid 24-hour time: {hour}. Must be 0-23.") + hour = hour % 24 # Wrap around just in case + logger.debug(f"Extracted time (24-hour): {hour}:{minute:02d}") + + time_info = {'hour': hour, 'minute': minute} + except (ValueError, TypeError) as e: + logger.warning(f"Error parsing time: {e}") + + # Extract date from title if date pattern exists + date_info = None + date_groups = {} + if date_regex: + date_match = date_regex.search(channel_name) + if date_match: + date_groups = date_match.groupdict() + try: + # Support various date group names: month, day, year + month_str = date_groups.get('month', '') + day = int(date_groups.get('day', 1)) + year = int(date_groups.get('year', now.year)) # Default to current year if not provided + + # Parse month - can be numeric (1-12) or text (Jan, January, etc.) 
+ month = None + if month_str.isdigit(): + month = int(month_str) + else: + # Try to parse text month names + import calendar + month_str_lower = month_str.lower() + # Check full month names + for i, month_name in enumerate(calendar.month_name): + if month_name.lower() == month_str_lower: + month = i + break + # Check abbreviated month names if not found + if month is None: + for i, month_abbr in enumerate(calendar.month_abbr): + if month_abbr.lower() == month_str_lower: + month = i + break + + if month and 1 <= month <= 12 and 1 <= day <= 31: + date_info = {'year': year, 'month': month, 'day': day} + logger.debug(f"Extracted date: {year}-{month:02d}-{day:02d}") + else: + logger.warning(f"Invalid date values: month={month}, day={day}, year={year}") + except (ValueError, TypeError) as e: + logger.warning(f"Error parsing date: {e}") + + # Merge title groups, time groups, and date groups for template formatting + all_groups = {**groups, **time_groups, **date_groups} + + # Add normalized versions of all groups for cleaner URLs + # These remove all non-alphanumeric characters and convert to lowercase + for key, value in list(all_groups.items()): + if value: + # Remove all non-alphanumeric characters (except spaces temporarily) + # then replace spaces with nothing, and convert to lowercase + normalized = regex.sub(r'[^a-zA-Z0-9\s]', '', str(value)) + normalized = regex.sub(r'\s+', '', normalized).lower() + all_groups[f'{key}_normalize'] = normalized + + # Format channel logo URL if template provided (with URL encoding) + channel_logo_url = None + if channel_logo_url_template: + channel_logo_url = format_template(channel_logo_url_template, all_groups, url_encode=True) + logger.debug(f"Formatted channel logo URL: {channel_logo_url}") + + # Format program poster URL if template provided (with URL encoding) + program_poster_url = None + if program_poster_url_template: + program_poster_url = format_template(program_poster_url_template, all_groups, url_encode=True) + logger.debug(f"Formatted program poster URL: {program_poster_url}") + + # Add formatted time strings for better display (handles minutes intelligently) + if time_info: + hour_24 = time_info['hour'] + minute = time_info['minute'] + + # Determine the base date to use for placeholders + # If date was extracted, use it; otherwise use current date + if date_info: + base_date = datetime(date_info['year'], date_info['month'], date_info['day']) + else: + base_date = datetime.now() + + # If output_timezone is specified, convert the display time to that timezone + if output_tz: + # Create a datetime in the source timezone using the base date + temp_date = source_tz.localize(base_date.replace(hour=hour_24, minute=minute, second=0, microsecond=0)) + # Convert to output timezone + temp_date_output = temp_date.astimezone(output_tz) + # Extract converted hour and minute for display + hour_24 = temp_date_output.hour + minute = temp_date_output.minute + logger.debug(f"Converted display time from {source_tz} to {output_tz}: {hour_24}:{minute:02d}") + + # Add date placeholders based on the OUTPUT timezone + # This ensures {date}, {month}, {day}, {year} reflect the converted timezone + all_groups['date'] = temp_date_output.strftime('%Y-%m-%d') + all_groups['month'] = str(temp_date_output.month) + all_groups['day'] = str(temp_date_output.day) + all_groups['year'] = str(temp_date_output.year) + logger.debug(f"Converted date placeholders to {output_tz}: {all_groups['date']}") + else: + # No output timezone conversion - use source timezone for date + # Create 
temp date to get proper date in source timezone using the base date + temp_date_source = source_tz.localize(base_date.replace(hour=hour_24, minute=minute, second=0, microsecond=0)) + all_groups['date'] = temp_date_source.strftime('%Y-%m-%d') + all_groups['month'] = str(temp_date_source.month) + all_groups['day'] = str(temp_date_source.day) + all_groups['year'] = str(temp_date_source.year) + + # Format 24-hour start time string - only include minutes if non-zero + if minute > 0: + all_groups['starttime24'] = f"{hour_24}:{minute:02d}" + else: + all_groups['starttime24'] = f"{hour_24:02d}:00" + + # Convert 24-hour to 12-hour format for {starttime} placeholder + # Note: hour_24 is ALWAYS in 24-hour format at this point (converted earlier if needed) + ampm = 'AM' if hour_24 < 12 else 'PM' + hour_12 = hour_24 + if hour_24 == 0: + hour_12 = 12 + elif hour_24 > 12: + hour_12 = hour_24 - 12 + + # Format 12-hour start time string - only include minutes if non-zero + if minute > 0: + all_groups['starttime'] = f"{hour_12}:{minute:02d} {ampm}" + else: + all_groups['starttime'] = f"{hour_12} {ampm}" + + # Format long version that always includes minutes (e.g., "9:00 PM" instead of "9 PM") + all_groups['starttime_long'] = f"{hour_12}:{minute:02d} {ampm}" + + # Calculate end time based on program duration + # Create a datetime for calculations + temp_start = datetime.now(source_tz).replace(hour=hour_24, minute=minute, second=0, microsecond=0) + temp_end = temp_start + timedelta(minutes=program_duration) + + # Extract end time components (already in correct timezone if output_tz was applied above) + end_hour_24 = temp_end.hour + end_minute = temp_end.minute + + # Format 24-hour end time string - only include minutes if non-zero + if end_minute > 0: + all_groups['endtime24'] = f"{end_hour_24}:{end_minute:02d}" + else: + all_groups['endtime24'] = f"{end_hour_24:02d}:00" + + # Convert 24-hour to 12-hour format for {endtime} placeholder + end_ampm = 'AM' if end_hour_24 < 12 else 'PM' + end_hour_12 = end_hour_24 + if end_hour_24 == 0: + end_hour_12 = 12 + elif end_hour_24 > 12: + end_hour_12 = end_hour_24 - 12 + + # Format 12-hour end time string - only include minutes if non-zero + if end_minute > 0: + all_groups['endtime'] = f"{end_hour_12}:{end_minute:02d} {end_ampm}" + else: + all_groups['endtime'] = f"{end_hour_12} {end_ampm}" + + # Format long version that always includes minutes (e.g., "9:00 PM" instead of "9 PM") + all_groups['endtime_long'] = f"{end_hour_12}:{end_minute:02d} {end_ampm}" + + # Generate programs + programs = [] + + # If we have extracted time AND date, the event happens on a SPECIFIC date + # If we have time but NO date, generate for multiple days (existing behavior) + # All other days and times show "Upcoming" before or "Ended" after + event_happened = False + + # Determine how many iterations we need + if date_info and time_info: + # Specific date extracted - only generate for that one date + iterations = 1 + logger.debug(f"Date extracted, generating single event for specific date") + else: + # No specific date - use num_days (existing behavior) + iterations = num_days + + for day in range(iterations): + # Start from current time (like standard dummy) instead of midnight + # This ensures programs appear in the guide's current viewing window + day_start = now + timedelta(days=day) + day_end = day_start + timedelta(days=1) + + if time_info: + # We have an extracted event time - this is when the MAIN event starts + # The extracted time is in the SOURCE timezone (e.g., 8PM ET) + # We need 
to convert it to UTC for storage + + # Determine which date to use + if date_info: + # Use the extracted date from the channel title + current_date = datetime( + date_info['year'], + date_info['month'], + date_info['day'] + ).date() + logger.debug(f"Using extracted date: {current_date}") + else: + # No date extracted, use day offset from current time in SOURCE timezone + # This ensures we calculate "today" in the event's timezone, not UTC + # For example: 8:30 PM Central (1:30 AM UTC next day) for a 10 PM ET event + # should use today's date in ET, not tomorrow's date in UTC + now_in_source_tz = now.astimezone(source_tz) + current_date = (now_in_source_tz + timedelta(days=day)).date() + logger.debug(f"No date extracted, using day offset in {source_tz}: {current_date}") + + # Create a naive datetime (no timezone info) representing the event in source timezone + event_start_naive = datetime.combine( + current_date, + datetime.min.time().replace( + hour=time_info['hour'], + minute=time_info['minute'] + ) + ) + + # Use pytz to localize the naive datetime to the source timezone + # This automatically handles DST! + try: + event_start_local = source_tz.localize(event_start_naive) + # Convert to UTC + event_start_utc = event_start_local.astimezone(pytz.utc) + logger.debug(f"Converted {event_start_local} to UTC: {event_start_utc}") + except Exception as e: + logger.error(f"Error localizing time to {source_tz}: {e}") + # Fallback: treat as UTC + event_start_utc = django_timezone.make_aware(event_start_naive, pytz.utc) + + event_end_utc = event_start_utc + timedelta(minutes=program_duration) + + # Pre-generate the main event title and description for reuse + if title_template: + main_event_title = format_template(title_template, all_groups) + else: + title_parts = [] + if 'league' in all_groups and all_groups['league']: + title_parts.append(all_groups['league']) + if 'team1' in all_groups and 'team2' in all_groups: + title_parts.append(f"{all_groups['team1']} vs {all_groups['team2']}") + elif 'title' in all_groups and all_groups['title']: + title_parts.append(all_groups['title']) + main_event_title = ' - '.join(title_parts) if title_parts else channel_name + + if description_template: + main_event_description = format_template(description_template, all_groups) + else: + main_event_description = main_event_title + + + + # Determine if this day is before, during, or after the event + # Event only happens on day 0 (first day) + is_event_day = (day == 0) + + if is_event_day and not event_happened: + # This is THE day the event happens + # Fill programs BEFORE the event + current_time = day_start + + while current_time < event_start_utc: + program_start_utc = current_time + program_end_utc = min(current_time + timedelta(minutes=program_duration), event_start_utc) + + # Use custom upcoming templates if provided, otherwise use defaults + if upcoming_title_template: + upcoming_title = format_template(upcoming_title_template, all_groups) + else: + upcoming_title = main_event_title + + if upcoming_description_template: + upcoming_description = format_template(upcoming_description_template, all_groups) + else: + upcoming_description = f"Upcoming: {main_event_description}" + + # Build custom_properties for upcoming programs (only date, no category/live) + program_custom_properties = {} + + # Add date if requested (YYYY-MM-DD format from start time in event timezone) + if include_date: + # Convert UTC time to event timezone for date calculation + local_time = program_start_utc.astimezone(source_tz) + date_str = 
local_time.strftime('%Y-%m-%d') + program_custom_properties['date'] = date_str + + # Add program poster URL if provided + if program_poster_url: + program_custom_properties['icon'] = program_poster_url + + programs.append({ + "channel_id": channel_id, + "start_time": program_start_utc, + "end_time": program_end_utc, + "title": upcoming_title, + "description": upcoming_description, + "custom_properties": program_custom_properties, + "channel_logo_url": channel_logo_url, # Pass channel logo for EPG generation + }) + + current_time += timedelta(minutes=program_duration) + + # Add the MAIN EVENT at the extracted time + # Build custom_properties for main event (includes category and live) + main_event_custom_properties = {} + + # Add categories if provided + if categories: + main_event_custom_properties['categories'] = categories + + # Add date if requested (YYYY-MM-DD format from start time in event timezone) + if include_date: + # Convert UTC time to event timezone for date calculation + local_time = event_start_utc.astimezone(source_tz) + date_str = local_time.strftime('%Y-%m-%d') + main_event_custom_properties['date'] = date_str + + # Add live flag if requested + if include_live: + main_event_custom_properties['live'] = True + + # Add new flag if requested + if include_new: + main_event_custom_properties['new'] = True + + # Add program poster URL if provided + if program_poster_url: + main_event_custom_properties['icon'] = program_poster_url + + programs.append({ + "channel_id": channel_id, + "start_time": event_start_utc, + "end_time": event_end_utc, + "title": main_event_title, + "description": main_event_description, + "custom_properties": main_event_custom_properties, + "channel_logo_url": channel_logo_url, # Pass channel logo for EPG generation + }) + + event_happened = True + + # Fill programs AFTER the event until end of day + current_time = event_end_utc + + while current_time < day_end: + program_start_utc = current_time + program_end_utc = min(current_time + timedelta(minutes=program_duration), day_end) + + # Use custom ended templates if provided, otherwise use defaults + if ended_title_template: + ended_title = format_template(ended_title_template, all_groups) + else: + ended_title = main_event_title + + if ended_description_template: + ended_description = format_template(ended_description_template, all_groups) + else: + ended_description = f"Ended: {main_event_description}" + + # Build custom_properties for ended programs (only date, no category/live) + program_custom_properties = {} + + # Add date if requested (YYYY-MM-DD format from start time in event timezone) + if include_date: + # Convert UTC time to event timezone for date calculation + local_time = program_start_utc.astimezone(source_tz) + date_str = local_time.strftime('%Y-%m-%d') + program_custom_properties['date'] = date_str + + # Add program poster URL if provided + if program_poster_url: + program_custom_properties['icon'] = program_poster_url + + programs.append({ + "channel_id": channel_id, + "start_time": program_start_utc, + "end_time": program_end_utc, + "title": ended_title, + "description": ended_description, + "custom_properties": program_custom_properties, + "channel_logo_url": channel_logo_url, # Pass channel logo for EPG generation + }) + + current_time += timedelta(minutes=program_duration) + else: + # This day is either before the event (future days) or after the event happened + # Fill entire day with appropriate message + current_time = day_start + + # If event already happened, all programs show 
"Ended" + # If event hasn't happened yet (shouldn't occur with day 0 logic), show "Upcoming" + is_ended = event_happened + + while current_time < day_end: + program_start_utc = current_time + program_end_utc = min(current_time + timedelta(minutes=program_duration), day_end) + + # Use custom templates based on whether event has ended or is upcoming + if is_ended: + if ended_title_template: + program_title = format_template(ended_title_template, all_groups) + else: + program_title = main_event_title + + if ended_description_template: + program_description = format_template(ended_description_template, all_groups) + else: + program_description = f"Ended: {main_event_description}" + else: + if upcoming_title_template: + program_title = format_template(upcoming_title_template, all_groups) + else: + program_title = main_event_title + + if upcoming_description_template: + program_description = format_template(upcoming_description_template, all_groups) + else: + program_description = f"Upcoming: {main_event_description}" + + # Build custom_properties (only date for upcoming/ended filler programs) + program_custom_properties = {} + + # Add date if requested (YYYY-MM-DD format from start time in event timezone) + if include_date: + # Convert UTC time to event timezone for date calculation + local_time = program_start_utc.astimezone(source_tz) + date_str = local_time.strftime('%Y-%m-%d') + program_custom_properties['date'] = date_str + + # Add program poster URL if provided + if program_poster_url: + program_custom_properties['icon'] = program_poster_url + + programs.append({ + "channel_id": channel_id, + "start_time": program_start_utc, + "end_time": program_end_utc, + "title": program_title, + "description": program_description, + "custom_properties": program_custom_properties, + "channel_logo_url": channel_logo_url, + }) + + current_time += timedelta(minutes=program_duration) + else: + # No extracted time - fill entire day with regular intervals + # day_start and day_end are already in UTC, so no conversion needed + programs_per_day = max(1, int(24 / (program_duration / 60))) + + for program_num in range(programs_per_day): + program_start_utc = day_start + timedelta(minutes=program_num * program_duration) + program_end_utc = program_start_utc + timedelta(minutes=program_duration) + + if title_template: + title = format_template(title_template, all_groups) + else: + title_parts = [] + if 'league' in all_groups and all_groups['league']: + title_parts.append(all_groups['league']) + if 'team1' in all_groups and 'team2' in all_groups: + title_parts.append(f"{all_groups['team1']} vs {all_groups['team2']}") + elif 'title' in all_groups and all_groups['title']: + title_parts.append(all_groups['title']) + title = ' - '.join(title_parts) if title_parts else channel_name + + if description_template: + description = format_template(description_template, all_groups) + else: + description = title + + # Build custom_properties for this program + program_custom_properties = {} + + # Add categories if provided + if categories: + program_custom_properties['categories'] = categories + + # Add date if requested (YYYY-MM-DD format from start time in event timezone) + if include_date: + # Convert UTC time to event timezone for date calculation + local_time = program_start_utc.astimezone(source_tz) + date_str = local_time.strftime('%Y-%m-%d') + program_custom_properties['date'] = date_str + + # Add live flag if requested + if include_live: + program_custom_properties['live'] = True + + # Add new flag if requested + if 
include_new: + program_custom_properties['new'] = True + + # Add program poster URL if provided + if program_poster_url: + program_custom_properties['icon'] = program_poster_url + + programs.append({ + "channel_id": channel_id, + "start_time": program_start_utc, + "end_time": program_end_utc, + "title": title, + "description": description, + "custom_properties": program_custom_properties, + "channel_logo_url": channel_logo_url, # Pass channel logo for EPG generation + }) + + logger.info(f"Generated {len(programs)} custom dummy programs for {channel_name}") + return programs + + +def generate_dummy_epg( + channel_id, channel_name, xml_lines=None, num_days=1, program_length_hours=4 +): + """ + Generate dummy EPG programs for channels without EPG data. + Creates program blocks for a specified number of days. + + Args: + channel_id: The channel ID to use in the program entries + channel_name: The name of the channel to use in program titles + xml_lines: Optional list to append lines to, otherwise returns new list + num_days: Number of days to generate EPG data for (default: 1) + program_length_hours: Length of each program block in hours (default: 4) + + Returns: + List of XML lines for the dummy EPG entries + """ + if xml_lines is None: + xml_lines = [] + + for program in generate_dummy_programs(channel_id, channel_name, num_days=1, program_length_hours=4): + # Format times in XMLTV format + start_str = program['start_time'].strftime("%Y%m%d%H%M%S %z") + stop_str = program['end_time'].strftime("%Y%m%d%H%M%S %z") + + # Create program entry with escaped channel name + xml_lines.append( + f' ' + ) + xml_lines.append(f" {html.escape(program['title'])}") + xml_lines.append(f" {html.escape(program['description'])}") + + # Add custom_properties if present + custom_data = program.get('custom_properties', {}) + + # Categories + if 'categories' in custom_data: + for cat in custom_data['categories']: + xml_lines.append(f" {html.escape(cat)}") + + # Date tag + if 'date' in custom_data: + xml_lines.append(f" {html.escape(custom_data['date'])}") + + # Live tag + if custom_data.get('live', False): + xml_lines.append(f" ") + + # New tag + if custom_data.get('new', False): + xml_lines.append(f" ") + + xml_lines.append(f" ") return xml_lines -def generate_epg(request, profile_name=None): + +def generate_epg(request, profile_name=None, user=None): """ - Dynamically generate an XMLTV (EPG) file using the new EPGData/ProgramData models. + Dynamically generate an XMLTV (EPG) file using streaming response to handle keep-alives. Since the EPG data is stored independently of Channels, we group programmes by their associated EPGData record. - This version filters data based on the 'days' parameter. + This version filters data based on the 'days' parameter and sends keep-alives during processing. 
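# The markup literals in this excerpt lost their angle-bracket content, so the
# sketch below shows the general XMLTV <programme> shape the surrounding code
# assembles (standard XMLTV element names; the exact attributes and optional
# tags in the real diff may differ). All values are placeholders; html is the
# stdlib module imported at the top of this file.
start_str, stop_str = "20250101200000 +0000", "20250102000000 +0000"
channel_id, title, description = "101", "Bruins VS Maple Leafs", "Placeholder description"
xml_lines = [
    f'  <programme start="{start_str}" stop="{stop_str}" channel="{channel_id}">',
    f"    <title>{html.escape(title)}</title>",
    f"    <desc>{html.escape(description)}</desc>",
    "    <category>Sports</category>",
    "    <date>2025-01-01</date>",
    "    <live/>",
    "    <new/>",
    "  </programme>",
]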
""" - xml_lines = [] - xml_lines.append('') - xml_lines.append('') + def epg_generator(): + """Generator function that yields EPG data with keep-alives during processing""" # Send initial HTTP headers as comments (these will be ignored by XML parsers but keep connection alive) - if profile_name is not None: - channel_profile = ChannelProfile.objects.get(name=profile_name) - channels = Channel.objects.filter( - channelprofilemembership__channel_profile=channel_profile, - channelprofilemembership__enabled=True + xml_lines = [] + xml_lines.append('') + xml_lines.append( + '' ) - else: - channels = Channel.objects.all() - # Check if the request wants to use direct logo URLs instead of cache - use_cached_logos = request.GET.get('cachedlogos', 'true').lower() != 'false' + # Get channels based on user/profile + if user is not None: + if user.user_level == 0: + user_profile_count = user.channel_profiles.count() - # Get the source to use for tvg-id value - # Options: 'channel_number' (default), 'tvg_id', 'gracenote' - tvg_id_source = request.GET.get('tvg_id_source', 'channel_number').lower() - - # Get the number of days for EPG data - try: - # Default to 0 days (everything) for real EPG if not specified - days_param = request.GET.get('days', '0') - num_days = int(days_param) - # Set reasonable limits - num_days = max(0, min(num_days, 365)) # Between 0 and 365 days - except ValueError: - num_days = 0 # Default to all data if invalid value - - # For dummy EPG, use either the specified value or default to 3 days - dummy_days = num_days if num_days > 0 else 3 - - # Calculate cutoff date for EPG data filtering (only if days > 0) - now = timezone.now() - cutoff_date = now + timedelta(days=num_days) if num_days > 0 else None - - # Retrieve all active channels - for channel in channels: - # Format channel number as integer if it has no decimal component - same as M3U generation - if channel.channel_number is not None: - if channel.channel_number == int(channel.channel_number): - formatted_channel_number = int(channel.channel_number) - else: - formatted_channel_number = channel.channel_number - else: - formatted_channel_number = "" - - # Determine the channel ID based on the selected source - if tvg_id_source == 'tvg_id' and channel.tvg_id: - channel_id = channel.tvg_id - elif tvg_id_source == 'gracenote' and channel.tvc_guide_stationid: - channel_id = channel.tvc_guide_stationid - else: - # Default to channel number (original behavior) - channel_id = str(formatted_channel_number) if formatted_channel_number != "" else str(channel.id) - - # Add channel logo if available - tvg_logo = "" - if channel.logo: - if use_cached_logos: - # Use cached logo as before - tvg_logo = request.build_absolute_uri(reverse('api:channels:logo-cache', args=[channel.logo.id])) - else: - # Try to find direct logo URL from channel's streams - direct_logo = channel.logo.url if channel.logo.url.startswith(('http://', 'https://')) else None - # If direct logo found, use it; otherwise fall back to cached version - if direct_logo: - tvg_logo = direct_logo + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channels based on user_level + channels = Channel.objects.filter(user_level__lte=user.user_level).order_by("channel_number") else: - tvg_logo = request.build_absolute_uri(reverse('api:channels:logo-cache', args=[channel.logo.id])) - display_name = channel.epg_data.name if channel.epg_data else channel.name - xml_lines.append(f' ') - 
xml_lines.append(f' {html.escape(display_name)}') - xml_lines.append(f' ') - - xml_lines.append(' ') - - for channel in channels: - # Use the same channel ID determination for program entries - if tvg_id_source == 'tvg_id' and channel.tvg_id: - channel_id = channel.tvg_id - elif tvg_id_source == 'gracenote' and channel.tvc_guide_stationid: - channel_id = channel.tvc_guide_stationid + # User has specific limited profiles assigned + filters = { + "channelprofilemembership__enabled": True, + "user_level__lte": user.user_level, + "channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + channels = Channel.objects.filter(**filters).distinct().order_by("channel_number") + else: + channels = Channel.objects.filter(user_level__lte=user.user_level).order_by( + "channel_number" + ) else: - # Get formatted channel number + if profile_name is not None: + channel_profile = ChannelProfile.objects.get(name=profile_name) + channels = Channel.objects.filter( + channelprofilemembership__channel_profile=channel_profile, + channelprofilemembership__enabled=True, + ).order_by("channel_number") + else: + channels = Channel.objects.all().order_by("channel_number") + + # Check if the request wants to use direct logo URLs instead of cache + use_cached_logos = request.GET.get('cachedlogos', 'true').lower() != 'false' + + # Get the source to use for tvg-id value + # Options: 'channel_number' (default), 'tvg_id', 'gracenote' + tvg_id_source = request.GET.get('tvg_id_source', 'channel_number').lower() + + # Get the number of days for EPG data + try: + # Default to 0 days (everything) for real EPG if not specified + days_param = request.GET.get('days', '0') + num_days = int(days_param) + # Set reasonable limits + num_days = max(0, min(num_days, 365)) # Between 0 and 365 days + except ValueError: + num_days = 0 # Default to all data if invalid value + + # For dummy EPG, use either the specified value or default to 3 days + dummy_days = num_days if num_days > 0 else 3 + + # Calculate cutoff date for EPG data filtering (only if days > 0) + now = django_timezone.now() + cutoff_date = now + timedelta(days=num_days) if num_days > 0 else None + + # Process channels for the section + for channel in channels: + # Format channel number as integer if it has no decimal component - same as M3U generation if channel.channel_number is not None: if channel.channel_number == int(channel.channel_number): formatted_channel_number = int(channel.channel_number) @@ -305,126 +1200,1510 @@ def generate_epg(request, profile_name=None): formatted_channel_number = channel.channel_number else: formatted_channel_number = "" - # Default to channel number - channel_id = str(formatted_channel_number) if formatted_channel_number != "" else str(channel.id) - display_name = channel.epg_data.name if channel.epg_data else channel.name - if not channel.epg_data: - # Use the enhanced dummy EPG generation function with defaults - program_length_hours = 4 # Default to 4-hour program blocks - generate_dummy_epg( - channel_id, - display_name, - xml_lines, - num_days=dummy_days, # Use dummy_days (3 days by default) - program_length_hours=program_length_hours - ) + # Determine the channel ID based on the selected source + if tvg_id_source == 'tvg_id' and channel.tvg_id: + channel_id = channel.tvg_id + elif tvg_id_source == 'gracenote' and channel.tvc_guide_stationid: + channel_id = channel.tvc_guide_stationid + else: + # Default to channel number (original behavior) + channel_id = str(formatted_channel_number) if formatted_channel_number 
!= "" else str(channel.id) + + # Add channel logo if available + tvg_logo = "" + + # Check if this is a custom dummy EPG with channel logo URL template + if channel.epg_data and channel.epg_data.epg_source and channel.epg_data.epg_source.source_type == 'dummy': + epg_source = channel.epg_data.epg_source + if epg_source.custom_properties: + custom_props = epg_source.custom_properties + channel_logo_url_template = custom_props.get('channel_logo_url', '') + + if channel_logo_url_template: + # Determine which name to use for pattern matching (same logic as program generation) + pattern_match_name = channel.name + name_source = custom_props.get('name_source') + + if name_source == 'stream': + stream_index = custom_props.get('stream_index', 1) - 1 + channel_streams = channel.streams.all().order_by('channelstream__order') + + if channel_streams.exists() and 0 <= stream_index < channel_streams.count(): + stream = list(channel_streams)[stream_index] + pattern_match_name = stream.name + + # Try to extract groups from the channel/stream name and build the logo URL + title_pattern = custom_props.get('title_pattern', '') + if title_pattern: + try: + # Convert PCRE/JavaScript named groups to Python format + title_pattern = regex.sub(r'\(\?<(?![=!])([^>]+)>', r'(?P<\1>', title_pattern) + title_regex = regex.compile(title_pattern) + title_match = title_regex.search(pattern_match_name) + + if title_match: + groups = title_match.groupdict() + + # Add normalized versions of all groups for cleaner URLs + for key, value in list(groups.items()): + if value: + # Remove all non-alphanumeric characters and convert to lowercase + normalized = regex.sub(r'[^a-zA-Z0-9\s]', '', str(value)) + normalized = regex.sub(r'\s+', '', normalized).lower() + groups[f'{key}_normalize'] = normalized + + # Format the logo URL template with the matched groups (with URL encoding) + from urllib.parse import quote + for key, value in groups.items(): + if value: + encoded_value = quote(str(value), safe='') + channel_logo_url_template = channel_logo_url_template.replace(f'{{{key}}}', encoded_value) + else: + channel_logo_url_template = channel_logo_url_template.replace(f'{{{key}}}', '') + tvg_logo = channel_logo_url_template + logger.debug(f"Built channel logo URL from template: {tvg_logo}") + except Exception as e: + logger.warning(f"Failed to build channel logo URL for {channel.name}: {e}") + + # If no custom dummy logo, use regular logo logic + if not tvg_logo and channel.logo: + if use_cached_logos: + # Use cached logo as before + tvg_logo = build_absolute_uri_with_port(request, reverse('api:channels:logo-cache', args=[channel.logo.id])) + else: + # Try to find direct logo URL from channel's streams + direct_logo = channel.logo.url if channel.logo.url.startswith(('http://', 'https://')) else None + # If direct logo found, use it; otherwise fall back to cached version + if direct_logo: + tvg_logo = direct_logo + else: + tvg_logo = build_absolute_uri_with_port(request, reverse('api:channels:logo-cache', args=[channel.logo.id])) + display_name = channel.name + xml_lines.append(f' ') + xml_lines.append(f' {html.escape(display_name)}') + xml_lines.append(f' ') + xml_lines.append(" ") + + # Send all channel definitions + yield '\n'.join(xml_lines) + '\n' + xml_lines = [] # Clear to save memory + + # Process programs for each channel + for channel in channels: + + # Use the same channel ID determination for program entries + if tvg_id_source == 'tvg_id' and channel.tvg_id: + channel_id = channel.tvg_id + elif tvg_id_source == 'gracenote' 
and channel.tvc_guide_stationid: + channel_id = channel.tvc_guide_stationid + else: + # Get formatted channel number + if channel.channel_number is not None: + if channel.channel_number == int(channel.channel_number): + formatted_channel_number = int(channel.channel_number) + else: + formatted_channel_number = channel.channel_number + else: + formatted_channel_number = "" + # Default to channel number + channel_id = str(formatted_channel_number) if formatted_channel_number != "" else str(channel.id) + + # Use EPG data name for display, but channel name for pattern matching + display_name = channel.epg_data.name if channel.epg_data else channel.name + # For dummy EPG pattern matching, determine which name to use + pattern_match_name = channel.name + + # Check if we should use stream name instead of channel name + if channel.epg_data and channel.epg_data.epg_source: + epg_source = channel.epg_data.epg_source + if epg_source.custom_properties: + custom_props = epg_source.custom_properties + name_source = custom_props.get('name_source') + + if name_source == 'stream': + stream_index = custom_props.get('stream_index', 1) - 1 + channel_streams = channel.streams.all().order_by('channelstream__order') + + if channel_streams.exists() and 0 <= stream_index < channel_streams.count(): + stream = list(channel_streams)[stream_index] + pattern_match_name = stream.name + logger.debug(f"Using stream name for parsing: {pattern_match_name} (stream index: {stream_index})") + else: + logger.warning(f"Stream index {stream_index} not found for channel {channel.name}, falling back to channel name") + + if not channel.epg_data: + # Use the enhanced dummy EPG generation function with defaults + program_length_hours = 4 # Default to 4-hour program blocks + dummy_programs = generate_dummy_programs( + channel_id, pattern_match_name, + num_days=dummy_days, + program_length_hours=program_length_hours, + epg_source=None + ) + + for program in dummy_programs: + # Format times in XMLTV format + start_str = program['start_time'].strftime("%Y%m%d%H%M%S %z") + stop_str = program['end_time'].strftime("%Y%m%d%H%M%S %z") + + # Create program entry with escaped channel name + yield f' \n' + yield f" {html.escape(program['title'])}\n" + yield f" {html.escape(program['description'])}\n" + + # Add custom_properties if present + custom_data = program.get('custom_properties', {}) + + # Categories + if 'categories' in custom_data: + for cat in custom_data['categories']: + yield f" {html.escape(cat)}\n" + + # Date tag + if 'date' in custom_data: + yield f" {html.escape(custom_data['date'])}\n" + + # Live tag + if custom_data.get('live', False): + yield f" \n" + + # New tag + if custom_data.get('new', False): + yield f" \n" + + # Icon/poster URL + if 'icon' in custom_data: + yield f" \n" + + yield f" \n" + + else: + # Check if this is a dummy EPG with no programs (generate on-demand) + if channel.epg_data.epg_source and channel.epg_data.epg_source.source_type == 'dummy': + # This is a custom dummy EPG - check if it has programs + if not channel.epg_data.programs.exists(): + # No programs stored, generate on-demand using custom patterns + # Use actual channel name for pattern matching + program_length_hours = 4 + dummy_programs = generate_dummy_programs( + channel_id, pattern_match_name, + num_days=dummy_days, + program_length_hours=program_length_hours, + epg_source=channel.epg_data.epg_source + ) + + for program in dummy_programs: + start_str = program['start_time'].strftime("%Y%m%d%H%M%S %z") + stop_str = 
program['end_time'].strftime("%Y%m%d%H%M%S %z") + + yield f' \n' + yield f" {html.escape(program['title'])}\n" + yield f" {html.escape(program['description'])}\n" + + # Add custom_properties if present + custom_data = program.get('custom_properties', {}) + + # Categories + if 'categories' in custom_data: + for cat in custom_data['categories']: + yield f" {html.escape(cat)}\n" + + # Date tag + if 'date' in custom_data: + yield f" {html.escape(custom_data['date'])}\n" + + # Live tag + if custom_data.get('live', False): + yield f" \n" + + # New tag + if custom_data.get('new', False): + yield f" \n" + + # Icon/poster URL + if 'icon' in custom_data: + yield f" \n" + + yield f" \n" + + continue # Skip to next channel + + # For real EPG data - filter only if days parameter was specified + if num_days > 0: + programs_qs = channel.epg_data.programs.filter( + start_time__gte=now, + start_time__lt=cutoff_date + ).order_by('id') # Explicit ordering for consistent chunking + else: + # Return all programs if days=0 or not specified + programs_qs = channel.epg_data.programs.all().order_by('id') + + # Process programs in chunks to avoid cursor timeout issues + program_batch = [] + batch_size = 250 + chunk_size = 1000 # Fetch 1000 programs at a time from DB + + # Fetch chunks until no more results (avoids count() query) + offset = 0 + while True: + # Fetch a chunk of programs - this closes the cursor after fetching + program_chunk = list(programs_qs[offset:offset + chunk_size]) + + # Break if no more programs + if not program_chunk: + break + + # Process each program in the chunk + for prog in program_chunk: + start_str = prog.start_time.strftime("%Y%m%d%H%M%S %z") + stop_str = prog.end_time.strftime("%Y%m%d%H%M%S %z") + + program_xml = [f' '] + program_xml.append(f' {html.escape(prog.title)}') + + # Add subtitle if available + if prog.sub_title: + program_xml.append(f" {html.escape(prog.sub_title)}") + + # Add description if available + if prog.description: + program_xml.append(f" {html.escape(prog.description)}") + + # Process custom properties if available + if prog.custom_properties: + custom_data = prog.custom_properties or {} + + # Add categories if available + if "categories" in custom_data and custom_data["categories"]: + for category in custom_data["categories"]: + program_xml.append(f" {html.escape(category)}") + + # Add keywords if available + if "keywords" in custom_data and custom_data["keywords"]: + for keyword in custom_data["keywords"]: + program_xml.append(f" {html.escape(keyword)}") + + # Handle episode numbering - multiple formats supported + # Prioritize onscreen_episode over standalone episode for onscreen system + if "onscreen_episode" in custom_data: + program_xml.append(f' {html.escape(custom_data["onscreen_episode"])}') + elif "episode" in custom_data: + program_xml.append(f' E{custom_data["episode"]}') + + # Handle dd_progid format + if 'dd_progid' in custom_data: + program_xml.append(f' {html.escape(custom_data["dd_progid"])}') + + # Handle external database IDs + for system in ['thetvdb.com', 'themoviedb.org', 'imdb.com']: + if f'{system}_id' in custom_data: + program_xml.append(f' {html.escape(custom_data[f"{system}_id"])}') + + # Add season and episode numbers in xmltv_ns format if available + if "season" in custom_data and "episode" in custom_data: + season = ( + int(custom_data["season"]) - 1 + if str(custom_data["season"]).isdigit() + else 0 + ) + episode = ( + int(custom_data["episode"]) - 1 + if str(custom_data["episode"]).isdigit() + else 0 + ) + program_xml.append(f' 
{season}.{episode}.') + + # Add language information + if "language" in custom_data: + program_xml.append(f' {html.escape(custom_data["language"])}') + + if "original_language" in custom_data: + program_xml.append(f' {html.escape(custom_data["original_language"])}') + + # Add length information + if "length" in custom_data and isinstance(custom_data["length"], dict): + length_value = custom_data["length"].get("value", "") + length_units = custom_data["length"].get("units", "minutes") + program_xml.append(f' {html.escape(str(length_value))}') + + # Add video information + if "video" in custom_data and isinstance(custom_data["video"], dict): + program_xml.append(" ") + + # Add audio information + if "audio" in custom_data and isinstance(custom_data["audio"], dict): + program_xml.append(" ") + + # Add subtitles information + if "subtitles" in custom_data and isinstance(custom_data["subtitles"], list): + for subtitle in custom_data["subtitles"]: + if isinstance(subtitle, dict): + subtitle_type = subtitle.get("type", "") + type_attr = f' type="{html.escape(subtitle_type)}"' if subtitle_type else "" + program_xml.append(f" ") + if "language" in subtitle: + program_xml.append(f" {html.escape(subtitle['language'])}") + program_xml.append(" ") + + # Add rating if available + if "rating" in custom_data: + rating_system = custom_data.get("rating_system", "TV Parental Guidelines") + program_xml.append(f' ') + program_xml.append(f' {html.escape(custom_data["rating"])}') + program_xml.append(f" ") + + # Add star ratings + if "star_ratings" in custom_data and isinstance(custom_data["star_ratings"], list): + for star_rating in custom_data["star_ratings"]: + if isinstance(star_rating, dict) and "value" in star_rating: + system_attr = f' system="{html.escape(star_rating["system"])}"' if "system" in star_rating else "" + program_xml.append(f" ") + program_xml.append(f" {html.escape(star_rating['value'])}") + program_xml.append(" ") + + # Add reviews + if "reviews" in custom_data and isinstance(custom_data["reviews"], list): + for review in custom_data["reviews"]: + if isinstance(review, dict) and "content" in review: + review_type = review.get("type", "text") + attrs = [f'type="{html.escape(review_type)}"'] + if "source" in review: + attrs.append(f'source="{html.escape(review["source"])}"') + if "reviewer" in review: + attrs.append(f'reviewer="{html.escape(review["reviewer"])}"') + attr_str = " ".join(attrs) + program_xml.append(f' {html.escape(review["content"])}') + + # Add images + if "images" in custom_data and isinstance(custom_data["images"], list): + for image in custom_data["images"]: + if isinstance(image, dict) and "url" in image: + attrs = [] + for attr in ['type', 'size', 'orient', 'system']: + if attr in image: + attrs.append(f'{attr}="{html.escape(image[attr])}"') + attr_str = " " + " ".join(attrs) if attrs else "" + program_xml.append(f' {html.escape(image["url"])}') + + # Add enhanced credits handling + if "credits" in custom_data: + program_xml.append(" ") + credits = custom_data["credits"] + + # Handle different credit types + for role in ['director', 'writer', 'adapter', 'producer', 'composer', 'editor', 'presenter', 'commentator', 'guest']: + if role in credits: + people = credits[role] + if isinstance(people, list): + for person in people: + program_xml.append(f" <{role}>{html.escape(person)}") + else: + program_xml.append(f" <{role}>{html.escape(people)}") + + # Handle actors separately to include role and guest attributes + if "actor" in credits: + actors = credits["actor"] + if 
isinstance(actors, list): + for actor in actors: + if isinstance(actor, dict): + name = actor.get("name", "") + role_attr = f' role="{html.escape(actor["role"])}"' if "role" in actor else "" + guest_attr = ' guest="yes"' if actor.get("guest") else "" + program_xml.append(f" {html.escape(name)}") + else: + program_xml.append(f" {html.escape(actor)}") + else: + program_xml.append(f" {html.escape(actors)}") + + program_xml.append(" ") + + # Add program date if available (full date, not just year) + if "date" in custom_data: + program_xml.append(f' {html.escape(custom_data["date"])}') + + # Add country if available + if "country" in custom_data: + program_xml.append(f' {html.escape(custom_data["country"])}') + + # Add icon if available + if "icon" in custom_data: + program_xml.append(f' ') + + # Add special flags as proper tags with enhanced handling + if custom_data.get("previously_shown", False): + prev_shown_details = custom_data.get("previously_shown_details", {}) + attrs = [] + if "start" in prev_shown_details: + attrs.append(f'start="{html.escape(prev_shown_details["start"])}"') + if "channel" in prev_shown_details: + attrs.append(f'channel="{html.escape(prev_shown_details["channel"])}"') + attr_str = " " + " ".join(attrs) if attrs else "" + program_xml.append(f" ") + + if custom_data.get("premiere", False): + premiere_text = custom_data.get("premiere_text", "") + if premiere_text: + program_xml.append(f" {html.escape(premiere_text)}") + else: + program_xml.append(" ") + + if custom_data.get("last_chance", False): + last_chance_text = custom_data.get("last_chance_text", "") + if last_chance_text: + program_xml.append(f" {html.escape(last_chance_text)}") + else: + program_xml.append(" ") + + if custom_data.get("new", False): + program_xml.append(" ") + + if custom_data.get('live', False): + program_xml.append(' ') + + program_xml.append(" ") + + # Add to batch + program_batch.extend(program_xml) + + # Send batch when full or send keep-alive + if len(program_batch) >= batch_size: + yield '\n'.join(program_batch) + '\n' + program_batch = [] + + # Move to next chunk + offset += chunk_size + + # Send remaining programs in batch + if program_batch: + yield '\n'.join(program_batch) + '\n' + + # Send final closing tag and completion message + yield "\n" # Return streaming response + response = StreamingHttpResponse( + streaming_content=epg_generator(), + content_type="application/xml" + ) + response["Content-Disposition"] = 'attachment; filename="Dispatcharr.xml"' + response["Cache-Control"] = "no-cache" + return response + + +def xc_get_user(request): + username = request.GET.get("username") + password = request.GET.get("password") + + if not username or not password: + return None + + user = get_object_or_404(User, username=username) + custom_properties = user.custom_properties or {} + + if "xc_password" not in custom_properties: + return None + + if custom_properties["xc_password"] != password: + return None + + return user + + +def xc_get_info(request, full=False): + if not network_access_allowed(request, 'XC_API'): + return JsonResponse({'error': 'Forbidden'}, status=403) + + user = xc_get_user(request) + + if user is None: + return JsonResponse({'error': 'Unauthorized'}, status=401) + + raw_host = request.get_host() + if ":" in raw_host: + hostname, port = raw_host.split(":", 1) + else: + hostname = raw_host + port = "443" if request.is_secure() else "80" + + info = { + "user_info": { + "username": request.GET.get("username"), + "password": request.GET.get("password"), + "message": 
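Rendered lines are buffered in program_batch and flushed once batch_size is reached, so the streaming response writes reasonably sized blocks instead of one line at a time. A small self-contained sketch of that buffering, with illustrative names:

def batched(lines, batch_size=250):
    """Group an iterable of strings into newline-joined blocks of roughly batch_size lines."""
    batch = []
    for line in lines:
        batch.append(line)
        if len(batch) >= batch_size:
            yield "\n".join(batch) + "\n"
            batch = []
    if batch:  # flush whatever is left at the end
        yield "\n".join(batch) + "\n"

blocks = list(batched((f"line-{i}" for i in range(600)), batch_size=250))
assert len(blocks) == 3  # 250 + 250 + 100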
"Dispatcharr XC API", + "auth": 1, + "status": "Active", + "exp_date": str(int(time.time()) + (90 * 24 * 60 * 60)), + "max_connections": str(calculate_tuner_count(minimum=1, unlimited_default=50)), + "allowed_output_formats": [ + "ts", + ], + }, + "server_info": { + "url": hostname, + "server_protocol": request.scheme, + "port": port, + "timezone": get_localzone().key, + "timestamp_now": int(time.time()), + "time_now": datetime.now().strftime("%Y-%m-%d %H:%M:%S"), + "process": True, + }, + } + + if full == True: + info['categories'] = { + "series": [], + "movie": [], + "live": xc_get_live_categories(user), + } + info['available_channels'] = {channel["stream_id"]: channel for channel in xc_get_live_streams(request, user, request.GET.get("category_id"))} + + return info + + +def xc_player_api(request, full=False): + if not network_access_allowed(request, 'XC_API'): + return JsonResponse({'error': 'Forbidden'}, status=403) + + action = request.GET.get("action") + user = xc_get_user(request) + + if user is None: + return JsonResponse({'error': 'Unauthorized'}, status=401) + + server_info = xc_get_info(request) + + if not action: + return JsonResponse(server_info) + + if action == "get_live_categories": + return JsonResponse(xc_get_live_categories(user), safe=False) + if action == "get_live_streams": + return JsonResponse(xc_get_live_streams(request, user, request.GET.get("category_id")), safe=False) + if action == "get_short_epg": + return JsonResponse(xc_get_epg(request, user, short=True), safe=False) + if action == "get_simple_data_table": + return JsonResponse(xc_get_epg(request, user, short=False), safe=False) + + # Endpoints not implemented, but still provide a response + if action in [ + "get_vod_categories", + "get_vod_streams", + "get_series", + "get_series_categories", + "get_series_info", + "get_vod_info", + ]: + if action == "get_vod_categories": + return JsonResponse(xc_get_vod_categories(user), safe=False) + elif action == "get_vod_streams": + return JsonResponse(xc_get_vod_streams(request, user, request.GET.get("category_id")), safe=False) + elif action == "get_series_categories": + return JsonResponse(xc_get_series_categories(user), safe=False) + elif action == "get_series": + return JsonResponse(xc_get_series(request, user, request.GET.get("category_id")), safe=False) + elif action == "get_series_info": + return JsonResponse(xc_get_series_info(request, user, request.GET.get("series_id")), safe=False) + elif action == "get_vod_info": + return JsonResponse(xc_get_vod_info(request, user, request.GET.get("vod_id")), safe=False) else: - # For real EPG data - filter only if days parameter was specified - if num_days > 0: - programs = channel.epg_data.programs.filter( - start_time__gte=now, - start_time__lt=cutoff_date + return JsonResponse([], safe=False) + + raise Http404() + + +def xc_panel_api(request): + if not network_access_allowed(request, 'XC_API'): + return JsonResponse({'error': 'Forbidden'}, status=403) + + user = xc_get_user(request) + + if user is None: + return JsonResponse({'error': 'Unauthorized'}, status=401) + + return JsonResponse(xc_get_info(request, True)) + + +def xc_get(request): + if not network_access_allowed(request, 'XC_API'): + return JsonResponse({'error': 'Forbidden'}, status=403) + + action = request.GET.get("action") + user = xc_get_user(request) + + if user is None: + return JsonResponse({'error': 'Unauthorized'}, status=401) + + return generate_m3u(request, None, user) + + +def xc_xmltv(request): + if not network_access_allowed(request, 'XC_API'): + 
return JsonResponse({'error': 'Forbidden'}, status=403) + + user = xc_get_user(request) + + if user is None: + return JsonResponse({'error': 'Unauthorized'}, status=401) + + return generate_epg(request, None, user) + + +def xc_get_live_categories(user): + from django.db.models import Min + response = [] + + if user.user_level == 0: + user_profile_count = user.channel_profiles.count() + + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channel groups + channel_groups = ChannelGroup.objects.filter( + channels__isnull=False, channels__user_level__lte=user.user_level + ).distinct().annotate(min_channel_number=Min('channels__channel_number')).order_by('min_channel_number') + else: + # User has specific limited profiles assigned + filters = { + "channels__channelprofilemembership__enabled": True, + "channels__user_level": 0, + "channels__channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + channel_groups = ChannelGroup.objects.filter(**filters).distinct().annotate(min_channel_number=Min('channels__channel_number')).order_by('min_channel_number') + else: + channel_groups = ChannelGroup.objects.filter( + channels__isnull=False, channels__user_level__lte=user.user_level + ).distinct().annotate(min_channel_number=Min('channels__channel_number')).order_by('min_channel_number') + + for group in channel_groups: + response.append( + { + "category_id": str(group.id), + "category_name": group.name, + "parent_id": 0, + } + ) + + return response + + +def xc_get_live_streams(request, user, category_id=None): + streams = [] + + if user.user_level == 0: + user_profile_count = user.channel_profiles.count() + + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channels based on user_level + filters = {"user_level__lte": user.user_level} + if category_id is not None: + filters["channel_group__id"] = category_id + channels = Channel.objects.filter(**filters).order_by("channel_number") + else: + # User has specific limited profiles assigned + filters = { + "channelprofilemembership__enabled": True, + "user_level__lte": user.user_level, + "channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + if category_id is not None: + filters["channel_group__id"] = category_id + channels = Channel.objects.filter(**filters).distinct().order_by("channel_number") + else: + if not category_id: + channels = Channel.objects.filter(user_level__lte=user.user_level).order_by("channel_number") + else: + channels = Channel.objects.filter( + channel_group__id=category_id, user_level__lte=user.user_level + ).order_by("channel_number") + + for channel in channels: + streams.append( + { + "num": int(channel.channel_number) if channel.channel_number.is_integer() else channel.channel_number, + "name": channel.name, + "stream_type": "live", + "stream_id": channel.id, + "stream_icon": ( + None + if not channel.logo + else build_absolute_uri_with_port( + request, + reverse("api:channels:logo-cache", args=[channel.logo.id]) + ) + ), + "epg_channel_id": str(int(channel.channel_number)) if channel.channel_number.is_integer() else str(channel.channel_number), + "added": int(channel.created_at.timestamp()), + "is_adult": 0, + "category_id": str(channel.channel_group.id), + "category_ids": [channel.channel_group.id], + "custom_sid": None, + "tv_archive": 0, + "direct_source": "", + "tv_archive_duration": 0, + } + ) + + 
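Stream entries report channel_number as an int when it is a whole number, so clients see 5 rather than 5.0, while fractional sub-channel numbers pass through unchanged. A tiny sketch of that normalization (helper name is illustrative):

def normalize_channel_number(number):
    """Return an int for whole channel numbers, otherwise keep the float (e.g. 5.1 sub-channels)."""
    return int(number) if float(number).is_integer() else number

assert normalize_channel_number(5.0) == 5
assert normalize_channel_number(5.1) == 5.1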
return streams + + +def xc_get_epg(request, user, short=False): + channel_id = request.GET.get('stream_id') + if not channel_id: + raise Http404() + + channel = None + if user.user_level < 10: + user_profile_count = user.channel_profiles.count() + + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channels based on user_level + channel = Channel.objects.filter( + id=channel_id, + user_level__lte=user.user_level + ).first() + else: + # User has specific limited profiles assigned + filters = { + "id": channel_id, + "channelprofilemembership__enabled": True, + "user_level__lte": user.user_level, + "channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + channel = Channel.objects.filter(**filters).distinct().first() + + if not channel: + raise Http404() + else: + channel = get_object_or_404(Channel, id=channel_id) + + if not channel: + raise Http404() + + limit = request.GET.get('limit', 4) + if channel.epg_data: + # Check if this is a dummy EPG that generates on-demand + if channel.epg_data.epg_source and channel.epg_data.epg_source.source_type == 'dummy': + if not channel.epg_data.programs.exists(): + # Generate on-demand using custom patterns + programs = generate_dummy_programs( + channel_id=channel_id, + channel_name=channel.name, + epg_source=channel.epg_data.epg_source ) else: - # Return all programs if days=0 or not specified - programs = channel.epg_data.programs.all() + # Has stored programs, use them + if short == False: + programs = channel.epg_data.programs.filter( + start_time__gte=django_timezone.now() + ).order_by('start_time') + else: + programs = channel.epg_data.programs.all().order_by('start_time')[:limit] + else: + # Regular EPG with stored programs + if short == False: + programs = channel.epg_data.programs.filter( + start_time__gte=django_timezone.now() + ).order_by('start_time') + else: + programs = channel.epg_data.programs.all().order_by('start_time')[:limit] + else: + # No EPG data assigned, generate default dummy + programs = generate_dummy_programs(channel_id=channel_id, channel_name=channel.name, epg_source=None) - for prog in programs: - start_str = prog.start_time.strftime("%Y%m%d%H%M%S %z") - stop_str = prog.end_time.strftime("%Y%m%d%H%M%S %z") - xml_lines.append(f' ') - xml_lines.append(f' {html.escape(prog.title)}') + output = {"epg_listings": []} + for program in programs: + id = "0" + epg_id = "0" + title = program['title'] if isinstance(program, dict) else program.title + description = program['description'] if isinstance(program, dict) else program.description - # Add subtitle if available - if prog.sub_title: - xml_lines.append(f' {html.escape(prog.sub_title)}') + start = program["start_time"] if isinstance(program, dict) else program.start_time + end = program["end_time"] if isinstance(program, dict) else program.end_time - # Add description if available - if prog.description: - xml_lines.append(f' {html.escape(prog.description)}') + program_output = { + "id": f"{id}", + "epg_id": f"{epg_id}", + "title": base64.b64encode(title.encode()).decode(), + "lang": "", + "start": start.strftime("%Y%m%d%H%M%S"), + "end": end.strftime("%Y%m%d%H%M%S"), + "description": base64.b64encode(description.encode()).decode(), + "channel_id": int(channel.channel_number) if channel.channel_number.is_integer() else channel.channel_number, + "start_timestamp": int(start.timestamp()), + "stop_timestamp": int(end.timestamp()), + "stream_id": f"{channel_id}", + } - # 
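The get_short_epg and get_simple_data_table listings carry base64-encoded title and description plus both formatted times and Unix timestamps, as built above. A self-contained sketch of one listing entry with sample values (field names follow the handler, the helper itself is illustrative):

import base64
from datetime import datetime, timedelta, timezone

def to_epg_listing(title, description, start, end, channel_number, stream_id):
    """Build one XC-style EPG listing with base64-encoded text fields."""
    return {
        "title": base64.b64encode(title.encode()).decode(),
        "description": base64.b64encode(description.encode()).decode(),
        "start": start.strftime("%Y%m%d%H%M%S"),
        "end": end.strftime("%Y%m%d%H%M%S"),
        "start_timestamp": int(start.timestamp()),
        "stop_timestamp": int(end.timestamp()),
        "channel_id": channel_number,
        "stream_id": str(stream_id),
    }

now = datetime.now(timezone.utc)
listing = to_epg_listing("News", "Hourly update", now, now + timedelta(hours=1), 5, 42)
assert base64.b64decode(listing["title"]).decode() == "News"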
Process custom properties if available - if prog.custom_properties: - try: - import json - custom_data = json.loads(prog.custom_properties) + if short == False: + program_output["now_playing"] = 1 if start <= django_timezone.now() <= end else 0 + program_output["has_archive"] = "0" - # Add categories if available - if 'categories' in custom_data and custom_data['categories']: - for category in custom_data['categories']: - xml_lines.append(f' {html.escape(category)}') + output['epg_listings'].append(program_output) - # Handle episode numbering - multiple formats supported - # Standard episode number if available - if 'episode' in custom_data: - xml_lines.append(f' E{custom_data["episode"]}') + return output - # Handle onscreen episode format (like S06E128) - if 'onscreen_episode' in custom_data: - xml_lines.append(f' {html.escape(custom_data["onscreen_episode"])}') - # Handle dd_progid format - if 'dd_progid' in custom_data: - xml_lines.append(f' {html.escape(custom_data["dd_progid"])}') +def xc_get_vod_categories(user): + """Get VOD categories for XtreamCodes API""" + from apps.vod.models import VODCategory, M3UMovieRelation - # Add season and episode numbers in xmltv_ns format if available - if 'season' in custom_data and 'episode' in custom_data: - season = int(custom_data['season']) - 1 if str(custom_data['season']).isdigit() else 0 - episode = int(custom_data['episode']) - 1 if str(custom_data['episode']).isdigit() else 0 - xml_lines.append(f' {season}.{episode}.') + response = [] - # Add rating if available - if 'rating' in custom_data: - rating_system = custom_data.get('rating_system', 'TV Parental Guidelines') - xml_lines.append(f' ') - xml_lines.append(f' {html.escape(custom_data["rating"])}') - xml_lines.append(f' ') + # All authenticated users get access to VOD from all active M3U accounts + categories = VODCategory.objects.filter( + category_type='movie', + m3umovierelation__m3u_account__is_active=True + ).distinct().order_by(Lower("name")) - # Add actors/directors/writers if available - if 'credits' in custom_data: - xml_lines.append(f' ') - for role, people in custom_data['credits'].items(): - if isinstance(people, list): - for person in people: - xml_lines.append(f' <{role}>{html.escape(person)}') - else: - xml_lines.append(f' <{role}>{html.escape(people)}') - xml_lines.append(f' ') + for category in categories: + response.append({ + "category_id": str(category.id), + "category_name": category.name, + "parent_id": 0, + }) - # Add program date/year if available - if 'year' in custom_data: - xml_lines.append(f' {html.escape(custom_data["year"])}') - - # Add country if available - if 'country' in custom_data: - xml_lines.append(f' {html.escape(custom_data["country"])}') - - # Add icon if available - if 'icon' in custom_data: - xml_lines.append(f' ') - - # Add special flags as proper tags - if custom_data.get('previously_shown', False): - xml_lines.append(f' ') - - if custom_data.get('premiere', False): - xml_lines.append(f' ') - - if custom_data.get('new', False): - xml_lines.append(f' ') - - if custom_data.get('live', False): - xml_lines.append(f' ') - - except Exception as e: - xml_lines.append(f' ') - - xml_lines.append(' ') - - xml_lines.append('') - xml_content = "\n".join(xml_lines) - - response = HttpResponse(xml_content, content_type="application/xml") - response['Content-Disposition'] = 'attachment; filename="epg.xml"' return response + + +def xc_get_vod_streams(request, user, category_id=None): + """Get VOD streams (movies) for XtreamCodes API""" + from apps.vod.models 
import Movie, M3UMovieRelation + from django.db.models import Prefetch + + streams = [] + + # All authenticated users get access to VOD from all active M3U accounts + filters = {"m3u_relations__m3u_account__is_active": True} + + if category_id: + filters["m3u_relations__category_id"] = category_id + + # Optimize with prefetch_related to eliminate N+1 queries + # This loads all relations in a single query instead of one per movie + movies = Movie.objects.filter(**filters).select_related('logo').prefetch_related( + Prefetch( + 'm3u_relations', + queryset=M3UMovieRelation.objects.filter( + m3u_account__is_active=True + ).select_related('m3u_account', 'category').order_by('-m3u_account__priority', 'id'), + to_attr='active_relations' + ) + ).distinct() + + for movie in movies: + # Get the first (highest priority) relation from prefetched data + # This avoids the N+1 query problem entirely + if hasattr(movie, 'active_relations') and movie.active_relations: + relation = movie.active_relations[0] + else: + # Fallback - should rarely be needed with proper prefetching + continue + + streams.append({ + "num": movie.id, + "name": movie.name, + "stream_type": "movie", + "stream_id": movie.id, + "stream_icon": ( + None if not movie.logo + else build_absolute_uri_with_port( + request, + reverse("api:vod:vodlogo-cache", args=[movie.logo.id]) + ) + ), + #'stream_icon': movie.logo.url if movie.logo else '', + "rating": movie.rating or "0", + "rating_5based": round(float(movie.rating or 0) / 2, 2) if movie.rating else 0, + "added": str(int(movie.created_at.timestamp())), + "is_adult": 0, + "tmdb_id": movie.tmdb_id or "", + "imdb_id": movie.imdb_id or "", + "trailer": (movie.custom_properties or {}).get('trailer') or "", + "category_id": str(relation.category.id) if relation.category else "0", + "category_ids": [int(relation.category.id)] if relation.category else [], + "container_extension": relation.container_extension or "mp4", + "custom_sid": None, + "direct_source": "", + }) + + return streams + + +def xc_get_series_categories(user): + """Get series categories for XtreamCodes API""" + from apps.vod.models import VODCategory, M3USeriesRelation + + response = [] + + # All authenticated users get access to series from all active M3U accounts + categories = VODCategory.objects.filter( + category_type='series', + m3useriesrelation__m3u_account__is_active=True + ).distinct().order_by(Lower("name")) + + for category in categories: + response.append({ + "category_id": str(category.id), + "category_name": category.name, + "parent_id": 0, + }) + + return response + + +def xc_get_series(request, user, category_id=None): + """Get series list for XtreamCodes API""" + from apps.vod.models import M3USeriesRelation + + series_list = [] + + # All authenticated users get access to series from all active M3U accounts + filters = {"m3u_account__is_active": True} + + if category_id: + filters["category_id"] = category_id + + # Get series relations instead of series directly + series_relations = M3USeriesRelation.objects.filter(**filters).select_related( + 'series', 'series__logo', 'category', 'm3u_account' + ) + + for relation in series_relations: + series = relation.series + series_list.append({ + "num": relation.id, # Use relation ID + "name": series.name, + "series_id": relation.id, # Use relation ID + "cover": ( + None if not series.logo + else build_absolute_uri_with_port( + request, + reverse("api:vod:vodlogo-cache", args=[series.logo.id]) + ) + ), + "plot": series.description or "", + "cast": 
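The Prefetch with to_attr loads every active relation in one query, ordered by account priority, so the best relation per movie is chosen in memory rather than with a query per movie. A plain-Python illustration of that selection step (the data and helper are illustrative, not the ORM code above):

from collections import defaultdict

def pick_best_relations(relations):
    """Group relations by movie id, then keep the highest-priority one per movie,
    the in-memory equivalent of selecting from prefetched, ordered relations."""
    by_movie = defaultdict(list)
    for rel in relations:
        by_movie[rel["movie_id"]].append(rel)
    return {
        movie_id: max(rels, key=lambda r: r["priority"])
        for movie_id, rels in by_movie.items()
    }

relations = [
    {"movie_id": 1, "priority": 1, "account": "a"},
    {"movie_id": 1, "priority": 5, "account": "b"},
    {"movie_id": 2, "priority": 3, "account": "c"},
]
assert pick_best_relations(relations)[1]["account"] == "b"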
series.custom_properties.get('cast', '') if series.custom_properties else "", + "director": series.custom_properties.get('director', '') if series.custom_properties else "", + "genre": series.genre or "", + "release_date": series.custom_properties.get('release_date', str(series.year) if series.year else "") if series.custom_properties else (str(series.year) if series.year else ""), + "releaseDate": series.custom_properties.get('release_date', str(series.year) if series.year else "") if series.custom_properties else (str(series.year) if series.year else ""), + "last_modified": str(int(relation.updated_at.timestamp())), + "rating": str(series.rating or "0"), + "rating_5based": str(round(float(series.rating or 0) / 2, 2)) if series.rating else "0", + "backdrop_path": series.custom_properties.get('backdrop_path', []) if series.custom_properties else [], + "youtube_trailer": series.custom_properties.get('youtube_trailer', '') if series.custom_properties else "", + "episode_run_time": series.custom_properties.get('episode_run_time', '') if series.custom_properties else "", + "category_id": str(relation.category.id) if relation.category else "0", + "category_ids": [int(relation.category.id)] if relation.category else [], + }) + + return series_list + + +def xc_get_series_info(request, user, series_id): + """Get detailed series information including episodes""" + from apps.vod.models import M3USeriesRelation, M3UEpisodeRelation + + if not series_id: + raise Http404() + + # All authenticated users get access to series from all active M3U accounts + filters = {"id": series_id, "m3u_account__is_active": True} + + try: + series_relation = M3USeriesRelation.objects.select_related('series', 'series__logo').get(**filters) + series = series_relation.series + except M3USeriesRelation.DoesNotExist: + raise Http404() + + # Check if we need to refresh detailed info (similar to vod api_views pattern) + try: + should_refresh = ( + not series_relation.last_episode_refresh or + series_relation.last_episode_refresh < django_timezone.now() - timedelta(hours=24) + ) + + # Check if detailed data has been fetched + custom_props = series_relation.custom_properties or {} + episodes_fetched = custom_props.get('episodes_fetched', False) + detailed_fetched = custom_props.get('detailed_fetched', False) + + # Force refresh if episodes/details have never been fetched or time interval exceeded + if not episodes_fetched or not detailed_fetched or should_refresh: + from apps.vod.tasks import refresh_series_episodes + account = series_relation.m3u_account + if account and account.is_active: + refresh_series_episodes(account, series, series_relation.external_series_id) + # Refresh objects from database after task completion + series.refresh_from_db() + series_relation.refresh_from_db() + + except Exception as e: + logger.error(f"Error refreshing series data for relation {series_relation.id}: {str(e)}") + + # Get episodes for this series from the same M3U account + episode_relations = M3UEpisodeRelation.objects.filter( + episode__series=series, + m3u_account=series_relation.m3u_account + ).select_related('episode').order_by('episode__season_number', 'episode__episode_number') + + # Group episodes by season + seasons = {} + for relation in episode_relations: + episode = relation.episode + season_num = episode.season_number or 1 + if season_num not in seasons: + seasons[season_num] = [] + + # Try to get the highest priority related M3UEpisodeRelation for this episode (for video/audio/bitrate) + from apps.vod.models import 
M3UEpisodeRelation + first_relation = M3UEpisodeRelation.objects.filter( + episode=episode + ).select_related('m3u_account').order_by('-m3u_account__priority', 'id').first() + video = audio = bitrate = None + if first_relation and first_relation.custom_properties: + info = first_relation.custom_properties.get('info') + if info and isinstance(info, dict): + info_info = info.get('info') + if info_info and isinstance(info_info, dict): + video = info_info.get('video', {}) + audio = info_info.get('audio', {}) + bitrate = info_info.get('bitrate', 0) + if video is None: + video = episode.custom_properties.get('video', {}) if episode.custom_properties else {} + if audio is None: + audio = episode.custom_properties.get('audio', {}) if episode.custom_properties else {} + if bitrate is None: + bitrate = episode.custom_properties.get('bitrate', 0) if episode.custom_properties else 0 + + seasons[season_num].append({ + "id": episode.id, + "season": season_num, + "episode_num": episode.episode_number or 0, + "title": episode.name, + "container_extension": relation.container_extension or "mp4", + "added": str(int(relation.created_at.timestamp())), + "custom_sid": None, + "direct_source": "", + "info": { + "id": int(episode.id), + "name": episode.name, + "overview": episode.description or "", + "crew": str(episode.custom_properties.get('crew', "") if episode.custom_properties else ""), + "directed_by": episode.custom_properties.get('director', '') if episode.custom_properties else "", + "imdb_id": episode.imdb_id or "", + "air_date": f"{episode.air_date}" if episode.air_date else "", + "backdrop_path": episode.custom_properties.get('backdrop_path', []) if episode.custom_properties else [], + "movie_image": episode.custom_properties.get('movie_image', '') if episode.custom_properties else "", + "rating": float(episode.rating or 0), + "release_date": f"{episode.air_date}" if episode.air_date else "", + "duration_secs": (episode.duration_secs or 0), + "duration": format_duration_hms(episode.duration_secs), + "video": video, + "audio": audio, + "bitrate": bitrate, + } + }) + + # Build response using potentially refreshed data + series_data = { + 'name': series.name, + 'description': series.description or '', + 'year': series.year, + 'genre': series.genre or '', + 'rating': series.rating or '0', + 'cast': '', + 'director': '', + 'youtube_trailer': '', + 'episode_run_time': '', + 'backdrop_path': [], + } + + # Add detailed info from custom_properties if available + try: + if series.custom_properties: + custom_data = series.custom_properties + series_data.update({ + 'cast': custom_data.get('cast', ''), + 'director': custom_data.get('director', ''), + 'youtube_trailer': custom_data.get('youtube_trailer', ''), + 'episode_run_time': custom_data.get('episode_run_time', ''), + 'backdrop_path': custom_data.get('backdrop_path', []), + }) + + # Check relation custom_properties for detailed_info + if series_relation.custom_properties and 'detailed_info' in series_relation.custom_properties: + detailed_info = series_relation.custom_properties['detailed_info'] + + # Override with detailed_info values where available + for key in ['name', 'description', 'year', 'genre', 'rating']: + if detailed_info.get(key): + series_data[key] = detailed_info[key] + + # Handle plot vs description + if detailed_info.get('plot'): + series_data['description'] = detailed_info['plot'] + elif detailed_info.get('description'): + series_data['description'] = detailed_info['description'] + + # Update additional fields from detailed info + 
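Episodes are bucketed into a seasons dict keyed by season number, with a missing season treated as season 1. A compact sketch of that grouping using defaultdict (field names are illustrative):

from collections import defaultdict

def group_by_season(episodes):
    """Group episode dicts by season number, defaulting missing seasons to 1."""
    seasons = defaultdict(list)
    for ep in episodes:
        seasons[ep.get("season_number") or 1].append(ep)
    for eps in seasons.values():  # stable ordering within each season
        eps.sort(key=lambda e: e.get("episode_number") or 0)
    return dict(seasons)

episodes = [
    {"season_number": 2, "episode_number": 1, "title": "b"},
    {"season_number": None, "episode_number": 3, "title": "a"},
]
assert sorted(group_by_season(episodes)) == [1, 2]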
series_data.update({ + 'cast': detailed_info.get('cast', series_data['cast']), + 'director': detailed_info.get('director', series_data['director']), + 'youtube_trailer': detailed_info.get('youtube_trailer', series_data['youtube_trailer']), + 'episode_run_time': detailed_info.get('episode_run_time', series_data['episode_run_time']), + 'backdrop_path': detailed_info.get('backdrop_path', series_data['backdrop_path']), + }) + + except Exception as e: + logger.error(f"Error parsing series custom_properties: {str(e)}") + + seasons_list = [ + {"season_number": int(season_num), "name": f"Season {season_num}"} + for season_num in sorted(seasons.keys(), key=lambda x: int(x)) + ] + + info = { + 'seasons': seasons_list, + "info": { + "name": series_data['name'], + "cover": ( + None if not series.logo + else build_absolute_uri_with_port( + request, + reverse("api:vod:vodlogo-cache", args=[series.logo.id]) + ) + ), + "plot": series_data['description'], + "cast": series_data['cast'], + "director": series_data['director'], + "genre": series_data['genre'], + "release_date": series.custom_properties.get('release_date', str(series.year) if series.year else "") if series.custom_properties else (str(series.year) if series.year else ""), + "releaseDate": series.custom_properties.get('release_date', str(series.year) if series.year else "") if series.custom_properties else (str(series.year) if series.year else ""), + "added": str(int(series_relation.created_at.timestamp())), + "last_modified": str(int(series_relation.updated_at.timestamp())), + "rating": str(series_data['rating']), + "rating_5based": str(round(float(series_data['rating'] or 0) / 2, 2)) if series_data['rating'] else "0", + "backdrop_path": series_data['backdrop_path'], + "youtube_trailer": series_data['youtube_trailer'], + "imdb": str(series.imdb_id) if series.imdb_id else "", + "tmdb": str(series.tmdb_id) if series.tmdb_id else "", + "episode_run_time": str(series_data['episode_run_time']), + "category_id": str(series_relation.category.id) if series_relation.category else "0", + "category_ids": [int(series_relation.category.id)] if series_relation.category else [], + }, + "episodes": dict(seasons) + } + return info + + +def xc_get_vod_info(request, user, vod_id): + """Get detailed VOD (movie) information""" + from apps.vod.models import M3UMovieRelation + from django.utils import timezone + from datetime import timedelta + + if not vod_id: + raise Http404() + + # All authenticated users get access to VOD from all active M3U accounts + filters = {"movie_id": vod_id, "m3u_account__is_active": True} + + try: + # Order by account priority to get the best relation when multiple exist + movie_relation = M3UMovieRelation.objects.select_related('movie', 'movie__logo').filter(**filters).order_by('-m3u_account__priority', 'id').first() + if not movie_relation: + raise Http404() + movie = movie_relation.movie + except (M3UMovieRelation.DoesNotExist, M3UMovieRelation.MultipleObjectsReturned): + raise Http404() + + # Initialize basic movie data first + movie_data = { + 'name': movie.name, + 'description': movie.description or '', + 'year': movie.year, + 'genre': movie.genre or '', + 'rating': movie.rating or 0, + 'tmdb_id': movie.tmdb_id or '', + 'imdb_id': movie.imdb_id or '', + 'director': '', + 'actors': '', + 'country': '', + 'release_date': '', + 'youtube_trailer': '', + 'backdrop_path': [], + 'cover_big': '', + 'bitrate': 0, + 'video': {}, + 'audio': {}, + } + + # Duplicate the provider_info logic for detailed information + try: + # Check if we need to 
refresh detailed info (same logic as provider_info) + should_refresh = ( + not movie_relation.last_advanced_refresh or + movie_relation.last_advanced_refresh < timezone.now() - timedelta(hours=24) + ) + + if should_refresh: + # Trigger refresh of detailed info + from apps.vod.tasks import refresh_movie_advanced_data + refresh_movie_advanced_data(movie_relation.id) + # Refresh objects from database after task completion + movie.refresh_from_db() + movie_relation.refresh_from_db() + + # Add detailed info from custom_properties if available + if movie.custom_properties: + custom_data = movie.custom_properties or {} + + # Extract detailed info + #detailed_info = custom_data.get('detailed_info', {}) + detailed_info = movie_relation.custom_properties.get('detailed_info', {}) + # Update movie_data with detailed info + movie_data.update({ + 'director': custom_data.get('director') or detailed_info.get('director', ''), + 'actors': custom_data.get('actors') or detailed_info.get('actors', ''), + 'country': custom_data.get('country') or detailed_info.get('country', ''), + 'release_date': custom_data.get('release_date') or detailed_info.get('release_date') or detailed_info.get('releasedate', ''), + 'youtube_trailer': custom_data.get('youtube_trailer') or detailed_info.get('youtube_trailer') or detailed_info.get('trailer', ''), + 'backdrop_path': custom_data.get('backdrop_path') or detailed_info.get('backdrop_path', []), + 'cover_big': detailed_info.get('cover_big', ''), + 'bitrate': detailed_info.get('bitrate', 0), + 'video': detailed_info.get('video', {}), + 'audio': detailed_info.get('audio', {}), + }) + + # Override with detailed_info values where available + for key in ['name', 'description', 'year', 'genre', 'rating', 'tmdb_id', 'imdb_id']: + if detailed_info.get(key): + movie_data[key] = detailed_info[key] + + # Handle plot vs description + if detailed_info.get('plot'): + movie_data['description'] = detailed_info['plot'] + elif detailed_info.get('description'): + movie_data['description'] = detailed_info['description'] + + except Exception as e: + logger.error(f"Failed to process movie data: {e}") + + # Transform API response to XtreamCodes format + info = { + "info": { + "name": movie_data.get('name', movie.name), + "o_name": movie_data.get('name', movie.name), + "cover_big": ( + None if not movie.logo + else build_absolute_uri_with_port( + request, + reverse("api:vod:vodlogo-cache", args=[movie.logo.id]) + ) + ), + "movie_image": ( + None if not movie.logo + else build_absolute_uri_with_port( + request, + reverse("api:vod:vodlogo-cache", args=[movie.logo.id]) + ) + ), + 'description': movie_data.get('description', ''), + 'plot': movie_data.get('description', ''), + 'year': movie_data.get('year', ''), + 'release_date': movie_data.get('release_date', ''), + 'genre': movie_data.get('genre', ''), + 'director': movie_data.get('director', ''), + 'actors': movie_data.get('actors', ''), + 'cast': movie_data.get('actors', ''), + 'country': movie_data.get('country', ''), + 'rating': movie_data.get('rating', 0), + 'imdb_id': movie_data.get('imdb_id', ''), + "tmdb_id": movie_data.get('tmdb_id', ''), + 'youtube_trailer': movie_data.get('youtube_trailer', ''), + 'backdrop_path': movie_data.get('backdrop_path', []), + 'cover': movie_data.get('cover_big', ''), + 'bitrate': movie_data.get('bitrate', 0), + 'video': movie_data.get('video', {}), + 'audio': movie_data.get('audio', {}), + }, + "movie_data": { + "stream_id": movie.id, + "name": movie.name, + "added": int(movie_relation.created_at.timestamp()), + 
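Detailed info is refreshed when it has never been fetched or when the last refresh is older than 24 hours. A minimal staleness check in the same spirit (helper name assumed, not taken from the patch):

from datetime import datetime, timedelta, timezone

def needs_refresh(last_refresh, max_age_hours=24):
    """True when detailed info was never fetched or is older than max_age_hours."""
    if last_refresh is None:
        return True
    return last_refresh < datetime.now(timezone.utc) - timedelta(hours=max_age_hours)

assert needs_refresh(None)
assert needs_refresh(datetime.now(timezone.utc) - timedelta(days=2))
assert not needs_refresh(datetime.now(timezone.utc) - timedelta(hours=1))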
"category_id": str(movie_relation.category.id) if movie_relation.category else "0", + "category_ids": [int(movie_relation.category.id)] if movie_relation.category else [], + "container_extension": movie_relation.container_extension or "mp4", + "custom_sid": None, + "direct_source": "", + } + } + + return info + + +def xc_movie_stream(request, username, password, stream_id, extension): + """Handle XtreamCodes movie streaming requests""" + from apps.vod.models import M3UMovieRelation + + user = get_object_or_404(User, username=username) + + custom_properties = user.custom_properties or {} + + if "xc_password" not in custom_properties: + return JsonResponse({"error": "Invalid credentials"}, status=401) + + if custom_properties["xc_password"] != password: + return JsonResponse({"error": "Invalid credentials"}, status=401) + + # All authenticated users get access to VOD from all active M3U accounts + filters = {"movie_id": stream_id, "m3u_account__is_active": True} + + try: + # Order by account priority to get the best relation when multiple exist + movie_relation = M3UMovieRelation.objects.select_related('movie').filter(**filters).order_by('-m3u_account__priority', 'id').first() + if not movie_relation: + return JsonResponse({"error": "Movie not found"}, status=404) + except (M3UMovieRelation.DoesNotExist, M3UMovieRelation.MultipleObjectsReturned): + return JsonResponse({"error": "Movie not found"}, status=404) + + # Redirect to the VOD proxy endpoint + from django.http import HttpResponseRedirect + from django.urls import reverse + + vod_url = reverse('proxy:vod_proxy:vod_stream', kwargs={ + 'content_type': 'movie', + 'content_id': movie_relation.movie.uuid + }) + + return HttpResponseRedirect(vod_url) + + +def xc_series_stream(request, username, password, stream_id, extension): + """Handle XtreamCodes series/episode streaming requests""" + from apps.vod.models import M3UEpisodeRelation + + user = get_object_or_404(User, username=username) + + custom_properties = user.custom_properties or {} + + if "xc_password" not in custom_properties: + return JsonResponse({"error": "Invalid credentials"}, status=401) + + if custom_properties["xc_password"] != password: + return JsonResponse({"error": "Invalid credentials"}, status=401) + + # All authenticated users get access to series/episodes from all active M3U accounts + filters = {"episode_id": stream_id, "m3u_account__is_active": True} + + try: + episode_relation = M3UEpisodeRelation.objects.select_related('episode').get(**filters) + except M3UEpisodeRelation.DoesNotExist: + return JsonResponse({"error": "Episode not found"}, status=404) + + # Redirect to the VOD proxy endpoint + from django.http import HttpResponseRedirect + from django.urls import reverse + + vod_url = reverse('proxy:vod_proxy:vod_stream', kwargs={ + 'content_type': 'episode', + 'content_id': episode_relation.episode.uuid + }) + + return HttpResponseRedirect(vod_url) + + +def get_host_and_port(request): + """ + Returns (host, port) for building absolute URIs. + - Prefers X-Forwarded-Host/X-Forwarded-Port (nginx). + - Falls back to Host header. + - Returns None for port if using standard ports (80/443) to omit from URLs. + - In dev, uses 5656 as a guess if port cannot be determined. + """ + # Determine the scheme first - needed for standard port detection + scheme = request.META.get("HTTP_X_FORWARDED_PROTO", request.scheme) + standard_port = "443" if scheme == "https" else "80" + + # 1. 
Try X-Forwarded-Host (may include port) - set by our nginx + xfh = request.META.get("HTTP_X_FORWARDED_HOST") + if xfh: + if ":" in xfh: + host, port = xfh.split(":", 1) + # Omit standard ports from URLs, or omit if port doesn't match standard for scheme + # (e.g., HTTPS but port is 9191 = behind external reverse proxy) + if port == standard_port: + return host, None + # If port doesn't match standard and X-Forwarded-Proto is set, likely behind external RP + if request.META.get("HTTP_X_FORWARDED_PROTO"): + host = xfh.split(":")[0] # Strip port, will check for proper port below + else: + return host, port + else: + host = xfh + + # Check for X-Forwarded-Port header (if we didn't already find a valid port) + port = request.META.get("HTTP_X_FORWARDED_PORT") + if port: + # Omit standard ports from URLs + return host, None if port == standard_port else port + # If X-Forwarded-Proto is set but no valid port, assume standard + if request.META.get("HTTP_X_FORWARDED_PROTO"): + return host, None + + # 2. Try Host header + raw_host = request.get_host() + if ":" in raw_host: + host, port = raw_host.split(":", 1) + # Omit standard ports from URLs + return host, None if port == standard_port else port + else: + host = raw_host + + # 3. Check if we're behind a reverse proxy (X-Forwarded-Proto or X-Forwarded-For present) + # If so, assume standard port for the scheme (don't trust SERVER_PORT in this case) + if request.META.get("HTTP_X_FORWARDED_PROTO") or request.META.get("HTTP_X_FORWARDED_FOR"): + return host, None + + # 4. Try SERVER_PORT from META (only if NOT behind reverse proxy) + port = request.META.get("SERVER_PORT") + if port: + # Omit standard ports from URLs + return host, None if port == standard_port else port + + # 5. Dev fallback: guess port 5656 + if os.environ.get("DISPATCHARR_ENV") == "dev" or host in ("localhost", "127.0.0.1"): + return host, "5656" + + # 6. Final fallback: assume standard port for scheme (omit from URL) + return host, None + +def build_absolute_uri_with_port(request, path): + """ + Build an absolute URI with optional port. + Port is omitted from URL if None (standard port for scheme). + """ + host, port = get_host_and_port(request) + scheme = request.META.get("HTTP_X_FORWARDED_PROTO", request.scheme) + + if port: + return f"{scheme}://{host}:{port}{path}" + else: + return f"{scheme}://{host}{path}" + +def format_duration_hms(seconds): + """ + Format a duration in seconds as HH:MM:SS zero-padded string. 
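get_host_and_port prefers the X-Forwarded-* headers and drops the port when it matches the standard one for the scheme. A simplified, self-contained sketch of that precedence (it ignores X-Forwarded-Port and the dev fallback, and the helper name is illustrative):

def absolute_uri(headers, scheme, host, path):
    """Prefer X-Forwarded-Host/Proto and omit the port when it is standard for the scheme."""
    scheme = headers.get("X-Forwarded-Proto", scheme)
    forwarded_host = headers.get("X-Forwarded-Host", host)
    host, _, port = forwarded_host.partition(":")
    standard = "443" if scheme == "https" else "80"
    if not port or port == standard:
        return f"{scheme}://{host}{path}"
    return f"{scheme}://{host}:{port}{path}"

assert absolute_uri({"X-Forwarded-Proto": "https", "X-Forwarded-Host": "tv.example.com:443"},
                    "http", "backend:5656", "/logo/1") == "https://tv.example.com/logo/1"
assert absolute_uri({}, "http", "localhost:9191", "/logo/1") == "http://localhost:9191/logo/1"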
+ """ + seconds = int(seconds or 0) + return f"{seconds//3600:02}:{(seconds%3600)//60:02}:{seconds%60:02}" diff --git a/apps/plugins/__init__.py b/apps/plugins/__init__.py new file mode 100644 index 00000000..22c35396 --- /dev/null +++ b/apps/plugins/__init__.py @@ -0,0 +1,2 @@ +default_app_config = "apps.plugins.apps.PluginsConfig" + diff --git a/apps/plugins/api_urls.py b/apps/plugins/api_urls.py new file mode 100644 index 00000000..a229a07c --- /dev/null +++ b/apps/plugins/api_urls.py @@ -0,0 +1,22 @@ +from django.urls import path +from .api_views import ( + PluginsListAPIView, + PluginReloadAPIView, + PluginSettingsAPIView, + PluginRunAPIView, + PluginEnabledAPIView, + PluginImportAPIView, + PluginDeleteAPIView, +) + +app_name = "plugins" + +urlpatterns = [ + path("plugins/", PluginsListAPIView.as_view(), name="list"), + path("plugins/reload/", PluginReloadAPIView.as_view(), name="reload"), + path("plugins/import/", PluginImportAPIView.as_view(), name="import"), + path("plugins//delete/", PluginDeleteAPIView.as_view(), name="delete"), + path("plugins//settings/", PluginSettingsAPIView.as_view(), name="settings"), + path("plugins//run/", PluginRunAPIView.as_view(), name="run"), + path("plugins//enabled/", PluginEnabledAPIView.as_view(), name="enabled"), +] diff --git a/apps/plugins/api_views.py b/apps/plugins/api_views.py new file mode 100644 index 00000000..0d68fc7d --- /dev/null +++ b/apps/plugins/api_views.py @@ -0,0 +1,306 @@ +import logging +from rest_framework.views import APIView +from rest_framework.response import Response +from rest_framework import status +from rest_framework.decorators import api_view +from django.conf import settings +from django.core.files.uploadedfile import UploadedFile +import io +import os +import zipfile +import shutil +import tempfile +from apps.accounts.permissions import ( + Authenticated, + permission_classes_by_method, +) + +from .loader import PluginManager +from .models import PluginConfig + +logger = logging.getLogger(__name__) + + +class PluginsListAPIView(APIView): + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + + def get(self, request): + pm = PluginManager.get() + # Ensure registry is up-to-date on each request + pm.discover_plugins() + return Response({"plugins": pm.list_plugins()}) + + +class PluginReloadAPIView(APIView): + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + + def post(self, request): + pm = PluginManager.get() + pm.discover_plugins() + return Response({"success": True, "count": len(pm._registry)}) + + +class PluginImportAPIView(APIView): + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + + def post(self, request): + file: UploadedFile = request.FILES.get("file") + if not file: + return Response({"success": False, "error": "Missing 'file' upload"}, status=status.HTTP_400_BAD_REQUEST) + + pm = PluginManager.get() + plugins_dir = pm.plugins_dir + + try: + zf = zipfile.ZipFile(file) + except zipfile.BadZipFile: + return Response({"success": False, "error": "Invalid zip file"}, status=status.HTTP_400_BAD_REQUEST) + + # Extract to a temporary directory first to avoid server reload thrash + tmp_root = tempfile.mkdtemp(prefix="plugin_import_") + try: + file_members = [m for m 
in zf.infolist() if not m.is_dir()] + if not file_members: + shutil.rmtree(tmp_root, ignore_errors=True) + return Response({"success": False, "error": "Archive is empty"}, status=status.HTTP_400_BAD_REQUEST) + + for member in file_members: + name = member.filename + if not name or name.endswith("/"): + continue + # Normalize and prevent path traversal + norm = os.path.normpath(name) + if norm.startswith("..") or os.path.isabs(norm): + shutil.rmtree(tmp_root, ignore_errors=True) + return Response({"success": False, "error": "Unsafe path in archive"}, status=status.HTTP_400_BAD_REQUEST) + dest_path = os.path.join(tmp_root, norm) + os.makedirs(os.path.dirname(dest_path), exist_ok=True) + with zf.open(member, 'r') as src, open(dest_path, 'wb') as dst: + shutil.copyfileobj(src, dst) + + # Find candidate directory containing plugin.py or __init__.py + candidates = [] + for dirpath, dirnames, filenames in os.walk(tmp_root): + has_pluginpy = "plugin.py" in filenames + has_init = "__init__.py" in filenames + if has_pluginpy or has_init: + depth = len(os.path.relpath(dirpath, tmp_root).split(os.sep)) + candidates.append((0 if has_pluginpy else 1, depth, dirpath)) + if not candidates: + shutil.rmtree(tmp_root, ignore_errors=True) + return Response({"success": False, "error": "Invalid plugin: missing plugin.py or package __init__.py"}, status=status.HTTP_400_BAD_REQUEST) + + candidates.sort() + chosen = candidates[0][2] + # Determine plugin key: prefer chosen folder name; if chosen is tmp_root, use zip base name + base_name = os.path.splitext(getattr(file, "name", "plugin"))[0] + plugin_key = os.path.basename(chosen.rstrip(os.sep)) + if chosen.rstrip(os.sep) == tmp_root.rstrip(os.sep): + plugin_key = base_name + plugin_key = plugin_key.replace(" ", "_").lower() + + final_dir = os.path.join(plugins_dir, plugin_key) + if os.path.exists(final_dir): + # If final dir exists but contains a valid plugin, refuse; otherwise clear it + if os.path.exists(os.path.join(final_dir, "plugin.py")) or os.path.exists(os.path.join(final_dir, "__init__.py")): + shutil.rmtree(tmp_root, ignore_errors=True) + return Response({"success": False, "error": f"Plugin '{plugin_key}' already exists"}, status=status.HTTP_400_BAD_REQUEST) + try: + shutil.rmtree(final_dir) + except Exception: + pass + + # Move chosen directory into final location + if chosen.rstrip(os.sep) == tmp_root.rstrip(os.sep): + # Move all contents into final_dir + os.makedirs(final_dir, exist_ok=True) + for item in os.listdir(tmp_root): + shutil.move(os.path.join(tmp_root, item), os.path.join(final_dir, item)) + else: + shutil.move(chosen, final_dir) + # Cleanup temp + shutil.rmtree(tmp_root, ignore_errors=True) + target_dir = final_dir + finally: + try: + shutil.rmtree(tmp_root, ignore_errors=True) + except Exception: + pass + + # Reload discovery and validate plugin entry + pm.discover_plugins() + plugin = pm._registry.get(plugin_key) + if not plugin: + # Cleanup the copied folder to avoid leaving invalid plugin behind + try: + shutil.rmtree(target_dir, ignore_errors=True) + except Exception: + pass + return Response({"success": False, "error": "Invalid plugin: missing Plugin class in plugin.py or __init__.py"}, status=status.HTTP_400_BAD_REQUEST) + + # Extra validation: ensure Plugin.run exists + instance = getattr(plugin, "instance", None) + run_method = getattr(instance, "run", None) + if not callable(run_method): + try: + shutil.rmtree(target_dir, ignore_errors=True) + except Exception: + pass + return Response({"success": False, "error": "Invalid plugin: 
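Each archive member is normalized before extraction and rejected if it is absolute or climbs out of the extraction root, which blocks zip path traversal. A standalone sketch of that safety check (helper name assumed):

import os

def is_safe_member(name: str) -> bool:
    """Reject absolute paths and anything that escapes the extraction root after normalization."""
    if not name or name.endswith("/"):
        return False  # directory entries are handled implicitly
    norm = os.path.normpath(name)
    return not (os.path.isabs(norm) or norm.startswith(".."))

assert is_safe_member("my_plugin/plugin.py")
assert not is_safe_member("../../etc/passwd")
assert not is_safe_member("/etc/passwd")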
Plugin class must define a callable run(action, params, context)"}, status=status.HTTP_400_BAD_REQUEST) + + # Find DB config to return enabled/ever_enabled + try: + cfg = PluginConfig.objects.get(key=plugin_key) + enabled = cfg.enabled + ever_enabled = getattr(cfg, "ever_enabled", False) + except PluginConfig.DoesNotExist: + enabled = False + ever_enabled = False + + return Response({ + "success": True, + "plugin": { + "key": plugin.key, + "name": plugin.name, + "version": plugin.version, + "description": plugin.description, + "enabled": enabled, + "ever_enabled": ever_enabled, + "fields": plugin.fields or [], + "actions": plugin.actions or [], + } + }) + + +class PluginSettingsAPIView(APIView): + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + + def post(self, request, key): + pm = PluginManager.get() + data = request.data or {} + settings = data.get("settings", {}) + try: + updated = pm.update_settings(key, settings) + return Response({"success": True, "settings": updated}) + except Exception as e: + return Response({"success": False, "error": str(e)}, status=status.HTTP_400_BAD_REQUEST) + + +class PluginRunAPIView(APIView): + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + + def post(self, request, key): + pm = PluginManager.get() + action = request.data.get("action") + params = request.data.get("params", {}) + if not action: + return Response({"success": False, "error": "Missing 'action'"}, status=status.HTTP_400_BAD_REQUEST) + + # Respect plugin enabled flag + try: + cfg = PluginConfig.objects.get(key=key) + if not cfg.enabled: + return Response({"success": False, "error": "Plugin is disabled"}, status=status.HTTP_403_FORBIDDEN) + except PluginConfig.DoesNotExist: + return Response({"success": False, "error": "Plugin not found"}, status=status.HTTP_404_NOT_FOUND) + + try: + result = pm.run_action(key, action, params) + return Response({"success": True, "result": result}) + except PermissionError as e: + return Response({"success": False, "error": str(e)}, status=status.HTTP_403_FORBIDDEN) + except Exception as e: + logger.exception("Plugin action failed") + return Response({"success": False, "error": str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR) + + +class PluginEnabledAPIView(APIView): + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return [Authenticated()] + + def post(self, request, key): + enabled = request.data.get("enabled") + if enabled is None: + return Response({"success": False, "error": "Missing 'enabled' boolean"}, status=status.HTTP_400_BAD_REQUEST) + try: + cfg = PluginConfig.objects.get(key=key) + cfg.enabled = bool(enabled) + # Mark that this plugin has been enabled at least once + if cfg.enabled and not cfg.ever_enabled: + cfg.ever_enabled = True + cfg.save(update_fields=["enabled", "ever_enabled", "updated_at"]) + return Response({"success": True, "enabled": cfg.enabled, "ever_enabled": cfg.ever_enabled}) + except PluginConfig.DoesNotExist: + return Response({"success": False, "error": "Plugin not found"}, status=status.HTTP_404_NOT_FOUND) + + +class PluginDeleteAPIView(APIView): + def get_permissions(self): + try: + return [ + perm() for perm in permission_classes_by_method[self.request.method] + ] + except KeyError: + return 
[Authenticated()] + + def delete(self, request, key): + pm = PluginManager.get() + plugins_dir = pm.plugins_dir + target_dir = os.path.join(plugins_dir, key) + # Safety: ensure path inside plugins_dir + abs_plugins = os.path.abspath(plugins_dir) + os.sep + abs_target = os.path.abspath(target_dir) + if not abs_target.startswith(abs_plugins): + return Response({"success": False, "error": "Invalid plugin path"}, status=status.HTTP_400_BAD_REQUEST) + + # Remove files + if os.path.isdir(target_dir): + try: + shutil.rmtree(target_dir) + except Exception as e: + return Response({"success": False, "error": f"Failed to delete plugin files: {e}"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR) + + # Remove DB record + try: + PluginConfig.objects.filter(key=key).delete() + except Exception: + pass + + # Reload registry + pm.discover_plugins() + return Response({"success": True}) diff --git a/apps/plugins/apps.py b/apps/plugins/apps.py new file mode 100644 index 00000000..3ab44cb1 --- /dev/null +++ b/apps/plugins/apps.py @@ -0,0 +1,54 @@ +from django.apps import AppConfig +import os +import sys +from django.db.models.signals import post_migrate + + +class PluginsConfig(AppConfig): + name = "apps.plugins" + verbose_name = "Plugins" + + def ready(self): + """Wire up plugin discovery without hitting the DB during app init. + + - Skip during common management commands that don't need discovery. + - Register post_migrate handler to sync plugin registry to DB after migrations. + - Do an in-memory discovery (no DB) so registry is available early. + """ + try: + # Allow explicit opt-out via env var + if os.environ.get("DISPATCHARR_SKIP_PLUGIN_AUTODISCOVERY", "").lower() in ("1", "true", "yes"): + return + + argv = sys.argv[1:] if len(sys.argv) > 1 else [] + mgmt_cmds_to_skip = { + # Skip immediate discovery for these commands + "makemigrations", "collectstatic", "check", "test", "shell", "showmigrations", + } + if argv and argv[0] in mgmt_cmds_to_skip: + return + + # Run discovery with DB sync after the plugins app has been migrated + def _post_migrate_discover(sender=None, app_config=None, **kwargs): + try: + if app_config and getattr(app_config, 'label', None) != 'plugins': + return + from .loader import PluginManager + PluginManager.get().discover_plugins(sync_db=True) + except Exception: + import logging + logging.getLogger(__name__).exception("Plugin discovery failed in post_migrate") + + post_migrate.connect( + _post_migrate_discover, + dispatch_uid="apps.plugins.post_migrate_discover", + ) + + # Perform non-DB discovery now to populate in-memory registry. 
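The delete view only removes a directory whose absolute path resolves inside the plugins directory, guarding against keys like "../something". A minimal sketch of that containment check (helper name is illustrative):

import os

def is_inside(base_dir: str, target: str) -> bool:
    """True only when target resolves to a path inside base_dir."""
    base = os.path.abspath(base_dir) + os.sep
    return os.path.abspath(os.path.join(base_dir, target)).startswith(base)

assert is_inside("/data/plugins", "my_plugin")
assert not is_inside("/data/plugins", "../secrets")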
+ from .loader import PluginManager + PluginManager.get().discover_plugins(sync_db=False) + except Exception: + # Avoid breaking startup due to plugin errors + import logging + + logging.getLogger(__name__).exception("Plugin discovery wiring failed during app ready") diff --git a/apps/plugins/loader.py b/apps/plugins/loader.py new file mode 100644 index 00000000..5422ae7e --- /dev/null +++ b/apps/plugins/loader.py @@ -0,0 +1,254 @@ +import importlib +import json +import logging +import os +import sys +from dataclasses import dataclass, field +from typing import Any, Dict, List, Optional + +from django.db import transaction + +from .models import PluginConfig + +logger = logging.getLogger(__name__) + + +@dataclass +class LoadedPlugin: + key: str + name: str + version: str = "" + description: str = "" + module: Any = None + instance: Any = None + fields: List[Dict[str, Any]] = field(default_factory=list) + actions: List[Dict[str, Any]] = field(default_factory=list) + + +class PluginManager: + """Singleton manager that discovers and runs plugins from /data/plugins.""" + + _instance: Optional["PluginManager"] = None + + @classmethod + def get(cls) -> "PluginManager": + if not cls._instance: + cls._instance = PluginManager() + return cls._instance + + def __init__(self) -> None: + self.plugins_dir = os.environ.get("DISPATCHARR_PLUGINS_DIR", "/data/plugins") + self._registry: Dict[str, LoadedPlugin] = {} + + # Ensure plugins directory exists + os.makedirs(self.plugins_dir, exist_ok=True) + if self.plugins_dir not in sys.path: + sys.path.append(self.plugins_dir) + + def discover_plugins(self, *, sync_db: bool = True) -> Dict[str, LoadedPlugin]: + if sync_db: + logger.info(f"Discovering plugins in {self.plugins_dir}") + else: + logger.debug(f"Discovering plugins (no DB sync) in {self.plugins_dir}") + self._registry.clear() + + try: + for entry in sorted(os.listdir(self.plugins_dir)): + path = os.path.join(self.plugins_dir, entry) + if not os.path.isdir(path): + continue + + plugin_key = entry.replace(" ", "_").lower() + + try: + self._load_plugin(plugin_key, path) + except Exception: + logger.exception(f"Failed to load plugin '{plugin_key}' from {path}") + + logger.info(f"Discovered {len(self._registry)} plugin(s)") + except FileNotFoundError: + logger.warning(f"Plugins directory not found: {self.plugins_dir}") + + # Sync DB records (optional) + if sync_db: + try: + self._sync_db_with_registry() + except Exception: + # Defer sync if database is not ready (e.g., first startup before migrate) + logger.exception("Deferring plugin DB sync; database not ready yet") + return self._registry + + def _load_plugin(self, key: str, path: str): + # Plugin can be a package and/or contain plugin.py. Prefer plugin.py when present. 
+ has_pkg = os.path.exists(os.path.join(path, "__init__.py")) + has_pluginpy = os.path.exists(os.path.join(path, "plugin.py")) + if not (has_pkg or has_pluginpy): + logger.debug(f"Skipping {path}: no plugin.py or package") + return + + candidate_modules = [] + if has_pluginpy: + candidate_modules.append(f"{key}.plugin") + if has_pkg: + candidate_modules.append(key) + + module = None + plugin_cls = None + last_error = None + for module_name in candidate_modules: + try: + logger.debug(f"Importing plugin module {module_name}") + module = importlib.import_module(module_name) + plugin_cls = getattr(module, "Plugin", None) + if plugin_cls is not None: + break + else: + logger.warning(f"Module {module_name} has no Plugin class") + except Exception as e: + last_error = e + logger.exception(f"Error importing module {module_name}") + + if plugin_cls is None: + if last_error: + raise last_error + else: + logger.warning(f"No Plugin class found for {key}; skipping") + return + + instance = plugin_cls() + + name = getattr(instance, "name", key) + version = getattr(instance, "version", "") + description = getattr(instance, "description", "") + fields = getattr(instance, "fields", []) + actions = getattr(instance, "actions", []) + + self._registry[key] = LoadedPlugin( + key=key, + name=name, + version=version, + description=description, + module=module, + instance=instance, + fields=fields, + actions=actions, + ) + + def _sync_db_with_registry(self): + with transaction.atomic(): + for key, lp in self._registry.items(): + obj, _ = PluginConfig.objects.get_or_create( + key=key, + defaults={ + "name": lp.name, + "version": lp.version, + "description": lp.description, + "settings": {}, + }, + ) + # Update meta if changed + changed = False + if obj.name != lp.name: + obj.name = lp.name + changed = True + if obj.version != lp.version: + obj.version = lp.version + changed = True + if obj.description != lp.description: + obj.description = lp.description + changed = True + if changed: + obj.save() + + def list_plugins(self) -> List[Dict[str, Any]]: + from .models import PluginConfig + + plugins: List[Dict[str, Any]] = [] + try: + configs = {c.key: c for c in PluginConfig.objects.all()} + except Exception as e: + # Database might not be migrated yet; fall back to registry only + logger.warning("PluginConfig table unavailable; listing registry only: %s", e) + configs = {} + + # First, include all discovered plugins + for key, lp in self._registry.items(): + conf = configs.get(key) + plugins.append( + { + "key": key, + "name": lp.name, + "version": lp.version, + "description": lp.description, + "enabled": conf.enabled if conf else False, + "ever_enabled": getattr(conf, "ever_enabled", False) if conf else False, + "fields": lp.fields or [], + "settings": (conf.settings if conf else {}), + "actions": lp.actions or [], + "missing": False, + } + ) + + # Then, include any DB-only configs (files missing or failed to load) + discovered_keys = set(self._registry.keys()) + for key, conf in configs.items(): + if key in discovered_keys: + continue + plugins.append( + { + "key": key, + "name": conf.name, + "version": conf.version, + "description": conf.description, + "enabled": conf.enabled, + "ever_enabled": getattr(conf, "ever_enabled", False), + "fields": [], + "settings": conf.settings or {}, + "actions": [], + "missing": True, + } + ) + + return plugins + + def get_plugin(self, key: str) -> Optional[LoadedPlugin]: + return self._registry.get(key) + + def update_settings(self, key: str, settings: Dict[str, Any]) -> Dict[str, 
Any]: + cfg = PluginConfig.objects.get(key=key) + cfg.settings = settings or {} + cfg.save(update_fields=["settings", "updated_at"]) + return cfg.settings + + def run_action(self, key: str, action_id: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: + lp = self.get_plugin(key) + if not lp or not lp.instance: + raise ValueError(f"Plugin '{key}' not found") + + cfg = PluginConfig.objects.get(key=key) + if not cfg.enabled: + raise PermissionError(f"Plugin '{key}' is disabled") + params = params or {} + + # Provide a context object to the plugin + context = { + "settings": cfg.settings or {}, + "logger": logger, + "actions": {a.get("id"): a for a in (lp.actions or [])}, + } + + # Run either via Celery if plugin provides a delayed method, or inline + run_method = getattr(lp.instance, "run", None) + if not callable(run_method): + raise ValueError(f"Plugin '{key}' has no runnable 'run' method") + + try: + result = run_method(action_id, params, context) + except Exception: + logger.exception(f"Plugin '{key}' action '{action_id}' failed") + raise + + # Normalize return + if isinstance(result, dict): + return result + return {"status": "ok", "result": result} diff --git a/apps/plugins/migrations/0001_initial.py b/apps/plugins/migrations/0001_initial.py new file mode 100644 index 00000000..6de1490a --- /dev/null +++ b/apps/plugins/migrations/0001_initial.py @@ -0,0 +1,29 @@ +# Generated by Django 5.2.4 on 2025-09-13 13:51 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ] + + operations = [ + migrations.CreateModel( + name='PluginConfig', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('key', models.CharField(max_length=128, unique=True)), + ('name', models.CharField(max_length=255)), + ('version', models.CharField(blank=True, default='', max_length=64)), + ('description', models.TextField(blank=True, default='')), + ('enabled', models.BooleanField(default=False)), + ('ever_enabled', models.BooleanField(default=False)), + ('settings', models.JSONField(blank=True, default=dict)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ], + ), + ] diff --git a/apps/plugins/migrations/__init__.py b/apps/plugins/migrations/__init__.py new file mode 100644 index 00000000..ade076bd --- /dev/null +++ b/apps/plugins/migrations/__init__.py @@ -0,0 +1 @@ +# This file marks the migrations package for the plugins app. 
diff --git a/apps/plugins/models.py b/apps/plugins/models.py new file mode 100644 index 00000000..8ae0b5be --- /dev/null +++ b/apps/plugins/models.py @@ -0,0 +1,19 @@ +from django.db import models + + +class PluginConfig(models.Model): + """Stores discovered plugins and their persisted settings.""" + + key = models.CharField(max_length=128, unique=True) + name = models.CharField(max_length=255) + version = models.CharField(max_length=64, blank=True, default="") + description = models.TextField(blank=True, default="") + enabled = models.BooleanField(default=False) + # Tracks whether this plugin has ever been enabled at least once + ever_enabled = models.BooleanField(default=False) + settings = models.JSONField(default=dict, blank=True) + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + + def __str__(self) -> str: + return f"{self.name} ({self.key})" diff --git a/apps/plugins/serializers.py b/apps/plugins/serializers.py new file mode 100644 index 00000000..cc7b1882 --- /dev/null +++ b/apps/plugins/serializers.py @@ -0,0 +1,28 @@ +from rest_framework import serializers + + +class PluginActionSerializer(serializers.Serializer): + id = serializers.CharField() + label = serializers.CharField() + description = serializers.CharField(required=False, allow_blank=True) + + +class PluginFieldSerializer(serializers.Serializer): + id = serializers.CharField() + label = serializers.CharField() + type = serializers.ChoiceField(choices=["string", "number", "boolean", "select"]) # simple types + default = serializers.JSONField(required=False) + help_text = serializers.CharField(required=False, allow_blank=True) + options = serializers.ListField(child=serializers.DictField(), required=False) + + +class PluginSerializer(serializers.Serializer): + key = serializers.CharField() + name = serializers.CharField() + version = serializers.CharField(allow_blank=True) + description = serializers.CharField(allow_blank=True) + enabled = serializers.BooleanField() + fields = PluginFieldSerializer(many=True) + settings = serializers.JSONField() + actions = PluginActionSerializer(many=True) + diff --git a/apps/proxy/config.py b/apps/proxy/config.py index b00bd224..3b1ce967 100644 --- a/apps/proxy/config.py +++ b/apps/proxy/config.py @@ -1,4 +1,6 @@ """Shared configuration between proxy types""" +import time +from django.db import connection class BaseConfig: DEFAULT_USER_AGENT = 'VLC/3.0.20 LibVLC/3.0.20' # Will only be used if connection to settings fail @@ -9,8 +11,56 @@ class BaseConfig: CONNECTION_TIMEOUT = 10 # seconds to wait for initial connection MAX_STREAM_SWITCHES = 10 # Maximum number of stream switch attempts before giving up BUFFER_CHUNK_SIZE = 188 * 1361 # ~256KB - # Redis settings - REDIS_CHUNK_TTL = 60 # Number in seconds - Chunks expire after 1 minute + BUFFERING_TIMEOUT = 15 # Seconds to wait for buffering before switching streams + BUFFER_SPEED = 1 # What speed to consider the stream buffering, 1x is normal speed, 2x is double speed, etc.
+ + # Cache for proxy settings (class-level, shared across all instances) + _proxy_settings_cache = None + _proxy_settings_cache_time = 0 + _proxy_settings_cache_ttl = 10 # Cache for 10 seconds + + @classmethod + def get_proxy_settings(cls): + """Get proxy settings from CoreSettings JSON data with fallback to defaults (cached)""" + # Check if cache is still valid + now = time.time() + if cls._proxy_settings_cache is not None and (now - cls._proxy_settings_cache_time) < cls._proxy_settings_cache_ttl: + return cls._proxy_settings_cache + + # Cache miss or expired - fetch from database + try: + from core.models import CoreSettings + settings = CoreSettings.get_proxy_settings() + cls._proxy_settings_cache = settings + cls._proxy_settings_cache_time = now + return settings + + except Exception: + # Return defaults if database query fails + return { + "buffering_timeout": 15, + "buffering_speed": 1.0, + "redis_chunk_ttl": 60, + "channel_shutdown_delay": 0, + "channel_init_grace_period": 5, + } + + finally: + # Always close the connection after reading settings + try: + connection.close() + except Exception: + pass + + @classmethod + def get_redis_chunk_ttl(cls): + """Get Redis chunk TTL from database or default""" + settings = cls.get_proxy_settings() + return settings.get("redis_chunk_ttl", 60) + + @property + def REDIS_CHUNK_TTL(self): + return self.get_redis_chunk_ttl() class HLSConfig(BaseConfig): MIN_SEGMENTS = 12 @@ -32,6 +82,8 @@ class TSConfig(BaseConfig): INITIAL_BEHIND_CHUNKS = 4 # How many chunks behind to start a client (4 chunks = ~1MB) CHUNK_BATCH_SIZE = 5 # How many chunks to fetch in one batch KEEPALIVE_INTERVAL = 0.5 # Seconds between keepalive packets when at buffer head + # Chunk read timeout + CHUNK_TIMEOUT = 5 # Seconds to wait for each chunk read # Streaming settings TARGET_BITRATE = 8000000 # Target bitrate (8 Mbps) @@ -40,21 +92,14 @@ class TSConfig(BaseConfig): # Resource management CLEANUP_INTERVAL = 60 # Check for inactive channels every 60 seconds - CHANNEL_SHUTDOWN_DELAY = 0 # How long to wait after last client before shutdown (seconds) # Client tracking settings - CLIENT_RECORD_TTL = 5 # How long client records persist in Redis (seconds). Client will be considered MIA after this time. + CLIENT_RECORD_TTL = 60 # How long client records persist in Redis (seconds). Client will be considered MIA after this time. 
CLEANUP_CHECK_INTERVAL = 1 # How often to check for disconnected clients (seconds) - CHANNEL_INIT_GRACE_PERIOD = 5 # How long to wait for first client after initialization (seconds) - CLIENT_HEARTBEAT_INTERVAL = 1 # How often to send client heartbeats (seconds) - GHOST_CLIENT_MULTIPLIER = 5.0 # How many heartbeat intervals before client considered ghost (5 would mean 5 secondsif heartbeat interval is 1) + CLIENT_HEARTBEAT_INTERVAL = 5 # How often to send client heartbeats (seconds) + GHOST_CLIENT_MULTIPLIER = 6.0 # How many heartbeat intervals before client considered ghost (6 would mean 30 seconds if heartbeat interval is 5) CLIENT_WAIT_TIMEOUT = 30 # Seconds to wait for client to connect - - # TS packets are 188 bytes - # Make chunk size a multiple of TS packet size for perfect alignment - # ~1MB is ideal for streaming (matches typical media buffer sizes) - # Stream health and recovery settings MAX_HEALTH_RECOVERY_ATTEMPTS = 2 # Maximum times to attempt recovery for a single stream MAX_RECONNECT_ATTEMPTS = 3 # Maximum reconnects to try before switching streams @@ -64,3 +109,47 @@ class TSConfig(BaseConfig): + # Database-dependent settings with fallbacks + @classmethod + def get_channel_shutdown_delay(cls): + """Get channel shutdown delay from database or default""" + settings = cls.get_proxy_settings() + return settings.get("channel_shutdown_delay", 0) + + @classmethod + def get_buffering_timeout(cls): + """Get buffering timeout from database or default""" + settings = cls.get_proxy_settings() + return settings.get("buffering_timeout", 15) + + @classmethod + def get_buffering_speed(cls): + """Get buffering speed threshold from database or default""" + settings = cls.get_proxy_settings() + return settings.get("buffering_speed", 1.0) + + @classmethod + def get_channel_init_grace_period(cls): + """Get channel init grace period from database or default""" + settings = cls.get_proxy_settings() + return settings.get("channel_init_grace_period", 5) + + # Dynamic property access for these settings + @property + def CHANNEL_SHUTDOWN_DELAY(self): + return self.get_channel_shutdown_delay() + + @property + def BUFFERING_TIMEOUT(self): + return self.get_buffering_timeout() + + @property + def BUFFERING_SPEED(self): + return self.get_buffering_speed() + + @property + def CHANNEL_INIT_GRACE_PERIOD(self): + return self.get_channel_init_grace_period() + + + diff --git a/apps/proxy/tasks.py b/apps/proxy/tasks.py index 00e3e039..68843712 100644 --- a/apps/proxy/tasks.py +++ b/apps/proxy/tasks.py @@ -10,6 +10,7 @@ import gc # Add import for garbage collection from core.utils import RedisClient from apps.proxy.ts_proxy.channel_status import ChannelStatus from core.utils import send_websocket_update +from apps.proxy.vod_proxy.connection_manager import get_connection_manager logger = logging.getLogger(__name__) @@ -59,3 +60,13 @@ def fetch_channel_stats(): # Explicitly clean up large data structures all_channels = None gc.collect() + +@shared_task +def cleanup_vod_connections(): + """Clean up stale VOD connections""" + try: + connection_manager = get_connection_manager() + connection_manager.cleanup_stale_connections(max_age_seconds=3600) # 1 hour + logger.info("VOD connection cleanup completed") + except Exception as e: + logger.error(f"Error in VOD connection cleanup: {e}", exc_info=True) diff --git a/apps/proxy/ts_proxy/channel_status.py b/apps/proxy/ts_proxy/channel_status.py index 50e84eec..8f1d0649 100644 --- a/apps/proxy/ts_proxy/channel_status.py +++ b/apps/proxy/ts_proxy/channel_status.py @@ -264,6
+264,63 @@ class ChannelStatus: 'last_data_age': time.time() - manager.last_data_time } + # Add FFmpeg stream information + video_codec = metadata.get(ChannelMetadataField.VIDEO_CODEC.encode('utf-8')) + if video_codec: + info['video_codec'] = video_codec.decode('utf-8') + + resolution = metadata.get(ChannelMetadataField.RESOLUTION.encode('utf-8')) + if resolution: + info['resolution'] = resolution.decode('utf-8') + + source_fps = metadata.get(ChannelMetadataField.SOURCE_FPS.encode('utf-8')) + if source_fps: + info['source_fps'] = float(source_fps.decode('utf-8')) + + pixel_format = metadata.get(ChannelMetadataField.PIXEL_FORMAT.encode('utf-8')) + if pixel_format: + info['pixel_format'] = pixel_format.decode('utf-8') + + source_bitrate = metadata.get(ChannelMetadataField.SOURCE_BITRATE.encode('utf-8')) + if source_bitrate: + info['source_bitrate'] = float(source_bitrate.decode('utf-8')) + + audio_codec = metadata.get(ChannelMetadataField.AUDIO_CODEC.encode('utf-8')) + if audio_codec: + info['audio_codec'] = audio_codec.decode('utf-8') + + sample_rate = metadata.get(ChannelMetadataField.SAMPLE_RATE.encode('utf-8')) + if sample_rate: + info['sample_rate'] = int(sample_rate.decode('utf-8')) + + audio_channels = metadata.get(ChannelMetadataField.AUDIO_CHANNELS.encode('utf-8')) + if audio_channels: + info['audio_channels'] = audio_channels.decode('utf-8') + + audio_bitrate = metadata.get(ChannelMetadataField.AUDIO_BITRATE.encode('utf-8')) + if audio_bitrate: + info['audio_bitrate'] = float(audio_bitrate.decode('utf-8')) + + # Add FFmpeg performance stats + ffmpeg_speed = metadata.get(ChannelMetadataField.FFMPEG_SPEED.encode('utf-8')) + if ffmpeg_speed: + info['ffmpeg_speed'] = float(ffmpeg_speed.decode('utf-8')) + + ffmpeg_fps = metadata.get(ChannelMetadataField.FFMPEG_FPS.encode('utf-8')) + if ffmpeg_fps: + info['ffmpeg_fps'] = float(ffmpeg_fps.decode('utf-8')) + + actual_fps = metadata.get(ChannelMetadataField.ACTUAL_FPS.encode('utf-8')) + if actual_fps: + info['actual_fps'] = float(actual_fps.decode('utf-8')) + + ffmpeg_bitrate = metadata.get(ChannelMetadataField.FFMPEG_BITRATE.encode('utf-8')) + if ffmpeg_bitrate: + info['ffmpeg_bitrate'] = float(ffmpeg_bitrate.decode('utf-8')) + stream_type = metadata.get(ChannelMetadataField.STREAM_TYPE.encode('utf-8')) + if stream_type: + info['stream_type'] = stream_type.decode('utf-8') + return info @staticmethod @@ -422,6 +479,31 @@ class ChannelStatus: except ValueError: logger.warning(f"Invalid m3u_profile_id format in Redis: {m3u_profile_id_bytes}") + # Add stream info to basic info as well + video_codec = metadata.get(ChannelMetadataField.VIDEO_CODEC.encode('utf-8')) + if video_codec: + info['video_codec'] = video_codec.decode('utf-8') + + resolution = metadata.get(ChannelMetadataField.RESOLUTION.encode('utf-8')) + if resolution: + info['resolution'] = resolution.decode('utf-8') + + source_fps = metadata.get(ChannelMetadataField.SOURCE_FPS.encode('utf-8')) + if source_fps: + info['source_fps'] = float(source_fps.decode('utf-8')) + ffmpeg_speed = metadata.get(ChannelMetadataField.FFMPEG_SPEED.encode('utf-8')) + if ffmpeg_speed: + info['ffmpeg_speed'] = float(ffmpeg_speed.decode('utf-8')) + audio_codec = metadata.get(ChannelMetadataField.AUDIO_CODEC.encode('utf-8')) + if audio_codec: + info['audio_codec'] = audio_codec.decode('utf-8') + audio_channels = metadata.get(ChannelMetadataField.AUDIO_CHANNELS.encode('utf-8')) + if audio_channels: + info['audio_channels'] = audio_channels.decode('utf-8') + stream_type = 
metadata.get(ChannelMetadataField.STREAM_TYPE.encode('utf-8')) + if stream_type: + info['stream_type'] = stream_type.decode('utf-8') + return info except Exception as e: logger.error(f"Error getting channel info: {e}", exc_info=True) # Added exc_info for better debugging diff --git a/apps/proxy/ts_proxy/client_manager.py b/apps/proxy/ts_proxy/client_manager.py index 98dbf072..3d89b3b8 100644 --- a/apps/proxy/ts_proxy/client_manager.py +++ b/apps/proxy/ts_proxy/client_manager.py @@ -4,13 +4,15 @@ import threading import logging import time import json +import gevent from typing import Set, Optional from apps.proxy.config import TSConfig as Config from redis.exceptions import ConnectionError, TimeoutError -from .constants import EventType +from .constants import EventType, ChannelState, ChannelMetadataField from .config_helper import ConfigHelper from .redis_keys import RedisKeys from .utils import get_logger +from core.utils import send_websocket_update logger = get_logger() @@ -24,6 +26,7 @@ class ClientManager: self.lock = threading.Lock() self.last_active_time = time.time() self.worker_id = worker_id # Store worker ID as instance variable + self._heartbeat_running = True # Flag to control heartbeat thread # STANDARDIZED KEYS: Move client set under channel namespace self.client_set_key = RedisKeys.clients(channel_id) @@ -35,35 +38,68 @@ class ClientManager: self._start_heartbeat_thread() self._registered_clients = set() # Track already registered client IDs - def _start_heartbeat_thread(self): - """Start thread to regularly refresh client presence in Redis""" - def heartbeat_task(): - no_clients_count = 0 # Track consecutive empty cycles - max_empty_cycles = 3 # Exit after this many consecutive empty checks + def _trigger_stats_update(self): + """Trigger a channel stats update via WebSocket""" + try: + # Import here to avoid potential import issues + from apps.proxy.ts_proxy.channel_status import ChannelStatus + import redis - logger.debug(f"Started heartbeat thread for channel {self.channel_id} (interval: {self.heartbeat_interval}s)") + # Get all channels from Redis + redis_client = redis.Redis.from_url('redis://localhost:6379', decode_responses=True) + all_channels = [] + cursor = 0 while True: + cursor, keys = redis_client.scan(cursor, match="ts_proxy:channel:*:clients", count=100) + for key in keys: + # Extract channel ID from key + parts = key.split(':') + if len(parts) >= 4: + ch_id = parts[2] + channel_info = ChannelStatus.get_basic_channel_info(ch_id) + if channel_info: + all_channels.append(channel_info) + + if cursor == 0: + break + + # Send WebSocket update using existing infrastructure + send_websocket_update( + "updates", + "update", + { + "success": True, + "type": "channel_stats", + "stats": json.dumps({'channels': all_channels, 'count': len(all_channels)}) + } + ) + except Exception as e: + logger.debug(f"Failed to trigger stats update: {e}") + + def _start_heartbeat_thread(self): + """Start thread to regularly refresh client presence in Redis for local clients""" + def heartbeat_task(): + logger.debug(f"Started heartbeat thread for channel {self.channel_id} (interval: {self.heartbeat_interval}s)") + + while self._heartbeat_running: try: - # Wait for the interval - time.sleep(self.heartbeat_interval) + # Wait for the interval, but check stop flag frequently for quick shutdown + # Sleep in 1-second increments to allow faster response to stop signal + for _ in range(int(self.heartbeat_interval)): + if not self._heartbeat_running: + break + time.sleep(1) + + # Final check 
before doing work + if not self._heartbeat_running: + break # Send heartbeat for all local clients with self.lock: - if not self.clients or not self.redis_client: - # No clients left, increment our counter - no_clients_count += 1 - - # If we've seen no clients for several consecutive checks, exit the thread - if no_clients_count >= max_empty_cycles: - logger.info(f"No clients for channel {self.channel_id} after {no_clients_count} consecutive checks, exiting heartbeat thread") - return # This exits the thread - - # Skip this cycle if we have no clients + # Skip this cycle if we have no local clients + if not self.clients: continue - else: - # Reset counter when we see clients - no_clients_count = 0 # IMPROVED GHOST DETECTION: Check for stale clients before sending heartbeats current_time = time.time() @@ -134,11 +170,20 @@ class ClientManager: except Exception as e: logger.error(f"Error in client heartbeat thread: {e}") + logger.debug(f"Heartbeat thread exiting for channel {self.channel_id}") + thread = threading.Thread(target=heartbeat_task, daemon=True) thread.name = f"client-heartbeat-{self.channel_id}" thread.start() logger.debug(f"Started client heartbeat thread for channel {self.channel_id} (interval: {self.heartbeat_interval}s)") + def stop(self): + """Stop the heartbeat thread and cleanup""" + logger.debug(f"Stopping ClientManager for channel {self.channel_id}") + self._heartbeat_running = False + # Give the thread a moment to exit gracefully + # Note: We don't join() here because it's a daemon thread and will exit on its own + def _execute_redis_command(self, command_func): """Execute Redis command with error handling""" if not self.redis_client: @@ -237,6 +282,9 @@ class ClientManager: json.dumps(event_data) ) + # Trigger channel stats update via WebSocket + self._trigger_stats_update() + # Get total clients across all workers total_clients = self.get_total_client_count() logger.info(f"New client connected: {client_id} (local: {len(self.clients)}, total: {total_clients})") @@ -251,6 +299,8 @@ class ClientManager: def remove_client(self, client_id): """Remove a client from this channel and Redis""" + client_ip = None + with self.lock: if client_id in self.clients: self.clients.remove(client_id) @@ -261,6 +311,14 @@ class ClientManager: self.last_active_time = time.time() if self.redis_client: + # Get client IP before removing the data + client_key = f"ts_proxy:channel:{self.channel_id}:clients:{client_id}" + client_data = self.redis_client.hgetall(client_key) + if client_data and b'ip_address' in client_data: + client_ip = client_data[b'ip_address'].decode('utf-8') + elif client_data and 'ip_address' in client_data: + client_ip = client_data['ip_address'] + # Remove from channel's client set self.redis_client.srem(self.client_set_key, client_id) @@ -290,6 +348,9 @@ class ClientManager: }) self.redis_client.publish(RedisKeys.events_channel(self.channel_id), event_data) + # Trigger channel stats update via WebSocket + self._trigger_stats_update() + total_clients = self.get_total_client_count() logger.info(f"Client disconnected: {client_id} (local: {len(self.clients)}, total: {total_clients})") diff --git a/apps/proxy/ts_proxy/config_helper.py b/apps/proxy/ts_proxy/config_helper.py index 773ab378..d7d33558 100644 --- a/apps/proxy/ts_proxy/config_helper.py +++ b/apps/proxy/ts_proxy/config_helper.py @@ -34,7 +34,7 @@ class ConfigHelper: @staticmethod def channel_shutdown_delay(): """Get channel shutdown delay in seconds""" - return ConfigHelper.get('CHANNEL_SHUTDOWN_DELAY', 0) + return 
Config.get_channel_shutdown_delay() @staticmethod def initial_behind_chunks(): @@ -54,7 +54,7 @@ class ConfigHelper: @staticmethod def redis_chunk_ttl(): """Get Redis chunk TTL in seconds""" - return ConfigHelper.get('REDIS_CHUNK_TTL', 60) + return Config.get_redis_chunk_ttl() @staticmethod def chunk_size(): @@ -85,3 +85,27 @@ class ConfigHelper: def failover_grace_period(): """Get extra time (in seconds) to allow for stream switching before disconnecting clients""" return ConfigHelper.get('FAILOVER_GRACE_PERIOD', 20) # Default to 20 seconds + + @staticmethod + def buffering_timeout(): + """Get buffering timeout in seconds""" + return Config.get_buffering_timeout() + + @staticmethod + def buffering_speed(): + """Get buffering speed threshold""" + return Config.get_buffering_speed() + + @staticmethod + def channel_init_grace_period(): + """Get channel initialization grace period in seconds""" + return Config.get_channel_init_grace_period() + + @staticmethod + def chunk_timeout(): + """ + Get chunk timeout in seconds (used for both socket and HTTP read timeouts). + This controls how long we wait for each chunk before timing out. + Set this higher (e.g., 30s) for slow providers that may have intermittent delays. + """ + return ConfigHelper.get('CHUNK_TIMEOUT', 5) # Default 5 seconds diff --git a/apps/proxy/ts_proxy/constants.py b/apps/proxy/ts_proxy/constants.py index 4827b24b..7baa9e1c 100644 --- a/apps/proxy/ts_proxy/constants.py +++ b/apps/proxy/ts_proxy/constants.py @@ -18,6 +18,7 @@ class ChannelState: ERROR = "error" STOPPING = "stopping" STOPPED = "stopped" + BUFFERING = "buffering" # Event types class EventType: @@ -32,6 +33,8 @@ class EventType: # Stream types class StreamType: HLS = "hls" + RTSP = "rtsp" + UDP = "udp" TS = "ts" UNKNOWN = "unknown" @@ -63,6 +66,33 @@ class ChannelMetadataField: STREAM_SWITCH_TIME = "stream_switch_time" STREAM_SWITCH_REASON = "stream_switch_reason" + # FFmpeg performance metrics + FFMPEG_SPEED = "ffmpeg_speed" + FFMPEG_FPS = "ffmpeg_fps" + ACTUAL_FPS = "actual_fps" + FFMPEG_OUTPUT_BITRATE = "ffmpeg_output_bitrate" + FFMPEG_STATS_UPDATED = "ffmpeg_stats_updated" + + # Video stream info + VIDEO_CODEC = "video_codec" + RESOLUTION = "resolution" + WIDTH = "width" + HEIGHT = "height" + SOURCE_FPS = "source_fps" + PIXEL_FORMAT = "pixel_format" + VIDEO_BITRATE = "video_bitrate" + + # Audio stream info + AUDIO_CODEC = "audio_codec" + SAMPLE_RATE = "sample_rate" + AUDIO_CHANNELS = "audio_channels" + AUDIO_BITRATE = "audio_bitrate" + + # Stream format info + STREAM_TYPE = "stream_type" + # Stream info timestamp + STREAM_INFO_UPDATED = "stream_info_updated" + # Client metadata fields CONNECTED_AT = "connected_at" LAST_ACTIVE = "last_active" diff --git a/apps/proxy/ts_proxy/http_streamer.py b/apps/proxy/ts_proxy/http_streamer.py new file mode 100644 index 00000000..147d2c93 --- /dev/null +++ b/apps/proxy/ts_proxy/http_streamer.py @@ -0,0 +1,138 @@ +""" +HTTP Stream Reader - Thread-based HTTP stream reader that writes to a pipe. +This allows us to use the same fetch_chunk() path for both transcode and HTTP streams. 
+""" + +import threading +import os +import requests +from requests.adapters import HTTPAdapter +from .utils import get_logger + +logger = get_logger() + + +class HTTPStreamReader: + """Thread-based HTTP stream reader that writes to a pipe""" + + def __init__(self, url, user_agent=None, chunk_size=8192): + self.url = url + self.user_agent = user_agent + self.chunk_size = chunk_size + self.session = None + self.response = None + self.thread = None + self.pipe_read = None + self.pipe_write = None + self.running = False + + def start(self): + """Start the HTTP stream reader thread""" + # Create a pipe (works on Windows and Unix) + self.pipe_read, self.pipe_write = os.pipe() + + # Start the reader thread + self.running = True + self.thread = threading.Thread(target=self._read_stream, daemon=True) + self.thread.start() + + logger.info(f"Started HTTP stream reader thread for {self.url}") + return self.pipe_read + + def _read_stream(self): + """Thread worker that reads HTTP stream and writes to pipe""" + try: + # Build headers + headers = {} + if self.user_agent: + headers['User-Agent'] = self.user_agent + + logger.info(f"HTTP reader connecting to {self.url}") + + # Create session + self.session = requests.Session() + + # Disable retries for faster failure detection + adapter = HTTPAdapter(max_retries=0, pool_connections=1, pool_maxsize=1) + self.session.mount('http://', adapter) + self.session.mount('https://', adapter) + + # Stream the URL + self.response = self.session.get( + self.url, + headers=headers, + stream=True, + timeout=(5, 30) # 5s connect, 30s read + ) + + if self.response.status_code != 200: + logger.error(f"HTTP {self.response.status_code} from {self.url}") + return + + logger.info(f"HTTP reader connected successfully, streaming data...") + + # Stream chunks to pipe + chunk_count = 0 + for chunk in self.response.iter_content(chunk_size=self.chunk_size): + if not self.running: + break + + if chunk: + try: + # Write binary data to pipe + os.write(self.pipe_write, chunk) + chunk_count += 1 + + # Log progress periodically + if chunk_count % 1000 == 0: + logger.debug(f"HTTP reader streamed {chunk_count} chunks") + except OSError as e: + logger.error(f"Pipe write error: {e}") + break + + logger.info("HTTP stream ended") + + except requests.exceptions.RequestException as e: + logger.error(f"HTTP reader request error: {e}") + except Exception as e: + logger.error(f"HTTP reader unexpected error: {e}", exc_info=True) + finally: + self.running = False + # Close write end of pipe to signal EOF + try: + if self.pipe_write is not None: + os.close(self.pipe_write) + self.pipe_write = None + except: + pass + + def stop(self): + """Stop the HTTP stream reader""" + logger.info("Stopping HTTP stream reader") + self.running = False + + # Close response + if self.response: + try: + self.response.close() + except: + pass + + # Close session + if self.session: + try: + self.session.close() + except: + pass + + # Close write end of pipe + if self.pipe_write is not None: + try: + os.close(self.pipe_write) + self.pipe_write = None + except: + pass + + # Wait for thread + if self.thread and self.thread.is_alive(): + self.thread.join(timeout=2.0) diff --git a/apps/proxy/ts_proxy/server.py b/apps/proxy/ts_proxy/server.py index 3d0a53d9..cca827a9 100644 --- a/apps/proxy/ts_proxy/server.py +++ b/apps/proxy/ts_proxy/server.py @@ -131,6 +131,8 @@ class ProxyServer: max_retries = 10 base_retry_delay = 1 # Start with 1 second delay max_retry_delay = 30 # Cap at 30 seconds + pubsub_client = None + pubsub = None 
while True: try: @@ -206,7 +208,7 @@ class ProxyServer: self.redis_client.setex(disconnect_key, 60, str(time.time())) # Get configured shutdown delay or default - shutdown_delay = getattr(Config, 'CHANNEL_SHUTDOWN_DELAY', 0) + shutdown_delay = ConfigHelper.channel_shutdown_delay() if shutdown_delay > 0: logger.info(f"Waiting {shutdown_delay}s before stopping channel...") @@ -339,20 +341,27 @@ class ProxyServer: logger.error(f"Error in event listener: {e}. Retrying in {final_delay:.1f}s (attempt {retry_count})") gevent.sleep(final_delay) # REPLACE: time.sleep(final_delay) - # Try to clean up the old connection - try: - if 'pubsub' in locals(): - pubsub.close() - if 'pubsub_client' in locals(): - pubsub_client.close() - except: - pass - except Exception as e: logger.error(f"Error in event listener: {e}") # Add a short delay to prevent rapid retries on persistent errors gevent.sleep(5) # REPLACE: time.sleep(5) + finally: + # Always clean up PubSub connections in all error paths + try: + if pubsub: + pubsub.close() + pubsub = None + except Exception as e: + logger.debug(f"Error closing pubsub: {e}") + + try: + if pubsub_client: + pubsub_client.close() + pubsub_client = None + except Exception as e: + logger.debug(f"Error closing pubsub_client: {e}") + thread = threading.Thread(target=event_listener, daemon=True) thread.name = "redis-event-listener" thread.start() @@ -472,7 +481,7 @@ class ProxyServer: if b'state' in metadata: state = metadata[b'state'].decode('utf-8') active_states = [ChannelState.INITIALIZING, ChannelState.CONNECTING, - ChannelState.WAITING_FOR_CLIENTS, ChannelState.ACTIVE] + ChannelState.WAITING_FOR_CLIENTS, ChannelState.ACTIVE, ChannelState.BUFFERING] if state in active_states: logger.info(f"Channel {channel_id} already being initialized with state {state}") # Create buffer and client manager only if we don't have them @@ -486,17 +495,18 @@ class ProxyServer: ) return True - # Create buffer and client manager instances - buffer = StreamBuffer(channel_id, redis_client=self.redis_client) - client_manager = ClientManager( - channel_id, - redis_client=self.redis_client, - worker_id=self.worker_id - ) + # Create buffer and client manager instances (or reuse if they exist) + if channel_id not in self.stream_buffers: + buffer = StreamBuffer(channel_id, redis_client=self.redis_client) + self.stream_buffers[channel_id] = buffer - # Store in local tracking - self.stream_buffers[channel_id] = buffer - self.client_managers[channel_id] = client_manager + if channel_id not in self.client_managers: + client_manager = ClientManager( + channel_id, + redis_client=self.redis_client, + worker_id=self.worker_id + ) + self.client_managers[channel_id] = client_manager # IMPROVED: Set initializing state in Redis BEFORE any other operations if self.redis_client: @@ -550,13 +560,15 @@ class ProxyServer: logger.info(f"Channel {channel_id} already owned by worker {current_owner}") logger.info(f"This worker ({self.worker_id}) will read from Redis buffer only") - # Create buffer but not stream manager - buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client) - self.stream_buffers[channel_id] = buffer + # Create buffer but not stream manager (only if not already exists) + if channel_id not in self.stream_buffers: + buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client) + self.stream_buffers[channel_id] = buffer - # Create client manager with channel_id and redis_client - client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, 
worker_id=self.worker_id) - self.client_managers[channel_id] = client_manager + # Create client manager with channel_id and redis_client (only if not already exists) + if channel_id not in self.client_managers: + client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id) + self.client_managers[channel_id] = client_manager return True @@ -571,13 +583,15 @@ class ProxyServer: # Another worker just acquired ownership logger.info(f"Another worker just acquired ownership of channel {channel_id}") - # Create buffer but not stream manager - buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client) - self.stream_buffers[channel_id] = buffer + # Create buffer but not stream manager (only if not already exists) + if channel_id not in self.stream_buffers: + buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client) + self.stream_buffers[channel_id] = buffer - # Create client manager with channel_id and redis_client - client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id) - self.client_managers[channel_id] = client_manager + # Create client manager with channel_id and redis_client (only if not already exists) + if channel_id not in self.client_managers: + client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id) + self.client_managers[channel_id] = client_manager return True @@ -596,7 +610,7 @@ class ProxyServer: if channel_user_agent: metadata["user_agent"] = channel_user_agent - # CRITICAL FIX: Make sure stream_id is always set in metadata and properly logged + # Make sure stream_id is always set in metadata and properly logged if channel_stream_id: metadata["stream_id"] = str(channel_stream_id) logger.info(f"Storing stream_id {channel_stream_id} in metadata for channel {channel_id}") @@ -632,13 +646,14 @@ class ProxyServer: logger.info(f"Created StreamManager for channel {channel_id} with stream ID {channel_stream_id}") self.stream_managers[channel_id] = stream_manager - # Create client manager with channel_id, redis_client AND worker_id - client_manager = ClientManager( - channel_id=channel_id, - redis_client=self.redis_client, - worker_id=self.worker_id - ) - self.client_managers[channel_id] = client_manager + # Create client manager with channel_id, redis_client AND worker_id (only if not already exists) + if channel_id not in self.client_managers: + client_manager = ClientManager( + channel_id=channel_id, + redis_client=self.redis_client, + worker_id=self.worker_id + ) + self.client_managers[channel_id] = client_manager # Start stream manager thread only for the owner thread = threading.Thread(target=stream_manager.run, daemon=True) @@ -689,7 +704,8 @@ class ProxyServer: owner = metadata.get(b'owner', b'').decode('utf-8') # States that indicate the channel is running properly - valid_states = [ChannelState.ACTIVE, ChannelState.WAITING_FOR_CLIENTS, ChannelState.CONNECTING] + valid_states = [ChannelState.ACTIVE, ChannelState.WAITING_FOR_CLIENTS, + ChannelState.CONNECTING, ChannelState.BUFFERING, ChannelState.INITIALIZING] # If the channel is in a valid state, check if the owner is still active if state in valid_states: @@ -707,7 +723,7 @@ class ProxyServer: elif state in [ChannelState.STOPPING, ChannelState.STOPPED, ChannelState.ERROR]: # These states indicate the channel should be reinitialized logger.info(f"Channel {channel_id} exists but in terminal state: {state}") - return False + return True 
else: # Unknown or initializing state, check how long it's been in this state if b'state_changed_at' in metadata: @@ -845,6 +861,10 @@ class ProxyServer: # Clean up client manager - SAFE CHECK HERE TOO if channel_id in self.client_managers: try: + client_manager = self.client_managers[channel_id] + # Stop the heartbeat thread before deleting + if hasattr(client_manager, 'stop'): + client_manager.stop() del self.client_managers[channel_id] logger.info(f"Removed client manager for channel {channel_id}") except KeyError: @@ -941,7 +961,7 @@ class ProxyServer: # If waiting for clients, check grace period if connection_ready_time: - grace_period = ConfigHelper.get('CHANNEL_INIT_GRACE_PERIOD', 20) + grace_period = ConfigHelper.channel_init_grace_period() time_since_ready = time.time() - connection_ready_time # Add this debug log diff --git a/apps/proxy/ts_proxy/services/channel_service.py b/apps/proxy/ts_proxy/services/channel_service.py index bd1f2f81..551e2d27 100644 --- a/apps/proxy/ts_proxy/services/channel_service.py +++ b/apps/proxy/ts_proxy/services/channel_service.py @@ -6,6 +6,7 @@ This separates business logic from HTTP handling in views. import logging import time import json +import re from django.shortcuts import get_object_or_404 from apps.channels.models import Channel, Stream from apps.proxy.config import TSConfig as Config @@ -181,7 +182,7 @@ class ChannelService: old_url = manager.url # Update the stream - success = manager.update_url(new_url, stream_id) + success = manager.update_url(new_url, stream_id, m3u_profile_id) logger.info(f"Stream URL changed from {old_url} to {new_url}, result: {success}") result.update({ @@ -415,6 +416,222 @@ class ChannelService: logger.error(f"Error validating channel state: {e}", exc_info=True) return False, None, None, {"error": f"Exception: {str(e)}"} + @staticmethod + def parse_and_store_stream_info(channel_id, stream_info_line, stream_type="video", stream_id=None): + """Parse FFmpeg stream info line and store in Redis metadata and database""" + try: + if stream_type == "input": + # Example lines: + # Input #0, mpegts, from 'http://example.com/stream.ts': + # Input #0, hls, from 'http://example.com/stream.m3u8': + + # Extract input format (e.g., "mpegts", "hls", "flv", etc.) + input_match = re.search(r'Input #\d+,\s*([^,]+)', stream_info_line) + input_format = input_match.group(1).strip() if input_match else None + + # Store in Redis if we have valid data + if input_format: + ChannelService._update_stream_info_in_redis(channel_id, None, None, None, None, None, None, None, None, None, None, None, input_format) + # Save to database if stream_id is provided + if stream_id: + ChannelService._update_stream_stats_in_db(stream_id, stream_type=input_format) + + logger.debug(f"Input format info - Format: {input_format} for channel {channel_id}") + + elif stream_type == "video": + # Example line: + # Stream #0:0: Video: h264 (Main), yuv420p(tv, progressive), 1280x720 [SAR 1:1 DAR 16:9], q=2-31, 2000 kb/s, 29.97 fps, 90k tbn + + # Extract video codec (e.g., "h264", "mpeg2video", etc.) 
+ codec_match = re.search(r'Video:\s*([a-zA-Z0-9_]+)', stream_info_line) + video_codec = codec_match.group(1) if codec_match else None + + # Extract resolution (e.g., "1280x720") - be more specific to avoid hex values + # Look for resolution patterns that are realistic video dimensions + resolution_match = re.search(r'\b(\d{3,5})x(\d{3,5})\b', stream_info_line) + if resolution_match: + width = int(resolution_match.group(1)) + height = int(resolution_match.group(2)) + # Validate that these look like reasonable video dimensions + if 100 <= width <= 10000 and 100 <= height <= 10000: + resolution = f"{width}x{height}" + else: + width = height = resolution = None + else: + width = height = resolution = None + + # Extract source FPS (e.g., "29.97 fps") + fps_match = re.search(r'(\d+(?:\.\d+)?)\s*fps', stream_info_line) + source_fps = float(fps_match.group(1)) if fps_match else None + + # Extract pixel format (e.g., "yuv420p") + pixel_format_match = re.search(r'Video:\s*[^,]+,\s*([^,(]+)', stream_info_line) + pixel_format = None + if pixel_format_match: + pf = pixel_format_match.group(1).strip() + # Clean up pixel format (remove extra info in parentheses) + if '(' in pf: + pf = pf.split('(')[0].strip() + pixel_format = pf + + # Extract bitrate if present (e.g., "2000 kb/s") + video_bitrate = None + bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', stream_info_line) + if bitrate_match: + video_bitrate = float(bitrate_match.group(1)) + + # Store in Redis if we have valid data + if any(x is not None for x in [video_codec, resolution, source_fps, pixel_format, video_bitrate]): + ChannelService._update_stream_info_in_redis(channel_id, video_codec, resolution, width, height, source_fps, pixel_format, video_bitrate, None, None, None, None, None) + # Save to database if stream_id is provided + if stream_id: + ChannelService._update_stream_stats_in_db( + stream_id, + video_codec=video_codec, + resolution=resolution, + source_fps=source_fps, + pixel_format=pixel_format, + video_bitrate=video_bitrate + ) + + logger.info(f"Video stream info - Codec: {video_codec}, Resolution: {resolution}, " + f"Source FPS: {source_fps}, Pixel Format: {pixel_format}, " + f"Video Bitrate: {video_bitrate} kb/s") + + elif stream_type == "audio": + # Example line: + # Stream #0:1[0x101]: Audio: aac (LC) ([15][0][0][0] / 0x000F), 48000 Hz, stereo, fltp, 64 kb/s + + # Extract audio codec (e.g., "aac", "mp3", etc.) 
+ codec_match = re.search(r'Audio:\s*([a-zA-Z0-9_]+)', stream_info_line) + audio_codec = codec_match.group(1) if codec_match else None + + # Extract sample rate (e.g., "48000 Hz") + sample_rate_match = re.search(r'(\d+)\s*Hz', stream_info_line) + sample_rate = int(sample_rate_match.group(1)) if sample_rate_match else None + + # Extract channel layout (e.g., "stereo", "5.1", "mono") + # Look for common channel layouts + channel_match = re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', stream_info_line, re.IGNORECASE) + channels = channel_match.group(1) if channel_match else None + + # Extract audio bitrate if present (e.g., "64 kb/s") + audio_bitrate = None + bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', stream_info_line) + if bitrate_match: + audio_bitrate = float(bitrate_match.group(1)) + + # Store in Redis if we have valid data + if any(x is not None for x in [audio_codec, sample_rate, channels, audio_bitrate]): + ChannelService._update_stream_info_in_redis(channel_id, None, None, None, None, None, None, None, audio_codec, sample_rate, channels, audio_bitrate, None) + # Save to database if stream_id is provided + if stream_id: + ChannelService._update_stream_stats_in_db( + stream_id, + audio_codec=audio_codec, + sample_rate=sample_rate, + audio_channels=channels, + audio_bitrate=audio_bitrate + ) + + except Exception as e: + logger.debug(f"Error parsing FFmpeg {stream_type} stream info: {e}") + + @staticmethod + def _update_stream_info_in_redis(channel_id, codec, resolution, width, height, fps, pixel_format, video_bitrate, audio_codec=None, sample_rate=None, channels=None, audio_bitrate=None, input_format=None): + """Update stream info in Redis metadata""" + try: + proxy_server = ProxyServer.get_instance() + if not proxy_server.redis_client: + return False + + metadata_key = RedisKeys.channel_metadata(channel_id) + update_data = { + ChannelMetadataField.STREAM_INFO_UPDATED: str(time.time()) + } + + # Video info + if codec is not None: + update_data[ChannelMetadataField.VIDEO_CODEC] = str(codec) + + if resolution is not None: + update_data[ChannelMetadataField.RESOLUTION] = str(resolution) + + if width is not None: + update_data[ChannelMetadataField.WIDTH] = str(width) + + if height is not None: + update_data[ChannelMetadataField.HEIGHT] = str(height) + + if fps is not None: + update_data[ChannelMetadataField.SOURCE_FPS] = str(round(fps, 2)) + + if pixel_format is not None: + update_data[ChannelMetadataField.PIXEL_FORMAT] = str(pixel_format) + + if video_bitrate is not None: + update_data[ChannelMetadataField.VIDEO_BITRATE] = str(round(video_bitrate, 1)) + + # Audio info + if audio_codec is not None: + update_data[ChannelMetadataField.AUDIO_CODEC] = str(audio_codec) + + if sample_rate is not None: + update_data[ChannelMetadataField.SAMPLE_RATE] = str(sample_rate) + + if channels is not None: + update_data[ChannelMetadataField.AUDIO_CHANNELS] = str(channels) + + if audio_bitrate is not None: + update_data[ChannelMetadataField.AUDIO_BITRATE] = str(round(audio_bitrate, 1)) + if input_format is not None: + update_data[ChannelMetadataField.STREAM_TYPE] = str(input_format) + + proxy_server.redis_client.hset(metadata_key, mapping=update_data) + return True + + except Exception as e: + logger.error(f"Error updating stream info in Redis: {e}") + return False + + @staticmethod + def _update_stream_stats_in_db(stream_id, **stats): + """Update stream stats in database""" + from django.db import connection + + try: + from apps.channels.models import Stream + from django.utils import timezone 
+ + stream = Stream.objects.get(id=stream_id) + + # Get existing stats or create new dict + current_stats = stream.stream_stats or {} + + # Update with new stats + for key, value in stats.items(): + if value is not None: + current_stats[key] = value + + # Save updated stats and timestamp + stream.stream_stats = current_stats + stream.stream_stats_updated_at = timezone.now() + stream.save(update_fields=['stream_stats', 'stream_stats_updated_at']) + + logger.debug(f"Updated stream stats in database for stream {stream_id}: {stats}") + return True + + except Exception as e: + logger.error(f"Error updating stream stats in database for stream {stream_id}: {e}") + return False + + finally: + # Always close database connection after update + try: + connection.close() + except Exception: + pass + # Helper methods for Redis operations @staticmethod @@ -470,7 +687,7 @@ class ChannelService: switch_request = { "event": EventType.STREAM_SWITCH, - "channel_id": channel_id, + "channel_id": str(channel_id), "url": new_url, "user_agent": user_agent, "stream_id": stream_id, @@ -495,7 +712,7 @@ class ChannelService: stop_request = { "event": EventType.CHANNEL_STOP, - "channel_id": channel_id, + "channel_id": str(channel_id), "requester_worker_id": proxy_server.worker_id, "timestamp": time.time() } @@ -518,7 +735,7 @@ class ChannelService: stop_request = { "event": EventType.CLIENT_STOP, - "channel_id": channel_id, + "channel_id": str(channel_id), "client_id": client_id, "requester_worker_id": proxy_server.worker_id, "timestamp": time.time() diff --git a/apps/proxy/ts_proxy/stream_buffer.py b/apps/proxy/ts_proxy/stream_buffer.py index a5169c3a..85feb5dd 100644 --- a/apps/proxy/ts_proxy/stream_buffer.py +++ b/apps/proxy/ts_proxy/stream_buffer.py @@ -303,6 +303,14 @@ class StreamBuffer: # Retrieve chunks chunks = self.get_chunks_exact(client_index, chunk_count) + # Check if we got significantly fewer chunks than expected (likely due to expiration) + # Only check if we expected multiple chunks and got none or very few + if chunk_count > 3 and len(chunks) == 0 and chunks_behind > 10: + # Chunks are missing - likely expired from Redis + # Return empty list to signal client should skip forward + logger.debug(f"Chunks missing for client at index {client_index}, buffer at {self.index} ({chunks_behind} behind)") + return [], client_index + # Check total size total_size = sum(len(c) for c in chunks) @@ -316,7 +324,7 @@ class StreamBuffer: additional_size = sum(len(c) for c in more_chunks) if total_size + additional_size <= MAX_SIZE: chunks.extend(more_chunks) - chunk_count += additional + chunk_count += len(more_chunks) # Fixed: count actual additional chunks retrieved return chunks, client_index + chunk_count diff --git a/apps/proxy/ts_proxy/stream_generator.py b/apps/proxy/ts_proxy/stream_generator.py index 817a7b82..5d4f661f 100644 --- a/apps/proxy/ts_proxy/stream_generator.py +++ b/apps/proxy/ts_proxy/stream_generator.py @@ -52,6 +52,10 @@ class StreamGenerator: self.last_stats_bytes = 0 self.current_rate = 0.0 + # TTL refresh tracking + self.last_ttl_refresh = time.time() + self.ttl_refresh_interval = 3 # Refresh TTL every 3 seconds of active streaming + def generate(self): """ Generator function that produces the stream content for the client. 
@@ -204,6 +208,18 @@ class StreamGenerator: self.empty_reads += 1 self.consecutive_empty += 1 + # Check if we're too far behind (chunks expired from Redis) + chunks_behind = self.buffer.index - self.local_index + if chunks_behind > 50: # If more than 50 chunks behind, jump forward + # Calculate new position: stay a few chunks behind current buffer + initial_behind = ConfigHelper.initial_behind_chunks() + new_index = max(self.local_index, self.buffer.index - initial_behind) + + logger.warning(f"[{self.client_id}] Client too far behind ({chunks_behind} chunks), jumping from {self.local_index} to {new_index}") + self.local_index = new_index + self.consecutive_empty = 0 # Reset since we're repositioning + continue # Try again immediately with new position + if self._should_send_keepalive(self.local_index): keepalive_packet = create_ts_packet('keepalive') logger.debug(f"[{self.client_id}] Sending keepalive packet while waiting at buffer head") @@ -324,7 +340,20 @@ class StreamGenerator: ChannelMetadataField.STATS_UPDATED_AT: str(current_time) } proxy_server.redis_client.hset(client_key, mapping=stats) - # No need to set expiration as client heartbeat will refresh this key + + # Refresh TTL periodically while actively streaming + # This provides proof-of-life independent of heartbeat thread + if current_time - self.last_ttl_refresh > self.ttl_refresh_interval: + try: + # Refresh TTL on client key + proxy_server.redis_client.expire(client_key, Config.CLIENT_RECORD_TTL) + # Also refresh the client set TTL + client_set_key = f"ts_proxy:channel:{self.channel_id}:clients" + proxy_server.redis_client.expire(client_set_key, Config.CLIENT_RECORD_TTL) + self.last_ttl_refresh = current_time + logger.debug(f"[{self.client_id}] Refreshed client TTL (active streaming)") + except Exception as ttl_error: + logger.debug(f"[{self.client_id}] Failed to refresh TTL: {ttl_error}") except Exception as e: logger.warning(f"[{self.client_id}] Failed to store stats in Redis: {e}") diff --git a/apps/proxy/ts_proxy/stream_manager.py b/apps/proxy/ts_proxy/stream_manager.py index 054de05b..c717398c 100644 --- a/apps/proxy/ts_proxy/stream_manager.py +++ b/apps/proxy/ts_proxy/stream_manager.py @@ -6,9 +6,12 @@ import time import socket import requests import subprocess -import gevent # Add this import +import gevent +import re from typing import Optional, List +from django.db import connection from django.shortcuts import get_object_or_404 +from urllib3.exceptions import ReadTimeoutError from apps.proxy.config import TSConfig as Config from apps.channels.models import Channel, Stream from apps.m3u.models import M3UAccount, M3UAccountProfile @@ -39,6 +42,10 @@ class StreamManager: self.url_switching = False self.url_switch_start_time = 0 self.url_switch_timeout = ConfigHelper.url_switch_timeout() + self.buffering = False + self.buffering_timeout = ConfigHelper.buffering_timeout() + self.buffering_speed = ConfigHelper.buffering_speed() + self.buffering_start_time = None # Store worker_id for ownership checks self.worker_id = worker_id @@ -86,11 +93,13 @@ class StreamManager: self.tried_stream_ids.add(self.current_stream_id) logger.info(f"Loaded stream ID {self.current_stream_id} from Redis for channel {buffer.channel_id}") else: - logger.warning(f"No stream_id found in Redis for channel {channel_id}") + logger.warning(f"No stream_id found in Redis for channel {channel_id}. 
" + f"Stream switching will rely on URL comparison to avoid selecting the same stream.") except Exception as e: logger.warning(f"Error loading stream ID from Redis: {e}") else: - logger.warning(f"Unable to get stream ID for channel {channel_id} - stream switching may not work correctly") + logger.warning(f"Unable to get stream ID for channel {channel_id}. " + f"Stream switching will rely on URL comparison to avoid selecting the same stream.") logger.info(f"Initialized stream manager for channel {buffer.channel_id}") @@ -104,6 +113,10 @@ class StreamManager: # Add stderr reader thread property self.stderr_reader_thread = None + self.ffmpeg_input_phase = True # Track if we're still reading input info + + # Add HTTP reader thread property + self.http_reader = None def _create_session(self): """Create and configure requests session with optimal settings""" @@ -129,6 +142,37 @@ class StreamManager: return session + def _wait_for_existing_processes_to_close(self, timeout=5.0): + """Wait for existing processes/connections to fully close before establishing new ones""" + start_time = time.time() + + while time.time() - start_time < timeout: + # Check if transcode process is still running + if self.transcode_process and self.transcode_process.poll() is None: + logger.debug(f"Waiting for existing transcode process to terminate for channel {self.channel_id}") + gevent.sleep(0.1) + continue + + # Check if HTTP connections are still active + if self.current_response or self.current_session: + logger.debug(f"Waiting for existing HTTP connections to close for channel {self.channel_id}") + gevent.sleep(0.1) + continue + + # Check if socket is still active + if self.socket: + logger.debug(f"Waiting for existing socket to close for channel {self.channel_id}") + gevent.sleep(0.1) + continue + + # All processes/connections are closed + logger.debug(f"All existing processes closed for channel {self.channel_id}") + return True + + # Timeout reached + logger.warning(f"Timeout waiting for existing processes to close for channel {self.channel_id} after {timeout}s") + return False + def run(self): """Main execution loop using HTTP streaming with improved connection handling and stream switching""" # Add a stop flag to the class properties @@ -145,7 +189,7 @@ class StreamManager: health_thread = threading.Thread(target=self._monitor_health, daemon=True) health_thread.start() - logger.info(f"Starting stream for URL: {self.url}") + logger.info(f"Starting stream for URL: {self.url} for channel {self.channel_id}") # Main stream switching loop - we'll try different streams if needed while self.running and stream_switch_attempts <= max_stream_switches: @@ -156,12 +200,39 @@ class StreamManager: f"Resetting switching state.") self._reset_url_switching_state() + # NEW: Check for health monitor recovery requests + if hasattr(self, 'needs_reconnect') and self.needs_reconnect and not self.url_switching: + logger.info(f"Health monitor requested reconnect for channel {self.channel_id}") + self.needs_reconnect = False + + # Attempt reconnect without changing streams + if self._attempt_reconnect(): + logger.info(f"Health-requested reconnect successful for channel {self.channel_id}") + continue # Go back to main loop + else: + logger.warning(f"Health-requested reconnect failed, will try stream switch for channel {self.channel_id}") + self.needs_stream_switch = True + + if hasattr(self, 'needs_stream_switch') and self.needs_stream_switch and not self.url_switching: + logger.info(f"Health monitor requested stream switch for channel 
{self.channel_id}") + self.needs_stream_switch = False + + if self._try_next_stream(): + logger.info(f"Health-requested stream switch successful for channel {self.channel_id}") + stream_switch_attempts += 1 + self.retry_count = 0 # Reset retries for new stream + continue # Go back to main loop with new stream + else: + logger.error(f"Health-requested stream switch failed for channel {self.channel_id}") + # Continue with normal flow + # Check stream type before connecting - stream_type = detect_stream_type(self.url) - if self.transcode == False and stream_type == StreamType.HLS: - logger.info(f"Detected HLS stream: {self.url}") - logger.info(f"HLS streams will be handled with FFmpeg for now - future version will support HLS natively") - # Enable transcoding for HLS streams + self.stream_type = detect_stream_type(self.url) + if self.transcode == False and self.stream_type in (StreamType.HLS, StreamType.RTSP, StreamType.UDP): + stream_type_name = "HLS" if self.stream_type == StreamType.HLS else ("RTSP/RTP" if self.stream_type == StreamType.RTSP else "UDP") + logger.info(f"Detected {stream_type_name} stream: {self.url} for channel {self.channel_id}") + logger.info(f"{stream_type_name} streams require FFmpeg for channel {self.channel_id}") + # Enable transcoding for HLS, RTSP/RTP, and UDP streams self.transcode = True # We'll override the stream profile selection with ffmpeg in the transcoding section self.force_ffmpeg = True @@ -169,13 +240,13 @@ class StreamManager: self.retry_count = 0 url_failed = False if self.url_switching: - logger.debug("Skipping connection attempt during URL switch") + logger.debug(f"Skipping connection attempt during URL switch for channel {self.channel_id}") gevent.sleep(0.1) # REPLACE time.sleep(0.1) continue # Connection retry loop for current URL - while self.running and self.retry_count < self.max_retries and not url_failed: + while self.running and self.retry_count < self.max_retries and not url_failed and not self.needs_stream_switch: - logger.info(f"Connection attempt {self.retry_count + 1}/{self.max_retries} for URL: {self.url}") + logger.info(f"Connection attempt {self.retry_count + 1}/{self.max_retries} for URL: {self.url} for channel {self.channel_id}") # Handle connection based on whether we transcode or not connection_result = False @@ -197,8 +268,12 @@ class StreamManager: # This indicates we had a stable connection for a while before failing connection_duration = time.time() - connection_start_time stable_connection_threshold = 30 # 30 seconds threshold + + if self.needs_stream_switch: + logger.info(f"Stream needs to switch after {connection_duration:.1f} seconds for channel: {self.channel_id}") + break # Exit to switch streams if connection_duration > stable_connection_threshold: - logger.info(f"Stream was stable for {connection_duration:.1f} seconds, resetting switch attempts counter") + logger.info(f"Stream was stable for {connection_duration:.1f} seconds, resetting switch attempts counter for channel: {self.channel_id}") stream_switch_attempts = 0 # Connection failed or ended - decide what to do next @@ -213,15 +288,15 @@ class StreamManager: # If we've reached max retries, mark this URL as failed if self.retry_count >= self.max_retries: url_failed = True - logger.warning(f"Maximum retry attempts ({self.max_retries}) reached for URL: {self.url}") + logger.warning(f"Maximum retry attempts ({self.max_retries}) reached for URL: {self.url} for channel: {self.channel_id}") else: # Wait with exponential backoff before retrying timeout = min(.25 * 
self.retry_count, 3) # Cap at 3 seconds - logger.info(f"Reconnecting in {timeout} seconds... (attempt {self.retry_count}/{self.max_retries})") + logger.info(f"Reconnecting in {timeout} seconds... (attempt {self.retry_count}/{self.max_retries}) for channel: {self.channel_id}") gevent.sleep(timeout) # REPLACE time.sleep(timeout) except Exception as e: - logger.error(f"Connection error: {e}", exc_info=True) + logger.error(f"Connection error on channel: {self.channel_id}: {e}", exc_info=True) self.retry_count += 1 self.connected = False @@ -230,25 +305,25 @@ class StreamManager: else: # Wait with exponential backoff before retrying timeout = min(.25 * self.retry_count, 3) # Cap at 3 seconds - logger.info(f"Reconnecting in {timeout} seconds after error... (attempt {self.retry_count}/{self.max_retries})") + logger.info(f"Reconnecting in {timeout} seconds after error... (attempt {self.retry_count}/{self.max_retries}) for channel: {self.channel_id}") gevent.sleep(timeout) # REPLACE time.sleep(timeout) # If URL failed and we're still running, try switching to another stream if url_failed and self.running: - logger.info(f"URL {self.url} failed after {self.retry_count} attempts, trying next stream") + logger.info(f"URL {self.url} failed after {self.retry_count} attempts, trying next stream for channel: {self.channel_id}") # Try to switch to next stream switch_result = self._try_next_stream() if switch_result: # Successfully switched to a new stream, continue with the new URL stream_switch_attempts += 1 - logger.info(f"Successfully switched to new URL: {self.url} (switch attempt {stream_switch_attempts}/{max_stream_switches})") + logger.info(f"Successfully switched to new URL: {self.url} (switch attempt {stream_switch_attempts}/{max_stream_switches}) for channel: {self.channel_id}") # Reset retry count for the new stream - important for the loop to work correctly self.retry_count = 0 # Continue outer loop with new URL - DON'T add a break statement here else: # No more streams to try - logger.error(f"Failed to find alternative streams after {stream_switch_attempts} attempts") + logger.error(f"Failed to find alternative streams after {stream_switch_attempts} attempts for channel: {self.channel_id}") break elif not self.running: # Normal shutdown was requested @@ -272,7 +347,7 @@ class StreamManager: # Make sure transcode process is terminated if self.transcode_process_active: - logger.info("Ensuring transcode process is terminated in finally block") + logger.info(f"Ensuring transcode process is terminated in finally block for channel: {self.channel_id}") self._close_socket() # Close all connections @@ -309,7 +384,13 @@ class StreamManager: stop_key = RedisKeys.channel_stopping(self.channel_id) self.buffer.redis_client.setex(stop_key, 60, "true") except Exception as e: - logger.error(f"Failed to update channel state in Redis: {e}") + logger.error(f"Failed to update channel state in Redis: {e} for channel {self.channel_id}", exc_info=True) + + # Close database connection for this thread + try: + connection.close() + except Exception: + pass logger.info(f"Stream manager stopped for channel {self.channel_id}") @@ -317,6 +398,22 @@ class StreamManager: """Establish a connection using transcoding""" try: logger.debug(f"Building transcode command for channel {self.channel_id}") + + # Check if we already have a running transcode process + if self.transcode_process and self.transcode_process.poll() is None: + logger.info(f"Existing transcode process found for channel {self.channel_id}, closing before 
establishing new connection") + self._close_socket() + + # Wait for the process to fully terminate + if not self._wait_for_existing_processes_to_close(): + logger.error(f"Failed to close existing transcode process for channel {self.channel_id}") + return False + + # Also check for any lingering HTTP connections + if self.current_response or self.current_session: + logger.debug(f"Closing existing HTTP connections before establishing transcode connection for channel {self.channel_id}") + self._close_connection() + channel = get_stream_object(self.channel_id) # Use FFmpeg specifically for HLS streams @@ -324,17 +421,24 @@ class StreamManager: from core.models import StreamProfile try: stream_profile = StreamProfile.objects.get(name='ffmpeg', locked=True) - logger.info("Using FFmpeg stream profile for HLS content") + logger.info("Using FFmpeg stream profile for unsupported proxy content (HLS/RTSP/UDP)") except StreamProfile.DoesNotExist: # Fall back to channel's profile if FFmpeg not found stream_profile = channel.get_stream_profile() - logger.warning("FFmpeg profile not found, using channel default profile") + logger.warning(f"FFmpeg profile not found, using channel default profile for channel: {self.channel_id}") else: stream_profile = channel.get_stream_profile() # Build and start transcode command self.transcode_cmd = stream_profile.build_command(self.url, self.user_agent) - logger.debug(f"Starting transcode process: {self.transcode_cmd}") + + # For UDP streams, remove any user_agent parameters from the command + if hasattr(self, 'stream_type') and self.stream_type == StreamType.UDP: + # Filter out any arguments that contain the user_agent value or related headers + self.transcode_cmd = [arg for arg in self.transcode_cmd if self.user_agent not in arg and 'user-agent' not in arg.lower() and 'user_agent' not in arg.lower()] + logger.debug(f"Removed user_agent parameters from UDP stream command for channel: {self.channel_id}") + + logger.debug(f"Starting transcode process: {self.transcode_cmd} for channel: {self.channel_id}") # Modified to capture stderr instead of discarding it self.transcode_process = subprocess.Popen( @@ -361,7 +465,7 @@ class StreamManager: return True except Exception as e: - logger.error(f"Error establishing transcode connection: {e}", exc_info=True) + logger.error(f"Error establishing transcode connection for channel: {self.channel_id}: {e}", exc_info=True) self._close_socket() return False @@ -376,74 +480,336 @@ class StreamManager: logger.debug(f"Started stderr reader thread for channel {self.channel_id}") def _read_stderr(self): - """Read and log ffmpeg stderr output""" + """Read and log ffmpeg stderr output with real-time stats parsing""" try: - for error_line in iter(self.transcode_process.stderr.readline, b''): - if error_line: - error_line = error_line.decode('utf-8', errors='replace').strip() - try: - # Wrap the logging call in a try-except to prevent crashes due to logging errors - logger.debug(f"Transcode stderr [{self.channel_id}]: {error_line}") - except OSError as e: - # If logging fails, try a simplified log message - if e.errno == 105: # No buffer space available + buffer = b"" + last_stats_line = b"" + + # Read byte by byte for immediate detection + while self.transcode_process and self.transcode_process.stderr: + try: + # Read one byte at a time for immediate processing + byte = self.transcode_process.stderr.read(1) + if not byte: + break + + buffer += byte + + # Check for frame= at the start of buffer (new stats line) + if buffer == b"frame=": + # We 
detected the start of a stats line, read until we get a complete line + # or hit a carriage return (which overwrites the previous stats) + while True: + next_byte = self.transcode_process.stderr.read(1) + if not next_byte: + break + + buffer += next_byte + + # Break on carriage return (stats overwrite) or newline + if next_byte in (b'\r', b'\n'): + break + + # Also break if we have enough data for a typical stats line + if len(buffer) > 200: # Typical stats line length + break + + # Process the stats line immediately + if buffer.strip(): try: - # Try a much shorter message without the error content - logger.warning(f"Logging error (buffer full) in channel {self.channel_id}") - except: - # If even that fails, we have to silently continue - pass - except Exception: - # Ignore other logging errors to prevent thread crashes - pass + stats_text = buffer.decode('utf-8', errors='ignore').strip() + if stats_text and "frame=" in stats_text: + self._parse_ffmpeg_stats(stats_text) + self._log_stderr_content(stats_text) + except Exception as e: + logger.debug(f"Error parsing immediate stats line: {e}") + + # Clear buffer after processing + buffer = b"" + continue + + # Handle regular line breaks for non-stats content + elif byte == b'\n': + if buffer.strip(): + line_text = buffer.decode('utf-8', errors='ignore').strip() + if line_text and not line_text.startswith("frame="): + self._log_stderr_content(line_text) + buffer = b"" + + # Handle carriage returns (potential stats overwrite) + elif byte == b'\r': + # Check if this might be a stats line + if b"frame=" in buffer: + try: + stats_text = buffer.decode('utf-8', errors='ignore').strip() + if stats_text and "frame=" in stats_text: + self._parse_ffmpeg_stats(stats_text) + self._log_stderr_content(stats_text) + except Exception as e: + logger.debug(f"Error parsing stats on carriage return: {e}") + elif buffer.strip(): + # Regular content with carriage return + line_text = buffer.decode('utf-8', errors='ignore').strip() + if line_text: + self._log_stderr_content(line_text) + buffer = b"" + + # Prevent buffer from growing too large for non-stats content + elif len(buffer) > 1024 and b"frame=" not in buffer: + # Process whatever we have if it's not a stats line + if buffer.strip(): + line_text = buffer.decode('utf-8', errors='ignore').strip() + if line_text: + self._log_stderr_content(line_text) + buffer = b"" + + except Exception as e: + logger.error(f"Error reading stderr byte: {e}") + break + + # Process any remaining buffer content + if buffer.strip(): + try: + remaining_text = buffer.decode('utf-8', errors='ignore').strip() + if remaining_text: + if "frame=" in remaining_text: + self._parse_ffmpeg_stats(remaining_text) + self._log_stderr_content(remaining_text) + except Exception as e: + logger.debug(f"Error processing remaining buffer: {e}") + except Exception as e: # Catch any other exceptions in the thread to prevent crashes try: - logger.error(f"Error in stderr reader thread: {e}") + logger.error(f"Error in stderr reader thread for channel {self.channel_id}: {e}") except: - # Again, if logging fails, continue silently pass - def _establish_http_connection(self): - """Establish a direct HTTP connection to the stream""" + def _log_stderr_content(self, content): + """Log stderr content from FFmpeg with appropriate log levels""" try: - logger.debug(f"Using TS Proxy to connect to stream: {self.url}") + content = content.strip() + if not content: + return - # Create new session for each connection attempt - session = self._create_session() - 
self.current_session = session + # Convert to lowercase for easier matching + content_lower = content.lower() + # Check if we are still in the input phase + if content_lower.startswith('input #') or 'decoder' in content_lower: + self.ffmpeg_input_phase = True + # Track FFmpeg phases - once we see output info, we're past input phase + if content_lower.startswith('output #') or 'encoder' in content_lower: + self.ffmpeg_input_phase = False - # Stream the URL with proper timeout handling - response = session.get( - self.url, - stream=True, - timeout=(10, 60) # 10s connect timeout, 60s read timeout - ) - self.current_response = response + # Only parse stream info if we're still in the input phase + if ("stream #" in content_lower and + ("video:" in content_lower or "audio:" in content_lower) and + self.ffmpeg_input_phase): - if response.status_code == 200: - self.connected = True - self.healthy = True - logger.info(f"Successfully connected to stream source") + from .services.channel_service import ChannelService + if "video:" in content_lower: + ChannelService.parse_and_store_stream_info(self.channel_id, content, "video", self.current_stream_id) + elif "audio:" in content_lower: + ChannelService.parse_and_store_stream_info(self.channel_id, content, "audio", self.current_stream_id) - # Store connection start time for stability tracking - self.connection_start_time = time.time() - - # Set channel state to waiting for clients - self._set_waiting_for_clients() - - return True + # Determine log level based on content + if any(keyword in content_lower for keyword in ['error', 'failed', 'cannot', 'invalid', 'corrupt']): + logger.error(f"FFmpeg stderr for channel {self.channel_id}: {content}") + elif any(keyword in content_lower for keyword in ['warning', 'deprecated', 'ignoring']): + logger.warning(f"FFmpeg stderr for channel {self.channel_id}: {content}") + elif content.startswith('frame=') or 'fps=' in content or 'speed=' in content: + # Stats lines - log at trace level to avoid spam + logger.trace(f"FFmpeg stats for channel {self.channel_id}: {content}") + elif any(keyword in content_lower for keyword in ['input', 'output', 'stream', 'video', 'audio']): + # Stream info - log at info level + logger.info(f"FFmpeg info for channel {self.channel_id}: {content}") + if content.startswith('Input #0'): + # If it's input 0, parse stream info + from .services.channel_service import ChannelService + ChannelService.parse_and_store_stream_info(self.channel_id, content, "input", self.current_stream_id) else: - logger.error(f"Failed to connect to stream: HTTP {response.status_code}") - self._close_connection() - return False - except requests.exceptions.RequestException as e: - logger.error(f"HTTP request error: {e}") - self._close_connection() - return False + # Everything else at debug level + logger.debug(f"FFmpeg stderr for channel {self.channel_id}: {content}") + except Exception as e: - logger.error(f"Error establishing HTTP connection: {e}", exc_info=True) - self._close_connection() + logger.error(f"Error logging stderr content for channel {self.channel_id}: {e}") + + def _parse_ffmpeg_stats(self, stats_line): + """Parse FFmpeg stats line and extract speed, fps, and bitrate""" + try: + # Example FFmpeg stats line: + # frame= 1234 fps= 30 q=28.0 size= 2048kB time=00:00:41.33 bitrate= 406.1kbits/s speed=1.02x + + # Extract speed (e.g., "speed=1.02x") + speed_match = re.search(r'speed=\s*([0-9.]+)x?', stats_line) + ffmpeg_speed = float(speed_match.group(1)) if speed_match else None + + # Extract fps (e.g., 
"fps= 30") + fps_match = re.search(r'fps=\s*([0-9.]+)', stats_line) + ffmpeg_fps = float(fps_match.group(1)) if fps_match else None + + # Extract bitrate (e.g., "bitrate= 406.1kbits/s") + bitrate_match = re.search(r'bitrate=\s*([0-9.]+(?:\.[0-9]+)?)\s*([kmg]?)bits/s', stats_line, re.IGNORECASE) + ffmpeg_output_bitrate = None + if bitrate_match: + bitrate_value = float(bitrate_match.group(1)) + unit = bitrate_match.group(2).lower() + # Convert to kbps + if unit == 'm': + bitrate_value *= 1000 + elif unit == 'g': + bitrate_value *= 1000000 + # If no unit or 'k', it's already in kbps + ffmpeg_output_bitrate = bitrate_value + + # Calculate actual FPS + actual_fps = None + if ffmpeg_fps is not None and ffmpeg_speed is not None and ffmpeg_speed > 0: + actual_fps = ffmpeg_fps / ffmpeg_speed + # Store in Redis if we have valid data + if any(x is not None for x in [ffmpeg_speed, ffmpeg_fps, actual_fps, ffmpeg_output_bitrate]): + self._update_ffmpeg_stats_in_redis(ffmpeg_speed, ffmpeg_fps, actual_fps, ffmpeg_output_bitrate) + + # Also save ffmpeg_output_bitrate to database if we have stream_id + if ffmpeg_output_bitrate is not None and self.current_stream_id: + from .services.channel_service import ChannelService + ChannelService._update_stream_stats_in_db( + self.current_stream_id, + ffmpeg_output_bitrate=ffmpeg_output_bitrate + ) + + # Fix the f-string formatting + actual_fps_str = f"{actual_fps:.1f}" if actual_fps is not None else "N/A" + ffmpeg_output_bitrate_str = f"{ffmpeg_output_bitrate:.1f}" if ffmpeg_output_bitrate is not None else "N/A" + # Log the stats + logger.debug(f"FFmpeg stats for channel {self.channel_id}: - Speed: {ffmpeg_speed}x, FFmpeg FPS: {ffmpeg_fps}, " + f"Actual FPS: {actual_fps_str}, " + f"Output Bitrate: {ffmpeg_output_bitrate_str} kbps") + # If we have a valid speed, check for buffering + if ffmpeg_speed is not None and ffmpeg_speed < self.buffering_speed: + if self.buffering: + # Buffering is still ongoing, check for how long + if self.buffering_start_time is None: + self.buffering_start_time = time.time() + else: + buffering_duration = time.time() - self.buffering_start_time + if buffering_duration > self.buffering_timeout: + # Buffering timeout reached, log error and try next stream + logger.error(f"Buffering timeout reached for channel {self.channel_id} after {buffering_duration:.1f} seconds") + # Send next stream request + if self._try_next_stream(): + logger.info(f"Switched to next stream for channel {self.channel_id} after buffering timeout") + # Reset buffering state + self.buffering = False + self.buffering_start_time = None + else: + logger.error(f"Failed to switch to next stream for channel {self.channel_id} after buffering timeout") + else: + # Buffering just started, set the flag and start timer + self.buffering = True + self.buffering_start_time = time.time() + logger.warning(f"Buffering started for channel {self.channel_id} - speed: {ffmpeg_speed}x") + # Log buffering warning + logger.debug(f"FFmpeg speed on channel {self.channel_id} is below {self.buffering_speed} ({ffmpeg_speed}x) - buffering detected") + # Set channel state to buffering + if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client: + metadata_key = RedisKeys.channel_metadata(self.channel_id) + self.buffer.redis_client.hset(metadata_key, ChannelMetadataField.STATE, ChannelState.BUFFERING) + elif ffmpeg_speed is not None and ffmpeg_speed >= self.buffering_speed: + # Speed is good, check if we were buffering + if self.buffering: + # Reset buffering state + logger.info(f"Buffering 
ended for channel {self.channel_id} - speed: {ffmpeg_speed}x") + self.buffering = False + self.buffering_start_time = None + # Set channel state to active if speed is good + if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client: + metadata_key = RedisKeys.channel_metadata(self.channel_id) + self.buffer.redis_client.hset(metadata_key, ChannelMetadataField.STATE, ChannelState.ACTIVE) + + except Exception as e: + logger.debug(f"Error parsing FFmpeg stats: {e}") + + def _update_ffmpeg_stats_in_redis(self, speed, fps, actual_fps, output_bitrate): + """Update FFmpeg performance stats in Redis metadata""" + try: + if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client: + metadata_key = RedisKeys.channel_metadata(self.channel_id) + update_data = { + ChannelMetadataField.FFMPEG_STATS_UPDATED: str(time.time()) + } + + if speed is not None: + update_data[ChannelMetadataField.FFMPEG_SPEED] = str(round(speed, 3)) + + if fps is not None: + update_data[ChannelMetadataField.FFMPEG_FPS] = str(round(fps, 1)) + + if actual_fps is not None: + update_data[ChannelMetadataField.ACTUAL_FPS] = str(round(actual_fps, 1)) + + if output_bitrate is not None: + update_data[ChannelMetadataField.FFMPEG_OUTPUT_BITRATE] = str(round(output_bitrate, 1)) + + self.buffer.redis_client.hset(metadata_key, mapping=update_data) + + except Exception as e: + logger.error(f"Error updating FFmpeg stats in Redis: {e}") + + + def _establish_http_connection(self): + """Establish HTTP connection using thread-based reader (same as transcode path)""" + try: + logger.debug(f"Using HTTP streamer thread to connect to stream: {self.url}") + + # Check if we already have active HTTP connections + if self.current_response or self.current_session: + logger.info(f"Existing HTTP connection found for channel {self.channel_id}, closing before establishing new connection") + self._close_connection() + + # Wait for connections to fully close + if not self._wait_for_existing_processes_to_close(): + logger.error(f"Failed to close existing HTTP connections for channel {self.channel_id}") + return False + + # Also check for any lingering transcode processes + if self.transcode_process and self.transcode_process.poll() is None: + logger.debug(f"Closing existing transcode process before establishing HTTP connection for channel {self.channel_id}") + self._close_socket() + + # Use HTTPStreamReader to fetch stream and pipe to a readable file descriptor + # This allows us to use the same fetch_chunk() path as transcode + from .http_streamer import HTTPStreamReader + + # Create and start the HTTP stream reader + self.http_reader = HTTPStreamReader( + url=self.url, + user_agent=self.user_agent, + chunk_size=self.chunk_size + ) + + # Start the reader thread and get the read end of the pipe + pipe_fd = self.http_reader.start() + + # Wrap the file descriptor in a file object (same as transcode stdout) + import os + self.socket = os.fdopen(pipe_fd, 'rb', buffering=0) + self.connected = True + self.healthy = True + + logger.info(f"Successfully started HTTP streamer thread for channel {self.channel_id}") + + # Store connection start time for stability tracking + self.connection_start_time = time.time() + + # Set channel state to waiting for clients + self._set_waiting_for_clients() + + return True + + except Exception as e: + logger.error(f"Error establishing HTTP connection for channel {self.channel_id}: {e}", exc_info=True) + self._close_socket() return False def _update_bytes_processed(self, chunk_size): @@ -471,73 +837,44 @@ class 
StreamManager: logger.error(f"Error updating bytes processed: {e}") def _process_stream_data(self): - """Process stream data until disconnect or error""" + """Process stream data until disconnect or error - unified path for both transcode and HTTP""" try: - if self.transcode: - # Handle transcoded stream data - while self.running and self.connected: - if self.fetch_chunk(): - self.last_data_time = time.time() - else: - if not self.running: - break - gevent.sleep(0.1) # REPLACE time.sleep(0.1) - else: - # Handle direct HTTP connection - chunk_count = 0 - try: - for chunk in self.current_response.iter_content(chunk_size=self.chunk_size): - # Check if we've been asked to stop - if self.stop_requested or self.url_switching: - break - - if chunk: - # Track chunk size before adding to buffer - chunk_size = len(chunk) - self._update_bytes_processed(chunk_size) - - # Add chunk to buffer with TS packet alignment - success = self.buffer.add_chunk(chunk) - - if success: - self.last_data_time = time.time() - chunk_count += 1 - - # Update last data timestamp in Redis - if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client: - last_data_key = RedisKeys.last_data(self.buffer.channel_id) - self.buffer.redis_client.set(last_data_key, str(time.time()), ex=60) - except (AttributeError, ConnectionError) as e: - if self.stop_requested or self.url_switching: - logger.debug(f"Expected connection error during shutdown/URL switch: {e}") - else: - logger.error(f"Unexpected stream error: {e}") - raise + # Both transcode and HTTP now use the same subprocess/socket approach + # This gives us perfect control: check flags between chunks, timeout just returns False + while self.running and self.connected and not self.stop_requested and not self.needs_stream_switch: + if self.fetch_chunk(): + self.last_data_time = time.time() + else: + # fetch_chunk() returned False - could be timeout, no data, or error + if not self.running: + break + # Brief sleep before retry to avoid tight loop + gevent.sleep(0.1) except Exception as e: - logger.error(f"Error processing stream data: {e}", exc_info=True) + logger.error(f"Error processing stream data for channel {self.channel_id}: {e}", exc_info=True) # If we exit the loop, connection is closed or failed self.connected = False def _close_all_connections(self): """Close all connection resources""" - if self.socket: + if self.socket or self.transcode_process: try: self._close_socket() except Exception as e: - logger.debug(f"Error closing socket: {e}") + logger.debug(f"Error closing socket for channel {self.channel_id}: {e}") if self.current_response: try: self.current_response.close() except Exception as e: - logger.debug(f"Error closing response: {e}") + logger.debug(f"Error closing response for channel {self.channel_id}: {e}") if self.current_session: try: self.current_session.close() except Exception as e: - logger.debug(f"Error closing session: {e}") + logger.debug(f"Error closing session for channel {self.channel_id}: {e}") # Clear references self.socket = None @@ -564,7 +901,7 @@ class StreamManager: if timer and timer.is_alive(): timer.cancel() except Exception as e: - logger.error(f"Error canceling buffer check timer: {e}") + logger.error(f"Error canceling buffer check timer for channel {self.channel_id}: {e}") self._buffer_check_timers.clear() @@ -591,16 +928,17 @@ class StreamManager: # Set running to false to ensure thread exits self.running = False - def update_url(self, new_url, stream_id=None): + def update_url(self, new_url, stream_id=None, 
m3u_profile_id=None): """Update stream URL and reconnect with proper cleanup for both HTTP and transcode sessions""" if new_url == self.url: logger.info(f"URL unchanged: {new_url}") return False - logger.info(f"Switching stream URL from {self.url} to {new_url}") + logger.info(f"Switching stream URL from {self.url} to {new_url} for channel {self.channel_id}") # Import both models for proper resource management from apps.channels.models import Stream, Channel + from django.db import connection # Update stream profile if we're switching streams if self.current_stream_id and stream_id and self.current_stream_id != stream_id: @@ -609,18 +947,26 @@ class StreamManager: channel = Channel.objects.get(uuid=self.channel_id) # Get stream to find its profile - new_stream = Stream.objects.get(pk=stream_id) + #new_stream = Stream.objects.get(pk=stream_id) # Use the new method to update the profile and manage connection counts - if new_stream.m3u_account_id: - success = channel.update_stream_profile(new_stream.m3u_account_id) + if m3u_profile_id: + success = channel.update_stream_profile(m3u_profile_id) if success: - logger.debug(f"Updated stream profile for channel {self.channel_id} to use profile from stream {stream_id}") + logger.debug(f"Updated m3u profile for channel {self.channel_id} to use profile from stream {stream_id}") else: logger.warning(f"Failed to update stream profile for channel {self.channel_id}") + except Exception as e: logger.error(f"Error updating stream profile for channel {self.channel_id}: {e}") + finally: + # Always close database connection after profile update + try: + connection.close() + except Exception: + pass + # CRITICAL: Set a flag to prevent immediate reconnection with old URL self.url_switching = True self.url_switch_start_time = time.time() @@ -628,10 +974,10 @@ class StreamManager: try: # Check which type of connection we're using and close it properly if self.transcode or self.socket: - logger.debug("Closing transcode process before URL change") + logger.debug(f"Closing transcode process before URL change for channel {self.channel_id}") self._close_socket() else: - logger.debug("Closing HTTP connection before URL change") + logger.debug(f"Closing HTTP connection before URL change for channel {self.channel_id}") self._close_connection() # Update URL and reset connection state @@ -645,7 +991,7 @@ class StreamManager: self.current_stream_id = stream_id # Add stream ID to tried streams for proper tracking self.tried_stream_ids.add(stream_id) - logger.info(f"Updated stream ID from {old_stream_id} to {stream_id} for channel {self.buffer.channel_id}") + logger.info(f"Updated stream ID from {old_stream_id} to {stream_id} for channel {self.channel_id}") # Reset retry counter to allow immediate reconnect self.retry_count = 0 @@ -660,25 +1006,27 @@ class StreamManager: return True except Exception as e: - logger.error(f"Error during URL update: {e}", exc_info=True) + logger.error(f"Error during URL update for channel {self.channel_id}: {e}", exc_info=True) return False finally: # CRITICAL FIX: Always reset the URL switching flag when done, whether successful or not self.url_switching = False - logger.info(f"Stream switch completed for channel {self.buffer.channel_id}") + logger.info(f"Stream switch completed for channel {self.channel_id}") def should_retry(self) -> bool: """Check if connection retry is allowed""" return self.retry_count < self.max_retries def _monitor_health(self): - """Monitor stream health and attempt recovery if needed""" + """Monitor stream health and set 
flags for the main loop to handle recovery""" consecutive_unhealthy_checks = 0 - health_recovery_attempts = 0 - reconnect_attempts = 0 - max_health_recovery_attempts = ConfigHelper.get('MAX_HEALTH_RECOVERY_ATTEMPTS', 2) - max_reconnect_attempts = ConfigHelper.get('MAX_RECONNECT_ATTEMPTS', 3) - min_stable_time = ConfigHelper.get('MIN_STABLE_TIME_BEFORE_RECONNECT', 30) # seconds + max_unhealthy_checks = 3 + + # Add flags for the main loop to check + self.needs_reconnect = False + self.needs_stream_switch = False + self.last_health_action_time = 0 + action_cooldown = 30 # Prevent rapid recovery attempts while self.running: try: @@ -687,48 +1035,43 @@ class StreamManager: timeout_threshold = getattr(Config, 'CONNECTION_TIMEOUT', 10) if inactivity_duration > timeout_threshold and self.connected: - # Mark unhealthy if no data for too long if self.healthy: - logger.warning(f"Stream unhealthy - no data for {inactivity_duration:.1f}s") + logger.warning(f"Stream unhealthy for channel {self.channel_id} - no data for {inactivity_duration:.1f}s") self.healthy = False - # Track consecutive unhealthy checks consecutive_unhealthy_checks += 1 - # After several unhealthy checks in a row, try recovery - if consecutive_unhealthy_checks >= 3 and health_recovery_attempts < max_health_recovery_attempts: - # Calculate how long the stream was stable before failing + # Only set flags if enough time has passed since last action + if (consecutive_unhealthy_checks >= max_unhealthy_checks and + now - self.last_health_action_time > action_cooldown): + + # Calculate stability to decide on action type connection_start_time = getattr(self, 'connection_start_time', 0) stable_time = self.last_data_time - connection_start_time if connection_start_time > 0 else 0 - if stable_time >= min_stable_time and reconnect_attempts < max_reconnect_attempts: - # Stream was stable for a while, try reconnecting first - logger.warning(f"Stream was stable for {stable_time:.1f}s before failing. " - f"Attempting reconnect {reconnect_attempts + 1}/{max_reconnect_attempts}") - reconnect_attempts += 1 - threading.Thread(target=self._attempt_reconnect, daemon=True).start() + if stable_time >= 30: # Stream was stable, try reconnect first + if not self.needs_reconnect: + logger.info(f"Setting reconnect flag for stable stream (stable for {stable_time:.1f}s) for channel {self.channel_id}") + self.needs_reconnect = True + self.last_health_action_time = now else: - # Stream was not stable long enough, or reconnects failed too many times - # Try switching to another stream - if reconnect_attempts > 0: - logger.warning(f"Reconnect attempts exhausted ({reconnect_attempts}/{max_reconnect_attempts}). " - f"Attempting stream switch recovery") - else: - logger.warning(f"Stream was only stable for {stable_time:.1f}s (<{min_stable_time}s). 
" - f"Skipping reconnect, attempting stream switch") + # Stream wasn't stable, suggest stream switch + if not self.needs_stream_switch: + logger.info(f"Setting stream switch flag for unstable stream (stable for {stable_time:.1f}s) for channel {self.channel_id}") + self.needs_stream_switch = True + self.last_health_action_time = now + + consecutive_unhealthy_checks = 0 # Reset after setting flag - health_recovery_attempts += 1 - reconnect_attempts = 0 # Reset for next time - threading.Thread(target=self._attempt_health_recovery, daemon=True).start() elif self.connected and not self.healthy: # Auto-recover health when data resumes - logger.info(f"Stream health restored") + logger.info(f"Stream health restored for channel {self.channel_id} - data resumed after {inactivity_duration:.1f}s") self.healthy = True consecutive_unhealthy_checks = 0 - health_recovery_attempts = 0 - reconnect_attempts = 0 + # Clear recovery flags when healthy again + self.needs_reconnect = False + self.needs_stream_switch = False - # If healthy, reset unhealthy counter (but keep other state) if self.healthy: consecutive_unhealthy_checks = 0 @@ -744,45 +1087,52 @@ class StreamManager: # Don't try to reconnect if we're already switching URLs if self.url_switching: - # Add timeout check to prevent permanent deadlock - if time.time() - self.url_switch_start_time > self.url_switch_timeout: - logger.warning(f"URL switching has been in progress too long ({time.time() - self.url_switch_start_time:.1f}s), " - f"resetting switching state and allowing reconnect") - self._reset_url_switching_state() - else: - logger.info("URL switching already in progress, skipping reconnect") - return False + logger.info(f"URL switching already in progress, skipping reconnect for channel {self.channel_id}") + return False - # Close existing connection - if self.transcode or self.socket: - self._close_socket() - else: - self._close_connection() + # Set a flag to prevent concurrent operations + if hasattr(self, 'reconnecting') and self.reconnecting: + logger.info(f"Reconnect already in progress, skipping for channel {self.channel_id}") + return False - self.connected = False + self.reconnecting = True - # Attempt to establish a new connection using the same URL - connection_result = False try: + # Close existing connection and wait for it to fully terminate + if self.transcode or self.socket: + logger.debug(f"Closing transcode process before reconnect for channel {self.channel_id}") + self._close_socket() + else: + logger.debug(f"Closing HTTP connection before reconnect for channel {self.channel_id}") + self._close_connection() + + # Wait for all processes to fully close before attempting reconnect + if not self._wait_for_existing_processes_to_close(): + logger.warning(f"Some processes may still be running during reconnect for channel {self.channel_id}") + + self.connected = False + + # Attempt to establish a new connection using the same URL + connection_result = False if self.transcode: connection_result = self._establish_transcode_connection() else: connection_result = self._establish_http_connection() if connection_result: - # Store connection start time to measure stability self.connection_start_time = time.time() logger.info(f"Reconnect successful for channel {self.channel_id}") return True else: logger.warning(f"Reconnect failed for channel {self.channel_id}") return False - except Exception as e: - logger.error(f"Error during reconnect: {e}", exc_info=True) - return False + + finally: + self.reconnecting = False except Exception as e: - 
logger.error(f"Error in reconnect attempt: {e}", exc_info=True) + logger.error(f"Error in reconnect attempt for channel {self.channel_id}: {e}", exc_info=True) + self.reconnecting = False return False def _attempt_health_recovery(self): @@ -792,7 +1142,7 @@ class StreamManager: # Don't try to switch if we're already in the process of switching URLs if self.url_switching: - logger.info("URL switching already in progress, skipping health recovery") + logger.info(f"URL switching already in progress, skipping health recovery for channel {self.channel_id}") return # Try to switch to next stream @@ -805,7 +1155,7 @@ class StreamManager: return False except Exception as e: - logger.error(f"Error in health recovery attempt: {e}", exc_info=True) + logger.error(f"Error in health recovery attempt for channel {self.channel_id}: {e}", exc_info=True) return False def _close_connection(self): @@ -815,7 +1165,7 @@ class StreamManager: try: self.current_response.close() except Exception as e: - logger.debug(f"Error closing response: {e}") + logger.debug(f"Error closing response for channel {self.channel_id}: {e}") self.current_response = None # Close session if it exists @@ -823,7 +1173,7 @@ class StreamManager: try: self.current_session.close() except Exception as e: - logger.debug(f"Error closing session: {e}") + logger.debug(f"Error closing session for channel {self.channel_id}: {e}") self.current_session = None def _close_socket(self): @@ -831,19 +1181,24 @@ class StreamManager: # First try to use _close_connection for HTTP resources if self.current_response or self.current_session: self._close_connection() - return + + # Stop HTTP reader thread if it exists + if hasattr(self, 'http_reader') and self.http_reader: + try: + logger.debug(f"Stopping HTTP reader thread for channel {self.channel_id}") + self.http_reader.stop() + self.http_reader = None + except Exception as e: + logger.debug(f"Error stopping HTTP reader for channel {self.channel_id}: {e}") # Otherwise handle socket and transcode resources if self.socket: try: self.socket.close() except Exception as e: - logger.debug(f"Error closing socket: {e}") + logger.debug(f"Error closing socket for channel {self.channel_id}: {e}") pass - self.socket = None - self.connected = False - # Enhanced transcode process cleanup with more aggressive termination if self.transcode_process: try: @@ -856,21 +1211,45 @@ class StreamManager: self.transcode_process.wait(timeout=1.0) except subprocess.TimeoutExpired: # If it doesn't terminate quickly, kill it - logger.warning(f"Transcode process didn't terminate within timeout, killing forcefully") + logger.warning(f"Transcode process didn't terminate within timeout, killing forcefully for channel {self.channel_id}") self.transcode_process.kill() try: self.transcode_process.wait(timeout=1.0) except subprocess.TimeoutExpired: - logger.error(f"Failed to kill transcode process even with force") + logger.error(f"Failed to kill transcode process even with force for channel {self.channel_id}") except Exception as e: - logger.debug(f"Error terminating transcode process: {e}") + logger.debug(f"Error terminating transcode process for channel {self.channel_id}: {e}") # Final attempt: try to kill directly try: self.transcode_process.kill() except Exception as e: - logger.error(f"Final kill attempt failed: {e}") + logger.error(f"Final kill attempt failed for channel {self.channel_id}: {e}") + + # Explicitly close all subprocess pipes to prevent file descriptor leaks + try: + if self.transcode_process.stdin: + 
self.transcode_process.stdin.close() + if self.transcode_process.stdout: + self.transcode_process.stdout.close() + if self.transcode_process.stderr: + self.transcode_process.stderr.close() + logger.debug(f"Closed all subprocess pipes for channel {self.channel_id}") + except Exception as e: + logger.debug(f"Error closing subprocess pipes for channel {self.channel_id}: {e}") + + # Join stderr reader thread to ensure it's fully terminated + if hasattr(self, 'stderr_reader_thread') and self.stderr_reader_thread and self.stderr_reader_thread.is_alive(): + try: + logger.debug(f"Waiting for stderr reader thread to terminate for channel {self.channel_id}") + self.stderr_reader_thread.join(timeout=2.0) + if self.stderr_reader_thread.is_alive(): + logger.warning(f"Stderr reader thread did not terminate within timeout for channel {self.channel_id}") + except Exception as e: + logger.debug(f"Error joining stderr reader thread for channel {self.channel_id}: {e}") + finally: + self.stderr_reader_thread = None self.transcode_process = None self.transcode_process_active = False # Reset the flag @@ -882,8 +1261,9 @@ class StreamManager: self.buffer.redis_client.delete(transcode_key) logger.debug(f"Cleared transcode active flag for channel {self.channel_id}") except Exception as e: - logger.debug(f"Error clearing transcode flag: {e}") - + logger.debug(f"Error clearing transcode flag for channel {self.channel_id}: {e}") + self.socket = None + self.connected = False # Cancel any remaining buffer check timers for timer in list(self._buffer_check_timers): try: @@ -891,31 +1271,47 @@ class StreamManager: timer.cancel() logger.debug(f"Cancelled buffer check timer during socket close for channel {self.channel_id}") except Exception as e: - logger.debug(f"Error canceling timer during socket close: {e}") + logger.debug(f"Error canceling timer during socket close for channel {self.channel_id}: {e}") self._buffer_check_timers = [] def fetch_chunk(self): - """Fetch data from socket with direct pass-through to buffer""" + """Fetch data from socket with timeout handling""" if not self.connected or not self.socket: return False try: - # Read data chunk - no need to align with TS packet size anymore - try: - # Try to read data chunk - if hasattr(self.socket, 'recv'): - chunk = self.socket.recv(Config.CHUNK_SIZE) # Standard socket - else: - chunk = self.socket.read(Config.CHUNK_SIZE) # SocketIO object + # Set timeout for chunk reads + chunk_timeout = ConfigHelper.chunk_timeout() # Use centralized timeout configuration - except AttributeError: - # Fall back to read() if recv() isn't available - chunk = self.socket.read(Config.CHUNK_SIZE) + try: + # Handle different socket types with timeout + if hasattr(self.socket, 'recv'): + # Standard socket - set timeout + original_timeout = self.socket.gettimeout() + self.socket.settimeout(chunk_timeout) + chunk = self.socket.recv(Config.CHUNK_SIZE) + self.socket.settimeout(original_timeout) # Restore original timeout + else: + # SocketIO object (transcode process stdout) - use select for timeout + import select + ready, _, _ = select.select([self.socket], [], [], chunk_timeout) + + if not ready: + # Timeout occurred + logger.debug(f"Chunk read timeout ({chunk_timeout}s) for channel {self.channel_id}") + return False + + chunk = self.socket.read(Config.CHUNK_SIZE) + + except socket.timeout: + # Socket timeout occurred + logger.debug(f"Socket timeout ({chunk_timeout}s) for channel {self.channel_id}") + return False if not chunk: # Connection closed by server - logger.warning("Server 
closed connection") + logger.warning(f"Server closed connection for channel {self.channel_id}") self._close_socket() self.connected = False return False @@ -969,7 +1365,17 @@ class StreamManager: # Only update if not already past connecting if not current_state or current_state in [ChannelState.INITIALIZING, ChannelState.CONNECTING]: # NEW CODE: Check if buffer has enough chunks - current_buffer_index = getattr(self.buffer, 'index', 0) + # IMPORTANT: Read from Redis, not local buffer.index, because in multi-worker setup + # each worker has its own StreamBuffer instance with potentially stale local index + buffer_index_key = RedisKeys.buffer_index(channel_id) + current_buffer_index = 0 + try: + redis_index = redis_client.get(buffer_index_key) + if redis_index: + current_buffer_index = int(redis_index) + except Exception as e: + logger.error(f"Error reading buffer index from Redis: {e}") + initial_chunks_needed = ConfigHelper.initial_behind_chunks() if current_buffer_index < initial_chunks_needed: @@ -998,13 +1404,13 @@ class StreamManager: redis_client.hset(metadata_key, mapping=update_data) # Get configured grace period or default - grace_period = ConfigHelper.get('CHANNEL_INIT_GRACE_PERIOD', 20) + grace_period = ConfigHelper.channel_init_grace_period() logger.info(f"STREAM MANAGER: Updated channel {channel_id} state: {current_state or 'None'} -> {ChannelState.WAITING_FOR_CLIENTS} with {current_buffer_index} buffer chunks") logger.info(f"Started initial connection grace period ({grace_period}s) for channel {channel_id}") else: logger.debug(f"Not changing state: channel {channel_id} already in {current_state} state") except Exception as e: - logger.error(f"Error setting waiting for clients state: {e}") + logger.error(f"Error setting waiting for clients state for channel {channel_id}: {e}") def _check_buffer_and_set_state(self): """Check buffer size and set state to waiting_for_clients when ready""" @@ -1017,10 +1423,21 @@ class StreamManager: # Clean up completed timers self._buffer_check_timers = [t for t in self._buffer_check_timers if t.is_alive()] - if hasattr(self.buffer, 'index') and hasattr(self.buffer, 'channel_id'): - current_buffer_index = self.buffer.index - initial_chunks_needed = getattr(Config, 'INITIAL_BEHIND_CHUNKS', 10) + if hasattr(self.buffer, 'channel_id') and hasattr(self.buffer, 'redis_client'): channel_id = self.buffer.channel_id + redis_client = self.buffer.redis_client + + # IMPORTANT: Read from Redis, not local buffer.index + buffer_index_key = RedisKeys.buffer_index(channel_id) + current_buffer_index = 0 + try: + redis_index = redis_client.get(buffer_index_key) + if redis_index: + current_buffer_index = int(redis_index) + except Exception as e: + logger.error(f"Error reading buffer index from Redis: {e}") + + initial_chunks_needed = ConfigHelper.initial_behind_chunks() # Use ConfigHelper for consistency if current_buffer_index >= initial_chunks_needed: # We now have enough buffer, call _set_waiting_for_clients again @@ -1039,12 +1456,13 @@ class StreamManager: return True # Return value to indicate check was successful except Exception as e: - logger.error(f"Error in buffer check: {e}") + logger.error(f"Error in buffer check for channel {self.channel_id}: {e}") return False def _try_next_stream(self): """ Try to switch to the next available stream for this channel. + Will iterate through multiple alternate streams if needed to find one with a different URL. 
Returns: bool: True if successfully switched to a new stream, False otherwise @@ -1070,59 +1488,71 @@ class StreamManager: logger.warning(f"All {len(alternate_streams)} alternate streams have been tried for channel {self.channel_id}") return False - # Get the next stream to try - next_stream = untried_streams[0] - stream_id = next_stream['stream_id'] + # IMPROVED: Try multiple streams until we find one with a different URL + for next_stream in untried_streams: + stream_id = next_stream['stream_id'] + profile_id = next_stream['profile_id'] # This is the M3U profile ID we need - # Add to tried streams - self.tried_stream_ids.add(stream_id) + # Add to tried streams + self.tried_stream_ids.add(stream_id) - # Get stream info including URL - logger.info(f"Trying next stream ID {stream_id} for channel {self.channel_id}") - stream_info = get_stream_info_for_switch(self.channel_id, stream_id) + # Get stream info including URL using the profile_id we already have + logger.info(f"Trying next stream ID {stream_id} with profile ID {profile_id} for channel {self.channel_id}") + stream_info = get_stream_info_for_switch(self.channel_id, stream_id) - if 'error' in stream_info or not stream_info.get('url'): - logger.error(f"Error getting info for stream {stream_id}: {stream_info.get('error', 'No URL')}") - return False + if 'error' in stream_info or not stream_info.get('url'): + logger.error(f"Error getting info for stream {stream_id} for channel {self.channel_id}: {stream_info.get('error', 'No URL')}") + continue # Try next stream instead of giving up - # Update URL and user agent - new_url = stream_info['url'] - new_user_agent = stream_info['user_agent'] - new_transcode = stream_info['transcode'] + # Update URL and user agent + new_url = stream_info['url'] + new_user_agent = stream_info['user_agent'] + new_transcode = stream_info['transcode'] - logger.info(f"Switching from URL {self.url} to {new_url} for channel {self.channel_id}") + # CRITICAL FIX: Check if the new URL is the same as current URL + # This can happen when current_stream_id is None and we accidentally select the same stream + if new_url == self.url: + logger.warning(f"Stream ID {stream_id} generates the same URL as current stream ({new_url}). 
" + f"Skipping this stream and trying next alternative.") + continue # Try next stream instead of giving up - # Update stream ID tracking - self.current_stream_id = stream_id + logger.info(f"Switching from URL {self.url} to {new_url} for channel {self.channel_id}") - # Store the new user agent and transcode settings - self.user_agent = new_user_agent - self.transcode = new_transcode + # IMPORTANT: Just update the URL, don't stop the channel or release resources + switch_result = self.update_url(new_url, stream_id, profile_id) + if not switch_result: + logger.error(f"Failed to update URL for stream ID {stream_id} for channel {self.channel_id}") + continue # Try next stream - # Update stream metadata in Redis - if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client: - metadata_key = RedisKeys.channel_metadata(self.channel_id) - self.buffer.redis_client.hset(metadata_key, mapping={ - ChannelMetadataField.URL: new_url, - ChannelMetadataField.USER_AGENT: new_user_agent, - ChannelMetadataField.STREAM_PROFILE: stream_info['stream_profile'], - ChannelMetadataField.M3U_PROFILE: stream_info['m3u_profile_id'], - ChannelMetadataField.STREAM_ID: str(stream_id), - ChannelMetadataField.STREAM_SWITCH_TIME: str(time.time()), - ChannelMetadataField.STREAM_SWITCH_REASON: "max_retries_exceeded" - }) + # Update stream ID tracking + self.current_stream_id = stream_id - # Log the switch - logger.info(f"Stream metadata updated for channel {self.channel_id} to stream ID {stream_id}") + # Store the new user agent and transcode settings + self.user_agent = new_user_agent + self.transcode = new_transcode - # IMPORTANT: Just update the URL, don't stop the channel or release resources - switch_result = self.update_url(new_url, stream_id) - if not switch_result: - logger.error(f"Failed to update URL for stream ID {stream_id}") - return False + # Update stream metadata in Redis - use the profile_id we got from get_alternate_streams + if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client: + metadata_key = RedisKeys.channel_metadata(self.channel_id) + self.buffer.redis_client.hset(metadata_key, mapping={ + ChannelMetadataField.URL: new_url, + ChannelMetadataField.USER_AGENT: new_user_agent, + ChannelMetadataField.STREAM_PROFILE: stream_info['stream_profile'], + ChannelMetadataField.M3U_PROFILE: str(profile_id), # Use the profile_id from get_alternate_streams + ChannelMetadataField.STREAM_ID: str(stream_id), + ChannelMetadataField.STREAM_SWITCH_TIME: str(time.time()), + ChannelMetadataField.STREAM_SWITCH_REASON: "max_retries_exceeded" + }) - logger.info(f"Successfully switched to stream ID {stream_id} with URL {new_url}") - return True + # Log the switch + logger.info(f"Stream metadata updated for channel {self.channel_id} to stream ID {stream_id} with M3U profile {profile_id}") + + logger.info(f"Successfully switched to stream ID {stream_id} with URL {new_url} for channel {self.channel_id}") + return True + + # If we get here, we tried all streams but none worked + logger.error(f"Tried {len(untried_streams)} alternate streams but none were suitable for channel {self.channel_id}") + return False except Exception as e: logger.error(f"Error trying next stream for channel {self.channel_id}: {e}", exc_info=True) diff --git a/apps/proxy/ts_proxy/url_utils.py b/apps/proxy/ts_proxy/url_utils.py index e3b1c264..4717dc0d 100644 --- a/apps/proxy/ts_proxy/url_utils.py +++ b/apps/proxy/ts_proxy/url_utils.py @@ -8,7 +8,7 @@ from typing import Optional, Tuple, List from django.shortcuts import 
get_object_or_404 from apps.channels.models import Channel, Stream from apps.m3u.models import M3UAccount, M3UAccountProfile -from core.models import UserAgent, CoreSettings +from core.models import UserAgent, CoreSettings, StreamProfile from .utils import get_logger from uuid import UUID import requests @@ -17,7 +17,6 @@ logger = get_logger() def get_stream_object(id: str): try: - uuid_obj = UUID(id, version=4) logger.info(f"Fetching channel ID {id}") return get_object_or_404(Channel, uuid=id) except: @@ -27,16 +26,67 @@ def get_stream_object(id: str): def generate_stream_url(channel_id: str) -> Tuple[str, str, bool, Optional[int]]: """ - Generate the appropriate stream URL for a channel based on its profile settings. + Generate the appropriate stream URL for a channel or stream based on its profile settings. Args: - channel_id: The UUID of the channel + channel_id: The UUID of the channel or stream hash Returns: Tuple[str, str, bool, Optional[int]]: (stream_url, user_agent, transcode_flag, profile_id) """ try: - channel = get_stream_object(channel_id) + channel_or_stream = get_stream_object(channel_id) + + # Handle direct stream preview (custom streams) + if isinstance(channel_or_stream, Stream): + stream = channel_or_stream + logger.info(f"Previewing stream directly: {stream.id} ({stream.name})") + + # For custom streams, we need to get the M3U account and profile + m3u_account = stream.m3u_account + if not m3u_account: + logger.error(f"Stream {stream.id} has no M3U account") + return None, None, False, None + + # Get the default profile for this M3U account (custom streams use default) + m3u_profiles = m3u_account.profiles.all() + profile = next((obj for obj in m3u_profiles if obj.is_default), None) + + if not profile: + logger.error(f"No default profile found for M3U account {m3u_account.id}") + return None, None, False, None + + # Get the appropriate user agent + stream_user_agent = m3u_account.get_user_agent().user_agent + if stream_user_agent is None: + stream_user_agent = UserAgent.objects.get(id=CoreSettings.get_default_user_agent_id()) + logger.debug(f"No user agent found for account, using default: {stream_user_agent}") + + # Get stream URL (no transformation for custom streams) + stream_url = stream.url + + # Check if the stream has its own stream_profile set, otherwise use default + if stream.stream_profile: + stream_profile = stream.stream_profile + logger.debug(f"Using stream's own stream profile: {stream_profile.name}") + else: + stream_profile = StreamProfile.objects.get( + id=CoreSettings.get_default_stream_profile_id() + ) + logger.debug(f"Using default stream profile: {stream_profile.name}") + + # Check if transcoding is needed + if stream_profile.is_proxy() or stream_profile is None: + transcode = False + else: + transcode = True + + stream_profile_id = stream_profile.id + + return stream_url, stream_user_agent, transcode, stream_profile_id + + # Handle channel preview (existing logic) + channel = channel_or_stream # Get stream and profile for this channel # Note: get_stream now returns 3 values (stream_id, profile_id, error_reason) @@ -126,7 +176,10 @@ def get_stream_info_for_switch(channel_id: str, target_stream_id: Optional[int] dict: Stream information including URL, user agent and transcode flag """ try: + from core.utils import RedisClient + channel = get_object_or_404(Channel, uuid=channel_id) + redis_client = RedisClient.get_client() # Use the target stream if specified, otherwise use current stream if target_stream_id: @@ -135,24 +188,58 @@ def 
get_stream_info_for_switch(channel_id: str, target_stream_id: Optional[int] # Get the stream object stream = get_object_or_404(Stream, pk=stream_id) - # Find compatible profile for this stream - profiles = M3UAccountProfile.objects.filter(m3u_account=stream.m3u_account) + # Find compatible profile for this stream with connection availability check + m3u_account = stream.m3u_account + if not m3u_account: + return {'error': 'Stream has no M3U account'} - if not profiles.exists(): - # Try to get default profile - default_profile = M3UAccountProfile.objects.filter( - m3u_account=stream.m3u_account, - is_default=True - ).first() + m3u_profiles = m3u_account.profiles.filter(is_active=True) + default_profile = next((obj for obj in m3u_profiles if obj.is_default), None) - if default_profile: - m3u_profile_id = default_profile.id + if not default_profile: + return {'error': 'M3U account has no default profile'} + + # Check profiles in order: default first, then others + profiles = [default_profile] + [obj for obj in m3u_profiles if not obj.is_default] + + selected_profile = None + for profile in profiles: + + # Check connection availability + if redis_client: + profile_connections_key = f"profile_connections:{profile.id}" + current_connections = int(redis_client.get(profile_connections_key) or 0) + + # Check if this channel is already using this profile + channel_using_profile = False + existing_stream_id = redis_client.get(f"channel_stream:{channel.id}") + if existing_stream_id: + # Decode bytes to string/int for proper Redis key lookup + existing_stream_id = existing_stream_id.decode('utf-8') + existing_profile_id = redis_client.get(f"stream_profile:{existing_stream_id}") + if existing_profile_id and int(existing_profile_id.decode('utf-8')) == profile.id: + channel_using_profile = True + logger.debug(f"Channel {channel.id} already using profile {profile.id}") + + # Calculate effective connections (subtract 1 if channel already using this profile) + effective_connections = current_connections - (1 if channel_using_profile else 0) + + # Check if profile has available slots + if profile.max_streams == 0 or effective_connections < profile.max_streams: + selected_profile = profile + logger.debug(f"Selected profile {profile.id} with {effective_connections}/{profile.max_streams} effective connections (current: {current_connections}, already using: {channel_using_profile})") + break + else: + logger.debug(f"Profile {profile.id} at max connections: {effective_connections}/{profile.max_streams} (current: {current_connections}, already using: {channel_using_profile})") else: - logger.error(f"No profile found for stream {stream_id}") - return {'error': 'No profile found for stream'} - else: - # Use first available profile - m3u_profile_id = profiles.first().id + # No Redis available, assume first active profile is okay + selected_profile = profile + break + + if not selected_profile: + return {'error': 'No profiles available with connection capacity'} + + m3u_profile_id = selected_profile.id else: stream_id, m3u_profile_id, error_reason = channel.get_stream() if stream_id is None or m3u_profile_id is None: @@ -162,8 +249,15 @@ def get_stream_info_for_switch(channel_id: str, target_stream_id: Optional[int] stream = get_object_or_404(Stream, pk=stream_id) profile = get_object_or_404(M3UAccountProfile, pk=m3u_profile_id) - # Get the user agent from the M3U account + # Check connections left m3u_account = M3UAccount.objects.get(id=profile.m3u_account.id) + #connections_left = get_connections_left(m3u_profile_id) 
+ + #if connections_left <= 0: + #logger.warning(f"No connections left for M3U account {m3u_account.id}") + #return {'error': 'No connections left'} + + # Get the user agent from the M3U account user_agent = m3u_account.get_user_agent().user_agent # Generate URL using the transform function directly @@ -172,7 +266,7 @@ def get_stream_info_for_switch(channel_id: str, target_stream_id: Optional[int] # Get transcode info from the channel's stream profile stream_profile = channel.get_stream_profile() transcode = not (stream_profile.is_proxy() or stream_profile is None) - profile_value = str(stream_profile) + profile_value = stream_profile.id return { 'url': stream_url, @@ -198,15 +292,18 @@ def get_alternate_streams(channel_id: str, current_stream_id: Optional[int] = No List[dict]: List of stream information dictionaries with stream_id and profile_id """ try: + from core.utils import RedisClient + # Get channel object channel = get_stream_object(channel_id) if isinstance(channel, Stream): logger.error(f"Stream is not a channel") return [] + redis_client = RedisClient.get_client() logger.debug(f"Looking for alternate streams for channel {channel_id}, current stream ID: {current_stream_id}") - # Get all assigned streams for this channel using the correct ordering from the channelstream table + # Get all assigned streams for this channel using the correct ordering streams = channel.streams.all().order_by('channelstream__order') logger.debug(f"Channel {channel_id} has {streams.count()} total assigned streams") @@ -218,7 +315,6 @@ def get_alternate_streams(channel_id: str, current_stream_id: Optional[int] = No # Process each stream in the user-defined order for stream in streams: - # Log each stream we're checking logger.debug(f"Checking stream ID {stream.id} ({stream.name}) for channel {channel_id}") # Skip the current failing stream @@ -226,44 +322,76 @@ def get_alternate_streams(channel_id: str, current_stream_id: Optional[int] = No logger.debug(f"Skipping current stream ID {current_stream_id}") continue - # Find compatible profiles for this stream + # Find compatible profiles for this stream with connection checking try: - # Check if we can find profiles via m3u_account - profiles = M3UAccountProfile.objects.filter(m3u_account=stream.m3u_account) - if not profiles.exists(): - logger.debug(f"No profiles found via m3u_account for stream {stream.id}") - # Fallback to the default profile of the account - default_profile = M3UAccountProfile.objects.filter( - m3u_account=stream.m3u_account, - is_default=True - ).first() - if default_profile: - profiles = [default_profile] + m3u_account = stream.m3u_account + if not m3u_account: + logger.debug(f"Stream {stream.id} has no M3U account") + continue + if m3u_account.is_active == False: + logger.debug(f"M3U account {m3u_account.id} is inactive, skipping.") + continue + m3u_profiles = m3u_account.profiles.filter(is_active=True) + default_profile = next((obj for obj in m3u_profiles if obj.is_default), None) + + if not default_profile: + logger.debug(f"M3U account {m3u_account.id} has no default profile") + continue + + # Check profiles in order with connection availability + profiles = [default_profile] + [obj for obj in m3u_profiles if not obj.is_default] + + selected_profile = None + for profile in profiles: + # Check connection availability + if redis_client: + profile_connections_key = f"profile_connections:{profile.id}" + current_connections = int(redis_client.get(profile_connections_key) or 0) + + # Check if this channel is already using this profile + 
channel_using_profile = False + existing_stream_id = redis_client.get(f"channel_stream:{channel.id}") + if existing_stream_id: + # Decode bytes to string/int for proper Redis key lookup + existing_stream_id = existing_stream_id.decode('utf-8') + existing_profile_id = redis_client.get(f"stream_profile:{existing_stream_id}") + if existing_profile_id and int(existing_profile_id.decode('utf-8')) == profile.id: + channel_using_profile = True + logger.debug(f"Channel {channel.id} already using profile {profile.id}") + + # Calculate effective connections (subtract 1 if channel already using this profile) + effective_connections = current_connections - (1 if channel_using_profile else 0) + + # Check if profile has available slots + if profile.max_streams == 0 or effective_connections < profile.max_streams: + selected_profile = profile + logger.debug(f"Found available profile {profile.id} for stream {stream.id}: {effective_connections}/{profile.max_streams} effective (current: {current_connections}, already using: {channel_using_profile})") + break + else: + logger.debug(f"Profile {profile.id} at max connections: {effective_connections}/{profile.max_streams} (current: {current_connections}, already using: {channel_using_profile})") else: - logger.warning(f"No default profile found for m3u_account {stream.m3u_account.id}") - continue - - # Get first compatible profile - profile = profiles.first() - if profile: - logger.debug(f"Found compatible profile ID {profile.id} for stream ID {stream.id}") + # No Redis available, assume first active profile is okay + selected_profile = profile + break + if selected_profile: alternate_streams.append({ 'stream_id': stream.id, - 'profile_id': profile.id, + 'profile_id': selected_profile.id, 'name': stream.name }) else: - logger.debug(f"No compatible profile found for stream ID {stream.id}") + logger.debug(f"No available profiles for stream ID {stream.id}") + except Exception as inner_e: logger.error(f"Error finding profiles for stream {stream.id}: {inner_e}") continue if alternate_streams: stream_ids = ', '.join([str(s['stream_id']) for s in alternate_streams]) - logger.info(f"Found {len(alternate_streams)} alternate streams for channel {channel_id}: [{stream_ids}]") + logger.info(f"Found {len(alternate_streams)} alternate streams with available connections for channel {channel_id}: [{stream_ids}]") else: - logger.warning(f"No alternate streams found for channel {channel_id}") + logger.warning(f"No alternate streams with available connections found for channel {channel_id}") return alternate_streams except Exception as e: @@ -274,6 +402,9 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)): """ Validate if a stream URL is accessible without downloading the full content. + Note: UDP/RTP/RTSP streams are automatically considered valid as they cannot + be validated via HTTP methods. 
+ Args: url (str): The URL to validate user_agent (str): User agent to use for the request @@ -282,6 +413,12 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)): Returns: tuple: (is_valid, final_url, status_code, message) """ + # Check if URL uses non-HTTP protocols (UDP/RTP/RTSP) + # These cannot be validated via HTTP methods, so we skip validation + if url.startswith(('udp://', 'rtp://', 'rtsp://')): + logger.info(f"Skipping HTTP validation for non-HTTP protocol: {url}") + return True, url, 200, "Non-HTTP protocol (UDP/RTP/RTSP) - validation skipped" + try: # Create session with proper headers session = requests.Session() @@ -381,3 +518,47 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)): finally: if 'session' in locals(): session.close() + +def get_connections_left(m3u_profile_id: int) -> int: + """ + Get the number of available connections left for an M3U profile. + + Args: + m3u_profile_id: The ID of the M3U profile + + Returns: + int: Number of connections available (0 if none available) + """ + try: + from core.utils import RedisClient + + # Get the M3U profile + m3u_profile = M3UAccountProfile.objects.get(id=m3u_profile_id) + + # If max_streams is 0, it means unlimited + if m3u_profile.max_streams == 0: + return 999999 # Return a large number to indicate unlimited + + # Get Redis client + redis_client = RedisClient.get_client() + if not redis_client: + logger.warning("Redis not available, assuming connections available") + return max(0, m3u_profile.max_streams - 1) # Conservative estimate + + # Check current connections for this specific profile + profile_connections_key = f"profile_connections:{m3u_profile_id}" + current_connections = int(redis_client.get(profile_connections_key) or 0) + + # Calculate available connections + connections_left = max(0, m3u_profile.max_streams - current_connections) + + logger.debug(f"M3U profile {m3u_profile_id}: {current_connections}/{m3u_profile.max_streams} used, {connections_left} available") + + return connections_left + + except M3UAccountProfile.DoesNotExist: + logger.error(f"M3U profile {m3u_profile_id} not found") + return 0 + except Exception as e: + logger.error(f"Error getting connections left for M3U profile {m3u_profile_id}: {e}") + return 0 diff --git a/apps/proxy/ts_proxy/utils.py b/apps/proxy/ts_proxy/utils.py index b568b804..20a6e140 100644 --- a/apps/proxy/ts_proxy/utils.py +++ b/apps/proxy/ts_proxy/utils.py @@ -7,19 +7,27 @@ logger = logging.getLogger("ts_proxy") def detect_stream_type(url): """ - Detect if stream URL is HLS or TS format. + Detect if stream URL is HLS, RTSP/RTP, UDP, or TS format. Args: url (str): The stream URL to analyze Returns: - str: 'hls' or 'ts' depending on detected format + str: 'hls', 'rtsp', 'udp', or 'ts' depending on detected format """ if not url: return 'unknown' url_lower = url.lower() + # Check for UDP streams (requires FFmpeg) + if url_lower.startswith('udp://'): + return 'udp' + + # Check for RTSP/RTP streams (requires FFmpeg) + if url_lower.startswith('rtsp://') or url_lower.startswith('rtp://'): + return 'rtsp' + # Look for common HLS indicators if (url_lower.endswith('.m3u8') or '.m3u8?' 
in url_lower or diff --git a/apps/proxy/ts_proxy/views.py b/apps/proxy/ts_proxy/views.py index b90e1585..c1b803ab 100644 --- a/apps/proxy/ts_proxy/views.py +++ b/apps/proxy/ts_proxy/views.py @@ -3,7 +3,8 @@ import threading import time import random import re -from django.http import StreamingHttpResponse, JsonResponse, HttpResponseRedirect +import pathlib +from django.http import StreamingHttpResponse, JsonResponse, HttpResponseRedirect, HttpResponse from django.views.decorators.csrf import csrf_exempt from django.shortcuts import get_object_or_404 from apps.proxy.config import TSConfig as Config @@ -15,22 +16,39 @@ from .redis_keys import RedisKeys import logging from apps.channels.models import Channel, Stream from apps.m3u.models import M3UAccount, M3UAccountProfile +from apps.accounts.models import User from core.models import UserAgent, CoreSettings, PROXY_PROFILE_NAME from rest_framework.decorators import api_view, permission_classes -from rest_framework.permissions import IsAuthenticated +from rest_framework.response import Response +from apps.accounts.permissions import ( + IsAdmin, + permission_classes_by_method, + permission_classes_by_action, +) from .constants import ChannelState, EventType, StreamType, ChannelMetadataField from .config_helper import ConfigHelper from .services.channel_service import ChannelService -from .url_utils import generate_stream_url, transform_url, get_stream_info_for_switch, get_stream_object, get_alternate_streams +from core.utils import send_websocket_update +from .url_utils import ( + generate_stream_url, + transform_url, + get_stream_info_for_switch, + get_stream_object, + get_alternate_streams, +) from .utils import get_logger from uuid import UUID import gevent +from dispatcharr.utils import network_access_allowed logger = get_logger() -@api_view(['GET']) +@api_view(["GET"]) def stream_ts(request, channel_id): + if not network_access_allowed(request, "STREAMS"): + return JsonResponse({"error": "Forbidden"}, status=403) + """Stream TS data to client with immediate response and keep-alive packets during initialization""" channel = get_stream_object(channel_id) @@ -44,10 +62,12 @@ def stream_ts(request, channel_id): logger.info(f"[{client_id}] Requested stream for channel {channel_id}") # Extract client user agent early - for header in ['HTTP_USER_AGENT', 'User-Agent', 'user-agent']: - if (header in request.META): + for header in ["HTTP_USER_AGENT", "User-Agent", "user-agent"]: + if header in request.META: client_user_agent = request.META[header] - logger.debug(f"[{client_id}] Client connected with user agent: {client_user_agent}") + logger.debug( + f"[{client_id}] Client connected with user agent: {client_user_agent}" + ) break # Check if we need to reinitialize the channel @@ -60,43 +80,56 @@ def stream_ts(request, channel_id): metadata_key = RedisKeys.channel_metadata(channel_id) if proxy_server.redis_client.exists(metadata_key): metadata = proxy_server.redis_client.hgetall(metadata_key) - state_field = ChannelMetadataField.STATE.encode('utf-8') + state_field = ChannelMetadataField.STATE.encode("utf-8") if state_field in metadata: - channel_state = metadata[state_field].decode('utf-8') + channel_state = metadata[state_field].decode("utf-8") - # IMPROVED: Check for *any* state that indicates initialization is in progress - active_states = [ChannelState.INITIALIZING, ChannelState.CONNECTING, ChannelState.WAITING_FOR_CLIENTS, ChannelState.ACTIVE] - if channel_state in active_states: + if channel_state: # Channel is being initialized or already 
active - no need for reinitialization needs_initialization = False - logger.debug(f"[{client_id}] Channel {channel_id} already in state {channel_state}, skipping initialization") + logger.debug( + f"[{client_id}] Channel {channel_id} already in state {channel_state}, skipping initialization" + ) # Special handling for initializing/connecting states - if channel_state in [ChannelState.INITIALIZING, ChannelState.CONNECTING]: + if channel_state in [ + ChannelState.INITIALIZING, + ChannelState.CONNECTING, + ]: channel_initializing = True - logger.debug(f"[{client_id}] Channel {channel_id} is still initializing, client will wait for completion") + logger.debug( + f"[{client_id}] Channel {channel_id} is still initializing, client will wait for completion" + ) else: # Only check for owner if channel is in a valid state - owner_field = ChannelMetadataField.OWNER.encode('utf-8') + owner_field = ChannelMetadataField.OWNER.encode("utf-8") if owner_field in metadata: - owner = metadata[owner_field].decode('utf-8') + owner = metadata[owner_field].decode("utf-8") owner_heartbeat_key = f"ts_proxy:worker:{owner}:heartbeat" if proxy_server.redis_client.exists(owner_heartbeat_key): # Owner is still active, so we don't need to reinitialize needs_initialization = False - logger.debug(f"[{client_id}] Channel {channel_id} has active owner {owner}") + logger.debug( + f"[{client_id}] Channel {channel_id} has active owner {owner}" + ) # Start initialization if needed if needs_initialization or not proxy_server.check_if_channel_exists(channel_id): logger.info(f"[{client_id}] Starting channel {channel_id} initialization") # Force cleanup of any previous instance if in terminal state - if channel_state in [ChannelState.ERROR, ChannelState.STOPPING, ChannelState.STOPPED]: - logger.warning(f"[{client_id}] Channel {channel_id} in state {channel_state}, forcing cleanup") - proxy_server.stop_channel(channel_id) + if channel_state in [ + ChannelState.ERROR, + ChannelState.STOPPING, + ChannelState.STOPPED, + ]: + logger.warning( + f"[{client_id}] Channel {channel_id} in state {channel_state}, forcing cleanup" + ) + ChannelService.stop_channel(channel_id) - # Use max retry attempts and connection timeout from config - max_retries = ConfigHelper.max_retries() - retry_timeout = ConfigHelper.connection_timeout() + # Use fixed retry interval and timeout + retry_timeout = 3 # 3 seconds total timeout + retry_interval = 0.1 # 100ms between attempts wait_start_time = time.time() stream_url = None @@ -104,70 +137,111 @@ def stream_ts(request, channel_id): transcode = False profile_value = None error_reason = None + attempt = 0 + should_retry = True - # Try to get a stream with configured retries - for attempt in range(max_retries): - stream_url, stream_user_agent, transcode, profile_value = generate_stream_url(channel_id) + # Try to get a stream with fixed interval retries + while should_retry and time.time() - wait_start_time < retry_timeout: + attempt += 1 + stream_url, stream_user_agent, transcode, profile_value = ( + generate_stream_url(channel_id) + ) if stream_url is not None: - logger.info(f"[{client_id}] Successfully obtained stream for channel {channel_id}") + logger.info( + f"[{client_id}] Successfully obtained stream for channel {channel_id} after {attempt} attempts" + ) break - # If we failed because there are no streams assigned, don't retry - _, _, error_reason = channel.get_stream() - if error_reason and 'maximum connection limits' not in error_reason: - logger.warning(f"[{client_id}] Can't retry - error not related to 
connection limits: {error_reason}") + # On first failure, check if the error is retryable + if attempt == 1: + _, _, error_reason = channel.get_stream() + if error_reason and "maximum connection limits" not in error_reason: + logger.warning( + f"[{client_id}] Can't retry - error not related to connection limits: {error_reason}" + ) + should_retry = False + break + + # Check if we have time remaining for another sleep cycle + elapsed_time = time.time() - wait_start_time + remaining_time = retry_timeout - elapsed_time + + # If we don't have enough time for the next sleep interval, break + # but only after we've already made an attempt (the while condition will try one more time) + if remaining_time <= retry_interval: + logger.info( + f"[{client_id}] Insufficient time ({remaining_time:.1f}s) for another sleep cycle, will make one final attempt" + ) break - # Don't exceed the overall connection timeout - if time.time() - wait_start_time > retry_timeout: - logger.warning(f"[{client_id}] Connection wait timeout exceeded ({retry_timeout}s)") - break + # Wait before retrying + logger.info( + f"[{client_id}] Waiting {retry_interval*1000:.0f}ms for a connection to become available (attempt {attempt}, {remaining_time:.1f}s remaining)" + ) + gevent.sleep(retry_interval) + retry_interval += 0.025 # Increase wait time by 25ms for next attempt - # Wait before retrying (using exponential backoff with a cap) - wait_time = min(0.5 * (2 ** attempt), 2.0) # Caps at 2 seconds - logger.info(f"[{client_id}] Waiting {wait_time:.1f}s for a connection to become available (attempt {attempt+1}/{max_retries})") - gevent.sleep(wait_time) # FIXED: Using gevent.sleep instead of time.sleep + # Make one final attempt if we still don't have a stream, should retry, and haven't exceeded timeout + if stream_url is None and should_retry and time.time() - wait_start_time < retry_timeout: + attempt += 1 + logger.info( + f"[{client_id}] Making final attempt {attempt} at timeout boundary" + ) + stream_url, stream_user_agent, transcode, profile_value = ( + generate_stream_url(channel_id) + ) + if stream_url is not None: + logger.info( + f"[{client_id}] Successfully obtained stream on final attempt for channel {channel_id}" + ) if stream_url is None: - # Make sure to release any stream locks that might have been acquired - if hasattr(channel, 'streams') and channel.streams.exists(): - for stream in channel.streams.all(): - try: - stream.release_stream() - logger.info(f"[{client_id}] Released stream {stream.id} for channel {channel_id}") - except Exception as e: - logger.error(f"[{client_id}] Error releasing stream: {e}") + # Release the channel's stream lock if one was acquired + # Note: Only call this if get_stream() actually assigned a stream + # In our case, if stream_url is None, no stream was ever assigned, so don't release # Get the specific error message if available wait_duration = f"{int(time.time() - wait_start_time)}s" - error_msg = error_reason if error_reason else 'No available streams for this channel' - return JsonResponse({ - 'error': error_msg, - 'waited': wait_duration - }, status=503) # 503 Service Unavailable is appropriate here + error_msg = ( + error_reason + if error_reason + else "No available streams for this channel" + ) + logger.info( + f"[{client_id}] Failed to obtain stream after {attempt} attempts over {wait_duration}: {error_msg}" + ) + return JsonResponse( + {"error": error_msg, "waited": wait_duration}, status=503 + ) # 503 Service Unavailable is appropriate here # Get the stream ID from the channel 
stream_id, m3u_profile_id, _ = channel.get_stream() - logger.info(f"Channel {channel_id} using stream ID {stream_id}, m3u account profile ID {m3u_profile_id}") + logger.info( + f"Channel {channel_id} using stream ID {stream_id}, m3u account profile ID {m3u_profile_id}" + ) # Generate transcode command if needed stream_profile = channel.get_stream_profile() if stream_profile.is_redirect(): # Validate the stream URL before redirecting - from .url_utils import validate_stream_url, get_alternate_streams, get_stream_info_for_switch + from .url_utils import ( + validate_stream_url, + get_alternate_streams, + get_stream_info_for_switch, + ) # Try initial URL logger.info(f"[{client_id}] Validating redirect URL: {stream_url}") is_valid, final_url, status_code, message = validate_stream_url( - stream_url, - user_agent=stream_user_agent, - timeout=(5, 5) + stream_url, user_agent=stream_user_agent, timeout=(5, 5) ) # If first URL doesn't validate, try alternates if not is_valid: - logger.warning(f"[{client_id}] Primary stream URL failed validation: {message}") + logger.warning( + f"[{client_id}] Primary stream URL failed validation: {message}" + ) # Track tried streams to avoid loops tried_streams = {stream_id} @@ -177,49 +251,80 @@ def stream_ts(request, channel_id): # Try each alternate until one works for alt in alternates: - if alt['stream_id'] in tried_streams: + if alt["stream_id"] in tried_streams: continue - tried_streams.add(alt['stream_id']) + tried_streams.add(alt["stream_id"]) # Get stream info - alt_info = get_stream_info_for_switch(channel_id, alt['stream_id']) - if 'error' in alt_info: - logger.warning(f"[{client_id}] Error getting alternate stream info: {alt_info['error']}") + alt_info = get_stream_info_for_switch( + channel_id, alt["stream_id"] + ) + if "error" in alt_info: + logger.warning( + f"[{client_id}] Error getting alternate stream info: {alt_info['error']}" + ) continue # Validate the alternate URL - logger.info(f"[{client_id}] Trying alternate stream #{alt['stream_id']}: {alt_info['url']}") + logger.info( + f"[{client_id}] Trying alternate stream #{alt['stream_id']}: {alt_info['url']}" + ) is_valid, final_url, status_code, message = validate_stream_url( - alt_info['url'], - user_agent=alt_info['user_agent'], - timeout=(5, 5) + alt_info["url"], + user_agent=alt_info["user_agent"], + timeout=(5, 5), ) if is_valid: - logger.info(f"[{client_id}] Alternate stream #{alt['stream_id']} validated successfully") + logger.info( + f"[{client_id}] Alternate stream #{alt['stream_id']} validated successfully" + ) break else: - logger.warning(f"[{client_id}] Alternate stream #{alt['stream_id']} failed validation: {message}") + logger.warning( + f"[{client_id}] Alternate stream #{alt['stream_id']} failed validation: {message}" + ) # Release stream lock before redirecting channel.release_stream() # Final decision based on validation results if is_valid: - logger.info(f"[{client_id}] Redirecting to validated URL: {final_url} ({message})") + logger.info( + f"[{client_id}] Redirecting to validated URL: {final_url} ({message})" + ) + + # For non-HTTP protocols (RTSP/RTP/UDP), we need to manually create the redirect + # because Django's HttpResponseRedirect blocks them for security + if final_url.startswith(('rtsp://', 'rtp://', 'udp://')): + logger.info(f"[{client_id}] Using manual redirect for non-HTTP protocol") + response = HttpResponse(status=301) + response['Location'] = final_url + return response + return HttpResponseRedirect(final_url) else: - logger.error(f"[{client_id}] All available 
redirect URLs failed validation") - return JsonResponse({ - 'error': 'All available streams failed validation' - }, status=502) # 502 Bad Gateway + logger.error( + f"[{client_id}] All available redirect URLs failed validation" + ) + return JsonResponse( + {"error": "All available streams failed validation"}, status=502 + ) # 502 Bad Gateway # Initialize channel with the stream's user agent (not the client's) success = ChannelService.initialize_channel( - channel_id, stream_url, stream_user_agent, transcode, profile_value, stream_id, m3u_profile_id + channel_id, + stream_url, + stream_user_agent, + transcode, + profile_value, + stream_id, + m3u_profile_id, ) if not success: - return JsonResponse({'error': 'Failed to initialize channel'}, status=500) + return JsonResponse( + {"error": "Failed to initialize channel"}, status=500 + ) # If we're the owner, wait for connection to establish if proxy_server.am_i_owner(channel_id): @@ -230,7 +335,9 @@ def stream_ts(request, channel_id): while not manager.connected: if time.time() - wait_start > timeout: proxy_server.stop_channel(channel_id) - return JsonResponse({'error': 'Connection timeout'}, status=504) + return JsonResponse( + {"error": "Connection timeout"}, status=504 + ) # Check if this manager should keep retrying or stop if not manager.should_retry(): @@ -240,41 +347,68 @@ def stream_ts(request, channel_id): if proxy_server.redis_client: try: - state_bytes = proxy_server.redis_client.hget(metadata_key, ChannelMetadataField.STATE) + state_bytes = proxy_server.redis_client.hget( + metadata_key, ChannelMetadataField.STATE + ) if state_bytes: - current_state = state_bytes.decode('utf-8') - logger.debug(f"[{client_id}] Current state of channel {channel_id}: {current_state}") + current_state = state_bytes.decode("utf-8") + logger.debug( + f"[{client_id}] Current state of channel {channel_id}: {current_state}" + ) except Exception as e: - logger.warning(f"[{client_id}] Error getting channel state: {e}") + logger.warning( + f"[{client_id}] Error getting channel state: {e}" + ) # Allow normal transitional states to continue - if current_state in [ChannelState.INITIALIZING, ChannelState.CONNECTING]: - logger.info(f"[{client_id}] Channel {channel_id} is in {current_state} state, continuing to wait") + if current_state in [ + ChannelState.INITIALIZING, + ChannelState.CONNECTING, + ]: + logger.info( + f"[{client_id}] Channel {channel_id} is in {current_state} state, continuing to wait" + ) # Reset wait timer to allow the transition to complete wait_start = time.time() continue # Check if we're switching URLs - if hasattr(manager, 'url_switching') and manager.url_switching: - logger.info(f"[{client_id}] Stream manager is currently switching URLs for channel {channel_id}") + if ( + hasattr(manager, "url_switching") + and manager.url_switching + ): + logger.info( + f"[{client_id}] Stream manager is currently switching URLs for channel {channel_id}" + ) # Reset wait timer to give the switch a chance wait_start = time.time() continue # If we reach here, we've exhausted retries and the channel isn't in a valid transitional state - logger.warning(f"[{client_id}] Channel {channel_id} failed to connect and is not in transitional state") + logger.warning( + f"[{client_id}] Channel {channel_id} failed to connect and is not in transitional state" + ) proxy_server.stop_channel(channel_id) - return JsonResponse({'error': 'Failed to connect'}, status=502) + return JsonResponse( + {"error": "Failed to connect"}, status=502 + ) - gevent.sleep(0.1) # FIXED: Using 
gevent.sleep instead of time.sleep + gevent.sleep( + 0.1 + ) # FIXED: Using gevent.sleep instead of time.sleep logger.info(f"[{client_id}] Successfully initialized channel {channel_id}") channel_initializing = True # Register client - can do this regardless of initialization state # Create local resources if needed - if channel_id not in proxy_server.stream_buffers or channel_id not in proxy_server.client_managers: - logger.debug(f"[{client_id}] Channel {channel_id} exists in Redis but not initialized in this worker - initializing now") + if ( + channel_id not in proxy_server.stream_buffers + or channel_id not in proxy_server.client_managers + ): + logger.debug( + f"[{client_id}] Channel {channel_id} exists in Redis but not initialized in this worker - initializing now" + ) # Get URL from Redis metadata url = None @@ -282,32 +416,54 @@ def stream_ts(request, channel_id): if proxy_server.redis_client: metadata_key = RedisKeys.channel_metadata(channel_id) - url_bytes = proxy_server.redis_client.hget(metadata_key, ChannelMetadataField.URL) - ua_bytes = proxy_server.redis_client.hget(metadata_key, ChannelMetadataField.USER_AGENT) - profile_bytes = proxy_server.redis_client.hget(metadata_key, ChannelMetadataField.STREAM_PROFILE) + url_bytes = proxy_server.redis_client.hget( + metadata_key, ChannelMetadataField.URL + ) + ua_bytes = proxy_server.redis_client.hget( + metadata_key, ChannelMetadataField.USER_AGENT + ) + profile_bytes = proxy_server.redis_client.hget( + metadata_key, ChannelMetadataField.STREAM_PROFILE + ) if url_bytes: - url = url_bytes.decode('utf-8') + url = url_bytes.decode("utf-8") if ua_bytes: - stream_user_agent = ua_bytes.decode('utf-8') + stream_user_agent = ua_bytes.decode("utf-8") # Extract transcode setting from Redis if profile_bytes: - profile_str = profile_bytes.decode('utf-8') - use_transcode = (profile_str == PROXY_PROFILE_NAME or profile_str == 'None') - logger.debug(f"Using profile '{profile_str}' for channel {channel_id}, transcode={use_transcode}") + profile_str = profile_bytes.decode("utf-8") + use_transcode = ( + profile_str == PROXY_PROFILE_NAME or profile_str == "None" + ) + logger.debug( + f"Using profile '{profile_str}' for channel {channel_id}, transcode={use_transcode}" + ) else: # Default settings when profile not found in Redis - profile_str = 'None' # Default profile name - use_transcode = False # Default to direct streaming without transcoding - logger.debug(f"No profile found in Redis for channel {channel_id}, defaulting to transcode={use_transcode}") + profile_str = "None" # Default profile name + use_transcode = ( + False # Default to direct streaming without transcoding + ) + logger.debug( + f"No profile found in Redis for channel {channel_id}, defaulting to transcode={use_transcode}" + ) # Use client_user_agent as fallback if stream_user_agent is None - success = proxy_server.initialize_channel(url, channel_id, stream_user_agent or client_user_agent, use_transcode) + success = proxy_server.initialize_channel( + url, channel_id, stream_user_agent or client_user_agent, use_transcode + ) if not success: - logger.error(f"[{client_id}] Failed to initialize channel {channel_id} locally") - return JsonResponse({'error': 'Failed to initialize channel locally'}, status=500) + logger.error( + f"[{client_id}] Failed to initialize channel {channel_id} locally" + ) + return JsonResponse( + {"error": "Failed to initialize channel locally"}, status=500 + ) - logger.info(f"[{client_id}] Successfully initialized channel {channel_id} locally") + logger.info( + 
f"[{client_id}] Successfully initialized channel {channel_id} locally" + ) # Register client buffer = proxy_server.stream_buffers[channel_id] @@ -322,53 +478,106 @@ def stream_ts(request, channel_id): # Return the StreamingHttpResponse from the main function response = StreamingHttpResponse( - streaming_content=generate(), - content_type='video/mp2t' + streaming_content=generate(), content_type="video/mp2t" ) - response['Cache-Control'] = 'no-cache' + response["Cache-Control"] = "no-cache" return response except Exception as e: logger.error(f"Error in stream_ts: {e}", exc_info=True) - return JsonResponse({'error': str(e)}, status=500) + return JsonResponse({"error": str(e)}, status=500) + + +@api_view(["GET"]) +def stream_xc(request, username, password, channel_id): + user = get_object_or_404(User, username=username) + + extension = pathlib.Path(channel_id).suffix + channel_id = pathlib.Path(channel_id).stem + + custom_properties = user.custom_properties or {} + + if "xc_password" not in custom_properties: + return Response({"error": "Invalid credentials"}, status=401) + + if custom_properties["xc_password"] != password: + return Response({"error": "Invalid credentials"}, status=401) + + print(f"Fetchin channel with ID: {channel_id}") + if user.user_level < 10: + user_profile_count = user.channel_profiles.count() + + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channels based on user_level + filters = { + "id": int(channel_id), + "user_level__lte": user.user_level + } + channel = Channel.objects.filter(**filters).first() + else: + # User has specific limited profiles assigned + filters = { + "id": int(channel_id), + "channelprofilemembership__enabled": True, + "user_level__lte": user.user_level, + "channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + channel = Channel.objects.filter(**filters).distinct().first() + + if not channel: + return JsonResponse({"error": "Not found"}, status=404) + else: + channel = get_object_or_404(Channel, id=channel_id) + + # @TODO: we've got the file 'type' via extension, support this when we support multiple outputs + return stream_ts(request._request, str(channel.uuid)) + @csrf_exempt -@api_view(['POST']) -@permission_classes([IsAuthenticated]) +@api_view(["POST"]) +@permission_classes([IsAdmin]) def change_stream(request, channel_id): """Change stream URL for existing channel with enhanced diagnostics""" proxy_server = ProxyServer.get_instance() try: data = json.loads(request.body) - new_url = data.get('url') - user_agent = data.get('user_agent') - stream_id = data.get('stream_id') + new_url = data.get("url") + user_agent = data.get("user_agent") + stream_id = data.get("stream_id") # If stream_id is provided, get the URL and user_agent from it if stream_id: - logger.info(f"Stream ID {stream_id} provided, looking up stream info for channel {channel_id}") + logger.info( + f"Stream ID {stream_id} provided, looking up stream info for channel {channel_id}" + ) stream_info = get_stream_info_for_switch(channel_id, stream_id) - if 'error' in stream_info: - return JsonResponse({ - 'error': stream_info['error'], - 'stream_id': stream_id - }, status=404) + if "error" in stream_info: + return JsonResponse( + {"error": stream_info["error"], "stream_id": stream_id}, status=404 + ) # Use the info from the stream - new_url = stream_info['url'] - user_agent = stream_info['user_agent'] - m3u_profile_id = stream_info.get('m3u_profile_id') + new_url = 
stream_info["url"] + user_agent = stream_info["user_agent"] + m3u_profile_id = stream_info.get("m3u_profile_id") # Stream ID will be passed to change_stream_url later elif not new_url: - return JsonResponse({'error': 'Either url or stream_id must be provided'}, status=400) + return JsonResponse( + {"error": "Either url or stream_id must be provided"}, status=400 + ) - logger.info(f"Attempting to change stream for channel {channel_id} to {new_url}") + logger.info( + f"Attempting to change stream for channel {channel_id} to {new_url}" + ) # Use the service layer instead of direct implementation # Pass stream_id to ensure proper connection tracking - result = ChannelService.change_stream_url(channel_id, new_url, user_agent, stream_id, m3u_profile_id) + result = ChannelService.change_stream_url( + channel_id, new_url, user_agent, stream_id, m3u_profile_id + ) # Get the stream manager before updating URL stream_manager = proxy_server.stream_managers.get(channel_id) @@ -377,37 +586,43 @@ def change_stream(request, channel_id): if stream_manager: # Reset tried streams when manually switching URL via API stream_manager.tried_stream_ids = set() - logger.debug(f"Reset tried stream IDs for channel {channel_id} during manual stream change") + logger.debug( + f"Reset tried stream IDs for channel {channel_id} during manual stream change" + ) - if result.get('status') == 'error': - return JsonResponse({ - 'error': result.get('message', 'Unknown error'), - 'diagnostics': result.get('diagnostics', {}) - }, status=404) + if result.get("status") == "error": + return JsonResponse( + { + "error": result.get("message", "Unknown error"), + "diagnostics": result.get("diagnostics", {}), + }, + status=404, + ) # Format response based on whether it was a direct update or event-based response_data = { - 'message': 'Stream changed successfully', - 'channel': channel_id, - 'url': new_url, - 'owner': result.get('direct_update', False), - 'worker_id': proxy_server.worker_id + "message": "Stream changed successfully", + "channel": channel_id, + "url": new_url, + "owner": result.get("direct_update", False), + "worker_id": proxy_server.worker_id, } # Include stream_id in response if it was used if stream_id: - response_data['stream_id'] = stream_id + response_data["stream_id"] = stream_id return JsonResponse(response_data) except json.JSONDecodeError: - return JsonResponse({'error': 'Invalid JSON'}, status=400) + return JsonResponse({"error": "Invalid JSON"}, status=400) except Exception as e: logger.error(f"Failed to change stream: {e}", exc_info=True) - return JsonResponse({'error': str(e)}, status=500) + return JsonResponse({"error": str(e)}, status=500) -@api_view(['GET']) -@permission_classes([IsAuthenticated]) + +@api_view(["GET"]) +@permission_classes([IsAdmin]) def channel_status(request, channel_id=None): """ Returns status information about channels with detail level based on request: @@ -419,7 +634,7 @@ def channel_status(request, channel_id=None): try: # Check if Redis is available if not proxy_server.redis_client: - return JsonResponse({'error': 'Redis connection not available'}, status=500) + return JsonResponse({"error": "Redis connection not available"}, status=500) # Handle single channel or all channels if channel_id: @@ -428,7 +643,9 @@ def channel_status(request, channel_id=None): if channel_info: return JsonResponse(channel_info) else: - return JsonResponse({'error': f'Channel {channel_id} not found'}, status=404) + return JsonResponse( + {"error": f"Channel {channel_id} not found"}, status=404 + ) 
else: # Basic info for all channels channel_pattern = "ts_proxy:channel:*:metadata" @@ -437,9 +654,13 @@ def channel_status(request, channel_id=None): # Extract channel IDs from keys cursor = 0 while True: - cursor, keys = proxy_server.redis_client.scan(cursor, match=channel_pattern) + cursor, keys = proxy_server.redis_client.scan( + cursor, match=channel_pattern + ) for key in keys: - channel_id_match = re.search(r"ts_proxy:channel:(.*):metadata", key.decode('utf-8')) + channel_id_match = re.search( + r"ts_proxy:channel:(.*):metadata", key.decode("utf-8") + ) if channel_id_match: ch_id = channel_id_match.group(1) channel_info = ChannelStatus.get_basic_channel_info(ch_id) @@ -449,15 +670,28 @@ def channel_status(request, channel_id=None): if cursor == 0: break - return JsonResponse({'channels': all_channels, 'count': len(all_channels)}) + # Send WebSocket update with the stats + # Format it the same way the original Celery task did + send_websocket_update( + "updates", + "update", + { + "success": True, + "type": "channel_stats", + "stats": json.dumps({'channels': all_channels, 'count': len(all_channels)}) + } + ) + + return JsonResponse({"channels": all_channels, "count": len(all_channels)}) except Exception as e: logger.error(f"Error in channel_status: {e}", exc_info=True) - return JsonResponse({'error': str(e)}, status=500) + return JsonResponse({"error": str(e)}, status=500) + @csrf_exempt -@api_view(['POST', 'DELETE']) -@permission_classes([IsAuthenticated]) +@api_view(["POST", "DELETE"]) +@permission_classes([IsAdmin]) def stop_channel(request, channel_id): """Stop a channel and release all associated resources using PubSub events""" try: @@ -466,60 +700,70 @@ def stop_channel(request, channel_id): # Use the service layer instead of direct implementation result = ChannelService.stop_channel(channel_id) - if result.get('status') == 'error': - return JsonResponse({'error': result.get('message', 'Unknown error')}, status=404) + if result.get("status") == "error": + return JsonResponse( + {"error": result.get("message", "Unknown error")}, status=404 + ) - return JsonResponse({ - 'message': 'Channel stop request sent', - 'channel_id': channel_id, - 'previous_state': result.get('previous_state') - }) + return JsonResponse( + { + "message": "Channel stop request sent", + "channel_id": channel_id, + "previous_state": result.get("previous_state"), + } + ) except Exception as e: logger.error(f"Failed to stop channel: {e}", exc_info=True) - return JsonResponse({'error': str(e)}, status=500) + return JsonResponse({"error": str(e)}, status=500) + @csrf_exempt -@api_view(['POST']) -@permission_classes([IsAuthenticated]) +@api_view(["POST"]) +@permission_classes([IsAdmin]) def stop_client(request, channel_id): """Stop a specific client connection using existing client management""" try: # Parse request body to get client ID data = json.loads(request.body) - client_id = data.get('client_id') + client_id = data.get("client_id") if not client_id: - return JsonResponse({'error': 'No client_id provided'}, status=400) + return JsonResponse({"error": "No client_id provided"}, status=400) # Use the service layer instead of direct implementation result = ChannelService.stop_client(channel_id, client_id) - if result.get('status') == 'error': - return JsonResponse({'error': result.get('message')}, status=404) + if result.get("status") == "error": + return JsonResponse({"error": result.get("message")}, status=404) - return JsonResponse({ - 'message': 'Client stop request processed', - 'channel_id': channel_id, 
- 'client_id': client_id, - 'locally_processed': result.get('locally_processed', False) - }) + return JsonResponse( + { + "message": "Client stop request processed", + "channel_id": channel_id, + "client_id": client_id, + "locally_processed": result.get("locally_processed", False), + } + ) except json.JSONDecodeError: - return JsonResponse({'error': 'Invalid JSON'}, status=400) + return JsonResponse({"error": "Invalid JSON"}, status=400) except Exception as e: logger.error(f"Failed to stop client: {e}", exc_info=True) - return JsonResponse({'error': str(e)}, status=500) + return JsonResponse({"error": str(e)}, status=500) + @csrf_exempt -@api_view(['POST']) -@permission_classes([IsAuthenticated]) +@api_view(["POST"]) +@permission_classes([IsAdmin]) def next_stream(request, channel_id): """Switch to the next available stream for a channel""" proxy_server = ProxyServer.get_instance() try: - logger.info(f"Request to switch to next stream for channel {channel_id} received") + logger.info( + f"Request to switch to next stream for channel {channel_id} received" + ) # Check if the channel exists channel = get_stream_object(channel_id) @@ -532,29 +776,42 @@ def next_stream(request, channel_id): metadata_key = RedisKeys.channel_metadata(channel_id) if proxy_server.redis_client.exists(metadata_key): # Get current stream ID from Redis - stream_id_bytes = proxy_server.redis_client.hget(metadata_key, ChannelMetadataField.STREAM_ID) + stream_id_bytes = proxy_server.redis_client.hget( + metadata_key, ChannelMetadataField.STREAM_ID + ) if stream_id_bytes: - current_stream_id = int(stream_id_bytes.decode('utf-8')) - logger.info(f"Found current stream ID {current_stream_id} in Redis for channel {channel_id}") + current_stream_id = int(stream_id_bytes.decode("utf-8")) + logger.info( + f"Found current stream ID {current_stream_id} in Redis for channel {channel_id}" + ) # Get M3U profile from Redis if available - profile_id_bytes = proxy_server.redis_client.hget(metadata_key, ChannelMetadataField.M3U_PROFILE) + profile_id_bytes = proxy_server.redis_client.hget( + metadata_key, ChannelMetadataField.M3U_PROFILE + ) if profile_id_bytes: - profile_id = int(profile_id_bytes.decode('utf-8')) - logger.info(f"Found M3U profile ID {profile_id} in Redis for channel {channel_id}") + profile_id = int(profile_id_bytes.decode("utf-8")) + logger.info( + f"Found M3U profile ID {profile_id} in Redis for channel {channel_id}" + ) if not current_stream_id: # Channel is not running - return JsonResponse({'error': 'No current stream found for channel'}, status=404) + return JsonResponse( + {"error": "No current stream found for channel"}, status=404 + ) # Get all streams for this channel in their defined order - streams = list(channel.streams.all().order_by('channelstream__order')) + streams = list(channel.streams.all().order_by("channelstream__order")) if len(streams) <= 1: - return JsonResponse({ - 'error': 'No alternate streams available for this channel', - 'current_stream_id': current_stream_id - }, status=404) + return JsonResponse( + { + "error": "No alternate streams available for this channel", + "current_stream_id": current_stream_id, + }, + status=404, + ) # Find the current stream's position in the list current_index = None @@ -564,61 +821,74 @@ def next_stream(request, channel_id): break if current_index is None: - logger.warning(f"Current stream ID {current_stream_id} not found in channel's streams list") + logger.warning( + f"Current stream ID {current_stream_id} not found in channel's streams list" + ) # Fall back 
to the first stream that's not the current one next_stream = next((s for s in streams if s.id != current_stream_id), None) if not next_stream: - return JsonResponse({ - 'error': 'Could not find current stream in channel list', - 'current_stream_id': current_stream_id - }, status=404) + return JsonResponse( + { + "error": "Could not find current stream in channel list", + "current_stream_id": current_stream_id, + }, + status=404, + ) else: # Get the next stream in the rotation (with wrap-around) next_index = (current_index + 1) % len(streams) next_stream = streams[next_index] next_stream_id = next_stream.id - logger.info(f"Rotating to next stream ID {next_stream_id} for channel {channel_id}") + logger.info( + f"Rotating to next stream ID {next_stream_id} for channel {channel_id}" + ) # Get full stream info including URL for the next stream stream_info = get_stream_info_for_switch(channel_id, next_stream_id) - if 'error' in stream_info: - return JsonResponse({ - 'error': stream_info['error'], - 'current_stream_id': current_stream_id, - 'next_stream_id': next_stream_id - }, status=404) + if "error" in stream_info: + return JsonResponse( + { + "error": stream_info["error"], + "current_stream_id": current_stream_id, + "next_stream_id": next_stream_id, + }, + status=404, + ) # Now use the ChannelService to change the stream URL result = ChannelService.change_stream_url( channel_id, - stream_info['url'], - stream_info['user_agent'], - next_stream_id # Pass the stream_id to be stored in Redis + stream_info["url"], + stream_info["user_agent"], + next_stream_id, # Pass the stream_id to be stored in Redis ) - if result.get('status') == 'error': - return JsonResponse({ - 'error': result.get('message', 'Unknown error'), - 'diagnostics': result.get('diagnostics', {}), - 'current_stream_id': current_stream_id, - 'next_stream_id': next_stream_id - }, status=404) + if result.get("status") == "error": + return JsonResponse( + { + "error": result.get("message", "Unknown error"), + "diagnostics": result.get("diagnostics", {}), + "current_stream_id": current_stream_id, + "next_stream_id": next_stream_id, + }, + status=404, + ) # Format success response response_data = { - 'message': 'Stream switched to next available', - 'channel': channel_id, - 'previous_stream_id': current_stream_id, - 'new_stream_id': next_stream_id, - 'new_url': stream_info['url'], - 'owner': result.get('direct_update', False), - 'worker_id': proxy_server.worker_id + "message": "Stream switched to next available", + "channel": channel_id, + "previous_stream_id": current_stream_id, + "new_stream_id": next_stream_id, + "new_url": stream_info["url"], + "owner": result.get("direct_update", False), + "worker_id": proxy_server.worker_id, } return JsonResponse(response_data) except Exception as e: logger.error(f"Failed to switch to next stream: {e}", exc_info=True) - return JsonResponse({'error': str(e)}, status=500) + return JsonResponse({"error": str(e)}, status=500) diff --git a/apps/proxy/urls.py b/apps/proxy/urls.py index 98303990..34c026a9 100644 --- a/apps/proxy/urls.py +++ b/apps/proxy/urls.py @@ -5,4 +5,5 @@ app_name = 'proxy' urlpatterns = [ path('ts/', include('apps.proxy.ts_proxy.urls')), path('hls/', include('apps.proxy.hls_proxy.urls')), + path('vod/', include('apps.proxy.vod_proxy.urls')), ] \ No newline at end of file diff --git a/apps/proxy/vod_proxy/__init__.py b/apps/proxy/vod_proxy/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/apps/proxy/vod_proxy/connection_manager.py 
b/apps/proxy/vod_proxy/connection_manager.py new file mode 100644 index 00000000..dea5759b --- /dev/null +++ b/apps/proxy/vod_proxy/connection_manager.py @@ -0,0 +1,1445 @@ +""" +VOD Connection Manager - Redis-based connection tracking for VOD streams +""" + +import time +import json +import logging +import threading +import random +import re +import requests +from typing import Optional, Dict, Any +from django.http import StreamingHttpResponse, HttpResponse +from core.utils import RedisClient +from apps.vod.models import Movie, Episode +from apps.m3u.models import M3UAccountProfile + +logger = logging.getLogger("vod_proxy") + + +class PersistentVODConnection: + """Handles a single persistent connection to a VOD provider for a session""" + + def __init__(self, session_id: str, stream_url: str, headers: dict): + self.session_id = session_id + self.stream_url = stream_url + self.base_headers = headers + self.session = None + self.current_response = None + self.content_length = None + self.content_type = 'video/mp4' + self.final_url = None + self.lock = threading.Lock() + self.request_count = 0 # Track number of requests on this connection + self.last_activity = time.time() # Track last activity for cleanup + self.cleanup_timer = None # Timer for delayed cleanup + self.active_streams = 0 # Count of active stream generators + + def _establish_connection(self, range_header=None): + """Establish or re-establish connection to provider""" + try: + if not self.session: + self.session = requests.Session() + + headers = self.base_headers.copy() + + # Validate range header against content length + if range_header and self.content_length: + logger.info(f"[{self.session_id}] Validating range {range_header} against content length {self.content_length}") + validated_range = self._validate_range_header(range_header, int(self.content_length)) + if validated_range is None: + # Range is not satisfiable, but don't raise error - return empty response + logger.warning(f"[{self.session_id}] Range not satisfiable: {range_header} for content length {self.content_length}") + return None + elif validated_range != range_header: + range_header = validated_range + logger.info(f"[{self.session_id}] Adjusted range header: {range_header}") + else: + logger.info(f"[{self.session_id}] Range header validated successfully: {range_header}") + elif range_header: + logger.info(f"[{self.session_id}] Range header provided but no content length available yet: {range_header}") + + if range_header: + headers['Range'] = range_header + logger.info(f"[{self.session_id}] Setting Range header: {range_header}") + + # Track request count for better logging + self.request_count += 1 + if self.request_count == 1: + logger.info(f"[{self.session_id}] Making initial request to provider") + target_url = self.stream_url + allow_redirects = True + else: + logger.info(f"[{self.session_id}] Making range request #{self.request_count} on SAME session (using final URL)") + # Use the final URL from first request to avoid redirect chain + target_url = self.final_url if self.final_url else self.stream_url + allow_redirects = False # No need to follow redirects again + logger.info(f"[{self.session_id}] Using cached final URL: {target_url}") + + response = self.session.get( + target_url, + headers=headers, + stream=True, + timeout=(10, 30), + allow_redirects=allow_redirects + ) + response.raise_for_status() + + # Log successful response + if self.request_count == 1: + logger.info(f"[{self.session_id}] Request #{self.request_count} successful: 
{response.status_code} (followed redirects)") + else: + logger.info(f"[{self.session_id}] Request #{self.request_count} successful: {response.status_code} (direct to final URL)") + + # Capture headers from final URL + if not self.content_length: + # First check if we have a pre-stored content length from HEAD request + try: + import redis + r = redis.StrictRedis(host='localhost', port=6379, db=0, decode_responses=True) + content_length_key = f"vod_content_length:{self.session_id}" + stored_length = r.get(content_length_key) + if stored_length: + self.content_length = stored_length + logger.info(f"[{self.session_id}] *** USING PRE-STORED CONTENT LENGTH: {self.content_length} ***") + else: + # Fallback to response headers + self.content_length = response.headers.get('content-length') + logger.info(f"[{self.session_id}] *** USING RESPONSE CONTENT LENGTH: {self.content_length} ***") + except Exception as e: + logger.error(f"[{self.session_id}] Error checking Redis for content length: {e}") + # Fallback to response headers + self.content_length = response.headers.get('content-length') + + self.content_type = response.headers.get('content-type', 'video/mp4') + self.final_url = response.url + logger.info(f"[{self.session_id}] *** PERSISTENT CONNECTION - Final URL: {self.final_url} ***") + logger.info(f"[{self.session_id}] *** PERSISTENT CONNECTION - Content-Length: {self.content_length} ***") + + self.current_response = response + return response + + except Exception as e: + logger.error(f"[{self.session_id}] Error establishing connection: {e}") + self.cleanup() + raise + + def _validate_range_header(self, range_header, content_length): + """Validate and potentially adjust range header against content length""" + try: + if not range_header or not range_header.startswith('bytes='): + return range_header + + range_part = range_header.replace('bytes=', '') + if '-' not in range_part: + return range_header + + start_str, end_str = range_part.split('-', 1) + + # Parse start byte + if start_str: + start_byte = int(start_str) + if start_byte >= content_length: + # Start is beyond file end - not satisfiable + logger.warning(f"[{self.session_id}] Range start {start_byte} >= content length {content_length} - not satisfiable") + return None + else: + start_byte = 0 + + # Parse end byte + if end_str: + end_byte = int(end_str) + if end_byte >= content_length: + # Adjust end to file end + end_byte = content_length - 1 + logger.info(f"[{self.session_id}] Adjusted range end to {end_byte}") + else: + end_byte = content_length - 1 + + # Ensure start <= end + if start_byte > end_byte: + logger.warning(f"[{self.session_id}] Range start {start_byte} > end {end_byte} - not satisfiable") + return None + + validated_range = f"bytes={start_byte}-{end_byte}" + return validated_range + + except (ValueError, IndexError) as e: + logger.warning(f"[{self.session_id}] Could not validate range header {range_header}: {e}") + return range_header + + def get_stream(self, range_header=None): + """Get stream with optional range header - reuses connection for range requests""" + with self.lock: + # Update activity timestamp + self.last_activity = time.time() + + # Cancel any pending cleanup since connection is being reused + self.cancel_cleanup() + + # For range requests, we don't need to close the connection + # We can make a new request on the same session + if range_header: + logger.info(f"[{self.session_id}] Range request on existing connection: {range_header}") + # Close only the response stream, keep the session alive + if 
self.current_response: + logger.info(f"[{self.session_id}] Closing previous response stream (keeping connection alive)") + self.current_response.close() + self.current_response = None + + # Make new request (reuses connection if session exists) + response = self._establish_connection(range_header) + if response is None: + # Range not satisfiable - return None to indicate this + return None + + return self.current_response + + def cancel_cleanup(self): + """Cancel any pending cleanup - called when connection is reused""" + if self.cleanup_timer: + self.cleanup_timer.cancel() + self.cleanup_timer = None + logger.info(f"[{self.session_id}] Cancelled pending cleanup - connection being reused for new request") + + def increment_active_streams(self): + """Increment the count of active streams""" + with self.lock: + self.active_streams += 1 + logger.debug(f"[{self.session_id}] Active streams incremented to {self.active_streams}") + + def decrement_active_streams(self): + """Decrement the count of active streams""" + with self.lock: + if self.active_streams > 0: + self.active_streams -= 1 + logger.debug(f"[{self.session_id}] Active streams decremented to {self.active_streams}") + else: + logger.warning(f"[{self.session_id}] Attempted to decrement active streams when already at 0") + + def has_active_streams(self) -> bool: + """Check if connection has any active streams""" + with self.lock: + return self.active_streams > 0 + + def schedule_cleanup_if_not_streaming(self, delay_seconds: int = 10): + """Schedule cleanup only if no active streams""" + with self.lock: + if self.active_streams > 0: + logger.info(f"[{self.session_id}] Connection has {self.active_streams} active streams - NOT scheduling cleanup") + return False + + # No active streams, proceed with delayed cleanup + if self.cleanup_timer: + self.cleanup_timer.cancel() + + def delayed_cleanup(): + logger.info(f"[{self.session_id}] Delayed cleanup triggered - checking if connection is still needed") + # Use the singleton VODConnectionManager instance + manager = VODConnectionManager.get_instance() + manager.cleanup_persistent_connection(self.session_id) + + self.cleanup_timer = threading.Timer(delay_seconds, delayed_cleanup) + self.cleanup_timer.start() + logger.info(f"[{self.session_id}] Scheduled cleanup in {delay_seconds} seconds (connection not actively streaming)") + return True + + def get_headers(self): + """Get headers for response""" + return { + 'content_length': self.content_length, + 'content_type': self.content_type, + 'final_url': self.final_url + } + + def cleanup(self): + """Clean up connection resources""" + with self.lock: + # Cancel any pending cleanup timer + if self.cleanup_timer: + self.cleanup_timer.cancel() + self.cleanup_timer = None + logger.debug(f"[{self.session_id}] Cancelled cleanup timer during manual cleanup") + + # Clear active streams count + self.active_streams = 0 + + if self.current_response: + self.current_response.close() + self.current_response = None + if self.session: + self.session.close() + self.session = None + logger.info(f"[{self.session_id}] Persistent connection cleaned up") + + +class VODConnectionManager: + """Manages VOD connections using Redis for tracking""" + + _instance = None + _persistent_connections = {} # session_id -> PersistentVODConnection + + @classmethod + def get_instance(cls): + """Get the singleton instance of VODConnectionManager""" + if cls._instance is None: + cls._instance = cls() + return cls._instance + + def __init__(self): + self.redis_client = 
RedisClient.get_client() + self.connection_ttl = 3600 # 1 hour TTL for connections + self.session_ttl = 1800 # 30 minutes TTL for sessions + + def find_matching_idle_session(self, content_type: str, content_uuid: str, + client_ip: str, user_agent: str, + utc_start=None, utc_end=None, offset=None) -> Optional[str]: + """ + Find an existing session that matches content and client criteria with no active streams + + Args: + content_type: Type of content (movie, episode, series) + content_uuid: UUID of the content + client_ip: Client IP address + user_agent: Client user agent + utc_start: UTC start time for timeshift + utc_end: UTC end time for timeshift + offset: Offset in seconds + + Returns: + Session ID if matching idle session found, None otherwise + """ + if not self.redis_client: + return None + + try: + # Search for sessions with matching content + pattern = "vod_session:*" + cursor = 0 + matching_sessions = [] + + while True: + cursor, keys = self.redis_client.scan(cursor, match=pattern, count=100) + + for key in keys: + try: + session_data = self.redis_client.hgetall(key) + if not session_data: + continue + + # Extract session info + stored_content_type = session_data.get(b'content_type', b'').decode('utf-8') + stored_content_uuid = session_data.get(b'content_uuid', b'').decode('utf-8') + + # Check if content matches + if stored_content_type != content_type or stored_content_uuid != content_uuid: + continue + + # Extract session ID from key + session_id = key.decode('utf-8').replace('vod_session:', '') + + # Check if session has an active persistent connection + persistent_conn = self._persistent_connections.get(session_id) + if not persistent_conn: + # No persistent connection exists, skip + continue + + # Check if connection has no active streams + if persistent_conn.has_active_streams(): + logger.debug(f"[{session_id}] Session has active streams - skipping") + continue + + # Get stored client info for comparison + stored_client_ip = session_data.get(b'client_ip', b'').decode('utf-8') + stored_user_agent = session_data.get(b'user_agent', b'').decode('utf-8') + + # Check timeshift parameters match + stored_utc_start = session_data.get(b'utc_start', b'').decode('utf-8') + stored_utc_end = session_data.get(b'utc_end', b'').decode('utf-8') + stored_offset = session_data.get(b'offset', b'').decode('utf-8') + + current_utc_start = utc_start or "" + current_utc_end = utc_end or "" + current_offset = str(offset) if offset else "" + + # Calculate match score + score = 0 + match_reasons = [] + + # Content already matches (required) + score += 10 + match_reasons.append("content") + + # IP match (high priority) + if stored_client_ip and stored_client_ip == client_ip: + score += 5 + match_reasons.append("ip") + + # User-Agent match (medium priority) + if stored_user_agent and stored_user_agent == user_agent: + score += 3 + match_reasons.append("user-agent") + + # Timeshift parameters match (high priority for seeking) + if (stored_utc_start == current_utc_start and + stored_utc_end == current_utc_end and + stored_offset == current_offset): + score += 7 + match_reasons.append("timeshift") + + # Consider it a good match if we have at least content + one other criteria + if score >= 13: # content(10) + ip(5) or content(10) + user-agent(3) + something else + matching_sessions.append({ + 'session_id': session_id, + 'score': score, + 'reasons': match_reasons, + 'last_activity': float(session_data.get(b'last_activity', b'0').decode('utf-8')) + }) + + except Exception as e: + logger.debug(f"Error 
processing session key {key}: {e}") + continue + + if cursor == 0: + break + + # Sort by score (highest first), then by last activity (most recent first) + matching_sessions.sort(key=lambda x: (x['score'], x['last_activity']), reverse=True) + + if matching_sessions: + best_match = matching_sessions[0] + logger.info(f"Found matching idle session: {best_match['session_id']} " + f"(score: {best_match['score']}, reasons: {', '.join(best_match['reasons'])})") + return best_match['session_id'] + else: + logger.debug(f"No matching idle sessions found for {content_type} {content_uuid}") + return None + + except Exception as e: + logger.error(f"Error finding matching idle session: {e}") + return None + + def _get_connection_key(self, content_type: str, content_uuid: str, client_id: str) -> str: + """Get Redis key for a specific connection""" + return f"vod_proxy:connection:{content_type}:{content_uuid}:{client_id}" + + def _get_profile_connections_key(self, profile_id: int) -> str: + """Get Redis key for tracking connections per profile - STANDARDIZED with TS proxy""" + return f"profile_connections:{profile_id}" + + def _get_content_connections_key(self, content_type: str, content_uuid: str) -> str: + """Get Redis key for tracking connections per content""" + return f"vod_proxy:content:{content_type}:{content_uuid}:connections" + + def create_connection(self, content_type: str, content_uuid: str, content_name: str, + client_id: str, client_ip: str, user_agent: str, + m3u_profile: M3UAccountProfile) -> bool: + """ + Create a new VOD connection with profile limit checking + + Returns: + bool: True if connection was created, False if profile limit exceeded + """ + if not self.redis_client: + logger.error("Redis client not available for VOD connection tracking") + return False + + try: + # Check profile connection limits using standardized key + if not self._check_profile_limits(m3u_profile): + logger.warning(f"Profile {m3u_profile.name} connection limit exceeded") + return False + + connection_key = self._get_connection_key(content_type, content_uuid, client_id) + profile_connections_key = self._get_profile_connections_key(m3u_profile.id) + content_connections_key = self._get_content_connections_key(content_type, content_uuid) + + # Check if connection already exists to prevent duplicate counting + if self.redis_client.exists(connection_key): + logger.info(f"Connection already exists for {client_id} - {content_type} {content_name}") + # Update activity but don't increment profile counter + self.redis_client.hset(connection_key, "last_activity", str(time.time())) + return True + + # Connection data + connection_data = { + "content_type": content_type, + "content_uuid": content_uuid, + "content_name": content_name, + "client_id": client_id, + "client_ip": client_ip, + "user_agent": user_agent, + "m3u_profile_id": m3u_profile.id, + "m3u_profile_name": m3u_profile.name, + "connected_at": str(time.time()), + "last_activity": str(time.time()), + "bytes_sent": "0", + "position_seconds": "0", + "last_position_update": str(time.time()) + } + + # Use pipeline for atomic operations + pipe = self.redis_client.pipeline() + + # Store connection data + pipe.hset(connection_key, mapping=connection_data) + pipe.expire(connection_key, self.connection_ttl) + + # Increment profile connections using standardized method + pipe.incr(profile_connections_key) + + # Add to content connections set + pipe.sadd(content_connections_key, client_id) + pipe.expire(content_connections_key, self.connection_ttl) + + # Execute all 
operations + pipe.execute() + + logger.info(f"Created VOD connection: {client_id} for {content_type} {content_name}") + return True + + except Exception as e: + logger.error(f"Error creating VOD connection: {e}") + return False + + def _check_profile_limits(self, m3u_profile: M3UAccountProfile) -> bool: + """Check if profile has available connection slots""" + if m3u_profile.max_streams == 0: # Unlimited + return True + + try: + profile_connections_key = self._get_profile_connections_key(m3u_profile.id) + current_connections = int(self.redis_client.get(profile_connections_key) or 0) + + return current_connections < m3u_profile.max_streams + + except Exception as e: + logger.error(f"Error checking profile limits: {e}") + return False + + def update_connection_activity(self, content_type: str, content_uuid: str, + client_id: str, bytes_sent: int = 0, + position_seconds: int = 0) -> bool: + """Update connection activity""" + if not self.redis_client: + return False + + try: + connection_key = self._get_connection_key(content_type, content_uuid, client_id) + + update_data = { + "last_activity": str(time.time()) + } + + if bytes_sent > 0: + # Get current bytes and add to it + current_bytes = self.redis_client.hget(connection_key, "bytes_sent") + if current_bytes: + total_bytes = int(current_bytes.decode('utf-8')) + bytes_sent + else: + total_bytes = bytes_sent + update_data["bytes_sent"] = str(total_bytes) + + if position_seconds > 0: + update_data["position_seconds"] = str(position_seconds) + + # Update connection data + self.redis_client.hset(connection_key, mapping=update_data) + self.redis_client.expire(connection_key, self.connection_ttl) + + return True + + except Exception as e: + logger.error(f"Error updating connection activity: {e}") + return False + + def remove_connection(self, content_type: str, content_uuid: str, client_id: str) -> bool: + """Remove a VOD connection""" + if not self.redis_client: + return False + + try: + connection_key = self._get_connection_key(content_type, content_uuid, client_id) + + # Get connection data before removing + connection_data = self.redis_client.hgetall(connection_key) + if not connection_data: + return True # Already removed + + # Get profile ID for cleanup + profile_id = None + if b"m3u_profile_id" in connection_data: + try: + profile_id = int(connection_data[b"m3u_profile_id"].decode('utf-8')) + except ValueError: + pass + + # Use pipeline for atomic cleanup + pipe = self.redis_client.pipeline() + + # Remove connection data + pipe.delete(connection_key) + + # Decrement profile connections using standardized key + if profile_id: + profile_connections_key = self._get_profile_connections_key(profile_id) + current_count = int(self.redis_client.get(profile_connections_key) or 0) + if current_count > 0: + pipe.decr(profile_connections_key) + + # Remove from content connections set + content_connections_key = self._get_content_connections_key(content_type, content_uuid) + pipe.srem(content_connections_key, client_id) + + # Execute cleanup + pipe.execute() + + logger.info(f"Removed VOD connection: {client_id}") + return True + + except Exception as e: + logger.error(f"Error removing connection: {e}") + return False + + def get_connection_info(self, content_type: str, content_uuid: str, client_id: str) -> Optional[Dict[str, Any]]: + """Get connection information""" + if not self.redis_client: + return None + + try: + connection_key = self._get_connection_key(content_type, content_uuid, client_id) + connection_data = 
self.redis_client.hgetall(connection_key) + + if not connection_data: + return None + + # Convert bytes to strings and parse numbers + info = {} + for key, value in connection_data.items(): + key_str = key.decode('utf-8') + value_str = value.decode('utf-8') + + # Parse numeric fields + if key_str in ['connected_at', 'last_activity']: + info[key_str] = float(value_str) + elif key_str in ['bytes_sent', 'position_seconds', 'm3u_profile_id']: + info[key_str] = int(value_str) + else: + info[key_str] = value_str + + return info + + except Exception as e: + logger.error(f"Error getting connection info: {e}") + return None + + def get_profile_connections(self, profile_id: int) -> int: + """Get current connection count for a profile using standardized key""" + if not self.redis_client: + return 0 + + try: + profile_connections_key = self._get_profile_connections_key(profile_id) + return int(self.redis_client.get(profile_connections_key) or 0) + + except Exception as e: + logger.error(f"Error getting profile connections: {e}") + return 0 + + def get_content_connections(self, content_type: str, content_uuid: str) -> int: + """Get current connection count for content""" + if not self.redis_client: + return 0 + + try: + content_connections_key = self._get_content_connections_key(content_type, content_uuid) + return self.redis_client.scard(content_connections_key) or 0 + + except Exception as e: + logger.error(f"Error getting content connections: {e}") + return 0 + + def cleanup_stale_connections(self, max_age_seconds: int = 3600): + """Clean up stale connections that haven't been active recently""" + if not self.redis_client: + return + + try: + pattern = "vod_proxy:connection:*" + cursor = 0 + cleaned = 0 + current_time = time.time() + + while True: + cursor, keys = self.redis_client.scan(cursor, match=pattern, count=100) + + for key in keys: + try: + key_str = key.decode('utf-8') + last_activity = self.redis_client.hget(key, "last_activity") + + if last_activity: + last_activity_time = float(last_activity.decode('utf-8')) + if current_time - last_activity_time > max_age_seconds: + # Extract info for cleanup + parts = key_str.split(':') + if len(parts) >= 5: + content_type = parts[2] + content_uuid = parts[3] + client_id = parts[4] + self.remove_connection(content_type, content_uuid, client_id) + cleaned += 1 + except Exception as e: + logger.error(f"Error processing key {key}: {e}") + + if cursor == 0: + break + + if cleaned > 0: + logger.info(f"Cleaned up {cleaned} stale VOD connections") + + except Exception as e: + logger.error(f"Error during connection cleanup: {e}") + + def stream_content(self, content_obj, stream_url, m3u_profile, client_ip, user_agent, request, + utc_start=None, utc_end=None, offset=None, range_header=None): + """ + Stream VOD content with connection tracking and timeshift support + + Args: + content_obj: Movie or Episode object + stream_url: Final stream URL to proxy + m3u_profile: M3UAccountProfile instance + client_ip: Client IP address + user_agent: Client user agent + request: Django request object + utc_start: UTC start time for timeshift (e.g., '2023-01-01T12:00:00') + utc_end: UTC end time for timeshift + offset: Offset in seconds for seeking + range_header: HTTP Range header for partial content requests + + Returns: + StreamingHttpResponse or HttpResponse with error + """ + + try: + # Generate unique client ID + client_id = f"vod_{int(time.time() * 1000)}_{random.randint(1000, 9999)}" + + # Determine content type and get content info + if hasattr(content_obj, 
'episodes'): # Series
+                content_type = 'series'
+            elif hasattr(content_obj, 'series'):  # Episode
+                content_type = 'episode'
+            else:  # Movie
+                content_type = 'movie'
+
+            content_uuid = str(content_obj.uuid)
+            content_name = getattr(content_obj, 'name', getattr(content_obj, 'title', 'Unknown'))
+
+            # Create connection tracking
+            connection_created = self.create_connection(
+                content_type=content_type,
+                content_uuid=content_uuid,
+                content_name=content_name,
+                client_id=client_id,
+                client_ip=client_ip,
+                user_agent=user_agent,
+                m3u_profile=m3u_profile
+            )
+
+            if not connection_created:
+                logger.error(f"Failed to create connection tracking for {content_type} {content_uuid}")
+                return HttpResponse("Connection limit exceeded", status=503)
+
+            # Modify stream URL for timeshift functionality
+            modified_stream_url = self._apply_timeshift_parameters(
+                stream_url, utc_start, utc_end, offset
+            )
+
+            logger.info(f"[{client_id}] Modified stream URL for timeshift: {modified_stream_url}")
+
+            # Create streaming generator with simplified header handling
+            upstream_response = None
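+            # The generator below makes a single upstream request (following redirects),
+            # relays the body to the client in 8 KiB chunks, refreshes the connection
+            # activity record in Redis every 100 chunks, and always removes the connection
+            # tracking entry and closes the upstream response in its finally block.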
+
+            def stream_generator():
+                nonlocal upstream_response
+                try:
+                    logger.info(f"[{client_id}] Starting VOD stream for {content_type} {content_name}")
+
+                    # Prepare request headers
+                    headers = {}
+                    if user_agent:
+                        headers['User-Agent'] = user_agent
+
+                    # Forward important headers
+                    important_headers = [
+                        'authorization', 'x-forwarded-for', 'x-real-ip',
+                        'referer', 'origin', 'accept'
+                    ]
+
+                    for header_name in important_headers:
+                        django_header = f'HTTP_{header_name.upper().replace("-", "_")}'
+                        if hasattr(request, 'META') and django_header in request.META:
+                            headers[header_name] = request.META[django_header]
+                            logger.debug(f"[{client_id}] Forwarded header {header_name}")
+
+                    # Add client IP
+                    if client_ip:
+                        headers['X-Forwarded-For'] = client_ip
+                        headers['X-Real-IP'] = client_ip
+
+                    # Add Range header if provided for seeking support
+                    if range_header:
+                        headers['Range'] = range_header
+                        logger.info(f"[{client_id}] Added Range header: {range_header}")
+
+                    # Make single request to upstream server with automatic redirect following
+                    upstream_response = requests.get(modified_stream_url, headers=headers, stream=True, timeout=(10, 30), allow_redirects=True)
+                    upstream_response.raise_for_status()
+
+                    # Log upstream response info
+                    logger.info(f"[{client_id}] Upstream response status: {upstream_response.status_code}")
+                    logger.info(f"[{client_id}] Final URL after redirects: {upstream_response.url}")
+                    logger.info(f"[{client_id}] Upstream content-type: {upstream_response.headers.get('content-type', 'unknown')}")
+                    if 'content-length' in upstream_response.headers:
+                        logger.info(f"[{client_id}] Upstream content-length: {upstream_response.headers['content-length']}")
+                    if 'content-range' in upstream_response.headers:
+                        logger.info(f"[{client_id}] Upstream content-range: {upstream_response.headers['content-range']}")
+
+                    bytes_sent = 0
+                    chunk_count = 0
+
+                    for chunk in upstream_response.iter_content(chunk_size=8192):
+                        if chunk:
+                            yield chunk
+                            bytes_sent += len(chunk)
+                            chunk_count += 1
+
+                            # Update connection activity every 100 chunks
+                            if chunk_count % 100 == 0:
+                                self.update_connection_activity(
+                                    content_type=content_type,
+                                    content_uuid=content_uuid,
+                                    client_id=client_id,
+                                    bytes_sent=len(chunk)
+                                )
+
+                    logger.info(f"[{client_id}] VOD stream completed: {bytes_sent} bytes sent")
+
+                except requests.RequestException as e:
+                    logger.error(f"[{client_id}] Error streaming from source: {e}")
+                    yield b"Error: Unable to stream content"
+                except Exception as e:
+                    logger.error(f"[{client_id}] Error in stream generator: {e}")
+                finally:
+                    # Clean up connection tracking
+                    self.remove_connection(content_type, content_uuid, client_id)
+                    if upstream_response:
+                        upstream_response.close()
+
+            # Create streaming response with sensible defaults
+            response = StreamingHttpResponse(
+                streaming_content=stream_generator(),
+                content_type='video/mp4'
+            )
+
+            # Set status code based on request type
+            if range_header:
+                response.status_code = 206
+                logger.info(f"[{client_id}] Set response status to 206 for range request")
+            else:
+                response.status_code = 200
+                logger.info(f"[{client_id}] Set response status to 200 for full request")
+
+            # Set headers that VLC and other players expect
+            response['Cache-Control'] = 'no-cache'
+            response['Pragma'] = 'no-cache'
+            response['X-Content-Type-Options'] = 'nosniff'
+            response['Connection'] = 'keep-alive'
+
response['Accept-Ranges'] = 'bytes' + + # Log the critical headers we're sending to the client + logger.info(f"[{client_id}] Response headers to client - Status: {response.status_code}, Accept-Ranges: {response.get('Accept-Ranges', 'MISSING')}") + if 'Content-Length' in response: + logger.info(f"[{client_id}] Content-Length: {response['Content-Length']}") + if 'Content-Range' in response: + logger.info(f"[{client_id}] Content-Range: {response['Content-Range']}") + if 'Content-Type' in response: + logger.info(f"[{client_id}] Content-Type: {response['Content-Type']}") + + # Critical: Log what VLC needs to see for seeking to work + if response.status_code == 200: + logger.info(f"[{client_id}] VLC SEEKING INFO: Full content response (200). VLC should see Accept-Ranges and Content-Length to enable seeking.") + elif response.status_code == 206: + logger.info(f"[{client_id}] VLC SEEKING INFO: Partial content response (206). This confirms seeking is working if VLC requested a range.") + + return response + + except Exception as e: + logger.error(f"Error in stream_content: {e}", exc_info=True) + return HttpResponse(f"Streaming error: {str(e)}", status=500) + + def stream_content_with_session(self, session_id, content_obj, stream_url, m3u_profile, client_ip, user_agent, request, + utc_start=None, utc_end=None, offset=None, range_header=None): + """ + Stream VOD content with persistent connection per session + + Maintains 1 open connection to provider per session that handles all range requests + dynamically based on client Range headers for seeking functionality. + """ + + try: + # Use session_id as client_id for connection tracking + client_id = session_id + + # Determine content type and get content info + if hasattr(content_obj, 'episodes'): # Series + content_type = 'series' + elif hasattr(content_obj, 'series'): # Episode + content_type = 'episode' + else: # Movie + content_type = 'movie' + + content_uuid = str(content_obj.uuid) + content_name = getattr(content_obj, 'name', getattr(content_obj, 'title', 'Unknown')) + + # Check for existing connection or create new one + persistent_conn = self._persistent_connections.get(session_id) + + # Cancel any pending cleanup timer for this session regardless of new/existing + if persistent_conn: + persistent_conn.cancel_cleanup() + + # If no existing connection, try to find a matching idle session first + if not persistent_conn: + # Look for existing idle sessions that match content and client criteria + matching_session_id = self.find_matching_idle_session( + content_type, content_uuid, client_ip, user_agent, + utc_start, utc_end, offset + ) + + if matching_session_id: + logger.info(f"[{client_id}] Found matching idle session {matching_session_id} - redirecting client") + + # Update the session activity and client info + session_key = f"vod_session:{matching_session_id}" + if self.redis_client: + update_data = { + "last_activity": str(time.time()), + "client_ip": client_ip, # Update in case IP changed + "user_agent": user_agent # Update in case user agent changed + } + self.redis_client.hset(session_key, mapping=update_data) + self.redis_client.expire(session_key, self.session_ttl) + + # Get the existing persistent connection + persistent_conn = self._persistent_connections.get(matching_session_id) + if persistent_conn: + # Update the session_id to use the matching one + client_id = matching_session_id + session_id = matching_session_id + logger.info(f"[{client_id}] Successfully redirected to existing idle session") + else: + 
logger.warning(f"[{client_id}] Matching session found but no persistent connection - will create new") + + if not persistent_conn: + logger.info(f"[{client_id}] Creating NEW persistent connection for {content_type} {content_name}") + + # Create session in Redis for tracking + session_info = { + "content_type": content_type, + "content_uuid": content_uuid, + "content_name": content_name, + "created_at": str(time.time()), + "last_activity": str(time.time()), + "profile_id": str(m3u_profile.id), + "connection_counted": "True", + "client_ip": client_ip, + "user_agent": user_agent, + "utc_start": utc_start or "", + "utc_end": utc_end or "", + "offset": str(offset) if offset else "" + } + + session_key = f"vod_session:{session_id}" + if self.redis_client: + self.redis_client.hset(session_key, mapping=session_info) + self.redis_client.expire(session_key, self.session_ttl) + + logger.info(f"[{client_id}] Created new session: {session_info}") + + # Apply timeshift parameters to URL + modified_stream_url = self._apply_timeshift_parameters(stream_url, utc_start, utc_end, offset) + logger.info(f"[{client_id}] Modified stream URL for timeshift: {modified_stream_url}") + + # Prepare headers + headers = { + 'User-Agent': user_agent or 'VLC/3.0.21 LibVLC/3.0.21', + 'Accept': '*/*', + 'Connection': 'keep-alive' + } + + # Add any authentication headers from profile + if hasattr(m3u_profile, 'auth_headers') and m3u_profile.auth_headers: + headers.update(m3u_profile.auth_headers) + + # Create persistent connection + persistent_conn = PersistentVODConnection(session_id, modified_stream_url, headers) + self._persistent_connections[session_id] = persistent_conn + + # Track connection in profile + self.create_connection(content_type, content_uuid, content_name, client_id, client_ip, user_agent, m3u_profile) + else: + logger.info(f"[{client_id}] Using EXISTING persistent connection for {content_type} {content_name}") + # Update session activity + session_key = f"vod_session:{session_id}" + if self.redis_client: + self.redis_client.hset(session_key, "last_activity", str(time.time())) + self.redis_client.expire(session_key, self.session_ttl) + + logger.info(f"[{client_id}] Reusing existing session - no new connection created") + + # Log the incoming Range header for debugging + if range_header: + logger.info(f"[{client_id}] *** CLIENT RANGE REQUEST: {range_header} ***") + + # Parse range for seeking detection + try: + if 'bytes=' in range_header: + range_part = range_header.replace('bytes=', '') + if '-' in range_part: + start_byte, end_byte = range_part.split('-', 1) + if start_byte and int(start_byte) > 0: + start_pos_mb = int(start_byte) / (1024 * 1024) + logger.info(f"[{client_id}] *** VLC SEEKING TO: {start_pos_mb:.1f} MB ***") + else: + logger.info(f"[{client_id}] Range request from start") + except Exception as e: + logger.warning(f"[{client_id}] Could not parse range header: {e}") + else: + logger.info(f"[{client_id}] Full content request (no Range header)") + + # Get stream from persistent connection with current range + upstream_response = persistent_conn.get_stream(range_header) + + # Handle range not satisfiable + if upstream_response is None: + logger.warning(f"[{client_id}] Range not satisfiable - returning 416 error") + return HttpResponse( + "Requested Range Not Satisfiable", + status=416, + headers={ + 'Content-Range': f'bytes */{persistent_conn.content_length}' if persistent_conn.content_length else 'bytes */*' + } + ) + + connection_headers = persistent_conn.get_headers() + + # Ensure any pending 
cleanup is cancelled before starting stream + persistent_conn.cancel_cleanup() + + # Create streaming generator + def stream_generator(): + decremented = False # Track if we've already decremented the counter + + try: + logger.info(f"[{client_id}] Starting stream from persistent connection") + + # Increment active streams counter + persistent_conn.increment_active_streams() + + bytes_sent = 0 + chunk_count = 0 + + for chunk in upstream_response.iter_content(chunk_size=8192): + if chunk: + yield chunk + bytes_sent += len(chunk) + chunk_count += 1 + + # Update connection activity every 100 chunks + if chunk_count % 100 == 0: + self.update_connection_activity( + content_type=content_type, + content_uuid=content_uuid, + client_id=client_id, + bytes_sent=len(chunk) + ) + + logger.info(f"[{client_id}] Persistent stream completed normally: {bytes_sent} bytes sent") + # Stream completed normally - decrement counter + persistent_conn.decrement_active_streams() + decremented = True + + except GeneratorExit: + # Client disconnected - decrement counter and schedule cleanup only if no active streams + logger.info(f"[{client_id}] Client disconnected - checking if cleanup should be scheduled") + persistent_conn.decrement_active_streams() + decremented = True + scheduled = persistent_conn.schedule_cleanup_if_not_streaming(delay_seconds=10) + if not scheduled: + logger.info(f"[{client_id}] Cleanup not scheduled - connection still has active streams") + + except Exception as e: + logger.error(f"[{client_id}] Error in persistent stream: {e}") + # On error, decrement counter and cleanup the connection as it may be corrupted + persistent_conn.decrement_active_streams() + decremented = True + logger.info(f"[{client_id}] Cleaning up persistent connection due to error") + self.cleanup_persistent_connection(session_id) + yield b"Error: Stream interrupted" + + finally: + # Safety net: only decrement if we haven't already + if not decremented: + logger.warning(f"[{client_id}] Stream generator exited without decrement - applying safety net") + persistent_conn.decrement_active_streams() + # This runs regardless of how the generator exits + logger.debug(f"[{client_id}] Stream generator finished") + + # Create streaming response + response = StreamingHttpResponse( + streaming_content=stream_generator(), + content_type=connection_headers['content_type'] + ) + + # Set status code based on range request + if range_header: + response.status_code = 206 + logger.info(f"[{client_id}] Set response status to 206 for range request") + else: + response.status_code = 200 + logger.info(f"[{client_id}] Set response status to 200 for full request") + + # Set headers that VLC expects + response['Cache-Control'] = 'no-cache' + response['Pragma'] = 'no-cache' + response['X-Content-Type-Options'] = 'nosniff' + response['Connection'] = 'keep-alive' + response['Accept-Ranges'] = 'bytes' + + # CRITICAL: Forward Content-Length from persistent connection + if connection_headers['content_length']: + response['Content-Length'] = connection_headers['content_length'] + logger.info(f"[{client_id}] *** FORWARDED Content-Length: {connection_headers['content_length']} *** (VLC seeking enabled)") + else: + logger.warning(f"[{client_id}] *** NO Content-Length available *** (VLC seeking may not work)") + + # Handle range requests - set Content-Range for partial responses + if range_header and connection_headers['content_length']: + try: + if 'bytes=' in range_header: + range_part = range_header.replace('bytes=', '') + if '-' in range_part: + start_byte, 
end_byte = range_part.split('-', 1) + start = int(start_byte) if start_byte else 0 + end = int(end_byte) if end_byte else int(connection_headers['content_length']) - 1 + total_size = int(connection_headers['content_length']) + + content_range = f"bytes {start}-{end}/{total_size}" + response['Content-Range'] = content_range + logger.info(f"[{client_id}] Set Content-Range: {content_range}") + except Exception as e: + logger.warning(f"[{client_id}] Could not set Content-Range: {e}") + + # Log response headers + logger.info(f"[{client_id}] PERSISTENT Response - Status: {response.status_code}, Content-Length: {response.get('Content-Length', 'MISSING')}") + if 'Content-Range' in response: + logger.info(f"[{client_id}] PERSISTENT Content-Range: {response['Content-Range']}") + + # Log VLC seeking status + if response.status_code == 200: + if connection_headers['content_length']: + logger.info(f"[{client_id}] βœ… PERSISTENT VLC SEEKING: Full response with Content-Length - seeking should work!") + else: + logger.info(f"[{client_id}] ❌ PERSISTENT VLC SEEKING: Full response but no Content-Length - seeking won't work!") + elif response.status_code == 206: + logger.info(f"[{client_id}] βœ… PERSISTENT VLC SEEKING: Partial response - seeking is working!") + + return response + + except Exception as e: + logger.error(f"Error in persistent stream_content_with_session: {e}", exc_info=True) + # Cleanup persistent connection on error + if session_id in self._persistent_connections: + self._persistent_connections[session_id].cleanup() + del self._persistent_connections[session_id] + return HttpResponse(f"Streaming error: {str(e)}", status=500) + + def _apply_timeshift_parameters(self, original_url, utc_start=None, utc_end=None, offset=None): + """ + Apply timeshift parameters to the stream URL + + Args: + original_url: Original stream URL + utc_start: UTC start time (ISO format string) + utc_end: UTC end time (ISO format string) + offset: Offset in seconds + + Returns: + Modified URL with timeshift parameters + """ + try: + from urllib.parse import urlparse, parse_qs, urlencode, urlunparse + + parsed_url = urlparse(original_url) + query_params = parse_qs(parsed_url.query) + + logger.debug(f"Original URL: {original_url}") + logger.debug(f"Original query params: {query_params}") + + # Add timeshift parameters if provided + if utc_start: + # Support both utc_start and start parameter names + query_params['utc_start'] = [utc_start] + query_params['start'] = [utc_start] # Some providers use 'start' + logger.info(f"Added utc_start/start parameter: {utc_start}") + + if utc_end: + # Support both utc_end and end parameter names + query_params['utc_end'] = [utc_end] + query_params['end'] = [utc_end] # Some providers use 'end' + logger.info(f"Added utc_end/end parameter: {utc_end}") + + if offset: + try: + # Ensure offset is a valid number + offset_seconds = int(offset) + # Support multiple offset parameter names + query_params['offset'] = [str(offset_seconds)] + query_params['seek'] = [str(offset_seconds)] # Some providers use 'seek' + query_params['t'] = [str(offset_seconds)] # Some providers use 't' + logger.info(f"Added offset/seek/t parameter: {offset_seconds} seconds") + except (ValueError, TypeError): + logger.warning(f"Invalid offset value: {offset}, skipping") + + # Handle special URL patterns for VOD providers + # Some providers embed timeshift info in the path rather than query params + path = parsed_url.path + + # Check if this looks like an IPTV catchup URL pattern + catchup_pattern = 
r'/(\d{4}-\d{2}-\d{2})/(\d{2}-\d{2}-\d{2})' + if utc_start and re.search(catchup_pattern, path): + # Convert ISO format to provider-specific format if needed + try: + from datetime import datetime + start_dt = datetime.fromisoformat(utc_start.replace('Z', '+00:00')) + date_part = start_dt.strftime('%Y-%m-%d') + time_part = start_dt.strftime('%H-%M-%S') + + # Replace existing date/time in path + path = re.sub(catchup_pattern, f'/{date_part}/{time_part}', path) + logger.info(f"Modified path for catchup: {path}") + except Exception as e: + logger.warning(f"Could not parse timeshift date: {e}") + + # Reconstruct URL with new parameters + new_query = urlencode(query_params, doseq=True) + modified_url = urlunparse(( + parsed_url.scheme, + parsed_url.netloc, + path, # Use potentially modified path + parsed_url.params, + new_query, + parsed_url.fragment + )) + + logger.info(f"Modified URL: {modified_url}") + return modified_url + + except Exception as e: + logger.error(f"Error applying timeshift parameters: {e}") + return original_url + + def cleanup_persistent_connection(self, session_id: str): + """Clean up a specific persistent connection""" + if session_id in self._persistent_connections: + logger.info(f"[{session_id}] Cleaning up persistent connection") + self._persistent_connections[session_id].cleanup() + del self._persistent_connections[session_id] + + # Clean up ALL Redis keys associated with this session + session_key = f"vod_session:{session_id}" + if self.redis_client: + try: + session_data = self.redis_client.hgetall(session_key) + if session_data: + # Get session details for connection cleanup + content_type = session_data.get(b'content_type', b'').decode('utf-8') + content_uuid = session_data.get(b'content_uuid', b'').decode('utf-8') + profile_id = session_data.get(b'profile_id') + + # Generate client_id from session_id (matches what's used during streaming) + client_id = session_id + + # Remove individual connection tracking keys created during streaming + if content_type and content_uuid: + logger.info(f"[{session_id}] Cleaning up connection tracking keys") + self.remove_connection(content_type, content_uuid, client_id) + + # Remove from profile connections if counted (additional safety check) + if session_data.get(b'connection_counted') == b'True' and profile_id: + profile_key = self._get_profile_connections_key(int(profile_id.decode('utf-8'))) + current_count = int(self.redis_client.get(profile_key) or 0) + if current_count > 0: + self.redis_client.decr(profile_key) + logger.info(f"[{session_id}] Decremented profile {profile_id.decode('utf-8')} connections") + + # Remove session tracking key + self.redis_client.delete(session_key) + logger.info(f"[{session_id}] Removed session tracking") + + # Clean up any additional session-related keys (pattern cleanup) + try: + # Look for any other keys that might be related to this session + pattern = f"*{session_id}*" + cursor = 0 + session_related_keys = [] + while True: + cursor, keys = self.redis_client.scan(cursor, match=pattern, count=100) + session_related_keys.extend(keys) + if cursor == 0: + break + + if session_related_keys: + # Filter out keys we already deleted + remaining_keys = [k for k in session_related_keys if k.decode('utf-8') != session_key] + if remaining_keys: + self.redis_client.delete(*remaining_keys) + logger.info(f"[{session_id}] Cleaned up {len(remaining_keys)} additional session-related keys") + except Exception as scan_error: + logger.warning(f"[{session_id}] Error during pattern cleanup: {scan_error}") + + except 
Exception as e: + logger.error(f"[{session_id}] Error cleaning up session: {e}") + + def cleanup_stale_persistent_connections(self, max_age_seconds: int = 1800): + """Clean up stale persistent connections that haven't been used recently""" + current_time = time.time() + stale_sessions = [] + + for session_id, conn in self._persistent_connections.items(): + try: + # Check connection's last activity time first + if hasattr(conn, 'last_activity'): + time_since_last_activity = current_time - conn.last_activity + if time_since_last_activity > max_age_seconds: + logger.info(f"[{session_id}] Connection inactive for {time_since_last_activity:.1f}s (max: {max_age_seconds}s)") + stale_sessions.append(session_id) + continue + + # Fallback to Redis session data if connection doesn't have last_activity + session_key = f"vod_session:{session_id}" + if self.redis_client: + session_data = self.redis_client.hgetall(session_key) + if session_data: + created_at = float(session_data.get(b'created_at', b'0').decode('utf-8')) + if current_time - created_at > max_age_seconds: + logger.info(f"[{session_id}] Session older than {max_age_seconds}s") + stale_sessions.append(session_id) + else: + # Session data missing, connection is stale + logger.info(f"[{session_id}] Session data missing from Redis") + stale_sessions.append(session_id) + + except Exception as e: + logger.error(f"[{session_id}] Error checking session age: {e}") + stale_sessions.append(session_id) + + # Clean up stale connections + for session_id in stale_sessions: + logger.info(f"[{session_id}] Cleaning up stale persistent connection") + self.cleanup_persistent_connection(session_id) + + if stale_sessions: + logger.info(f"Cleaned up {len(stale_sessions)} stale persistent connections") + else: + logger.debug(f"No stale persistent connections found (checked {len(self._persistent_connections)} connections)") + + +# Global instance +_connection_manager = None + +def get_connection_manager() -> VODConnectionManager: + """Get the global VOD connection manager instance""" + global _connection_manager + if _connection_manager is None: + _connection_manager = VODConnectionManager() + return _connection_manager diff --git a/apps/proxy/vod_proxy/multi_worker_connection_manager.py b/apps/proxy/vod_proxy/multi_worker_connection_manager.py new file mode 100644 index 00000000..fefc8739 --- /dev/null +++ b/apps/proxy/vod_proxy/multi_worker_connection_manager.py @@ -0,0 +1,1370 @@ +""" +Enhanced VOD Connection Manager with Redis-based connection sharing for multi-worker environments +""" + +import time +import json +import logging +import threading +import random +import re +import requests +import pickle +import base64 +import os +import socket +import mimetypes +from urllib.parse import urlparse +from typing import Optional, Dict, Any +from django.http import StreamingHttpResponse, HttpResponse +from core.utils import RedisClient +from apps.vod.models import Movie, Episode +from apps.m3u.models import M3UAccountProfile + +logger = logging.getLogger("vod_proxy") + + +def infer_content_type_from_url(url: str) -> Optional[str]: + """ + Infer MIME type from file extension in URL + + Args: + url: The stream URL + + Returns: + MIME type string or None if cannot be determined + """ + try: + parsed_url = urlparse(url) + path = parsed_url.path + + # Extract file extension + _, ext = os.path.splitext(path) + ext = ext.lower() + + # Common video format mappings + video_mime_types = { + '.mp4': 'video/mp4', + '.mkv': 'video/x-matroska', + '.avi': 'video/x-msvideo', + '.mov': 
'video/quicktime', + '.wmv': 'video/x-ms-wmv', + '.flv': 'video/x-flv', + '.webm': 'video/webm', + '.m4v': 'video/x-m4v', + '.3gp': 'video/3gpp', + '.ts': 'video/mp2t', + '.m3u8': 'application/x-mpegURL', + '.mpg': 'video/mpeg', + '.mpeg': 'video/mpeg', + } + + if ext in video_mime_types: + logger.debug(f"Inferred content type '{video_mime_types[ext]}' from extension '{ext}' in URL: {url}") + return video_mime_types[ext] + + # Fallback to mimetypes module + mime_type, _ = mimetypes.guess_type(path) + if mime_type and mime_type.startswith('video/'): + logger.debug(f"Inferred content type '{mime_type}' using mimetypes for URL: {url}") + return mime_type + + logger.debug(f"Could not infer content type from URL: {url}") + return None + + except Exception as e: + logger.warning(f"Error inferring content type from URL '{url}': {e}") + return None + + +class SerializableConnectionState: + """Serializable connection state that can be stored in Redis""" + + def __init__(self, session_id: str, stream_url: str, headers: dict, + content_length: str = None, content_type: str = None, + final_url: str = None, m3u_profile_id: int = None, + # Session metadata fields (previously stored in vod_session key) + content_obj_type: str = None, content_uuid: str = None, + content_name: str = None, client_ip: str = None, + client_user_agent: str = None, utc_start: str = None, + utc_end: str = None, offset: str = None, + worker_id: str = None, connection_type: str = "redis_backed"): + self.session_id = session_id + self.stream_url = stream_url + self.headers = headers + self.content_length = content_length + self.content_type = content_type + self.final_url = final_url + self.m3u_profile_id = m3u_profile_id # Store M3U profile ID for connection counting + self.last_activity = time.time() + self.request_count = 0 + self.active_streams = 0 + + # Session metadata (consolidated from vod_session key) + self.content_obj_type = content_obj_type + self.content_uuid = content_uuid + self.content_name = content_name + self.client_ip = client_ip + self.client_user_agent = client_user_agent + self.utc_start = utc_start or "" + self.utc_end = utc_end or "" + self.offset = offset or "" + self.worker_id = worker_id + self.connection_type = connection_type + self.created_at = time.time() + + # Additional tracking fields + self.bytes_sent = 0 + self.position_seconds = 0 + + # Range/seek tracking for position calculation + self.last_seek_byte = 0 + self.last_seek_percentage = 0.0 + self.total_content_size = 0 + self.last_seek_timestamp = 0.0 + + def to_dict(self): + """Convert to dictionary for Redis storage""" + return { + 'session_id': self.session_id or '', + 'stream_url': self.stream_url or '', + 'headers': json.dumps(self.headers or {}), + 'content_length': str(self.content_length) if self.content_length is not None else '', + 'content_type': self.content_type or '', + 'final_url': self.final_url or '', + 'm3u_profile_id': str(self.m3u_profile_id) if self.m3u_profile_id is not None else '', + 'last_activity': str(self.last_activity), + 'request_count': str(self.request_count), + 'active_streams': str(self.active_streams), + # Session metadata + 'content_obj_type': self.content_obj_type or '', + 'content_uuid': self.content_uuid or '', + 'content_name': self.content_name or '', + 'client_ip': self.client_ip or '', + 'client_user_agent': self.client_user_agent or '', + 'utc_start': self.utc_start or '', + 'utc_end': self.utc_end or '', + 'offset': self.offset or '', + 'worker_id': self.worker_id or '', + 'connection_type': 
self.connection_type or 'redis_backed', + 'created_at': str(self.created_at), + # Additional tracking fields + 'bytes_sent': str(self.bytes_sent), + 'position_seconds': str(self.position_seconds), + # Range/seek tracking + 'last_seek_byte': str(self.last_seek_byte), + 'last_seek_percentage': str(self.last_seek_percentage), + 'total_content_size': str(self.total_content_size), + 'last_seek_timestamp': str(self.last_seek_timestamp) + } + + @classmethod + def from_dict(cls, data: dict): + """Create from dictionary loaded from Redis""" + obj = cls( + session_id=data['session_id'], + stream_url=data['stream_url'], + headers=json.loads(data['headers']) if data['headers'] else {}, + content_length=data.get('content_length') if data.get('content_length') else None, + content_type=data.get('content_type') or None, + final_url=data.get('final_url') if data.get('final_url') else None, + m3u_profile_id=int(data.get('m3u_profile_id')) if data.get('m3u_profile_id') else None, + # Session metadata + content_obj_type=data.get('content_obj_type') or None, + content_uuid=data.get('content_uuid') or None, + content_name=data.get('content_name') or None, + client_ip=data.get('client_ip') or None, + client_user_agent=data.get('client_user_agent') or data.get('user_agent') or None, + utc_start=data.get('utc_start') or '', + utc_end=data.get('utc_end') or '', + offset=data.get('offset') or '', + worker_id=data.get('worker_id') or None, + connection_type=data.get('connection_type', 'redis_backed') + ) + obj.last_activity = float(data.get('last_activity', time.time())) + obj.request_count = int(data.get('request_count', 0)) + obj.active_streams = int(data.get('active_streams', 0)) + obj.created_at = float(data.get('created_at', time.time())) + # Additional tracking fields + obj.bytes_sent = int(data.get('bytes_sent', 0)) + obj.position_seconds = int(data.get('position_seconds', 0)) + # Range/seek tracking + obj.last_seek_byte = int(data.get('last_seek_byte', 0)) + obj.last_seek_percentage = float(data.get('last_seek_percentage', 0.0)) + obj.total_content_size = int(data.get('total_content_size', 0)) + obj.last_seek_timestamp = float(data.get('last_seek_timestamp', 0.0)) + return obj + + +class RedisBackedVODConnection: + """Redis-backed VOD connection that can be accessed from any worker""" + + def __init__(self, session_id: str, redis_client=None): + self.session_id = session_id + self.redis_client = redis_client or RedisClient.get_client() + self.connection_key = f"vod_persistent_connection:{session_id}" + self.lock_key = f"vod_connection_lock:{session_id}" + self.local_session = None # Local requests session + self.local_response = None # Local current response + + def _get_connection_state(self) -> Optional[SerializableConnectionState]: + """Get connection state from Redis""" + if not self.redis_client: + return None + + try: + data = self.redis_client.hgetall(self.connection_key) + if not data: + return None + + # Convert bytes keys/values to strings if needed + if isinstance(list(data.keys())[0], bytes): + data = {k.decode('utf-8'): v.decode('utf-8') for k, v in data.items()} + + return SerializableConnectionState.from_dict(data) + except Exception as e: + logger.error(f"[{self.session_id}] Error getting connection state from Redis: {e}") + return None + + def _save_connection_state(self, state: SerializableConnectionState): + """Save connection state to Redis""" + if not self.redis_client: + return False + + try: + data = state.to_dict() + # Log the data being saved for debugging + 
logger.trace(f"[{self.session_id}] Saving connection state: {data}") + + # Verify all values are valid for Redis + for key, value in data.items(): + if value is None: + logger.error(f"[{self.session_id}] None value found for key '{key}' - this should not happen") + return False + + self.redis_client.hset(self.connection_key, mapping=data) + self.redis_client.expire(self.connection_key, 3600) # 1 hour TTL + return True + except Exception as e: + logger.error(f"[{self.session_id}] Error saving connection state to Redis: {e}") + return False + + def _acquire_lock(self, timeout: int = 10) -> bool: + """Acquire distributed lock for connection operations""" + if not self.redis_client: + return False + + try: + return self.redis_client.set(self.lock_key, "locked", nx=True, ex=timeout) + except Exception as e: + logger.error(f"[{self.session_id}] Error acquiring lock: {e}") + return False + + def _release_lock(self): + """Release distributed lock""" + if not self.redis_client: + return + + try: + self.redis_client.delete(self.lock_key) + except Exception as e: + logger.error(f"[{self.session_id}] Error releasing lock: {e}") + + def create_connection(self, stream_url: str, headers: dict, m3u_profile_id: int = None, + # Session metadata (consolidated from vod_session key) + content_obj_type: str = None, content_uuid: str = None, + content_name: str = None, client_ip: str = None, + client_user_agent: str = None, utc_start: str = None, + utc_end: str = None, offset: str = None, + worker_id: str = None) -> bool: + """Create a new connection state in Redis with consolidated session metadata""" + if not self._acquire_lock(): + logger.warning(f"[{self.session_id}] Could not acquire lock for connection creation") + return False + + try: + # Check if connection already exists + existing_state = self._get_connection_state() + if existing_state: + logger.info(f"[{self.session_id}] Connection already exists in Redis") + return True + + # Create new connection state with consolidated session metadata + state = SerializableConnectionState( + session_id=self.session_id, + stream_url=stream_url, + headers=headers, + m3u_profile_id=m3u_profile_id, + # Session metadata + content_obj_type=content_obj_type, + content_uuid=content_uuid, + content_name=content_name, + client_ip=client_ip, + client_user_agent=client_user_agent, + utc_start=utc_start, + utc_end=utc_end, + offset=offset, + worker_id=worker_id + ) + success = self._save_connection_state(state) + + if success: + logger.info(f"[{self.session_id}] Created new connection state in Redis with consolidated session metadata") + + return success + finally: + self._release_lock() + + def get_stream(self, range_header: str = None): + """Get stream with optional range header - works across workers""" + # Get connection state from Redis + state = self._get_connection_state() + if not state: + logger.error(f"[{self.session_id}] No connection state found in Redis") + return None + + # Update activity and increment request count + state.last_activity = time.time() + state.request_count += 1 + + try: + # Create local session if needed + if not self.local_session: + self.local_session = requests.Session() + + # Prepare headers + headers = state.headers.copy() + if range_header: + # Validate range against content length if available + if state.content_length: + validated_range = self._validate_range_header(range_header, int(state.content_length)) + if validated_range is None: + logger.warning(f"[{self.session_id}] Range not satisfiable: {range_header}") + return None + 
range_header = validated_range + + headers['Range'] = range_header + logger.info(f"[{self.session_id}] Setting Range header: {range_header}") + + # Use final URL if available, otherwise original URL + target_url = state.final_url if state.final_url else state.stream_url + allow_redirects = not state.final_url # Only follow redirects if we don't have final URL + + logger.info(f"[{self.session_id}] Making request #{state.request_count} to {'final' if state.final_url else 'original'} URL") + + # Make request + response = self.local_session.get( + target_url, + headers=headers, + stream=True, + timeout=(10, 30), + allow_redirects=allow_redirects + ) + response.raise_for_status() + + # Update state with response info on first request + if state.request_count == 1: + if not state.content_length: + # Try to get full file size from Content-Range header first (for range requests) + content_range = response.headers.get('content-range') + if content_range and '/' in content_range: + try: + # Parse "bytes 0-1023/12653476926" to get total size + total_size = content_range.split('/')[-1] + if total_size.isdigit(): + state.content_length = total_size + logger.debug(f"[{self.session_id}] Got full file size from Content-Range: {total_size}") + else: + # Fallback to Content-Length for partial size + state.content_length = response.headers.get('content-length') + except Exception as e: + logger.warning(f"[{self.session_id}] Error parsing Content-Range: {e}") + state.content_length = response.headers.get('content-length') + else: + # No Content-Range, use Content-Length (for non-range requests) + state.content_length = response.headers.get('content-length') + + logger.debug(f"[{self.session_id}] Response headers received: {dict(response.headers)}") + + if not state.content_type: # This will be True for None, '', or any falsy value + # Get content type from provider response headers + provider_content_type = (response.headers.get('content-type') or + response.headers.get('Content-Type') or + response.headers.get('CONTENT-TYPE')) + + if provider_content_type: + logger.debug(f"[{self.session_id}] Using provider Content-Type: '{provider_content_type}'") + state.content_type = provider_content_type + else: + # Provider didn't send Content-Type, infer from URL extension + inferred_content_type = infer_content_type_from_url(state.stream_url) + if inferred_content_type: + logger.info(f"[{self.session_id}] Provider missing Content-Type, inferred from URL: '{inferred_content_type}'") + state.content_type = inferred_content_type + else: + logger.debug(f"[{self.session_id}] No Content-Type from provider and could not infer from URL, using default: 'video/mp4'") + state.content_type = 'video/mp4' + else: + logger.debug(f"[{self.session_id}] Content-Type already set in state: {state.content_type}") + if not state.final_url: + state.final_url = response.url + + logger.info(f"[{self.session_id}] Updated connection state: length={state.content_length}, type={state.content_type}") + + # Save updated state + self._save_connection_state(state) + + self.local_response = response + return response + + except Exception as e: + logger.error(f"[{self.session_id}] Error establishing connection: {e}") + self.cleanup() + raise + + def _validate_range_header(self, range_header: str, content_length: int): + """Validate range header against content length""" + try: + if not range_header or not range_header.startswith('bytes='): + return range_header + + range_part = range_header.replace('bytes=', '') + if '-' not in range_part: + return 
range_header + + start_str, end_str = range_part.split('-', 1) + + # Parse start byte + if start_str: + start_byte = int(start_str) + if start_byte >= content_length: + return None # Not satisfiable + else: + start_byte = 0 + + # Parse end byte + if end_str: + end_byte = int(end_str) + if end_byte >= content_length: + end_byte = content_length - 1 + else: + end_byte = content_length - 1 + + # Ensure start <= end + if start_byte > end_byte: + return None + + return f"bytes={start_byte}-{end_byte}" + + except (ValueError, IndexError) as e: + logger.warning(f"[{self.session_id}] Could not validate range header {range_header}: {e}") + return range_header + + def increment_active_streams(self): + """Increment active streams count in Redis""" + if not self._acquire_lock(): + return False + + try: + state = self._get_connection_state() + if state: + state.active_streams += 1 + state.last_activity = time.time() + self._save_connection_state(state) + logger.debug(f"[{self.session_id}] Active streams incremented to {state.active_streams}") + return True + return False + finally: + self._release_lock() + + def decrement_active_streams(self): + """Decrement active streams count in Redis""" + if not self._acquire_lock(): + return False + + try: + state = self._get_connection_state() + if state and state.active_streams > 0: + state.active_streams -= 1 + state.last_activity = time.time() + self._save_connection_state(state) + logger.debug(f"[{self.session_id}] Active streams decremented to {state.active_streams}") + return True + return False + finally: + self._release_lock() + + def has_active_streams(self) -> bool: + """Check if connection has any active streams""" + state = self._get_connection_state() + return state.active_streams > 0 if state else False + + def get_headers(self): + """Get headers for response""" + state = self._get_connection_state() + if state: + return { + 'content_length': state.content_length, + 'content_type': state.content_type or 'video/mp4', + 'final_url': state.final_url + } + return {} + + def get_session_metadata(self): + """Get session metadata from consolidated connection state""" + state = self._get_connection_state() + if state: + return { + 'content_obj_type': state.content_obj_type, + 'content_uuid': state.content_uuid, + 'content_name': state.content_name, + 'client_ip': state.client_ip, + 'client_user_agent': state.client_user_agent, + 'utc_start': state.utc_start, + 'utc_end': state.utc_end, + 'offset': state.offset, + 'worker_id': state.worker_id, + 'connection_type': state.connection_type, + 'created_at': state.created_at, + 'last_activity': state.last_activity, + 'm3u_profile_id': state.m3u_profile_id, + 'bytes_sent': state.bytes_sent, + 'position_seconds': state.position_seconds, + 'active_streams': state.active_streams, + 'request_count': state.request_count, + # Range/seek tracking + 'last_seek_byte': state.last_seek_byte, + 'last_seek_percentage': state.last_seek_percentage, + 'total_content_size': state.total_content_size, + 'last_seek_timestamp': state.last_seek_timestamp + } + return {} + + def cleanup(self, connection_manager=None, current_worker_id=None): + """Smart cleanup based on worker ownership and active streams""" + # Always clean up local resources first + if self.local_response: + self.local_response.close() + self.local_response = None + if self.local_session: + self.local_session.close() + self.local_session = None + + # Get current connection state to check ownership and active streams + state = self._get_connection_state() + + if not 
state: + logger.info(f"[{self.session_id}] No connection state found - local cleanup only") + return + + # Check if there are active streams + if state.active_streams > 0: + # There are active streams - check ownership + if current_worker_id and state.worker_id == current_worker_id: + logger.info(f"[{self.session_id}] Active streams present ({state.active_streams}) and we own them - local cleanup only") + else: + logger.info(f"[{self.session_id}] Active streams present ({state.active_streams}) but owned by worker {state.worker_id} - local cleanup only") + return + + # No active streams - we can clean up Redis state + if not self.redis_client: + logger.info(f"[{self.session_id}] No Redis client - local cleanup only") + return + + # Acquire lock and do final check before cleanup to prevent race conditions + if not self._acquire_lock(): + logger.warning(f"[{self.session_id}] Could not acquire lock for cleanup - skipping") + return + + try: + # Re-check active streams with lock held to prevent race conditions + current_state = self._get_connection_state() + if not current_state: + logger.info(f"[{self.session_id}] Connection state no longer exists - cleanup already done") + return + + if current_state.active_streams > 0: + logger.info(f"[{self.session_id}] Active streams now present ({current_state.active_streams}) - skipping cleanup") + return + + # Use pipeline for atomic cleanup operations + pipe = self.redis_client.pipeline() + + # 1. Remove main connection state (contains consolidated data) + pipe.delete(self.connection_key) + + # 2. Remove distributed lock (will be released below anyway) + pipe.delete(self.lock_key) + + # Execute all cleanup operations + pipe.execute() + + logger.info(f"[{self.session_id}] Cleaned up Redis keys (verified no active streams)") + + # Decrement profile connections if we have the state and connection manager + if state.m3u_profile_id and connection_manager: + connection_manager._decrement_profile_connections(state.m3u_profile_id) + logger.info(f"[{self.session_id}] Profile connection count decremented for profile {state.m3u_profile_id}") + else: + if not state.m3u_profile_id: + logger.warning(f"[{self.session_id}] No profile ID in connection state - cannot decrement profile connections") + elif not connection_manager: + logger.warning(f"[{self.session_id}] No connection manager provided - cannot decrement profile connections") + + except Exception as e: + logger.error(f"[{self.session_id}] Error cleaning up Redis state: {e}") + finally: + # Always release the lock + self._release_lock() + + +# Modify the VODConnectionManager to use Redis-backed connections +class MultiWorkerVODConnectionManager: + """Enhanced VOD Connection Manager that works across multiple uwsgi workers""" + + _instance = None + + @classmethod + def get_instance(cls): + """Get the singleton instance""" + if cls._instance is None: + cls._instance = cls() + return cls._instance + + def __init__(self): + self.redis_client = RedisClient.get_client() + self.connection_ttl = 3600 # 1 hour TTL for connections + self.session_ttl = 1800 # 30 minutes TTL for sessions + self.worker_id = self._get_worker_id() + logger.info(f"MultiWorkerVODConnectionManager initialized for worker {self.worker_id}") + + def _get_worker_id(self): + """Get unique worker ID for this process""" + import os + import socket + try: + # Use combination of hostname and PID for unique worker ID + return f"{socket.gethostname()}-{os.getpid()}" + except: + import random + return f"worker-{random.randint(1000, 9999)}" + + def 
_get_profile_connections_key(self, profile_id: int) -> str: + """Get Redis key for tracking connections per profile - STANDARDIZED with TS proxy""" + return f"profile_connections:{profile_id}" + + def _check_profile_limits(self, m3u_profile) -> bool: + """Check if profile has available connection slots""" + if m3u_profile.max_streams == 0: # Unlimited + return True + + try: + profile_connections_key = self._get_profile_connections_key(m3u_profile.id) + current_connections = int(self.redis_client.get(profile_connections_key) or 0) + + logger.info(f"[PROFILE-CHECK] Profile {m3u_profile.id} has {current_connections}/{m3u_profile.max_streams} connections") + return current_connections < m3u_profile.max_streams + + except Exception as e: + logger.error(f"Error checking profile limits: {e}") + return False + + def _increment_profile_connections(self, m3u_profile): + """Increment profile connection count""" + try: + profile_connections_key = self._get_profile_connections_key(m3u_profile.id) + new_count = self.redis_client.incr(profile_connections_key) + logger.info(f"[PROFILE-INCR] Profile {m3u_profile.id} connections: {new_count}") + return new_count + except Exception as e: + logger.error(f"Error incrementing profile connections: {e}") + return None + + def _decrement_profile_connections(self, m3u_profile_id: int): + """Decrement profile connection count""" + try: + profile_connections_key = self._get_profile_connections_key(m3u_profile_id) + current_count = int(self.redis_client.get(profile_connections_key) or 0) + if current_count > 0: + new_count = self.redis_client.decr(profile_connections_key) + logger.info(f"[PROFILE-DECR] Profile {m3u_profile_id} connections: {new_count}") + return new_count + else: + logger.warning(f"[PROFILE-DECR] Profile {m3u_profile_id} already at 0 connections") + return 0 + except Exception as e: + logger.error(f"Error decrementing profile connections: {e}") + return None + + def stream_content_with_session(self, session_id, content_obj, stream_url, m3u_profile, + client_ip, client_user_agent, request, + utc_start=None, utc_end=None, offset=None, range_header=None): + """Stream content with Redis-backed persistent connection""" + + # Generate client ID + content_type = "movie" if isinstance(content_obj, Movie) else "episode" + content_uuid = str(content_obj.uuid) + content_name = content_obj.name if hasattr(content_obj, 'name') else str(content_obj) + client_id = session_id + + logger.info(f"[{client_id}] Worker {self.worker_id} - Redis-backed streaming request for {content_type} {content_name}") + + try: + # First, try to find an existing idle session that matches our criteria + matching_session_id = self.find_matching_idle_session( + content_type=content_type, + content_uuid=content_uuid, + client_ip=client_ip, + client_user_agent=client_user_agent, + utc_start=utc_start, + utc_end=utc_end, + offset=offset + ) + + # Use matching session if found, otherwise use the provided session_id + if matching_session_id: + logger.info(f"[{client_id}] Worker {self.worker_id} - Found matching idle session: {matching_session_id}") + effective_session_id = matching_session_id + client_id = matching_session_id # Update client_id for logging consistency + + # IMMEDIATELY reserve this session by incrementing active streams to prevent cleanup + temp_connection = RedisBackedVODConnection(effective_session_id, self.redis_client) + if temp_connection.increment_active_streams(): + logger.info(f"[{client_id}] Reserved idle session - incremented active streams") + else: + 
logger.warning(f"[{client_id}] Failed to reserve idle session - falling back to new session") + effective_session_id = session_id + matching_session_id = None # Clear the match so we create a new connection + else: + logger.info(f"[{client_id}] Worker {self.worker_id} - No matching idle session found, using new session") + effective_session_id = session_id + + # Create Redis-backed connection + redis_connection = RedisBackedVODConnection(effective_session_id, self.redis_client) + + # Check if connection exists, create if not + existing_state = redis_connection._get_connection_state() + if not existing_state: + logger.info(f"[{client_id}] Worker {self.worker_id} - Creating new Redis-backed connection") + + # Check profile limits before creating new connection + if not self._check_profile_limits(m3u_profile): + logger.warning(f"[{client_id}] Profile {m3u_profile.name} connection limit exceeded") + return HttpResponse("Connection limit exceeded for profile", status=429) + + # Apply timeshift parameters + modified_stream_url = self._apply_timeshift_parameters(stream_url, utc_start, utc_end, offset) + + # Prepare headers for provider request + headers = {} + # Use M3U account's user-agent for provider requests, not client's user-agent + m3u_user_agent = m3u_profile.m3u_account.get_user_agent() + if m3u_user_agent: + headers['User-Agent'] = m3u_user_agent.user_agent + logger.info(f"[{client_id}] Using M3U account user-agent: {m3u_user_agent.user_agent}") + elif client_user_agent: + # Fallback to client's user-agent if M3U doesn't have one + headers['User-Agent'] = client_user_agent + logger.info(f"[{client_id}] Using client user-agent (M3U fallback): {client_user_agent}") + else: + logger.warning(f"[{client_id}] No user-agent available (neither M3U nor client)") + + # Forward important headers from request + important_headers = ['authorization', 'referer', 'origin', 'accept'] + for header_name in important_headers: + django_header = f'HTTP_{header_name.upper().replace("-", "_")}' + if hasattr(request, 'META') and django_header in request.META: + headers[header_name] = request.META[django_header] + + # Create connection state in Redis with consolidated session metadata + if not redis_connection.create_connection( + stream_url=modified_stream_url, + headers=headers, + m3u_profile_id=m3u_profile.id, + # Session metadata (consolidated from separate vod_session key) + content_obj_type=content_type, + content_uuid=content_uuid, + content_name=content_name, + client_ip=client_ip, + client_user_agent=client_user_agent, + utc_start=utc_start, + utc_end=utc_end, + offset=str(offset) if offset else None, + worker_id=self.worker_id + ): + logger.error(f"[{client_id}] Worker {self.worker_id} - Failed to create Redis connection") + return HttpResponse("Failed to create connection", status=500) + + # Increment profile connections after successful connection creation + self._increment_profile_connections(m3u_profile) + + logger.info(f"[{client_id}] Worker {self.worker_id} - Created consolidated connection with session metadata") + else: + logger.info(f"[{client_id}] Worker {self.worker_id} - Using existing Redis-backed connection") + + # Transfer ownership to current worker and update session activity + if redis_connection._acquire_lock(): + try: + state = redis_connection._get_connection_state() + if state: + old_worker = state.worker_id + state.last_activity = time.time() + state.worker_id = self.worker_id # Transfer ownership to current worker + redis_connection._save_connection_state(state) + + if 
old_worker != self.worker_id: + logger.info(f"[{client_id}] Ownership transferred from worker {old_worker} to {self.worker_id}") + else: + logger.debug(f"[{client_id}] Worker {self.worker_id} retaining ownership") + finally: + redis_connection._release_lock() + + # Get stream from Redis-backed connection + upstream_response = redis_connection.get_stream(range_header) + + if upstream_response is None: + logger.warning(f"[{client_id}] Worker {self.worker_id} - Range not satisfiable") + return HttpResponse("Requested Range Not Satisfiable", status=416) + + # Get connection headers + connection_headers = redis_connection.get_headers() + + # Create streaming generator + def stream_generator(): + decremented = False + try: + logger.info(f"[{client_id}] Worker {self.worker_id} - Starting Redis-backed stream") + + # Increment active streams (unless we already did it for session reuse) + if not matching_session_id: + # New session - increment active streams + redis_connection.increment_active_streams() + else: + # Reused session - we already incremented when reserving the session + logger.debug(f"[{client_id}] Using pre-reserved session - active streams already incremented") + + bytes_sent = 0 + chunk_count = 0 + + for chunk in upstream_response.iter_content(chunk_size=8192): + if chunk: + yield chunk + bytes_sent += len(chunk) + chunk_count += 1 + + # Update activity every 100 chunks in consolidated connection state + if chunk_count % 100 == 0: + # Update the connection state + logger.debug(f"Client: [{client_id}] Worker: {self.worker_id} sent {chunk_count} chunks for VOD: {content_name}") + if redis_connection._acquire_lock(): + try: + state = redis_connection._get_connection_state() + if state: + state.last_activity = time.time() + # Store cumulative bytes sent in connection state + state.bytes_sent = bytes_sent # Use cumulative bytes_sent, not chunk size + redis_connection._save_connection_state(state) + finally: + redis_connection._release_lock() + + logger.info(f"[{client_id}] Worker {self.worker_id} - Redis-backed stream completed: {bytes_sent} bytes sent") + redis_connection.decrement_active_streams() + decremented = True + + # Schedule smart cleanup if no active streams after normal completion + if not redis_connection.has_active_streams(): + def delayed_cleanup(): + time.sleep(1) # Wait 1 second + # Smart cleanup: check active streams and ownership + logger.info(f"[{client_id}] Worker {self.worker_id} - Checking for smart cleanup after normal completion") + redis_connection.cleanup(connection_manager=self, current_worker_id=self.worker_id) + + import threading + cleanup_thread = threading.Thread(target=delayed_cleanup) + cleanup_thread.daemon = True + cleanup_thread.start() + + except GeneratorExit: + logger.info(f"[{client_id}] Worker {self.worker_id} - Client disconnected from Redis-backed stream") + if not decremented: + redis_connection.decrement_active_streams() + decremented = True + + # Schedule smart cleanup if no active streams + if not redis_connection.has_active_streams(): + def delayed_cleanup(): + time.sleep(1) # Wait 1 second + # Smart cleanup: check active streams and ownership + logger.info(f"[{client_id}] Worker {self.worker_id} - Checking for smart cleanup after client disconnect") + redis_connection.cleanup(connection_manager=self, current_worker_id=self.worker_id) + + import threading + cleanup_thread = threading.Thread(target=delayed_cleanup) + cleanup_thread.daemon = True + cleanup_thread.start() + + except Exception as e: + logger.error(f"[{client_id}] Worker 
{self.worker_id} - Error in Redis-backed stream: {e}") + if not decremented: + redis_connection.decrement_active_streams() + decremented = True + # Smart cleanup on error - immediate cleanup since we're in error state + redis_connection.cleanup(connection_manager=self, current_worker_id=self.worker_id) + yield b"Error: Stream interrupted" + + finally: + if not decremented: + redis_connection.decrement_active_streams() + + # Create streaming response + response = StreamingHttpResponse( + streaming_content=stream_generator(), + content_type=connection_headers.get('content_type', 'video/mp4') + ) + + # Set appropriate status code + response.status_code = 206 if range_header else 200 + + # Set required headers + response['Cache-Control'] = 'no-cache' + response['Pragma'] = 'no-cache' + response['X-Content-Type-Options'] = 'nosniff' + response['Connection'] = 'keep-alive' + response['X-Worker-ID'] = self.worker_id # Identify which worker served this + + if connection_headers.get('content_length'): + response['Accept-Ranges'] = 'bytes' + + # For range requests, Content-Length should be the partial content size, not full file size + if range_header and 'bytes=' in range_header: + try: + range_part = range_header.replace('bytes=', '') + if '-' in range_part: + start_byte, end_byte = range_part.split('-', 1) + start = int(start_byte) if start_byte else 0 + + # Get the FULL content size from the connection state (from initial request) + state = redis_connection._get_connection_state() + if state and state.content_length: + full_content_size = int(state.content_length) + end = int(end_byte) if end_byte else full_content_size - 1 + + # Calculate partial content size for Content-Length header + partial_content_size = end - start + 1 + response['Content-Length'] = str(partial_content_size) + + # Content-Range should show full file size per HTTP standards + content_range = f"bytes {start}-{end}/{full_content_size}" + response['Content-Range'] = content_range + logger.info(f"[{client_id}] Worker {self.worker_id} - Set Content-Range: {content_range}, Content-Length: {partial_content_size}") + + # Store range information for the VOD stats API to calculate position + if start > 0: + try: + position_percentage = (start / full_content_size) * 100 + current_timestamp = time.time() + + # Update the Redis connection state with seek information + if redis_connection._acquire_lock(): + try: + # Refresh state in case it changed + state = redis_connection._get_connection_state() + if state: + # Store range/seek information for stats API + state.last_seek_byte = start + state.last_seek_percentage = position_percentage + state.total_content_size = full_content_size + state.last_seek_timestamp = current_timestamp + state.last_activity = current_timestamp + redis_connection._save_connection_state(state) + logger.info(f"[{client_id}] *** SEEK INFO STORED *** {position_percentage:.1f}% at byte {start:,}/{full_content_size:,} (timestamp: {current_timestamp})") + finally: + redis_connection._release_lock() + else: + logger.warning(f"[{client_id}] Could not acquire lock to update seek info") + except Exception as pos_e: + logger.error(f"[{client_id}] Error storing seek info: {pos_e}") + else: + # Fallback to partial content size if full size not available + partial_size = int(connection_headers['content_length']) + end = int(end_byte) if end_byte else partial_size - 1 + content_range = f"bytes {start}-{end}/{partial_size}" + response['Content-Range'] = content_range + response['Content-Length'] = str(end - start + 1) + 
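For a 206 reply the header arithmetic above is easy to get backwards: `Content-Length` must be the size of the slice actually sent, while `Content-Range` reports that slice against the full file size. A minimal sketch of the same calculation (the `partial_content_headers` helper is hypothetical, not part of the view):

```python
from typing import Optional


def partial_content_headers(start: int, end: Optional[int], full_size: int) -> dict:
    """Headers for a 206 response serving bytes start..end of a full_size file."""
    if end is None or end >= full_size:
        end = full_size - 1  # open-ended ("bytes=N-") or over-long ranges are clamped
    return {
        "Content-Length": str(end - start + 1),               # size of the slice sent
        "Content-Range": f"bytes {start}-{end}/{full_size}",  # slice vs. full file size
    }


# A seek to the midpoint of a 12,000,000-byte file ("Range: bytes=6000000-"):
# partial_content_headers(6_000_000, None, 12_000_000)
# -> {'Content-Length': '6000000', 'Content-Range': 'bytes 6000000-11999999/12000000'}
```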
logger.warning(f"[{client_id}] Using partial content size for Content-Range (full size not available): {content_range}") + except Exception as e: + logger.warning(f"[{client_id}] Worker {self.worker_id} - Could not set Content-Range: {e}") + response['Content-Length'] = connection_headers['content_length'] + else: + # For non-range requests, use the full content length + response['Content-Length'] = connection_headers['content_length'] + + logger.info(f"[{client_id}] Worker {self.worker_id} - Redis-backed response ready (status: {response.status_code})") + return response + + except Exception as e: + logger.error(f"[{client_id}] Worker {self.worker_id} - Error in Redis-backed stream_content_with_session: {e}", exc_info=True) + return HttpResponse(f"Streaming error: {str(e)}", status=500) + + def _apply_timeshift_parameters(self, original_url, utc_start=None, utc_end=None, offset=None): + """Apply timeshift parameters to URL""" + if not any([utc_start, utc_end, offset]): + return original_url + + try: + from urllib.parse import urlparse, urlunparse, parse_qs, urlencode + + parsed_url = urlparse(original_url) + query_params = parse_qs(parsed_url.query) + path = parsed_url.path + + logger.info(f"Applying timeshift parameters: utc_start={utc_start}, utc_end={utc_end}, offset={offset}") + + # Add timeshift parameters + if utc_start: + query_params['utc_start'] = [utc_start] + query_params['start'] = [utc_start] + logger.info(f"Added utc_start/start parameter: {utc_start}") + + if utc_end: + query_params['utc_end'] = [utc_end] + query_params['end'] = [utc_end] + logger.info(f"Added utc_end/end parameter: {utc_end}") + + if offset: + try: + offset_seconds = int(offset) + query_params['offset'] = [str(offset_seconds)] + query_params['seek'] = [str(offset_seconds)] + query_params['t'] = [str(offset_seconds)] + logger.info(f"Added offset/seek/t parameter: {offset_seconds}") + except ValueError: + logger.warning(f"Invalid offset value: {offset}") + + # Handle special catchup URL patterns + if utc_start: + try: + from datetime import datetime + import re + + # Parse the UTC start time + start_dt = datetime.fromisoformat(utc_start.replace('Z', '+00:00')) + + # Check for catchup URL patterns like /catchup/YYYY-MM-DD/HH-MM-SS/ + catchup_pattern = r'/catchup/\d{4}-\d{2}-\d{2}/\d{2}-\d{2}-\d{2}/' + if re.search(catchup_pattern, path): + # Replace the date/time in the path + date_part = start_dt.strftime('%Y-%m-%d') + time_part = start_dt.strftime('%H-%M-%S') + + path = re.sub(catchup_pattern, f'/catchup/{date_part}/{time_part}/', path) + logger.info(f"Modified catchup path: {path}") + except Exception as e: + logger.warning(f"Could not parse timeshift date: {e}") + + # Reconstruct URL + new_query = urlencode(query_params, doseq=True) + modified_url = urlunparse(( + parsed_url.scheme, + parsed_url.netloc, + path, + parsed_url.params, + new_query, + parsed_url.fragment + )) + + logger.info(f"Modified URL: {modified_url}") + return modified_url + + except Exception as e: + logger.error(f"Error applying timeshift parameters: {e}") + return original_url + + def cleanup_persistent_connection(self, session_id: str): + """Clean up a specific Redis-backed persistent connection""" + logger.info(f"[{session_id}] Cleaning up Redis-backed persistent connection") + + redis_connection = RedisBackedVODConnection(session_id, self.redis_client) + redis_connection.cleanup(connection_manager=self) + + # The cleanup method now handles all Redis keys including session data + + def cleanup_stale_persistent_connections(self, 
max_age_seconds: int = 1800): + """Clean up stale Redis-backed persistent connections""" + if not self.redis_client: + return + + try: + logger.info(f"Cleaning up Redis-backed connections older than {max_age_seconds} seconds") + + # Find all persistent connection keys + pattern = "vod_persistent_connection:*" + cursor = 0 + cleanup_count = 0 + current_time = time.time() + + while True: + cursor, keys = self.redis_client.scan(cursor, match=pattern, count=100) + + for key in keys: + try: + # Get connection state + data = self.redis_client.hgetall(key) + if not data: + continue + + # Convert bytes to strings if needed + if isinstance(list(data.keys())[0], bytes): + data = {k.decode('utf-8'): v.decode('utf-8') for k, v in data.items()} + + last_activity = float(data.get('last_activity', 0)) + active_streams = int(data.get('active_streams', 0)) + + # Clean up if stale and no active streams + if (current_time - last_activity > max_age_seconds) and active_streams == 0: + session_id = key.decode('utf-8').replace('vod_persistent_connection:', '') + logger.info(f"Cleaning up stale connection: {session_id}") + + # Clean up connection and related keys + redis_connection = RedisBackedVODConnection(session_id, self.redis_client) + redis_connection.cleanup(connection_manager=self) + cleanup_count += 1 + + except Exception as e: + logger.error(f"Error processing connection key {key}: {e}") + continue + + if cursor == 0: + break + + if cleanup_count > 0: + logger.info(f"Cleaned up {cleanup_count} stale Redis-backed connections") + else: + logger.debug("No stale Redis-backed connections found") + + except Exception as e: + logger.error(f"Error during Redis-backed connection cleanup: {e}") + + def create_connection(self, content_type: str, content_uuid: str, content_name: str, + client_id: str, client_ip: str, user_agent: str, + m3u_profile: M3UAccountProfile) -> bool: + """Create connection tracking in Redis (same as original but for Redis-backed connections)""" + if not self.redis_client: + logger.error("Redis client not available for VOD connection tracking") + return False + + try: + # Check profile connection limits + profile_connections_key = f"profile_connections:{m3u_profile.id}" + current_connections = self.redis_client.get(profile_connections_key) + max_connections = getattr(m3u_profile, 'max_connections', 3) # Default to 3 + + if current_connections and int(current_connections) >= max_connections: + logger.warning(f"Profile {m3u_profile.name} connection limit exceeded ({current_connections}/{max_connections})") + return False + + # Create connection tracking + connection_key = f"vod_proxy:connection:{content_type}:{content_uuid}:{client_id}" + content_connections_key = f"vod_proxy:content:{content_type}:{content_uuid}:connections" + + # Check if connection already exists + if self.redis_client.exists(connection_key): + logger.info(f"Connection already exists for {client_id} - {content_type} {content_name}") + self.redis_client.hset(connection_key, "last_activity", str(time.time())) + return True + + # Connection data + connection_data = { + "content_type": content_type, + "content_uuid": content_uuid, + "content_name": content_name, + "client_id": client_id, + "client_ip": client_ip, + "user_agent": user_agent, + "m3u_profile_id": m3u_profile.id, + "m3u_profile_name": m3u_profile.name, + "connected_at": str(time.time()), + "last_activity": str(time.time()), + "bytes_sent": "0", + "position_seconds": "0" + } + + # Use pipeline for atomic operations + pipe = self.redis_client.pipeline() + 
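Because each connection touches several keys (the connection hash, its TTL, the per-profile counter, the per-content set), the writes are queued on a pipeline and flushed in one round trip; with redis-py's default `transaction=True` they also execute as a single MULTI/EXEC block. A generic sketch of that pattern with made-up key names, assuming a reachable Redis:

```python
import time

import redis


def track_connection(client: redis.Redis, session_id: str, profile_id: int, ttl: int = 3600) -> None:
    """Record a new connection: commands are queued locally, then flushed together."""
    conn_key = f"example:connection:{session_id}"              # hypothetical key names
    counter_key = f"example:profile_connections:{profile_id}"
    pipe = client.pipeline()                                   # MULTI/EXEC by default
    pipe.hset(conn_key, mapping={"connected_at": str(time.time()), "bytes_sent": "0"})
    pipe.expire(conn_key, ttl)
    pipe.incr(counter_key)
    pipe.execute()                                             # one network round trip


# track_connection(redis.Redis(), "vod_1699999999_1234", 7)
```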
pipe.hset(connection_key, mapping=connection_data) + pipe.expire(connection_key, self.connection_ttl) + pipe.incr(profile_connections_key) + pipe.sadd(content_connections_key, client_id) + pipe.expire(content_connections_key, self.connection_ttl) + pipe.execute() + + logger.info(f"Created Redis-backed VOD connection: {client_id} for {content_type} {content_name}") + return True + + except Exception as e: + logger.error(f"Error creating Redis-backed connection: {e}") + return False + + def remove_connection(self, content_type: str, content_uuid: str, client_id: str): + """Remove connection tracking from Redis""" + if not self.redis_client: + return + + try: + connection_key = f"vod_proxy:connection:{content_type}:{content_uuid}:{client_id}" + content_connections_key = f"vod_proxy:content:{content_type}:{content_uuid}:connections" + + # Get connection data to find profile + connection_data = self.redis_client.hgetall(connection_key) + if connection_data: + # Convert bytes to strings if needed + if isinstance(list(connection_data.keys())[0], bytes): + connection_data = {k.decode('utf-8'): v.decode('utf-8') for k, v in connection_data.items()} + + profile_id = connection_data.get('m3u_profile_id') + if profile_id: + profile_connections_key = f"profile_connections:{profile_id}" + + # Use pipeline for atomic operations + pipe = self.redis_client.pipeline() + pipe.delete(connection_key) + pipe.srem(content_connections_key, client_id) + pipe.decr(profile_connections_key) + pipe.execute() + + logger.info(f"Removed Redis-backed connection: {client_id}") + + except Exception as e: + logger.error(f"Error removing Redis-backed connection: {e}") + + def update_connection_activity(self, content_type: str, content_uuid: str, + client_id: str, bytes_sent: int): + """Update connection activity in Redis""" + if not self.redis_client: + return + + try: + connection_key = f"vod_proxy:connection:{content_type}:{content_uuid}:{client_id}" + pipe = self.redis_client.pipeline() + pipe.hset(connection_key, mapping={ + "last_activity": str(time.time()), + "bytes_sent": str(bytes_sent) + }) + pipe.expire(connection_key, self.connection_ttl) + pipe.execute() + except Exception as e: + logger.error(f"Error updating connection activity: {e}") + + def find_matching_idle_session(self, content_type: str, content_uuid: str, + client_ip: str, client_user_agent: str, + utc_start=None, utc_end=None, offset=None) -> Optional[str]: + """Find existing Redis-backed session that matches criteria using consolidated connection state""" + if not self.redis_client: + return None + + try: + # Search for connections with consolidated session data + pattern = "vod_persistent_connection:*" + cursor = 0 + matching_sessions = [] + + while True: + cursor, keys = self.redis_client.scan(cursor, match=pattern, count=100) + + for key in keys: + try: + connection_data = self.redis_client.hgetall(key) + if not connection_data: + continue + + # Convert bytes keys/values to strings if needed + if isinstance(list(connection_data.keys())[0], bytes): + connection_data = {k.decode('utf-8'): v.decode('utf-8') for k, v in connection_data.items()} + + # Check if content matches (using consolidated data) + stored_content_type = connection_data.get('content_obj_type', '') + stored_content_uuid = connection_data.get('content_uuid', '') + + if stored_content_type != content_type or stored_content_uuid != content_uuid: + continue + + # Extract session ID + session_id = key.decode('utf-8').replace('vod_persistent_connection:', '') + + # Check if Redis-backed 
connection exists and has no active streams + redis_connection = RedisBackedVODConnection(session_id, self.redis_client) + if redis_connection.has_active_streams(): + continue + + # Calculate match score + score = 10 # Content match + match_reasons = ["content"] + + # Check other criteria (using consolidated data) + stored_client_ip = connection_data.get('client_ip', '') + stored_user_agent = connection_data.get('client_user_agent', '') or connection_data.get('user_agent', '') + + if stored_client_ip and stored_client_ip == client_ip: + score += 5 + match_reasons.append("ip") + + if stored_user_agent and stored_user_agent == client_user_agent: + score += 3 + match_reasons.append("user-agent") + + # Check timeshift parameters (using consolidated data) + stored_utc_start = connection_data.get('utc_start', '') + stored_utc_end = connection_data.get('utc_end', '') + stored_offset = connection_data.get('offset', '') + + current_utc_start = utc_start or "" + current_utc_end = utc_end or "" + current_offset = str(offset) if offset else "" + + if (stored_utc_start == current_utc_start and + stored_utc_end == current_utc_end and + stored_offset == current_offset): + score += 7 + match_reasons.append("timeshift") + + if score >= 13: # Good match threshold + matching_sessions.append({ + 'session_id': session_id, + 'score': score, + 'reasons': match_reasons, + 'last_activity': float(connection_data.get('last_activity', '0')) + }) + + except Exception as e: + logger.debug(f"Error processing connection key {key}: {e}") + continue + + if cursor == 0: + break + + # Sort by score and last activity + matching_sessions.sort(key=lambda x: (x['score'], x['last_activity']), reverse=True) + + if matching_sessions: + best_match = matching_sessions[0] + logger.info(f"Found matching Redis-backed idle session: {best_match['session_id']} " + f"(score: {best_match['score']}, reasons: {', '.join(best_match['reasons'])})") + return best_match['session_id'] + + return None + + except Exception as e: + logger.error(f"Error finding matching idle session: {e}") + return None + + def get_session_info(self, session_id: str) -> Optional[dict]: + """Get session information from consolidated connection state (compatibility method)""" + if not self.redis_client: + return None + + try: + redis_connection = RedisBackedVODConnection(session_id, self.redis_client) + return redis_connection.get_session_metadata() + except Exception as e: + logger.error(f"Error getting session info for {session_id}: {e}") + return None \ No newline at end of file diff --git a/apps/proxy/vod_proxy/urls.py b/apps/proxy/vod_proxy/urls.py new file mode 100644 index 00000000..c06426ce --- /dev/null +++ b/apps/proxy/vod_proxy/urls.py @@ -0,0 +1,24 @@ +from django.urls import path +from . 
import views + +app_name = 'vod_proxy' + +urlpatterns = [ + # Generic VOD streaming with session ID in path (for compatibility) + path('<str:content_type>/<str:content_id>/<str:session_id>', views.VODStreamView.as_view(), name='vod_stream_with_session'), + path('<str:content_type>/<str:content_id>/<str:session_id>/<int:profile_id>/', views.VODStreamView.as_view(), name='vod_stream_with_session_and_profile'), + + # Generic VOD streaming (supports movies, episodes, series) - legacy patterns + path('<str:content_type>/<str:content_id>', views.VODStreamView.as_view(), name='vod_stream'), + path('<str:content_type>/<str:content_id>/<int:profile_id>/', views.VODStreamView.as_view(), name='vod_stream_with_profile'), + + # VOD playlist generation + path('playlist/', views.VODPlaylistView.as_view(), name='vod_playlist'), + path('playlist/<int:profile_id>/', views.VODPlaylistView.as_view(), name='vod_playlist_with_profile'), + + # Position tracking + path('position/<str:content_type>/<str:content_id>', views.VODPositionView.as_view(), name='vod_position'), + + # VOD Stats + path('stats/', views.VODStatsView.as_view(), name='vod_stats'), +] diff --git a/apps/proxy/vod_proxy/utils.py b/apps/proxy/vod_proxy/utils.py new file mode 100644 index 00000000..7ccf08b4 --- /dev/null +++ b/apps/proxy/vod_proxy/utils.py @@ -0,0 +1,58 @@ +""" +Utility functions for VOD proxy operations. +""" + +import logging +from django.http import HttpResponse + +logger = logging.getLogger(__name__) + + +def get_client_info(request): + """ + Extract client IP and User-Agent from request. + + Args: + request: Django HttpRequest object + + Returns: + tuple: (client_ip, user_agent) + """ + # Get client IP, checking for proxy headers + client_ip = request.META.get('HTTP_X_FORWARDED_FOR') + if client_ip: + # Take the first IP if there are multiple (comma-separated) + client_ip = client_ip.split(',')[0].strip() + else: + client_ip = request.META.get('HTTP_X_REAL_IP') or request.META.get('REMOTE_ADDR', 'unknown') + + # Get User-Agent + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + + return client_ip, user_agent + + +def create_vod_response(content, content_type='video/mp4', filename=None): + """ + Create a streaming HTTP response for VOD content. + + Args: + content: Content to stream (file-like object or bytes) + content_type: MIME type of the content + filename: Optional filename for Content-Disposition header + + Returns: + HttpResponse: Configured HTTP response for streaming + """ + response = HttpResponse(content, content_type=content_type) + + if filename: + response['Content-Disposition'] = f'attachment; filename="{filename}"' + + # Add headers for streaming + response['Accept-Ranges'] = 'bytes' + response['Cache-Control'] = 'no-cache, no-store, must-revalidate' + response['Pragma'] = 'no-cache' + response['Expires'] = '0' + + return response diff --git a/apps/proxy/vod_proxy/views.py b/apps/proxy/vod_proxy/views.py new file mode 100644 index 00000000..00ed8a10 --- /dev/null +++ b/apps/proxy/vod_proxy/views.py @@ -0,0 +1,1013 @@ +""" +VOD (Video on Demand) proxy views for handling movie and series streaming. +Supports M3U profiles for authentication and URL transformation. 
+""" + +import time +import random +import logging +import requests +from django.http import StreamingHttpResponse, JsonResponse, Http404, HttpResponse +from django.shortcuts import get_object_or_404 +from django.views.decorators.csrf import csrf_exempt +from django.utils.decorators import method_decorator +from django.views import View +from apps.vod.models import Movie, Series, Episode +from apps.m3u.models import M3UAccount, M3UAccountProfile +from apps.proxy.vod_proxy.connection_manager import VODConnectionManager +from apps.proxy.vod_proxy.multi_worker_connection_manager import MultiWorkerVODConnectionManager, infer_content_type_from_url +from .utils import get_client_info, create_vod_response + +logger = logging.getLogger(__name__) + + +@method_decorator(csrf_exempt, name='dispatch') +class VODStreamView(View): + """Handle VOD streaming requests with M3U profile support""" + + def get(self, request, content_type, content_id, session_id=None, profile_id=None): + """ + Stream VOD content (movies or series episodes) with session-based connection reuse + + Args: + content_type: 'movie', 'series', or 'episode' + content_id: ID of the content + session_id: Optional session ID from URL path (for persistent connections) + profile_id: Optional M3U profile ID for authentication + """ + logger.info(f"[VOD-REQUEST] Starting VOD stream request: {content_type}/{content_id}, session: {session_id}, profile: {profile_id}") + logger.info(f"[VOD-REQUEST] Full request path: {request.get_full_path()}") + logger.info(f"[VOD-REQUEST] Request method: {request.method}") + logger.info(f"[VOD-REQUEST] Request headers: {dict(request.headers)}") + + try: + client_ip, client_user_agent = get_client_info(request) + + # Extract timeshift parameters from query string + # Support multiple timeshift parameter formats + utc_start = request.GET.get('utc_start') or request.GET.get('start') or request.GET.get('playliststart') + utc_end = request.GET.get('utc_end') or request.GET.get('end') or request.GET.get('playlistend') + offset = request.GET.get('offset') or request.GET.get('seek') or request.GET.get('t') + + # VLC specific timeshift parameters + if not utc_start and not offset: + # Check for VLC-style timestamp parameters + if 'timestamp' in request.GET: + offset = request.GET.get('timestamp') + elif 'time' in request.GET: + offset = request.GET.get('time') + + # Session ID now comes from URL path parameter + # Remove legacy query parameter extraction since we're using path-based routing + + # Extract Range header for seeking support + range_header = request.META.get('HTTP_RANGE') + + logger.info(f"[VOD-TIMESHIFT] Timeshift params - utc_start: {utc_start}, utc_end: {utc_end}, offset: {offset}") + logger.info(f"[VOD-SESSION] Session ID: {session_id}") + + # Log all query parameters for debugging + if request.GET: + logger.debug(f"[VOD-PARAMS] All query params: {dict(request.GET)}") + + if range_header: + logger.info(f"[VOD-RANGE] Range header: {range_header}") + + # Parse the range to understand what position VLC is seeking to + try: + if 'bytes=' in range_header: + range_part = range_header.replace('bytes=', '') + if '-' in range_part: + start_byte, end_byte = range_part.split('-', 1) + if start_byte: + start_pos_mb = int(start_byte) / (1024 * 1024) + logger.info(f"[VOD-SEEK] Seeking to byte position: {start_byte} (~{start_pos_mb:.1f} MB)") + if int(start_byte) > 0: + logger.info(f"[VOD-SEEK] *** ACTUAL SEEK DETECTED *** Position: {start_pos_mb:.1f} MB") + else: + logger.info(f"[VOD-SEEK] Open-ended range request 
(from start)") + if end_byte: + end_pos_mb = int(end_byte) / (1024 * 1024) + logger.info(f"[VOD-SEEK] End position: {end_byte} bytes (~{end_pos_mb:.1f} MB)") + except Exception as e: + logger.warning(f"[VOD-SEEK] Could not parse range header: {e}") + + # Simple seek detection - track rapid requests + current_time = time.time() + request_key = f"{client_ip}:{content_type}:{content_id}" + + if not hasattr(self.__class__, '_request_times'): + self.__class__._request_times = {} + + if request_key in self.__class__._request_times: + time_diff = current_time - self.__class__._request_times[request_key] + if time_diff < 5.0: + logger.info(f"[VOD-SEEK] Rapid request detected ({time_diff:.1f}s) - likely seeking") + + self.__class__._request_times[request_key] = current_time + else: + logger.info(f"[VOD-RANGE] No Range header - full content request") + + logger.info(f"[VOD-CLIENT] Client info - IP: {client_ip}, User-Agent: {client_user_agent[:50]}...") + + # If no session ID, create one and redirect to path-based URL + if not session_id: + new_session_id = f"vod_{int(time.time() * 1000)}_{random.randint(1000, 9999)}" + logger.info(f"[VOD-SESSION] Creating new session: {new_session_id}") + + # Build redirect URL with session ID in path, preserve query parameters + path_parts = request.path.rstrip('/').split('/') + + # Construct new path: /vod/movie/UUID/SESSION_ID or /vod/movie/UUID/SESSION_ID/PROFILE_ID/ + if profile_id: + new_path = f"{'/'.join(path_parts)}/{new_session_id}/{profile_id}/" + else: + new_path = f"{'/'.join(path_parts)}/{new_session_id}" + + # Preserve any query parameters (except session_id) + query_params = dict(request.GET) + query_params.pop('session_id', None) # Remove if present + + if query_params: + from urllib.parse import urlencode + query_string = urlencode(query_params, doseq=True) + redirect_url = f"{new_path}?{query_string}" + else: + redirect_url = new_path + + logger.info(f"[VOD-SESSION] Redirecting to path-based URL: {redirect_url}") + + return HttpResponse( + status=301, + headers={'Location': redirect_url} + ) + + # Extract preferred M3U account ID and stream ID from query parameters + preferred_m3u_account_id = request.GET.get('m3u_account_id') + preferred_stream_id = request.GET.get('stream_id') + + if preferred_m3u_account_id: + try: + preferred_m3u_account_id = int(preferred_m3u_account_id) + except (ValueError, TypeError): + logger.warning(f"[VOD-PARAM] Invalid m3u_account_id parameter: {preferred_m3u_account_id}") + preferred_m3u_account_id = None + + if preferred_stream_id: + logger.info(f"[VOD-PARAM] Preferred stream ID: {preferred_stream_id}") + + # Get the content object and its relation + content_obj, relation = self._get_content_and_relation(content_type, content_id, preferred_m3u_account_id, preferred_stream_id) + if not content_obj or not relation: + logger.error(f"[VOD-ERROR] Content or relation not found: {content_type} {content_id}") + raise Http404(f"Content not found: {content_type} {content_id}") + + logger.info(f"[VOD-CONTENT] Found content: {getattr(content_obj, 'name', 'Unknown')}") + + # Get M3U account from relation + m3u_account = relation.m3u_account + logger.info(f"[VOD-ACCOUNT] Using M3U account: {m3u_account.name}") + + # Get stream URL from relation + stream_url = self._get_stream_url_from_relation(relation) + logger.info(f"[VOD-CONTENT] Content URL: {stream_url or 'No URL found'}") + + if not stream_url: + logger.error(f"[VOD-ERROR] No stream URL available for {content_type} {content_id}") + return HttpResponse("No stream URL 
available", status=503) + + # Get M3U profile (returns profile and current connection count) + profile_result = self._get_m3u_profile(m3u_account, profile_id, session_id) + + if not profile_result or not profile_result[0]: + logger.error(f"[VOD-ERROR] No suitable M3U profile found for {content_type} {content_id}") + return HttpResponse("No available stream", status=503) + + m3u_profile, current_connections = profile_result + logger.info(f"[VOD-PROFILE] Using M3U profile: {m3u_profile.id} (max_streams: {m3u_profile.max_streams}, current: {current_connections})") + + # Connection tracking is handled by the connection manager + # Transform URL based on profile + final_stream_url = self._transform_url(stream_url, m3u_profile) + logger.info(f"[VOD-URL] Final stream URL: {final_stream_url}") + + # Validate stream URL + if not final_stream_url or not final_stream_url.startswith(('http://', 'https://')): + logger.error(f"[VOD-ERROR] Invalid stream URL: {final_stream_url}") + return HttpResponse("Invalid stream URL", status=500) + + # Get connection manager (Redis-backed for multi-worker support) + connection_manager = MultiWorkerVODConnectionManager.get_instance() + + # Stream the content with session-based connection reuse + logger.info("[VOD-STREAM] Calling connection manager to stream content") + response = connection_manager.stream_content_with_session( + session_id=session_id, + content_obj=content_obj, + stream_url=final_stream_url, + m3u_profile=m3u_profile, + client_ip=client_ip, + client_user_agent=client_user_agent, + request=request, + utc_start=utc_start, + utc_end=utc_end, + offset=offset, + range_header=range_header + ) + + logger.info(f"[VOD-SUCCESS] Stream response created successfully, type: {type(response)}") + return response + + except Exception as e: + logger.error(f"[VOD-EXCEPTION] Error streaming {content_type} {content_id}: {e}", exc_info=True) + return HttpResponse(f"Streaming error: {str(e)}", status=500) + + def head(self, request, content_type, content_id, session_id=None, profile_id=None): + """ + Handle HEAD requests for FUSE filesystem integration + + Returns content length and session URL header for subsequent GET requests + """ + logger.info(f"[VOD-HEAD] HEAD request: {content_type}/{content_id}, session: {session_id}, profile: {profile_id}") + + try: + # Get client info for M3U profile selection + client_ip, client_user_agent = get_client_info(request) + logger.info(f"[VOD-HEAD] Client info - IP: {client_ip}, User-Agent: {client_user_agent[:50] if client_user_agent else 'None'}...") + + # If no session ID, create one (same logic as GET) + if not session_id: + new_session_id = f"vod_{int(time.time() * 1000)}_{random.randint(1000, 9999)}" + logger.info(f"[VOD-HEAD] Creating new session for HEAD: {new_session_id}") + + # Build session URL for response header + path_parts = request.path.rstrip('/').split('/') + if profile_id: + session_url = f"{'/'.join(path_parts)}/{new_session_id}/{profile_id}/" + else: + session_url = f"{'/'.join(path_parts)}/{new_session_id}" + + session_id = new_session_id + else: + # Session already in URL, construct the current session URL + session_url = request.path + logger.info(f"[VOD-HEAD] Using existing session: {session_id}") + + # Extract preferred M3U account ID and stream ID from query parameters + preferred_m3u_account_id = request.GET.get('m3u_account_id') + preferred_stream_id = request.GET.get('stream_id') + + if preferred_m3u_account_id: + try: + preferred_m3u_account_id = int(preferred_m3u_account_id) + except (ValueError, 
TypeError): + logger.warning(f"[VOD-HEAD] Invalid m3u_account_id parameter: {preferred_m3u_account_id}") + preferred_m3u_account_id = None + + if preferred_stream_id: + logger.info(f"[VOD-HEAD] Preferred stream ID: {preferred_stream_id}") + + # Get content and relation (same as GET) + content_obj, relation = self._get_content_and_relation(content_type, content_id, preferred_m3u_account_id, preferred_stream_id) + if not content_obj or not relation: + logger.error(f"[VOD-HEAD] Content or relation not found: {content_type} {content_id}") + return HttpResponse("Content not found", status=404) + + # Get M3U account and stream URL + m3u_account = relation.m3u_account + stream_url = self._get_stream_url_from_relation(relation) + if not stream_url: + logger.error(f"[VOD-HEAD] No stream URL available for {content_type} {content_id}") + return HttpResponse("No stream URL available", status=503) + + # Get M3U profile (returns profile and current connection count) + profile_result = self._get_m3u_profile(m3u_account, profile_id, session_id) + if not profile_result or not profile_result[0]: + logger.error(f"[VOD-HEAD] No M3U profile found or all profiles at capacity") + return HttpResponse("No available stream", status=503) + + m3u_profile, current_connections = profile_result + + # Transform URL if needed + final_stream_url = self._transform_url(stream_url, m3u_profile) + + # Make a small range GET request to get content length since providers don't support HEAD + # We'll use a tiny range to minimize data transfer but get the headers we need + # Use M3U account's user agent as primary, client user agent as fallback + m3u_user_agent = m3u_account.get_user_agent().user_agent if m3u_account.get_user_agent() else None + headers = { + 'User-Agent': m3u_user_agent or client_user_agent or 'Dispatcharr/1.0', + 'Accept': '*/*', + 'Range': 'bytes=0-1' # Request only first 2 bytes + } + + logger.info(f"[VOD-HEAD] Making small range GET request to provider: {final_stream_url}") + response = requests.get(final_stream_url, headers=headers, timeout=30, allow_redirects=True, stream=True) + + # Check for range support - should be 206 for partial content + if response.status_code == 206: + # Parse Content-Range header to get total file size + content_range = response.headers.get('Content-Range', '') + if content_range: + # Content-Range: bytes 0-1/1234567890 + total_size = content_range.split('/')[-1] + logger.info(f"[VOD-HEAD] Got file size from Content-Range: {total_size}") + else: + logger.warning(f"[VOD-HEAD] No Content-Range header in 206 response") + total_size = response.headers.get('Content-Length', '0') + elif response.status_code == 200: + # Server doesn't support range requests, use Content-Length from full response + total_size = response.headers.get('Content-Length', '0') + logger.info(f"[VOD-HEAD] Server doesn't support ranges, got Content-Length: {total_size}") + else: + logger.error(f"[VOD-HEAD] Provider GET request failed: {response.status_code}") + return HttpResponse("Provider error", status=response.status_code) + + # Close the small range request - we don't need to keep this connection + response.close() + + # Store the total content length in Redis for the persistent connection to use + try: + import redis + r = redis.StrictRedis(host='localhost', port=6379, db=0, decode_responses=True) + content_length_key = f"vod_content_length:{session_id}" + r.set(content_length_key, total_size, ex=1800) # Store for 30 minutes + logger.info(f"[VOD-HEAD] Stored total content length {total_size} for session 
{session_id}") + except Exception as e: + logger.error(f"[VOD-HEAD] Failed to store content length in Redis: {e}") + + # Now create a persistent connection for the session (if one doesn't exist) + # This ensures the FUSE GET requests will reuse the same connection + + connection_manager = MultiWorkerVODConnectionManager.get_instance() + + logger.info(f"[VOD-HEAD] Pre-creating persistent connection for session: {session_id}") + + # We don't actually stream content here, just ensure connection is ready + # The actual GET requests from FUSE will use the persistent connection + + # Use the total_size we extracted from the range response + provider_content_type = response.headers.get('Content-Type') + + if provider_content_type: + content_type_header = provider_content_type + logger.info(f"[VOD-HEAD] Using provider Content-Type: {content_type_header}") + else: + # Provider didn't send Content-Type, infer from URL + inferred_content_type = infer_content_type_from_url(final_stream_url) + if inferred_content_type: + content_type_header = inferred_content_type + logger.info(f"[VOD-HEAD] Provider missing Content-Type, inferred from URL: {content_type_header}") + else: + content_type_header = 'video/mp4' + logger.info(f"[VOD-HEAD] No Content-Type from provider and could not infer from URL, using default: {content_type_header}") + + logger.info(f"[VOD-HEAD] Provider response - Total Size: {total_size}, Type: {content_type_header}") + + # Create response with content length and session URL header + head_response = HttpResponse() + head_response['Content-Length'] = total_size + head_response['Content-Type'] = content_type_header + head_response['Accept-Ranges'] = 'bytes' + + # Custom header with session URL for FUSE + head_response['X-Session-URL'] = session_url + head_response['X-Dispatcharr-Session'] = session_id + + logger.info(f"[VOD-HEAD] Returning HEAD response with session URL: {session_url}") + return head_response + + except Exception as e: + logger.error(f"[VOD-HEAD] Error in HEAD request: {e}", exc_info=True) + return HttpResponse(f"HEAD error: {str(e)}", status=500) + + def _get_content_and_relation(self, content_type, content_id, preferred_m3u_account_id=None, preferred_stream_id=None): + """Get the content object and its M3U relation""" + try: + logger.info(f"[CONTENT-LOOKUP] Looking up {content_type} with UUID {content_id}") + if preferred_m3u_account_id: + logger.info(f"[CONTENT-LOOKUP] Preferred M3U account ID: {preferred_m3u_account_id}") + if preferred_stream_id: + logger.info(f"[CONTENT-LOOKUP] Preferred stream ID: {preferred_stream_id}") + + if content_type == 'movie': + content_obj = get_object_or_404(Movie, uuid=content_id) + logger.info(f"[CONTENT-FOUND] Movie: {content_obj.name} (ID: {content_obj.id})") + + # Filter by preferred stream ID first (most specific) + relations_query = content_obj.m3u_relations.filter(m3u_account__is_active=True) + if preferred_stream_id: + specific_relation = relations_query.filter(stream_id=preferred_stream_id).first() + if specific_relation: + logger.info(f"[STREAM-SELECTED] Using specific stream: {specific_relation.stream_id} from provider: {specific_relation.m3u_account.name}") + return content_obj, specific_relation + else: + logger.warning(f"[STREAM-FALLBACK] Preferred stream ID {preferred_stream_id} not found, falling back to account/priority selection") + + # Filter by preferred M3U account if specified + if preferred_m3u_account_id: + specific_relation = relations_query.filter(m3u_account__id=preferred_m3u_account_id).first() + if 
specific_relation: + logger.info(f"[PROVIDER-SELECTED] Using preferred provider: {specific_relation.m3u_account.name}") + return content_obj, specific_relation + else: + logger.warning(f"[PROVIDER-FALLBACK] Preferred M3U account {preferred_m3u_account_id} not found, using highest priority") + + # Get the highest priority active relation (fallback or default) + relation = relations_query.select_related('m3u_account').order_by('-m3u_account__priority', 'id').first() + + if relation: + logger.info(f"[PROVIDER-SELECTED] Using provider: {relation.m3u_account.name} (priority: {relation.m3u_account.priority})") + + return content_obj, relation + + elif content_type == 'episode': + content_obj = get_object_or_404(Episode, uuid=content_id) + logger.info(f"[CONTENT-FOUND] Episode: {content_obj.name} (ID: {content_obj.id}, Series: {content_obj.series.name})") + + # Filter by preferred stream ID first (most specific) + relations_query = content_obj.m3u_relations.filter(m3u_account__is_active=True) + if preferred_stream_id: + specific_relation = relations_query.filter(stream_id=preferred_stream_id).first() + if specific_relation: + logger.info(f"[STREAM-SELECTED] Using specific stream: {specific_relation.stream_id} from provider: {specific_relation.m3u_account.name}") + return content_obj, specific_relation + else: + logger.warning(f"[STREAM-FALLBACK] Preferred stream ID {preferred_stream_id} not found, falling back to account/priority selection") + + # Filter by preferred M3U account if specified + if preferred_m3u_account_id: + specific_relation = relations_query.filter(m3u_account__id=preferred_m3u_account_id).first() + if specific_relation: + logger.info(f"[PROVIDER-SELECTED] Using preferred provider: {specific_relation.m3u_account.name}") + return content_obj, specific_relation + else: + logger.warning(f"[PROVIDER-FALLBACK] Preferred M3U account {preferred_m3u_account_id} not found, using highest priority") + + # Get the highest priority active relation (fallback or default) + relation = relations_query.select_related('m3u_account').order_by('-m3u_account__priority', 'id').first() + + if relation: + logger.info(f"[PROVIDER-SELECTED] Using provider: {relation.m3u_account.name} (priority: {relation.m3u_account.priority})") + + return content_obj, relation + + elif content_type == 'series': + # For series, get the first episode + series = get_object_or_404(Series, uuid=content_id) + logger.info(f"[CONTENT-FOUND] Series: {series.name} (ID: {series.id})") + episode = series.episodes.first() + if not episode: + logger.error(f"[CONTENT-ERROR] No episodes found for series {series.name}") + return None, None + + logger.info(f"[CONTENT-FOUND] First episode: {episode.name} (ID: {episode.id})") + + # Filter by preferred stream ID first (most specific) + relations_query = episode.m3u_relations.filter(m3u_account__is_active=True) + if preferred_stream_id: + specific_relation = relations_query.filter(stream_id=preferred_stream_id).first() + if specific_relation: + logger.info(f"[STREAM-SELECTED] Using specific stream: {specific_relation.stream_id} from provider: {specific_relation.m3u_account.name}") + return episode, specific_relation + else: + logger.warning(f"[STREAM-FALLBACK] Preferred stream ID {preferred_stream_id} not found, falling back to account/priority selection") + + # Filter by preferred M3U account if specified + if preferred_m3u_account_id: + specific_relation = relations_query.filter(m3u_account__id=preferred_m3u_account_id).first() + if specific_relation: + logger.info(f"[PROVIDER-SELECTED] 
Using preferred provider: {specific_relation.m3u_account.name}") + return episode, specific_relation + else: + logger.warning(f"[PROVIDER-FALLBACK] Preferred M3U account {preferred_m3u_account_id} not found, using highest priority") + + # Get the highest priority active relation (fallback or default) + relation = relations_query.select_related('m3u_account').order_by('-m3u_account__priority', 'id').first() + + if relation: + logger.info(f"[PROVIDER-SELECTED] Using provider: {relation.m3u_account.name} (priority: {relation.m3u_account.priority})") + + return episode, relation + else: + logger.error(f"[CONTENT-ERROR] Invalid content type: {content_type}") + return None, None + + except Exception as e: + logger.error(f"Error getting content object: {e}") + return None, None + + def _get_stream_url_from_relation(self, relation): + """Get stream URL from the M3U relation""" + try: + # Log the relation type and available attributes + logger.info(f"[VOD-URL] Relation type: {type(relation).__name__}") + logger.info(f"[VOD-URL] Account type: {relation.m3u_account.account_type}") + logger.info(f"[VOD-URL] Stream ID: {getattr(relation, 'stream_id', 'N/A')}") + + # First try the get_stream_url method (this should build URLs dynamically) + if hasattr(relation, 'get_stream_url'): + url = relation.get_stream_url() + if url: + logger.info(f"[VOD-URL] Built URL from get_stream_url(): {url}") + return url + else: + logger.warning(f"[VOD-URL] get_stream_url() returned None") + + logger.error(f"[VOD-URL] Relation has no get_stream_url method or it failed") + return None + except Exception as e: + logger.error(f"[VOD-URL] Error getting stream URL from relation: {e}", exc_info=True) + return None + + def _get_m3u_profile(self, m3u_account, profile_id, session_id=None): + """Get appropriate M3U profile for streaming using Redis-based viewer counts + + Args: + m3u_account: M3UAccount instance + profile_id: Optional specific profile ID requested + session_id: Optional session ID to check for existing connections + + Returns: + tuple: (M3UAccountProfile, current_connections) or None if no profile found + """ + try: + from core.utils import RedisClient + redis_client = RedisClient.get_client() + + if not redis_client: + logger.warning("Redis not available, falling back to default profile") + default_profile = M3UAccountProfile.objects.filter( + m3u_account=m3u_account, + is_active=True, + is_default=True + ).first() + return (default_profile, 0) if default_profile else None + + # Check if this session already has an active connection + if session_id: + persistent_connection_key = f"vod_persistent_connection:{session_id}" + connection_data = redis_client.hgetall(persistent_connection_key) + + if connection_data: + # Decode Redis hash data + decoded_data = {} + for k, v in connection_data.items(): + k_str = k.decode('utf-8') if isinstance(k, bytes) else k + v_str = v.decode('utf-8') if isinstance(v, bytes) else v + decoded_data[k_str] = v_str + + existing_profile_id = decoded_data.get('m3u_profile_id') + if existing_profile_id: + try: + existing_profile = M3UAccountProfile.objects.get( + id=int(existing_profile_id), + m3u_account=m3u_account, + is_active=True + ) + # Get current connections for logging + profile_connections_key = f"profile_connections:{existing_profile.id}" + current_connections = int(redis_client.get(profile_connections_key) or 0) + + logger.info(f"[PROFILE-SELECTION] Session {session_id} reusing existing profile {existing_profile.id}: {current_connections}/{existing_profile.max_streams} 
connections") + return (existing_profile, current_connections) + except (M3UAccountProfile.DoesNotExist, ValueError): + logger.warning(f"[PROFILE-SELECTION] Session {session_id} has invalid profile ID {existing_profile_id}, selecting new profile") + except Exception as e: + logger.warning(f"[PROFILE-SELECTION] Error checking existing profile for session {session_id}: {e}") + else: + logger.debug(f"[PROFILE-SELECTION] Session {session_id} exists but has no profile ID stored") # If specific profile requested, try to use it + if profile_id: + try: + profile = M3UAccountProfile.objects.get( + id=profile_id, + m3u_account=m3u_account, + is_active=True + ) + # Check Redis-based current connections + profile_connections_key = f"profile_connections:{profile.id}" + current_connections = int(redis_client.get(profile_connections_key) or 0) + + if profile.max_streams == 0 or current_connections < profile.max_streams: + logger.info(f"[PROFILE-SELECTION] Using requested profile {profile.id}: {current_connections}/{profile.max_streams} connections") + return (profile, current_connections) + else: + logger.warning(f"[PROFILE-SELECTION] Requested profile {profile.id} is at capacity: {current_connections}/{profile.max_streams}") + except M3UAccountProfile.DoesNotExist: + logger.warning(f"[PROFILE-SELECTION] Requested profile {profile_id} not found") + + # Get active profiles ordered by priority (default first) + m3u_profiles = M3UAccountProfile.objects.filter( + m3u_account=m3u_account, + is_active=True + ) + + default_profile = m3u_profiles.filter(is_default=True).first() + if not default_profile: + logger.error(f"[PROFILE-SELECTION] No default profile found for M3U account {m3u_account.id}") + return None + + # Check profiles in order: default first, then others + profiles = [default_profile] + list(m3u_profiles.filter(is_default=False)) + + for profile in profiles: + profile_connections_key = f"profile_connections:{profile.id}" + current_connections = int(redis_client.get(profile_connections_key) or 0) + + # Check if profile has available connection slots + if profile.max_streams == 0 or current_connections < profile.max_streams: + logger.info(f"[PROFILE-SELECTION] Selected profile {profile.id} ({profile.name}): {current_connections}/{profile.max_streams} connections") + return (profile, current_connections) + else: + logger.debug(f"[PROFILE-SELECTION] Profile {profile.id} at capacity: {current_connections}/{profile.max_streams}") + + # All profiles are at capacity - return None to trigger error response + logger.error(f"[PROFILE-SELECTION] All profiles at capacity for M3U account {m3u_account.id}, rejecting request") + return None + + except Exception as e: + logger.error(f"Error getting M3U profile: {e}") + return None + + def _transform_url(self, original_url, m3u_profile): + """Transform URL based on M3U profile settings""" + try: + import re + + if not original_url: + return None + + search_pattern = m3u_profile.search_pattern + replace_pattern = m3u_profile.replace_pattern + safe_replace_pattern = re.sub(r'\$(\d+)', r'\\\1', replace_pattern) + + if search_pattern and replace_pattern: + transformed_url = re.sub(search_pattern, safe_replace_pattern, original_url) + return transformed_url + + return original_url + + except Exception as e: + logger.error(f"Error transforming URL: {e}") + return original_url + +@method_decorator(csrf_exempt, name='dispatch') +class VODPlaylistView(View): + """Generate M3U playlists for VOD content""" + + def get(self, request, profile_id=None): + """Generate VOD 
playlist""" + try: + # Get profile if specified + m3u_profile = None + if profile_id: + try: + m3u_profile = M3UAccountProfile.objects.get( + id=profile_id, + is_active=True + ) + except M3UAccountProfile.DoesNotExist: + return HttpResponse("Profile not found", status=404) + + # Generate playlist content + playlist_content = self._generate_playlist(m3u_profile) + + response = HttpResponse(playlist_content, content_type='application/vnd.apple.mpegurl') + response['Content-Disposition'] = 'attachment; filename="vod_playlist.m3u8"' + return response + + except Exception as e: + logger.error(f"Error generating VOD playlist: {e}") + return HttpResponse("Playlist generation error", status=500) + + def _generate_playlist(self, m3u_profile=None): + """Generate M3U playlist content for VOD""" + lines = ["#EXTM3U"] + + # Add movies + movies = Movie.objects.filter(is_active=True) + if m3u_profile: + movies = movies.filter(m3u_account=m3u_profile.m3u_account) + + for movie in movies: + profile_param = f"?profile={m3u_profile.id}" if m3u_profile else "" + lines.append(f'#EXTINF:-1 tvg-id="{movie.tmdb_id}" group-title="Movies",{movie.title}') + lines.append(f'/proxy/vod/movie/{movie.uuid}/{profile_param}') + + # Add series + series_list = Series.objects.filter(is_active=True) + if m3u_profile: + series_list = series_list.filter(m3u_account=m3u_profile.m3u_account) + + for series in series_list: + for episode in series.episodes.all(): + profile_param = f"?profile={m3u_profile.id}" if m3u_profile else "" + episode_title = f"{series.title} - S{episode.season_number:02d}E{episode.episode_number:02d}" + lines.append(f'#EXTINF:-1 tvg-id="{series.tmdb_id}" group-title="Series",{episode_title}') + lines.append(f'/proxy/vod/episode/{episode.uuid}/{profile_param}') + + return '\n'.join(lines) + + +@method_decorator(csrf_exempt, name='dispatch') +class VODPositionView(View): + """Handle VOD position updates""" + + def post(self, request, content_id): + """Update playback position for VOD content""" + try: + import json + data = json.loads(request.body) + client_id = data.get('client_id') + position = data.get('position', 0) + + # Find the content object + content_obj = None + try: + content_obj = Movie.objects.get(uuid=content_id) + except Movie.DoesNotExist: + try: + content_obj = Episode.objects.get(uuid=content_id) + except Episode.DoesNotExist: + return JsonResponse({'error': 'Content not found'}, status=404) + + # Here you could store the position in a model or cache + # For now, just return success + logger.info(f"Position update for {content_obj.__class__.__name__} {content_id}: {position}s") + + return JsonResponse({ + 'success': True, + 'content_id': str(content_id), + 'position': position + }) + + except Exception as e: + logger.error(f"Error updating VOD position: {e}") + return JsonResponse({'error': str(e)}, status=500) + + +@method_decorator(csrf_exempt, name='dispatch') +class VODStatsView(View): + """Get VOD connection statistics""" + + def get(self, request): + """Get current VOD connection statistics""" + try: + connection_manager = MultiWorkerVODConnectionManager.get_instance() + redis_client = connection_manager.redis_client + + if not redis_client: + return JsonResponse({'error': 'Redis not available'}, status=500) + + # Get all VOD persistent connections (consolidated data) + pattern = "vod_persistent_connection:*" + cursor = 0 + connections = [] + current_time = time.time() + + while True: + cursor, keys = redis_client.scan(cursor, match=pattern, count=100) + + for key in keys: + try: + 
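The statistics view above walks every `vod_persistent_connection:*` key with cursor-based SCAN; iteration only finishes when Redis hands the cursor back as 0, which happens much further down in the listing. A minimal standalone sketch of that idiom, assuming a redis-py style client (helper name is illustrative, not part of this diff):

def iter_connection_keys(redis_client, pattern="vod_persistent_connection:*"):
    # SCAN returns (next_cursor, keys); the scan is complete when the cursor wraps to 0.
    cursor = 0
    while True:
        cursor, keys = redis_client.scan(cursor, match=pattern, count=100)
        yield from keys
        if cursor == 0:
            break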
key_str = key.decode('utf-8') if isinstance(key, bytes) else key + connection_data = redis_client.hgetall(key) + + if connection_data: + # Extract session ID from key + session_id = key_str.replace('vod_persistent_connection:', '') + + # Decode Redis hash data + combined_data = {} + for k, v in connection_data.items(): + k_str = k.decode('utf-8') if isinstance(k, bytes) else k + v_str = v.decode('utf-8') if isinstance(v, bytes) else v + combined_data[k_str] = v_str + + # Get content info from the connection data (using correct field names) + content_type = combined_data.get('content_obj_type', 'unknown') + content_uuid = combined_data.get('content_uuid', 'unknown') + client_id = session_id + + # Get content info with enhanced metadata + content_name = "Unknown" + content_metadata = {} + try: + if content_type == 'movie': + content_obj = Movie.objects.select_related('logo').get(uuid=content_uuid) + content_name = content_obj.name + + # Get duration from content object + duration_secs = None + if hasattr(content_obj, 'duration_secs') and content_obj.duration_secs: + duration_secs = content_obj.duration_secs + + # If we don't have duration_secs, try to calculate it from file size and position data + if not duration_secs: + file_size_bytes = int(combined_data.get('total_content_size', 0)) + last_seek_byte = int(combined_data.get('last_seek_byte', 0)) + last_seek_percentage = float(combined_data.get('last_seek_percentage', 0.0)) + + # Calculate position if we have the required data + if file_size_bytes and file_size_bytes > 0 and last_seek_percentage > 0: + # If we know the seek percentage and current time position, we can estimate duration + # But we need to know the current time position in seconds first + # For now, let's use a rough estimate based on file size and typical bitrates + # This is a fallback - ideally duration should be in the database + estimated_duration = 6000 # 100 minutes as default for movies + duration_secs = estimated_duration + + content_metadata = { + 'year': content_obj.year, + 'rating': content_obj.rating, + 'genre': content_obj.genre, + 'duration_secs': duration_secs, + 'description': content_obj.description, + 'logo_url': content_obj.logo.url if content_obj.logo else None, + 'tmdb_id': content_obj.tmdb_id, + 'imdb_id': content_obj.imdb_id + } + elif content_type == 'episode': + content_obj = Episode.objects.select_related('series', 'series__logo').get(uuid=content_uuid) + content_name = f"{content_obj.series.name} - {content_obj.name}" + + # Get duration from content object + duration_secs = None + if hasattr(content_obj, 'duration_secs') and content_obj.duration_secs: + duration_secs = content_obj.duration_secs + + # If we don't have duration_secs, estimate for episodes + if not duration_secs: + estimated_duration = 2400 # 40 minutes as default for episodes + duration_secs = estimated_duration + + content_metadata = { + 'series_name': content_obj.series.name, + 'episode_name': content_obj.name, + 'season_number': content_obj.season_number, + 'episode_number': content_obj.episode_number, + 'air_date': content_obj.air_date.isoformat() if content_obj.air_date else None, + 'rating': content_obj.rating, + 'duration_secs': duration_secs, + 'description': content_obj.description, + 'logo_url': content_obj.series.logo.url if content_obj.series.logo else None, + 'series_year': content_obj.series.year, + 'series_genre': content_obj.series.genre, + 'tmdb_id': content_obj.tmdb_id, + 'imdb_id': content_obj.imdb_id + } + except: + pass + + # Get M3U profile information + 
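The position estimate computed a few lines below turns the last byte-range seek into seconds: a percentage of the known duration, plus wall-clock time elapsed since the seek, capped at the duration. A small self-contained sketch of that arithmetic (function name and sample values are illustrative only):

def estimate_position(duration_secs, seek_percentage, seek_timestamp, now):
    # Convert the seek percentage into a position in seconds.
    position = int((seek_percentage / 100) * duration_secs)
    if seek_timestamp > 0:
        # Assume playback continued in real time since the seek.
        position += int(now - seek_timestamp)
    return min(position, duration_secs)

# A seek to 25% of a 6000-second movie, made 90 seconds ago, lands at roughly 1590s.
print(estimate_position(6000, 25.0, seek_timestamp=1_700_000_000.0, now=1_700_000_090.0))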
m3u_profile_info = {} + m3u_profile_id = combined_data.get('m3u_profile_id') + if m3u_profile_id: + try: + from apps.m3u.models import M3UAccountProfile + profile = M3UAccountProfile.objects.select_related('m3u_account').get(id=m3u_profile_id) + m3u_profile_info = { + 'profile_name': profile.name, + 'account_name': profile.m3u_account.name, + 'account_id': profile.m3u_account.id, + 'max_streams': profile.m3u_account.max_streams, + 'm3u_profile_id': int(m3u_profile_id) + } + except Exception as e: + logger.warning(f"Could not fetch M3U profile {m3u_profile_id}: {e}") + + # Also try to get profile info from stored data if database lookup fails + if not m3u_profile_info and combined_data.get('m3u_profile_name'): + m3u_profile_info = { + 'profile_name': combined_data.get('m3u_profile_name', 'Unknown Profile'), + 'm3u_profile_id': combined_data.get('m3u_profile_id'), + 'account_name': 'Unknown Account' # We don't store account name directly + } + + # Calculate estimated current position based on seek percentage or last known position + last_known_position = int(combined_data.get('position_seconds', 0)) + last_position_update = combined_data.get('last_position_update') + last_seek_percentage = float(combined_data.get('last_seek_percentage', 0.0)) + last_seek_timestamp = float(combined_data.get('last_seek_timestamp', 0.0)) + estimated_position = last_known_position + + # If we have seek percentage and content duration, calculate position from that + if last_seek_percentage > 0 and content_metadata.get('duration_secs'): + try: + duration_secs = int(content_metadata['duration_secs']) + # Calculate position from seek percentage + seek_position = int((last_seek_percentage / 100) * duration_secs) + + # If we have a recent seek timestamp, add elapsed time since seek + if last_seek_timestamp > 0: + elapsed_since_seek = current_time - last_seek_timestamp + # Add elapsed time but don't exceed content duration + estimated_position = min( + seek_position + int(elapsed_since_seek), + duration_secs + ) + else: + estimated_position = seek_position + except (ValueError, TypeError): + pass + elif last_position_update and content_metadata.get('duration_secs'): + # Fallback: use time-based estimation from position_seconds + try: + update_timestamp = float(last_position_update) + elapsed_since_update = current_time - update_timestamp + # Add elapsed time to last known position, but don't exceed content duration + estimated_position = min( + last_known_position + int(elapsed_since_update), + int(content_metadata['duration_secs']) + ) + except (ValueError, TypeError): + # If timestamp parsing fails, fall back to last known position + estimated_position = last_known_position + + connection_info = { + 'content_type': content_type, + 'content_uuid': content_uuid, + 'content_name': content_name, + 'content_metadata': content_metadata, + 'm3u_profile': m3u_profile_info, + 'client_id': client_id, + 'client_ip': combined_data.get('client_ip', 'Unknown'), + 'user_agent': combined_data.get('client_user_agent', 'Unknown'), + 'connected_at': combined_data.get('created_at'), + 'last_activity': combined_data.get('last_activity'), + 'm3u_profile_id': m3u_profile_id, + 'position_seconds': estimated_position, # Use estimated position + 'last_known_position': last_known_position, # Include raw position for debugging + 'last_position_update': last_position_update, # Include timestamp for frontend use + 'bytes_sent': int(combined_data.get('bytes_sent', 0)), + # Seek/range information for position calculation and frontend display + 
'last_seek_byte': int(combined_data.get('last_seek_byte', 0)), + 'last_seek_percentage': float(combined_data.get('last_seek_percentage', 0.0)), + 'total_content_size': int(combined_data.get('total_content_size', 0)), + 'last_seek_timestamp': float(combined_data.get('last_seek_timestamp', 0.0)) + } + + # Calculate connection duration + duration_calculated = False + if connection_info['connected_at']: + try: + connected_time = float(connection_info['connected_at']) + duration = current_time - connected_time + connection_info['duration'] = int(duration) + duration_calculated = True + except: + pass + + # Fallback: use last_activity if connected_at is not available + if not duration_calculated and connection_info['last_activity']: + try: + last_activity_time = float(connection_info['last_activity']) + # Estimate connection duration using client_id timestamp if available + if connection_info['client_id'].startswith('vod_'): + # Extract timestamp from client_id (format: vod_timestamp_random) + parts = connection_info['client_id'].split('_') + if len(parts) >= 2: + client_start_time = float(parts[1]) / 1000.0 # Convert ms to seconds + duration = current_time - client_start_time + connection_info['duration'] = int(duration) + duration_calculated = True + except: + pass + + # Final fallback + if not duration_calculated: + connection_info['duration'] = 0 + + connections.append(connection_info) + + except Exception as e: + logger.error(f"Error processing connection key {key}: {e}") + + if cursor == 0: + break + + # Group connections by content + content_stats = {} + for conn in connections: + content_key = f"{conn['content_type']}:{conn['content_uuid']}" + if content_key not in content_stats: + content_stats[content_key] = { + 'content_type': conn['content_type'], + 'content_name': conn['content_name'], + 'content_uuid': conn['content_uuid'], + 'content_metadata': conn['content_metadata'], + 'connection_count': 0, + 'connections': [] + } + content_stats[content_key]['connection_count'] += 1 + content_stats[content_key]['connections'].append(conn) + + return JsonResponse({ + 'vod_connections': list(content_stats.values()), + 'total_connections': len(connections), + 'timestamp': current_time + }) + + except Exception as e: + logger.error(f"Error getting VOD stats: {e}") + return JsonResponse({'error': str(e)}, status=500) diff --git a/apps/vod/__init__.py b/apps/vod/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/apps/vod/admin.py b/apps/vod/admin.py new file mode 100644 index 00000000..c660f310 --- /dev/null +++ b/apps/vod/admin.py @@ -0,0 +1,67 @@ +from django.contrib import admin +from .models import ( + Series, VODCategory, Movie, Episode, + M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation +) + + +@admin.register(VODCategory) +class VODCategoryAdmin(admin.ModelAdmin): + list_display = ['name', 'category_type', 'created_at'] + list_filter = ['category_type', 'created_at'] + search_fields = ['name'] + + +@admin.register(Series) +class SeriesAdmin(admin.ModelAdmin): + list_display = ['name', 'year', 'genre', 'created_at'] + list_filter = ['year', 'created_at'] + search_fields = ['name', 'description', 'tmdb_id', 'imdb_id'] + readonly_fields = ['uuid', 'created_at', 'updated_at'] + + +@admin.register(Movie) +class MovieAdmin(admin.ModelAdmin): + list_display = ['name', 'year', 'genre', 'duration_secs', 'created_at'] + list_filter = ['year', 'created_at'] + search_fields = ['name', 'description', 'tmdb_id', 'imdb_id'] + readonly_fields = ['uuid', 'created_at', 'updated_at'] 
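Back in the proxy views, VODStatsView groups the active sessions per title before returning JSON. A hedged consumer-side sketch of that response shape; the host and URL path are assumptions, only the JSON keys come from the view above:

import requests

resp = requests.get("http://dispatcharr.local/proxy/vod/stats/", timeout=10)  # path is hypothetical
payload = resp.json()
print("total connections:", payload["total_connections"])
for item in payload["vod_connections"]:
    print(item["content_name"], "-", item["connection_count"], "viewer(s)")
    for conn in item["connections"]:
        print("   ", conn["client_ip"], "at", conn["position_seconds"], "s")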
+ + def get_queryset(self, request): + return super().get_queryset(request).select_related('logo') + + +@admin.register(Episode) +class EpisodeAdmin(admin.ModelAdmin): + list_display = ['name', 'series', 'season_number', 'episode_number', 'duration_secs', 'created_at'] + list_filter = ['series', 'season_number', 'created_at'] + search_fields = ['name', 'description', 'series__name'] + readonly_fields = ['uuid', 'created_at', 'updated_at'] + + def get_queryset(self, request): + return super().get_queryset(request).select_related('series') + + +@admin.register(M3UMovieRelation) +class M3UMovieRelationAdmin(admin.ModelAdmin): + list_display = ['movie', 'm3u_account', 'category', 'stream_id', 'created_at'] + list_filter = ['m3u_account', 'category', 'created_at'] + search_fields = ['movie__name', 'm3u_account__name', 'stream_id'] + readonly_fields = ['created_at', 'updated_at'] + + +@admin.register(M3USeriesRelation) +class M3USeriesRelationAdmin(admin.ModelAdmin): + list_display = ['series', 'm3u_account', 'category', 'external_series_id', 'created_at'] + list_filter = ['m3u_account', 'category', 'created_at'] + search_fields = ['series__name', 'm3u_account__name', 'external_series_id'] + readonly_fields = ['created_at', 'updated_at'] + + +@admin.register(M3UEpisodeRelation) +class M3UEpisodeRelationAdmin(admin.ModelAdmin): + list_display = ['episode', 'm3u_account', 'stream_id', 'created_at'] + list_filter = ['m3u_account', 'created_at'] + search_fields = ['episode__name', 'episode__series__name', 'm3u_account__name', 'stream_id'] + readonly_fields = ['created_at', 'updated_at'] + diff --git a/apps/vod/api_urls.py b/apps/vod/api_urls.py new file mode 100644 index 00000000..e897bd28 --- /dev/null +++ b/apps/vod/api_urls.py @@ -0,0 +1,22 @@ +from django.urls import path, include +from rest_framework.routers import DefaultRouter +from .api_views import ( + MovieViewSet, + EpisodeViewSet, + SeriesViewSet, + VODCategoryViewSet, + UnifiedContentViewSet, + VODLogoViewSet, +) + +app_name = 'vod' + +router = DefaultRouter() +router.register(r'movies', MovieViewSet, basename='movie') +router.register(r'episodes', EpisodeViewSet, basename='episode') +router.register(r'series', SeriesViewSet, basename='series') +router.register(r'categories', VODCategoryViewSet, basename='vodcategory') +router.register(r'all', UnifiedContentViewSet, basename='unified-content') +router.register(r'vodlogos', VODLogoViewSet, basename='vodlogo') + +urlpatterns = router.urls diff --git a/apps/vod/api_views.py b/apps/vod/api_views.py new file mode 100644 index 00000000..4ff1f82b --- /dev/null +++ b/apps/vod/api_views.py @@ -0,0 +1,846 @@ +from rest_framework import viewsets, status +from rest_framework.response import Response +from rest_framework.decorators import action +from rest_framework.filters import SearchFilter, OrderingFilter +from rest_framework.pagination import PageNumberPagination +from rest_framework.permissions import AllowAny +from django_filters.rest_framework import DjangoFilterBackend +from django.shortcuts import get_object_or_404 +from django.http import StreamingHttpResponse, HttpResponse, FileResponse +from django.db.models import Q +import django_filters +import logging +import os +import requests +from apps.accounts.permissions import ( + Authenticated, + permission_classes_by_action, +) +from .models import ( + Series, VODCategory, Movie, Episode, VODLogo, + M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation +) +from .serializers import ( + MovieSerializer, + EpisodeSerializer, + 
SeriesSerializer, + VODCategorySerializer, + VODLogoSerializer, + M3UMovieRelationSerializer, + M3USeriesRelationSerializer, + M3UEpisodeRelationSerializer +) +from .tasks import refresh_series_episodes, refresh_movie_advanced_data +from django.utils import timezone +from datetime import timedelta + +logger = logging.getLogger(__name__) + + +class VODPagination(PageNumberPagination): + page_size = 20 # Default page size to match frontend default + page_size_query_param = "page_size" # Allow clients to specify page size + max_page_size = 100 # Prevent excessive page sizes for VOD content + + +class MovieFilter(django_filters.FilterSet): + name = django_filters.CharFilter(lookup_expr="icontains") + m3u_account = django_filters.NumberFilter(field_name="m3u_relations__m3u_account__id") + category = django_filters.CharFilter(method='filter_category') + year = django_filters.NumberFilter() + year_gte = django_filters.NumberFilter(field_name="year", lookup_expr="gte") + year_lte = django_filters.NumberFilter(field_name="year", lookup_expr="lte") + + class Meta: + model = Movie + fields = ['name', 'm3u_account', 'category', 'year'] + + def filter_category(self, queryset, name, value): + """Custom category filter that handles 'name|type' format""" + if not value: + return queryset + + # Handle the format 'category_name|category_type' + if '|' in value: + category_name, category_type = value.split('|', 1) + return queryset.filter( + m3u_relations__category__name=category_name, + m3u_relations__category__category_type=category_type + ) + else: + # Fallback: treat as category name only + return queryset.filter(m3u_relations__category__name=value) + + +class MovieViewSet(viewsets.ReadOnlyModelViewSet): + """ViewSet for Movie content""" + queryset = Movie.objects.all() + serializer_class = MovieSerializer + pagination_class = VODPagination + + filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter] + filterset_class = MovieFilter + search_fields = ['name', 'description', 'genre'] + ordering_fields = ['name', 'year', 'created_at'] + ordering = ['name'] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + + def get_queryset(self): + # Only return movies that have active M3U relations + return Movie.objects.filter( + m3u_relations__m3u_account__is_active=True + ).distinct().select_related('logo').prefetch_related('m3u_relations__m3u_account') + + @action(detail=True, methods=['get'], url_path='providers') + def get_providers(self, request, pk=None): + """Get all providers (M3U accounts) that have this movie""" + movie = self.get_object() + relations = M3UMovieRelation.objects.filter( + movie=movie, + m3u_account__is_active=True + ).select_related('m3u_account', 'category') + + serializer = M3UMovieRelationSerializer(relations, many=True) + return Response(serializer.data) + + + @action(detail=True, methods=['get'], url_path='provider-info') + def provider_info(self, request, pk=None): + """Get detailed movie information from the original provider, throttled to 24h.""" + movie = self.get_object() + + # Get the highest priority active relation + relation = M3UMovieRelation.objects.filter( + movie=movie, + m3u_account__is_active=True + ).select_related('m3u_account').order_by('-m3u_account__priority', 'id').first() + + if not relation: + return Response( + {'error': 'No active M3U account associated with this movie'}, + status=status.HTTP_400_BAD_REQUEST + ) + + force_refresh = 
request.query_params.get('force_refresh', 'false').lower() == 'true' + now = timezone.now() + needs_refresh = ( + force_refresh or + not relation.last_advanced_refresh or + (now - relation.last_advanced_refresh).total_seconds() > 86400 + ) + + if needs_refresh: + # Trigger advanced data refresh + logger.debug(f"Refreshing advanced data for movie {movie.id} (relation ID: {relation.id})") + refresh_movie_advanced_data(relation.id, force_refresh=force_refresh) + + # Refresh objects from database after task completion + movie.refresh_from_db() + relation.refresh_from_db() + + # Use refreshed data from database + custom_props = relation.custom_properties or {} + info = custom_props.get('detailed_info', {}) + movie_data = custom_props.get('movie_data', {}) + + # Build response with available data + response_data = { + 'id': movie.id, + 'uuid': movie.uuid, + 'stream_id': relation.stream_id, + 'name': info.get('name', movie.name), + 'o_name': info.get('o_name', ''), + 'description': info.get('description', info.get('plot', movie.description)), + 'plot': info.get('plot', info.get('description', movie.description)), + 'year': movie.year or info.get('year'), + 'release_date': (movie.custom_properties or {}).get('release_date') or info.get('release_date') or info.get('releasedate', ''), + 'genre': movie.genre or info.get('genre', ''), + 'director': (movie.custom_properties or {}).get('director') or info.get('director', ''), + 'actors': (movie.custom_properties or {}).get('actors') or info.get('actors', ''), + 'country': (movie.custom_properties or {}).get('country') or info.get('country', ''), + 'rating': movie.rating or info.get('rating', movie.rating or 0), + 'tmdb_id': movie.tmdb_id or info.get('tmdb_id', ''), + 'imdb_id': movie.imdb_id or info.get('imdb_id', ''), + 'youtube_trailer': (movie.custom_properties or {}).get('youtube_trailer') or info.get('youtube_trailer') or info.get('trailer', ''), + 'duration_secs': movie.duration_secs or info.get('duration_secs'), + 'age': info.get('age', ''), + 'backdrop_path': (movie.custom_properties or {}).get('backdrop_path') or info.get('backdrop_path', []), + 'cover': info.get('cover_big', ''), + 'cover_big': info.get('cover_big', ''), + 'movie_image': movie.logo.url if movie.logo else info.get('movie_image', ''), + 'bitrate': info.get('bitrate', 0), + 'video': info.get('video', {}), + 'audio': info.get('audio', {}), + 'container_extension': movie_data.get('container_extension', 'mp4'), + 'direct_source': movie_data.get('direct_source', ''), + 'category_id': movie_data.get('category_id', ''), + 'added': movie_data.get('added', ''), + 'm3u_account': { + 'id': relation.m3u_account.id, + 'name': relation.m3u_account.name, + 'account_type': relation.m3u_account.account_type + } + } + return Response(response_data) + +class EpisodeFilter(django_filters.FilterSet): + name = django_filters.CharFilter(lookup_expr="icontains") + series = django_filters.NumberFilter(field_name="series__id") + m3u_account = django_filters.NumberFilter(field_name="m3u_account__id") + season_number = django_filters.NumberFilter() + episode_number = django_filters.NumberFilter() + + class Meta: + model = Episode + fields = ['name', 'series', 'm3u_account', 'season_number', 'episode_number'] + + +class SeriesFilter(django_filters.FilterSet): + name = django_filters.CharFilter(lookup_expr="icontains") + m3u_account = django_filters.NumberFilter(field_name="m3u_relations__m3u_account__id") + category = django_filters.CharFilter(method='filter_category') + year = django_filters.NumberFilter() 
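Both MovieFilter above and SeriesFilter here accept a combined category value in 'name|type' form, split on the first '|'. A client-side sketch of how that filter might be queried; the host and the router mount point are assumptions, the parameter names come from the filters themselves:

import requests

params = {
    "category": "Action|movie",   # '<category name>|<category type>'
    "year_gte": 2015,
    "page_size": 24,
}
resp = requests.get("http://dispatcharr.local/api/vod/movies/", params=params, timeout=10)
for movie in resp.json()["results"]:
    print(movie["name"], movie["year"])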
+ year_gte = django_filters.NumberFilter(field_name="year", lookup_expr="gte") + year_lte = django_filters.NumberFilter(field_name="year", lookup_expr="lte") + + class Meta: + model = Series + fields = ['name', 'm3u_account', 'category', 'year'] + + def filter_category(self, queryset, name, value): + """Custom category filter that handles 'name|type' format""" + if not value: + return queryset + + # Handle the format 'category_name|category_type' + if '|' in value: + category_name, category_type = value.split('|', 1) + return queryset.filter( + m3u_relations__category__name=category_name, + m3u_relations__category__category_type=category_type + ) + else: + # Fallback: treat as category name only + return queryset.filter(m3u_relations__category__name=value) + + +class EpisodeViewSet(viewsets.ReadOnlyModelViewSet): + """ViewSet for Episode content""" + queryset = Episode.objects.all() + serializer_class = EpisodeSerializer + pagination_class = VODPagination + + filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter] + filterset_class = EpisodeFilter + search_fields = ['name', 'description'] + ordering_fields = ['name', 'season_number', 'episode_number', 'created_at'] + ordering = ['series__name', 'season_number', 'episode_number'] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + + def get_queryset(self): + return Episode.objects.select_related( + 'series', 'm3u_account' + ).filter(m3u_account__is_active=True) + + +class SeriesViewSet(viewsets.ReadOnlyModelViewSet): + """ViewSet for Series management""" + queryset = Series.objects.all() + serializer_class = SeriesSerializer + pagination_class = VODPagination + + filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter] + filterset_class = SeriesFilter + search_fields = ['name', 'description', 'genre'] + ordering_fields = ['name', 'year', 'created_at'] + ordering = ['name'] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + + def get_queryset(self): + # Only return series that have active M3U relations + return Series.objects.filter( + m3u_relations__m3u_account__is_active=True + ).distinct().select_related('logo').prefetch_related('episodes', 'm3u_relations__m3u_account') + + @action(detail=True, methods=['get'], url_path='providers') + def get_providers(self, request, pk=None): + """Get all providers (M3U accounts) that have this series""" + series = self.get_object() + relations = M3USeriesRelation.objects.filter( + series=series, + m3u_account__is_active=True + ).select_related('m3u_account', 'category') + + serializer = M3USeriesRelationSerializer(relations, many=True) + return Response(serializer.data) + + @action(detail=True, methods=['get'], url_path='episodes') + def get_episodes(self, request, pk=None): + """Get episodes for this series with provider information""" + series = self.get_object() + episodes = Episode.objects.filter(series=series).prefetch_related( + 'm3u_relations__m3u_account' + ).order_by('season_number', 'episode_number') + + episodes_data = [] + for episode in episodes: + episode_serializer = EpisodeSerializer(episode) + episode_data = episode_serializer.data + + # Add provider information + relations = M3UEpisodeRelation.objects.filter( + episode=episode, + m3u_account__is_active=True + ).select_related('m3u_account') + + episode_data['providers'] = 
M3UEpisodeRelationSerializer(relations, many=True).data + episodes_data.append(episode_data) + + return Response(episodes_data) + + @action(detail=True, methods=['get'], url_path='provider-info') + def series_info(self, request, pk=None): + """Get detailed series information, refreshing from provider if needed""" + logger.debug(f"SeriesViewSet.series_info called for series ID: {pk}") + series = self.get_object() + logger.debug(f"Retrieved series: {series.name} (ID: {series.id})") + + # Get the highest priority active relation + relation = M3USeriesRelation.objects.filter( + series=series, + m3u_account__is_active=True + ).select_related('m3u_account').order_by('-m3u_account__priority', 'id').first() + + if not relation: + return Response( + {'error': 'No active M3U account associated with this series'}, + status=status.HTTP_400_BAD_REQUEST + ) + + try: + # Check if we should refresh data (optional force refresh parameter) + force_refresh = request.query_params.get('force_refresh', 'false').lower() == 'true' + refresh_interval_hours = int(request.query_params.get("refresh_interval", 24)) # Default to 24 hours + + now = timezone.now() + last_refreshed = relation.last_episode_refresh + + # Check if detailed data has been fetched + custom_props = relation.custom_properties or {} + episodes_fetched = custom_props.get('episodes_fetched', False) + detailed_fetched = custom_props.get('detailed_fetched', False) + + # Force refresh if episodes have never been fetched or if forced + if not episodes_fetched or not detailed_fetched or force_refresh: + force_refresh = True + logger.debug(f"Series {series.id} needs detailed/episode refresh, forcing refresh") + elif last_refreshed is None or (now - last_refreshed) > timedelta(hours=refresh_interval_hours): + force_refresh = True + logger.debug(f"Series {series.id} refresh interval exceeded or never refreshed, forcing refresh") + + if force_refresh: + logger.debug(f"Refreshing series {series.id} data from provider") + # Use existing refresh logic with external_series_id + from .tasks import refresh_series_episodes + account = relation.m3u_account + if account and account.is_active: + refresh_series_episodes(account, series, relation.external_series_id) + series.refresh_from_db() # Reload from database after refresh + relation.refresh_from_db() # Reload relation too + + # Return the database data (which should now be fresh) + custom_props = relation.custom_properties or {} + response_data = { + 'id': series.id, + 'series_id': relation.external_series_id, + 'name': series.name, + 'description': series.description, + 'year': series.year, + 'genre': series.genre, + 'rating': series.rating, + 'tmdb_id': series.tmdb_id, + 'imdb_id': series.imdb_id, + 'category_id': relation.category.id if relation.category else None, + 'category_name': relation.category.name if relation.category else None, + 'cover': { + 'id': series.logo.id, + 'url': series.logo.url, + 'name': series.logo.name, + } if series.logo else None, + 'last_refreshed': series.updated_at, + 'custom_properties': series.custom_properties, + 'm3u_account': { + 'id': relation.m3u_account.id, + 'name': relation.m3u_account.name, + 'account_type': relation.m3u_account.account_type + }, + 'episodes_fetched': custom_props.get('episodes_fetched', False), + 'detailed_fetched': custom_props.get('detailed_fetched', False) + } + + # Always include episodes for series info if they've been fetched + include_episodes = request.query_params.get('include_episodes', 'true').lower() == 'true' + if include_episodes and 
custom_props.get('episodes_fetched', False): + logger.debug(f"Including episodes for series {series.id}") + episodes_by_season = {} + for episode in series.episodes.all().order_by('season_number', 'episode_number'): + season_key = str(episode.season_number or 0) + if season_key not in episodes_by_season: + episodes_by_season[season_key] = [] + + # Get episode relation for additional data + episode_relation = M3UEpisodeRelation.objects.filter( + episode=episode, + m3u_account=relation.m3u_account + ).first() + + episode_data = { + 'id': episode.id, + 'uuid': episode.uuid, + 'name': episode.name, + 'title': episode.name, + 'episode_number': episode.episode_number, + 'season_number': episode.season_number, + 'description': episode.description, + 'air_date': episode.air_date, + 'plot': episode.description, + 'duration_secs': episode.duration_secs, + 'rating': episode.rating, + 'tmdb_id': episode.tmdb_id, + 'imdb_id': episode.imdb_id, + 'movie_image': episode.custom_properties.get('movie_image', '') if episode.custom_properties else '', + 'container_extension': episode_relation.container_extension if episode_relation else 'mp4', + 'type': 'episode', + 'series': { + 'id': series.id, + 'name': series.name + } + } + episodes_by_season[season_key].append(episode_data) + + response_data['episodes'] = episodes_by_season + logger.debug(f"Added {len(episodes_by_season)} seasons of episodes to response") + elif include_episodes: + # Episodes not yet fetched, include empty episodes list + response_data['episodes'] = {} + + logger.debug(f"Returning series info response for series {series.id}") + return Response(response_data) + + except Exception as e: + logger.error(f"Error fetching series info for series {pk}: {str(e)}") + return Response( + {'error': f'Failed to fetch series information: {str(e)}'}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR + ) + + +class VODCategoryFilter(django_filters.FilterSet): + name = django_filters.CharFilter(lookup_expr="icontains") + category_type = django_filters.ChoiceFilter(choices=VODCategory.CATEGORY_TYPE_CHOICES) + m3u_account = django_filters.NumberFilter(field_name="m3u_account__id") + + class Meta: + model = VODCategory + fields = ['name', 'category_type', 'm3u_account'] + + +class VODCategoryViewSet(viewsets.ReadOnlyModelViewSet): + """ViewSet for VOD Categories""" + queryset = VODCategory.objects.all() + serializer_class = VODCategorySerializer + + filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter] + filterset_class = VODCategoryFilter + search_fields = ['name'] + ordering = ['name'] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + + +class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet): + """ViewSet that combines Movies and Series for unified 'All' view""" + queryset = Movie.objects.none() # Empty queryset, we override list method + serializer_class = MovieSerializer # Default serializer, overridden in list + pagination_class = VODPagination + + filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter] + search_fields = ['name', 'description', 'genre'] + ordering_fields = ['name', 'year', 'created_at'] + ordering = ['name'] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + return [Authenticated()] + + def list(self, request, *args, **kwargs): + """Override list to handle unified content properly - database-level approach""" + import 
logging + from django.db import connection + + logger = logging.getLogger(__name__) + logger.error("=== UnifiedContentViewSet.list() called ===") + + try: + # Get pagination parameters + page_size = int(request.query_params.get('page_size', 24)) + page_number = int(request.query_params.get('page', 1)) + + logger.error(f"Page {page_number}, page_size {page_size}") + + # Calculate offset for unified pagination + offset = (page_number - 1) * page_size + + # For high page numbers, use raw SQL for efficiency + # This avoids loading and sorting massive amounts of data in Python + + search = request.query_params.get('search', '') + category = request.query_params.get('category', '') + + # Build WHERE clauses + where_conditions = [ + # Only active content + "movies.id IN (SELECT DISTINCT movie_id FROM vod_m3umovierelation mmr JOIN m3u_m3uaccount ma ON mmr.m3u_account_id = ma.id WHERE ma.is_active = true)", + "series.id IN (SELECT DISTINCT series_id FROM vod_m3useriesrelation msr JOIN m3u_m3uaccount ma ON msr.m3u_account_id = ma.id WHERE ma.is_active = true)" + ] + + params = [] + + if search: + where_conditions[0] += " AND LOWER(movies.name) LIKE %s" + where_conditions[1] += " AND LOWER(series.name) LIKE %s" + search_param = f"%{search.lower()}%" + params.extend([search_param, search_param]) + + if category: + if '|' in category: + cat_name, cat_type = category.split('|', 1) + if cat_type == 'movie': + where_conditions[0] += " AND movies.id IN (SELECT movie_id FROM vod_m3umovierelation mmr JOIN vod_vodcategory c ON mmr.category_id = c.id WHERE c.name = %s)" + where_conditions[1] = "1=0" # Exclude series + params.append(cat_name) + elif cat_type == 'series': + where_conditions[1] += " AND series.id IN (SELECT series_id FROM vod_m3useriesrelation msr JOIN vod_vodcategory c ON msr.category_id = c.id WHERE c.name = %s)" + where_conditions[0] = "1=0" # Exclude movies + params.append(cat_name) + else: + where_conditions[0] += " AND movies.id IN (SELECT movie_id FROM vod_m3umovierelation mmr JOIN vod_vodcategory c ON mmr.category_id = c.id WHERE c.name = %s)" + where_conditions[1] += " AND series.id IN (SELECT series_id FROM vod_m3useriesrelation msr JOIN vod_vodcategory c ON msr.category_id = c.id WHERE c.name = %s)" + params.extend([category, category]) + + # Use UNION ALL with ORDER BY and LIMIT/OFFSET for true unified pagination + # This is much more efficient than Python sorting + sql = f""" + WITH unified_content AS ( + SELECT + movies.id, + movies.uuid, + movies.name, + movies.description, + movies.year, + movies.rating, + movies.genre, + movies.duration_secs as duration, + movies.created_at, + movies.updated_at, + movies.custom_properties, + movies.logo_id, + logo.name as logo_name, + logo.url as logo_url, + 'movie' as content_type + FROM vod_movie movies + LEFT JOIN vod_vodlogo logo ON movies.logo_id = logo.id + WHERE {where_conditions[0]} + + UNION ALL + + SELECT + series.id, + series.uuid, + series.name, + series.description, + series.year, + series.rating, + series.genre, + NULL as duration, + series.created_at, + series.updated_at, + series.custom_properties, + series.logo_id, + logo.name as logo_name, + logo.url as logo_url, + 'series' as content_type + FROM vod_series series + LEFT JOIN vod_vodlogo logo ON series.logo_id = logo.id + WHERE {where_conditions[1]} + ) + SELECT * FROM unified_content + ORDER BY LOWER(name), id + LIMIT %s OFFSET %s + """ + + params.extend([page_size, offset]) + + logger.error(f"Executing SQL with LIMIT {page_size} OFFSET {offset}") + + with connection.cursor() 
as cursor: + cursor.execute(sql, params) + columns = [col[0] for col in cursor.description] + results = [] + + for row in cursor.fetchall(): + item_dict = dict(zip(columns, row)) + + # Build logo object in the format expected by frontend + logo_data = None + if item_dict['logo_id']: + logo_data = { + 'id': item_dict['logo_id'], + 'name': item_dict['logo_name'], + 'url': item_dict['logo_url'], + 'cache_url': f"/api/vod/vodlogos/{item_dict['logo_id']}/cache/", + 'movie_count': 0, # We don't calculate this in raw SQL + 'series_count': 0, # We don't calculate this in raw SQL + 'is_used': True + } + + # Convert to the format expected by frontend + formatted_item = { + 'id': item_dict['id'], + 'uuid': str(item_dict['uuid']), + 'name': item_dict['name'], + 'description': item_dict['description'] or '', + 'year': item_dict['year'], + 'rating': float(item_dict['rating']) if item_dict['rating'] else 0.0, + 'genre': item_dict['genre'] or '', + 'duration': item_dict['duration'], + 'created_at': item_dict['created_at'].isoformat() if item_dict['created_at'] else None, + 'updated_at': item_dict['updated_at'].isoformat() if item_dict['updated_at'] else None, + 'custom_properties': item_dict['custom_properties'] or {}, + 'logo': logo_data, + 'content_type': item_dict['content_type'] + } + results.append(formatted_item) + + logger.error(f"Retrieved {len(results)} results via SQL") + + # Get total count estimate (for pagination info) + # Use a separate efficient count query + count_sql = f""" + SELECT COUNT(*) FROM ( + SELECT 1 FROM vod_movie movies WHERE {where_conditions[0]} + UNION ALL + SELECT 1 FROM vod_series series WHERE {where_conditions[1]} + ) as total_count + """ + + count_params = params[:-2] # Remove LIMIT and OFFSET params + + with connection.cursor() as cursor: + cursor.execute(count_sql, count_params) + total_count = cursor.fetchone()[0] + + response_data = { + 'count': total_count, + 'next': offset + page_size < total_count, + 'previous': page_number > 1, + 'results': results + } + + return Response(response_data) + + except Exception as e: + logger.error(f"Error in UnifiedContentViewSet.list(): {e}") + import traceback + logger.error(traceback.format_exc()) + return Response({'error': str(e)}, status=500) + + +class VODLogoPagination(PageNumberPagination): + page_size = 100 + page_size_query_param = "page_size" + max_page_size = 1000 + + +class VODLogoViewSet(viewsets.ModelViewSet): + """ViewSet for VOD Logo management""" + queryset = VODLogo.objects.all() + serializer_class = VODLogoSerializer + pagination_class = VODLogoPagination + filter_backends = [SearchFilter, OrderingFilter] + search_fields = ['name', 'url'] + ordering_fields = ['name', 'id'] + ordering = ['name'] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + if self.action == 'cache': + return [AllowAny()] + return [Authenticated()] + + def get_queryset(self): + """Optimize queryset with prefetch and add filtering""" + queryset = VODLogo.objects.prefetch_related('movie', 'series').order_by('name') + + # Filter by specific IDs + ids = self.request.query_params.getlist('ids') + if ids: + try: + id_list = [int(id_str) for id_str in ids if id_str.isdigit()] + if id_list: + queryset = queryset.filter(id__in=id_list) + except (ValueError, TypeError): + queryset = VODLogo.objects.none() + + # Filter by usage + used_filter = self.request.query_params.get('used', None) + if used_filter == 'true': + # Return logos that are used by movies OR series + 
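UnifiedContentViewSet.list() above pages the UNION ALL query with a plain LIMIT/OFFSET and derives next/previous flags from counts rather than DRF-style URLs. A quick worked check of that arithmetic with illustrative numbers:

page_size, page_number, total_count = 24, 3, 100
offset = (page_number - 1) * page_size           # 48 rows skipped (pages 1 and 2)
has_next = offset + page_size < total_count      # 48 + 24 = 72 < 100 -> True
has_previous = page_number > 1                   # True
print(offset, has_next, has_previous)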
queryset = queryset.filter( + Q(movie__isnull=False) | Q(series__isnull=False) + ).distinct() + elif used_filter == 'false': + # Return logos that are NOT used by either + queryset = queryset.filter( + movie__isnull=True, + series__isnull=True + ) + elif used_filter == 'movies': + # Return logos that are used by movies (may also be used by series) + queryset = queryset.filter(movie__isnull=False).distinct() + elif used_filter == 'series': + # Return logos that are used by series (may also be used by movies) + queryset = queryset.filter(series__isnull=False).distinct() + + + # Filter by name + name_query = self.request.query_params.get('name', None) + if name_query: + queryset = queryset.filter(name__icontains=name_query) + + # No pagination mode + if self.request.query_params.get('no_pagination', 'false').lower() == 'true': + self.pagination_class = None + + return queryset + + @action(detail=True, methods=["get"], permission_classes=[AllowAny]) + def cache(self, request, pk=None): + """Streams the VOD logo file, whether it's local or remote.""" + logo = self.get_object() + + if not logo.url: + return HttpResponse(status=404) + + # Check if this is a local file path + if logo.url.startswith('/data/'): + # It's a local file + file_path = logo.url + if not os.path.exists(file_path): + logger.error(f"VOD logo file not found: {file_path}") + return HttpResponse(status=404) + + try: + return FileResponse(open(file_path, 'rb'), content_type='image/png') + except Exception as e: + logger.error(f"Error serving VOD logo file {file_path}: {str(e)}") + return HttpResponse(status=500) + else: + # It's a remote URL - proxy it + try: + response = requests.get(logo.url, stream=True, timeout=10) + response.raise_for_status() + + content_type = response.headers.get('Content-Type', 'image/png') + + return StreamingHttpResponse( + response.iter_content(chunk_size=8192), + content_type=content_type + ) + except requests.exceptions.RequestException as e: + logger.error(f"Error fetching remote VOD logo {logo.url}: {str(e)}") + return HttpResponse(status=404) + + @action(detail=False, methods=["delete"], url_path="bulk-delete") + def bulk_delete(self, request): + """Delete multiple VOD logos at once""" + logo_ids = request.data.get('logo_ids', []) + + if not logo_ids: + return Response( + {"error": "No logo IDs provided"}, + status=status.HTTP_400_BAD_REQUEST + ) + + try: + # Get logos to delete + logos = VODLogo.objects.filter(id__in=logo_ids) + deleted_count = logos.count() + + # Delete them + logos.delete() + + return Response({ + "deleted_count": deleted_count, + "message": f"Successfully deleted {deleted_count} VOD logo(s)" + }) + except Exception as e: + logger.error(f"Error during bulk VOD logo deletion: {str(e)}") + return Response( + {"error": str(e)}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR + ) + + @action(detail=False, methods=["post"]) + def cleanup(self, request): + """Delete all VOD logos that are not used by any movies or series""" + try: + # Find unused logos + unused_logos = VODLogo.objects.filter( + movie__isnull=True, + series__isnull=True + ) + + deleted_count = unused_logos.count() + logo_names = list(unused_logos.values_list('name', flat=True)) + + # Delete them + unused_logos.delete() + + logger.info(f"Cleaned up {deleted_count} unused VOD logos: {logo_names}") + + return Response({ + "deleted_count": deleted_count, + "deleted_logos": logo_names, + "message": f"Successfully deleted {deleted_count} unused VOD logo(s)" + }) + except Exception as e: + logger.error(f"Error during VOD 
logo cleanup: {str(e)}") + return Response( + {"error": str(e)}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR + ) + diff --git a/apps/vod/apps.py b/apps/vod/apps.py new file mode 100644 index 00000000..0e2af56d --- /dev/null +++ b/apps/vod/apps.py @@ -0,0 +1,12 @@ +from django.apps import AppConfig + + +class VODConfig(AppConfig): + default_auto_field = 'django.db.models.BigAutoField' + name = 'apps.vod' + verbose_name = 'Video on Demand' + + def ready(self): + """Initialize VOD app when Django is ready""" + # Import models to ensure they're registered + from . import models diff --git a/apps/vod/migrations/0001_initial.py b/apps/vod/migrations/0001_initial.py new file mode 100644 index 00000000..02c6ae2a --- /dev/null +++ b/apps/vod/migrations/0001_initial.py @@ -0,0 +1,201 @@ +# Generated by Django 5.2.4 on 2025-08-28 18:16 + +import django.db.models.deletion +import uuid +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ('dispatcharr_channels', '0024_alter_channelgroupm3uaccount_channel_group'), + ('m3u', '0016_m3uaccount_priority'), + ] + + operations = [ + migrations.CreateModel( + name='Movie', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)), + ('name', models.CharField(max_length=255)), + ('description', models.TextField(blank=True, null=True)), + ('year', models.IntegerField(blank=True, null=True)), + ('rating', models.CharField(blank=True, max_length=10, null=True)), + ('genre', models.CharField(blank=True, max_length=255, null=True)), + ('duration_secs', models.IntegerField(blank=True, help_text='Duration in seconds', null=True)), + ('tmdb_id', models.CharField(blank=True, help_text='TMDB ID for metadata', max_length=50, null=True, unique=True)), + ('imdb_id', models.CharField(blank=True, help_text='IMDB ID for metadata', max_length=50, null=True, unique=True)), + ('custom_properties', models.JSONField(blank=True, help_text='Additional metadata and properties for the movie', null=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('logo', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='movie', to='dispatcharr_channels.logo')), + ], + options={ + 'verbose_name': 'Movie', + 'verbose_name_plural': 'Movies', + 'ordering': ['name'], + }, + ), + migrations.CreateModel( + name='Series', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)), + ('name', models.CharField(max_length=255)), + ('description', models.TextField(blank=True, null=True)), + ('year', models.IntegerField(blank=True, null=True)), + ('rating', models.CharField(blank=True, max_length=10, null=True)), + ('genre', models.CharField(blank=True, max_length=255, null=True)), + ('tmdb_id', models.CharField(blank=True, help_text='TMDB ID for metadata', max_length=50, null=True, unique=True)), + ('imdb_id', models.CharField(blank=True, help_text='IMDB ID for metadata', max_length=50, null=True, unique=True)), + ('custom_properties', models.JSONField(blank=True, help_text='Additional metadata and properties for the series', null=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), 
+ ('logo', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='series', to='dispatcharr_channels.logo')), + ], + options={ + 'verbose_name': 'Series', + 'verbose_name_plural': 'Series', + 'ordering': ['name'], + }, + ), + migrations.CreateModel( + name='Episode', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)), + ('name', models.CharField(max_length=255)), + ('description', models.TextField(blank=True, null=True)), + ('air_date', models.DateField(blank=True, null=True)), + ('rating', models.CharField(blank=True, max_length=10, null=True)), + ('duration_secs', models.IntegerField(blank=True, help_text='Duration in seconds', null=True)), + ('season_number', models.IntegerField(blank=True, null=True)), + ('episode_number', models.IntegerField(blank=True, null=True)), + ('tmdb_id', models.CharField(blank=True, db_index=True, help_text='TMDB ID for metadata', max_length=50, null=True)), + ('imdb_id', models.CharField(blank=True, db_index=True, help_text='IMDB ID for metadata', max_length=50, null=True)), + ('custom_properties', models.JSONField(blank=True, help_text='Custom properties for this episode', null=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('series', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='episodes', to='vod.series')), + ], + options={ + 'verbose_name': 'Episode', + 'verbose_name_plural': 'Episodes', + 'ordering': ['series__name', 'season_number', 'episode_number'], + }, + ), + migrations.CreateModel( + name='VODCategory', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('name', models.CharField(max_length=255)), + ('category_type', models.CharField(choices=[('movie', 'Movie'), ('series', 'Series')], default='movie', help_text='Type of content this category contains', max_length=10)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ], + options={ + 'verbose_name': 'VOD Category', + 'verbose_name_plural': 'VOD Categories', + 'ordering': ['name'], + 'unique_together': {('name', 'category_type')}, + }, + ), + migrations.CreateModel( + name='M3UVODCategoryRelation', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('enabled', models.BooleanField(default=False, help_text='Set to false to deactivate this category for the M3U account')), + ('custom_properties', models.JSONField(blank=True, help_text='Provider-specific data like quality, language, etc.', null=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('m3u_account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_relations', to='m3u.m3uaccount')), + ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='m3u_relations', to='vod.vodcategory')), + ], + options={ + 'verbose_name': 'M3U VOD Category Relation', + 'verbose_name_plural': 'M3U VOD Category Relations', + }, + ), + migrations.CreateModel( + name='M3USeriesRelation', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('external_series_id', 
models.CharField(help_text='External series ID from M3U provider', max_length=255)), + ('custom_properties', models.JSONField(blank=True, help_text='Provider-specific data', null=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('last_episode_refresh', models.DateTimeField(blank=True, help_text='Last time episodes were refreshed', null=True)), + ('m3u_account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='series_relations', to='m3u.m3uaccount')), + ('series', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='m3u_relations', to='vod.series')), + ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='vod.vodcategory')), + ], + options={ + 'verbose_name': 'M3U Series Relation', + 'verbose_name_plural': 'M3U Series Relations', + }, + ), + migrations.CreateModel( + name='M3UMovieRelation', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('stream_id', models.CharField(help_text='External stream ID from M3U provider', max_length=255)), + ('container_extension', models.CharField(blank=True, max_length=10, null=True)), + ('custom_properties', models.JSONField(blank=True, help_text='Provider-specific data like quality, language, etc.', null=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('last_advanced_refresh', models.DateTimeField(blank=True, help_text='Last time advanced data was fetched from provider', null=True)), + ('m3u_account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='movie_relations', to='m3u.m3uaccount')), + ('movie', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='m3u_relations', to='vod.movie')), + ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='vod.vodcategory')), + ], + options={ + 'verbose_name': 'M3U Movie Relation', + 'verbose_name_plural': 'M3U Movie Relations', + }, + ), + migrations.CreateModel( + name='M3UEpisodeRelation', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('stream_id', models.CharField(help_text='External stream ID from M3U provider', max_length=255)), + ('container_extension', models.CharField(blank=True, max_length=10, null=True)), + ('custom_properties', models.JSONField(blank=True, help_text='Provider-specific data like quality, language, etc.', null=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('episode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='m3u_relations', to='vod.episode')), + ('m3u_account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='episode_relations', to='m3u.m3uaccount')), + ], + options={ + 'verbose_name': 'M3U Episode Relation', + 'verbose_name_plural': 'M3U Episode Relations', + 'unique_together': {('m3u_account', 'stream_id')}, + }, + ), + migrations.AddConstraint( + model_name='movie', + constraint=models.UniqueConstraint(condition=models.Q(('tmdb_id__isnull', True), ('imdb_id__isnull', True)), fields=('name', 'year'), name='unique_movie_name_year_no_external_id'), + ), + migrations.AddConstraint( + model_name='series', + 
constraint=models.UniqueConstraint(condition=models.Q(('tmdb_id__isnull', True), ('imdb_id__isnull', True)), fields=('name', 'year'), name='unique_series_name_year_no_external_id'), + ), + migrations.AlterUniqueTogether( + name='episode', + unique_together={('series', 'season_number', 'episode_number')}, + ), + migrations.AlterUniqueTogether( + name='m3uvodcategoryrelation', + unique_together={('m3u_account', 'category')}, + ), + migrations.AlterUniqueTogether( + name='m3useriesrelation', + unique_together={('m3u_account', 'external_series_id')}, + ), + migrations.AlterUniqueTogether( + name='m3umovierelation', + unique_together={('m3u_account', 'stream_id')}, + ), + ] diff --git a/apps/vod/migrations/0002_add_last_seen_with_default.py b/apps/vod/migrations/0002_add_last_seen_with_default.py new file mode 100644 index 00000000..6ece988a --- /dev/null +++ b/apps/vod/migrations/0002_add_last_seen_with_default.py @@ -0,0 +1,29 @@ +# Generated by Django 5.2.4 on 2025-09-04 21:12 + +import django.utils.timezone +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('vod', '0001_initial'), + ] + + operations = [ + migrations.AddField( + model_name='m3uepisoderelation', + name='last_seen', + field=models.DateTimeField(default=django.utils.timezone.now, help_text='Last time this relation was seen during VOD scan'), + ), + migrations.AddField( + model_name='m3umovierelation', + name='last_seen', + field=models.DateTimeField(default=django.utils.timezone.now, help_text='Last time this relation was seen during VOD scan'), + ), + migrations.AddField( + model_name='m3useriesrelation', + name='last_seen', + field=models.DateTimeField(default=django.utils.timezone.now, help_text='Last time this relation was seen during VOD scan'), + ), + ] diff --git a/apps/vod/migrations/0003_vodlogo_alter_movie_logo_alter_series_logo.py b/apps/vod/migrations/0003_vodlogo_alter_movie_logo_alter_series_logo.py new file mode 100644 index 00000000..1bd2c418 --- /dev/null +++ b/apps/vod/migrations/0003_vodlogo_alter_movie_logo_alter_series_logo.py @@ -0,0 +1,264 @@ +# Generated by Django 5.2.4 on 2025-11-06 23:01 + +import django.db.models.deletion +from django.db import migrations, models + + +def migrate_vod_logos_forward(apps, schema_editor): + """ + Migrate VOD logos from the Logo table to the new VODLogo table. + This copies all logos referenced by movies or series to VODLogo. + Uses pure SQL for maximum performance. 
+ """ + from django.db import connection + + print("\n" + "="*80) + print("Starting VOD logo migration...") + print("="*80) + + with connection.cursor() as cursor: + # Step 1: Copy unique logos from Logo table to VODLogo table + # Only copy logos that are used by movies or series + print("Copying logos to VODLogo table...") + cursor.execute(""" + INSERT INTO vod_vodlogo (name, url) + SELECT DISTINCT l.name, l.url + FROM dispatcharr_channels_logo l + WHERE l.id IN ( + SELECT DISTINCT logo_id FROM vod_movie WHERE logo_id IS NOT NULL + UNION + SELECT DISTINCT logo_id FROM vod_series WHERE logo_id IS NOT NULL + ) + ON CONFLICT (url) DO NOTHING + """) + print(f"Created VODLogo entries") + + # Step 2: Update movies to point to VODLogo IDs using JOIN + print("Updating movie references...") + cursor.execute(""" + UPDATE vod_movie m + SET logo_id = v.id + FROM dispatcharr_channels_logo l + INNER JOIN vod_vodlogo v ON l.url = v.url + WHERE m.logo_id = l.id + AND m.logo_id IS NOT NULL + """) + movie_count = cursor.rowcount + print(f"Updated {movie_count} movies with new VOD logo references") + + # Step 3: Update series to point to VODLogo IDs using JOIN + print("Updating series references...") + cursor.execute(""" + UPDATE vod_series s + SET logo_id = v.id + FROM dispatcharr_channels_logo l + INNER JOIN vod_vodlogo v ON l.url = v.url + WHERE s.logo_id = l.id + AND s.logo_id IS NOT NULL + """) + series_count = cursor.rowcount + print(f"Updated {series_count} series with new VOD logo references") + + print("="*80) + print("VOD logo migration completed successfully!") + print(f"Summary: Updated {movie_count} movies and {series_count} series") + print("="*80 + "\n") + + +def migrate_vod_logos_backward(apps, schema_editor): + """ + Reverse migration - moves VODLogos back to Logo table. + This recreates Logo entries for all VODLogos and updates Movie/Series references. + """ + Logo = apps.get_model('dispatcharr_channels', 'Logo') + VODLogo = apps.get_model('vod', 'VODLogo') + Movie = apps.get_model('vod', 'Movie') + Series = apps.get_model('vod', 'Series') + + print("\n" + "="*80) + print("REVERSE: Moving VOD logos back to Logo table...") + print("="*80) + + # Get all VODLogos + vod_logos = VODLogo.objects.all() + print(f"Found {vod_logos.count()} VOD logos to reverse migrate") + + # Create Logo entries for each VODLogo + logos_to_create = [] + vod_to_logo_mapping = {} # VODLogo ID -> Logo ID + + for vod_logo in vod_logos: + # Check if a Logo with this URL already exists + existing_logo = Logo.objects.filter(url=vod_logo.url).first() + + if existing_logo: + # Logo already exists, just map to it + vod_to_logo_mapping[vod_logo.id] = existing_logo.id + print(f"Logo already exists for URL: {vod_logo.url[:50]}... 
(using existing)") + else: + # Create new Logo entry + new_logo = Logo(name=vod_logo.name, url=vod_logo.url) + logos_to_create.append(new_logo) + + # Bulk create new Logo entries + if logos_to_create: + print(f"Creating {len(logos_to_create)} new Logo entries...") + Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True) + print("Logo entries created") + + # Get the created Logo instances with their IDs + for vod_logo in vod_logos: + if vod_logo.id not in vod_to_logo_mapping: + try: + logo = Logo.objects.get(url=vod_logo.url) + vod_to_logo_mapping[vod_logo.id] = logo.id + except Logo.DoesNotExist: + print(f"Warning: Could not find Logo for URL: {vod_logo.url[:100]}...") + + print(f"Created mapping for {len(vod_to_logo_mapping)} VOD logos -> Logos") + + # Update movies to point back to Logo table + movie_count = 0 + for movie in Movie.objects.exclude(logo__isnull=True): + if movie.logo_id in vod_to_logo_mapping: + movie.logo_id = vod_to_logo_mapping[movie.logo_id] + movie.save(update_fields=['logo_id']) + movie_count += 1 + print(f"Updated {movie_count} movies to use Logo table") + + # Update series to point back to Logo table + series_count = 0 + for series in Series.objects.exclude(logo__isnull=True): + if series.logo_id in vod_to_logo_mapping: + series.logo_id = vod_to_logo_mapping[series.logo_id] + series.save(update_fields=['logo_id']) + series_count += 1 + print(f"Updated {series_count} series to use Logo table") + + # Delete VODLogos (they're now redundant) + vod_logo_count = vod_logos.count() + vod_logos.delete() + print(f"Deleted {vod_logo_count} VOD logos") + + print("="*80) + print("Reverse migration completed!") + print(f"Summary: Created/reused {len(vod_to_logo_mapping)} logos, updated {movie_count} movies and {series_count} series") + print("="*80 + "\n") + + +def cleanup_migrated_logos(apps, schema_editor): + """ + Delete Logo entries that were successfully migrated to VODLogo. + + Uses efficient JOIN-based approach with LEFT JOIN to exclude channel usage. 
+ """ + from django.db import connection + + print("\n" + "="*80) + print("Cleaning up migrated Logo entries...") + print("="*80) + + with connection.cursor() as cursor: + # Single efficient query using JOINs: + # - JOIN with vod_vodlogo to find migrated logos + # - LEFT JOIN with channels to find which aren't used + cursor.execute(""" + DELETE FROM dispatcharr_channels_logo + WHERE id IN ( + SELECT l.id + FROM dispatcharr_channels_logo l + INNER JOIN vod_vodlogo v ON l.url = v.url + LEFT JOIN dispatcharr_channels_channel c ON c.logo_id = l.id + WHERE c.id IS NULL + ) + """) + deleted_count = cursor.rowcount + + print(f"βœ“ Deleted {deleted_count} migrated Logo entries (not used by channels)") + print("="*80 + "\n") + + +class Migration(migrations.Migration): + + dependencies = [ + ('vod', '0002_add_last_seen_with_default'), + ('dispatcharr_channels', '0013_alter_logo_url'), # Ensure Logo table exists + ] + + operations = [ + # Step 1: Create the VODLogo model + migrations.CreateModel( + name='VODLogo', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('name', models.CharField(max_length=255)), + ('url', models.TextField(unique=True)), + ], + options={ + 'verbose_name': 'VOD Logo', + 'verbose_name_plural': 'VOD Logos', + }, + ), + + # Step 2: Remove foreign key constraints temporarily (so we can change the IDs) + # We need to find and drop the actual constraint names dynamically + migrations.RunSQL( + sql=[ + # Drop movie logo constraint (find it dynamically) + """ + DO $$ + DECLARE + constraint_name text; + BEGIN + SELECT conname INTO constraint_name + FROM pg_constraint + WHERE conrelid = 'vod_movie'::regclass + AND conname LIKE '%logo_id%fk%'; + + IF constraint_name IS NOT NULL THEN + EXECUTE 'ALTER TABLE vod_movie DROP CONSTRAINT ' || constraint_name; + END IF; + END $$; + """, + # Drop series logo constraint (find it dynamically) + """ + DO $$ + DECLARE + constraint_name text; + BEGIN + SELECT conname INTO constraint_name + FROM pg_constraint + WHERE conrelid = 'vod_series'::regclass + AND conname LIKE '%logo_id%fk%'; + + IF constraint_name IS NOT NULL THEN + EXECUTE 'ALTER TABLE vod_series DROP CONSTRAINT ' || constraint_name; + END IF; + END $$; + """, + ], + reverse_sql=[ + # The AlterField operations will recreate the constraints pointing to VODLogo, + # so we don't need to manually recreate them in reverse + migrations.RunSQL.noop, + ], + ), + + # Step 3: Migrate the data (this copies logos and updates references) + migrations.RunPython(migrate_vod_logos_forward, migrate_vod_logos_backward), + + # Step 4: Now we can safely alter the foreign keys to point to VODLogo + migrations.AlterField( + model_name='movie', + name='logo', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='movie', to='vod.vodlogo'), + ), + migrations.AlterField( + model_name='series', + name='logo', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='series', to='vod.vodlogo'), + ), + + # Step 5: Clean up migrated Logo entries + migrations.RunPython(cleanup_migrated_logos, migrations.RunPython.noop), + ] diff --git a/apps/vod/migrations/__init__.py b/apps/vod/migrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/apps/vod/models.py b/apps/vod/models.py new file mode 100644 index 00000000..69aed808 --- /dev/null +++ b/apps/vod/models.py @@ -0,0 +1,318 @@ +from django.db import models +from 
django.db.models import Q +from django.utils import timezone +from django.contrib.contenttypes.fields import GenericForeignKey +from django.contrib.contenttypes.models import ContentType +from apps.m3u.models import M3UAccount +import uuid + + +class VODLogo(models.Model): + """Logo model specifically for VOD content (movies and series)""" + name = models.CharField(max_length=255) + url = models.TextField(unique=True) + + def __str__(self): + return self.name + + class Meta: + verbose_name = 'VOD Logo' + verbose_name_plural = 'VOD Logos' + + +class VODCategory(models.Model): + """Categories for organizing VODs (e.g., Action, Comedy, Drama)""" + + CATEGORY_TYPE_CHOICES = [ + ('movie', 'Movie'), + ('series', 'Series'), + ] + + name = models.CharField(max_length=255) + category_type = models.CharField( + max_length=10, + choices=CATEGORY_TYPE_CHOICES, + default='movie', + help_text="Type of content this category contains" + ) + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + + class Meta: + verbose_name = 'VOD Category' + verbose_name_plural = 'VOD Categories' + ordering = ['name'] + unique_together = [('name', 'category_type')] + + @classmethod + def bulk_create_and_fetch(cls, objects, ignore_conflicts=False): + # Perform the bulk create operation + cls.objects.bulk_create(objects, ignore_conflicts=ignore_conflicts) + + # Use the unique fields to fetch the created objects + # Since we have unique_together on ('name', 'category_type'), we need both fields + filter_conditions = [] + for obj in objects: + filter_conditions.append( + Q(name=obj.name, category_type=obj.category_type) + ) + + if filter_conditions: + # Combine all conditions with OR + combined_condition = filter_conditions[0] + for condition in filter_conditions[1:]: + combined_condition |= condition + + created_objects = cls.objects.filter(combined_condition) + else: + created_objects = cls.objects.none() + + return created_objects + + def __str__(self): + return f"{self.name} ({self.get_category_type_display()})" + + +class Series(models.Model): + """Series information for TV shows""" + uuid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True) + name = models.CharField(max_length=255) + description = models.TextField(blank=True, null=True) + year = models.IntegerField(blank=True, null=True) + rating = models.CharField(max_length=10, blank=True, null=True) + genre = models.CharField(max_length=255, blank=True, null=True) + logo = models.ForeignKey(VODLogo, on_delete=models.SET_NULL, null=True, blank=True, related_name='series') + + # Metadata IDs for deduplication - these should be globally unique when present + tmdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="TMDB ID for metadata") + imdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="IMDB ID for metadata") + + # Additional metadata and properties + custom_properties = models.JSONField(blank=True, null=True, help_text='Additional metadata and properties for the series') + + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + + class Meta: + verbose_name = 'Series' + verbose_name_plural = 'Series' + ordering = ['name'] + # Only enforce name+year uniqueness when no external IDs are present + constraints = [ + models.UniqueConstraint( + fields=['name', 'year'], + condition=models.Q(tmdb_id__isnull=True) & models.Q(imdb_id__isnull=True), + 
name='unique_series_name_year_no_external_id' + ), + ] + + def __str__(self): + year_str = f" ({self.year})" if self.year else "" + return f"{self.name}{year_str}" + + +class Movie(models.Model): + """Movie content""" + uuid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True) + name = models.CharField(max_length=255) + description = models.TextField(blank=True, null=True) + year = models.IntegerField(blank=True, null=True) + rating = models.CharField(max_length=10, blank=True, null=True) + genre = models.CharField(max_length=255, blank=True, null=True) + duration_secs = models.IntegerField(blank=True, null=True, help_text="Duration in seconds") + logo = models.ForeignKey(VODLogo, on_delete=models.SET_NULL, null=True, blank=True, related_name='movie') + + # Metadata IDs for deduplication - these should be globally unique when present + tmdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="TMDB ID for metadata") + imdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="IMDB ID for metadata") + + # Additional metadata and properties + custom_properties = models.JSONField(blank=True, null=True, help_text='Additional metadata and properties for the movie') + + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + + class Meta: + verbose_name = 'Movie' + verbose_name_plural = 'Movies' + ordering = ['name'] + # Only enforce name+year uniqueness when no external IDs are present + constraints = [ + models.UniqueConstraint( + fields=['name', 'year'], + condition=models.Q(tmdb_id__isnull=True) & models.Q(imdb_id__isnull=True), + name='unique_movie_name_year_no_external_id' + ), + ] + + def __str__(self): + year_str = f" ({self.year})" if self.year else "" + return f"{self.name}{year_str}" + + +class Episode(models.Model): + """Episode content for TV series""" + uuid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True) + name = models.CharField(max_length=255) + description = models.TextField(blank=True, null=True) + air_date = models.DateField(blank=True, null=True) + rating = models.CharField(max_length=10, blank=True, null=True) + duration_secs = models.IntegerField(blank=True, null=True, help_text="Duration in seconds") + + # Episode specific fields + series = models.ForeignKey(Series, on_delete=models.CASCADE, related_name='episodes') + season_number = models.IntegerField(blank=True, null=True) + episode_number = models.IntegerField(blank=True, null=True) + + # Metadata IDs + tmdb_id = models.CharField(max_length=50, blank=True, null=True, help_text="TMDB ID for metadata", db_index=True) + imdb_id = models.CharField(max_length=50, blank=True, null=True, help_text="IMDB ID for metadata", db_index=True) + + # Custom properties for episode + custom_properties = models.JSONField(blank=True, null=True, help_text="Custom properties for this episode") + + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + + class Meta: + verbose_name = 'Episode' + verbose_name_plural = 'Episodes' + ordering = ['series__name', 'season_number', 'episode_number'] + unique_together = [ + ('series', 'season_number', 'episode_number'), + ] + + def __str__(self): + season_ep = f"S{self.season_number or 0:02d}E{self.episode_number or 0:02d}" + return f"{self.series.name} - {season_ep} - {self.name}" + + +# New relation models to link M3U accounts with VOD content + +class M3USeriesRelation(models.Model): + """Links M3U 
accounts to Series with provider-specific information""" + m3u_account = models.ForeignKey(M3UAccount, on_delete=models.CASCADE, related_name='series_relations') + series = models.ForeignKey(Series, on_delete=models.CASCADE, related_name='m3u_relations') + category = models.ForeignKey(VODCategory, on_delete=models.SET_NULL, null=True, blank=True) + + # Provider-specific fields - renamed to avoid clash with series ForeignKey + external_series_id = models.CharField(max_length=255, help_text="External series ID from M3U provider") + custom_properties = models.JSONField(blank=True, null=True, help_text="Provider-specific data") + + # Timestamps + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + last_episode_refresh = models.DateTimeField(blank=True, null=True, help_text="Last time episodes were refreshed") + last_seen = models.DateTimeField(default=timezone.now, help_text="Last time this relation was seen during VOD scan") + + class Meta: + verbose_name = 'M3U Series Relation' + verbose_name_plural = 'M3U Series Relations' + unique_together = [('m3u_account', 'external_series_id')] + + def __str__(self): + return f"{self.m3u_account.name} - {self.series.name}" + + +class M3UMovieRelation(models.Model): + """Links M3U accounts to Movies with provider-specific information""" + m3u_account = models.ForeignKey(M3UAccount, on_delete=models.CASCADE, related_name='movie_relations') + movie = models.ForeignKey(Movie, on_delete=models.CASCADE, related_name='m3u_relations') + category = models.ForeignKey(VODCategory, on_delete=models.SET_NULL, null=True, blank=True) + + # Streaming information (provider-specific) + stream_id = models.CharField(max_length=255, help_text="External stream ID from M3U provider") + container_extension = models.CharField(max_length=10, blank=True, null=True) + + # Provider-specific data + custom_properties = models.JSONField(blank=True, null=True, help_text="Provider-specific data like quality, language, etc.") + + # Timestamps + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + last_advanced_refresh = models.DateTimeField(blank=True, null=True, help_text="Last time advanced data was fetched from provider") + last_seen = models.DateTimeField(default=timezone.now, help_text="Last time this relation was seen during VOD scan") + + class Meta: + verbose_name = 'M3U Movie Relation' + verbose_name_plural = 'M3U Movie Relations' + unique_together = [('m3u_account', 'stream_id')] + + def __str__(self): + return f"{self.m3u_account.name} - {self.movie.name}" + + def get_stream_url(self): + """Get the full stream URL for this movie from this provider""" + # Build URL dynamically for XtreamCodes accounts + if self.m3u_account.account_type == 'XC': + server_url = self.m3u_account.server_url.rstrip('/') + username = self.m3u_account.username + password = self.m3u_account.password + return f"{server_url}/movie/{username}/{password}/{self.stream_id}.{self.container_extension or 'mp4'}" + else: + # For other account types, we would need another way to build URLs + return None + + +class M3UEpisodeRelation(models.Model): + """Links M3U accounts to Episodes with provider-specific information""" + m3u_account = models.ForeignKey(M3UAccount, on_delete=models.CASCADE, related_name='episode_relations') + episode = models.ForeignKey(Episode, on_delete=models.CASCADE, related_name='m3u_relations') + + # Streaming information (provider-specific) + stream_id = 
models.CharField(max_length=255, help_text="External stream ID from M3U provider") + container_extension = models.CharField(max_length=10, blank=True, null=True) + + # Provider-specific data + custom_properties = models.JSONField(blank=True, null=True, help_text="Provider-specific data like quality, language, etc.") + + # Timestamps + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + last_seen = models.DateTimeField(default=timezone.now, help_text="Last time this relation was seen during VOD scan") + + class Meta: + verbose_name = 'M3U Episode Relation' + verbose_name_plural = 'M3U Episode Relations' + unique_together = [('m3u_account', 'stream_id')] + + def __str__(self): + return f"{self.m3u_account.name} - {self.episode}" + + def get_stream_url(self): + """Get the full stream URL for this episode from this provider""" + from core.xtream_codes import Client as XtreamCodesClient + + if self.m3u_account.account_type == 'XC': + # For XtreamCodes accounts, build the URL dynamically + server_url = self.m3u_account.server_url.rstrip('/') + username = self.m3u_account.username + password = self.m3u_account.password + return f"{server_url}/series/{username}/{password}/{self.stream_id}.{self.container_extension or 'mp4'}" + else: + # We might support non XC accounts in the future + # For now, return None + return None + +class M3UVODCategoryRelation(models.Model): + """Links M3U accounts to categories with provider-specific information""" + m3u_account = models.ForeignKey(M3UAccount, on_delete=models.CASCADE, related_name='category_relations') + category = models.ForeignKey(VODCategory, on_delete=models.CASCADE, related_name='m3u_relations') + + enabled = models.BooleanField( + default=False, help_text="Set to false to deactivate this category for the M3U account" + ) + + custom_properties = models.JSONField(blank=True, null=True, help_text="Provider-specific data like quality, language, etc.") + + # Timestamps + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + + class Meta: + verbose_name = 'M3U VOD Category Relation' + verbose_name_plural = 'M3U VOD Category Relations' + unique_together = [('m3u_account', 'category')] + + def __str__(self): + return f"{self.m3u_account.name} - {self.category.name}" diff --git a/apps/vod/serializers.py b/apps/vod/serializers.py new file mode 100644 index 00000000..7747cb88 --- /dev/null +++ b/apps/vod/serializers.py @@ -0,0 +1,304 @@ +from rest_framework import serializers +from django.urls import reverse +from .models import ( + Series, VODCategory, Movie, Episode, VODLogo, + M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation, M3UVODCategoryRelation +) +from apps.m3u.serializers import M3UAccountSerializer + + +class VODLogoSerializer(serializers.ModelSerializer): + cache_url = serializers.SerializerMethodField() + movie_count = serializers.SerializerMethodField() + series_count = serializers.SerializerMethodField() + is_used = serializers.SerializerMethodField() + item_names = serializers.SerializerMethodField() + + class Meta: + model = VODLogo + fields = ["id", "name", "url", "cache_url", "movie_count", "series_count", "is_used", "item_names"] + + def validate_url(self, value): + """Validate that the URL is unique for creation or update""" + if self.instance and self.instance.url == value: + return value + + if VODLogo.objects.filter(url=value).exists(): + raise serializers.ValidationError("A VOD logo with this URL already exists.") + + 
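[Editor's note] The relation models above build playback URLs straight from the XtreamCodes account credentials. A minimal standalone sketch of the same URL shapes; the server URL, username, password and stream ID below are placeholder values, not taken from the patch:

def xc_movie_url(server_url: str, username: str, password: str,
                 stream_id: str, ext: str | None) -> str:
    # Mirrors M3UMovieRelation.get_stream_url: /movie/<user>/<pass>/<stream_id>.<ext>
    return f"{server_url.rstrip('/')}/movie/{username}/{password}/{stream_id}.{ext or 'mp4'}"

def xc_episode_url(server_url: str, username: str, password: str,
                   stream_id: str, ext: str | None) -> str:
    # Mirrors M3UEpisodeRelation.get_stream_url: /series/<user>/<pass>/<stream_id>.<ext>
    return f"{server_url.rstrip('/')}/series/{username}/{password}/{stream_id}.{ext or 'mp4'}"

print(xc_movie_url("http://provider.example:8080", "user", "pass", "12345", "mkv"))
# -> http://provider.example:8080/movie/user/pass/12345.mkv
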
return value + + def create(self, validated_data): + """Handle logo creation with proper URL validation""" + return VODLogo.objects.create(**validated_data) + + def update(self, instance, validated_data): + """Handle logo updates""" + for attr, value in validated_data.items(): + setattr(instance, attr, value) + instance.save() + return instance + + def get_cache_url(self, obj): + request = self.context.get("request") + if request: + return request.build_absolute_uri( + reverse("api:vod:vodlogo-cache", args=[obj.id]) + ) + return reverse("api:vod:vodlogo-cache", args=[obj.id]) + + def get_movie_count(self, obj): + """Get the number of movies using this logo""" + return obj.movie.count() if hasattr(obj, 'movie') else 0 + + def get_series_count(self, obj): + """Get the number of series using this logo""" + return obj.series.count() if hasattr(obj, 'series') else 0 + + def get_is_used(self, obj): + """Check if this logo is used by any movies or series""" + return (hasattr(obj, 'movie') and obj.movie.exists()) or (hasattr(obj, 'series') and obj.series.exists()) + + def get_item_names(self, obj): + """Get the list of movies and series using this logo""" + names = [] + + if hasattr(obj, 'movie'): + for movie in obj.movie.all()[:10]: # Limit to 10 items for performance + names.append(f"Movie: {movie.name}") + + if hasattr(obj, 'series'): + for series in obj.series.all()[:10]: # Limit to 10 items for performance + names.append(f"Series: {series.name}") + + return names + + +class M3UVODCategoryRelationSerializer(serializers.ModelSerializer): + category = serializers.IntegerField(source="category.id") + m3u_account = serializers.IntegerField(source="m3u_account.id") + + class Meta: + model = M3UVODCategoryRelation + fields = ["category", "m3u_account", "enabled"] + + +class VODCategorySerializer(serializers.ModelSerializer): + category_type_display = serializers.CharField(source='get_category_type_display', read_only=True) + m3u_accounts = M3UVODCategoryRelationSerializer(many=True, source="m3u_relations", read_only=True) + + class Meta: + model = VODCategory + fields = [ + "id", + "name", + "category_type", + "category_type_display", + "m3u_accounts", + ] + +class SeriesSerializer(serializers.ModelSerializer): + logo = VODLogoSerializer(read_only=True) + episode_count = serializers.SerializerMethodField() + + class Meta: + model = Series + fields = '__all__' + + def get_episode_count(self, obj): + return obj.episodes.count() + + +class MovieSerializer(serializers.ModelSerializer): + logo = VODLogoSerializer(read_only=True) + + class Meta: + model = Movie + fields = '__all__' + + +class EpisodeSerializer(serializers.ModelSerializer): + series = SeriesSerializer(read_only=True) + + class Meta: + model = Episode + fields = '__all__' + + +class M3USeriesRelationSerializer(serializers.ModelSerializer): + series = SeriesSerializer(read_only=True) + category = VODCategorySerializer(read_only=True) + m3u_account = M3UAccountSerializer(read_only=True) + + class Meta: + model = M3USeriesRelation + fields = '__all__' + + +class M3UMovieRelationSerializer(serializers.ModelSerializer): + movie = MovieSerializer(read_only=True) + category = VODCategorySerializer(read_only=True) + m3u_account = M3UAccountSerializer(read_only=True) + quality_info = serializers.SerializerMethodField() + + class Meta: + model = M3UMovieRelation + fields = '__all__' + + def get_quality_info(self, obj): + """Extract quality information from various sources""" + quality_info = {} + + # 1. 
Check custom_properties first + if obj.custom_properties: + if obj.custom_properties.get('quality'): + quality_info['quality'] = obj.custom_properties['quality'] + return quality_info + elif obj.custom_properties.get('resolution'): + quality_info['resolution'] = obj.custom_properties['resolution'] + return quality_info + + # 2. Try to get detailed info from the movie if available + movie = obj.movie + if hasattr(movie, 'video') and movie.video: + video_data = movie.video + if isinstance(video_data, dict) and 'width' in video_data and 'height' in video_data: + width = video_data['width'] + height = video_data['height'] + quality_info['resolution'] = f"{width}x{height}" + + # Convert to common quality names (prioritize width for ultrawide/cinematic content) + if width >= 3840: + quality_info['quality'] = '4K' + elif width >= 1920: + quality_info['quality'] = '1080p' + elif width >= 1280: + quality_info['quality'] = '720p' + elif width >= 854: + quality_info['quality'] = '480p' + else: + quality_info['quality'] = f"{width}x{height}" + return quality_info + + # 3. Extract from movie name/title + if movie and movie.name: + name = movie.name + if '4K' in name or '2160p' in name: + quality_info['quality'] = '4K' + return quality_info + elif '1080p' in name or 'FHD' in name: + quality_info['quality'] = '1080p' + return quality_info + elif '720p' in name or 'HD' in name: + quality_info['quality'] = '720p' + return quality_info + elif '480p' in name: + quality_info['quality'] = '480p' + return quality_info + + # 4. Try bitrate as last resort + if hasattr(movie, 'bitrate') and movie.bitrate and movie.bitrate > 0: + bitrate = movie.bitrate + if bitrate >= 6000: + quality_info['quality'] = '4K' + elif bitrate >= 3000: + quality_info['quality'] = '1080p' + elif bitrate >= 1500: + quality_info['quality'] = '720p' + else: + quality_info['bitrate'] = f"{round(bitrate/1000)}Mbps" + return quality_info + + # 5. Fallback - no quality info available + return None + + +class M3UEpisodeRelationSerializer(serializers.ModelSerializer): + episode = EpisodeSerializer(read_only=True) + m3u_account = M3UAccountSerializer(read_only=True) + quality_info = serializers.SerializerMethodField() + + class Meta: + model = M3UEpisodeRelation + fields = '__all__' + + def get_quality_info(self, obj): + """Extract quality information from various sources""" + quality_info = {} + + # 1. Check custom_properties first + if obj.custom_properties: + if obj.custom_properties.get('quality'): + quality_info['quality'] = obj.custom_properties['quality'] + return quality_info + elif obj.custom_properties.get('resolution'): + quality_info['resolution'] = obj.custom_properties['resolution'] + return quality_info + + # 2. Try to get detailed info from the episode if available + episode = obj.episode + if hasattr(episode, 'video') and episode.video: + video_data = episode.video + if isinstance(video_data, dict) and 'width' in video_data and 'height' in video_data: + width = video_data['width'] + height = video_data['height'] + quality_info['resolution'] = f"{width}x{height}" + + # Convert to common quality names (prioritize width for ultrawide/cinematic content) + if width >= 3840: + quality_info['quality'] = '4K' + elif width >= 1920: + quality_info['quality'] = '1080p' + elif width >= 1280: + quality_info['quality'] = '720p' + elif width >= 854: + quality_info['quality'] = '480p' + else: + quality_info['quality'] = f"{width}x{height}" + return quality_info + + # 3. 
Extract from episode name/title + if episode and episode.name: + name = episode.name + if '4K' in name or '2160p' in name: + quality_info['quality'] = '4K' + return quality_info + elif '1080p' in name or 'FHD' in name: + quality_info['quality'] = '1080p' + return quality_info + elif '720p' in name or 'HD' in name: + quality_info['quality'] = '720p' + return quality_info + elif '480p' in name: + quality_info['quality'] = '480p' + return quality_info + + # 4. Try bitrate as last resort + if hasattr(episode, 'bitrate') and episode.bitrate and episode.bitrate > 0: + bitrate = episode.bitrate + if bitrate >= 6000: + quality_info['quality'] = '4K' + elif bitrate >= 3000: + quality_info['quality'] = '1080p' + elif bitrate >= 1500: + quality_info['quality'] = '720p' + else: + quality_info['bitrate'] = f"{round(bitrate/1000)}Mbps" + return quality_info + + # 5. Fallback - no quality info available + return None + + +class EnhancedSeriesSerializer(serializers.ModelSerializer): + """Enhanced serializer for series with provider information""" + logo = VODLogoSerializer(read_only=True) + providers = M3USeriesRelationSerializer(source='m3u_relations', many=True, read_only=True) + episode_count = serializers.SerializerMethodField() + + class Meta: + model = Series + fields = '__all__' + + def get_episode_count(self, obj): + return obj.episodes.count() diff --git a/apps/vod/tasks.py b/apps/vod/tasks.py new file mode 100644 index 00000000..e34e00e6 --- /dev/null +++ b/apps/vod/tasks.py @@ -0,0 +1,2026 @@ +from celery import shared_task, current_app, group +from django.utils import timezone +from django.db import transaction, IntegrityError +from django.db.models import Q +from apps.m3u.models import M3UAccount +from core.xtream_codes import Client as XtreamCodesClient +from .models import ( + VODCategory, Series, Movie, Episode, VODLogo, + M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation, M3UVODCategoryRelation +) +from datetime import datetime +import logging +import json +import re + +logger = logging.getLogger(__name__) + + +@shared_task +def refresh_vod_content(account_id): + """Refresh VOD content for an M3U account with batch processing for improved performance""" + # Import here to avoid circular import + from apps.m3u.tasks import send_m3u_update + + try: + account = M3UAccount.objects.get(id=account_id, is_active=True) + + if account.account_type != M3UAccount.Types.XC: + logger.warning(f"VOD refresh called for non-XC account {account_id}") + return "VOD refresh only available for XtreamCodes accounts" + + logger.info(f"Starting batch VOD refresh for account {account.name}") + start_time = timezone.now() + + # Send start notification + send_m3u_update(account_id, "vod_refresh", 0, status="processing") + + with XtreamCodesClient( + account.server_url, + account.username, + account.password, + account.get_user_agent().user_agent + ) as client: + + movie_categories, series_categories = refresh_categories(account.id, client) + + logger.debug("Fetching relations for filtering category filtering") + relations = { rel.category_id: rel for rel in M3UVODCategoryRelation.objects + .filter(m3u_account=account) + .select_related("category", "m3u_account") + } + + # Refresh movies with batch processing (pass scan start time) + refresh_movies(client, account, movie_categories, relations, scan_start_time=start_time) + + # Refresh series with batch processing (pass scan start time) + refresh_series(client, account, series_categories, relations, scan_start_time=start_time) + + end_time = timezone.now() + 
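[Editor's note] The quality resolution order used by get_quality_info above is: explicit custom_properties, then probed video dimensions, then markers in the title, then bitrate. The dimension step can be summarised in a small sketch; the width thresholds match the serializer code, the sample call is made up:

def quality_from_width(width: int, height: int) -> str:
    # Width is preferred over height so ultrawide/cinematic encodes
    # are not under-reported, matching the serializer comment.
    if width >= 3840:
        return '4K'
    if width >= 1920:
        return '1080p'
    if width >= 1280:
        return '720p'
    if width >= 854:
        return '480p'
    return f"{width}x{height}"

print(quality_from_width(3840, 1600))  # '4K' despite the 1600px height
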
duration = (end_time - start_time).total_seconds() + + logger.info(f"Batch VOD refresh completed for account {account.name} in {duration:.2f} seconds") + + # Cleanup orphaned VOD content after refresh (scoped to this account only) + logger.info(f"Starting cleanup of orphaned VOD content for account {account.name}") + cleanup_result = cleanup_orphaned_vod_content(account_id=account_id, scan_start_time=start_time) + logger.info(f"VOD cleanup completed: {cleanup_result}") + + # Send completion notification + send_m3u_update(account_id, "vod_refresh", 100, status="success", + message=f"VOD refresh completed in {duration:.2f} seconds") + + return f"Batch VOD refresh completed for account {account.name} in {duration:.2f} seconds" + + except Exception as e: + logger.error(f"Error refreshing VOD for account {account_id}: {str(e)}") + + # Send error notification + send_m3u_update(account_id, "vod_refresh", 100, status="error", + message=f"VOD refresh failed: {str(e)}") + + return f"VOD refresh failed: {str(e)}" + +def refresh_categories(account_id, client=None): + account = M3UAccount.objects.get(id=account_id, is_active=True) + + if not client: + client = XtreamCodesClient( + account.server_url, + account.username, + account.password, + account.get_user_agent().user_agent + ) + logger.info(f"Refreshing movie categories for account {account.name}") + + # First, get the category list to properly map category IDs and names + logger.info("Fetching movie categories from provider...") + categories_data = client.get_vod_categories() + category_map = batch_create_categories(categories_data, 'movie', account) + + # Create a mapping from provider category IDs to our category objects + movies_category_id_map = {} + for cat_data in categories_data: + cat_name = cat_data.get('category_name', 'Unknown') + provider_cat_id = cat_data.get('category_id') + our_category = category_map.get(cat_name) + if provider_cat_id and our_category: + movies_category_id_map[str(provider_cat_id)] = our_category + + # Get the category list to properly map category IDs and names + logger.info("Fetching series categories from provider...") + categories_data = client.get_series_categories() + category_map = batch_create_categories(categories_data, 'series', account) + + # Create a mapping from provider category IDs to our category objects + series_category_id_map = {} + for cat_data in categories_data: + cat_name = cat_data.get('category_name', 'Unknown') + provider_cat_id = cat_data.get('category_id') + our_category = category_map.get(cat_name) + if provider_cat_id and our_category: + series_category_id_map[str(provider_cat_id)] = our_category + + return movies_category_id_map, series_category_id_map + +def refresh_movies(client, account, categories_by_provider, relations, scan_start_time=None): + """Refresh movie content using single API call for all movies""" + logger.info(f"Refreshing movies for account {account.name}") + + # Get all movies in a single API call + logger.info("Fetching all movies from provider...") + all_movies_data = client.get_vod_streams() # No category_id = get all movies + + # Process movies in chunks using the simple approach + chunk_size = 1000 + total_movies = len(all_movies_data) + total_chunks = (total_movies + chunk_size - 1) // chunk_size if total_movies > 0 else 0 + + for i in range(0, total_movies, chunk_size): + chunk = all_movies_data[i:i + chunk_size] + chunk_num = (i // chunk_size) + 1 + + logger.info(f"Processing movie chunk {chunk_num}/{total_chunks} ({len(chunk)} movies)") + 
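[Editor's note] Both refresh_movies and refresh_series fetch the full catalogue in one API call and then walk it in fixed-size chunks of 1000. The chunk arithmetic in isolation, with a fabricated item list standing in for the provider response:

items = list(range(2500))          # stand-in for all_movies_data / all_series_data
chunk_size = 1000
total = len(items)
total_chunks = (total + chunk_size - 1) // chunk_size if total > 0 else 0

for i in range(0, total, chunk_size):
    chunk = items[i:i + chunk_size]
    chunk_num = (i // chunk_size) + 1
    print(f"chunk {chunk_num}/{total_chunks}: {len(chunk)} items")
# chunk 1/3: 1000 items, chunk 2/3: 1000 items, chunk 3/3: 500 items
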
process_movie_batch(account, chunk, categories_by_provider, relations, scan_start_time) + + logger.info(f"Completed processing all {total_movies} movies in {total_chunks} chunks") + + +def refresh_series(client, account, categories_by_provider, relations, scan_start_time=None): + """Refresh series content using single API call for all series""" + logger.info(f"Refreshing series for account {account.name}") + + # Get all series in a single API call + logger.info("Fetching all series from provider...") + all_series_data = client.get_series() # No category_id = get all series + + # Process series in chunks using the simple approach + chunk_size = 1000 + total_series = len(all_series_data) + total_chunks = (total_series + chunk_size - 1) // chunk_size if total_series > 0 else 0 + + for i in range(0, total_series, chunk_size): + chunk = all_series_data[i:i + chunk_size] + chunk_num = (i // chunk_size) + 1 + + logger.info(f"Processing series chunk {chunk_num}/{total_chunks} ({len(chunk)} series)") + process_series_batch(account, chunk, categories_by_provider, relations, scan_start_time) + + logger.info(f"Completed processing all {total_series} series in {total_chunks} chunks") + + +def batch_create_categories(categories_data, category_type, account): + """Create categories in batch and return a mapping""" + category_names = [cat.get('category_name', 'Unknown') for cat in categories_data] + + relations_to_create = [] + + # Get existing categories + logger.debug(f"Starting VOD {category_type} category refresh") + existing_categories = { + cat.name: cat for cat in VODCategory.objects.filter( + name__in=category_names, + category_type=category_type + ) + } + + logger.debug(f"Found {len(existing_categories)} existing categories") + + # Check if we should auto-enable new categories based on account settings + account_custom_props = account.custom_properties or {} + if category_type == 'movie': + auto_enable_new = account_custom_props.get("auto_enable_new_groups_vod", True) + else: # series + auto_enable_new = account_custom_props.get("auto_enable_new_groups_series", True) + + # Create missing categories in batch + new_categories = [] + + for name in category_names: + if name not in existing_categories: + # Always create new categories + new_categories.append(VODCategory(name=name, category_type=category_type)) + else: + # Existing category - create relationship with enabled based on auto_enable setting + # (category exists globally but is new to this account) + relations_to_create.append(M3UVODCategoryRelation( + category=existing_categories[name], + m3u_account=account, + custom_properties={}, + enabled=auto_enable_new, + )) + + logger.debug(f"{len(new_categories)} new categories found") + logger.debug(f"{len(relations_to_create)} existing categories found for account") + + if new_categories: + logger.debug("Creating new categories...") + created_categories = list(VODCategory.bulk_create_and_fetch(new_categories, ignore_conflicts=True)) + + # Create relations for newly created categories with enabled based on auto_enable setting + for cat in created_categories: + if not auto_enable_new: + logger.info(f"New {category_type} category '{cat.name}' created but DISABLED - auto_enable_new_groups is disabled for account {account.id}") + + relations_to_create.append( + M3UVODCategoryRelation( + category=cat, + m3u_account=account, + custom_properties={}, + enabled=auto_enable_new, + ) + ) + + # Convert to dictionary for easy lookup + newly_created = {cat.name: cat for cat in created_categories} + 
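[Editor's note] Whether a newly seen category is enabled for an account is driven by the account's custom_properties: auto_enable_new_groups_vod for movie categories, auto_enable_new_groups_series for series categories, both defaulting to True. A reduced sketch of that decision, with hypothetical settings dicts:

def should_auto_enable(account_custom_props: dict | None, category_type: str) -> bool:
    # Same keys and defaults as batch_create_categories.
    props = account_custom_props or {}
    key = ("auto_enable_new_groups_vod" if category_type == "movie"
           else "auto_enable_new_groups_series")
    return props.get(key, True)

print(should_auto_enable({}, "movie"))                                         # True (default)
print(should_auto_enable({"auto_enable_new_groups_series": False}, "series"))  # False
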
existing_categories.update(newly_created) + + # Create missing relations + logger.debug("Updating category account relations...") + M3UVODCategoryRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True) + + # Delete orphaned category relationships (categories no longer in the M3U source) + current_category_ids = set(existing_categories[name].id for name in category_names) + existing_relations = M3UVODCategoryRelation.objects.filter( + m3u_account=account, + category__category_type=category_type + ).select_related('category') + + relations_to_delete = [ + rel for rel in existing_relations + if rel.category_id not in current_category_ids + ] + + if relations_to_delete: + M3UVODCategoryRelation.objects.filter( + id__in=[rel.id for rel in relations_to_delete] + ).delete() + logger.info(f"Deleted {len(relations_to_delete)} orphaned {category_type} category relationships for account {account.id}: {[rel.category.name for rel in relations_to_delete]}") + + # Check if any of the deleted relationships left categories with no remaining associations + orphaned_category_ids = [] + for rel in relations_to_delete: + category = rel.category + + # Check if this category has any remaining M3U account relationships + remaining_relationships = M3UVODCategoryRelation.objects.filter( + category=category + ).exists() + + # If no relationships remain, it's safe to delete the category + if not remaining_relationships: + orphaned_category_ids.append(category.id) + logger.debug(f"Category '{category.name}' has no remaining associations and will be deleted") + + # Delete orphaned categories + if orphaned_category_ids: + VODCategory.objects.filter(id__in=orphaned_category_ids).delete() + logger.info(f"Deleted {len(orphaned_category_ids)} orphaned {category_type} categories with no remaining associations") + + # πŸ”‘ Fetch all relations for this account, for all categories + # relations = { rel.id: rel for rel in M3UVODCategoryRelation.objects + # .filter(category__in=existing_categories.values(), m3u_account=account) + # .select_related("category", "m3u_account") + # } + + # Attach relations to category objects + # for rel in relations: + # existing_categories[rel.category.name]['relation'] = { + # "relation_id": rel.id, + # "category_id": rel.category_id, + # "account_id": rel.m3u_account_id, + # } + + + return existing_categories + + + +@shared_task +def process_movie_batch(account, batch, categories, relations, scan_start_time=None): + """Process a batch of movies using simple bulk operations like M3U processing""" + logger.info(f"Processing movie batch of {len(batch)} movies for account {account.name}") + + movies_to_create = [] + movies_to_update = [] + relations_to_create = [] + relations_to_update = [] + movie_keys = {} # For deduplication like M3U stream_hashes + + # Process each movie in the batch + for movie_data in batch: + try: + stream_id = str(movie_data.get('stream_id')) + name = movie_data.get('name', 'Unknown') + + # Get category with proper error handling + category = None + + provider_cat_id = str(movie_data.get('category_id', '')) if movie_data.get('category_id') else None + movie_data['_provider_category_id'] = provider_cat_id + movie_data['_category_id'] = None + + logger.debug(f"Checking for existing provider category ID {provider_cat_id}") + if provider_cat_id in categories: + category = categories[provider_cat_id] + movie_data['_category_id'] = category.id + logger.debug(f"Found category {category.name} (ID: {category.id}) for movie {name}") + + relation = 
relations.get(category.id, None) + if relation and not relation.enabled: + logger.debug("Skipping disabled category") + continue + else: + logger.warning(f"No category ID provided for movie {name}") + + # Extract metadata + year = extract_year_from_data(movie_data, 'name') + tmdb_id = movie_data.get('tmdb_id') or movie_data.get('tmdb') + imdb_id = movie_data.get('imdb_id') or movie_data.get('imdb') + + # Clean empty string IDs + if tmdb_id == '': + tmdb_id = None + if imdb_id == '': + imdb_id = None + + # Create a unique key for this movie (priority: TMDB > IMDB > name+year) + if tmdb_id: + movie_key = f"tmdb_{tmdb_id}" + elif imdb_id: + movie_key = f"imdb_{imdb_id}" + else: + movie_key = f"name_{name}_{year or 'None'}" + + # Skip duplicates in this batch + if movie_key in movie_keys: + continue + + # Prepare movie properties + description = movie_data.get('description') or movie_data.get('plot') or '' + rating = normalize_rating(movie_data.get('rating') or movie_data.get('vote_average')) + genre = movie_data.get('genre') or movie_data.get('category_name') or '' + duration_secs = extract_duration_from_data(movie_data) + trailer_raw = movie_data.get('trailer') or movie_data.get('youtube_trailer') or '' + trailer = extract_string_from_array_or_string(trailer_raw) if trailer_raw else None + logo_url = movie_data.get('stream_icon') or '' + + movie_props = { + 'name': name, + 'year': year, + 'tmdb_id': tmdb_id, + 'imdb_id': imdb_id, + 'description': description, + 'rating': rating, + 'genre': genre, + 'duration_secs': duration_secs, + 'custom_properties': {'trailer': trailer} if trailer else None, + } + + movie_keys[movie_key] = { + 'props': movie_props, + 'stream_id': stream_id, + 'category': category, + 'movie_data': movie_data, + 'logo_url': logo_url # Keep logo URL for later processing + } + + except Exception as e: + logger.error(f"Error preparing movie {movie_data.get('name', 'Unknown')}: {str(e)}") + + # Collect all logo URLs and create logos in batch + logo_urls = set() + logo_url_to_name = {} # Map logo URLs to movie names + for data in movie_keys.values(): + logo_url = data.get('logo_url') + if logo_url and len(logo_url) <= 500: # Ignore overly long URLs (likely embedded image data) + logo_urls.add(logo_url) + # Map this logo URL to the movie name (use first occurrence if multiple movies share same logo) + if logo_url not in logo_url_to_name: + movie_name = data['props'].get('name', 'Unknown Movie') + logo_url_to_name[logo_url] = movie_name + + # Get existing logos + existing_logos = { + logo.url: logo for logo in VODLogo.objects.filter(url__in=logo_urls) + } if logo_urls else {} + + # Create missing logos + logos_to_create = [] + for logo_url in logo_urls: + if logo_url not in existing_logos: + movie_name = logo_url_to_name.get(logo_url, 'Unknown Movie') + logos_to_create.append(VODLogo(url=logo_url, name=movie_name)) + + if logos_to_create: + try: + VODLogo.objects.bulk_create(logos_to_create, ignore_conflicts=True) + # Refresh existing_logos with newly created ones + new_logo_urls = [logo.url for logo in logos_to_create] + newly_created = { + logo.url: logo for logo in VODLogo.objects.filter(url__in=new_logo_urls) + } + existing_logos.update(newly_created) + logger.info(f"Created {len(newly_created)} new VOD logos for movies") + except Exception as e: + logger.warning(f"Failed to create VOD logos: {e}") + + # Get existing movies based on our keys + existing_movies = {} + + # Query by TMDB IDs + tmdb_keys = [k for k in movie_keys.keys() if k.startswith('tmdb_')] + tmdb_ids = 
[k.replace('tmdb_', '') for k in tmdb_keys] + if tmdb_ids: + for movie in Movie.objects.filter(tmdb_id__in=tmdb_ids): + existing_movies[f"tmdb_{movie.tmdb_id}"] = movie + + # Query by IMDB IDs + imdb_keys = [k for k in movie_keys.keys() if k.startswith('imdb_')] + imdb_ids = [k.replace('imdb_', '') for k in imdb_keys] + if imdb_ids: + for movie in Movie.objects.filter(imdb_id__in=imdb_ids): + existing_movies[f"imdb_{movie.imdb_id}"] = movie + + # Query by name+year for movies without external IDs + name_year_keys = [k for k in movie_keys.keys() if k.startswith('name_')] + if name_year_keys: + for movie in Movie.objects.filter(tmdb_id__isnull=True, imdb_id__isnull=True): + key = f"name_{movie.name}_{movie.year or 'None'}" + if key in name_year_keys: + existing_movies[key] = movie + + # Get existing relations + stream_ids = [data['stream_id'] for data in movie_keys.values()] + existing_relations = { + rel.stream_id: rel for rel in M3UMovieRelation.objects.filter( + m3u_account=account, + stream_id__in=stream_ids + ).select_related('movie') + } + + # Process each movie + for movie_key, data in movie_keys.items(): + movie_props = data['props'] + stream_id = data['stream_id'] + category = data['category'] + movie_data = data['movie_data'] + logo_url = data.get('logo_url') + + if movie_key in existing_movies: + # Update existing movie + movie = existing_movies[movie_key] + updated = False + + for field, value in movie_props.items(): + if field == 'custom_properties': + if value != movie.custom_properties: + movie.custom_properties = value + updated = True + elif getattr(movie, field) != value: + setattr(movie, field, value) + updated = True + + # Handle logo assignment for existing movies + logo_updated = False + if logo_url and len(logo_url) <= 500 and logo_url in existing_logos: + new_logo = existing_logos[logo_url] + if movie.logo != new_logo: + movie._logo_to_update = new_logo + logo_updated = True + elif (not logo_url or len(logo_url) > 500) and movie.logo: + # Clear logo if no logo URL provided or URL is too long + movie._logo_to_update = None + logo_updated = True + + if updated or logo_updated: + movies_to_update.append(movie) + else: + # Create new movie + movie = Movie(**movie_props) + + # Assign logo if available + if logo_url and len(logo_url) <= 500 and logo_url in existing_logos: + movie.logo = existing_logos[logo_url] + + movies_to_create.append(movie) + + # Handle relation + if stream_id in existing_relations: + # Update existing relation + relation = existing_relations[stream_id] + relation.movie = movie + relation.category = category + relation.container_extension = movie_data.get('container_extension', 'mp4') + relation.custom_properties = { + 'basic_data': movie_data, + 'detailed_fetched': False + } + relation.last_seen = scan_start_time or timezone.now() # Mark as seen during this scan + relations_to_update.append(relation) + else: + # Create new relation + relation = M3UMovieRelation( + m3u_account=account, + movie=movie, + category=category, + stream_id=stream_id, + container_extension=movie_data.get('container_extension', 'mp4'), + custom_properties={ + 'basic_data': movie_data, + 'detailed_fetched': False + }, + last_seen=scan_start_time or timezone.now() # Mark as seen during this scan + ) + relations_to_create.append(relation) + + # Execute batch operations + logger.info(f"Executing batch operations: {len(movies_to_create)} movies to create, {len(movies_to_update)} to update") + + try: + with transaction.atomic(): + # First, create new movies and get their IDs + 
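[Editor's note] Movies (and series) are deduplicated with a priority key: TMDB ID, then IMDB ID, then name plus year, which is also how the existing-record lookup above groups keys by prefix. The key construction in isolation; the sample records are invented:

def dedup_key(name: str, year: int | None, tmdb_id: str | None, imdb_id: str | None) -> str:
    # Empty-string IDs are treated as missing, matching the batch code.
    tmdb_id = tmdb_id or None
    imdb_id = imdb_id or None
    if tmdb_id:
        return f"tmdb_{tmdb_id}"
    if imdb_id:
        return f"imdb_{imdb_id}"
    return f"name_{name}_{year or 'None'}"

print(dedup_key("Blade Runner", 1982, "78", ""))          # tmdb_78
print(dedup_key("Blade Runner", 1982, "", "tt0083658"))   # imdb_tt0083658
print(dedup_key("Blade Runner", 1982, "", ""))            # name_Blade Runner_1982
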
created_movies = {} + if movies_to_create: + Movie.objects.bulk_create(movies_to_create, ignore_conflicts=True) + + # Get the newly created movies with their IDs + # We need to re-fetch them to get the primary keys + for movie in movies_to_create: + # Find the movie by its unique identifiers + if movie.tmdb_id: + db_movie = Movie.objects.filter(tmdb_id=movie.tmdb_id).first() + elif movie.imdb_id: + db_movie = Movie.objects.filter(imdb_id=movie.imdb_id).first() + else: + db_movie = Movie.objects.filter( + name=movie.name, + year=movie.year, + tmdb_id__isnull=True, + imdb_id__isnull=True + ).first() + + if db_movie: + created_movies[id(movie)] = db_movie + + # Update existing movies + if movies_to_update: + # First, update all fields except logo to avoid unsaved related object issues + Movie.objects.bulk_update(movies_to_update, [ + 'description', 'rating', 'genre', 'year', 'tmdb_id', 'imdb_id', + 'duration_secs', 'custom_properties' + ]) + + # Handle logo updates separately to avoid bulk_update issues + for movie in movies_to_update: + if hasattr(movie, '_logo_to_update'): + movie.logo = movie._logo_to_update + movie.save(update_fields=['logo']) + + # Update relations to reference the correct movie objects + for relation in relations_to_create: + if id(relation.movie) in created_movies: + relation.movie = created_movies[id(relation.movie)] + + # Handle relations + if relations_to_create: + M3UMovieRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True) + + if relations_to_update: + M3UMovieRelation.objects.bulk_update(relations_to_update, [ + 'movie', 'category', 'container_extension', 'custom_properties', 'last_seen' + ]) + + logger.info("Movie batch processing completed successfully!") + return f"Movie batch processed: {len(movies_to_create)} created, {len(movies_to_update)} updated" + + except Exception as e: + logger.error(f"Movie batch processing failed: {str(e)}") + return f"Movie batch processing failed: {str(e)}" + + +@shared_task +def process_series_batch(account, batch, categories, relations, scan_start_time=None): + """Process a batch of series using simple bulk operations like M3U processing""" + logger.info(f"Processing series batch of {len(batch)} series for account {account.name}") + + series_to_create = [] + series_to_update = [] + relations_to_create = [] + relations_to_update = [] + series_keys = {} # For deduplication like M3U stream_hashes + + # Process each series in the batch + for series_data in batch: + try: + series_id = str(series_data.get('series_id')) + name = series_data.get('name', 'Unknown') + + # Get category with proper error handling + category = None + + provider_cat_id = str(series_data.get('category_id', '')) if series_data.get('category_id') else None + series_data['_provider_category_id'] = provider_cat_id + series_data['_category_id'] = None + + if provider_cat_id in categories: + category = categories[provider_cat_id] + series_data['_category_id'] = category.id + logger.debug(f"Found category {category.name} (ID: {category.id}) for series {name}") + relation = relations.get(category.id, None) + + if relation and not relation.enabled: + logger.debug("Skipping disabled category") + continue + else: + logger.warning(f"No category ID provided for series {name}") + + # Extract metadata + year = extract_year(series_data.get('releaseDate', '')) + if not year and series_data.get('release_date'): + year = extract_year(series_data.get('release_date')) + + tmdb_id = series_data.get('tmdb') or series_data.get('tmdb_id') + imdb_id = 
series_data.get('imdb') or series_data.get('imdb_id') + + # Clean empty string IDs + if tmdb_id == '': + tmdb_id = None + if imdb_id == '': + imdb_id = None + + # Create a unique key for this series (priority: TMDB > IMDB > name+year) + if tmdb_id: + series_key = f"tmdb_{tmdb_id}" + elif imdb_id: + series_key = f"imdb_{imdb_id}" + else: + series_key = f"name_{name}_{year or 'None'}" + + # Skip duplicates in this batch + if series_key in series_keys: + continue + + # Prepare series properties + description = series_data.get('plot', '') + rating = normalize_rating(series_data.get('rating')) + genre = series_data.get('genre', '') + logo_url = series_data.get('cover') or '' + + # Extract additional metadata for custom_properties + additional_metadata = {} + for key in ['backdrop_path', 'poster_path', 'original_name', 'first_air_date', 'last_air_date', + 'episode_run_time', 'status', 'type', 'cast', 'director', 'country', 'language', + 'releaseDate', 'youtube_trailer', 'category_id', 'age', 'seasons']: + value = series_data.get(key) + if value: + # For string-like fields that might be arrays, extract clean strings + if key in ['poster_path', 'youtube_trailer', 'cast', 'director']: + clean_value = extract_string_from_array_or_string(value) + if clean_value: + additional_metadata[key] = clean_value + elif key == 'backdrop_path': + clean_value = extract_string_from_array_or_string(value) + if clean_value: + additional_metadata[key] = [clean_value] + else: + # For other fields, keep as-is if not null/empty + if value is not None and value != '' and value != []: + additional_metadata[key] = value + + series_props = { + 'name': name, + 'year': year, + 'tmdb_id': tmdb_id, + 'imdb_id': imdb_id, + 'description': description, + 'rating': rating, + 'genre': genre, + 'custom_properties': additional_metadata if additional_metadata else None, + } + + series_keys[series_key] = { + 'props': series_props, + 'series_id': series_id, + 'category': category, + 'series_data': series_data, + 'logo_url': logo_url # Keep logo URL for later processing + } + + except Exception as e: + logger.error(f"Error preparing series {series_data.get('name', 'Unknown')}: {str(e)}") + + # Collect all logo URLs and create logos in batch + logo_urls = set() + logo_url_to_name = {} # Map logo URLs to series names + for data in series_keys.values(): + logo_url = data.get('logo_url') + if logo_url and len(logo_url) <= 500: # Ignore overly long URLs (likely embedded image data) + logo_urls.add(logo_url) + # Map this logo URL to the series name (use first occurrence if multiple series share same logo) + if logo_url not in logo_url_to_name: + series_name = data['props'].get('name', 'Unknown Series') + logo_url_to_name[logo_url] = series_name + + # Get existing logos + existing_logos = { + logo.url: logo for logo in VODLogo.objects.filter(url__in=logo_urls) + } if logo_urls else {} + + # Create missing logos + logos_to_create = [] + for logo_url in logo_urls: + if logo_url not in existing_logos: + series_name = logo_url_to_name.get(logo_url, 'Unknown Series') + logos_to_create.append(VODLogo(url=logo_url, name=series_name)) + + if logos_to_create: + try: + VODLogo.objects.bulk_create(logos_to_create, ignore_conflicts=True) + # Refresh existing_logos with newly created ones + new_logo_urls = [logo.url for logo in logos_to_create] + newly_created = { + logo.url: logo for logo in VODLogo.objects.filter(url__in=new_logo_urls) + } + existing_logos.update(newly_created) + logger.info(f"Created {len(newly_created)} new VOD logos for series") + 
except Exception as e: + logger.warning(f"Failed to create VOD logos: {e}") + + # Get existing series based on our keys - same pattern as movies + existing_series = {} + + # Query by TMDB IDs + tmdb_keys = [k for k in series_keys.keys() if k.startswith('tmdb_')] + tmdb_ids = [k.replace('tmdb_', '') for k in tmdb_keys] + if tmdb_ids: + for series in Series.objects.filter(tmdb_id__in=tmdb_ids): + existing_series[f"tmdb_{series.tmdb_id}"] = series + + # Query by IMDB IDs + imdb_keys = [k for k in series_keys.keys() if k.startswith('imdb_')] + imdb_ids = [k.replace('imdb_', '') for k in imdb_keys] + if imdb_ids: + for series in Series.objects.filter(imdb_id__in=imdb_ids): + existing_series[f"imdb_{series.imdb_id}"] = series + + # Query by name+year for series without external IDs + name_year_keys = [k for k in series_keys.keys() if k.startswith('name_')] + if name_year_keys: + for series in Series.objects.filter(tmdb_id__isnull=True, imdb_id__isnull=True): + key = f"name_{series.name}_{series.year or 'None'}" + if key in name_year_keys: + existing_series[key] = series + + # Get existing relations + series_ids = [data['series_id'] for data in series_keys.values()] + existing_relations = { + rel.external_series_id: rel for rel in M3USeriesRelation.objects.filter( + m3u_account=account, + external_series_id__in=series_ids + ).select_related('series') + } + + # Process each series + for series_key, data in series_keys.items(): + series_props = data['props'] + series_id = data['series_id'] + category = data['category'] + series_data = data['series_data'] + logo_url = data.get('logo_url') + + if series_key in existing_series: + # Update existing series + series = existing_series[series_key] + updated = False + + for field, value in series_props.items(): + if field == 'custom_properties': + if value != series.custom_properties: + series.custom_properties = value + updated = True + elif getattr(series, field) != value: + setattr(series, field, value) + updated = True + + # Handle logo assignment for existing series + logo_updated = False + if logo_url and len(logo_url) <= 500 and logo_url in existing_logos: + new_logo = existing_logos[logo_url] + if series.logo != new_logo: + series._logo_to_update = new_logo + logo_updated = True + elif (not logo_url or len(logo_url) > 500) and series.logo: + # Clear logo if no logo URL provided or URL is too long + series._logo_to_update = None + logo_updated = True + + if updated or logo_updated: + series_to_update.append(series) + else: + # Create new series + series = Series(**series_props) + + # Assign logo if available + if logo_url and len(logo_url) <= 500 and logo_url in existing_logos: + series.logo = existing_logos[logo_url] + + series_to_create.append(series) + + # Handle relation + if series_id in existing_relations: + # Update existing relation + relation = existing_relations[series_id] + relation.series = series + relation.category = category + relation.custom_properties = { + 'basic_data': series_data, + 'detailed_fetched': False, + 'episodes_fetched': False + } + relation.last_seen = scan_start_time or timezone.now() # Mark as seen during this scan + relations_to_update.append(relation) + else: + # Create new relation + relation = M3USeriesRelation( + m3u_account=account, + series=series, + category=category, + external_series_id=series_id, + custom_properties={ + 'basic_data': series_data, + 'detailed_fetched': False, + 'episodes_fetched': False + }, + last_seen=scan_start_time or timezone.now() # Mark as seen during this scan + ) + 
relations_to_create.append(relation) + + # Execute batch operations + logger.info(f"Executing batch operations: {len(series_to_create)} series to create, {len(series_to_update)} to update") + + try: + with transaction.atomic(): + # First, create new series and get their IDs + created_series = {} + if series_to_create: + Series.objects.bulk_create(series_to_create, ignore_conflicts=True) + + # Get the newly created series with their IDs + # We need to re-fetch them to get the primary keys + for series in series_to_create: + # Find the series by its unique identifiers + if series.tmdb_id: + db_series = Series.objects.filter(tmdb_id=series.tmdb_id).first() + elif series.imdb_id: + db_series = Series.objects.filter(imdb_id=series.imdb_id).first() + else: + db_series = Series.objects.filter( + name=series.name, + year=series.year, + tmdb_id__isnull=True, + imdb_id__isnull=True + ).first() + + if db_series: + created_series[id(series)] = db_series + + # Update existing series + if series_to_update: + # First, update all fields except logo to avoid unsaved related object issues + Series.objects.bulk_update(series_to_update, [ + 'description', 'rating', 'genre', 'year', 'tmdb_id', 'imdb_id', + 'custom_properties' + ]) + + # Handle logo updates separately to avoid bulk_update issues + for series in series_to_update: + if hasattr(series, '_logo_to_update'): + series.logo = series._logo_to_update + series.save(update_fields=['logo']) + + # Update relations to reference the correct series objects + for relation in relations_to_create: + if id(relation.series) in created_series: + relation.series = created_series[id(relation.series)] + + # Handle relations + if relations_to_create: + M3USeriesRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True) + + if relations_to_update: + M3USeriesRelation.objects.bulk_update(relations_to_update, [ + 'series', 'category', 'custom_properties', 'last_seen' + ]) + + logger.info("Series batch processing completed successfully!") + return f"Series batch processed: {len(series_to_create)} created, {len(series_to_update)} updated" + + except Exception as e: + logger.error(f"Series batch processing failed: {str(e)}") + return f"Series batch processing failed: {str(e)}" + + +# Helper functions for year and date extraction + +def extract_duration_from_data(movie_data): + """Extract duration in seconds from movie data""" + duration_secs = None + + # Try to extract duration from various possible fields + if movie_data.get('duration_secs'): + duration_secs = int(movie_data.get('duration_secs')) + elif movie_data.get('duration'): + # Handle duration that might be in different formats + duration_str = str(movie_data.get('duration')) + if duration_str.isdigit(): + duration_secs = int(duration_str) * 60 # Assume minutes if just a number + else: + # Try to parse time format like "01:30:00" + try: + time_parts = duration_str.split(':') + if len(time_parts) == 3: + hours, minutes, seconds = map(int, time_parts) + duration_secs = (hours * 3600) + (minutes * 60) + seconds + elif len(time_parts) == 2: + minutes, seconds = map(int, time_parts) + duration_secs = minutes * 60 + seconds + except (ValueError, AttributeError): + pass + + return duration_secs + + +def normalize_rating(rating_value): + """Normalize rating value by converting commas to decimals and validating as float""" + if not rating_value: + return None + + try: + # Convert to string for processing + rating_str = str(rating_value).strip() + + if not rating_str or rating_str == '': + return None + + # Replace 
comma with decimal point (European format) + rating_str = rating_str.replace(',', '.') + + # Try to convert to float + rating_float = float(rating_str) + + # Return as string to maintain compatibility with existing code + # but ensure it's a valid numeric format + return str(rating_float) + except (ValueError, TypeError, AttributeError): + # If conversion fails, discard the rating + logger.debug(f"Invalid rating value discarded: {rating_value}") + return None + + +def extract_year(date_string): + """Extract year from date string""" + if not date_string: + return None + try: + return int(date_string.split('-')[0]) + except (ValueError, IndexError): + return None + + +def extract_year_from_title(title): + """Extract year from movie title if present""" + if not title: + return None + + # Pattern for (YYYY) format + pattern1 = r'\((\d{4})\)' + # Pattern for - YYYY format + pattern2 = r'\s-\s(\d{4})' + # Pattern for YYYY at the end + pattern3 = r'\s(\d{4})$' + + for pattern in [pattern1, pattern2, pattern3]: + match = re.search(pattern, title) + if match: + year = int(match.group(1)) + # Validate year is reasonable (between 1900 and current year + 5) + if 1900 <= year <= 2030: + return year + + return None + + +def extract_year_from_data(data, title_key='name'): + """Extract year from various data sources with fallback options""" + try: + # First try the year field + year = data.get('year') + if year and str(year).strip() and str(year).strip() != '': + try: + year_int = int(year) + if 1900 <= year_int <= 2030: + return year_int + except (ValueError, TypeError): + pass + + # Try releaseDate or release_date fields + for date_field in ['releaseDate', 'release_date']: + date_value = data.get(date_field) + if date_value and isinstance(date_value, str) and date_value.strip(): + # Extract year from date format like "2011-09-19" + try: + year_str = date_value.split('-')[0].strip() + if year_str: + year = int(year_str) + if 1900 <= year <= 2030: + return year + except (ValueError, IndexError): + continue + + # Finally try extracting from title + title = data.get(title_key, '') + if title and title.strip(): + return extract_year_from_title(title) + + except Exception: + # Don't fail processing if year extraction fails + pass + + return None + + +def extract_date_from_data(data): + """Extract date from various data sources with fallback options""" + try: + for date_field in ['air_date', 'releasedate', 'release_date']: + date_value = data.get(date_field) + if date_value and isinstance(date_value, str) and date_value.strip(): + parsed = parse_date(date_value) + if parsed: + return parsed + except Exception: + # Don't fail processing if date extraction fails + pass + return None + + +def parse_date(date_string): + """Parse date string into a datetime object""" + if not date_string: + return None + try: + # Try to parse ISO format first + return datetime.fromisoformat(date_string) + except ValueError: + # Fallback to parsing with strptime for common formats + try: + return datetime.strptime(date_string, '%Y-%m-%d') + except ValueError: + return None # Return None if parsing fails + + +# Episode processing and other advanced features + +def refresh_series_episodes(account, series, external_series_id, episodes_data=None): + """Refresh episodes for a series - only called on-demand""" + try: + if not episodes_data: + # Fetch detailed series info including episodes + with XtreamCodesClient( + account.server_url, + account.username, + account.password, + account.get_user_agent().user_agent + ) as client: + 
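+                # The provider response carries both the series-level metadata ('info')
+                # and the per-season episode listing ('episodes'); both are consumed below.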
series_info = client.get_series_info(external_series_id) + if series_info: + # Update series with detailed info + info = series_info.get('info', {}) + if info: + # Only update fields if new value is non-empty and either no existing value or existing value is empty + updated = False + if should_update_field(series.description, info.get('plot')): + series.description = extract_string_from_array_or_string(info.get('plot')) + updated = True + normalized_rating = normalize_rating(info.get('rating')) + if normalized_rating and (not series.rating or not str(series.rating).strip()): + series.rating = normalized_rating + updated = True + if should_update_field(series.genre, info.get('genre')): + series.genre = extract_string_from_array_or_string(info.get('genre')) + updated = True + + year = extract_year_from_data(info) + if year and not series.year: + series.year = year + updated = True + + if updated: + series.save() + + episodes_data = series_info.get('episodes', {}) + else: + episodes_data = {} + + # Clear existing episodes for this account to handle deletions + Episode.objects.filter( + series=series, + m3u_relations__m3u_account=account + ).delete() + + # Process all episodes in batch + batch_process_episodes(account, series, episodes_data) + + # Update the series relation to mark episodes as fetched + series_relation = M3USeriesRelation.objects.filter( + series=series, + m3u_account=account + ).first() + + if series_relation: + custom_props = series_relation.custom_properties or {} + custom_props['episodes_fetched'] = True + custom_props['detailed_fetched'] = True + series_relation.custom_properties = custom_props + series_relation.last_episode_refresh = timezone.now() + series_relation.save() + + except Exception as e: + logger.error(f"Error refreshing episodes for series {series.name}: {str(e)}") + + +def batch_process_episodes(account, series, episodes_data, scan_start_time=None): + """Process episodes in batches for better performance""" + if not episodes_data: + return + + # Flatten episodes data + all_episodes_data = [] + for season_num, season_episodes in episodes_data.items(): + for episode_data in season_episodes: + episode_data['_season_number'] = int(season_num) + all_episodes_data.append(episode_data) + + if not all_episodes_data: + return + + logger.info(f"Batch processing {len(all_episodes_data)} episodes for series {series.name}") + + # Extract episode identifiers + episode_keys = [] + episode_ids = [] + for episode_data in all_episodes_data: + season_num = episode_data['_season_number'] + episode_num = episode_data.get('episode_num', 0) + episode_keys.append((series.id, season_num, episode_num)) + episode_ids.append(str(episode_data.get('id'))) + + # Pre-fetch existing episodes + existing_episodes = {} + for episode in Episode.objects.filter(series=series): + key = (episode.series_id, episode.season_number, episode.episode_number) + existing_episodes[key] = episode + + # Pre-fetch existing episode relations + existing_relations = { + rel.stream_id: rel for rel in M3UEpisodeRelation.objects.filter( + m3u_account=account, + stream_id__in=episode_ids + ).select_related('episode') + } + + # Prepare batch operations + episodes_to_create = [] + episodes_to_update = [] + relations_to_create = [] + relations_to_update = [] + + for episode_data in all_episodes_data: + try: + episode_id = str(episode_data.get('id')) + episode_name = episode_data.get('title', 'Unknown Episode') + season_number = episode_data['_season_number'] + episode_number = episode_data.get('episode_num', 0) + info 
= episode_data.get('info', {}) + + # Extract episode metadata + description = info.get('plot') or info.get('overview', '') if info else '' + rating = normalize_rating(info.get('rating')) if info else None + air_date = extract_date_from_data(info) if info else None + duration_secs = info.get('duration_secs') if info else None + tmdb_id = info.get('tmdb_id') if info else None + imdb_id = info.get('imdb_id') if info else None + + # Prepare custom properties + custom_props = {} + if info: + if info.get('crew'): + custom_props['crew'] = info.get('crew') + if info.get('movie_image'): + movie_image = extract_string_from_array_or_string(info.get('movie_image')) + if movie_image: + custom_props['movie_image'] = movie_image + backdrop = extract_string_from_array_or_string(info.get('backdrop_path')) + if backdrop: + custom_props['backdrop_path'] = [backdrop] + + # Find existing episode + episode_key = (series.id, season_number, episode_number) + episode = existing_episodes.get(episode_key) + + if episode: + # Update existing episode + updated = False + if episode_name != episode.name: + episode.name = episode_name + updated = True + if description != episode.description: + episode.description = description + updated = True + if rating != episode.rating: + episode.rating = rating + updated = True + if air_date != episode.air_date: + episode.air_date = air_date + updated = True + if duration_secs != episode.duration_secs: + episode.duration_secs = duration_secs + updated = True + if tmdb_id != episode.tmdb_id: + episode.tmdb_id = tmdb_id + updated = True + if imdb_id != episode.imdb_id: + episode.imdb_id = imdb_id + updated = True + if custom_props != episode.custom_properties: + episode.custom_properties = custom_props if custom_props else None + updated = True + + if updated: + episodes_to_update.append(episode) + else: + # Create new episode + episode = Episode( + series=series, + name=episode_name, + description=description, + air_date=air_date, + rating=rating, + duration_secs=duration_secs, + season_number=season_number, + episode_number=episode_number, + tmdb_id=tmdb_id, + imdb_id=imdb_id, + custom_properties=custom_props if custom_props else None + ) + episodes_to_create.append(episode) + + # Handle episode relation + if episode_id in existing_relations: + # Update existing relation + relation = existing_relations[episode_id] + relation.episode = episode + relation.container_extension = episode_data.get('container_extension', 'mp4') + relation.custom_properties = { + 'info': episode_data, + 'season_number': season_number + } + relation.last_seen = scan_start_time or timezone.now() # Mark as seen during this scan + relations_to_update.append(relation) + else: + # Create new relation + relation = M3UEpisodeRelation( + m3u_account=account, + episode=episode, + stream_id=episode_id, + container_extension=episode_data.get('container_extension', 'mp4'), + custom_properties={ + 'info': episode_data, + 'season_number': season_number + }, + last_seen=scan_start_time or timezone.now() # Mark as seen during this scan + ) + relations_to_create.append(relation) + + except Exception as e: + logger.error(f"Error preparing episode {episode_data.get('title', 'Unknown')}: {str(e)}") + + # Execute batch operations + with transaction.atomic(): + # Create new episodes + if episodes_to_create: + Episode.objects.bulk_create(episodes_to_create) + + # Update existing episodes + if episodes_to_update: + Episode.objects.bulk_update(episodes_to_update, [ + 'name', 'description', 'air_date', 'rating', 'duration_secs', + 
'tmdb_id', 'imdb_id', 'custom_properties' + ]) + + # Create new episode relations + if relations_to_create: + M3UEpisodeRelation.objects.bulk_create(relations_to_create) + + # Update existing episode relations + if relations_to_update: + M3UEpisodeRelation.objects.bulk_update(relations_to_update, [ + 'episode', 'container_extension', 'custom_properties', 'last_seen' + ]) + + logger.info(f"Batch processed episodes: {len(episodes_to_create)} new, {len(episodes_to_update)} updated, " + f"{len(relations_to_create)} new relations, {len(relations_to_update)} updated relations") + + +@shared_task +def batch_refresh_series_episodes(account_id, series_ids=None): + """ + Batch refresh episodes for multiple series. + If series_ids is None, refresh all series that haven't been refreshed recently. + """ + try: + account = M3UAccount.objects.get(id=account_id, is_active=True) + + if account.account_type != M3UAccount.Types.XC: + logger.warning(f"Episode refresh called for non-XC account {account_id}") + return "Episode refresh only available for XtreamCodes accounts" + + # Determine which series to refresh + if series_ids: + series_relations = M3USeriesRelation.objects.filter( + m3u_account=account, + series__id__in=series_ids + ).select_related('series') + else: + # Refresh series that haven't been refreshed in the last 24 hours + cutoff_time = timezone.now() - timezone.timedelta(hours=24) + series_relations = M3USeriesRelation.objects.filter( + m3u_account=account, + last_episode_refresh__lt=cutoff_time + ).select_related('series') + + logger.info(f"Batch refreshing episodes for {series_relations.count()} series") + + with XtreamCodesClient( + account.server_url, + account.username, + account.password, + account.get_user_agent().user_agent + ) as client: + + refreshed_count = 0 + for relation in series_relations: + try: + refresh_series_episodes( + account, + relation.series, + relation.external_series_id + ) + refreshed_count += 1 + except Exception as e: + logger.error(f"Error refreshing episodes for series {relation.series.name}: {str(e)}") + + logger.info(f"Batch episode refresh completed for {refreshed_count} series") + return f"Batch episode refresh completed for {refreshed_count} series" + + except Exception as e: + logger.error(f"Error in batch episode refresh for account {account_id}: {str(e)}") + return f"Batch episode refresh failed: {str(e)}" + + +@shared_task +def cleanup_orphaned_vod_content(stale_days=0, scan_start_time=None, account_id=None): + """Clean up VOD content that has no M3U relations or has stale relations""" + from datetime import timedelta + + # Use scan start time as reference, or current time if not provided + reference_time = scan_start_time or timezone.now() + + # Calculate cutoff date for stale relations + cutoff_date = reference_time - timedelta(days=stale_days) + + # Build base query filters + base_filters = {'last_seen__lt': cutoff_date} + if account_id: + base_filters['m3u_account_id'] = account_id + logger.info(f"Cleaning up stale VOD content for account {account_id}") + else: + logger.info("Cleaning up stale VOD content across all accounts") + + # Clean up stale movie relations (haven't been seen in the specified days) + stale_movie_relations = M3UMovieRelation.objects.filter(**base_filters) + stale_movie_count = stale_movie_relations.count() + stale_movie_relations.delete() + + # Clean up stale series relations + stale_series_relations = M3USeriesRelation.objects.filter(**base_filters) + stale_series_count = stale_series_relations.count() + 
stale_series_relations.delete() + + # Clean up stale episode relations + stale_episode_relations = M3UEpisodeRelation.objects.filter(**base_filters) + stale_episode_count = stale_episode_relations.count() + stale_episode_relations.delete() + + # Clean up movies with no relations (orphaned) + # Safe to delete even during account-specific cleanup because if ANY account + # has a relation, m3u_relations will not be null + orphaned_movies = Movie.objects.filter(m3u_relations__isnull=True) + orphaned_movie_count = orphaned_movies.count() + if orphaned_movie_count > 0: + logger.info(f"Deleting {orphaned_movie_count} orphaned movies with no M3U relations") + orphaned_movies.delete() + + # Clean up series with no relations (orphaned) + orphaned_series = Series.objects.filter(m3u_relations__isnull=True) + orphaned_series_count = orphaned_series.count() + if orphaned_series_count > 0: + logger.info(f"Deleting {orphaned_series_count} orphaned series with no M3U relations") + orphaned_series.delete() + + # Episodes will be cleaned up via CASCADE when series are deleted + + result = (f"Cleaned up {stale_movie_count} stale movie relations, " + f"{stale_series_count} stale series relations, " + f"{stale_episode_count} stale episode relations, " + f"{orphaned_movie_count} orphaned movies, and " + f"{orphaned_series_count} orphaned series") + + logger.info(result) + return result + + +def handle_movie_id_conflicts(current_movie, relation, tmdb_id_to_set, imdb_id_to_set): + """ + Handle potential duplicate key conflicts when setting tmdb_id or imdb_id. + + Since this is called when a user is actively accessing movie details, we always + preserve the current movie (user's selection) and merge the existing one into it. + This prevents breaking the user's current viewing experience. 
+ + Returns: + tuple: (movie_to_use, relation_was_updated) + """ + from django.db import IntegrityError + + existing_movie_with_tmdb = None + existing_movie_with_imdb = None + + # Check for existing movies with these IDs + if tmdb_id_to_set: + try: + existing_movie_with_tmdb = Movie.objects.get(tmdb_id=tmdb_id_to_set) + except Movie.DoesNotExist: + pass + + if imdb_id_to_set: + try: + existing_movie_with_imdb = Movie.objects.get(imdb_id=imdb_id_to_set) + except Movie.DoesNotExist: + pass + + # If no conflicts, proceed normally + if not existing_movie_with_tmdb and not existing_movie_with_imdb: + return current_movie, False + + # Determine which existing movie has the conflicting ID (prefer TMDB match) + existing_movie = existing_movie_with_tmdb or existing_movie_with_imdb + + # CRITICAL: Check if the existing movie is actually the same as the current movie + # This can happen if the current movie already has the ID we're trying to set + if existing_movie.id == current_movie.id: + logger.debug(f"Current movie {current_movie.id} already has the target ID, no conflict resolution needed") + return current_movie, False + + logger.info(f"ID conflict detected: Merging existing movie '{existing_movie.name}' (ID: {existing_movie.id}) into current movie '{current_movie.name}' (ID: {current_movie.id}) to preserve user selection") + + # FIRST: Clear the conflicting ID from the existing movie before any merging + if existing_movie_with_tmdb and tmdb_id_to_set: + logger.info(f"Clearing tmdb_id from existing movie {existing_movie.id} to avoid constraint violation") + existing_movie.tmdb_id = None + existing_movie.save(update_fields=['tmdb_id']) + + if existing_movie_with_imdb and imdb_id_to_set: + logger.info(f"Clearing imdb_id from existing movie {existing_movie.id} to avoid constraint violation") + existing_movie.imdb_id = None + existing_movie.save(update_fields=['imdb_id']) + + # THEN: Merge data from existing movie into current movie (now safe to set IDs) + merge_movie_data(source_movie=existing_movie, target_movie=current_movie, + tmdb_id_to_set=tmdb_id_to_set, imdb_id_to_set=imdb_id_to_set) + + # Transfer all relations from existing movie to current movie + existing_relations = existing_movie.m3u_relations.all() + if existing_relations.exists(): + logger.info(f"Transferring {existing_relations.count()} relations from existing movie {existing_movie.id} to current movie {current_movie.id}") + existing_relations.update(movie=current_movie) + + # Now safe to delete the existing movie since all its relations have been transferred + logger.info(f"Deleting existing movie {existing_movie.id} '{existing_movie.name}' after merging data and transferring relations") + existing_movie.delete() + + return current_movie, False # No relation update needed since we kept current movie + + +def merge_movie_data(source_movie, target_movie, tmdb_id_to_set=None, imdb_id_to_set=None): + """ + Merge valuable data from source_movie into target_movie. + Only overwrites target fields that are empty/None with non-empty source values. 
+ + Args: + source_movie: Movie to copy data from + target_movie: Movie to copy data to + tmdb_id_to_set: TMDB ID to set on target (overrides source tmdb_id) + imdb_id_to_set: IMDB ID to set on target (overrides source imdb_id) + """ + updated = False + + # Basic fields - only fill if target is empty + if not target_movie.description and source_movie.description: + target_movie.description = source_movie.description + updated = True + logger.debug(f"Merged description from movie {source_movie.id} to {target_movie.id}") + + if not target_movie.year and source_movie.year: + target_movie.year = source_movie.year + updated = True + logger.debug(f"Merged year from movie {source_movie.id} to {target_movie.id}") + + if not target_movie.rating and source_movie.rating: + target_movie.rating = source_movie.rating + updated = True + logger.debug(f"Merged rating from movie {source_movie.id} to {target_movie.id}") + + if not target_movie.genre and source_movie.genre: + target_movie.genre = source_movie.genre + updated = True + logger.debug(f"Merged genre from movie {source_movie.id} to {target_movie.id}") + + if not target_movie.duration_secs and source_movie.duration_secs: + target_movie.duration_secs = source_movie.duration_secs + updated = True + logger.debug(f"Merged duration_secs from movie {source_movie.id} to {target_movie.id}") + + if not target_movie.logo and source_movie.logo: + target_movie.logo = source_movie.logo + updated = True + logger.debug(f"Merged logo from movie {source_movie.id} to {target_movie.id}") + + # Handle external IDs - use the specific IDs we want to set, or fall back to source + if not target_movie.tmdb_id: + if tmdb_id_to_set: + target_movie.tmdb_id = tmdb_id_to_set + updated = True + logger.debug(f"Set tmdb_id {tmdb_id_to_set} on movie {target_movie.id}") + elif source_movie.tmdb_id: + target_movie.tmdb_id = source_movie.tmdb_id + updated = True + logger.debug(f"Merged tmdb_id from movie {source_movie.id} to {target_movie.id}") + + if not target_movie.imdb_id: + if imdb_id_to_set: + target_movie.imdb_id = imdb_id_to_set + updated = True + logger.debug(f"Set imdb_id {imdb_id_to_set} on movie {target_movie.id}") + elif source_movie.imdb_id: + target_movie.imdb_id = source_movie.imdb_id + updated = True + logger.debug(f"Merged imdb_id from movie {source_movie.id} to {target_movie.id}") + + # Merge custom properties + target_props = target_movie.custom_properties or {} + source_props = source_movie.custom_properties or {} + + for key, value in source_props.items(): + if value and not target_props.get(key): + target_props[key] = value + updated = True + logger.debug(f"Merged custom property '{key}' from movie {source_movie.id} to {target_movie.id}") + + if updated: + target_movie.custom_properties = target_props + target_movie.save() + logger.info(f"Successfully merged data from movie {source_movie.id} into {target_movie.id}") + + +def handle_series_id_conflicts(current_series, relation, tmdb_id_to_set, imdb_id_to_set): + """ + Handle potential duplicate key conflicts when setting tmdb_id or imdb_id for series. + + Since this is called when a user is actively accessing series details, we always + preserve the current series (user's selection) and merge the existing one into it. + This prevents breaking the user's current viewing experience. 
+ + Returns: + tuple: (series_to_use, relation_was_updated) + """ + from django.db import IntegrityError + + existing_series_with_tmdb = None + existing_series_with_imdb = None + + # Check for existing series with these IDs + if tmdb_id_to_set: + try: + existing_series_with_tmdb = Series.objects.get(tmdb_id=tmdb_id_to_set) + except Series.DoesNotExist: + pass + + if imdb_id_to_set: + try: + existing_series_with_imdb = Series.objects.get(imdb_id=imdb_id_to_set) + except Series.DoesNotExist: + pass + + # If no conflicts, proceed normally + if not existing_series_with_tmdb and not existing_series_with_imdb: + return current_series, False + + # Determine which existing series has the conflicting ID (prefer TMDB match) + existing_series = existing_series_with_tmdb or existing_series_with_imdb + + # CRITICAL: Check if the existing series is actually the same as the current series + # This can happen if the current series already has the ID we're trying to set + if existing_series.id == current_series.id: + logger.debug(f"Current series {current_series.id} already has the target ID, no conflict resolution needed") + return current_series, False + + logger.info(f"ID conflict detected: Merging existing series '{existing_series.name}' (ID: {existing_series.id}) into current series '{current_series.name}' (ID: {current_series.id}) to preserve user selection") + + # FIRST: Clear the conflicting ID from the existing series before any merging + if existing_series_with_tmdb and tmdb_id_to_set: + logger.info(f"Clearing tmdb_id from existing series {existing_series.id} to avoid constraint violation") + existing_series.tmdb_id = None + existing_series.save(update_fields=['tmdb_id']) + + if existing_series_with_imdb and imdb_id_to_set: + logger.info(f"Clearing imdb_id from existing series {existing_series.id} to avoid constraint violation") + existing_series.imdb_id = None + existing_series.save(update_fields=['imdb_id']) + + # THEN: Merge data from existing series into current series (now safe to set IDs) + merge_series_data(source_series=existing_series, target_series=current_series, + tmdb_id_to_set=tmdb_id_to_set, imdb_id_to_set=imdb_id_to_set) + + # Transfer all relations from existing series to current series + existing_relations = existing_series.m3u_relations.all() + if existing_relations.exists(): + logger.info(f"Transferring {existing_relations.count()} relations from existing series {existing_series.id} to current series {current_series.id}") + existing_relations.update(series=current_series) + + # Now safe to delete the existing series since all its relations have been transferred + logger.info(f"Deleting existing series {existing_series.id} '{existing_series.name}' after merging data and transferring relations") + existing_series.delete() + + return current_series, False # No relation update needed since we kept current series + + +def merge_series_data(source_series, target_series, tmdb_id_to_set=None, imdb_id_to_set=None): + """ + Merge valuable data from source_series into target_series. + Only overwrites target fields that are empty/None with non-empty source values. 
+ + Args: + source_series: Series to copy data from + target_series: Series to copy data to + tmdb_id_to_set: TMDB ID to set on target (overrides source tmdb_id) + imdb_id_to_set: IMDB ID to set on target (overrides source imdb_id) + """ + updated = False + + # Basic fields - only fill if target is empty + if not target_series.description and source_series.description: + target_series.description = source_series.description + updated = True + logger.debug(f"Merged description from series {source_series.id} to {target_series.id}") + + if not target_series.year and source_series.year: + target_series.year = source_series.year + updated = True + logger.debug(f"Merged year from series {source_series.id} to {target_series.id}") + + if not target_series.rating and source_series.rating: + target_series.rating = source_series.rating + updated = True + logger.debug(f"Merged rating from series {source_series.id} to {target_series.id}") + + if not target_series.genre and source_series.genre: + target_series.genre = source_series.genre + updated = True + logger.debug(f"Merged genre from series {source_series.id} to {target_series.id}") + + if not target_series.logo and source_series.logo: + target_series.logo = source_series.logo + updated = True + logger.debug(f"Merged logo from series {source_series.id} to {target_series.id}") + + # Handle external IDs - use the specific IDs we want to set, or fall back to source + if not target_series.tmdb_id: + if tmdb_id_to_set: + target_series.tmdb_id = tmdb_id_to_set + updated = True + logger.debug(f"Set tmdb_id {tmdb_id_to_set} on series {target_series.id}") + elif source_series.tmdb_id: + target_series.tmdb_id = source_series.tmdb_id + updated = True + logger.debug(f"Merged tmdb_id from series {source_series.id} to {target_series.id}") + + if not target_series.imdb_id: + if imdb_id_to_set: + target_series.imdb_id = imdb_id_to_set + updated = True + logger.debug(f"Set imdb_id {imdb_id_to_set} on series {target_series.id}") + elif source_series.imdb_id: + target_series.imdb_id = source_series.imdb_id + updated = True + logger.debug(f"Merged imdb_id from series {source_series.id} to {target_series.id}") + + # Merge custom properties + target_props = target_series.custom_properties or {} + source_props = source_series.custom_properties or {} + + for key, value in source_props.items(): + if value and not target_props.get(key): + target_props[key] = value + updated = True + logger.debug(f"Merged custom property '{key}' from series {source_series.id} to {target_series.id}") + + if updated: + target_series.custom_properties = target_props + target_series.save() + logger.info(f"Successfully merged data from series {source_series.id} into {target_series.id}") + + +def is_non_empty_string(value): + """ + Helper function to safely check if a value is a non-empty string. + Returns True only if value is a string and has non-whitespace content. + """ + return isinstance(value, str) and value.strip() + + +def extract_string_from_array_or_string(value): + """ + Helper function to extract a string value from either a string or array. + Returns the first non-null string from an array, or the string itself. + Returns None if no valid string is found. 
+ """ + if isinstance(value, str): + return value.strip() if value.strip() else None + elif isinstance(value, list) and value: + # Find first non-null, non-empty string in the array + for item in value: + if isinstance(item, str) and item.strip(): + return item.strip() + elif item is not None and str(item).strip(): + return str(item).strip() + return None + + +def clean_custom_properties(custom_props): + """ + Remove null, empty, or invalid values from custom_properties dict. + Only keeps properties that have meaningful values. + """ + if not custom_props: + return None + + cleaned = {} + for key, value in custom_props.items(): + # Handle fields that should extract clean strings + if key in ['youtube_trailer', 'actors', 'director', 'cast']: + clean_value = extract_string_from_array_or_string(value) + if clean_value: + cleaned[key] = clean_value + # Handle backdrop_path which should remain as array format + elif key == 'backdrop_path': + clean_value = extract_string_from_array_or_string(value) + if clean_value: + cleaned[key] = [clean_value] + else: + # For other properties, keep them if they're not None and not empty + if value is not None and value != '' and value != []: + # If it's a list with only null values, skip it + if isinstance(value, list) and all(item is None for item in value): + continue + cleaned[key] = value + + return cleaned if cleaned else None + + +def should_update_field(existing_value, new_value): + """ + Helper function to determine if we should update a field. + Returns True if: + - new_value is a non-empty string (or contains one if it's an array) AND + - existing_value is None, empty string, array with null/empty values, or non-string + """ + # Extract actual string values from arrays if needed + new_string = extract_string_from_array_or_string(new_value) + existing_string = extract_string_from_array_or_string(existing_value) + + return new_string is not None and (existing_string is None or not existing_string) + + +@shared_task +def refresh_movie_advanced_data(m3u_movie_relation_id, force_refresh=False): + """ + Fetch advanced movie data from provider and update Movie and M3UMovieRelation. + Only fetch if last_advanced_refresh > 24h ago, unless force_refresh is True. + """ + try: + relation = M3UMovieRelation.objects.select_related('movie', 'm3u_account').get(id=m3u_movie_relation_id) + now = timezone.now() + if not force_refresh and relation.last_advanced_refresh and (now - relation.last_advanced_refresh).total_seconds() < 86400: + return "Advanced data recently fetched, skipping." 
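+        # Past this point the cached advanced data is older than 24 hours (or a refresh
+        # was forced), so the provider is queried for detailed VOD info.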
+ + account = relation.m3u_account + movie = relation.movie + + from core.xtream_codes import Client as XtreamCodesClient + + with XtreamCodesClient( + server_url=account.server_url, + username=account.username, + password=account.password, + user_agent=account.get_user_agent().user_agent + ) as client: + vod_info = client.get_vod_info(relation.stream_id) + if vod_info and 'info' in vod_info: + info_raw = vod_info.get('info', {}) + + # Handle case where 'info' might be a list instead of dict + if isinstance(info_raw, list): + # If it's a list, try to use the first item or create empty dict + info = info_raw[0] if info_raw and isinstance(info_raw[0], dict) else {} + logger.warning(f"VOD info for stream {relation.stream_id} returned list instead of dict, using first item") + elif isinstance(info_raw, dict): + info = info_raw + else: + info = {} + logger.warning(f"VOD info for stream {relation.stream_id} returned unexpected type: {type(info_raw)}") + + movie_data_raw = vod_info.get('movie_data', {}) + + # Handle case where 'movie_data' might be a list instead of dict + if isinstance(movie_data_raw, list): + movie_data = movie_data_raw[0] if movie_data_raw and isinstance(movie_data_raw[0], dict) else {} + logger.warning(f"VOD movie_data for stream {relation.stream_id} returned list instead of dict, using first item") + elif isinstance(movie_data_raw, dict): + movie_data = movie_data_raw + else: + movie_data = {} + logger.warning(f"VOD movie_data for stream {relation.stream_id} returned unexpected type: {type(movie_data_raw)}") + + # Update Movie fields if changed + updated = False + custom_props = movie.custom_properties or {} + if info.get('plot') and info.get('plot') != movie.description: + movie.description = info.get('plot') + updated = True + normalized_rating = normalize_rating(info.get('rating')) + if normalized_rating and normalized_rating != movie.rating: + movie.rating = normalized_rating + updated = True + if info.get('genre') and info.get('genre') != movie.genre: + movie.genre = info.get('genre') + updated = True + if info.get('duration_secs'): + duration_secs = int(info.get('duration_secs')) + if duration_secs != movie.duration_secs: + movie.duration_secs = duration_secs + updated = True + # Check for releasedate or release_date + release_date_value = info.get('releasedate') or info.get('release_date') + if release_date_value: + try: + year = int(str(release_date_value).split('-')[0]) + if year != movie.year: + movie.year = year + updated = True + except Exception: + pass + # Handle TMDB/IMDB ID updates with duplicate key protection + tmdb_id_to_set = info.get('tmdb_id') if info.get('tmdb_id') and info.get('tmdb_id') != movie.tmdb_id else None + imdb_id_to_set = info.get('imdb_id') if info.get('imdb_id') and info.get('imdb_id') != movie.imdb_id else None + + logger.debug(f"Movie {movie.id} current IDs: tmdb_id={movie.tmdb_id}, imdb_id={movie.imdb_id}") + logger.debug(f"IDs to set: tmdb_id={tmdb_id_to_set}, imdb_id={imdb_id_to_set}") + + if tmdb_id_to_set or imdb_id_to_set: + # Check for existing movies with these IDs and handle duplicates + updated_movie, relation_updated = handle_movie_id_conflicts( + movie, relation, tmdb_id_to_set, imdb_id_to_set + ) + if relation_updated: + # If the relation was updated to point to a different movie, + # we need to update our reference and continue with that movie + movie = updated_movie + logger.info(f"Relation updated, now working with movie {movie.id}") + else: + # No relation update, safe to set the IDs + if tmdb_id_to_set: + movie.tmdb_id 
= tmdb_id_to_set + updated = True + logger.debug(f"Set tmdb_id {tmdb_id_to_set} on movie {movie.id}") + if imdb_id_to_set: + movie.imdb_id = imdb_id_to_set + updated = True + logger.debug(f"Set imdb_id {imdb_id_to_set} on movie {movie.id}") + # Only update trailer if we have a non-empty value and either no existing value or existing value is empty + if should_update_field(custom_props.get('youtube_trailer'), info.get('trailer')): + custom_props['youtube_trailer'] = extract_string_from_array_or_string(info.get('trailer')) + updated = True + if should_update_field(custom_props.get('youtube_trailer'), info.get('youtube_trailer')): + custom_props['youtube_trailer'] = extract_string_from_array_or_string(info.get('youtube_trailer')) + updated = True + # Only update backdrop_path if we have a non-empty value and either no existing value or existing value is empty + if should_update_field(custom_props.get('backdrop_path'), info.get('backdrop_path')): + backdrop_url = extract_string_from_array_or_string(info.get('backdrop_path')) + custom_props['backdrop_path'] = [backdrop_url] if backdrop_url else None + updated = True + # Only update actors if we have a non-empty value and either no existing value or existing value is empty + if should_update_field(custom_props.get('actors'), info.get('actors')): + custom_props['actors'] = extract_string_from_array_or_string(info.get('actors')) + updated = True + if should_update_field(custom_props.get('actors'), info.get('cast')): + custom_props['actors'] = extract_string_from_array_or_string(info.get('cast')) + updated = True + # Only update director if we have a non-empty value and either no existing value or existing value is empty + if should_update_field(custom_props.get('director'), info.get('director')): + custom_props['director'] = extract_string_from_array_or_string(info.get('director')) + updated = True + if updated: + # Clean custom_properties before saving to remove null/empty values + movie.custom_properties = clean_custom_properties(custom_props) + try: + movie.save() + except Exception as save_error: + # If we still get an integrity error after our conflict resolution, + # log it and try to save without the problematic IDs + logger.error(f"Failed to save movie {movie.id} after conflict resolution: {str(save_error)}") + if 'tmdb_id' in str(save_error) and movie.tmdb_id: + logger.warning(f"Clearing tmdb_id {movie.tmdb_id} from movie {movie.id} due to save error") + movie.tmdb_id = None + if 'imdb_id' in str(save_error) and movie.imdb_id: + logger.warning(f"Clearing imdb_id {movie.imdb_id} from movie {movie.id} due to save error") + movie.imdb_id = None + try: + movie.save() + logger.info(f"Successfully saved movie {movie.id} after clearing problematic IDs") + except Exception as final_error: + logger.error(f"Final save attempt failed for movie {movie.id}: {str(final_error)}") + raise + + # Update relation custom_properties and last_advanced_refresh + relation_custom_props = relation.custom_properties or {} + + # Clean the detailed_info before saving to avoid storing null/empty arrays + cleaned_info = clean_custom_properties(info) if info else None + cleaned_movie_data = clean_custom_properties(movie_data) if movie_data else None + + if cleaned_info: + relation_custom_props['detailed_info'] = cleaned_info + if cleaned_movie_data: + relation_custom_props['movie_data'] = cleaned_movie_data + relation_custom_props['detailed_fetched'] = True + + relation.custom_properties = relation_custom_props + relation.last_advanced_refresh = now + 
relation.save(update_fields=['custom_properties', 'last_advanced_refresh']) + + return "Advanced data refreshed." + except Exception as e: + logger.error(f"Error refreshing advanced movie data for relation {m3u_movie_relation_id}: {str(e)}") + return f"Error: {str(e)}" + + +def validate_logo_reference(obj, obj_type="object"): + """ + Validate that a VOD logo reference exists in the database. + If not, set it to None to prevent foreign key constraint violations. + + Args: + obj: Object with a logo attribute + obj_type: String description of the object type for logging + + Returns: + bool: True if logo was valid or None, False if logo was invalid and cleared + """ + if not hasattr(obj, 'logo') or not obj.logo: + return True + + if not obj.logo.pk: + # Logo doesn't have a primary key, so it's not saved + obj.logo = None + return False + + try: + # Verify the logo exists in the database + VODLogo.objects.get(pk=obj.logo.pk) + return True + except VODLogo.DoesNotExist: + logger.warning(f"VOD Logo with ID {obj.logo.pk} does not exist in database for {obj_type} '{getattr(obj, 'name', 'Unknown')}', setting to None") + obj.logo = None + return False diff --git a/apps/vod/urls.py b/apps/vod/urls.py new file mode 100644 index 00000000..3cea96a5 --- /dev/null +++ b/apps/vod/urls.py @@ -0,0 +1,16 @@ +from django.urls import path, include +from rest_framework.routers import DefaultRouter +from .api_views import MovieViewSet, EpisodeViewSet, SeriesViewSet, VODCategoryViewSet, UnifiedContentViewSet + +app_name = 'vod' + +router = DefaultRouter() +router.register(r'movies', MovieViewSet) +router.register(r'episodes', EpisodeViewSet) +router.register(r'series', SeriesViewSet) +router.register(r'categories', VODCategoryViewSet) +router.register(r'all', UnifiedContentViewSet, basename='unified-content') + +urlpatterns = [ + path('api/', include(router.urls)), +] diff --git a/core/api_urls.py b/core/api_urls.py index e30eb698..baa4bbe5 100644 --- a/core/api_urls.py +++ b/core/api_urls.py @@ -2,15 +2,16 @@ from django.urls import path, include from rest_framework.routers import DefaultRouter -from .api_views import UserAgentViewSet, StreamProfileViewSet, CoreSettingsViewSet, environment, version +from .api_views import UserAgentViewSet, StreamProfileViewSet, CoreSettingsViewSet, environment, version, rehash_streams_endpoint, TimezoneListView router = DefaultRouter() router.register(r'useragents', UserAgentViewSet, basename='useragent') router.register(r'streamprofiles', StreamProfileViewSet, basename='streamprofile') router.register(r'settings', CoreSettingsViewSet, basename='coresettings') -router.register(r'settings', CoreSettingsViewSet, basename='settings') urlpatterns = [ path('settings/env/', environment, name='token_refresh'), path('version/', version, name='version'), + path('rehash-streams/', rehash_streams_endpoint, name='rehash_streams'), + path('timezones/', TimezoneListView.as_view(), name='timezones'), path('', include(router.urls)), ] diff --git a/core/api_views.py b/core/api_views.py index 77473b5d..f475909a 100644 --- a/core/api_views.py +++ b/core/api_views.py @@ -1,37 +1,68 @@ # core/api_views.py +import json +import ipaddress +import logging from rest_framework import viewsets, status from rest_framework.response import Response +from rest_framework.views import APIView from django.shortcuts import get_object_or_404 -from .models import UserAgent, StreamProfile, CoreSettings, STREAM_HASH_KEY -from .serializers import UserAgentSerializer, StreamProfileSerializer, CoreSettingsSerializer from 
rest_framework.permissions import IsAuthenticated -from rest_framework.decorators import api_view, permission_classes +from rest_framework.decorators import api_view, permission_classes, action from drf_yasg.utils import swagger_auto_schema +from drf_yasg import openapi +from .models import ( + UserAgent, + StreamProfile, + CoreSettings, + STREAM_HASH_KEY, + NETWORK_ACCESS, + PROXY_SETTINGS_KEY, +) +from .serializers import ( + UserAgentSerializer, + StreamProfileSerializer, + CoreSettingsSerializer, + ProxySettingsSerializer, +) + import socket import requests import os from core.tasks import rehash_streams +from apps.accounts.permissions import ( + Authenticated, +) +from dispatcharr.utils import get_client_ip + + +logger = logging.getLogger(__name__) + class UserAgentViewSet(viewsets.ModelViewSet): """ API endpoint that allows user agents to be viewed, created, edited, or deleted. """ + queryset = UserAgent.objects.all() serializer_class = UserAgentSerializer + class StreamProfileViewSet(viewsets.ModelViewSet): """ API endpoint that allows stream profiles to be viewed, created, edited, or deleted. """ + queryset = StreamProfile.objects.all() serializer_class = StreamProfileSerializer + class CoreSettingsViewSet(viewsets.ModelViewSet): """ API endpoint for editing core settings. This is treated as a singleton: only one instance should exist. """ + queryset = CoreSettings.objects.all() serializer_class = CoreSettingsSerializer @@ -39,27 +70,175 @@ class CoreSettingsViewSet(viewsets.ModelViewSet): instance = self.get_object() response = super().update(request, *args, **kwargs) if instance.key == STREAM_HASH_KEY: - if instance.value != request.data['value']: - rehash_streams.delay(request.data['value'].split(',')) + if instance.value != request.data["value"]: + rehash_streams.delay(request.data["value"].split(",")) + + # If DVR pre/post offsets changed, reschedule upcoming recordings + try: + from core.models import DVR_PRE_OFFSET_MINUTES_KEY, DVR_POST_OFFSET_MINUTES_KEY + if instance.key in (DVR_PRE_OFFSET_MINUTES_KEY, DVR_POST_OFFSET_MINUTES_KEY): + if instance.value != request.data.get("value"): + try: + # Prefer async task if Celery is available + from apps.channels.tasks import reschedule_upcoming_recordings_for_offset_change + reschedule_upcoming_recordings_for_offset_change.delay() + except Exception: + # Fallback to synchronous implementation + from apps.channels.tasks import reschedule_upcoming_recordings_for_offset_change_impl + reschedule_upcoming_recordings_for_offset_change_impl() + except Exception: + pass return response + def create(self, request, *args, **kwargs): + response = super().create(request, *args, **kwargs) + # If creating DVR pre/post offset settings, also reschedule upcoming recordings + try: + key = request.data.get("key") + from core.models import DVR_PRE_OFFSET_MINUTES_KEY, DVR_POST_OFFSET_MINUTES_KEY + if key in (DVR_PRE_OFFSET_MINUTES_KEY, DVR_POST_OFFSET_MINUTES_KEY): + try: + from apps.channels.tasks import reschedule_upcoming_recordings_for_offset_change + reschedule_upcoming_recordings_for_offset_change.delay() + except Exception: + from apps.channels.tasks import reschedule_upcoming_recordings_for_offset_change_impl + reschedule_upcoming_recordings_for_offset_change_impl() + except Exception: + pass + return response + @action(detail=False, methods=["post"], url_path="check") + def check(self, request, *args, **kwargs): + data = request.data + + if data.get("key") == NETWORK_ACCESS: + client_ip = ipaddress.ip_address(get_client_ip(request)) + + 
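+            # For each configured network-access entry, collect the CIDRs that do not
+            # contain the requesting client's IP; an entry left empty means the client
+            # would still be allowed by that setting.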
in_network = {} + invalid = [] + + value = json.loads(data.get("value", "{}")) + for key, val in value.items(): + in_network[key] = [] + cidrs = val.split(",") + for cidr in cidrs: + try: + network = ipaddress.ip_network(cidr) + + if client_ip in network: + in_network[key] = [] + break + + in_network[key].append(cidr) + except: + invalid.append(cidr) + + if len(invalid) > 0: + return Response( + { + "error": True, + "message": "Invalid CIDR(s)", + "data": invalid, + }, + status=status.HTTP_200_OK, + ) + + return Response(in_network, status=status.HTTP_200_OK) + + return Response({}, status=status.HTTP_200_OK) + +class ProxySettingsViewSet(viewsets.ViewSet): + """ + API endpoint for proxy settings stored as JSON in CoreSettings. + """ + serializer_class = ProxySettingsSerializer + + def _get_or_create_settings(self): + """Get or create the proxy settings CoreSettings entry""" + try: + settings_obj = CoreSettings.objects.get(key=PROXY_SETTINGS_KEY) + settings_data = json.loads(settings_obj.value) + except (CoreSettings.DoesNotExist, json.JSONDecodeError): + # Create default settings + settings_data = { + "buffering_timeout": 15, + "buffering_speed": 1.0, + "redis_chunk_ttl": 60, + "channel_shutdown_delay": 0, + "channel_init_grace_period": 5, + } + settings_obj, created = CoreSettings.objects.get_or_create( + key=PROXY_SETTINGS_KEY, + defaults={ + "name": "Proxy Settings", + "value": json.dumps(settings_data) + } + ) + return settings_obj, settings_data + + def list(self, request): + """Return proxy settings""" + settings_obj, settings_data = self._get_or_create_settings() + return Response(settings_data) + + def retrieve(self, request, pk=None): + """Return proxy settings regardless of ID""" + settings_obj, settings_data = self._get_or_create_settings() + return Response(settings_data) + + def update(self, request, pk=None): + """Update proxy settings""" + settings_obj, current_data = self._get_or_create_settings() + + serializer = ProxySettingsSerializer(data=request.data) + serializer.is_valid(raise_exception=True) + + # Update the JSON data + settings_obj.value = json.dumps(serializer.validated_data) + settings_obj.save() + + return Response(serializer.validated_data) + + def partial_update(self, request, pk=None): + """Partially update proxy settings""" + settings_obj, current_data = self._get_or_create_settings() + + # Merge current data with new data + updated_data = {**current_data, **request.data} + + serializer = ProxySettingsSerializer(data=updated_data) + serializer.is_valid(raise_exception=True) + + # Update the JSON data + settings_obj.value = json.dumps(serializer.validated_data) + settings_obj.save() + + return Response(serializer.validated_data) + + @action(detail=False, methods=['get', 'patch']) + def settings(self, request): + """Get or update the proxy settings.""" + if request.method == 'GET': + return self.list(request) + elif request.method == 'PATCH': + return self.partial_update(request) + + + @swagger_auto_schema( - method='get', + method="get", operation_description="Endpoint for environment details", - responses={200: "Environment variables"} + responses={200: "Environment variables"}, ) -@api_view(['GET']) -@permission_classes([IsAuthenticated]) +@api_view(["GET"]) +@permission_classes([Authenticated]) def environment(request): - - public_ip = None local_ip = None country_code = None country_name = None - # 1) Get the public IP + # 1) Get the public IP from ipify.org API try: r = requests.get("https://api64.ipify.org?format=json", timeout=5) r.raise_for_status() 
@@ -67,46 +246,153 @@ def environment(request): except requests.RequestException as e: public_ip = f"Error: {e}" - # 2) Get the local IP + # 2) Get the local IP by connecting to a public DNS server try: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - # connect to a β€œpublic” address so the OS can determine our local interface + # connect to a "public" address so the OS can determine our local interface s.connect(("8.8.8.8", 80)) local_ip = s.getsockname()[0] s.close() except Exception as e: local_ip = f"Error: {e}" - # 3) If we got a valid public_ip, fetch geo info from ipapi.co + # 3) Get geolocation data from ipapi.co or ip-api.com if public_ip and "Error" not in public_ip: try: - geo = requests.get(f"https://ipapi.co/{public_ip}/json/", timeout=5).json() - # ipapi returns fields like country_code, country_name, etc. - country_code = geo.get("country_code", "") # e.g. "US" - country_name = geo.get("country_name", "") # e.g. "United States" - except requests.RequestException as e: + # Attempt to get geo information from ipapi.co first + r = requests.get(f"https://ipapi.co/{public_ip}/json/", timeout=5) + + if r.status_code == requests.codes.ok: + geo = r.json() + country_code = geo.get("country_code") # e.g. "US" + country_name = geo.get("country_name") # e.g. "United States" + + else: + # If ipapi.co fails, fallback to ip-api.com + # only supports http requests for free tier + r = requests.get("http://ip-api.com/json/", timeout=5) + + if r.status_code == requests.codes.ok: + geo = r.json() + country_code = geo.get("countryCode") # e.g. "US" + country_name = geo.get("country") # e.g. "United States" + + else: + raise Exception("Geo lookup failed with both services") + + except Exception as e: + logger.error(f"Error during geo lookup: {e}") country_code = None country_name = None - return Response({ - 'authenticated': True, - 'public_ip': public_ip, - 'local_ip': local_ip, - 'country_code': country_code, - 'country_name': country_name, - 'env_mode': "dev" if os.getenv('DISPATCHARR_ENV') == "dev" else "prod", - }) + # 4) Get environment mode from system environment variable + return Response( + { + "authenticated": True, + "public_ip": public_ip, + "local_ip": local_ip, + "country_code": country_code, + "country_name": country_name, + "env_mode": "dev" if os.getenv("DISPATCHARR_ENV") == "dev" else "prod", + } + ) + @swagger_auto_schema( - method='get', + method="get", operation_description="Get application version information", - responses={200: "Version information"} + responses={200: "Version information"}, ) -@api_view(['GET']) + +@api_view(["GET"]) def version(request): # Import version information from version import __version__, __timestamp__ - return Response({ - 'version': __version__, - 'timestamp': __timestamp__, - }) + + return Response( + { + "version": __version__, + "timestamp": __timestamp__, + } + ) + + +@swagger_auto_schema( + method="post", + operation_description="Trigger rehashing of all streams", + responses={200: "Rehash task started"}, +) +@api_view(["POST"]) +@permission_classes([Authenticated]) +def rehash_streams_endpoint(request): + """Trigger the rehash streams task""" + try: + # Get the current hash keys from settings + hash_key_setting = CoreSettings.objects.get(key=STREAM_HASH_KEY) + hash_keys = hash_key_setting.value.split(",") + + # Queue the rehash task + task = rehash_streams.delay(hash_keys) + + return Response({ + "success": True, + "message": "Stream rehashing task has been queued", + "task_id": task.id + }, status=status.HTTP_200_OK) + + except 
CoreSettings.DoesNotExist: + return Response({ + "success": False, + "message": "Hash key settings not found" + }, status=status.HTTP_400_BAD_REQUEST) + + except Exception as e: + logger.error(f"Error triggering rehash streams: {e}") + return Response({ + "success": False, + "message": "Failed to trigger rehash task" + }, status=status.HTTP_500_INTERNAL_SERVER_ERROR) + + +# ───────────────────────────── +# Timezone List API +# ───────────────────────────── +class TimezoneListView(APIView): + """ + API endpoint that returns all available timezones supported by pytz. + Returns a list of timezone names grouped by region for easy selection. + This is a general utility endpoint that can be used throughout the application. + """ + + def get_permissions(self): + return [Authenticated()] + + @swagger_auto_schema( + operation_description="Get list of all supported timezones", + responses={200: openapi.Response('List of timezones with grouping by region')} + ) + def get(self, request): + import pytz + + # Get all common timezones (excludes deprecated ones) + all_timezones = sorted(pytz.common_timezones) + + # Group by region for better UX + grouped = {} + for tz in all_timezones: + if '/' in tz: + region = tz.split('/')[0] + if region not in grouped: + grouped[region] = [] + grouped[region].append(tz) + else: + # Handle special zones like UTC, GMT, etc. + if 'Other' not in grouped: + grouped['Other'] = [] + grouped['Other'].append(tz) + + return Response({ + 'timezones': all_timezones, + 'grouped': grouped, + 'count': len(all_timezones) + }) diff --git a/core/management/commands/reset_network_access.py b/core/management/commands/reset_network_access.py new file mode 100644 index 00000000..3b0e5a55 --- /dev/null +++ b/core/management/commands/reset_network_access.py @@ -0,0 +1,13 @@ +# your_app/management/commands/update_column.py + +from django.core.management.base import BaseCommand +from core.models import CoreSettings, NETWORK_ACCESS + + +class Command(BaseCommand): + help = "Reset network access settings" + + def handle(self, *args, **options): + setting = CoreSettings.objects.get(key=NETWORK_ACCESS) + setting.value = "{}" + setting.save() diff --git a/core/migrations/0009_m3u_hash_settings.py b/core/migrations/0009_m3u_hash_settings.py index eab5f141..3c6283fa 100644 --- a/core/migrations/0009_m3u_hash_settings.py +++ b/core/migrations/0009_m3u_hash_settings.py @@ -8,7 +8,7 @@ def preload_core_settings(apps, schema_editor): CoreSettings.objects.create( key=slugify("M3U Hash Key"), name="M3U Hash Key", - value="name,url,tvg_id", + value="url", ) class Migration(migrations.Migration): diff --git a/core/migrations/0013_default_network_access_settings.py b/core/migrations/0013_default_network_access_settings.py new file mode 100644 index 00000000..be53ba05 --- /dev/null +++ b/core/migrations/0013_default_network_access_settings.py @@ -0,0 +1,24 @@ +# Generated by Django 5.1.6 on 2025-03-01 14:01 + +from django.db import migrations +from django.utils.text import slugify + + +def preload_network_access_settings(apps, schema_editor): + CoreSettings = apps.get_model("core", "CoreSettings") + CoreSettings.objects.create( + key=slugify("Network Access"), + name="Network Access", + value="{}", + ) + + +class Migration(migrations.Migration): + + dependencies = [ + ("core", "0012_default_active_m3u_accounts"), + ] + + operations = [ + migrations.RunPython(preload_network_access_settings), + ] diff --git a/core/migrations/0014_default_proxy_settings.py b/core/migrations/0014_default_proxy_settings.py new 
file mode 100644 index 00000000..f4a61a9e --- /dev/null +++ b/core/migrations/0014_default_proxy_settings.py @@ -0,0 +1,35 @@ +# Generated by Django 5.1.6 on 2025-03-01 14:01 + +import json +from django.db import migrations +from django.utils.text import slugify + + +def preload_proxy_settings(apps, schema_editor): + CoreSettings = apps.get_model("core", "CoreSettings") + + # Default proxy settings + default_proxy_settings = { + "buffering_timeout": 15, + "buffering_speed": 1.0, + "redis_chunk_ttl": 60, + "channel_shutdown_delay": 0, + "channel_init_grace_period": 5, + } + + CoreSettings.objects.create( + key=slugify("Proxy Settings"), + name="Proxy Settings", + value=json.dumps(default_proxy_settings), + ) + + +class Migration(migrations.Migration): + + dependencies = [ + ("core", "0013_default_network_access_settings"), + ] + + operations = [ + migrations.RunPython(preload_proxy_settings), + ] diff --git a/core/migrations/0015_dvr_templates.py b/core/migrations/0015_dvr_templates.py new file mode 100644 index 00000000..130a80d4 --- /dev/null +++ b/core/migrations/0015_dvr_templates.py @@ -0,0 +1,30 @@ +# Generated by Django 5.1.6 on 2025-03-01 14:10 + +from django.db import migrations +from django.utils.text import slugify + + +def add_dvr_defaults(apps, schema_editor): + CoreSettings = apps.get_model("core", "CoreSettings") + + defaults = [ + (slugify("DVR TV Template"), "DVR TV Template", "Recordings/TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv"), + (slugify("DVR Movie Template"), "DVR Movie Template", "Recordings/Movies/{title} ({year}).mkv"), + (slugify("DVR TV Fallback Template"), "DVR TV Fallback Template", "Recordings/TV_Shows/{show}/{start}.mkv"), + (slugify("DVR Movie Fallback Template"), "DVR Movie Fallback Template", "Recordings/Movies/{start}.mkv"), + (slugify("DVR Comskip Enabled"), "DVR Comskip Enabled", "false"), + ] + + for key, name, value in defaults: + CoreSettings.objects.get_or_create(key=key, defaults={"name": name, "value": value}) + + +class Migration(migrations.Migration): + + dependencies = [ + ("core", "0014_default_proxy_settings"), + ] + + operations = [ + migrations.RunPython(add_dvr_defaults), + ] diff --git a/core/migrations/0016_update_dvr_template_paths.py b/core/migrations/0016_update_dvr_template_paths.py new file mode 100644 index 00000000..5e729c47 --- /dev/null +++ b/core/migrations/0016_update_dvr_template_paths.py @@ -0,0 +1,61 @@ +# Generated manually to update DVR template paths + +from django.db import migrations +from django.utils.text import slugify + + +def update_dvr_template_paths(apps, schema_editor): + """Remove 'Recordings/' prefix from DVR template paths""" + CoreSettings = apps.get_model("core", "CoreSettings") + + # Define the updates needed + updates = [ + (slugify("DVR TV Template"), "TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv"), + (slugify("DVR Movie Template"), "Movies/{title} ({year}).mkv"), + (slugify("DVR TV Fallback Template"), "TV_Shows/{show}/{start}.mkv"), + (slugify("DVR Movie Fallback Template"), "Movies/{start}.mkv"), + ] + + # Update each setting + for key, new_value in updates: + try: + setting = CoreSettings.objects.get(key=key) + setting.value = new_value + setting.save() + print(f"Updated {setting.name}: {new_value}") + except CoreSettings.DoesNotExist: + print(f"Setting with key '{key}' not found - skipping") + + +def reverse_dvr_template_paths(apps, schema_editor): + """Add back 'Recordings/' prefix to DVR template paths""" + CoreSettings = apps.get_model("core", "CoreSettings") + + # Define the reverse 
updates (add back Recordings/ prefix) + updates = [ + (slugify("DVR TV Template"), "Recordings/TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv"), + (slugify("DVR Movie Template"), "Recordings/Movies/{title} ({year}).mkv"), + (slugify("DVR TV Fallback Template"), "Recordings/TV_Shows/{show}/{start}.mkv"), + (slugify("DVR Movie Fallback Template"), "Recordings/Movies/{start}.mkv"), + ] + + # Update each setting back to original + for key, original_value in updates: + try: + setting = CoreSettings.objects.get(key=key) + setting.value = original_value + setting.save() + print(f"Reverted {setting.name}: {original_value}") + except CoreSettings.DoesNotExist: + print(f"Setting with key '{key}' not found - skipping") + + +class Migration(migrations.Migration): + + dependencies = [ + ("core", "0015_dvr_templates"), + ] + + operations = [ + migrations.RunPython(update_dvr_template_paths, reverse_dvr_template_paths), + ] \ No newline at end of file diff --git a/core/models.py b/core/models.py index fe7e9eb5..3a5895ba 100644 --- a/core/models.py +++ b/core/models.py @@ -1,26 +1,27 @@ # core/models.py +from django.conf import settings from django.db import models from django.utils.text import slugify +from django.core.exceptions import ValidationError + class UserAgent(models.Model): name = models.CharField( - max_length=512, - unique=True, - help_text="The User-Agent name." + max_length=512, unique=True, help_text="The User-Agent name." ) user_agent = models.CharField( max_length=512, unique=True, - help_text="The complete User-Agent string sent by the client." + help_text="The complete User-Agent string sent by the client.", ) description = models.CharField( max_length=255, blank=True, - help_text="An optional description of the client or device type." + help_text="An optional description of the client or device type.", ) is_active = models.BooleanField( default=True, - help_text="Whether this user agent is currently allowed/recognized." + help_text="Whether this user agent is currently allowed/recognized.", ) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) @@ -28,31 +29,34 @@ class UserAgent(models.Model): def __str__(self): return self.name -PROXY_PROFILE_NAME = 'Proxy' -REDIRECT_PROFILE_NAME = 'Redirect' + +PROXY_PROFILE_NAME = "Proxy" +REDIRECT_PROFILE_NAME = "Redirect" + class StreamProfile(models.Model): name = models.CharField(max_length=255, help_text="Name of the stream profile") command = models.CharField( max_length=255, help_text="Command to execute (e.g., 'yt.sh', 'streamlink', or 'vlc')", - blank=True + blank=True, ) parameters = models.TextField( help_text="Command-line parameters. Use {userAgent} and {streamUrl} as placeholders.", - blank=True + blank=True, ) locked = models.BooleanField( - default=False, - help_text="Protected - can't be deleted or modified" + default=False, help_text="Protected - can't be deleted or modified" + ) + is_active = models.BooleanField( + default=True, help_text="Whether this profile is active" ) - is_active = models.BooleanField(default=True, help_text="Whether this profile is active") user_agent = models.ForeignKey( "UserAgent", on_delete=models.SET_NULL, null=True, blank=True, - help_text="Optional user agent to use. If not set, you can fall back to a default." + help_text="Optional user agent to use. 
If not set, you can fall back to a default.", ) def __str__(self): @@ -77,7 +81,9 @@ class StreamProfile(models.Model): new_value = new_value.pk if field_name not in allowed_fields and orig_value != new_value: - raise ValidationError(f"Cannot modify {field_name} on a protected profile.") + raise ValidationError( + f"Cannot modify {field_name} on a protected profile." + ) super().save(*args, **kwargs) @@ -90,10 +96,14 @@ class StreamProfile(models.Model): for field_name, new_value in kwargs.items(): if field_name not in allowed_fields: - raise ValidationError(f"Cannot modify {field_name} on a protected profile.") + raise ValidationError( + f"Cannot modify {field_name} on a protected profile." + ) # Ensure user_agent ForeignKey updates correctly - if field_name == "user_agent" and isinstance(new_value, cls._meta.get_field("user_agent").related_model): + if field_name == "user_agent" and isinstance( + new_value, cls._meta.get_field("user_agent").related_model + ): new_value = new_value.pk # Convert object to ID if needed setattr(instance, field_name, new_value) @@ -122,7 +132,8 @@ class StreamProfile(models.Model): # Split the command and iterate through each part to apply replacements cmd = [self.command] + [ - self._replace_in_part(part, replacements) for part in self.parameters.split() + self._replace_in_part(part, replacements) + for part in self.parameters.split() ] return cmd @@ -134,11 +145,25 @@ class StreamProfile(models.Model): return part -DEFAULT_USER_AGENT_KEY= slugify("Default User-Agent") +DEFAULT_USER_AGENT_KEY = slugify("Default User-Agent") DEFAULT_STREAM_PROFILE_KEY = slugify("Default Stream Profile") STREAM_HASH_KEY = slugify("M3U Hash Key") PREFERRED_REGION_KEY = slugify("Preferred Region") AUTO_IMPORT_MAPPED_FILES = slugify("Auto-Import Mapped Files") +NETWORK_ACCESS = slugify("Network Access") +PROXY_SETTINGS_KEY = slugify("Proxy Settings") +DVR_TV_TEMPLATE_KEY = slugify("DVR TV Template") +DVR_MOVIE_TEMPLATE_KEY = slugify("DVR Movie Template") +DVR_SERIES_RULES_KEY = slugify("DVR Series Rules") +DVR_TV_FALLBACK_DIR_KEY = slugify("DVR TV Fallback Dir") +DVR_TV_FALLBACK_TEMPLATE_KEY = slugify("DVR TV Fallback Template") +DVR_MOVIE_FALLBACK_TEMPLATE_KEY = slugify("DVR Movie Fallback Template") +DVR_COMSKIP_ENABLED_KEY = slugify("DVR Comskip Enabled") +DVR_COMSKIP_CUSTOM_PATH_KEY = slugify("DVR Comskip Custom Path") +DVR_PRE_OFFSET_MINUTES_KEY = slugify("DVR Pre-Offset Minutes") +DVR_POST_OFFSET_MINUTES_KEY = slugify("DVR Post-Offset Minutes") +SYSTEM_TIME_ZONE_KEY = slugify("System Time Zone") + class CoreSettings(models.Model): key = models.CharField( @@ -183,3 +208,170 @@ class CoreSettings(models.Model): return cls.objects.get(key=AUTO_IMPORT_MAPPED_FILES).value except cls.DoesNotExist: return None + + @classmethod + def get_proxy_settings(cls): + """Retrieve proxy settings as dict (or return defaults if not found).""" + try: + import json + settings_json = cls.objects.get(key=PROXY_SETTINGS_KEY).value + return json.loads(settings_json) + except (cls.DoesNotExist, json.JSONDecodeError): + # Return defaults if not found or invalid JSON + return { + "buffering_timeout": 15, + "buffering_speed": 1.0, + "redis_chunk_ttl": 60, + "channel_shutdown_delay": 0, + "channel_init_grace_period": 5, + } + + @classmethod + def get_dvr_tv_template(cls): + try: + return cls.objects.get(key=DVR_TV_TEMPLATE_KEY).value + except cls.DoesNotExist: + # Default: relative to recordings root (/data/recordings) + return "TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv" + + @classmethod + def 
get_dvr_movie_template(cls): + try: + return cls.objects.get(key=DVR_MOVIE_TEMPLATE_KEY).value + except cls.DoesNotExist: + return "Movies/{title} ({year}).mkv" + + @classmethod + def get_dvr_tv_fallback_dir(cls): + """Folder name to use when a TV episode has no season/episode information. + Defaults to 'TV_Show' to match existing behavior but can be overridden in settings. + """ + try: + return cls.objects.get(key=DVR_TV_FALLBACK_DIR_KEY).value or "TV_Shows" + except cls.DoesNotExist: + return "TV_Shows" + + @classmethod + def get_dvr_tv_fallback_template(cls): + """Full path template used when season/episode are missing for a TV airing.""" + try: + return cls.objects.get(key=DVR_TV_FALLBACK_TEMPLATE_KEY).value + except cls.DoesNotExist: + # default requested by user + return "TV_Shows/{show}/{start}.mkv" + + @classmethod + def get_dvr_movie_fallback_template(cls): + """Full path template used when movie metadata is incomplete.""" + try: + return cls.objects.get(key=DVR_MOVIE_FALLBACK_TEMPLATE_KEY).value + except cls.DoesNotExist: + return "Movies/{start}.mkv" + + @classmethod + def get_dvr_comskip_enabled(cls): + """Return boolean-like string value ('true'/'false') for comskip enablement.""" + try: + val = cls.objects.get(key=DVR_COMSKIP_ENABLED_KEY).value + return str(val).lower() in ("1", "true", "yes", "on") + except cls.DoesNotExist: + return False + + @classmethod + def get_dvr_comskip_custom_path(cls): + """Return configured comskip.ini path or empty string if unset.""" + try: + return cls.objects.get(key=DVR_COMSKIP_CUSTOM_PATH_KEY).value + except cls.DoesNotExist: + return "" + + @classmethod + def set_dvr_comskip_custom_path(cls, path: str | None): + """Persist the comskip.ini path setting, normalizing nulls to empty string.""" + value = (path or "").strip() + obj, _ = cls.objects.get_or_create( + key=DVR_COMSKIP_CUSTOM_PATH_KEY, + defaults={"name": "DVR Comskip Custom Path", "value": value}, + ) + if obj.value != value: + obj.value = value + obj.save(update_fields=["value"]) + return value + + @classmethod + def get_dvr_pre_offset_minutes(cls): + """Minutes to start recording before scheduled start (default 0).""" + try: + val = cls.objects.get(key=DVR_PRE_OFFSET_MINUTES_KEY).value + return int(val) + except cls.DoesNotExist: + return 0 + except Exception: + try: + return int(float(val)) + except Exception: + return 0 + + @classmethod + def get_dvr_post_offset_minutes(cls): + """Minutes to stop recording after scheduled end (default 0).""" + try: + val = cls.objects.get(key=DVR_POST_OFFSET_MINUTES_KEY).value + return int(val) + except cls.DoesNotExist: + return 0 + except Exception: + try: + return int(float(val)) + except Exception: + return 0 + + @classmethod + def get_system_time_zone(cls): + """Return configured system time zone or fall back to Django settings.""" + try: + value = cls.objects.get(key=SYSTEM_TIME_ZONE_KEY).value + if value: + return value + except cls.DoesNotExist: + pass + return getattr(settings, "TIME_ZONE", "UTC") or "UTC" + + @classmethod + def set_system_time_zone(cls, tz_name: str | None): + """Persist the desired system time zone identifier.""" + value = (tz_name or "").strip() or getattr(settings, "TIME_ZONE", "UTC") or "UTC" + obj, _ = cls.objects.get_or_create( + key=SYSTEM_TIME_ZONE_KEY, + defaults={"name": "System Time Zone", "value": value}, + ) + if obj.value != value: + obj.value = value + obj.save(update_fields=["value"]) + return value + + @classmethod + def get_dvr_series_rules(cls): + """Return list of series recording rules. 
Each: {tvg_id, title, mode: 'all'|'new'}""" + import json + try: + raw = cls.objects.get(key=DVR_SERIES_RULES_KEY).value + rules = json.loads(raw) if raw else [] + if isinstance(rules, list): + return rules + return [] + except cls.DoesNotExist: + # Initialize empty if missing + cls.objects.create(key=DVR_SERIES_RULES_KEY, name="DVR Series Rules", value="[]") + return [] + + @classmethod + def set_dvr_series_rules(cls, rules): + import json + try: + obj, _ = cls.objects.get_or_create(key=DVR_SERIES_RULES_KEY, defaults={"name": "DVR Series Rules", "value": "[]"}) + obj.value = json.dumps(rules) + obj.save(update_fields=["value"]) + return rules + except Exception: + return rules diff --git a/core/serializers.py b/core/serializers.py index c80ad630..c6029bc4 100644 --- a/core/serializers.py +++ b/core/serializers.py @@ -1,19 +1,100 @@ # core/serializers.py +import json +import ipaddress from rest_framework import serializers -from .models import UserAgent, StreamProfile, CoreSettings +from .models import CoreSettings, UserAgent, StreamProfile, NETWORK_ACCESS + class UserAgentSerializer(serializers.ModelSerializer): class Meta: model = UserAgent - fields = ['id', 'name', 'user_agent', 'description', 'is_active', 'created_at', 'updated_at'] + fields = [ + "id", + "name", + "user_agent", + "description", + "is_active", + "created_at", + "updated_at", + ] + class StreamProfileSerializer(serializers.ModelSerializer): class Meta: model = StreamProfile - fields = ['id', 'name', 'command', 'parameters', 'is_active', 'user_agent', 'locked'] + fields = [ + "id", + "name", + "command", + "parameters", + "is_active", + "user_agent", + "locked", + ] + class CoreSettingsSerializer(serializers.ModelSerializer): class Meta: model = CoreSettings - fields = '__all__' + fields = "__all__" + + def update(self, instance, validated_data): + if instance.key == NETWORK_ACCESS: + errors = False + invalid = {} + value = json.loads(validated_data.get("value")) + for key, val in value.items(): + cidrs = val.split(",") + for cidr in cidrs: + try: + ipaddress.ip_network(cidr) + except: + errors = True + if key not in invalid: + invalid[key] = [] + invalid[key].append(cidr) + + if errors: + # Perform CIDR validation + raise serializers.ValidationError( + { + "message": "Invalid CIDRs", + "value": invalid, + } + ) + + return super().update(instance, validated_data) + +class ProxySettingsSerializer(serializers.Serializer): + """Serializer for proxy settings stored as JSON in CoreSettings""" + buffering_timeout = serializers.IntegerField(min_value=0, max_value=300) + buffering_speed = serializers.FloatField(min_value=0.1, max_value=10.0) + redis_chunk_ttl = serializers.IntegerField(min_value=10, max_value=3600) + channel_shutdown_delay = serializers.IntegerField(min_value=0, max_value=300) + channel_init_grace_period = serializers.IntegerField(min_value=0, max_value=60) + + def validate_buffering_timeout(self, value): + if value < 0 or value > 300: + raise serializers.ValidationError("Buffering timeout must be between 0 and 300 seconds") + return value + + def validate_buffering_speed(self, value): + if value < 0.1 or value > 10.0: + raise serializers.ValidationError("Buffering speed must be between 0.1 and 10.0") + return value + + def validate_redis_chunk_ttl(self, value): + if value < 10 or value > 3600: + raise serializers.ValidationError("Redis chunk TTL must be between 10 and 3600 seconds") + return value + + def validate_channel_shutdown_delay(self, value): + if value < 0 or value > 300: + raise 
serializers.ValidationError("Channel shutdown delay must be between 0 and 300 seconds") + return value + + def validate_channel_init_grace_period(self, value): + if value < 0 or value > 60: + raise serializers.ValidationError("Channel init grace period must be between 0 and 60 seconds") + return value diff --git a/core/tasks.py b/core/tasks.py index 0fdaedf7..f757613b 100644 --- a/core/tasks.py +++ b/core/tasks.py @@ -1,4 +1,3 @@ -# yourapp/tasks.py from celery import shared_task from channels.layers import get_channel_layer from asgiref.sync import async_to_sync @@ -7,7 +6,7 @@ import logging import re import time import os -from core.utils import RedisClient, send_websocket_update +from core.utils import RedisClient, send_websocket_update, acquire_task_lock, release_task_lock from apps.proxy.ts_proxy.channel_status import ChannelStatus from apps.m3u.models import M3UAccount from apps.epg.models import EPGSource @@ -21,10 +20,12 @@ logger = logging.getLogger(__name__) EPG_WATCH_DIR = '/data/epgs' M3U_WATCH_DIR = '/data/m3us' +LOGO_WATCH_DIR = '/data/logos' MIN_AGE_SECONDS = 6 STARTUP_SKIP_AGE = 30 REDIS_PREFIX = "processed_file:" REDIS_TTL = 60 * 60 * 24 * 3 # expire keys after 3 days (optional) +SUPPORTED_LOGO_FORMATS = ['.jpg', '.jpeg', '.png', '.gif', '.webp', '.bmp', '.svg'] # Store the last known value to compare with new data last_known_data = {} @@ -56,10 +57,11 @@ def scan_and_process_files(): global _first_scan_completed redis_client = RedisClient.get_client() now = time.time() + # Check if directories exist - dirs_exist = all(os.path.exists(d) for d in [M3U_WATCH_DIR, EPG_WATCH_DIR]) + dirs_exist = all(os.path.exists(d) for d in [M3U_WATCH_DIR, EPG_WATCH_DIR, LOGO_WATCH_DIR]) if not dirs_exist: - throttled_log(logger.warning, f"Watch directories missing: M3U ({os.path.exists(M3U_WATCH_DIR)}), EPG ({os.path.exists(EPG_WATCH_DIR)})", "watch_dirs_missing") + throttled_log(logger.warning, f"Watch directories missing: M3U ({os.path.exists(M3U_WATCH_DIR)}), EPG ({os.path.exists(EPG_WATCH_DIR)}), LOGO ({os.path.exists(LOGO_WATCH_DIR)})", "watch_dirs_missing") # Process M3U files m3u_files = [f for f in os.listdir(M3U_WATCH_DIR) @@ -266,6 +268,126 @@ def scan_and_process_files(): logger.trace(f"EPG processing complete: {epg_processed} processed, {epg_skipped} skipped, {epg_errors} errors") + # Process Logo files (including subdirectories) + try: + logo_files = [] + if os.path.exists(LOGO_WATCH_DIR): + for root, dirs, files in os.walk(LOGO_WATCH_DIR): + for filename in files: + logo_files.append(os.path.join(root, filename)) + logger.trace(f"Found {len(logo_files)} files in LOGO directory (including subdirectories)") + except Exception as e: + logger.error(f"Error listing LOGO directory: {e}") + logo_files = [] + + logo_processed = 0 + logo_skipped = 0 + logo_errors = 0 + + for filepath in logo_files: + filename = os.path.basename(filepath) + + if not os.path.isfile(filepath): + if _first_scan_completed: + logger.trace(f"Skipping {filename}: Not a file") + else: + logger.debug(f"Skipping {filename}: Not a file") + logo_skipped += 1 + continue + + # Check if file has supported logo extension + file_ext = os.path.splitext(filename)[1].lower() + if file_ext not in SUPPORTED_LOGO_FORMATS: + if _first_scan_completed: + logger.trace(f"Skipping {filename}: Not a supported logo format") + else: + logger.debug(f"Skipping {filename}: Not a supported logo format") + logo_skipped += 1 + continue + + mtime = os.path.getmtime(filepath) + age = now - mtime + redis_key = REDIS_PREFIX + filepath + 
stored_mtime = redis_client.get(redis_key) + + # Check if logo already exists in database + if not stored_mtime and age > STARTUP_SKIP_AGE: + from apps.channels.models import Logo + existing_logo = Logo.objects.filter(url=filepath).exists() + if existing_logo: + if _first_scan_completed: + logger.trace(f"Skipping {filename}: Already exists in database") + else: + logger.debug(f"Skipping {filename}: Already exists in database") + redis_client.set(redis_key, mtime, ex=REDIS_TTL) + logo_skipped += 1 + continue + else: + logger.debug(f"Processing {filename} despite age: Not found in database") + + # File too new β€” probably still being written + if age < MIN_AGE_SECONDS: + if _first_scan_completed: + logger.trace(f"Skipping {filename}: Too new, possibly still being written (age={age}s)") + else: + logger.debug(f"Skipping {filename}: Too new, possibly still being written (age={age}s)") + logo_skipped += 1 + continue + + # Skip if we've already processed this mtime + if stored_mtime and float(stored_mtime) >= mtime: + if _first_scan_completed: + logger.trace(f"Skipping {filename}: Already processed this version") + else: + logger.debug(f"Skipping {filename}: Already processed this version") + logo_skipped += 1 + continue + + try: + from apps.channels.models import Logo + + # Create logo entry with just the filename (without extension) as name + logo_name = os.path.splitext(filename)[0] + + logo, created = Logo.objects.get_or_create( + url=filepath, + defaults={ + "name": logo_name, + } + ) + + redis_client.set(redis_key, mtime, ex=REDIS_TTL) + + if created: + logger.info(f"Created new logo entry: {logo_name}") + else: + logger.debug(f"Logo entry already exists: {logo_name}") + + logo_processed += 1 + + except Exception as e: + logger.error(f"Error processing logo file {filename}: {str(e)}", exc_info=True) + logo_errors += 1 + continue + + logger.trace(f"LOGO processing complete: {logo_processed} processed, {logo_skipped} skipped, {logo_errors} errors") + + # Send summary websocket update for logo processing + if logo_processed > 0 or logo_errors > 0: + send_websocket_update( + "updates", + "update", + { + "success": True, + "type": "logo_processing_summary", + "processed": logo_processed, + "skipped": logo_skipped, + "errors": logo_errors, + "total_files": len(logo_files), + "message": f"Logo processing complete: {logo_processed} processed, {logo_skipped} skipped, {logo_errors} errors" + } + ) + # Mark that the first scan is complete _first_scan_completed = True @@ -312,32 +434,243 @@ def fetch_channel_stats(): @shared_task def rehash_streams(keys): - batch_size = 1000 - queryset = Stream.objects.all() + """ + Regenerate stream hashes for all streams based on current hash key configuration. + This task checks for and blocks M3U refresh tasks to prevent conflicts. 
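+    Streams that resolve to the same hash are merged: channel assignments are moved to the surviving stream and the duplicate record is deleted.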
+ """ + from apps.channels.models import Stream + from apps.m3u.models import M3UAccount - hash_keys = {} - total_records = queryset.count() - for start in range(0, total_records, batch_size): - with transaction.atomic(): - batch = queryset[start:start + batch_size] - for obj in batch: - stream_hash = Stream.generate_hash_key(obj.name, obj.url, obj.tvg_id, keys) - if stream_hash in hash_keys: - # Handle duplicate keys and remove any without channels - stream_channels = ChannelStream.objects.filter(stream_id=obj.id).count() - if stream_channels == 0: + logger.info("Starting stream rehash process") + + # Get all M3U account IDs for locking + m3u_account_ids = list(M3UAccount.objects.filter(is_active=True).values_list('id', flat=True)) + + # Check if any M3U refresh tasks are currently running + blocked_accounts = [] + for account_id in m3u_account_ids: + if not acquire_task_lock('refresh_single_m3u_account', account_id): + blocked_accounts.append(account_id) + + if blocked_accounts: + # Release any locks we did acquire + for account_id in m3u_account_ids: + if account_id not in blocked_accounts: + release_task_lock('refresh_single_m3u_account', account_id) + + logger.warning(f"Rehash blocked: M3U refresh tasks running for accounts: {blocked_accounts}") + + # Send WebSocket notification to inform user + send_websocket_update( + 'updates', + 'update', + { + "success": False, + "type": "stream_rehash", + "action": "blocked", + "blocked_accounts": len(blocked_accounts), + "total_accounts": len(m3u_account_ids), + "message": f"Stream rehash blocked: M3U refresh tasks are currently running for {len(blocked_accounts)} accounts. Please try again later." + } + ) + + return f"Rehash blocked: M3U refresh tasks running for {len(blocked_accounts)} accounts" + + acquired_locks = m3u_account_ids.copy() + + try: + batch_size = 1000 + queryset = Stream.objects.all() + + # Track statistics + total_processed = 0 + duplicates_merged = 0 + hash_keys = {} + + total_records = queryset.count() + logger.info(f"Starting rehash of {total_records} streams with keys: {keys}") + + # Send initial WebSocket update + send_websocket_update( + 'updates', + 'update', + { + "success": True, + "type": "stream_rehash", + "action": "starting", + "progress": 0, + "total_records": total_records, + "message": f"Starting rehash of {total_records} streams" + } + ) + + for start in range(0, total_records, batch_size): + batch_processed = 0 + batch_duplicates = 0 + + with transaction.atomic(): + batch = queryset[start:start + batch_size] + + for obj in batch: + # Generate new hash + new_hash = Stream.generate_hash_key(obj.name, obj.url, obj.tvg_id, keys, m3u_id=obj.m3u_account_id) + + # Check if this hash already exists in our tracking dict or in database + if new_hash in hash_keys: + # Found duplicate in current batch - merge the streams + existing_stream_id = hash_keys[new_hash] + existing_stream = Stream.objects.get(id=existing_stream_id) + + # Move any channel relationships from duplicate to existing stream + # Handle potential unique constraint violations + for channel_stream in ChannelStream.objects.filter(stream_id=obj.id): + # Check if this channel already has a relationship with the target stream + existing_relationship = ChannelStream.objects.filter( + channel_id=channel_stream.channel_id, + stream_id=existing_stream_id + ).first() + + if existing_relationship: + # Relationship already exists, just delete the duplicate + channel_stream.delete() + else: + # Safe to update the relationship + channel_stream.stream_id = 
existing_stream_id + channel_stream.save() + + # Update the existing stream with the most recent data + if obj.updated_at > existing_stream.updated_at: + existing_stream.name = obj.name + existing_stream.url = obj.url + existing_stream.logo_url = obj.logo_url + existing_stream.tvg_id = obj.tvg_id + existing_stream.m3u_account = obj.m3u_account + existing_stream.channel_group = obj.channel_group + existing_stream.custom_properties = obj.custom_properties + existing_stream.last_seen = obj.last_seen + existing_stream.updated_at = obj.updated_at + existing_stream.save() + + # Delete the duplicate obj.delete() - continue + batch_duplicates += 1 + else: + # Check if hash already exists in database (from previous batches or existing data) + existing_stream = Stream.objects.filter(stream_hash=new_hash).exclude(id=obj.id).first() + if existing_stream: + # Found duplicate in database - merge the streams + # Move any channel relationships from duplicate to existing stream + # Handle potential unique constraint violations + for channel_stream in ChannelStream.objects.filter(stream_id=obj.id): + # Check if this channel already has a relationship with the target stream + existing_relationship = ChannelStream.objects.filter( + channel_id=channel_stream.channel_id, + stream_id=existing_stream.id + ).first() + + if existing_relationship: + # Relationship already exists, just delete the duplicate + channel_stream.delete() + else: + # Safe to update the relationship + channel_stream.stream_id = existing_stream.id + channel_stream.save() + + # Update the existing stream with the most recent data + if obj.updated_at > existing_stream.updated_at: + existing_stream.name = obj.name + existing_stream.url = obj.url + existing_stream.logo_url = obj.logo_url + existing_stream.tvg_id = obj.tvg_id + existing_stream.m3u_account = obj.m3u_account + existing_stream.channel_group = obj.channel_group + existing_stream.custom_properties = obj.custom_properties + existing_stream.last_seen = obj.last_seen + existing_stream.updated_at = obj.updated_at + existing_stream.save() + + # Delete the duplicate + obj.delete() + batch_duplicates += 1 + hash_keys[new_hash] = existing_stream.id + else: + # Update hash for this stream + obj.stream_hash = new_hash + obj.save(update_fields=['stream_hash']) + hash_keys[new_hash] = obj.id + + batch_processed += 1 + + total_processed += batch_processed + duplicates_merged += batch_duplicates + + # Calculate progress percentage + progress_percent = int((total_processed / total_records) * 100) + current_batch = start // batch_size + 1 + total_batches = (total_records // batch_size) + 1 + + # Send progress update via WebSocket + send_websocket_update( + 'updates', + 'update', + { + "success": True, + "type": "stream_rehash", + "action": "processing", + "progress": progress_percent, + "batch": current_batch, + "total_batches": total_batches, + "processed": total_processed, + "duplicates_merged": duplicates_merged, + "message": f"Processed batch {current_batch}/{total_batches}: {batch_processed} streams, {batch_duplicates} duplicates merged" + } + ) + + logger.info(f"Rehashed batch {current_batch}/{total_batches}: " + f"{batch_processed} processed, {batch_duplicates} duplicates merged") + + logger.info(f"Rehashing complete: {total_processed} streams processed, " + f"{duplicates_merged} duplicates merged") + + # Send completion update via WebSocket + send_websocket_update( + 'updates', + 'update', + { + "success": True, + "type": "stream_rehash", + "action": "completed", + "progress": 100, + 
"total_processed": total_processed, + "duplicates_merged": duplicates_merged, + "final_count": total_processed - duplicates_merged, + "message": f"Rehashing complete: {total_processed} streams processed, {duplicates_merged} duplicates merged" + }, + collect_garbage=True # Force garbage collection after completion + ) + + logger.info("Stream rehash completed successfully") + return f"Successfully rehashed {total_processed} streams" + + except Exception as e: + logger.error(f"Error during stream rehash: {e}") + raise + finally: + # Always release all acquired M3U locks + for account_id in acquired_locks: + release_task_lock('refresh_single_m3u_account', account_id) + logger.info(f"Released M3U task locks for {len(acquired_locks)} accounts") - existing_stream_channels = ChannelStream.objects.filter(stream_id=hash_keys[stream_hash]).count() - if existing_stream_channels == 0: - Stream.objects.filter(id=hash_keys[stream_hash]).delete() +@shared_task +def cleanup_vod_persistent_connections(): + """Clean up stale VOD persistent connections""" + try: + from apps.proxy.vod_proxy.connection_manager import VODConnectionManager - obj.stream_hash = stream_hash - obj.save(update_fields=['stream_hash']) - hash_keys[stream_hash] = obj.id + # Clean up connections older than 30 minutes + VODConnectionManager.cleanup_stale_persistent_connections(max_age_seconds=1800) + logger.info("VOD persistent connection cleanup completed") - logger.debug(f"Re-hashed {batch_size} streams") - - logger.debug(f"Re-hashing complete") + except Exception as e: + logger.error(f"Error during VOD persistent connection cleanup: {e}") diff --git a/core/utils.py b/core/utils.py index 9951ce26..38b31144 100644 --- a/core/utils.py +++ b/core/utils.py @@ -3,11 +3,14 @@ import logging import time import os import threading +import re from django.conf import settings from redis.exceptions import ConnectionError, TimeoutError from django.core.cache import cache from asgiref.sync import async_to_sync from channels.layers import get_channel_layer +from django.core.validators import URLValidator +from django.core.exceptions import ValidationError import gc logger = logging.getLogger(__name__) @@ -15,6 +18,29 @@ logger = logging.getLogger(__name__) # Import the command detector from .command_utils import is_management_command +def natural_sort_key(text): + """ + Convert a string into a list of string and number chunks for natural sorting. + "PPV 10" becomes ['PPV ', 10] so it sorts correctly with "PPV 2". + + This function enables natural/alphanumeric sorting where numbers within strings + are treated as actual numbers rather than strings. + + Args: + text (str): The text to convert for sorting + + Returns: + list: A list of strings and integers for proper sorting + + Example: + >>> sorted(['PPV 1', 'PPV 10', 'PPV 2'], key=natural_sort_key) + ['PPV 1', 'PPV 2', 'PPV 10'] + """ + def convert(chunk): + return int(chunk) if chunk.isdigit() else chunk.lower() + + return [convert(c) for c in re.split('([0-9]+)', text)] + class RedisClient: _client = None _pubsub_client = None @@ -330,3 +356,35 @@ def is_protected_path(file_path): return True return False + +def validate_flexible_url(value): + """ + Custom URL validator that accepts URLs with hostnames that aren't FQDNs. + This allows URLs like "http://hostname/" which + Django's standard URLValidator rejects. 
+ """ + if not value: + return # Allow empty values since the field is nullable + + # Create a standard Django URL validator + url_validator = URLValidator() + + try: + # First try the standard validation + url_validator(value) + except ValidationError as e: + # If standard validation fails, check if it's a non-FQDN hostname + import re + + # More flexible pattern for non-FQDN hostnames with paths + # Matches: http://hostname, https://hostname/, http://hostname:port/path/to/file.xml, rtp://192.168.2.1, rtsp://192.168.178.1, udp://239.0.0.1:1234 + # Also matches FQDNs for rtsp/rtp/udp protocols: rtsp://FQDN/path?query=value + # Also supports authentication: rtsp://user:pass@hostname/path + non_fqdn_pattern = r'^(rts?p|https?|udp)://([a-zA-Z0-9_\-\.]+:[^\s@]+@)?([a-zA-Z0-9]([a-zA-Z0-9\-\.]{0,61}[a-zA-Z0-9])?|[0-9.]+)?(\:[0-9]+)?(/[^\s]*)?$' + non_fqdn_match = re.match(non_fqdn_pattern, value) + + if non_fqdn_match: + return # Accept non-FQDN hostnames and rtsp/rtp/udp URLs with optional authentication + + # If it doesn't match our flexible patterns, raise the original error + raise ValidationError("Enter a valid URL.") diff --git a/core/views.py b/core/views.py index 397783fb..d10df027 100644 --- a/core/views.py +++ b/core/views.py @@ -73,7 +73,6 @@ def stream_view(request, channel_uuid): default_profile = next((obj for obj in m3u_profiles if obj.is_default), None) profiles = [obj for obj in m3u_profiles if not obj.is_default] - # -- Loop through profiles and pick the first active one -- for profile in [default_profile] + profiles: logger.debug(f'Checking profile {profile.name}...') @@ -174,7 +173,7 @@ def stream_view(request, channel_uuid): persistent_lock.release() logger.debug("Persistent lock released for channel ID=%s", channel.id) - return StreamingHttpResponse( - stream_generator(process, stream, persistent_lock), - content_type="video/MP2T" - ) + return StreamingHttpResponse( + stream_generator(process, stream, persistent_lock), + content_type="video/MP2T" + ) diff --git a/core/xtream_codes.py b/core/xtream_codes.py index 17f3eaad..9b56197a 100644 --- a/core/xtream_codes.py +++ b/core/xtream_codes.py @@ -17,20 +17,29 @@ class Client: # Fix: Properly handle all possible user_agent input types if user_agent: if isinstance(user_agent, str): - # Direct string user agent user_agent_string = user_agent elif hasattr(user_agent, 'user_agent'): - # UserAgent model object user_agent_string = user_agent.user_agent else: - # Fallback for any other type logger.warning(f"Unexpected user_agent type: {type(user_agent)}, using default") user_agent_string = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)' else: - # No user agent provided user_agent_string = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)' - self.headers = {'User-Agent': user_agent_string} + # Create persistent session + self.session = requests.Session() + self.session.headers.update({'User-Agent': user_agent_string}) + + # Configure connection pooling + adapter = requests.adapters.HTTPAdapter( + pool_connections=1, + pool_maxsize=2, + max_retries=3, + pool_block=False + ) + self.session.mount('http://', adapter) + self.session.mount('https://', adapter) + self.server_info = None def _normalize_url(self, url): @@ -53,11 +62,35 @@ class Client: url = f"{self.server_url}/{endpoint}" logger.debug(f"XC API Request: {url} with params: {params}") - response = requests.get(url, params=params, headers=self.headers, timeout=30) + response = self.session.get(url, params=params, timeout=30) response.raise_for_status() - data = response.json() - 
logger.debug(f"XC API Response: {url} status code: {response.status_code}") + # Check if response is empty + if not response.content: + error_msg = f"XC API returned empty response from {url}" + logger.error(error_msg) + raise ValueError(error_msg) + + # Check for common blocking responses before trying to parse JSON + response_text = response.text.strip() + if response_text.lower() in ['blocked', 'forbidden', 'access denied', 'unauthorized']: + error_msg = f"XC API request blocked by server from {url}. Response: {response_text}" + logger.error(error_msg) + logger.error(f"This may indicate IP blocking, User-Agent filtering, or rate limiting") + raise ValueError(error_msg) + + try: + data = response.json() + except requests.exceptions.JSONDecodeError as json_err: + error_msg = f"XC API returned invalid JSON from {url}. Response: {response.text[:1000]}" + logger.error(error_msg) + logger.error(f"JSON decode error: {str(json_err)}") + + # Check if it looks like an HTML error page + if response_text.startswith('<'): + logger.error("Response appears to be HTML - server may be returning an error page") + + raise ValueError(error_msg) # Check for XC-specific error responses if isinstance(data, dict) and data.get('user_info') is None and 'error' in data: @@ -103,6 +136,47 @@ class Client: logger.error(traceback.format_exc()) raise + def get_account_info(self): + """Get account information from the last authentication response""" + if not self.server_info: + raise ValueError("Not authenticated. Call authenticate() first.") + + from datetime import datetime + + # Extract relevant account information + user_info = self.server_info.get('user_info', {}) + server_info = self.server_info.get('server_info', {}) + + account_info = { + 'last_refresh': datetime.utcnow().isoformat() + 'Z', # Explicit UTC with Z suffix + 'auth_timestamp': datetime.utcnow().timestamp(), + 'user_info': { + 'username': user_info.get('username'), + 'password': user_info.get('password'), + 'message': user_info.get('message'), + 'auth': user_info.get('auth'), + 'status': user_info.get('status'), + 'exp_date': user_info.get('exp_date'), + 'is_trial': user_info.get('is_trial'), + 'active_cons': user_info.get('active_cons'), + 'created_at': user_info.get('created_at'), + 'max_connections': user_info.get('max_connections'), + 'allowed_output_formats': user_info.get('allowed_output_formats', []) + }, + 'server_info': { + 'url': server_info.get('url'), + 'port': server_info.get('port'), + 'https_port': server_info.get('https_port'), + 'server_protocol': server_info.get('server_protocol'), + 'rtmp_port': server_info.get('rtmp_port'), + 'timezone': server_info.get('timezone'), + 'timestamp_now': server_info.get('timestamp_now'), + 'time_now': server_info.get('time_now') + } + } + + return account_info + def get_live_categories(self): """Get live TV categories""" try: @@ -159,6 +233,233 @@ class Client: logger.error(traceback.format_exc()) raise + def get_all_live_streams(self): + """Get all live streams (no category filter)""" + try: + if not self.server_info: + self.authenticate() + + endpoint = "player_api.php" + params = { + 'username': self.username, + 'password': self.password, + 'action': 'get_live_streams' + # No category_id = get all streams + } + + streams = self._make_request(endpoint, params) + + if not isinstance(streams, list): + error_msg = f"Invalid streams response for all live streams: {streams}" + logger.error(error_msg) + raise ValueError(error_msg) + + logger.info(f"Successfully retrieved {len(streams)} total live 
streams") + return streams + except Exception as e: + logger.error(f"Failed to get all live streams: {str(e)}") + logger.error(traceback.format_exc()) + raise + def get_stream_url(self, stream_id): """Get the playback URL for a stream""" return f"{self.server_url}/live/{self.username}/{self.password}/{stream_id}.ts" + + def get_episode_stream_url(self, stream_id, container_extension='mp4'): + """Get the playback URL for an episode stream""" + return f"{self.server_url}/series/{self.username}/{self.password}/{stream_id}.{container_extension}" + + def get_vod_stream_url(self, stream_id, container_extension='mp4'): + """Get the playback URL for a VOD stream""" + return f"{self.server_url}/movie/{self.username}/{self.password}/{stream_id}.{container_extension}" + + def get_vod_categories(self): + """Get VOD categories""" + try: + if not self.server_info: + self.authenticate() + + endpoint = "player_api.php" + params = { + 'username': self.username, + 'password': self.password, + 'action': 'get_vod_categories' + } + + categories = self._make_request(endpoint, params) + + if not isinstance(categories, list): + error_msg = f"Invalid VOD categories response: {categories}" + logger.error(error_msg) + raise ValueError(error_msg) + + logger.info(f"Successfully retrieved {len(categories)} VOD categories") + return categories + except Exception as e: + logger.error(f"Failed to get VOD categories: {str(e)}") + logger.error(traceback.format_exc()) + raise + + def get_vod_streams(self, category_id=None): + """Get VOD streams for a specific category""" + try: + if not self.server_info: + self.authenticate() + + endpoint = "player_api.php" + params = { + 'username': self.username, + 'password': self.password, + 'action': 'get_vod_streams' + } + + if category_id: + params['category_id'] = category_id + + streams = self._make_request(endpoint, params) + + if not isinstance(streams, list): + error_msg = f"Invalid VOD streams response for category {category_id}: {streams}" + logger.error(error_msg) + raise ValueError(error_msg) + + logger.info(f"Successfully retrieved {len(streams)} VOD streams for category {category_id}") + return streams + except Exception as e: + logger.error(f"Failed to get VOD streams for category {category_id}: {str(e)}") + logger.error(traceback.format_exc()) + raise + + def get_vod_info(self, vod_id): + """Get detailed information for a specific VOD""" + try: + if not self.server_info: + self.authenticate() + + endpoint = "player_api.php" + params = { + 'username': self.username, + 'password': self.password, + 'action': 'get_vod_info', + 'vod_id': vod_id + } + + vod_info = self._make_request(endpoint, params) + + if not isinstance(vod_info, dict): + error_msg = f"Invalid VOD info response for vod_id {vod_id}: {vod_info}" + logger.error(error_msg) + raise ValueError(error_msg) + + logger.info(f"Successfully retrieved VOD info for vod_id {vod_id}") + return vod_info + except Exception as e: + logger.error(f"Failed to get VOD info for vod_id {vod_id}: {str(e)}") + logger.error(traceback.format_exc()) + raise + + def get_series_categories(self): + """Get series categories""" + try: + if not self.server_info: + self.authenticate() + + endpoint = "player_api.php" + params = { + 'username': self.username, + 'password': self.password, + 'action': 'get_series_categories' + } + + categories = self._make_request(endpoint, params) + + if not isinstance(categories, list): + error_msg = f"Invalid series categories response: {categories}" + logger.error(error_msg) + raise ValueError(error_msg) + + 
logger.info(f"Successfully retrieved {len(categories)} series categories") + return categories + except Exception as e: + logger.error(f"Failed to get series categories: {str(e)}") + logger.error(traceback.format_exc()) + raise + + def get_series(self, category_id=None): + """Get series for a specific category""" + try: + if not self.server_info: + self.authenticate() + + endpoint = "player_api.php" + params = { + 'username': self.username, + 'password': self.password, + 'action': 'get_series' + } + + if category_id: + params['category_id'] = category_id + + series = self._make_request(endpoint, params) + + if not isinstance(series, list): + error_msg = f"Invalid series response for category {category_id}: {series}" + logger.error(error_msg) + raise ValueError(error_msg) + + logger.info(f"Successfully retrieved {len(series)} series for category {category_id}") + return series + except Exception as e: + logger.error(f"Failed to get series for category {category_id}: {str(e)}") + logger.error(traceback.format_exc()) + raise + + def get_series_info(self, series_id): + """Get detailed information for a specific series including episodes""" + try: + if not self.server_info: + self.authenticate() + + endpoint = "player_api.php" + params = { + 'username': self.username, + 'password': self.password, + 'action': 'get_series_info', + 'series_id': series_id + } + + series_info = self._make_request(endpoint, params) + + if not isinstance(series_info, dict): + error_msg = f"Invalid series info response for series_id {series_id}: {series_info}" + logger.error(error_msg) + raise ValueError(error_msg) + + logger.info(f"Successfully retrieved series info for series_id {series_id}") + return series_info + except Exception as e: + logger.error(f"Failed to get series info for series_id {series_id}: {str(e)}") + logger.error(traceback.format_exc()) + raise + + def close(self): + """Close the session and cleanup resources""" + if hasattr(self, 'session') and self.session: + try: + self.session.close() + except Exception as e: + logger.debug(f"Error closing XC session: {e}") + + def __enter__(self): + """Enter the context manager""" + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """Exit the context manager and cleanup resources""" + self.close() + return False # Don't suppress exceptions + + def __del__(self): + """Ensure session is closed when object is destroyed""" + self.close() diff --git a/debian_install.sh b/debian_install.sh old mode 100644 new mode 100755 index 0e41139e..bda506b1 --- a/debian_install.sh +++ b/debian_install.sh @@ -1,208 +1,243 @@ #!/usr/bin/env bash -set -e +set -euo pipefail +IFS=$'\n\t' + +# Root check +if [[ $EUID -ne 0 ]]; then + echo "[ERROR] This script must be run as root." >&2 + exit 1 +fi + +trap 'echo -e "\n[ERROR] Line $LINENO failed. Exiting." >&2; exit 1' ERR ############################################################################## # 0) Warning / Disclaimer ############################################################################## -echo "**************************************************************" -echo "WARNING: While we do not anticipate any problems, we disclaim all" -echo "responsibility for anything that happens to your machine." -echo "" -echo "This script is intended for **Debian-based operating systems only**." -echo "Running it on other distributions WILL cause unexpected issues." -echo "" -echo "This script is **NOT RECOMMENDED** for use on your primary machine." 
-echo "For safety and best results, we strongly advise running this inside a" -echo "clean virtual machine (VM) or LXC container environment." -echo "" -echo "Additionally, there is NO SUPPORT for this method; Docker is the only" -echo "officially supported way to run Dispatcharr." -echo "**************************************************************" -echo "" -echo "If you wish to proceed, type \"I understand\" and press Enter." -read user_input - -if [ "$user_input" != "I understand" ]; then - echo "Exiting script..." - exit 1 -fi - +show_disclaimer() { + echo "**************************************************************" + echo "WARNING: While we do not anticipate any problems, we disclaim all" + echo "responsibility for anything that happens to your machine." + echo "" + echo "This script is intended for **Debian-based operating systems only**." + echo "Running it on other distributions WILL cause unexpected issues." + echo "" + echo "This script is **NOT RECOMMENDED** for use on your primary machine." + echo "For safety and best results, we strongly advise running this inside a" + echo "clean virtual machine (VM) or LXC container environment." + echo "" + echo "Additionally, there is NO SUPPORT for this method; Docker is the only" + echo "officially supported way to run Dispatcharr." + echo "**************************************************************" + echo "" + echo "If you wish to proceed, type \"I understand\" and press Enter." + read user_input + if [ "$user_input" != "I understand" ]; then + echo "Exiting script..." + exit 1 + fi +} ############################################################################## # 1) Configuration ############################################################################## -# Linux user/group under which Dispatcharr processes will run -DISPATCH_USER="dispatcharr" -DISPATCH_GROUP="dispatcharr" - -# Where Dispatcharr source code should live -APP_DIR="/opt/dispatcharr" - -# Git branch to clone (e.g., "main" or "dev") -DISPATCH_BRANCH="dev" - -# PostgreSQL settings -POSTGRES_DB="dispatcharr" -POSTGRES_USER="dispatch" -POSTGRES_PASSWORD="secret" - -# The port on which Nginx will listen for HTTP -NGINX_HTTP_PORT="9191" - -# The TCP port for DaphnΓ© (Django Channels) -WEBSOCKET_PORT="8001" - -# Directory inside /run/ for our socket; full path becomes /run/dispatcharr/dispatcharr.sock -GUNICORN_RUNTIME_DIR="dispatcharr" -GUNICORN_SOCKET="/run/${GUNICORN_RUNTIME_DIR}/dispatcharr.sock" +configure_variables() { + DISPATCH_USER="dispatcharr" + DISPATCH_GROUP="dispatcharr" + APP_DIR="/opt/dispatcharr" + DISPATCH_BRANCH="main" + POSTGRES_DB="dispatcharr" + POSTGRES_USER="dispatch" + POSTGRES_PASSWORD="secret" + NGINX_HTTP_PORT="9191" + WEBSOCKET_PORT="8001" + GUNICORN_RUNTIME_DIR="dispatcharr" + GUNICORN_SOCKET="/run/${GUNICORN_RUNTIME_DIR}/dispatcharr.sock" + PYTHON_BIN=$(command -v python3) + SYSTEMD_DIR="/etc/systemd/system" + NGINX_SITE="/etc/nginx/sites-available/dispatcharr" +} ############################################################################## # 2) Install System Packages ############################################################################## -echo ">>> Installing system packages..." -apt-get update -apt-get install -y \ - git \ - curl \ - wget \ - build-essential \ - gcc \ - libpcre3-dev \ - libpq-dev \ - python3-dev \ - python3-venv \ - python3-pip \ - nginx \ - redis-server \ - postgresql \ - postgresql-contrib \ - ffmpeg \ - procps \ - streamlink +install_packages() { + echo ">>> Installing system packages..." 
+ apt-get update + declare -a packages=( + git curl wget build-essential gcc libpq-dev + python3-dev python3-venv python3-pip nginx redis-server + postgresql postgresql-contrib ffmpeg procps streamlink + sudo + ) + apt-get install -y --no-install-recommends "${packages[@]}" -# Node.js setup (v23.x from NodeSource) - adjust version if needed -if ! command -v node >/dev/null 2>&1; then - echo ">>> Installing Node.js..." - curl -sL https://deb.nodesource.com/setup_23.x | bash - - apt-get install -y nodejs -fi + if ! command -v node >/dev/null 2>&1; then + echo ">>> Installing Node.js..." + curl -sL https://deb.nodesource.com/setup_24.x | bash - + apt-get install -y nodejs + fi -# Start & enable PostgreSQL and Redis -systemctl enable postgresql redis-server -systemctl start postgresql redis-server + systemctl enable --now postgresql redis-server +} ############################################################################## -# 3) Create Dispatcharr User/Group +# 3) Create User/Group ############################################################################## -if ! getent group "${DISPATCH_GROUP}" >/dev/null; then - echo ">>> Creating group: ${DISPATCH_GROUP}" - groupadd "${DISPATCH_GROUP}" -fi - -if ! id -u "${DISPATCH_USER}" >/dev/null; then - echo ">>> Creating user: ${DISPATCH_USER}" - useradd -m -g "${DISPATCH_GROUP}" -s /bin/bash "${DISPATCH_USER}" -fi +create_dispatcharr_user() { + if ! getent group "$DISPATCH_GROUP" >/dev/null; then + groupadd "$DISPATCH_GROUP" + fi + if ! id -u "$DISPATCH_USER" >/dev/null; then + useradd -m -g "$DISPATCH_GROUP" -s /bin/bash "$DISPATCH_USER" + fi +} ############################################################################## -# 4) Configure PostgreSQL Database +# 4) PostgreSQL Setup ############################################################################## -echo ">>> Configuring PostgreSQL..." -su - postgres -c "psql -tc \"SELECT 1 FROM pg_database WHERE datname='${POSTGRES_DB}'\"" | grep -q 1 || \ - su - postgres -c "psql -c \"CREATE DATABASE ${POSTGRES_DB};\"" +setup_postgresql() { + echo ">>> Checking PostgreSQL database and user..." -su - postgres -c "psql -tc \"SELECT 1 FROM pg_roles WHERE rolname='${POSTGRES_USER}'\"" | grep -q 1 || \ - su - postgres -c "psql -c \"CREATE USER ${POSTGRES_USER} WITH PASSWORD '${POSTGRES_PASSWORD}';\"" + db_exists=$(sudo -u postgres psql -tAc "SELECT 1 FROM pg_database WHERE datname='$POSTGRES_DB'") + if [[ "$db_exists" != "1" ]]; then + echo ">>> Creating database '${POSTGRES_DB}'..." + sudo -u postgres createdb "$POSTGRES_DB" + else + echo ">>> Database '${POSTGRES_DB}' already exists, skipping creation." + fi -su - postgres -c "psql -c \"GRANT ALL PRIVILEGES ON DATABASE ${POSTGRES_DB} TO ${POSTGRES_USER};\"" -su - postgres -c "psql -c \"ALTER DATABASE ${POSTGRES_DB} OWNER TO ${POSTGRES_USER};\"" -su - postgres -c "psql -d ${POSTGRES_DB} -c \"ALTER SCHEMA public OWNER TO ${POSTGRES_USER};\"" + user_exists=$(sudo -u postgres psql -tAc "SELECT 1 FROM pg_roles WHERE rolname='$POSTGRES_USER'") + if [[ "$user_exists" != "1" ]]; then + echo ">>> Creating user '${POSTGRES_USER}'..." + sudo -u postgres psql -c "CREATE USER $POSTGRES_USER WITH PASSWORD '$POSTGRES_PASSWORD';" + else + echo ">>> User '${POSTGRES_USER}' already exists, skipping creation." + fi + + echo ">>> Granting privileges..." 
+ sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE $POSTGRES_DB TO $POSTGRES_USER;" + sudo -u postgres psql -c "ALTER DATABASE $POSTGRES_DB OWNER TO $POSTGRES_USER;" + sudo -u postgres psql -d "$POSTGRES_DB" -c "ALTER SCHEMA public OWNER TO $POSTGRES_USER;" +} ############################################################################## -# 5) Clone or Update Dispatcharr Code +# 5) Clone Dispatcharr Repository ############################################################################## -echo ">>> Installing or updating Dispatcharr in ${APP_DIR} ..." +clone_dispatcharr_repo() { + echo ">>> Installing or updating Dispatcharr in ${APP_DIR} ..." + + if [ ! -d "$APP_DIR" ]; then + mkdir -p "$APP_DIR" + chown "$DISPATCH_USER:$DISPATCH_GROUP" "$APP_DIR" + fi -if [ ! -d "${APP_DIR}" ]; then - echo ">>> Cloning repository for the first time..." - mkdir -p "${APP_DIR}" - chown "${DISPATCH_USER}:${DISPATCH_GROUP}" "${APP_DIR}" - su - "${DISPATCH_USER}" -c "git clone -b ${DISPATCH_BRANCH} https://github.com/Dispatcharr/Dispatcharr.git ${APP_DIR}" -else - echo ">>> Updating existing repository..." - su - "${DISPATCH_USER}" <>> Updating existing Dispatcharr repo..." + su - "$DISPATCH_USER" <>> Cloning Dispatcharr repo into ${APP_DIR}..." + rm -rf "$APP_DIR"/* + chown "$DISPATCH_USER:$DISPATCH_GROUP" "$APP_DIR" + su - "$DISPATCH_USER" -c "git clone -b $DISPATCH_BRANCH https://github.com/Dispatcharr/Dispatcharr.git $APP_DIR" + fi +} ############################################################################## -# 6) Create Python Virtual Environment & Install Python Dependencies +# 6) Setup Python Environment ############################################################################## -echo ">>> Setting up Python virtual environment..." -su - "${DISPATCH_USER}" <>> Setting up Python virtual environment..." + su - "$DISPATCH_USER" <>> Linking ffmpeg into the virtual environment..." -ln -sf /usr/bin/ffmpeg ${APP_DIR}/env/bin/ffmpeg + ln -sf /usr/bin/ffmpeg "$APP_DIR/env/bin/ffmpeg" +} ############################################################################## -# 7) Build Frontend (React) +# 7) Build Frontend ############################################################################## -echo ">>> Building frontend..." -su - "${DISPATCH_USER}" <>> Building frontend..." + su - "$DISPATCH_USER" <>> Running Django migrations & collectstatic..." -su - "${DISPATCH_USER}" <>> Running Django migrations & collectstatic..." + su - "$DISPATCH_USER" </etc/systemd/system/dispatcharr.service +configure_services() { + echo ">>> Creating systemd service files..." 
+
+    # Gunicorn
+    cat <<EOF >${SYSTEMD_DIR}/dispatcharr.service
 [Unit]
 Description=Gunicorn for Dispatcharr
 After=network.target postgresql.service redis-server.service
@@ -211,36 +246,31 @@ After=network.target postgresql.service redis-server.service
 User=${DISPATCH_USER}
 Group=${DISPATCH_GROUP}
 WorkingDirectory=${APP_DIR}
-
 RuntimeDirectory=${GUNICORN_RUNTIME_DIR}
 RuntimeDirectoryMode=0775
-
-# Update PATH to include both the virtualenv and system binaries (for ffmpeg)
 Environment="PATH=${APP_DIR}/env/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin"
 Environment="POSTGRES_DB=${POSTGRES_DB}"
 Environment="POSTGRES_USER=${POSTGRES_USER}"
 Environment="POSTGRES_PASSWORD=${POSTGRES_PASSWORD}"
 Environment="POSTGRES_HOST=localhost"
-
+ExecStartPre=/usr/bin/bash -c 'until pg_isready -h localhost -U ${POSTGRES_USER}; do sleep 1; done'
 ExecStart=${APP_DIR}/env/bin/gunicorn \\
     --workers=4 \\
     --worker-class=gevent \\
     --timeout=300 \\
     --bind unix:${GUNICORN_SOCKET} \\
     dispatcharr.wsgi:application
-
 Restart=always
 KillMode=mixed
-
+SyslogIdentifier=dispatcharr
+StandardOutput=journal
+StandardError=journal
 [Install]
 WantedBy=multi-user.target
 EOF
 
-##############################################################################
-# 10) Create Systemd Service for Celery
-##############################################################################
-
-cat <<EOF >/etc/systemd/system/dispatcharr-celery.service
+    # Celery
+    cat <<EOF >${SYSTEMD_DIR}/dispatcharr-celery.service
 [Unit]
 Description=Celery Worker for Dispatcharr
 After=network.target redis-server.service
@@ -256,21 +286,18 @@ Environment="POSTGRES_USER=${POSTGRES_USER}"
 Environment="POSTGRES_PASSWORD=${POSTGRES_PASSWORD}"
 Environment="POSTGRES_HOST=localhost"
 Environment="CELERY_BROKER_URL=redis://localhost:6379/0"
-
 ExecStart=${APP_DIR}/env/bin/celery -A dispatcharr worker -l info
-
 Restart=always
 KillMode=mixed
-
+SyslogIdentifier=dispatcharr-celery
+StandardOutput=journal
+StandardError=journal
 [Install]
 WantedBy=multi-user.target
 EOF
 
-##############################################################################
-# 11) Create Systemd Service for Celery Beat (Optional)
-##############################################################################
-
-cat <<EOF >/etc/systemd/system/dispatcharr-celerybeat.service
+    # Celery Beat
+    cat <<EOF >${SYSTEMD_DIR}/dispatcharr-celerybeat.service
 [Unit]
 Description=Celery Beat Scheduler for Dispatcharr
 After=network.target redis-server.service
@@ -286,23 +313,20 @@ Environment="POSTGRES_USER=${POSTGRES_USER}"
 Environment="POSTGRES_PASSWORD=${POSTGRES_PASSWORD}"
 Environment="POSTGRES_HOST=localhost"
 Environment="CELERY_BROKER_URL=redis://localhost:6379/0"
-
 ExecStart=${APP_DIR}/env/bin/celery -A dispatcharr beat -l info
-
 Restart=always
 KillMode=mixed
-
+SyslogIdentifier=dispatcharr-celerybeat
+StandardOutput=journal
+StandardError=journal
 [Install]
 WantedBy=multi-user.target
 EOF
 
-##############################################################################
-# 12) Create Systemd Service for Daphné (WebSockets / Channels)
-##############################################################################
-
-cat <<EOF >/etc/systemd/system/dispatcharr-daphne.service
+    # Daphne
+    cat <<EOF >${SYSTEMD_DIR}/dispatcharr-daphne.service
 [Unit]
-Description=Daphne for Dispatcharr (ASGI)
+Description=Daphne for Dispatcharr (ASGI/WebSockets)
 After=network.target
 Requires=dispatcharr.service
@@ -315,47 +339,33 @@ Environment="POSTGRES_DB=${POSTGRES_DB}"
 Environment="POSTGRES_USER=${POSTGRES_USER}"
 Environment="POSTGRES_PASSWORD=${POSTGRES_PASSWORD}"
Environment="POSTGRES_HOST=localhost" - ExecStart=${APP_DIR}/env/bin/daphne -b 0.0.0.0 -p ${WEBSOCKET_PORT} dispatcharr.asgi:application - Restart=always KillMode=mixed - +SyslogIdentifier=dispatcharr-daphne +StandardOutput=journal +StandardError=journal [Install] WantedBy=multi-user.target EOF -############################################################################## -# 13) Configure Nginx -############################################################################## - -echo ">>> Configuring Nginx at /etc/nginx/sites-available/dispatcharr.conf ..." -cat </etc/nginx/sites-available/dispatcharr.conf + echo ">>> Creating Nginx config..." + cat </etc/nginx/sites-available/dispatcharr.conf server { listen ${NGINX_HTTP_PORT}; - - # Proxy to Gunicorn socket for main HTTP traffic location / { include proxy_params; proxy_pass http://unix:${GUNICORN_SOCKET}; } - - # Serve Django static files location /static/ { alias ${APP_DIR}/static/; } - - # Serve React build assets location /assets/ { alias ${APP_DIR}/frontend/dist/assets/; } - - # Serve media files if any location /media/ { alias ${APP_DIR}/media/; } - - # WebSockets for DaphnΓ© location /ws/ { proxy_pass http://127.0.0.1:${WEBSOCKET_PORT}; proxy_http_version 1.1; @@ -368,46 +378,67 @@ server { } EOF -ln -sf /etc/nginx/sites-available/dispatcharr.conf /etc/nginx/sites-enabled/dispatcharr.conf - -# Remove default site if it exists -if [ -f /etc/nginx/sites-enabled/default ]; then - rm -f /etc/nginx/sites-enabled/default -fi - -echo ">>> Testing Nginx config..." -nginx -t - -echo ">>> Restarting Nginx..." -systemctl restart nginx -systemctl enable nginx + ln -sf /etc/nginx/sites-available/dispatcharr.conf /etc/nginx/sites-enabled/dispatcharr.conf + [ -f /etc/nginx/sites-enabled/default ] && rm /etc/nginx/sites-enabled/default + nginx -t + systemctl restart nginx + systemctl enable nginx +} ############################################################################## -# 14) Start & Enable Services +# 11) Start Services ############################################################################## -echo ">>> Enabling systemd services..." -systemctl daemon-reload -systemctl enable dispatcharr -systemctl enable dispatcharr-celery -systemctl enable dispatcharr-celerybeat -systemctl enable dispatcharr-daphne - -echo ">>> Restarting / Starting services..." -systemctl restart dispatcharr -systemctl restart dispatcharr-celery -systemctl restart dispatcharr-celerybeat -systemctl restart dispatcharr-daphne +start_services() { + echo ">>> Enabling and starting services..." + systemctl daemon-reexec + systemctl daemon-reload + systemctl enable --now dispatcharr dispatcharr-celery dispatcharr-celerybeat dispatcharr-daphne +} ############################################################################## -# Done! +# 12) Summary ############################################################################## -echo "=================================================" -echo "Dispatcharr installation (or update) complete!" -echo "Nginx is listening on port ${NGINX_HTTP_PORT}." -echo "Gunicorn socket: ${GUNICORN_SOCKET}." -echo "WebSockets on port ${WEBSOCKET_PORT} (path /ws/)." -echo "You can check logs via 'sudo journalctl -u dispatcharr -f', etc." -echo "Visit http://:${NGINX_HTTP_PORT} in your browser." 
-echo "=================================================" +show_summary() { + server_ip=$(ip route get 1 | awk '{print $7; exit}') + cat <//", + stream_xc, + name="xc_live_stream_endpoint", + ), + path( + "//", + stream_xc, + name="xc_stream_endpoint", + ), + # XC VOD endpoints + path( + "movie///.", + xc_movie_stream, + name="xc_movie_stream", + ), + path( + "series///.", + xc_series_stream, + name="xc_series_stream", + ), + re_path(r"^swagger/?$", schema_view.with_ui("swagger", cache_timeout=0), name="schema-swagger-ui"), # ReDoc UI - path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'), - + path("redoc/", schema_view.with_ui("redoc", cache_timeout=0), name="schema-redoc"), # Optionally, serve the raw Swagger JSON - path('swagger.json', schema_view.without_ui(cache_timeout=0), name='schema-json'), + path("swagger.json", schema_view.without_ui(cache_timeout=0), name="schema-json"), + # VOD proxy is now handled by the main proxy URLs above # Catch-all routes should always be last - path('', TemplateView.as_view(template_name='index.html')), # React entry point - path('', TemplateView.as_view(template_name='index.html')), - + path("", TemplateView.as_view(template_name="index.html")), # React entry point + path("", TemplateView.as_view(template_name="index.html")), ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) urlpatterns += websocket_urlpatterns diff --git a/dispatcharr/utils.py b/dispatcharr/utils.py index e6392c6c..260515fc 100644 --- a/dispatcharr/utils.py +++ b/dispatcharr/utils.py @@ -1,23 +1,58 @@ # dispatcharr/utils.py +import json +import ipaddress from django.http import JsonResponse from django.core.exceptions import ValidationError +from core.models import CoreSettings, NETWORK_ACCESS + def json_error_response(message, status=400): """Return a standardized error JSON response.""" - return JsonResponse({'success': False, 'error': message}, status=status) + return JsonResponse({"success": False, "error": message}, status=status) + def json_success_response(data=None, status=200): """Return a standardized success JSON response.""" - response = {'success': True} + response = {"success": True} if data is not None: response.update(data) return JsonResponse(response, status=status) + def validate_logo_file(file): """Validate uploaded logo file size and MIME type.""" - valid_mime_types = ['image/jpeg', 'image/png', 'image/gif'] + valid_mime_types = ["image/jpeg", "image/png", "image/gif", "image/webp", "image/svg+xml"] if file.content_type not in valid_mime_types: - raise ValidationError('Unsupported file type. Allowed types: JPEG, PNG, GIF.') - if file.size > 2 * 1024 * 1024: - raise ValidationError('File too large. Max 2MB.') + raise ValidationError("Unsupported file type. Allowed types: JPEG, PNG, GIF, WebP, SVG.") + if file.size > 5 * 1024 * 1024: # 5MB + raise ValidationError("File too large. 
Max 5MB.") + +def get_client_ip(request): + x_forwarded_for = request.META.get("HTTP_X_REAL_IP") + if x_forwarded_for: + # X-Forwarded-For can be a comma-separated list of IPs + ip = x_forwarded_for.split(",")[0].strip() + else: + ip = request.META.get("REMOTE_ADDR") + return ip + + +def network_access_allowed(request, settings_key): + network_access = json.loads(CoreSettings.objects.get(key=NETWORK_ACCESS).value) + + cidrs = ( + network_access[settings_key].split(",") + if settings_key in network_access + else ["0.0.0.0/0"] + ) + + network_allowed = False + client_ip = ipaddress.ip_address(get_client_ip(request)) + for cidr in cidrs: + network = ipaddress.ip_network(cidr) + if client_ip in network: + network_allowed = True + break + + return network_allowed diff --git a/docker/DispatcharrBase b/docker/DispatcharrBase index 4360ced3..d37d8958 100644 --- a/docker/DispatcharrBase +++ b/docker/DispatcharrBase @@ -15,7 +15,7 @@ RUN apt-get update && apt-get install --no-install-recommends -y \ python-is-python3 python3-pip \ libpcre3 libpcre3-dev libpq-dev procps \ build-essential gcc pciutils \ - nginx streamlink \ + nginx streamlink comskip\ && apt-get clean && rm -rf /var/lib/apt/lists/* # --- Create Python virtual environment --- @@ -32,14 +32,14 @@ RUN curl -fsSL https://packages.redis.io/gpg | gpg --dearmor -o /usr/share/keyri apt-get update && apt-get install -y redis-server && \ apt-get clean && rm -rf /var/lib/apt/lists/* -# --- Set up PostgreSQL 14.x --- +# --- Set up PostgreSQL 17.x --- RUN curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor -o /usr/share/keyrings/postgresql-keyring.gpg && \ echo "deb [signed-by=/usr/share/keyrings/postgresql-keyring.gpg] http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" | \ tee /etc/apt/sources.list.d/pgdg.list && \ - apt-get update && apt-get install -y postgresql-14 postgresql-contrib-14 && \ + apt-get update && apt-get install -y postgresql-17 postgresql-contrib-17 && \ apt-get clean && rm -rf /var/lib/apt/lists/* # Create render group for hardware acceleration support with GID 109 RUN groupadd -r -g 109 render || true -ENTRYPOINT ["/app/docker/entrypoint.sh"] \ No newline at end of file +ENTRYPOINT ["/app/docker/entrypoint.sh"] diff --git a/docker/Dockerfile b/docker/Dockerfile index ec24c818..dc437227 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -4,11 +4,15 @@ ARG REPO_NAME=dispatcharr ARG BASE_TAG=base # --- Build frontend --- -FROM node:20 AS frontend-builder + +FROM node:24 AS frontend-builder + WORKDIR /app/frontend COPY ./frontend /app/frontend -RUN corepack enable && corepack prepare yarn@stable --activate && \ - yarn install && yarn build && \ +# remove any node_modules that may have been copied from the host (x86) +RUN rm -rf node_modules || true; \ + npm install --no-audit --progress=false; +RUN npm run build; \ rm -rf node_modules .cache # --- Redeclare build arguments for the next stage --- diff --git a/docker/comskip.ini b/docker/comskip.ini new file mode 100644 index 00000000..5dc94fd0 --- /dev/null +++ b/docker/comskip.ini @@ -0,0 +1,6 @@ +; Minimal default comskip config +edl_out=1 +output_edl=1 +verbose=0 +thread_count=0 + diff --git a/docker/docker-compose.aio.yml b/docker/docker-compose.aio.yml index 90cd8654..fe5e1507 100644 --- a/docker/docker-compose.aio.yml +++ b/docker/docker-compose.aio.yml @@ -14,6 +14,15 @@ services: - REDIS_HOST=localhost - CELERY_BROKER_URL=redis://localhost:6379/0 - DISPATCHARR_LOG_LEVEL=info + # Process Priority Configuration (Optional) + 
# Lower values = higher priority. Range: -20 (highest) to 19 (lowest) + # Negative values require cap_add: SYS_NICE (uncomment below) + #- UWSGI_NICE_LEVEL=-5 # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority) + #- CELERY_NICE_LEVEL=5 # Celery/EPG/Background tasks (default: 5, low priority) + # + # Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0) + #cap_add: + # - SYS_NICE # Optional for hardware acceleration #devices: # - /dev/dri:/dev/dri # For Intel/AMD GPU acceleration (VA-API) diff --git a/docker/docker-compose.debug.yml b/docker/docker-compose.debug.yml index 163ebf6a..d9dbef0e 100644 --- a/docker/docker-compose.debug.yml +++ b/docker/docker-compose.debug.yml @@ -18,3 +18,12 @@ services: - REDIS_HOST=localhost - CELERY_BROKER_URL=redis://localhost:6379/0 - DISPATCHARR_LOG_LEVEL=trace + # Process Priority Configuration (Optional) + # Lower values = higher priority. Range: -20 (highest) to 19 (lowest) + # Negative values require cap_add: SYS_NICE (uncomment below) + #- UWSGI_NICE_LEVEL=-5 # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority) + #- CELERY_NICE_LEVEL=5 # Celery/EPG/Background tasks (default: 5, low priority) + # + # Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0) + #cap_add: + # - SYS_NICE diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml index 4f42e1b0..d1bb3680 100644 --- a/docker/docker-compose.dev.yml +++ b/docker/docker-compose.dev.yml @@ -11,12 +11,21 @@ services: - 8001:8001 volumes: - ../:/app - # - ./data/db:/data + - ./data:/data environment: - DISPATCHARR_ENV=dev - REDIS_HOST=localhost - CELERY_BROKER_URL=redis://localhost:6379/0 - DISPATCHARR_LOG_LEVEL=debug + # Process Priority Configuration (Optional) + # Lower values = higher priority. Range: -20 (highest) to 19 (lowest) + # Negative values require cap_add: SYS_NICE (uncomment below) + #- UWSGI_NICE_LEVEL=-5 # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority) + #- CELERY_NICE_LEVEL=5 # Celery/EPG/Background tasks (default: 5, low priority) + # + # Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0) + #cap_add: + # - SYS_NICE pgadmin: image: dpage/pgadmin4 diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index d195fbdc..aaa63990 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -4,6 +4,8 @@ services: container_name: dispatcharr_web ports: - 9191:9191 + volumes: + - ./data:/data depends_on: - db - redis @@ -15,6 +17,15 @@ services: - REDIS_HOST=redis - CELERY_BROKER_URL=redis://redis:6379/0 - DISPATCHARR_LOG_LEVEL=info + # Process Priority Configuration (Optional) + # Lower values = higher priority. 
Range: -20 (highest) to 19 (lowest) + # Negative values require cap_add: SYS_NICE (uncomment below) + #- UWSGI_NICE_LEVEL=-5 # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority) + #- CELERY_NICE_LEVEL=5 # Celery/EPG/Background tasks (default: 5, low priority) + # + # Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0) + #cap_add: + # - SYS_NICE # Optional for hardware acceleration #group_add: # - video @@ -51,7 +62,7 @@ services: command: > bash -c " cd /app && - celery -A dispatcharr worker -l info + nice -n 5 celery -A dispatcharr worker -l info " db: diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 2d9f7fdc..fa0eea01 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -33,12 +33,25 @@ export POSTGRES_USER=${POSTGRES_USER:-dispatch} export POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-secret} export POSTGRES_HOST=${POSTGRES_HOST:-localhost} export POSTGRES_PORT=${POSTGRES_PORT:-5432} - +export PG_VERSION=$(ls /usr/lib/postgresql/ | sort -V | tail -n 1) +export PG_BINDIR="/usr/lib/postgresql/${PG_VERSION}/bin" export REDIS_HOST=${REDIS_HOST:-localhost} export REDIS_DB=${REDIS_DB:-0} export DISPATCHARR_PORT=${DISPATCHARR_PORT:-9191} export LIBVA_DRIVERS_PATH='/usr/local/lib/x86_64-linux-gnu/dri' export LD_LIBRARY_PATH='/usr/local/lib' + +# Process priority configuration +# UWSGI_NICE_LEVEL: Absolute nice value for uWSGI/streaming (default: 0 = normal priority) +# CELERY_NICE_LEVEL: Absolute nice value for Celery/background tasks (default: 5 = low priority) +# Note: The script will automatically calculate the relative offset for Celery since it's spawned by uWSGI +export UWSGI_NICE_LEVEL=${UWSGI_NICE_LEVEL:-0} +CELERY_NICE_ABSOLUTE=${CELERY_NICE_LEVEL:-5} + +# Calculate relative nice value for Celery (since nice is relative to parent process) +# Celery is spawned by uWSGI, so we need to add the offset to reach the desired absolute value +export CELERY_NICE_LEVEL=$((CELERY_NICE_ABSOLUTE - UWSGI_NICE_LEVEL)) + # Set LIBVA_DRIVER_NAME if user has specified it if [ -v LIBVA_DRIVER_NAME ]; then export LIBVA_DRIVER_NAME @@ -77,6 +90,7 @@ if [[ ! -f /etc/profile.d/dispatcharr.sh ]]; then DISPATCHARR_ENV DISPATCHARR_DEBUG DISPATCHARR_LOG_LEVEL REDIS_HOST REDIS_DB POSTGRES_DIR DISPATCHARR_PORT DISPATCHARR_VERSION DISPATCHARR_TIMESTAMP LIBVA_DRIVERS_PATH LIBVA_DRIVER_NAME LD_LIBRARY_PATH + CELERY_NICE_LEVEL UWSGI_NICE_LEVEL ) # Process each variable for both profile.d and environment @@ -95,26 +109,41 @@ fi chmod +x /etc/profile.d/dispatcharr.sh -pip install django-filter +# Ensure root's .bashrc sources the profile.d scripts for interactive non-login shells +if ! grep -q "profile.d/dispatcharr.sh" /root/.bashrc 2>/dev/null; then + cat >> /root/.bashrc << 'EOF' + +# Source Dispatcharr environment variables +if [ -f /etc/profile.d/dispatcharr.sh ]; then + . /etc/profile.d/dispatcharr.sh +fi +EOF +fi # Run init scripts -echo "Starting init process..." +echo "Starting user setup..." . /app/docker/init/01-user-setup.sh +echo "Setting up PostgreSQL..." . /app/docker/init/02-postgres.sh +echo "Starting init process..." . /app/docker/init/03-init-dispatcharr.sh # Start PostgreSQL echo "Starting Postgres..." 
-su - postgres -c "/usr/lib/postgresql/14/bin/pg_ctl -D ${POSTGRES_DIR} start -w -t 300 -o '-c port=${POSTGRES_PORT}'" +su - postgres -c "$PG_BINDIR/pg_ctl -D ${POSTGRES_DIR} start -w -t 300 -o '-c port=${POSTGRES_PORT}'" # Wait for PostgreSQL to be ready -until su - postgres -c "/usr/lib/postgresql/14/bin/pg_isready -h ${POSTGRES_HOST} -p ${POSTGRES_PORT}" >/dev/null 2>&1; do +until su - postgres -c "$PG_BINDIR/pg_isready -h ${POSTGRES_HOST} -p ${POSTGRES_PORT}" >/dev/null 2>&1; do echo_with_timestamp "Waiting for PostgreSQL to be ready..." sleep 1 done -postgres_pid=$(su - postgres -c "/usr/lib/postgresql/14/bin/pg_ctl -D ${POSTGRES_DIR} status" | sed -n 's/.*PID: \([0-9]\+\).*/\1/p') +postgres_pid=$(su - postgres -c "$PG_BINDIR/pg_ctl -D ${POSTGRES_DIR} status" | sed -n 's/.*PID: \([0-9]\+\).*/\1/p') echo "βœ… Postgres started with PID $postgres_pid" pids+=("$postgres_pid") +# Ensure database encoding is UTF8 +. /app/docker/init/02-postgres.sh +ensure_utf8_encoding + if [[ "$DISPATCHARR_ENV" = "dev" ]]; then . /app/docker/init/99-init-dev.sh echo "Starting frontend dev environment" @@ -154,10 +183,12 @@ if [ "$DISPATCHARR_DEBUG" != "true" ]; then uwsgi_args+=" --disable-logging" fi -# Launch uwsgi -p passes environment variables to the process -su -p - $POSTGRES_USER -c "cd /app && uwsgi $uwsgi_args &" -uwsgi_pid=$(pgrep uwsgi | sort | head -n1) -echo "βœ… uwsgi started with PID $uwsgi_pid" +# Launch uwsgi with configurable nice level (default: 0 for normal priority) +# Users can override via UWSGI_NICE_LEVEL environment variable in docker-compose +# Start with nice as root, then use setpriv to drop privileges to dispatch user +# This preserves both the nice value and environment variables +nice -n $UWSGI_NICE_LEVEL su -p - "$POSTGRES_USER" -c "cd /app && exec uwsgi $uwsgi_args" & uwsgi_pid=$! +echo "βœ… uwsgi started with PID $uwsgi_pid (nice $UWSGI_NICE_LEVEL)" pids+=("$uwsgi_pid") # sed -i 's/protected-mode yes/protected-mode no/g' /etc/redis/redis.conf @@ -202,7 +233,7 @@ echo "πŸ” Running hardware acceleration check..." # Wait for at least one process to exit and log the process that exited first if [ ${#pids[@]} -gt 0 ]; then - echo "⏳ Waiting for processes to exit..." + echo "⏳ Dispatcharr is running. Monitoring processes..." while kill -0 "${pids[@]}" 2>/dev/null; do sleep 1 # Wait for a second before checking again done diff --git a/docker/init/02-postgres.sh b/docker/init/02-postgres.sh index 7bb90671..93a808fa 100644 --- a/docker/init/02-postgres.sh +++ b/docker/init/02-postgres.sh @@ -1,5 +1,4 @@ #!/bin/bash - # Temporary migration from postgres in /data to $POSTGRES_DIR. Can likely remove # some time in the future. if [ -e "/data/postgresql.conf" ]; then @@ -27,6 +26,66 @@ if [ -e "/data/postgresql.conf" ]; then echo "Migration completed successfully." fi +PG_VERSION_FILE="${POSTGRES_DIR}/PG_VERSION" + +# Detect current version from data directory, if present +if [ -f "$PG_VERSION_FILE" ]; then + CURRENT_VERSION=$(cat "$PG_VERSION_FILE") +else + CURRENT_VERSION="" +fi + +# Only run upgrade if current version is set and not the target +if [ -n "$CURRENT_VERSION" ] && [ "$CURRENT_VERSION" != "$PG_VERSION" ]; then + echo "Detected PostgreSQL data directory version $CURRENT_VERSION, upgrading to $PG_VERSION..." + # Set binary paths for upgrade if needed + OLD_BINDIR="/usr/lib/postgresql/${CURRENT_VERSION}/bin" + NEW_BINDIR="/usr/lib/postgresql/${PG_VERSION}/bin" + PG_INSTALLED_BY_SCRIPT=0 + if [ ! 
-d "$OLD_BINDIR" ]; then + echo "PostgreSQL binaries for version $CURRENT_VERSION not found. Installing..." + apt update && apt install -y postgresql-$CURRENT_VERSION postgresql-contrib-$CURRENT_VERSION + if [ $? -ne 0 ]; then + echo "Failed to install PostgreSQL version $CURRENT_VERSION. Exiting." + exit 1 + fi + PG_INSTALLED_BY_SCRIPT=1 + fi + + # Prepare new data directory + NEW_POSTGRES_DIR="${POSTGRES_DIR}_$PG_VERSION" + + # Remove new data directory if it already exists (from a failed/partial upgrade) + if [ -d "$NEW_POSTGRES_DIR" ]; then + echo "Warning: $NEW_POSTGRES_DIR already exists. Removing it to avoid upgrade issues." + rm -rf "$NEW_POSTGRES_DIR" + fi + + mkdir -p "$NEW_POSTGRES_DIR" + chown -R postgres:postgres "$NEW_POSTGRES_DIR" + chmod 700 "$NEW_POSTGRES_DIR" + + # Initialize new data directory + echo "Initializing new PostgreSQL data directory at $NEW_POSTGRES_DIR..." + su - postgres -c "$NEW_BINDIR/initdb -D $NEW_POSTGRES_DIR" + echo "Running pg_upgrade from $OLD_BINDIR to $NEW_BINDIR..." + # Run pg_upgrade + su - postgres -c "$NEW_BINDIR/pg_upgrade -b $OLD_BINDIR -B $NEW_BINDIR -d $POSTGRES_DIR -D $NEW_POSTGRES_DIR" + + # Move old data directory for backup, move new into place + mv "$POSTGRES_DIR" "${POSTGRES_DIR}_backup_${CURRENT_VERSION}_$(date +%s)" + mv "$NEW_POSTGRES_DIR" "$POSTGRES_DIR" + + echo "Upgrade complete. Old data directory backed up." + + # Uninstall PostgreSQL if we installed it just for upgrade + if [ "$PG_INSTALLED_BY_SCRIPT" -eq 1 ]; then + echo "Uninstalling temporary PostgreSQL $CURRENT_VERSION packages..." + apt remove -y postgresql-$CURRENT_VERSION postgresql-contrib-$CURRENT_VERSION + apt autoremove -y + fi +fi + # Initialize PostgreSQL database if [ -z "$(ls -A $POSTGRES_DIR)" ]; then echo "Initializing PostgreSQL database..." @@ -35,29 +94,28 @@ if [ -z "$(ls -A $POSTGRES_DIR)" ]; then chmod 700 $POSTGRES_DIR # Initialize PostgreSQL - su - postgres -c "/usr/lib/postgresql/14/bin/initdb -D ${POSTGRES_DIR}" + su - postgres -c "$PG_BINDIR/initdb -D ${POSTGRES_DIR}" # Configure PostgreSQL echo "host all all 0.0.0.0/0 md5" >> "${POSTGRES_DIR}/pg_hba.conf" echo "listen_addresses='*'" >> "${POSTGRES_DIR}/postgresql.conf" # Start PostgreSQL echo "Starting Postgres..." - su - postgres -c "/usr/lib/postgresql/14/bin/pg_ctl -D ${POSTGRES_DIR} start -w -t 300 -o '-c port=${POSTGRES_PORT}'" + su - postgres -c "$PG_BINDIR/pg_ctl -D ${POSTGRES_DIR} start -w -t 300 -o '-c port=${POSTGRES_PORT}'" # Wait for PostgreSQL to be ready - until su - postgres -c "/usr/lib/postgresql/14/bin/pg_isready -h ${POSTGRES_HOST} -p ${POSTGRES_PORT}" >/dev/null 2>&1; do + until su - postgres -c "$PG_BINDIR/pg_isready -h ${POSTGRES_HOST} -p ${POSTGRES_PORT}" >/dev/null 2>&1; do echo "Waiting for PostgreSQL to be ready..." sleep 1 done - postgres_pid=$(su - postgres -c "/usr/lib/postgresql/14/bin/pg_ctl -D ${POSTGRES_DIR} status" | sed -n 's/.*PID: \([0-9]\+\).*/\1/p') + postgres_pid=$(su - postgres -c "$PG_BINDIR/pg_ctl -D ${POSTGRES_DIR} status" | sed -n 's/.*PID: \([0-9]\+\).*/\1/p') # Setup database if needed if ! su - postgres -c "psql -p ${POSTGRES_PORT} -tAc \"SELECT 1 FROM pg_database WHERE datname = '$POSTGRES_DB';\"" | grep -q 1; then # Create PostgreSQL database echo "Creating PostgreSQL database..." 
- su - postgres -c "createdb -p ${POSTGRES_PORT} ${POSTGRES_DB}" - - # Create user, set ownership, and grant privileges, including privileges to create new databases + su - postgres -c "createdb -p ${POSTGRES_PORT} --encoding=UTF8 ${POSTGRES_DB}" + # Create user, set ownership, and grant privileges echo "Creating PostgreSQL user..." su - postgres -c "psql -p ${POSTGRES_PORT} -d ${POSTGRES_DB}" < $DUMP_FILE" + # Drop and recreate database with UTF8 encoding using template0 + su - postgres -c "dropdb -p ${POSTGRES_PORT} $POSTGRES_DB" + # Recreate database with UTF8 encoding + su - postgres -c "createdb -p ${POSTGRES_PORT} --encoding=UTF8 --template=template0 ${POSTGRES_DB}" + + + # Restore data + su - postgres -c "psql -p ${POSTGRES_PORT} -d $POSTGRES_DB < $DUMP_FILE" + #configure_db + + + rm -f "$DUMP_FILE" + echo "Database $POSTGRES_DB converted to UTF8 and permissions set." + fi +} + + diff --git a/docker/init/03-init-dispatcharr.sh b/docker/init/03-init-dispatcharr.sh index b9c3c63b..5fbef23d 100644 --- a/docker/init/03-init-dispatcharr.sh +++ b/docker/init/03-init-dispatcharr.sh @@ -1,24 +1,67 @@ #!/bin/bash -mkdir -p /data/logos -mkdir -p /data/recordings -mkdir -p /data/uploads/m3us -mkdir -p /data/uploads/epgs -mkdir -p /data/m3us -mkdir -p /data/epgs -mkdir -p /app/logo_cache -mkdir -p /app/media +# Define directories that need to exist and be owned by PUID:PGID +DATA_DIRS=( + "/data/logos" + "/data/recordings" + "/data/uploads/m3us" + "/data/uploads/epgs" + "/data/m3us" + "/data/epgs" + "/data/plugins" + "/data/models" +) + +APP_DIRS=( + "/app/logo_cache" + "/app/media" +) + +# Create all directories +for dir in "${DATA_DIRS[@]}" "${APP_DIRS[@]}"; do + mkdir -p "$dir" +done + +# Ensure /app itself is owned by PUID:PGID (needed for uwsgi socket creation) +if [ "$(id -u)" = "0" ] && [ -d "/app" ]; then + if [ "$(stat -c '%u:%g' /app)" != "$PUID:$PGID" ]; then + echo "Fixing ownership for /app (non-recursive)" + chown $PUID:$PGID /app + fi +fi sed -i "s/NGINX_PORT/${DISPATCHARR_PORT}/g" /etc/nginx/sites-enabled/default # NOTE: mac doesn't run as root, so only manage permissions # if this script is running as root if [ "$(id -u)" = "0" ]; then - # Needs to own ALL of /data except db, we handle that below - chown -R $PUID:$PGID /data - chown -R $PUID:$PGID /app + # Fix data directories (non-recursive to avoid touching user files) + for dir in "${DATA_DIRS[@]}"; do + if [ -d "$dir" ] && [ "$(stat -c '%u:%g' "$dir")" != "$PUID:$PGID" ]; then + echo "Fixing ownership for $dir" + chown $PUID:$PGID "$dir" + fi + done + + # Fix app directories (recursive since they're managed by the app) + for dir in "${APP_DIRS[@]}"; do + if [ -d "$dir" ] && [ "$(stat -c '%u:%g' "$dir")" != "$PUID:$PGID" ]; then + echo "Fixing ownership for $dir (recursive)" + chown -R $PUID:$PGID "$dir" + fi + done + + # Database permissions + if [ -d /data/db ] && [ "$(stat -c '%u' /data/db)" != "$(id -u postgres)" ]; then + echo "Fixing ownership for /data/db" + chown -R postgres:postgres /data/db + fi + + # Fix /data directory ownership (non-recursive) + if [ -d "/data" ] && [ "$(stat -c '%u:%g' /data)" != "$PUID:$PGID" ]; then + echo "Fixing ownership for /data (non-recursive)" + chown $PUID:$PGID /data + fi - # Permissions - chown -R postgres:postgres /data/db chmod +x /data -fi +fi \ No newline at end of file diff --git a/docker/nginx.conf b/docker/nginx.conf index 65d382c5..5e754d20 100644 --- a/docker/nginx.conf +++ b/docker/nginx.conf @@ -7,15 +7,19 @@ server { proxy_connect_timeout 75; proxy_send_timeout 300; 
proxy_read_timeout 300; - client_max_body_size 0; # Allow file uploads up to 128MB + client_max_body_size 0; + + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Host $host:$server_port; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Port $server_port; # Serve Django via uWSGI location / { include uwsgi_params; uwsgi_pass unix:/app/uwsgi.sock; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Host $host; } location /assets/ { @@ -55,11 +59,6 @@ server { location /hdhr { include uwsgi_params; uwsgi_pass unix:/app/uwsgi.sock; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Host $host:$server_port; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header Host $host; } # Serve FFmpeg streams efficiently @@ -78,9 +77,6 @@ server { proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "Upgrade"; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Host $host; } # Route TS proxy requests to the dedicated instance @@ -94,8 +90,5 @@ server { proxy_read_timeout 300s; proxy_send_timeout 300s; client_max_body_size 0; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Host $host; } } diff --git a/docker/uwsgi.debug.ini b/docker/uwsgi.debug.ini index e049df87..3de890a5 100644 --- a/docker/uwsgi.debug.ini +++ b/docker/uwsgi.debug.ini @@ -7,9 +7,10 @@ exec-before = python /app/scripts/wait_for_redis.py ; Start Redis first attach-daemon = redis-server -; Then start other services -attach-daemon = celery -A dispatcharr worker --autoscale=6,1 -attach-daemon = celery -A dispatcharr beat +; Then start other services with configurable nice level (default: 5 for low priority) +; Users can override via CELERY_NICE_LEVEL environment variable in docker-compose +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr worker --autoscale=6,1 +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr beat attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application attach-daemon = cd /app/frontend && npm run dev diff --git a/docker/uwsgi.dev.ini b/docker/uwsgi.dev.ini index 7e50f2ef..e476e216 100644 --- a/docker/uwsgi.dev.ini +++ b/docker/uwsgi.dev.ini @@ -9,9 +9,10 @@ exec-pre = python /app/scripts/wait_for_redis.py ; Start Redis first attach-daemon = redis-server -; Then start other services -attach-daemon = celery -A dispatcharr worker --autoscale=6,1 -attach-daemon = celery -A dispatcharr beat +; Then start other services with configurable nice level (default: 5 for low priority) +; Users can override via CELERY_NICE_LEVEL environment variable in docker-compose +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr worker --autoscale=6,1 +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr beat attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application attach-daemon = cd /app/frontend && npm run dev diff --git a/docker/uwsgi.ini b/docker/uwsgi.ini index b35ea5bf..f8fe8ab7 100644 --- a/docker/uwsgi.ini +++ b/docker/uwsgi.ini @@ -9,9 +9,10 @@ exec-pre = python /app/scripts/wait_for_redis.py ; Start Redis first attach-daemon = redis-server -; Then start 
other services -attach-daemon = celery -A dispatcharr worker --autoscale=6,1 -attach-daemon = celery -A dispatcharr beat +; Then start other services with configurable nice level (default: 5 for low priority) +; Users can override via CELERY_NICE_LEVEL environment variable in docker-compose +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr worker --autoscale=6,1 +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr beat attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application # Core settings diff --git a/frontend/package-lock.json b/frontend/package-lock.json index d8da7f76..780aabe1 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -1,38 +1,33 @@ { - "name": "vite", + "name": "frontend", "version": "0.0.0", "lockfileVersion": 3, "requires": true, "packages": { "": { - "name": "vite", + "name": "frontend", "version": "0.0.0", "dependencies": { "@dnd-kit/core": "^6.3.1", "@dnd-kit/modifiers": "^9.0.0", "@dnd-kit/sortable": "^10.0.0", "@dnd-kit/utilities": "^3.2.2", - "@mantine/charts": "^7.17.2", - "@mantine/core": "^7.17.2", - "@mantine/dates": "^7.17.2", - "@mantine/dropzone": "^7.17.2", - "@mantine/form": "^7.17.3", - "@mantine/hooks": "^7.17.2", - "@mantine/notifications": "^7.17.2", - "@tabler/icons-react": "^3.31.0", + "@mantine/charts": "~8.0.1", + "@mantine/core": "~8.0.1", + "@mantine/dates": "~8.0.1", + "@mantine/dropzone": "~8.0.1", + "@mantine/form": "~8.0.1", + "@mantine/hooks": "~8.0.1", + "@mantine/notifications": "~8.0.1", "@tanstack/react-table": "^8.21.2", - "allotment": "^1.20.3", - "axios": "^1.8.2", - "clsx": "^2.1.1", + "allotment": "^1.20.4", "dayjs": "^1.11.13", "formik": "^2.4.6", "hls.js": "^1.5.20", - "lucide-react": "^0.479.0", - "mantine-react-table": "^2.0.0-beta.9", + "lucide-react": "^0.511.0", "mpegts.js": "^1.8.0", - "prettier": "^3.5.3", - "react": "^19.0.0", - "react-dom": "^19.0.0", + "react": "^19.1.0", + "react-dom": "^19.1.0", "react-draggable": "^4.4.6", "react-pro-sidebar": "^1.1.0", "react-router-dom": "^7.3.0", @@ -46,16 +41,66 @@ }, "devDependencies": { "@eslint/js": "^9.21.0", - "@types/react": "^19.0.10", - "@types/react-dom": "^19.0.4", - "@vitejs/plugin-react-swc": "^3.8.0", + "@swc/core": "npm:@swc/wasm@1.13.20", + "@swc/wasm": "^1.13.20", + "@testing-library/dom": "^10.4.1", + "@testing-library/jest-dom": "^6.8.0", + "@testing-library/react": "^16.3.0", + "@testing-library/user-event": "^14.6.1", + "@types/react": "^19.1.0", + "@types/react-dom": "^19.1.0", + "@vitejs/plugin-react-swc": "^4.1.0", "eslint": "^9.21.0", "eslint-plugin-react-hooks": "^5.1.0", "eslint-plugin-react-refresh": "^0.4.19", "globals": "^15.15.0", - "vite": "^6.2.0" + "jsdom": "^27.0.0", + "prettier": "^3.5.3", + "vite": "^6.2.0", + "vitest": "^3.2.4" } }, + "node_modules/@adobe/css-tools": { + "version": "4.4.4", + "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.4.4.tgz", + "integrity": "sha512-Elp+iwUx5rN5+Y8xLt5/GRoG20WGoDCQ/1Fb+1LiGtvwbDavuSk0jhD/eZdckHAuzcDzccnkv+rEjyWfRx18gg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@asamuzakjp/css-color": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-4.0.4.tgz", + "integrity": "sha512-cKjSKvWGmAziQWbCouOsFwb14mp1betm8Y7Fn+yglDMUUu3r9DCbJ9iJbeFDenLMqFbIMC0pQP8K+B8LAxX3OQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@csstools/css-calc": "^2.1.4", + "@csstools/css-color-parser": "^3.0.10", + "@csstools/css-parser-algorithms": "^3.0.5", + 
"@csstools/css-tokenizer": "^3.0.4", + "lru-cache": "^11.1.0" + } + }, + "node_modules/@asamuzakjp/dom-selector": { + "version": "6.5.5", + "resolved": "https://registry.npmjs.org/@asamuzakjp/dom-selector/-/dom-selector-6.5.5.tgz", + "integrity": "sha512-kI2MX9pmImjxWT8nxDZY+MuN6r1jJGe7WxizEbsAEPB/zxfW5wYLIiPG1v3UKgEOOP8EsDkp0ZL99oRFAdPM8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@asamuzakjp/nwsapi": "^2.3.9", + "bidi-js": "^1.0.3", + "css-tree": "^3.1.0", + "is-potential-custom-element-name": "^1.0.1" + } + }, + "node_modules/@asamuzakjp/nwsapi": { + "version": "2.3.9", + "resolved": "https://registry.npmjs.org/@asamuzakjp/nwsapi/-/nwsapi-2.3.9.tgz", + "integrity": "sha512-n8GuYSrI9bF7FFZ/SjhwevlHc8xaVlb/7HmHelnc/PZXBD2ZR49NnN9sMMuDdEGPeeRQ5d0hqlSlEpgCX3Wl0Q==", + "dev": true, + "license": "MIT" + }, "node_modules/@babel/code-frame": { "version": "7.26.2", "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.26.2.tgz", @@ -198,6 +243,144 @@ "node": ">=6.9.0" } }, + "node_modules/@csstools/color-helpers": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@csstools/color-helpers/-/color-helpers-5.1.0.tgz", + "integrity": "sha512-S11EXWJyy0Mz5SYvRmY8nJYTFFd1LCNV+7cXyAgQtOOuzb4EsgfqDufL+9esx72/eLhsRdGZwaldu/h+E4t4BA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + } + }, + "node_modules/@csstools/css-calc": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@csstools/css-calc/-/css-calc-2.1.4.tgz", + "integrity": "sha512-3N8oaj+0juUw/1H3YwmDDJXCgTB1gKU6Hc/bB502u9zR0q2vd786XJH9QfrKIEgFlZmhZiq6epXl4rHqhzsIgQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/css-color-parser": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@csstools/css-color-parser/-/css-color-parser-3.1.0.tgz", + "integrity": "sha512-nbtKwh3a6xNVIp/VRuXV64yTKnb1IjTAEEh3irzS+HkKjAOYLTGNb9pmVNntZ8iVBHcWDA2Dof0QtPgFI1BaTA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "@csstools/color-helpers": "^5.1.0", + "@csstools/css-calc": "^2.1.4" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/css-parser-algorithms": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/@csstools/css-parser-algorithms/-/css-parser-algorithms-3.0.5.tgz", + "integrity": "sha512-DaDeUkXZKjdGhgYaHNJTV9pV7Y9B3b644jCLs9Upc3VeNGg6LWARAT6O+Q+/COo+2gg/bM5rhpMAtf70WqfBdQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-tokenizer": "^3.0.4" + } + }, + 
"node_modules/@csstools/css-syntax-patches-for-csstree": { + "version": "1.0.14", + "resolved": "https://registry.npmjs.org/@csstools/css-syntax-patches-for-csstree/-/css-syntax-patches-for-csstree-1.0.14.tgz", + "integrity": "sha512-zSlIxa20WvMojjpCSy8WrNpcZ61RqfTfX3XTaOeVlGJrt/8HF3YbzgFZa01yTbT4GWQLwfTcC3EB8i3XnB647Q==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/css-tokenizer": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@csstools/css-tokenizer/-/css-tokenizer-3.0.4.tgz", + "integrity": "sha512-Vd/9EVDiu6PPJt9yAh6roZP6El1xHrdvIVGjyBsHR0RYwNHgL7FJPyIIW4fANJNG6FtyZfvlRPpFI4ZM/lubvw==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + } + }, "node_modules/@dnd-kit/accessibility": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/@dnd-kit/accessibility/-/accessibility-3.1.1.tgz", @@ -411,6 +594,278 @@ "integrity": "sha512-snKqtPW01tN0ui7yu9rGv69aJXr/a/Ywvl11sUjNtEcRc+ng/mQriFL0wLXMef74iHa/EkftbDzU9F8iFbH+zg==", "license": "MIT" }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.1.tgz", + "integrity": "sha512-kfYGy8IdzTGy+z0vFGvExZtxkFlA4zAxgKEahG9KE1ScBjpQnFsNOX8KTU5ojNru5ed5CVoJYXFtoxaq5nFbjQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.1.tgz", + "integrity": "sha512-dp+MshLYux6j/JjdqVLnMglQlFu+MuVeNrmT5nk6q07wNhCdSnB7QZj+7G8VMUGh1q+vj2Bq8kRsuyA00I/k+Q==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.1.tgz", + "integrity": "sha512-50tM0zCJW5kGqgG7fQ7IHvQOcAn9TKiVRuQ/lN0xR+T2lzEFvAi1ZcS8DiksFcEpf1t/GYOeOfCAgDHFpkiSmA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.1.tgz", + "integrity": "sha512-GCj6WfUtNldqUzYkN/ITtlhwQqGWu9S45vUXs7EIYf+7rCiiqH9bCloatO9VhxsL0Pji+PF4Lz2XXCES+Q8hDw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.1.tgz", + "integrity": "sha512-5hEZKPf+nQjYoSr/elb62U19/l1mZDdqidGfmFutVUjjUZrOazAtwK+Kr+3y0C/oeJfLlxo9fXb1w7L+P7E4FQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + 
"node_modules/@esbuild/darwin-x64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.1.tgz", + "integrity": "sha512-hxVnwL2Dqs3fM1IWq8Iezh0cX7ZGdVhbTfnOy5uURtao5OIVCEyj9xIzemDi7sRvKsuSdtCAhMKarxqtlyVyfA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.1.tgz", + "integrity": "sha512-1MrCZs0fZa2g8E+FUo2ipw6jw5qqQiH+tERoS5fAfKnRx6NXH31tXBKI3VpmLijLH6yriMZsxJtaXUyFt/8Y4A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.1.tgz", + "integrity": "sha512-0IZWLiTyz7nm0xuIs0q1Y3QWJC52R8aSXxe40VUxm6BB1RNmkODtW6LHvWRrGiICulcX7ZvyH6h5fqdLu4gkww==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.1.tgz", + "integrity": "sha512-NdKOhS4u7JhDKw9G3cY6sWqFcnLITn6SqivVArbzIaf3cemShqfLGHYMx8Xlm/lBit3/5d7kXvriTUGa5YViuQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.1.tgz", + "integrity": "sha512-jaN3dHi0/DDPelk0nLcXRm1q7DNJpjXy7yWaWvbfkPvI+7XNSc/lDOnCLN7gzsyzgu6qSAmgSvP9oXAhP973uQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.1.tgz", + "integrity": "sha512-OJykPaF4v8JidKNGz8c/q1lBO44sQNUQtq1KktJXdBLn1hPod5rE/Hko5ugKKZd+D2+o1a9MFGUEIUwO2YfgkQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.1.tgz", + "integrity": "sha512-nGfornQj4dzcq5Vp835oM/o21UMlXzn79KobKlcs3Wz9smwiifknLy4xDCLUU0BWp7b/houtdrgUz7nOGnfIYg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.1.tgz", + "integrity": "sha512-1osBbPEFYwIE5IVB/0g2X6i1qInZa1aIoj1TdL4AaAb55xIIgbg8Doq6a5BzYWgr+tEcDzYH67XVnTmUzL+nXg==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.1.tgz", + "integrity": 
"sha512-/6VBJOwUf3TdTvJZ82qF3tbLuWsscd7/1w+D9LH0W/SqUgM5/JJD0lrJ1fVIfZsqB6RFmLCe0Xz3fmZc3WtyVg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.1.tgz", + "integrity": "sha512-nSut/Mx5gnilhcq2yIMLMe3Wl4FK5wx/o0QuuCLMtmJn+WeWYoEGDN1ipcN72g1WHsnIbxGXd4i/MF0gTcuAjQ==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.1.tgz", + "integrity": "sha512-cEECeLlJNfT8kZHqLarDBQso9a27o2Zd2AQ8USAEoGtejOrCYHNtKP8XQhMDJMtthdF4GBmjR2au3x1udADQQQ==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, "node_modules/@esbuild/linux-x64": { "version": "0.25.1", "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.1.tgz", @@ -428,6 +883,142 @@ "node": ">=18" } }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.1.tgz", + "integrity": "sha512-O96poM2XGhLtpTh+s4+nP7YCCAfb4tJNRVZHfIE7dgmax+yMP2WgMd2OecBuaATHKTHsLWHQeuaxMRnCsH8+5g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.1.tgz", + "integrity": "sha512-X53z6uXip6KFXBQ+Krbx25XHV/NCbzryM6ehOAeAil7X7oa4XIq+394PWGnwaSQ2WRA0KI6PUO6hTO5zeF5ijA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.1.tgz", + "integrity": "sha512-Na9T3szbXezdzM/Kfs3GcRQNjHzM6GzFBeU1/6IV/npKP5ORtp9zbQjvkDJ47s6BCgaAZnnnu/cY1x342+MvZg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.1.tgz", + "integrity": "sha512-T3H78X2h1tszfRSf+txbt5aOp/e7TAz3ptVKu9Oyir3IAOFPGV6O9c2naym5TOriy1l0nNf6a4X5UXRZSGX/dw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.1.tgz", + "integrity": "sha512-2H3RUvcmULO7dIE5EWJH8eubZAI4xw54H1ilJnRNZdeo8dTADEZ21w6J22XBkXqGJbe0+wnNJtw3UXRoLJnFEg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.1.tgz", + "integrity": 
"sha512-GE7XvrdOzrb+yVKB9KsRMq+7a2U/K5Cf/8grVFRAGJmfADr/e/ODQ134RK2/eeHqYV5eQRFxb1hY7Nr15fv1NQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.1.tgz", + "integrity": "sha512-uOxSJCIcavSiT6UnBhBzE8wy3n0hOkJsBOzy7HDAuTDE++1DJMRRVCPGisULScHL+a/ZwdXPpXD3IyFKjA7K8A==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.1.tgz", + "integrity": "sha512-Y1EQdcfwMSeQN/ujR5VayLOJ1BHaK+ssyk0AEzPjC+t1lITgsnccPqFjb6V+LsTp/9Iov4ysfjxLaGJ9RPtkVg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, "node_modules/@eslint-community/eslint-utils": { "version": "4.5.0", "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.5.0.tgz", @@ -580,22 +1171,22 @@ } }, "node_modules/@floating-ui/core": { - "version": "1.6.9", - "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.6.9.tgz", - "integrity": "sha512-uMXCuQ3BItDUbAMhIXw7UPXRfAlOAvZzdK9BWpE60MCn+Svt3aLn9jsPTi/WNGlRUu2uI0v5S7JiIUsbsvh3fw==", + "version": "1.7.3", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.7.3.tgz", + "integrity": "sha512-sGnvb5dmrJaKEZ+LDIpguvdX3bDlEllmv4/ClQ9awcmCZrlx5jQyyMWFM5kBI+EyNOCDDiKk8il0zeuX3Zlg/w==", "license": "MIT", "dependencies": { - "@floating-ui/utils": "^0.2.9" + "@floating-ui/utils": "^0.2.10" } }, "node_modules/@floating-ui/dom": { - "version": "1.6.13", - "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.13.tgz", - "integrity": "sha512-umqzocjDgNRGTuO7Q8CU32dkHkECqI8ZdMZ5Swb6QAM0t5rnlrN3lGo1hdpscRd3WS8T6DKYK4ephgIH9iRh3w==", + "version": "1.7.4", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.7.4.tgz", + "integrity": "sha512-OOchDgh4F2CchOX94cRVqhvy7b3AFb+/rQXyswmzmGakRfkMgoWVjfnLWkRirfLEfuD4ysVW16eXzwt3jHIzKA==", "license": "MIT", "dependencies": { - "@floating-ui/core": "^1.6.0", - "@floating-ui/utils": "^0.2.9" + "@floating-ui/core": "^1.7.3", + "@floating-ui/utils": "^0.2.10" } }, "node_modules/@floating-ui/react": { @@ -614,12 +1205,12 @@ } }, "node_modules/@floating-ui/react-dom": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.2.tgz", - "integrity": "sha512-06okr5cgPzMNBy+Ycse2A6udMi4bqwW/zgBF/rwjcNqWkyr82Mcg8b0vjX8OJpZFy/FKjJmw6wV7t44kK6kW7A==", + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.6.tgz", + "integrity": "sha512-4JX6rEatQEvlmgU80wZyq9RT96HZJa88q8hp0pBd+LrczeDI4o6uA2M+uvxngVHo4Ihr8uibXxH6+70zhAFrVw==", "license": "MIT", "dependencies": { - "@floating-ui/dom": "^1.0.0" + "@floating-ui/dom": "^1.7.4" }, "peerDependencies": { "react": ">=16.8.0", @@ -627,9 +1218,9 @@ } }, "node_modules/@floating-ui/utils": { - "version": "0.2.9", - "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.9.tgz", - "integrity": "sha512-MDWhGtE+eHw5JW7lq4qhc5yRLS11ERl1c7Z6Xd0a58DozHES6EnNNwUWbMiG4J9Cgj053Bhk8zvlhFYKVhULwg==", + "version": "0.2.10", + "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.10.tgz", 
+ "integrity": "sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ==", "license": "MIT" }, "node_modules/@humanfs/core": { @@ -731,9 +1322,9 @@ } }, "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", - "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", "license": "MIT" }, "node_modules/@jridgewell/trace-mapping": { @@ -753,72 +1344,72 @@ "license": "Apache-2.0" }, "node_modules/@mantine/charts": { - "version": "7.17.2", - "resolved": "https://registry.npmjs.org/@mantine/charts/-/charts-7.17.2.tgz", - "integrity": "sha512-ckB23pIqRjzysUz2EiWZD9AVyf7t0r7o7zfJbl01nzOezFgYq5RGeRoxvpcsfBC+YoSbB/43rjNcXtYhtA7QzA==", + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/@mantine/charts/-/charts-8.0.1.tgz", + "integrity": "sha512-yntk4siXpQGSj83tDwftJw6fHTOBS6c/VWinjvTW29ptEdjBCxbKFfyyDc9UGVVuO7ovbdtpfCZBpuN2I7HPCA==", "license": "MIT", "peerDependencies": { - "@mantine/core": "7.17.2", - "@mantine/hooks": "7.17.2", + "@mantine/core": "8.0.1", + "@mantine/hooks": "8.0.1", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x", "recharts": "^2.13.3" } }, "node_modules/@mantine/core": { - "version": "7.17.2", - "resolved": "https://registry.npmjs.org/@mantine/core/-/core-7.17.2.tgz", - "integrity": "sha512-R6MYhitJ0JEgrhadd31Nw9FhRaQwDHjXUs5YIlitKH/fTOz9gKSxKjzmNng3bEBQCcbEDOkZj3FRcBgTUh/F0Q==", + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/@mantine/core/-/core-8.0.1.tgz", + "integrity": "sha512-4ezaxKjChSPtawamQ3KrJq+x506uTouXlL0Z5fP+t105KnyxMrAJUENhbh2ivD4pq9Zh1BFiD9IWzyu3IXFR8w==", "license": "MIT", "dependencies": { "@floating-ui/react": "^0.26.28", "clsx": "^2.1.1", "react-number-format": "^5.4.3", "react-remove-scroll": "^2.6.2", - "react-textarea-autosize": "8.5.6", + "react-textarea-autosize": "8.5.9", "type-fest": "^4.27.0" }, "peerDependencies": { - "@mantine/hooks": "7.17.2", + "@mantine/hooks": "8.0.1", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } }, "node_modules/@mantine/dates": { - "version": "7.17.2", - "resolved": "https://registry.npmjs.org/@mantine/dates/-/dates-7.17.2.tgz", - "integrity": "sha512-7bB992j8f+uEi280jab0/8i5yfsN/3oSrMDFwatZ+7XSDUwiP0YFib/FVX0pNSSqdFpbXhUmsZEECX71QtHw+Q==", + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/@mantine/dates/-/dates-8.0.1.tgz", + "integrity": "sha512-YCmV5jiGE9Ts2uhNS217IA1Hd5kAa8oaEtfnU0bS1sL36zKEf2s6elmzY718XdF8tFil0jJWAj0jiCrA3/udMg==", "license": "MIT", "dependencies": { "clsx": "^2.1.1" }, "peerDependencies": { - "@mantine/core": "7.17.2", - "@mantine/hooks": "7.17.2", + "@mantine/core": "8.0.1", + "@mantine/hooks": "8.0.1", "dayjs": ">=1.0.0", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } }, "node_modules/@mantine/dropzone": { - "version": "7.17.2", - "resolved": "https://registry.npmjs.org/@mantine/dropzone/-/dropzone-7.17.2.tgz", - "integrity": "sha512-NMQ1SDmnW0sf3GO6p1r/VIcg/xWqlRmfnWCr00/bGRbBEGbyaUwL3LSn+KYBJdY+3/jNGvGa+xflWDvnby5tzw==", + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/@mantine/dropzone/-/dropzone-8.0.1.tgz", + "integrity": 
"sha512-8PH5yrtA/ebCIwjs0m4J9qOvEyS/P4XmNlHrw0E389/qq64Ol7+/ZH7Xtiq64IaY8kvsMW1XHaV0c+bdYrijiA==", "license": "MIT", "dependencies": { - "react-dropzone-esm": "15.2.0" + "react-dropzone": "14.3.8" }, "peerDependencies": { - "@mantine/core": "7.17.2", - "@mantine/hooks": "7.17.2", + "@mantine/core": "8.0.1", + "@mantine/hooks": "8.0.1", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } }, "node_modules/@mantine/form": { - "version": "7.17.3", - "resolved": "https://registry.npmjs.org/@mantine/form/-/form-7.17.3.tgz", - "integrity": "sha512-ktERldD8f9lrjjz6wIbwMnNbAZq8XEWPx4K5WuFyjXaK0PI8D+gsXIGKMtA5rVrAUFHCWCdbK3yLgtjJNki8ew==", + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/@mantine/form/-/form-8.0.1.tgz", + "integrity": "sha512-lQ94gn/9p60C+tKEW7psQ1tZHod58Q0bXLbRDadRKMwnqBb2WFoIuaQWPDo7ox+PqyOv28dtflgS+Lm95EbBhg==", "license": "MIT", "dependencies": { "fast-deep-equal": "^3.1.3", @@ -829,34 +1420,34 @@ } }, "node_modules/@mantine/hooks": { - "version": "7.17.2", - "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-7.17.2.tgz", - "integrity": "sha512-tbErVcGZu0E4dSmE6N0k6Tv1y9R3SQmmQgwqorcc+guEgKMdamc36lucZGlJnSGUmGj+WLUgELkEQ0asdfYBDA==", + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-8.0.1.tgz", + "integrity": "sha512-GvLdM4Ro3QcDyIgqrdXsUZmeeKye2TNL/k3mEr9JhM5KacHQjr83JPp0u9eLobn7kiyBqpLTYmVYAbmjJdCxHw==", "license": "MIT", "peerDependencies": { "react": "^18.x || ^19.x" } }, "node_modules/@mantine/notifications": { - "version": "7.17.2", - "resolved": "https://registry.npmjs.org/@mantine/notifications/-/notifications-7.17.2.tgz", - "integrity": "sha512-vg0L8cmihz0ODg4WJ9MAyK06WPt/6g67ksIUFxd4F8RfdJbIMLTsNG9yWoSfuhtXenUg717KaA917IWLjDSaqw==", + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/@mantine/notifications/-/notifications-8.0.1.tgz", + "integrity": "sha512-7TX9OyAmUcok3qffnheS7gTAMKDczETy8XEYDr38Sy/XIoXLjM+3CwO+a/vfd1F9oW2LvkahkHT0Ey+vBOVd0Q==", "license": "MIT", "dependencies": { - "@mantine/store": "7.17.2", + "@mantine/store": "8.0.1", "react-transition-group": "4.4.5" }, "peerDependencies": { - "@mantine/core": "7.17.2", - "@mantine/hooks": "7.17.2", + "@mantine/core": "8.0.1", + "@mantine/hooks": "8.0.1", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } }, "node_modules/@mantine/store": { - "version": "7.17.2", - "resolved": "https://registry.npmjs.org/@mantine/store/-/store-7.17.2.tgz", - "integrity": "sha512-UoMUYQK/z58hMueCkpDIXc49gPgrVO/zcpb0k+B7MFU51EIUiFzHLxLFBmWrgCAM6rzJORqN8JjyCd/PB9j4aw==", + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/@mantine/store/-/store-8.0.1.tgz", + "integrity": "sha512-3wfUDeiERXJEI+MGgRAbh+9aY35D9oE4UzquLqZh8cIiH5i5g64Y/eJx3PfjHgO5+Zeu6lbgTgL6k4lg4a2SBQ==", "license": "MIT", "peerDependencies": { "react": "^18.x || ^19.x" @@ -872,6 +1463,209 @@ "url": "https://opencollective.com/popperjs" } }, + "node_modules/@rolldown/pluginutils": { + "version": "1.0.0-beta.35", + "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.35.tgz", + "integrity": "sha512-slYrCpoxJUqzFDDNlvrOYRazQUNRvWPjXA17dAOISY3rDMxX6k8K4cj2H+hEYMHF81HO3uNd5rHVigAWRM5dSg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.35.0.tgz", + "integrity": "sha512-uYQ2WfPaqz5QtVgMxfN6NpLD+no0MYHDBywl7itPYd3K5TjjSghNKmX8ic9S8NU8w81NVhJv/XojcHptRly7qQ==", + "cpu": [ + 
"arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.35.0.tgz", + "integrity": "sha512-FtKddj9XZudurLhdJnBl9fl6BwCJ3ky8riCXjEw3/UIbjmIY58ppWwPEvU3fNu+W7FUsAsB1CdH+7EQE6CXAPA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.35.0.tgz", + "integrity": "sha512-Uk+GjOJR6CY844/q6r5DR/6lkPFOw0hjfOIzVx22THJXMxktXG6CbejseJFznU8vHcEBLpiXKY3/6xc+cBm65Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.35.0.tgz", + "integrity": "sha512-3IrHjfAS6Vkp+5bISNQnPogRAW5GAV1n+bNCrDwXmfMHbPl5EhTmWtfmwlJxFRUCBZ+tZ/OxDyU08aF6NI/N5Q==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.35.0.tgz", + "integrity": "sha512-sxjoD/6F9cDLSELuLNnY0fOrM9WA0KrM0vWm57XhrIMf5FGiN8D0l7fn+bpUeBSU7dCgPV2oX4zHAsAXyHFGcQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.35.0.tgz", + "integrity": "sha512-2mpHCeRuD1u/2kruUiHSsnjWtHjqVbzhBkNVQ1aVD63CcexKVcQGwJ2g5VphOd84GvxfSvnnlEyBtQCE5hxVVw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.35.0.tgz", + "integrity": "sha512-mrA0v3QMy6ZSvEuLs0dMxcO2LnaCONs1Z73GUDBHWbY8tFFocM6yl7YyMu7rz4zS81NDSqhrUuolyZXGi8TEqg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.35.0.tgz", + "integrity": "sha512-DnYhhzcvTAKNexIql8pFajr0PiDGrIsBYPRvCKlA5ixSS3uwo/CWNZxB09jhIapEIg945KOzcYEAGGSmTSpk7A==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.35.0.tgz", + "integrity": "sha512-uagpnH2M2g2b5iLsCTZ35CL1FgyuzzJQ8L9VtlJ+FckBXroTwNOaD0z0/UF+k5K3aNQjbm8LIVpxykUOQt1m/A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.35.0.tgz", + "integrity": 
"sha512-XQxVOCd6VJeHQA/7YcqyV0/88N6ysSVzRjJ9I9UA/xXpEsjvAgDTgH3wQYz5bmr7SPtVK2TsP2fQ2N9L4ukoUg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loongarch64-gnu": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loongarch64-gnu/-/rollup-linux-loongarch64-gnu-4.35.0.tgz", + "integrity": "sha512-5pMT5PzfgwcXEwOaSrqVsz/LvjDZt+vQ8RT/70yhPU06PTuq8WaHhfT1LW+cdD7mW6i/J5/XIkX/1tCAkh1W6g==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-powerpc64le-gnu": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.35.0.tgz", + "integrity": "sha512-c+zkcvbhbXF98f4CtEIP1EBA/lCic5xB0lToneZYvMeKu5Kamq3O8gqrxiYYLzlZH6E3Aq+TSW86E4ay8iD8EA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.35.0.tgz", + "integrity": "sha512-s91fuAHdOwH/Tad2tzTtPX7UZyytHIRR6V4+2IGlV0Cej5rkG0R61SX4l4y9sh0JBibMiploZx3oHKPnQBKe4g==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.35.0.tgz", + "integrity": "sha512-hQRkPQPLYJZYGP+Hj4fR9dDBMIM7zrzJDWFEMPdTnTy95Ljnv0/4w/ixFw3pTBMEuuEuoqtBINYND4M7ujcuQw==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, "node_modules/@rollup/rollup-linux-x64-gnu": { "version": "4.35.0", "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.35.0.tgz", @@ -900,137 +1694,62 @@ "linux" ] }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.35.0.tgz", + "integrity": "sha512-OUOlGqPkVJCdJETKOCEf1mw848ZyJ5w50/rZ/3IBQVdLfR5jk/6Sr5m3iO2tdPgwo0x7VcncYuOvMhBWZq8ayg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.35.0.tgz", + "integrity": "sha512-2/lsgejMrtwQe44glq7AFFHLfJBPafpsTa6JvP2NGef/ifOa4KBoglVf7AKN7EV9o32evBPRqfg96fEHzWo5kw==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.35.0.tgz", + "integrity": "sha512-PIQeY5XDkrOysbQblSW7v3l1MDZzkTEzAfTPkj5VAu3FW8fS4ynyLg2sINp0fp3SjZ8xkRYpLqoKcYqAkhU1dw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, "node_modules/@swc/core": { - "version": "1.11.8", - "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.11.8.tgz", - "integrity": 
"sha512-UAL+EULxrc0J73flwYHfu29mO8CONpDJiQv1QPDXsyCvDUcEhqAqUROVTgC+wtJCFFqMQdyr4stAA5/s0KSOmA==", - "dev": true, - "hasInstallScript": true, - "license": "Apache-2.0", - "dependencies": { - "@swc/counter": "^0.1.3", - "@swc/types": "^0.1.19" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/swc" - }, - "optionalDependencies": { - "@swc/core-darwin-arm64": "1.11.8", - "@swc/core-darwin-x64": "1.11.8", - "@swc/core-linux-arm-gnueabihf": "1.11.8", - "@swc/core-linux-arm64-gnu": "1.11.8", - "@swc/core-linux-arm64-musl": "1.11.8", - "@swc/core-linux-x64-gnu": "1.11.8", - "@swc/core-linux-x64-musl": "1.11.8", - "@swc/core-win32-arm64-msvc": "1.11.8", - "@swc/core-win32-ia32-msvc": "1.11.8", - "@swc/core-win32-x64-msvc": "1.11.8" - }, - "peerDependencies": { - "@swc/helpers": "*" - }, - "peerDependenciesMeta": { - "@swc/helpers": { - "optional": true - } - } - }, - "node_modules/@swc/core-linux-x64-gnu": { - "version": "1.11.8", - "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.11.8.tgz", - "integrity": "sha512-r72onUEIU1iJi9EUws3R28pztQ/eM3EshNpsPRBfuLwKy+qn3et55vXOyDhIjGCUph5Eg2Yn8H3h6MTxDdLd+w==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "Apache-2.0 AND MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=10" - } - }, - "node_modules/@swc/core-linux-x64-musl": { - "version": "1.11.8", - "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.11.8.tgz", - "integrity": "sha512-294k8cLpO103++f4ZUEDr3vnBeUfPitW6G0a3qeVZuoXFhFgaW7ANZIWknUc14WiLOMfMecphJAEiy9C8OeYSw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "Apache-2.0 AND MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=10" - } - }, - "node_modules/@swc/counter": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", - "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==", + "name": "@swc/wasm", + "version": "1.13.20", + "resolved": "https://registry.npmjs.org/@swc/wasm/-/wasm-1.13.20.tgz", + "integrity": "sha512-NJzN+QrbdwXeVTfTYiHkqv13zleOCQA52NXBOrwKvjxWJQecRqakjUhUP2z8lqs7eWVthko4Cilqs+VeBrwo3Q==", "dev": true, "license": "Apache-2.0" }, - "node_modules/@swc/types": { - "version": "0.1.19", - "resolved": "https://registry.npmjs.org/@swc/types/-/types-0.1.19.tgz", - "integrity": "sha512-WkAZaAfj44kh/UFdAQcrMP1I0nwRqpt27u+08LMBYMqmQfwwMofYoMh/48NGkMMRfC4ynpfwRbJuu8ErfNloeA==", + "node_modules/@swc/wasm": { + "version": "1.13.20", + "resolved": "https://registry.npmjs.org/@swc/wasm/-/wasm-1.13.20.tgz", + "integrity": "sha512-NJzN+QrbdwXeVTfTYiHkqv13zleOCQA52NXBOrwKvjxWJQecRqakjUhUP2z8lqs7eWVthko4Cilqs+VeBrwo3Q==", "dev": true, - "license": "Apache-2.0", - "dependencies": { - "@swc/counter": "^0.1.3" - } - }, - "node_modules/@tabler/icons": { - "version": "3.31.0", - "resolved": "https://registry.npmjs.org/@tabler/icons/-/icons-3.31.0.tgz", - "integrity": "sha512-dblAdeKY3+GA1U+Q9eziZ0ooVlZMHsE8dqP0RkwvRtEsAULoKOYaCUOcJ4oW1DjWegdxk++UAt2SlQVnmeHv+g==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/codecalm" - } - }, - "node_modules/@tabler/icons-react": { - "version": "3.31.0", - "resolved": "https://registry.npmjs.org/@tabler/icons-react/-/icons-react-3.31.0.tgz", - "integrity": "sha512-2rrCM5y/VnaVKnORpDdAua9SEGuJKVqPtWxeQ/vUVsgaUx30LDgBZph7/lterXxDY1IKR6NO//HDhWiifXTi3w==", - 
"license": "MIT", - "dependencies": { - "@tabler/icons": "3.31.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/codecalm" - }, - "peerDependencies": { - "react": ">= 16" - } - }, - "node_modules/@tanstack/match-sorter-utils": { - "version": "8.19.4", - "resolved": "https://registry.npmjs.org/@tanstack/match-sorter-utils/-/match-sorter-utils-8.19.4.tgz", - "integrity": "sha512-Wo1iKt2b9OT7d+YGhvEPD3DXvPv2etTusIMhMUoG7fbhmxcXCtIjJDEygy91Y2JFlwGyjqiBPRozme7UD8hoqg==", - "license": "MIT", - "dependencies": { - "remove-accents": "0.5.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/tannerlinsley" - } + "license": "Apache-2.0" }, "node_modules/@tanstack/react-table": { "version": "8.21.3", @@ -1052,23 +1771,6 @@ "react-dom": ">=16.8" } }, - "node_modules/@tanstack/react-virtual": { - "version": "3.11.2", - "resolved": "https://registry.npmjs.org/@tanstack/react-virtual/-/react-virtual-3.11.2.tgz", - "integrity": "sha512-OuFzMXPF4+xZgx8UzJha0AieuMihhhaWG0tCqpp6tDzlFwOmNBPYMuLOtMJ1Tr4pXLHmgjcWhG6RlknY2oNTdQ==", - "license": "MIT", - "dependencies": { - "@tanstack/virtual-core": "3.11.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/tannerlinsley" - }, - "peerDependencies": { - "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", - "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" - } - }, "node_modules/@tanstack/table-core": { "version": "8.21.3", "resolved": "https://registry.npmjs.org/@tanstack/table-core/-/table-core-8.21.3.tgz", @@ -1082,22 +1784,112 @@ "url": "https://github.com/sponsors/tannerlinsley" } }, - "node_modules/@tanstack/virtual-core": { - "version": "3.11.2", - "resolved": "https://registry.npmjs.org/@tanstack/virtual-core/-/virtual-core-3.11.2.tgz", - "integrity": "sha512-vTtpNt7mKCiZ1pwU9hfKPhpdVO2sVzFQsxoVBGtOSHxlrRRzYr8iQ2TlwbAcRYCcEiZ9ECAM8kBzH0v2+VzfKw==", + "node_modules/@testing-library/dom": { + "version": "10.4.1", + "resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-10.4.1.tgz", + "integrity": "sha512-o4PXJQidqJl82ckFaXUeoAW+XysPLauYI43Abki5hABd853iMhitooc6znOnczgbTYmEP6U6/y1ZyKAIsvMKGg==", + "dev": true, "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/tannerlinsley" + "dependencies": { + "@babel/code-frame": "^7.10.4", + "@babel/runtime": "^7.12.5", + "@types/aria-query": "^5.0.1", + "aria-query": "5.3.0", + "dom-accessibility-api": "^0.5.9", + "lz-string": "^1.5.0", + "picocolors": "1.1.1", + "pretty-format": "^27.0.2" + }, + "engines": { + "node": ">=18" } }, - "node_modules/@types/cookie": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.6.0.tgz", - "integrity": "sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA==", + "node_modules/@testing-library/jest-dom": { + "version": "6.8.0", + "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-6.8.0.tgz", + "integrity": "sha512-WgXcWzVM6idy5JaftTVC8Vs83NKRmGJz4Hqs4oyOuO2J4r/y79vvKZsb+CaGyCSEbUPI6OsewfPd0G1A0/TUZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@adobe/css-tools": "^4.4.0", + "aria-query": "^5.0.0", + "css.escape": "^1.5.1", + "dom-accessibility-api": "^0.6.3", + "picocolors": "^1.1.1", + "redent": "^3.0.0" + }, + "engines": { + "node": ">=14", + "npm": ">=6", + "yarn": ">=1" + } + }, + "node_modules/@testing-library/jest-dom/node_modules/dom-accessibility-api": { + "version": "0.6.3", + 
"resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.6.3.tgz", + "integrity": "sha512-7ZgogeTnjuHbo+ct10G9Ffp0mif17idi0IyWNVA/wcwcm7NPOD/WEHVP3n7n3MhXqxoIYm8d6MuZohYWIZ4T3w==", + "dev": true, "license": "MIT" }, + "node_modules/@testing-library/react": { + "version": "16.3.0", + "resolved": "https://registry.npmjs.org/@testing-library/react/-/react-16.3.0.tgz", + "integrity": "sha512-kFSyxiEDwv1WLl2fgsq6pPBbw5aWKrsY2/noi1Id0TK0UParSF62oFQFGHXIyaG4pp2tEub/Zlel+fjjZILDsw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.12.5" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@testing-library/dom": "^10.0.0", + "@types/react": "^18.0.0 || ^19.0.0", + "@types/react-dom": "^18.0.0 || ^19.0.0", + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@testing-library/user-event": { + "version": "14.6.1", + "resolved": "https://registry.npmjs.org/@testing-library/user-event/-/user-event-14.6.1.tgz", + "integrity": "sha512-vq7fv0rnt+QTXgPxr5Hjc210p6YKq2kmdziLgnsZGgLJ9e6VAShx1pACLuRjd/AS/sr7phAR58OIIpf0LlmQNw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12", + "npm": ">=6" + }, + "peerDependencies": { + "@testing-library/dom": ">=7.21.4" + } + }, + "node_modules/@types/aria-query": { + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/@types/aria-query/-/aria-query-5.0.4.tgz", + "integrity": "sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/chai": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.2.tgz", + "integrity": "sha512-8kB30R7Hwqf40JPiKhVzodJs2Qc1ZJ5zuT3uzw5Hq/dhNCl3G3l83jfpdI1e20BP348+fV7VIL/+FxaXkqBmWg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/deep-eql": "*" + } + }, "node_modules/@types/d3-array": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.1.tgz", @@ -1161,6 +1953,13 @@ "integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==", "license": "MIT" }, + "node_modules/@types/deep-eql": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", + "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/estree": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", @@ -1192,18 +1991,18 @@ "license": "MIT" }, "node_modules/@types/react": { - "version": "19.0.10", - "resolved": "https://registry.npmjs.org/@types/react/-/react-19.0.10.tgz", - "integrity": "sha512-JuRQ9KXLEjaUNjTWpzuR231Z2WpIwczOkBEIvbHNCzQefFIT0L8IqE6NV6ULLyC1SI/i234JnDoMkfg+RjQj2g==", + "version": "19.1.16", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.1.16.tgz", + "integrity": "sha512-WBM/nDbEZmDUORKnh5i1bTnAz6vTohUf9b8esSMu+b24+srbaxa04UbJgWx78CVfNXA20sNu0odEIluZDFdCog==", "license": "MIT", "dependencies": { "csstype": "^3.0.2" } }, "node_modules/@types/react-dom": { - "version": "19.0.4", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.0.4.tgz", - "integrity": 
"sha512-4fSQ8vWFkg+TGhePfUzVmat3eC14TXYSsiiDSLI0dVLsrm9gZFABjPy/Qu6TKgl1tq1Bu1yDsuQgY3A3DOjCcg==", + "version": "19.1.9", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.1.9.tgz", + "integrity": "sha512-qXRuZaOsAdXKFyOhRBg6Lqqc0yay13vN7KrIg4L7N4aaHN68ma9OK3NE1BoDFgFOTfM7zg+3/8+2n8rLUH3OKQ==", "dev": true, "license": "MIT", "peerDependencies": { @@ -1259,16 +2058,135 @@ } }, "node_modules/@vitejs/plugin-react-swc": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@vitejs/plugin-react-swc/-/plugin-react-swc-3.8.0.tgz", - "integrity": "sha512-T4sHPvS+DIqDP51ifPqa9XIRAz/kIvIi8oXcnOZZgHmMotgmmdxe/DD5tMFlt5nuIRzT0/QuiwmKlH0503Aapw==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react-swc/-/plugin-react-swc-4.1.0.tgz", + "integrity": "sha512-Ff690TUck0Anlh7wdIcnsVMhofeEVgm44Y4OYdeeEEPSKyZHzDI9gfVBvySEhDfXtBp8tLCbfsVKPWEMEjq8/g==", "dev": true, "license": "MIT", "dependencies": { - "@swc/core": "^1.10.15" + "@rolldown/pluginutils": "1.0.0-beta.35", + "@swc/core": "^1.13.5" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" }, "peerDependencies": { - "vite": "^4 || ^5 || ^6" + "vite": "^4 || ^5 || ^6 || ^7" + } + }, + "node_modules/@vitest/expect": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-3.2.4.tgz", + "integrity": "sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/chai": "^5.2.2", + "@vitest/spy": "3.2.4", + "@vitest/utils": "3.2.4", + "chai": "^5.2.0", + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/mocker": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-3.2.4.tgz", + "integrity": "sha512-46ryTE9RZO/rfDd7pEqFl7etuyzekzEhUbTW3BvmeO/BcCMEgq59BKhek3dXDWgAj4oMK6OZi+vRr1wPW6qjEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/spy": "3.2.4", + "estree-walker": "^3.0.3", + "magic-string": "^0.30.17" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "msw": "^2.4.9", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" + }, + "peerDependenciesMeta": { + "msw": { + "optional": true + }, + "vite": { + "optional": true + } + } + }, + "node_modules/@vitest/pretty-format": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-3.2.4.tgz", + "integrity": "sha512-IVNZik8IVRJRTr9fxlitMKeJeXFFFN0JaB9PHPGQ8NKQbGpfjlTx9zO4RefN8gp7eqjNy8nyK3NZmBzOPeIxtA==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/runner": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-3.2.4.tgz", + "integrity": "sha512-oukfKT9Mk41LreEW09vt45f8wx7DordoWUZMYdY/cyAk7w5TWkTRCNZYF7sX7n2wB7jyGAl74OxgwhPgKaqDMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/utils": "3.2.4", + "pathe": "^2.0.3", + "strip-literal": "^3.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/snapshot": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-3.2.4.tgz", + "integrity": "sha512-dEYtS7qQP2CjU27QBC5oUOxLE/v5eLkGqPE0ZKEIDGMs4vKWe7IjgLOeauHsR0D5YuuycGRO5oSRXnwnmA78fQ==", + "dev": true, + "license": "MIT", + "dependencies": { 
+ "@vitest/pretty-format": "3.2.4", + "magic-string": "^0.30.17", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-3.2.4.tgz", + "integrity": "sha512-vAfasCOe6AIK70iP5UD11Ac4siNUNJ9i/9PZ3NKx07sG6sUxeag1LWdNrMWeKKYBLlzuK+Gn65Yd5nyL6ds+nw==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyspy": "^4.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-3.2.4.tgz", + "integrity": "sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "3.2.4", + "loupe": "^3.1.4", + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" } }, "node_modules/@xmldom/xmldom": { @@ -1315,6 +2233,16 @@ "pkcs7": "^1.0.4" } }, + "node_modules/agent-base": { + "version": "7.1.4", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", + "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, "node_modules/ajv": { "version": "6.12.6", "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", @@ -1333,9 +2261,9 @@ } }, "node_modules/allotment": { - "version": "1.20.3", - "resolved": "https://registry.npmjs.org/allotment/-/allotment-1.20.3.tgz", - "integrity": "sha512-JCnklt7j0OsyDjD7A9AdT6wqJ3FSoo1ASV6w02Am02lo6NwO25yhG1DcWW8ueBV38ppXQmvrXBXuzX7iVkq6Tw==", + "version": "1.20.4", + "resolved": "https://registry.npmjs.org/allotment/-/allotment-1.20.4.tgz", + "integrity": "sha512-LMM5Xe5nLePFOLAlW/5k3ARqznYGUyNekV4xJrfDKn1jimW3nlZE6hT/Tu0T8s0VgAkr9s2P7+uM0WvJKn5DAw==", "license": "MIT", "dependencies": { "classnames": "^2.3.0", @@ -1350,17 +2278,14 @@ "react-dom": "^17.0.0 || ^18.0.0 || ^19.0.0" } }, - "node_modules/allotment/node_modules/use-resize-observer": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/use-resize-observer/-/use-resize-observer-9.1.0.tgz", - "integrity": "sha512-R25VqO9Wb3asSD4eqtcxk8sJalvIOYBqS8MNZlpDSQ4l4xMQxC/J7Id9HoTqPq8FwULIn0PVW+OAqF2dyYbjow==", + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, "license": "MIT", - "dependencies": { - "@juggle/resize-observer": "^3.3.1" - }, - "peerDependencies": { - "react": "16.8.0 - 18", - "react-dom": "16.8.0 - 18" + "engines": { + "node": ">=8" } }, "node_modules/ansi-styles": { @@ -1386,21 +2311,33 @@ "dev": true, "license": "Python-2.0" }, - "node_modules/asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", - "license": "MIT" - }, - "node_modules/axios": { - "version": "1.8.3", - "resolved": "https://registry.npmjs.org/axios/-/axios-1.8.3.tgz", - "integrity": "sha512-iP4DebzoNlP/YN2dpwCgb8zoCmhtkajzS48JvwmkSkXvPI3DHc7m+XYL5tGnSlJtR6nImXZmdCuN5aP8dh1d8A==", - "license": "MIT", + "node_modules/aria-query": { + "version": "5.3.0", + "resolved": 
"https://registry.npmjs.org/aria-query/-/aria-query-5.3.0.tgz", + "integrity": "sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==", + "dev": true, + "license": "Apache-2.0", "dependencies": { - "follow-redirects": "^1.15.6", - "form-data": "^4.0.0", - "proxy-from-env": "^1.1.0" + "dequal": "^2.0.3" + } + }, + "node_modules/assertion-error": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/attr-accept": { + "version": "2.2.5", + "resolved": "https://registry.npmjs.org/attr-accept/-/attr-accept-2.2.5.tgz", + "integrity": "sha512-0bDNnY/u6pPwHDMoF0FieU354oBi0a8rD9FcsLwzcGWbc8KS8KPIi7y+s13OlVY+gMWc/9xEMUgNE6Qm8ZllYQ==", + "license": "MIT", + "engines": { + "node": ">=4" } }, "node_modules/babel-plugin-macros": { @@ -1425,6 +2362,16 @@ "dev": true, "license": "MIT" }, + "node_modules/bidi-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/bidi-js/-/bidi-js-1.0.3.tgz", + "integrity": "sha512-RKshQI1R3YQ+n9YJz2QQ147P66ELpa1FQEg20Dk8oW9t2KgLbpDLLp9aGZ7y8WHSshDknG0bknqGw5/tyCs5tw==", + "dev": true, + "license": "MIT", + "dependencies": { + "require-from-string": "^2.0.2" + } + }, "node_modules/brace-expansion": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", @@ -1436,17 +2383,14 @@ "concat-map": "0.0.1" } }, - "node_modules/call-bind-apply-helpers": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", - "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "node_modules/cac": { + "version": "6.7.14", + "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", + "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", + "dev": true, "license": "MIT", - "dependencies": { - "es-errors": "^1.3.0", - "function-bind": "^1.1.2" - }, "engines": { - "node": ">= 0.4" + "node": ">=8" } }, "node_modules/callsites": { @@ -1458,6 +2402,23 @@ "node": ">=6" } }, + "node_modules/chai": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/chai/-/chai-5.3.3.tgz", + "integrity": "sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==", + "dev": true, + "license": "MIT", + "dependencies": { + "assertion-error": "^2.0.1", + "check-error": "^2.1.1", + "deep-eql": "^5.0.1", + "loupe": "^3.1.0", + "pathval": "^2.0.0" + }, + "engines": { + "node": ">=18" + } + }, "node_modules/chalk": { "version": "4.1.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", @@ -1475,6 +2436,16 @@ "url": "https://github.com/chalk/chalk?sponsor=1" } }, + "node_modules/check-error": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.1.tgz", + "integrity": "sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 16" + } + }, "node_modules/classnames": { "version": "2.5.1", "resolved": "https://registry.npmjs.org/classnames/-/classnames-2.5.1.tgz", @@ -1510,18 +2481,6 @@ "dev": true, "license": "MIT" }, - "node_modules/combined-stream": { - 
"version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "license": "MIT", - "dependencies": { - "delayed-stream": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, "node_modules/concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", @@ -1584,6 +2543,42 @@ "node": ">= 8" } }, + "node_modules/css-tree": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-3.1.0.tgz", + "integrity": "sha512-0eW44TGN5SQXU1mWSkKwFstI/22X2bG1nYzZTYMAWjylYURhse752YgbE4Cx46AC+bAvI+/dYTPRk1LqSUnu6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "mdn-data": "2.12.2", + "source-map-js": "^1.0.1" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0" + } + }, + "node_modules/css.escape": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/css.escape/-/css.escape-1.5.1.tgz", + "integrity": "sha512-YUifsXXuknHlUsmlgyY0PKzgPOr7/FjCePfHNt0jxm83wHZi44VDMQ7/fGNkjY3/jV1MC+1CmZbaHzugyeRtpg==", + "dev": true, + "license": "MIT" + }, + "node_modules/cssstyle": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-5.3.1.tgz", + "integrity": "sha512-g5PC9Aiph9eiczFpcgUhd9S4UUO3F+LHGRIi5NUMZ+4xtoIYbHNZwZnWA2JsFGe8OU8nl4WyaEFiZuGuxlutJQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@asamuzakjp/css-color": "^4.0.3", + "@csstools/css-syntax-patches-for-csstree": "^1.0.14", + "css-tree": "^3.1.0" + }, + "engines": { + "node": ">=20" + } + }, "node_modules/csstype": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", @@ -1711,6 +2706,20 @@ "node": ">=12" } }, + "node_modules/data-urls": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-6.0.0.tgz", + "integrity": "sha512-BnBS08aLUM+DKamupXs3w2tJJoqU+AkaE/+6vQxi/G/DPmIZFJJp9Dkb1kM03AZx8ADehDUZgsNxju3mPXZYIA==", + "dev": true, + "license": "MIT", + "dependencies": { + "whatwg-mimetype": "^4.0.0", + "whatwg-url": "^15.0.0" + }, + "engines": { + "node": ">=20" + } + }, "node_modules/dayjs": { "version": "1.11.13", "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.13.tgz", @@ -1718,9 +2727,9 @@ "license": "MIT" }, "node_modules/debug": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", - "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", "license": "MIT", "dependencies": { "ms": "^2.1.3" @@ -1734,12 +2743,29 @@ } } }, + "node_modules/decimal.js": { + "version": "10.6.0", + "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.6.0.tgz", + "integrity": "sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg==", + "dev": true, + "license": "MIT" + }, "node_modules/decimal.js-light": { "version": "2.5.1", "resolved": "https://registry.npmjs.org/decimal.js-light/-/decimal.js-light-2.5.1.tgz", "integrity": "sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg==", "license": "MIT" }, + "node_modules/deep-eql": { + "version": "5.0.2", + "resolved": 
"https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", + "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/deep-is": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", @@ -1756,13 +2782,14 @@ "node": ">=0.10.0" } }, - "node_modules/delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "dev": true, "license": "MIT", "engines": { - "node": ">=0.4.0" + "node": ">=6" } }, "node_modules/detect-node-es": { @@ -1771,6 +2798,13 @@ "integrity": "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==", "license": "MIT" }, + "node_modules/dom-accessibility-api": { + "version": "0.5.16", + "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.5.16.tgz", + "integrity": "sha512-X7BJ2yElsnOJ30pZF4uIIDfBEVgF4XEBxL9Bxhy6dnrm5hkzqmsWHGTiHqRiITNhMyFLyAiWndIJP7Z1NTteDg==", + "dev": true, + "license": "MIT" + }, "node_modules/dom-helpers": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/dom-helpers/-/dom-helpers-5.2.1.tgz", @@ -1786,18 +2820,17 @@ "resolved": "https://registry.npmjs.org/dom-walk/-/dom-walk-0.1.2.tgz", "integrity": "sha512-6QvTW9mrGeIegrFXdtQi9pk7O/nSK6lSdXW2eqUspN5LWD7UTji2Fqw5V2YLjBpHEoU9Xl/eUWNpDeZvoyOv2w==" }, - "node_modules/dunder-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", - "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", - "license": "MIT", - "dependencies": { - "call-bind-apply-helpers": "^1.0.1", - "es-errors": "^1.3.0", - "gopd": "^1.2.0" - }, + "node_modules/entities": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz", + "integrity": "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==", + "dev": true, + "license": "BSD-2-Clause", "engines": { - "node": ">= 0.4" + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" } }, "node_modules/error-ex": { @@ -1809,50 +2842,12 @@ "is-arrayish": "^0.2.1" } }, - "node_modules/es-define-property": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", - "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-errors": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", - "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-object-atoms": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", - "integrity": 
"sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", - "license": "MIT", - "dependencies": { - "es-errors": "^1.3.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-set-tostringtag": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", - "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", - "license": "MIT", - "dependencies": { - "es-errors": "^1.3.0", - "get-intrinsic": "^1.2.6", - "has-tostringtag": "^1.0.2", - "hasown": "^2.0.2" - }, - "engines": { - "node": ">= 0.4" - } + "node_modules/es-module-lexer": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", + "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", + "dev": true, + "license": "MIT" }, "node_modules/es6-promise": { "version": "4.2.8", @@ -2081,6 +3076,16 @@ "node": ">=4.0" } }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, "node_modules/esutils": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", @@ -2097,6 +3102,16 @@ "integrity": "sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA==", "license": "MIT" }, + "node_modules/expect-type": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.2.2.tgz", + "integrity": "sha512-JhFGDVJ7tmDJItKhYgJCGLOWjuK9vPxiXoUFLwLDc99NlmklilbiQJwoctZtt13+xMw91MCk/REan6MWHqDjyA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.0.0" + } + }, "node_modules/fast-deep-equal": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", @@ -2104,9 +3119,9 @@ "license": "MIT" }, "node_modules/fast-equals": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/fast-equals/-/fast-equals-5.2.2.tgz", - "integrity": "sha512-V7/RktU11J3I36Nwq2JnZEM7tNm17eBJz+u25qdxBZeCKiX6BkVSZQjwWIr+IobgnZy+ag73tTZgZi7tr0LrBw==", + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/fast-equals/-/fast-equals-5.3.2.tgz", + "integrity": "sha512-6rxyATwPCkaFIL3JLqw8qXqMpIZ942pTX/tbQFkRsDGblS8tNGtlUauA/+mt6RUfqn/4MoEr+WDkYoIQbibWuQ==", "license": "MIT", "engines": { "node": ">=6.0.0" @@ -2126,6 +3141,24 @@ "dev": true, "license": "MIT" }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, "node_modules/file-entry-cache": { "version": "8.0.0", "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", @@ -2139,6 +3172,18 @@ "node": ">=16.0.0" } }, + "node_modules/file-selector": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/file-selector/-/file-selector-2.1.2.tgz", + "integrity": 
"sha512-QgXo+mXTe8ljeqUFaX3QVHc5osSItJ/Km+xpocx0aSqWGMSCf6qYs/VnzZgS864Pjn5iceMRFigeAV7AfTlaig==", + "license": "MIT", + "dependencies": { + "tslib": "^2.7.0" + }, + "engines": { + "node": ">= 12" + } + }, "node_modules/find-root": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/find-root/-/find-root-1.1.0.tgz", @@ -2183,41 +3228,6 @@ "dev": true, "license": "ISC" }, - "node_modules/follow-redirects": { - "version": "1.15.9", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.9.tgz", - "integrity": "sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==", - "funding": [ - { - "type": "individual", - "url": "https://github.com/sponsors/RubenVerborgh" - } - ], - "license": "MIT", - "engines": { - "node": ">=4.0" - }, - "peerDependenciesMeta": { - "debug": { - "optional": true - } - } - }, - "node_modules/form-data": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.2.tgz", - "integrity": "sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w==", - "license": "MIT", - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.8", - "es-set-tostringtag": "^2.1.0", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 6" - } - }, "node_modules/formik": { "version": "2.4.6", "resolved": "https://registry.npmjs.org/formik/-/formik-2.4.6.tgz", @@ -2243,6 +3253,21 @@ "react": ">=16.8.0" } }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, "node_modules/function-bind": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", @@ -2252,30 +3277,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/get-intrinsic": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", - "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", - "license": "MIT", - "dependencies": { - "call-bind-apply-helpers": "^1.0.2", - "es-define-property": "^1.0.1", - "es-errors": "^1.3.0", - "es-object-atoms": "^1.1.1", - "function-bind": "^1.1.2", - "get-proto": "^1.0.1", - "gopd": "^1.2.0", - "has-symbols": "^1.1.0", - "hasown": "^2.0.2", - "math-intrinsics": "^1.1.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/get-nonce": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/get-nonce/-/get-nonce-1.0.1.tgz", @@ -2285,19 +3286,6 @@ "node": ">=6" } }, - "node_modules/get-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", - "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", - "license": "MIT", - "dependencies": { - "dunder-proto": "^1.0.1", - "es-object-atoms": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - } - }, "node_modules/glob-parent": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", @@ -2334,18 +3322,6 @@ "url": "https://github.com/sponsors/sindresorhus" } 
}, - "node_modules/gopd": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", - "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/has-flag": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", @@ -2356,33 +3332,6 @@ "node": ">=8" } }, - "node_modules/has-symbols": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", - "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-tostringtag": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", - "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", - "license": "MIT", - "dependencies": { - "has-symbols": "^1.0.3" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/hasown": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", @@ -2410,11 +3359,59 @@ "react-is": "^16.7.0" } }, - "node_modules/hoist-non-react-statics/node_modules/react-is": { - "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", - "license": "MIT" + "node_modules/html-encoding-sniffer": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-4.0.0.tgz", + "integrity": "sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "whatwg-encoding": "^3.1.1" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/http-proxy-agent": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", + "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.0", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/https-proxy-agent": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "dev": true, + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } }, "node_modules/ignore": { "version": "5.3.2", @@ -2452,6 +3449,16 @@ "node": ">=0.8.19" } }, + "node_modules/indent-string": { + "version": 
"4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/internmap": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz", @@ -2511,6 +3518,13 @@ "node": ">=0.10.0" } }, + "node_modules/is-potential-custom-element-name": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz", + "integrity": "sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==", + "dev": true, + "license": "MIT" + }, "node_modules/isexe": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", @@ -2537,6 +3551,46 @@ "js-yaml": "bin/js-yaml.js" } }, + "node_modules/jsdom": { + "version": "27.0.0", + "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-27.0.0.tgz", + "integrity": "sha512-lIHeR1qlIRrIN5VMccd8tI2Sgw6ieYXSVktcSHaNe3Z5nE/tcPQYQWOq00wxMvYOsz+73eAkNenVvmPC6bba9A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@asamuzakjp/dom-selector": "^6.5.4", + "cssstyle": "^5.3.0", + "data-urls": "^6.0.0", + "decimal.js": "^10.5.0", + "html-encoding-sniffer": "^4.0.0", + "http-proxy-agent": "^7.0.2", + "https-proxy-agent": "^7.0.6", + "is-potential-custom-element-name": "^1.0.1", + "parse5": "^7.3.0", + "rrweb-cssom": "^0.8.0", + "saxes": "^6.0.0", + "symbol-tree": "^3.2.4", + "tough-cookie": "^6.0.0", + "w3c-xmlserializer": "^5.0.0", + "webidl-conversions": "^8.0.0", + "whatwg-encoding": "^3.1.1", + "whatwg-mimetype": "^4.0.0", + "whatwg-url": "^15.0.0", + "ws": "^8.18.2", + "xml-name-validator": "^5.0.0" + }, + "engines": { + "node": ">=20" + }, + "peerDependencies": { + "canvas": "^3.0.0" + }, + "peerDependenciesMeta": { + "canvas": { + "optional": true + } + } + }, "node_modules/jsesc": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", @@ -2681,15 +3735,42 @@ "loose-envify": "cli.js" } }, + "node_modules/loupe": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.2.1.tgz", + "integrity": "sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/lru-cache": { + "version": "11.2.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.1.tgz", + "integrity": "sha512-r8LA6i4LP4EeWOhqBaZZjDWwehd1xUJPCJd9Sv300H0ZmcUER4+JPh7bqqZeqs1o5pgtgvXm+d9UGrB5zZGDiQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": "20 || >=22" + } + }, "node_modules/lucide-react": { - "version": "0.479.0", - "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.479.0.tgz", - "integrity": "sha512-aBhNnveRhorBOK7uA4gDjgaf+YlHMdMhQ/3cupk6exM10hWlEU+2QtWYOfhXhjAsmdb6LeKR+NZnow4UxRRiTQ==", + "version": "0.511.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.511.0.tgz", + "integrity": "sha512-VK5a2ydJ7xm8GvBeKLS9mu1pVK6ucef9780JVUjw6bAjJL/QXnd4Y0p7SPeOUMC27YhzNCZvm5d/QX0Tp3rc0w==", "license": "ISC", "peerDependencies": { "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, + "node_modules/lz-string": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/lz-string/-/lz-string-1.5.0.tgz", + "integrity": 
"sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==", + "dev": true, + "license": "MIT", + "bin": { + "lz-string": "bin/bin.js" + } + }, "node_modules/m3u8-parser": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/m3u8-parser/-/m3u8-parser-7.2.0.tgz", @@ -2701,75 +3782,22 @@ "global": "^4.4.0" } }, - "node_modules/mantine-react-table": { - "version": "2.0.0-beta.9", - "resolved": "https://registry.npmjs.org/mantine-react-table/-/mantine-react-table-2.0.0-beta.9.tgz", - "integrity": "sha512-ZdfcwebWaPERoDvAuk43VYcBCzamohARVclnbuepT0PHZ0wRcDPMBR+zgaocL+pFy8EXUGwvWTOKNh25ITpjNQ==", + "node_modules/magic-string": { + "version": "0.30.19", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.19.tgz", + "integrity": "sha512-2N21sPY9Ws53PZvsEpVtNuSW+ScYbQdp4b9qUaL+9QkHUrGFKo56Lg9Emg5s9V/qrtNBmiR01sYhUOwu3H+VOw==", + "dev": true, "license": "MIT", "dependencies": { - "@tanstack/match-sorter-utils": "8.19.4", - "@tanstack/react-table": "8.20.5", - "@tanstack/react-virtual": "3.11.2" - }, - "engines": { - "node": ">=16" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/kevinvandy" - }, - "peerDependencies": { - "@mantine/core": "^7.9", - "@mantine/dates": "^7.9", - "@mantine/hooks": "^7.9", - "@tabler/icons-react": ">=2.23.0", - "clsx": ">=2", - "dayjs": ">=1.11", - "react": ">=18.0", - "react-dom": ">=18.0" + "@jridgewell/sourcemap-codec": "^1.5.5" } }, - "node_modules/mantine-react-table/node_modules/@tanstack/react-table": { - "version": "8.20.5", - "resolved": "https://registry.npmjs.org/@tanstack/react-table/-/react-table-8.20.5.tgz", - "integrity": "sha512-WEHopKw3znbUZ61s9i0+i9g8drmDo6asTWbrQh8Us63DAk/M0FkmIqERew6P71HI75ksZ2Pxyuf4vvKh9rAkiA==", - "license": "MIT", - "dependencies": { - "@tanstack/table-core": "8.20.5" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/tannerlinsley" - }, - "peerDependencies": { - "react": ">=16.8", - "react-dom": ">=16.8" - } - }, - "node_modules/mantine-react-table/node_modules/@tanstack/table-core": { - "version": "8.20.5", - "resolved": "https://registry.npmjs.org/@tanstack/table-core/-/table-core-8.20.5.tgz", - "integrity": "sha512-P9dF7XbibHph2PFRz8gfBKEXEY/HJPOhym8CHmjF8y3q5mWpKx9xtZapXQUWCgkqvsK0R46Azuz+VaxD4Xl+Tg==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/tannerlinsley" - } - }, - "node_modules/math-intrinsics": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", - "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - } + "node_modules/mdn-data": { + "version": "2.12.2", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.12.2.tgz", + "integrity": "sha512-IEn+pegP1aManZuckezWCO+XZQDplx1366JoVhTpMpBB1sPey/SbveZQUosKiKiGYjg1wH4pMlNgXbCiYgihQA==", + "dev": true, + "license": "CC0-1.0" }, "node_modules/memoize-one": { "version": "5.2.1", @@ -2777,27 +3805,6 @@ "integrity": "sha512-zYiwtZUcYyXKo/np96AGZAckk+FWWsUdJ3cHGGmld7+AhvcWmQyGCYUh1hc4Q/pkOhb65dQR/pqCyK0cOaHz4Q==", "license": "MIT" }, - "node_modules/mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": 
"sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "license": "MIT", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, "node_modules/min-document": { "version": "2.19.0", "resolved": "https://registry.npmjs.org/min-document/-/min-document-2.19.0.tgz", @@ -2806,6 +3813,16 @@ "dom-walk": "^0.1.0" } }, + "node_modules/min-indent": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz", + "integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, "node_modules/minimatch": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", @@ -2982,6 +3999,19 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/parse5": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.3.0.tgz", + "integrity": "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==", + "dev": true, + "license": "MIT", + "dependencies": { + "entities": "^6.0.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, "node_modules/path-exists": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", @@ -3017,12 +4047,42 @@ "node": ">=8" } }, + "node_modules/pathe": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", + "dev": true, + "license": "MIT" + }, + "node_modules/pathval": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.1.tgz", + "integrity": "sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14.16" + } + }, "node_modules/picocolors": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", "license": "ISC" }, + "node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, "node_modules/pkcs7": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/pkcs7/-/pkcs7-1.0.4.tgz", @@ -3078,6 +4138,7 @@ "version": "3.5.3", "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.5.3.tgz", "integrity": "sha512-QQtaxnoDJeAkDvDKWCLiwIXkTgRhwYDEQCghU9Z6q03iyek/rxRh/2lC3HB7P8sWT2xC/y5JDctPLBIGzHKbhw==", + "dev": true, "license": "MIT", "bin": { "prettier": "bin/prettier.cjs" @@ -3089,6 +4150,41 @@ "url": "https://github.com/prettier/prettier?sponsor=1" } }, + "node_modules/pretty-format": { + 
"version": "27.5.1", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-27.5.1.tgz", + "integrity": "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1", + "ansi-styles": "^5.0.0", + "react-is": "^17.0.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/pretty-format/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/pretty-format/node_modules/react-is": { + "version": "17.0.2", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", + "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==", + "dev": true, + "license": "MIT" + }, "node_modules/process": { "version": "0.11.10", "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", @@ -3109,24 +4205,12 @@ "react-is": "^16.13.1" } }, - "node_modules/prop-types/node_modules/react-is": { - "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", - "license": "MIT" - }, "node_modules/property-expr": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/property-expr/-/property-expr-2.0.6.tgz", "integrity": "sha512-SVtmxhRE/CGkn3eZY1T6pC8Nln6Fr/lu1mKSgRud0eC73whjGfoAogbn78LkD8aFL0zz3bAFerKSnOl7NlErBA==", "license": "MIT" }, - "node_modules/proxy-from-env": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", - "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", - "license": "MIT" - }, "node_modules/punycode": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", @@ -3138,24 +4222,24 @@ } }, "node_modules/react": { - "version": "19.0.0", - "resolved": "https://registry.npmjs.org/react/-/react-19.0.0.tgz", - "integrity": "sha512-V8AVnmPIICiWpGfm6GLzCR/W5FXLchHop40W4nXBmdlEceh16rCN8O8LNWm5bh5XUX91fh7KpA+W0TgMKmgTpQ==", + "version": "19.1.1", + "resolved": "https://registry.npmjs.org/react/-/react-19.1.1.tgz", + "integrity": "sha512-w8nqGImo45dmMIfljjMwOGtbmC/mk4CMYhWIicdSflH91J9TyCyczcPFXJzrZ/ZXcgGRFeP6BU0BEJTw6tZdfQ==", "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/react-dom": { - "version": "19.0.0", - "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.0.0.tgz", - "integrity": "sha512-4GV5sHFG0e/0AD4X+ySy6UJd3jVl1iNsNHdpad0qhABJ11twS3TTBnseqsKurKcsNqCEFeGL3uLpVChpIO3QfQ==", + "version": "19.1.1", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.1.1.tgz", + "integrity": "sha512-Dlq/5LAZgF0Gaz6yiqZCf6VCcZs1ghAJyrsu84Q/GT0gV+mCxbfmKNoGRKBYMJ8IEdGPqu49YWXD02GCknEDkw==", "license": "MIT", "dependencies": { - "scheduler": "^0.25.0" + "scheduler": "^0.26.0" }, "peerDependencies": { - "react": "^19.0.0" + "react": "^19.1.1" } }, "node_modules/react-draggable": { @@ -3181,12 +4265,14 @@ "node": ">=6" } }, - "node_modules/react-dropzone-esm": { - 
"version": "15.2.0", - "resolved": "https://registry.npmjs.org/react-dropzone-esm/-/react-dropzone-esm-15.2.0.tgz", - "integrity": "sha512-pPwR8xWVL+tFLnbAb8KVH5f6Vtl397tck8dINkZ1cPMxHWH+l9dFmIgRWgbh7V7jbjIcuKXCsVrXbhQz68+dVA==", + "node_modules/react-dropzone": { + "version": "14.3.8", + "resolved": "https://registry.npmjs.org/react-dropzone/-/react-dropzone-14.3.8.tgz", + "integrity": "sha512-sBgODnq+lcA4P296DY4wacOZz3JFpD99fp+hb//iBO2HHnyeZU3FwWyXJ6salNpqQdsZrgMrotuko/BdJMV8Ug==", "license": "MIT", "dependencies": { + "attr-accept": "^2.2.4", + "file-selector": "^2.1.0", "prop-types": "^15.8.1" }, "engines": { @@ -3203,9 +4289,9 @@ "license": "MIT" }, "node_modules/react-is": { - "version": "18.3.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", - "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", "license": "MIT" }, "node_modules/react-lifecycles-compat": { @@ -3215,9 +4301,9 @@ "license": "MIT" }, "node_modules/react-number-format": { - "version": "5.4.3", - "resolved": "https://registry.npmjs.org/react-number-format/-/react-number-format-5.4.3.tgz", - "integrity": "sha512-VCY5hFg/soBighAoGcdE+GagkJq0230qN6jcS5sp8wQX1qy1fYN/RX7/BXkrs0oyzzwqR8/+eSUrqXbGeywdUQ==", + "version": "5.4.4", + "resolved": "https://registry.npmjs.org/react-number-format/-/react-number-format-5.4.4.tgz", + "integrity": "sha512-wOmoNZoOpvMminhifQYiYSTCLUDOiUbBunrMrMjA+dV52sY+vck1S4UhR6PkgnoCquvvMSeJjErXZ4qSaWCliA==", "license": "MIT", "peerDependencies": { "react": "^0.14 || ^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", @@ -3288,15 +4374,13 @@ } }, "node_modules/react-router": { - "version": "7.3.0", - "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.3.0.tgz", - "integrity": "sha512-466f2W7HIWaNXTKM5nHTqNxLrHTyXybm7R0eBlVSt0k/u55tTCDO194OIx/NrYD4TS5SXKTNekXfT37kMKUjgw==", + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.6.0.tgz", + "integrity": "sha512-GGufuHIVCJDbnIAXP3P9Sxzq3UUsddG3rrI3ut1q6m0FI6vxVBF3JoPQ38+W/blslLH4a5Yutp8drkEpXoddGQ==", "license": "MIT", "dependencies": { - "@types/cookie": "^0.6.0", "cookie": "^1.0.1", - "set-cookie-parser": "^2.6.0", - "turbo-stream": "2.4.0" + "set-cookie-parser": "^2.6.0" }, "engines": { "node": ">=20.0.0" @@ -3312,12 +4396,12 @@ } }, "node_modules/react-router-dom": { - "version": "7.3.0", - "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.3.0.tgz", - "integrity": "sha512-z7Q5FTiHGgQfEurX/FBinkOXhWREJIAB2RiU24lvcBa82PxUpwqvs/PAXb9lJyPjTs2jrl6UkLvCZVGJPeNuuQ==", + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.6.0.tgz", + "integrity": "sha512-DYgm6RDEuKdopSyGOWZGtDfSm7Aofb8CCzgkliTjtu/eDuB0gcsv6qdFhhi8HdtmA+KHkt5MfZ5K2PdzjugYsA==", "license": "MIT", "dependencies": { - "react-router": "7.3.0" + "react-router": "7.6.0" }, "engines": { "node": ">=20.0.0" @@ -3365,9 +4449,9 @@ } }, "node_modules/react-textarea-autosize": { - "version": "8.5.6", - "resolved": "https://registry.npmjs.org/react-textarea-autosize/-/react-textarea-autosize-8.5.6.tgz", - "integrity": "sha512-aT3ioKXMa8f6zHYGebhbdMD2L00tKeRX1zuVuDx9YQK/JLLRSaSxq3ugECEmUB9z2kvk6bFSIoRHLkkUv0RJiw==", + "version": "8.5.9", + "resolved": 
"https://registry.npmjs.org/react-textarea-autosize/-/react-textarea-autosize-8.5.9.tgz", + "integrity": "sha512-U1DGlIQN5AwgjTyOEnI1oCcMuEr1pv1qOtklB2l4nyMGbHzWrI0eFsYK0zos2YWqAolJyG0IWJaqWmWj5ETh0A==", "license": "MIT", "dependencies": { "@babel/runtime": "^7.20.13", @@ -3489,17 +4573,41 @@ "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", "license": "MIT" }, + "node_modules/recharts/node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "license": "MIT" + }, + "node_modules/redent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz", + "integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==", + "dev": true, + "license": "MIT", + "dependencies": { + "indent-string": "^4.0.0", + "strip-indent": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/regenerator-runtime": { "version": "0.14.1", "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==", "license": "MIT" }, - "node_modules/remove-accents": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/remove-accents/-/remove-accents-0.5.0.tgz", - "integrity": "sha512-8g3/Otx1eJaVD12e31UbJj1YzdtVvzH85HV7t+9MJYk/u3XmkOUJ5Ys9wQrf9PCPK8+xn4ymzqYCiZl6QWKn+A==", - "license": "MIT" + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } }, "node_modules/resolve": { "version": "1.22.10", @@ -3569,10 +4677,37 @@ "fsevents": "~2.3.2" } }, + "node_modules/rrweb-cssom": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.8.0.tgz", + "integrity": "sha512-guoltQEx+9aMf2gDZ0s62EcV8lsXR+0w8915TC3ITdn2YueuNjdAYh/levpU9nFaoChh9RUS5ZdQMrKfVEN9tw==", + "dev": true, + "license": "MIT" + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true, + "license": "MIT" + }, + "node_modules/saxes": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/saxes/-/saxes-6.0.0.tgz", + "integrity": "sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==", + "dev": true, + "license": "ISC", + "dependencies": { + "xmlchars": "^2.2.0" + }, + "engines": { + "node": ">=v12.22.7" + } + }, "node_modules/scheduler": { - "version": "0.25.0", - "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.25.0.tgz", - "integrity": "sha512-xFVuu11jh+xcO7JOAGJNOXld8/TcEHK/4CituBUeUb5hqxJLj9YuemAEuvm9gQ/+pgXYfbQuqAkiYu+u7YEsNA==", + "version": "0.26.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.26.0.tgz", + "integrity": "sha512-NlHwttCI/l5gCPR3D1nNXtWABUmBwvZpEQiD4IXSbIDq8BzLIK/7Ir5gTFSGZDUu37K5cMNp0hFtzO38sC7gWA==", "license": "MIT" }, 
"node_modules/set-cookie-parser": { @@ -3604,6 +4739,13 @@ "node": ">=8" } }, + "node_modules/siginfo": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", + "dev": true, + "license": "ISC" + }, "node_modules/source-map": { "version": "0.5.7", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", @@ -3623,6 +4765,33 @@ "node": ">=0.10.0" } }, + "node_modules/stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", + "dev": true, + "license": "MIT" + }, + "node_modules/std-env": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.9.0.tgz", + "integrity": "sha512-UGvjygr6F6tpH7o2qyqR6QYpwraIjKSdtzyBdyytFOHmPZY917kwdwLG0RbOjWOnKmnm3PeHjaoLLMie7kPLQw==", + "dev": true, + "license": "MIT" + }, + "node_modules/strip-indent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz", + "integrity": "sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "min-indent": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/strip-json-comments": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", @@ -3636,6 +4805,26 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/strip-literal": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-3.0.0.tgz", + "integrity": "sha512-TcccoMhJOM3OebGhSBEmp3UZ2SfDMZUEBdRA/9ynfLi8yYajyWX3JiXArcJt4Umh4vISpspkQIY8ZZoCqjbviA==", + "dev": true, + "license": "MIT", + "dependencies": { + "js-tokens": "^9.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/strip-literal/node_modules/js-tokens": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.1.tgz", + "integrity": "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==", + "dev": true, + "license": "MIT" + }, "node_modules/stylis": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.2.0.tgz", @@ -3667,6 +4856,13 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/symbol-tree": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz", + "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==", + "dev": true, + "license": "MIT" + }, "node_modules/tabbable": { "version": "6.2.0", "resolved": "https://registry.npmjs.org/tabbable/-/tabbable-6.2.0.tgz", @@ -3691,24 +4887,125 @@ "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==", "license": "MIT" }, + "node_modules/tinybench": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", + "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyexec": { + "version": "0.3.2", + "resolved": 
"https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.2.tgz", + "integrity": "sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinypool": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.1.1.tgz", + "integrity": "sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.0.0 || >=20.0.0" + } + }, + "node_modules/tinyrainbow": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-2.0.0.tgz", + "integrity": "sha512-op4nsTR47R6p0vMUUoYl/a+ljLFVtlfaXkLQmqfLR1qHma1h/ysYk4hEXZ880bf2CYgTskvTa/e196Vd5dDQXw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tinyspy": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-4.0.4.tgz", + "integrity": "sha512-azl+t0z7pw/z958Gy9svOTuzqIk6xq+NSheJzn5MMWtWTFywIacg2wUlzKFGtt3cthx0r2SxMK0yzJOR0IES7Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tldts": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/tldts/-/tldts-7.0.15.tgz", + "integrity": "sha512-heYRCiGLhtI+U/D0V8YM3QRwPfsLJiP+HX+YwiHZTnWzjIKC+ZCxQRYlzvOoTEc6KIP62B1VeAN63diGCng2hg==", + "dev": true, + "license": "MIT", + "dependencies": { + "tldts-core": "^7.0.15" + }, + "bin": { + "tldts": "bin/cli.js" + } + }, + "node_modules/tldts-core": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-7.0.15.tgz", + "integrity": "sha512-YBkp2VfS9VTRMPNL2PA6PMESmxV1JEVoAr5iBlZnB5JG3KUrWzNCB3yNNkRa2FZkqClaBgfNYCp8PgpYmpjkZw==", + "dev": true, + "license": "MIT" + }, "node_modules/toposort": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/toposort/-/toposort-2.0.2.tgz", "integrity": "sha512-0a5EOkAUp8D4moMi2W8ZF8jcga7BgZd91O/yabJCFY8az+XSzeGyTKs0Aoo897iV1Nj6guFq8orWDS96z91oGg==", "license": "MIT" }, + "node_modules/tough-cookie": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-6.0.0.tgz", + "integrity": "sha512-kXuRi1mtaKMrsLUxz3sQYvVl37B0Ns6MzfrtV5DvJceE9bPyspOqk9xxv7XbZWcfLWbFmm997vl83qUWVJA64w==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "tldts": "^7.0.5" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/tr46": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-6.0.0.tgz", + "integrity": "sha512-bLVMLPtstlZ4iMQHpFHTR7GAGj2jxi8Dg0s2h2MafAE4uSWF98FC/3MomU51iQAMf8/qDUbKWf5GxuvvVcXEhw==", + "dev": true, + "license": "MIT", + "dependencies": { + "punycode": "^2.3.1" + }, + "engines": { + "node": ">=20" + } + }, "node_modules/tslib": { "version": "2.8.1", "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", "license": "0BSD" }, - 
"node_modules/turbo-stream": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/turbo-stream/-/turbo-stream-2.4.0.tgz", - "integrity": "sha512-FHncC10WpBd2eOmGwpmQsWLDoK4cqsA/UT/GqNoaKOQnT8uzhtCbg3EoUDMvqpOSAI0S26mr0rkjzbOO6S3v1g==", - "license": "ISC" - }, "node_modules/type-check": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", @@ -3780,9 +5077,9 @@ } }, "node_modules/use-isomorphic-layout-effect": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/use-isomorphic-layout-effect/-/use-isomorphic-layout-effect-1.2.0.tgz", - "integrity": "sha512-q6ayo8DWoPZT0VdG4u3D3uxcgONP3Mevx2i2b0434cwWBoL+aelL1DzkXI6w3PhTZzUeR2kaVlZn70iCiseP6w==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/use-isomorphic-layout-effect/-/use-isomorphic-layout-effect-1.2.1.tgz", + "integrity": "sha512-tpZZ+EX0gaghDAiFR37hj5MgY6ZN55kLiPkJsKxBMZ6GZdOSPJXiOzPM984oPYZ5AnehYx5WQp1+ME8I/P/pRA==", "license": "MIT", "peerDependencies": { "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" @@ -3810,6 +5107,19 @@ } } }, + "node_modules/use-resize-observer": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/use-resize-observer/-/use-resize-observer-9.1.0.tgz", + "integrity": "sha512-R25VqO9Wb3asSD4eqtcxk8sJalvIOYBqS8MNZlpDSQ4l4xMQxC/J7Id9HoTqPq8FwULIn0PVW+OAqF2dyYbjow==", + "license": "MIT", + "dependencies": { + "@juggle/resize-observer": "^3.3.1" + }, + "peerDependencies": { + "react": "16.8.0 - 18", + "react-dom": "16.8.0 - 18" + } + }, "node_modules/use-sidecar": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.3.tgz", @@ -3906,15 +5216,18 @@ } }, "node_modules/vite": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/vite/-/vite-6.2.1.tgz", - "integrity": "sha512-n2GnqDb6XPhlt9B8olZPrgMD/es/Nd1RdChF6CBD/fHW6pUyUTt2sQW2fPRX5GiD9XEa6+8A6A4f2vT6pSsE7Q==", + "version": "6.3.5", + "resolved": "https://registry.npmjs.org/vite/-/vite-6.3.5.tgz", + "integrity": "sha512-cZn6NDFE7wdTpINgs++ZJ4N49W2vRp8LCKrn3Ob1kYNtOo21vfDoaV5GzBfLU4MovSAB8uNRm4jgzVQZ+mBzPQ==", "dev": true, "license": "MIT", "dependencies": { "esbuild": "^0.25.0", + "fdir": "^6.4.4", + "picomatch": "^4.0.2", "postcss": "^8.5.3", - "rollup": "^4.30.1" + "rollup": "^4.34.9", + "tinyglobby": "^0.2.13" }, "bin": { "vite": "bin/vite.js" @@ -3977,11 +5290,168 @@ } } }, + "node_modules/vite-node": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-3.2.4.tgz", + "integrity": "sha512-EbKSKh+bh1E1IFxeO0pg1n4dvoOTt0UDiXMd/qn++r98+jPO1xtJilvXldeuQ8giIB5IkpjCgMleHMNEsGH6pg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cac": "^6.7.14", + "debug": "^4.4.1", + "es-module-lexer": "^1.7.0", + "pathe": "^2.0.3", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" + }, + "bin": { + "vite-node": "vite-node.mjs" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/vitest": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-3.2.4.tgz", + "integrity": "sha512-LUCP5ev3GURDysTWiP47wRRUpLKMOfPh+yKTx3kVIEiu5KOMeqzpnYNsKyOoVrULivR8tLcks4+lga33Whn90A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/chai": "^5.2.2", + "@vitest/expect": "3.2.4", + "@vitest/mocker": "3.2.4", + "@vitest/pretty-format": "^3.2.4", + "@vitest/runner": "3.2.4", + "@vitest/snapshot": "3.2.4", + "@vitest/spy": "3.2.4", + "@vitest/utils": "3.2.4", + "chai": "^5.2.0", 
+ "debug": "^4.4.1", + "expect-type": "^1.2.1", + "magic-string": "^0.30.17", + "pathe": "^2.0.3", + "picomatch": "^4.0.2", + "std-env": "^3.9.0", + "tinybench": "^2.9.0", + "tinyexec": "^0.3.2", + "tinyglobby": "^0.2.14", + "tinypool": "^1.1.1", + "tinyrainbow": "^2.0.0", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0", + "vite-node": "3.2.4", + "why-is-node-running": "^2.3.0" + }, + "bin": { + "vitest": "vitest.mjs" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@types/debug": "^4.1.12", + "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", + "@vitest/browser": "3.2.4", + "@vitest/ui": "3.2.4", + "happy-dom": "*", + "jsdom": "*" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + "@types/debug": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + } + } + }, + "node_modules/w3c-xmlserializer": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/w3c-xmlserializer/-/w3c-xmlserializer-5.0.0.tgz", + "integrity": "sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "xml-name-validator": "^5.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/webidl-conversions": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-8.0.0.tgz", + "integrity": "sha512-n4W4YFyz5JzOfQeA8oN7dUYpR+MBP3PIUsn2jLjWXwK5ASUzt0Jc/A5sAUZoCYFJRGF0FBKJ+1JjN43rNdsQzA==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=20" + } + }, "node_modules/webworkify-webpack": { "version": "2.1.5", - "resolved": "git+ssh://git@github.com/xqq/webworkify-webpack.git#24d1e719b4a6cac37a518b2bb10fe124527ef4ef", + "resolved": "git+ssh://git@github.com/xqq/webworkify-webpack.git", + "integrity": "sha512-W8Bg+iLq52d2GFvwabPNCIDCgMHcW3g68Tr8zwpJliEz2cKBIKYL3T0VdYeZWhz5rOxWRBBEdF931fquSO6iCQ==", "license": "MIT" }, + "node_modules/whatwg-encoding": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-3.1.1.tgz", + "integrity": "sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "iconv-lite": "0.6.3" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/whatwg-mimetype": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz", + "integrity": "sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/whatwg-url": { + "version": "15.1.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-15.1.0.tgz", + "integrity": "sha512-2ytDk0kiEj/yu90JOAp44PVPUkO9+jVhyf+SybKlRHSDlvOOZhdPIrr7xTH64l4WixO2cP+wQIcgujkGBPPz6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "tr46": "^6.0.0", + "webidl-conversions": "^8.0.0" + }, + "engines": { + "node": ">=20" + } + }, "node_modules/which": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", @@ -3998,6 +5468,23 @@ "node": ">= 8" } }, + "node_modules/why-is-node-running": { + 
"version": "2.3.0", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", + "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", + "dev": true, + "license": "MIT", + "dependencies": { + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/word-wrap": { "version": "1.2.5", "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", @@ -4008,10 +5495,49 @@ "node": ">=0.10.0" } }, + "node_modules/ws": { + "version": "8.18.3", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz", + "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/xml-name-validator": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-5.0.0.tgz", + "integrity": "sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18" + } + }, + "node_modules/xmlchars": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/xmlchars/-/xmlchars-2.2.0.tgz", + "integrity": "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==", + "dev": true, + "license": "MIT" + }, "node_modules/yaml": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.7.0.tgz", - "integrity": "sha512-+hSoy/QHluxmC9kCIJyL/uyFmLmc+e5CFR5Wa+bpIhIj85LVb9ZH2nVnqrHoSvKogwODv0ClqZkmiSSaIH5LTA==", + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.8.1.tgz", + "integrity": "sha512-lcYcMxX2PO9XMGvAJkJ3OsNMw+/7FKes7/hgerGUYWIoWu5j/+YQqcZr5JnPZWzOsEBgMbSbiSTn/dv/69Mkpw==", "dev": true, "license": "ISC", "optional": true, @@ -4020,7 +5546,7 @@ "yaml": "bin.mjs" }, "engines": { - "node": ">= 14" + "node": ">= 14.6" } }, "node_modules/yocto-queue": { diff --git a/frontend/package.json b/frontend/package.json index 3b287d79..fea6b73e 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -1,5 +1,5 @@ { - "name": "vite", + "name": "frontend", "private": true, "version": "0.0.0", "type": "module", @@ -7,34 +7,31 @@ "dev": "vite --host", "build": "vite build", "lint": "eslint .", - "preview": "vite preview" + "preview": "vite preview", + "test": "vitest --run", + "test:watch": "vitest" }, "dependencies": { "@dnd-kit/core": "^6.3.1", "@dnd-kit/modifiers": "^9.0.0", "@dnd-kit/sortable": "^10.0.0", "@dnd-kit/utilities": "^3.2.2", - "@mantine/charts": "^7.17.2", - "@mantine/core": "^7.17.2", - "@mantine/dates": "^7.17.2", - "@mantine/dropzone": "^7.17.2", - "@mantine/form": "^7.17.3", - "@mantine/hooks": "^7.17.2", - "@mantine/notifications": "^7.17.2", - "@tabler/icons-react": "^3.31.0", + "@mantine/charts": "~8.0.1", + "@mantine/core": "~8.0.1", + "@mantine/dates": "~8.0.1", + "@mantine/dropzone": "~8.0.1", + "@mantine/form": "~8.0.1", + "@mantine/hooks": "~8.0.1", + "@mantine/notifications": "~8.0.1", "@tanstack/react-table": "^8.21.2", - "allotment": "^1.20.3", - "axios": "^1.8.2", - "clsx": "^2.1.1", + "allotment": "^1.20.4", "dayjs": 
"^1.11.13", "formik": "^2.4.6", "hls.js": "^1.5.20", - "lucide-react": "^0.479.0", - "mantine-react-table": "^2.0.0-beta.9", + "lucide-react": "^0.511.0", "mpegts.js": "^1.8.0", - "prettier": "^3.5.3", - "react": "^19.0.0", - "react-dom": "^19.0.0", + "react": "^19.1.0", + "react-dom": "^19.1.0", "react-draggable": "^4.4.6", "react-pro-sidebar": "^1.1.0", "react-router-dom": "^7.3.0", @@ -48,13 +45,27 @@ }, "devDependencies": { "@eslint/js": "^9.21.0", - "@types/react": "^19.0.10", - "@types/react-dom": "^19.0.4", - "@vitejs/plugin-react-swc": "^3.8.0", + "@swc/core": "npm:@swc/wasm@1.13.20", + "@swc/wasm": "^1.13.20", + "@testing-library/dom": "^10.4.1", + "@testing-library/jest-dom": "^6.8.0", + "@testing-library/react": "^16.3.0", + "@testing-library/user-event": "^14.6.1", + "@types/react": "^19.1.0", + "@types/react-dom": "^19.1.0", + "@vitejs/plugin-react-swc": "^4.1.0", "eslint": "^9.21.0", "eslint-plugin-react-hooks": "^5.1.0", "eslint-plugin-react-refresh": "^0.4.19", "globals": "^15.15.0", - "vite": "^6.2.0" + "jsdom": "^27.0.0", + "prettier": "^3.5.3", + "vite": "^6.2.0", + "vitest": "^3.2.4" + }, + "resolutions": { + "vite": "7.1.7", + "react": "19.1.0", + "react-dom": "19.1.0" } } diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx index 7295d12e..3c7c3877 100644 --- a/frontend/src/App.jsx +++ b/frontend/src/App.jsx @@ -14,13 +14,17 @@ import Guide from './pages/Guide'; import Stats from './pages/Stats'; import DVR from './pages/DVR'; import Settings from './pages/Settings'; +import PluginsPage from './pages/Plugins'; +import Users from './pages/Users'; +import LogosPage from './pages/Logos'; +import VODsPage from './pages/VODs'; import useAuthStore from './store/auth'; +import useLogosStore from './store/logos'; import FloatingVideo from './components/FloatingVideo'; import { WebsocketProvider } from './WebSocket'; import { Box, AppShell, MantineProvider } from '@mantine/core'; import '@mantine/core/styles.css'; // Ensure Mantine global styles load import '@mantine/notifications/styles.css'; -import 'mantine-react-table/styles.css'; import '@mantine/dropzone/styles.css'; import '@mantine/dates/styles.css'; import './index.css'; @@ -36,6 +40,8 @@ const defaultRoute = '/channels'; const App = () => { const [open, setOpen] = useState(true); + const [backgroundLoadingStarted, setBackgroundLoadingStarted] = + useState(false); const isAuthenticated = useAuthStore((s) => s.isAuthenticated); const setIsAuthenticated = useAuthStore((s) => s.setIsAuthenticated); const logout = useAuthStore((s) => s.logout); @@ -75,18 +81,22 @@ const App = () => { const loggedIn = await initializeAuth(); if (loggedIn) { await initData(); - setIsAuthenticated(true); + // Start background logo loading after app is fully initialized (only once) + if (!backgroundLoadingStarted) { + setBackgroundLoadingStarted(true); + useLogosStore.getState().startBackgroundLoading(); + } } else { await logout(); } } catch (error) { - console.error("Auth check failed:", error); + console.error('Auth check failed:', error); await logout(); } }; checkAuth(); - }, [initializeAuth, initData, setIsAuthenticated, logout]); + }, [initializeAuth, initData, logout, backgroundLoadingStarted]); return ( { height: 0, }} navbar={{ - width: open ? drawerWidth : miniDrawerWidth, + width: isAuthenticated + ? open + ? 
drawerWidth + : miniDrawerWidth + : 0, }} > - + {isAuthenticated && ( + + )} { } /> } /> } /> + } /> + } /> } /> + } /> + } /> ) : ( } /> diff --git a/frontend/src/WebSocket.jsx b/frontend/src/WebSocket.jsx index aeed8826..1c576d23 100644 --- a/frontend/src/WebSocket.jsx +++ b/frontend/src/WebSocket.jsx @@ -9,6 +9,7 @@ import React, { } from 'react'; import { notifications } from '@mantine/notifications'; import useChannelsStore from './store/channels'; +import useLogosStore from './store/logos'; import usePlaylistsStore from './store/playlists'; import useEPGsStore from './store/epgs'; import { Box, Button, Stack, Alert, Group } from '@mantine/core'; @@ -16,7 +17,7 @@ import API from './api'; import useSettingsStore from './store/settings'; import useAuthStore from './store/auth'; -export const WebsocketContext = createContext([false, () => { }, null]); +export const WebsocketContext = createContext([false, () => {}, null]); export const WebsocketProvider = ({ children }) => { const [isReady, setIsReady] = useState(false); @@ -35,7 +36,6 @@ export const WebsocketProvider = ({ children }) => { const updateEPG = useEPGsStore((s) => s.updateEPG); const updateEPGProgress = useEPGsStore((s) => s.updateEPGProgress); - const playlists = usePlaylistsStore((s) => s.playlists); const updatePlaylist = usePlaylistsStore((s) => s.updatePlaylist); // Calculate reconnection delay with exponential backoff @@ -169,6 +169,60 @@ export const WebsocketProvider = ({ children }) => { // Handle standard message format for other event types switch (parsedEvent.data?.type) { + case 'comskip_status': { + const rid = parsedEvent.data.recording_id; + const id = `comskip-${rid}`; + const status = parsedEvent.data.status; + const title = parsedEvent.data.title || 'Recording'; + if (status === 'started') { + notifications.show({ + id, + title: 'Removing commercials', + message: `Processing ${title}...`, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + loading: true, + }); + } else if (status === 'completed') { + notifications.update({ + id, + title: 'Commercials removed', + message: `${title} β€” kept ${parsedEvent.data.segments_kept} segments`, + color: 'green.5', + loading: false, + autoClose: 4000, + }); + try { + await useChannelsStore.getState().fetchRecordings(); + } catch {} + } else if (status === 'skipped') { + notifications.update({ + id, + title: 'No commercials to remove', + message: parsedEvent.data.reason || '', + color: 'teal', + loading: false, + autoClose: 3000, + }); + try { + await useChannelsStore.getState().fetchRecordings(); + } catch {} + } else if (status === 'error') { + notifications.update({ + id, + title: 'Comskip failed', + message: parsedEvent.data.reason || 'Unknown error', + color: 'red', + loading: false, + autoClose: 6000, + }); + try { + await useChannelsStore.getState().fetchRecordings(); + } catch {} + } + break; + } case 'epg_file': fetchEPGs(); notifications.show({ @@ -192,10 +246,14 @@ export const WebsocketProvider = ({ children }) => { // Update the playlist status whenever we receive a status update // Not just when progress is 100% or status is pending_setup if (parsedEvent.data.status && parsedEvent.data.account) { - // Check if playlists is an object with IDs as keys or an array - const playlist = Array.isArray(playlists) - ? 
playlists.find((p) => p.id === parsedEvent.data.account) - : playlists[parsedEvent.data.account]; + // Get fresh playlists from store to avoid stale state from React render cycle + const currentPlaylists = usePlaylistsStore.getState().playlists; + const isArray = Array.isArray(currentPlaylists); + const playlist = isArray + ? currentPlaylists.find( + (p) => p.id === parsedEvent.data.account + ) + : currentPlaylists[parsedEvent.data.account]; if (playlist) { // When we receive a "success" status with 100% progress, this is a completed refresh @@ -214,16 +272,23 @@ export const WebsocketProvider = ({ children }) => { ) { updateData.updated_at = new Date().toISOString(); // Log successful completion for debugging - console.log('M3U refresh completed successfully:', updateData); + console.log( + 'M3U refresh completed successfully:', + updateData + ); + fetchPlaylists(); // Refresh playlists to ensure UI is up-to-date + fetchChannelProfiles(); // Ensure channel profiles are updated } updatePlaylist(updateData); } else { - // Log when playlist can't be found for debugging purposes - console.warn( - `Received update for unknown playlist ID: ${parsedEvent.data.account}`, - Array.isArray(playlists) ? 'playlists is array' : 'playlists is object', - Object.keys(playlists).length + // Playlist not in store yet - this happens when backend sends websocket + // updates immediately after creating the playlist, before the API response + // returns. The frontend will receive a 'playlist_created' event shortly + // which will trigger a fetchPlaylists() to sync the store. + console.log( + `Received update for playlist ID ${parsedEvent.data.account} not yet in store. ` + + `Waiting for playlist_created event to sync...` ); } } @@ -268,6 +333,173 @@ export const WebsocketProvider = ({ children }) => { } break; + case 'epg_matching_progress': { + const progress = parsedEvent.data; + const id = 'epg-matching-progress'; + + if (progress.stage === 'starting') { + notifications.show({ + id, + title: 'EPG Matching in Progress', + message: `Starting to match ${progress.total} channels...`, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + loading: true, + }); + } else if (progress.stage === 'matching') { + let message = `Matched ${progress.matched} of ${progress.total} channels`; + if (progress.remaining > 0) { + message += ` (${progress.remaining} remaining)`; + } + if (progress.current_channel) { + message += `\nCurrently processing: ${progress.current_channel}`; + } + + notifications.update({ + id, + title: 'EPG Matching in Progress', + message, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + loading: true, + }); + } else if (progress.stage === 'completed') { + notifications.update({ + id, + title: 'EPG Matching Complete', + message: `Successfully matched ${progress.matched} of ${progress.total} channels (${progress.progress_percent}%)`, + color: progress.matched > 0 ? 
'green.5' : 'orange', + loading: false, + autoClose: 6000, + }); + } + break; + } + + case 'epg_logo_setting_progress': { + const progress = parsedEvent.data; + const id = 'epg-logo-setting-progress'; + + if (progress.status === 'running' && progress.progress === 0) { + // Initial message + notifications.show({ + id, + title: 'Setting Logos from EPG', + message: `Processing ${progress.total} channels...`, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + loading: true, + }); + } else if (progress.status === 'running') { + // Progress update + let message = `Processed ${progress.progress} of ${progress.total} channels`; + if (progress.updated_count !== undefined) { + message += ` (${progress.updated_count} updated)`; + } + if (progress.created_logos_count !== undefined) { + message += `, created ${progress.created_logos_count} logos`; + } + + notifications.update({ + id, + title: 'Setting Logos from EPG', + message, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + loading: true, + }); + } else if (progress.status === 'completed') { + notifications.update({ + id, + title: 'Logo Setting Complete', + message: `Successfully updated ${progress.updated_count || 0} channel logos${progress.created_logos_count ? `, created ${progress.created_logos_count} new logos` : ''}`, + color: progress.updated_count > 0 ? 'green.5' : 'orange', + loading: false, + autoClose: 6000, + }); + // Refresh channels data and logos + try { + await API.requeryChannels(); + await useChannelsStore.getState().fetchChannels(); + + // Get updated channel data and extract logo IDs to load + const channels = useChannelsStore.getState().channels; + const logoIds = Object.values(channels) + .filter((channel) => channel.logo_id) + .map((channel) => channel.logo_id); + + // Fetch the specific logos that were just assigned + if (logoIds.length > 0) { + await useLogosStore.getState().fetchLogosByIds(logoIds); + } + } catch (e) { + console.warn( + 'Failed to refresh channels after logo setting:', + e + ); + } + } + break; + } + + case 'epg_name_setting_progress': { + const progress = parsedEvent.data; + const id = 'epg-name-setting-progress'; + + if (progress.status === 'running' && progress.progress === 0) { + // Initial message + notifications.show({ + id, + title: 'Setting Names from EPG', + message: `Processing ${progress.total} channels...`, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + loading: true, + }); + } else if (progress.status === 'running') { + // Progress update + let message = `Processed ${progress.progress} of ${progress.total} channels`; + if (progress.updated_count !== undefined) { + message += ` (${progress.updated_count} updated)`; + } + + notifications.update({ + id, + title: 'Setting Names from EPG', + message, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + loading: true, + }); + } else if (progress.status === 'completed') { + notifications.update({ + id, + title: 'Name Setting Complete', + message: `Successfully updated ${progress.updated_count || 0} channel names from EPG data`, + color: progress.updated_count > 0 ? 
'green.5' : 'orange', + loading: false, + autoClose: 6000, + }); + // Refresh channels data + try { + await API.requeryChannels(); + await useChannelsStore.getState().fetchChannels(); + } catch (e) { + console.warn( + 'Failed to refresh channels after name setting:', + e + ); + } + } + break; + } + case 'm3u_profile_test': setProfilePreview( parsedEvent.data.search_preview, @@ -275,11 +507,32 @@ export const WebsocketProvider = ({ children }) => { ); break; + case 'recording_updated': + try { + await useChannelsStore.getState().fetchRecordings(); + } catch (e) { + console.warn('Failed to refresh recordings on update:', e); + } + break; + + case 'recordings_refreshed': + try { + await useChannelsStore.getState().fetchRecordings(); + } catch (e) { + console.warn('Failed to refresh recordings on refreshed:', e); + } + break; + case 'recording_started': notifications.show({ title: 'Recording started!', message: `Started recording channel ${parsedEvent.data.channel}`, }); + try { + await useChannelsStore.getState().fetchRecordings(); + } catch (e) { + console.warn('Failed to refresh recordings on start:', e); + } break; case 'recording_ended': @@ -287,6 +540,11 @@ export const WebsocketProvider = ({ children }) => { title: 'Recording finished!', message: `Stopped recording channel ${parsedEvent.data.channel}`, }); + try { + await useChannelsStore.getState().fetchRecordings(); + } catch (e) { + console.warn('Failed to refresh recordings on end:', e); + } break; case 'epg_fetch_error': @@ -314,10 +572,10 @@ export const WebsocketProvider = ({ children }) => { // Update the store with progress information updateEPGProgress(parsedEvent.data); - // If we have source_id/account info, update the EPG source status - if (parsedEvent.data.source_id || parsedEvent.data.account) { + // If we have source/account info, update the EPG source status + if (parsedEvent.data.source || parsedEvent.data.account) { const sourceId = - parsedEvent.data.source_id || parsedEvent.data.account; + parsedEvent.data.source || parsedEvent.data.account; const epg = epgs[sourceId]; if (epg) { @@ -372,6 +630,194 @@ export const WebsocketProvider = ({ children }) => { } break; + case 'epg_sources_changed': + // A plugin or backend process signaled that the EPG sources changed + try { + await fetchEPGs(); + } catch (e) { + console.warn( + 'Failed to refresh EPG sources after change notification:', + e + ); + } + break; + + case 'epg_data_created': + // A new EPG data entry was created (e.g., for a dummy EPG) + // Fetch EPG data so the channel form can immediately assign it + try { + await fetchEPGData(); + } catch (e) { + console.warn('Failed to refresh EPG data after creation:', e); + } + break; + + case 'stream_rehash': + // Handle stream rehash progress updates + if (parsedEvent.data.action === 'starting') { + notifications.show({ + id: 'stream-rehash-progress', // Persistent ID + title: 'Stream Rehash Started', + message: parsedEvent.data.message, + color: 'blue.5', + autoClose: false, // Don't auto-close + withCloseButton: false, // No close button during processing + loading: true, // Show loading indicator + }); + } else if (parsedEvent.data.action === 'processing') { + // Update the existing notification + notifications.update({ + id: 'stream-rehash-progress', + title: 'Stream Rehash in Progress', + message: `${parsedEvent.data.progress}% complete - ${parsedEvent.data.processed} streams processed, ${parsedEvent.data.duplicates_merged} duplicates merged`, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + 
loading: true, + }); + } else if (parsedEvent.data.action === 'completed') { + // Update to completion state + notifications.update({ + id: 'stream-rehash-progress', + title: 'Stream Rehash Complete', + message: `Processed ${parsedEvent.data.total_processed} streams, merged ${parsedEvent.data.duplicates_merged} duplicates. Final count: ${parsedEvent.data.final_count}`, + color: 'green.5', + autoClose: 8000, // Auto-close after completion + withCloseButton: true, // Allow manual close + loading: false, // Remove loading indicator + }); + } else if (parsedEvent.data.action === 'blocked') { + // Handle blocked rehash attempt + notifications.show({ + title: 'Stream Rehash Blocked', + message: parsedEvent.data.message, + color: 'orange.5', + autoClose: 8000, + }); + } + break; + + case 'logo_processing_summary': + notifications.show({ + title: 'Logo Processing Summary', + message: `${parsedEvent.data.message}`, + color: 'blue', + autoClose: 5000, + }); + fetchLogos(); + break; + + case 'account_info_refresh_success': + notifications.show({ + title: 'Account Info Refreshed', + message: `Successfully updated account information for ${parsedEvent.data.profile_name}`, + color: 'green', + autoClose: 4000, + }); + // Trigger refresh of playlists to update the UI + fetchPlaylists(); + break; + + case 'account_info_refresh_error': + notifications.show({ + title: 'Account Info Refresh Failed', + message: + parsedEvent.data.error || + 'Failed to refresh account information', + color: 'red', + autoClose: 8000, + }); + break; + + case 'channels_created': + // General notification for channel creation + notifications.show({ + title: 'Channels Created', + message: `Successfully created ${parsedEvent.data.count || 'multiple'} channel(s)`, + color: 'green', + autoClose: 4000, + }); + + // Refresh the channels table to show new channels + try { + await API.requeryChannels(); + await useChannelsStore.getState().fetchChannels(); + console.log('Channels refreshed after bulk creation'); + } catch (error) { + console.error( + 'Error refreshing channels after creation:', + error + ); + } + + break; + + case 'playlist_created': + // Backend signals that a new playlist has been created and we should refresh + console.log( + 'Playlist created event received, refreshing playlists...' + ); + fetchPlaylists(); + break; + + case 'bulk_channel_creation_progress': { + // Handle progress updates with persistent notifications like stream rehash + const data = parsedEvent.data; + + if (data.status === 'starting') { + notifications.show({ + id: 'bulk-channel-creation-progress', // Persistent ID + title: 'Bulk Channel Creation Started', + message: data.message || 'Starting bulk channel creation...', + color: 'blue.5', + autoClose: false, // Don't auto-close + withCloseButton: false, // No close button during processing + loading: true, // Show loading indicator + }); + } else if ( + data.status === 'processing' || + data.status === 'creating_logos' || + data.status === 'creating_channels' + ) { + // Calculate progress percentage + const progressPercent = + data.total > 0 + ? 
Math.round((data.progress / data.total) * 100) + : 0; + + // Update the existing notification with progress + notifications.update({ + id: 'bulk-channel-creation-progress', + title: 'Bulk Channel Creation in Progress', + message: `${progressPercent}% complete - ${data.message}`, + color: 'blue.5', + autoClose: false, + withCloseButton: false, + loading: true, + }); + } else if (data.status === 'completed') { + // Hide the progress notification since channels_created will show success + notifications.hide('bulk-channel-creation-progress'); + } else if (data.status === 'failed') { + // Update to error state + notifications.update({ + id: 'bulk-channel-creation-progress', + title: 'Bulk Channel Creation Failed', + message: + data.error || + 'An error occurred during bulk channel creation', + color: 'red.5', + autoClose: 12000, // Auto-close after longer delay for errors + withCloseButton: true, // Allow manual close + loading: false, // Remove loading indicator + }); + } + + // Pass through to individual components for any additional handling + setVal(parsedEvent); + break; + } + default: console.error( `Unknown websocket event type: ${parsedEvent.data?.type}` @@ -442,6 +888,8 @@ export const WebsocketProvider = ({ children }) => { const setProfilePreview = usePlaylistsStore((s) => s.setProfilePreview); const fetchEPGData = useEPGsStore((s) => s.fetchEPGData); const fetchEPGs = useEPGsStore((s) => s.fetchEPGs); + const fetchLogos = useLogosStore((s) => s.fetchAllLogos); + const fetchChannelProfiles = useChannelsStore((s) => s.fetchChannelProfiles); const ret = useMemo(() => { return [isReady, ws.current?.send.bind(ws.current), val]; diff --git a/frontend/src/api.js b/frontend/src/api.js index 73bbde7d..8f5aeeeb 100644 --- a/frontend/src/api.js +++ b/frontend/src/api.js @@ -1,6 +1,7 @@ // src/api.js (updated) import useAuthStore from './store/auth'; import useChannelsStore from './store/channels'; +import useLogosStore from './store/logos'; import useUserAgentsStore from './store/userAgents'; import usePlaylistsStore from './store/playlists'; import useEPGsStore from './store/epgs'; @@ -9,6 +10,7 @@ import useStreamProfilesStore from './store/streamProfiles'; import useSettingsStore from './store/settings'; import { notifications } from '@mantine/notifications'; import useChannelsTableStore from './store/channelsTable'; +import useUsersStore from './store/users'; // If needed, you can set a base host or keep it empty if relative requests const host = import.meta.env.DEV @@ -249,7 +251,17 @@ export default class API { }); if (response.id) { - useChannelsStore.getState().addChannelGroup(response); + // Add association flags for new groups + const processedGroup = { + ...response, + hasChannels: false, + hasM3UAccounts: false, + canEdit: true, + canDelete: true, + }; + useChannelsStore.getState().addChannelGroup(processedGroup); + // Refresh channel groups to update the UI + useChannelsStore.getState().fetchChannelGroups(); } return response; @@ -276,17 +288,62 @@ export default class API { } } + static async deleteChannelGroup(id) { + try { + await request(`${host}/api/channels/groups/${id}/`, { + method: 'DELETE', + }); + + // Remove from store after successful deletion + useChannelsStore.getState().removeChannelGroup(id); + + return true; + } catch (e) { + errorNotification('Failed to delete channel group', e); + throw e; + } + } + + static async cleanupUnusedChannelGroups() { + try { + const response = await request(`${host}/api/channels/groups/cleanup/`, { + method: 'POST', + }); + + // 
Refresh channel groups to update the UI + useChannelsStore.getState().fetchChannelGroups(); + + return response; + } catch (e) { + errorNotification('Failed to cleanup unused channel groups', e); + throw e; + } + } + static async addChannel(channel) { try { let body = null; + // Prepare a copy to safely mutate + const channelData = { ...channel }; + + // Remove channel_number if empty, null, or not a valid number + if ( + channelData.channel_number === '' || + channelData.channel_number === null || + channelData.channel_number === undefined || + (typeof channelData.channel_number === 'string' && channelData.channel_number.trim() === '') + ) { + delete channelData.channel_number; + } + if (channel.logo_file) { // Must send FormData for file upload body = new FormData(); - for (const prop in channel) { - body.append(prop, channel[prop]); + for (const prop in channelData) { + body.append(prop, channelData[prop]); } } else { - body = { ...channel }; + body = { ...channelData }; delete body.logo_file; } @@ -389,9 +446,9 @@ export default class API { static async updateChannels(ids, values) { const body = []; - for (const id in ids) { + for (const id of ids) { body.push({ - id, + id: id, ...values, }); } @@ -405,7 +462,24 @@ export default class API { } ); - useChannelsStore.getState().updateChannels(response); + // Don't automatically update the store here - let the caller handle it + return response; + } catch (e) { + errorNotification('Failed to update channels', e); + } + } + + // Bulk update with per-channel payloads (e.g., regex renames) + static async bulkUpdateChannels(updates) { + try { + const response = await request( + `${host}/api/channels/channels/edit/bulk/`, + { + method: 'PATCH', + body: updates, + } + ); + return response; } catch (e) { errorNotification('Failed to update channels', e); @@ -442,6 +516,75 @@ export default class API { } } + static async setChannelNamesFromEpg(channelIds) { + try { + const response = await request( + `${host}/api/channels/channels/set-names-from-epg/`, + { + method: 'POST', + body: { channel_ids: channelIds }, + } + ); + + notifications.show({ + title: 'Task Started', + message: response.message, + color: 'blue', + }); + + return response; + } catch (e) { + errorNotification('Failed to start EPG name setting task', e); + throw e; + } + } + + static async setChannelLogosFromEpg(channelIds) { + try { + const response = await request( + `${host}/api/channels/channels/set-logos-from-epg/`, + { + method: 'POST', + body: { channel_ids: channelIds }, + } + ); + + notifications.show({ + title: 'Task Started', + message: response.message, + color: 'blue', + }); + + return response; + } catch (e) { + errorNotification('Failed to start EPG logo setting task', e); + throw e; + } + } + + static async setChannelTvgIdsFromEpg(channelIds) { + try { + const response = await request( + `${host}/api/channels/channels/set-tvg-ids-from-epg/`, + { + method: 'POST', + body: { channel_ids: channelIds }, + } + ); + + notifications.show({ + title: 'Task Started', + message: response.message, + color: 'blue', + }); + + return response; + } catch (e) { + errorNotification('Failed to start EPG TVG-ID setting task', e); + throw e; + } + } + static async assignChannelNumbers(channelIds, startingNum = 1) { try { const response = await request(`${host}/api/channels/channels/assign/`, { @@ -475,23 +618,32 @@ export default class API { } } - static async createChannelsFromStreams(values) { + static async createChannelsFromStreamsAsync(streamIds, channelProfileIds = null, 
startingChannelNumber = null) { try { + const requestBody = { + stream_ids: streamIds, + }; + + if (channelProfileIds !== null) { + requestBody.channel_profile_ids = channelProfileIds; + } + + if (startingChannelNumber !== null) { + requestBody.starting_channel_number = startingChannelNumber; + } + const response = await request( `${host}/api/channels/channels/from-stream/bulk/`, { method: 'POST', - body: values, + body: requestBody, } ); - if (response.created.length > 0) { - useChannelsStore.getState().addChannels(response.created); - } - return response; } catch (e) { - errorNotification('Failed to create channels', e); + errorNotification('Failed to start bulk channel creation task', e); + throw e; } } @@ -690,11 +842,32 @@ export default class API { } } - static async addPlaylist(values) { - if (values.custom_properties) { - values.custom_properties = JSON.stringify(values.custom_properties); + static async updateM3UGroupSettings( + playlistId, + groupSettings = [], + categorySettings = [] + ) { + try { + const response = await request( + `${host}/api/m3u/accounts/${playlistId}/group-settings/`, + { + method: 'PATCH', + body: { + group_settings: groupSettings, + category_settings: categorySettings, + }, + } + ); + // Fetch the updated playlist and update the store + const updatedPlaylist = await API.getPlaylist(playlistId); + usePlaylistsStore.getState().updatePlaylist(updatedPlaylist); + return response; + } catch (e) { + errorNotification('Failed to update M3U group settings', e); } + } + static async addPlaylist(values) { try { let body = null; if (values.file) { @@ -725,13 +898,11 @@ export default class API { const response = await request(`${host}/api/m3u/refresh/${id}/`, { method: 'POST', }); - return response; } catch (e) { errorNotification('Failed to refresh M3U account', e); } } - static async refreshAllPlaylist() { try { const response = await request(`${host}/api/m3u/refresh/`, { @@ -743,6 +914,19 @@ export default class API { errorNotification('Failed to refresh all M3U accounts', e); } } + static async refreshVODContent(accountId) { + try { + const response = await request( + `${host}/api/m3u/accounts/${accountId}/refresh-vod/`, + { + method: 'POST', + } + ); + return response; + } catch (e) { + errorNotification('Failed to refresh VOD content', e); + } + } static async deletePlaylist(id) { try { @@ -796,7 +980,6 @@ export default class API { body = { ...payload }; delete body.file; } - console.log(body); const response = await request(`${host}/api/m3u/accounts/${id}/`, { method: 'PATCH', @@ -935,6 +1118,21 @@ export default class API { } } + static async getTimezones() { + try { + const response = await request(`${host}/api/core/timezones/`); + return response; + } catch (e) { + errorNotification('Failed to retrieve timezones', e); + // Return fallback data instead of throwing + return { + timezones: ['UTC', 'US/Eastern', 'US/Central', 'US/Mountain', 'US/Pacific'], + grouped: {}, + count: 5 + }; + } + } + static async getStreamProfiles() { try { const response = await request(`${host}/api/core/streamprofiles/`); @@ -1052,6 +1250,64 @@ export default class API { } } + static async refreshAccountInfo(profileId) { + try { + const response = await request(`${host}/api/m3u/refresh-account-info/${profileId}/`, { + method: 'POST', + }); + return response; + } catch (e) { + // If it's a structured error response, return it instead of throwing + if (e.body && typeof e.body === 'object') { + return e.body; + } + errorNotification(`Failed to refresh account info for profile 
${profileId}`, e); + throw e; + } + } + + static async addM3UFilter(accountId, values) { + try { + const response = await request( + `${host}/api/m3u/accounts/${accountId}/filters/`, + { + method: 'POST', + body: values, + } + ); + + return response; + } catch (e) { + errorNotification(`Failed to add filter to account ${accountId}`, e); + } + } + + static async deleteM3UFilter(accountId, id) { + try { + await request(`${host}/api/m3u/accounts/${accountId}/filters/${id}/`, { + method: 'DELETE', + }); + } catch (e) { + errorNotification(`Failed to delete filter for account ${accountId}`, e); + } + } + + static async updateM3UFilter(accountId, filterId, values) { + const { id, ...payload } = values; + + try { + await request( + `${host}/api/m3u/accounts/${accountId}/filters/${filterId}/`, + { + method: 'PUT', + body: payload, + } + ); + } catch (e) { + errorNotification(`Failed to update filter for account ${accountId}`, e); + } + } + static async getSettings() { try { const response = await request(`${host}/api/core/settings/`); @@ -1084,6 +1340,109 @@ } } + // Plugins API + static async getPlugins() { + try { + const response = await request(`${host}/api/plugins/plugins/`); + return response.plugins || []; + } catch (e) { + errorNotification('Failed to retrieve plugins', e); + } + } + + static async reloadPlugins() { + try { + const response = await request(`${host}/api/plugins/plugins/reload/`, { + method: 'POST', + }); + return response; + } catch (e) { + errorNotification('Failed to reload plugins', e); + } + } + + static async importPlugin(file) { + try { + const form = new FormData(); + form.append('file', file); + const response = await request(`${host}/api/plugins/plugins/import/`, { + method: 'POST', + body: form, + }); + return response; + } catch (e) { + // Show only the concise error message for plugin import + const msg = (e?.body && (e.body.error || e.body.detail)) || e?.message || 'Failed to import plugin'; + notifications.show({ title: 'Import failed', message: msg, color: 'red' }); + throw e; + } + } + + static async deletePlugin(key) { + try { + const response = await request(`${host}/api/plugins/plugins/${key}/delete/`, { + method: 'DELETE', + }); + return response; + } catch (e) { + errorNotification('Failed to delete plugin', e); + } + } + + static async updatePluginSettings(key, settings) { + try { + const response = await request( + `${host}/api/plugins/plugins/${key}/settings/`, + { + method: 'POST', + body: { settings }, + } + ); + return response?.settings || {}; + } catch (e) { + errorNotification('Failed to update plugin settings', e); + } + } + + static async runPluginAction(key, action, params = {}) { + try { + const response = await request(`${host}/api/plugins/plugins/${key}/run/`, { + method: 'POST', + body: { action, params }, + }); + return response; + } catch (e) { + errorNotification('Failed to run plugin action', e); + } + } + + static async setPluginEnabled(key, enabled) { + try { + const response = await request(`${host}/api/plugins/plugins/${key}/enabled/`, { + method: 'POST', + body: { enabled }, + }); + return response; + } catch (e) { + errorNotification('Failed to update plugin enabled state', e); + } + } + + static async checkSetting(values) { + const { id, ...payload } = values; + + try { + const response = await request(`${host}/api/core/settings/check/`, { + method: 'POST', + body: payload, + }); + + return response; + } catch (e) { + errorNotification('Failed to check setting', e); + } + } + static async 
updateSetting(values) { const { id, ...payload } = values; @@ -1101,6 +1460,19 @@ export default class API { } } + static async createSetting(values) { + try { + const response = await request(`${host}/api/core/settings/`, { + method: 'POST', + body: values, + }); + useSettingsStore.getState().updateSetting(response); + return response; + } catch (e) { + errorNotification('Failed to create setting', e); + } + } + static async getChannelStats(uuid = null) { try { const response = await request(`${host}/proxy/ts/status`); @@ -1111,6 +1483,16 @@ export default class API { } } + static async getVODStats() { + try { + const response = await request(`${host}/proxy/vod/stats/`); + + return response; + } catch (e) { + errorNotification('Failed to retrieve VOD stats', e); + } + } + static async stopChannel(id) { try { const response = await request(`${host}/proxy/ts/stop/${id}`, { @@ -1139,12 +1521,18 @@ export default class API { } } - static async matchEpg() { + static async matchEpg(channelIds = null) { try { + const requestBody = channelIds ? { channel_ids: channelIds } : {}; + const response = await request( `${host}/api/channels/channels/match-epg/`, { method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(requestBody), } ); @@ -1154,9 +1542,42 @@ export default class API { } } - static async getLogos() { + static async matchChannelEpg(channelId) { try { - const response = await request(`${host}/api/channels/logos/`); + const response = await request( + `${host}/api/channels/channels/${channelId}/match-epg/`, + { + method: 'POST', + } + ); + + // Update the channel in the store with the refreshed data if provided + if (response.channel) { + useChannelsStore.getState().updateChannel(response.channel); + } + + return response; + } catch (e) { + errorNotification('Failed to run EPG auto-match for channel', e); + } + } + + static async fetchActiveChannelStats() { + try { + const response = await request(`${host}/proxy/ts/status`); + return response; + } catch (e) { + errorNotification('Failed to fetch active channel stats', e); + throw e; + } + } + + static async getLogos(params = {}) { + try { + const queryParams = new URLSearchParams(params); + const response = await request( + `${host}/api/channels/logos/?${queryParams.toString()}` + ); return response; } catch (e) { @@ -1164,21 +1585,277 @@ export default class API { } } - static async uploadLogo(file) { + static async getLogosByIds(logoIds) { + try { + if (!logoIds || logoIds.length === 0) return []; + + const params = new URLSearchParams(); + logoIds.forEach(id => params.append('ids', id)); + // Disable pagination for ID-based queries to get all matching logos + params.append('no_pagination', 'true'); + + const response = await request( + `${host}/api/channels/logos/?${params.toString()}` + ); + + return response; + } catch (e) { + errorNotification('Failed to retrieve logos by IDs', e); + return []; + } + } + + static async fetchLogos() { + try { + const response = await this.getLogos(); + useLogosStore.getState().setLogos(response); + return response; + } catch (e) { + errorNotification('Failed to fetch logos', e); + } + } + + static async fetchUsedLogos() { + try { + const response = await useLogosStore.getState().fetchUsedLogos(); + return response; + } catch (e) { + errorNotification('Failed to fetch used logos', e); + } + } + + static async fetchLogosByIds(logoIds) { + try { + const response = await useLogosStore.getState().fetchLogosByIds(logoIds); + return response; + } catch (e) { + 
errorNotification('Failed to fetch logos by IDs', e); + } + } + + static async uploadLogo(file, name = null) { try { const formData = new FormData(); formData.append('file', file); - const response = await request(`${host}/api/channels/logos/upload/`, { + // Add custom name if provided + if (name && name.trim()) { + formData.append('name', name.trim()); + } + + // Add timeout handling for file uploads + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), 30000); // 30 second timeout + + const response = await fetch(`${host}/api/channels/logos/upload/`, { + method: 'POST', + body: formData, + headers: { + Authorization: `Bearer ${await API.getAuthToken()}`, + }, + signal: controller.signal, + }); + + clearTimeout(timeoutId); + + if (!response.ok) { + const error = new Error(`HTTP error! Status: ${response.status}`); + let errorBody = await response.text(); + + try { + errorBody = JSON.parse(errorBody); + } catch (e) { + // If parsing fails, leave errorBody as the raw text + } + + error.status = response.status; + error.response = response; + error.body = errorBody; + throw error; + } + + const result = await response.json(); + useLogosStore.getState().addLogo(result); + return result; + } catch (e) { + if (e.name === 'AbortError') { + const timeoutError = new Error('Upload timed out. Please try again.'); + timeoutError.code = 'NETWORK_ERROR'; + throw timeoutError; + } + errorNotification('Failed to upload logo', e); + throw e; + } + } + + static async createLogo(values) { + try { + // Use FormData for logo creation to match backend expectations + const formData = new FormData(); + for (const [key, value] of Object.entries(values)) { + if (value !== null && value !== undefined) { + formData.append(key, value); + } + } + + const response = await request(`${host}/api/channels/logos/`, { method: 'POST', body: formData, }); - useChannelsStore.getState().addLogo(response); + useLogosStore.getState().addLogo(response); return response; } catch (e) { - errorNotification('Failed to upload logo', e); + errorNotification('Failed to create logo', e); + } + } + + static async updateLogo(id, values) { + try { + const response = await request(`${host}/api/channels/logos/${id}/`, { + method: 'PUT', + body: values, // This will be converted to JSON in the request function + }); + + useLogosStore.getState().updateLogo(response); + + return response; + } catch (e) { + errorNotification('Failed to update logo', e); + } + } + + static async deleteLogo(id, deleteFile = false) { + try { + const params = new URLSearchParams(); + if (deleteFile) { + params.append('delete_file', 'true'); + } + + const url = `${host}/api/channels/logos/${id}/?${params.toString()}`; + await request(url, { + method: 'DELETE', + }); + + useLogosStore.getState().removeLogo(id); + + return true; + } catch (e) { + errorNotification('Failed to delete logo', e); + } + } + + static async deleteLogos(ids, deleteFiles = false) { + try { + const body = { logo_ids: ids }; + if (deleteFiles) { + body.delete_files = true; + } + + await request(`${host}/api/channels/logos/bulk-delete/`, { + method: 'DELETE', + body: body, + }); + + // Remove multiple logos from store + ids.forEach((id) => { + useLogosStore.getState().removeLogo(id); + }); + + return true; + } catch (e) { + errorNotification('Failed to delete logos', e); + } + } + + static async cleanupUnusedLogos(deleteFiles = false) { + try { + const body = {}; + if (deleteFiles) { + body.delete_files = true; + } + + const response = await 
request(`${host}/api/channels/logos/cleanup/`, { + method: 'POST', + body: body, + }); + + return response; + } catch (e) { + errorNotification('Failed to cleanup unused logos', e); + throw e; + } + } + + // VOD Logo Methods + static async getVODLogos(params = {}) { + try { + // Transform usage filter to match backend expectations + const apiParams = { ...params }; + if (apiParams.usage === 'used') { + apiParams.used = 'true'; + delete apiParams.usage; + } else if (apiParams.usage === 'unused') { + apiParams.used = 'false'; + delete apiParams.usage; + } else if (apiParams.usage === 'movies') { + apiParams.used = 'movies'; + delete apiParams.usage; + } else if (apiParams.usage === 'series') { + apiParams.used = 'series'; + delete apiParams.usage; + } + + const queryParams = new URLSearchParams(apiParams); + const response = await request( + `${host}/api/vod/vodlogos/?${queryParams.toString()}` + ); + + return response; + } catch (e) { + errorNotification('Failed to retrieve VOD logos', e); + throw e; + } + } + + static async deleteVODLogo(id) { + try { + await request(`${host}/api/vod/vodlogos/${id}/`, { + method: 'DELETE', + }); + + return true; + } catch (e) { + errorNotification('Failed to delete VOD logo', e); + throw e; + } + } + + static async deleteVODLogos(ids) { + try { + await request(`${host}/api/vod/vodlogos/bulk-delete/`, { + method: 'DELETE', + body: { logo_ids: ids }, + }); + + return true; + } catch (e) { + errorNotification('Failed to delete VOD logos', e); + throw e; + } + } + + static async cleanupUnusedVODLogos() { + try { + const response = await request(`${host}/api/vod/vodlogos/cleanup/`, { + method: 'POST', + }); + + return response; + } catch (e) { + errorNotification('Failed to cleanup unused VOD logos', e); + throw e; } } @@ -1305,18 +1982,181 @@ export default class API { } } - static async deleteRecording(id) { + static async updateRecording(id, values) { try { - await request(`${host}/api/channels/recordings/${id}/`, { + const response = await request(`${host}/api/channels/recordings/${id}/`, { + method: 'PATCH', + body: values, + }); + useChannelsStore.getState().fetchRecordings(); + return response; + } catch (e) { + errorNotification(`Failed to update recording ${id}`, e); + } + } + + static async getComskipConfig() { + try { + return await request(`${host}/api/channels/dvr/comskip-config/`); + } catch (e) { + errorNotification('Failed to retrieve comskip configuration', e); + } + } + + static async uploadComskipIni(file) { + try { + const formData = new FormData(); + formData.append('file', file); + return await request(`${host}/api/channels/dvr/comskip-config/`, { + method: 'POST', + body: formData, + }); + } catch (e) { + errorNotification('Failed to upload comskip.ini', e); + } + } + + static async listRecurringRules() { + try { + const response = await request(`${host}/api/channels/recurring-rules/`); + return response; + } catch (e) { + errorNotification('Failed to retrieve recurring DVR rules', e); + } + } + + static async createRecurringRule(payload) { + try { + const response = await request(`${host}/api/channels/recurring-rules/`, { + method: 'POST', + body: payload, + }); + return response; + } catch (e) { + errorNotification('Failed to create recurring DVR rule', e); + } + } + + static async updateRecurringRule(ruleId, payload) { + try { + const response = await request(`${host}/api/channels/recurring-rules/${ruleId}/`, { + method: 'PATCH', + body: payload, + }); + return response; + } catch (e) { + errorNotification(`Failed to update recurring 
rule ${ruleId}`, e); + } + } + + static async deleteRecurringRule(ruleId) { + try { + await request(`${host}/api/channels/recurring-rules/${ruleId}/`, { method: 'DELETE', }); + } catch (e) { + errorNotification(`Failed to delete recurring rule ${ruleId}`, e); + } + } - useChannelsStore.getState().fetchRecordings(); + static async deleteRecording(id) { + try { + await request(`${host}/api/channels/recordings/${id}/`, { method: 'DELETE' }); + // Optimistically remove locally for instant UI update + try { useChannelsStore.getState().removeRecording(id); } catch {} } catch (e) { errorNotification(`Failed to delete recording ${id}`, e); } } + static async runComskip(recordingId) { + try { + const resp = await request(`${host}/api/channels/recordings/${recordingId}/comskip/`, { + method: 'POST', + }); + // Refresh recordings list to reflect comskip status when done later + // This endpoint just queues the task; the websocket/refresh will update eventually + return resp; + } catch (e) { + errorNotification('Failed to run comskip', e); + throw e; + } + } + + // DVR Series Rules + static async listSeriesRules() { + try { + const resp = await request(`${host}/api/channels/series-rules/`); + return resp?.rules || []; + } catch (e) { + errorNotification('Failed to load series rules', e); + return []; + } + } + + static async createSeriesRule(values) { + try { + const resp = await request(`${host}/api/channels/series-rules/`, { + method: 'POST', + body: values, + }); + notifications.show({ title: 'Series rule saved' }); + return resp; + } catch (e) { + errorNotification('Failed to save series rule', e); + throw e; + } + } + + static async deleteSeriesRule(tvgId) { + try { + await request(`${host}/api/channels/series-rules/${tvgId}/`, { method: 'DELETE' }); + notifications.show({ title: 'Series rule removed' }); + } catch (e) { + errorNotification('Failed to remove series rule', e); + throw e; + } + } + + static async deleteAllUpcomingRecordings() { + try { + const resp = await request(`${host}/api/channels/recordings/bulk-delete-upcoming/`, { + method: 'POST', + }); + notifications.show({ title: `Removed ${resp.removed || 0} upcoming` }); + useChannelsStore.getState().fetchRecordings(); + return resp; + } catch (e) { + errorNotification('Failed to delete upcoming recordings', e); + throw e; + } + } + + static async evaluateSeriesRules(tvgId = null) { + try { + await request(`${host}/api/channels/series-rules/evaluate/`, { + method: 'POST', + body: tvgId ? { tvg_id: tvgId } : {}, + }); + } catch (e) { + errorNotification('Failed to evaluate series rules', e); + } + } + + static async bulkRemoveSeriesRecordings({ tvg_id, title = null, scope = 'title' }) { + try { + const resp = await request(`${host}/api/channels/series-rules/bulk-remove/`, { + method: 'POST', + body: { tvg_id, title, scope }, + }); + notifications.show({ title: `Removed ${resp.removed || 0} scheduled` }); + return resp; + } catch (e) { + errorNotification('Failed to bulk-remove scheduled recordings', e); + throw e; + } + } + static async switchStream(channelId, streamId) { try { const response = await request( @@ -1363,9 +2203,15 @@ export default class API { // If successful, requery channels to update UI if (response.success) { + // Build message based on whether EPG sources need refreshing + let message = `Updated ${response.channels_updated} channel${response.channels_updated !== 1 ? 
's' : ''}`; + if (response.programs_refreshed > 0) { + message += `, refreshing ${response.programs_refreshed} EPG source${response.programs_refreshed !== 1 ? 's' : ''}`; + } + notifications.show({ title: 'EPG Association', - message: `Updated ${response.channels_updated} channels, refreshing ${response.programs_refreshed} EPG sources.`, + message: message, color: 'blue', }); @@ -1392,4 +2238,238 @@ return null; } } + + static async me() { + return await request(`${host}/api/accounts/users/me/`); + } + + static async getUsers() { + try { + const response = await request(`${host}/api/accounts/users/`); + return response; + } catch (e) { + errorNotification('Failed to fetch users', e); + } + } + + static async createUser(body) { + try { + const response = await request(`${host}/api/accounts/users/`, { + method: 'POST', + body, + }); + + useUsersStore.getState().addUser(response); + + return response; + } catch (e) { + errorNotification('Failed to create user', e); + } + } + + static async updateUser(id, body) { + try { + const response = await request(`${host}/api/accounts/users/${id}/`, { + method: 'PATCH', + body, + }); + + useUsersStore.getState().updateUser(response); + + return response; + } catch (e) { + errorNotification('Failed to update user', e); + } + } + + static async deleteUser(id) { + try { + await request(`${host}/api/accounts/users/${id}/`, { + method: 'DELETE', + }); + + useUsersStore.getState().removeUser(id); + } catch (e) { + errorNotification('Failed to delete user', e); + } + } + + static async rehashStreams() { + try { + const response = await request(`${host}/api/core/rehash-streams/`, { + method: 'POST', + }); + + return response; + } catch (e) { + errorNotification('Failed to trigger stream rehash', e); + } + } + + static async getStreamsByIds(ids) { + try { + // Use POST for large ID lists to avoid URL length limitations + if (ids.length > 50) { + const response = await request( + `${host}/api/channels/streams/by-ids/`, + { + method: 'POST', + body: { ids }, + } + ); + return response; + } else { + // Use GET for small ID lists for backward compatibility + const params = new URLSearchParams(); + params.append('ids', ids.join(',')); + const response = await request( + `${host}/api/channels/streams/?${params.toString()}` + ); + return response.results || response; + } + } catch (e) { + errorNotification('Failed to retrieve streams by IDs', e); + throw e; // Re-throw to allow proper error handling in calling code + } + } + + // VOD Methods + static async getMovies(params = new URLSearchParams()) { + try { + const response = await request( + `${host}/api/vod/movies/?${params.toString()}` + ); + return response; + } catch (e) { + // Don't show error notification for "Invalid page" errors as they're handled gracefully + const isInvalidPage = e.body?.detail?.includes('Invalid page') || + e.message?.includes('Invalid page'); + + if (!isInvalidPage) { + errorNotification('Failed to retrieve movies', e); + } + throw e; + } + } + + static async getSeries(params = new URLSearchParams()) { + try { + const response = await request( + `${host}/api/vod/series/?${params.toString()}` + ); + return response; + } catch (e) { + // Don't show error notification for "Invalid page" errors as they're handled gracefully + const isInvalidPage = e.body?.detail?.includes('Invalid page') || + e.message?.includes('Invalid page'); + + if (!isInvalidPage) { + errorNotification('Failed to retrieve series', e); + } + throw e; + } + } + + static async getAllContent(params 
= new URLSearchParams()) { + try { + console.log('Calling getAllContent with URL:', `${host}/api/vod/all/?${params.toString()}`); + const response = await request( + `${host}/api/vod/all/?${params.toString()}` + ); + console.log('getAllContent raw response:', response); + return response; + } catch (e) { + console.error('getAllContent error:', e); + console.error('Error status:', e.status); + console.error('Error body:', e.body); + console.error('Error message:', e.message); + + // Don't show error notification for "Invalid page" errors as they're handled gracefully + const isInvalidPage = e.body?.detail?.includes('Invalid page') || + e.message?.includes('Invalid page'); + + if (!isInvalidPage) { + errorNotification('Failed to retrieve content', e); + } + throw e; + } + } + + static async getMovieDetails(movieId) { + try { + const response = await request(`${host}/api/vod/movies/${movieId}/`); + return response; + } catch (e) { + errorNotification('Failed to retrieve movie details', e); + } + } + + static async getMovieProviderInfo(movieId) { + try { + const response = await request( + `${host}/api/vod/movies/${movieId}/provider-info/` + ); + return response; + } catch (e) { + errorNotification('Failed to retrieve movie provider info', e); + } + } + + static async getMovieProviders(movieId) { + try { + const response = await request( + `${host}/api/vod/movies/${movieId}/providers/` + ); + return response; + } catch (e) { + errorNotification('Failed to retrieve movie providers', e); + } + } + + static async getSeriesProviders(seriesId) { + try { + const response = await request( + `${host}/api/vod/series/${seriesId}/providers/` + ); + return response; + } catch (e) { + errorNotification('Failed to retrieve series providers', e); + } + } + + static async getVODCategories() { + try { + const response = await request(`${host}/api/vod/categories/`); + return response; + } catch (e) { + errorNotification('Failed to retrieve VOD categories', e); + } + } + + static async getSeriesInfo(seriesId) { + try { + // Call the provider-info endpoint that includes episodes + const response = await request( + `${host}/api/vod/series/${seriesId}/provider-info/?include_episodes=true` + ); + return response; + } catch (e) { + errorNotification('Failed to retrieve series info', e); + } + } + + static async updateVODPosition(vodUuid, clientId, position) { + try { + const response = await request( + `${host}/proxy/vod/stream/${vodUuid}/position/`, + { + method: 'POST', + body: { client_id: clientId, position }, + } + ); + return response; + } catch (e) { + errorNotification('Failed to update playback position', e); + } + } } diff --git a/frontend/src/components/ConfirmationDialog.jsx b/frontend/src/components/ConfirmationDialog.jsx index 3c0f15e7..73805513 100644 --- a/frontend/src/components/ConfirmationDialog.jsx +++ b/frontend/src/components/ConfirmationDialog.jsx @@ -18,60 +18,90 @@ import useWarningsStore from '../store/warnings'; * @param {string} [props.size='md'] - Size of the modal */ const ConfirmationDialog = ({ - opened, - onClose, - onConfirm, - title = 'Confirm Action', - message = 'Are you sure you want to proceed?', - confirmLabel = 'Confirm', - cancelLabel = 'Cancel', - actionKey, - onSuppressChange, - size = 'md', // Add default size parameter - md is a medium width + opened, + onClose, + onConfirm, + title = 'Confirm Action', + message = 'Are you sure you want to proceed?', + confirmLabel = 'Confirm', + cancelLabel = 'Cancel', + actionKey, + onSuppressChange, + size = 'md', + zIndex = 1000, + 
showDeleteFileOption = false, + deleteFileLabel = 'Also delete files from disk', }) => { - const suppressWarning = useWarningsStore((s) => s.suppressWarning); - const isWarningSuppressed = useWarningsStore((s) => s.isWarningSuppressed); - const [suppressChecked, setSuppressChecked] = useState( - isWarningSuppressed(actionKey) - ); + const suppressWarning = useWarningsStore((s) => s.suppressWarning); + const isWarningSuppressed = useWarningsStore((s) => s.isWarningSuppressed); + const [suppressChecked, setSuppressChecked] = useState( + isWarningSuppressed(actionKey) + ); + const [deleteFiles, setDeleteFiles] = useState(false); - const handleToggleSuppress = (e) => { - setSuppressChecked(e.currentTarget.checked); - if (onSuppressChange) { - onSuppressChange(e.currentTarget.checked); - } - }; + const handleToggleSuppress = (e) => { + setSuppressChecked(e.currentTarget.checked); + if (onSuppressChange) { + onSuppressChange(e.currentTarget.checked); + } + }; - const handleConfirm = () => { - if (suppressChecked) { - suppressWarning(actionKey); - } - onConfirm(); - }; + const handleConfirm = () => { + if (suppressChecked) { + suppressWarning(actionKey); + } + if (showDeleteFileOption) { + onConfirm(deleteFiles); + } else { + onConfirm(); + } + setDeleteFiles(false); // Reset for next time + }; - return ( - - {message} + const handleClose = () => { + setDeleteFiles(false); // Reset for next time + onClose(); + }; - {actionKey && ( - - )} + return ( + + {message} - - - - - - ); + {actionKey && ( + + )} + + {showDeleteFileOption && ( + setDeleteFiles(event.currentTarget.checked)} + label={deleteFileLabel} + mb="md" + /> + )} + + + + + + + ); }; export default ConfirmationDialog; diff --git a/frontend/src/components/FloatingVideo.jsx b/frontend/src/components/FloatingVideo.jsx index 46c191eb..6aaeecda 100644 --- a/frontend/src/components/FloatingVideo.jsx +++ b/frontend/src/components/FloatingVideo.jsx @@ -8,52 +8,277 @@ import { CloseButton, Flex, Loader, Text, Box } from '@mantine/core'; export default function FloatingVideo() { const isVisible = useVideoStore((s) => s.isVisible); const streamUrl = useVideoStore((s) => s.streamUrl); + const contentType = useVideoStore((s) => s.contentType); + const metadata = useVideoStore((s) => s.metadata); const hideVideo = useVideoStore((s) => s.hideVideo); const videoRef = useRef(null); const playerRef = useRef(null); const videoContainerRef = useRef(null); - // Convert ref to state so we can use it for rendering const [isLoading, setIsLoading] = useState(false); const [loadError, setLoadError] = useState(null); + const [showOverlay, setShowOverlay] = useState(true); + const overlayTimeoutRef = useRef(null); - // Safely destroy the player to prevent errors + // Safely destroy the mpegts player to prevent errors const safeDestroyPlayer = () => { try { if (playerRef.current) { - // Set loading to false when destroying player setIsLoading(false); setLoadError(null); - // First unload the source to stop any in-progress fetches if (videoRef.current) { - // Remove src attribute and force a load to clear any pending requests videoRef.current.removeAttribute('src'); videoRef.current.load(); } - // Pause the player first try { playerRef.current.pause(); } catch (e) { // Ignore pause errors } - // Use a try-catch block specifically for the destroy call try { playerRef.current.destroy(); } catch (error) { - // Ignore expected abort errors - if (error.name !== 'AbortError' && !error.message?.includes('aborted')) { - console.log("Error during player destruction:", 
error.message); + if ( + error.name !== 'AbortError' && + !error.message?.includes('aborted') + ) { + console.log('Error during player destruction:', error.message); } } finally { playerRef.current = null; } } } catch (error) { - console.log("Error during player cleanup:", error); + console.log('Error during player cleanup:', error); playerRef.current = null; } + + // Clear overlay timer + if (overlayTimeoutRef.current) { + clearTimeout(overlayTimeoutRef.current); + overlayTimeoutRef.current = null; + } + }; + + // Start overlay auto-hide timer + const startOverlayTimer = () => { + if (overlayTimeoutRef.current) { + clearTimeout(overlayTimeoutRef.current); + } + overlayTimeoutRef.current = setTimeout(() => { + setShowOverlay(false); + }, 4000); // Hide after 4 seconds + }; + + // Initialize VOD player (native HTML5 with enhanced controls) + const initializeVODPlayer = () => { + if (!videoRef.current || !streamUrl) return; + + setIsLoading(true); + setLoadError(null); + setShowOverlay(true); // Show overlay initially + + console.log('Initializing VOD player for:', streamUrl); + + const video = videoRef.current; + + // Enhanced video element configuration for VOD + video.preload = 'metadata'; + video.crossOrigin = 'anonymous'; + + // Set up event listeners + const handleLoadStart = () => setIsLoading(true); + const handleLoadedData = () => setIsLoading(false); + const handleCanPlay = () => { + setIsLoading(false); + // Auto-play for VOD content + video.play().catch((e) => { + console.log('Auto-play prevented:', e); + setLoadError('Auto-play was prevented. Click play to start.'); + }); + // Start overlay timer when video is ready + startOverlayTimer(); + }; + const handleError = (e) => { + setIsLoading(false); + const error = e.target.error; + let errorMessage = 'Video playback error'; + + if (error) { + switch (error.code) { + case error.MEDIA_ERR_ABORTED: + errorMessage = 'Video playback was aborted'; + break; + case error.MEDIA_ERR_NETWORK: + errorMessage = 'Network error while loading video'; + break; + case error.MEDIA_ERR_DECODE: + errorMessage = 'Video codec not supported by your browser'; + break; + case error.MEDIA_ERR_SRC_NOT_SUPPORTED: + errorMessage = 'Video format not supported by your browser'; + break; + default: + errorMessage = error.message || 'Unknown video error'; + } + } + + setLoadError(errorMessage); + }; + + // Enhanced progress tracking for VOD + const handleProgress = () => { + if (video.buffered.length > 0) { + const bufferedEnd = video.buffered.end(video.buffered.length - 1); + const duration = video.duration; + if (duration > 0) { + const bufferedPercent = (bufferedEnd / duration) * 100; + // You could emit this to a store for UI feedback + } + } + }; + + // Add event listeners + video.addEventListener('loadstart', handleLoadStart); + video.addEventListener('loadeddata', handleLoadedData); + video.addEventListener('canplay', handleCanPlay); + video.addEventListener('error', handleError); + video.addEventListener('progress', handleProgress); + + // Set the source + video.src = streamUrl; + video.load(); + + // Store cleanup function + playerRef.current = { + destroy: () => { + video.removeEventListener('loadstart', handleLoadStart); + video.removeEventListener('loadeddata', handleLoadedData); + video.removeEventListener('canplay', handleCanPlay); + video.removeEventListener('error', handleError); + video.removeEventListener('progress', handleProgress); + video.removeAttribute('src'); + video.load(); + }, + }; + }; + + // Initialize live stream player (mpegts.js) + 
const initializeLivePlayer = () => { + if (!videoRef.current || !streamUrl) return; + + setIsLoading(true); + setLoadError(null); + + console.log('Initializing live stream player for:', streamUrl); + + try { + if (!mpegts.getFeatureList().mseLivePlayback) { + setIsLoading(false); + setLoadError( + "Your browser doesn't support live video streaming. Please try Chrome or Edge." + ); + return; + } + + const player = mpegts.createPlayer({ + type: 'mpegts', + url: streamUrl, + isLive: true, + enableWorker: true, + enableStashBuffer: false, + liveBufferLatencyChasing: true, + liveSync: true, + cors: true, + autoCleanupSourceBuffer: true, + autoCleanupMaxBackwardDuration: 10, + autoCleanupMinBackwardDuration: 5, + reuseRedirectedURL: true, + }); + + player.attachMediaElement(videoRef.current); + + player.on(mpegts.Events.LOADING_COMPLETE, () => { + setIsLoading(false); + }); + + player.on(mpegts.Events.METADATA_ARRIVED, () => { + setIsLoading(false); + }); + + player.on(mpegts.Events.ERROR, (errorType, errorDetail) => { + setIsLoading(false); + + if (errorType !== 'NetworkError' || !errorDetail?.includes('aborted')) { + console.error('Player error:', errorType, errorDetail); + + let errorMessage = `Error: ${errorType}`; + + if (errorType === 'MediaError') { + const errorString = errorDetail?.toLowerCase() || ''; + + if ( + errorString.includes('audio') || + errorString.includes('ac3') || + errorString.includes('ac-3') + ) { + errorMessage = + 'Audio codec not supported by your browser. Try Chrome or Edge for better audio codec support.'; + } else if ( + errorString.includes('video') || + errorString.includes('h264') || + errorString.includes('h.264') + ) { + errorMessage = + 'Video codec not supported by your browser. Try Chrome or Edge for better video codec support.'; + } else if (errorString.includes('mse')) { + errorMessage = + "Your browser doesn't support the codecs used in this stream. Try Chrome or Edge for better compatibility."; + } else { + errorMessage = + 'Media codec not supported by your browser. This may be due to unsupported audio (AC3) or video codecs. Try Chrome or Edge.'; + } + } else if (errorDetail) { + errorMessage += ` - ${errorDetail}`; + } + + setLoadError(errorMessage); + } + }); + + player.load(); + + player.on(mpegts.Events.MEDIA_INFO, () => { + setIsLoading(false); + try { + player.play().catch((e) => { + console.log('Auto-play prevented:', e); + setLoadError('Auto-play was prevented. Click play to start.'); + }); + } catch (e) { + console.log('Error during play:', e); + setLoadError(`Playback error: ${e.message}`); + } + }); + + playerRef.current = player; + } catch (error) { + setIsLoading(false); + console.error('Error initializing player:', error); + + if ( + error.message?.includes('codec') || + error.message?.includes('format') + ) { + setLoadError( + 'Codec not supported by your browser. Please try a different browser (Chrome/Edge recommended).' 
+ ); + } else { + setLoadError(`Initialization error: ${error.message}`); + } + } }; useEffect(() => { @@ -62,95 +287,29 @@ export default function FloatingVideo() { return; } - // Check if we have an existing player and clean it up + // Clean up any existing player safeDestroyPlayer(); - // Set loading state to true when starting a new stream - setIsLoading(true); - setLoadError(null); - - // Debug log to help diagnose stream issues - console.log("Attempting to play stream:", streamUrl); - - try { - // If the browser supports MSE for live playback, initialize mpegts.js - if (mpegts.getFeatureList().mseLivePlayback) { - // Set loading flag - setIsLoading(true); - - const player = mpegts.createPlayer({ - type: 'mpegts', // MPEG-TS format - url: streamUrl, - isLive: true, - enableWorker: true, - enableStashBuffer: false, // Try disabling stash buffer for live streams - liveBufferLatencyChasing: true, - liveSync: true, - cors: true, // Enable CORS for cross-domain requests - // Add error recovery options - autoCleanupSourceBuffer: true, - autoCleanupMaxBackwardDuration: 10, - autoCleanupMinBackwardDuration: 5, - reuseRedirectedURL: true, - }); - - player.attachMediaElement(videoRef.current); - - // Add events to track loading state - player.on(mpegts.Events.LOADING_COMPLETE, () => { - setIsLoading(false); - }); - - player.on(mpegts.Events.METADATA_ARRIVED, () => { - setIsLoading(false); - }); - - // Add error event handler - player.on(mpegts.Events.ERROR, (errorType, errorDetail) => { - setIsLoading(false); - - // Filter out aborted errors - if (errorType !== 'NetworkError' || !errorDetail?.includes('aborted')) { - console.error('Player error:', errorType, errorDetail); - setLoadError(`Error: ${errorType}${errorDetail ? ` - ${errorDetail}` : ''}`); - } - }); - - player.load(); - - // Don't auto-play until we've loaded properly - player.on(mpegts.Events.MEDIA_INFO, () => { - setIsLoading(false); - try { - player.play().catch(e => { - console.log("Auto-play prevented:", e); - setLoadError("Auto-play was prevented. Click play to start."); - }); - } catch (e) { - console.log("Error during play:", e); - setLoadError(`Playback error: ${e.message}`); - } - }); - - // Store player instance so we can clean up later - playerRef.current = player; - } - } catch (error) { - setIsLoading(false); - setLoadError(`Initialization error: ${error.message}`); - console.error("Error initializing player:", error); + // Initialize the appropriate player based on content type + if (contentType === 'vod') { + initializeVODPlayer(); + } else { + initializeLivePlayer(); } // Cleanup when component unmounts or streamUrl changes return () => { safeDestroyPlayer(); }; - }, [isVisible, streamUrl]); + }, [isVisible, streamUrl, contentType]); // Modified hideVideo handler to clean up player first - const handleClose = () => { + const handleClose = (e) => { + if (e) { + e.stopPropagation(); + e.preventDefault(); + } safeDestroyPlayer(); - // Small delay before hiding the video component to ensure cleanup is complete setTimeout(() => { hideVideo(); }, 50); @@ -178,20 +337,99 @@ export default function FloatingVideo() { }} > {/* Simple header row with a close button */} - - + + e.stopPropagation()} + onTouchStart={(e) => e.stopPropagation()} + style={{ + minHeight: '32px', + minWidth: '32px', + cursor: 'pointer', + touchAction: 'manipulation', + }} + /> {/* Video container with relative positioning for the overlay */} - - {/* The