Mirror of https://github.com/Dispatcharr/Dispatcharr.git (synced 2026-01-24 03:05:13 +00:00)

Compare commits

No commits in common. "main" and "v0.6.2" have entirely different histories.

326 changed files with 8682 additions and 70765 deletions
@@ -31,4 +31,3 @@
LICENSE
README.md
data/
docker/data/
214  .github/workflows/base-image.yml (vendored)
@@ -2,37 +2,42 @@ name: Base Image Build

on:
  push:
    branches: [main, dev]
    branches: [ main, dev ]
    paths:
      - 'docker/DispatcharrBase'
      - '.github/workflows/base-image.yml'
      - 'requirements.txt'
  pull_request:
    branches: [main, dev]
    branches: [ main, dev ]
    paths:
      - 'docker/DispatcharrBase'
      - '.github/workflows/base-image.yml'
      - 'requirements.txt'
  workflow_dispatch: # Allow manual triggering

permissions:
  contents: write # For managing releases and pushing tags
  packages: write # For publishing to GitHub Container Registry

jobs:
  prepare:
    runs-on: ubuntu-24.04
    outputs:
      repo_owner: ${{ steps.meta.outputs.repo_owner }}
      repo_name: ${{ steps.meta.outputs.repo_name }}
      branch_tag: ${{ steps.meta.outputs.branch_tag }}
      timestamp: ${{ steps.timestamp.outputs.timestamp }}
  build-base-image:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Generate timestamp for build
        id: timestamp
        run: |
@@ -61,190 +66,13 @@ jobs:
          echo "branch_tag=base-${BRANCH}" >> $GITHUB_OUTPUT
          fi

  docker:
    needs: [prepare]
    strategy:
      fail-fast: false
      matrix:
        platform: [amd64, arm64]
        include:
          - platform: amd64
            runner: ubuntu-24.04
          - platform: arm64
            runner: ubuntu-24.04-arm
    runs-on: ${{ matrix.runner }}
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Configure Git
        run: |
          git config user.name "GitHub Actions"
          git config user.email "actions@github.com"

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          registry: docker.io
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Extract metadata for Docker
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: |
            ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}
          labels: |
            org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}
            org.opencontainers.image.description=Your ultimate IPTV & stream Management companion.
            org.opencontainers.image.url=https://github.com/${{ github.repository }}
            org.opencontainers.image.source=https://github.com/${{ github.repository }}
            org.opencontainers.image.version=${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}
            org.opencontainers.image.created=${{ needs.prepare.outputs.timestamp }}
            org.opencontainers.image.revision=${{ github.sha }}
            org.opencontainers.image.licenses=See repository
            org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/
            org.opencontainers.image.vendor=${{ needs.prepare.outputs.repo_owner }}
            org.opencontainers.image.authors=${{ github.actor }}
            maintainer=${{ github.actor }}
            build_version=DispatcharrBase version: ${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}

      - name: Build and push Docker base image
        uses: docker/build-push-action@v4
        with:
          context: .
          file: ./docker/DispatcharrBase
          push: ${{ github.event_name != 'pull_request' }}
          platforms: linux/${{ matrix.platform }}
          push: true
          platforms: linux/amd64,linux/arm64
          tags: |
            ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }}
            ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }}
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: |
            REPO_OWNER=${{ needs.prepare.outputs.repo_owner }}
            REPO_NAME=${{ needs.prepare.outputs.repo_name }}
            BRANCH=${{ github.ref_name }}
            REPO_URL=https://github.com/${{ github.repository }}
            TIMESTAMP=${{ needs.prepare.outputs.timestamp }}

  create-manifest:
    needs: [prepare, docker]
    runs-on: ubuntu-24.04
    if: ${{ github.event_name != 'pull_request' }}
    steps:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          registry: docker.io
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Create multi-arch manifest tags
        run: |
          set -euo pipefail
          OWNER=${{ needs.prepare.outputs.repo_owner }}
          REPO=${{ needs.prepare.outputs.repo_name }}
          BRANCH_TAG=${{ needs.prepare.outputs.branch_tag }}
          TIMESTAMP=${{ needs.prepare.outputs.timestamp }}

          echo "Creating multi-arch manifest for ${OWNER}/${REPO}"

          # GitHub Container Registry manifests
          # branch tag (e.g. base or base-dev)
          docker buildx imagetools create \
            --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
            --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
            --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
            --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
            --annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \
            --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
            --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
            --annotation "index:org.opencontainers.image.licenses=See repository" \
            --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
            --annotation "index:org.opencontainers.image.vendor=${OWNER}" \
            --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
            --annotation "index:maintainer=${{ github.actor }}" \
            --annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \
            --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \
            ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-arm64

          # branch + timestamp tag
          docker buildx imagetools create \
            --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
            --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
            --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
            --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
            --annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \
            --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
            --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
            --annotation "index:org.opencontainers.image.licenses=See repository" \
            --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
            --annotation "index:org.opencontainers.image.vendor=${OWNER}" \
            --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
            --annotation "index:maintainer=${{ github.actor }}" \
            --annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \
            --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \
            ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-arm64

          # Docker Hub manifests
          # branch tag (e.g. base or base-dev)
          docker buildx imagetools create \
            --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
            --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
            --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
            --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
            --annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \
            --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
            --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
            --annotation "index:org.opencontainers.image.licenses=See repository" \
            --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
            --annotation "index:org.opencontainers.image.vendor=${OWNER}" \
            --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
            --annotation "index:maintainer=${{ github.actor }}" \
            --annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \
            --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-arm64

          # branch + timestamp tag
          docker buildx imagetools create \
            --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
            --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
            --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
            --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
            --annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \
            --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
            --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
            --annotation "index:org.opencontainers.image.licenses=See repository" \
            --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
            --annotation "index:org.opencontainers.image.vendor=${OWNER}" \
            --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
            --annotation "index:maintainer=${{ github.actor }}" \
            --annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \
            --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-arm64
            ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:base
            ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:base-${{ steps.timestamp.outputs.timestamp }}
277  .github/workflows/ci.yml (vendored)
@@ -2,86 +2,19 @@ name: CI Pipeline

on:
  push:
    branches: [dev]
    paths-ignore:
      - '**.md'
    branches: [ dev ]
  pull_request:
    branches: [dev]
  workflow_dispatch:
    branches: [ dev ]
  workflow_dispatch: # Allow manual triggering

# Add explicit permissions for the workflow
permissions:
  contents: write
  packages: write
  contents: write # For managing releases and pushing tags
  packages: write # For publishing to GitHub Container Registry

jobs:
  prepare:
    runs-on: ubuntu-24.04
    # compute a single timestamp, version, and repo metadata for the entire workflow
    outputs:
      repo_owner: ${{ steps.meta.outputs.repo_owner }}
      repo_name: ${{ steps.meta.outputs.repo_name }}
      branch_tag: ${{ steps.meta.outputs.branch_tag }}
      version: ${{ steps.version.outputs.version }}
      timestamp: ${{ steps.timestamp.outputs.timestamp }}

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Generate timestamp for build
        id: timestamp
        run: |
          TIMESTAMP=$(date -u +'%Y%m%d%H%M%S')
          echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT

      - name: Extract version info
        id: version
        run: |
          VERSION=$(python -c "import version; print(version.__version__)")
          echo "version=${VERSION}" >> $GITHUB_OUTPUT
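The `Extract version info` step above imports a top-level `version` module and reads its `__version__` attribute. For orientation, a minimal sketch of what such a module looks like (an assumption — the repository's real `version.py` may carry additional metadata):

```
# version.py (hypothetical minimal shape, at the repository root)
__version__ = "0.6.2"  # example value; CI reads it via `python -c "import version; ..."`
```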
      - name: Set repository and image metadata
        id: meta
        run: |
          REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')
          echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT

          REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]')
          echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT

          if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
            echo "branch_tag=latest" >> $GITHUB_OUTPUT
            echo "is_main=true" >> $GITHUB_OUTPUT
          elif [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
            echo "branch_tag=dev" >> $GITHUB_OUTPUT
            echo "is_main=false" >> $GITHUB_OUTPUT
          else
            BRANCH=$(echo "${{ github.ref }}" | sed 's/refs\/heads\///' | sed 's/[^a-zA-Z0-9]/-/g')
            echo "branch_tag=${BRANCH}" >> $GITHUB_OUTPUT
            echo "is_main=false" >> $GITHUB_OUTPUT
          fi

          if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then
            echo "is_fork=true" >> $GITHUB_OUTPUT
          else
            echo "is_fork=false" >> $GITHUB_OUTPUT
          fi

  docker:
    needs: [prepare]
    strategy:
      fail-fast: false
      matrix:
        platform: [amd64, arm64]
        include:
          - platform: amd64
            runner: ubuntu-24.04
          - platform: arm64
            runner: ubuntu-24.04-arm
    runs-on: ${{ matrix.runner }}
    # no per-job outputs here; shared metadata comes from the `prepare` job
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:

@@ -112,162 +45,66 @@ jobs:
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          registry: docker.io
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Generate timestamp for build
        id: timestamp
        run: |
          TIMESTAMP=$(date -u +'%Y%m%d%H%M%S')
          echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT

      - name: Extract metadata for Docker
      - name: Extract version info
        id: version
        run: |
          VERSION=$(python -c "import version; print(version.__version__)")
          echo "version=${VERSION}" >> $GITHUB_OUTPUT
          echo "sha_short=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT

      - name: Set repository and image metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: |
            ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}
          labels: |
            org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}
            org.opencontainers.image.description=Your ultimate IPTV & stream Management companion.
            org.opencontainers.image.url=https://github.com/${{ github.repository }}
            org.opencontainers.image.source=https://github.com/${{ github.repository }}
            org.opencontainers.image.version=${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}
            org.opencontainers.image.created=${{ needs.prepare.outputs.timestamp }}
            org.opencontainers.image.revision=${{ github.sha }}
            org.opencontainers.image.licenses=See repository
            org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/
            org.opencontainers.image.vendor=${{ needs.prepare.outputs.repo_owner }}
            org.opencontainers.image.authors=${{ github.actor }}
            maintainer=${{ github.actor }}
            build_version=Dispatcharr version: ${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}
        run: |
          # Get lowercase repository owner
          REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')
          echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT

          # Get repository name
          REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]')
          echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT

          # Determine branch name
          if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
            echo "branch_tag=latest" >> $GITHUB_OUTPUT
            echo "is_main=true" >> $GITHUB_OUTPUT
          elif [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
            echo "branch_tag=dev" >> $GITHUB_OUTPUT
            echo "is_main=false" >> $GITHUB_OUTPUT
          else
            # For other branches, use the branch name
            BRANCH=$(echo "${{ github.ref }}" | sed 's/refs\/heads\///' | sed 's/[^a-zA-Z0-9]/-/g')
            echo "branch_tag=${BRANCH}" >> $GITHUB_OUTPUT
            echo "is_main=false" >> $GITHUB_OUTPUT
          fi

          # Determine if this is from a fork
          if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then
            echo "is_fork=true" >> $GITHUB_OUTPUT
          else
            echo "is_fork=false" >> $GITHUB_OUTPUT
          fi

      - name: Build and push Docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          push: ${{ github.event_name != 'pull_request' }}
          # Build only the platform for this matrix job to avoid running amd64
          # stages under qemu on an arm64 runner (and vice-versa). This makes
          # the matrix runner's platform the one built by buildx.
          platforms: linux/${{ matrix.platform }}
          # push arch-specific tags from each matrix job (they will be combined
          # into a multi-arch manifest in a follow-up job)
          platforms: linux/amd64,linux/arm64
          tags: |
            ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }}
            ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }}
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
          labels: ${{ steps.meta.outputs.labels }}
            ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.meta.outputs.branch_tag }}
            ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.version.outputs.version }}-${{ steps.timestamp.outputs.timestamp }}
            ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.version.outputs.sha_short }}
          build-args: |
            REPO_OWNER=${{ needs.prepare.outputs.repo_owner }}
            REPO_NAME=${{ needs.prepare.outputs.repo_name }}
            REPO_OWNER=${{ steps.meta.outputs.repo_owner }}
            REPO_NAME=${{ steps.meta.outputs.repo_name }}
            BASE_TAG=base
            BRANCH=${{ github.ref_name }}
            REPO_URL=https://github.com/${{ github.repository }}
            TIMESTAMP=${{ needs.prepare.outputs.timestamp }}
            TIMESTAMP=${{ steps.timestamp.outputs.timestamp }}
          file: ./docker/Dockerfile

  create-manifest:
    # wait for prepare and all matrix builds to finish
    needs: [prepare, docker]
    runs-on: ubuntu-24.04
    if: ${{ github.event_name != 'pull_request' }}
    steps:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          registry: docker.io
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Create multi-arch manifest tags
        run: |
          set -euo pipefail
          OWNER=${{ needs.prepare.outputs.repo_owner }}
          REPO=${{ needs.prepare.outputs.repo_name }}
          BRANCH_TAG=${{ needs.prepare.outputs.branch_tag }}
          VERSION=${{ needs.prepare.outputs.version }}
          TIMESTAMP=${{ needs.prepare.outputs.timestamp }}

          echo "Creating multi-arch manifest for ${OWNER}/${REPO}"

          # branch tag (e.g. latest or dev)
          docker buildx imagetools create \
            --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
            --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
            --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
            --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
            --annotation "index:org.opencontainers.image.version=${BRANCH_TAG}" \
            --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
            --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
            --annotation "index:org.opencontainers.image.licenses=See repository" \
            --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
            --annotation "index:org.opencontainers.image.vendor=${OWNER}" \
            --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
            --annotation "index:maintainer=${{ github.actor }}" \
            --annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \
            --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \
            ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-arm64

          # version + timestamp tag
          docker buildx imagetools create \
            --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
            --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
            --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
            --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
            --annotation "index:org.opencontainers.image.version=${VERSION}-${TIMESTAMP}" \
            --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
            --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
            --annotation "index:org.opencontainers.image.licenses=See repository" \
            --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
            --annotation "index:org.opencontainers.image.vendor=${OWNER}" \
            --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
            --annotation "index:maintainer=${{ github.actor }}" \
            --annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \
            --tag ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP} \
            ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP}-arm64

          # also create Docker Hub manifests using the same username
          docker buildx imagetools create \
            --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
            --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
            --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
            --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
            --annotation "index:org.opencontainers.image.version=${BRANCH_TAG}" \
            --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
            --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
            --annotation "index:org.opencontainers.image.licenses=See repository" \
            --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
            --annotation "index:org.opencontainers.image.vendor=${OWNER}" \
            --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
            --annotation "index:maintainer=${{ github.actor }}" \
            --annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \
            --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-arm64

          docker buildx imagetools create \
            --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
            --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
            --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
            --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
            --annotation "index:org.opencontainers.image.version=${VERSION}-${TIMESTAMP}" \
            --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
            --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
            --annotation "index:org.opencontainers.image.licenses=See repository" \
            --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
            --annotation "index:org.opencontainers.image.vendor=${OWNER}" \
            --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
            --annotation "index:maintainer=${{ github.actor }}" \
            --annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \
            --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP} \
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP}-arm64
41  .github/workflows/frontend-tests.yml (vendored)
@@ -1,41 +0,0 @@
name: Frontend Tests

on:
  push:
    branches: [main, dev]
    paths:
      - 'frontend/**'
      - '.github/workflows/frontend-tests.yml'
  pull_request:
    branches: [main, dev]
    paths:
      - 'frontend/**'
      - '.github/workflows/frontend-tests.yml'

jobs:
  test:
    runs-on: ubuntu-latest

    defaults:
      run:
        working-directory: ./frontend

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '24'
          cache: 'npm'
          cache-dependency-path: './frontend/package-lock.json'

      - name: Install dependencies
        run: npm ci

      # - name: Run linter
      #   run: npm run lint

      - name: Run tests
        run: npm test
203  .github/workflows/release.yml (vendored)
@@ -15,22 +15,16 @@ on:

# Add explicit permissions for the workflow
permissions:
  contents: write # For managing releases and pushing tags
  packages: write # For publishing to GitHub Container Registry

jobs:
  prepare:
    runs-on: ubuntu-24.04
    outputs:
      new_version: ${{ steps.update_version.outputs.new_version }}
      repo_owner: ${{ steps.meta.outputs.repo_owner }}
      repo_name: ${{ steps.meta.outputs.repo_name }}
      timestamp: ${{ steps.timestamp.outputs.timestamp }}
  release:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Configure Git
        run: |
@@ -44,194 +38,55 @@ jobs:
          NEW_VERSION=$(python -c "import version; print(f'{version.__version__}')")
          echo "new_version=${NEW_VERSION}" >> $GITHUB_OUTPUT

      - name: Update Changelog
        run: |
          python scripts/update_changelog.py ${{ steps.update_version.outputs.new_version }}

      - name: Set repository metadata
        id: meta
      - name: Set lowercase repo owner
        id: repo_owner
        run: |
          REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')
          echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT
          echo "lowercase=${REPO_OWNER}" >> $GITHUB_OUTPUT

          REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]')
          echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2

      - name: Generate timestamp for build
        id: timestamp
        run: |
          TIMESTAMP=$(date -u +'%Y%m%d%H%M%S')
          echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Commit and Tag
        run: |
          git add version.py CHANGELOG.md
          git add version.py
          git commit -m "Release v${{ steps.update_version.outputs.new_version }}"
          git tag -a "v${{ steps.update_version.outputs.new_version }}" -m "Release v${{ steps.update_version.outputs.new_version }}"
          git push origin main --tags

  docker:
    needs: [prepare]
    strategy:
      fail-fast: false
      matrix:
        platform: [amd64, arm64]
        include:
          - platform: amd64
            runner: ubuntu-24.04
          - platform: arm64
            runner: ubuntu-24.04-arm
    runs-on: ${{ matrix.runner }}
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          token: ${{ secrets.GITHUB_TOKEN }}
          ref: main

      - name: Configure Git
        run: |
          git config user.name "GitHub Actions"
          git config user.email "actions@github.com"

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          registry: docker.io
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Extract metadata for Docker
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: |
            ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}
          labels: |
            org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}
            org.opencontainers.image.description=Your ultimate IPTV & stream Management companion.
            org.opencontainers.image.url=https://github.com/${{ github.repository }}
            org.opencontainers.image.source=https://github.com/${{ github.repository }}
            org.opencontainers.image.version=${{ needs.prepare.outputs.new_version }}
            org.opencontainers.image.created=${{ needs.prepare.outputs.timestamp }}
            org.opencontainers.image.revision=${{ github.sha }}
            org.opencontainers.image.licenses=See repository
            org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/
            org.opencontainers.image.vendor=${{ needs.prepare.outputs.repo_owner }}
            org.opencontainers.image.authors=${{ github.actor }}
            maintainer=${{ github.actor }}
            build_version=Dispatcharr version: ${{ needs.prepare.outputs.new_version }} Build date: ${{ needs.prepare.outputs.timestamp }}

      - name: Build and push Docker image
      - name: Build and Push Release Image
        uses: docker/build-push-action@v4
        with:
          context: .
          push: true
          platforms: linux/${{ matrix.platform }}
          platforms: linux/amd64,linux/arm64, #linux/arm/v7 # Multi-arch support for releases
          tags: |
            ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:latest-${{ matrix.platform }}
            ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.new_version }}-${{ matrix.platform }}
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:latest-${{ matrix.platform }}
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.new_version }}-${{ matrix.platform }}
          labels: ${{ steps.meta.outputs.labels }}
            ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest
            ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }}
            ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest-amd64
            ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest-arm64
            ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }}-amd64
            ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }}-arm64
          build-args: |
            REPO_OWNER=${{ needs.prepare.outputs.repo_owner }}
            REPO_NAME=${{ needs.prepare.outputs.repo_name }}
            BRANCH=${{ github.ref_name }}
            REPO_URL=https://github.com/${{ github.repository }}
          file: ./docker/Dockerfile

  create-manifest:
    needs: [prepare, docker]
    runs-on: ubuntu-24.04
    steps:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          registry: docker.io
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Create multi-arch manifest tags
        run: |
          set -euo pipefail
          OWNER=${{ needs.prepare.outputs.repo_owner }}
          REPO=${{ needs.prepare.outputs.repo_name }}
          VERSION=${{ needs.prepare.outputs.new_version }}
          TIMESTAMP=${{ needs.prepare.outputs.timestamp }}

          echo "Creating multi-arch manifest for ${OWNER}/${REPO}"

          # GitHub Container Registry manifests
          # Create one manifest with both latest and version tags
          docker buildx imagetools create \
            --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
            --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
            --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
            --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
            --annotation "index:org.opencontainers.image.version=${VERSION}" \
            --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
            --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
            --annotation "index:org.opencontainers.image.licenses=See repository" \
            --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
            --annotation "index:org.opencontainers.image.vendor=${OWNER}" \
            --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
            --annotation "index:maintainer=${{ github.actor }}" \
            --annotation "index:build_version=Dispatcharr version: ${VERSION} Build date: ${TIMESTAMP}" \
            --tag ghcr.io/${OWNER}/${REPO}:latest \
            --tag ghcr.io/${OWNER}/${REPO}:${VERSION} \
            ghcr.io/${OWNER}/${REPO}:${VERSION}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-arm64

          # Docker Hub manifests
          # Create one manifest with both latest and version tags
          docker buildx imagetools create \
            --annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
            --annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
            --annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
            --annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
            --annotation "index:org.opencontainers.image.version=${VERSION}" \
            --annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
            --annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
            --annotation "index:org.opencontainers.image.licenses=See repository" \
            --annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
            --annotation "index:org.opencontainers.image.vendor=${OWNER}" \
            --annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
            --annotation "index:maintainer=${{ github.actor }}" \
            --annotation "index:build_version=Dispatcharr version: ${VERSION} Build date: ${TIMESTAMP}" \
            --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest \
            --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION} \
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-arm64

  create-release:
    needs: [prepare, create-manifest]
    runs-on: ubuntu-24.04
    steps:
      - name: Create GitHub Release
        uses: softprops/action-gh-release@v1
        with:
          tag_name: v${{ needs.prepare.outputs.new_version }}
          name: Release v${{ needs.prepare.outputs.new_version }}
          tag_name: v${{ steps.update_version.outputs.new_version }}
          name: Release v${{ steps.update_version.outputs.new_version }}
          draft: false
          prerelease: false
          token: ${{ secrets.GITHUB_TOKEN }}
3  .gitignore (vendored)
@@ -18,5 +18,4 @@ dump.rdb
debugpy*
uwsgi.sock
package-lock.json
models
.idea
models
1014  CHANGELOG.md

File diff suppressed because it is too large.
286  Plugins.md

@@ -1,286 +0,0 @@
# Dispatcharr Plugins

This document explains how to build, install, and use Python plugins in Dispatcharr. It covers discovery, the plugin interface, settings, actions, how to access application APIs, and examples.

---

## Quick Start

1) Create a folder under `/app/data/plugins/my_plugin/` (host path `data/plugins/my_plugin/` in the repo).

2) Add a `plugin.py` file exporting a `Plugin` class:

```
# /app/data/plugins/my_plugin/plugin.py
class Plugin:
    name = "My Plugin"
    version = "0.1.0"
    description = "Does something useful"

    # Settings fields rendered by the UI and persisted by the backend
    fields = [
        {"id": "enabled", "label": "Enabled", "type": "boolean", "default": True},
        {"id": "limit", "label": "Item limit", "type": "number", "default": 5},
        {"id": "mode", "label": "Mode", "type": "select", "default": "safe",
         "options": [
             {"value": "safe", "label": "Safe"},
             {"value": "fast", "label": "Fast"},
         ]},
        {"id": "note", "label": "Note", "type": "string", "default": ""},
    ]

    # Actions appear as buttons. Clicking one calls run(action, params, context)
    actions = [
        {"id": "do_work", "label": "Do Work", "description": "Process items"},
    ]

    def run(self, action: str, params: dict, context: dict):
        settings = context.get("settings", {})
        logger = context.get("logger")

        if action == "do_work":
            limit = int(settings.get("limit", 5))
            mode = settings.get("mode", "safe")
            logger.info(f"My Plugin running with limit={limit}, mode={mode}")
            # Do a small amount of work here. Schedule Celery tasks for heavy work.
            return {"status": "ok", "processed": limit, "mode": mode}

        return {"status": "error", "message": f"Unknown action {action}"}
```

3) Open the Plugins page in the UI, click the refresh icon to reload discovery, then configure and run your plugin.

---

## Where Plugins Live

- Default directory: `/app/data/plugins` inside the container.
- Override with env var: `DISPATCHARR_PLUGINS_DIR`.
- Each plugin is a directory containing either:
  - `plugin.py` exporting a `Plugin` class, or
  - a Python package (`__init__.py`) exporting a `Plugin` class.

The directory name (lowercased, spaces as `_`) is used as the registry key and module import path (e.g. `my_plugin.plugin`).

---

## Discovery & Lifecycle

- Discovery runs at server startup and on-demand when:
  - Fetching the plugins list from the UI
  - Hitting `POST /api/plugins/plugins/reload/`
- The loader imports each plugin module and instantiates `Plugin()` (see the sketch after this list).
- Metadata (name, version, description) and a per-plugin settings JSON are stored in the DB.

Backend code:
- Loader: `apps/plugins/loader.py`
- API Views: `apps/plugins/api_views.py`
- API URLs: `apps/plugins/api_urls.py`
- Model: `apps/plugins/models.py` (stores `enabled` flag and `settings` per plugin)
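A minimal sketch of that flow, for orientation only — the real loader in `apps/plugins/loader.py` also handles packages, import errors, and DB persistence, and this sketch assumes well-formed folder names without spaces:

```
import importlib
import sys
from pathlib import Path

def discover(plugins_dir: str = "/app/data/plugins") -> dict:
    sys.path.insert(0, plugins_dir)  # make "<key>.plugin" importable
    registry = {}
    for entry in Path(plugins_dir).iterdir():
        if entry.is_dir():
            key = entry.name.lower().replace(" ", "_")  # registry key
            module = importlib.import_module(f"{key}.plugin")
            registry[key] = module.Plugin()  # metadata is read off the instance
    return registry
```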
---

## Plugin Interface

Export a `Plugin` class. Supported attributes and behavior:

- `name` (str): Human-readable name.
- `version` (str): Semantic version string.
- `description` (str): Short description.
- `fields` (list): Settings schema used by the UI to render controls.
- `actions` (list): Available actions; the UI renders a Run button for each.
- `run(action, params, context)` (callable): Invoked when a user clicks an action.

### Settings Schema
Supported field `type`s:
- `boolean`
- `number`
- `string`
- `select` (requires `options`: `[{"value": ..., "label": ...}, ...]`)

Common field keys:
- `id` (str): Settings key.
- `label` (str): Label shown in the UI.
- `type` (str): One of the above.
- `default` (any): Default value used until saved.
- `help_text` (str, optional): Shown under the control.
- `options` (list, for select): List of `{value, label}`.

The UI automatically renders settings and persists them. The backend stores settings in `PluginConfig.settings`.

Read settings in `run` via `context["settings"]`.
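To make the precedence concrete, a small illustrative helper (an assumption about the behavior, not code from the app — saved values override field defaults):

```
def effective_settings(fields: list, saved: dict) -> dict:
    # start from each field's declared default, then overlay saved values
    merged = {f["id"]: f.get("default") for f in fields}
    merged.update(saved or {})
    return merged
```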
### Actions
Each action is a dict:
- `id` (str): Unique action id.
- `label` (str): Button label.
- `description` (str, optional): Helper text.

Clicking an action calls your plugin's `run(action, params, context)` and shows a notification with the result or error.

### Action Confirmation (Modal)
Developers can request a confirmation modal per action using the `confirm` key on the action. Options:

- Boolean: `confirm: true` will show a default confirmation modal.
- Object: `confirm: { required: true, title: '...', message: '...' }` to customize the modal title and message.

Example:
```
actions = [
    {
        "id": "danger_run",
        "label": "Do Something Risky",
        "description": "Runs a job that affects many records.",
        "confirm": {"required": True, "title": "Proceed?", "message": "This will modify many records."},
    }
]
```
---

## Accessing Dispatcharr APIs from Plugins

Plugins are server-side Python code running within the Django application. You can:

- Import models and run queries/updates:
```
from apps.m3u.models import M3UAccount
from apps.epg.models import EPGSource
from apps.channels.models import Channel
from core.models import CoreSettings
```

- Dispatch Celery tasks for heavy work (recommended):
```
from apps.m3u.tasks import refresh_m3u_accounts  # apps/m3u/tasks.py
from apps.epg.tasks import refresh_all_epg_data  # apps/epg/tasks.py

refresh_m3u_accounts.delay()
refresh_all_epg_data.delay()
```

- Send WebSocket updates:
```
from core.utils import send_websocket_update
send_websocket_update('updates', 'update', {"type": "plugin", "plugin": "my_plugin", "message": "Done"})
```

- Use transactions:
```
from django.db import transaction
with transaction.atomic():
    # bulk updates here
    ...
```

- Log via provided context or standard logging:
```
def run(self, action, params, context):
    logger = context.get("logger")  # already configured
    logger.info("running action %s", action)
```

Prefer Celery tasks (`.delay()`) to keep `run` fast and non-blocking.

---

## REST Endpoints (for UI and tooling)

- List plugins: `GET /api/plugins/plugins/`
  - Response: `{ "plugins": [{ key, name, version, description, enabled, fields, settings, actions }, ...] }`
- Reload discovery: `POST /api/plugins/plugins/reload/`
- Import plugin: `POST /api/plugins/plugins/import/` with form-data file field `file`
- Update settings: `POST /api/plugins/plugins/<key>/settings/` with `{"settings": {...}}`
- Run action: `POST /api/plugins/plugins/<key>/run/` with `{"action": "id", "params": {...}}`
- Enable/disable: `POST /api/plugins/plugins/<key>/enabled/` with `{"enabled": true|false}`

Notes:
- When disabled, a plugin cannot run actions; the backend returns HTTP 403 (see the example client call after this list).
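For example, a hypothetical client using the third-party `requests` library — the host, port, and token below are placeholders, and only the endpoint paths come from the list above:

```
import requests

BASE = "http://localhost:9191"                  # placeholder host/port
HEADERS = {"Authorization": "Bearer <token>"}   # placeholder credential

# enable a plugin, then run one of its actions
requests.post(f"{BASE}/api/plugins/plugins/my_plugin/enabled/",
              json={"enabled": True}, headers=HEADERS)
resp = requests.post(f"{BASE}/api/plugins/plugins/my_plugin/run/",
                     json={"action": "do_work", "params": {}}, headers=HEADERS)
print(resp.status_code, resp.json())  # 403 here means the plugin is disabled
```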
---

## Importing Plugins

- In the UI, click the Import button on the Plugins page and upload a `.zip` containing a plugin folder (a packaging sketch follows this list).
- The archive should contain either `plugin.py` or a Python package (`__init__.py`).
- On success, the UI shows the plugin name/description and lets you enable it immediately (plugins are disabled by default).
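As an illustration of that layout, one way to build such an archive with the standard-library `zipfile` module (paths are examples):

```
import zipfile

# package the plugin so "my_plugin/plugin.py" sits at the archive root
with zipfile.ZipFile("my_plugin.zip", "w") as zf:
    zf.write("data/plugins/my_plugin/plugin.py", "my_plugin/plugin.py")
```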
---

## Enabling / Disabling Plugins

- Each plugin has a persisted `enabled` flag (default: disabled) and an `ever_enabled` flag in the DB (`apps/plugins/models.py`).
- New plugins are disabled by default and require an explicit enable.
- The first time a plugin is enabled, the UI shows a trust warning modal explaining that plugins can run arbitrary server-side code.
- The Plugins page shows a toggle in the card header. Turning it off dims the card and disables the Run button.
- Backend enforcement: attempts to run an action for a disabled plugin return HTTP 403.

---

## Example: Refresh All Sources Plugin

Path: `data/plugins/refresh_all/plugin.py`

```
class Plugin:
    name = "Refresh All Sources"
    version = "1.0.0"
    description = "Force refresh all M3U accounts and EPG sources."

    fields = [
        {"id": "confirm", "label": "Require confirmation", "type": "boolean", "default": True,
         "help_text": "If enabled, the UI should ask before running."}
    ]

    actions = [
        {"id": "refresh_all", "label": "Refresh All M3Us and EPGs",
         "description": "Queues background refresh for all active M3U accounts and EPG sources."}
    ]

    def run(self, action: str, params: dict, context: dict):
        if action == "refresh_all":
            from apps.m3u.tasks import refresh_m3u_accounts
            from apps.epg.tasks import refresh_all_epg_data
            refresh_m3u_accounts.delay()
            refresh_all_epg_data.delay()
            return {"status": "queued", "message": "Refresh jobs queued"}
        return {"status": "error", "message": f"Unknown action: {action}"}
```

---

## Best Practices

- Keep `run` short and schedule heavy operations via Celery tasks.
- Validate and sanitize `params` received from the UI (see the sketch after this list).
- Use database transactions for bulk or related updates.
- Log actionable messages for troubleshooting.
- Only write files under `/data` or `/app/data` paths.
- Treat plugins as trusted code: they run with full app permissions.
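For the validation point above, a small illustrative pattern (names and bounds are arbitrary, not from the app):

```
def run(self, action, params, context):
    logger = context.get("logger")
    try:
        limit = int(params.get("limit", 10))
    except (TypeError, ValueError):
        return {"status": "error", "message": "limit must be an integer"}
    if not 1 <= limit <= 1000:
        return {"status": "error", "message": "limit out of range"}
    logger.info("validated limit=%d for action %s", limit, action)
    return {"status": "ok", "limit": limit}
```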
---

## Troubleshooting

- Plugin not listed: ensure the folder exists and contains `plugin.py` with a `Plugin` class.
- Import errors: the folder name is the import name; avoid spaces or exotic characters.
- No confirmation: include a boolean field with `id: "confirm"` and set it to true or default true.
- HTTP 403 on run: the plugin is disabled; enable it from the toggle or via the `enabled/` endpoint.

---

## Contributing

- Keep dependencies minimal. Vendoring small helpers into the plugin folder is acceptable.
- Use the existing task and model APIs where possible; propose extensions if you need new capabilities.

---

## Internals Reference

- Loader: `apps/plugins/loader.py`
- API Views: `apps/plugins/api_views.py`
- API URLs: `apps/plugins/api_urls.py`
- Model: `apps/plugins/models.py`
- Frontend page: `frontend/src/pages/Plugins.jsx`
- Sidebar entry: `frontend/src/components/Sidebar.jsx`
@@ -22,7 +22,6 @@ Dispatcharr has officially entered **BETA**, bringing powerful new features and
📊 **Real-Time Stats Dashboard** — Live insights into stream health and client activity\
🧠 **EPG Auto-Match** — Match program data to channels automatically\
⚙️ **Streamlink + FFmpeg Support** — Flexible backend options for streaming and recording\
🎬 **VOD Management** — Full Video on Demand support with movies and TV series\
🧼 **UI & UX Enhancements** — Smoother, faster, more responsive interface\
🛁 **Output Compatibility** — HDHomeRun, M3U, and XMLTV EPG support for Plex, Jellyfin, and more

@@ -32,7 +31,6 @@ Dispatcharr has officially entered **BETA**, bringing powerful new features and

✅ **Full IPTV Control** — Import, organize, proxy, and monitor IPTV streams on your own terms\
✅ **Smart Playlist Handling** — M3U import, filtering, grouping, and failover support\
✅ **VOD Content Management** — Organize movies and TV series with metadata and streaming\
✅ **Reliable EPG Integration** — Match and manage TV guide data with ease\
✅ **Clean & Responsive Interface** — Modern design that gets out of your way\
✅ **Fully Self-Hosted** — Total control, zero reliance on third-party services
@@ -20,88 +20,30 @@ class TokenObtainPairView(TokenObtainPairView):
    def post(self, request, *args, **kwargs):
        # Custom logic here
        if not network_access_allowed(request, "UI"):
            # Log blocked login attempt due to network restrictions
            from core.utils import log_system_event
            username = request.data.get("username", 'unknown')
            client_ip = request.META.get('REMOTE_ADDR', 'unknown')
            user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')
            log_system_event(
                event_type='login_failed',
                user=username,
                client_ip=client_ip,
                user_agent=user_agent,
                reason='Network access denied',
            )
            return Response({"error": "Forbidden"}, status=status.HTTP_403_FORBIDDEN)

        # Get the response from the parent class first
        username = request.data.get("username")
        response = super().post(request, *args, **kwargs)

        # Log login attempt
        from core.utils import log_system_event
        client_ip = request.META.get('REMOTE_ADDR', 'unknown')
        user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')
        # If login was successful, update last_login
        if response.status_code == 200:
            username = request.data.get("username")
            if username:
                from django.utils import timezone
                try:
                    user = User.objects.get(username=username)
                    user.last_login = timezone.now()
                    user.save(update_fields=['last_login'])
                except User.DoesNotExist:
                    pass  # User doesn't exist, but login somehow succeeded

        try:
            response = super().post(request, *args, **kwargs)

            # If login was successful, update last_login and log success
            if response.status_code == 200:
                if username:
                    from django.utils import timezone
                    try:
                        user = User.objects.get(username=username)
                        user.last_login = timezone.now()
                        user.save(update_fields=['last_login'])

                        # Log successful login
                        log_system_event(
                            event_type='login_success',
                            user=username,
                            client_ip=client_ip,
                            user_agent=user_agent,
                        )
                    except User.DoesNotExist:
                        pass  # User doesn't exist, but login somehow succeeded
            else:
                # Log failed login attempt
                log_system_event(
                    event_type='login_failed',
                    user=username or 'unknown',
                    client_ip=client_ip,
                    user_agent=user_agent,
                    reason='Invalid credentials',
                )

            return response

        except Exception as e:
            # If parent class raises an exception (e.g., validation error), log failed attempt
            log_system_event(
                event_type='login_failed',
                user=username or 'unknown',
                client_ip=client_ip,
                user_agent=user_agent,
                reason=f'Authentication error: {str(e)[:100]}',
            )
            raise  # Re-raise the exception to maintain normal error flow
        return response


class TokenRefreshView(TokenRefreshView):
    def post(self, request, *args, **kwargs):
        # Custom logic here
        if not network_access_allowed(request, "UI"):
            # Log blocked token refresh attempt due to network restrictions
            from core.utils import log_system_event
            client_ip = request.META.get('REMOTE_ADDR', 'unknown')
            user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')
            log_system_event(
                event_type='login_failed',
                user='token_refresh',
                client_ip=client_ip,
                user_agent=user_agent,
                reason='Network access denied (token refresh)',
            )
            return Response({"error": "Unauthorized"}, status=status.HTTP_403_FORBIDDEN)

        return super().post(request, *args, **kwargs)

@@ -138,15 +80,6 @@ def initialize_superuser(request):
class AuthViewSet(viewsets.ViewSet):
    """Handles user login and logout"""

    def get_permissions(self):
        """
        Login doesn't require auth, but logout does
        """
        if self.action == 'logout':
            from rest_framework.permissions import IsAuthenticated
            return [IsAuthenticated()]
        return []

    @swagger_auto_schema(
        operation_description="Authenticate and log in a user",
        request_body=openapi.Schema(

@@ -167,11 +100,6 @@ class AuthViewSet(viewsets.ViewSet):
        password = request.data.get("password")
        user = authenticate(request, username=username, password=password)

        # Get client info for logging
        from core.utils import log_system_event
        client_ip = request.META.get('REMOTE_ADDR', 'unknown')
        user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')

        if user:
            login(request, user)
            # Update last_login timestamp

@@ -179,14 +107,6 @@ class AuthViewSet(viewsets.ViewSet):
            user.last_login = timezone.now()
            user.save(update_fields=['last_login'])

            # Log successful login
            log_system_event(
                event_type='login_success',
                user=username,
                client_ip=client_ip,
                user_agent=user_agent,
            )

            return Response(
                {
                    "message": "Login successful",

@@ -198,15 +118,6 @@ class AuthViewSet(viewsets.ViewSet):
                },
            }
        )

        # Log failed login attempt
        log_system_event(
            event_type='login_failed',
            user=username or 'unknown',
            client_ip=client_ip,
            user_agent=user_agent,
            reason='Invalid credentials',
        )
        return Response({"error": "Invalid credentials"}, status=400)

    @swagger_auto_schema(

@@ -215,19 +126,6 @@ class AuthViewSet(viewsets.ViewSet):
    )
    def logout(self, request):
        """Logs out the authenticated user"""
        # Log logout event before actually logging out
        from core.utils import log_system_event
        username = request.user.username if request.user and request.user.is_authenticated else 'unknown'
        client_ip = request.META.get('REMOTE_ADDR', 'unknown')
        user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')

        log_system_event(
            event_type='logout',
            user=username,
            client_ip=client_ip,
            user_agent=user_agent,
        )

        logout(request)
        return Response({"message": "Logout successful"})

@@ -236,7 +134,7 @@ class AuthViewSet(viewsets.ViewSet):
class UserViewSet(viewsets.ModelViewSet):
    """Handles CRUD operations for Users"""

    queryset = User.objects.all().prefetch_related('channel_profiles')
    queryset = User.objects.all()
    serializer_class = UserSerializer

    def get_permissions(self):
@@ -1,18 +0,0 @@
# Generated by Django 5.2.4 on 2025-09-02 14:30

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('accounts', '0002_remove_user_channel_groups_user_channel_profiles_and_more'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='custom_properties',
            field=models.JSONField(blank=True, default=dict, null=True),
        ),
    ]
@@ -21,7 +21,7 @@ class User(AbstractUser):
        related_name="users",
    )
    user_level = models.IntegerField(default=UserLevel.STREAMER)
    custom_properties = models.JSONField(default=dict, blank=True, null=True)
    custom_properties = models.TextField(null=True, blank=True)

    def __str__(self):
        return self.username
@@ -1,10 +1,11 @@
from django.urls import path, include, re_path
from django.urls import path, include
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from rest_framework.permissions import AllowAny

app_name = 'api'

# Configure Swagger Schema
schema_view = get_schema_view(
    openapi.Info(
        title="Dispatcharr API",

@@ -25,9 +26,6 @@ urlpatterns = [
    path('hdhr/', include(('apps.hdhr.api_urls', 'hdhr'), namespace='hdhr')),
    path('m3u/', include(('apps.m3u.api_urls', 'm3u'), namespace='m3u')),
    path('core/', include(('core.api_urls', 'core'), namespace='core')),
    path('plugins/', include(('apps.plugins.api_urls', 'plugins'), namespace='plugins')),
    path('vod/', include(('apps.vod.api_urls', 'vod'), namespace='vod')),
    path('backups/', include(('apps.backups.api_urls', 'backups'), namespace='backups')),
    # path('output/', include(('apps.output.api_urls', 'output'), namespace='output')),
    #path('player/', include(('apps.player.api_urls', 'player'), namespace='player')),
    #path('settings/', include(('apps.settings.api_urls', 'settings'), namespace='settings')),

@@ -36,7 +34,7 @@ urlpatterns = [


    # Swagger Documentation api_urls
    re_path(r'^swagger/?$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
    path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
    path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
    path('swagger.json', schema_view.without_ui(cache_timeout=0), name='schema-json'),
]
@@ -1,18 +0,0 @@
from django.urls import path

from . import api_views

app_name = "backups"

urlpatterns = [
    path("", api_views.list_backups, name="backup-list"),
    path("create/", api_views.create_backup, name="backup-create"),
    path("upload/", api_views.upload_backup, name="backup-upload"),
    path("schedule/", api_views.get_schedule, name="backup-schedule-get"),
    path("schedule/update/", api_views.update_schedule, name="backup-schedule-update"),
    path("status/<str:task_id>/", api_views.backup_status, name="backup-status"),
    path("<str:filename>/download-token/", api_views.get_download_token, name="backup-download-token"),
    path("<str:filename>/download/", api_views.download_backup, name="backup-download"),
    path("<str:filename>/delete/", api_views.delete_backup, name="backup-delete"),
    path("<str:filename>/restore/", api_views.restore_backup, name="backup-restore"),
]
@ -1,364 +0,0 @@
|
|||
import hashlib
|
||||
import hmac
|
||||
import logging
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
from celery.result import AsyncResult
|
||||
from django.conf import settings
|
||||
from django.http import HttpResponse, StreamingHttpResponse, Http404
|
||||
from rest_framework import status
|
||||
from rest_framework.decorators import api_view, permission_classes, parser_classes
|
||||
from rest_framework.permissions import IsAdminUser, AllowAny
|
||||
from rest_framework.parsers import MultiPartParser, FormParser
|
||||
from rest_framework.response import Response
|
||||
|
||||
from . import services
|
||||
from .tasks import create_backup_task, restore_backup_task
|
||||
from .scheduler import get_schedule_settings, update_schedule_settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _generate_task_token(task_id: str) -> str:
|
||||
"""Generate a signed token for task status access without auth."""
|
||||
secret = settings.SECRET_KEY.encode()
|
||||
return hmac.new(secret, task_id.encode(), hashlib.sha256).hexdigest()[:32]
|
||||
|
||||
|
||||
def _verify_task_token(task_id: str, token: str) -> bool:
|
||||
"""Verify a task token is valid."""
|
||||
expected = _generate_task_token(task_id)
|
||||
return hmac.compare_digest(expected, token)
|
||||
|
||||
|
||||
@api_view(["GET"])
|
||||
@permission_classes([IsAdminUser])
|
||||
def list_backups(request):
|
||||
"""List all available backup files."""
|
||||
try:
|
||||
backups = services.list_backups()
|
||||
return Response(backups, status=status.HTTP_200_OK)
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Failed to list backups: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
|
||||
|
||||
@api_view(["POST"])
|
||||
@permission_classes([IsAdminUser])
|
||||
def create_backup(request):
|
||||
"""Create a new backup (async via Celery)."""
|
||||
try:
|
||||
task = create_backup_task.delay()
|
||||
return Response(
|
||||
{
|
||||
"detail": "Backup started",
|
||||
"task_id": task.id,
|
||||
"task_token": _generate_task_token(task.id),
|
||||
},
|
||||
status=status.HTTP_202_ACCEPTED,
|
||||
)
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Failed to start backup: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
|
||||
|
||||
@api_view(["GET"])
|
||||
@permission_classes([AllowAny])
|
||||
def backup_status(request, task_id):
|
||||
"""Check the status of a backup/restore task.
|
||||
|
||||
Requires either:
|
||||
- Valid admin authentication, OR
|
||||
- Valid task_token query parameter
|
||||
"""
|
||||
# Check for token-based auth (for restore when session is invalidated)
|
||||
token = request.query_params.get("token")
|
||||
if token:
|
||||
if not _verify_task_token(task_id, token):
|
||||
return Response(
|
||||
{"detail": "Invalid task token"},
|
||||
status=status.HTTP_403_FORBIDDEN,
|
||||
)
|
||||
else:
|
||||
# Fall back to admin auth check
|
||||
if not request.user.is_authenticated or not request.user.is_staff:
|
||||
return Response(
|
||||
{"detail": "Authentication required"},
|
||||
status=status.HTTP_401_UNAUTHORIZED,
|
||||
)
|
||||
|
||||
try:
|
||||
result = AsyncResult(task_id)
|
||||
|
||||
if result.ready():
|
||||
task_result = result.get()
|
||||
if task_result.get("status") == "completed":
|
||||
return Response({
|
||||
"state": "completed",
|
||||
"result": task_result,
|
||||
})
|
||||
else:
|
||||
return Response({
|
||||
"state": "failed",
|
||||
"error": task_result.get("error", "Unknown error"),
|
||||
})
|
||||
elif result.failed():
|
||||
return Response({
|
||||
"state": "failed",
|
||||
"error": str(result.result),
|
||||
})
|
||||
else:
|
||||
return Response({
|
||||
"state": result.state.lower(),
|
||||
})
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Failed to get task status: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
|
||||
|
||||
@api_view(["GET"])
|
||||
@permission_classes([IsAdminUser])
|
||||
def get_download_token(request, filename):
|
||||
"""Get a signed token for downloading a backup file."""
|
||||
try:
|
||||
# Security: prevent path traversal
|
||||
if ".." in filename or "/" in filename or "\\" in filename:
|
||||
raise Http404("Invalid filename")
|
||||
|
||||
backup_dir = services.get_backup_dir()
|
||||
backup_file = backup_dir / filename
|
||||
|
||||
if not backup_file.exists():
|
||||
raise Http404("Backup file not found")
|
||||
|
||||
token = _generate_task_token(filename)
|
||||
return Response({"token": token})
|
||||
except Http404:
|
||||
raise
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Failed to generate token: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
|
||||
|
||||
@api_view(["GET"])
|
||||
@permission_classes([AllowAny])
|
||||
def download_backup(request, filename):
|
||||
"""Download a backup file.
|
||||
|
||||
Requires either:
|
||||
- Valid admin authentication, OR
|
||||
- Valid download_token query parameter
|
||||
"""
|
||||
# Check for token-based auth (avoids CORS preflight issues)
|
||||
token = request.query_params.get("token")
|
||||
if token:
|
||||
if not _verify_task_token(filename, token):
|
||||
return Response(
|
||||
{"detail": "Invalid download token"},
|
||||
status=status.HTTP_403_FORBIDDEN,
|
||||
)
|
||||
else:
|
||||
# Fall back to admin auth check
|
||||
if not request.user.is_authenticated or not request.user.is_staff:
|
||||
return Response(
|
||||
{"detail": "Authentication required"},
|
||||
status=status.HTTP_401_UNAUTHORIZED,
|
||||
)
|
||||
|
||||
try:
|
||||
# Security: prevent path traversal by checking for suspicious characters
|
||||
if ".." in filename or "/" in filename or "\\" in filename:
|
||||
raise Http404("Invalid filename")
|
||||
|
||||
backup_dir = services.get_backup_dir()
|
||||
backup_file = (backup_dir / filename).resolve()
|
||||
|
||||
# Security: ensure the resolved path is still within backup_dir
|
||||
if not str(backup_file).startswith(str(backup_dir.resolve())):
|
||||
raise Http404("Invalid filename")
|
||||
|
||||
if not backup_file.exists() or not backup_file.is_file():
|
||||
raise Http404("Backup file not found")
|
||||
|
||||
file_size = backup_file.stat().st_size
|
||||
|
||||
# Use X-Accel-Redirect for nginx (AIO container) - nginx serves file directly
|
||||
# Fall back to streaming for non-nginx deployments
|
||||
use_nginx_accel = os.environ.get("USE_NGINX_ACCEL", "").lower() == "true"
|
||||
logger.info(f"[DOWNLOAD] File: {filename}, Size: {file_size}, USE_NGINX_ACCEL: {use_nginx_accel}")
|
||||
|
||||
if use_nginx_accel:
|
||||
# X-Accel-Redirect: Django returns immediately, nginx serves file
|
||||
logger.info(f"[DOWNLOAD] Using X-Accel-Redirect: /protected-backups/{filename}")
|
||||
response = HttpResponse()
|
||||
response["X-Accel-Redirect"] = f"/protected-backups/{filename}"
|
||||
response["Content-Type"] = "application/zip"
|
||||
response["Content-Length"] = file_size
|
||||
response["Content-Disposition"] = f'attachment; filename="{filename}"'
|
||||
return response
|
||||
else:
|
||||
# Streaming fallback for non-nginx deployments
|
||||
logger.info(f"[DOWNLOAD] Using streaming fallback (no nginx)")
|
||||
def file_iterator(file_path, chunk_size=2 * 1024 * 1024):
|
||||
with open(file_path, "rb") as f:
|
||||
while chunk := f.read(chunk_size):
|
||||
yield chunk
|
||||
|
||||
response = StreamingHttpResponse(
|
||||
file_iterator(backup_file),
|
||||
content_type="application/zip",
|
||||
)
|
||||
response["Content-Length"] = file_size
|
||||
response["Content-Disposition"] = f'attachment; filename="{filename}"'
|
||||
return response
|
||||
except Http404:
|
||||
raise
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Download failed: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
|
||||
|
||||
@api_view(["DELETE"])
|
||||
@permission_classes([IsAdminUser])
|
||||
def delete_backup(request, filename):
|
||||
"""Delete a backup file."""
|
||||
try:
|
||||
# Security: prevent path traversal
|
||||
if ".." in filename or "/" in filename or "\\" in filename:
|
||||
raise Http404("Invalid filename")
|
||||
|
||||
services.delete_backup(filename)
|
||||
return Response(
|
||||
{"detail": "Backup deleted successfully"},
|
||||
status=status.HTTP_204_NO_CONTENT,
|
||||
)
|
||||
except FileNotFoundError:
|
||||
raise Http404("Backup file not found")
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Delete failed: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
|
||||
|
||||
@api_view(["POST"])
|
||||
@permission_classes([IsAdminUser])
|
||||
@parser_classes([MultiPartParser, FormParser])
|
||||
def upload_backup(request):
|
||||
"""Upload a backup file for restoration."""
|
||||
uploaded = request.FILES.get("file")
|
||||
if not uploaded:
|
||||
return Response(
|
||||
{"detail": "No file uploaded"},
|
||||
status=status.HTTP_400_BAD_REQUEST,
|
||||
)
|
||||
|
||||
try:
|
||||
backup_dir = services.get_backup_dir()
|
||||
filename = uploaded.name or "uploaded-backup.zip"
|
||||
|
||||
# Ensure unique filename
|
||||
backup_file = backup_dir / filename
|
||||
counter = 1
|
||||
while backup_file.exists():
|
||||
name_parts = filename.rsplit(".", 1)
|
||||
if len(name_parts) == 2:
|
||||
backup_file = backup_dir / f"{name_parts[0]}-{counter}.{name_parts[1]}"
|
||||
else:
|
||||
backup_file = backup_dir / f"{filename}-{counter}"
|
||||
counter += 1
|
||||
|
||||
# Save uploaded file
|
||||
with backup_file.open("wb") as f:
|
||||
for chunk in uploaded.chunks():
|
||||
f.write(chunk)
|
||||
|
||||
return Response(
|
||||
{
|
||||
"detail": "Backup uploaded successfully",
|
||||
"filename": backup_file.name,
|
||||
},
|
||||
status=status.HTTP_201_CREATED,
|
||||
)
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Upload failed: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
|
||||
|
||||
@api_view(["POST"])
|
||||
@permission_classes([IsAdminUser])
|
||||
def restore_backup(request, filename):
|
||||
"""Restore from a backup file (async via Celery). WARNING: This will flush the database!"""
|
||||
try:
|
||||
# Security: prevent path traversal
|
||||
if ".." in filename or "/" in filename or "\\" in filename:
|
||||
raise Http404("Invalid filename")
|
||||
|
||||
backup_dir = services.get_backup_dir()
|
||||
backup_file = backup_dir / filename
|
||||
|
||||
if not backup_file.exists():
|
||||
raise Http404("Backup file not found")
|
||||
|
||||
task = restore_backup_task.delay(filename)
|
||||
return Response(
|
||||
{
|
||||
"detail": "Restore started",
|
||||
"task_id": task.id,
|
||||
"task_token": _generate_task_token(task.id),
|
||||
},
|
||||
status=status.HTTP_202_ACCEPTED,
|
||||
)
|
||||
except Http404:
|
||||
raise
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Failed to start restore: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
|
||||
|
||||
@api_view(["GET"])
|
||||
@permission_classes([IsAdminUser])
|
||||
def get_schedule(request):
|
||||
"""Get backup schedule settings."""
|
||||
try:
|
||||
settings = get_schedule_settings()
|
||||
return Response(settings)
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Failed to get schedule: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
|
||||
|
||||
@api_view(["PUT"])
|
||||
@permission_classes([IsAdminUser])
|
||||
def update_schedule(request):
|
||||
"""Update backup schedule settings."""
|
||||
try:
|
||||
settings = update_schedule_settings(request.data)
|
||||
return Response(settings)
|
||||
except ValueError as e:
|
||||
return Response(
|
||||
{"detail": str(e)},
|
||||
status=status.HTTP_400_BAD_REQUEST,
|
||||
)
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Failed to update schedule: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
|
|
@@ -1,7 +0,0 @@
from django.apps import AppConfig


class BackupsConfig(AppConfig):
    default_auto_field = "django.db.models.BigAutoField"
    name = "apps.backups"
    verbose_name = "Backups"
@ -1,202 +0,0 @@
|
|||
import json
|
||||
import logging
|
||||
|
||||
from django_celery_beat.models import PeriodicTask, CrontabSchedule
|
||||
|
||||
from core.models import CoreSettings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
BACKUP_SCHEDULE_TASK_NAME = "backup-scheduled-task"
|
||||
|
||||
DEFAULTS = {
|
||||
"schedule_enabled": True,
|
||||
"schedule_frequency": "daily",
|
||||
"schedule_time": "03:00",
|
||||
"schedule_day_of_week": 0, # Sunday
|
||||
"retention_count": 3,
|
||||
"schedule_cron_expression": "",
|
||||
}
|
||||
|
||||
|
||||
def _get_backup_settings():
|
||||
"""Get all backup settings from CoreSettings grouped JSON."""
|
||||
try:
|
||||
settings_obj = CoreSettings.objects.get(key="backup_settings")
|
||||
return settings_obj.value if isinstance(settings_obj.value, dict) else DEFAULTS.copy()
|
||||
except CoreSettings.DoesNotExist:
|
||||
return DEFAULTS.copy()
|
||||
|
||||
|
||||
def _update_backup_settings(updates: dict) -> None:
|
||||
"""Update backup settings in the grouped JSON."""
|
||||
obj, created = CoreSettings.objects.get_or_create(
|
||||
key="backup_settings",
|
||||
defaults={"name": "Backup Settings", "value": DEFAULTS.copy()}
|
||||
)
|
||||
current = obj.value if isinstance(obj.value, dict) else {}
|
||||
current.update(updates)
|
||||
obj.value = current
|
||||
obj.save()
|
||||
|
||||
|
||||
def get_schedule_settings() -> dict:
|
||||
"""Get all backup schedule settings."""
|
||||
settings = _get_backup_settings()
|
||||
return {
|
||||
"enabled": bool(settings.get("schedule_enabled", DEFAULTS["schedule_enabled"])),
|
||||
"frequency": str(settings.get("schedule_frequency", DEFAULTS["schedule_frequency"])),
|
||||
"time": str(settings.get("schedule_time", DEFAULTS["schedule_time"])),
|
||||
"day_of_week": int(settings.get("schedule_day_of_week", DEFAULTS["schedule_day_of_week"])),
|
||||
"retention_count": int(settings.get("retention_count", DEFAULTS["retention_count"])),
|
||||
"cron_expression": str(settings.get("schedule_cron_expression", DEFAULTS["schedule_cron_expression"])),
|
||||
}
|
||||
|
||||
|
||||
def update_schedule_settings(data: dict) -> dict:
|
||||
"""Update backup schedule settings and sync the PeriodicTask."""
|
||||
# Validate
|
||||
if "frequency" in data and data["frequency"] not in ("daily", "weekly"):
|
||||
raise ValueError("frequency must be 'daily' or 'weekly'")
|
||||
|
||||
if "time" in data:
|
||||
try:
|
||||
hour, minute = data["time"].split(":")
|
||||
int(hour)
|
||||
int(minute)
|
||||
except (ValueError, AttributeError):
|
||||
raise ValueError("time must be in HH:MM format")
|
||||
|
||||
if "day_of_week" in data:
|
||||
day = int(data["day_of_week"])
|
||||
if day < 0 or day > 6:
|
||||
raise ValueError("day_of_week must be 0-6 (Sunday-Saturday)")
|
||||
|
||||
if "retention_count" in data:
|
||||
count = int(data["retention_count"])
|
||||
if count < 0:
|
||||
raise ValueError("retention_count must be >= 0")
|
||||
|
||||
# Update settings with proper key names
|
||||
updates = {}
|
||||
if "enabled" in data:
|
||||
updates["schedule_enabled"] = bool(data["enabled"])
|
||||
if "frequency" in data:
|
||||
updates["schedule_frequency"] = str(data["frequency"])
|
||||
if "time" in data:
|
||||
updates["schedule_time"] = str(data["time"])
|
||||
if "day_of_week" in data:
|
||||
updates["schedule_day_of_week"] = int(data["day_of_week"])
|
||||
if "retention_count" in data:
|
||||
updates["retention_count"] = int(data["retention_count"])
|
||||
if "cron_expression" in data:
|
||||
updates["schedule_cron_expression"] = str(data["cron_expression"])
|
||||
|
||||
_update_backup_settings(updates)
|
||||
|
||||
# Sync the periodic task
|
||||
_sync_periodic_task()
|
||||
|
||||
return get_schedule_settings()
|
||||
|
||||
|
||||
def _sync_periodic_task() -> None:
|
||||
"""Create, update, or delete the scheduled backup task based on settings."""
|
||||
settings = get_schedule_settings()
|
||||
|
||||
if not settings["enabled"]:
|
||||
# Delete the task if it exists
|
||||
task = PeriodicTask.objects.filter(name=BACKUP_SCHEDULE_TASK_NAME).first()
|
||||
if task:
|
||||
old_crontab = task.crontab
|
||||
task.delete()
|
||||
_cleanup_orphaned_crontab(old_crontab)
|
||||
logger.info("Backup schedule disabled, removed periodic task")
|
||||
return
|
||||
|
||||
# Get old crontab before creating new one
|
||||
old_crontab = None
|
||||
try:
|
||||
old_task = PeriodicTask.objects.get(name=BACKUP_SCHEDULE_TASK_NAME)
|
||||
old_crontab = old_task.crontab
|
||||
except PeriodicTask.DoesNotExist:
|
||||
pass
|
||||
|
||||
# Check if using cron expression (advanced mode)
|
||||
if settings["cron_expression"]:
|
||||
# Parse cron expression: "minute hour day month weekday"
|
||||
try:
|
||||
parts = settings["cron_expression"].split()
|
||||
if len(parts) != 5:
|
||||
raise ValueError("Cron expression must have 5 parts: minute hour day month weekday")
|
||||
|
||||
minute, hour, day_of_month, month_of_year, day_of_week = parts
|
||||
|
||||
crontab, _ = CrontabSchedule.objects.get_or_create(
|
||||
minute=minute,
|
||||
hour=hour,
|
||||
day_of_week=day_of_week,
|
||||
day_of_month=day_of_month,
|
||||
month_of_year=month_of_year,
|
||||
timezone=CoreSettings.get_system_time_zone(),
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Invalid cron expression '{settings['cron_expression']}': {e}")
|
||||
raise ValueError(f"Invalid cron expression: {e}")
|
||||
else:
|
||||
# Use simple frequency-based scheduling
|
||||
# Parse time
|
||||
hour, minute = settings["time"].split(":")
|
||||
|
||||
# Build crontab based on frequency
|
||||
system_tz = CoreSettings.get_system_time_zone()
|
||||
if settings["frequency"] == "daily":
|
||||
crontab, _ = CrontabSchedule.objects.get_or_create(
|
||||
minute=minute,
|
||||
hour=hour,
|
||||
day_of_week="*",
|
||||
day_of_month="*",
|
||||
month_of_year="*",
|
||||
timezone=system_tz,
|
||||
)
|
||||
else: # weekly
|
||||
crontab, _ = CrontabSchedule.objects.get_or_create(
|
||||
minute=minute,
|
||||
hour=hour,
|
||||
day_of_week=str(settings["day_of_week"]),
|
||||
day_of_month="*",
|
||||
month_of_year="*",
|
||||
timezone=system_tz,
|
||||
)
|
||||
|
||||
# Create or update the periodic task
|
||||
task, created = PeriodicTask.objects.update_or_create(
|
||||
name=BACKUP_SCHEDULE_TASK_NAME,
|
||||
defaults={
|
||||
"task": "apps.backups.tasks.scheduled_backup_task",
|
||||
"crontab": crontab,
|
||||
"enabled": True,
|
||||
"kwargs": json.dumps({"retention_count": settings["retention_count"]}),
|
||||
},
|
||||
)
|
||||
|
||||
# Clean up old crontab if it changed and is orphaned
|
||||
if old_crontab and old_crontab.id != crontab.id:
|
||||
_cleanup_orphaned_crontab(old_crontab)
|
||||
|
||||
action = "Created" if created else "Updated"
|
||||
logger.info(f"{action} backup schedule: {settings['frequency']} at {settings['time']}")
|
||||
|
||||
|
||||
def _cleanup_orphaned_crontab(crontab_schedule):
|
||||
"""Delete old CrontabSchedule if no other tasks are using it."""
|
||||
if crontab_schedule is None:
|
||||
return
|
||||
|
||||
# Check if any other tasks are using this crontab
|
||||
if PeriodicTask.objects.filter(crontab=crontab_schedule).exists():
|
||||
logger.debug(f"CrontabSchedule {crontab_schedule.id} still in use, not deleting")
|
||||
return
|
||||
|
||||
logger.debug(f"Cleaning up orphaned CrontabSchedule: {crontab_schedule.id}")
|
||||
crontab_schedule.delete()
|
||||
|
|
@ -1,350 +0,0 @@
|
|||
import datetime
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from zipfile import ZipFile, ZIP_DEFLATED
|
||||
import logging
|
||||
import pytz
|
||||
|
||||
from django.conf import settings
|
||||
from core.models import CoreSettings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_backup_dir() -> Path:
|
||||
"""Get the backup directory, creating it if necessary."""
|
||||
backup_dir = Path(settings.BACKUP_ROOT)
|
||||
backup_dir.mkdir(parents=True, exist_ok=True)
|
||||
return backup_dir
|
||||
|
||||
|
||||
def _is_postgresql() -> bool:
|
||||
"""Check if we're using PostgreSQL."""
|
||||
return settings.DATABASES["default"]["ENGINE"] == "django.db.backends.postgresql"
|
||||
|
||||
|
||||
def _get_pg_env() -> dict:
|
||||
"""Get environment variables for PostgreSQL commands."""
|
||||
db_config = settings.DATABASES["default"]
|
||||
env = os.environ.copy()
|
||||
env["PGPASSWORD"] = db_config.get("PASSWORD", "")
|
||||
return env
|
||||
|
||||
|
||||
def _get_pg_args() -> list[str]:
|
||||
"""Get common PostgreSQL command arguments."""
|
||||
db_config = settings.DATABASES["default"]
|
||||
return [
|
||||
"-h", db_config.get("HOST", "localhost"),
|
||||
"-p", str(db_config.get("PORT", 5432)),
|
||||
"-U", db_config.get("USER", "postgres"),
|
||||
"-d", db_config.get("NAME", "dispatcharr"),
|
||||
]
|
||||
|
||||
|
||||
def _dump_postgresql(output_file: Path) -> None:
|
||||
"""Dump PostgreSQL database using pg_dump."""
|
||||
logger.info("Dumping PostgreSQL database with pg_dump...")
|
||||
|
||||
cmd = [
|
||||
"pg_dump",
|
||||
*_get_pg_args(),
|
||||
"-Fc", # Custom format for pg_restore
|
||||
"-v", # Verbose
|
||||
"-f", str(output_file),
|
||||
]
|
||||
|
||||
result = subprocess.run(
|
||||
cmd,
|
||||
env=_get_pg_env(),
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
if result.returncode != 0:
|
||||
logger.error(f"pg_dump failed: {result.stderr}")
|
||||
raise RuntimeError(f"pg_dump failed: {result.stderr}")
|
||||
|
||||
logger.debug(f"pg_dump output: {result.stderr}")
|
||||
|
||||
|
||||
def _clean_postgresql_schema() -> None:
|
||||
"""Drop and recreate the public schema to ensure a completely clean restore."""
|
||||
logger.info("[PG_CLEAN] Dropping and recreating public schema...")
|
||||
|
||||
# Commands to drop and recreate schema
|
||||
sql_commands = "DROP SCHEMA IF EXISTS public CASCADE; CREATE SCHEMA public; GRANT ALL ON SCHEMA public TO public;"
|
||||
|
||||
cmd = [
|
||||
"psql",
|
||||
*_get_pg_args(),
|
||||
"-c", sql_commands,
|
||||
]
|
||||
|
||||
result = subprocess.run(
|
||||
cmd,
|
||||
env=_get_pg_env(),
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
if result.returncode != 0:
|
||||
logger.error(f"[PG_CLEAN] Failed to clean schema: {result.stderr}")
|
||||
raise RuntimeError(f"Failed to clean PostgreSQL schema: {result.stderr}")
|
||||
|
||||
logger.info("[PG_CLEAN] Schema cleaned successfully")
|
||||
|
||||
|
||||
def _restore_postgresql(dump_file: Path) -> None:
|
||||
"""Restore PostgreSQL database using pg_restore."""
|
||||
logger.info("[PG_RESTORE] Starting pg_restore...")
|
||||
logger.info(f"[PG_RESTORE] Dump file: {dump_file}")
|
||||
|
||||
# Drop and recreate schema to ensure a completely clean restore
|
||||
_clean_postgresql_schema()
|
||||
|
||||
pg_args = _get_pg_args()
|
||||
logger.info(f"[PG_RESTORE] Connection args: {pg_args}")
|
||||
|
||||
cmd = [
|
||||
"pg_restore",
|
||||
"--no-owner", # Skip ownership commands (we already created schema)
|
||||
*pg_args,
|
||||
"-v", # Verbose
|
||||
str(dump_file),
|
||||
]
|
||||
|
||||
logger.info(f"[PG_RESTORE] Running command: {' '.join(cmd)}")
|
||||
|
||||
result = subprocess.run(
|
||||
cmd,
|
||||
env=_get_pg_env(),
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
logger.info(f"[PG_RESTORE] Return code: {result.returncode}")
|
||||
|
||||
# pg_restore may return non-zero even on partial success
|
||||
# Check for actual errors vs warnings
|
||||
if result.returncode != 0:
|
||||
# Some errors during restore are expected (e.g., "does not exist" when cleaning)
|
||||
# Only fail on critical errors
|
||||
stderr = result.stderr.lower()
|
||||
if "fatal" in stderr or "could not connect" in stderr:
|
||||
logger.error(f"[PG_RESTORE] Failed critically: {result.stderr}")
|
||||
raise RuntimeError(f"pg_restore failed: {result.stderr}")
|
||||
else:
|
||||
logger.warning(f"[PG_RESTORE] Completed with warnings: {result.stderr[:500]}...")
|
||||
|
||||
logger.info("[PG_RESTORE] Completed successfully")
|
||||
|
||||
|
||||
def _dump_sqlite(output_file: Path) -> None:
|
||||
"""Dump SQLite database using sqlite3 .backup command."""
|
||||
logger.info("Dumping SQLite database with sqlite3 .backup...")
|
||||
db_path = Path(settings.DATABASES["default"]["NAME"])
|
||||
|
||||
if not db_path.exists():
|
||||
raise FileNotFoundError(f"SQLite database not found: {db_path}")
|
||||
|
||||
# Use sqlite3 .backup command via stdin for reliable execution
|
||||
result = subprocess.run(
|
||||
["sqlite3", str(db_path)],
|
||||
input=f".backup '{output_file}'\n",
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
if result.returncode != 0:
|
||||
logger.error(f"sqlite3 backup failed: {result.stderr}")
|
||||
raise RuntimeError(f"sqlite3 backup failed: {result.stderr}")
|
||||
|
||||
# Verify the backup file was created
|
||||
if not output_file.exists():
|
||||
raise RuntimeError("sqlite3 backup failed: output file not created")
|
||||
|
||||
logger.info(f"sqlite3 backup completed successfully: {output_file}")
|
||||
|
||||
|
||||
def _restore_sqlite(dump_file: Path) -> None:
|
||||
"""Restore SQLite database by replacing the database file."""
|
||||
logger.info("Restoring SQLite database...")
|
||||
db_path = Path(settings.DATABASES["default"]["NAME"])
|
||||
backup_current = None
|
||||
|
||||
# Backup current database before overwriting
|
||||
if db_path.exists():
|
||||
backup_current = db_path.with_suffix(".db.bak")
|
||||
shutil.copy2(db_path, backup_current)
|
||||
logger.info(f"Backed up current database to {backup_current}")
|
||||
|
||||
# Ensure parent directory exists
|
||||
db_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# The backup file from _dump_sqlite is a complete SQLite database file
|
||||
# We can simply copy it over the existing database
|
||||
shutil.copy2(dump_file, db_path)
|
||||
|
||||
# Verify the restore worked by checking if sqlite3 can read it
|
||||
result = subprocess.run(
|
||||
["sqlite3", str(db_path)],
|
||||
input=".tables\n",
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
if result.returncode != 0:
|
||||
logger.error(f"sqlite3 verification failed: {result.stderr}")
|
||||
# Try to restore from backup
|
||||
if backup_current and backup_current.exists():
|
||||
shutil.copy2(backup_current, db_path)
|
||||
logger.info("Restored original database from backup")
|
||||
raise RuntimeError(f"sqlite3 restore verification failed: {result.stderr}")
|
||||
|
||||
logger.info("sqlite3 restore completed successfully")
|
||||
|
||||
|
||||
def create_backup() -> Path:
|
||||
"""
|
||||
Create a backup archive containing database dump and data directories.
|
||||
Returns the path to the created backup file.
|
||||
"""
|
||||
backup_dir = get_backup_dir()
|
||||
|
||||
# Use system timezone for filename (user-friendly), but keep internal timestamps as UTC
|
||||
system_tz_name = CoreSettings.get_system_time_zone()
|
||||
try:
|
||||
system_tz = pytz.timezone(system_tz_name)
|
||||
now_local = datetime.datetime.now(datetime.UTC).astimezone(system_tz)
|
||||
timestamp = now_local.strftime("%Y.%m.%d.%H.%M.%S")
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to use system timezone {system_tz_name}: {e}, falling back to UTC")
|
||||
timestamp = datetime.datetime.now(datetime.UTC).strftime("%Y.%m.%d.%H.%M.%S")
|
||||
|
||||
backup_name = f"dispatcharr-backup-{timestamp}.zip"
|
||||
backup_file = backup_dir / backup_name
|
||||
|
||||
logger.info(f"Creating backup: {backup_name}")
|
||||
|
||||
with tempfile.TemporaryDirectory(prefix="dispatcharr-backup-") as temp_dir:
|
||||
temp_path = Path(temp_dir)
|
||||
|
||||
# Determine database type and dump accordingly
|
||||
if _is_postgresql():
|
||||
db_dump_file = temp_path / "database.dump"
|
||||
_dump_postgresql(db_dump_file)
|
||||
db_type = "postgresql"
|
||||
else:
|
||||
db_dump_file = temp_path / "database.sqlite3"
|
||||
_dump_sqlite(db_dump_file)
|
||||
db_type = "sqlite"
|
||||
|
||||
# Create ZIP archive with compression and ZIP64 support for large files
|
||||
with ZipFile(backup_file, "w", compression=ZIP_DEFLATED, allowZip64=True) as zip_file:
|
||||
# Add database dump
|
||||
zip_file.write(db_dump_file, db_dump_file.name)
|
||||
|
||||
# Add metadata
|
||||
metadata = {
|
||||
"format": "dispatcharr-backup",
|
||||
"version": 2,
|
||||
"database_type": db_type,
|
||||
"database_file": db_dump_file.name,
|
||||
"created_at": datetime.datetime.now(datetime.UTC).isoformat(),
|
||||
}
|
||||
zip_file.writestr("metadata.json", json.dumps(metadata, indent=2))
|
||||
|
||||
logger.info(f"Backup created successfully: {backup_file}")
|
||||
return backup_file
|
||||
|
||||
|
||||
def restore_backup(backup_file: Path) -> None:
|
||||
"""
|
||||
Restore from a backup archive.
|
||||
WARNING: This will overwrite the database!
|
||||
"""
|
||||
if not backup_file.exists():
|
||||
raise FileNotFoundError(f"Backup file not found: {backup_file}")
|
||||
|
||||
logger.info(f"Restoring from backup: {backup_file}")
|
||||
|
||||
with tempfile.TemporaryDirectory(prefix="dispatcharr-restore-") as temp_dir:
|
||||
temp_path = Path(temp_dir)
|
||||
|
||||
# Extract backup
|
||||
logger.debug("Extracting backup archive...")
|
||||
with ZipFile(backup_file, "r") as zip_file:
|
||||
zip_file.extractall(temp_path)
|
||||
|
||||
# Read metadata
|
||||
metadata_file = temp_path / "metadata.json"
|
||||
if not metadata_file.exists():
|
||||
raise ValueError("Invalid backup: missing metadata.json")
|
||||
|
||||
with open(metadata_file) as f:
|
||||
metadata = json.load(f)
|
||||
|
||||
# Restore database
|
||||
_restore_database(temp_path, metadata)
|
||||
|
||||
logger.info("Restore completed successfully")
|
||||
|
||||
|
||||
def _restore_database(temp_path: Path, metadata: dict) -> None:
|
||||
"""Restore database from backup."""
|
||||
db_type = metadata.get("database_type", "postgresql")
|
||||
db_file = metadata.get("database_file", "database.dump")
|
||||
dump_file = temp_path / db_file
|
||||
|
||||
if not dump_file.exists():
|
||||
raise ValueError(f"Invalid backup: missing {db_file}")
|
||||
|
||||
current_db_type = "postgresql" if _is_postgresql() else "sqlite"
|
||||
|
||||
if db_type != current_db_type:
|
||||
raise ValueError(
|
||||
f"Database type mismatch: backup is {db_type}, "
|
||||
f"but current database is {current_db_type}"
|
||||
)
|
||||
|
||||
if db_type == "postgresql":
|
||||
_restore_postgresql(dump_file)
|
||||
else:
|
||||
_restore_sqlite(dump_file)
|
||||
|
||||
|
||||
def list_backups() -> list[dict]:
|
||||
"""List all available backup files with metadata."""
|
||||
backup_dir = get_backup_dir()
|
||||
backups = []
|
||||
|
||||
for backup_file in sorted(backup_dir.glob("dispatcharr-backup-*.zip"), reverse=True):
|
||||
# Use UTC timezone so frontend can convert to user's local time
|
||||
created_time = datetime.datetime.fromtimestamp(backup_file.stat().st_mtime, datetime.UTC)
|
||||
backups.append({
|
||||
"name": backup_file.name,
|
||||
"size": backup_file.stat().st_size,
|
||||
"created": created_time.isoformat(),
|
||||
})
|
||||
|
||||
return backups
|
||||
|
||||
|
||||
def delete_backup(filename: str) -> None:
|
||||
"""Delete a backup file."""
|
||||
backup_dir = get_backup_dir()
|
||||
backup_file = backup_dir / filename
|
||||
|
||||
if not backup_file.exists():
|
||||
raise FileNotFoundError(f"Backup file not found: {filename}")
|
||||
|
||||
if not backup_file.is_file():
|
||||
raise ValueError(f"Invalid backup file: {filename}")
|
||||
|
||||
backup_file.unlink()
|
||||
logger.info(f"Deleted backup: {filename}")
|
||||
|
|
@@ -1,106 +0,0 @@
import logging
import traceback
from celery import shared_task

from . import services

logger = logging.getLogger(__name__)


def _cleanup_old_backups(retention_count: int) -> int:
    """Delete old backups, keeping only the most recent N. Returns count deleted."""
    if retention_count <= 0:
        return 0

    backups = services.list_backups()
    if len(backups) <= retention_count:
        return 0

    # Backups are sorted newest first, so delete from the end
    to_delete = backups[retention_count:]
    deleted = 0

    for backup in to_delete:
        try:
            services.delete_backup(backup["name"])
            deleted += 1
            logger.info(f"[CLEANUP] Deleted old backup: {backup['name']}")
        except Exception as e:
            logger.error(f"[CLEANUP] Failed to delete {backup['name']}: {e}")

    return deleted


@shared_task(bind=True)
def create_backup_task(self):
    """Celery task to create a backup asynchronously."""
    try:
        logger.info(f"[BACKUP] Starting backup task {self.request.id}")
        backup_file = services.create_backup()
        logger.info(f"[BACKUP] Task {self.request.id} completed: {backup_file.name}")
        return {
            "status": "completed",
            "filename": backup_file.name,
            "size": backup_file.stat().st_size,
        }
    except Exception as e:
        logger.error(f"[BACKUP] Task {self.request.id} failed: {str(e)}")
        logger.error(f"[BACKUP] Traceback: {traceback.format_exc()}")
        return {
            "status": "failed",
            "error": str(e),
        }


@shared_task(bind=True)
def restore_backup_task(self, filename: str):
    """Celery task to restore a backup asynchronously."""
    try:
        logger.info(f"[RESTORE] Starting restore task {self.request.id} for {filename}")
        backup_dir = services.get_backup_dir()
        backup_file = backup_dir / filename
        logger.info(f"[RESTORE] Backup file path: {backup_file}")
        services.restore_backup(backup_file)
        logger.info(f"[RESTORE] Task {self.request.id} completed successfully")
        return {
            "status": "completed",
            "filename": filename,
        }
    except Exception as e:
        logger.error(f"[RESTORE] Task {self.request.id} failed: {str(e)}")
        logger.error(f"[RESTORE] Traceback: {traceback.format_exc()}")
        return {
            "status": "failed",
            "error": str(e),
        }


@shared_task(bind=True)
def scheduled_backup_task(self, retention_count: int = 0):
    """Celery task for scheduled backups with optional retention cleanup."""
    try:
        logger.info(f"[SCHEDULED] Starting scheduled backup task {self.request.id}")

        # Create backup
        backup_file = services.create_backup()
        logger.info(f"[SCHEDULED] Backup created: {backup_file.name}")

        # Cleanup old backups if retention is set
        deleted = 0
        if retention_count > 0:
            deleted = _cleanup_old_backups(retention_count)
            logger.info(f"[SCHEDULED] Cleanup complete, deleted {deleted} old backup(s)")

        return {
            "status": "completed",
            "filename": backup_file.name,
            "size": backup_file.stat().st_size,
            "deleted_count": deleted,
        }
    except Exception as e:
        logger.error(f"[SCHEDULED] Task {self.request.id} failed: {str(e)}")
        logger.error(f"[SCHEDULED] Traceback: {traceback.format_exc()}")
        return {
            "status": "failed",
            "error": str(e),
        }
(File diff suppressed because it is too large)
@@ -6,21 +6,12 @@ from .api_views import (
    ChannelGroupViewSet,
    BulkDeleteStreamsAPIView,
    BulkDeleteChannelsAPIView,
    BulkDeleteLogosAPIView,
    CleanupUnusedLogosAPIView,
    LogoViewSet,
    ChannelProfileViewSet,
    UpdateChannelMembershipAPIView,
    BulkUpdateChannelMembershipAPIView,
    RecordingViewSet,
    RecurringRecordingRuleViewSet,
    GetChannelStreamsAPIView,
    SeriesRulesAPIView,
    DeleteSeriesRuleAPIView,
    EvaluateSeriesRulesAPIView,
    BulkRemoveSeriesRecordingsAPIView,
    BulkDeleteUpcomingRecordingsAPIView,
    ComskipConfigAPIView,
)

app_name = 'channels'  # for DRF routing

@@ -32,24 +23,14 @@ router.register(r'channels', ChannelViewSet, basename='channel')
router.register(r'logos', LogoViewSet, basename='logo')
router.register(r'profiles', ChannelProfileViewSet, basename='profile')
router.register(r'recordings', RecordingViewSet, basename='recording')
router.register(r'recurring-rules', RecurringRecordingRuleViewSet, basename='recurring-rule')

urlpatterns = [
    # Bulk delete is a single APIView, not a ViewSet
    path('streams/bulk-delete/', BulkDeleteStreamsAPIView.as_view(), name='bulk_delete_streams'),
    path('channels/bulk-delete/', BulkDeleteChannelsAPIView.as_view(), name='bulk_delete_channels'),
    path('logos/bulk-delete/', BulkDeleteLogosAPIView.as_view(), name='bulk_delete_logos'),
    path('logos/cleanup/', CleanupUnusedLogosAPIView.as_view(), name='cleanup_unused_logos'),
    path('channels/<int:channel_id>/streams/', GetChannelStreamsAPIView.as_view(), name='get_channel_streams'),
    path('profiles/<int:profile_id>/channels/<int:channel_id>/', UpdateChannelMembershipAPIView.as_view(), name='update_channel_membership'),
    path('profiles/<int:profile_id>/channels/bulk-update/', BulkUpdateChannelMembershipAPIView.as_view(), name='bulk_update_channel_membership'),
    # DVR series rules (order matters: specific routes before catch-all slug)
    path('series-rules/', SeriesRulesAPIView.as_view(), name='series_rules'),
    path('series-rules/evaluate/', EvaluateSeriesRulesAPIView.as_view(), name='evaluate_series_rules'),
    path('series-rules/bulk-remove/', BulkRemoveSeriesRecordingsAPIView.as_view(), name='bulk_remove_series_recordings'),
    path('series-rules/<path:tvg_id>/', DeleteSeriesRuleAPIView.as_view(), name='delete_series_rule'),
    path('recordings/bulk-delete-upcoming/', BulkDeleteUpcomingRecordingsAPIView.as_view(), name='bulk_delete_upcoming_recordings'),
    path('dvr/comskip-config/', ComskipConfigAPIView.as_view(), name='comskip_config'),
]

urlpatterns += router.urls
(File diff suppressed because it is too large)
@ -1,35 +0,0 @@
|
|||
# Generated by Django 5.1.6 on 2025-07-13 23:08
|
||||
|
||||
import django.db.models.deletion
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('dispatcharr_channels', '0021_channel_user_level'),
|
||||
('m3u', '0012_alter_m3uaccount_refresh_interval'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='channel',
|
||||
name='auto_created',
|
||||
field=models.BooleanField(default=False, help_text='Whether this channel was automatically created via M3U auto channel sync'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='channel',
|
||||
name='auto_created_by',
|
||||
field=models.ForeignKey(blank=True, help_text='The M3U account that auto-created this channel', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='auto_created_channels', to='m3u.m3uaccount'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='channelgroupm3uaccount',
|
||||
name='auto_channel_sync',
|
||||
field=models.BooleanField(default=False, help_text='Automatically create/delete channels to match streams in this group'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='channelgroupm3uaccount',
|
||||
name='auto_sync_channel_start',
|
||||
field=models.FloatField(blank=True, help_text='Starting channel number for auto-created channels in this group', null=True),
|
||||
),
|
||||
]
|
||||
|
|
@ -1,23 +0,0 @@
|
|||
# Generated by Django 5.1.6 on 2025-07-29 02:39
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('dispatcharr_channels', '0022_channel_auto_created_channel_auto_created_by_and_more'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='stream',
|
||||
name='stream_stats',
|
||||
field=models.JSONField(blank=True, help_text='JSON object containing stream statistics like video codec, resolution, etc.', null=True),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='stream',
|
||||
name='stream_stats_updated_at',
|
||||
field=models.DateTimeField(blank=True, db_index=True, help_text='When stream statistics were last updated', null=True),
|
||||
),
|
||||
]
|
||||
|
|
@ -1,19 +0,0 @@
|
|||
# Generated by Django 5.2.4 on 2025-08-22 20:14
|
||||
|
||||
import django.db.models.deletion
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('dispatcharr_channels', '0023_stream_stream_stats_stream_stream_stats_updated_at'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='channelgroupm3uaccount',
|
||||
name='channel_group',
|
||||
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='m3u_accounts', to='dispatcharr_channels.channelgroup'),
|
||||
),
|
||||
]
|
||||
|
|
@ -1,28 +0,0 @@
|
|||
# Generated by Django 5.2.4 on 2025-09-02 14:30
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('dispatcharr_channels', '0024_alter_channelgroupm3uaccount_channel_group'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='channelgroupm3uaccount',
|
||||
name='custom_properties',
|
||||
field=models.JSONField(blank=True, default=dict, null=True),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='recording',
|
||||
name='custom_properties',
|
||||
field=models.JSONField(blank=True, default=dict, null=True),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='stream',
|
||||
name='custom_properties',
|
||||
field=models.JSONField(blank=True, default=dict, null=True),
|
||||
),
|
||||
]
|
||||
|
|
@ -1,31 +0,0 @@
|
|||
# Generated by Django 5.0.14 on 2025-09-18 14:56
|
||||
|
||||
import django.db.models.deletion
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('dispatcharr_channels', '0025_alter_channelgroupm3uaccount_custom_properties_and_more'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.CreateModel(
|
||||
name='RecurringRecordingRule',
|
||||
fields=[
|
||||
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
|
||||
('days_of_week', models.JSONField(default=list)),
|
||||
('start_time', models.TimeField()),
|
||||
('end_time', models.TimeField()),
|
||||
('enabled', models.BooleanField(default=True)),
|
||||
('name', models.CharField(blank=True, max_length=255)),
|
||||
('created_at', models.DateTimeField(auto_now_add=True)),
|
||||
('updated_at', models.DateTimeField(auto_now=True)),
|
||||
('channel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recurring_rules', to='dispatcharr_channels.channel')),
|
||||
],
|
||||
options={
|
||||
'ordering': ['channel', 'start_time'],
|
||||
},
|
||||
),
|
||||
]
|
||||
|
|
@ -1,23 +0,0 @@
|
|||
# Generated by Django 5.2.4 on 2025-10-05 20:50
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('dispatcharr_channels', '0026_recurringrecordingrule'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='recurringrecordingrule',
|
||||
name='end_date',
|
||||
field=models.DateField(blank=True, null=True),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='recurringrecordingrule',
|
||||
name='start_date',
|
||||
field=models.DateField(blank=True, null=True),
|
||||
),
|
||||
]
|
||||
|
|
@ -1,25 +0,0 @@
|
|||
# Generated by Django 5.2.4 on 2025-10-06 22:55
|
||||
|
||||
import django.utils.timezone
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('dispatcharr_channels', '0027_recurringrecordingrule_end_date_and_more'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='channel',
|
||||
name='created_at',
|
||||
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, help_text='Timestamp when this channel was created'),
|
||||
preserve_default=False,
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='channel',
|
||||
name='updated_at',
|
||||
field=models.DateTimeField(auto_now=True, help_text='Timestamp when this channel was last updated'),
|
||||
),
|
||||
]
|
||||
|
|
@ -1,54 +0,0 @@
|
|||
# Generated migration to backfill stream_hash for existing custom streams
|
||||
|
||||
from django.db import migrations
|
||||
import hashlib
|
||||
|
||||
|
||||
def backfill_custom_stream_hashes(apps, schema_editor):
|
||||
"""
|
||||
Generate stream_hash for all custom streams that don't have one.
|
||||
Uses stream ID to create a stable hash that won't change when name/url is edited.
|
||||
"""
|
||||
Stream = apps.get_model('dispatcharr_channels', 'Stream')
|
||||
|
||||
custom_streams_without_hash = Stream.objects.filter(
|
||||
is_custom=True,
|
||||
stream_hash__isnull=True
|
||||
)
|
||||
|
||||
updated_count = 0
|
||||
for stream in custom_streams_without_hash:
|
||||
# Generate a stable hash using the stream's ID
|
||||
# This ensures the hash never changes even if name/url is edited
|
||||
unique_string = f"custom_stream_{stream.id}"
|
||||
stream.stream_hash = hashlib.sha256(unique_string.encode()).hexdigest()
|
||||
stream.save(update_fields=['stream_hash'])
|
||||
updated_count += 1
|
||||
|
||||
if updated_count > 0:
|
||||
print(f"Backfilled stream_hash for {updated_count} custom streams")
|
||||
else:
|
||||
print("No custom streams needed stream_hash backfill")
|
||||
|
||||
|
||||
def reverse_backfill(apps, schema_editor):
|
||||
"""
|
||||
Reverse migration - clear stream_hash for custom streams.
|
||||
Note: This will break preview functionality for custom streams.
|
||||
"""
|
||||
Stream = apps.get_model('dispatcharr_channels', 'Stream')
|
||||
|
||||
custom_streams = Stream.objects.filter(is_custom=True)
|
||||
count = custom_streams.update(stream_hash=None)
|
||||
print(f"Cleared stream_hash for {count} custom streams")
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('dispatcharr_channels', '0028_channel_created_at_channel_updated_at'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(backfill_custom_stream_hashes, reverse_backfill),
|
||||
]
|
||||
|
|
@ -1,18 +0,0 @@
|
|||
# Generated by Django 5.2.4 on 2025-10-28 20:00
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('dispatcharr_channels', '0029_backfill_custom_stream_hashes'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='stream',
|
||||
name='url',
|
||||
field=models.URLField(blank=True, max_length=4096, null=True),
|
||||
),
|
||||
]
|
||||
|
|
@@ -1,29 +0,0 @@
-# Generated by Django 5.2.9 on 2026-01-09 18:19
-
-import datetime
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('dispatcharr_channels', '0030_alter_stream_url'),
-    ]
-
-    operations = [
-        migrations.AddField(
-            model_name='channelgroupm3uaccount',
-            name='is_stale',
-            field=models.BooleanField(db_index=True, default=False, help_text='Whether this group relationship is stale (not seen in recent refresh, pending deletion)'),
-        ),
-        migrations.AddField(
-            model_name='channelgroupm3uaccount',
-            name='last_seen',
-            field=models.DateTimeField(db_index=True, default=datetime.datetime.now, help_text='Last time this group was seen in the M3U source during a refresh'),
-        ),
-        migrations.AddField(
-            model_name='stream',
-            name='is_stale',
-            field=models.BooleanField(db_index=True, default=False, help_text='Whether this stream is stale (not seen in recent refresh, pending deletion)'),
-        ),
-    ]
@@ -55,7 +55,7 @@ class Stream(models.Model):
     """
 
     name = models.CharField(max_length=255, default="Default Stream")
-    url = models.URLField(max_length=4096, blank=True, null=True)
+    url = models.URLField(max_length=2000, blank=True, null=True)
     m3u_account = models.ForeignKey(
         M3UAccount,
         on_delete=models.CASCADE,
@@ -94,25 +94,7 @@ class Stream(models.Model):
         db_index=True,
     )
-    last_seen = models.DateTimeField(db_index=True, default=datetime.now)
-    is_stale = models.BooleanField(
-        default=False,
-        db_index=True,
-        help_text="Whether this stream is stale (not seen in recent refresh, pending deletion)"
-    )
-    custom_properties = models.JSONField(default=dict, blank=True, null=True)
-
-    # Stream statistics fields
-    stream_stats = models.JSONField(
-        null=True,
-        blank=True,
-        help_text="JSON object containing stream statistics like video codec, resolution, etc."
-    )
-    stream_stats_updated_at = models.DateTimeField(
-        null=True,
-        blank=True,
-        help_text="When stream statistics were last updated",
-        db_index=True
-    )
+    custom_properties = models.TextField(null=True, blank=True)
 
     class Meta:
         # If you use m3u_account, you might do unique_together = ('name','url','m3u_account')
@@ -124,11 +106,11 @@ class Stream(models.Model):
         return self.name or self.url or f"Stream ID {self.id}"
 
     @classmethod
-    def generate_hash_key(cls, name, url, tvg_id, keys=None, m3u_id=None, group=None):
+    def generate_hash_key(cls, name, url, tvg_id, keys=None):
        if keys is None:
            keys = CoreSettings.get_m3u_hash_key().split(",")
 
-        stream_parts = {"name": name, "url": url, "tvg_id": tvg_id, "m3u_id": m3u_id, "group": group}
+        stream_parts = {"name": name, "url": url, "tvg_id": tvg_id}
 
        hash_parts = {key: stream_parts[key] for key in keys if key in stream_parts}
 
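main's generate_hash_key accepts extra parts (m3u_id, group) that the configured key list can select. The method body beyond the dict construction is not shown in this hunk, so the joining and digest below are assumptions, purely to illustrate selective hashing over configured keys:

import hashlib

def hash_from_parts(parts: dict, keys: list) -> str:
    # Keep only the configured keys, in a deterministic order (illustrative choice).
    selected = [str(parts[k]) for k in sorted(keys) if k in parts]
    return hashlib.sha256("|".join(selected).encode()).hexdigest()

parts = {"name": "News HD", "url": "http://example/1.ts", "tvg_id": "news",
         "m3u_id": 3, "group": "News"}
print(hash_from_parts(parts, ["name", "url", "tvg_id"]))            # v0.6.2-style key set
print(hash_from_parts(parts, ["name", "url", "tvg_id", "m3u_id"]))  # main can also scope by account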
@@ -157,14 +139,8 @@ class Stream(models.Model):
             stream = cls.objects.create(**fields_to_update)
             return stream, True  # True means it was created
 
+    # @TODO: honor stream's stream profile
     def get_stream_profile(self):
-        """
-        Get the stream profile for this stream.
-        Uses the stream's own profile if set, otherwise returns the default.
-        """
-        if self.stream_profile:
-            return self.stream_profile
-
        stream_profile = StreamProfile.objects.get(
            id=CoreSettings.get_default_stream_profile_id()
        )
@@ -301,28 +277,6 @@ class Channel(models.Model):
 
     user_level = models.IntegerField(default=0)
 
-    auto_created = models.BooleanField(
-        default=False,
-        help_text="Whether this channel was automatically created via M3U auto channel sync"
-    )
-    auto_created_by = models.ForeignKey(
-        "m3u.M3UAccount",
-        on_delete=models.SET_NULL,
-        null=True,
-        blank=True,
-        related_name="auto_created_channels",
-        help_text="The M3U account that auto-created this channel"
-    )
-
-    created_at = models.DateTimeField(
-        auto_now_add=True,
-        help_text="Timestamp when this channel was created"
-    )
-    updated_at = models.DateTimeField(
-        auto_now=True,
-        help_text="Timestamp when this channel was last updated"
-    )
-
     def clean(self):
         # Enforce unique channel_number within a given group
         existing = Channel.objects.filter(
@@ -399,17 +353,14 @@ class Channel(models.Model):
             if not m3u_account:
                 logger.debug(f"Stream {stream.id} has no M3U account")
                 continue
-            if m3u_account.is_active == False:
-                logger.debug(f"M3U account {m3u_account.id} is inactive, skipping.")
-                continue
 
-            m3u_profiles = m3u_account.profiles.filter(is_active=True)
+            m3u_profiles = m3u_account.profiles.all()
             default_profile = next(
                 (obj for obj in m3u_profiles if obj.is_default), None
             )
 
             if not default_profile:
-                logger.debug(f"M3U account {m3u_account.id} has no active default profile")
+                logger.debug(f"M3U account {m3u_account.id} has no default profile")
                 continue
 
             profiles = [default_profile] + [
@@ -417,6 +368,11 @@ class Channel(models.Model):
             ]
 
             for profile in profiles:
+                # Skip inactive profiles
+                if not profile.is_active:
+                    logger.debug(f"Skipping inactive profile {profile.id}")
+                    continue
+
                 has_active_profiles = True
 
                 profile_connections_key = f"profile_connections:{profile.id}"
@@ -451,9 +407,9 @@ class Channel(models.Model):
 
         # No available streams - determine specific reason
         if has_streams_but_maxed_out:
-            error_reason = "All active M3U profiles have reached maximum connection limits"
+            error_reason = "All M3U profiles have reached maximum connection limits"
         elif has_active_profiles:
-            error_reason = "No compatible active profile found for any assigned stream"
+            error_reason = "No compatible profile found for any assigned stream"
         else:
             error_reason = "No active profiles found for any assigned stream"
 
@@ -578,32 +534,13 @@ class ChannelStream(models.Model):
 
 class ChannelGroupM3UAccount(models.Model):
     channel_group = models.ForeignKey(
-        ChannelGroup, on_delete=models.CASCADE, related_name="m3u_accounts"
+        ChannelGroup, on_delete=models.CASCADE, related_name="m3u_account"
     )
     m3u_account = models.ForeignKey(
         M3UAccount, on_delete=models.CASCADE, related_name="channel_group"
     )
-    custom_properties = models.JSONField(default=dict, blank=True, null=True)
+    custom_properties = models.TextField(null=True, blank=True)
     enabled = models.BooleanField(default=True)
-    auto_channel_sync = models.BooleanField(
-        default=False,
-        help_text='Automatically create/delete channels to match streams in this group'
-    )
-    auto_sync_channel_start = models.FloatField(
-        null=True,
-        blank=True,
-        help_text='Starting channel number for auto-created channels in this group'
-    )
-    last_seen = models.DateTimeField(
-        default=datetime.now,
-        db_index=True,
-        help_text='Last time this group was seen in the M3U source during a refresh'
-    )
-    is_stale = models.BooleanField(
-        default=False,
-        db_index=True,
-        help_text='Whether this group relationship is stale (not seen in recent refresh, pending deletion)'
-    )
 
     class Meta:
         unique_together = ("channel_group", "m3u_account")
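The last_seen/is_stale pair removed here supports a mark-and-sweep refresh: every group relationship touched by the current refresh gets a fresh timestamp, and anything older is flagged before eventual deletion. A sketch of that pattern against these models (the 24-hour grace window is an assumed policy, not taken from this diff):

from datetime import timedelta
from django.utils import timezone

def mark_stale_groups(m3u_account, grace=timedelta(hours=24)):
    # 'channel_group' is the reverse manager for ChannelGroupM3UAccount rows;
    # anything not re-stamped within the grace window is flagged for cleanup.
    cutoff = timezone.now() - grace
    return m3u_account.channel_group.filter(last_seen__lt=cutoff).update(is_stale=True)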
@@ -627,39 +564,7 @@ class Recording(models.Model):
     start_time = models.DateTimeField()
     end_time = models.DateTimeField()
     task_id = models.CharField(max_length=255, null=True, blank=True)
-    custom_properties = models.JSONField(default=dict, blank=True, null=True)
+    custom_properties = models.TextField(null=True, blank=True)
 
     def __str__(self):
         return f"{self.channel.name} - {self.start_time} to {self.end_time}"
-
-
-class RecurringRecordingRule(models.Model):
-    """Rule describing a recurring manual DVR schedule."""
-
-    channel = models.ForeignKey(
-        "Channel",
-        on_delete=models.CASCADE,
-        related_name="recurring_rules",
-    )
-    days_of_week = models.JSONField(default=list)
-    start_time = models.TimeField()
-    end_time = models.TimeField()
-    enabled = models.BooleanField(default=True)
-    name = models.CharField(max_length=255, blank=True)
-    start_date = models.DateField(null=True, blank=True)
-    end_date = models.DateField(null=True, blank=True)
-    created_at = models.DateTimeField(auto_now_add=True)
-    updated_at = models.DateTimeField(auto_now=True)
-
-    class Meta:
-        ordering = ["channel", "start_time"]
-
-    def __str__(self):
-        channel_name = getattr(self.channel, "name", str(self.channel_id))
-        return f"Recurring rule for {channel_name}"
-
-    def cleaned_days(self):
-        try:
-            return sorted({int(d) for d in (self.days_of_week or []) if 0 <= int(d) <= 6})
-        except Exception:
-            return []
@@ -1,6 +1,3 @@
-import json
-from datetime import datetime
-
 from rest_framework import serializers
 from .models import (
     Stream,
@@ -12,7 +9,6 @@ from .models import (
     ChannelProfile,
     ChannelProfileMembership,
     Recording,
-    RecurringRecordingRule,
 )
 from apps.epg.serializers import EPGDataSerializer
 from core.models import StreamProfile
@@ -20,39 +16,14 @@ from apps.epg.models import EPGData
 from django.urls import reverse
 from rest_framework import serializers
-from django.utils import timezone
-from core.utils import validate_flexible_url
 
 
 class LogoSerializer(serializers.ModelSerializer):
     cache_url = serializers.SerializerMethodField()
-    channel_count = serializers.SerializerMethodField()
-    is_used = serializers.SerializerMethodField()
-    channel_names = serializers.SerializerMethodField()
 
     class Meta:
         model = Logo
-        fields = ["id", "name", "url", "cache_url", "channel_count", "is_used", "channel_names"]
-
-    def validate_url(self, value):
-        """Validate that the URL is unique for creation or update"""
-        if self.instance and self.instance.url == value:
-            return value
-
-        if Logo.objects.filter(url=value).exists():
-            raise serializers.ValidationError("A logo with this URL already exists.")
-
-        return value
-
-    def create(self, validated_data):
-        """Handle logo creation with proper URL validation"""
-        return Logo.objects.create(**validated_data)
-
-    def update(self, instance, validated_data):
-        """Handle logo updates"""
-        for attr, value in validated_data.items():
-            setattr(instance, attr, value)
-        instance.save()
-        return instance
+        fields = ["id", "name", "url", "cache_url"]
 
     def get_cache_url(self, obj):
         # return f"/api/channels/logos/{obj.id}/cache/"
@@ -63,41 +34,11 @@ class LogoSerializer(serializers.ModelSerializer):
         )
         return reverse("api:channels:logo-cache", args=[obj.id])
 
-    def get_channel_count(self, obj):
-        """Get the number of channels using this logo"""
-        return obj.channels.count()
-
-    def get_is_used(self, obj):
-        """Check if this logo is used by any channels"""
-        return obj.channels.exists()
-
-    def get_channel_names(self, obj):
-        """Get the names of channels using this logo (limited to first 5)"""
-        names = []
-
-        # Get channel names
-        channels = obj.channels.all()[:5]
-        for channel in channels:
-            names.append(f"Channel: {channel.name}")
-
-        # Calculate total count for "more" message
-        total_count = self.get_channel_count(obj)
-        if total_count > 5:
-            names.append(f"...and {total_count - 5} more")
-
-        return names
-
 
 #
 # Stream
 #
 class StreamSerializer(serializers.ModelSerializer):
-    url = serializers.CharField(
-        required=False,
-        allow_blank=True,
-        allow_null=True,
-        validators=[validate_flexible_url]
-    )
     stream_profile_id = serializers.PrimaryKeyRelatedField(
         queryset=StreamProfile.objects.all(),
         source="stream_profile",
@@ -119,13 +60,10 @@ class StreamSerializer(serializers.ModelSerializer):
             "current_viewers",
             "updated_at",
             "last_seen",
-            "is_stale",
             "stream_profile_id",
             "is_custom",
             "channel_group",
             "stream_hash",
-            "stream_stats",
-            "stream_stats_updated_at",
         ]
 
     def get_fields(self):
@@ -147,57 +85,13 @@ class StreamSerializer(serializers.ModelSerializer):
         return fields
 
 
-class ChannelGroupM3UAccountSerializer(serializers.ModelSerializer):
-    m3u_accounts = serializers.IntegerField(source="m3u_accounts.id", read_only=True)
-    enabled = serializers.BooleanField()
-    auto_channel_sync = serializers.BooleanField(default=False)
-    auto_sync_channel_start = serializers.FloatField(allow_null=True, required=False)
-    custom_properties = serializers.JSONField(required=False)
-
-    class Meta:
-        model = ChannelGroupM3UAccount
-        fields = ["m3u_accounts", "channel_group", "enabled", "auto_channel_sync", "auto_sync_channel_start", "custom_properties", "is_stale", "last_seen"]
-
-    def to_representation(self, instance):
-        data = super().to_representation(instance)
-
-        custom_props = instance.custom_properties or {}
-
-        return data
-
-    def to_internal_value(self, data):
-        # Accept both dict and JSON string for custom_properties (for backward compatibility)
-        val = data.get("custom_properties")
-        if isinstance(val, str):
-            try:
-                data["custom_properties"] = json.loads(val)
-            except Exception:
-                pass
-
-        return super().to_internal_value(data)
-
 #
 # Channel Group
 #
 class ChannelGroupSerializer(serializers.ModelSerializer):
-    channel_count = serializers.SerializerMethodField()
-    m3u_account_count = serializers.SerializerMethodField()
-    m3u_accounts = ChannelGroupM3UAccountSerializer(
-        many=True,
-        read_only=True
-    )
-
     class Meta:
         model = ChannelGroup
-        fields = ["id", "name", "channel_count", "m3u_account_count", "m3u_accounts"]
-
-    def get_channel_count(self, obj):
-        """Get count of channels in this group"""
-        return obj.channels.count()
-
-    def get_m3u_account_count(self, obj):
-        """Get count of M3U accounts associated with this group"""
-        return obj.m3u_accounts.count()
+        fields = ["id", "name"]
 
 
 class ChannelProfileSerializer(serializers.ModelSerializer):
@@ -276,8 +170,6 @@ class ChannelSerializer(serializers.ModelSerializer):
         required=False,
     )
 
-    auto_created_by_name = serializers.SerializerMethodField()
-
     class Meta:
         model = Channel
         fields = [
@@ -293,9 +185,6 @@ class ChannelSerializer(serializers.ModelSerializer):
             "uuid",
             "logo_id",
             "user_level",
-            "auto_created",
-            "auto_created_by",
-            "auto_created_by_name",
         ]
 
     def to_representation(self, instance):
@@ -303,17 +192,8 @@ class ChannelSerializer(serializers.ModelSerializer):
 
         if include_streams:
             self.fields["streams"] = serializers.SerializerMethodField()
-            return super().to_representation(instance)
-        else:
-            # Fix: For PATCH/PUT responses, ensure streams are ordered
-            representation = super().to_representation(instance)
-            if "streams" in representation:
-                representation["streams"] = list(
-                    instance.streams.all()
-                    .order_by("channelstream__order")
-                    .values_list("id", flat=True)
-                )
-            return representation
+
+        return super().to_representation(instance)
 
     def get_logo(self, obj):
         return LogoSerializer(obj.logo).data
@@ -403,11 +283,16 @@ class ChannelSerializer(serializers.ModelSerializer):
             return None
         return value  # PrimaryKeyRelatedField will handle the conversion to object
 
-    def get_auto_created_by_name(self, obj):
-        """Get the name of the M3U account that auto-created this channel."""
-        if obj.auto_created_by:
-            return obj.auto_created_by.name
-        return None
+
+class ChannelGroupM3UAccountSerializer(serializers.ModelSerializer):
+    enabled = serializers.BooleanField()
+
+    class Meta:
+        model = ChannelGroupM3UAccount
+        fields = ["id", "channel_group", "enabled"]
+
+    # Optionally, if you only need the id of the ChannelGroup, you can customize it like this:
+    # channel_group = serializers.PrimaryKeyRelatedField(queryset=ChannelGroup.objects.all())
 
 
 class RecordingSerializer(serializers.ModelSerializer):
@@ -417,48 +302,9 @@ class RecordingSerializer(serializers.ModelSerializer):
         read_only_fields = ["task_id"]
 
     def validate(self, data):
-        from core.models import CoreSettings
         start_time = data.get("start_time")
         end_time = data.get("end_time")
 
-        if start_time and timezone.is_naive(start_time):
-            start_time = timezone.make_aware(start_time, timezone.get_current_timezone())
-            data["start_time"] = start_time
-        if end_time and timezone.is_naive(end_time):
-            end_time = timezone.make_aware(end_time, timezone.get_current_timezone())
-            data["end_time"] = end_time
-
-        # If this is an EPG-based recording (program provided), apply global pre/post offsets
-        try:
-            cp = data.get("custom_properties") or {}
-            is_epg_based = isinstance(cp, dict) and isinstance(cp.get("program"), (dict,))
-        except Exception:
-            is_epg_based = False
-
-        if is_epg_based and start_time and end_time:
-            try:
-                pre_min = int(CoreSettings.get_dvr_pre_offset_minutes())
-            except Exception:
-                pre_min = 0
-            try:
-                post_min = int(CoreSettings.get_dvr_post_offset_minutes())
-            except Exception:
-                post_min = 0
-            from datetime import timedelta
-            try:
-                if pre_min and pre_min > 0:
-                    start_time = start_time - timedelta(minutes=pre_min)
-            except Exception:
-                pass
-            try:
-                if post_min and post_min > 0:
-                    end_time = end_time + timedelta(minutes=post_min)
-            except Exception:
-                pass
-            # write back adjusted times so scheduling uses them
-            data["start_time"] = start_time
-            data["end_time"] = end_time
-
         now = timezone.now()  # timezone-aware current time
 
         if end_time < now:
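The deleted block above widens EPG-based recordings by global pre/post offsets. Reduced to its core, the adjustment is plain timedelta arithmetic; the 2/5-minute defaults in this sketch are hypothetical, not Dispatcharr's settings:

from datetime import timedelta

def apply_dvr_padding(start, end, pre_min=2, post_min=5):
    # Start earlier and stop later than the guide says, to absorb schedule drift.
    if pre_min > 0:
        start -= timedelta(minutes=pre_min)
    if post_min > 0:
        end += timedelta(minutes=post_min)
    return start, end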
@@ -471,56 +317,3 @@ class RecordingSerializer(serializers.ModelSerializer):
             raise serializers.ValidationError("End time must be after start time.")
 
         return data
-
-
-class RecurringRecordingRuleSerializer(serializers.ModelSerializer):
-    class Meta:
-        model = RecurringRecordingRule
-        fields = "__all__"
-        read_only_fields = ["created_at", "updated_at"]
-
-    def validate_days_of_week(self, value):
-        if not value:
-            raise serializers.ValidationError("Select at least one day of the week")
-        cleaned = []
-        for entry in value:
-            try:
-                iv = int(entry)
-            except (TypeError, ValueError):
-                raise serializers.ValidationError("Days of week must be integers 0-6")
-            if iv < 0 or iv > 6:
-                raise serializers.ValidationError("Days of week must be between 0 (Monday) and 6 (Sunday)")
-            cleaned.append(iv)
-        return sorted(set(cleaned))
-
-    def validate(self, attrs):
-        start = attrs.get("start_time") or getattr(self.instance, "start_time", None)
-        end = attrs.get("end_time") or getattr(self.instance, "end_time", None)
-        start_date = attrs.get("start_date") if "start_date" in attrs else getattr(self.instance, "start_date", None)
-        end_date = attrs.get("end_date") if "end_date" in attrs else getattr(self.instance, "end_date", None)
-        if start_date is None:
-            existing_start = getattr(self.instance, "start_date", None)
-            if existing_start is None:
-                raise serializers.ValidationError("Start date is required")
-        if start_date and end_date and end_date < start_date:
-            raise serializers.ValidationError("End date must be on or after start date")
-        if end_date is None:
-            existing_end = getattr(self.instance, "end_date", None)
-            if existing_end is None:
-                raise serializers.ValidationError("End date is required")
-        if start and end and start_date and end_date:
-            start_dt = datetime.combine(start_date, start)
-            end_dt = datetime.combine(end_date, end)
-            if end_dt <= start_dt:
-                raise serializers.ValidationError("End datetime must be after start datetime")
-        elif start and end and end == start:
-            raise serializers.ValidationError("End time must be different from start time")
-        # Normalize empty strings to None for dates
-        if attrs.get("end_date") == "":
-            attrs["end_date"] = None
-        if attrs.get("start_date") == "":
-            attrs["start_date"] = None
-        return super().validate(attrs)
-
-    def create(self, validated_data):
-        return super().create(validated_data)
@@ -8,7 +8,7 @@ from .models import Channel, Stream, ChannelProfile, ChannelProfileMembership, R
 from apps.m3u.models import M3UAccount
 from apps.epg.tasks import parse_programs_for_tvg_id
 import logging, requests, time
-from .tasks import run_recording, prefetch_recording_artwork
+from .tasks import run_recording
 from django.utils.timezone import now, is_aware, make_aware
 from datetime import timedelta
 
@@ -45,20 +45,6 @@ def set_default_m3u_account(sender, instance, **kwargs):
     else:
         raise ValueError("No default M3UAccount found.")
 
-@receiver(post_save, sender=Stream)
-def generate_custom_stream_hash(sender, instance, created, **kwargs):
-    """
-    Generate a stable stream_hash for custom streams after creation.
-    Uses the stream's ID to ensure the hash never changes even if name/url is edited.
-    """
-    if instance.is_custom and not instance.stream_hash and created:
-        import hashlib
-        # Use stream ID for a stable, unique hash that never changes
-        unique_string = f"custom_stream_{instance.id}"
-        instance.stream_hash = hashlib.sha256(unique_string.encode()).hexdigest()
-        # Use update to avoid triggering signals again
-        Stream.objects.filter(id=instance.id).update(stream_hash=instance.stream_hash)
-
 @receiver(post_save, sender=Channel)
 def refresh_epg_programs(sender, instance, created, **kwargs):
     """
@@ -76,6 +62,15 @@ def refresh_epg_programs(sender, instance, created, **kwargs):
         logger.info(f"New channel {instance.id} ({instance.name}) created with EPG data, refreshing program data")
         parse_programs_for_tvg_id.delay(instance.epg_data.id)
 
+@receiver(post_save, sender=Channel)
+def add_new_channel_to_groups(sender, instance, created, **kwargs):
+    if created:
+        profiles = ChannelProfile.objects.all()
+        ChannelProfileMembership.objects.bulk_create([
+            ChannelProfileMembership(channel_profile=profile, channel=instance)
+            for profile in profiles
+        ])
+
 @receiver(post_save, sender=ChannelProfile)
 def create_profile_memberships(sender, instance, created, **kwargs):
     if created:
@@ -87,9 +82,8 @@ def create_profile_memberships(sender, instance, created, **kwargs):
 
 def schedule_recording_task(instance):
     eta = instance.start_time
-    # Pass recording_id first so task can persist metadata to the correct row
     task = run_recording.apply_async(
-        args=[instance.id, instance.channel_id, str(instance.start_time), str(instance.end_time)],
+        args=[instance.channel_id, str(instance.start_time), str(instance.end_time)],
         eta=eta
     )
     return task.id
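Both versions hold the capture task on the broker with Celery's eta until the wall-clock start time. A self-contained sketch of the pattern (the app/broker setup is illustrative, not Dispatcharr's actual layout):

from celery import Celery

app = Celery("sketch", broker="memory://")

@app.task
def run_recording(recording_id, channel_id, start, end):
    ...  # capture logic would live here

def schedule(recording):
    # eta delays delivery until the given datetime; main also passes the
    # recording's own id so the task can persist metadata to the right row.
    result = run_recording.apply_async(
        args=[recording.id, recording.channel_id,
              str(recording.start_time), str(recording.end_time)],
        eta=recording.start_time,
    )
    return result.id  # stored as Recording.task_id so the task can be revoked later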
@@ -138,11 +132,6 @@ def schedule_task_on_save(sender, instance, created, **kwargs):
                 instance.save(update_fields=['task_id'])
             else:
                 print("Start time is in the past. Not scheduling.")
-            # Kick off poster/artwork prefetch to enrich Upcoming cards
-            try:
-                prefetch_recording_artwork.apply_async(args=[instance.id], countdown=1)
-            except Exception as e:
-                print("Error scheduling artwork prefetch:", e)
     except Exception as e:
         import traceback
         print("Error in post_save signal:", e)

File diff suppressed because it is too large
@@ -1,211 +0,0 @@
-from django.test import TestCase
-from django.contrib.auth import get_user_model
-from rest_framework.test import APIClient
-from rest_framework import status
-
-from apps.channels.models import Channel, ChannelGroup
-
-User = get_user_model()
-
-
-class ChannelBulkEditAPITests(TestCase):
-    def setUp(self):
-        # Create a test admin user (user_level >= 10) and authenticate
-        self.user = User.objects.create_user(username="testuser", password="testpass123")
-        self.user.user_level = 10  # Set admin level
-        self.user.save()
-        self.client = APIClient()
-        self.client.force_authenticate(user=self.user)
-        self.bulk_edit_url = "/api/channels/channels/edit/bulk/"
-
-        # Create test channel group
-        self.group1 = ChannelGroup.objects.create(name="Test Group 1")
-        self.group2 = ChannelGroup.objects.create(name="Test Group 2")
-
-        # Create test channels
-        self.channel1 = Channel.objects.create(
-            channel_number=1.0,
-            name="Channel 1",
-            tvg_id="channel1",
-            channel_group=self.group1
-        )
-        self.channel2 = Channel.objects.create(
-            channel_number=2.0,
-            name="Channel 2",
-            tvg_id="channel2",
-            channel_group=self.group1
-        )
-        self.channel3 = Channel.objects.create(
-            channel_number=3.0,
-            name="Channel 3",
-            tvg_id="channel3"
-        )
-
-    def test_bulk_edit_success(self):
-        """Test successful bulk update of multiple channels"""
-        data = [
-            {"id": self.channel1.id, "name": "Updated Channel 1"},
-            {"id": self.channel2.id, "name": "Updated Channel 2", "channel_number": 22.0},
-        ]
-
-        response = self.client.patch(self.bulk_edit_url, data, format="json")
-
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(response.data["message"], "Successfully updated 2 channels")
-        self.assertEqual(len(response.data["channels"]), 2)
-
-        # Verify database changes
-        self.channel1.refresh_from_db()
-        self.channel2.refresh_from_db()
-        self.assertEqual(self.channel1.name, "Updated Channel 1")
-        self.assertEqual(self.channel2.name, "Updated Channel 2")
-        self.assertEqual(self.channel2.channel_number, 22.0)
-
-    def test_bulk_edit_with_empty_validated_data_first(self):
-        """
-        Test the bug fix: when first channel has empty validated_data.
-        This was causing: ValueError: Field names must be given to bulk_update()
-        """
-        # Create a channel with data that will be "unchanged" (empty validated_data)
-        # We'll send the same data it already has
-        data = [
-            # First channel: no actual changes (this would create empty validated_data)
-            {"id": self.channel1.id},
-            # Second channel: has changes
-            {"id": self.channel2.id, "name": "Updated Channel 2"},
-        ]
-
-        response = self.client.patch(self.bulk_edit_url, data, format="json")
-
-        # Should not crash with ValueError
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(response.data["message"], "Successfully updated 2 channels")
-
-        # Verify the channel with changes was updated
-        self.channel2.refresh_from_db()
-        self.assertEqual(self.channel2.name, "Updated Channel 2")
-
-    def test_bulk_edit_all_empty_updates(self):
-        """Test when all channels have empty updates (no actual changes)"""
-        data = [
-            {"id": self.channel1.id},
-            {"id": self.channel2.id},
-        ]
-
-        response = self.client.patch(self.bulk_edit_url, data, format="json")
-
-        # Should succeed without calling bulk_update
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(response.data["message"], "Successfully updated 2 channels")
-
-    def test_bulk_edit_mixed_fields(self):
-        """Test bulk update where different channels update different fields"""
-        data = [
-            {"id": self.channel1.id, "name": "New Name 1"},
-            {"id": self.channel2.id, "channel_number": 99.0},
-            {"id": self.channel3.id, "tvg_id": "new_tvg_id", "name": "New Name 3"},
-        ]
-
-        response = self.client.patch(self.bulk_edit_url, data, format="json")
-
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(response.data["message"], "Successfully updated 3 channels")
-
-        # Verify all updates
-        self.channel1.refresh_from_db()
-        self.channel2.refresh_from_db()
-        self.channel3.refresh_from_db()
-
-        self.assertEqual(self.channel1.name, "New Name 1")
-        self.assertEqual(self.channel2.channel_number, 99.0)
-        self.assertEqual(self.channel3.tvg_id, "new_tvg_id")
-        self.assertEqual(self.channel3.name, "New Name 3")
-
-    def test_bulk_edit_with_channel_group(self):
-        """Test bulk update with channel_group_id changes"""
-        data = [
-            {"id": self.channel1.id, "channel_group_id": self.group2.id},
-            {"id": self.channel3.id, "channel_group_id": self.group1.id},
-        ]
-
-        response = self.client.patch(self.bulk_edit_url, data, format="json")
-
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-
-        # Verify group changes
-        self.channel1.refresh_from_db()
-        self.channel3.refresh_from_db()
-        self.assertEqual(self.channel1.channel_group, self.group2)
-        self.assertEqual(self.channel3.channel_group, self.group1)
-
-    def test_bulk_edit_nonexistent_channel(self):
-        """Test bulk update with a channel that doesn't exist"""
-        nonexistent_id = 99999
-        data = [
-            {"id": nonexistent_id, "name": "Should Fail"},
-            {"id": self.channel1.id, "name": "Should Still Update"},
-        ]
-
-        response = self.client.patch(self.bulk_edit_url, data, format="json")
-
-        # Should return 400 with errors
-        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        self.assertIn("errors", response.data)
-        self.assertEqual(len(response.data["errors"]), 1)
-        self.assertEqual(response.data["errors"][0]["channel_id"], nonexistent_id)
-        self.assertEqual(response.data["errors"][0]["error"], "Channel not found")
-
-        # The valid channel should still be updated
-        self.assertEqual(response.data["updated_count"], 1)
-
-    def test_bulk_edit_validation_error(self):
-        """Test bulk update with invalid data (validation error)"""
-        data = [
-            {"id": self.channel1.id, "channel_number": "invalid_number"},
-        ]
-
-        response = self.client.patch(self.bulk_edit_url, data, format="json")
-
-        # Should return 400 with validation errors
-        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        self.assertIn("errors", response.data)
-        self.assertEqual(len(response.data["errors"]), 1)
-        self.assertIn("channel_number", response.data["errors"][0]["errors"])
-
-    def test_bulk_edit_empty_channel_updates(self):
-        """Test bulk update with empty list"""
-        data = []
-
-        response = self.client.patch(self.bulk_edit_url, data, format="json")
-
-        # Empty list is accepted and returns success with 0 updates
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(response.data["message"], "Successfully updated 0 channels")
-
-    def test_bulk_edit_missing_channel_updates(self):
-        """Test bulk update without proper format (dict instead of list)"""
-        data = {"channel_updates": {}}
-
-        response = self.client.patch(self.bulk_edit_url, data, format="json")
-
-        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        self.assertEqual(response.data["error"], "Expected a list of channel updates")
-
-    def test_bulk_edit_preserves_other_fields(self):
-        """Test that bulk update only changes specified fields"""
-        original_channel_number = self.channel1.channel_number
-        original_tvg_id = self.channel1.tvg_id
-
-        data = [
-            {"id": self.channel1.id, "name": "Only Name Changed"},
-        ]
-
-        response = self.client.patch(self.bulk_edit_url, data, format="json")
-
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-
-        # Verify only name changed, other fields preserved
-        self.channel1.refresh_from_db()
-        self.assertEqual(self.channel1.name, "Only Name Changed")
-        self.assertEqual(self.channel1.channel_number, original_channel_number)
-        self.assertEqual(self.channel1.tvg_id, original_tvg_id)
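The deleted tests pin down a concrete Django edge case: bulk_update() raises ValueError: Field names must be given to bulk_update() when the field list is empty, so a payload whose first item changes nothing could crash the whole batch. The endpoint's actual code is not in this excerpt; a guard along these lines is the likely shape of the fix:

def bulk_apply(Channel, updates):
    instances, fields = [], set()
    for item in updates:
        obj = Channel.objects.get(id=item.pop("id"))
        for name, value in item.items():
            setattr(obj, name, value)
            fields.add(name)
        instances.append(obj)
    # Guard: an all-no-op payload yields an empty field set, and
    # Channel.objects.bulk_update(instances, []) would raise ValueError.
    if fields:
        Channel.objects.bulk_update(instances, list(fields))
    return len(instances)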
@@ -1,40 +0,0 @@
-from datetime import datetime, timedelta
-from django.test import TestCase
-from django.utils import timezone
-
-from apps.channels.models import Channel, RecurringRecordingRule, Recording
-from apps.channels.tasks import sync_recurring_rule_impl, purge_recurring_rule_impl
-
-
-class RecurringRecordingRuleTasksTests(TestCase):
-    def test_sync_recurring_rule_creates_and_purges_recordings(self):
-        now = timezone.now()
-        channel = Channel.objects.create(channel_number=1, name='Test Channel')
-
-        start_time = (now + timedelta(minutes=15)).time().replace(second=0, microsecond=0)
-        end_time = (now + timedelta(minutes=75)).time().replace(second=0, microsecond=0)
-
-        rule = RecurringRecordingRule.objects.create(
-            channel=channel,
-            days_of_week=[now.weekday()],
-            start_time=start_time,
-            end_time=end_time,
-        )
-
-        created = sync_recurring_rule_impl(rule.id, drop_existing=True, horizon_days=1)
-        self.assertEqual(created, 1)
-
-        recording = Recording.objects.filter(custom_properties__rule__id=rule.id).first()
-        self.assertIsNotNone(recording)
-        self.assertEqual(recording.channel, channel)
-        self.assertEqual(recording.custom_properties.get('rule', {}).get('id'), rule.id)
-
-        expected_start = timezone.make_aware(
-            datetime.combine(recording.start_time.date(), start_time),
-            timezone.get_current_timezone(),
-        )
-        self.assertLess(abs((recording.start_time - expected_start).total_seconds()), 60)
-
-        removed = purge_recurring_rule_impl(rule.id)
-        self.assertEqual(removed, 1)
-        self.assertFalse(Recording.objects.filter(custom_properties__rule__id=rule.id).exists())
@@ -147,37 +147,23 @@ class EPGGridAPIView(APIView):
             f"EPGGridAPIView: Found {count} program(s), including recently ended, currently running, and upcoming shows."
         )
 
-        # Generate dummy programs for channels that have no EPG data OR dummy EPG sources
+        # Generate dummy programs for channels that have no EPG data
         from apps.channels.models import Channel
-        from apps.epg.models import EPGSource
         from django.db.models import Q
 
-        # Get channels with no EPG data at all (standard dummy)
+        # Get channels with no EPG data
         channels_without_epg = Channel.objects.filter(Q(epg_data__isnull=True))
+        channels_count = channels_without_epg.count()
 
-        # Get channels with custom dummy EPG sources (generate on-demand with patterns)
-        channels_with_custom_dummy = Channel.objects.filter(
-            epg_data__epg_source__source_type='dummy'
-        ).distinct()
-
-        # Log what we found
-        without_count = channels_without_epg.count()
-        custom_count = channels_with_custom_dummy.count()
-
-        if without_count > 0:
+        # Log more detailed information about channels missing EPG data
+        if channels_count > 0:
             channel_names = [f"{ch.name} (ID: {ch.id})" for ch in channels_without_epg]
-            logger.debug(
-                f"EPGGridAPIView: Channels needing standard dummy EPG: {', '.join(channel_names)}"
-            )
-
-        if custom_count > 0:
-            channel_names = [f"{ch.name} (ID: {ch.id})" for ch in channels_with_custom_dummy]
-            logger.debug(
-                f"EPGGridAPIView: Channels needing custom dummy EPG: {', '.join(channel_names)}"
+            logger.warning(
+                f"EPGGridAPIView: Missing EPG data for these channels: {', '.join(channel_names)}"
             )
 
         logger.debug(
-            f"EPGGridAPIView: Found {without_count} channels needing standard dummy, {custom_count} needing custom dummy EPG."
+            f"EPGGridAPIView: Found {channels_count} channels with no EPG data."
        )
 
         # Serialize the regular programs
@@ -219,91 +205,12 @@ class EPGGridAPIView(APIView):
 
         # Generate and append dummy programs
         dummy_programs = []
 
-        # Import the function from output.views
-        from apps.output.views import generate_dummy_programs as gen_dummy_progs
-
-        # Handle channels with CUSTOM dummy EPG sources (with patterns)
-        for channel in channels_with_custom_dummy:
-            # For dummy EPGs, ALWAYS use channel UUID to ensure unique programs per channel
-            # This prevents multiple channels assigned to the same dummy EPG from showing identical data
-            # Each channel gets its own unique program data even if they share the same EPG source
-            dummy_tvg_id = str(channel.uuid)
-
-            try:
-                # Get the custom dummy EPG source
-                epg_source = channel.epg_data.epg_source if channel.epg_data else None
-
-                logger.debug(f"Generating custom dummy programs for channel: {channel.name} (ID: {channel.id})")
-
-                # Determine which name to parse based on custom properties
-                name_to_parse = channel.name
-                if epg_source and epg_source.custom_properties:
-                    custom_props = epg_source.custom_properties
-                    name_source = custom_props.get('name_source')
-
-                    if name_source == 'stream':
-                        # Get the stream index (1-based from user, convert to 0-based)
-                        stream_index = custom_props.get('stream_index', 1) - 1
-
-                        # Get streams ordered by channelstream order
-                        channel_streams = channel.streams.all().order_by('channelstream__order')
-
-                        if channel_streams.exists() and 0 <= stream_index < channel_streams.count():
-                            stream = list(channel_streams)[stream_index]
-                            name_to_parse = stream.name
-                            logger.debug(f"Using stream name for parsing: {name_to_parse} (stream index: {stream_index})")
-                        else:
-                            logger.warning(f"Stream index {stream_index} not found for channel {channel.name}, falling back to channel name")
-                    elif name_source == 'channel':
-                        logger.debug(f"Using channel name for parsing: {name_to_parse}")
-
-                # Generate programs using custom patterns from the dummy EPG source
-                # Use the same tvg_id that will be set in the program data
-                generated = gen_dummy_progs(
-                    channel_id=dummy_tvg_id,
-                    channel_name=name_to_parse,
-                    num_days=1,
-                    program_length_hours=4,
-                    epg_source=epg_source
-                )
-
-                # Custom dummy should always return data (either from patterns or fallback)
-                if generated:
-                    logger.debug(f"Generated {len(generated)} custom dummy programs for {channel.name}")
-                    # Convert generated programs to API format
-                    for program in generated:
-                        dummy_program = {
-                            "id": f"dummy-custom-{channel.id}-{program['start_time'].hour}",
-                            "epg": {"tvg_id": dummy_tvg_id, "name": channel.name},
-                            "start_time": program['start_time'].isoformat(),
-                            "end_time": program['end_time'].isoformat(),
-                            "title": program['title'],
-                            "description": program['description'],
-                            "tvg_id": dummy_tvg_id,
-                            "sub_title": None,
-                            "custom_properties": None,
-                        }
-                        dummy_programs.append(dummy_program)
-                else:
-                    logger.warning(f"No programs generated for custom dummy EPG channel: {channel.name}")
-
-            except Exception as e:
-                logger.error(
-                    f"Error creating custom dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}"
-                )
-
-        # Handle channels with NO EPG data (standard dummy with humorous descriptions)
         for channel in channels_without_epg:
-            # For channels with no EPG, use UUID to ensure uniqueness (matches frontend logic)
-            # The frontend uses: tvgRecord?.tvg_id ?? channel.uuid
-            # Since there's no EPG data, it will fall back to UUID
+            # Use the channel UUID as tvg_id for dummy programs to match in the guide
             dummy_tvg_id = str(channel.uuid)
 
             try:
-                logger.debug(f"Generating standard dummy programs for channel: {channel.name} (ID: {channel.id})")
-
-                # Create programs every 4 hours for the next 24 hours with humorous descriptions
+                # Create programs every 4 hours for the next 24 hours
                 for hour_offset in range(0, 24, 4):
                     # Use timedelta for time arithmetic instead of replace() to avoid hour overflow
                     start_time = now + timedelta(hours=hour_offset)
@@ -331,7 +238,7 @@ class EPGGridAPIView(APIView):
 
                     # Create a dummy program in the same format as regular programs
                     dummy_program = {
-                        "id": f"dummy-standard-{channel.id}-{hour_offset}",
+                        "id": f"dummy-{channel.id}-{hour_offset}",  # Create a unique ID
                         "epg": {"tvg_id": dummy_tvg_id, "name": channel.name},
                         "start_time": start_time.isoformat(),
                         "end_time": end_time.isoformat(),
@@ -345,7 +252,7 @@ class EPGGridAPIView(APIView):
 
             except Exception as e:
                 logger.error(
-                    f"Error creating standard dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}"
+                    f"Error creating dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}"
                 )
 
         # Combine regular and dummy programs
@@ -377,22 +284,7 @@ class EPGImportAPIView(APIView):
     )
     def post(self, request, format=None):
         logger.info("EPGImportAPIView: Received request to import EPG data.")
-        epg_id = request.data.get("id", None)
-
-        # Check if this is a dummy EPG source
-        try:
-            from .models import EPGSource
-            epg_source = EPGSource.objects.get(id=epg_id)
-            if epg_source.source_type == 'dummy':
-                logger.info(f"EPGImportAPIView: Skipping refresh for dummy EPG source {epg_id}")
-                return Response(
-                    {"success": False, "message": "Dummy EPG sources do not require refreshing."},
-                    status=status.HTTP_400_BAD_REQUEST,
-                )
-        except EPGSource.DoesNotExist:
-            pass  # Let the task handle the missing source
-
-        refresh_epg_data.delay(epg_id)  # Trigger Celery task
+        refresh_epg_data.delay(request.data.get("id", None))  # Trigger Celery task
         logger.info("EPGImportAPIView: Task dispatched to refresh EPG data.")
         return Response(
             {"success": True, "message": "EPG data import initiated."},
@@ -416,4 +308,3 @@ class EPGDataViewSet(viewsets.ReadOnlyModelViewSet):
             return [perm() for perm in permission_classes_by_action[self.action]]
         except KeyError:
             return [Authenticated()]
-
@@ -1,18 +0,0 @@
-# Generated by Django 5.2.4 on 2025-09-02 14:30
-
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('epg', '0014_epgsource_extracted_file_path'),
-    ]
-
-    operations = [
-        migrations.AlterField(
-            model_name='programdata',
-            name='custom_properties',
-            field=models.JSONField(blank=True, default=dict, null=True),
-        ),
-    ]
@@ -1,18 +0,0 @@
-# Generated by Django 5.2.4 on 2025-09-16 22:01
-
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('epg', '0015_alter_programdata_custom_properties'),
-    ]
-
-    operations = [
-        migrations.AddField(
-            model_name='epgdata',
-            name='icon_url',
-            field=models.URLField(blank=True, max_length=500, null=True),
-        ),
-    ]
@@ -1,18 +0,0 @@
-# Generated by Django 5.2.4 on 2025-09-24 21:07
-
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('epg', '0016_epgdata_icon_url'),
-    ]
-
-    operations = [
-        migrations.AlterField(
-            model_name='epgsource',
-            name='url',
-            field=models.URLField(blank=True, max_length=1000, null=True),
-        ),
-    ]
@@ -1,23 +0,0 @@
-# Generated by Django 5.2.4 on 2025-10-17 17:02
-
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('epg', '0017_alter_epgsource_url'),
-    ]
-
-    operations = [
-        migrations.AddField(
-            model_name='epgsource',
-            name='custom_properties',
-            field=models.JSONField(blank=True, default=dict, help_text='Custom properties for dummy EPG configuration (regex patterns, timezone, duration, etc.)', null=True),
-        ),
-        migrations.AlterField(
-            model_name='epgsource',
-            name='source_type',
-            field=models.CharField(choices=[('xmltv', 'XMLTV URL'), ('schedules_direct', 'Schedules Direct API'), ('dummy', 'Custom Dummy EPG')], max_length=20),
-        ),
-    ]
@@ -1,18 +0,0 @@
-# Generated by Django 5.2.4 on 2025-10-22 21:59
-
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('epg', '0018_epgsource_custom_properties_and_more'),
-    ]
-
-    operations = [
-        migrations.AlterField(
-            model_name='programdata',
-            name='sub_title',
-            field=models.TextField(blank=True, null=True),
-        ),
-    ]
@@ -1,119 +0,0 @@
-# Generated migration to replace {time} placeholders with {starttime}
-
-import re
-from django.db import migrations
-
-
-def migrate_time_placeholders(apps, schema_editor):
-    """
-    Replace {time} with {starttime} and {time24} with {starttime24}
-    in all dummy EPG source custom_properties templates.
-    """
-    EPGSource = apps.get_model('epg', 'EPGSource')
-
-    # Fields that contain templates with placeholders
-    template_fields = [
-        'title_template',
-        'description_template',
-        'upcoming_title_template',
-        'upcoming_description_template',
-        'ended_title_template',
-        'ended_description_template',
-        'channel_logo_url',
-        'program_poster_url',
-    ]
-
-    # Get all dummy EPG sources
-    dummy_sources = EPGSource.objects.filter(source_type='dummy')
-
-    updated_count = 0
-    for source in dummy_sources:
-        if not source.custom_properties:
-            continue
-
-        modified = False
-        custom_props = source.custom_properties.copy()
-
-        for field in template_fields:
-            if field in custom_props and custom_props[field]:
-                original_value = custom_props[field]
-
-                # Replace {time24} first (before {time}) to avoid double replacement
-                # e.g., {time24} shouldn't become {starttime24} via {time} -> {starttime}
-                new_value = original_value
-                new_value = re.sub(r'\{time24\}', '{starttime24}', new_value)
-                new_value = re.sub(r'\{time\}', '{starttime}', new_value)
-
-                if new_value != original_value:
-                    custom_props[field] = new_value
-                    modified = True
-
-        if modified:
-            source.custom_properties = custom_props
-            source.save(update_fields=['custom_properties'])
-            updated_count += 1
-
-    if updated_count > 0:
-        print(f"Migration complete: Updated {updated_count} dummy EPG source(s) with new placeholder names.")
-    else:
-        print("No dummy EPG sources needed placeholder updates.")
-
-
-def reverse_migration(apps, schema_editor):
-    """
-    Reverse the migration by replacing {starttime} back to {time}.
-    """
-    EPGSource = apps.get_model('epg', 'EPGSource')
-
-    template_fields = [
-        'title_template',
-        'description_template',
-        'upcoming_title_template',
-        'upcoming_description_template',
-        'ended_title_template',
-        'ended_description_template',
-        'channel_logo_url',
-        'program_poster_url',
-    ]
-
-    dummy_sources = EPGSource.objects.filter(source_type='dummy')
-
-    updated_count = 0
-    for source in dummy_sources:
-        if not source.custom_properties:
-            continue
-
-        modified = False
-        custom_props = source.custom_properties.copy()
-
-        for field in template_fields:
-            if field in custom_props and custom_props[field]:
-                original_value = custom_props[field]
-
-                # Reverse the replacements
-                new_value = original_value
-                new_value = re.sub(r'\{starttime24\}', '{time24}', new_value)
-                new_value = re.sub(r'\{starttime\}', '{time}', new_value)
-
-                if new_value != original_value:
-                    custom_props[field] = new_value
-                    modified = True
-
-        if modified:
-            source.custom_properties = custom_props
-            source.save(update_fields=['custom_properties'])
-            updated_count += 1
-
-    if updated_count > 0:
-        print(f"Reverse migration complete: Reverted {updated_count} dummy EPG source(s) to old placeholder names.")
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('epg', '0019_alter_programdata_sub_title'),
-    ]
-
-    operations = [
-        migrations.RunPython(migrate_time_placeholders, reverse_migration),
-    ]
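The data migration above renames placeholders with two anchored regex passes, {time24} before {time}. A quick standalone check of the transformation (the template text is invented for illustration):

import re

def rename_placeholders(text: str) -> str:
    # Mirror the migration's ordering: the longer placeholder first.
    text = re.sub(r'\{time24\}', '{starttime24}', text)
    text = re.sub(r'\{time\}', '{starttime}', text)
    return text

tpl = "Now: {time} ({time24}) on {channel_name}"
print(rename_placeholders(tpl))  # Now: {starttime} ({starttime24}) on {channel_name}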
@@ -1,18 +0,0 @@
-# Generated by Django 5.2.4 on 2025-12-05 15:24
-
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('epg', '0020_migrate_time_to_starttime_placeholders'),
-    ]
-
-    operations = [
-        migrations.AddField(
-            model_name='epgsource',
-            name='priority',
-            field=models.PositiveIntegerField(default=0, help_text='Priority for EPG matching (higher numbers = higher priority). Used when multiple EPG sources have matching entries for a channel.'),
-        ),
-    ]
@@ -8,7 +8,6 @@ class EPGSource(models.Model):
     SOURCE_TYPE_CHOICES = [
         ('xmltv', 'XMLTV URL'),
         ('schedules_direct', 'Schedules Direct API'),
-        ('dummy', 'Custom Dummy EPG'),
     ]
 
     STATUS_IDLE = 'idle'
@@ -29,7 +28,7 @@ class EPGSource(models.Model):
 
     name = models.CharField(max_length=255, unique=True)
     source_type = models.CharField(max_length=20, choices=SOURCE_TYPE_CHOICES)
-    url = models.URLField(max_length=1000, blank=True, null=True)  # For XMLTV
+    url = models.URLField(blank=True, null=True)  # For XMLTV
     api_key = models.CharField(max_length=255, blank=True, null=True)  # For Schedules Direct
     is_active = models.BooleanField(default=True)
     file_path = models.CharField(max_length=1024, blank=True, null=True)
@@ -39,16 +38,6 @@ class EPGSource(models.Model):
     refresh_task = models.ForeignKey(
         PeriodicTask, on_delete=models.SET_NULL, null=True, blank=True
     )
-    custom_properties = models.JSONField(
-        default=dict,
-        blank=True,
-        null=True,
-        help_text="Custom properties for dummy EPG configuration (regex patterns, timezone, duration, etc.)"
-    )
-    priority = models.PositiveIntegerField(
-        default=0,
-        help_text="Priority for EPG matching (higher numbers = higher priority). Used when multiple EPG sources have matching entries for a channel."
-    )
     status = models.CharField(
         max_length=20,
         choices=STATUS_CHOICES,
@@ -138,7 +127,6 @@ class EPGData(models.Model):
     # and a name (which might simply be the tvg_id if no real channel exists).
     tvg_id = models.CharField(max_length=255, null=True, blank=True, db_index=True)
     name = models.CharField(max_length=255)
-    icon_url = models.URLField(max_length=500, null=True, blank=True)
     epg_source = models.ForeignKey(
         EPGSource,
         on_delete=models.CASCADE,
@@ -159,10 +147,10 @@ class ProgramData(models.Model):
     start_time = models.DateTimeField()
     end_time = models.DateTimeField()
     title = models.CharField(max_length=255)
-    sub_title = models.TextField(blank=True, null=True)
+    sub_title = models.CharField(max_length=255, blank=True, null=True)
     description = models.TextField(blank=True, null=True)
     tvg_id = models.CharField(max_length=255, null=True, blank=True)
-    custom_properties = models.JSONField(default=dict, blank=True, null=True)
+    custom_properties = models.TextField(null=True, blank=True)
 
     def __str__(self):
         return f"{self.title} ({self.start_time} - {self.end_time})"
@@ -1,17 +1,10 @@
-from core.utils import validate_flexible_url
 from rest_framework import serializers
 from .models import EPGSource, EPGData, ProgramData
 from apps.channels.models import Channel
 
 class EPGSourceSerializer(serializers.ModelSerializer):
-    epg_data_count = serializers.SerializerMethodField()
+    epg_data_ids = serializers.SerializerMethodField()
     read_only_fields = ['created_at', 'updated_at']
-    url = serializers.CharField(
-        required=False,
-        allow_blank=True,
-        allow_null=True,
-        validators=[validate_flexible_url]
-    )
 
     class Meta:
         model = EPGSource
@@ -24,18 +17,15 @@ class EPGSourceSerializer(serializers.ModelSerializer):
             'is_active',
             'file_path',
             'refresh_interval',
-            'priority',
             'status',
             'last_message',
             'created_at',
             'updated_at',
-            'custom_properties',
-            'epg_data_count'
+            'epg_data_ids'
         ]
 
-    def get_epg_data_count(self, obj):
-        """Return the count of EPG data entries instead of all IDs to prevent large payloads"""
-        return obj.epgs.count()
+    def get_epg_data_ids(self, obj):
+        return list(obj.epgs.values_list('id', flat=True))
 
 class ProgramDataSerializer(serializers.ModelSerializer):
     class Meta:
@@ -55,6 +45,5 @@ class EPGDataSerializer(serializers.ModelSerializer):
             'id',
             'tvg_id',
             'name',
-            'icon_url',
             'epg_source',
         ]
@@ -1,9 +1,9 @@
 from django.db.models.signals import post_save, post_delete, pre_save
 from django.dispatch import receiver
-from .models import EPGSource, EPGData
+from .models import EPGSource
 from .tasks import refresh_epg_data, delete_epg_refresh_task_by_id
 from django_celery_beat.models import PeriodicTask, IntervalSchedule
-from core.utils import is_protected_path, send_websocket_update
+from core.utils import is_protected_path
 import json
 import logging
 import os
@@ -12,77 +12,15 @@ logger = logging.getLogger(__name__)
 
 @receiver(post_save, sender=EPGSource)
 def trigger_refresh_on_new_epg_source(sender, instance, created, **kwargs):
-    # Trigger refresh only if the source is newly created, active, and not a dummy EPG
-    if created and instance.is_active and instance.source_type != 'dummy':
+    # Trigger refresh only if the source is newly created and active
+    if created and instance.is_active:
         refresh_epg_data.delay(instance.id)
 
-@receiver(post_save, sender=EPGSource)
-def create_dummy_epg_data(sender, instance, created, **kwargs):
-    """
-    Automatically create EPGData for dummy EPG sources when they are created.
-    This allows channels to be assigned to dummy EPGs immediately without
-    requiring a refresh first.
-    """
-    if instance.source_type == 'dummy':
-        # Ensure dummy EPGs always have idle status and no status message
-        if instance.status != EPGSource.STATUS_IDLE or instance.last_message:
-            instance.status = EPGSource.STATUS_IDLE
-            instance.last_message = None
-            instance.save(update_fields=['status', 'last_message'])
-
-        # Create a URL-friendly tvg_id from the dummy EPG name
-        # Replace spaces and special characters with underscores
-        friendly_tvg_id = instance.name.replace(' ', '_').replace('-', '_')
-        # Remove any characters that aren't alphanumeric or underscores
-        friendly_tvg_id = ''.join(c for c in friendly_tvg_id if c.isalnum() or c == '_')
-        # Convert to lowercase for consistency
-        friendly_tvg_id = friendly_tvg_id.lower()
-        # Prefix with 'dummy_' to make it clear this is a dummy EPG
-        friendly_tvg_id = f"dummy_{friendly_tvg_id}"
-
-        # Create or update the EPGData record
-        epg_data, data_created = EPGData.objects.get_or_create(
-            tvg_id=friendly_tvg_id,
-            epg_source=instance,
-            defaults={
-                'name': instance.name,
-                'icon_url': None
-            }
-        )
-
-        # Update name if it changed and record already existed
-        if not data_created and epg_data.name != instance.name:
-            epg_data.name = instance.name
-            epg_data.save(update_fields=['name'])
-
-        if data_created:
-            logger.info(f"Auto-created EPGData for dummy EPG source: {instance.name} (ID: {instance.id})")
-
-            # Send websocket update to notify frontend that EPG data has been created
-            # This allows the channel form to immediately show the new dummy EPG without refreshing
-            send_websocket_update('updates', 'update', {
-                'type': 'epg_data_created',
-                'source_id': instance.id,
-                'source_name': instance.name,
-                'epg_data_id': epg_data.id
-            })
-        else:
-            logger.debug(f"EPGData already exists for dummy EPG source: {instance.name} (ID: {instance.id})")
-
 @receiver(post_save, sender=EPGSource)
 def create_or_update_refresh_task(sender, instance, **kwargs):
     """
     Create or update a Celery Beat periodic task when an EPGSource is created/updated.
-    Skip creating tasks for dummy EPG sources as they don't need refreshing.
     """
-    # Skip task creation for dummy EPGs
-    if instance.source_type == 'dummy':
-        # If there's an existing task, disable it
-        if instance.refresh_task:
-            instance.refresh_task.enabled = False
-            instance.refresh_task.save(update_fields=['enabled'])
-        return
-
     task_name = f"epg_source-refresh-{instance.id}"
     interval, _ = IntervalSchedule.objects.get_or_create(
|
||||
every=int(instance.refresh_interval),
|
||||
|
|
@ -142,14 +80,7 @@ def delete_refresh_task(sender, instance, **kwargs):
|
|||
def update_status_on_active_change(sender, instance, **kwargs):
|
||||
"""
|
||||
When an EPGSource's is_active field changes, update the status accordingly.
|
||||
For dummy EPGs, always ensure status is idle and no status message.
|
||||
"""
|
||||
# Dummy EPGs should always be idle with no status message
|
||||
if instance.source_type == 'dummy':
|
||||
instance.status = EPGSource.STATUS_IDLE
|
||||
instance.last_message = None
|
||||
return
|
||||
|
||||
if instance.pk: # Only for existing records, not new ones
|
||||
try:
|
||||
# Get the current record from the database
|
||||
|
|
|
|||
|
|
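A worked example of the friendly tvg_id derivation removed above, using a hypothetical source name:

    name = "My Sports - HD!"
    tvg_id = name.replace(' ', '_').replace('-', '_')               # "My_Sports___HD!"
    tvg_id = ''.join(c for c in tvg_id if c.isalnum() or c == '_')  # "My_Sports___HD"
    tvg_id = f"dummy_{tvg_id.lower()}"                              # "dummy_my_sports___hd"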
@@ -24,30 +24,11 @@ from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer

from .models import EPGSource, EPGData, ProgramData
-from core.utils import acquire_task_lock, release_task_lock, send_websocket_update, cleanup_memory, log_system_event
+from core.utils import acquire_task_lock, release_task_lock, send_websocket_update, cleanup_memory

logger = logging.getLogger(__name__)


-def validate_icon_url_fast(icon_url, max_length=None):
-   """
-   Fast validation for icon URLs during parsing.
-   Returns None if URL is too long, original URL otherwise.
-   If max_length is None, gets it dynamically from the EPGData model field.
-   """
-   if max_length is None:
-       # Get max_length dynamically from the model field
-       max_length = EPGData._meta.get_field('icon_url').max_length
-
-   if icon_url and len(icon_url) > max_length:
-       logger.warning(f"Icon URL too long ({len(icon_url)} > {max_length}), skipping: {icon_url[:100]}...")
-       return None
-   return icon_url
-
-
-MAX_EXTRACT_CHUNK_SIZE = 65536  # 64kb (base2)


def send_epg_update(source_id, action, progress, **kwargs):
    """Send WebSocket update about EPG download/parsing progress"""
    # Start with the base data dictionary

@@ -133,9 +114,8 @@ def delete_epg_refresh_task_by_id(epg_id):
@shared_task
def refresh_all_epg_data():
    logger.info("Starting refresh_epg_data task.")
-   # Exclude dummy EPG sources from refresh - they don't need refreshing
-   active_sources = EPGSource.objects.filter(is_active=True).exclude(source_type='dummy')
-   logger.debug(f"Found {active_sources.count()} active EPGSource(s) (excluding dummy EPGs).")
+   active_sources = EPGSource.objects.filter(is_active=True)
+   logger.debug(f"Found {active_sources.count()} active EPGSource(s).")

    for source in active_sources:
        refresh_epg_data(source.id)

@@ -181,13 +161,6 @@ def refresh_epg_data(source_id):
        gc.collect()
        return

-   # Skip refresh for dummy EPG sources - they don't need refreshing
-   if source.source_type == 'dummy':
-       logger.info(f"Skipping refresh for dummy EPG source {source.name} (ID: {source_id})")
-       release_task_lock('refresh_epg_data', source_id)
-       gc.collect()
-       return
-
    # Continue with the normal processing...
    logger.info(f"Processing EPGSource: {source.name} (type: {source.source_type})")
    if source.source_type == 'xmltv':

@@ -213,12 +186,6 @@
            fetch_schedules_direct(source)

        source.save(update_fields=['updated_at'])
-       # After successful EPG refresh, evaluate DVR series rules to schedule new episodes
-       try:
-           from apps.channels.tasks import evaluate_series_rules
-           evaluate_series_rules.delay()
-       except Exception:
-           pass
    except Exception as e:
        logger.error(f"Error in refresh_epg_data for source {source_id}: {e}", exc_info=True)
        try:

@@ -286,12 +253,11 @@ def fetch_xmltv(source):
    logger.info(f"Fetching XMLTV data from source: {source.name}")
    try:
        # Get default user agent from settings
-       stream_settings = CoreSettings.get_stream_settings()
+       default_user_agent_setting = CoreSettings.objects.filter(key='default-user-agent').first()
        user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:138.0) Gecko/20100101 Firefox/138.0"  # Fallback default
-       default_user_agent_id = stream_settings.get('default_user_agent')
-       if default_user_agent_id:
+       if default_user_agent_setting and default_user_agent_setting.value:
            try:
-               user_agent_obj = UserAgent.objects.filter(id=int(default_user_agent_id)).first()
+               user_agent_obj = UserAgent.objects.filter(id=int(default_user_agent_setting.value)).first()
                if user_agent_obj and user_agent_obj.user_agent:
                    user_agent = user_agent_obj.user_agent
                    logger.debug(f"Using default user agent: {user_agent}")
@@ -675,11 +641,7 @@ def extract_compressed_file(file_path, output_path=None, delete_original=False):
            # Reset file pointer and extract the content
            gz_file.seek(0)
            with open(extracted_path, 'wb') as out_file:
-               while True:
-                   chunk = gz_file.read(MAX_EXTRACT_CHUNK_SIZE)
-                   if not chunk or len(chunk) == 0:
-                       break
-                   out_file.write(chunk)
+               out_file.write(gz_file.read())
    except Exception as e:
        logger.error(f"Error extracting GZIP file: {e}", exc_info=True)
        return None

@@ -723,13 +685,9 @@ def extract_compressed_file(file_path, output_path=None, delete_original=False):
            return None

        # Extract the first XML file
+       xml_content = zip_file.read(xml_files[0])
        with open(extracted_path, 'wb') as out_file:
-           with zip_file.open(xml_files[0], "r") as xml_file:
-               while True:
-                   chunk = xml_file.read(MAX_EXTRACT_CHUNK_SIZE)
-                   if not chunk or len(chunk) == 0:
-                       break
-                   out_file.write(chunk)
+           out_file.write(xml_content)

        logger.info(f"Successfully extracted zip file to: {extracted_path}")
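The v0.6.2 side reads the whole archive into memory at once; the main side streams it in bounded chunks. A minimal standalone sketch (not repository code) of the chunked pattern, with the 64 KiB size mirroring MAX_EXTRACT_CHUNK_SIZE:

    import gzip

    def extract_gzip(src_path: str, dst_path: str, chunk_size: int = 65536) -> None:
        with gzip.open(src_path, 'rb') as gz_file, open(dst_path, 'wb') as out_file:
            while True:
                chunk = gz_file.read(chunk_size)  # bounded read keeps memory flat
                if not chunk:
                    break
                out_file.write(chunk)

Peak memory stays near chunk_size regardless of how large the decompressed XML is, which matters for multi-hundred-megabyte EPG feeds.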
@@ -857,7 +815,6 @@ def parse_channels_only(source):
    processed_channels = 0
    batch_size = 500  # Process in batches to limit memory usage
    progress = 0  # Initialize progress variable here
-   icon_url_max_length = EPGData._meta.get_field('icon_url').max_length  # Get max length for icon_url field

    # Track memory at key points
    if process:

@@ -886,7 +843,7 @@ def parse_channels_only(source):

    # Change iterparse to look for both channel and programme elements
    logger.debug(f"Creating iterparse context for channels and programmes")
-   channel_parser = etree.iterparse(source_file, events=('end',), tag=('channel', 'programme'), remove_blank_text=True, recover=True)
+   channel_parser = etree.iterparse(source_file, events=('end',), tag=('channel', 'programme'), remove_blank_text=True)
    if process:
        logger.debug(f"[parse_channels_only] Memory after creating iterparse: {process.memory_info().rss / 1024 / 1024:.2f} MB")

@@ -900,15 +857,10 @@ def parse_channels_only(source):
            tvg_id = elem.get('id', '').strip()
            if tvg_id:
                display_name = None
-               icon_url = None
                for child in elem:
-                   if display_name is None and child.tag == 'display-name' and child.text:
+                   if child.tag == 'display-name' and child.text:
                        display_name = child.text.strip()
-                   elif child.tag == 'icon':
-                       raw_icon_url = child.get('src', '').strip()
-                       icon_url = validate_icon_url_fast(raw_icon_url, icon_url_max_length)
-                   if display_name and icon_url:
-                       break  # No need to continue if we have both
+                       break

                if not display_name:
                    display_name = tvg_id

@@ -926,24 +878,17 @@ def parse_channels_only(source):
                    epgs_to_create.append(EPGData(
                        tvg_id=tvg_id,
                        name=display_name,
-                       icon_url=icon_url,
                        epg_source=source,
                    ))
                    logger.debug(f"[parse_channels_only] Added new channel to epgs_to_create 1: {tvg_id} - {display_name}")
                    processed_channels += 1
                    continue

-               # We use the cached object to check if the name or icon_url has changed
+               # We use the cached object to check if the name has changed
                epg_obj = existing_epgs[tvg_id]
-               needs_update = False
                if epg_obj.name != display_name:
+                   # Only update if the name actually changed
                    epg_obj.name = display_name
-                   needs_update = True
-               if epg_obj.icon_url != icon_url:
-                   epg_obj.icon_url = icon_url
-                   needs_update = True
-
-               if needs_update:
                    epgs_to_update.append(epg_obj)
                    logger.debug(f"[parse_channels_only] Added channel to update to epgs_to_update: {tvg_id} - {display_name}")
                else:

@@ -954,7 +899,6 @@ def parse_channels_only(source):
                epgs_to_create.append(EPGData(
                    tvg_id=tvg_id,
                    name=display_name,
-                   icon_url=icon_url,
                    epg_source=source,
                ))
                logger.debug(f"[parse_channels_only] Added new channel to epgs_to_create 2: {tvg_id} - {display_name}")

@@ -977,7 +921,7 @@ def parse_channels_only(source):
                logger.info(f"[parse_channels_only] Bulk updating {len(epgs_to_update)} EPG entries")
                if process:
                    logger.info(f"[parse_channels_only] Memory before bulk_update: {process.memory_info().rss / 1024 / 1024:.2f} MB")
-               EPGData.objects.bulk_update(epgs_to_update, ["name", "icon_url"])
+               EPGData.objects.bulk_update(epgs_to_update, ["name"])
                if process:
                    logger.info(f"[parse_channels_only] Memory after bulk_update: {process.memory_info().rss / 1024 / 1024:.2f} MB")
                epgs_to_update = []

@@ -1044,7 +988,7 @@ def parse_channels_only(source):
        logger.debug(f"[parse_channels_only] Created final batch of {len(epgs_to_create)} EPG entries")

    if epgs_to_update:
-       EPGData.objects.bulk_update(epgs_to_update, ["name", "icon_url"])
+       EPGData.objects.bulk_update(epgs_to_update, ["name"])
        logger.debug(f"[parse_channels_only] Updated final batch of {len(epgs_to_update)} EPG entries")
    if process:
        logger.debug(f"[parse_channels_only] Memory after final batch creation: {process.memory_info().rss / 1024 / 1024:.2f} MB")
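Two recurring changes in this file are the `recover=True` flag on lxml's iterparse (main side only) and aggressive element clearing while streaming. A minimal sketch (not repository code, lxml assumed installed) of that combined pattern:

    from lxml import etree

    def count_channels(path: str) -> int:
        count = 0
        # recover=True lets parsing continue past malformed markup instead of aborting
        for _, elem in etree.iterparse(path, events=('end',), tag='channel', recover=True):
            count += 1
            elem.clear()                      # free the element's subtree
            while elem.getprevious() is not None:
                del elem.getparent()[0]       # drop already-processed siblings from the root
        return count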
@@ -1158,12 +1102,6 @@ def parse_programs_for_tvg_id(epg_id):
    epg = EPGData.objects.get(id=epg_id)
    epg_source = epg.epg_source

-   # Skip program parsing for dummy EPG sources - they don't have program data files
-   if epg_source.source_type == 'dummy':
-       logger.info(f"Skipping program parsing for dummy EPG source {epg_source.name} (ID: {epg_id})")
-       release_task_lock('parse_epg_programs', epg_id)
-       return
-
    if not Channel.objects.filter(epg_data=epg).exists():
        logger.info(f"No channels matched to EPG {epg.tvg_id}")
        release_task_lock('parse_epg_programs', epg_id)

@@ -1257,7 +1195,7 @@ def parse_programs_for_tvg_id(epg_id):
    source_file = open(file_path, 'rb')

    # Stream parse the file using lxml's iterparse
-   program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True, recover=True)
+   program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True)

    for _, elem in program_parser:
        if elem.get('channel') == epg.tvg_id:

@@ -1286,7 +1224,10 @@ def parse_programs_for_tvg_id(epg_id):

            if custom_props:
                logger.trace(f"Number of custom properties: {len(custom_props)}")
-               custom_properties_json = custom_props
+               try:
+                   custom_properties_json = json.dumps(custom_props)
+               except Exception as e:
+                   logger.error(f"Error serializing custom properties to JSON: {e}", exc_info=True)

            programs_to_create.append(ProgramData(
                epg=epg,

@@ -1394,23 +1335,11 @@ def parse_programs_for_tvg_id(epg_id):


def parse_programs_for_source(epg_source, tvg_id=None):
-   """
-   Parse programs for all MAPPED channels from an EPG source in a single pass.
-
-   This is an optimized version that:
-   1. Only processes EPG entries that are actually mapped to channels
-   2. Parses the XML file ONCE instead of once per channel
-   3. Skips programmes for unmapped channels entirely during parsing
-
-   This dramatically improves performance when an EPG source has many channels
-   but only a fraction are mapped.
-   """
    # Send initial programs parsing notification
    send_epg_update(epg_source.id, "parsing_programs", 0)
    should_log_memory = False
    process = None
    initial_memory = 0
-   source_file = None

    # Add memory tracking only in trace mode or higher
    try:
@@ -1430,251 +1359,91 @@ def parse_programs_for_source(epg_source, tvg_id=None):
        should_log_memory = False

    try:
-       # Only get EPG entries that are actually mapped to channels
-       mapped_epg_ids = set(
-           Channel.objects.filter(
-               epg_data__epg_source=epg_source,
-               epg_data__isnull=False
-           ).values_list('epg_data_id', flat=True)
-       )
+       # Process EPG entries in batches rather than all at once
+       batch_size = 20  # Process fewer channels at once to reduce memory usage
+       epg_count = EPGData.objects.filter(epg_source=epg_source).count()

-       if not mapped_epg_ids:
-           total_epg_count = EPGData.objects.filter(epg_source=epg_source).count()
-           logger.info(f"No channels mapped to any EPG entries from source: {epg_source.name} "
-                       f"(source has {total_epg_count} EPG entries, 0 mapped)")
-           # Update status - this is not an error, just no mapped entries
+       if epg_count == 0:
+           logger.info(f"No EPG entries found for source: {epg_source.name}")
+           # Update status - this is not an error, just no entries
            epg_source.status = 'success'
-           epg_source.last_message = f"No channels mapped to this EPG source ({total_epg_count} entries available)"
-           epg_source.save(update_fields=['status', 'last_message'])
+           epg_source.save(update_fields=['status'])
            send_epg_update(epg_source.id, "parsing_programs", 100, status="success")
            return True

-       # Get the mapped EPG entries with their tvg_ids
-       mapped_epgs = EPGData.objects.filter(id__in=mapped_epg_ids).values('id', 'tvg_id')
-       tvg_id_to_epg_id = {epg['tvg_id']: epg['id'] for epg in mapped_epgs if epg['tvg_id']}
-       mapped_tvg_ids = set(tvg_id_to_epg_id.keys())
+       logger.info(f"Parsing programs for {epg_count} EPG entries from source: {epg_source.name}")

-       total_epg_count = EPGData.objects.filter(epg_source=epg_source).count()
-       mapped_count = len(mapped_tvg_ids)
+       failed_entries = []
+       program_count = 0
+       channel_count = 0
+       updated_count = 0
+       processed = 0
+       # Process in batches using cursor-based approach to limit memory usage
+       last_id = 0
+       while True:
+           # Get a batch of EPG entries
+           batch_entries = list(EPGData.objects.filter(
+               epg_source=epg_source,
+               id__gt=last_id
+           ).order_by('id')[:batch_size])

-       logger.info(f"Parsing programs for {mapped_count} MAPPED channels from source: {epg_source.name} "
-                   f"(skipping {total_epg_count - mapped_count} unmapped EPG entries)")
+           if not batch_entries:
+               break  # No more entries to process

-       # Get the file path
-       file_path = epg_source.extracted_file_path if epg_source.extracted_file_path else epg_source.file_path
-       if not file_path:
-           file_path = epg_source.get_cache_file()
+           # Update last_id for next iteration
+           last_id = batch_entries[-1].id

-       # Check if the file exists
-       if not os.path.exists(file_path):
-           logger.error(f"EPG file not found at: {file_path}")
+           # Process this batch
+           for epg in batch_entries:
+               if epg.tvg_id:
+                   try:
+                       result = parse_programs_for_tvg_id(epg.id)
+                       if result == "Task already running":
+                           logger.info(f"Program parse for {epg.id} already in progress, skipping")

-           if epg_source.url:
-               # Update the file path in the database
-               new_path = epg_source.get_cache_file()
-               logger.info(f"Updating file_path from '{file_path}' to '{new_path}'")
-               epg_source.file_path = new_path
-               epg_source.save(update_fields=['file_path'])
-               logger.info(f"Fetching new EPG data from URL: {epg_source.url}")
+                       processed += 1
+                       progress = min(95, int((processed / epg_count) * 100)) if epg_count > 0 else 50
+                       send_epg_update(epg_source.id, "parsing_programs", progress)
+                   except Exception as e:
+                       logger.error(f"Error parsing programs for tvg_id={epg.tvg_id}: {e}", exc_info=True)
+                       failed_entries.append(f"{epg.tvg_id}: {str(e)}")

-               # Fetch new data before continuing
-               fetch_success = fetch_xmltv(epg_source)
-
-               if not fetch_success:
-                   logger.error(f"Failed to fetch EPG data for source: {epg_source.name}")
-                   epg_source.status = 'error'
-                   epg_source.last_message = f"Failed to download EPG data"
-                   epg_source.save(update_fields=['status', 'last_message'])
-                   send_epg_update(epg_source.id, "parsing_programs", 100, status="error", error="Failed to download EPG file")
-                   return False
-
-               # Update file_path with the new location
-               file_path = epg_source.extracted_file_path if epg_source.extracted_file_path else epg_source.file_path
-           else:
-               logger.error(f"No URL provided for EPG source {epg_source.name}, cannot fetch new data")
-               epg_source.status = 'error'
-               epg_source.last_message = f"No URL provided, cannot fetch EPG data"
-               epg_source.save(update_fields=['status', 'last_message'])
-               send_epg_update(epg_source.id, "parsing_programs", 100, status="error", error="No URL provided")
-               return False
-
-       # SINGLE PASS PARSING: Parse the XML file once and collect all programs in memory
-       # We parse FIRST, then do an atomic delete+insert to avoid race conditions
-       # where clients might see empty/partial EPG data during the transition
-       all_programs_to_create = []
-       programs_by_channel = {tvg_id: 0 for tvg_id in mapped_tvg_ids}  # Track count per channel
-       total_programs = 0
-       skipped_programs = 0
-       last_progress_update = 0
-
-       try:
-           logger.debug(f"Opening file for single-pass parsing: {file_path}")
-           source_file = open(file_path, 'rb')
-
-           # Stream parse the file using lxml's iterparse
-           program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True, recover=True)
-
-           for _, elem in program_parser:
-               channel_id = elem.get('channel')
-
-               # Skip programmes for unmapped channels immediately
-               if channel_id not in mapped_tvg_ids:
-                   skipped_programs += 1
-                   # Clear element to free memory
-                   clear_element(elem)
-                   continue
-
-               # This programme is for a mapped channel - process it
-               try:
-                   start_time = parse_xmltv_time(elem.get('start'))
-                   end_time = parse_xmltv_time(elem.get('stop'))
-                   title = None
-                   desc = None
-                   sub_title = None
-
-                   # Efficiently process child elements
-                   for child in elem:
-                       if child.tag == 'title':
-                           title = child.text or 'No Title'
-                       elif child.tag == 'desc':
-                           desc = child.text or ''
-                       elif child.tag == 'sub-title':
-                           sub_title = child.text or ''
-
-                   if not title:
-                       title = 'No Title'
-
-                   # Extract custom properties
-                   custom_props = extract_custom_properties(elem)
-                   custom_properties_json = custom_props if custom_props else None
-
-                   epg_id = tvg_id_to_epg_id[channel_id]
-                   all_programs_to_create.append(ProgramData(
-                       epg_id=epg_id,
-                       start_time=start_time,
-                       end_time=end_time,
-                       title=title,
-                       description=desc,
-                       sub_title=sub_title,
-                       tvg_id=channel_id,
-                       custom_properties=custom_properties_json
-                   ))
-                   total_programs += 1
-                   programs_by_channel[channel_id] += 1
-
-                   # Clear the element to free memory
-                   clear_element(elem)
-
-                   # Send progress update (estimate based on programs processed)
-                   if total_programs - last_progress_update >= 5000:
-                       last_progress_update = total_programs
-                       # Cap at 70% during parsing phase (save 30% for DB operations)
-                       progress = min(70, 10 + int((total_programs / max(total_programs + 10000, 1)) * 60))
-                       send_epg_update(epg_source.id, "parsing_programs", progress,
-                                       processed=total_programs, channels=mapped_count)
-
-                   # Periodic garbage collection during parsing
-                   if total_programs % 5000 == 0:
-                       gc.collect()
-
-               except Exception as e:
-                   logger.error(f"Error processing program for {channel_id}: {e}", exc_info=True)
-                   clear_element(elem)
-                   continue
-
-       except etree.XMLSyntaxError as xml_error:
-           logger.error(f"XML syntax error parsing program data: {xml_error}")
-           epg_source.status = EPGSource.STATUS_ERROR
-           epg_source.last_message = f"XML parsing error: {str(xml_error)}"
-           epg_source.save(update_fields=['status', 'last_message'])
-           send_epg_update(epg_source.id, "parsing_programs", 100, status="error", message=str(xml_error))
-           return False
-       except Exception as e:
-           logger.error(f"Error parsing XML for programs: {e}", exc_info=True)
-           raise
-       finally:
-           if source_file:
-               source_file.close()
-               source_file = None
-
-       # Now perform atomic delete + bulk insert
-       # This ensures clients never see empty/partial EPG data
-       logger.info(f"Parsed {total_programs} programs, performing atomic database update...")
-       send_epg_update(epg_source.id, "parsing_programs", 75, message="Updating database...")
-
-       batch_size = 1000
-       try:
-           with transaction.atomic():
-               # Delete existing programs for mapped EPGs
-               deleted_count = ProgramData.objects.filter(epg_id__in=mapped_epg_ids).delete()[0]
-               logger.debug(f"Deleted {deleted_count} existing programs")
-
-               # Clean up orphaned programs for unmapped EPG entries
-               unmapped_epg_ids = list(EPGData.objects.filter(
-                   epg_source=epg_source
-               ).exclude(id__in=mapped_epg_ids).values_list('id', flat=True))
-
-               if unmapped_epg_ids:
-                   orphaned_count = ProgramData.objects.filter(epg_id__in=unmapped_epg_ids).delete()[0]
-                   if orphaned_count > 0:
-                       logger.info(f"Cleaned up {orphaned_count} orphaned programs for {len(unmapped_epg_ids)} unmapped EPG entries")
-
-               # Bulk insert all new programs in batches within the same transaction
-               for i in range(0, len(all_programs_to_create), batch_size):
-                   batch = all_programs_to_create[i:i + batch_size]
-                   ProgramData.objects.bulk_create(batch)
-
-                   # Update progress during insertion
-                   progress = 75 + int((i / len(all_programs_to_create)) * 20) if all_programs_to_create else 95
-                   if i % (batch_size * 5) == 0:
-                       send_epg_update(epg_source.id, "parsing_programs", min(95, progress),
-                                       message=f"Inserting programs... {i}/{len(all_programs_to_create)}")
-
-           logger.info(f"Atomic update complete: deleted {deleted_count}, inserted {total_programs} programs")
-
-       except Exception as db_error:
-           logger.error(f"Database error during atomic update: {db_error}", exc_info=True)
-           epg_source.status = EPGSource.STATUS_ERROR
-           epg_source.last_message = f"Database error: {str(db_error)}"
-           epg_source.save(update_fields=['status', 'last_message'])
-           send_epg_update(epg_source.id, "parsing_programs", 100, status="error", message=str(db_error))
-           return False
-       finally:
-           # Clear the large list to free memory
-           all_programs_to_create = None
+           # Force garbage collection after each batch
+           batch_entries = None  # Remove reference to help garbage collection
            gc.collect()

-       # Count channels that actually got programs
-       channels_with_programs = sum(1 for count in programs_by_channel.values() if count > 0)
+       # If there were failures, include them in the message but continue
+       if failed_entries:
+           epg_source.status = EPGSource.STATUS_SUCCESS  # Still mark as success if some processed
+           error_summary = f"Failed to parse {len(failed_entries)} of {epg_count} entries"
+           stats_summary = f"Processed {program_count} programs across {channel_count} channels. Updated: {updated_count}."
+           epg_source.last_message = f"{stats_summary} Warning: {error_summary}"
+           epg_source.updated_at = timezone.now()
+           epg_source.save(update_fields=['status', 'last_message', 'updated_at'])

-       # Success message
+           # Send completion notification with mixed status
+           send_epg_update(epg_source.id, "parsing_programs", 100,
+                           status="success",
+                           message=epg_source.last_message)
+
+           # Explicitly release memory of large lists before returning
+           del failed_entries
+           gc.collect()
+
+           return True
+
+       # If all successful, set a comprehensive success message
        epg_source.status = EPGSource.STATUS_SUCCESS
-       epg_source.last_message = (
-           f"Parsed {total_programs:,} programs for {channels_with_programs} channels "
-           f"(skipped {skipped_programs:,} programs for {total_epg_count - mapped_count} unmapped channels)"
-       )
+       epg_source.last_message = f"Successfully processed {program_count} programs across {channel_count} channels. Updated: {updated_count}."
        epg_source.updated_at = timezone.now()
        epg_source.save(update_fields=['status', 'last_message', 'updated_at'])

-       # Log system event for EPG refresh
-       log_system_event(
-           event_type='epg_refresh',
-           source_name=epg_source.name,
-           programs=total_programs,
-           channels=channels_with_programs,
-           skipped_programs=skipped_programs,
-           unmapped_channels=total_epg_count - mapped_count,
-       )

        # Send completion notification with status
        send_epg_update(epg_source.id, "parsing_programs", 100,
                        status="success",
-                       message=epg_source.last_message,
-                       updated_at=epg_source.updated_at.isoformat())
+                       message=epg_source.last_message)

-       logger.info(f"Completed parsing programs for source: {epg_source.name} - "
-                   f"{total_programs:,} programs for {channels_with_programs} channels, "
-                   f"skipped {skipped_programs:,} programs for unmapped channels")
+       logger.info(f"Completed parsing all programs for source: {epg_source.name}")
        return True

    except Exception as e:
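The main side's delete-then-insert runs inside one transaction so readers always see either the complete old schedule or the complete new one, never a half-written gap. A minimal sketch (not repository code; the model class is passed in to keep the example self-contained):

    from django.db import transaction

    def replace_programs(ProgramData, epg_ids, new_rows, batch_size=1000):
        with transaction.atomic():
            # Both steps commit together: a crash here rolls back the delete too
            ProgramData.objects.filter(epg_id__in=epg_ids).delete()
            for i in range(0, len(new_rows), batch_size):
                ProgramData.objects.bulk_create(new_rows[i:i + batch_size])

Batching the bulk_create bounds each INSERT statement's size while still keeping the whole swap atomic.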
@@ -1689,19 +1458,14 @@ def parse_programs_for_source(epg_source, tvg_id=None):
        return False
    finally:
        # Final memory cleanup and tracking
-       if source_file:
-           try:
-               source_file.close()
-           except:
-               pass
-           source_file = None

        # Explicitly release any remaining large data structures
        programs_to_create = None
-       programs_by_channel = None
-       mapped_epg_ids = None
-       mapped_tvg_ids = None
-       tvg_id_to_epg_id = None
+       failed_entries = None
+       program_count = None
+       channel_count = None
+       updated_count = None
+       processed = None
        gc.collect()

        # Add comprehensive memory cleanup at the end

@@ -1715,13 +1479,12 @@ def fetch_schedules_direct(source):
    logger.info(f"Fetching Schedules Direct data from source: {source.name}")
    try:
        # Get default user agent from settings
-       stream_settings = CoreSettings.get_stream_settings()
+       default_user_agent_setting = CoreSettings.objects.filter(key='default-user-agent').first()
        user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:138.0) Gecko/20100101 Firefox/138.0"  # Fallback default
-       default_user_agent_id = stream_settings.get('default_user_agent')
-
-       if default_user_agent_id:
+       if default_user_agent_setting and default_user_agent_setting.value:
            try:
-               user_agent_obj = UserAgent.objects.filter(id=int(default_user_agent_id)).first()
+               user_agent_obj = UserAgent.objects.filter(id=int(default_user_agent_setting.value)).first()
                if user_agent_obj and user_agent_obj.user_agent:
                    user_agent = user_agent_obj.user_agent
                    logger.debug(f"Using default user agent: {user_agent}")
@@ -1849,11 +1612,6 @@ def extract_custom_properties(prog):
    if categories:
        custom_props['categories'] = categories

-   # Extract keywords (new)
-   keywords = [kw.text.strip() for kw in prog.findall('keyword') if kw.text and kw.text.strip()]
-   if keywords:
-       custom_props['keywords'] = keywords
-
    # Extract episode numbers
    for ep_num in prog.findall('episode-num'):
        system = ep_num.get('system', '')

@@ -1879,9 +1637,6 @@ def extract_custom_properties(prog):
        elif system == 'dd_progid' and ep_num.text:
            # Store the dd_progid format
            custom_props['dd_progid'] = ep_num.text.strip()
-       # Add support for other systems like thetvdb.com, themoviedb.org, imdb.com
-       elif system in ['thetvdb.com', 'themoviedb.org', 'imdb.com'] and ep_num.text:
-           custom_props[f'{system}_id'] = ep_num.text.strip()

    # Extract ratings more efficiently
    rating_elem = prog.find('rating')

@@ -1892,172 +1647,37 @@ def extract_custom_properties(prog):
        if rating_elem.get('system'):
            custom_props['rating_system'] = rating_elem.get('system')

-   # Extract star ratings (new)
-   star_ratings = []
-   for star_rating in prog.findall('star-rating'):
-       value_elem = star_rating.find('value')
-       if value_elem is not None and value_elem.text:
-           rating_data = {'value': value_elem.text.strip()}
-           if star_rating.get('system'):
-               rating_data['system'] = star_rating.get('system')
-           star_ratings.append(rating_data)
-   if star_ratings:
-       custom_props['star_ratings'] = star_ratings
-
    # Extract credits more efficiently
    credits_elem = prog.find('credits')
    if credits_elem is not None:
        credits = {}
-       for credit_type in ['director', 'actor', 'writer', 'adapter', 'producer', 'composer', 'editor', 'presenter', 'commentator', 'guest']:
-           if credit_type == 'actor':
-               # Handle actors with roles and guest status
-               actors = []
-               for actor_elem in credits_elem.findall('actor'):
-                   if actor_elem.text and actor_elem.text.strip():
-                       actor_data = {'name': actor_elem.text.strip()}
-                       if actor_elem.get('role'):
-                           actor_data['role'] = actor_elem.get('role')
-                       if actor_elem.get('guest') == 'yes':
-                           actor_data['guest'] = True
-                       actors.append(actor_data)
-               if actors:
-                   credits['actor'] = actors
-           else:
-               names = [e.text.strip() for e in credits_elem.findall(credit_type) if e.text and e.text.strip()]
-               if names:
-                   credits[credit_type] = names
+       for credit_type in ['director', 'actor', 'writer', 'presenter', 'producer']:
+           names = [e.text.strip() for e in credits_elem.findall(credit_type) if e.text and e.text.strip()]
+           if names:
+               credits[credit_type] = names
        if credits:
            custom_props['credits'] = credits

    # Extract other common program metadata
    date_elem = prog.find('date')
    if date_elem is not None and date_elem.text:
-       custom_props['date'] = date_elem.text.strip()
        custom_props['year'] = date_elem.text.strip()[:4]  # Just the year part

    country_elem = prog.find('country')
    if country_elem is not None and country_elem.text:
        custom_props['country'] = country_elem.text.strip()

-   # Extract language information (new)
-   language_elem = prog.find('language')
-   if language_elem is not None and language_elem.text:
-       custom_props['language'] = language_elem.text.strip()
-
-   orig_language_elem = prog.find('orig-language')
-   if orig_language_elem is not None and orig_language_elem.text:
-       custom_props['original_language'] = orig_language_elem.text.strip()
-
-   # Extract length (new)
-   length_elem = prog.find('length')
-   if length_elem is not None and length_elem.text:
-       try:
-           length_value = int(length_elem.text.strip())
-           length_units = length_elem.get('units', 'minutes')
-           custom_props['length'] = {'value': length_value, 'units': length_units}
-       except ValueError:
-           pass
-
-   # Extract video information (new)
-   video_elem = prog.find('video')
-   if video_elem is not None:
-       video_info = {}
-       for video_attr in ['present', 'colour', 'aspect', 'quality']:
-           attr_elem = video_elem.find(video_attr)
-           if attr_elem is not None and attr_elem.text:
-               video_info[video_attr] = attr_elem.text.strip()
-       if video_info:
-           custom_props['video'] = video_info
-
-   # Extract audio information (new)
-   audio_elem = prog.find('audio')
-   if audio_elem is not None:
-       audio_info = {}
-       for audio_attr in ['present', 'stereo']:
-           attr_elem = audio_elem.find(audio_attr)
-           if attr_elem is not None and attr_elem.text:
-               audio_info[audio_attr] = attr_elem.text.strip()
-       if audio_info:
-           custom_props['audio'] = audio_info
-
-   # Extract subtitles information (new)
-   subtitles = []
-   for subtitle_elem in prog.findall('subtitles'):
-       subtitle_data = {}
-       if subtitle_elem.get('type'):
-           subtitle_data['type'] = subtitle_elem.get('type')
-       lang_elem = subtitle_elem.find('language')
-       if lang_elem is not None and lang_elem.text:
-           subtitle_data['language'] = lang_elem.text.strip()
-       if subtitle_data:
-           subtitles.append(subtitle_data)
-
-   if subtitles:
-       custom_props['subtitles'] = subtitles
-
-   # Extract reviews (new)
-   reviews = []
-   for review_elem in prog.findall('review'):
-       if review_elem.text and review_elem.text.strip():
-           review_data = {'content': review_elem.text.strip()}
-           if review_elem.get('type'):
-               review_data['type'] = review_elem.get('type')
-           if review_elem.get('source'):
-               review_data['source'] = review_elem.get('source')
-           if review_elem.get('reviewer'):
-               review_data['reviewer'] = review_elem.get('reviewer')
-           reviews.append(review_data)
-   if reviews:
-       custom_props['reviews'] = reviews
-
-   # Extract images (new)
-   images = []
-   for image_elem in prog.findall('image'):
-       if image_elem.text and image_elem.text.strip():
-           image_data = {'url': image_elem.text.strip()}
-           for attr in ['type', 'size', 'orient', 'system']:
-               if image_elem.get(attr):
-                   image_data[attr] = image_elem.get(attr)
-           images.append(image_data)
-   if images:
-       custom_props['images'] = images
-
    icon_elem = prog.find('icon')
    if icon_elem is not None and icon_elem.get('src'):
        custom_props['icon'] = icon_elem.get('src')

-   # Simpler approach for boolean flags - expanded list
-   for kw in ['previously-shown', 'premiere', 'new', 'live', 'last-chance']:
+   # Simpler approach for boolean flags
+   for kw in ['previously-shown', 'premiere', 'new', 'live']:
        if prog.find(kw) is not None:
            custom_props[kw.replace('-', '_')] = True

-   # Extract premiere and last-chance text content if available
-   premiere_elem = prog.find('premiere')
-   if premiere_elem is not None:
-       custom_props['premiere'] = True
-       if premiere_elem.text and premiere_elem.text.strip():
-           custom_props['premiere_text'] = premiere_elem.text.strip()
-
-   last_chance_elem = prog.find('last-chance')
-   if last_chance_elem is not None:
-       custom_props['last_chance'] = True
-       if last_chance_elem.text and last_chance_elem.text.strip():
-           custom_props['last_chance_text'] = last_chance_elem.text.strip()
-
-   # Extract previously-shown details
-   prev_shown_elem = prog.find('previously-shown')
-   if prev_shown_elem is not None:
-       custom_props['previously_shown'] = True
-       prev_shown_data = {}
-       if prev_shown_elem.get('start'):
-           prev_shown_data['start'] = prev_shown_elem.get('start')
-       if prev_shown_elem.get('channel'):
-           prev_shown_data['channel'] = prev_shown_elem.get('channel')
-       if prev_shown_data:
-           custom_props['previously_shown_details'] = prev_shown_data

    return custom_props


def clear_element(elem):
    """Clear an XML element and its parent to free memory."""
    try:
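To make the main side's expanded extraction concrete, a hypothetical XMLTV programme and (roughly) the dictionary the code above would produce; the element values here are invented for illustration:

    # <programme ...>
    #   <star-rating system="imdb"><value>8/10</value></star-rating>
    #   <credits><actor role="Lead" guest="yes">Jane Doe</actor></credits>
    #   <subtitles type="teletext"><language>en</language></subtitles>
    # </programme>
    expected = {
        'star_ratings': [{'value': '8/10', 'system': 'imdb'}],
        'credits': {'actor': [{'name': 'Jane Doe', 'role': 'Lead', 'guest': True}]},
        'subtitles': [{'type': 'teletext', 'language': 'en'}],
    }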
@@ -2136,20 +1756,3 @@ def detect_file_format(file_path=None, content=None):

    # If we reach here, we couldn't reliably determine the format
    return format_type, is_compressed, file_extension
-
-
-def generate_dummy_epg(source):
-   """
-   DEPRECATED: This function is no longer used.
-
-   Dummy EPG programs are now generated on-demand when they are requested
-   (during XMLTV export or EPG grid display), rather than being pre-generated
-   and stored in the database.
-
-   See: apps/output/views.py - generate_custom_dummy_programs()
-
-   This function remains for backward compatibility but should not be called.
-   """
-   logger.warning(f"generate_dummy_epg() called for {source.name} but this function is deprecated. "
-                  f"Dummy EPG programs are now generated on-demand.")
-   return True
@@ -17,6 +17,7 @@ from django.views import View
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
+from apps.m3u.models import M3UAccountProfile

# Configure logger
logger = logging.getLogger(__name__)

@@ -59,9 +60,43 @@ class DiscoverAPIView(APIView):
        base_url = request.build_absolute_uri(f'/{"/".join(uri_parts)}/').rstrip("/")
        device = HDHRDevice.objects.first()

-       # Calculate tuner count using centralized function
-       from apps.m3u.utils import calculate_tuner_count
-       tuner_count = calculate_tuner_count(minimum=1, unlimited_default=10)
+       # Calculate tuner count from active profiles from active M3U accounts (excluding default "custom Default" profile)
+       profiles = M3UAccountProfile.objects.filter(
+           is_active=True,
+           m3u_account__is_active=True,  # Only include profiles from enabled M3U accounts
+       ).exclude(id=1)
+
+       # 1. Check if any profile has unlimited streams (max_streams=0)
+       has_unlimited = profiles.filter(max_streams=0).exists()
+
+       # 2. Calculate tuner count from limited profiles
+       limited_tuners = 0
+       if not has_unlimited:
+           limited_tuners = (
+               profiles.filter(max_streams__gt=0)
+               .aggregate(total=models.Sum("max_streams"))
+               .get("total", 0)
+               or 0
+           )
+
+       # 3. Add custom stream count to tuner count
+       custom_stream_count = Stream.objects.filter(is_custom=True).count()
+       logger.debug(f"Found {custom_stream_count} custom streams")
+
+       # 4. Calculate final tuner count
+       if has_unlimited:
+           # If there are unlimited profiles, start with 10 plus custom streams
+           tuner_count = 10 + custom_stream_count
+       else:
+           # Otherwise use the limited profile sum plus custom streams
+           tuner_count = limited_tuners + custom_stream_count
+
+       # 5. Ensure a minimum of 1 tuner
+       tuner_count = max(1, tuner_count)
+
+       logger.debug(
+           f"Calculated tuner count: {tuner_count} (limited profiles: {limited_tuners}, custom streams: {custom_stream_count}, unlimited: {has_unlimited})"
+       )

        # Create a unique DeviceID for the HDHomeRun device based on profile ID or a default value
        device_ID = "12345678"  # Default DeviceID
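A worked example of the tuner arithmetic above, with hypothetical numbers:

    profiles_max_streams = [2, 3]   # active, limited profiles (default profile id=1 excluded)
    custom_streams = 4              # Stream.objects.filter(is_custom=True).count()
    has_unlimited = False

    tuners = (10 if has_unlimited else sum(profiles_max_streams)) + custom_streams
    tuners = max(1, tuners)         # -> 9; with has_unlimited=True it would be 14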
@@ -1,8 +1,6 @@
from django.contrib import admin
from django.utils.html import format_html
-from .models import M3UAccount, M3UFilter, ServerGroup, UserAgent, M3UAccountProfile
-import json
+from .models import M3UAccount, M3UFilter, ServerGroup, UserAgent

class M3UFilterInline(admin.TabularInline):
    model = M3UFilter

@@ -10,181 +8,50 @@ class M3UFilterInline(admin.TabularInline):
    verbose_name = "M3U Filter"
    verbose_name_plural = "M3U Filters"


@admin.register(M3UAccount)
class M3UAccountAdmin(admin.ModelAdmin):
-   list_display = (
-       "name",
-       "server_url",
-       "server_group",
-       "max_streams",
-       "priority",
-       "is_active",
-       "user_agent_display",
-       "uploaded_file_link",
-       "created_at",
-       "updated_at",
-   )
-   list_filter = ("is_active", "server_group")
-   search_fields = ("name", "server_url", "server_group__name")
+   list_display = ('name', 'server_url', 'server_group', 'max_streams', 'is_active', 'user_agent_display', 'uploaded_file_link', 'created_at', 'updated_at')
+   list_filter = ('is_active', 'server_group')
+   search_fields = ('name', 'server_url', 'server_group__name')
    inlines = [M3UFilterInline]
-   actions = ["activate_accounts", "deactivate_accounts"]
+   actions = ['activate_accounts', 'deactivate_accounts']

    # Handle both ForeignKey and ManyToManyField cases for UserAgent
    def user_agent_display(self, obj):
-       if hasattr(obj, "user_agent"):  # ForeignKey case
+       if hasattr(obj, 'user_agent'):  # ForeignKey case
            return obj.user_agent.user_agent if obj.user_agent else "None"
-       elif hasattr(obj, "user_agents"):  # ManyToManyField case
+       elif hasattr(obj, 'user_agents'):  # ManyToManyField case
            return ", ".join([ua.user_agent for ua in obj.user_agents.all()]) or "None"
        return "None"

    user_agent_display.short_description = "User Agent(s)"

-   def vod_enabled_display(self, obj):
-       """Display whether VOD is enabled for this account"""
-       if obj.custom_properties:
-           custom_props = obj.custom_properties or {}
-           return "Yes" if custom_props.get('enable_vod', False) else "No"
-       return "No"
-   vod_enabled_display.short_description = "VOD Enabled"
-   vod_enabled_display.boolean = True
-
    def uploaded_file_link(self, obj):
        if obj.uploaded_file:
-           return format_html(
-               "<a href='{}' target='_blank'>Download M3U</a>", obj.uploaded_file.url
-           )
+           return format_html("<a href='{}' target='_blank'>Download M3U</a>", obj.uploaded_file.url)
        return "No file uploaded"

    uploaded_file_link.short_description = "Uploaded File"

-   @admin.action(description="Activate selected accounts")
+   @admin.action(description='Activate selected accounts')
    def activate_accounts(self, request, queryset):
        queryset.update(is_active=True)

-   @admin.action(description="Deactivate selected accounts")
+   @admin.action(description='Deactivate selected accounts')
    def deactivate_accounts(self, request, queryset):
        queryset.update(is_active=False)

    # Add ManyToManyField for Django Admin (if applicable)
-   if hasattr(M3UAccount, "user_agents"):
-       filter_horizontal = ("user_agents",)  # Only for ManyToManyField
+   if hasattr(M3UAccount, 'user_agents'):
+       filter_horizontal = ('user_agents',)  # Only for ManyToManyField

@admin.register(M3UFilter)
class M3UFilterAdmin(admin.ModelAdmin):
-   list_display = ("m3u_account", "filter_type", "regex_pattern", "exclude")
-   list_filter = ("filter_type", "exclude")
-   search_fields = ("regex_pattern",)
-   ordering = ("m3u_account",)
+   list_display = ('m3u_account', 'filter_type', 'regex_pattern', 'exclude')
+   list_filter = ('filter_type', 'exclude')
+   search_fields = ('regex_pattern',)
+   ordering = ('m3u_account',)

@admin.register(ServerGroup)
class ServerGroupAdmin(admin.ModelAdmin):
-   list_display = ("name",)
-   search_fields = ("name",)
+   list_display = ('name',)
+   search_fields = ('name',)


-@admin.register(M3UAccountProfile)
-class M3UAccountProfileAdmin(admin.ModelAdmin):
-   list_display = (
-       "name",
-       "m3u_account",
-       "is_default",
-       "is_active",
-       "max_streams",
-       "current_viewers",
-       "account_status_display",
-       "account_expiration_display",
-       "last_refresh_display",
-   )
-   list_filter = ("is_active", "is_default", "m3u_account__account_type")
-   search_fields = ("name", "m3u_account__name")
-   readonly_fields = ("account_info_display",)
-
-   def account_status_display(self, obj):
-       """Display account status from custom properties"""
-       status = obj.get_account_status()
-       if status:
-           # Create colored status display
-           color_map = {
-               'Active': 'green',
-               'Expired': 'red',
-               'Disabled': 'red',
-               'Banned': 'red',
-           }
-           color = color_map.get(status, 'black')
-           return format_html(
-               '<span style="color: {};">{}</span>',
-               color,
-               status
-           )
-       return "Unknown"
-   account_status_display.short_description = "Account Status"
-
-   def account_expiration_display(self, obj):
-       """Display account expiration from custom properties"""
-       expiration = obj.get_account_expiration()
-       if expiration:
-           from datetime import datetime
-           if expiration < datetime.now():
-               return format_html(
-                   '<span style="color: red;">{}</span>',
-                   expiration.strftime('%Y-%m-%d %H:%M')
-               )
-           else:
-               return format_html(
-                   '<span style="color: green;">{}</span>',
-                   expiration.strftime('%Y-%m-%d %H:%M')
-               )
-       return "Unknown"
-   account_expiration_display.short_description = "Expires"
-
-   def last_refresh_display(self, obj):
-       """Display last refresh time from custom properties"""
-       last_refresh = obj.get_last_refresh()
-       if last_refresh:
-           return last_refresh.strftime('%Y-%m-%d %H:%M:%S')
-       return "Never"
-   last_refresh_display.short_description = "Last Refresh"
-
-   def account_info_display(self, obj):
-       """Display formatted account information from custom properties"""
-       if not obj.custom_properties:
-           return "No account information available"
-
-       html_parts = []
-
-       # User Info
-       user_info = obj.custom_properties.get('user_info', {})
-       if user_info:
-           html_parts.append("<h3>User Information:</h3>")
-           html_parts.append("<ul>")
-           for key, value in user_info.items():
-               if key == 'exp_date' and value:
-                   try:
-                       from datetime import datetime
-                       exp_date = datetime.fromtimestamp(float(value))
-                       value = exp_date.strftime('%Y-%m-%d %H:%M:%S')
-                   except (ValueError, TypeError):
-                       pass
-               html_parts.append(f"<li><strong>{key}:</strong> {value}</li>")
-           html_parts.append("</ul>")
-
-       # Server Info
-       server_info = obj.custom_properties.get('server_info', {})
-       if server_info:
-           html_parts.append("<h3>Server Information:</h3>")
-           html_parts.append("<ul>")
-           for key, value in server_info.items():
-               html_parts.append(f"<li><strong>{key}:</strong> {value}</li>")
-           html_parts.append("</ul>")
-
-       # Last Refresh
-       last_refresh = obj.custom_properties.get('last_refresh')
-       if last_refresh:
-           html_parts.append(f"<p><strong>Last Refresh:</strong> {last_refresh}</p>")
-
-       return format_html(''.join(html_parts)) if html_parts else "No account information available"
-
-   account_info_display.short_description = "Account Information"
@@ -1,44 +1,18 @@
from django.urls import path, include
from rest_framework.routers import DefaultRouter
-from .api_views import (
-    M3UAccountViewSet,
-    M3UFilterViewSet,
-    ServerGroupViewSet,
-    RefreshM3UAPIView,
-    RefreshSingleM3UAPIView,
-    RefreshAccountInfoAPIView,
-    UserAgentViewSet,
-    M3UAccountProfileViewSet,
-)
+from .api_views import M3UAccountViewSet, M3UFilterViewSet, ServerGroupViewSet, RefreshM3UAPIView, RefreshSingleM3UAPIView, UserAgentViewSet, M3UAccountProfileViewSet

-app_name = "m3u"
+app_name = 'm3u'

router = DefaultRouter()
-router.register(r"accounts", M3UAccountViewSet, basename="m3u-account")
-router.register(
-    r"accounts\/(?P<account_id>\d+)\/profiles",
-    M3UAccountProfileViewSet,
-    basename="m3u-account-profiles",
-)
-router.register(
-    r"accounts\/(?P<account_id>\d+)\/filters",
-    M3UFilterViewSet,
-    basename="m3u-filters",
-)
-router.register(r"server-groups", ServerGroupViewSet, basename="server-group")
+router.register(r'accounts', M3UAccountViewSet, basename='m3u-account')
+router.register(r'accounts\/(?P<account_id>\d+)\/profiles', M3UAccountProfileViewSet, basename='m3u-account-profiles')
+router.register(r'filters', M3UFilterViewSet, basename='m3u-filter')
+router.register(r'server-groups', ServerGroupViewSet, basename='server-group')

urlpatterns = [
-    path("refresh/", RefreshM3UAPIView.as_view(), name="m3u_refresh"),
-    path(
-        "refresh/<int:account_id>/",
-        RefreshSingleM3UAPIView.as_view(),
-        name="m3u_refresh_single",
-    ),
-    path(
-        "refresh-account-info/<int:profile_id>/",
-        RefreshAccountInfoAPIView.as_view(),
-        name="m3u_refresh_account_info",
-    ),
+    path('refresh/', RefreshM3UAPIView.as_view(), name='m3u_refresh'),
+    path('refresh/<int:account_id>/', RefreshSingleM3UAPIView.as_view(), name='m3u_refresh_single'),
]

urlpatterns += router.urls
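The nested registration captures account_id from the route regex, and DRF derives route names from the basename. An illustrative sketch (not repository code; the final URL prefix depends on where these urls are included):

    from django.urls import reverse

    # DefaultRouter names list routes '<basename>-list', namespaced by app_name:
    url = reverse('m3u:m3u-account-profiles-list', kwargs={'account_id': 7})
    # -> '.../accounts/7/profiles/'; the view receives account_id='7' in kwargs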
@ -15,14 +15,14 @@ import os
|
|||
from rest_framework.decorators import action
|
||||
from django.conf import settings
|
||||
from .tasks import refresh_m3u_groups
|
||||
import json
|
||||
|
||||
# Import all models, including UserAgent.
|
||||
from .models import M3UAccount, M3UFilter, ServerGroup, M3UAccountProfile
|
||||
from core.models import UserAgent
|
||||
from apps.channels.models import ChannelGroupM3UAccount
|
||||
from core.serializers import UserAgentSerializer
|
||||
from apps.vod.models import M3UVODCategoryRelation
|
||||
|
||||
# Import all serializers, including the UserAgentSerializer.
|
||||
from .serializers import (
|
||||
M3UAccountSerializer,
|
||||
M3UFilterSerializer,
|
||||
|
|
@ -30,8 +30,9 @@ from .serializers import (
|
|||
M3UAccountProfileSerializer,
|
||||
)
|
||||
|
||||
from .tasks import refresh_single_m3u_account, refresh_m3u_accounts, refresh_account_info
|
||||
import json
|
||||
from .tasks import refresh_single_m3u_account, refresh_m3u_accounts
|
||||
from django.core.files.storage import default_storage
|
||||
from django.core.files.base import ContentFile
|
||||
|
||||
|
||||
class M3UAccountViewSet(viewsets.ModelViewSet):
|
||||
|
|
@ -78,37 +79,15 @@ class M3UAccountViewSet(viewsets.ModelViewSet):
|
|||
# Now call super().create() to create the instance
|
||||
response = super().create(request, *args, **kwargs)
|
||||
|
||||
account_type = response.data.get("account_type")
|
||||
account_id = response.data.get("id")
|
||||
|
||||
# Notify frontend that a new playlist was created
|
||||
from core.utils import send_websocket_update
|
||||
send_websocket_update('updates', 'update', {
|
||||
'type': 'playlist_created',
|
||||
'playlist_id': account_id
|
||||
})
|
||||
|
||||
if account_type == M3UAccount.Types.XC:
|
||||
refresh_m3u_groups(account_id)
|
||||
|
||||
# Check if VOD is enabled
|
||||
enable_vod = request.data.get("enable_vod", False)
|
||||
if enable_vod:
|
||||
from apps.vod.tasks import refresh_categories
|
||||
|
||||
refresh_categories(account_id)
|
||||
print(response.data.get("account_type"))
|
||||
if response.data.get("account_type") == M3UAccount.Types.XC:
|
||||
refresh_m3u_groups(response.data.get("id"))
|
||||
|
||||
# After the instance is created, return the response
|
||||
return response
|
||||
|
||||
def update(self, request, *args, **kwargs):
|
||||
instance = self.get_object()
|
||||
old_vod_enabled = False
|
||||
|
||||
# Check current VOD setting
|
||||
if instance.custom_properties:
|
||||
custom_props = instance.custom_properties or {}
|
||||
old_vod_enabled = custom_props.get("enable_vod", False)
|
||||
|
||||
# Handle file upload first, if any
|
||||
file_path = None
|
||||
|
|
@ -144,58 +123,6 @@ class M3UAccountViewSet(viewsets.ModelViewSet):
|
|||
# Now call super().update() to update the instance
|
||||
response = super().update(request, *args, **kwargs)
|
||||
|
||||
# Check if VOD setting changed and trigger refresh if needed
|
||||
new_vod_enabled = request.data.get("enable_vod", old_vod_enabled)
|
||||
|
||||
if (
|
||||
instance.account_type == M3UAccount.Types.XC
|
||||
and not old_vod_enabled
|
||||
and new_vod_enabled
|
||||
):
|
||||
# Create Uncategorized categories immediately so they're available in the UI
|
||||
from apps.vod.models import VODCategory, M3UVODCategoryRelation
|
||||
|
||||
# Create movie Uncategorized category
|
||||
movie_category, _ = VODCategory.objects.get_or_create(
|
||||
name="Uncategorized",
|
||||
category_type="movie",
|
||||
defaults={}
|
||||
)
|
||||
|
||||
# Create series Uncategorized category
|
||||
series_category, _ = VODCategory.objects.get_or_create(
|
||||
name="Uncategorized",
|
||||
category_type="series",
|
||||
defaults={}
|
||||
)
|
||||
|
||||
# Create relations for both categories (disabled by default until first refresh)
|
||||
account_custom_props = instance.custom_properties or {}
|
||||
auto_enable_new = account_custom_props.get("auto_enable_new_groups_vod", True)
|
||||
|
||||
M3UVODCategoryRelation.objects.get_or_create(
|
||||
category=movie_category,
|
||||
m3u_account=instance,
|
||||
defaults={
|
||||
'enabled': auto_enable_new,
|
||||
'custom_properties': {}
|
||||
}
|
||||
)
|
||||
|
||||
M3UVODCategoryRelation.objects.get_or_create(
|
||||
category=series_category,
|
||||
m3u_account=instance,
|
||||
defaults={
|
||||
'enabled': auto_enable_new,
|
||||
'custom_properties': {}
|
||||
}
|
||||
)
|
||||
|
||||
# Trigger full VOD refresh
|
||||
from apps.vod.tasks import refresh_vod_content
|
||||
|
||||
refresh_vod_content.delay(instance.id)
|
||||
|
||||
# After the instance is updated, return the response
|
||||
return response
|
||||
|
||||
|
|
@ -217,95 +144,10 @@ class M3UAccountViewSet(viewsets.ModelViewSet):
|
|||
# Continue with regular partial update
|
||||
return super().partial_update(request, *args, **kwargs)
|
||||
|
||||
@action(detail=True, methods=["post"], url_path="refresh-vod")
|
||||
def refresh_vod(self, request, pk=None):
|
||||
"""Trigger VOD content refresh for XtreamCodes accounts"""
|
||||
account = self.get_object()
|
||||
|
||||
if account.account_type != M3UAccount.Types.XC:
|
||||
return Response(
|
||||
{"error": "VOD refresh is only available for XtreamCodes accounts"},
|
||||
status=status.HTTP_400_BAD_REQUEST,
|
||||
)
|
||||
|
||||
# Check if VOD is enabled
|
||||
vod_enabled = False
|
||||
if account.custom_properties:
|
||||
custom_props = account.custom_properties or {}
|
||||
vod_enabled = custom_props.get("enable_vod", False)
|
||||
|
||||
if not vod_enabled:
|
||||
return Response(
|
||||
{"error": "VOD is not enabled for this account"},
|
||||
status=status.HTTP_400_BAD_REQUEST,
|
||||
)
|
||||
|
||||
try:
|
||||
from apps.vod.tasks import refresh_vod_content
|
||||
|
||||
refresh_vod_content.delay(account.id)
|
||||
return Response(
|
||||
{"message": f"VOD refresh initiated for account {account.name}"},
|
||||
status=status.HTTP_202_ACCEPTED,
|
||||
)
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"error": f"Failed to initiate VOD refresh: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
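For reference, the refresh-vod action above can be exercised over HTTP. A minimal client sketch, assuming the router mounts M3UAccountViewSet under /api/m3u/accounts/ and bearer-token auth; neither the URL prefix nor the auth scheme is shown in this diff:

# Hypothetical client call; host, prefix and auth are assumptions, not confirmed here.
import requests

resp = requests.post(
    "http://dispatcharr.local/api/m3u/accounts/5/refresh-vod/",
    headers={"Authorization": "Bearer <token>"},
)
# 202 with a message on success; 400 if the account is not XtreamCodes or VOD is disabled.
print(resp.status_code, resp.json())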
|
||||
@action(detail=True, methods=["patch"], url_path="group-settings")
|
||||
def update_group_settings(self, request, pk=None):
|
||||
"""Update auto channel sync settings for M3U account groups"""
|
||||
account = self.get_object()
|
||||
group_settings = request.data.get("group_settings", [])
|
||||
category_settings = request.data.get("category_settings", [])
|
||||
|
||||
try:
|
||||
for setting in group_settings:
|
||||
group_id = setting.get("channel_group")
|
||||
enabled = setting.get("enabled", True)
|
||||
auto_sync = setting.get("auto_channel_sync", False)
|
||||
sync_start = setting.get("auto_sync_channel_start")
|
||||
custom_properties = setting.get("custom_properties", {})
|
||||
|
||||
if group_id:
|
||||
ChannelGroupM3UAccount.objects.update_or_create(
|
||||
channel_group_id=group_id,
|
||||
m3u_account=account,
|
||||
defaults={
|
||||
"enabled": enabled,
|
||||
"auto_channel_sync": auto_sync,
|
||||
"auto_sync_channel_start": sync_start,
|
||||
"custom_properties": custom_properties,
|
||||
},
|
||||
)
|
||||
|
||||
for setting in category_settings:
|
||||
category_id = setting.get("id")
|
||||
enabled = setting.get("enabled", True)
|
||||
custom_properties = setting.get("custom_properties", {})
|
||||
|
||||
if category_id:
|
||||
M3UVODCategoryRelation.objects.update_or_create(
|
||||
category_id=category_id,
|
||||
m3u_account=account,
|
||||
defaults={
|
||||
"enabled": enabled,
|
||||
"custom_properties": custom_properties,
|
||||
},
|
||||
)
|
||||
|
||||
return Response({"message": "Group settings updated successfully"})
|
||||
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"error": f"Failed to update group settings: {str(e)}"},
|
||||
status=status.HTTP_400_BAD_REQUEST,
|
||||
)
|
||||
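The group-settings action expects two optional lists in the request body. The keys below are taken directly from the handler above; the IDs and values are illustrative:

# Payload shape consumed by update_group_settings()
payload = {
    "group_settings": [
        {
            "channel_group": 12,
            "enabled": True,
            "auto_channel_sync": True,
            "auto_sync_channel_start": 100,
            "custom_properties": {},
        },
    ],
    "category_settings": [
        {"id": 3, "enabled": False, "custom_properties": {}},
    ],
}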
|
||||
|
||||
class M3UFilterViewSet(viewsets.ModelViewSet):
|
||||
"""Handles CRUD operations for M3U filters"""
|
||||
|
||||
queryset = M3UFilter.objects.all()
|
||||
serializer_class = M3UFilterSerializer
|
||||
|
||||
|
|
@ -315,23 +157,6 @@ class M3UFilterViewSet(viewsets.ModelViewSet):
|
|||
except KeyError:
|
||||
return [Authenticated()]
|
||||
|
||||
def get_queryset(self):
|
||||
m3u_account_id = self.kwargs["account_id"]
|
||||
return M3UFilter.objects.filter(m3u_account_id=m3u_account_id)
|
||||
|
||||
def perform_create(self, serializer):
|
||||
# Get the account ID from the URL
|
||||
account_id = self.kwargs["account_id"]
|
||||
|
||||
# # Get the M3UAccount instance for the account_id
|
||||
# m3u_account = M3UAccount.objects.get(id=account_id)
|
||||
|
||||
# Save the 'm3u_account' in the serializer context
|
||||
serializer.context["m3u_account"] = account_id
|
||||
|
||||
# Perform the actual save
|
||||
serializer.save(m3u_account_id=account_id)
|
||||
|
||||
|
||||
class ServerGroupViewSet(viewsets.ModelViewSet):
|
||||
"""Handles CRUD operations for Server Groups"""
|
||||
|
|
@ -395,54 +220,6 @@ class RefreshSingleM3UAPIView(APIView):
|
|||
)
|
||||
|
||||
|
||||
class RefreshAccountInfoAPIView(APIView):
|
||||
"""Triggers account info refresh for a single M3U account"""
|
||||
|
||||
def get_permissions(self):
|
||||
try:
|
||||
return [
|
||||
perm() for perm in permission_classes_by_method[self.request.method]
|
||||
]
|
||||
except KeyError:
|
||||
return [Authenticated()]
|
||||
|
||||
@swagger_auto_schema(
|
||||
operation_description="Triggers a refresh of account information for a specific M3U profile",
|
||||
responses={202: "Account info refresh initiated", 400: "Profile not found or not XtreamCodes"},
|
||||
)
|
||||
def post(self, request, profile_id, format=None):
|
||||
try:
|
||||
from .models import M3UAccountProfile
|
||||
profile = M3UAccountProfile.objects.get(id=profile_id)
|
||||
account = profile.m3u_account
|
||||
|
||||
if account.account_type != M3UAccount.Types.XC:
|
||||
return Response(
|
||||
{
|
||||
"success": False,
|
||||
"error": "Account info refresh is only available for XtreamCodes accounts",
|
||||
},
|
||||
status=status.HTTP_400_BAD_REQUEST,
|
||||
)
|
||||
|
||||
refresh_account_info.delay(profile_id)
|
||||
return Response(
|
||||
{
|
||||
"success": True,
|
||||
"message": f"Account info refresh initiated for profile {profile.name}.",
|
||||
},
|
||||
status=status.HTTP_202_ACCEPTED,
|
||||
)
|
||||
except M3UAccountProfile.DoesNotExist:
|
||||
return Response(
|
||||
{
|
||||
"success": False,
|
||||
"error": "Profile not found",
|
||||
},
|
||||
status=status.HTTP_404_NOT_FOUND,
|
||||
)
|
||||
|
||||
|
||||
class UserAgentViewSet(viewsets.ModelViewSet):
|
||||
"""Handles CRUD operations for User Agents"""
|
||||
|
||||
|
|
|
|||
|
|
@ -4,13 +4,6 @@ from .models import M3UAccount, M3UFilter
|
|||
import re
|
||||
|
||||
class M3UAccountForm(forms.ModelForm):
|
||||
enable_vod = forms.BooleanField(
|
||||
required=False,
|
||||
initial=False,
|
||||
label="Enable VOD Content",
|
||||
help_text="Parse and import VOD (movies/series) content for XtreamCodes accounts"
|
||||
)
|
||||
|
||||
class Meta:
|
||||
model = M3UAccount
|
||||
fields = [
|
||||
|
|
@ -20,34 +13,8 @@ class M3UAccountForm(forms.ModelForm):
|
|||
'server_group',
|
||||
'max_streams',
|
||||
'is_active',
|
||||
'enable_vod',
|
||||
]
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
# Set initial value for enable_vod from custom_properties
|
||||
if self.instance and self.instance.custom_properties:
|
||||
custom_props = self.instance.custom_properties or {}
|
||||
self.fields['enable_vod'].initial = custom_props.get('enable_vod', False)
|
||||
|
||||
def save(self, commit=True):
|
||||
instance = super().save(commit=False)
|
||||
|
||||
# Handle enable_vod field
|
||||
enable_vod = self.cleaned_data.get('enable_vod', False)
|
||||
|
||||
# Parse existing custom_properties
|
||||
custom_props = instance.custom_properties or {}
|
||||
|
||||
# Update VOD preference
|
||||
custom_props['enable_vod'] = enable_vod
|
||||
instance.custom_properties = custom_props
|
||||
|
||||
if commit:
|
||||
instance.save()
|
||||
return instance
|
||||
|
||||
def clean_uploaded_file(self):
|
||||
uploaded_file = self.cleaned_data.get('uploaded_file')
|
||||
if uploaded_file:
|
||||
|
|
|
|||
|
|
@ -1,18 +0,0 @@
|
|||
# Generated by Django 5.1.6 on 2025-07-22 21:16
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('m3u', '0012_alter_m3uaccount_refresh_interval'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='m3ufilter',
|
||||
name='filter_type',
|
||||
field=models.CharField(choices=[('group', 'Group'), ('name', 'Stream Name'), ('url', 'Stream URL')], default='group', help_text='Filter based on either group title or stream name.', max_length=50),
|
||||
),
|
||||
]
|
||||
|
|
@ -1,22 +0,0 @@
|
|||
# Generated by Django 5.1.6 on 2025-07-31 17:14
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('m3u', '0013_alter_m3ufilter_filter_type'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterModelOptions(
|
||||
name='m3ufilter',
|
||||
options={'ordering': ['order']},
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='m3ufilter',
|
||||
name='order',
|
||||
field=models.PositiveIntegerField(default=0),
|
||||
),
|
||||
]
|
||||
|
|
@ -1,22 +0,0 @@
|
|||
# Generated by Django 5.2.4 on 2025-08-02 16:06
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('m3u', '0014_alter_m3ufilter_options_m3ufilter_order'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterModelOptions(
|
||||
name='m3ufilter',
|
||||
options={},
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='m3ufilter',
|
||||
name='custom_properties',
|
||||
field=models.TextField(blank=True, null=True),
|
||||
),
|
||||
]
|
||||
|
|
@ -1,18 +0,0 @@
|
|||
# Generated by Django 5.2.4 on 2025-08-20 22:35
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('m3u', '0015_alter_m3ufilter_options_m3ufilter_custom_properties'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='m3uaccount',
|
||||
name='priority',
|
||||
field=models.PositiveIntegerField(default=0, help_text='Priority for VOD provider selection (higher numbers = higher priority). Used when multiple providers offer the same content.'),
|
||||
),
|
||||
]
|
||||
|
|
@ -1,28 +0,0 @@
|
|||
# Generated by Django 5.2.4 on 2025-09-02 15:19
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('m3u', '0016_m3uaccount_priority'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='m3uaccount',
|
||||
name='custom_properties',
|
||||
field=models.JSONField(blank=True, default=dict, null=True),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='m3uaccount',
|
||||
name='server_url',
|
||||
field=models.URLField(blank=True, help_text='The base URL of the M3U server (optional if a file is uploaded)', max_length=1000, null=True),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='m3ufilter',
|
||||
name='custom_properties',
|
||||
field=models.JSONField(blank=True, default=dict, null=True),
|
||||
),
|
||||
]
|
||||
|
|
@ -1,18 +0,0 @@
|
|||
# Generated by Django 5.2.4 on 2025-09-09 20:57
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('m3u', '0017_alter_m3uaccount_custom_properties_and_more'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='m3uaccountprofile',
|
||||
name='custom_properties',
|
||||
field=models.JSONField(blank=True, default=dict, help_text='Custom properties for storing account information from provider (e.g., XC account details, expiration dates)', null=True),
|
||||
),
|
||||
]
|
||||
|
|
@ -29,7 +29,6 @@ class M3UAccount(models.Model):
|
|||
max_length=255, unique=True, help_text="Unique name for this M3U account"
|
||||
)
|
||||
server_url = models.URLField(
|
||||
max_length=1000,
|
||||
blank=True,
|
||||
null=True,
|
||||
help_text="The base URL of the M3U server (optional if a file is uploaded)",
|
||||
|
|
@ -86,7 +85,7 @@ class M3UAccount(models.Model):
|
|||
account_type = models.CharField(choices=Types.choices, default=Types.STADNARD)
|
||||
username = models.CharField(max_length=255, null=True, blank=True)
|
||||
password = models.CharField(max_length=255, null=True, blank=True)
|
||||
custom_properties = models.JSONField(default=dict, blank=True, null=True)
|
||||
custom_properties = models.TextField(null=True, blank=True)
|
||||
refresh_interval = models.IntegerField(default=0)
|
||||
refresh_task = models.ForeignKey(
|
||||
PeriodicTask, on_delete=models.SET_NULL, null=True, blank=True
|
||||
|
|
@ -95,10 +94,6 @@ class M3UAccount(models.Model):
|
|||
default=7,
|
||||
help_text="Number of days after which a stream will be removed if not seen in the M3U source.",
|
||||
)
|
||||
priority = models.PositiveIntegerField(
|
||||
default=0,
|
||||
help_text="Priority for VOD provider selection (higher numbers = higher priority). Used when multiple providers offer the same content.",
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
return self.name
|
||||
|
|
@ -160,11 +155,9 @@ class M3UFilter(models.Model):
|
|||
"""Defines filters for M3U accounts based on stream name or group title."""
|
||||
|
||||
FILTER_TYPE_CHOICES = (
|
||||
("group", "Group"),
|
||||
("group", "Group Title"),
|
||||
("name", "Stream Name"),
|
||||
("url", "Stream URL"),
|
||||
)
|
||||
|
||||
m3u_account = models.ForeignKey(
|
||||
M3UAccount,
|
||||
on_delete=models.CASCADE,
|
||||
|
|
@ -184,8 +177,6 @@ class M3UFilter(models.Model):
|
|||
default=True,
|
||||
help_text="If True, matching items are excluded; if False, only matches are included.",
|
||||
)
|
||||
order = models.PositiveIntegerField(default=0)
|
||||
custom_properties = models.JSONField(default=dict, blank=True, null=True)
|
||||
|
||||
def applies_to(self, stream_name, group_name):
|
||||
target = group_name if self.filter_type == "group" else stream_name
|
||||
|
|
@ -235,6 +226,9 @@ class ServerGroup(models.Model):
|
|||
return self.name
|
||||
|
||||
|
||||
from django.db import models
|
||||
|
||||
|
||||
class M3UAccountProfile(models.Model):
|
||||
"""Represents a profile associated with an M3U Account."""
|
||||
|
||||
|
|
@ -263,12 +257,6 @@ class M3UAccountProfile(models.Model):
|
|||
max_length=255,
|
||||
)
|
||||
current_viewers = models.PositiveIntegerField(default=0)
|
||||
custom_properties = models.JSONField(
|
||||
default=dict,
|
||||
blank=True,
|
||||
null=True,
|
||||
help_text="Custom properties for storing account information from provider (e.g., XC account details, expiration dates)"
|
||||
)
|
||||
|
||||
class Meta:
|
||||
constraints = [
|
||||
|
|
@ -280,70 +268,6 @@ class M3UAccountProfile(models.Model):
|
|||
def __str__(self):
|
||||
return f"{self.name} ({self.m3u_account.name})"
|
||||
|
||||
def get_account_expiration(self):
|
||||
"""Get account expiration date from custom properties if available"""
|
||||
if not self.custom_properties:
|
||||
return None
|
||||
|
||||
user_info = self.custom_properties.get('user_info', {})
|
||||
exp_date = user_info.get('exp_date')
|
||||
|
||||
if exp_date:
|
||||
try:
|
||||
from datetime import datetime
|
||||
# XC exp_date is typically a Unix timestamp
|
||||
if isinstance(exp_date, (int, float)):
|
||||
return datetime.fromtimestamp(exp_date)
|
||||
elif isinstance(exp_date, str):
|
||||
# Try to parse as timestamp first, then as ISO date
|
||||
try:
|
||||
return datetime.fromtimestamp(float(exp_date))
|
||||
except ValueError:
|
||||
return datetime.fromisoformat(exp_date)
|
||||
except (ValueError, TypeError):
|
||||
pass
|
||||
|
||||
return None
|
||||
|
||||
def get_account_status(self):
|
||||
"""Get account status from custom properties if available"""
|
||||
if not self.custom_properties:
|
||||
return None
|
||||
|
||||
user_info = self.custom_properties.get('user_info', {})
|
||||
return user_info.get('status')
|
||||
|
||||
def get_max_connections(self):
|
||||
"""Get maximum connections from custom properties if available"""
|
||||
if not self.custom_properties:
|
||||
return None
|
||||
|
||||
user_info = self.custom_properties.get('user_info', {})
|
||||
return user_info.get('max_connections')
|
||||
|
||||
def get_active_connections(self):
|
||||
"""Get active connections from custom properties if available"""
|
||||
if not self.custom_properties:
|
||||
return None
|
||||
|
||||
user_info = self.custom_properties.get('user_info', {})
|
||||
return user_info.get('active_cons')
|
||||
|
||||
def get_last_refresh(self):
|
||||
"""Get last refresh timestamp from custom properties if available"""
|
||||
if not self.custom_properties:
|
||||
return None
|
||||
|
||||
last_refresh = self.custom_properties.get('last_refresh')
|
||||
if last_refresh:
|
||||
try:
|
||||
from datetime import datetime
|
||||
return datetime.fromisoformat(last_refresh)
|
||||
except (ValueError, TypeError):
|
||||
pass
|
||||
|
||||
return None
|
||||
|
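All of the helper methods above read from the same custom_properties layout. A representative value, using only the keys those helpers access (the concrete values are invented):

# Illustrative custom_properties for an M3UAccountProfile
profile.custom_properties = {
    "user_info": {
        "exp_date": 1767225600,   # Unix timestamp -> get_account_expiration()
        "status": "Active",       # -> get_account_status()
        "max_connections": 2,     # -> get_max_connections()
        "active_cons": 1,         # -> get_active_connections()
    },
    "last_refresh": "2025-09-01T12:00:00",  # ISO string -> get_last_refresh()
}
profile.get_account_expiration()  # datetime for 2026-01-01 (local time)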
||||
|
||||
@receiver(models.signals.post_save, sender=M3UAccount)
|
||||
def create_profile_for_m3u_account(sender, instance, created, **kwargs):
|
||||
|
|
|
|||
|
|
@ -1,14 +1,13 @@
|
|||
from core.utils import validate_flexible_url
|
||||
from rest_framework import serializers, status
|
||||
from rest_framework import serializers
|
||||
from rest_framework.response import Response
|
||||
from .models import M3UAccount, M3UFilter, ServerGroup, M3UAccountProfile
|
||||
from core.models import UserAgent
|
||||
from apps.channels.models import ChannelGroup, ChannelGroupM3UAccount
|
||||
from apps.channels.serializers import (
|
||||
ChannelGroupM3UAccountSerializer,
|
||||
ChannelGroupSerializer,
|
||||
)
|
||||
import logging
|
||||
import json
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
|
@ -16,30 +15,14 @@ logger = logging.getLogger(__name__)
|
|||
class M3UFilterSerializer(serializers.ModelSerializer):
|
||||
"""Serializer for M3U Filters"""
|
||||
|
||||
channel_groups = ChannelGroupM3UAccountSerializer(source="m3u_account", many=True)
|
||||
|
||||
class Meta:
|
||||
model = M3UFilter
|
||||
fields = [
|
||||
"id",
|
||||
"filter_type",
|
||||
"regex_pattern",
|
||||
"exclude",
|
||||
"order",
|
||||
"custom_properties",
|
||||
]
|
||||
fields = ["id", "filter_type", "regex_pattern", "exclude", "channel_groups"]
|
||||
|
||||
|
||||
class M3UAccountProfileSerializer(serializers.ModelSerializer):
|
||||
account = serializers.SerializerMethodField()
|
||||
|
||||
def get_account(self, obj):
|
||||
"""Include basic account information for frontend use"""
|
||||
return {
|
||||
'id': obj.m3u_account.id,
|
||||
'name': obj.m3u_account.name,
|
||||
'account_type': obj.m3u_account.account_type,
|
||||
'is_xtream_codes': obj.m3u_account.account_type == 'XC'
|
||||
}
|
||||
|
||||
class Meta:
|
||||
model = M3UAccountProfile
|
||||
fields = [
|
||||
|
|
@ -51,14 +34,8 @@ class M3UAccountProfileSerializer(serializers.ModelSerializer):
|
|||
"current_viewers",
|
||||
"search_pattern",
|
||||
"replace_pattern",
|
||||
"custom_properties",
|
||||
"account",
|
||||
]
|
||||
read_only_fields = ["id", "account"]
|
||||
extra_kwargs = {
|
||||
'search_pattern': {'required': False, 'allow_blank': True},
|
||||
'replace_pattern': {'required': False, 'allow_blank': True},
|
||||
}
|
||||
read_only_fields = ["id"]
|
||||
|
||||
def create(self, validated_data):
|
||||
m3u_account = self.context.get("m3u_account")
|
||||
|
|
@ -68,39 +45,9 @@ class M3UAccountProfileSerializer(serializers.ModelSerializer):
|
|||
|
||||
return super().create(validated_data)
|
||||
|
||||
def validate(self, data):
|
||||
"""Custom validation to handle default profiles"""
|
||||
# For updates to existing instances
|
||||
if self.instance and self.instance.is_default:
|
||||
# For default profiles, search_pattern and replace_pattern are not required
|
||||
# and we don't want to validate them since they shouldn't be changed
|
||||
return data
|
||||
|
||||
# For non-default profiles or new profiles, ensure required fields are present
|
||||
if not data.get('search_pattern'):
|
||||
raise serializers.ValidationError({
|
||||
'search_pattern': ['This field is required for non-default profiles.']
|
||||
})
|
||||
if not data.get('replace_pattern'):
|
||||
raise serializers.ValidationError({
|
||||
'replace_pattern': ['This field is required for non-default profiles.']
|
||||
})
|
||||
|
||||
return data
|
||||
|
||||
def update(self, instance, validated_data):
|
||||
if instance.is_default:
|
||||
# For default profiles, only allow updating name and custom_properties (for notes)
|
||||
allowed_fields = {'name', 'custom_properties'}
|
||||
|
||||
# Remove any fields that aren't allowed for default profiles
|
||||
disallowed_fields = set(validated_data.keys()) - allowed_fields
|
||||
if disallowed_fields:
|
||||
raise serializers.ValidationError(
|
||||
f"Default profiles can only modify name and notes. "
|
||||
f"Cannot modify: {', '.join(disallowed_fields)}"
|
||||
)
|
||||
|
||||
raise serializers.ValidationError("Default profiles cannot be modified.")
|
||||
return super().update(instance, validated_data)
|
||||
|
||||
def destroy(self, request, *args, **kwargs):
|
||||
|
|
@ -116,7 +63,7 @@ class M3UAccountProfileSerializer(serializers.ModelSerializer):
|
|||
class M3UAccountSerializer(serializers.ModelSerializer):
|
||||
"""Serializer for M3U Account"""
|
||||
|
||||
filters = serializers.SerializerMethodField()
|
||||
filters = M3UFilterSerializer(many=True, read_only=True)
|
||||
# Include user_agent as a mandatory field using its primary key.
|
||||
user_agent = serializers.PrimaryKeyRelatedField(
|
||||
queryset=UserAgent.objects.all(),
|
||||
|
|
@ -129,16 +76,6 @@ class M3UAccountSerializer(serializers.ModelSerializer):
|
|||
channel_groups = ChannelGroupM3UAccountSerializer(
|
||||
source="channel_group", many=True, required=False
|
||||
)
|
||||
server_url = serializers.CharField(
|
||||
required=False,
|
||||
allow_blank=True,
|
||||
allow_null=True,
|
||||
validators=[validate_flexible_url],
|
||||
)
|
||||
enable_vod = serializers.BooleanField(required=False, write_only=True)
|
||||
auto_enable_new_groups_live = serializers.BooleanField(required=False, write_only=True)
|
||||
auto_enable_new_groups_vod = serializers.BooleanField(required=False, write_only=True)
|
||||
auto_enable_new_groups_series = serializers.BooleanField(required=False, write_only=True)
|
||||
|
||||
class Meta:
|
||||
model = M3UAccount
|
||||
|
|
@ -163,13 +100,8 @@ class M3UAccountSerializer(serializers.ModelSerializer):
|
|||
"username",
|
||||
"password",
|
||||
"stale_stream_days",
|
||||
"priority",
|
||||
"status",
|
||||
"last_message",
|
||||
"enable_vod",
|
||||
"auto_enable_new_groups_live",
|
||||
"auto_enable_new_groups_vod",
|
||||
"auto_enable_new_groups_series",
|
||||
]
|
||||
extra_kwargs = {
|
||||
"password": {
|
||||
|
|
@ -178,40 +110,7 @@ class M3UAccountSerializer(serializers.ModelSerializer):
|
|||
},
|
||||
}
|
||||
|
||||
def to_representation(self, instance):
|
||||
data = super().to_representation(instance)
|
||||
|
||||
# Parse custom_properties to get VOD preference and auto_enable_new_groups settings
|
||||
custom_props = instance.custom_properties or {}
|
||||
|
||||
data["enable_vod"] = custom_props.get("enable_vod", False)
|
||||
data["auto_enable_new_groups_live"] = custom_props.get("auto_enable_new_groups_live", True)
|
||||
data["auto_enable_new_groups_vod"] = custom_props.get("auto_enable_new_groups_vod", True)
|
||||
data["auto_enable_new_groups_series"] = custom_props.get("auto_enable_new_groups_series", True)
|
||||
return data
|
||||
|
||||
def update(self, instance, validated_data):
|
||||
# Handle enable_vod preference and auto_enable_new_groups settings
|
||||
enable_vod = validated_data.pop("enable_vod", None)
|
||||
auto_enable_new_groups_live = validated_data.pop("auto_enable_new_groups_live", None)
|
||||
auto_enable_new_groups_vod = validated_data.pop("auto_enable_new_groups_vod", None)
|
||||
auto_enable_new_groups_series = validated_data.pop("auto_enable_new_groups_series", None)
|
||||
|
||||
# Get existing custom_properties
|
||||
custom_props = instance.custom_properties or {}
|
||||
|
||||
# Update preferences
|
||||
if enable_vod is not None:
|
||||
custom_props["enable_vod"] = enable_vod
|
||||
if auto_enable_new_groups_live is not None:
|
||||
custom_props["auto_enable_new_groups_live"] = auto_enable_new_groups_live
|
||||
if auto_enable_new_groups_vod is not None:
|
||||
custom_props["auto_enable_new_groups_vod"] = auto_enable_new_groups_vod
|
||||
if auto_enable_new_groups_series is not None:
|
||||
custom_props["auto_enable_new_groups_series"] = auto_enable_new_groups_series
|
||||
|
||||
validated_data["custom_properties"] = custom_props
|
||||
|
||||
# Pop out channel group memberships so we can handle them manually
|
||||
channel_group_data = validated_data.pop("channel_group", [])
|
||||
|
||||
|
|
@ -243,29 +142,6 @@ class M3UAccountSerializer(serializers.ModelSerializer):
|
|||
|
||||
return instance
|
||||
|
||||
def create(self, validated_data):
|
||||
# Handle enable_vod preference and auto_enable_new_groups settings during creation
|
||||
enable_vod = validated_data.pop("enable_vod", False)
|
||||
auto_enable_new_groups_live = validated_data.pop("auto_enable_new_groups_live", True)
|
||||
auto_enable_new_groups_vod = validated_data.pop("auto_enable_new_groups_vod", True)
|
||||
auto_enable_new_groups_series = validated_data.pop("auto_enable_new_groups_series", True)
|
||||
|
||||
# Parse existing custom_properties or create new
|
||||
custom_props = validated_data.get("custom_properties", {})
|
||||
|
||||
# Set preferences (default to True for auto_enable_new_groups)
|
||||
custom_props["enable_vod"] = enable_vod
|
||||
custom_props["auto_enable_new_groups_live"] = auto_enable_new_groups_live
|
||||
custom_props["auto_enable_new_groups_vod"] = auto_enable_new_groups_vod
|
||||
custom_props["auto_enable_new_groups_series"] = auto_enable_new_groups_series
|
||||
validated_data["custom_properties"] = custom_props
|
||||
|
||||
return super().create(validated_data)
|
||||
|
||||
def get_filters(self, obj):
|
||||
filters = obj.filters.order_by("order")
|
||||
return M3UFilterSerializer(filters, many=True).data
|
||||
|
||||
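Both create() and update() above fold the write-only booleans into the account's custom_properties JSON before saving. A sketch of the stored value after creating an account with enable_vod=True, using the defaults from create(); other keys may also be present:

expected_custom_properties = {
    "enable_vod": True,
    "auto_enable_new_groups_live": True,
    "auto_enable_new_groups_vod": True,
    "auto_enable_new_groups_series": True,
}
# to_representation() then surfaces these keys back as top-level read-only fields.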
|
||||
class ServerGroupSerializer(serializers.ModelSerializer):
|
||||
"""Serializer for Server Group"""
|
||||
|
|
apps/m3u/tasks.py (2775 lines changed): file diff suppressed because it is too large.
@@ -1,40 +1,9 @@
# apps/m3u/utils.py
import threading
import logging
from django.db import models

lock = threading.Lock()
# Dictionary to track usage: {m3u_account_id: current_usage}
active_streams_map = {}
logger = logging.getLogger(__name__)


def normalize_stream_url(url):
    """
    Normalize stream URLs for compatibility with FFmpeg.

    Handles VLC-specific syntax like udp://@239.0.0.1:1234 by removing the @ symbol.
    FFmpeg doesn't recognize the @ prefix for multicast addresses.

    Args:
        url (str): The stream URL to normalize

    Returns:
        str: The normalized URL
    """
    if not url:
        return url

    # Handle VLC-style UDP multicast URLs: udp://@239.0.0.1:1234 -> udp://239.0.0.1:1234
    # The @ symbol in VLC means "listen on all interfaces" but FFmpeg doesn't use this syntax
    if url.startswith('udp://@'):
        normalized = url.replace('udp://@', 'udp://', 1)
        logger.debug(f"Normalized VLC-style UDP URL: {url} -> {normalized}")
        return normalized

    # Could add other normalizations here in the future (rtp://@, etc.)
    return url


def increment_stream_count(account):
    with lock:
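A quick sanity check of normalize_stream_url above; it is a pure function, so it can be exercised standalone (module path as in the main branch):

from apps.m3u.utils import normalize_stream_url

assert normalize_stream_url("udp://@239.0.0.1:1234") == "udp://239.0.0.1:1234"
assert normalize_stream_url("http://example.com/stream.m3u8") == "http://example.com/stream.m3u8"
assert normalize_stream_url(None) is None  # falsy input is returned unchanged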
@ -55,64 +24,3 @@ def decrement_stream_count(account):
|
|||
active_streams_map[account.id] = current_usage
|
||||
account.active_streams = current_usage
|
||||
account.save(update_fields=['active_streams'])
|
||||
|
||||
|
||||
def calculate_tuner_count(minimum=1, unlimited_default=10):
|
||||
"""
|
||||
Calculate tuner/connection count from active M3U profiles and custom streams.
|
||||
This is the centralized function used by both HDHR and XtreamCodes APIs.
|
||||
|
||||
Args:
|
||||
minimum (int): Minimum number to return (default: 1)
|
||||
unlimited_default (int): Default value when unlimited profiles exist (default: 10)
|
||||
|
||||
Returns:
|
||||
int: Calculated tuner/connection count
|
||||
"""
|
||||
try:
|
||||
from apps.m3u.models import M3UAccountProfile
|
||||
from apps.channels.models import Stream
|
||||
|
||||
# Calculate tuner count from active profiles from active M3U accounts (excluding default "custom Default" profile)
|
||||
profiles = M3UAccountProfile.objects.filter(
|
||||
is_active=True,
|
||||
m3u_account__is_active=True, # Only include profiles from enabled M3U accounts
|
||||
).exclude(id=1)
|
||||
|
||||
# 1. Check if any profile has unlimited streams (max_streams=0)
|
||||
has_unlimited = profiles.filter(max_streams=0).exists()
|
||||
|
||||
# 2. Calculate tuner count from limited profiles
|
||||
limited_tuners = 0
|
||||
if not has_unlimited:
|
||||
limited_tuners = (
|
||||
profiles.filter(max_streams__gt=0)
|
||||
.aggregate(total=models.Sum("max_streams"))
|
||||
.get("total", 0)
|
||||
or 0
|
||||
)
|
||||
|
||||
# 3. Add custom stream count to tuner count
|
||||
custom_stream_count = Stream.objects.filter(is_custom=True).count()
|
||||
logger.debug(f"Found {custom_stream_count} custom streams")
|
||||
|
||||
# 4. Calculate final tuner count
|
||||
if has_unlimited:
|
||||
# If there are unlimited profiles, start with unlimited_default plus custom streams
|
||||
tuner_count = unlimited_default + custom_stream_count
|
||||
else:
|
||||
# Otherwise use the limited profile sum plus custom streams
|
||||
tuner_count = limited_tuners + custom_stream_count
|
||||
|
||||
# 5. Ensure minimum number
|
||||
tuner_count = max(minimum, tuner_count)
|
||||
|
||||
logger.debug(
|
||||
f"Calculated tuner count: {tuner_count} (limited profiles: {limited_tuners}, custom streams: {custom_stream_count}, unlimited: {has_unlimited})"
|
||||
)
|
||||
|
||||
return tuner_count
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error calculating tuner count: {e}")
|
||||
return minimum # Fallback to minimum value
|
||||
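To make the calculation above concrete: with two active limited profiles (max_streams 2 and 3), no unlimited profiles, and 4 custom streams, the function returns max(1, 2 + 3 + 4) = 9; if any active profile has max_streams=0 it returns unlimited_default + 4 = 14 instead. A call sketch, noting that this helper exists on the main branch and is removed in v0.6.2:

from apps.m3u.utils import calculate_tuner_count

# HDHR-style callers can raise the floor or the unlimited fallback as needed.
tuner_count = calculate_tuner_count(minimum=1, unlimited_default=10)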
|
|
|
|||
|
|
@ -3,7 +3,6 @@ from django.views import View
|
|||
from django.utils.decorators import method_decorator
|
||||
from django.contrib.auth.decorators import login_required
|
||||
from django.views.decorators.csrf import csrf_exempt
|
||||
from django.http import JsonResponse
|
||||
from apps.m3u.models import M3UAccount
|
||||
import json
|
||||
|
||||
|
|
|
|||
|
|
@ -14,26 +14,3 @@ class OutputM3UTest(TestCase):
|
|||
self.assertEqual(response.status_code, 200)
|
||||
content = response.content.decode()
|
||||
self.assertIn("#EXTM3U", content)
|
||||
|
||||
def test_generate_m3u_response_post_empty_body(self):
|
||||
"""
|
||||
Test that a POST request with an empty body returns 200 OK.
|
||||
"""
|
||||
url = reverse('output:generate_m3u')
|
||||
|
||||
response = self.client.post(url, data=None, content_type='application/x-www-form-urlencoded')
|
||||
content = response.content.decode()
|
||||
|
||||
self.assertEqual(response.status_code, 200, "POST with empty body should return 200 OK")
|
||||
self.assertIn("#EXTM3U", content)
|
||||
|
||||
def test_generate_m3u_response_post_with_body(self):
|
||||
"""
|
||||
Test that a POST request with a non-empty body returns 403 Forbidden.
|
||||
"""
|
||||
url = reverse('output:generate_m3u')
|
||||
|
||||
response = self.client.post(url, data={'evilstring': 'muhahaha'})
|
||||
|
||||
self.assertEqual(response.status_code, 403, "POST with body should return 403 Forbidden")
|
||||
self.assertIn("POST requests with body are not allowed, body is:", response.content.decode())
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
from django.urls import path, re_path, include
|
||||
from .views import m3u_endpoint, epg_endpoint, xc_get, xc_movie_stream, xc_series_stream
|
||||
from .views import m3u_endpoint, epg_endpoint, xc_get
|
||||
from core.views import stream_view
|
||||
|
||||
app_name = "output"
|
||||
|
|
apps/output/views.py (2568 lines changed): file diff suppressed because it is too large.
@ -1,2 +0,0 @@
|
|||
default_app_config = "apps.plugins.apps.PluginsConfig"
|
||||
|
||||
|
|
@ -1,22 +0,0 @@
|
|||
from django.urls import path
|
||||
from .api_views import (
|
||||
PluginsListAPIView,
|
||||
PluginReloadAPIView,
|
||||
PluginSettingsAPIView,
|
||||
PluginRunAPIView,
|
||||
PluginEnabledAPIView,
|
||||
PluginImportAPIView,
|
||||
PluginDeleteAPIView,
|
||||
)
|
||||
|
||||
app_name = "plugins"
|
||||
|
||||
urlpatterns = [
|
||||
path("plugins/", PluginsListAPIView.as_view(), name="list"),
|
||||
path("plugins/reload/", PluginReloadAPIView.as_view(), name="reload"),
|
||||
path("plugins/import/", PluginImportAPIView.as_view(), name="import"),
|
||||
path("plugins/<str:key>/delete/", PluginDeleteAPIView.as_view(), name="delete"),
|
||||
path("plugins/<str:key>/settings/", PluginSettingsAPIView.as_view(), name="settings"),
|
||||
path("plugins/<str:key>/run/", PluginRunAPIView.as_view(), name="run"),
|
||||
path("plugins/<str:key>/enabled/", PluginEnabledAPIView.as_view(), name="enabled"),
|
||||
]
|
||||
|
|
@ -1,306 +0,0 @@
|
|||
import logging
|
||||
from rest_framework.views import APIView
|
||||
from rest_framework.response import Response
|
||||
from rest_framework import status
|
||||
from rest_framework.decorators import api_view
|
||||
from django.conf import settings
|
||||
from django.core.files.uploadedfile import UploadedFile
|
||||
import io
|
||||
import os
|
||||
import zipfile
|
||||
import shutil
|
||||
import tempfile
|
||||
from apps.accounts.permissions import (
|
||||
Authenticated,
|
||||
permission_classes_by_method,
|
||||
)
|
||||
|
||||
from .loader import PluginManager
|
||||
from .models import PluginConfig
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class PluginsListAPIView(APIView):
|
||||
def get_permissions(self):
|
||||
try:
|
||||
return [
|
||||
perm() for perm in permission_classes_by_method[self.request.method]
|
||||
]
|
||||
except KeyError:
|
||||
return [Authenticated()]
|
||||
|
||||
def get(self, request):
|
||||
pm = PluginManager.get()
|
||||
# Ensure registry is up-to-date on each request
|
||||
pm.discover_plugins()
|
||||
return Response({"plugins": pm.list_plugins()})
|
||||
|
||||
|
||||
class PluginReloadAPIView(APIView):
|
||||
def get_permissions(self):
|
||||
try:
|
||||
return [
|
||||
perm() for perm in permission_classes_by_method[self.request.method]
|
||||
]
|
||||
except KeyError:
|
||||
return [Authenticated()]
|
||||
|
||||
def post(self, request):
|
||||
pm = PluginManager.get()
|
||||
pm.discover_plugins()
|
||||
return Response({"success": True, "count": len(pm._registry)})
|
||||
|
||||
|
||||
class PluginImportAPIView(APIView):
|
||||
def get_permissions(self):
|
||||
try:
|
||||
return [
|
||||
perm() for perm in permission_classes_by_method[self.request.method]
|
||||
]
|
||||
except KeyError:
|
||||
return [Authenticated()]
|
||||
|
||||
def post(self, request):
|
||||
file: UploadedFile = request.FILES.get("file")
|
||||
if not file:
|
||||
return Response({"success": False, "error": "Missing 'file' upload"}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
pm = PluginManager.get()
|
||||
plugins_dir = pm.plugins_dir
|
||||
|
||||
try:
|
||||
zf = zipfile.ZipFile(file)
|
||||
except zipfile.BadZipFile:
|
||||
return Response({"success": False, "error": "Invalid zip file"}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
# Extract to a temporary directory first to avoid server reload thrash
|
||||
tmp_root = tempfile.mkdtemp(prefix="plugin_import_")
|
||||
try:
|
||||
file_members = [m for m in zf.infolist() if not m.is_dir()]
|
||||
if not file_members:
|
||||
shutil.rmtree(tmp_root, ignore_errors=True)
|
||||
return Response({"success": False, "error": "Archive is empty"}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
for member in file_members:
|
||||
name = member.filename
|
||||
if not name or name.endswith("/"):
|
||||
continue
|
||||
# Normalize and prevent path traversal
|
||||
norm = os.path.normpath(name)
|
||||
if norm.startswith("..") or os.path.isabs(norm):
|
||||
shutil.rmtree(tmp_root, ignore_errors=True)
|
||||
return Response({"success": False, "error": "Unsafe path in archive"}, status=status.HTTP_400_BAD_REQUEST)
|
||||
dest_path = os.path.join(tmp_root, norm)
|
||||
os.makedirs(os.path.dirname(dest_path), exist_ok=True)
|
||||
with zf.open(member, 'r') as src, open(dest_path, 'wb') as dst:
|
||||
shutil.copyfileobj(src, dst)
|
||||
|
||||
# Find candidate directory containing plugin.py or __init__.py
|
||||
candidates = []
|
||||
for dirpath, dirnames, filenames in os.walk(tmp_root):
|
||||
has_pluginpy = "plugin.py" in filenames
|
||||
has_init = "__init__.py" in filenames
|
||||
if has_pluginpy or has_init:
|
||||
depth = len(os.path.relpath(dirpath, tmp_root).split(os.sep))
|
||||
candidates.append((0 if has_pluginpy else 1, depth, dirpath))
|
||||
if not candidates:
|
||||
shutil.rmtree(tmp_root, ignore_errors=True)
|
||||
return Response({"success": False, "error": "Invalid plugin: missing plugin.py or package __init__.py"}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
candidates.sort()
|
||||
chosen = candidates[0][2]
|
||||
# Determine plugin key: prefer chosen folder name; if chosen is tmp_root, use zip base name
|
||||
base_name = os.path.splitext(getattr(file, "name", "plugin"))[0]
|
||||
plugin_key = os.path.basename(chosen.rstrip(os.sep))
|
||||
if chosen.rstrip(os.sep) == tmp_root.rstrip(os.sep):
|
||||
plugin_key = base_name
|
||||
plugin_key = plugin_key.replace(" ", "_").lower()
|
||||
|
||||
final_dir = os.path.join(plugins_dir, plugin_key)
|
||||
if os.path.exists(final_dir):
|
||||
# If final dir exists but contains a valid plugin, refuse; otherwise clear it
|
||||
if os.path.exists(os.path.join(final_dir, "plugin.py")) or os.path.exists(os.path.join(final_dir, "__init__.py")):
|
||||
shutil.rmtree(tmp_root, ignore_errors=True)
|
||||
return Response({"success": False, "error": f"Plugin '{plugin_key}' already exists"}, status=status.HTTP_400_BAD_REQUEST)
|
||||
try:
|
||||
shutil.rmtree(final_dir)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Move chosen directory into final location
|
||||
if chosen.rstrip(os.sep) == tmp_root.rstrip(os.sep):
|
||||
# Move all contents into final_dir
|
||||
os.makedirs(final_dir, exist_ok=True)
|
||||
for item in os.listdir(tmp_root):
|
||||
shutil.move(os.path.join(tmp_root, item), os.path.join(final_dir, item))
|
||||
else:
|
||||
shutil.move(chosen, final_dir)
|
||||
# Cleanup temp
|
||||
shutil.rmtree(tmp_root, ignore_errors=True)
|
||||
target_dir = final_dir
|
||||
finally:
|
||||
try:
|
||||
shutil.rmtree(tmp_root, ignore_errors=True)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Reload discovery and validate plugin entry
|
||||
pm.discover_plugins()
|
||||
plugin = pm._registry.get(plugin_key)
|
||||
if not plugin:
|
||||
# Cleanup the copied folder to avoid leaving invalid plugin behind
|
||||
try:
|
||||
shutil.rmtree(target_dir, ignore_errors=True)
|
||||
except Exception:
|
||||
pass
|
||||
return Response({"success": False, "error": "Invalid plugin: missing Plugin class in plugin.py or __init__.py"}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
# Extra validation: ensure Plugin.run exists
|
||||
instance = getattr(plugin, "instance", None)
|
||||
run_method = getattr(instance, "run", None)
|
||||
if not callable(run_method):
|
||||
try:
|
||||
shutil.rmtree(target_dir, ignore_errors=True)
|
||||
except Exception:
|
||||
pass
|
||||
return Response({"success": False, "error": "Invalid plugin: Plugin class must define a callable run(action, params, context)"}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
# Find DB config to return enabled/ever_enabled
|
||||
try:
|
||||
cfg = PluginConfig.objects.get(key=plugin_key)
|
||||
enabled = cfg.enabled
|
||||
ever_enabled = getattr(cfg, "ever_enabled", False)
|
||||
except PluginConfig.DoesNotExist:
|
||||
enabled = False
|
||||
ever_enabled = False
|
||||
|
||||
return Response({
|
||||
"success": True,
|
||||
"plugin": {
|
||||
"key": plugin.key,
|
||||
"name": plugin.name,
|
||||
"version": plugin.version,
|
||||
"description": plugin.description,
|
||||
"enabled": enabled,
|
||||
"ever_enabled": ever_enabled,
|
||||
"fields": plugin.fields or [],
|
||||
"actions": plugin.actions or [],
|
||||
}
|
||||
})
|
||||
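To feed the import endpoint above, a plugin is just a zipped folder whose root (or a subfolder) contains plugin.py or __init__.py; the folder name becomes the plugin key. A rough client-side sketch: the archive layout follows the validation logic above, while the on-disk path, host, URL prefix and auth are assumptions:

import io
import zipfile

import requests

# Build an in-memory zip containing my_plugin/plugin.py (path must exist on disk).
buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as zf:
    zf.write("my_plugin/plugin.py")
buf.seek(0)

resp = requests.post(
    "http://dispatcharr.local/api/plugins/import/",  # prefix assumed
    headers={"Authorization": "Bearer <token>"},
    files={"file": ("my_plugin.zip", buf, "application/zip")},
)
print(resp.json())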
|
||||
|
||||
class PluginSettingsAPIView(APIView):
|
||||
def get_permissions(self):
|
||||
try:
|
||||
return [
|
||||
perm() for perm in permission_classes_by_method[self.request.method]
|
||||
]
|
||||
except KeyError:
|
||||
return [Authenticated()]
|
||||
|
||||
def post(self, request, key):
|
||||
pm = PluginManager.get()
|
||||
data = request.data or {}
|
||||
settings = data.get("settings", {})
|
||||
try:
|
||||
updated = pm.update_settings(key, settings)
|
||||
return Response({"success": True, "settings": updated})
|
||||
except Exception as e:
|
||||
return Response({"success": False, "error": str(e)}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
|
||||
class PluginRunAPIView(APIView):
|
||||
def get_permissions(self):
|
||||
try:
|
||||
return [
|
||||
perm() for perm in permission_classes_by_method[self.request.method]
|
||||
]
|
||||
except KeyError:
|
||||
return [Authenticated()]
|
||||
|
||||
def post(self, request, key):
|
||||
pm = PluginManager.get()
|
||||
action = request.data.get("action")
|
||||
params = request.data.get("params", {})
|
||||
if not action:
|
||||
return Response({"success": False, "error": "Missing 'action'"}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
# Respect plugin enabled flag
|
||||
try:
|
||||
cfg = PluginConfig.objects.get(key=key)
|
||||
if not cfg.enabled:
|
||||
return Response({"success": False, "error": "Plugin is disabled"}, status=status.HTTP_403_FORBIDDEN)
|
||||
except PluginConfig.DoesNotExist:
|
||||
return Response({"success": False, "error": "Plugin not found"}, status=status.HTTP_404_NOT_FOUND)
|
||||
|
||||
try:
|
||||
result = pm.run_action(key, action, params)
|
||||
return Response({"success": True, "result": result})
|
||||
except PermissionError as e:
|
||||
return Response({"success": False, "error": str(e)}, status=status.HTTP_403_FORBIDDEN)
|
||||
except Exception as e:
|
||||
logger.exception("Plugin action failed")
|
||||
return Response({"success": False, "error": str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
|
||||
|
||||
class PluginEnabledAPIView(APIView):
|
||||
def get_permissions(self):
|
||||
try:
|
||||
return [
|
||||
perm() for perm in permission_classes_by_method[self.request.method]
|
||||
]
|
||||
except KeyError:
|
||||
return [Authenticated()]
|
||||
|
||||
def post(self, request, key):
|
||||
enabled = request.data.get("enabled")
|
||||
if enabled is None:
|
||||
return Response({"success": False, "error": "Missing 'enabled' boolean"}, status=status.HTTP_400_BAD_REQUEST)
|
||||
try:
|
||||
cfg = PluginConfig.objects.get(key=key)
|
||||
cfg.enabled = bool(enabled)
|
||||
# Mark that this plugin has been enabled at least once
|
||||
if cfg.enabled and not cfg.ever_enabled:
|
||||
cfg.ever_enabled = True
|
||||
cfg.save(update_fields=["enabled", "ever_enabled", "updated_at"])
|
||||
return Response({"success": True, "enabled": cfg.enabled, "ever_enabled": cfg.ever_enabled})
|
||||
except PluginConfig.DoesNotExist:
|
||||
return Response({"success": False, "error": "Plugin not found"}, status=status.HTTP_404_NOT_FOUND)
|
||||
|
||||
|
||||
class PluginDeleteAPIView(APIView):
|
||||
def get_permissions(self):
|
||||
try:
|
||||
return [
|
||||
perm() for perm in permission_classes_by_method[self.request.method]
|
||||
]
|
||||
except KeyError:
|
||||
return [Authenticated()]
|
||||
|
||||
def delete(self, request, key):
|
||||
pm = PluginManager.get()
|
||||
plugins_dir = pm.plugins_dir
|
||||
target_dir = os.path.join(plugins_dir, key)
|
||||
# Safety: ensure path inside plugins_dir
|
||||
abs_plugins = os.path.abspath(plugins_dir) + os.sep
|
||||
abs_target = os.path.abspath(target_dir)
|
||||
if not abs_target.startswith(abs_plugins):
|
||||
return Response({"success": False, "error": "Invalid plugin path"}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
# Remove files
|
||||
if os.path.isdir(target_dir):
|
||||
try:
|
||||
shutil.rmtree(target_dir)
|
||||
except Exception as e:
|
||||
return Response({"success": False, "error": f"Failed to delete plugin files: {e}"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
|
||||
# Remove DB record
|
||||
try:
|
||||
PluginConfig.objects.filter(key=key).delete()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Reload registry
|
||||
pm.discover_plugins()
|
||||
return Response({"success": True})
|
||||
|
|
@ -1,54 +0,0 @@
|
|||
from django.apps import AppConfig
|
||||
import os
|
||||
import sys
|
||||
from django.db.models.signals import post_migrate
|
||||
|
||||
|
||||
class PluginsConfig(AppConfig):
|
||||
name = "apps.plugins"
|
||||
verbose_name = "Plugins"
|
||||
|
||||
def ready(self):
|
||||
"""Wire up plugin discovery without hitting the DB during app init.
|
||||
|
||||
- Skip during common management commands that don't need discovery.
|
||||
- Register post_migrate handler to sync plugin registry to DB after migrations.
|
||||
- Do an in-memory discovery (no DB) so registry is available early.
|
||||
"""
|
||||
try:
|
||||
# Allow explicit opt-out via env var
|
||||
if os.environ.get("DISPATCHARR_SKIP_PLUGIN_AUTODISCOVERY", "").lower() in ("1", "true", "yes"):
|
||||
return
|
||||
|
||||
argv = sys.argv[1:] if len(sys.argv) > 1 else []
|
||||
mgmt_cmds_to_skip = {
|
||||
# Skip immediate discovery for these commands
|
||||
"makemigrations", "collectstatic", "check", "test", "shell", "showmigrations",
|
||||
}
|
||||
if argv and argv[0] in mgmt_cmds_to_skip:
|
||||
return
|
||||
|
||||
# Run discovery with DB sync after the plugins app has been migrated
|
||||
def _post_migrate_discover(sender=None, app_config=None, **kwargs):
|
||||
try:
|
||||
if app_config and getattr(app_config, 'label', None) != 'plugins':
|
||||
return
|
||||
from .loader import PluginManager
|
||||
PluginManager.get().discover_plugins(sync_db=True)
|
||||
except Exception:
|
||||
import logging
|
||||
logging.getLogger(__name__).exception("Plugin discovery failed in post_migrate")
|
||||
|
||||
post_migrate.connect(
|
||||
_post_migrate_discover,
|
||||
dispatch_uid="apps.plugins.post_migrate_discover",
|
||||
)
|
||||
|
||||
# Perform non-DB discovery now to populate in-memory registry.
|
||||
from .loader import PluginManager
|
||||
PluginManager.get().discover_plugins(sync_db=False)
|
||||
except Exception:
|
||||
# Avoid breaking startup due to plugin errors
|
||||
import logging
|
||||
|
||||
logging.getLogger(__name__).exception("Plugin discovery wiring failed during app ready")
|
||||
|
|
@ -1,254 +0,0 @@
|
|||
import importlib
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from django.db import transaction
|
||||
|
||||
from .models import PluginConfig
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
|
||||
class LoadedPlugin:
|
||||
key: str
|
||||
name: str
|
||||
version: str = ""
|
||||
description: str = ""
|
||||
module: Any = None
|
||||
instance: Any = None
|
||||
fields: List[Dict[str, Any]] = field(default_factory=list)
|
||||
actions: List[Dict[str, Any]] = field(default_factory=list)
|
||||
|
||||
|
||||
class PluginManager:
|
||||
"""Singleton manager that discovers and runs plugins from /data/plugins."""
|
||||
|
||||
_instance: Optional["PluginManager"] = None
|
||||
|
||||
@classmethod
|
||||
def get(cls) -> "PluginManager":
|
||||
if not cls._instance:
|
||||
cls._instance = PluginManager()
|
||||
return cls._instance
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.plugins_dir = os.environ.get("DISPATCHARR_PLUGINS_DIR", "/data/plugins")
|
||||
self._registry: Dict[str, LoadedPlugin] = {}
|
||||
|
||||
# Ensure plugins directory exists
|
||||
os.makedirs(self.plugins_dir, exist_ok=True)
|
||||
if self.plugins_dir not in sys.path:
|
||||
sys.path.append(self.plugins_dir)
|
||||
|
||||
def discover_plugins(self, *, sync_db: bool = True) -> Dict[str, LoadedPlugin]:
|
||||
if sync_db:
|
||||
logger.info(f"Discovering plugins in {self.plugins_dir}")
|
||||
else:
|
||||
logger.debug(f"Discovering plugins (no DB sync) in {self.plugins_dir}")
|
||||
self._registry.clear()
|
||||
|
||||
try:
|
||||
for entry in sorted(os.listdir(self.plugins_dir)):
|
||||
path = os.path.join(self.plugins_dir, entry)
|
||||
if not os.path.isdir(path):
|
||||
continue
|
||||
|
||||
plugin_key = entry.replace(" ", "_").lower()
|
||||
|
||||
try:
|
||||
self._load_plugin(plugin_key, path)
|
||||
except Exception:
|
||||
logger.exception(f"Failed to load plugin '{plugin_key}' from {path}")
|
||||
|
||||
logger.info(f"Discovered {len(self._registry)} plugin(s)")
|
||||
except FileNotFoundError:
|
||||
logger.warning(f"Plugins directory not found: {self.plugins_dir}")
|
||||
|
||||
# Sync DB records (optional)
|
||||
if sync_db:
|
||||
try:
|
||||
self._sync_db_with_registry()
|
||||
except Exception:
|
||||
# Defer sync if database is not ready (e.g., first startup before migrate)
|
||||
logger.exception("Deferring plugin DB sync; database not ready yet")
|
||||
return self._registry
|
||||
|
||||
def _load_plugin(self, key: str, path: str):
|
||||
# Plugin can be a package and/or contain plugin.py. Prefer plugin.py when present.
|
||||
has_pkg = os.path.exists(os.path.join(path, "__init__.py"))
|
||||
has_pluginpy = os.path.exists(os.path.join(path, "plugin.py"))
|
||||
if not (has_pkg or has_pluginpy):
|
||||
logger.debug(f"Skipping {path}: no plugin.py or package")
|
||||
return
|
||||
|
||||
candidate_modules = []
|
||||
if has_pluginpy:
|
||||
candidate_modules.append(f"{key}.plugin")
|
||||
if has_pkg:
|
||||
candidate_modules.append(key)
|
||||
|
||||
module = None
|
||||
plugin_cls = None
|
||||
last_error = None
|
||||
for module_name in candidate_modules:
|
||||
try:
|
||||
logger.debug(f"Importing plugin module {module_name}")
|
||||
module = importlib.import_module(module_name)
|
||||
plugin_cls = getattr(module, "Plugin", None)
|
||||
if plugin_cls is not None:
|
||||
break
|
||||
else:
|
||||
logger.warning(f"Module {module_name} has no Plugin class")
|
||||
except Exception as e:
|
||||
last_error = e
|
||||
logger.exception(f"Error importing module {module_name}")
|
||||
|
||||
if plugin_cls is None:
|
||||
if last_error:
|
||||
raise last_error
|
||||
else:
|
||||
logger.warning(f"No Plugin class found for {key}; skipping")
|
||||
return
|
||||
|
||||
instance = plugin_cls()
|
||||
|
||||
name = getattr(instance, "name", key)
|
||||
version = getattr(instance, "version", "")
|
||||
description = getattr(instance, "description", "")
|
||||
fields = getattr(instance, "fields", [])
|
||||
actions = getattr(instance, "actions", [])
|
||||
|
||||
self._registry[key] = LoadedPlugin(
|
||||
key=key,
|
||||
name=name,
|
||||
version=version,
|
||||
description=description,
|
||||
module=module,
|
||||
instance=instance,
|
||||
fields=fields,
|
||||
actions=actions,
|
||||
)
|
||||
|
||||
    def _sync_db_with_registry(self):
        with transaction.atomic():
            for key, lp in self._registry.items():
                obj, _ = PluginConfig.objects.get_or_create(
                    key=key,
                    defaults={
                        "name": lp.name,
                        "version": lp.version,
                        "description": lp.description,
                        "settings": {},
                    },
                )
                # Update meta if changed
                changed = False
                if obj.name != lp.name:
                    obj.name = lp.name
                    changed = True
                if obj.version != lp.version:
                    obj.version = lp.version
                    changed = True
                if obj.description != lp.description:
                    obj.description = lp.description
                    changed = True
                if changed:
                    obj.save()

    def list_plugins(self) -> List[Dict[str, Any]]:
        from .models import PluginConfig

        plugins: List[Dict[str, Any]] = []
        try:
            configs = {c.key: c for c in PluginConfig.objects.all()}
        except Exception as e:
            # Database might not be migrated yet; fall back to registry only
            logger.warning("PluginConfig table unavailable; listing registry only: %s", e)
            configs = {}

        # First, include all discovered plugins
        for key, lp in self._registry.items():
            conf = configs.get(key)
            plugins.append(
                {
                    "key": key,
                    "name": lp.name,
                    "version": lp.version,
                    "description": lp.description,
                    "enabled": conf.enabled if conf else False,
                    "ever_enabled": getattr(conf, "ever_enabled", False) if conf else False,
                    "fields": lp.fields or [],
                    "settings": (conf.settings if conf else {}),
                    "actions": lp.actions or [],
                    "missing": False,
                }
            )

        # Then, include any DB-only configs (files missing or failed to load)
        discovered_keys = set(self._registry.keys())
        for key, conf in configs.items():
            if key in discovered_keys:
                continue
            plugins.append(
                {
                    "key": key,
                    "name": conf.name,
                    "version": conf.version,
                    "description": conf.description,
                    "enabled": conf.enabled,
                    "ever_enabled": getattr(conf, "ever_enabled", False),
                    "fields": [],
                    "settings": conf.settings or {},
                    "actions": [],
                    "missing": True,
                }
            )

        return plugins

    def get_plugin(self, key: str) -> Optional[LoadedPlugin]:
        return self._registry.get(key)

    def update_settings(self, key: str, settings: Dict[str, Any]) -> Dict[str, Any]:
        cfg = PluginConfig.objects.get(key=key)
        cfg.settings = settings or {}
        cfg.save(update_fields=["settings", "updated_at"])
        return cfg.settings

    def run_action(self, key: str, action_id: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        lp = self.get_plugin(key)
        if not lp or not lp.instance:
            raise ValueError(f"Plugin '{key}' not found")

        cfg = PluginConfig.objects.get(key=key)
        if not cfg.enabled:
            raise PermissionError(f"Plugin '{key}' is disabled")
        params = params or {}

        # Provide a context object to the plugin
        context = {
            "settings": cfg.settings or {},
            "logger": logger,
            "actions": {a.get("id"): a for a in (lp.actions or [])},
        }

        # Run either via Celery if plugin provides a delayed method, or inline
        run_method = getattr(lp.instance, "run", None)
        if not callable(run_method):
            raise ValueError(f"Plugin '{key}' has no runnable 'run' method")

        try:
            result = run_method(action_id, params, context)
        except Exception:
            logger.exception(f"Plugin '{key}' action '{action_id}' failed")
            raise

        # Normalize return
        if isinstance(result, dict):
            return result
        return {"status": "ok", "result": result}
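For reference, a minimal sketch of a plugin module that this loader could register. The key, name, version, fields, actions and run(action_id, params, context) attributes mirror what the code above reads via getattr; the module path and class name are hypothetical, shown only to illustrate the contract.

# plugins/hello_world/plugin.py (hypothetical path and names)
class Plugin:
    name = "Hello World"
    version = "0.1.0"
    description = "Example plugin that echoes a configurable greeting."

    fields = [
        {"id": "greeting", "label": "Greeting", "type": "string", "default": "Hello"},
    ]
    actions = [
        {"id": "say_hello", "label": "Say hello", "description": "Log the configured greeting."},
    ]

    def run(self, action_id, params, context):
        # context carries persisted settings and a logger, as assembled in run_action() above
        if action_id == "say_hello":
            greeting = context["settings"].get("greeting", "Hello")
            context["logger"].info(f"{greeting} from the example plugin")
            return {"status": "ok", "greeting": greeting}
        return {"status": "error", "message": f"Unknown action {action_id}"}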
@ -1,29 +0,0 @@
# Generated by Django 5.2.4 on 2025-09-13 13:51

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='PluginConfig',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('key', models.CharField(max_length=128, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('version', models.CharField(blank=True, default='', max_length=64)),
                ('description', models.TextField(blank=True, default='')),
                ('enabled', models.BooleanField(default=False)),
                ('ever_enabled', models.BooleanField(default=False)),
                ('settings', models.JSONField(blank=True, default=dict)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
@ -1 +0,0 @@
# This file marks the migrations package for the plugins app.
@ -1,19 +0,0 @@
from django.db import models


class PluginConfig(models.Model):
    """Stores discovered plugins and their persisted settings."""

    key = models.CharField(max_length=128, unique=True)
    name = models.CharField(max_length=255)
    version = models.CharField(max_length=64, blank=True, default="")
    description = models.TextField(blank=True, default="")
    enabled = models.BooleanField(default=False)
    # Tracks whether this plugin has ever been enabled at least once
    ever_enabled = models.BooleanField(default=False)
    settings = models.JSONField(default=dict, blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self) -> str:
        return f"{self.name} ({self.key})"
@ -1,28 +0,0 @@
from rest_framework import serializers


class PluginActionSerializer(serializers.Serializer):
    id = serializers.CharField()
    label = serializers.CharField()
    description = serializers.CharField(required=False, allow_blank=True)


class PluginFieldSerializer(serializers.Serializer):
    id = serializers.CharField()
    label = serializers.CharField()
    type = serializers.ChoiceField(choices=["string", "number", "boolean", "select"]) # simple types
    default = serializers.JSONField(required=False)
    help_text = serializers.CharField(required=False, allow_blank=True)
    options = serializers.ListField(child=serializers.DictField(), required=False)


class PluginSerializer(serializers.Serializer):
    key = serializers.CharField()
    name = serializers.CharField()
    version = serializers.CharField(allow_blank=True)
    description = serializers.CharField(allow_blank=True)
    enabled = serializers.BooleanField()
    fields = PluginFieldSerializer(many=True)
    settings = serializers.JSONField()
    actions = PluginActionSerializer(many=True)
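A brief sketch of how these serializers might be exercised against one entry from list_plugins(); the sample dict below is illustrative, not taken from a real installation.

# Hypothetical usage: validate one plugin record before returning it from an API view
sample = {
    "key": "hello_world",
    "name": "Hello World",
    "version": "0.1.0",
    "description": "Example plugin",
    "enabled": True,
    "fields": [{"id": "greeting", "label": "Greeting", "type": "string"}],
    "settings": {"greeting": "Hello"},
    "actions": [{"id": "say_hello", "label": "Say hello"}],
}

serializer = PluginSerializer(data=sample)
serializer.is_valid(raise_exception=True)  # raises if the record does not match the declared shape
print(serializer.validated_data["key"])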
@ -1,6 +1,4 @@
"""Shared configuration between proxy types"""
import time
from django.db import connection

class BaseConfig:
    DEFAULT_USER_AGENT = 'VLC/3.0.20 LibVLC/3.0.20' # Will only be used if connection to settings fail
@ -14,29 +12,13 @@ class BaseConfig:
    BUFFERING_TIMEOUT = 15 # Seconds to wait for buffering before switching streams
    BUFFER_SPEED = 1 # What speed to consider the stream buffering, 1x is normal speed, 2x is double speed, etc.

    # Cache for proxy settings (class-level, shared across all instances)
    _proxy_settings_cache = None
    _proxy_settings_cache_time = 0
    _proxy_settings_cache_ttl = 10 # Cache for 10 seconds

    @classmethod
    def get_proxy_settings(cls):
        """Get proxy settings from CoreSettings JSON data with fallback to defaults (cached)"""
        # Check if cache is still valid
        now = time.time()
        if cls._proxy_settings_cache is not None and (now - cls._proxy_settings_cache_time) < cls._proxy_settings_cache_ttl:
            return cls._proxy_settings_cache

        # Cache miss or expired - fetch from database
        """Get proxy settings from CoreSettings JSON data with fallback to defaults"""
        try:
            from core.models import CoreSettings
            settings = CoreSettings.get_proxy_settings()
            cls._proxy_settings_cache = settings
            cls._proxy_settings_cache_time = now
            return settings

            return CoreSettings.get_proxy_settings()
        except Exception:
            # Return defaults if database query fails
            return {
                "buffering_timeout": 15,
                "buffering_speed": 1.0,
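As a quick illustration of the class-level TTL cache used above: repeated calls inside the cache window reuse the stored dict instead of hitting the database. This is only a sketch of the pattern with hypothetical names, not project code.

# Sketch of the same class-level TTL cache pattern in isolation
import time

class CachedSettings:
    _cache = None
    _cache_time = 0
    _cache_ttl = 10  # seconds

    @classmethod
    def get(cls, fetch):
        now = time.time()
        if cls._cache is not None and (now - cls._cache_time) < cls._cache_ttl:
            return cls._cache  # served from cache, no database access
        cls._cache = fetch()   # cache miss or expired: fetch and remember
        cls._cache_time = now
        return cls._cache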
@ -45,13 +27,6 @@ class BaseConfig:
                "channel_init_grace_period": 5,
            }

        finally:
            # Always close the connection after reading settings
            try:
                connection.close()
            except Exception:
                pass

    @classmethod
    def get_redis_chunk_ttl(cls):
        """Get Redis chunk TTL from database or default"""
@ -94,10 +69,10 @@ class TSConfig(BaseConfig):
    CLEANUP_INTERVAL = 60 # Check for inactive channels every 60 seconds

    # Client tracking settings
    CLIENT_RECORD_TTL = 60 # How long client records persist in Redis (seconds). Client will be considered MIA after this time.
    CLIENT_RECORD_TTL = 5 # How long client records persist in Redis (seconds). Client will be considered MIA after this time.
    CLEANUP_CHECK_INTERVAL = 1 # How often to check for disconnected clients (seconds)
    CLIENT_HEARTBEAT_INTERVAL = 5 # How often to send client heartbeats (seconds)
    GHOST_CLIENT_MULTIPLIER = 6.0 # How many heartbeat intervals before client considered ghost (6 would mean 36 seconds if heartbeat interval is 6)
    CLIENT_HEARTBEAT_INTERVAL = 1 # How often to send client heartbeats (seconds)
    GHOST_CLIENT_MULTIPLIER = 5.0 # How many heartbeat intervals before client considered ghost (5 would mean 5 seconds if heartbeat interval is 1)
    CLIENT_WAIT_TIMEOUT = 30 # Seconds to wait for client to connect

    # Stream health and recovery settings
@ -10,7 +10,6 @@ import gc # Add import for garbage collection
from core.utils import RedisClient
from apps.proxy.ts_proxy.channel_status import ChannelStatus
from core.utils import send_websocket_update
from apps.proxy.vod_proxy.connection_manager import get_connection_manager

logger = logging.getLogger(__name__)

@ -60,13 +59,3 @@ def fetch_channel_stats():
    # Explicitly clean up large data structures
    all_channels = None
    gc.collect()

@shared_task
def cleanup_vod_connections():
    """Clean up stale VOD connections"""
    try:
        connection_manager = get_connection_manager()
        connection_manager.cleanup_stale_connections(max_age_seconds=3600) # 1 hour
        logger.info("VOD connection cleanup completed")
    except Exception as e:
        logger.error(f"Error in VOD connection cleanup: {e}", exc_info=True)
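If cleanup_vod_connections is meant to run on a schedule, a periodic entry along these lines would typically live in the Celery beat configuration; the dotted task path and interval shown here are assumptions, not taken from this repository.

# Hypothetical Celery beat entry (task path and interval are illustrative)
CELERY_BEAT_SCHEDULE = {
    "cleanup-vod-connections": {
        "task": "apps.proxy.tasks.cleanup_vod_connections",  # assumed dotted path
        "schedule": 600.0,  # every 10 minutes
    },
}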
@ -8,11 +8,10 @@ import gevent
|
|||
from typing import Set, Optional
|
||||
from apps.proxy.config import TSConfig as Config
|
||||
from redis.exceptions import ConnectionError, TimeoutError
|
||||
from .constants import EventType, ChannelState, ChannelMetadataField
|
||||
from .constants import EventType
|
||||
from .config_helper import ConfigHelper
|
||||
from .redis_keys import RedisKeys
|
||||
from .utils import get_logger
|
||||
from core.utils import send_websocket_update
|
||||
|
||||
logger = get_logger()
|
||||
|
||||
|
|
@ -26,7 +25,6 @@ class ClientManager:
|
|||
self.lock = threading.Lock()
|
||||
self.last_active_time = time.time()
|
||||
self.worker_id = worker_id # Store worker ID as instance variable
|
||||
self._heartbeat_running = True # Flag to control heartbeat thread
|
||||
|
||||
# STANDARDIZED KEYS: Move client set under channel namespace
|
||||
self.client_set_key = RedisKeys.clients(channel_id)
|
||||
|
|
@ -34,78 +32,61 @@ class ClientManager:
|
|||
self.heartbeat_interval = ConfigHelper.get('CLIENT_HEARTBEAT_INTERVAL', 10)
|
||||
self.last_heartbeat_time = {}
|
||||
|
||||
# Get ProxyServer instance for ownership checks
|
||||
from .server import ProxyServer
|
||||
self.proxy_server = ProxyServer.get_instance()
|
||||
|
||||
# Start heartbeat thread for local clients
|
||||
self._start_heartbeat_thread()
|
||||
self._registered_clients = set() # Track already registered client IDs
|
||||
|
||||
def _trigger_stats_update(self):
|
||||
"""Trigger a channel stats update via WebSocket"""
|
||||
try:
|
||||
# Import here to avoid potential import issues
|
||||
from apps.proxy.ts_proxy.channel_status import ChannelStatus
|
||||
import redis
|
||||
from django.conf import settings
|
||||
|
||||
# Get all channels from Redis using settings
|
||||
redis_url = getattr(settings, 'REDIS_URL', 'redis://localhost:6379/0')
|
||||
redis_client = redis.Redis.from_url(redis_url, decode_responses=True)
|
||||
all_channels = []
|
||||
cursor = 0
|
||||
|
||||
while True:
|
||||
cursor, keys = redis_client.scan(cursor, match="ts_proxy:channel:*:clients", count=100)
|
||||
for key in keys:
|
||||
# Extract channel ID from key
|
||||
parts = key.split(':')
|
||||
if len(parts) >= 4:
|
||||
ch_id = parts[2]
|
||||
channel_info = ChannelStatus.get_basic_channel_info(ch_id)
|
||||
if channel_info:
|
||||
all_channels.append(channel_info)
|
||||
|
||||
if cursor == 0:
|
||||
break
|
||||
|
||||
# Send WebSocket update using existing infrastructure
|
||||
send_websocket_update(
|
||||
"updates",
|
||||
"update",
|
||||
{
|
||||
"success": True,
|
||||
"type": "channel_stats",
|
||||
"stats": json.dumps({'channels': all_channels, 'count': len(all_channels)})
|
||||
}
|
||||
)
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to trigger stats update: {e}")
|
||||
|
||||
def _start_heartbeat_thread(self):
|
||||
"""Start thread to regularly refresh client presence in Redis for local clients"""
|
||||
"""Start thread to regularly refresh client presence in Redis"""
|
||||
def heartbeat_task():
|
||||
no_clients_count = 0 # Track consecutive empty cycles
|
||||
max_empty_cycles = 3 # Exit after this many consecutive empty checks
|
||||
|
||||
logger.debug(f"Started heartbeat thread for channel {self.channel_id} (interval: {self.heartbeat_interval}s)")
|
||||
|
||||
while self._heartbeat_running:
|
||||
while True:
|
||||
try:
|
||||
# Wait for the interval, but check stop flag frequently for quick shutdown
|
||||
# Sleep in 1-second increments to allow faster response to stop signal
|
||||
for _ in range(int(self.heartbeat_interval)):
|
||||
if not self._heartbeat_running:
|
||||
break
|
||||
time.sleep(1)
|
||||
|
||||
# Final check before doing work
|
||||
if not self._heartbeat_running:
|
||||
break
|
||||
# Wait for the interval
|
||||
gevent.sleep(self.heartbeat_interval)
|
||||
|
||||
# Send heartbeat for all local clients
|
||||
with self.lock:
|
||||
# Skip this cycle if we have no local clients
|
||||
if not self.clients:
|
||||
continue
|
||||
if not self.clients or not self.redis_client:
|
||||
# No clients left, increment our counter
|
||||
no_clients_count += 1
|
||||
|
||||
# Check if we're in a shutdown delay period before exiting
|
||||
in_shutdown_delay = False
|
||||
if self.redis_client:
|
||||
try:
|
||||
disconnect_key = RedisKeys.last_client_disconnect(self.channel_id)
|
||||
disconnect_time_bytes = self.redis_client.get(disconnect_key)
|
||||
if disconnect_time_bytes:
|
||||
disconnect_time = float(disconnect_time_bytes.decode('utf-8'))
|
||||
elapsed = time.time() - disconnect_time
|
||||
shutdown_delay = ConfigHelper.channel_shutdown_delay()
|
||||
|
||||
if elapsed < shutdown_delay:
|
||||
in_shutdown_delay = True
|
||||
logger.debug(f"Channel {self.channel_id} in shutdown delay: {elapsed:.1f}s of {shutdown_delay}s elapsed")
|
||||
except Exception as e:
|
||||
logger.debug(f"Error checking shutdown delay: {e}")
|
||||
|
||||
# Only exit if we've seen no clients for several consecutive checks AND we're not in shutdown delay
|
||||
if no_clients_count >= max_empty_cycles and not in_shutdown_delay:
|
||||
logger.info(f"No clients for channel {self.channel_id} after {no_clients_count} consecutive checks and not in shutdown delay, exiting heartbeat thread")
|
||||
return # This exits the thread
|
||||
|
||||
# Skip this cycle if we have no clients but continue if in shutdown delay
|
||||
if not in_shutdown_delay:
|
||||
continue
|
||||
else:
|
||||
# Reset counter during shutdown delay to prevent premature exit
|
||||
no_clients_count = 0
|
||||
continue
|
||||
else:
|
||||
# Reset counter when we see clients
|
||||
no_clients_count = 0
|
||||
|
||||
# IMPROVED GHOST DETECTION: Check for stale clients before sending heartbeats
|
||||
current_time = time.time()
|
||||
|
|
@ -176,20 +157,11 @@ class ClientManager:
|
|||
except Exception as e:
|
||||
logger.error(f"Error in client heartbeat thread: {e}")
|
||||
|
||||
logger.debug(f"Heartbeat thread exiting for channel {self.channel_id}")
|
||||
|
||||
thread = threading.Thread(target=heartbeat_task, daemon=True)
|
||||
thread.name = f"client-heartbeat-{self.channel_id}"
|
||||
thread.start()
|
||||
logger.debug(f"Started client heartbeat thread for channel {self.channel_id} (interval: {self.heartbeat_interval}s)")
|
||||
|
||||
def stop(self):
|
||||
"""Stop the heartbeat thread and cleanup"""
|
||||
logger.debug(f"Stopping ClientManager for channel {self.channel_id}")
|
||||
self._heartbeat_running = False
|
||||
# Give the thread a moment to exit gracefully
|
||||
# Note: We don't join() here because it's a daemon thread and will exit on its own
|
||||
|
||||
def _execute_redis_command(self, command_func):
|
||||
"""Execute Redis command with error handling"""
|
||||
if not self.redis_client:
|
||||
|
|
@ -288,9 +260,6 @@ class ClientManager:
|
|||
json.dumps(event_data)
|
||||
)
|
||||
|
||||
# Trigger channel stats update via WebSocket
|
||||
self._trigger_stats_update()
|
||||
|
||||
# Get total clients across all workers
|
||||
total_clients = self.get_total_client_count()
|
||||
logger.info(f"New client connected: {client_id} (local: {len(self.clients)}, total: {total_clients})")
|
||||
|
|
@ -305,8 +274,6 @@ class ClientManager:
|
|||
|
||||
def remove_client(self, client_id):
|
||||
"""Remove a client from this channel and Redis"""
|
||||
client_ip = None
|
||||
|
||||
with self.lock:
|
||||
if client_id in self.clients:
|
||||
self.clients.remove(client_id)
|
||||
|
|
@ -317,14 +284,6 @@ class ClientManager:
|
|||
self.last_active_time = time.time()
|
||||
|
||||
if self.redis_client:
|
||||
# Get client IP before removing the data
|
||||
client_key = f"ts_proxy:channel:{self.channel_id}:clients:{client_id}"
|
||||
client_data = self.redis_client.hgetall(client_key)
|
||||
if client_data and b'ip_address' in client_data:
|
||||
client_ip = client_data[b'ip_address'].decode('utf-8')
|
||||
elif client_data and 'ip_address' in client_data:
|
||||
client_ip = client_data['ip_address']
|
||||
|
||||
# Remove from channel's client set
|
||||
self.redis_client.srem(self.client_set_key, client_id)
|
||||
|
||||
|
|
@ -343,33 +302,16 @@ class ClientManager:
|
|||
|
||||
self._notify_owner_of_activity()
|
||||
|
||||
# Check if we're the owner - if so, handle locally; if not, publish event
|
||||
am_i_owner = self.proxy_server and self.proxy_server.am_i_owner(self.channel_id)
|
||||
|
||||
if am_i_owner:
|
||||
# We're the owner - handle the disconnect directly
|
||||
logger.debug(f"Owner handling CLIENT_DISCONNECTED for client {client_id} locally (not publishing)")
|
||||
if remaining == 0:
|
||||
# Trigger shutdown check directly via ProxyServer method
|
||||
logger.debug(f"No clients left - triggering immediate shutdown check")
|
||||
# Spawn greenlet to avoid blocking
|
||||
import gevent
|
||||
gevent.spawn(self.proxy_server.handle_client_disconnect, self.channel_id)
|
||||
else:
|
||||
# We're not the owner - publish event so owner can handle it
|
||||
logger.debug(f"Non-owner publishing CLIENT_DISCONNECTED event for client {client_id} on channel {self.channel_id} from worker {self.worker_id}")
|
||||
event_data = json.dumps({
|
||||
"event": EventType.CLIENT_DISCONNECTED,
|
||||
"channel_id": self.channel_id,
|
||||
"client_id": client_id,
|
||||
"worker_id": self.worker_id or "unknown",
|
||||
"timestamp": time.time(),
|
||||
"remaining_clients": remaining
|
||||
})
|
||||
self.redis_client.publish(RedisKeys.events_channel(self.channel_id), event_data)
|
||||
|
||||
# Trigger channel stats update via WebSocket
|
||||
self._trigger_stats_update()
|
||||
# Publish client disconnected event
|
||||
event_data = json.dumps({
|
||||
"event": EventType.CLIENT_DISCONNECTED, # Use constant instead of string
|
||||
"channel_id": self.channel_id,
|
||||
"client_id": client_id,
|
||||
"worker_id": self.worker_id or "unknown",
|
||||
"timestamp": time.time(),
|
||||
"remaining_clients": remaining
|
||||
})
|
||||
self.redis_client.publish(RedisKeys.events_channel(self.channel_id), event_data)
|
||||
|
||||
total_clients = self.get_total_client_count()
|
||||
logger.info(f"Client disconnected: {client_id} (local: {len(self.clients)}, total: {total_clients})")
|
||||
@ -100,12 +100,3 @@ class ConfigHelper:
    def channel_init_grace_period():
        """Get channel initialization grace period in seconds"""
        return Config.get_channel_init_grace_period()

    @staticmethod
    def chunk_timeout():
        """
        Get chunk timeout in seconds (used for both socket and HTTP read timeouts).
        This controls how long we wait for each chunk before timing out.
        Set this higher (e.g., 30s) for slow providers that may have intermittent delays.
        """
        return ConfigHelper.get('CHUNK_TIMEOUT', 5) # Default 5 seconds
@ -33,8 +33,6 @@ class EventType:
# Stream types
class StreamType:
    HLS = "hls"
    RTSP = "rtsp"
    UDP = "udp"
    TS = "ts"
    UNKNOWN = "unknown"
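A small sketch of how a URL might be mapped onto these constants; the helper name and the suffix rules are illustrative assumptions, not code from this diff.

# Hypothetical helper, shown only to illustrate how the constants above could be used
def guess_stream_type(url: str) -> str:
    if url.startswith("udp://"):
        return StreamType.UDP
    if url.startswith("rtsp://"):
        return StreamType.RTSP
    if url.split("?")[0].endswith(".m3u8"):
        return StreamType.HLS
    if url.split("?")[0].endswith(".ts"):
        return StreamType.TS
    return StreamType.UNKNOWN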
@ -1,138 +0,0 @@
"""
HTTP Stream Reader - Thread-based HTTP stream reader that writes to a pipe.
This allows us to use the same fetch_chunk() path for both transcode and HTTP streams.
"""

import threading
import os
import requests
from requests.adapters import HTTPAdapter
from .utils import get_logger

logger = get_logger()


class HTTPStreamReader:
    """Thread-based HTTP stream reader that writes to a pipe"""

    def __init__(self, url, user_agent=None, chunk_size=8192):
        self.url = url
        self.user_agent = user_agent
        self.chunk_size = chunk_size
        self.session = None
        self.response = None
        self.thread = None
        self.pipe_read = None
        self.pipe_write = None
        self.running = False

    def start(self):
        """Start the HTTP stream reader thread"""
        # Create a pipe (works on Windows and Unix)
        self.pipe_read, self.pipe_write = os.pipe()

        # Start the reader thread
        self.running = True
        self.thread = threading.Thread(target=self._read_stream, daemon=True)
        self.thread.start()

        logger.info(f"Started HTTP stream reader thread for {self.url}")
        return self.pipe_read

    def _read_stream(self):
        """Thread worker that reads HTTP stream and writes to pipe"""
        try:
            # Build headers
            headers = {}
            if self.user_agent:
                headers['User-Agent'] = self.user_agent

            logger.info(f"HTTP reader connecting to {self.url}")

            # Create session
            self.session = requests.Session()

            # Disable retries for faster failure detection
            adapter = HTTPAdapter(max_retries=0, pool_connections=1, pool_maxsize=1)
            self.session.mount('http://', adapter)
            self.session.mount('https://', adapter)

            # Stream the URL
            self.response = self.session.get(
                self.url,
                headers=headers,
                stream=True,
                timeout=(5, 30) # 5s connect, 30s read
            )

            if self.response.status_code != 200:
                logger.error(f"HTTP {self.response.status_code} from {self.url}")
                return

            logger.info(f"HTTP reader connected successfully, streaming data...")

            # Stream chunks to pipe
            chunk_count = 0
            for chunk in self.response.iter_content(chunk_size=self.chunk_size):
                if not self.running:
                    break

                if chunk:
                    try:
                        # Write binary data to pipe
                        os.write(self.pipe_write, chunk)
                        chunk_count += 1

                        # Log progress periodically
                        if chunk_count % 1000 == 0:
                            logger.debug(f"HTTP reader streamed {chunk_count} chunks")
                    except OSError as e:
                        logger.error(f"Pipe write error: {e}")
                        break

            logger.info("HTTP stream ended")

        except requests.exceptions.RequestException as e:
            logger.error(f"HTTP reader request error: {e}")
        except Exception as e:
            logger.error(f"HTTP reader unexpected error: {e}", exc_info=True)
        finally:
            self.running = False
            # Close write end of pipe to signal EOF
            try:
                if self.pipe_write is not None:
                    os.close(self.pipe_write)
                    self.pipe_write = None
            except:
                pass

    def stop(self):
        """Stop the HTTP stream reader"""
        logger.info("Stopping HTTP stream reader")
        self.running = False

        # Close response
        if self.response:
            try:
                self.response.close()
            except:
                pass

        # Close session
        if self.session:
            try:
                self.session.close()
            except:
                pass

        # Close write end of pipe
        if self.pipe_write is not None:
            try:
                os.close(self.pipe_write)
                self.pipe_write = None
            except:
                pass

        # Wait for thread
        if self.thread and self.thread.is_alive():
            self.thread.join(timeout=2.0)
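A minimal usage sketch for the reader above, assuming the caller owns the returned read end of the pipe; the URL is a placeholder.

# Hypothetical caller: stream an HTTP source through the pipe interface
import os

reader = HTTPStreamReader("http://example.com/stream.ts", user_agent="VLC/3.0.20 LibVLC/3.0.20")
fd = reader.start()  # read end of the pipe
try:
    while True:
        chunk = os.read(fd, 8192)  # same chunk size as the writer's default
        if not chunk:              # EOF: writer closed its end
            break
        # ... feed chunk into the buffer / fetch_chunk() path ...
finally:
    reader.stop()
    os.close(fd)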
@ -19,7 +19,7 @@ import gevent # Add gevent import
|
|||
from typing import Dict, Optional, Set
|
||||
from apps.proxy.config import TSConfig as Config
|
||||
from apps.channels.models import Channel, Stream
|
||||
from core.utils import RedisClient, log_system_event
|
||||
from core.utils import RedisClient
|
||||
from redis.exceptions import ConnectionError, TimeoutError
|
||||
from .stream_manager import StreamManager
|
||||
from .stream_buffer import StreamBuffer
|
||||
|
|
@ -131,8 +131,6 @@ class ProxyServer:
|
|||
max_retries = 10
|
||||
base_retry_delay = 1 # Start with 1 second delay
|
||||
max_retry_delay = 30 # Cap at 30 seconds
|
||||
pubsub_client = None
|
||||
pubsub = None
|
||||
|
||||
while True:
|
||||
try:
|
||||
|
|
@ -194,11 +192,35 @@ class ProxyServer:
|
|||
self.redis_client.delete(disconnect_key)
|
||||
|
||||
elif event_type == EventType.CLIENT_DISCONNECTED:
|
||||
client_id = data.get("client_id")
|
||||
worker_id = data.get("worker_id")
|
||||
logger.debug(f"Owner received {EventType.CLIENT_DISCONNECTED} event for channel {channel_id}, client {client_id} from worker {worker_id}")
|
||||
# Delegate to dedicated method
|
||||
self.handle_client_disconnect(channel_id)
|
||||
logger.debug(f"Owner received {EventType.CLIENT_DISCONNECTED} event for channel {channel_id}")
|
||||
# Check if any clients remain
|
||||
if channel_id in self.client_managers:
|
||||
# VERIFY REDIS CLIENT COUNT DIRECTLY
|
||||
client_set_key = RedisKeys.clients(channel_id)
|
||||
total = self.redis_client.scard(client_set_key) or 0
|
||||
|
||||
if total == 0:
|
||||
logger.debug(f"No clients left after disconnect event - stopping channel {channel_id}")
|
||||
# Set the disconnect timer for other workers to see
|
||||
disconnect_key = RedisKeys.last_client_disconnect(channel_id)
|
||||
self.redis_client.setex(disconnect_key, 60, str(time.time()))
|
||||
|
||||
# Get configured shutdown delay or default
|
||||
shutdown_delay = ConfigHelper.channel_shutdown_delay()
|
||||
|
||||
if shutdown_delay > 0:
|
||||
logger.info(f"Waiting {shutdown_delay}s before stopping channel...")
|
||||
gevent.sleep(shutdown_delay) # REPLACE: time.sleep(shutdown_delay)
|
||||
|
||||
# Re-check client count before stopping
|
||||
total = self.redis_client.scard(client_set_key) or 0
|
||||
if total > 0:
|
||||
logger.info(f"New clients connected during shutdown delay - aborting shutdown")
|
||||
self.redis_client.delete(disconnect_key)
|
||||
return
|
||||
|
||||
# Stop the channel directly
|
||||
self.stop_channel(channel_id)
|
||||
|
||||
|
||||
elif event_type == EventType.STREAM_SWITCH:
|
||||
|
|
@ -317,27 +339,20 @@ class ProxyServer:
|
|||
logger.error(f"Error in event listener: {e}. Retrying in {final_delay:.1f}s (attempt {retry_count})")
|
||||
gevent.sleep(final_delay) # REPLACE: time.sleep(final_delay)
|
||||
|
||||
# Try to clean up the old connection
|
||||
try:
|
||||
if 'pubsub' in locals():
|
||||
pubsub.close()
|
||||
if 'pubsub_client' in locals():
|
||||
pubsub_client.close()
|
||||
except:
|
||||
pass
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in event listener: {e}")
|
||||
# Add a short delay to prevent rapid retries on persistent errors
|
||||
gevent.sleep(5) # REPLACE: time.sleep(5)
|
||||
|
||||
finally:
|
||||
# Always clean up PubSub connections in all error paths
|
||||
try:
|
||||
if pubsub:
|
||||
pubsub.close()
|
||||
pubsub = None
|
||||
except Exception as e:
|
||||
logger.debug(f"Error closing pubsub: {e}")
|
||||
|
||||
try:
|
||||
if pubsub_client:
|
||||
pubsub_client.close()
|
||||
pubsub_client = None
|
||||
except Exception as e:
|
||||
logger.debug(f"Error closing pubsub_client: {e}")
|
||||
|
||||
thread = threading.Thread(target=event_listener, daemon=True)
|
||||
thread.name = "redis-event-listener"
|
||||
thread.start()
|
||||
|
|
@ -471,18 +486,17 @@ class ProxyServer:
|
|||
)
|
||||
return True
|
||||
|
||||
# Create buffer and client manager instances (or reuse if they exist)
|
||||
if channel_id not in self.stream_buffers:
|
||||
buffer = StreamBuffer(channel_id, redis_client=self.redis_client)
|
||||
self.stream_buffers[channel_id] = buffer
|
||||
# Create buffer and client manager instances
|
||||
buffer = StreamBuffer(channel_id, redis_client=self.redis_client)
|
||||
client_manager = ClientManager(
|
||||
channel_id,
|
||||
redis_client=self.redis_client,
|
||||
worker_id=self.worker_id
|
||||
)
|
||||
|
||||
if channel_id not in self.client_managers:
|
||||
client_manager = ClientManager(
|
||||
channel_id,
|
||||
redis_client=self.redis_client,
|
||||
worker_id=self.worker_id
|
||||
)
|
||||
self.client_managers[channel_id] = client_manager
|
||||
# Store in local tracking
|
||||
self.stream_buffers[channel_id] = buffer
|
||||
self.client_managers[channel_id] = client_manager
|
||||
|
||||
# IMPROVED: Set initializing state in Redis BEFORE any other operations
|
||||
if self.redis_client:
|
||||
|
|
@ -536,15 +550,13 @@ class ProxyServer:
|
|||
logger.info(f"Channel {channel_id} already owned by worker {current_owner}")
|
||||
logger.info(f"This worker ({self.worker_id}) will read from Redis buffer only")
|
||||
|
||||
# Create buffer but not stream manager (only if not already exists)
|
||||
if channel_id not in self.stream_buffers:
|
||||
buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client)
|
||||
self.stream_buffers[channel_id] = buffer
|
||||
# Create buffer but not stream manager
|
||||
buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client)
|
||||
self.stream_buffers[channel_id] = buffer
|
||||
|
||||
# Create client manager with channel_id and redis_client (only if not already exists)
|
||||
if channel_id not in self.client_managers:
|
||||
client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id)
|
||||
self.client_managers[channel_id] = client_manager
|
||||
# Create client manager with channel_id and redis_client
|
||||
client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id)
|
||||
self.client_managers[channel_id] = client_manager
|
||||
|
||||
return True
|
||||
|
||||
|
|
@ -559,15 +571,13 @@ class ProxyServer:
|
|||
# Another worker just acquired ownership
|
||||
logger.info(f"Another worker just acquired ownership of channel {channel_id}")
|
||||
|
||||
# Create buffer but not stream manager (only if not already exists)
|
||||
if channel_id not in self.stream_buffers:
|
||||
buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client)
|
||||
self.stream_buffers[channel_id] = buffer
|
||||
# Create buffer but not stream manager
|
||||
buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client)
|
||||
self.stream_buffers[channel_id] = buffer
|
||||
|
||||
# Create client manager with channel_id and redis_client (only if not already exists)
|
||||
if channel_id not in self.client_managers:
|
||||
client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id)
|
||||
self.client_managers[channel_id] = client_manager
|
||||
# Create client manager with channel_id and redis_client
|
||||
client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id)
|
||||
self.client_managers[channel_id] = client_manager
|
||||
|
||||
return True
|
||||
|
||||
|
|
@ -586,7 +596,7 @@ class ProxyServer:
|
|||
if channel_user_agent:
|
||||
metadata["user_agent"] = channel_user_agent
|
||||
|
||||
# Make sure stream_id is always set in metadata and properly logged
|
||||
# CRITICAL FIX: Make sure stream_id is always set in metadata and properly logged
|
||||
if channel_stream_id:
|
||||
metadata["stream_id"] = str(channel_stream_id)
|
||||
logger.info(f"Storing stream_id {channel_stream_id} in metadata for channel {channel_id}")
|
||||
|
|
@ -622,37 +632,13 @@ class ProxyServer:
|
|||
logger.info(f"Created StreamManager for channel {channel_id} with stream ID {channel_stream_id}")
|
||||
self.stream_managers[channel_id] = stream_manager
|
||||
|
||||
# Log channel start event
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=channel_id)
|
||||
|
||||
# Get stream name if stream_id is available
|
||||
stream_name = None
|
||||
if channel_stream_id:
|
||||
try:
|
||||
stream_obj = Stream.objects.get(id=channel_stream_id)
|
||||
stream_name = stream_obj.name
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
log_system_event(
|
||||
'channel_start',
|
||||
channel_id=channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
stream_name=stream_name,
|
||||
stream_id=channel_stream_id
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not log channel start event: {e}")
|
||||
|
||||
# Create client manager with channel_id, redis_client AND worker_id (only if not already exists)
|
||||
if channel_id not in self.client_managers:
|
||||
client_manager = ClientManager(
|
||||
channel_id=channel_id,
|
||||
redis_client=self.redis_client,
|
||||
worker_id=self.worker_id
|
||||
)
|
||||
self.client_managers[channel_id] = client_manager
|
||||
# Create client manager with channel_id, redis_client AND worker_id
|
||||
client_manager = ClientManager(
|
||||
channel_id=channel_id,
|
||||
redis_client=self.redis_client,
|
||||
worker_id=self.worker_id
|
||||
)
|
||||
self.client_managers[channel_id] = client_manager
|
||||
|
||||
# Start stream manager thread only for the owner
|
||||
thread = threading.Thread(target=stream_manager.run, daemon=True)
|
||||
|
|
@ -702,10 +688,9 @@ class ProxyServer:
|
|||
state = metadata.get(b'state', b'unknown').decode('utf-8')
|
||||
owner = metadata.get(b'owner', b'').decode('utf-8')
|
||||
|
||||
# States that indicate the channel is running properly or shutting down
|
||||
# States that indicate the channel is running properly
|
||||
valid_states = [ChannelState.ACTIVE, ChannelState.WAITING_FOR_CLIENTS,
|
||||
ChannelState.CONNECTING, ChannelState.BUFFERING, ChannelState.INITIALIZING,
|
||||
ChannelState.STOPPING]
|
||||
ChannelState.CONNECTING, ChannelState.BUFFERING, ChannelState.INITIALIZING]
|
||||
|
||||
# If the channel is in a valid state, check if the owner is still active
|
||||
if state in valid_states:
|
||||
|
|
@ -718,24 +703,12 @@ class ProxyServer:
|
|||
else:
|
||||
# This is a zombie channel - owner is gone but metadata still exists
|
||||
logger.warning(f"Detected zombie channel {channel_id} - owner {owner} is no longer active")
|
||||
|
||||
# Check if there are any clients connected
|
||||
client_set_key = RedisKeys.clients(channel_id)
|
||||
client_count = self.redis_client.scard(client_set_key) or 0
|
||||
|
||||
if client_count > 0:
|
||||
logger.warning(f"Zombie channel {channel_id} has {client_count} clients - attempting ownership takeover")
|
||||
# Could potentially take ownership here in the future
|
||||
# For now, just clean it up to be safe
|
||||
else:
|
||||
logger.warning(f"Zombie channel {channel_id} has no clients - cleaning up")
|
||||
|
||||
self._clean_zombie_channel(channel_id, metadata)
|
||||
return False
|
||||
elif state in [ChannelState.STOPPED, ChannelState.ERROR]:
|
||||
# These terminal states indicate the channel should be cleaned up and reinitialized
|
||||
logger.info(f"Channel {channel_id} in terminal state {state} - returning False to trigger cleanup")
|
||||
return False
|
||||
elif state in [ChannelState.STOPPING, ChannelState.STOPPED, ChannelState.ERROR]:
|
||||
# These states indicate the channel should be reinitialized
|
||||
logger.info(f"Channel {channel_id} exists but in terminal state: {state}")
|
||||
return True
|
||||
else:
|
||||
# Unknown or initializing state, check how long it's been in this state
|
||||
if b'state_changed_at' in metadata:
|
||||
|
|
@ -799,44 +772,6 @@ class ProxyServer:
|
|||
logger.error(f"Error cleaning zombie channel {channel_id}: {e}", exc_info=True)
|
||||
return False
|
||||
|
||||
def handle_client_disconnect(self, channel_id):
|
||||
"""
|
||||
Handle client disconnect event - check if channel should shut down.
|
||||
Can be called directly by owner or via PubSub from non-owner workers.
|
||||
"""
|
||||
if channel_id not in self.client_managers:
|
||||
return
|
||||
|
||||
try:
|
||||
# VERIFY REDIS CLIENT COUNT DIRECTLY
|
||||
client_set_key = RedisKeys.clients(channel_id)
|
||||
total = self.redis_client.scard(client_set_key) or 0
|
||||
|
||||
if total == 0:
|
||||
logger.debug(f"No clients left after disconnect event - stopping channel {channel_id}")
|
||||
# Set the disconnect timer for other workers to see
|
||||
disconnect_key = RedisKeys.last_client_disconnect(channel_id)
|
||||
self.redis_client.setex(disconnect_key, 60, str(time.time()))
|
||||
|
||||
# Get configured shutdown delay or default
|
||||
shutdown_delay = ConfigHelper.channel_shutdown_delay()
|
||||
|
||||
if shutdown_delay > 0:
|
||||
logger.info(f"Waiting {shutdown_delay}s before stopping channel...")
|
||||
gevent.sleep(shutdown_delay)
|
||||
|
||||
# Re-check client count before stopping
|
||||
total = self.redis_client.scard(client_set_key) or 0
|
||||
if total > 0:
|
||||
logger.info(f"New clients connected during shutdown delay - aborting shutdown")
|
||||
self.redis_client.delete(disconnect_key)
|
||||
return
|
||||
|
||||
# Stop the channel directly
|
||||
self.stop_channel(channel_id)
|
||||
except Exception as e:
|
||||
logger.error(f"Error handling client disconnect for channel {channel_id}: {e}")
|
||||
|
||||
def stop_channel(self, channel_id):
|
||||
"""Stop a channel with proper ownership handling"""
|
||||
try:
|
||||
|
|
@ -884,41 +819,6 @@ class ProxyServer:
|
|||
self.release_ownership(channel_id)
|
||||
logger.info(f"Released ownership of channel {channel_id}")
|
||||
|
||||
# Log channel stop event (after cleanup, before releasing ownership section ends)
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=channel_id)
|
||||
|
||||
# Calculate runtime and get total bytes from metadata
|
||||
runtime = None
|
||||
total_bytes = None
|
||||
if self.redis_client:
|
||||
metadata_key = RedisKeys.channel_metadata(channel_id)
|
||||
metadata = self.redis_client.hgetall(metadata_key)
|
||||
if metadata:
|
||||
# Calculate runtime from init_time
|
||||
if b'init_time' in metadata:
|
||||
try:
|
||||
init_time = float(metadata[b'init_time'].decode('utf-8'))
|
||||
runtime = round(time.time() - init_time, 2)
|
||||
except Exception:
|
||||
pass
|
||||
# Get total bytes transferred
|
||||
if b'total_bytes' in metadata:
|
||||
try:
|
||||
total_bytes = int(metadata[b'total_bytes'].decode('utf-8'))
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
log_system_event(
|
||||
'channel_stop',
|
||||
channel_id=channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
runtime=runtime,
|
||||
total_bytes=total_bytes
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not log channel stop event: {e}")
|
||||
|
||||
# Always clean up local resources - WITH SAFE CHECKS
|
||||
if channel_id in self.stream_managers:
|
||||
del self.stream_managers[channel_id]
|
||||
|
|
@ -946,10 +846,6 @@ class ProxyServer:
|
|||
# Clean up client manager - SAFE CHECK HERE TOO
|
||||
if channel_id in self.client_managers:
|
||||
try:
|
||||
client_manager = self.client_managers[channel_id]
|
||||
# Stop the heartbeat thread before deleting
|
||||
if hasattr(client_manager, 'stop'):
|
||||
client_manager.stop()
|
||||
del self.client_managers[channel_id]
|
||||
logger.info(f"Removed client manager for channel {channel_id}")
|
||||
except KeyError:
|
||||
|
|
@ -1024,15 +920,6 @@ class ProxyServer:
|
|||
if channel_id in self.client_managers:
|
||||
client_manager = self.client_managers[channel_id]
|
||||
total_clients = client_manager.get_total_client_count()
|
||||
else:
|
||||
# This can happen during reconnection attempts or crashes
|
||||
# Check Redis directly for any connected clients
|
||||
if self.redis_client:
|
||||
client_set_key = RedisKeys.clients(channel_id)
|
||||
total_clients = self.redis_client.scard(client_set_key) or 0
|
||||
|
||||
if total_clients == 0:
|
||||
logger.warning(f"Channel {channel_id} is missing client_manager but we're the owner with 0 clients - will trigger cleanup")
|
||||
|
||||
# Log client count periodically
|
||||
if time.time() % 30 < 1: # Every ~30 seconds
|
||||
|
|
@ -1040,14 +927,7 @@ class ProxyServer:
|
|||
|
||||
# If in connecting or waiting_for_clients state, check grace period
|
||||
if channel_state in [ChannelState.CONNECTING, ChannelState.WAITING_FOR_CLIENTS]:
|
||||
# Check if channel is already stopping
|
||||
if self.redis_client:
|
||||
stop_key = RedisKeys.channel_stopping(channel_id)
|
||||
if self.redis_client.exists(stop_key):
|
||||
logger.debug(f"Channel {channel_id} is already stopping - skipping monitor shutdown")
|
||||
continue
|
||||
|
||||
# Get connection_ready_time from metadata (indicates if channel reached ready state)
|
||||
# Get connection ready time from metadata
|
||||
connection_ready_time = None
|
||||
if metadata and b'connection_ready_time' in metadata:
|
||||
try:
|
||||
|
|
@ -1055,60 +935,17 @@ class ProxyServer:
|
|||
except (ValueError, TypeError):
|
||||
pass
|
||||
|
||||
if total_clients == 0:
|
||||
# Check if we have a connection_attempt timestamp (set when CONNECTING starts)
|
||||
connection_attempt_time = None
|
||||
attempt_key = RedisKeys.connection_attempt(channel_id)
|
||||
if self.redis_client:
|
||||
attempt_value = self.redis_client.get(attempt_key)
|
||||
if attempt_value:
|
||||
try:
|
||||
connection_attempt_time = float(attempt_value.decode('utf-8'))
|
||||
except (ValueError, TypeError):
|
||||
pass
|
||||
# If still connecting, give it more time
|
||||
if channel_state == ChannelState.CONNECTING:
|
||||
logger.debug(f"Channel {channel_id} still connecting - not checking for clients yet")
|
||||
continue
|
||||
|
||||
# Also get init time as a fallback
|
||||
init_time = None
|
||||
if metadata and b'init_time' in metadata:
|
||||
try:
|
||||
init_time = float(metadata[b'init_time'].decode('utf-8'))
|
||||
except (ValueError, TypeError):
|
||||
pass
|
||||
|
||||
# Use whichever timestamp we have (prefer connection_attempt as it's more recent)
|
||||
start_time = connection_attempt_time or init_time
|
||||
|
||||
if start_time:
|
||||
# Check which timeout to apply based on channel lifecycle
|
||||
if connection_ready_time:
|
||||
# Already reached ready - use shutdown_delay
|
||||
time_since_ready = time.time() - connection_ready_time
|
||||
shutdown_delay = ConfigHelper.channel_shutdown_delay()
|
||||
|
||||
if time_since_ready > shutdown_delay:
|
||||
logger.warning(
|
||||
f"Channel {channel_id} in {channel_state} state with 0 clients for {time_since_ready:.1f}s "
|
||||
f"(after reaching ready, shutdown_delay: {shutdown_delay}s) - stopping channel"
|
||||
)
|
||||
self.stop_channel(channel_id)
|
||||
continue
|
||||
else:
|
||||
# Never reached ready - use grace_period timeout
|
||||
time_since_start = time.time() - start_time
|
||||
connecting_timeout = ConfigHelper.channel_init_grace_period()
|
||||
|
||||
if time_since_start > connecting_timeout:
|
||||
logger.warning(
|
||||
f"Channel {channel_id} stuck in {channel_state} state for {time_since_start:.1f}s "
|
||||
f"with no clients (timeout: {connecting_timeout}s) - stopping channel due to upstream issues"
|
||||
)
|
||||
self.stop_channel(channel_id)
|
||||
continue
|
||||
elif connection_ready_time:
|
||||
# We have clients now, but check grace period for state transition
|
||||
# If waiting for clients, check grace period
|
||||
if connection_ready_time:
|
||||
grace_period = ConfigHelper.channel_init_grace_period()
|
||||
time_since_ready = time.time() - connection_ready_time
|
||||
|
||||
# Add this debug log
|
||||
logger.debug(f"GRACE PERIOD CHECK: Channel {channel_id} in {channel_state} state, "
|
||||
f"time_since_ready={time_since_ready:.1f}s, grace_period={grace_period}s, "
|
||||
f"total_clients={total_clients}")
|
||||
|
|
@ -1117,9 +954,16 @@ class ProxyServer:
|
|||
# Still within grace period
|
||||
logger.debug(f"Channel {channel_id} in grace period - {time_since_ready:.1f}s of {grace_period}s elapsed")
|
||||
continue
|
||||
elif total_clients == 0:
|
||||
# Grace period expired with no clients
|
||||
logger.info(f"Grace period expired ({time_since_ready:.1f}s > {grace_period}s) with no clients - stopping channel {channel_id}")
|
||||
self.stop_channel(channel_id)
|
||||
else:
|
||||
# Grace period expired with clients - mark channel as active
|
||||
# Grace period expired but we have clients - mark channel as active
|
||||
logger.info(f"Grace period expired with {total_clients} clients - marking channel {channel_id} as active")
|
||||
old_state = "unknown"
|
||||
if metadata and b'state' in metadata:
|
||||
old_state = metadata[b'state'].decode('utf-8')
|
||||
if self.update_channel_state(channel_id, ChannelState.ACTIVE, {
|
||||
"grace_period_ended_at": str(time.time()),
|
||||
"clients_at_activation": str(total_clients)
|
||||
|
|
@ -1127,13 +971,6 @@ class ProxyServer:
|
|||
logger.info(f"Channel {channel_id} activated with {total_clients} clients after grace period")
|
||||
# If active and no clients, start normal shutdown procedure
|
||||
elif channel_state not in [ChannelState.CONNECTING, ChannelState.WAITING_FOR_CLIENTS] and total_clients == 0:
|
||||
# Check if channel is already stopping
|
||||
if self.redis_client:
|
||||
stop_key = RedisKeys.channel_stopping(channel_id)
|
||||
if self.redis_client.exists(stop_key):
|
||||
logger.debug(f"Channel {channel_id} is already stopping - skipping monitor shutdown")
|
||||
continue
|
||||
|
||||
# Check if there's a pending no-clients timeout
|
||||
disconnect_key = RedisKeys.last_client_disconnect(channel_id)
|
||||
disconnect_time = None
|
||||
|
|
@ -1193,30 +1030,14 @@ class ProxyServer:
|
|||
continue
|
||||
|
||||
# Check for local client count - if zero, clean up our local resources
|
||||
if channel_id in self.client_managers:
|
||||
if self.client_managers[channel_id].get_client_count() == 0:
|
||||
# We're not the owner, and we have no local clients - clean up our resources
|
||||
logger.debug(f"Non-owner cleanup: Channel {channel_id} has no local clients, cleaning up local resources")
|
||||
self._cleanup_local_resources(channel_id)
|
||||
else:
|
||||
# This shouldn't happen, but clean up anyway
|
||||
logger.warning(f"Non-owner cleanup: Channel {channel_id} has no client_manager entry, cleaning up local resources")
|
||||
if self.client_managers[channel_id].get_client_count() == 0:
|
||||
# We're not the owner, and we have no local clients - clean up our resources
|
||||
logger.debug(f"Non-owner cleanup: Channel {channel_id} has no local clients, cleaning up local resources")
|
||||
self._cleanup_local_resources(channel_id)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in cleanup thread: {e}", exc_info=True)
|
||||
|
||||
# Periodically check for orphaned channels (every 30 seconds)
|
||||
if hasattr(self, '_last_orphan_check'):
|
||||
if time.time() - self._last_orphan_check > 30:
|
||||
try:
|
||||
self._check_orphaned_metadata()
|
||||
self._last_orphan_check = time.time()
|
||||
except Exception as orphan_error:
|
||||
logger.error(f"Error checking orphaned metadata: {orphan_error}", exc_info=True)
|
||||
else:
|
||||
self._last_orphan_check = time.time()
|
||||
|
||||
gevent.sleep(ConfigHelper.cleanup_check_interval()) # REPLACE: time.sleep(ConfigHelper.cleanup_check_interval())
|
||||
|
||||
thread = threading.Thread(target=cleanup_task, daemon=True)
|
||||
|
|
@ -1238,6 +1059,10 @@ class ProxyServer:
|
|||
try:
|
||||
channel_id = key.decode('utf-8').split(':')[2]
|
||||
|
||||
# Skip channels we already have locally
|
||||
if channel_id in self.stream_buffers:
|
||||
continue
|
||||
|
||||
# Check if this channel has an owner
|
||||
owner = self.get_channel_owner(channel_id)
|
||||
|
||||
|
|
@ -1252,84 +1077,13 @@ class ProxyServer:
|
|||
else:
|
||||
# Orphaned channel with no clients - clean it up
|
||||
logger.info(f"Cleaning up orphaned channel {channel_id}")
|
||||
|
||||
# If we have it locally, stop it properly to clean up processes
|
||||
if channel_id in self.stream_managers or channel_id in self.client_managers:
|
||||
logger.info(f"Orphaned channel {channel_id} is local - calling stop_channel")
|
||||
self.stop_channel(channel_id)
|
||||
else:
|
||||
# Just clean up Redis keys for remote channels
|
||||
self._clean_redis_keys(channel_id)
|
||||
self._clean_redis_keys(channel_id)
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing channel key {key}: {e}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error checking orphaned channels: {e}")
|
||||
|
||||
def _check_orphaned_metadata(self):
|
||||
"""
|
||||
Check for metadata entries that have no owner and no clients.
|
||||
This catches zombie channels that weren't cleaned up properly.
|
||||
"""
|
||||
if not self.redis_client:
|
||||
return
|
||||
|
||||
try:
|
||||
# Get all channel metadata keys
|
||||
channel_pattern = "ts_proxy:channel:*:metadata"
|
||||
channel_keys = self.redis_client.keys(channel_pattern)
|
||||
|
||||
for key in channel_keys:
|
||||
try:
|
||||
channel_id = key.decode('utf-8').split(':')[2]
|
||||
|
||||
# Get metadata first
|
||||
metadata = self.redis_client.hgetall(key)
|
||||
if not metadata:
|
||||
# Empty metadata - clean it up
|
||||
logger.warning(f"Found empty metadata for channel {channel_id} - cleaning up")
|
||||
# If we have it locally, stop it properly
|
||||
if channel_id in self.stream_managers or channel_id in self.client_managers:
|
||||
self.stop_channel(channel_id)
|
||||
else:
|
||||
self._clean_redis_keys(channel_id)
|
||||
continue
|
||||
|
||||
# Get owner
|
||||
owner = metadata.get(b'owner', b'').decode('utf-8') if b'owner' in metadata else ''
|
||||
|
||||
# Check if owner is still alive
|
||||
owner_alive = False
|
||||
if owner:
|
||||
owner_heartbeat_key = f"ts_proxy:worker:{owner}:heartbeat"
|
||||
owner_alive = self.redis_client.exists(owner_heartbeat_key)
|
||||
|
||||
# Check client count
|
||||
client_set_key = RedisKeys.clients(channel_id)
|
||||
client_count = self.redis_client.scard(client_set_key) or 0
|
||||
|
||||
# If no owner and no clients, clean it up
|
||||
if not owner_alive and client_count == 0:
|
||||
state = metadata.get(b'state', b'unknown').decode('utf-8') if b'state' in metadata else 'unknown'
|
||||
logger.warning(f"Found orphaned metadata for channel {channel_id} (state: {state}, owner: {owner}, clients: {client_count}) - cleaning up")
|
||||
|
||||
# If we have it locally, stop it properly to clean up transcode/proxy processes
|
||||
if channel_id in self.stream_managers or channel_id in self.client_managers:
|
||||
logger.info(f"Channel {channel_id} is local - calling stop_channel to clean up processes")
|
||||
self.stop_channel(channel_id)
|
||||
else:
|
||||
# Just clean up Redis keys for remote channels
|
||||
self._clean_redis_keys(channel_id)
|
||||
elif not owner_alive and client_count > 0:
|
||||
# Owner is gone but clients remain - just log for now
|
||||
logger.warning(f"Found orphaned channel {channel_id} with {client_count} clients but no owner - may need ownership takeover")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing metadata key {key}: {e}", exc_info=True)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error checking orphaned metadata: {e}", exc_info=True)
|
||||
|
||||
def _clean_redis_keys(self, channel_id):
|
||||
"""Clean up all Redis keys for a channel more efficiently"""
|
||||
# Release the channel, stream, and profile keys from the channel
|
||||
|
|
|
|||
|
|
@ -14,8 +14,6 @@ from ..server import ProxyServer
|
|||
from ..redis_keys import RedisKeys
|
||||
from ..constants import EventType, ChannelState, ChannelMetadataField
|
||||
from ..url_utils import get_stream_info_for_switch
|
||||
from core.utils import log_system_event
|
||||
from .log_parsers import LogParserFactory
|
||||
|
||||
logger = logging.getLogger("ts_proxy")
|
||||
|
||||
|
|
@ -419,52 +417,106 @@ class ChannelService:
|
|||
return False, None, None, {"error": f"Exception: {str(e)}"}
|
||||
|
||||
@staticmethod
|
||||
def parse_and_store_stream_info(channel_id, stream_info_line, stream_type="video", stream_id=None):
|
||||
"""
|
||||
Parse stream info from FFmpeg/VLC/Streamlink logs and store in Redis/DB.
|
||||
Uses specialized parsers for each streaming tool.
|
||||
"""
|
||||
def parse_and_store_stream_info(channel_id, stream_info_line, stream_type="video"):
|
||||
"""Parse FFmpeg stream info line and store in Redis metadata"""
|
||||
try:
|
||||
# Use factory to parse the line based on stream type
|
||||
parsed_data = LogParserFactory.parse(stream_type, stream_info_line)
|
||||
|
||||
if not parsed_data:
|
||||
return
|
||||
if stream_type == "input":
|
||||
# Example lines:
|
||||
# Input #0, mpegts, from 'http://example.com/stream.ts':
|
||||
# Input #0, hls, from 'http://example.com/stream.m3u8':
|
||||
|
||||
# Update Redis and database with parsed data
|
||||
ChannelService._update_stream_info_in_redis(
|
||||
channel_id,
|
||||
parsed_data.get('video_codec'),
|
||||
parsed_data.get('resolution'),
|
||||
parsed_data.get('width'),
|
||||
parsed_data.get('height'),
|
||||
parsed_data.get('source_fps'),
|
||||
parsed_data.get('pixel_format'),
|
||||
parsed_data.get('video_bitrate'),
|
||||
parsed_data.get('audio_codec'),
|
||||
parsed_data.get('sample_rate'),
|
||||
parsed_data.get('audio_channels'),
|
||||
parsed_data.get('audio_bitrate'),
|
||||
parsed_data.get('stream_type')
|
||||
)
|
||||
# Extract input format (e.g., "mpegts", "hls", "flv", etc.)
|
||||
input_match = re.search(r'Input #\d+,\s*([^,]+)', stream_info_line)
|
||||
input_format = input_match.group(1).strip() if input_match else None
|
||||
|
||||
if stream_id:
|
||||
ChannelService._update_stream_stats_in_db(
|
||||
stream_id,
|
||||
video_codec=parsed_data.get('video_codec'),
|
||||
resolution=parsed_data.get('resolution'),
|
||||
source_fps=parsed_data.get('source_fps'),
|
||||
pixel_format=parsed_data.get('pixel_format'),
|
||||
video_bitrate=parsed_data.get('video_bitrate'),
|
||||
audio_codec=parsed_data.get('audio_codec'),
|
||||
sample_rate=parsed_data.get('sample_rate'),
|
||||
audio_channels=parsed_data.get('audio_channels'),
|
||||
audio_bitrate=parsed_data.get('audio_bitrate'),
|
||||
stream_type=parsed_data.get('stream_type')
|
||||
)
|
||||
# Store in Redis if we have valid data
|
||||
if input_format:
|
||||
ChannelService._update_stream_info_in_redis(channel_id, None, None, None, None, None, None, None, None, None, None, None, input_format)
|
||||
|
||||
logger.debug(f"Input format info - Format: {input_format} for channel {channel_id}")
|
||||
|
||||
elif stream_type == "video":
|
||||
# Example line:
|
||||
# Stream #0:0: Video: h264 (Main), yuv420p(tv, progressive), 1280x720 [SAR 1:1 DAR 16:9], q=2-31, 2000 kb/s, 29.97 fps, 90k tbn
|
||||
|
||||
# Extract video codec (e.g., "h264", "mpeg2video", etc.)
|
||||
codec_match = re.search(r'Video:\s*([a-zA-Z0-9_]+)', stream_info_line)
|
||||
video_codec = codec_match.group(1) if codec_match else None
|
||||
|
||||
# Extract resolution (e.g., "1280x720") - be more specific to avoid hex values
|
||||
# Look for resolution patterns that are realistic video dimensions
|
||||
resolution_match = re.search(r'\b(\d{3,5})x(\d{3,5})\b', stream_info_line)
|
||||
if resolution_match:
|
||||
width = int(resolution_match.group(1))
|
||||
height = int(resolution_match.group(2))
|
||||
# Validate that these look like reasonable video dimensions
|
||||
if 100 <= width <= 10000 and 100 <= height <= 10000:
|
||||
resolution = f"{width}x{height}"
|
||||
else:
|
||||
width = height = resolution = None
|
||||
else:
|
||||
width = height = resolution = None
|
||||
|
||||
# Extract source FPS (e.g., "29.97 fps")
|
||||
fps_match = re.search(r'(\d+(?:\.\d+)?)\s*fps', stream_info_line)
|
||||
source_fps = float(fps_match.group(1)) if fps_match else None
|
||||
|
||||
# Extract pixel format (e.g., "yuv420p")
|
||||
pixel_format_match = re.search(r'Video:\s*[^,]+,\s*([^,(]+)', stream_info_line)
|
||||
pixel_format = None
|
||||
if pixel_format_match:
|
||||
pf = pixel_format_match.group(1).strip()
|
||||
# Clean up pixel format (remove extra info in parentheses)
|
||||
if '(' in pf:
|
||||
pf = pf.split('(')[0].strip()
|
||||
pixel_format = pf
|
||||
|
||||
# Extract bitrate if present (e.g., "2000 kb/s")
|
||||
video_bitrate = None
|
||||
bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', stream_info_line)
|
||||
if bitrate_match:
|
||||
video_bitrate = float(bitrate_match.group(1))
|
||||
|
||||
# Store in Redis if we have valid data
|
||||
if any(x is not None for x in [video_codec, resolution, source_fps, pixel_format, video_bitrate]):
|
||||
ChannelService._update_stream_info_in_redis(channel_id, video_codec, resolution, width, height, source_fps, pixel_format, video_bitrate, None, None, None, None, None)
|
||||
|
||||
logger.info(f"Video stream info - Codec: {video_codec}, Resolution: {resolution}, "
|
||||
f"Source FPS: {source_fps}, Pixel Format: {pixel_format}, "
|
||||
f"Video Bitrate: {video_bitrate} kb/s")
|
||||
|
||||
            elif stream_type == "audio":
                # Example line:
                # Stream #0:1[0x101]: Audio: aac (LC) ([15][0][0][0] / 0x000F), 48000 Hz, stereo, fltp, 64 kb/s

                # Extract audio codec (e.g., "aac", "mp3", etc.)
                codec_match = re.search(r'Audio:\s*([a-zA-Z0-9_]+)', stream_info_line)
                audio_codec = codec_match.group(1) if codec_match else None

                # Extract sample rate (e.g., "48000 Hz")
                sample_rate_match = re.search(r'(\d+)\s*Hz', stream_info_line)
                sample_rate = int(sample_rate_match.group(1)) if sample_rate_match else None

                # Extract channel layout (e.g., "stereo", "5.1", "mono")
                # Look for common channel layouts
                channel_match = re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', stream_info_line, re.IGNORECASE)
                channels = channel_match.group(1) if channel_match else None

                # Extract audio bitrate if present (e.g., "64 kb/s")
                audio_bitrate = None
                bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', stream_info_line)
                if bitrate_match:
                    audio_bitrate = float(bitrate_match.group(1))

                # Store in Redis if we have valid data
                if any(x is not None for x in [audio_codec, sample_rate, channels, audio_bitrate]):
                    ChannelService._update_stream_info_in_redis(channel_id, None, None, None, None, None, None, None, audio_codec, sample_rate, channels, audio_bitrate, None)

                    logger.info(f"Audio stream info - Codec: {audio_codec}, Sample Rate: {sample_rate} Hz, "
                                f"Channels: {channels}, Audio Bitrate: {audio_bitrate} kb/s")
        except Exception as e:
-           logger.debug(f"Error parsing {stream_type} stream info: {e}")
+           logger.debug(f"Error parsing FFmpeg {stream_type} stream info: {e}")
    @staticmethod
    def _update_stream_info_in_redis(channel_id, codec, resolution, width, height, fps, pixel_format, video_bitrate, audio_codec=None, sample_rate=None, channels=None, audio_bitrate=None, input_format=None):

@@ -523,44 +575,6 @@ class ChannelService:
            logger.error(f"Error updating stream info in Redis: {e}")
            return False
    @staticmethod
    def _update_stream_stats_in_db(stream_id, **stats):
        """Update stream stats in database"""
        from django.db import connection

        try:
            from apps.channels.models import Stream
            from django.utils import timezone

            stream = Stream.objects.get(id=stream_id)

            # Get existing stats or create a new dict
            current_stats = stream.stream_stats or {}

            # Update with new stats
            for key, value in stats.items():
                if value is not None:
                    current_stats[key] = value

            # Save updated stats and timestamp
            stream.stream_stats = current_stats
            stream.stream_stats_updated_at = timezone.now()
            stream.save(update_fields=['stream_stats', 'stream_stats_updated_at'])

            logger.debug(f"Updated stream stats in database for stream {stream_id}: {stats}")
            return True

        except Exception as e:
            logger.error(f"Error updating stream stats in database for stream {stream_id}: {e}")
            return False

        finally:
            # Always close the database connection after the update
            try:
                connection.close()
            except Exception:
                pass
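# Illustration only: the merge semantics of _update_stream_stats_in_db above.
# None values are skipped, so repeated partial updates accumulate into the stored
# stats dict rather than overwriting fields that were not supplied this time.
#
#   current_stats = {'video_bitrate': 2000.0}
#   stats = {'ffmpeg_output_bitrate': 1850.5, 'source_fps': None}
#   for key, value in stats.items():
#       if value is not None:
#           current_stats[key] = value
#   # current_stats == {'video_bitrate': 2000.0, 'ffmpeg_output_bitrate': 1850.5}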
    # Helper methods for Redis operations

    @staticmethod
@@ -616,7 +630,7 @@ class ChannelService:

        switch_request = {
            "event": EventType.STREAM_SWITCH,
-           "channel_id": str(channel_id),
+           "channel_id": channel_id,
            "url": new_url,
            "user_agent": user_agent,
            "stream_id": stream_id,
@@ -629,7 +643,6 @@ class ChannelService:
            RedisKeys.events_channel(channel_id),
            json.dumps(switch_request)
        )

        return True

    @staticmethod

@@ -642,7 +655,7 @@ class ChannelService:

        stop_request = {
            "event": EventType.CHANNEL_STOP,
-           "channel_id": str(channel_id),
+           "channel_id": channel_id,
            "requester_worker_id": proxy_server.worker_id,
            "timestamp": time.time()
        }
@@ -665,7 +678,7 @@ class ChannelService:

        stop_request = {
            "event": EventType.CLIENT_STOP,
-           "channel_id": str(channel_id),
+           "channel_id": channel_id,
            "client_id": client_id,
            "requester_worker_id": proxy_server.worker_id,
            "timestamp": time.time()

@@ -1,410 +0,0 @@
"""Log parsers for FFmpeg, Streamlink, and VLC output."""
|
||||
import re
|
||||
import logging
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Optional, Dict, Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BaseLogParser(ABC):
|
||||
"""Base class for log parsers"""
|
||||
|
||||
# Map of stream_type -> method_name that this parser handles
|
||||
STREAM_TYPE_METHODS: Dict[str, str] = {}
|
||||
|
||||
@abstractmethod
|
||||
def can_parse(self, line: str) -> Optional[str]:
|
||||
"""
|
||||
Check if this parser can handle the line.
|
||||
Returns the stream_type if it can parse, None otherwise.
|
||||
e.g., 'video', 'audio', 'vlc_video', 'vlc_audio', 'streamlink'
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
pass
|
||||
|
||||
|
||||
class FFmpegLogParser(BaseLogParser):
|
||||
"""Parser for FFmpeg log output"""
|
||||
|
||||
STREAM_TYPE_METHODS = {
|
||||
'input': 'parse_input_format',
|
||||
'video': 'parse_video_stream',
|
||||
'audio': 'parse_audio_stream'
|
||||
}
|
||||
|
||||
def can_parse(self, line: str) -> Optional[str]:
|
||||
"""Check if this is an FFmpeg line we can parse"""
|
||||
lower = line.lower()
|
||||
|
||||
# Input format detection
|
||||
if lower.startswith('input #'):
|
||||
return 'input'
|
||||
|
||||
# Stream info (only during input phase, but we'll let stream_manager handle phase tracking)
|
||||
if 'stream #' in lower:
|
||||
if 'video:' in lower:
|
||||
return 'video'
|
||||
elif 'audio:' in lower:
|
||||
return 'audio'
|
||||
|
||||
return None
|
||||
|
||||
def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
"""Parse FFmpeg input format (e.g., mpegts, hls)"""
|
||||
try:
|
||||
input_match = re.search(r'Input #\d+,\s*([^,]+)', line)
|
||||
input_format = input_match.group(1).strip() if input_match else None
|
||||
|
||||
if input_format:
|
||||
logger.debug(f"Input format info - Format: {input_format}")
|
||||
return {'stream_type': input_format}
|
||||
except Exception as e:
|
||||
logger.debug(f"Error parsing FFmpeg input format: {e}")
|
||||
|
||||
return None
|
||||
|
||||
def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
"""Parse FFmpeg video stream info"""
|
||||
try:
|
||||
result = {}
|
||||
|
||||
# Extract codec, resolution, fps, pixel format, bitrate
|
||||
codec_match = re.search(r'Video:\s*([a-zA-Z0-9_]+)', line)
|
||||
if codec_match:
|
||||
result['video_codec'] = codec_match.group(1)
|
||||
|
||||
resolution_match = re.search(r'\b(\d{3,5})x(\d{3,5})\b', line)
|
||||
if resolution_match:
|
||||
width = int(resolution_match.group(1))
|
||||
height = int(resolution_match.group(2))
|
||||
if 100 <= width <= 10000 and 100 <= height <= 10000:
|
||||
result['resolution'] = f"{width}x{height}"
|
||||
result['width'] = width
|
||||
result['height'] = height
|
||||
|
||||
fps_match = re.search(r'(\d+(?:\.\d+)?)\s*fps', line)
|
||||
if fps_match:
|
||||
result['source_fps'] = float(fps_match.group(1))
|
||||
|
||||
pixel_format_match = re.search(r'Video:\s*[^,]+,\s*([^,(]+)', line)
|
||||
if pixel_format_match:
|
||||
pf = pixel_format_match.group(1).strip()
|
||||
if '(' in pf:
|
||||
pf = pf.split('(')[0].strip()
|
||||
result['pixel_format'] = pf
|
||||
|
||||
bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', line)
|
||||
if bitrate_match:
|
||||
result['video_bitrate'] = float(bitrate_match.group(1))
|
||||
|
||||
if result:
|
||||
logger.info(f"Video stream info - Codec: {result.get('video_codec')}, "
|
||||
f"Resolution: {result.get('resolution')}, "
|
||||
f"Source FPS: {result.get('source_fps')}, "
|
||||
f"Pixel Format: {result.get('pixel_format')}, "
|
||||
f"Video Bitrate: {result.get('video_bitrate')} kb/s")
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error parsing FFmpeg video stream info: {e}")
|
||||
|
||||
return None
|
||||
|
||||
def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
"""Parse FFmpeg audio stream info"""
|
||||
try:
|
||||
result = {}
|
||||
|
||||
codec_match = re.search(r'Audio:\s*([a-zA-Z0-9_]+)', line)
|
||||
if codec_match:
|
||||
result['audio_codec'] = codec_match.group(1)
|
||||
|
||||
sample_rate_match = re.search(r'(\d+)\s*Hz', line)
|
||||
if sample_rate_match:
|
||||
result['sample_rate'] = int(sample_rate_match.group(1))
|
||||
|
||||
channel_match = re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', line, re.IGNORECASE)
|
||||
if channel_match:
|
||||
result['audio_channels'] = channel_match.group(1)
|
||||
|
||||
bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', line)
|
||||
if bitrate_match:
|
||||
result['audio_bitrate'] = float(bitrate_match.group(1))
|
||||
|
||||
if result:
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error parsing FFmpeg audio stream info: {e}")
|
||||
|
||||
return None
|
||||
|
||||
|
||||
class VLCLogParser(BaseLogParser):
|
||||
"""Parser for VLC log output"""
|
||||
|
||||
STREAM_TYPE_METHODS = {
|
||||
'vlc_video': 'parse_video_stream',
|
||||
'vlc_audio': 'parse_audio_stream'
|
||||
}
|
||||
|
||||
def can_parse(self, line: str) -> Optional[str]:
|
||||
"""Check if this is a VLC line we can parse"""
|
||||
lower = line.lower()
|
||||
|
||||
# VLC TS demux codec detection
|
||||
if 'ts demux debug' in lower and 'type=' in lower:
|
||||
if 'video' in lower:
|
||||
return 'vlc_video'
|
||||
elif 'audio' in lower:
|
||||
return 'vlc_audio'
|
||||
|
||||
# VLC decoder output
|
||||
if 'decoder' in lower and ('channels:' in lower or 'samplerate:' in lower or 'x' in line or 'fps' in lower):
|
||||
if 'audio' in lower or 'channels:' in lower or 'samplerate:' in lower:
|
||||
return 'vlc_audio'
|
||||
else:
|
||||
return 'vlc_video'
|
||||
|
||||
# VLC transcode output for resolution/FPS
|
||||
if 'stream_out_transcode' in lower and ('source fps' in lower or ('source ' in lower and 'x' in line)):
|
||||
return 'vlc_video'
|
||||
|
||||
return None
|
||||
|
||||
def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
return None
|
||||
|
||||
def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
"""Parse VLC TS demux output and decoder info for video"""
|
||||
try:
|
||||
lower = line.lower()
|
||||
result = {}
|
||||
|
||||
# Codec detection from TS demux
|
||||
video_codec_map = {
|
||||
('avc', 'h.264', 'type=0x1b'): "h264",
|
||||
('hevc', 'h.265', 'type=0x24'): "hevc",
|
||||
('mpeg-2', 'type=0x02'): "mpeg2video",
|
||||
('mpeg-4', 'type=0x10'): "mpeg4"
|
||||
}
|
||||
|
||||
for patterns, codec in video_codec_map.items():
|
||||
if any(p in lower for p in patterns):
|
||||
result['video_codec'] = codec
|
||||
break
|
||||
|
||||
# Extract FPS from transcode output: "source fps 30/1"
|
||||
fps_fraction_match = re.search(r'source fps\s+(\d+)/(\d+)', lower)
|
||||
if fps_fraction_match:
|
||||
numerator = int(fps_fraction_match.group(1))
|
||||
denominator = int(fps_fraction_match.group(2))
|
||||
if denominator > 0:
|
||||
result['source_fps'] = numerator / denominator
|
||||
|
||||
# Extract resolution from transcode output: "source 1280x720"
|
||||
source_res_match = re.search(r'source\s+(\d{3,4})x(\d{3,4})', lower)
|
||||
if source_res_match:
|
||||
width = int(source_res_match.group(1))
|
||||
height = int(source_res_match.group(2))
|
||||
if 100 <= width <= 10000 and 100 <= height <= 10000:
|
||||
result['resolution'] = f"{width}x{height}"
|
||||
result['width'] = width
|
||||
result['height'] = height
|
||||
else:
|
||||
# Fallback: generic resolution pattern
|
||||
resolution_match = re.search(r'(\d{3,4})x(\d{3,4})', line)
|
||||
if resolution_match:
|
||||
width = int(resolution_match.group(1))
|
||||
height = int(resolution_match.group(2))
|
||||
if 100 <= width <= 10000 and 100 <= height <= 10000:
|
||||
result['resolution'] = f"{width}x{height}"
|
||||
result['width'] = width
|
||||
result['height'] = height
|
||||
|
||||
# Fallback: try to extract FPS from generic format
|
||||
if 'source_fps' not in result:
|
||||
fps_match = re.search(r'(\d+\.?\d*)\s*fps', lower)
|
||||
if fps_match:
|
||||
result['source_fps'] = float(fps_match.group(1))
|
||||
|
||||
return result if result else None
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error parsing VLC video stream info: {e}")
|
||||
|
||||
return None
|
||||
|
||||
def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
"""Parse VLC TS demux output and decoder info for audio"""
|
||||
try:
|
||||
lower = line.lower()
|
||||
result = {}
|
||||
|
||||
# Codec detection from TS demux
|
||||
audio_codec_map = {
|
||||
('type=0xf', 'adts'): "aac",
|
||||
('type=0x03', 'type=0x04'): "mp3",
|
||||
('type=0x06', 'type=0x81'): "ac3",
|
||||
('type=0x0b', 'lpcm'): "pcm"
|
||||
}
|
||||
|
||||
for patterns, codec in audio_codec_map.items():
|
||||
if any(p in lower for p in patterns):
|
||||
result['audio_codec'] = codec
|
||||
break
|
||||
|
||||
# VLC decoder format: "AAC channels: 2 samplerate: 48000"
|
||||
if 'channels:' in lower:
|
||||
channels_match = re.search(r'channels:\s*(\d+)', lower)
|
||||
if channels_match:
|
||||
num_channels = int(channels_match.group(1))
|
||||
# Convert number to name
|
||||
channel_names = {1: 'mono', 2: 'stereo', 6: '5.1', 8: '7.1'}
|
||||
result['audio_channels'] = channel_names.get(num_channels, str(num_channels))
|
||||
|
||||
if 'samplerate:' in lower:
|
||||
samplerate_match = re.search(r'samplerate:\s*(\d+)', lower)
|
||||
if samplerate_match:
|
||||
result['sample_rate'] = int(samplerate_match.group(1))
|
||||
|
||||
# Try to extract sample rate (Hz format)
|
||||
sample_rate_match = re.search(r'(\d+)\s*hz', lower)
|
||||
if sample_rate_match and 'sample_rate' not in result:
|
||||
result['sample_rate'] = int(sample_rate_match.group(1))
|
||||
|
||||
# Try to extract channels (word format)
|
||||
if 'audio_channels' not in result:
|
||||
channel_match = re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', lower)
|
||||
if channel_match:
|
||||
result['audio_channels'] = channel_match.group(1)
|
||||
|
||||
return result if result else None
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"[VLC AUDIO PARSER] Error parsing VLC audio stream info: {e}")
|
||||
|
||||
return None
|
||||
|
||||
|
||||
class StreamlinkLogParser(BaseLogParser):
|
||||
"""Parser for Streamlink log output"""
|
||||
|
||||
STREAM_TYPE_METHODS = {
|
||||
'streamlink': 'parse_video_stream'
|
||||
}
|
||||
|
||||
def can_parse(self, line: str) -> Optional[str]:
|
||||
"""Check if this is a Streamlink line we can parse"""
|
||||
lower = line.lower()
|
||||
|
||||
if 'opening stream:' in lower or 'available streams:' in lower:
|
||||
return 'streamlink'
|
||||
|
||||
return None
|
||||
|
||||
def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
return None
|
||||
|
||||
def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
"""Parse Streamlink quality/resolution"""
|
||||
try:
|
||||
quality_match = re.search(r'(\d+p|\d+x\d+)', line)
|
||||
if quality_match:
|
||||
quality = quality_match.group(1)
|
||||
|
||||
if 'x' in quality:
|
||||
resolution = quality
|
||||
width, height = map(int, quality.split('x'))
|
||||
else:
|
||||
resolutions = {
|
||||
'2160p': ('3840x2160', 3840, 2160),
|
||||
'1080p': ('1920x1080', 1920, 1080),
|
||||
'720p': ('1280x720', 1280, 720),
|
||||
'480p': ('854x480', 854, 480),
|
||||
'360p': ('640x360', 640, 360)
|
||||
}
|
||||
resolution, width, height = resolutions.get(quality, ('1920x1080', 1920, 1080))
|
||||
|
||||
return {
|
||||
'video_codec': 'h264',
|
||||
'resolution': resolution,
|
||||
'width': width,
|
||||
'height': height,
|
||||
'pixel_format': 'yuv420p'
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error parsing Streamlink video info: {e}")
|
||||
|
||||
return None
|
||||
|
||||
def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
return None
|
||||
|
||||
|
||||
class LogParserFactory:
|
||||
"""Factory to get the appropriate log parser"""
|
||||
|
||||
_parsers = {
|
||||
'ffmpeg': FFmpegLogParser(),
|
||||
'vlc': VLCLogParser(),
|
||||
'streamlink': StreamlinkLogParser()
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def _get_parser_and_method(cls, stream_type: str) -> Optional[tuple[BaseLogParser, str]]:
|
||||
"""Determine parser and method from stream_type"""
|
||||
# Check each parser to see if it handles this stream_type
|
||||
for parser in cls._parsers.values():
|
||||
method_name = parser.STREAM_TYPE_METHODS.get(stream_type)
|
||||
if method_name:
|
||||
return (parser, method_name)
|
||||
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def parse(cls, stream_type: str, line: str) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
Parse a log line based on stream type.
|
||||
Returns parsed data or None if parsing fails.
|
||||
"""
|
||||
result = cls._get_parser_and_method(stream_type)
|
||||
if not result:
|
||||
return None
|
||||
|
||||
parser, method_name = result
|
||||
method = getattr(parser, method_name, None)
|
||||
if method:
|
||||
return method(line)
|
||||
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def auto_parse(cls, line: str) -> Optional[tuple[str, Dict[str, Any]]]:
|
||||
"""
|
||||
Automatically detect which parser can handle this line and parse it.
|
||||
Returns (stream_type, parsed_data) or None if no parser can handle it.
|
||||
"""
|
||||
# Try each parser to see if it can handle this line
|
||||
for parser in cls._parsers.values():
|
||||
stream_type = parser.can_parse(line)
|
||||
if stream_type:
|
||||
# Parser can handle this line, now parse it
|
||||
parsed_data = cls.parse(stream_type, line)
|
||||
if parsed_data:
|
||||
return (stream_type, parsed_data)
|
||||
|
||||
return None
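# Example usage (illustration, not part of the deleted file): feeding a raw FFmpeg
# stderr line through the factory. auto_parse() probes each registered parser's
# can_parse() and returns the first successful (stream_type, data) pair.
#
#   line = "Stream #0:1: Audio: aac (LC), 48000 Hz, stereo, fltp, 64 kb/s"
#   parsed = LogParserFactory.auto_parse(line)
#   if parsed:
#       stream_type, data = parsed
#       # stream_type == 'audio'
#       # data == {'audio_codec': 'aac', 'sample_rate': 48000,
#       #          'audio_channels': 'stereo', 'audio_bitrate': 64.0}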
@@ -303,14 +303,6 @@ class StreamBuffer:
        # Retrieve chunks
        chunks = self.get_chunks_exact(client_index, chunk_count)

-       # Check if we got significantly fewer chunks than expected (likely due to expiration)
-       # Only check if we expected multiple chunks and got none or very few
-       if chunk_count > 3 and len(chunks) == 0 and chunks_behind > 10:
-           # Chunks are missing - likely expired from Redis
-           # Return an empty list to signal the client should skip forward
-           logger.debug(f"Chunks missing for client at index {client_index}, buffer at {self.index} ({chunks_behind} behind)")
-           return [], client_index

        # Check total size
        total_size = sum(len(c) for c in chunks)

@@ -324,7 +316,7 @@ class StreamBuffer:
        additional_size = sum(len(c) for c in more_chunks)
        if total_size + additional_size <= MAX_SIZE:
            chunks.extend(more_chunks)
-           chunk_count += len(more_chunks)  # Fixed: count the additional chunks actually retrieved
+           chunk_count += additional

        return chunks, client_index + chunk_count
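# Worked example of the accounting fix above (illustration): if a client at index
# 100 receives 3 chunks from the first retrieval and 2 more from the follow-up,
# its index must advance by the chunks actually delivered (3 + 2), not by the
# originally requested count. Counting len(more_chunks) keeps
# client_index + chunk_count equal to the index of the next unread chunk.
#
#   chunks = [b'a', b'b', b'c']       # first retrieval
#   more_chunks = [b'd', b'e']        # follow-up retrieval
#   chunk_count = len(chunks)         # 3
#   chunk_count += len(more_chunks)   # 5 chunks actually handed to the client
#   next_index = 100 + chunk_count    # client resumes at 105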
@@ -8,8 +8,6 @@ import logging
import threading
import gevent  # Cooperative sleeps/yields for the gevent worker
from apps.proxy.config import TSConfig as Config
-from apps.channels.models import Channel
-from core.utils import log_system_event
from .server import ProxyServer
from .utils import create_ts_packet, get_logger
from .redis_keys import RedisKeys
@@ -54,10 +52,6 @@ class StreamGenerator:
        self.last_stats_bytes = 0
        self.current_rate = 0.0

-       # TTL refresh tracking
-       self.last_ttl_refresh = time.time()
-       self.ttl_refresh_interval = 3  # Refresh TTL every 3 seconds of active streaming

    def generate(self):
        """
        Generator function that produces the stream content for the client.
@@ -90,20 +84,6 @@ class StreamGenerator:
        if not self._setup_streaming():
            return

-       # Log client connect event
-       try:
-           channel_obj = Channel.objects.get(uuid=self.channel_id)
-           log_system_event(
-               'client_connect',
-               channel_id=self.channel_id,
-               channel_name=channel_obj.name,
-               client_ip=self.client_ip,
-               client_id=self.client_id,
-               user_agent=self.client_user_agent[:100] if self.client_user_agent else None
-           )
-       except Exception as e:
-           logger.error(f"Could not log client connect event: {e}")

        # Main streaming loop
        for chunk in self._stream_data_generator():
            yield chunk
@@ -224,18 +204,6 @@ class StreamGenerator:
        self.empty_reads += 1
        self.consecutive_empty += 1

-       # Check if we're too far behind (chunks expired from Redis)
-       chunks_behind = self.buffer.index - self.local_index
-       if chunks_behind > 50:  # If more than 50 chunks behind, jump forward
-           # Calculate the new position: stay a few chunks behind the current buffer
-           initial_behind = ConfigHelper.initial_behind_chunks()
-           new_index = max(self.local_index, self.buffer.index - initial_behind)
-
-           logger.warning(f"[{self.client_id}] Client too far behind ({chunks_behind} chunks), jumping from {self.local_index} to {new_index}")
-           self.local_index = new_index
-           self.consecutive_empty = 0  # Reset since we're repositioning
-           continue  # Try again immediately with the new position

        if self._should_send_keepalive(self.local_index):
            keepalive_packet = create_ts_packet('keepalive')
            logger.debug(f"[{self.client_id}] Sending keepalive packet while waiting at buffer head")
@@ -356,20 +324,7 @@ class StreamGenerator:
                ChannelMetadataField.STATS_UPDATED_AT: str(current_time)
            }
            proxy_server.redis_client.hset(client_key, mapping=stats)

-           # Refresh TTL periodically while actively streaming
-           # This provides proof-of-life independent of the heartbeat thread
-           if current_time - self.last_ttl_refresh > self.ttl_refresh_interval:
-               try:
-                   # Refresh TTL on the client key
-                   proxy_server.redis_client.expire(client_key, Config.CLIENT_RECORD_TTL)
-                   # Also refresh the client set TTL
-                   client_set_key = f"ts_proxy:channel:{self.channel_id}:clients"
-                   proxy_server.redis_client.expire(client_set_key, Config.CLIENT_RECORD_TTL)
-                   self.last_ttl_refresh = current_time
-                   logger.debug(f"[{self.client_id}] Refreshed client TTL (active streaming)")
-               except Exception as ttl_error:
-                   logger.debug(f"[{self.client_id}] Failed to refresh TTL: {ttl_error}")
+           # No need to set expiration as the client heartbeat will refresh this key
        except Exception as e:
            logger.warning(f"[{self.client_id}] Failed to store stats in Redis: {e}")
@@ -455,22 +410,6 @@ class StreamGenerator:
        total_clients = client_manager.get_total_client_count()
        logger.info(f"[{self.client_id}] Disconnected after {elapsed:.2f}s (local: {local_clients}, total: {total_clients})")

-       # Log client disconnect event
-       try:
-           channel_obj = Channel.objects.get(uuid=self.channel_id)
-           log_system_event(
-               'client_disconnect',
-               channel_id=self.channel_id,
-               channel_name=channel_obj.name,
-               client_ip=self.client_ip,
-               client_id=self.client_id,
-               user_agent=self.client_user_agent[:100] if self.client_user_agent else None,
-               duration=round(elapsed, 2),
-               bytes_sent=self.bytes_sent
-           )
-       except Exception as e:
-           logger.error(f"Could not log client disconnect event: {e}")

        # Schedule channel shutdown if no clients are left
        if not stream_released:  # Only if we haven't already released the stream
            self._schedule_channel_shutdown_if_needed(local_clients)
@@ -9,14 +9,11 @@ import subprocess
import gevent
import re
from typing import Optional, List
-from django.db import connection
from django.shortcuts import get_object_or_404
from urllib3.exceptions import ReadTimeoutError
from apps.proxy.config import TSConfig as Config
from apps.channels.models import Channel, Stream
from apps.m3u.models import M3UAccount, M3UAccountProfile
from core.models import UserAgent, CoreSettings
-from core.utils import log_system_event
from .stream_buffer import StreamBuffer
from .utils import detect_stream_type, get_logger
from .redis_keys import RedisKeys
@@ -94,23 +91,17 @@ class StreamManager:
                    self.tried_stream_ids.add(self.current_stream_id)
                    logger.info(f"Loaded stream ID {self.current_stream_id} from Redis for channel {buffer.channel_id}")
                else:
-                   logger.warning(f"No stream_id found in Redis for channel {channel_id}. "
-                                  f"Stream switching will rely on URL comparison to avoid selecting the same stream.")
+                   logger.warning(f"No stream_id found in Redis for channel {channel_id}")
            except Exception as e:
                logger.warning(f"Error loading stream ID from Redis: {e}")
        else:
-           logger.warning(f"Unable to get stream ID for channel {channel_id}. "
-                          f"Stream switching will rely on URL comparison to avoid selecting the same stream.")
+           logger.warning(f"Unable to get stream ID for channel {channel_id} - stream switching may not work correctly")

        logger.info(f"Initialized stream manager for channel {buffer.channel_id}")

        # Flag for tracking transcoding process status
        self.transcode_process_active = False

-       # Track the stream command for efficient log parser routing
-       self.stream_command = None
-       self.parser_type = None  # Will be set when the transcode process starts

        # Tracking for data throughput
        self.bytes_processed = 0
        self.last_bytes_update = time.time()
@@ -120,9 +111,6 @@ class StreamManager:
        self.stderr_reader_thread = None
        self.ffmpeg_input_phase = True  # Track if we're still reading input info

-       # HTTP reader thread property
-       self.http_reader = None

    def _create_session(self):
        """Create and configure a requests session with optimal settings"""
        session = requests.Session()
@@ -232,12 +220,11 @@ class StreamManager:
            # Continue with normal flow

            # Check stream type before connecting
-           self.stream_type = detect_stream_type(self.url)
-           if self.transcode == False and self.stream_type in (StreamType.HLS, StreamType.RTSP, StreamType.UDP):
-               stream_type_name = "HLS" if self.stream_type == StreamType.HLS else ("RTSP/RTP" if self.stream_type == StreamType.RTSP else "UDP")
-               logger.info(f"Detected {stream_type_name} stream: {self.url} for channel {self.channel_id}")
-               logger.info(f"{stream_type_name} streams require FFmpeg for channel {self.channel_id}")
-               # Enable transcoding for HLS, RTSP/RTP, and UDP streams
+           stream_type = detect_stream_type(self.url)
+           if self.transcode == False and stream_type == StreamType.HLS:
+               logger.info(f"Detected HLS stream: {self.url} for channel {self.channel_id}")
+               logger.info(f"HLS streams will be handled with FFmpeg for now - a future version will support HLS natively for channel {self.channel_id}")
+               # Enable transcoding for HLS streams
                self.transcode = True
                # We'll override the stream profile selection with ffmpeg in the transcoding section
                self.force_ffmpeg = True
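# Hedged sketch of URL-based stream-type detection in the spirit of
# detect_stream_type(); the actual implementation lives in .utils and may differ.
# Shown only to illustrate why HLS/RTSP/UDP sources are routed through FFmpeg.
#
#   from urllib.parse import urlparse
#
#   def classify_stream_url(url):
#       parsed = urlparse(url.lower())
#       if parsed.scheme in ('rtsp', 'rtp'):
#           return 'rtsp'
#       if parsed.scheme == 'udp':
#           return 'udp'
#       if parsed.path.endswith('.m3u8'):
#           return 'hls'
#       return 'http'  # plain HTTP TS stream, proxied directly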
@@ -265,20 +252,6 @@ class StreamManager:
                    # Store connection start time to measure success duration
                    connection_start_time = time.time()

-                   # Log a reconnection event if this is a retry (not the first attempt)
-                   if self.retry_count > 0:
-                       try:
-                           channel_obj = Channel.objects.get(uuid=self.channel_id)
-                           log_system_event(
-                               'channel_reconnect',
-                               channel_id=self.channel_id,
-                               channel_name=channel_obj.name,
-                               attempt=self.retry_count + 1,
-                               max_attempts=self.max_retries
-                           )
-                       except Exception as e:
-                           logger.error(f"Could not log reconnection event: {e}")

                    # Successfully connected - read stream data until disconnect/error
                    self._process_stream_data()
                    # If we get here, the connection was closed/failed
@@ -308,20 +281,6 @@ class StreamManager:
                    if self.retry_count >= self.max_retries:
                        url_failed = True
                        logger.warning(f"Maximum retry attempts ({self.max_retries}) reached for URL: {self.url} for channel: {self.channel_id}")

-                       # Log connection error event
-                       try:
-                           channel_obj = Channel.objects.get(uuid=self.channel_id)
-                           log_system_event(
-                               'channel_error',
-                               channel_id=self.channel_id,
-                               channel_name=channel_obj.name,
-                               error_type='connection_failed',
-                               url=self.url[:100] if self.url else None,
-                               attempts=self.max_retries
-                           )
-                       except Exception as e:
-                           logger.error(f"Could not log connection error event: {e}")
                    else:
                        # Wait with a short, linearly increasing backoff before retrying
                        timeout = min(.25 * self.retry_count, 3)  # Cap at 3 seconds
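# Worked values for the retry delay above (illustration): the delay grows
# linearly with the retry count and is capped, so it never exceeds 3 seconds.
#
#   delays = [min(.25 * retry_count, 3) for retry_count in range(1, 6)]
#   # delays == [0.25, 0.5, 0.75, 1.0, 1.25]; from retry 12 onward it stays at 3.0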
@@ -335,21 +294,6 @@ class StreamManager:

                    if self.retry_count >= self.max_retries:
                        url_failed = True

-                       # Log connection error event with exception details
-                       try:
-                           channel_obj = Channel.objects.get(uuid=self.channel_id)
-                           log_system_event(
-                               'channel_error',
-                               channel_id=self.channel_id,
-                               channel_name=channel_obj.name,
-                               error_type='connection_exception',
-                               error_message=str(e)[:200],
-                               url=self.url[:100] if self.url else None,
-                               attempts=self.max_retries
-                           )
-                       except Exception as log_error:
-                           logger.error(f"Could not log connection error event: {log_error}")
                    else:
                        # Wait with a short, linearly increasing backoff before retrying
                        timeout = min(.25 * self.retry_count, 3)  # Cap at 3 seconds
@@ -434,12 +378,6 @@ class StreamManager:
        except Exception as e:
            logger.error(f"Failed to update channel state in Redis: {e} for channel {self.channel_id}", exc_info=True)

-       # Close the database connection for this thread
-       try:
-           connection.close()
-       except Exception:
-           pass

        logger.info(f"Stream manager stopped for channel {self.channel_id}")

    def _establish_transcode_connection(self):
@@ -469,7 +407,7 @@ class StreamManager:
            from core.models import StreamProfile
            try:
                stream_profile = StreamProfile.objects.get(name='ffmpeg', locked=True)
-               logger.info("Using FFmpeg stream profile for unsupported proxy content (HLS/RTSP/UDP)")
+               logger.info("Using FFmpeg stream profile for HLS content")
            except StreamProfile.DoesNotExist:
                # Fall back to the channel's profile if FFmpeg is not found
                stream_profile = channel.get_stream_profile()
@@ -479,28 +417,6 @@ class StreamManager:

        # Build and start the transcode command
        self.transcode_cmd = stream_profile.build_command(self.url, self.user_agent)

-       # Store the stream command for efficient log parser routing
-       self.stream_command = stream_profile.command
-       # Map actual commands to parser types for direct routing
-       command_to_parser = {
-           'ffmpeg': 'ffmpeg',
-           'cvlc': 'vlc',
-           'vlc': 'vlc',
-           'streamlink': 'streamlink'
-       }
-       self.parser_type = command_to_parser.get(self.stream_command.lower())
-       if self.parser_type:
-           logger.debug(f"Using {self.parser_type} parser for log parsing (command: {self.stream_command})")
-       else:
-           logger.debug(f"Unknown stream command '{self.stream_command}', will use auto-detection for log parsing")

-       # For UDP streams, remove any user_agent parameters from the command
-       if hasattr(self, 'stream_type') and self.stream_type == StreamType.UDP:
-           # Filter out any arguments that contain the user_agent value or related headers
-           self.transcode_cmd = [arg for arg in self.transcode_cmd if self.user_agent not in arg and 'user-agent' not in arg.lower() and 'user_agent' not in arg.lower()]
-           logger.debug(f"Removed user_agent parameters from UDP stream command for channel: {self.channel_id}")

        logger.debug(f"Starting transcode process: {self.transcode_cmd} for channel: {self.channel_id}")

        # Modified to capture stderr instead of discarding it
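# Minimal sketch (an assumption, not the project's exact code) of launching the
# transcode command with stderr captured and drained on a background thread, which
# is what "capture stderr instead of discarding it" implies: leaving stderr unread
# can fill the pipe buffer and stall FFmpeg.
#
#   import subprocess
#   import threading
#
#   def start_with_stderr_reader(cmd, on_line):
#       proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#
#       def _drain():
#           for raw in iter(proc.stderr.readline, b''):
#               on_line(raw.decode('utf-8', errors='replace').rstrip())
#
#       t = threading.Thread(target=_drain, daemon=True)
#       t.start()
#       return proc, t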
@@ -664,51 +580,35 @@ class StreamManager:
                    if content_lower.startswith('output #') or 'encoder' in content_lower:
                        self.ffmpeg_input_phase = False

-                   # Route to the appropriate parser based on the known command type
-                   from .services.log_parsers import LogParserFactory
-                   from .services.channel_service import ChannelService
+                   # Only parse stream info if we're still in the input phase
+                   if ("stream #" in content_lower and
+                           ("video:" in content_lower or "audio:" in content_lower) and
+                           self.ffmpeg_input_phase):

-                   parse_result = None
-
-                   # If we know the parser type, use direct routing for efficiency
-                   if self.parser_type:
-                       # Get the appropriate parser and check what it can parse
-                       parser = LogParserFactory._parsers.get(self.parser_type)
-                       if parser:
-                           stream_type = parser.can_parse(content)
-                           if stream_type:
-                               # Parser can handle this line, parse it directly
-                               parsed_data = LogParserFactory.parse(stream_type, content)
-                               if parsed_data:
-                                   parse_result = (stream_type, parsed_data)
-                   else:
-                       # Unknown command type - use auto-detection as a fallback
-                       parse_result = LogParserFactory.auto_parse(content)
-
-                   if parse_result:
-                       stream_type, parsed_data = parse_result
-                       # For FFmpeg, only parse during the input phase
-                       if stream_type in ['video', 'audio', 'input']:
-                           if self.ffmpeg_input_phase:
-                               ChannelService.parse_and_store_stream_info(self.channel_id, content, stream_type, self.current_stream_id)
-                       else:
-                           # VLC and Streamlink can be parsed anytime
-                           ChannelService.parse_and_store_stream_info(self.channel_id, content, stream_type, self.current_stream_id)
+                       from .services.channel_service import ChannelService
+                       if "video:" in content_lower:
+                           ChannelService.parse_and_store_stream_info(self.channel_id, content, "video")
+                       elif "audio:" in content_lower:
+                           ChannelService.parse_and_store_stream_info(self.channel_id, content, "audio")

                    # Determine log level based on content
                    if any(keyword in content_lower for keyword in ['error', 'failed', 'cannot', 'invalid', 'corrupt']):
-                       logger.error(f"Stream process error for channel {self.channel_id}: {content}")
+                       logger.error(f"FFmpeg stderr for channel {self.channel_id}: {content}")
                    elif any(keyword in content_lower for keyword in ['warning', 'deprecated', 'ignoring']):
-                       logger.warning(f"Stream process warning for channel {self.channel_id}: {content}")
+                       logger.warning(f"FFmpeg stderr for channel {self.channel_id}: {content}")
                    elif content.startswith('frame=') or 'fps=' in content or 'speed=' in content:
                        # Stats lines - log at trace level to avoid spam
-                       logger.trace(f"Stream stats for channel {self.channel_id}: {content}")
+                       logger.trace(f"FFmpeg stats for channel {self.channel_id}: {content}")
                    elif any(keyword in content_lower for keyword in ['input', 'output', 'stream', 'video', 'audio']):
                        # Stream info - log at info level
-                       logger.info(f"Stream info for channel {self.channel_id}: {content}")
+                       logger.info(f"FFmpeg info for channel {self.channel_id}: {content}")
+                       if content.startswith('Input #0'):
+                           # If it's input 0, parse stream info
+                           from .services.channel_service import ChannelService
+                           ChannelService.parse_and_store_stream_info(self.channel_id, content, "input")
                    else:
                        # Everything else at debug level
-                       logger.debug(f"Stream process output for channel {self.channel_id}: {content}")
+                       logger.debug(f"FFmpeg stderr for channel {self.channel_id}: {content}")

                except Exception as e:
                    logger.error(f"Error logging stderr content for channel {self.channel_id}: {e}")
@@ -749,14 +649,6 @@ class StreamManager:
                    if any(x is not None for x in [ffmpeg_speed, ffmpeg_fps, actual_fps, ffmpeg_output_bitrate]):
                        self._update_ffmpeg_stats_in_redis(ffmpeg_speed, ffmpeg_fps, actual_fps, ffmpeg_output_bitrate)

-                       # Also save ffmpeg_output_bitrate to the database if we have a stream_id
-                       if ffmpeg_output_bitrate is not None and self.current_stream_id:
-                           from .services.channel_service import ChannelService
-                           ChannelService._update_stream_stats_in_db(
-                               self.current_stream_id,
-                               ffmpeg_output_bitrate=ffmpeg_output_bitrate
-                           )

                    # Format optional float values safely for logging
                    actual_fps_str = f"{actual_fps:.1f}" if actual_fps is not None else "N/A"
                    ffmpeg_output_bitrate_str = f"{ffmpeg_output_bitrate:.1f}" if ffmpeg_output_bitrate is not None else "N/A"
@@ -781,19 +673,6 @@ class StreamManager:
                            # Reset buffering state
                            self.buffering = False
                            self.buffering_start_time = None

-                           # Log failover event
-                           try:
-                               channel_obj = Channel.objects.get(uuid=self.channel_id)
-                               log_system_event(
-                                   'channel_failover',
-                                   channel_id=self.channel_id,
-                                   channel_name=channel_obj.name,
-                                   reason='buffering_timeout',
-                                   duration=buffering_duration
-                               )
-                           except Exception as e:
-                               logger.error(f"Could not log failover event: {e}")
                        else:
                            logger.error(f"Failed to switch to next stream for channel {self.channel_id} after buffering timeout")
                else:
@@ -801,19 +680,6 @@ class StreamManager:
                    self.buffering = True
                    self.buffering_start_time = time.time()
                    logger.warning(f"Buffering started for channel {self.channel_id} - speed: {ffmpeg_speed}x")

-                   # Log system event for buffering
-                   try:
-                       channel_obj = Channel.objects.get(uuid=self.channel_id)
-                       log_system_event(
-                           'channel_buffering',
-                           channel_id=self.channel_id,
-                           channel_name=channel_obj.name,
-                           speed=ffmpeg_speed
-                       )
-                   except Exception as e:
-                       logger.error(f"Could not log buffering event: {e}")

                # Log buffering warning
                logger.debug(f"FFmpeg speed on channel {self.channel_id} is below {self.buffering_speed} ({ffmpeg_speed}x) - buffering detected")
                # Set channel state to buffering
@@ -863,9 +729,9 @@ class StreamManager:


    def _establish_http_connection(self):
-       """Establish HTTP connection using a thread-based reader (same as the transcode path)"""
+       """Establish a direct HTTP connection to the stream"""
        try:
-           logger.debug(f"Using HTTP streamer thread to connect to stream: {self.url}")
+           logger.debug(f"Using TS Proxy to connect to stream: {self.url}")

            # Check if we already have active HTTP connections
            if self.current_response or self.current_session:
@@ -882,39 +748,41 @@ class StreamManager:
                logger.debug(f"Closing existing transcode process before establishing HTTP connection for channel {self.channel_id}")
                self._close_socket()

-           # Use HTTPStreamReader to fetch the stream and pipe it to a readable file descriptor
-           # This allows us to use the same fetch_chunk() path as transcode
-           from .http_streamer import HTTPStreamReader
+           # Create a new session for each connection attempt
+           session = self._create_session()
+           self.current_session = session

-           # Create and start the HTTP stream reader
-           self.http_reader = HTTPStreamReader(
-               url=self.url,
-               user_agent=self.user_agent,
-               chunk_size=self.chunk_size
-           )
+           # Stream the URL with proper timeout handling
+           response = session.get(
+               self.url,
+               stream=True,
+               timeout=(10, 60)  # 10s connect timeout, 60s read timeout
+           )
+           self.current_response = response

-           # Start the reader thread and get the read end of the pipe
-           pipe_fd = self.http_reader.start()
+           if response.status_code == 200:
+               self.connected = True
+               self.healthy = True
+               logger.info(f"Successfully connected to stream source for channel {self.channel_id}")

-           # Wrap the file descriptor in a file object (same as transcode stdout)
-           import os
-           self.socket = os.fdopen(pipe_fd, 'rb', buffering=0)
-           self.connected = True
-           self.healthy = True
+               # Store connection start time for stability tracking
+               self.connection_start_time = time.time()

-           logger.info(f"Successfully started HTTP streamer thread for channel {self.channel_id}")
-
-           # Store connection start time for stability tracking
-           self.connection_start_time = time.time()
-
-           # Set channel state to waiting for clients
-           self._set_waiting_for_clients()
-
-           return True
+               # Set channel state to waiting for clients
+               self._set_waiting_for_clients()
+
+               return True
+           else:
+               logger.error(f"Failed to connect to stream for channel {self.channel_id}: HTTP {response.status_code}")
+               self._close_connection()
+               return False
+       except requests.exceptions.RequestException as e:
+           logger.error(f"HTTP request error: {e}")
+           self._close_connection()
+           return False
        except Exception as e:
            logger.error(f"Error establishing HTTP connection for channel {self.channel_id}: {e}", exc_info=True)
-           self._close_socket()
+           self._close_connection()
            return False

    def _update_bytes_processed(self, chunk_size):
@@ -942,19 +810,48 @@ class StreamManager:
            logger.error(f"Error updating bytes processed: {e}")

    def _process_stream_data(self):
-       """Process stream data until disconnect or error - unified path for both transcode and HTTP"""
+       """Process stream data until disconnect or error"""
        try:
-           # Both transcode and HTTP now use the same subprocess/socket approach
-           # This gives us precise control: flags are checked between chunks, and a timeout simply returns False
-           while self.running and self.connected and not self.stop_requested and not self.needs_stream_switch:
-               if self.fetch_chunk():
-                   self.last_data_time = time.time()
-               else:
-                   # fetch_chunk() returned False - could be a timeout, no data, or an error
-                   if not self.running:
-                       break
-                   # Brief sleep before retrying to avoid a tight loop
-                   gevent.sleep(0.1)
+           if self.transcode:
+               # Handle transcoded stream data
+               while self.running and self.connected and not self.stop_requested and not self.needs_stream_switch:
+                   if self.fetch_chunk():
+                       self.last_data_time = time.time()
+                   else:
+                       if not self.running:
+                           break
+                       gevent.sleep(0.1)  # Yield cooperatively instead of blocking with time.sleep
+           else:
+               # Handle direct HTTP connection
+               chunk_count = 0
+               try:
+                   for chunk in self.current_response.iter_content(chunk_size=self.chunk_size):
+                       # Check if we've been asked to stop
+                       if self.stop_requested or self.url_switching or self.needs_stream_switch:
+                           break
+
+                       if chunk:
+                           # Track chunk size before adding to buffer
+                           chunk_size = len(chunk)
+                           self._update_bytes_processed(chunk_size)
+
+                           # Add chunk to buffer with TS packet alignment
+                           success = self.buffer.add_chunk(chunk)
+
+                           if success:
+                               self.last_data_time = time.time()
+                               chunk_count += 1
+
+                               # Update last data timestamp in Redis
+                               if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client:
+                                   last_data_key = RedisKeys.last_data(self.buffer.channel_id)
+                                   self.buffer.redis_client.set(last_data_key, str(time.time()), ex=60)
+               except (AttributeError, ConnectionError) as e:
+                   if self.stop_requested or self.url_switching:
+                       logger.debug(f"Expected connection error during shutdown/URL switch for channel {self.channel_id}: {e}")
+                   else:
+                       logger.error(f"Unexpected stream error for channel {self.channel_id}: {e}")
+                       raise
        except Exception as e:
            logger.error(f"Error processing stream data for channel {self.channel_id}: {e}", exc_info=True)
@@ -1043,7 +940,6 @@ class StreamManager:

            # Import both models for proper resource management
            from apps.channels.models import Stream, Channel
-           from django.db import connection

            # Update stream profile if we're switching streams
            if self.current_stream_id and stream_id and self.current_stream_id != stream_id:
@@ -1061,17 +957,9 @@ class StreamManager:
                        logger.debug(f"Updated m3u profile for channel {self.channel_id} to use profile from stream {stream_id}")
                    else:
                        logger.warning(f"Failed to update stream profile for channel {self.channel_id}")

                except Exception as e:
                    logger.error(f"Error updating stream profile for channel {self.channel_id}: {e}")

-               finally:
-                   # Always close the database connection after the profile update
-                   try:
-                       connection.close()
-                   except Exception:
-                       pass

            # CRITICAL: Set a flag to prevent immediate reconnection with the old URL
            self.url_switching = True
            self.url_switch_start_time = time.time()
@@ -1109,19 +997,6 @@ class StreamManager:
            except Exception as e:
                logger.warning(f"Failed to reset buffer position: {e}")

-           # Log stream switch event
-           try:
-               channel_obj = Channel.objects.get(uuid=self.channel_id)
-               log_system_event(
-                   'stream_switch',
-                   channel_id=self.channel_id,
-                   channel_name=channel_obj.name,
-                   new_url=new_url[:100] if new_url else None,
-                   stream_id=stream_id
-               )
-           except Exception as e:
-               logger.error(f"Could not log stream switch event: {e}")

            return True
        except Exception as e:
            logger.error(f"Error during URL update for channel {self.channel_id}: {e}", exc_info=True)
@@ -1240,19 +1115,6 @@ class StreamManager:
        if connection_result:
            self.connection_start_time = time.time()
            logger.info(f"Reconnect successful for channel {self.channel_id}")

-           # Log reconnection event
-           try:
-               channel_obj = Channel.objects.get(uuid=self.channel_id)
-               log_system_event(
-                   'channel_reconnect',
-                   channel_id=self.channel_id,
-                   channel_name=channel_obj.name,
-                   reason='health_monitor'
-               )
-           except Exception as e:
-               logger.error(f"Could not log reconnection event: {e}")

            return True
        else:
            logger.warning(f"Reconnect failed for channel {self.channel_id}")
@@ -1313,15 +1175,6 @@ class StreamManager:
        if self.current_response or self.current_session:
            self._close_connection()

-       # Stop the HTTP reader thread if it exists
-       if hasattr(self, 'http_reader') and self.http_reader:
-           try:
-               logger.debug(f"Stopping HTTP reader thread for channel {self.channel_id}")
-               self.http_reader.stop()
-               self.http_reader = None
-           except Exception as e:
-               logger.debug(f"Error stopping HTTP reader for channel {self.channel_id}: {e}")

        # Otherwise handle socket and transcode resources
        if self.socket:
            try:
@@ -1330,17 +1183,25 @@ class StreamManager:
                logger.debug(f"Error closing socket for channel {self.channel_id}: {e}")
                pass

-       # Enhanced transcode process cleanup with immediate termination
+       # Enhanced transcode process cleanup with more aggressive termination
        if self.transcode_process:
            try:
-               logger.debug(f"Killing transcode process for channel {self.channel_id}")
-               self.transcode_process.kill()
+               # First try polite termination
+               logger.debug(f"Terminating transcode process for channel {self.channel_id}")
+               self.transcode_process.terminate()

-               # Give it a very short time to die
+               # Give it a short time to terminate gracefully
                try:
-                   self.transcode_process.wait(timeout=0.5)
+                   self.transcode_process.wait(timeout=1.0)
                except subprocess.TimeoutExpired:
-                   logger.error(f"Failed to kill transcode process even with force for channel {self.channel_id}")
+                   # If it doesn't terminate quickly, kill it
+                   logger.warning(f"Transcode process didn't terminate within timeout, killing forcefully for channel {self.channel_id}")
+                   self.transcode_process.kill()
+
+                   try:
+                       self.transcode_process.wait(timeout=1.0)
+                   except subprocess.TimeoutExpired:
+                       logger.error(f"Failed to kill transcode process even with force for channel {self.channel_id}")
            except Exception as e:
                logger.debug(f"Error terminating transcode process for channel {self.channel_id}: {e}")
@@ -1350,30 +1211,6 @@ class StreamManager:
            except Exception as e:
                logger.error(f"Final kill attempt failed for channel {self.channel_id}: {e}")

-           # Explicitly close all subprocess pipes to prevent file descriptor leaks
-           try:
-               if self.transcode_process.stdin:
-                   self.transcode_process.stdin.close()
-               if self.transcode_process.stdout:
-                   self.transcode_process.stdout.close()
-               if self.transcode_process.stderr:
-                   self.transcode_process.stderr.close()
-               logger.debug(f"Closed all subprocess pipes for channel {self.channel_id}")
-           except Exception as e:
-               logger.debug(f"Error closing subprocess pipes for channel {self.channel_id}: {e}")

-           # Join the stderr reader thread to ensure it's fully terminated
-           if hasattr(self, 'stderr_reader_thread') and self.stderr_reader_thread and self.stderr_reader_thread.is_alive():
-               try:
-                   logger.debug(f"Waiting for stderr reader thread to terminate for channel {self.channel_id}")
-                   self.stderr_reader_thread.join(timeout=2.0)
-                   if self.stderr_reader_thread.is_alive():
-                       logger.warning(f"Stderr reader thread did not terminate within timeout for channel {self.channel_id}")
-               except Exception as e:
-                   logger.debug(f"Error joining stderr reader thread for channel {self.channel_id}: {e}")
-               finally:
-                   self.stderr_reader_thread = None

            self.transcode_process = None
            self.transcode_process_active = False  # Reset the flag
@@ -1405,7 +1242,7 @@ class StreamManager:

        try:
            # Set timeout for chunk reads
-           chunk_timeout = ConfigHelper.chunk_timeout()  # Use centralized timeout configuration
+           chunk_timeout = ConfigHelper.get('CHUNK_TIMEOUT', 10)  # Default 10 seconds

            try:
                # Handle different socket types with timeout
@@ -1488,17 +1325,7 @@ class StreamManager:
            # Only update if not already past connecting
            if not current_state or current_state in [ChannelState.INITIALIZING, ChannelState.CONNECTING]:
-               # Check if the buffer has enough chunks
-               # IMPORTANT: Read from Redis, not the local buffer.index, because in a multi-worker setup
-               # each worker has its own StreamBuffer instance with a potentially stale local index
-               buffer_index_key = RedisKeys.buffer_index(channel_id)
-               current_buffer_index = 0
-               try:
-                   redis_index = redis_client.get(buffer_index_key)
-                   if redis_index:
-                       current_buffer_index = int(redis_index)
-               except Exception as e:
-                   logger.error(f"Error reading buffer index from Redis: {e}")
+               current_buffer_index = getattr(self.buffer, 'index', 0)
                initial_chunks_needed = ConfigHelper.initial_behind_chunks()

                if current_buffer_index < initial_chunks_needed:
@@ -1546,21 +1373,10 @@ class StreamManager:
            # Clean up completed timers
            self._buffer_check_timers = [t for t in self._buffer_check_timers if t.is_alive()]

-           if hasattr(self.buffer, 'channel_id') and hasattr(self.buffer, 'redis_client'):
+           if hasattr(self.buffer, 'index') and hasattr(self.buffer, 'channel_id'):
+               current_buffer_index = self.buffer.index
+               initial_chunks_needed = getattr(Config, 'INITIAL_BEHIND_CHUNKS', 10)
                channel_id = self.buffer.channel_id
-               redis_client = self.buffer.redis_client
-
-               # IMPORTANT: Read from Redis, not the local buffer.index
-               buffer_index_key = RedisKeys.buffer_index(channel_id)
-               current_buffer_index = 0
-               try:
-                   redis_index = redis_client.get(buffer_index_key)
-                   if redis_index:
-                       current_buffer_index = int(redis_index)
-               except Exception as e:
-                   logger.error(f"Error reading buffer index from Redis: {e}")
-
-               initial_chunks_needed = ConfigHelper.initial_behind_chunks()  # Use ConfigHelper for consistency

                if current_buffer_index >= initial_chunks_needed:
                    # We now have enough buffer, call _set_waiting_for_clients again
@@ -1585,7 +1401,6 @@ class StreamManager:
    def _try_next_stream(self):
        """
        Try to switch to the next available stream for this channel.
-       Will iterate through multiple alternate streams if needed to find one with a different URL.

        Returns:
            bool: True if successfully switched to a new stream, False otherwise
@@ -1611,71 +1426,60 @@ class StreamManager:
logger.warning(f"All {len(alternate_streams)} alternate streams have been tried for channel {self.channel_id}")
return False

# IMPROVED: Try multiple streams until we find one with a different URL
for next_stream in untried_streams:
stream_id = next_stream['stream_id']
profile_id = next_stream['profile_id'] # This is the M3U profile ID we need
# Get the next stream to try
next_stream = untried_streams[0]
stream_id = next_stream['stream_id']
profile_id = next_stream['profile_id'] # This is the M3U profile ID we need

# Add to tried streams
self.tried_stream_ids.add(stream_id)
# Add to tried streams
self.tried_stream_ids.add(stream_id)

# Get stream info including URL using the profile_id we already have
logger.info(f"Trying next stream ID {stream_id} with profile ID {profile_id} for channel {self.channel_id}")
stream_info = get_stream_info_for_switch(self.channel_id, stream_id)
# Get stream info including URL using the profile_id we already have
logger.info(f"Trying next stream ID {stream_id} with profile ID {profile_id} for channel {self.channel_id}")
stream_info = get_stream_info_for_switch(self.channel_id, stream_id)

if 'error' in stream_info or not stream_info.get('url'):
logger.error(f"Error getting info for stream {stream_id} for channel {self.channel_id}: {stream_info.get('error', 'No URL')}")
continue # Try next stream instead of giving up
if 'error' in stream_info or not stream_info.get('url'):
logger.error(f"Error getting info for stream {stream_id} for channel {self.channel_id}: {stream_info.get('error', 'No URL')}")
return False

# Update URL and user agent
new_url = stream_info['url']
new_user_agent = stream_info['user_agent']
new_transcode = stream_info['transcode']
# Update URL and user agent
new_url = stream_info['url']
new_user_agent = stream_info['user_agent']
new_transcode = stream_info['transcode']

# CRITICAL FIX: Check if the new URL is the same as current URL
# This can happen when current_stream_id is None and we accidentally select the same stream
if new_url == self.url:
logger.warning(f"Stream ID {stream_id} generates the same URL as current stream ({new_url}). "
f"Skipping this stream and trying next alternative.")
continue # Try next stream instead of giving up
logger.info(f"Switching from URL {self.url} to {new_url} for channel {self.channel_id}")

logger.info(f"Switching from URL {self.url} to {new_url} for channel {self.channel_id}")
# IMPORTANT: Just update the URL, don't stop the channel or release resources
switch_result = self.update_url(new_url, stream_id, profile_id)
if not switch_result:
logger.error(f"Failed to update URL for stream ID {stream_id} for channel {self.channel_id}")
return False

# IMPORTANT: Just update the URL, don't stop the channel or release resources
switch_result = self.update_url(new_url, stream_id, profile_id)
if not switch_result:
logger.error(f"Failed to update URL for stream ID {stream_id} for channel {self.channel_id}")
continue # Try next stream
# Update stream ID tracking
self.current_stream_id = stream_id

# Update stream ID tracking
self.current_stream_id = stream_id
# Store the new user agent and transcode settings
self.user_agent = new_user_agent
self.transcode = new_transcode

# Store the new user agent and transcode settings
self.user_agent = new_user_agent
self.transcode = new_transcode
# Update stream metadata in Redis - use the profile_id we got from get_alternate_streams
if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client:
metadata_key = RedisKeys.channel_metadata(self.channel_id)
self.buffer.redis_client.hset(metadata_key, mapping={
ChannelMetadataField.URL: new_url,
ChannelMetadataField.USER_AGENT: new_user_agent,
ChannelMetadataField.STREAM_PROFILE: stream_info['stream_profile'],
ChannelMetadataField.M3U_PROFILE: str(profile_id), # Use the profile_id from get_alternate_streams
ChannelMetadataField.STREAM_ID: str(stream_id),
ChannelMetadataField.STREAM_SWITCH_TIME: str(time.time()),
ChannelMetadataField.STREAM_SWITCH_REASON: "max_retries_exceeded"
})

# Update stream metadata in Redis - use the profile_id we got from get_alternate_streams
if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client:
metadata_key = RedisKeys.channel_metadata(self.channel_id)
self.buffer.redis_client.hset(metadata_key, mapping={
ChannelMetadataField.URL: new_url,
ChannelMetadataField.USER_AGENT: new_user_agent,
ChannelMetadataField.STREAM_PROFILE: stream_info['stream_profile'],
ChannelMetadataField.M3U_PROFILE: str(profile_id), # Use the profile_id from get_alternate_streams
ChannelMetadataField.STREAM_ID: str(stream_id),
ChannelMetadataField.STREAM_SWITCH_TIME: str(time.time()),
ChannelMetadataField.STREAM_SWITCH_REASON: "max_retries_exceeded"
})
# Log the switch
logger.info(f"Stream metadata updated for channel {self.channel_id} to stream ID {stream_id} with M3U profile {profile_id}")

# Log the switch
logger.info(f"Stream metadata updated for channel {self.channel_id} to stream ID {stream_id} with M3U profile {profile_id}")

logger.info(f"Successfully switched to stream ID {stream_id} with URL {new_url} for channel {self.channel_id}")
return True

# If we get here, we tried all streams but none worked
logger.error(f"Tried {len(untried_streams)} alternate streams but none were suitable for channel {self.channel_id}")
return False
logger.info(f"Successfully switched to stream ID {stream_id} with URL {new_url} for channel {self.channel_id}")
return True

except Exception as e:
logger.error(f"Error trying next stream for channel {self.channel_id}: {e}", exc_info=True)
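A minimal sketch of the failover loop in the hunk above: walk the untried alternate streams, skip any candidate whose resolved URL matches the one already in use, and stop at the first candidate that can be applied. The helper names (fetch_stream_info, apply_url) and the argument shapes are illustrative placeholders, not Dispatcharr APIs.

def try_next_stream(current_url, untried_streams, fetch_stream_info, apply_url, tried_ids):
    """Return the candidate that was switched to, or None if none were suitable."""
    for candidate in untried_streams:
        stream_id = candidate["stream_id"]
        tried_ids.add(stream_id)

        info = fetch_stream_info(stream_id)
        if not info or "error" in info or not info.get("url"):
            continue  # unusable candidate, try the next one

        if info["url"] == current_url:
            continue  # resolves to the URL that is already failing, skip it

        if apply_url(info["url"], stream_id, candidate["profile_id"]):
            return candidate  # switched successfully

    return None  # every alternate was tried and none worked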
@@ -8,7 +8,7 @@ from typing import Optional, Tuple, List
from django.shortcuts import get_object_or_404
from apps.channels.models import Channel, Stream
from apps.m3u.models import M3UAccount, M3UAccountProfile
from core.models import UserAgent, CoreSettings, StreamProfile
from core.models import UserAgent, CoreSettings
from .utils import get_logger
from uuid import UUID
import requests
@@ -26,100 +26,16 @@ def get_stream_object(id: str):

def generate_stream_url(channel_id: str) -> Tuple[str, str, bool, Optional[int]]:
"""
Generate the appropriate stream URL for a channel or stream based on its profile settings.
Generate the appropriate stream URL for a channel based on its profile settings.

Args:
channel_id: The UUID of the channel or stream hash
channel_id: The UUID of the channel

Returns:
Tuple[str, str, bool, Optional[int]]: (stream_url, user_agent, transcode_flag, profile_id)
"""
try:
channel_or_stream = get_stream_object(channel_id)

# Handle direct stream preview (custom streams)
if isinstance(channel_or_stream, Stream):
from core.utils import RedisClient

stream = channel_or_stream
logger.info(f"Previewing stream directly: {stream.id} ({stream.name})")

# For custom streams, we need to get the M3U account and profile
m3u_account = stream.m3u_account
if not m3u_account:
logger.error(f"Stream {stream.id} has no M3U account")
return None, None, False, None

# Get active profiles for this M3U account
m3u_profiles = m3u_account.profiles.filter(is_active=True)
default_profile = next((obj for obj in m3u_profiles if obj.is_default), None)

if not default_profile:
logger.error(f"No default active profile found for M3U account {m3u_account.id}")
return None, None, False, None

# Check profiles in order: default first, then others
profiles = [default_profile] + [obj for obj in m3u_profiles if not obj.is_default]

# Try to find an available profile with connection capacity
redis_client = RedisClient.get_client()
selected_profile = None

for profile in profiles:
logger.info(profile)

# Check connection availability
if redis_client:
profile_connections_key = f"profile_connections:{profile.id}"
current_connections = int(redis_client.get(profile_connections_key) or 0)

# Check if profile has available slots (or unlimited connections)
if profile.max_streams == 0 or current_connections < profile.max_streams:
selected_profile = profile
logger.debug(f"Selected profile {profile.id} with {current_connections}/{profile.max_streams} connections for stream preview")
break
else:
logger.debug(f"Profile {profile.id} at max connections: {current_connections}/{profile.max_streams}")
else:
# No Redis available, use first active profile
selected_profile = profile
break

if not selected_profile:
logger.error(f"No profiles available with connection capacity for M3U account {m3u_account.id}")
return None, None, False, None

# Get the appropriate user agent
stream_user_agent = m3u_account.get_user_agent().user_agent
if stream_user_agent is None:
stream_user_agent = UserAgent.objects.get(id=CoreSettings.get_default_user_agent_id())
logger.debug(f"No user agent found for account, using default: {stream_user_agent}")

# Get stream URL with the selected profile's URL transformation
stream_url = transform_url(stream.url, selected_profile.search_pattern, selected_profile.replace_pattern)

# Check if the stream has its own stream_profile set, otherwise use default
if stream.stream_profile:
stream_profile = stream.stream_profile
logger.debug(f"Using stream's own stream profile: {stream_profile.name}")
else:
stream_profile = StreamProfile.objects.get(
id=CoreSettings.get_default_stream_profile_id()
)
logger.debug(f"Using default stream profile: {stream_profile.name}")

# Check if transcoding is needed
if stream_profile.is_proxy() or stream_profile is None:
transcode = False
else:
transcode = True

stream_profile_id = stream_profile.id

return stream_url, stream_user_agent, transcode, stream_profile_id

# Handle channel preview (existing logic)
channel = channel_or_stream
channel = get_stream_object(channel_id)

# Get stream and profile for this channel
# Note: get_stream now returns 3 values (stream_id, profile_id, error_reason)
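The profile-selection logic above reduces to: walk the active M3U profiles with the default first, and pick the first one whose Redis connection counter is below its max_streams limit, treating 0 as unlimited. A standalone sketch, assuming a redis-py-like client and the profile_connections:<id> key convention shown in the diff:

def pick_available_profile(profiles, redis_client):
    """profiles: default profile first; each has .id and .max_streams (0 = unlimited)."""
    for profile in profiles:
        if redis_client is None:
            return profile  # no Redis available: fall back to the first active profile
        key = f"profile_connections:{profile.id}"
        current = int(redis_client.get(key) or 0)
        if profile.max_streams == 0 or current < profile.max_streams:
            return profile  # this profile still has connection capacity
    return None  # every profile is at its connection limit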
@@ -226,7 +142,7 @@ def get_stream_info_for_switch(channel_id: str, target_stream_id: Optional[int]
if not m3u_account:
return {'error': 'Stream has no M3U account'}

m3u_profiles = m3u_account.profiles.filter(is_active=True)
m3u_profiles = m3u_account.profiles.all()
default_profile = next((obj for obj in m3u_profiles if obj.is_default), None)

if not default_profile:
@@ -237,6 +153,10 @@ def get_stream_info_for_switch(channel_id: str, target_stream_id: Optional[int]

selected_profile = None
for profile in profiles:
# Skip inactive profiles
if not profile.is_active:
logger.debug(f"Skipping inactive profile {profile.id}")
continue

# Check connection availability
if redis_client:
@@ -361,10 +281,8 @@ def get_alternate_streams(channel_id: str, current_stream_id: Optional[int] = No
if not m3u_account:
logger.debug(f"Stream {stream.id} has no M3U account")
continue
if m3u_account.is_active == False:
logger.debug(f"M3U account {m3u_account.id} is inactive, skipping.")
continue
m3u_profiles = m3u_account.profiles.filter(is_active=True)

m3u_profiles = m3u_account.profiles.all()
default_profile = next((obj for obj in m3u_profiles if obj.is_default), None)

if not default_profile:
@@ -376,6 +294,11 @@ def get_alternate_streams(channel_id: str, current_stream_id: Optional[int] = No

selected_profile = None
for profile in profiles:
# Skip inactive profiles
if not profile.is_active:
logger.debug(f"Skipping inactive profile {profile.id}")
continue

# Check connection availability
if redis_client:
profile_connections_key = f"profile_connections:{profile.id}"
@@ -435,9 +358,6 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
"""
Validate if a stream URL is accessible without downloading the full content.

Note: UDP/RTP/RTSP streams are automatically considered valid as they cannot
be validated via HTTP methods.

Args:
url (str): The URL to validate
user_agent (str): User agent to use for the request

@@ -446,12 +366,6 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
Returns:
tuple: (is_valid, final_url, status_code, message)
"""
# Check if URL uses non-HTTP protocols (UDP/RTP/RTSP)
# These cannot be validated via HTTP methods, so we skip validation
if url.startswith(('udp://', 'rtp://', 'rtsp://')):
logger.info(f"Skipping HTTP validation for non-HTTP protocol: {url}")
return True, url, 200, "Non-HTTP protocol (UDP/RTP/RTSP) - validation skipped"

try:
# Create session with proper headers
session = requests.Session()

@@ -462,21 +376,16 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
session.headers.update(headers)

# Make HEAD request first as it's faster and doesn't download content
head_request_success = True
try:
head_response = session.head(
url,
timeout=timeout,
allow_redirects=True
)
except requests.exceptions.RequestException as e:
head_request_success = False
logger.warning(f"Request error (HEAD), assuming HEAD not supported: {str(e)}")
head_response = session.head(
url,
timeout=timeout,
allow_redirects=True
)

# If HEAD not supported, server will return 405 or other error
if head_request_success and (200 <= head_response.status_code < 300):
if 200 <= head_response.status_code < 300:
# HEAD request successful
return True, url, head_response.status_code, "Valid (HEAD request)"
return True, head_response.url, head_response.status_code, "Valid (HEAD request)"

# Try a GET request with stream=True to avoid downloading all content
get_response = session.get(

@@ -489,7 +398,7 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
# IMPORTANT: Check status code first before checking content
if not (200 <= get_response.status_code < 300):
logger.warning(f"Stream validation failed with HTTP status {get_response.status_code}")
return False, url, get_response.status_code, f"Invalid HTTP status: {get_response.status_code}"
return False, get_response.url, get_response.status_code, f"Invalid HTTP status: {get_response.status_code}"

# Only check content if status code is valid
try:

@@ -543,7 +452,7 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
get_response.close()

# If we have content, consider it valid even with unrecognized content type
return is_valid, url, get_response.status_code, message
return is_valid, get_response.url, get_response.status_code, message

except requests.exceptions.Timeout:
return False, url, 0, "Timeout connecting to stream"
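The validation flow in these hunks is: skip HTTP checks entirely for udp/rtp/rtsp URLs, try a cheap HEAD request first, and fall back to a streamed GET when HEAD is unsupported, judging mainly on the status code. A simplified, self-contained sketch using requests (content-type checks and detailed error handling trimmed):

import requests

def quick_validate(url, user_agent=None, timeout=(5, 5)):
    if url.startswith(("udp://", "rtp://", "rtsp://")):
        return True, url, 200, "Non-HTTP protocol - validation skipped"

    session = requests.Session()
    if user_agent:
        session.headers.update({"User-Agent": user_agent})

    try:
        head = session.head(url, timeout=timeout, allow_redirects=True)
        if 200 <= head.status_code < 300:
            return True, head.url, head.status_code, "Valid (HEAD request)"
    except requests.exceptions.RequestException:
        pass  # HEAD not supported by the server, fall through to GET

    resp = session.get(url, timeout=timeout, stream=True, allow_redirects=True)
    try:
        ok = 200 <= resp.status_code < 300
        message = "Valid (GET request)" if ok else f"Invalid HTTP status: {resp.status_code}"
        return ok, resp.url, resp.status_code, message
    finally:
        resp.close()  # avoid downloading the stream body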
@@ -7,27 +7,19 @@ logger = logging.getLogger("ts_proxy")

def detect_stream_type(url):
"""
Detect if stream URL is HLS, RTSP/RTP, UDP, or TS format.
Detect if stream URL is HLS or TS format.

Args:
url (str): The stream URL to analyze

Returns:
str: 'hls', 'rtsp', 'udp', or 'ts' depending on detected format
str: 'hls' or 'ts' depending on detected format
"""
if not url:
return 'unknown'

url_lower = url.lower()

# Check for UDP streams (requires FFmpeg)
if url_lower.startswith('udp://'):
return 'udp'

# Check for RTSP/RTP streams (requires FFmpeg)
if url_lower.startswith('rtsp://') or url_lower.startswith('rtp://'):
return 'rtsp'

# Look for common HLS indicators
if (url_lower.endswith('.m3u8') or
'.m3u8?' in url_lower or
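A compact version of the URL-based detection shown above, covering the udp/rtsp/hls/ts cases; this is a sketch of the same idea, not the exact upstream function.

def detect_stream_type(url):
    if not url:
        return "unknown"
    u = url.lower()
    if u.startswith("udp://"):
        return "udp"    # handled via FFmpeg
    if u.startswith(("rtsp://", "rtp://")):
        return "rtsp"   # handled via FFmpeg
    if u.endswith(".m3u8") or ".m3u8?" in u:
        return "hls"    # playlist-based stream
    return "ts"         # default: raw MPEG-TS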
@@ -4,7 +4,7 @@ import time
import random
import re
import pathlib
from django.http import StreamingHttpResponse, JsonResponse, HttpResponseRedirect, HttpResponse
from django.http import StreamingHttpResponse, JsonResponse, HttpResponseRedirect
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import get_object_or_404
from apps.proxy.config import TSConfig as Config
@@ -28,7 +28,6 @@ from apps.accounts.permissions import (
from .constants import ChannelState, EventType, StreamType, ChannelMetadataField
from .config_helper import ConfigHelper
from .services.channel_service import ChannelService
from core.utils import send_websocket_update
from .url_utils import (
generate_stream_url,
transform_url,
@@ -84,18 +83,11 @@ def stream_ts(request, channel_id):
if state_field in metadata:
channel_state = metadata[state_field].decode("utf-8")

# Active/running states - channel is operational, don't reinitialize
if channel_state in [
ChannelState.ACTIVE,
ChannelState.WAITING_FOR_CLIENTS,
ChannelState.BUFFERING,
ChannelState.INITIALIZING,
ChannelState.CONNECTING,
ChannelState.STOPPING,
]:
if channel_state:
# Channel is being initialized or already active - no need for reinitialization
needs_initialization = False
logger.debug(
f"[{client_id}] Channel {channel_id} in state {channel_state}, skipping initialization"
f"[{client_id}] Channel {channel_id} already in state {channel_state}, skipping initialization"
)

# Special handling for initializing/connecting states

@@ -105,34 +97,19 @@ def stream_ts(request, channel_id):
]:
channel_initializing = True
logger.debug(
f"[{client_id}] Channel {channel_id} is still initializing, client will wait"
f"[{client_id}] Channel {channel_id} is still initializing, client will wait for completion"
)
# Terminal states - channel needs cleanup before reinitialization
elif channel_state in [
ChannelState.ERROR,
ChannelState.STOPPED,
]:
needs_initialization = True
logger.info(
f"[{client_id}] Channel {channel_id} in terminal state {channel_state}, will reinitialize"
)
# Unknown/empty state - check if owner is alive
else:
# Only check for owner if channel is in a valid state
owner_field = ChannelMetadataField.OWNER.encode("utf-8")
if owner_field in metadata:
owner = metadata[owner_field].decode("utf-8")
owner_heartbeat_key = f"ts_proxy:worker:{owner}:heartbeat"
if proxy_server.redis_client.exists(owner_heartbeat_key):
# Owner is still active with unknown state - don't reinitialize
# Owner is still active, so we don't need to reinitialize
needs_initialization = False
logger.debug(
f"[{client_id}] Channel {channel_id} has active owner {owner}, skipping init"
)
else:
# Owner dead - needs reinitialization
needs_initialization = True
logger.warning(
f"[{client_id}] Channel {channel_id} owner {owner} is dead, will reinitialize"
f"[{client_id}] Channel {channel_id} has active owner {owner}"
)

# Start initialization if needed
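The branching above reduces to a small decision: running states skip initialization, terminal states force it, and an unknown state defers to whether the owning worker's heartbeat key still exists in Redis. A hedged sketch of that decision, with plain strings standing in for the ChannelState constants:

RUNNING = {"active", "waiting_for_clients", "buffering", "initializing", "connecting", "stopping"}
TERMINAL = {"error", "stopped"}

def needs_initialization(state, owner, redis_client):
    if state in RUNNING:
        return False  # channel is already operational or starting up
    if state in TERMINAL:
        return True   # terminal state: clean up and start fresh
    if owner and redis_client.exists(f"ts_proxy:worker:{owner}:heartbeat"):
        return False  # unknown state, but the owning worker is still alive
    return True       # no live owner found: reinitialize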
@@ -149,9 +126,9 @@ def stream_ts(request, channel_id):
)
ChannelService.stop_channel(channel_id)

# Use fixed retry interval and timeout
retry_timeout = 3 # 3 seconds total timeout
retry_interval = 0.1 # 100ms between attempts
# Use max retry attempts and connection timeout from config
max_retries = ConfigHelper.max_retries()
retry_timeout = ConfigHelper.connection_timeout()
wait_start_time = time.time()

stream_url = None
@@ -159,69 +136,54 @@ def stream_ts(request, channel_id):
transcode = False
profile_value = None
error_reason = None
attempt = 0
should_retry = True

# Try to get a stream with fixed interval retries
while should_retry and time.time() - wait_start_time < retry_timeout:
attempt += 1
# Try to get a stream with configured retries
for attempt in range(max_retries):
stream_url, stream_user_agent, transcode, profile_value = (
generate_stream_url(channel_id)
)

if stream_url is not None:
logger.info(
f"[{client_id}] Successfully obtained stream for channel {channel_id} after {attempt} attempts"
f"[{client_id}] Successfully obtained stream for channel {channel_id}"
)
break

# On first failure, check if the error is retryable
if attempt == 1:
_, _, error_reason = channel.get_stream()
if error_reason and "maximum connection limits" not in error_reason:
logger.warning(
f"[{client_id}] Can't retry - error not related to connection limits: {error_reason}"
)
should_retry = False
break

# Check if we have time remaining for another sleep cycle
elapsed_time = time.time() - wait_start_time
remaining_time = retry_timeout - elapsed_time

# If we don't have enough time for the next sleep interval, break
# but only after we've already made an attempt (the while condition will try one more time)
if remaining_time <= retry_interval:
logger.info(
f"[{client_id}] Insufficient time ({remaining_time:.1f}s) for another sleep cycle, will make one final attempt"
# If we failed because there are no streams assigned, don't retry
_, _, error_reason = channel.get_stream()
if error_reason and "maximum connection limits" not in error_reason:
logger.warning(
f"[{client_id}] Can't retry - error not related to connection limits: {error_reason}"
)
break

# Wait before retrying
logger.info(
f"[{client_id}] Waiting {retry_interval*1000:.0f}ms for a connection to become available (attempt {attempt}, {remaining_time:.1f}s remaining)"
)
gevent.sleep(retry_interval)
retry_interval += 0.025 # Increase wait time by 25ms for next attempt

# Make one final attempt if we still don't have a stream, should retry, and haven't exceeded timeout
if stream_url is None and should_retry and time.time() - wait_start_time < retry_timeout:
attempt += 1
logger.info(
f"[{client_id}] Making final attempt {attempt} at timeout boundary"
)
stream_url, stream_user_agent, transcode, profile_value = (
generate_stream_url(channel_id)
)
if stream_url is not None:
logger.info(
f"[{client_id}] Successfully obtained stream on final attempt for channel {channel_id}"
# Don't exceed the overall connection timeout
if time.time() - wait_start_time > retry_timeout:
logger.warning(
f"[{client_id}] Connection wait timeout exceeded ({retry_timeout}s)"
)
break

# Wait before retrying (using exponential backoff with a cap)
wait_time = min(0.5 * (2**attempt), 2.0) # Caps at 2 seconds
logger.info(
f"[{client_id}] Waiting {wait_time:.1f}s for a connection to become available (attempt {attempt+1}/{max_retries})"
)
gevent.sleep(
wait_time
) # FIXED: Using gevent.sleep instead of time.sleep

if stream_url is None:
# Release the channel's stream lock if one was acquired
# Note: Only call this if get_stream() actually assigned a stream
# In our case, if stream_url is None, no stream was ever assigned, so don't release
# Make sure to release any stream locks that might have been acquired
if hasattr(channel, "streams") and channel.streams.exists():
for stream in channel.streams.all():
try:
stream.release_stream()
logger.info(
f"[{client_id}] Released stream {stream.id} for channel {channel_id}"
)
except Exception as e:
logger.error(f"[{client_id}] Error releasing stream: {e}")

# Get the specific error message if available
wait_duration = f"{int(time.time() - wait_start_time)}s"
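The newer retry logic above replaces capped exponential backoff with a short fixed window: keep retrying roughly every 100 ms (growing slightly each attempt) until a 3-second budget runs out, then make one last attempt at the boundary. A standalone sketch of that pattern, with time.sleep standing in for gevent.sleep and the helper names chosen for illustration:

import time

def acquire_with_retries(try_acquire, timeout=3.0, interval=0.1, step=0.025):
    """Call try_acquire() until it returns a value or the time budget is spent."""
    start = time.time()
    attempt = 0
    while time.time() - start < timeout:
        attempt += 1
        result = try_acquire()
        if result is not None:
            return result, attempt
        remaining = timeout - (time.time() - start)
        if remaining <= interval:
            break  # not enough budget left for another sleep cycle
        time.sleep(interval)
        interval += step  # grow the wait slightly on each attempt
    # one last attempt at the timeout boundary
    attempt += 1
    return try_acquire(), attempt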
@@ -230,9 +192,6 @@ def stream_ts(request, channel_id):
if error_reason
else "No available streams for this channel"
)
logger.info(
f"[{client_id}] Failed to obtain stream after {attempt} attempts over {wait_duration}: {error_msg}"
)
return JsonResponse(
{"error": error_msg, "waited": wait_duration}, status=503
) # 503 Service Unavailable is appropriate here
@@ -314,15 +273,6 @@ def stream_ts(request, channel_id):
logger.info(
f"[{client_id}] Redirecting to validated URL: {final_url} ({message})"
)

# For non-HTTP protocols (RTSP/RTP/UDP), we need to manually create the redirect
# because Django's HttpResponseRedirect blocks them for security
if final_url.startswith(('rtsp://', 'rtp://', 'udp://')):
logger.info(f"[{client_id}] Using manual redirect for non-HTTP protocol")
response = HttpResponse(status=301)
response['Location'] = final_url
return response

return HttpResponseRedirect(final_url)
else:
logger.error(
@@ -517,7 +467,9 @@ def stream_xc(request, username, password, channel_id):
extension = pathlib.Path(channel_id).suffix
channel_id = pathlib.Path(channel_id).stem

custom_properties = user.custom_properties or {}
custom_properties = (
json.loads(user.custom_properties) if user.custom_properties else {}
)

if "xc_password" not in custom_properties:
return Response({"error": "Invalid credentials"}, status=401)
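The change above suggests user.custom_properties is a dict in one version and a JSON-encoded string in the other. A small defensive helper that accepts either form (this helper is my own illustration, not Dispatcharr code):

import json

def load_custom_properties(raw):
    """Accept either a dict (field already deserialized) or a JSON string (legacy value)."""
    if not raw:
        return {}
    if isinstance(raw, dict):
        return raw
    return json.loads(raw)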
@@ -527,33 +479,24 @@ def stream_xc(request, username, password, channel_id):

print(f"Fetching channel with ID: {channel_id}")
if user.user_level < 10:
user_profile_count = user.channel_profiles.count()
filters = {
"id": int(channel_id),
"channelprofilemembership__enabled": True,
"user_level__lte": user.user_level,
}

# If user has ALL profiles or NO profiles, give unrestricted access
if user_profile_count == 0:
# No profile filtering - user sees all channels based on user_level
filters = {
"id": int(channel_id),
"user_level__lte": user.user_level
}
channel = Channel.objects.filter(**filters).first()
else:
# User has specific limited profiles assigned
filters = {
"id": int(channel_id),
"channelprofilemembership__enabled": True,
"user_level__lte": user.user_level,
"channelprofilemembership__channel_profile__in": user.channel_profiles.all()
}
channel = Channel.objects.filter(**filters).distinct().first()
if user.channel_profiles.count() > 0:
channel_profiles = user.channel_profiles.all()
filters["channelprofilemembership__channel_profile__in"] = channel_profiles

channel = Channel.objects.filter(**filters).distinct().first()
if not channel:
return JsonResponse({"error": "Not found"}, status=404)
else:
channel = get_object_or_404(Channel, id=channel_id)

# @TODO: we've got the file 'type' via extension, support this when we support multiple outputs
return stream_ts(request._request, str(channel.uuid))
return stream_ts(request._request, channel.uuid)


@csrf_exempt
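The access check above builds a Django ORM filter: non-admin users only see channels at or below their user_level, and when they have channel profiles assigned, the channel must also be enabled in one of those profiles. A sketch of the same query shape as a pure helper (field names taken from the diff, the function itself is illustrative):

def build_channel_filters(channel_id, user_level, profile_ids):
    """Build ORM filter kwargs for the XC channel lookup."""
    filters = {"id": int(channel_id), "user_level__lte": user_level}
    if profile_ids:
        # restrict to channels enabled in one of the user's assigned profiles
        filters["channelprofilemembership__enabled"] = True
        filters["channelprofilemembership__channel_profile__in"] = profile_ids
    return filters

# usage inside the view (sketch):
#   channel = Channel.objects.filter(
#       **build_channel_filters(cid, user.user_level, list(user.channel_profiles.all()))
#   ).distinct().first()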
@@ -692,18 +635,6 @@ def channel_status(request, channel_id=None):
if cursor == 0:
break

# Send WebSocket update with the stats
# Format it the same way the original Celery task did
send_websocket_update(
"updates",
"update",
{
"success": True,
"type": "channel_stats",
"stats": json.dumps({'channels': all_channels, 'count': len(all_channels)})
}
)

return JsonResponse({"channels": all_channels, "count": len(all_channels)})

except Exception as e:
@@ -5,5 +5,4 @@ app_name = 'proxy'
urlpatterns = [
path('ts/', include('apps.proxy.ts_proxy.urls')),
path('hls/', include('apps.proxy.hls_proxy.urls')),
path('vod/', include('apps.proxy.vod_proxy.urls')),
]
File diff suppressed because it is too large.
Some files were not shown because too many files have changed in this diff.