Compare commits


No commits in common. "main" and "v0.5.2" have entirely different histories.
main ... v0.5.2

347 changed files with 9559 additions and 77315 deletions


@@ -11,10 +11,6 @@
**/.toolstarget
**/.vs
**/.vscode
**/.history
**/media
**/models
**/static
**/*.*proj.user
**/*.dbmdl
**/*.jfm
@@ -30,5 +26,3 @@
**/values.dev.yaml
LICENSE
README.md
data/
docker/data/


@@ -1,7 +1,7 @@
name: Bug Report
description: I have an issue with Dispatcharr
title: "[Bug]: "
labels: ["Triage"]
labels: ["Bug", "Triage"]
type: "Bug"
projects: []
assignees: []


@@ -1,7 +1,7 @@
name: Feature request
description: I want to suggest a new feature for Dispatcharr
title: "[Feature]: "
labels: ["Triage"]
labels: ["Feature Request"]
type: "Feature"
projects: []
assignees: []


@@ -2,37 +2,42 @@ name: Base Image Build
on:
push:
branches: [main, dev]
branches: [ main, dev ]
paths:
- 'docker/DispatcharrBase'
- '.github/workflows/base-image.yml'
- 'requirements.txt'
pull_request:
branches: [main, dev]
branches: [ main, dev ]
paths:
- 'docker/DispatcharrBase'
- '.github/workflows/base-image.yml'
- 'requirements.txt'
workflow_dispatch: # Allow manual triggering
workflow_dispatch: # Allow manual triggering
permissions:
contents: write # For managing releases and pushing tags
packages: write # For publishing to GitHub Container Registry
contents: write # For managing releases and pushing tags
packages: write # For publishing to GitHub Container Registry
jobs:
prepare:
runs-on: ubuntu-24.04
outputs:
repo_owner: ${{ steps.meta.outputs.repo_owner }}
repo_name: ${{ steps.meta.outputs.repo_name }}
branch_tag: ${{ steps.meta.outputs.branch_tag }}
timestamp: ${{ steps.timestamp.outputs.timestamp }}
build-base-image:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Generate timestamp for build
id: timestamp
run: |
@@ -61,190 +66,13 @@ jobs:
echo "branch_tag=base-${BRANCH}" >> $GITHUB_OUTPUT
fi
docker:
needs: [prepare]
strategy:
fail-fast: false
matrix:
platform: [amd64, arm64]
include:
- platform: amd64
runner: ubuntu-24.04
- platform: arm64
runner: ubuntu-24.04-arm
runs-on: ${{ matrix.runner }}
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Configure Git
run: |
git config user.name "GitHub Actions"
git config user.email "actions@github.com"
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
registry: docker.io
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: |
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}
labels: |
org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}
org.opencontainers.image.description=Your ultimate IPTV & stream Management companion.
org.opencontainers.image.url=https://github.com/${{ github.repository }}
org.opencontainers.image.source=https://github.com/${{ github.repository }}
org.opencontainers.image.version=${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}
org.opencontainers.image.created=${{ needs.prepare.outputs.timestamp }}
org.opencontainers.image.revision=${{ github.sha }}
org.opencontainers.image.licenses=See repository
org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/
org.opencontainers.image.vendor=${{ needs.prepare.outputs.repo_owner }}
org.opencontainers.image.authors=${{ github.actor }}
maintainer=${{ github.actor }}
build_version=DispatcharrBase version: ${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}
- name: Build and push Docker base image
uses: docker/build-push-action@v4
with:
context: .
file: ./docker/DispatcharrBase
push: ${{ github.event_name != 'pull_request' }}
platforms: linux/${{ matrix.platform }}
push: true
platforms: linux/amd64,linux/arm64
tags: |
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }}
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
labels: ${{ steps.meta.outputs.labels }}
build-args: |
REPO_OWNER=${{ needs.prepare.outputs.repo_owner }}
REPO_NAME=${{ needs.prepare.outputs.repo_name }}
BRANCH=${{ github.ref_name }}
REPO_URL=https://github.com/${{ github.repository }}
TIMESTAMP=${{ needs.prepare.outputs.timestamp }}
create-manifest:
needs: [prepare, docker]
runs-on: ubuntu-24.04
if: ${{ github.event_name != 'pull_request' }}
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
registry: docker.io
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Create multi-arch manifest tags
run: |
set -euo pipefail
OWNER=${{ needs.prepare.outputs.repo_owner }}
REPO=${{ needs.prepare.outputs.repo_name }}
BRANCH_TAG=${{ needs.prepare.outputs.branch_tag }}
TIMESTAMP=${{ needs.prepare.outputs.timestamp }}
echo "Creating multi-arch manifest for ${OWNER}/${REPO}"
# GitHub Container Registry manifests
# branch tag (e.g. base or base-dev)
docker buildx imagetools create \
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
--annotation "index:org.opencontainers.image.licenses=See repository" \
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
--annotation "index:maintainer=${{ github.actor }}" \
--annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \
--tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \
ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-arm64
# branch + timestamp tag
docker buildx imagetools create \
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
--annotation "index:org.opencontainers.image.licenses=See repository" \
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
--annotation "index:maintainer=${{ github.actor }}" \
--annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \
--tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \
ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-arm64
# Docker Hub manifests
# branch tag (e.g. base or base-dev)
docker buildx imagetools create \
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
--annotation "index:org.opencontainers.image.licenses=See repository" \
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
--annotation "index:maintainer=${{ github.actor }}" \
--annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \
--tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-arm64
# branch + timestamp tag
docker buildx imagetools create \
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
--annotation "index:org.opencontainers.image.licenses=See repository" \
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
--annotation "index:maintainer=${{ github.actor }}" \
--annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \
--tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-arm64
ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:base
ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:base-${{ steps.timestamp.outputs.timestamp }}


@@ -2,86 +2,19 @@ name: CI Pipeline
on:
push:
branches: [dev]
paths-ignore:
- '**.md'
branches: [ dev ]
pull_request:
branches: [dev]
workflow_dispatch:
branches: [ dev ]
workflow_dispatch: # Allow manual triggering
# Add explicit permissions for the workflow
permissions:
contents: write
packages: write
contents: write # For managing releases and pushing tags
packages: write # For publishing to GitHub Container Registry
jobs:
prepare:
runs-on: ubuntu-24.04
# compute a single timestamp, version, and repo metadata for the entire workflow
outputs:
repo_owner: ${{ steps.meta.outputs.repo_owner }}
repo_name: ${{ steps.meta.outputs.repo_name }}
branch_tag: ${{ steps.meta.outputs.branch_tag }}
version: ${{ steps.version.outputs.version }}
timestamp: ${{ steps.timestamp.outputs.timestamp }}
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Generate timestamp for build
id: timestamp
run: |
TIMESTAMP=$(date -u +'%Y%m%d%H%M%S')
echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT
- name: Extract version info
id: version
run: |
VERSION=$(python -c "import version; print(version.__version__)")
echo "version=${VERSION}" >> $GITHUB_OUTPUT
- name: Set repository and image metadata
id: meta
run: |
REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')
echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT
REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]')
echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT
if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
echo "branch_tag=latest" >> $GITHUB_OUTPUT
echo "is_main=true" >> $GITHUB_OUTPUT
elif [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
echo "branch_tag=dev" >> $GITHUB_OUTPUT
echo "is_main=false" >> $GITHUB_OUTPUT
else
BRANCH=$(echo "${{ github.ref }}" | sed 's/refs\/heads\///' | sed 's/[^a-zA-Z0-9]/-/g')
echo "branch_tag=${BRANCH}" >> $GITHUB_OUTPUT
echo "is_main=false" >> $GITHUB_OUTPUT
fi
if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then
echo "is_fork=true" >> $GITHUB_OUTPUT
else
echo "is_fork=false" >> $GITHUB_OUTPUT
fi
docker:
needs: [prepare]
strategy:
fail-fast: false
matrix:
platform: [amd64, arm64]
include:
- platform: amd64
runner: ubuntu-24.04
- platform: arm64
runner: ubuntu-24.04-arm
runs-on: ${{ matrix.runner }}
# no per-job outputs here; shared metadata comes from the `prepare` job
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
@@ -112,162 +45,66 @@ jobs:
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
registry: docker.io
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Generate timestamp for build
id: timestamp
run: |
TIMESTAMP=$(date -u +'%Y%m%d%H%M%S')
echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT
- name: Extract metadata for Docker
- name: Extract version info
id: version
run: |
VERSION=$(python -c "import version; print(version.__version__)")
echo "version=${VERSION}" >> $GITHUB_OUTPUT
echo "sha_short=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT
- name: Set repository and image metadata
id: meta
uses: docker/metadata-action@v5
with:
images: |
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}
labels: |
org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}
org.opencontainers.image.description=Your ultimate IPTV & stream Management companion.
org.opencontainers.image.url=https://github.com/${{ github.repository }}
org.opencontainers.image.source=https://github.com/${{ github.repository }}
org.opencontainers.image.version=${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}
org.opencontainers.image.created=${{ needs.prepare.outputs.timestamp }}
org.opencontainers.image.revision=${{ github.sha }}
org.opencontainers.image.licenses=See repository
org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/
org.opencontainers.image.vendor=${{ needs.prepare.outputs.repo_owner }}
org.opencontainers.image.authors=${{ github.actor }}
maintainer=${{ github.actor }}
build_version=Dispatcharr version: ${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}
run: |
# Get lowercase repository owner
REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')
echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT
# Get repository name
REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]')
echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT
# Determine branch name
if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
echo "branch_tag=latest" >> $GITHUB_OUTPUT
echo "is_main=true" >> $GITHUB_OUTPUT
elif [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
echo "branch_tag=dev" >> $GITHUB_OUTPUT
echo "is_main=false" >> $GITHUB_OUTPUT
else
# For other branches, use the branch name
BRANCH=$(echo "${{ github.ref }}" | sed 's/refs\/heads\///' | sed 's/[^a-zA-Z0-9]/-/g')
echo "branch_tag=${BRANCH}" >> $GITHUB_OUTPUT
echo "is_main=false" >> $GITHUB_OUTPUT
fi
# Determine if this is from a fork
if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then
echo "is_fork=true" >> $GITHUB_OUTPUT
else
echo "is_fork=false" >> $GITHUB_OUTPUT
fi
- name: Build and push Docker image
uses: docker/build-push-action@v4
with:
context: .
push: ${{ github.event_name != 'pull_request' }}
# Build only the platform for this matrix job to avoid running amd64
# stages under qemu on an arm64 runner (and vice-versa). This makes
# the matrix runner's platform the one built by buildx.
platforms: linux/${{ matrix.platform }}
# push arch-specific tags from each matrix job (they will be combined
# into a multi-arch manifest in a follow-up job)
platforms: linux/amd64,linux/arm64
tags: |
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }}
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
labels: ${{ steps.meta.outputs.labels }}
ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.meta.outputs.branch_tag }}
ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.version.outputs.version }}-${{ steps.timestamp.outputs.timestamp }}
ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.version.outputs.sha_short }}
build-args: |
REPO_OWNER=${{ needs.prepare.outputs.repo_owner }}
REPO_NAME=${{ needs.prepare.outputs.repo_name }}
REPO_OWNER=${{ steps.meta.outputs.repo_owner }}
REPO_NAME=${{ steps.meta.outputs.repo_name }}
BASE_TAG=base
BRANCH=${{ github.ref_name }}
REPO_URL=https://github.com/${{ github.repository }}
TIMESTAMP=${{ needs.prepare.outputs.timestamp }}
TIMESTAMP=${{ steps.timestamp.outputs.timestamp }}
file: ./docker/Dockerfile
create-manifest:
# wait for prepare and all matrix builds to finish
needs: [prepare, docker]
runs-on: ubuntu-24.04
if: ${{ github.event_name != 'pull_request' }}
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
registry: docker.io
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Create multi-arch manifest tags
run: |
set -euo pipefail
OWNER=${{ needs.prepare.outputs.repo_owner }}
REPO=${{ needs.prepare.outputs.repo_name }}
BRANCH_TAG=${{ needs.prepare.outputs.branch_tag }}
VERSION=${{ needs.prepare.outputs.version }}
TIMESTAMP=${{ needs.prepare.outputs.timestamp }}
echo "Creating multi-arch manifest for ${OWNER}/${REPO}"
# branch tag (e.g. latest or dev)
docker buildx imagetools create \
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.version=${BRANCH_TAG}" \
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
--annotation "index:org.opencontainers.image.licenses=See repository" \
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
--annotation "index:maintainer=${{ github.actor }}" \
--annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \
--tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \
ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-arm64
# version + timestamp tag
docker buildx imagetools create \
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.version=${VERSION}-${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
--annotation "index:org.opencontainers.image.licenses=See repository" \
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
--annotation "index:maintainer=${{ github.actor }}" \
--annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \
--tag ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP} \
ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP}-arm64
# also create Docker Hub manifests using the same username
docker buildx imagetools create \
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.version=${BRANCH_TAG}" \
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
--annotation "index:org.opencontainers.image.licenses=See repository" \
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
--annotation "index:maintainer=${{ github.actor }}" \
--annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \
--tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-arm64
docker buildx imagetools create \
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.version=${VERSION}-${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
--annotation "index:org.opencontainers.image.licenses=See repository" \
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
--annotation "index:maintainer=${{ github.actor }}" \
--annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \
--tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP} \
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP}-arm64


@@ -1,41 +0,0 @@
name: Frontend Tests
on:
push:
branches: [main, dev]
paths:
- 'frontend/**'
- '.github/workflows/frontend-tests.yml'
pull_request:
branches: [main, dev]
paths:
- 'frontend/**'
- '.github/workflows/frontend-tests.yml'
jobs:
test:
runs-on: ubuntu-latest
defaults:
run:
working-directory: ./frontend
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '24'
cache: 'npm'
cache-dependency-path: './frontend/package-lock.json'
- name: Install dependencies
run: npm ci
# - name: Run linter
# run: npm run lint
- name: Run tests
run: npm test


@@ -15,22 +15,16 @@ on:
# Add explicit permissions for the workflow
permissions:
contents: write # For managing releases and pushing tags
packages: write # For publishing to GitHub Container Registry
contents: write # For managing releases and pushing tags
packages: write # For publishing to GitHub Container Registry
jobs:
prepare:
runs-on: ubuntu-24.04
outputs:
new_version: ${{ steps.update_version.outputs.new_version }}
repo_owner: ${{ steps.meta.outputs.repo_owner }}
repo_name: ${{ steps.meta.outputs.repo_name }}
timestamp: ${{ steps.timestamp.outputs.timestamp }}
release:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Configure Git
run: |
@@ -44,194 +38,55 @@ jobs:
NEW_VERSION=$(python -c "import version; print(f'{version.__version__}')")
echo "new_version=${NEW_VERSION}" >> $GITHUB_OUTPUT
- name: Update Changelog
run: |
python scripts/update_changelog.py ${{ steps.update_version.outputs.new_version }}
- name: Set repository metadata
id: meta
- name: Set lowercase repo owner
id: repo_owner
run: |
REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')
echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT
echo "lowercase=${REPO_OWNER}" >> $GITHUB_OUTPUT
REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]')
echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Generate timestamp for build
id: timestamp
run: |
TIMESTAMP=$(date -u +'%Y%m%d%H%M%S')
echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Commit and Tag
run: |
git add version.py CHANGELOG.md
git add version.py
git commit -m "Release v${{ steps.update_version.outputs.new_version }}"
git tag -a "v${{ steps.update_version.outputs.new_version }}" -m "Release v${{ steps.update_version.outputs.new_version }}"
git push origin main --tags
docker:
needs: [prepare]
strategy:
fail-fast: false
matrix:
platform: [amd64, arm64]
include:
- platform: amd64
runner: ubuntu-24.04
- platform: arm64
runner: ubuntu-24.04-arm
runs-on: ${{ matrix.runner }}
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
ref: main
- name: Configure Git
run: |
git config user.name "GitHub Actions"
git config user.email "actions@github.com"
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
registry: docker.io
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: |
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}
labels: |
org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}
org.opencontainers.image.description=Your ultimate IPTV & stream Management companion.
org.opencontainers.image.url=https://github.com/${{ github.repository }}
org.opencontainers.image.source=https://github.com/${{ github.repository }}
org.opencontainers.image.version=${{ needs.prepare.outputs.new_version }}
org.opencontainers.image.created=${{ needs.prepare.outputs.timestamp }}
org.opencontainers.image.revision=${{ github.sha }}
org.opencontainers.image.licenses=See repository
org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/
org.opencontainers.image.vendor=${{ needs.prepare.outputs.repo_owner }}
org.opencontainers.image.authors=${{ github.actor }}
maintainer=${{ github.actor }}
build_version=Dispatcharr version: ${{ needs.prepare.outputs.new_version }} Build date: ${{ needs.prepare.outputs.timestamp }}
- name: Build and push Docker image
- name: Build and Push Release Image
uses: docker/build-push-action@v4
with:
context: .
push: true
platforms: linux/${{ matrix.platform }}
platforms: linux/amd64,linux/arm64, #linux/arm/v7 # Multi-arch support for releases
tags: |
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:latest-${{ matrix.platform }}
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.new_version }}-${{ matrix.platform }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:latest-${{ matrix.platform }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.new_version }}-${{ matrix.platform }}
labels: ${{ steps.meta.outputs.labels }}
ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest
ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }}
ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest-amd64
ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest-arm64
ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }}-amd64
ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }}-arm64
build-args: |
REPO_OWNER=${{ needs.prepare.outputs.repo_owner }}
REPO_NAME=${{ needs.prepare.outputs.repo_name }}
BRANCH=${{ github.ref_name }}
REPO_URL=https://github.com/${{ github.repository }}
file: ./docker/Dockerfile
create-manifest:
needs: [prepare, docker]
runs-on: ubuntu-24.04
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
registry: docker.io
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Create multi-arch manifest tags
run: |
set -euo pipefail
OWNER=${{ needs.prepare.outputs.repo_owner }}
REPO=${{ needs.prepare.outputs.repo_name }}
VERSION=${{ needs.prepare.outputs.new_version }}
TIMESTAMP=${{ needs.prepare.outputs.timestamp }}
echo "Creating multi-arch manifest for ${OWNER}/${REPO}"
# GitHub Container Registry manifests
# Create one manifest with both latest and version tags
docker buildx imagetools create \
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.version=${VERSION}" \
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
--annotation "index:org.opencontainers.image.licenses=See repository" \
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
--annotation "index:maintainer=${{ github.actor }}" \
--annotation "index:build_version=Dispatcharr version: ${VERSION} Build date: ${TIMESTAMP}" \
--tag ghcr.io/${OWNER}/${REPO}:latest \
--tag ghcr.io/${OWNER}/${REPO}:${VERSION} \
ghcr.io/${OWNER}/${REPO}:${VERSION}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-arm64
# Docker Hub manifests
# Create one manifest with both latest and version tags
docker buildx imagetools create \
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.version=${VERSION}" \
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
--annotation "index:org.opencontainers.image.licenses=See repository" \
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
--annotation "index:maintainer=${{ github.actor }}" \
--annotation "index:build_version=Dispatcharr version: ${VERSION} Build date: ${TIMESTAMP}" \
--tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest \
--tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION} \
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-arm64
create-release:
needs: [prepare, create-manifest]
runs-on: ubuntu-24.04
steps:
- name: Create GitHub Release
uses: softprops/action-gh-release@v1
with:
tag_name: v${{ needs.prepare.outputs.new_version }}
name: Release v${{ needs.prepare.outputs.new_version }}
tag_name: v${{ steps.update_version.outputs.new_version }}
name: Release v${{ steps.update_version.outputs.new_version }}
draft: false
prerelease: false
token: ${{ secrets.GITHUB_TOKEN }}

.gitignore

@@ -18,5 +18,4 @@ dump.rdb
debugpy*
uwsgi.sock
package-lock.json
models
.idea
models

File diff suppressed because it is too large


@@ -1,286 +0,0 @@
# Dispatcharr Plugins
This document explains how to build, install, and use Python plugins in Dispatcharr. It covers discovery, the plugin interface, settings, actions, how to access application APIs, and examples.
---
## Quick Start
1) Create a folder under `/app/data/plugins/my_plugin/` (host path `data/plugins/my_plugin/` in the repo).
2) Add a `plugin.py` file exporting a `Plugin` class:
```
# /app/data/plugins/my_plugin/plugin.py
class Plugin:
name = "My Plugin"
version = "0.1.0"
description = "Does something useful"
# Settings fields rendered by the UI and persisted by the backend
fields = [
{"id": "enabled", "label": "Enabled", "type": "boolean", "default": True},
{"id": "limit", "label": "Item limit", "type": "number", "default": 5},
{"id": "mode", "label": "Mode", "type": "select", "default": "safe",
"options": [
{"value": "safe", "label": "Safe"},
{"value": "fast", "label": "Fast"},
]},
{"id": "note", "label": "Note", "type": "string", "default": ""},
]
# Actions appear as buttons. Clicking one calls run(action, params, context)
actions = [
{"id": "do_work", "label": "Do Work", "description": "Process items"},
]
def run(self, action: str, params: dict, context: dict):
settings = context.get("settings", {})
logger = context.get("logger")
if action == "do_work":
limit = int(settings.get("limit", 5))
mode = settings.get("mode", "safe")
logger.info(f"My Plugin running with limit={limit}, mode={mode}")
# Do a small amount of work here. Schedule Celery tasks for heavy work.
return {"status": "ok", "processed": limit, "mode": mode}
return {"status": "error", "message": f"Unknown action {action}"}
```
3) Open the Plugins page in the UI, click the refresh icon to reload discovery, then configure and run your plugin.
---
## Where Plugins Live
- Default directory: `/app/data/plugins` inside the container.
- Override with env var: `DISPATCHARR_PLUGINS_DIR`.
- Each plugin is a directory containing either:
- `plugin.py` exporting a `Plugin` class, or
- a Python package (`__init__.py`) exporting a `Plugin` class.
The directory name (lowercased, spaces as `_`) is used as the registry key and module import path (e.g. `my_plugin.plugin`).
---
## Discovery & Lifecycle
- Discovery runs at server startup and on-demand when:
- Fetching the plugins list from the UI
- Hitting `POST /api/plugins/plugins/reload/`
- The loader imports each plugin module and instantiates `Plugin()` (a rough sketch of this is shown below).
- Metadata (name, version, description) and a per-plugin settings JSON are stored in the DB.
Backend code:
- Loader: `apps/plugins/loader.py`
- API Views: `apps/plugins/api_views.py`
- API URLs: `apps/plugins/api_urls.py`
- Model: `apps/plugins/models.py` (stores `enabled` flag and `settings` per plugin)
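As a rough illustration only (this is not the code in `apps/plugins/loader.py`, and the helper name is hypothetical), a discovery pass over the plugins directory could work along these lines:
```
# Hypothetical sketch of discovery -- see apps/plugins/loader.py for the real logic.
import importlib
import os
import sys

PLUGINS_DIR = os.environ.get("DISPATCHARR_PLUGINS_DIR", "/app/data/plugins")

def discover_plugins(plugins_dir=PLUGINS_DIR):
    """Yield (key, plugin_instance) for every directory exporting a Plugin class."""
    if plugins_dir not in sys.path:
        sys.path.insert(0, plugins_dir)  # makes `my_plugin.plugin` importable
    for entry in sorted(os.listdir(plugins_dir)):
        if not os.path.isdir(os.path.join(plugins_dir, entry)):
            continue
        key = entry.lower().replace(" ", "_")  # registry key / import name
        try:
            module = importlib.import_module(f"{key}.plugin")  # plugin.py layout
        except ImportError:
            module = importlib.import_module(key)  # package (__init__.py) layout
        plugin_cls = getattr(module, "Plugin", None)
        if plugin_cls is not None:
            yield key, plugin_cls()
```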
---
## Plugin Interface
Export a `Plugin` class. Supported attributes and behavior:
- `name` (str): Human-readable name.
- `version` (str): Semantic version string.
- `description` (str): Short description.
- `fields` (list): Settings schema used by the UI to render controls.
- `actions` (list): Available actions; the UI renders a Run button for each.
- `run(action, params, context)` (callable): Invoked when a user clicks an action.
### Settings Schema
Supported field `type`s:
- `boolean`
- `number`
- `string`
- `select` (requires `options`: `[{"value": ..., "label": ...}, ...]`)
Common field keys:
- `id` (str): Settings key.
- `label` (str): Label shown in the UI.
- `type` (str): One of above.
- `default` (any): Default value used until saved.
- `help_text` (str, optional): Shown under the control.
- `options` (list, for select): List of `{value, label}`.
The UI automatically renders settings and persists them. The backend stores settings in `PluginConfig.settings`.
Read settings in `run` via `context["settings"]`.
### Actions
Each action is a dict:
- `id` (str): Unique action id.
- `label` (str): Button label.
- `description` (str, optional): Helper text.
Clicking an action calls your plugin's `run(action, params, context)` and shows a notification with the result or error.
### Action Confirmation (Modal)
Developers can request a confirmation modal per action using the `confirm` key on the action. Options:
- Boolean: `confirm: true` will show a default confirmation modal.
- Object: `confirm: { required: true, title: '...', message: '...' }` to customize the modal title and message.
Example:
```
actions = [
{
"id": "danger_run",
"label": "Do Something Risky",
"description": "Runs a job that affects many records.",
"confirm": { "required": true, "title": "Proceed?", "message": "This will modify many records." },
}
]
```
---
## Accessing Dispatcharr APIs from Plugins
Plugins are server-side Python code running within the Django application. You can:
- Import models and run queries/updates:
```
from apps.m3u.models import M3UAccount
from apps.epg.models import EPGSource
from apps.channels.models import Channel
from core.models import CoreSettings
```
- Dispatch Celery tasks for heavy work (recommended):
```
from apps.m3u.tasks import refresh_m3u_accounts # apps/m3u/tasks.py
from apps.epg.tasks import refresh_all_epg_data # apps/epg/tasks.py
refresh_m3u_accounts.delay()
refresh_all_epg_data.delay()
```
- Send WebSocket updates:
```
from core.utils import send_websocket_update
send_websocket_update('updates', 'update', {"type": "plugin", "plugin": "my_plugin", "message": "Done"})
```
- Use transactions:
```
from django.db import transaction
with transaction.atomic():
# bulk updates here
...
```
- Log via provided context or standard logging:
```
def run(self, action, params, context):
logger = context.get("logger") # already configured
logger.info("running action %s", action)
```
Prefer Celery tasks (`.delay()`) to keep `run` fast and non-blocking.
---
## REST Endpoints (for UI and tooling)
- List plugins: `GET /api/plugins/plugins/`
- Response: `{ "plugins": [{ key, name, version, description, enabled, fields, settings, actions }, ...] }`
- Reload discovery: `POST /api/plugins/plugins/reload/`
- Import plugin: `POST /api/plugins/plugins/import/` with form-data file field `file`
- Update settings: `POST /api/plugins/plugins/<key>/settings/` with `{"settings": {...}}`
- Run action: `POST /api/plugins/plugins/<key>/run/` with `{"action": "id", "params": {...}}`
- Enable/disable: `POST /api/plugins/plugins/<key>/enabled/` with `{"enabled": true|false}`
Notes:
- When a plugin is disabled, its actions cannot be run; the backend returns HTTP 403 (an example request is sketched below).
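As a sketch of calling these endpoints from a script (the base URL, port, and token placeholder below are assumptions, not values defined in this document):
```
import requests

BASE = "http://localhost:9191"  # assumed host/port of a local Dispatcharr instance
HEADERS = {"Authorization": "Bearer <ACCESS_TOKEN>"}  # JWT from the token endpoint

# Run the "do_work" action of the plugin registered under the key "my_plugin"
resp = requests.post(
    f"{BASE}/api/plugins/plugins/my_plugin/run/",
    json={"action": "do_work", "params": {}},
    headers=HEADERS,
)
print(resp.status_code, resp.json())  # expect HTTP 403 if the plugin is disabled
```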
---
## Importing Plugins
- In the UI, click the Import button on the Plugins page and upload a `.zip` containing a plugin folder.
- The archive should contain either `plugin.py` or a Python package (`__init__.py`); a packaging sketch follows below.
- On success, the UI shows the plugin name/description and lets you enable it immediately (plugins are disabled by default).
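For example, packaging a plugin folder into an uploadable archive can be done with the standard library (paths are illustrative):
```
import shutil

# Produces my_plugin.zip containing the my_plugin/ folder (with its plugin.py)
shutil.make_archive("my_plugin", "zip", root_dir="data/plugins", base_dir="my_plugin")
```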
---
## Enabling / Disabling Plugins
- Each plugin has a persisted `enabled` flag (default: disabled) and `ever_enabled` flag in the DB (`apps/plugins/models.py`).
- New plugins are disabled by default and require an explicit enable.
- The first time a plugin is enabled, the UI shows a trust warning modal explaining that plugins can run arbitrary server-side code.
- The Plugins page shows a toggle in the card header. Turning it off dims the card and disables the Run button.
- Backend enforcement: Attempts to run an action for a disabled plugin return HTTP 403.
---
## Example: Refresh All Sources Plugin
Path: `data/plugins/refresh_all/plugin.py`
```
class Plugin:
name = "Refresh All Sources"
version = "1.0.0"
description = "Force refresh all M3U accounts and EPG sources."
fields = [
{"id": "confirm", "label": "Require confirmation", "type": "boolean", "default": True,
"help_text": "If enabled, the UI should ask before running."}
]
actions = [
{"id": "refresh_all", "label": "Refresh All M3Us and EPGs",
"description": "Queues background refresh for all active M3U accounts and EPG sources."}
]
def run(self, action: str, params: dict, context: dict):
if action == "refresh_all":
from apps.m3u.tasks import refresh_m3u_accounts
from apps.epg.tasks import refresh_all_epg_data
refresh_m3u_accounts.delay()
refresh_all_epg_data.delay()
return {"status": "queued", "message": "Refresh jobs queued"}
return {"status": "error", "message": f"Unknown action: {action}"}
```
---
## Best Practices
- Keep `run` short and schedule heavy operations via Celery tasks.
- Validate and sanitize `params` received from the UI (see the sketch after this list).
- Use database transactions for bulk or related updates.
- Log actionable messages for troubleshooting.
- Only write files under `/data` or `/app/data` paths.
- Treat plugins as trusted code: they run with full app permissions.
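For instance, a defensive `run` that validates UI-supplied `params` before doing any work might look like this (illustrative only):
```
def run(self, action, params, context):
    logger = context.get("logger")
    if action != "do_work":
        return {"status": "error", "message": f"Unknown action: {action}"}
    # Coerce and bound-check params before they reach the database
    try:
        limit = int(params.get("limit", 10))
    except (TypeError, ValueError):
        return {"status": "error", "message": "limit must be an integer"}
    if not 1 <= limit <= 1000:
        return {"status": "error", "message": "limit must be between 1 and 1000"}
    logger.info("do_work starting with limit=%s", limit)
    return {"status": "ok", "limit": limit}
```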
---
## Troubleshooting
- Plugin not listed: ensure the folder exists and contains `plugin.py` with a `Plugin` class.
- Import errors: the folder name is the import name; avoid spaces or exotic characters.
- No confirmation: include a boolean field with `id: "confirm"` and set it to true (or give it a default of true).
- HTTP 403 on run: the plugin is disabled; enable it from the toggle or via the `enabled/` endpoint.
---
## Contributing
- Keep dependencies minimal. Vendoring small helpers into the plugin folder is acceptable.
- Use the existing task and model APIs where possible; propose extensions if you need new capabilities.
---
## Internals Reference
- Loader: `apps/plugins/loader.py`
- API Views: `apps/plugins/api_views.py`
- API URLs: `apps/plugins/api_urls.py`
- Model: `apps/plugins/models.py`
- Frontend page: `frontend/src/pages/Plugins.jsx`
- Sidebar entry: `frontend/src/components/Sidebar.jsx`


@@ -22,7 +22,6 @@ Dispatcharr has officially entered **BETA**, bringing powerful new features and
📊 **Real-Time Stats Dashboard** — Live insights into stream health and client activity\
🧠 **EPG Auto-Match** — Match program data to channels automatically\
⚙️ **Streamlink + FFmpeg Support** — Flexible backend options for streaming and recording\
🎬 **VOD Management** — Full Video on Demand support with movies and TV series\
🧼 **UI & UX Enhancements** — Smoother, faster, more responsive interface\
🛁 **Output Compatibility** — HDHomeRun, M3U, and XMLTV EPG support for Plex, Jellyfin, and more
@@ -32,7 +31,6 @@ Dispatcharr has officially entered **BETA**, bringing powerful new features and
**Full IPTV Control** — Import, organize, proxy, and monitor IPTV streams on your own terms\
**Smart Playlist Handling** — M3U import, filtering, grouping, and failover support\
**VOD Content Management** — Organize movies and TV series with metadata and streaming\
**Reliable EPG Integration** — Match and manage TV guide data with ease\
**Clean & Responsive Interface** — Modern design that gets out of your way\
**Fully Self-Hosted** — Total control, zero reliance on third-party services


@@ -1,39 +1,41 @@
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .api_views import (
AuthViewSet,
UserViewSet,
GroupViewSet,
TokenObtainPairView,
TokenRefreshView,
list_permissions,
initialize_superuser,
AuthViewSet, UserViewSet, GroupViewSet,
list_permissions, initialize_superuser
)
from rest_framework_simplejwt import views as jwt_views
app_name = "accounts"
app_name = 'accounts'
# 🔹 Register ViewSets with a Router
router = DefaultRouter()
router.register(r"users", UserViewSet, basename="user")
router.register(r"groups", GroupViewSet, basename="group")
router.register(r'users', UserViewSet, basename='user')
router.register(r'groups', GroupViewSet, basename='group')
# 🔹 Custom Authentication Endpoints
auth_view = AuthViewSet.as_view({"post": "login"})
auth_view = AuthViewSet.as_view({
'post': 'login'
})
logout_view = AuthViewSet.as_view({"post": "logout"})
logout_view = AuthViewSet.as_view({
'post': 'logout'
})
# 🔹 Define API URL patterns
urlpatterns = [
# Authentication
path("auth/login/", auth_view, name="user-login"),
path("auth/logout/", logout_view, name="user-logout"),
path('auth/login/', auth_view, name='user-login'),
path('auth/logout/', logout_view, name='user-logout'),
# Superuser API
path("initialize-superuser/", initialize_superuser, name="initialize_superuser"),
path('initialize-superuser/', initialize_superuser, name='initialize_superuser'),
# Permissions API
path("permissions/", list_permissions, name="list-permissions"),
path("token/", TokenObtainPairView.as_view(), name="token_obtain_pair"),
path("token/refresh/", TokenRefreshView.as_view(), name="token_refresh"),
path('permissions/', list_permissions, name='list-permissions'),
path('token/', jwt_views.TokenObtainPairView.as_view(), name='token_obtain_pair'),
path('token/refresh/', jwt_views.TokenRefreshView.as_view(), name='token_refresh'),
]
# 🔹 Include ViewSet routes


@@ -2,110 +2,16 @@ from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import Group, Permission
from django.http import JsonResponse, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.decorators import api_view, permission_classes, action
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from rest_framework import viewsets, status
from rest_framework import viewsets
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
import json
from .permissions import IsAdmin, Authenticated
from dispatcharr.utils import network_access_allowed
from .models import User
from .serializers import UserSerializer, GroupSerializer, PermissionSerializer
from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView
class TokenObtainPairView(TokenObtainPairView):
def post(self, request, *args, **kwargs):
# Custom logic here
if not network_access_allowed(request, "UI"):
# Log blocked login attempt due to network restrictions
from core.utils import log_system_event
username = request.data.get("username", 'unknown')
client_ip = request.META.get('REMOTE_ADDR', 'unknown')
user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')
log_system_event(
event_type='login_failed',
user=username,
client_ip=client_ip,
user_agent=user_agent,
reason='Network access denied',
)
return Response({"error": "Forbidden"}, status=status.HTTP_403_FORBIDDEN)
# Get the response from the parent class first
username = request.data.get("username")
# Log login attempt
from core.utils import log_system_event
client_ip = request.META.get('REMOTE_ADDR', 'unknown')
user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')
try:
response = super().post(request, *args, **kwargs)
# If login was successful, update last_login and log success
if response.status_code == 200:
if username:
from django.utils import timezone
try:
user = User.objects.get(username=username)
user.last_login = timezone.now()
user.save(update_fields=['last_login'])
# Log successful login
log_system_event(
event_type='login_success',
user=username,
client_ip=client_ip,
user_agent=user_agent,
)
except User.DoesNotExist:
pass # User doesn't exist, but login somehow succeeded
else:
# Log failed login attempt
log_system_event(
event_type='login_failed',
user=username or 'unknown',
client_ip=client_ip,
user_agent=user_agent,
reason='Invalid credentials',
)
return response
except Exception as e:
# If parent class raises an exception (e.g., validation error), log failed attempt
log_system_event(
event_type='login_failed',
user=username or 'unknown',
client_ip=client_ip,
user_agent=user_agent,
reason=f'Authentication error: {str(e)[:100]}',
)
raise # Re-raise the exception to maintain normal error flow
class TokenRefreshView(TokenRefreshView):
def post(self, request, *args, **kwargs):
# Custom logic here
if not network_access_allowed(request, "UI"):
# Log blocked token refresh attempt due to network restrictions
from core.utils import log_system_event
client_ip = request.META.get('REMOTE_ADDR', 'unknown')
user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')
log_system_event(
event_type='login_failed',
user='token_refresh',
client_ip=client_ip,
user_agent=user_agent,
reason='Network access denied (token refresh)',
)
return Response({"error": "Unauthorized"}, status=status.HTTP_403_FORBIDDEN)
return super().post(request, *args, **kwargs)
@csrf_exempt # In production, consider CSRF protection strategies or ensure this endpoint is only accessible when no superuser exists.
def initialize_superuser(request):
@@ -120,114 +26,56 @@ def initialize_superuser(request):
password = data.get("password")
email = data.get("email", "")
if not username or not password:
return JsonResponse(
{"error": "Username and password are required."}, status=400
)
return JsonResponse({"error": "Username and password are required."}, status=400)
# Create the superuser
User.objects.create_superuser(
username=username, password=password, email=email, user_level=10
)
User.objects.create_superuser(username=username, password=password, email=email)
return JsonResponse({"superuser_exists": True})
except Exception as e:
return JsonResponse({"error": str(e)}, status=500)
# For GET requests, indicate no superuser exists
return JsonResponse({"superuser_exists": False})
# 🔹 1) Authentication APIs
class AuthViewSet(viewsets.ViewSet):
"""Handles user login and logout"""
def get_permissions(self):
"""
Login doesn't require auth, but logout does
"""
if self.action == 'logout':
from rest_framework.permissions import IsAuthenticated
return [IsAuthenticated()]
return []
@swagger_auto_schema(
operation_description="Authenticate and log in a user",
request_body=openapi.Schema(
type=openapi.TYPE_OBJECT,
required=["username", "password"],
required=['username', 'password'],
properties={
"username": openapi.Schema(type=openapi.TYPE_STRING),
"password": openapi.Schema(
type=openapi.TYPE_STRING, format=openapi.FORMAT_PASSWORD
),
'username': openapi.Schema(type=openapi.TYPE_STRING),
'password': openapi.Schema(type=openapi.TYPE_STRING, format=openapi.FORMAT_PASSWORD)
},
),
responses={200: "Login successful", 400: "Invalid credentials"},
)
def login(self, request):
"""Logs in a user and returns user details"""
username = request.data.get("username")
password = request.data.get("password")
username = request.data.get('username')
password = request.data.get('password')
user = authenticate(request, username=username, password=password)
# Get client info for logging
from core.utils import log_system_event
client_ip = request.META.get('REMOTE_ADDR', 'unknown')
user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')
if user:
login(request, user)
# Update last_login timestamp
from django.utils import timezone
user.last_login = timezone.now()
user.save(update_fields=['last_login'])
# Log successful login
log_system_event(
event_type='login_success',
user=username,
client_ip=client_ip,
user_agent=user_agent,
)
return Response(
{
"message": "Login successful",
"user": {
"id": user.id,
"username": user.username,
"email": user.email,
"groups": list(user.groups.values_list("name", flat=True)),
},
return Response({
"message": "Login successful",
"user": {
"id": user.id,
"username": user.username,
"email": user.email,
"groups": list(user.groups.values_list('name', flat=True))
}
)
# Log failed login attempt
log_system_event(
event_type='login_failed',
user=username or 'unknown',
client_ip=client_ip,
user_agent=user_agent,
reason='Invalid credentials',
)
})
return Response({"error": "Invalid credentials"}, status=400)
@swagger_auto_schema(
operation_description="Log out the current user",
responses={200: "Logout successful"},
responses={200: "Logout successful"}
)
def logout(self, request):
"""Logs out the authenticated user"""
# Log logout event before actually logging out
from core.utils import log_system_event
username = request.user.username if request.user and request.user.is_authenticated else 'unknown'
client_ip = request.META.get('REMOTE_ADDR', 'unknown')
user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')
log_system_event(
event_type='logout',
user=username,
client_ip=client_ip,
user_agent=user_agent,
)
logout(request)
return Response({"message": "Logout successful"})
@@ -235,19 +83,13 @@ class AuthViewSet(viewsets.ViewSet):
# 🔹 2) User Management APIs
class UserViewSet(viewsets.ModelViewSet):
"""Handles CRUD operations for Users"""
queryset = User.objects.all().prefetch_related('channel_profiles')
queryset = User.objects.all()
serializer_class = UserSerializer
def get_permissions(self):
if self.action == "me":
return [Authenticated()]
return [IsAdmin()]
@swagger_auto_schema(
operation_description="Retrieve a list of users",
responses={200: UserSerializer(many=True)},
)
def list(self, request, *args, **kwargs):
return super().list(request, *args, **kwargs)
@ -268,28 +110,17 @@ class UserViewSet(viewsets.ModelViewSet):
def destroy(self, request, *args, **kwargs):
return super().destroy(request, *args, **kwargs)
@swagger_auto_schema(
method="get",
operation_description="Get active user information",
)
@action(detail=False, methods=["get"], url_path="me")
def me(self, request):
user = request.user
serializer = UserSerializer(user)
return Response(serializer.data)
# 🔹 3) Group Management APIs
class GroupViewSet(viewsets.ModelViewSet):
"""Handles CRUD operations for Groups"""
queryset = Group.objects.all()
serializer_class = GroupSerializer
permission_classes = [Authenticated]
@swagger_auto_schema(
operation_description="Retrieve a list of groups",
responses={200: GroupSerializer(many=True)},
)
def list(self, request, *args, **kwargs):
return super().list(request, *args, **kwargs)
@ -313,12 +144,12 @@ class GroupViewSet(viewsets.ModelViewSet):
# 🔹 4) Permissions List API
@swagger_auto_schema(
method="get",
method='get',
operation_description="Retrieve a list of all permissions",
responses={200: PermissionSerializer(many=True)},
)
@api_view(["GET"])
@permission_classes([Authenticated])
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def list_permissions(request):
"""Returns a list of all available permissions"""
permissions = Permission.objects.all()

View file

@ -1,7 +1,6 @@
from django.apps import AppConfig
class AccountsConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "apps.accounts"
default_auto_field = 'django.db.models.BigAutoField'
name = 'apps.accounts'
verbose_name = "Accounts & Authentication"

View file

@ -1,43 +0,0 @@
# Generated by Django 5.1.6 on 2025-05-18 15:47
from django.db import migrations, models
def set_user_level_to_10(apps, schema_editor):
User = apps.get_model("accounts", "User")
User.objects.update(user_level=10)
class Migration(migrations.Migration):
dependencies = [
("accounts", "0001_initial"),
("dispatcharr_channels", "0021_channel_user_level"),
]
operations = [
migrations.RemoveField(
model_name="user",
name="channel_groups",
),
migrations.AddField(
model_name="user",
name="channel_profiles",
field=models.ManyToManyField(
blank=True,
related_name="users",
to="dispatcharr_channels.channelprofile",
),
),
migrations.AddField(
model_name="user",
name="user_level",
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name="user",
name="custom_properties",
field=models.TextField(blank=True, null=True),
),
migrations.RunPython(set_user_level_to_10),
]

View file

@ -1,18 +0,0 @@
# Generated by Django 5.2.4 on 2025-09-02 14:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_remove_user_channel_groups_user_channel_profiles_and_more'),
]
operations = [
migrations.AlterField(
model_name='user',
name='custom_properties',
field=models.JSONField(blank=True, default=dict, null=True),
),
]

View file

@ -2,26 +2,17 @@
from django.db import models
from django.contrib.auth.models import AbstractUser, Permission
class User(AbstractUser):
"""
Custom user model for Dispatcharr.
Inherits from Django's AbstractUser to add additional fields if needed.
"""
class UserLevel(models.IntegerChoices):
STREAMER = 0, "Streamer"
STANDARD = 1, "Standard User"
ADMIN = 10, "Admin"
avatar_config = models.JSONField(default=dict, blank=True, null=True)
channel_profiles = models.ManyToManyField(
"dispatcharr_channels.ChannelProfile",
blank=True,
related_name="users",
)
user_level = models.IntegerField(default=UserLevel.STREAMER)
custom_properties = models.JSONField(default=dict, blank=True, null=True)
def __str__(self):
return self.username

View file

@ -1,56 +0,0 @@
from rest_framework.permissions import IsAuthenticated
from .models import User
from dispatcharr.utils import network_access_allowed
class Authenticated(IsAuthenticated):
def has_permission(self, request, view):
is_authenticated = super().has_permission(request, view)
network_allowed = network_access_allowed(request, "UI")
return is_authenticated and network_allowed
class IsStandardUser(Authenticated):
def has_permission(self, request, view):
if not super().has_permission(request, view):
return False
return request.user and request.user.user_level >= User.UserLevel.STANDARD
class IsAdmin(Authenticated):
def has_permission(self, request, view):
if not super().has_permission(request, view):
return False
return request.user.user_level >= 10
class IsOwnerOfObject(Authenticated):
def has_object_permission(self, request, view, obj):
if not super().has_permission(request, view):
return False
is_admin = IsAdmin().has_permission(request, view)
is_owner = request.user in obj.users.all()
return is_admin or is_owner
permission_classes_by_action = {
"list": [IsStandardUser],
"create": [IsAdmin],
"retrieve": [IsStandardUser],
"update": [IsAdmin],
"partial_update": [IsAdmin],
"destroy": [IsAdmin],
}
permission_classes_by_method = {
"GET": [IsStandardUser],
"POST": [IsAdmin],
"PATCH": [IsAdmin],
"PUT": [IsAdmin],
"DELETE": [IsAdmin],
}
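The permission_classes_by_action and permission_classes_by_method maps above are plain lookup tables; a minimal sketch (hypothetical ExampleViewSet, not part of this diff) of consuming the per-action map from a DRF view's get_permissions:

from rest_framework import viewsets
from apps.accounts.permissions import permission_classes_by_action, IsStandardUser

class ExampleViewSet(viewsets.ModelViewSet):
    def get_permissions(self):
        # Fall back to IsStandardUser for actions not listed in the map
        classes = permission_classes_by_action.get(self.action, [IsStandardUser])
        return [cls() for cls in classes]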

View file

@ -1,14 +1,13 @@
from rest_framework import serializers
from django.contrib.auth.models import Group, Permission
from .models import User
from apps.channels.models import ChannelProfile
# 🔹 Fix for Permission serialization
class PermissionSerializer(serializers.ModelSerializer):
class Meta:
model = Permission
fields = ["id", "name", "codename"]
fields = ['id', 'name', 'codename']
# 🔹 Fix for Group serialization
@ -19,61 +18,15 @@ class GroupSerializer(serializers.ModelSerializer):
class Meta:
model = Group
fields = ["id", "name", "permissions"]
fields = ['id', 'name', 'permissions']
# 🔹 Fix for User serialization
class UserSerializer(serializers.ModelSerializer):
password = serializers.CharField(write_only=True)
channel_profiles = serializers.PrimaryKeyRelatedField(
queryset=ChannelProfile.objects.all(), many=True, required=False
)
groups = serializers.SlugRelatedField(
many=True, queryset=Group.objects.all(), slug_field="name"
) # ✅ Fix ManyToMany `_meta` error
class Meta:
model = User
fields = [
"id",
"username",
"email",
"user_level",
"password",
"channel_profiles",
"custom_properties",
"avatar_config",
"is_active",
"is_staff",
"is_superuser",
"last_login",
"date_joined",
"first_name",
"last_name",
]
def create(self, validated_data):
channel_profiles = validated_data.pop("channel_profiles", [])
user = User(**validated_data)
user.set_password(validated_data["password"])
user.is_active = True
user.save()
user.channel_profiles.set(channel_profiles)
return user
def update(self, instance, validated_data):
password = validated_data.pop("password", None)
channel_profiles = validated_data.pop("channel_profiles", None)
for attr, value in validated_data.items():
setattr(instance, attr, value)
if password:
instance.set_password(password)
instance.save()
if channel_profiles is not None:
instance.channel_profiles.set(channel_profiles)
return instance

View file

@ -5,7 +5,6 @@ from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import User
@receiver(post_save, sender=User)
def handle_new_user(sender, instance, created, **kwargs):
if created:

View file

@ -1,10 +1,11 @@
from django.urls import path, include, re_path
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from rest_framework.permissions import AllowAny
app_name = 'api'
# Configure Swagger Schema
schema_view = get_schema_view(
openapi.Info(
title="Dispatcharr API",
@ -25,9 +26,6 @@ urlpatterns = [
path('hdhr/', include(('apps.hdhr.api_urls', 'hdhr'), namespace='hdhr')),
path('m3u/', include(('apps.m3u.api_urls', 'm3u'), namespace='m3u')),
path('core/', include(('core.api_urls', 'core'), namespace='core')),
path('plugins/', include(('apps.plugins.api_urls', 'plugins'), namespace='plugins')),
path('vod/', include(('apps.vod.api_urls', 'vod'), namespace='vod')),
path('backups/', include(('apps.backups.api_urls', 'backups'), namespace='backups')),
# path('output/', include(('apps.output.api_urls', 'output'), namespace='output')),
#path('player/', include(('apps.player.api_urls', 'player'), namespace='player')),
#path('settings/', include(('apps.settings.api_urls', 'settings'), namespace='settings')),
@ -36,7 +34,7 @@ urlpatterns = [
# Swagger Documentation api_urls
re_path(r'^swagger/?$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
path('swagger.json', schema_view.without_ui(cache_timeout=0), name='schema-json'),
]

View file

@ -1,18 +0,0 @@
from django.urls import path
from . import api_views
app_name = "backups"
urlpatterns = [
path("", api_views.list_backups, name="backup-list"),
path("create/", api_views.create_backup, name="backup-create"),
path("upload/", api_views.upload_backup, name="backup-upload"),
path("schedule/", api_views.get_schedule, name="backup-schedule-get"),
path("schedule/update/", api_views.update_schedule, name="backup-schedule-update"),
path("status/<str:task_id>/", api_views.backup_status, name="backup-status"),
path("<str:filename>/download-token/", api_views.get_download_token, name="backup-download-token"),
path("<str:filename>/download/", api_views.download_backup, name="backup-download"),
path("<str:filename>/delete/", api_views.delete_backup, name="backup-delete"),
path("<str:filename>/restore/", api_views.restore_backup, name="backup-restore"),
]

View file

@ -1,364 +0,0 @@
import hashlib
import hmac
import logging
import os
from pathlib import Path
from celery.result import AsyncResult
from django.conf import settings
from django.http import HttpResponse, StreamingHttpResponse, Http404
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes, parser_classes
from rest_framework.permissions import IsAdminUser, AllowAny
from rest_framework.parsers import MultiPartParser, FormParser
from rest_framework.response import Response
from . import services
from .tasks import create_backup_task, restore_backup_task
from .scheduler import get_schedule_settings, update_schedule_settings
logger = logging.getLogger(__name__)
def _generate_task_token(task_id: str) -> str:
"""Generate a signed token for task status access without auth."""
secret = settings.SECRET_KEY.encode()
return hmac.new(secret, task_id.encode(), hashlib.sha256).hexdigest()[:32]
def _verify_task_token(task_id: str, token: str) -> bool:
"""Verify a task token is valid."""
expected = _generate_task_token(task_id)
return hmac.compare_digest(expected, token)
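Both helpers are a plain HMAC-SHA256 over the task id, truncated to 32 hex characters; a minimal round-trip sketch, assuming Django settings (and SECRET_KEY) are configured and using an example task id:

token = _generate_task_token("3f2c9a1e-example-task-id")      # example id, not a real task
assert _verify_task_token("3f2c9a1e-example-task-id", token)  # same id -> accepted
assert not _verify_task_token("another-task-id", token)       # different id -> rejected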
@api_view(["GET"])
@permission_classes([IsAdminUser])
def list_backups(request):
"""List all available backup files."""
try:
backups = services.list_backups()
return Response(backups, status=status.HTTP_200_OK)
except Exception as e:
return Response(
{"detail": f"Failed to list backups: {str(e)}"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
@api_view(["POST"])
@permission_classes([IsAdminUser])
def create_backup(request):
"""Create a new backup (async via Celery)."""
try:
task = create_backup_task.delay()
return Response(
{
"detail": "Backup started",
"task_id": task.id,
"task_token": _generate_task_token(task.id),
},
status=status.HTTP_202_ACCEPTED,
)
except Exception as e:
return Response(
{"detail": f"Failed to start backup: {str(e)}"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
@api_view(["GET"])
@permission_classes([AllowAny])
def backup_status(request, task_id):
"""Check the status of a backup/restore task.
Requires either:
- Valid admin authentication, OR
- Valid task_token query parameter
"""
# Check for token-based auth (for restore when session is invalidated)
token = request.query_params.get("token")
if token:
if not _verify_task_token(task_id, token):
return Response(
{"detail": "Invalid task token"},
status=status.HTTP_403_FORBIDDEN,
)
else:
# Fall back to admin auth check
if not request.user.is_authenticated or not request.user.is_staff:
return Response(
{"detail": "Authentication required"},
status=status.HTTP_401_UNAUTHORIZED,
)
try:
result = AsyncResult(task_id)
if result.ready():
task_result = result.get()
if task_result.get("status") == "completed":
return Response({
"state": "completed",
"result": task_result,
})
else:
return Response({
"state": "failed",
"error": task_result.get("error", "Unknown error"),
})
elif result.failed():
return Response({
"state": "failed",
"error": str(result.result),
})
else:
return Response({
"state": result.state.lower(),
})
except Exception as e:
return Response(
{"detail": f"Failed to get task status: {str(e)}"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
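A caller that kicked off a backup or restore can poll this endpoint with the returned task_token instead of a session; a hypothetical polling client (the /api/backups/ prefix, host, and requests.Session are assumptions, not part of this diff):

import time
import requests

def wait_for_task(base_url, task_id, task_token, session=None):
    session = session or requests.Session()
    url = f"{base_url}/api/backups/status/{task_id}/"
    while True:
        state = session.get(url, params={"token": task_token}).json()
        if state.get("state") in ("completed", "failed"):
            return state
        time.sleep(2)  # keep polling until the Celery task finishes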
@api_view(["GET"])
@permission_classes([IsAdminUser])
def get_download_token(request, filename):
"""Get a signed token for downloading a backup file."""
try:
# Security: prevent path traversal
if ".." in filename or "/" in filename or "\\" in filename:
raise Http404("Invalid filename")
backup_dir = services.get_backup_dir()
backup_file = backup_dir / filename
if not backup_file.exists():
raise Http404("Backup file not found")
token = _generate_task_token(filename)
return Response({"token": token})
except Http404:
raise
except Exception as e:
return Response(
{"detail": f"Failed to generate token: {str(e)}"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
@api_view(["GET"])
@permission_classes([AllowAny])
def download_backup(request, filename):
"""Download a backup file.
Requires either:
- Valid admin authentication, OR
- Valid download_token query parameter
"""
# Check for token-based auth (avoids CORS preflight issues)
token = request.query_params.get("token")
if token:
if not _verify_task_token(filename, token):
return Response(
{"detail": "Invalid download token"},
status=status.HTTP_403_FORBIDDEN,
)
else:
# Fall back to admin auth check
if not request.user.is_authenticated or not request.user.is_staff:
return Response(
{"detail": "Authentication required"},
status=status.HTTP_401_UNAUTHORIZED,
)
try:
# Security: prevent path traversal by checking for suspicious characters
if ".." in filename or "/" in filename or "\\" in filename:
raise Http404("Invalid filename")
backup_dir = services.get_backup_dir()
backup_file = (backup_dir / filename).resolve()
# Security: ensure the resolved path is still within backup_dir
if not str(backup_file).startswith(str(backup_dir.resolve())):
raise Http404("Invalid filename")
if not backup_file.exists() or not backup_file.is_file():
raise Http404("Backup file not found")
file_size = backup_file.stat().st_size
# Use X-Accel-Redirect for nginx (AIO container) - nginx serves file directly
# Fall back to streaming for non-nginx deployments
use_nginx_accel = os.environ.get("USE_NGINX_ACCEL", "").lower() == "true"
logger.info(f"[DOWNLOAD] File: {filename}, Size: {file_size}, USE_NGINX_ACCEL: {use_nginx_accel}")
if use_nginx_accel:
# X-Accel-Redirect: Django returns immediately, nginx serves file
logger.info(f"[DOWNLOAD] Using X-Accel-Redirect: /protected-backups/{filename}")
response = HttpResponse()
response["X-Accel-Redirect"] = f"/protected-backups/{filename}"
response["Content-Type"] = "application/zip"
response["Content-Length"] = file_size
response["Content-Disposition"] = f'attachment; filename="{filename}"'
return response
else:
# Streaming fallback for non-nginx deployments
logger.info(f"[DOWNLOAD] Using streaming fallback (no nginx)")
def file_iterator(file_path, chunk_size=2 * 1024 * 1024):
with open(file_path, "rb") as f:
while chunk := f.read(chunk_size):
yield chunk
response = StreamingHttpResponse(
file_iterator(backup_file),
content_type="application/zip",
)
response["Content-Length"] = file_size
response["Content-Disposition"] = f'attachment; filename="{filename}"'
return response
except Http404:
raise
except Exception as e:
return Response(
{"detail": f"Download failed: {str(e)}"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
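A hypothetical admin-side download client matching the two endpoints above: fetch a signed token first, then stream the archive with it (the /api/backups/ prefix and an already-authenticated requests.Session are assumptions):

import requests

def fetch_backup(base_url, filename, session, dest_path):
    token = session.get(f"{base_url}/api/backups/{filename}/download-token/").json()["token"]
    with session.get(f"{base_url}/api/backups/{filename}/download/",
                     params={"token": token}, stream=True) as resp:
        resp.raise_for_status()
        with open(dest_path, "wb") as out:
            for chunk in resp.iter_content(chunk_size=2 * 1024 * 1024):
                out.write(chunk)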
@api_view(["DELETE"])
@permission_classes([IsAdminUser])
def delete_backup(request, filename):
"""Delete a backup file."""
try:
# Security: prevent path traversal
if ".." in filename or "/" in filename or "\\" in filename:
raise Http404("Invalid filename")
services.delete_backup(filename)
return Response(
{"detail": "Backup deleted successfully"},
status=status.HTTP_204_NO_CONTENT,
)
except FileNotFoundError:
raise Http404("Backup file not found")
except Exception as e:
return Response(
{"detail": f"Delete failed: {str(e)}"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
@api_view(["POST"])
@permission_classes([IsAdminUser])
@parser_classes([MultiPartParser, FormParser])
def upload_backup(request):
"""Upload a backup file for restoration."""
uploaded = request.FILES.get("file")
if not uploaded:
return Response(
{"detail": "No file uploaded"},
status=status.HTTP_400_BAD_REQUEST,
)
try:
backup_dir = services.get_backup_dir()
filename = uploaded.name or "uploaded-backup.zip"
# Ensure unique filename
backup_file = backup_dir / filename
counter = 1
while backup_file.exists():
name_parts = filename.rsplit(".", 1)
if len(name_parts) == 2:
backup_file = backup_dir / f"{name_parts[0]}-{counter}.{name_parts[1]}"
else:
backup_file = backup_dir / f"{filename}-{counter}"
counter += 1
# Save uploaded file
with backup_file.open("wb") as f:
for chunk in uploaded.chunks():
f.write(chunk)
return Response(
{
"detail": "Backup uploaded successfully",
"filename": backup_file.name,
},
status=status.HTTP_201_CREATED,
)
except Exception as e:
return Response(
{"detail": f"Upload failed: {str(e)}"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
@api_view(["POST"])
@permission_classes([IsAdminUser])
def restore_backup(request, filename):
"""Restore from a backup file (async via Celery). WARNING: This will flush the database!"""
try:
# Security: prevent path traversal
if ".." in filename or "/" in filename or "\\" in filename:
raise Http404("Invalid filename")
backup_dir = services.get_backup_dir()
backup_file = backup_dir / filename
if not backup_file.exists():
raise Http404("Backup file not found")
task = restore_backup_task.delay(filename)
return Response(
{
"detail": "Restore started",
"task_id": task.id,
"task_token": _generate_task_token(task.id),
},
status=status.HTTP_202_ACCEPTED,
)
except Http404:
raise
except Exception as e:
return Response(
{"detail": f"Failed to start restore: {str(e)}"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
@api_view(["GET"])
@permission_classes([IsAdminUser])
def get_schedule(request):
"""Get backup schedule settings."""
try:
settings = get_schedule_settings()
return Response(settings)
except Exception as e:
return Response(
{"detail": f"Failed to get schedule: {str(e)}"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
@api_view(["PUT"])
@permission_classes([IsAdminUser])
def update_schedule(request):
"""Update backup schedule settings."""
try:
settings = update_schedule_settings(request.data)
return Response(settings)
except ValueError as e:
return Response(
{"detail": str(e)},
status=status.HTTP_400_BAD_REQUEST,
)
except Exception as e:
return Response(
{"detail": f"Failed to update schedule: {str(e)}"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)

View file

@ -1,7 +0,0 @@
from django.apps import AppConfig
class BackupsConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "apps.backups"
verbose_name = "Backups"

View file

View file

@ -1,202 +0,0 @@
import json
import logging
from django_celery_beat.models import PeriodicTask, CrontabSchedule
from core.models import CoreSettings
logger = logging.getLogger(__name__)
BACKUP_SCHEDULE_TASK_NAME = "backup-scheduled-task"
DEFAULTS = {
"schedule_enabled": True,
"schedule_frequency": "daily",
"schedule_time": "03:00",
"schedule_day_of_week": 0, # Sunday
"retention_count": 3,
"schedule_cron_expression": "",
}
def _get_backup_settings():
"""Get all backup settings from CoreSettings grouped JSON."""
try:
settings_obj = CoreSettings.objects.get(key="backup_settings")
return settings_obj.value if isinstance(settings_obj.value, dict) else DEFAULTS.copy()
except CoreSettings.DoesNotExist:
return DEFAULTS.copy()
def _update_backup_settings(updates: dict) -> None:
"""Update backup settings in the grouped JSON."""
obj, created = CoreSettings.objects.get_or_create(
key="backup_settings",
defaults={"name": "Backup Settings", "value": DEFAULTS.copy()}
)
current = obj.value if isinstance(obj.value, dict) else {}
current.update(updates)
obj.value = current
obj.save()
def get_schedule_settings() -> dict:
"""Get all backup schedule settings."""
settings = _get_backup_settings()
return {
"enabled": bool(settings.get("schedule_enabled", DEFAULTS["schedule_enabled"])),
"frequency": str(settings.get("schedule_frequency", DEFAULTS["schedule_frequency"])),
"time": str(settings.get("schedule_time", DEFAULTS["schedule_time"])),
"day_of_week": int(settings.get("schedule_day_of_week", DEFAULTS["schedule_day_of_week"])),
"retention_count": int(settings.get("retention_count", DEFAULTS["retention_count"])),
"cron_expression": str(settings.get("schedule_cron_expression", DEFAULTS["schedule_cron_expression"])),
}
def update_schedule_settings(data: dict) -> dict:
"""Update backup schedule settings and sync the PeriodicTask."""
# Validate
if "frequency" in data and data["frequency"] not in ("daily", "weekly"):
raise ValueError("frequency must be 'daily' or 'weekly'")
if "time" in data:
try:
hour, minute = data["time"].split(":")
int(hour)
int(minute)
except (ValueError, AttributeError):
raise ValueError("time must be in HH:MM format")
if "day_of_week" in data:
day = int(data["day_of_week"])
if day < 0 or day > 6:
raise ValueError("day_of_week must be 0-6 (Sunday-Saturday)")
if "retention_count" in data:
count = int(data["retention_count"])
if count < 0:
raise ValueError("retention_count must be >= 0")
# Update settings with proper key names
updates = {}
if "enabled" in data:
updates["schedule_enabled"] = bool(data["enabled"])
if "frequency" in data:
updates["schedule_frequency"] = str(data["frequency"])
if "time" in data:
updates["schedule_time"] = str(data["time"])
if "day_of_week" in data:
updates["schedule_day_of_week"] = int(data["day_of_week"])
if "retention_count" in data:
updates["retention_count"] = int(data["retention_count"])
if "cron_expression" in data:
updates["schedule_cron_expression"] = str(data["cron_expression"])
_update_backup_settings(updates)
# Sync the periodic task
_sync_periodic_task()
return get_schedule_settings()
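The settings live as one grouped JSON document under the CoreSettings key "backup_settings", using the same keys as DEFAULTS above; a sketch of the stored shape (example values only) and the equivalent call through the public helper:

example_backup_settings = {
    "schedule_enabled": True,
    "schedule_frequency": "weekly",
    "schedule_time": "03:00",
    "schedule_day_of_week": 0,   # Sunday
    "retention_count": 5,
    "schedule_cron_expression": "",
}

# Equivalent update via the helper above (validates input and re-syncs the PeriodicTask):
update_schedule_settings({"frequency": "weekly", "time": "03:00", "day_of_week": 0, "retention_count": 5})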
def _sync_periodic_task() -> None:
"""Create, update, or delete the scheduled backup task based on settings."""
settings = get_schedule_settings()
if not settings["enabled"]:
# Delete the task if it exists
task = PeriodicTask.objects.filter(name=BACKUP_SCHEDULE_TASK_NAME).first()
if task:
old_crontab = task.crontab
task.delete()
_cleanup_orphaned_crontab(old_crontab)
logger.info("Backup schedule disabled, removed periodic task")
return
# Get old crontab before creating new one
old_crontab = None
try:
old_task = PeriodicTask.objects.get(name=BACKUP_SCHEDULE_TASK_NAME)
old_crontab = old_task.crontab
except PeriodicTask.DoesNotExist:
pass
# Check if using cron expression (advanced mode)
if settings["cron_expression"]:
# Parse cron expression: "minute hour day month weekday"
try:
parts = settings["cron_expression"].split()
if len(parts) != 5:
raise ValueError("Cron expression must have 5 parts: minute hour day month weekday")
minute, hour, day_of_month, month_of_year, day_of_week = parts
crontab, _ = CrontabSchedule.objects.get_or_create(
minute=minute,
hour=hour,
day_of_week=day_of_week,
day_of_month=day_of_month,
month_of_year=month_of_year,
timezone=CoreSettings.get_system_time_zone(),
)
except Exception as e:
logger.error(f"Invalid cron expression '{settings['cron_expression']}': {e}")
raise ValueError(f"Invalid cron expression: {e}")
else:
# Use simple frequency-based scheduling
# Parse time
hour, minute = settings["time"].split(":")
# Build crontab based on frequency
system_tz = CoreSettings.get_system_time_zone()
if settings["frequency"] == "daily":
crontab, _ = CrontabSchedule.objects.get_or_create(
minute=minute,
hour=hour,
day_of_week="*",
day_of_month="*",
month_of_year="*",
timezone=system_tz,
)
else: # weekly
crontab, _ = CrontabSchedule.objects.get_or_create(
minute=minute,
hour=hour,
day_of_week=str(settings["day_of_week"]),
day_of_month="*",
month_of_year="*",
timezone=system_tz,
)
# Create or update the periodic task
task, created = PeriodicTask.objects.update_or_create(
name=BACKUP_SCHEDULE_TASK_NAME,
defaults={
"task": "apps.backups.tasks.scheduled_backup_task",
"crontab": crontab,
"enabled": True,
"kwargs": json.dumps({"retention_count": settings["retention_count"]}),
},
)
# Clean up old crontab if it changed and is orphaned
if old_crontab and old_crontab.id != crontab.id:
_cleanup_orphaned_crontab(old_crontab)
action = "Created" if created else "Updated"
logger.info(f"{action} backup schedule: {settings['frequency']} at {settings['time']}")
def _cleanup_orphaned_crontab(crontab_schedule):
"""Delete old CrontabSchedule if no other tasks are using it."""
if crontab_schedule is None:
return
# Check if any other tasks are using this crontab
if PeriodicTask.objects.filter(crontab=crontab_schedule).exists():
logger.debug(f"CrontabSchedule {crontab_schedule.id} still in use, not deleting")
return
logger.debug(f"Cleaning up orphaned CrontabSchedule: {crontab_schedule.id}")
crontab_schedule.delete()

View file

@ -1,350 +0,0 @@
import datetime
import json
import os
import shutil
import subprocess
import tempfile
from pathlib import Path
from zipfile import ZipFile, ZIP_DEFLATED
import logging
import pytz
from django.conf import settings
from core.models import CoreSettings
logger = logging.getLogger(__name__)
def get_backup_dir() -> Path:
"""Get the backup directory, creating it if necessary."""
backup_dir = Path(settings.BACKUP_ROOT)
backup_dir.mkdir(parents=True, exist_ok=True)
return backup_dir
def _is_postgresql() -> bool:
"""Check if we're using PostgreSQL."""
return settings.DATABASES["default"]["ENGINE"] == "django.db.backends.postgresql"
def _get_pg_env() -> dict:
"""Get environment variables for PostgreSQL commands."""
db_config = settings.DATABASES["default"]
env = os.environ.copy()
env["PGPASSWORD"] = db_config.get("PASSWORD", "")
return env
def _get_pg_args() -> list[str]:
"""Get common PostgreSQL command arguments."""
db_config = settings.DATABASES["default"]
return [
"-h", db_config.get("HOST", "localhost"),
"-p", str(db_config.get("PORT", 5432)),
"-U", db_config.get("USER", "postgres"),
"-d", db_config.get("NAME", "dispatcharr"),
]
def _dump_postgresql(output_file: Path) -> None:
"""Dump PostgreSQL database using pg_dump."""
logger.info("Dumping PostgreSQL database with pg_dump...")
cmd = [
"pg_dump",
*_get_pg_args(),
"-Fc", # Custom format for pg_restore
"-v", # Verbose
"-f", str(output_file),
]
result = subprocess.run(
cmd,
env=_get_pg_env(),
capture_output=True,
text=True,
)
if result.returncode != 0:
logger.error(f"pg_dump failed: {result.stderr}")
raise RuntimeError(f"pg_dump failed: {result.stderr}")
logger.debug(f"pg_dump output: {result.stderr}")
def _clean_postgresql_schema() -> None:
"""Drop and recreate the public schema to ensure a completely clean restore."""
logger.info("[PG_CLEAN] Dropping and recreating public schema...")
# Commands to drop and recreate schema
sql_commands = "DROP SCHEMA IF EXISTS public CASCADE; CREATE SCHEMA public; GRANT ALL ON SCHEMA public TO public;"
cmd = [
"psql",
*_get_pg_args(),
"-c", sql_commands,
]
result = subprocess.run(
cmd,
env=_get_pg_env(),
capture_output=True,
text=True,
)
if result.returncode != 0:
logger.error(f"[PG_CLEAN] Failed to clean schema: {result.stderr}")
raise RuntimeError(f"Failed to clean PostgreSQL schema: {result.stderr}")
logger.info("[PG_CLEAN] Schema cleaned successfully")
def _restore_postgresql(dump_file: Path) -> None:
"""Restore PostgreSQL database using pg_restore."""
logger.info("[PG_RESTORE] Starting pg_restore...")
logger.info(f"[PG_RESTORE] Dump file: {dump_file}")
# Drop and recreate schema to ensure a completely clean restore
_clean_postgresql_schema()
pg_args = _get_pg_args()
logger.info(f"[PG_RESTORE] Connection args: {pg_args}")
cmd = [
"pg_restore",
"--no-owner", # Skip ownership commands (we already created schema)
*pg_args,
"-v", # Verbose
str(dump_file),
]
logger.info(f"[PG_RESTORE] Running command: {' '.join(cmd)}")
result = subprocess.run(
cmd,
env=_get_pg_env(),
capture_output=True,
text=True,
)
logger.info(f"[PG_RESTORE] Return code: {result.returncode}")
# pg_restore may return non-zero even on partial success
# Check for actual errors vs warnings
if result.returncode != 0:
# Some errors during restore are expected (e.g., "does not exist" when cleaning)
# Only fail on critical errors
stderr = result.stderr.lower()
if "fatal" in stderr or "could not connect" in stderr:
logger.error(f"[PG_RESTORE] Failed critically: {result.stderr}")
raise RuntimeError(f"pg_restore failed: {result.stderr}")
else:
logger.warning(f"[PG_RESTORE] Completed with warnings: {result.stderr[:500]}...")
logger.info("[PG_RESTORE] Completed successfully")
def _dump_sqlite(output_file: Path) -> None:
"""Dump SQLite database using sqlite3 .backup command."""
logger.info("Dumping SQLite database with sqlite3 .backup...")
db_path = Path(settings.DATABASES["default"]["NAME"])
if not db_path.exists():
raise FileNotFoundError(f"SQLite database not found: {db_path}")
# Use sqlite3 .backup command via stdin for reliable execution
result = subprocess.run(
["sqlite3", str(db_path)],
input=f".backup '{output_file}'\n",
capture_output=True,
text=True,
)
if result.returncode != 0:
logger.error(f"sqlite3 backup failed: {result.stderr}")
raise RuntimeError(f"sqlite3 backup failed: {result.stderr}")
# Verify the backup file was created
if not output_file.exists():
raise RuntimeError("sqlite3 backup failed: output file not created")
logger.info(f"sqlite3 backup completed successfully: {output_file}")
def _restore_sqlite(dump_file: Path) -> None:
"""Restore SQLite database by replacing the database file."""
logger.info("Restoring SQLite database...")
db_path = Path(settings.DATABASES["default"]["NAME"])
backup_current = None
# Backup current database before overwriting
if db_path.exists():
backup_current = db_path.with_suffix(".db.bak")
shutil.copy2(db_path, backup_current)
logger.info(f"Backed up current database to {backup_current}")
# Ensure parent directory exists
db_path.parent.mkdir(parents=True, exist_ok=True)
# The backup file from _dump_sqlite is a complete SQLite database file
# We can simply copy it over the existing database
shutil.copy2(dump_file, db_path)
# Verify the restore worked by checking if sqlite3 can read it
result = subprocess.run(
["sqlite3", str(db_path)],
input=".tables\n",
capture_output=True,
text=True,
)
if result.returncode != 0:
logger.error(f"sqlite3 verification failed: {result.stderr}")
# Try to restore from backup
if backup_current and backup_current.exists():
shutil.copy2(backup_current, db_path)
logger.info("Restored original database from backup")
raise RuntimeError(f"sqlite3 restore verification failed: {result.stderr}")
logger.info("sqlite3 restore completed successfully")
def create_backup() -> Path:
"""
Create a backup archive containing database dump and data directories.
Returns the path to the created backup file.
"""
backup_dir = get_backup_dir()
# Use system timezone for filename (user-friendly), but keep internal timestamps as UTC
system_tz_name = CoreSettings.get_system_time_zone()
try:
system_tz = pytz.timezone(system_tz_name)
now_local = datetime.datetime.now(datetime.UTC).astimezone(system_tz)
timestamp = now_local.strftime("%Y.%m.%d.%H.%M.%S")
except Exception as e:
logger.warning(f"Failed to use system timezone {system_tz_name}: {e}, falling back to UTC")
timestamp = datetime.datetime.now(datetime.UTC).strftime("%Y.%m.%d.%H.%M.%S")
backup_name = f"dispatcharr-backup-{timestamp}.zip"
backup_file = backup_dir / backup_name
logger.info(f"Creating backup: {backup_name}")
with tempfile.TemporaryDirectory(prefix="dispatcharr-backup-") as temp_dir:
temp_path = Path(temp_dir)
# Determine database type and dump accordingly
if _is_postgresql():
db_dump_file = temp_path / "database.dump"
_dump_postgresql(db_dump_file)
db_type = "postgresql"
else:
db_dump_file = temp_path / "database.sqlite3"
_dump_sqlite(db_dump_file)
db_type = "sqlite"
# Create ZIP archive with compression and ZIP64 support for large files
with ZipFile(backup_file, "w", compression=ZIP_DEFLATED, allowZip64=True) as zip_file:
# Add database dump
zip_file.write(db_dump_file, db_dump_file.name)
# Add metadata
metadata = {
"format": "dispatcharr-backup",
"version": 2,
"database_type": db_type,
"database_file": db_dump_file.name,
"created_at": datetime.datetime.now(datetime.UTC).isoformat(),
}
zip_file.writestr("metadata.json", json.dumps(metadata, indent=2))
logger.info(f"Backup created successfully: {backup_file}")
return backup_file
def restore_backup(backup_file: Path) -> None:
"""
Restore from a backup archive.
WARNING: This will overwrite the database!
"""
if not backup_file.exists():
raise FileNotFoundError(f"Backup file not found: {backup_file}")
logger.info(f"Restoring from backup: {backup_file}")
with tempfile.TemporaryDirectory(prefix="dispatcharr-restore-") as temp_dir:
temp_path = Path(temp_dir)
# Extract backup
logger.debug("Extracting backup archive...")
with ZipFile(backup_file, "r") as zip_file:
zip_file.extractall(temp_path)
# Read metadata
metadata_file = temp_path / "metadata.json"
if not metadata_file.exists():
raise ValueError("Invalid backup: missing metadata.json")
with open(metadata_file) as f:
metadata = json.load(f)
# Restore database
_restore_database(temp_path, metadata)
logger.info("Restore completed successfully")
def _restore_database(temp_path: Path, metadata: dict) -> None:
"""Restore database from backup."""
db_type = metadata.get("database_type", "postgresql")
db_file = metadata.get("database_file", "database.dump")
dump_file = temp_path / db_file
if not dump_file.exists():
raise ValueError(f"Invalid backup: missing {db_file}")
current_db_type = "postgresql" if _is_postgresql() else "sqlite"
if db_type != current_db_type:
raise ValueError(
f"Database type mismatch: backup is {db_type}, "
f"but current database is {current_db_type}"
)
if db_type == "postgresql":
_restore_postgresql(dump_file)
else:
_restore_sqlite(dump_file)
def list_backups() -> list[dict]:
"""List all available backup files with metadata."""
backup_dir = get_backup_dir()
backups = []
for backup_file in sorted(backup_dir.glob("dispatcharr-backup-*.zip"), reverse=True):
# Use UTC timezone so frontend can convert to user's local time
created_time = datetime.datetime.fromtimestamp(backup_file.stat().st_mtime, datetime.UTC)
backups.append({
"name": backup_file.name,
"size": backup_file.stat().st_size,
"created": created_time.isoformat(),
})
return backups
def delete_backup(filename: str) -> None:
"""Delete a backup file."""
backup_dir = get_backup_dir()
backup_file = backup_dir / filename
if not backup_file.exists():
raise FileNotFoundError(f"Backup file not found: {filename}")
if not backup_file.is_file():
raise ValueError(f"Invalid backup file: {filename}")
backup_file.unlink()
logger.info(f"Deleted backup: {filename}")

View file

@ -1,106 +0,0 @@
import logging
import traceback
from celery import shared_task
from . import services
logger = logging.getLogger(__name__)
def _cleanup_old_backups(retention_count: int) -> int:
"""Delete old backups, keeping only the most recent N. Returns count deleted."""
if retention_count <= 0:
return 0
backups = services.list_backups()
if len(backups) <= retention_count:
return 0
# Backups are sorted newest first, so delete from the end
to_delete = backups[retention_count:]
deleted = 0
for backup in to_delete:
try:
services.delete_backup(backup["name"])
deleted += 1
logger.info(f"[CLEANUP] Deleted old backup: {backup['name']}")
except Exception as e:
logger.error(f"[CLEANUP] Failed to delete {backup['name']}: {e}")
return deleted
@shared_task(bind=True)
def create_backup_task(self):
"""Celery task to create a backup asynchronously."""
try:
logger.info(f"[BACKUP] Starting backup task {self.request.id}")
backup_file = services.create_backup()
logger.info(f"[BACKUP] Task {self.request.id} completed: {backup_file.name}")
return {
"status": "completed",
"filename": backup_file.name,
"size": backup_file.stat().st_size,
}
except Exception as e:
logger.error(f"[BACKUP] Task {self.request.id} failed: {str(e)}")
logger.error(f"[BACKUP] Traceback: {traceback.format_exc()}")
return {
"status": "failed",
"error": str(e),
}
@shared_task(bind=True)
def restore_backup_task(self, filename: str):
"""Celery task to restore a backup asynchronously."""
try:
logger.info(f"[RESTORE] Starting restore task {self.request.id} for {filename}")
backup_dir = services.get_backup_dir()
backup_file = backup_dir / filename
logger.info(f"[RESTORE] Backup file path: {backup_file}")
services.restore_backup(backup_file)
logger.info(f"[RESTORE] Task {self.request.id} completed successfully")
return {
"status": "completed",
"filename": filename,
}
except Exception as e:
logger.error(f"[RESTORE] Task {self.request.id} failed: {str(e)}")
logger.error(f"[RESTORE] Traceback: {traceback.format_exc()}")
return {
"status": "failed",
"error": str(e),
}
@shared_task(bind=True)
def scheduled_backup_task(self, retention_count: int = 0):
"""Celery task for scheduled backups with optional retention cleanup."""
try:
logger.info(f"[SCHEDULED] Starting scheduled backup task {self.request.id}")
# Create backup
backup_file = services.create_backup()
logger.info(f"[SCHEDULED] Backup created: {backup_file.name}")
# Cleanup old backups if retention is set
deleted = 0
if retention_count > 0:
deleted = _cleanup_old_backups(retention_count)
logger.info(f"[SCHEDULED] Cleanup complete, deleted {deleted} old backup(s)")
return {
"status": "completed",
"filename": backup_file.name,
"size": backup_file.stat().st_size,
"deleted_count": deleted,
}
except Exception as e:
logger.error(f"[SCHEDULED] Task {self.request.id} failed: {str(e)}")
logger.error(f"[SCHEDULED] Traceback: {traceback.format_exc()}")
return {
"status": "failed",
"error": str(e),
}
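All three tasks return a plain result dict instead of raising, so callers inspect the payload (as the API views do via AsyncResult); a sketch of enqueueing a backup and reading its result, assuming a running Celery worker and broker:

from celery.result import AsyncResult
from apps.backups.tasks import create_backup_task

queued = create_backup_task.delay()
payload = AsyncResult(queued.id).get(timeout=600)   # e.g. {"status": "completed", "filename": ..., "size": ...}
if payload["status"] != "completed":
    raise RuntimeError(payload.get("error", "backup failed"))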

File diff suppressed because it is too large Load diff

View file

@ -6,21 +6,12 @@ from .api_views import (
ChannelGroupViewSet,
BulkDeleteStreamsAPIView,
BulkDeleteChannelsAPIView,
BulkDeleteLogosAPIView,
CleanupUnusedLogosAPIView,
LogoViewSet,
ChannelProfileViewSet,
UpdateChannelMembershipAPIView,
BulkUpdateChannelMembershipAPIView,
RecordingViewSet,
RecurringRecordingRuleViewSet,
GetChannelStreamsAPIView,
SeriesRulesAPIView,
DeleteSeriesRuleAPIView,
EvaluateSeriesRulesAPIView,
BulkRemoveSeriesRecordingsAPIView,
BulkDeleteUpcomingRecordingsAPIView,
ComskipConfigAPIView,
)
app_name = 'channels' # for DRF routing
@ -32,24 +23,14 @@ router.register(r'channels', ChannelViewSet, basename='channel')
router.register(r'logos', LogoViewSet, basename='logo')
router.register(r'profiles', ChannelProfileViewSet, basename='profile')
router.register(r'recordings', RecordingViewSet, basename='recording')
router.register(r'recurring-rules', RecurringRecordingRuleViewSet, basename='recurring-rule')
urlpatterns = [
# Bulk delete is a single APIView, not a ViewSet
path('streams/bulk-delete/', BulkDeleteStreamsAPIView.as_view(), name='bulk_delete_streams'),
path('channels/bulk-delete/', BulkDeleteChannelsAPIView.as_view(), name='bulk_delete_channels'),
path('logos/bulk-delete/', BulkDeleteLogosAPIView.as_view(), name='bulk_delete_logos'),
path('logos/cleanup/', CleanupUnusedLogosAPIView.as_view(), name='cleanup_unused_logos'),
path('channels/<int:channel_id>/streams/', GetChannelStreamsAPIView.as_view(), name='get_channel_streams'),
path('profiles/<int:profile_id>/channels/<int:channel_id>/', UpdateChannelMembershipAPIView.as_view(), name='update_channel_membership'),
path('profiles/<int:profile_id>/channels/bulk-update/', BulkUpdateChannelMembershipAPIView.as_view(), name='bulk_update_channel_membership'),
# DVR series rules (order matters: specific routes before catch-all slug)
path('series-rules/', SeriesRulesAPIView.as_view(), name='series_rules'),
path('series-rules/evaluate/', EvaluateSeriesRulesAPIView.as_view(), name='evaluate_series_rules'),
path('series-rules/bulk-remove/', BulkRemoveSeriesRecordingsAPIView.as_view(), name='bulk_remove_series_recordings'),
path('series-rules/<path:tvg_id>/', DeleteSeriesRuleAPIView.as_view(), name='delete_series_rule'),
path('recordings/bulk-delete-upcoming/', BulkDeleteUpcomingRecordingsAPIView.as_view(), name='bulk_delete_upcoming_recordings'),
path('dvr/comskip-config/', ComskipConfigAPIView.as_view(), name='comskip_config'),
]
urlpatterns += router.urls

File diff suppressed because it is too large Load diff

View file

@ -1,18 +0,0 @@
# Generated by Django 5.1.6 on 2025-05-18 14:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dispatcharr_channels', '0020_alter_channel_channel_number'),
]
operations = [
migrations.AddField(
model_name='channel',
name='user_level',
field=models.IntegerField(default=0),
),
]

View file

@ -1,35 +0,0 @@
# Generated by Django 5.1.6 on 2025-07-13 23:08
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dispatcharr_channels', '0021_channel_user_level'),
('m3u', '0012_alter_m3uaccount_refresh_interval'),
]
operations = [
migrations.AddField(
model_name='channel',
name='auto_created',
field=models.BooleanField(default=False, help_text='Whether this channel was automatically created via M3U auto channel sync'),
),
migrations.AddField(
model_name='channel',
name='auto_created_by',
field=models.ForeignKey(blank=True, help_text='The M3U account that auto-created this channel', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='auto_created_channels', to='m3u.m3uaccount'),
),
migrations.AddField(
model_name='channelgroupm3uaccount',
name='auto_channel_sync',
field=models.BooleanField(default=False, help_text='Automatically create/delete channels to match streams in this group'),
),
migrations.AddField(
model_name='channelgroupm3uaccount',
name='auto_sync_channel_start',
field=models.FloatField(blank=True, help_text='Starting channel number for auto-created channels in this group', null=True),
),
]

View file

@ -1,23 +0,0 @@
# Generated by Django 5.1.6 on 2025-07-29 02:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dispatcharr_channels', '0022_channel_auto_created_channel_auto_created_by_and_more'),
]
operations = [
migrations.AddField(
model_name='stream',
name='stream_stats',
field=models.JSONField(blank=True, help_text='JSON object containing stream statistics like video codec, resolution, etc.', null=True),
),
migrations.AddField(
model_name='stream',
name='stream_stats_updated_at',
field=models.DateTimeField(blank=True, db_index=True, help_text='When stream statistics were last updated', null=True),
),
]

View file

@ -1,19 +0,0 @@
# Generated by Django 5.2.4 on 2025-08-22 20:14
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dispatcharr_channels', '0023_stream_stream_stats_stream_stream_stats_updated_at'),
]
operations = [
migrations.AlterField(
model_name='channelgroupm3uaccount',
name='channel_group',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='m3u_accounts', to='dispatcharr_channels.channelgroup'),
),
]

View file

@ -1,28 +0,0 @@
# Generated by Django 5.2.4 on 2025-09-02 14:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dispatcharr_channels', '0024_alter_channelgroupm3uaccount_channel_group'),
]
operations = [
migrations.AlterField(
model_name='channelgroupm3uaccount',
name='custom_properties',
field=models.JSONField(blank=True, default=dict, null=True),
),
migrations.AlterField(
model_name='recording',
name='custom_properties',
field=models.JSONField(blank=True, default=dict, null=True),
),
migrations.AlterField(
model_name='stream',
name='custom_properties',
field=models.JSONField(blank=True, default=dict, null=True),
),
]

View file

@ -1,31 +0,0 @@
# Generated by Django 5.0.14 on 2025-09-18 14:56
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dispatcharr_channels', '0025_alter_channelgroupm3uaccount_custom_properties_and_more'),
]
operations = [
migrations.CreateModel(
name='RecurringRecordingRule',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('days_of_week', models.JSONField(default=list)),
('start_time', models.TimeField()),
('end_time', models.TimeField()),
('enabled', models.BooleanField(default=True)),
('name', models.CharField(blank=True, max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('channel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recurring_rules', to='dispatcharr_channels.channel')),
],
options={
'ordering': ['channel', 'start_time'],
},
),
]

View file

@ -1,23 +0,0 @@
# Generated by Django 5.2.4 on 2025-10-05 20:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dispatcharr_channels', '0026_recurringrecordingrule'),
]
operations = [
migrations.AddField(
model_name='recurringrecordingrule',
name='end_date',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='recurringrecordingrule',
name='start_date',
field=models.DateField(blank=True, null=True),
),
]

View file

@ -1,25 +0,0 @@
# Generated by Django 5.2.4 on 2025-10-06 22:55
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dispatcharr_channels', '0027_recurringrecordingrule_end_date_and_more'),
]
operations = [
migrations.AddField(
model_name='channel',
name='created_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, help_text='Timestamp when this channel was created'),
preserve_default=False,
),
migrations.AddField(
model_name='channel',
name='updated_at',
field=models.DateTimeField(auto_now=True, help_text='Timestamp when this channel was last updated'),
),
]

View file

@ -1,54 +0,0 @@
# Generated migration to backfill stream_hash for existing custom streams
from django.db import migrations
import hashlib
def backfill_custom_stream_hashes(apps, schema_editor):
"""
Generate stream_hash for all custom streams that don't have one.
Uses stream ID to create a stable hash that won't change when name/url is edited.
"""
Stream = apps.get_model('dispatcharr_channels', 'Stream')
custom_streams_without_hash = Stream.objects.filter(
is_custom=True,
stream_hash__isnull=True
)
updated_count = 0
for stream in custom_streams_without_hash:
# Generate a stable hash using the stream's ID
# This ensures the hash never changes even if name/url is edited
unique_string = f"custom_stream_{stream.id}"
stream.stream_hash = hashlib.sha256(unique_string.encode()).hexdigest()
stream.save(update_fields=['stream_hash'])
updated_count += 1
if updated_count > 0:
print(f"Backfilled stream_hash for {updated_count} custom streams")
else:
print("No custom streams needed stream_hash backfill")
def reverse_backfill(apps, schema_editor):
"""
Reverse migration - clear stream_hash for custom streams.
Note: This will break preview functionality for custom streams.
"""
Stream = apps.get_model('dispatcharr_channels', 'Stream')
custom_streams = Stream.objects.filter(is_custom=True)
count = custom_streams.update(stream_hash=None)
print(f"Cleared stream_hash for {count} custom streams")
class Migration(migrations.Migration):
dependencies = [
('dispatcharr_channels', '0028_channel_created_at_channel_updated_at'),
]
operations = [
migrations.RunPython(backfill_custom_stream_hashes, reverse_backfill),
]

View file

@ -1,18 +0,0 @@
# Generated by Django 5.2.4 on 2025-10-28 20:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dispatcharr_channels', '0029_backfill_custom_stream_hashes'),
]
operations = [
migrations.AlterField(
model_name='stream',
name='url',
field=models.URLField(blank=True, max_length=4096, null=True),
),
]

View file

@ -1,29 +0,0 @@
# Generated by Django 5.2.9 on 2026-01-09 18:19
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dispatcharr_channels', '0030_alter_stream_url'),
]
operations = [
migrations.AddField(
model_name='channelgroupm3uaccount',
name='is_stale',
field=models.BooleanField(db_index=True, default=False, help_text='Whether this group relationship is stale (not seen in recent refresh, pending deletion)'),
),
migrations.AddField(
model_name='channelgroupm3uaccount',
name='last_seen',
field=models.DateTimeField(db_index=True, default=datetime.datetime.now, help_text='Last time this group was seen in the M3U source during a refresh'),
),
migrations.AddField(
model_name='stream',
name='is_stale',
field=models.BooleanField(db_index=True, default=False, help_text='Whether this stream is stale (not seen in recent refresh, pending deletion)'),
),
]

View file

@ -9,14 +9,12 @@ from datetime import datetime
import hashlib
import json
from apps.epg.models import EPGData
from apps.accounts.models import User
logger = logging.getLogger(__name__)
# If you have an M3UAccount model in apps.m3u, you can still import it:
from apps.m3u.models import M3UAccount
# Add fallback functions if Redis isn't available
def get_total_viewers(channel_id):
"""Get viewer count from Redis or return 0 if Redis isn't available"""
@ -27,7 +25,6 @@ def get_total_viewers(channel_id):
except Exception:
return 0
class ChannelGroup(models.Model):
name = models.TextField(unique=True, db_index=True)
@ -48,14 +45,12 @@ class ChannelGroup(models.Model):
return created_objects
class Stream(models.Model):
"""
Represents a single stream (e.g. from an M3U source or custom URL).
"""
name = models.CharField(max_length=255, default="Default Stream")
url = models.URLField(max_length=4096, blank=True, null=True)
m3u_account = models.ForeignKey(
M3UAccount,
on_delete=models.CASCADE,
@ -65,7 +60,7 @@ class Stream(models.Model):
)
logo_url = models.TextField(blank=True, null=True)
tvg_id = models.CharField(max_length=255, blank=True, null=True)
local_file = models.FileField(upload_to="uploads/", blank=True, null=True)
local_file = models.FileField(upload_to='uploads/', blank=True, null=True)
current_viewers = models.PositiveIntegerField(default=0)
updated_at = models.DateTimeField(auto_now=True)
channel_group = models.ForeignKey(
@ -73,18 +68,18 @@ class Stream(models.Model):
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="streams",
related_name='streams'
)
stream_profile = models.ForeignKey(
StreamProfile,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="streams",
related_name='streams'
)
is_custom = models.BooleanField(
default=False,
help_text="Whether this is a user-created stream or from an M3U account",
help_text="Whether this is a user-created stream or from an M3U account"
)
stream_hash = models.CharField(
max_length=255,
@ -94,48 +89,30 @@ class Stream(models.Model):
db_index=True,
)
last_seen = models.DateTimeField(db_index=True, default=datetime.now)
is_stale = models.BooleanField(
default=False,
db_index=True,
help_text="Whether this stream is stale (not seen in recent refresh, pending deletion)"
)
custom_properties = models.JSONField(default=dict, blank=True, null=True)
# Stream statistics fields
stream_stats = models.JSONField(
null=True,
blank=True,
help_text="JSON object containing stream statistics like video codec, resolution, etc."
)
stream_stats_updated_at = models.DateTimeField(
null=True,
blank=True,
help_text="When stream statistics were last updated",
db_index=True
)
class Meta:
# If you use m3u_account, you might do unique_together = ('name','url','m3u_account')
verbose_name = "Stream"
verbose_name_plural = "Streams"
ordering = ["-updated_at"]
ordering = ['-updated_at']
def __str__(self):
return self.name or self.url or f"Stream ID {self.id}"
@classmethod
def generate_hash_key(cls, name, url, tvg_id, keys=None, m3u_id=None, group=None):
if keys is None:
keys = CoreSettings.get_m3u_hash_key().split(",")
stream_parts = {"name": name, "url": url, "tvg_id": tvg_id, "m3u_id": m3u_id, "group": group}
hash_parts = {key: stream_parts[key] for key in keys if key in stream_parts}
# Serialize and hash the dictionary
serialized_obj = json.dumps(
hash_parts, sort_keys=True
) # sort_keys ensures consistent ordering
hash_object = hashlib.sha256(serialized_obj.encode())
return hash_object.hexdigest()
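The digest only covers the subset of fields named by the configured hash key; a standalone sketch reproducing the same scheme with hashlib/json, assuming the key is "name,url,tvg_id" and using example values:

import hashlib
import json

parts = {"name": "News HD", "url": "http://example.com/1.ts", "tvg_id": "news.hd"}
digest = hashlib.sha256(json.dumps(parts, sort_keys=True).encode()).hexdigest()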
@ -151,23 +128,13 @@ class Stream(models.Model):
return stream, False # False means it was updated, not created
except cls.DoesNotExist:
# If it doesn't exist, create a new object with the given hash
fields_to_update["stream_hash"] = (
hash_value # Make sure the hash field is set
)
fields_to_update['stream_hash'] = hash_value # Make sure the hash field is set
stream = cls.objects.create(**fields_to_update)
return stream, True # True means it was created
# @TODO: honor stream's stream profile
def get_stream_profile(self):
"""
Get the stream profile for this stream.
Uses the stream's own profile if set, otherwise returns the default.
"""
if self.stream_profile:
return self.stream_profile
stream_profile = StreamProfile.objects.get(
id=CoreSettings.get_default_stream_profile_id()
)
return stream_profile
@ -185,9 +152,7 @@ class Stream(models.Model):
m3u_account = self.m3u_account
m3u_profiles = m3u_account.profiles.all()
default_profile = next((obj for obj in m3u_profiles if obj.is_default), None)
profiles = [default_profile] + [
obj for obj in m3u_profiles if not obj.is_default
]
for profile in profiles:
logger.info(profile)
@ -202,19 +167,13 @@ class Stream(models.Model):
if profile.max_streams == 0 or current_connections < profile.max_streams:
# Start a new stream
redis_client.set(f"channel_stream:{self.id}", self.id)
redis_client.set(
f"stream_profile:{self.id}", profile.id
) # Store only the matched profile
# Increment connection count for profiles with limits
if profile.max_streams > 0:
redis_client.incr(profile_connections_key)
return (
self.id,
profile.id,
None,
) # Return newly assigned stream and matched profile
# 4. No available streams
return None, None, None
@ -235,9 +194,7 @@ class Stream(models.Model):
redis_client.delete(f"stream_profile:{stream_id}") # Remove profile association
profile_id = int(profile_id)
logger.debug(
f"Found profile ID {profile_id} associated with stream {stream_id}"
)
profile_connections_key = f"profile_connections:{profile_id}"
@ -246,7 +203,6 @@ class Stream(models.Model):
if current_count > 0:
redis_client.decr(profile_connections_key)
class ChannelManager(models.Manager):
def active(self):
return self.all()
@ -256,35 +212,38 @@ class Channel(models.Model):
channel_number = models.FloatField(db_index=True)
name = models.CharField(max_length=255)
logo = models.ForeignKey(
"Logo",
'Logo',
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="channels",
related_name='channels',
)
# M2M to Stream now in the same file
streams = models.ManyToManyField(
Stream, blank=True, through="ChannelStream", related_name="channels"
)
channel_group = models.ForeignKey(
"ChannelGroup",
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="channels",
help_text="Channel group this channel belongs to.",
)
tvg_id = models.CharField(max_length=255, blank=True, null=True)
tvc_guide_stationid = models.CharField(max_length=255, blank=True, null=True)
epg_data = models.ForeignKey(
EPGData,
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="channels",
related_name='channels'
)
stream_profile = models.ForeignKey(
@ -292,41 +251,16 @@ class Channel(models.Model):
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="channels",
related_name='channels'
)
uuid = models.UUIDField(
default=uuid.uuid4, editable=False, unique=True, db_index=True
)
user_level = models.IntegerField(default=0)
auto_created = models.BooleanField(
default=False,
help_text="Whether this channel was automatically created via M3U auto channel sync"
)
auto_created_by = models.ForeignKey(
"m3u.M3UAccount",
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="auto_created_channels",
help_text="The M3U account that auto-created this channel"
)
created_at = models.DateTimeField(
auto_now_add=True,
help_text="Timestamp when this channel was created"
)
updated_at = models.DateTimeField(
auto_now=True,
help_text="Timestamp when this channel was last updated"
)
def clean(self):
# Enforce unique channel_number within a given group
existing = Channel.objects.filter(
channel_number=self.channel_number, channel_group=self.channel_group
).exclude(id=self.id)
if existing.exists():
raise ValidationError(
@ -338,7 +272,7 @@ class Channel(models.Model):
@classmethod
def get_next_available_channel_number(cls, starting_from=1):
used_numbers = set(cls.objects.all().values_list("channel_number", flat=True))
used_numbers = set(cls.objects.all().values_list('channel_number', flat=True))
n = starting_from
while n in used_numbers:
n += 1
@ -348,9 +282,7 @@ class Channel(models.Model):
def get_stream_profile(self):
stream_profile = self.stream_profile
if not stream_profile:
stream_profile = StreamProfile.objects.get(id=CoreSettings.get_default_stream_profile_id())
return stream_profile
@ -380,55 +312,44 @@ class Channel(models.Model):
profile_id = int(profile_id_bytes)
return stream_id, profile_id, None
except (ValueError, TypeError):
logger.debug(f"Invalid profile ID retrieved from Redis: {profile_id_bytes}")
except (ValueError, TypeError):
logger.debug(f"Invalid stream ID retrieved from Redis: {stream_id_bytes}")
# No existing active stream, attempt to assign a new one
has_streams_but_maxed_out = False
has_active_profiles = False
# Iterate through channel streams and their profiles
for stream in self.streams.all().order_by("channelstream__order"):
for stream in self.streams.all().order_by('channelstream__order'):
# Retrieve the M3U account associated with the stream.
m3u_account = stream.m3u_account
if not m3u_account:
logger.debug(f"Stream {stream.id} has no M3U account")
continue
if not m3u_account.is_active:
logger.debug(f"M3U account {m3u_account.id} is inactive, skipping.")
continue
m3u_profiles = m3u_account.profiles.filter(is_active=True)
default_profile = next((obj for obj in m3u_profiles if obj.is_default), None)
if not default_profile:
logger.debug(f"M3U account {m3u_account.id} has no active default profile")
continue
profiles = [default_profile] + [obj for obj in m3u_profiles if not obj.is_default]
for profile in profiles:
# Skip inactive profiles
if not profile.is_active:
logger.debug(f"Skipping inactive profile {profile.id}")
continue
has_active_profiles = True
profile_connections_key = f"profile_connections:{profile.id}"
current_connections = int(redis_client.get(profile_connections_key) or 0)
# Check if profile has available slots (or unlimited connections)
if profile.max_streams == 0 or current_connections < profile.max_streams:
# Start a new stream
redis_client.set(f"channel_stream:{self.id}", stream.id)
redis_client.set(f"stream_profile:{stream.id}", profile.id)
@ -437,23 +358,17 @@ class Channel(models.Model):
if profile.max_streams > 0:
redis_client.incr(profile_connections_key)
return stream.id, profile.id, None  # Return newly assigned stream and matched profile
else:
# This profile is at max connections
has_streams_but_maxed_out = True
logger.debug(f"Profile {profile.id} at max connections: {current_connections}/{profile.max_streams}")
# No available streams - determine specific reason
if has_streams_but_maxed_out:
error_reason = "All active M3U profiles have reached maximum connection limits"
error_reason = "All M3U profiles have reached maximum connection limits"
elif has_active_profiles:
error_reason = "No compatible active profile found for any assigned stream"
error_reason = "No compatible profile found for any assigned stream"
else:
error_reason = "No active profiles found for any assigned stream"
@ -473,9 +388,7 @@ class Channel(models.Model):
redis_client.delete(f"channel_stream:{self.id}") # Remove active stream
stream_id = int(stream_id)
logger.debug(f"Found stream ID {stream_id} associated with channel stream {self.id}")
# Get the matched profile for cleanup
profile_id = redis_client.get(f"stream_profile:{stream_id}")
@ -486,9 +399,7 @@ class Channel(models.Model):
redis_client.delete(f"stream_profile:{stream_id}") # Remove profile association
profile_id = int(profile_id)
logger.debug(f"Found profile ID {profile_id} associated with stream {stream_id}")
profile_connections_key = f"profile_connections:{profile_id}"
@ -541,26 +452,20 @@ class Channel(models.Model):
# Increment connection count for new profile
new_profile_connections_key = f"profile_connections:{new_profile_id}"
redis_client.incr(new_profile_connections_key)
logger.info(f"Updated stream {stream_id} profile from {current_profile_id} to {new_profile_id}")
return True
class ChannelProfile(models.Model):
name = models.CharField(max_length=100, unique=True)
class ChannelProfileMembership(models.Model):
channel_profile = models.ForeignKey(ChannelProfile, on_delete=models.CASCADE)
channel = models.ForeignKey(Channel, on_delete=models.CASCADE)
enabled = models.BooleanField(default=True)  # Track if the channel is enabled for this group
class Meta:
unique_together = ("channel_profile", "channel")
unique_together = ('channel_profile', 'channel')
class ChannelStream(models.Model):
channel = models.ForeignKey(Channel, on_delete=models.CASCADE)
@ -568,45 +473,27 @@ class ChannelStream(models.Model):
order = models.PositiveIntegerField(default=0) # Ordering field
class Meta:
ordering = ["order"] # Ensure streams are retrieved in order
ordering = ['order'] # Ensure streams are retrieved in order
constraints = [
models.UniqueConstraint(fields=["channel", "stream"], name="unique_channel_stream")
]
class ChannelGroupM3UAccount(models.Model):
channel_group = models.ForeignKey(
ChannelGroup, on_delete=models.CASCADE, related_name="m3u_accounts"
)
m3u_account = models.ForeignKey(
M3UAccount, on_delete=models.CASCADE, related_name="channel_group"
)
custom_properties = models.JSONField(default=dict, blank=True, null=True)
enabled = models.BooleanField(default=True)
auto_channel_sync = models.BooleanField(
default=False,
help_text='Automatically create/delete channels to match streams in this group'
)
auto_sync_channel_start = models.FloatField(
null=True,
blank=True,
help_text='Starting channel number for auto-created channels in this group'
)
last_seen = models.DateTimeField(
default=datetime.now,
db_index=True,
help_text='Last time this group was seen in the M3U source during a refresh'
)
is_stale = models.BooleanField(
default=False,
db_index=True,
help_text='Whether this group relationship is stale (not seen in recent refresh, pending deletion)'
)
class Meta:
unique_together = ("channel_group", "m3u_account")
unique_together = ('channel_group', 'm3u_account')
def __str__(self):
return f"{self.channel_group.name} - {self.m3u_account.name} (Enabled: {self.enabled})"
@ -619,47 +506,12 @@ class Logo(models.Model):
def __str__(self):
return self.name
class Recording(models.Model):
channel = models.ForeignKey("Channel", on_delete=models.CASCADE, related_name="recordings")
start_time = models.DateTimeField()
end_time = models.DateTimeField()
task_id = models.CharField(max_length=255, null=True, blank=True)
custom_properties = models.JSONField(default=dict, blank=True, null=True)
def __str__(self):
return f"{self.channel.name} - {self.start_time} to {self.end_time}"
class RecurringRecordingRule(models.Model):
"""Rule describing a recurring manual DVR schedule."""
channel = models.ForeignKey(
"Channel",
on_delete=models.CASCADE,
related_name="recurring_rules",
)
days_of_week = models.JSONField(default=list)
start_time = models.TimeField()
end_time = models.TimeField()
enabled = models.BooleanField(default=True)
name = models.CharField(max_length=255, blank=True)
start_date = models.DateField(null=True, blank=True)
end_date = models.DateField(null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
ordering = ["channel", "start_time"]
def __str__(self):
channel_name = getattr(self.channel, "name", str(self.channel_id))
return f"Recurring rule for {channel_name}"
def cleaned_days(self):
try:
return sorted({int(d) for d in (self.days_of_week or []) if 0 <= int(d) <= 6})
except Exception:
return []
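# For illustration: a minimal sketch of creating a recurring DVR rule with the fields above.
# Assumes a configured Django environment; the channel name and dates are hypothetical.
from datetime import date, time
from apps.channels.models import Channel, RecurringRecordingRule

channel = Channel.objects.get(name="News HD")
rule = RecurringRecordingRule.objects.create(
    channel=channel,
    days_of_week=[0, 2, 4],        # Monday, Wednesday, Friday (0 = Monday, 6 = Sunday)
    start_time=time(18, 0),
    end_time=time(19, 0),
    start_date=date(2025, 1, 6),
    end_date=date(2025, 3, 28),
    name="Evening news",
)
print(rule.cleaned_days())         # [0, 2, 4]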

View file

@ -1,234 +1,108 @@
import json
from datetime import datetime
from rest_framework import serializers
from .models import (
Stream,
Channel,
ChannelGroup,
ChannelStream,
ChannelGroupM3UAccount,
Logo,
ChannelProfile,
ChannelProfileMembership,
Recording,
RecurringRecordingRule,
)
from apps.epg.serializers import EPGDataSerializer
from core.models import StreamProfile
from apps.epg.models import EPGData
from django.urls import reverse
from django.utils import timezone
from core.utils import validate_flexible_url
class LogoSerializer(serializers.ModelSerializer):
cache_url = serializers.SerializerMethodField()
channel_count = serializers.SerializerMethodField()
is_used = serializers.SerializerMethodField()
channel_names = serializers.SerializerMethodField()
class Meta:
model = Logo
fields = ["id", "name", "url", "cache_url", "channel_count", "is_used", "channel_names"]
def validate_url(self, value):
"""Validate that the URL is unique for creation or update"""
if self.instance and self.instance.url == value:
return value
if Logo.objects.filter(url=value).exists():
raise serializers.ValidationError("A logo with this URL already exists.")
return value
def create(self, validated_data):
"""Handle logo creation with proper URL validation"""
return Logo.objects.create(**validated_data)
def update(self, instance, validated_data):
"""Handle logo updates"""
for attr, value in validated_data.items():
setattr(instance, attr, value)
instance.save()
return instance
def get_cache_url(self, obj):
# return f"/api/channels/logos/{obj.id}/cache/"
request = self.context.get("request")
request = self.context.get('request')
if request:
return request.build_absolute_uri(
reverse("api:channels:logo-cache", args=[obj.id])
)
return reverse("api:channels:logo-cache", args=[obj.id])
def get_channel_count(self, obj):
"""Get the number of channels using this logo"""
return obj.channels.count()
def get_is_used(self, obj):
"""Check if this logo is used by any channels"""
return obj.channels.exists()
def get_channel_names(self, obj):
"""Get the names of channels using this logo (limited to first 5)"""
names = []
# Get channel names
channels = obj.channels.all()[:5]
for channel in channels:
names.append(f"Channel: {channel.name}")
# Calculate total count for "more" message
total_count = self.get_channel_count(obj)
if total_count > 5:
names.append(f"...and {total_count - 5} more")
return names
#
# Stream
#
class StreamSerializer(serializers.ModelSerializer):
url = serializers.CharField(
required=False,
allow_blank=True,
allow_null=True,
validators=[validate_flexible_url]
)
stream_profile_id = serializers.PrimaryKeyRelatedField(
queryset=StreamProfile.objects.all(),
source="stream_profile",
source='stream_profile',
allow_null=True,
required=False,
required=False
)
read_only_fields = ["is_custom", "m3u_account", "stream_hash"]
read_only_fields = ['is_custom', 'm3u_account', 'stream_hash']
class Meta:
model = Stream
fields = [
"id",
"name",
"url",
"m3u_account", # Uncomment if using M3U fields
"logo_url",
"tvg_id",
"local_file",
"current_viewers",
"updated_at",
"last_seen",
"is_stale",
"stream_profile_id",
"is_custom",
"channel_group",
"stream_hash",
"stream_stats",
"stream_stats_updated_at",
]
def get_fields(self):
fields = super().get_fields()
# Prevent editing of specific properties if this stream was created from an M3U account
if (
self.instance
and getattr(self.instance, "m3u_account", None)
and not self.instance.is_custom
):
fields["id"].read_only = True
fields["name"].read_only = True
fields["url"].read_only = True
fields["m3u_account"].read_only = True
fields["tvg_id"].read_only = True
fields["channel_group"].read_only = True
return fields
class ChannelGroupM3UAccountSerializer(serializers.ModelSerializer):
m3u_accounts = serializers.IntegerField(source="m3u_accounts.id", read_only=True)
enabled = serializers.BooleanField()
auto_channel_sync = serializers.BooleanField(default=False)
auto_sync_channel_start = serializers.FloatField(allow_null=True, required=False)
custom_properties = serializers.JSONField(required=False)
class Meta:
model = ChannelGroupM3UAccount
fields = ["m3u_accounts", "channel_group", "enabled", "auto_channel_sync", "auto_sync_channel_start", "custom_properties", "is_stale", "last_seen"]
def to_representation(self, instance):
data = super().to_representation(instance)
custom_props = instance.custom_properties or {}
return data
def to_internal_value(self, data):
# Accept both dict and JSON string for custom_properties (for backward compatibility)
val = data.get("custom_properties")
if isinstance(val, str):
try:
data["custom_properties"] = json.loads(val)
except Exception:
pass
return super().to_internal_value(data)
#
# Channel Group
#
class ChannelGroupSerializer(serializers.ModelSerializer):
channel_count = serializers.SerializerMethodField()
m3u_account_count = serializers.SerializerMethodField()
m3u_accounts = ChannelGroupM3UAccountSerializer(
many=True,
read_only=True
)
class Meta:
model = ChannelGroup
fields = ["id", "name", "channel_count", "m3u_account_count", "m3u_accounts"]
def get_channel_count(self, obj):
"""Get count of channels in this group"""
return obj.channels.count()
def get_m3u_account_count(self, obj):
"""Get count of M3U accounts associated with this group"""
return obj.m3u_accounts.count()
class ChannelProfileSerializer(serializers.ModelSerializer):
channels = serializers.SerializerMethodField()
class Meta:
model = ChannelProfile
fields = ["id", "name", "channels"]
fields = ['id', 'name', 'channels']
def get_channels(self, obj):
memberships = ChannelProfileMembership.objects.filter(channel_profile=obj, enabled=True)
return [membership.channel.id for membership in memberships]
class ChannelProfileMembershipSerializer(serializers.ModelSerializer):
class Meta:
model = ChannelProfileMembership
fields = ["channel", "enabled"]
fields = ['channel', 'enabled']
class ChanneProfilelMembershipUpdateSerializer(serializers.Serializer):
channel_id = serializers.IntegerField() # Ensure channel_id is an integer
enabled = serializers.BooleanField()
class BulkChannelProfileMembershipSerializer(serializers.Serializer):
channels = serializers.ListField(
child=ChanneProfilelMembershipUpdateSerializer(), # Use the nested serializer
allow_empty=False,
)
def validate_channels(self, value):
@ -236,7 +110,6 @@ class BulkChannelProfileMembershipSerializer(serializers.Serializer):
raise serializers.ValidationError("At least one channel must be provided.")
return value
#
# Channel
#
@ -246,10 +119,14 @@ class ChannelSerializer(serializers.ModelSerializer):
channel_number = serializers.FloatField(
allow_null=True,
required=False,
error_messages={"invalid": "Channel number must be a valid decimal number."},
error_messages={
'invalid': 'Channel number must be a valid decimal number.'
}
)
channel_group_id = serializers.PrimaryKeyRelatedField(
queryset=ChannelGroup.objects.all(), source="channel_group", required=False
queryset=ChannelGroup.objects.all(),
source="channel_group",
required=False
)
epg_data_id = serializers.PrimaryKeyRelatedField(
queryset=EPGData.objects.all(),
@ -260,88 +137,65 @@ class ChannelSerializer(serializers.ModelSerializer):
stream_profile_id = serializers.PrimaryKeyRelatedField(
queryset=StreamProfile.objects.all(),
source="stream_profile",
source='stream_profile',
allow_null=True,
required=False,
required=False
)
streams = serializers.PrimaryKeyRelatedField(
queryset=Stream.objects.all(), many=True, required=False
)
streams = serializers.PrimaryKeyRelatedField(queryset=Stream.objects.all(), many=True, required=False)
logo_id = serializers.PrimaryKeyRelatedField(
queryset=Logo.objects.all(),
source="logo",
source='logo',
allow_null=True,
required=False,
)
auto_created_by_name = serializers.SerializerMethodField()
class Meta:
model = Channel
fields = [
"id",
"channel_number",
"name",
"channel_group_id",
"tvg_id",
"tvc_guide_stationid",
"epg_data_id",
"streams",
"stream_profile_id",
"uuid",
"logo_id",
"user_level",
"auto_created",
"auto_created_by",
"auto_created_by_name",
]
def to_representation(self, instance):
include_streams = self.context.get("include_streams", False)
include_streams = self.context.get('include_streams', False)
if include_streams:
self.fields["streams"] = serializers.SerializerMethodField()
return super().to_representation(instance)
else:
# Fix: For PATCH/PUT responses, ensure streams are ordered
representation = super().to_representation(instance)
if "streams" in representation:
representation["streams"] = list(
instance.streams.all()
.order_by("channelstream__order")
.values_list("id", flat=True)
)
return representation
def get_logo(self, obj):
return LogoSerializer(obj.logo).data
def get_streams(self, obj):
"""Retrieve ordered stream IDs for GET requests."""
return StreamSerializer(obj.streams.all().order_by("channelstream__order"), many=True).data
def create(self, validated_data):
streams = validated_data.pop("streams", [])
channel_number = validated_data.pop(
"channel_number", Channel.get_next_available_channel_number()
)
streams = validated_data.pop('streams', [])
channel_number = validated_data.pop('channel_number', Channel.get_next_available_channel_number())
validated_data["channel_number"] = channel_number
channel = Channel.objects.create(**validated_data)
# Add streams in the specified order
for index, stream in enumerate(streams):
ChannelStream.objects.create(channel=channel, stream_id=stream.id, order=index)
return channel
def update(self, instance, validated_data):
streams = validated_data.pop("streams", None)
streams = validated_data.pop('streams', None)
# Update standard fields
for attr, value in validated_data.items():
@ -352,7 +206,8 @@ class ChannelSerializer(serializers.ModelSerializer):
if streams is not None:
# Normalize stream IDs
normalized_ids = [
stream.id if hasattr(stream, "id") else stream for stream in streams
stream.id if hasattr(stream, "id") else stream
for stream in streams
]
print(normalized_ids)
@ -379,7 +234,9 @@ class ChannelSerializer(serializers.ModelSerializer):
cs.save(update_fields=["order"])
else:
ChannelStream.objects.create(
channel=instance, stream_id=stream_id, order=order
)
return instance
@ -393,71 +250,34 @@ class ChannelSerializer(serializers.ModelSerializer):
# Ensure it's processed as a float
return float(value)
except (ValueError, TypeError):
raise serializers.ValidationError("Channel number must be a valid decimal number.")
def validate_stream_profile(self, value):
"""Handle special case where empty/0 values mean 'use default' (null)"""
if value == "0" or value == 0 or value == "" or value is None:
if value == '0' or value == 0 or value == '' or value is None:
return None
return value # PrimaryKeyRelatedField will handle the conversion to object
def get_auto_created_by_name(self, obj):
"""Get the name of the M3U account that auto-created this channel."""
if obj.auto_created_by:
return obj.auto_created_by.name
return None
class RecordingSerializer(serializers.ModelSerializer):
class Meta:
model = Recording
fields = "__all__"
read_only_fields = ["task_id"]
fields = '__all__'
read_only_fields = ['task_id']
def validate(self, data):
from core.models import CoreSettings
start_time = data.get("start_time")
end_time = data.get("end_time")
if start_time and timezone.is_naive(start_time):
start_time = timezone.make_aware(start_time, timezone.get_current_timezone())
data["start_time"] = start_time
if end_time and timezone.is_naive(end_time):
end_time = timezone.make_aware(end_time, timezone.get_current_timezone())
data["end_time"] = end_time
# If this is an EPG-based recording (program provided), apply global pre/post offsets
try:
cp = data.get("custom_properties") or {}
is_epg_based = isinstance(cp, dict) and isinstance(cp.get("program"), (dict,))
except Exception:
is_epg_based = False
if is_epg_based and start_time and end_time:
try:
pre_min = int(CoreSettings.get_dvr_pre_offset_minutes())
except Exception:
pre_min = 0
try:
post_min = int(CoreSettings.get_dvr_post_offset_minutes())
except Exception:
post_min = 0
from datetime import timedelta
try:
if pre_min and pre_min > 0:
start_time = start_time - timedelta(minutes=pre_min)
except Exception:
pass
try:
if post_min and post_min > 0:
end_time = end_time + timedelta(minutes=post_min)
except Exception:
pass
# write back adjusted times so scheduling uses them
data["start_time"] = start_time
data["end_time"] = end_time
now = timezone.now() # timezone-aware current time
@ -466,61 +286,8 @@ class RecordingSerializer(serializers.ModelSerializer):
if start_time < now:
# Optional: Adjust start_time if it's in the past but end_time is in the future
data["start_time"] = now # or: timezone.now() + timedelta(seconds=1)
if end_time <= data["start_time"]:
data['start_time'] = now # or: timezone.now() + timedelta(seconds=1)
if end_time <= data['start_time']:
raise serializers.ValidationError("End time must be after start time.")
return data
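# For illustration: the pre/post padding applied above to EPG-based recordings,
# worked through with hypothetical offset values of 2 and 5 minutes.
from datetime import datetime, timedelta, timezone as tz

start_time = datetime(2025, 1, 6, 20, 0, tzinfo=tz.utc)
end_time = datetime(2025, 1, 6, 21, 0, tzinfo=tz.utc)
pre_min, post_min = 2, 5

start_time -= timedelta(minutes=pre_min)   # recording begins slightly early
end_time += timedelta(minutes=post_min)    # and ends slightly late
print(start_time.time(), end_time.time())  # 19:58:00 21:05:00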
class RecurringRecordingRuleSerializer(serializers.ModelSerializer):
class Meta:
model = RecurringRecordingRule
fields = "__all__"
read_only_fields = ["created_at", "updated_at"]
def validate_days_of_week(self, value):
if not value:
raise serializers.ValidationError("Select at least one day of the week")
cleaned = []
for entry in value:
try:
iv = int(entry)
except (TypeError, ValueError):
raise serializers.ValidationError("Days of week must be integers 0-6")
if iv < 0 or iv > 6:
raise serializers.ValidationError("Days of week must be between 0 (Monday) and 6 (Sunday)")
cleaned.append(iv)
return sorted(set(cleaned))
def validate(self, attrs):
start = attrs.get("start_time") or getattr(self.instance, "start_time", None)
end = attrs.get("end_time") or getattr(self.instance, "end_time", None)
start_date = attrs.get("start_date") if "start_date" in attrs else getattr(self.instance, "start_date", None)
end_date = attrs.get("end_date") if "end_date" in attrs else getattr(self.instance, "end_date", None)
if start_date is None:
existing_start = getattr(self.instance, "start_date", None)
if existing_start is None:
raise serializers.ValidationError("Start date is required")
if start_date and end_date and end_date < start_date:
raise serializers.ValidationError("End date must be on or after start date")
if end_date is None:
existing_end = getattr(self.instance, "end_date", None)
if existing_end is None:
raise serializers.ValidationError("End date is required")
if start and end and start_date and end_date:
start_dt = datetime.combine(start_date, start)
end_dt = datetime.combine(end_date, end)
if end_dt <= start_dt:
raise serializers.ValidationError("End datetime must be after start datetime")
elif start and end and end == start:
raise serializers.ValidationError("End time must be different from start time")
# Normalize empty strings to None for dates
if attrs.get("end_date") == "":
attrs["end_date"] = None
if attrs.get("start_date") == "":
attrs["start_date"] = None
return super().validate(attrs)
def create(self, validated_data):
return super().create(validated_data)

View file

@ -8,7 +8,7 @@ from .models import Channel, Stream, ChannelProfile, ChannelProfileMembership, R
from apps.m3u.models import M3UAccount
from apps.epg.tasks import parse_programs_for_tvg_id
import logging, requests, time
from .tasks import run_recording, prefetch_recording_artwork
from django.utils.timezone import now, is_aware, make_aware
from datetime import timedelta
@ -45,20 +45,6 @@ def set_default_m3u_account(sender, instance, **kwargs):
else:
raise ValueError("No default M3UAccount found.")
@receiver(post_save, sender=Stream)
def generate_custom_stream_hash(sender, instance, created, **kwargs):
"""
Generate a stable stream_hash for custom streams after creation.
Uses the stream's ID to ensure the hash never changes even if name/url is edited.
"""
if instance.is_custom and not instance.stream_hash and created:
import hashlib
# Use stream ID for a stable, unique hash that never changes
unique_string = f"custom_stream_{instance.id}"
instance.stream_hash = hashlib.sha256(unique_string.encode()).hexdigest()
# Use update to avoid triggering signals again
Stream.objects.filter(id=instance.id).update(stream_hash=instance.stream_hash)
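# For illustration: the stable hash produced for a hypothetical custom stream with ID 42.
# Because only the primary key feeds the hash, renaming the stream later does not change it.
import hashlib

stream_id = 42
stream_hash = hashlib.sha256(f"custom_stream_{stream_id}".encode()).hexdigest()
print(stream_hash)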
@receiver(post_save, sender=Channel)
def refresh_epg_programs(sender, instance, created, **kwargs):
"""
@ -76,6 +62,15 @@ def refresh_epg_programs(sender, instance, created, **kwargs):
logger.info(f"New channel {instance.id} ({instance.name}) created with EPG data, refreshing program data")
parse_programs_for_tvg_id.delay(instance.epg_data.id)
@receiver(post_save, sender=Channel)
def add_new_channel_to_groups(sender, instance, created, **kwargs):
if created:
profiles = ChannelProfile.objects.all()
ChannelProfileMembership.objects.bulk_create([
ChannelProfileMembership(channel_profile=profile, channel=instance)
for profile in profiles
])
@receiver(post_save, sender=ChannelProfile)
def create_profile_memberships(sender, instance, created, **kwargs):
if created:
@ -87,9 +82,8 @@ def create_profile_memberships(sender, instance, created, **kwargs):
def schedule_recording_task(instance):
eta = instance.start_time
# Pass recording_id first so task can persist metadata to the correct row
task = run_recording.apply_async(
args=[instance.id, instance.channel_id, str(instance.start_time), str(instance.end_time)],
eta=eta
)
return task.id
@ -138,11 +132,6 @@ def schedule_task_on_save(sender, instance, created, **kwargs):
instance.save(update_fields=['task_id'])
else:
print("Start time is in the past. Not scheduling.")
# Kick off poster/artwork prefetch to enrich Upcoming cards
try:
prefetch_recording_artwork.apply_async(args=[instance.id], countdown=1)
except Exception as e:
print("Error scheduling artwork prefetch:", e)
except Exception as e:
import traceback
print("Error in post_save signal:", e)

File diff suppressed because it is too large Load diff

View file

@ -1,211 +0,0 @@
from django.test import TestCase
from django.contrib.auth import get_user_model
from rest_framework.test import APIClient
from rest_framework import status
from apps.channels.models import Channel, ChannelGroup
User = get_user_model()
class ChannelBulkEditAPITests(TestCase):
def setUp(self):
# Create a test admin user (user_level >= 10) and authenticate
self.user = User.objects.create_user(username="testuser", password="testpass123")
self.user.user_level = 10 # Set admin level
self.user.save()
self.client = APIClient()
self.client.force_authenticate(user=self.user)
self.bulk_edit_url = "/api/channels/channels/edit/bulk/"
# Create test channel group
self.group1 = ChannelGroup.objects.create(name="Test Group 1")
self.group2 = ChannelGroup.objects.create(name="Test Group 2")
# Create test channels
self.channel1 = Channel.objects.create(
channel_number=1.0,
name="Channel 1",
tvg_id="channel1",
channel_group=self.group1
)
self.channel2 = Channel.objects.create(
channel_number=2.0,
name="Channel 2",
tvg_id="channel2",
channel_group=self.group1
)
self.channel3 = Channel.objects.create(
channel_number=3.0,
name="Channel 3",
tvg_id="channel3"
)
def test_bulk_edit_success(self):
"""Test successful bulk update of multiple channels"""
data = [
{"id": self.channel1.id, "name": "Updated Channel 1"},
{"id": self.channel2.id, "name": "Updated Channel 2", "channel_number": 22.0},
]
response = self.client.patch(self.bulk_edit_url, data, format="json")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["message"], "Successfully updated 2 channels")
self.assertEqual(len(response.data["channels"]), 2)
# Verify database changes
self.channel1.refresh_from_db()
self.channel2.refresh_from_db()
self.assertEqual(self.channel1.name, "Updated Channel 1")
self.assertEqual(self.channel2.name, "Updated Channel 2")
self.assertEqual(self.channel2.channel_number, 22.0)
def test_bulk_edit_with_empty_validated_data_first(self):
"""
Test the bug fix: when first channel has empty validated_data.
This was causing: ValueError: Field names must be given to bulk_update()
"""
# Create a channel with data that will be "unchanged" (empty validated_data)
# We'll send the same data it already has
data = [
# First channel: no actual changes (this would create empty validated_data)
{"id": self.channel1.id},
# Second channel: has changes
{"id": self.channel2.id, "name": "Updated Channel 2"},
]
response = self.client.patch(self.bulk_edit_url, data, format="json")
# Should not crash with ValueError
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["message"], "Successfully updated 2 channels")
# Verify the channel with changes was updated
self.channel2.refresh_from_db()
self.assertEqual(self.channel2.name, "Updated Channel 2")
def test_bulk_edit_all_empty_updates(self):
"""Test when all channels have empty updates (no actual changes)"""
data = [
{"id": self.channel1.id},
{"id": self.channel2.id},
]
response = self.client.patch(self.bulk_edit_url, data, format="json")
# Should succeed without calling bulk_update
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["message"], "Successfully updated 2 channels")
def test_bulk_edit_mixed_fields(self):
"""Test bulk update where different channels update different fields"""
data = [
{"id": self.channel1.id, "name": "New Name 1"},
{"id": self.channel2.id, "channel_number": 99.0},
{"id": self.channel3.id, "tvg_id": "new_tvg_id", "name": "New Name 3"},
]
response = self.client.patch(self.bulk_edit_url, data, format="json")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["message"], "Successfully updated 3 channels")
# Verify all updates
self.channel1.refresh_from_db()
self.channel2.refresh_from_db()
self.channel3.refresh_from_db()
self.assertEqual(self.channel1.name, "New Name 1")
self.assertEqual(self.channel2.channel_number, 99.0)
self.assertEqual(self.channel3.tvg_id, "new_tvg_id")
self.assertEqual(self.channel3.name, "New Name 3")
def test_bulk_edit_with_channel_group(self):
"""Test bulk update with channel_group_id changes"""
data = [
{"id": self.channel1.id, "channel_group_id": self.group2.id},
{"id": self.channel3.id, "channel_group_id": self.group1.id},
]
response = self.client.patch(self.bulk_edit_url, data, format="json")
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Verify group changes
self.channel1.refresh_from_db()
self.channel3.refresh_from_db()
self.assertEqual(self.channel1.channel_group, self.group2)
self.assertEqual(self.channel3.channel_group, self.group1)
def test_bulk_edit_nonexistent_channel(self):
"""Test bulk update with a channel that doesn't exist"""
nonexistent_id = 99999
data = [
{"id": nonexistent_id, "name": "Should Fail"},
{"id": self.channel1.id, "name": "Should Still Update"},
]
response = self.client.patch(self.bulk_edit_url, data, format="json")
# Should return 400 with errors
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn("errors", response.data)
self.assertEqual(len(response.data["errors"]), 1)
self.assertEqual(response.data["errors"][0]["channel_id"], nonexistent_id)
self.assertEqual(response.data["errors"][0]["error"], "Channel not found")
# The valid channel should still be updated
self.assertEqual(response.data["updated_count"], 1)
def test_bulk_edit_validation_error(self):
"""Test bulk update with invalid data (validation error)"""
data = [
{"id": self.channel1.id, "channel_number": "invalid_number"},
]
response = self.client.patch(self.bulk_edit_url, data, format="json")
# Should return 400 with validation errors
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn("errors", response.data)
self.assertEqual(len(response.data["errors"]), 1)
self.assertIn("channel_number", response.data["errors"][0]["errors"])
def test_bulk_edit_empty_channel_updates(self):
"""Test bulk update with empty list"""
data = []
response = self.client.patch(self.bulk_edit_url, data, format="json")
# Empty list is accepted and returns success with 0 updates
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["message"], "Successfully updated 0 channels")
def test_bulk_edit_missing_channel_updates(self):
"""Test bulk update without proper format (dict instead of list)"""
data = {"channel_updates": {}}
response = self.client.patch(self.bulk_edit_url, data, format="json")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data["error"], "Expected a list of channel updates")
def test_bulk_edit_preserves_other_fields(self):
"""Test that bulk update only changes specified fields"""
original_channel_number = self.channel1.channel_number
original_tvg_id = self.channel1.tvg_id
data = [
{"id": self.channel1.id, "name": "Only Name Changed"},
]
response = self.client.patch(self.bulk_edit_url, data, format="json")
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Verify only name changed, other fields preserved
self.channel1.refresh_from_db()
self.assertEqual(self.channel1.name, "Only Name Changed")
self.assertEqual(self.channel1.channel_number, original_channel_number)
self.assertEqual(self.channel1.tvg_id, original_tvg_id)
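# For illustration: a minimal client-side sketch of the bulk edit endpoint exercised above.
# The host, auth scheme, token, and channel IDs are hypothetical.
import requests

url = "http://localhost:9191/api/channels/channels/edit/bulk/"
headers = {"Authorization": "Bearer <access-token>"}
payload = [
    {"id": 1, "name": "Updated Channel 1"},
    {"id": 2, "name": "Updated Channel 2", "channel_number": 22.0},
]
resp = requests.patch(url, json=payload, headers=headers, timeout=10)
print(resp.status_code, resp.json().get("message"))  # e.g. 200 "Successfully updated 2 channels"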

View file

@ -1,40 +0,0 @@
from datetime import datetime, timedelta
from django.test import TestCase
from django.utils import timezone
from apps.channels.models import Channel, RecurringRecordingRule, Recording
from apps.channels.tasks import sync_recurring_rule_impl, purge_recurring_rule_impl
class RecurringRecordingRuleTasksTests(TestCase):
def test_sync_recurring_rule_creates_and_purges_recordings(self):
now = timezone.now()
channel = Channel.objects.create(channel_number=1, name='Test Channel')
start_time = (now + timedelta(minutes=15)).time().replace(second=0, microsecond=0)
end_time = (now + timedelta(minutes=75)).time().replace(second=0, microsecond=0)
rule = RecurringRecordingRule.objects.create(
channel=channel,
days_of_week=[now.weekday()],
start_time=start_time,
end_time=end_time,
)
created = sync_recurring_rule_impl(rule.id, drop_existing=True, horizon_days=1)
self.assertEqual(created, 1)
recording = Recording.objects.filter(custom_properties__rule__id=rule.id).first()
self.assertIsNotNone(recording)
self.assertEqual(recording.channel, channel)
self.assertEqual(recording.custom_properties.get('rule', {}).get('id'), rule.id)
expected_start = timezone.make_aware(
datetime.combine(recording.start_time.date(), start_time),
timezone.get_current_timezone(),
)
self.assertLess(abs((recording.start_time - expected_start).total_seconds()), 60)
removed = purge_recurring_rule_impl(rule.id)
self.assertEqual(removed, 1)
self.assertFalse(Recording.objects.filter(custom_properties__rule__id=rule.id).exists())

View file

@ -2,27 +2,18 @@ import logging, os
from rest_framework import viewsets, status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import action
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
from django.utils import timezone
from datetime import timedelta
from .models import EPGSource, ProgramData, EPGData # Added ProgramData
from .serializers import (
ProgramDataSerializer,
EPGSourceSerializer,
EPGDataSerializer,
) # Updated serializer
from .tasks import refresh_epg_data
from apps.accounts.permissions import (
Authenticated,
permission_classes_by_action,
permission_classes_by_method,
)
logger = logging.getLogger(__name__)
# ─────────────────────────────
# 1) EPG Source API (CRUD)
# ─────────────────────────────
@ -30,38 +21,30 @@ class EPGSourceViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows EPG sources to be viewed or edited.
"""
queryset = EPGSource.objects.all()
serializer_class = EPGSourceSerializer
def get_permissions(self):
try:
return [perm() for perm in permission_classes_by_action[self.action]]
except KeyError:
return [Authenticated()]
def list(self, request, *args, **kwargs):
logger.debug("Listing all EPG sources.")
return super().list(request, *args, **kwargs)
@action(detail=False, methods=["post"])
@action(detail=False, methods=['post'])
def upload(self, request):
if "file" not in request.FILES:
return Response(
{"error": "No file uploaded"}, status=status.HTTP_400_BAD_REQUEST
)
if 'file' not in request.FILES:
return Response({'error': 'No file uploaded'}, status=status.HTTP_400_BAD_REQUEST)
file = request.FILES["file"]
file = request.FILES['file']
file_name = file.name
file_path = os.path.join("/data/uploads/epgs", file_name)
file_path = os.path.join('/data/uploads/epgs', file_name)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, "wb+") as destination:
with open(file_path, 'wb+') as destination:
for chunk in file.chunks():
destination.write(chunk)
new_obj_data = request.data.copy()
new_obj_data["file_path"] = file_path
new_obj_data['file_path'] = file_path
serializer = self.get_serializer(data=new_obj_data)
serializer.is_valid(raise_exception=True)
@ -74,111 +57,70 @@ class EPGSourceViewSet(viewsets.ModelViewSet):
instance = self.get_object()
# Check if we're toggling is_active
if "is_active" in request.data and instance.is_active != request.data["is_active"]:
# Set appropriate status based on new is_active value
if request.data["is_active"]:
request.data["status"] = "idle"
else:
request.data["status"] = "disabled"
# Continue with regular partial update
return super().partial_update(request, *args, **kwargs)
# ─────────────────────────────
# 2) Program API (CRUD)
# ─────────────────────────────
class ProgramViewSet(viewsets.ModelViewSet):
"""Handles CRUD operations for EPG programs"""
queryset = ProgramData.objects.all()
serializer_class = ProgramDataSerializer
def get_permissions(self):
try:
return [perm() for perm in permission_classes_by_action[self.action]]
except KeyError:
return [Authenticated()]
def list(self, request, *args, **kwargs):
logger.debug("Listing all EPG programs.")
return super().list(request, *args, **kwargs)
# ─────────────────────────────
# 3) EPG Grid View
# ─────────────────────────────
class EPGGridAPIView(APIView):
"""Returns all programs airing in the next 24 hours including currently running ones and recent ones"""
def get_permissions(self):
try:
return [
perm() for perm in permission_classes_by_method[self.request.method]
]
except KeyError:
return [Authenticated()]
@swagger_auto_schema(
operation_description="Retrieve programs from the previous hour, currently running and upcoming for the next 24 hours",
responses={200: ProgramDataSerializer(many=True)},
)
def get(self, request, format=None):
# Use current time instead of midnight
now = timezone.now()
one_hour_ago = now - timedelta(hours=1)
twenty_four_hours_later = now + timedelta(hours=24)
logger.debug(f"EPGGridAPIView: Querying programs between {one_hour_ago} and {twenty_four_hours_later}.")
# Use select_related to prefetch EPGData and include programs from the last hour
programs = ProgramData.objects.select_related("epg").filter(
programs = ProgramData.objects.select_related('epg').filter(
# Programs that end after one hour ago (includes recently ended programs)
end_time__gt=one_hour_ago,
# AND start before the end time window
start_time__lt=twenty_four_hours_later,
)
count = programs.count()
logger.debug(f"EPGGridAPIView: Found {count} program(s), including recently ended, currently running, and upcoming shows.")
# Generate dummy programs for channels that have no EPG data OR dummy EPG sources
from apps.channels.models import Channel
from apps.epg.models import EPGSource
from django.db.models import Q
# Get channels with no EPG data at all (standard dummy)
channels_without_epg = Channel.objects.filter(Q(epg_data__isnull=True))
# Get channels with custom dummy EPG sources (generate on-demand with patterns)
channels_with_custom_dummy = Channel.objects.filter(
epg_data__epg_source__source_type='dummy'
).distinct()
# Log what we found
without_count = channels_without_epg.count()
custom_count = channels_with_custom_dummy.count()
if without_count > 0:
channel_names = [f"{ch.name} (ID: {ch.id})" for ch in channels_without_epg]
logger.debug(f"EPGGridAPIView: Channels needing standard dummy EPG: {', '.join(channel_names)}")
if custom_count > 0:
channel_names = [f"{ch.name} (ID: {ch.id})" for ch in channels_with_custom_dummy]
logger.debug(f"EPGGridAPIView: Channels needing custom dummy EPG: {', '.join(channel_names)}")
logger.debug(f"EPGGridAPIView: Found {without_count} channels needing standard dummy, {custom_count} needing custom dummy EPG.")
# Serialize the regular programs
serialized_programs = ProgramDataSerializer(programs, many=True).data
@ -188,122 +130,43 @@ class EPGGridAPIView(APIView):
(0, 4): [
"Late Night with {channel} - Where insomniacs unite!",
"The 'Why Am I Still Awake?' Show on {channel}",
"Counting Sheep - A {channel} production for the sleepless",
],
(4, 8): [
"Dawn Patrol - Rise and shine with {channel}!",
"Early Bird Special - Coffee not included",
"Morning Zombies - Before coffee viewing on {channel}",
],
(8, 12): [
"Mid-Morning Meetings - Pretend you're paying attention while watching {channel}",
"The 'I Should Be Working' Hour on {channel}",
"Productivity Killer - {channel}'s daytime programming",
],
(12, 16): [
"Lunchtime Laziness with {channel}",
"The Afternoon Slump - Brought to you by {channel}",
"Post-Lunch Food Coma Theater on {channel}",
],
(16, 20): [
"Rush Hour - {channel}'s alternative to traffic",
"The 'What's For Dinner?' Debate on {channel}",
"Evening Escapism - {channel}'s remedy for reality",
],
(20, 24): [
"Prime Time Placeholder - {channel}'s finest not-programming",
"The 'Netflix Was Too Complicated' Show on {channel}",
"Family Argument Avoider - Courtesy of {channel}",
],
}
# Generate and append dummy programs
dummy_programs = []
# Import the function from output.views
from apps.output.views import generate_dummy_programs as gen_dummy_progs
# Handle channels with CUSTOM dummy EPG sources (with patterns)
for channel in channels_with_custom_dummy:
# For dummy EPGs, ALWAYS use channel UUID to ensure unique programs per channel
# This prevents multiple channels assigned to the same dummy EPG from showing identical data
# Each channel gets its own unique program data even if they share the same EPG source
dummy_tvg_id = str(channel.uuid)
try:
# Get the custom dummy EPG source
epg_source = channel.epg_data.epg_source if channel.epg_data else None
logger.debug(f"Generating custom dummy programs for channel: {channel.name} (ID: {channel.id})")
# Determine which name to parse based on custom properties
name_to_parse = channel.name
if epg_source and epg_source.custom_properties:
custom_props = epg_source.custom_properties
name_source = custom_props.get('name_source')
if name_source == 'stream':
# Get the stream index (1-based from user, convert to 0-based)
stream_index = custom_props.get('stream_index', 1) - 1
# Get streams ordered by channelstream order
channel_streams = channel.streams.all().order_by('channelstream__order')
if channel_streams.exists() and 0 <= stream_index < channel_streams.count():
stream = list(channel_streams)[stream_index]
name_to_parse = stream.name
logger.debug(f"Using stream name for parsing: {name_to_parse} (stream index: {stream_index})")
else:
logger.warning(f"Stream index {stream_index} not found for channel {channel.name}, falling back to channel name")
elif name_source == 'channel':
logger.debug(f"Using channel name for parsing: {name_to_parse}")
# Generate programs using custom patterns from the dummy EPG source
# Use the same tvg_id that will be set in the program data
generated = gen_dummy_progs(
channel_id=dummy_tvg_id,
channel_name=name_to_parse,
num_days=1,
program_length_hours=4,
epg_source=epg_source
)
# Custom dummy should always return data (either from patterns or fallback)
if generated:
logger.debug(f"Generated {len(generated)} custom dummy programs for {channel.name}")
# Convert generated programs to API format
for program in generated:
dummy_program = {
"id": f"dummy-custom-{channel.id}-{program['start_time'].hour}",
"epg": {"tvg_id": dummy_tvg_id, "name": channel.name},
"start_time": program['start_time'].isoformat(),
"end_time": program['end_time'].isoformat(),
"title": program['title'],
"description": program['description'],
"tvg_id": dummy_tvg_id,
"sub_title": None,
"custom_properties": None,
}
dummy_programs.append(dummy_program)
else:
logger.warning(f"No programs generated for custom dummy EPG channel: {channel.name}")
except Exception as e:
logger.error(
f"Error creating custom dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}"
)
# Handle channels with NO EPG data (standard dummy with humorous descriptions)
for channel in channels_without_epg:
# For channels with no EPG, use UUID to ensure uniqueness (matches frontend logic)
# The frontend uses: tvgRecord?.tvg_id ?? channel.uuid
# Since there's no EPG data, it will fall back to UUID
# Use the channel UUID as tvg_id for dummy programs to match in the guide
dummy_tvg_id = str(channel.uuid)
try:
logger.debug(f"Generating standard dummy programs for channel: {channel.name} (ID: {channel.id})")
# Create programs every 4 hours for the next 24 hours with humorous descriptions
for hour_offset in range(0, 24, 4):
# Use timedelta for time arithmetic instead of replace() to avoid hour overflow
start_time = now + timedelta(hours=hour_offset)
@ -321,9 +184,7 @@ class EPGGridAPIView(APIView):
if start_range <= hour < end_range:
# Pick a description using the sum of the hour and day as seed
# This makes it somewhat random but consistent for the same timeslot
description = descriptions[(hour + day) % len(descriptions)].format(channel=channel.name)
break
else:
# Fallback description if somehow no range matches
@ -331,31 +192,29 @@ class EPGGridAPIView(APIView):
# Create a dummy program in the same format as regular programs
dummy_program = {
"id": f"dummy-standard-{channel.id}-{hour_offset}",
"epg": {"tvg_id": dummy_tvg_id, "name": channel.name},
"start_time": start_time.isoformat(),
"end_time": end_time.isoformat(),
"title": f"{channel.name}",
"description": description,
"tvg_id": dummy_tvg_id,
"sub_title": None,
"custom_properties": None,
'id': f"dummy-{channel.id}-{hour_offset}", # Create a unique ID
'epg': {
'tvg_id': dummy_tvg_id,
'name': channel.name
},
'start_time': start_time.isoformat(),
'end_time': end_time.isoformat(),
'title': f"{channel.name}",
'description': description,
'tvg_id': dummy_tvg_id,
'sub_title': None,
'custom_properties': None
}
dummy_programs.append(dummy_program)
except Exception as e:
logger.error(f"Error creating standard dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}")
# Combine regular and dummy programs
all_programs = list(serialized_programs) + dummy_programs
logger.debug(f"EPGGridAPIView: Returning {len(all_programs)} total programs (including {len(dummy_programs)} dummy programs).")
return Response({"data": all_programs}, status=status.HTTP_200_OK)
# ─────────────────────────────
# 4) EPG Import View
@ -363,41 +222,15 @@ class EPGGridAPIView(APIView):
class EPGImportAPIView(APIView):
"""Triggers an EPG data refresh"""
def get_permissions(self):
try:
return [
perm() for perm in permission_classes_by_method[self.request.method]
]
except KeyError:
return [Authenticated()]
@swagger_auto_schema(
operation_description="Triggers an EPG data import",
responses={202: "EPG data import initiated"},
responses={202: "EPG data import initiated"}
)
def post(self, request, format=None):
logger.info("EPGImportAPIView: Received request to import EPG data.")
epg_id = request.data.get("id", None)
# Check if this is a dummy EPG source
try:
from .models import EPGSource
epg_source = EPGSource.objects.get(id=epg_id)
if epg_source.source_type == 'dummy':
logger.info(f"EPGImportAPIView: Skipping refresh for dummy EPG source {epg_id}")
return Response(
{"success": False, "message": "Dummy EPG sources do not require refreshing."},
status=status.HTTP_400_BAD_REQUEST,
)
except EPGSource.DoesNotExist:
pass # Let the task handle the missing source
refresh_epg_data.delay(epg_id) # Trigger Celery task
logger.info("EPGImportAPIView: Task dispatched to refresh EPG data.")
return Response(
{"success": True, "message": "EPG data import initiated."},
status=status.HTTP_202_ACCEPTED,
)
# ─────────────────────────────
@ -407,13 +240,6 @@ class EPGDataViewSet(viewsets.ReadOnlyModelViewSet):
"""
API endpoint that allows EPGData objects to be viewed.
"""
queryset = EPGData.objects.all()
serializer_class = EPGDataSerializer
def get_permissions(self):
try:
return [perm() for perm in permission_classes_by_action[self.action]]
except KeyError:
return [Authenticated()]

View file

@ -1,18 +0,0 @@
# Generated by Django 5.2.4 on 2025-09-02 14:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('epg', '0014_epgsource_extracted_file_path'),
]
operations = [
migrations.AlterField(
model_name='programdata',
name='custom_properties',
field=models.JSONField(blank=True, default=dict, null=True),
),
]

View file

@ -1,18 +0,0 @@
# Generated by Django 5.2.4 on 2025-09-16 22:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('epg', '0015_alter_programdata_custom_properties'),
]
operations = [
migrations.AddField(
model_name='epgdata',
name='icon_url',
field=models.URLField(blank=True, max_length=500, null=True),
),
]

View file

@ -1,18 +0,0 @@
# Generated by Django 5.2.4 on 2025-09-24 21:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('epg', '0016_epgdata_icon_url'),
]
operations = [
migrations.AlterField(
model_name='epgsource',
name='url',
field=models.URLField(blank=True, max_length=1000, null=True),
),
]

View file

@ -1,23 +0,0 @@
# Generated by Django 5.2.4 on 2025-10-17 17:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('epg', '0017_alter_epgsource_url'),
]
operations = [
migrations.AddField(
model_name='epgsource',
name='custom_properties',
field=models.JSONField(blank=True, default=dict, help_text='Custom properties for dummy EPG configuration (regex patterns, timezone, duration, etc.)', null=True),
),
migrations.AlterField(
model_name='epgsource',
name='source_type',
field=models.CharField(choices=[('xmltv', 'XMLTV URL'), ('schedules_direct', 'Schedules Direct API'), ('dummy', 'Custom Dummy EPG')], max_length=20),
),
]

View file

@ -1,18 +0,0 @@
# Generated by Django 5.2.4 on 2025-10-22 21:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('epg', '0018_epgsource_custom_properties_and_more'),
]
operations = [
migrations.AlterField(
model_name='programdata',
name='sub_title',
field=models.TextField(blank=True, null=True),
),
]

View file

@ -1,119 +0,0 @@
# Generated migration to replace {time} placeholders with {starttime}
import re
from django.db import migrations
def migrate_time_placeholders(apps, schema_editor):
"""
Replace {time} with {starttime} and {time24} with {starttime24}
in all dummy EPG source custom_properties templates.
"""
EPGSource = apps.get_model('epg', 'EPGSource')
# Fields that contain templates with placeholders
template_fields = [
'title_template',
'description_template',
'upcoming_title_template',
'upcoming_description_template',
'ended_title_template',
'ended_description_template',
'channel_logo_url',
'program_poster_url',
]
# Get all dummy EPG sources
dummy_sources = EPGSource.objects.filter(source_type='dummy')
updated_count = 0
for source in dummy_sources:
if not source.custom_properties:
continue
modified = False
custom_props = source.custom_properties.copy()
for field in template_fields:
if field in custom_props and custom_props[field]:
original_value = custom_props[field]
# Replace {time24} before {time} as a defensive ordering, so the shorter
# placeholder pattern can never interfere with the longer {time24} token
new_value = original_value
new_value = re.sub(r'\{time24\}', '{starttime24}', new_value)
new_value = re.sub(r'\{time\}', '{starttime}', new_value)
if new_value != original_value:
custom_props[field] = new_value
modified = True
if modified:
source.custom_properties = custom_props
source.save(update_fields=['custom_properties'])
updated_count += 1
if updated_count > 0:
print(f"Migration complete: Updated {updated_count} dummy EPG source(s) with new placeholder names.")
else:
print("No dummy EPG sources needed placeholder updates.")
def reverse_migration(apps, schema_editor):
"""
Reverse the migration by replacing {starttime} back to {time}.
"""
EPGSource = apps.get_model('epg', 'EPGSource')
template_fields = [
'title_template',
'description_template',
'upcoming_title_template',
'upcoming_description_template',
'ended_title_template',
'ended_description_template',
'channel_logo_url',
'program_poster_url',
]
dummy_sources = EPGSource.objects.filter(source_type='dummy')
updated_count = 0
for source in dummy_sources:
if not source.custom_properties:
continue
modified = False
custom_props = source.custom_properties.copy()
for field in template_fields:
if field in custom_props and custom_props[field]:
original_value = custom_props[field]
# Reverse the replacements
new_value = original_value
new_value = re.sub(r'\{starttime24\}', '{time24}', new_value)
new_value = re.sub(r'\{starttime\}', '{time}', new_value)
if new_value != original_value:
custom_props[field] = new_value
modified = True
if modified:
source.custom_properties = custom_props
source.save(update_fields=['custom_properties'])
updated_count += 1
if updated_count > 0:
print(f"Reverse migration complete: Reverted {updated_count} dummy EPG source(s) to old placeholder names.")
class Migration(migrations.Migration):
dependencies = [
('epg', '0019_alter_programdata_sub_title'),
]
operations = [
migrations.RunPython(migrate_time_placeholders, reverse_migration),
]
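A quick illustration of the substitution this data migration applies to a stored template (the sample string is made up):

import re

def upgrade_placeholders(value):
    # Same two substitutions as the migration, {time24} handled first.
    value = re.sub(r'\{time24\}', '{starttime24}', value)
    value = re.sub(r'\{time\}', '{starttime}', value)
    return value

print(upgrade_placeholders("{channel} at {time} / {time24}"))
# -> "{channel} at {starttime} / {starttime24}"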

View file

@ -1,18 +0,0 @@
# Generated by Django 5.2.4 on 2025-12-05 15:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('epg', '0020_migrate_time_to_starttime_placeholders'),
]
operations = [
migrations.AddField(
model_name='epgsource',
name='priority',
field=models.PositiveIntegerField(default=0, help_text='Priority for EPG matching (higher numbers = higher priority). Used when multiple EPG sources have matching entries for a channel.'),
),
]

View file

@ -8,7 +8,6 @@ class EPGSource(models.Model):
SOURCE_TYPE_CHOICES = [
('xmltv', 'XMLTV URL'),
('schedules_direct', 'Schedules Direct API'),
('dummy', 'Custom Dummy EPG'),
]
STATUS_IDLE = 'idle'
@ -29,7 +28,7 @@ class EPGSource(models.Model):
name = models.CharField(max_length=255, unique=True)
source_type = models.CharField(max_length=20, choices=SOURCE_TYPE_CHOICES)
url = models.URLField(max_length=1000, blank=True, null=True) # For XMLTV
url = models.URLField(blank=True, null=True) # For XMLTV
api_key = models.CharField(max_length=255, blank=True, null=True) # For Schedules Direct
is_active = models.BooleanField(default=True)
file_path = models.CharField(max_length=1024, blank=True, null=True)
@ -39,16 +38,6 @@ class EPGSource(models.Model):
refresh_task = models.ForeignKey(
PeriodicTask, on_delete=models.SET_NULL, null=True, blank=True
)
custom_properties = models.JSONField(
default=dict,
blank=True,
null=True,
help_text="Custom properties for dummy EPG configuration (regex patterns, timezone, duration, etc.)"
)
priority = models.PositiveIntegerField(
default=0,
help_text="Priority for EPG matching (higher numbers = higher priority). Used when multiple EPG sources have matching entries for a channel."
)
status = models.CharField(
max_length=20,
choices=STATUS_CHOICES,
@ -138,7 +127,6 @@ class EPGData(models.Model):
# and a name (which might simply be the tvg_id if no real channel exists).
tvg_id = models.CharField(max_length=255, null=True, blank=True, db_index=True)
name = models.CharField(max_length=255)
icon_url = models.URLField(max_length=500, null=True, blank=True)
epg_source = models.ForeignKey(
EPGSource,
on_delete=models.CASCADE,
@ -159,10 +147,10 @@ class ProgramData(models.Model):
start_time = models.DateTimeField()
end_time = models.DateTimeField()
title = models.CharField(max_length=255)
sub_title = models.TextField(blank=True, null=True)
sub_title = models.CharField(max_length=255, blank=True, null=True)
description = models.TextField(blank=True, null=True)
tvg_id = models.CharField(max_length=255, null=True, blank=True)
custom_properties = models.JSONField(default=dict, blank=True, null=True)
custom_properties = models.TextField(null=True, blank=True)
def __str__(self):
return f"{self.title} ({self.start_time} - {self.end_time})"

View file

@ -1,17 +1,10 @@
from core.utils import validate_flexible_url
from rest_framework import serializers
from .models import EPGSource, EPGData, ProgramData
from apps.channels.models import Channel
class EPGSourceSerializer(serializers.ModelSerializer):
epg_data_count = serializers.SerializerMethodField()
epg_data_ids = serializers.SerializerMethodField()
read_only_fields = ['created_at', 'updated_at']
url = serializers.CharField(
required=False,
allow_blank=True,
allow_null=True,
validators=[validate_flexible_url]
)
class Meta:
model = EPGSource
@ -24,18 +17,15 @@ class EPGSourceSerializer(serializers.ModelSerializer):
'is_active',
'file_path',
'refresh_interval',
'priority',
'status',
'last_message',
'created_at',
'updated_at',
'custom_properties',
'epg_data_count'
'epg_data_ids'
]
def get_epg_data_count(self, obj):
"""Return the count of EPG data entries instead of all IDs to prevent large payloads"""
return obj.epgs.count()
def get_epg_data_ids(self, obj):
return list(obj.epgs.values_list('id', flat=True))
class ProgramDataSerializer(serializers.ModelSerializer):
class Meta:
@ -55,6 +45,5 @@ class EPGDataSerializer(serializers.ModelSerializer):
'id',
'tvg_id',
'name',
'icon_url',
'epg_source',
]

View file

@ -1,9 +1,9 @@
from django.db.models.signals import post_save, post_delete, pre_save
from django.dispatch import receiver
from .models import EPGSource, EPGData
from .models import EPGSource
from .tasks import refresh_epg_data, delete_epg_refresh_task_by_id
from django_celery_beat.models import PeriodicTask, IntervalSchedule
from core.utils import is_protected_path, send_websocket_update
from core.utils import is_protected_path
import json
import logging
import os
@ -12,77 +12,15 @@ logger = logging.getLogger(__name__)
@receiver(post_save, sender=EPGSource)
def trigger_refresh_on_new_epg_source(sender, instance, created, **kwargs):
# Trigger refresh only if the source is newly created, active, and not a dummy EPG
if created and instance.is_active and instance.source_type != 'dummy':
# Trigger refresh only if the source is newly created and active
if created and instance.is_active:
refresh_epg_data.delay(instance.id)
@receiver(post_save, sender=EPGSource)
def create_dummy_epg_data(sender, instance, created, **kwargs):
"""
Automatically create EPGData for dummy EPG sources when they are created.
This allows channels to be assigned to dummy EPGs immediately without
requiring a refresh first.
"""
if instance.source_type == 'dummy':
# Ensure dummy EPGs always have idle status and no status message
if instance.status != EPGSource.STATUS_IDLE or instance.last_message:
instance.status = EPGSource.STATUS_IDLE
instance.last_message = None
instance.save(update_fields=['status', 'last_message'])
# Create a URL-friendly tvg_id from the dummy EPG name
# Replace spaces and special characters with underscores
friendly_tvg_id = instance.name.replace(' ', '_').replace('-', '_')
# Remove any characters that aren't alphanumeric or underscores
friendly_tvg_id = ''.join(c for c in friendly_tvg_id if c.isalnum() or c == '_')
# Convert to lowercase for consistency
friendly_tvg_id = friendly_tvg_id.lower()
# Prefix with 'dummy_' to make it clear this is a dummy EPG
friendly_tvg_id = f"dummy_{friendly_tvg_id}"
# Create or update the EPGData record
epg_data, data_created = EPGData.objects.get_or_create(
tvg_id=friendly_tvg_id,
epg_source=instance,
defaults={
'name': instance.name,
'icon_url': None
}
)
# Update name if it changed and record already existed
if not data_created and epg_data.name != instance.name:
epg_data.name = instance.name
epg_data.save(update_fields=['name'])
if data_created:
logger.info(f"Auto-created EPGData for dummy EPG source: {instance.name} (ID: {instance.id})")
# Send websocket update to notify frontend that EPG data has been created
# This allows the channel form to immediately show the new dummy EPG without refreshing
send_websocket_update('updates', 'update', {
'type': 'epg_data_created',
'source_id': instance.id,
'source_name': instance.name,
'epg_data_id': epg_data.id
})
else:
logger.debug(f"EPGData already exists for dummy EPG source: {instance.name} (ID: {instance.id})")
@receiver(post_save, sender=EPGSource)
def create_or_update_refresh_task(sender, instance, **kwargs):
"""
Create or update a Celery Beat periodic task when an EPGSource is created/updated.
Skip creating tasks for dummy EPG sources as they don't need refreshing.
"""
# Skip task creation for dummy EPGs
if instance.source_type == 'dummy':
# If there's an existing task, disable it
if instance.refresh_task:
instance.refresh_task.enabled = False
instance.refresh_task.save(update_fields=['enabled'])
return
task_name = f"epg_source-refresh-{instance.id}"
interval, _ = IntervalSchedule.objects.get_or_create(
every=int(instance.refresh_interval),
@ -142,14 +80,7 @@ def delete_refresh_task(sender, instance, **kwargs):
def update_status_on_active_change(sender, instance, **kwargs):
"""
When an EPGSource's is_active field changes, update the status accordingly.
For dummy EPGs, always ensure status is idle and no status message.
"""
# Dummy EPGs should always be idle with no status message
if instance.source_type == 'dummy':
instance.status = EPGSource.STATUS_IDLE
instance.last_message = None
return
if instance.pk: # Only for existing records, not new ones
try:
# Get the current record from the database

View file

@ -24,30 +24,11 @@ from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from .models import EPGSource, EPGData, ProgramData
from core.utils import acquire_task_lock, release_task_lock, send_websocket_update, cleanup_memory, log_system_event
from core.utils import acquire_task_lock, release_task_lock, send_websocket_update, cleanup_memory
logger = logging.getLogger(__name__)
def validate_icon_url_fast(icon_url, max_length=None):
"""
Fast validation for icon URLs during parsing.
Returns None if URL is too long, original URL otherwise.
If max_length is None, gets it dynamically from the EPGData model field.
"""
if max_length is None:
# Get max_length dynamically from the model field
max_length = EPGData._meta.get_field('icon_url').max_length
if icon_url and len(icon_url) > max_length:
logger.warning(f"Icon URL too long ({len(icon_url)} > {max_length}), skipping: {icon_url[:100]}...")
return None
return icon_url
MAX_EXTRACT_CHUNK_SIZE = 65536 # 64kb (base2)
def send_epg_update(source_id, action, progress, **kwargs):
"""Send WebSocket update about EPG download/parsing progress"""
# Start with the base data dictionary
@ -133,9 +114,8 @@ def delete_epg_refresh_task_by_id(epg_id):
@shared_task
def refresh_all_epg_data():
logger.info("Starting refresh_epg_data task.")
# Exclude dummy EPG sources from refresh - they don't need refreshing
active_sources = EPGSource.objects.filter(is_active=True).exclude(source_type='dummy')
logger.debug(f"Found {active_sources.count()} active EPGSource(s) (excluding dummy EPGs).")
active_sources = EPGSource.objects.filter(is_active=True)
logger.debug(f"Found {active_sources.count()} active EPGSource(s).")
for source in active_sources:
refresh_epg_data(source.id)
@ -181,13 +161,6 @@ def refresh_epg_data(source_id):
gc.collect()
return
# Skip refresh for dummy EPG sources - they don't need refreshing
if source.source_type == 'dummy':
logger.info(f"Skipping refresh for dummy EPG source {source.name} (ID: {source_id})")
release_task_lock('refresh_epg_data', source_id)
gc.collect()
return
# Continue with the normal processing...
logger.info(f"Processing EPGSource: {source.name} (type: {source.source_type})")
if source.source_type == 'xmltv':
@ -213,12 +186,6 @@ def refresh_epg_data(source_id):
fetch_schedules_direct(source)
source.save(update_fields=['updated_at'])
# After successful EPG refresh, evaluate DVR series rules to schedule new episodes
try:
from apps.channels.tasks import evaluate_series_rules
evaluate_series_rules.delay()
except Exception:
pass
except Exception as e:
logger.error(f"Error in refresh_epg_data for source {source_id}: {e}", exc_info=True)
try:
@ -286,12 +253,11 @@ def fetch_xmltv(source):
logger.info(f"Fetching XMLTV data from source: {source.name}")
try:
# Get default user agent from settings
stream_settings = CoreSettings.get_stream_settings()
default_user_agent_setting = CoreSettings.objects.filter(key='default-user-agent').first()
user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:138.0) Gecko/20100101 Firefox/138.0" # Fallback default
default_user_agent_id = stream_settings.get('default_user_agent')
if default_user_agent_id:
if default_user_agent_setting and default_user_agent_setting.value:
try:
user_agent_obj = UserAgent.objects.filter(id=int(default_user_agent_id)).first()
user_agent_obj = UserAgent.objects.filter(id=int(default_user_agent_setting.value)).first()
if user_agent_obj and user_agent_obj.user_agent:
user_agent = user_agent_obj.user_agent
logger.debug(f"Using default user agent: {user_agent}")
@ -675,11 +641,7 @@ def extract_compressed_file(file_path, output_path=None, delete_original=False):
# Reset file pointer and extract the content
gz_file.seek(0)
with open(extracted_path, 'wb') as out_file:
while True:
chunk = gz_file.read(MAX_EXTRACT_CHUNK_SIZE)
if not chunk or len(chunk) == 0:
break
out_file.write(chunk)
out_file.write(gz_file.read())
except Exception as e:
logger.error(f"Error extracting GZIP file: {e}", exc_info=True)
return None
@ -723,13 +685,9 @@ def extract_compressed_file(file_path, output_path=None, delete_original=False):
return None
# Extract the first XML file
xml_content = zip_file.read(xml_files[0])
with open(extracted_path, 'wb') as out_file:
with zip_file.open(xml_files[0], "r") as xml_file:
while True:
chunk = xml_file.read(MAX_EXTRACT_CHUNK_SIZE)
if not chunk or len(chunk) == 0:
break
out_file.write(chunk)
out_file.write(xml_content)
logger.info(f"Successfully extracted zip file to: {extracted_path}")
@ -857,7 +815,6 @@ def parse_channels_only(source):
processed_channels = 0
batch_size = 500 # Process in batches to limit memory usage
progress = 0 # Initialize progress variable here
icon_url_max_length = EPGData._meta.get_field('icon_url').max_length # Get max length for icon_url field
# Track memory at key points
if process:
@ -886,7 +843,7 @@ def parse_channels_only(source):
# Change iterparse to look for both channel and programme elements
logger.debug(f"Creating iterparse context for channels and programmes")
channel_parser = etree.iterparse(source_file, events=('end',), tag=('channel', 'programme'), remove_blank_text=True, recover=True)
channel_parser = etree.iterparse(source_file, events=('end',), tag=('channel', 'programme'), remove_blank_text=True)
if process:
logger.debug(f"[parse_channels_only] Memory after creating iterparse: {process.memory_info().rss / 1024 / 1024:.2f} MB")
@ -900,15 +857,10 @@ def parse_channels_only(source):
tvg_id = elem.get('id', '').strip()
if tvg_id:
display_name = None
icon_url = None
for child in elem:
if display_name is None and child.tag == 'display-name' and child.text:
if child.tag == 'display-name' and child.text:
display_name = child.text.strip()
elif child.tag == 'icon':
raw_icon_url = child.get('src', '').strip()
icon_url = validate_icon_url_fast(raw_icon_url, icon_url_max_length)
if display_name and icon_url:
break # No need to continue if we have both
break
if not display_name:
display_name = tvg_id
@ -926,24 +878,17 @@ def parse_channels_only(source):
epgs_to_create.append(EPGData(
tvg_id=tvg_id,
name=display_name,
icon_url=icon_url,
epg_source=source,
))
logger.debug(f"[parse_channels_only] Added new channel to epgs_to_create 1: {tvg_id} - {display_name}")
processed_channels += 1
continue
# We use the cached object to check if the name or icon_url has changed
# We use the cached object to check if the name has changed
epg_obj = existing_epgs[tvg_id]
needs_update = False
if epg_obj.name != display_name:
# Only update if the name actually changed
epg_obj.name = display_name
needs_update = True
if epg_obj.icon_url != icon_url:
epg_obj.icon_url = icon_url
needs_update = True
if needs_update:
epgs_to_update.append(epg_obj)
logger.debug(f"[parse_channels_only] Added channel to update to epgs_to_update: {tvg_id} - {display_name}")
else:
@ -954,7 +899,6 @@ def parse_channels_only(source):
epgs_to_create.append(EPGData(
tvg_id=tvg_id,
name=display_name,
icon_url=icon_url,
epg_source=source,
))
logger.debug(f"[parse_channels_only] Added new channel to epgs_to_create 2: {tvg_id} - {display_name}")
@ -977,7 +921,7 @@ def parse_channels_only(source):
logger.info(f"[parse_channels_only] Bulk updating {len(epgs_to_update)} EPG entries")
if process:
logger.info(f"[parse_channels_only] Memory before bulk_update: {process.memory_info().rss / 1024 / 1024:.2f} MB")
EPGData.objects.bulk_update(epgs_to_update, ["name", "icon_url"])
EPGData.objects.bulk_update(epgs_to_update, ["name"])
if process:
logger.info(f"[parse_channels_only] Memory after bulk_update: {process.memory_info().rss / 1024 / 1024:.2f} MB")
epgs_to_update = []
@ -1044,7 +988,7 @@ def parse_channels_only(source):
logger.debug(f"[parse_channels_only] Created final batch of {len(epgs_to_create)} EPG entries")
if epgs_to_update:
EPGData.objects.bulk_update(epgs_to_update, ["name", "icon_url"])
EPGData.objects.bulk_update(epgs_to_update, ["name"])
logger.debug(f"[parse_channels_only] Updated final batch of {len(epgs_to_update)} EPG entries")
if process:
logger.debug(f"[parse_channels_only] Memory after final batch creation: {process.memory_info().rss / 1024 / 1024:.2f} MB")
@ -1158,12 +1102,6 @@ def parse_programs_for_tvg_id(epg_id):
epg = EPGData.objects.get(id=epg_id)
epg_source = epg.epg_source
# Skip program parsing for dummy EPG sources - they don't have program data files
if epg_source.source_type == 'dummy':
logger.info(f"Skipping program parsing for dummy EPG source {epg_source.name} (ID: {epg_id})")
release_task_lock('parse_epg_programs', epg_id)
return
if not Channel.objects.filter(epg_data=epg).exists():
logger.info(f"No channels matched to EPG {epg.tvg_id}")
release_task_lock('parse_epg_programs', epg_id)
@ -1257,7 +1195,7 @@ def parse_programs_for_tvg_id(epg_id):
source_file = open(file_path, 'rb')
# Stream parse the file using lxml's iterparse
program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True, recover=True)
program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True)
for _, elem in program_parser:
if elem.get('channel') == epg.tvg_id:
@ -1286,7 +1224,10 @@ def parse_programs_for_tvg_id(epg_id):
if custom_props:
logger.trace(f"Number of custom properties: {len(custom_props)}")
custom_properties_json = custom_props
try:
custom_properties_json = json.dumps(custom_props)
except Exception as e:
logger.error(f"Error serializing custom properties to JSON: {e}", exc_info=True)
programs_to_create.append(ProgramData(
epg=epg,
@ -1394,23 +1335,11 @@ def parse_programs_for_tvg_id(epg_id):
def parse_programs_for_source(epg_source, tvg_id=None):
"""
Parse programs for all MAPPED channels from an EPG source in a single pass.
This is an optimized version that:
1. Only processes EPG entries that are actually mapped to channels
2. Parses the XML file ONCE instead of once per channel
3. Skips programmes for unmapped channels entirely during parsing
This dramatically improves performance when an EPG source has many channels
but only a fraction are mapped.
"""
# Send initial programs parsing notification
send_epg_update(epg_source.id, "parsing_programs", 0)
should_log_memory = False
process = None
initial_memory = 0
source_file = None
# Add memory tracking only in trace mode or higher
try:
@ -1430,251 +1359,91 @@ def parse_programs_for_source(epg_source, tvg_id=None):
should_log_memory = False
try:
# Only get EPG entries that are actually mapped to channels
mapped_epg_ids = set(
Channel.objects.filter(
epg_data__epg_source=epg_source,
epg_data__isnull=False
).values_list('epg_data_id', flat=True)
)
# Process EPG entries in batches rather than all at once
batch_size = 20 # Process fewer channels at once to reduce memory usage
epg_count = EPGData.objects.filter(epg_source=epg_source).count()
if not mapped_epg_ids:
total_epg_count = EPGData.objects.filter(epg_source=epg_source).count()
logger.info(f"No channels mapped to any EPG entries from source: {epg_source.name} "
f"(source has {total_epg_count} EPG entries, 0 mapped)")
# Update status - this is not an error, just no mapped entries
if epg_count == 0:
logger.info(f"No EPG entries found for source: {epg_source.name}")
# Update status - this is not an error, just no entries
epg_source.status = 'success'
epg_source.last_message = f"No channels mapped to this EPG source ({total_epg_count} entries available)"
epg_source.save(update_fields=['status', 'last_message'])
epg_source.save(update_fields=['status'])
send_epg_update(epg_source.id, "parsing_programs", 100, status="success")
return True
# Get the mapped EPG entries with their tvg_ids
mapped_epgs = EPGData.objects.filter(id__in=mapped_epg_ids).values('id', 'tvg_id')
tvg_id_to_epg_id = {epg['tvg_id']: epg['id'] for epg in mapped_epgs if epg['tvg_id']}
mapped_tvg_ids = set(tvg_id_to_epg_id.keys())
logger.info(f"Parsing programs for {epg_count} EPG entries from source: {epg_source.name}")
total_epg_count = EPGData.objects.filter(epg_source=epg_source).count()
mapped_count = len(mapped_tvg_ids)
failed_entries = []
program_count = 0
channel_count = 0
updated_count = 0
processed = 0
# Process in batches using cursor-based approach to limit memory usage
last_id = 0
while True:
# Get a batch of EPG entries
batch_entries = list(EPGData.objects.filter(
epg_source=epg_source,
id__gt=last_id
).order_by('id')[:batch_size])
logger.info(f"Parsing programs for {mapped_count} MAPPED channels from source: {epg_source.name} "
f"(skipping {total_epg_count - mapped_count} unmapped EPG entries)")
if not batch_entries:
break # No more entries to process
# Get the file path
file_path = epg_source.extracted_file_path if epg_source.extracted_file_path else epg_source.file_path
if not file_path:
file_path = epg_source.get_cache_file()
# Update last_id for next iteration
last_id = batch_entries[-1].id
# Check if the file exists
if not os.path.exists(file_path):
logger.error(f"EPG file not found at: {file_path}")
# Process this batch
for epg in batch_entries:
if epg.tvg_id:
try:
result = parse_programs_for_tvg_id(epg.id)
if result == "Task already running":
logger.info(f"Program parse for {epg.id} already in progress, skipping")
if epg_source.url:
# Update the file path in the database
new_path = epg_source.get_cache_file()
logger.info(f"Updating file_path from '{file_path}' to '{new_path}'")
epg_source.file_path = new_path
epg_source.save(update_fields=['file_path'])
logger.info(f"Fetching new EPG data from URL: {epg_source.url}")
processed += 1
progress = min(95, int((processed / epg_count) * 100)) if epg_count > 0 else 50
send_epg_update(epg_source.id, "parsing_programs", progress)
except Exception as e:
logger.error(f"Error parsing programs for tvg_id={epg.tvg_id}: {e}", exc_info=True)
failed_entries.append(f"{epg.tvg_id}: {str(e)}")
# Fetch new data before continuing
fetch_success = fetch_xmltv(epg_source)
if not fetch_success:
logger.error(f"Failed to fetch EPG data for source: {epg_source.name}")
epg_source.status = 'error'
epg_source.last_message = f"Failed to download EPG data"
epg_source.save(update_fields=['status', 'last_message'])
send_epg_update(epg_source.id, "parsing_programs", 100, status="error", error="Failed to download EPG file")
return False
# Update file_path with the new location
file_path = epg_source.extracted_file_path if epg_source.extracted_file_path else epg_source.file_path
else:
logger.error(f"No URL provided for EPG source {epg_source.name}, cannot fetch new data")
epg_source.status = 'error'
epg_source.last_message = f"No URL provided, cannot fetch EPG data"
epg_source.save(update_fields=['status', 'last_message'])
send_epg_update(epg_source.id, "parsing_programs", 100, status="error", error="No URL provided")
return False
# SINGLE PASS PARSING: Parse the XML file once and collect all programs in memory
# We parse FIRST, then do an atomic delete+insert to avoid race conditions
# where clients might see empty/partial EPG data during the transition
all_programs_to_create = []
programs_by_channel = {tvg_id: 0 for tvg_id in mapped_tvg_ids} # Track count per channel
total_programs = 0
skipped_programs = 0
last_progress_update = 0
try:
logger.debug(f"Opening file for single-pass parsing: {file_path}")
source_file = open(file_path, 'rb')
# Stream parse the file using lxml's iterparse
program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True, recover=True)
for _, elem in program_parser:
channel_id = elem.get('channel')
# Skip programmes for unmapped channels immediately
if channel_id not in mapped_tvg_ids:
skipped_programs += 1
# Clear element to free memory
clear_element(elem)
continue
# This programme is for a mapped channel - process it
try:
start_time = parse_xmltv_time(elem.get('start'))
end_time = parse_xmltv_time(elem.get('stop'))
title = None
desc = None
sub_title = None
# Efficiently process child elements
for child in elem:
if child.tag == 'title':
title = child.text or 'No Title'
elif child.tag == 'desc':
desc = child.text or ''
elif child.tag == 'sub-title':
sub_title = child.text or ''
if not title:
title = 'No Title'
# Extract custom properties
custom_props = extract_custom_properties(elem)
custom_properties_json = custom_props if custom_props else None
epg_id = tvg_id_to_epg_id[channel_id]
all_programs_to_create.append(ProgramData(
epg_id=epg_id,
start_time=start_time,
end_time=end_time,
title=title,
description=desc,
sub_title=sub_title,
tvg_id=channel_id,
custom_properties=custom_properties_json
))
total_programs += 1
programs_by_channel[channel_id] += 1
# Clear the element to free memory
clear_element(elem)
# Send progress update (estimate based on programs processed)
if total_programs - last_progress_update >= 5000:
last_progress_update = total_programs
# Cap at 70% during parsing phase (save 30% for DB operations)
progress = min(70, 10 + int((total_programs / max(total_programs + 10000, 1)) * 60))
send_epg_update(epg_source.id, "parsing_programs", progress,
processed=total_programs, channels=mapped_count)
# Periodic garbage collection during parsing
if total_programs % 5000 == 0:
gc.collect()
except Exception as e:
logger.error(f"Error processing program for {channel_id}: {e}", exc_info=True)
clear_element(elem)
continue
except etree.XMLSyntaxError as xml_error:
logger.error(f"XML syntax error parsing program data: {xml_error}")
epg_source.status = EPGSource.STATUS_ERROR
epg_source.last_message = f"XML parsing error: {str(xml_error)}"
epg_source.save(update_fields=['status', 'last_message'])
send_epg_update(epg_source.id, "parsing_programs", 100, status="error", message=str(xml_error))
return False
except Exception as e:
logger.error(f"Error parsing XML for programs: {e}", exc_info=True)
raise
finally:
if source_file:
source_file.close()
source_file = None
# Now perform atomic delete + bulk insert
# This ensures clients never see empty/partial EPG data
logger.info(f"Parsed {total_programs} programs, performing atomic database update...")
send_epg_update(epg_source.id, "parsing_programs", 75, message="Updating database...")
batch_size = 1000
try:
with transaction.atomic():
# Delete existing programs for mapped EPGs
deleted_count = ProgramData.objects.filter(epg_id__in=mapped_epg_ids).delete()[0]
logger.debug(f"Deleted {deleted_count} existing programs")
# Clean up orphaned programs for unmapped EPG entries
unmapped_epg_ids = list(EPGData.objects.filter(
epg_source=epg_source
).exclude(id__in=mapped_epg_ids).values_list('id', flat=True))
if unmapped_epg_ids:
orphaned_count = ProgramData.objects.filter(epg_id__in=unmapped_epg_ids).delete()[0]
if orphaned_count > 0:
logger.info(f"Cleaned up {orphaned_count} orphaned programs for {len(unmapped_epg_ids)} unmapped EPG entries")
# Bulk insert all new programs in batches within the same transaction
for i in range(0, len(all_programs_to_create), batch_size):
batch = all_programs_to_create[i:i + batch_size]
ProgramData.objects.bulk_create(batch)
# Update progress during insertion
progress = 75 + int((i / len(all_programs_to_create)) * 20) if all_programs_to_create else 95
if i % (batch_size * 5) == 0:
send_epg_update(epg_source.id, "parsing_programs", min(95, progress),
message=f"Inserting programs... {i}/{len(all_programs_to_create)}")
logger.info(f"Atomic update complete: deleted {deleted_count}, inserted {total_programs} programs")
except Exception as db_error:
logger.error(f"Database error during atomic update: {db_error}", exc_info=True)
epg_source.status = EPGSource.STATUS_ERROR
epg_source.last_message = f"Database error: {str(db_error)}"
epg_source.save(update_fields=['status', 'last_message'])
send_epg_update(epg_source.id, "parsing_programs", 100, status="error", message=str(db_error))
return False
finally:
# Clear the large list to free memory
all_programs_to_create = None
# Force garbage collection after each batch
batch_entries = None # Remove reference to help garbage collection
gc.collect()
# Count channels that actually got programs
channels_with_programs = sum(1 for count in programs_by_channel.values() if count > 0)
# If there were failures, include them in the message but continue
if failed_entries:
epg_source.status = EPGSource.STATUS_SUCCESS # Still mark as success if some processed
error_summary = f"Failed to parse {len(failed_entries)} of {epg_count} entries"
stats_summary = f"Processed {program_count} programs across {channel_count} channels. Updated: {updated_count}."
epg_source.last_message = f"{stats_summary} Warning: {error_summary}"
epg_source.updated_at = timezone.now()
epg_source.save(update_fields=['status', 'last_message', 'updated_at'])
# Success message
# Send completion notification with mixed status
send_epg_update(epg_source.id, "parsing_programs", 100,
status="success",
message=epg_source.last_message)
# Explicitly release memory of large lists before returning
del failed_entries
gc.collect()
return True
# If all successful, set a comprehensive success message
epg_source.status = EPGSource.STATUS_SUCCESS
epg_source.last_message = (
f"Parsed {total_programs:,} programs for {channels_with_programs} channels "
f"(skipped {skipped_programs:,} programs for {total_epg_count - mapped_count} unmapped channels)"
)
epg_source.last_message = f"Successfully processed {program_count} programs across {channel_count} channels. Updated: {updated_count}."
epg_source.updated_at = timezone.now()
epg_source.save(update_fields=['status', 'last_message', 'updated_at'])
# Log system event for EPG refresh
log_system_event(
event_type='epg_refresh',
source_name=epg_source.name,
programs=total_programs,
channels=channels_with_programs,
skipped_programs=skipped_programs,
unmapped_channels=total_epg_count - mapped_count,
)
# Send completion notification with status
send_epg_update(epg_source.id, "parsing_programs", 100,
status="success",
message=epg_source.last_message,
updated_at=epg_source.updated_at.isoformat())
message=epg_source.last_message)
logger.info(f"Completed parsing programs for source: {epg_source.name} - "
f"{total_programs:,} programs for {channels_with_programs} channels, "
f"skipped {skipped_programs:,} programs for unmapped channels")
logger.info(f"Completed parsing all programs for source: {epg_source.name}")
return True
except Exception as e:
@ -1689,19 +1458,14 @@ def parse_programs_for_source(epg_source, tvg_id=None):
return False
finally:
# Final memory cleanup and tracking
if source_file:
try:
source_file.close()
except:
pass
source_file = None
# Explicitly release any remaining large data structures
programs_to_create = None
programs_by_channel = None
mapped_epg_ids = None
mapped_tvg_ids = None
tvg_id_to_epg_id = None
failed_entries = None
program_count = None
channel_count = None
updated_count = None
processed = None
gc.collect()
# Add comprehensive memory cleanup at the end
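The single-pass approach above boils down to one iterparse walk that discards programmes for unmapped channels as soon as they are seen. A condensed sketch of that pattern (the function name and the yielded tuple are illustrative):

from lxml import etree

def iter_mapped_programmes(xml_path, mapped_tvg_ids):
    # One streaming pass; elements are cleared immediately so memory stays flat.
    for _, elem in etree.iterparse(xml_path, events=('end',), tag='programme', recover=True):
        channel_id = elem.get('channel')
        if channel_id in mapped_tvg_ids:
            yield channel_id, elem.get('start'), elem.get('stop'), elem.findtext('title')
        elem.clear()
        while elem.getprevious() is not None:
            del elem.getparent()[0]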
@ -1715,13 +1479,12 @@ def fetch_schedules_direct(source):
logger.info(f"Fetching Schedules Direct data from source: {source.name}")
try:
# Get default user agent from settings
stream_settings = CoreSettings.get_stream_settings()
default_user_agent_setting = CoreSettings.objects.filter(key='default-user-agent').first()
user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:138.0) Gecko/20100101 Firefox/138.0" # Fallback default
default_user_agent_id = stream_settings.get('default_user_agent')
if default_user_agent_id:
if default_user_agent_setting and default_user_agent_setting.value:
try:
user_agent_obj = UserAgent.objects.filter(id=int(default_user_agent_id)).first()
user_agent_obj = UserAgent.objects.filter(id=int(default_user_agent_setting.value)).first()
if user_agent_obj and user_agent_obj.user_agent:
user_agent = user_agent_obj.user_agent
logger.debug(f"Using default user agent: {user_agent}")
@ -1849,11 +1612,6 @@ def extract_custom_properties(prog):
if categories:
custom_props['categories'] = categories
# Extract keywords (new)
keywords = [kw.text.strip() for kw in prog.findall('keyword') if kw.text and kw.text.strip()]
if keywords:
custom_props['keywords'] = keywords
# Extract episode numbers
for ep_num in prog.findall('episode-num'):
system = ep_num.get('system', '')
@ -1876,12 +1634,6 @@ def extract_custom_properties(prog):
elif system == 'onscreen' and ep_num.text:
# Just store the raw onscreen format
custom_props['onscreen_episode'] = ep_num.text.strip()
elif system == 'dd_progid' and ep_num.text:
# Store the dd_progid format
custom_props['dd_progid'] = ep_num.text.strip()
# Add support for other systems like thetvdb.com, themoviedb.org, imdb.com
elif system in ['thetvdb.com', 'themoviedb.org', 'imdb.com'] and ep_num.text:
custom_props[f'{system}_id'] = ep_num.text.strip()
# Extract ratings more efficiently
rating_elem = prog.find('rating')
@ -1892,172 +1644,37 @@ def extract_custom_properties(prog):
if rating_elem.get('system'):
custom_props['rating_system'] = rating_elem.get('system')
# Extract star ratings (new)
star_ratings = []
for star_rating in prog.findall('star-rating'):
value_elem = star_rating.find('value')
if value_elem is not None and value_elem.text:
rating_data = {'value': value_elem.text.strip()}
if star_rating.get('system'):
rating_data['system'] = star_rating.get('system')
star_ratings.append(rating_data)
if star_ratings:
custom_props['star_ratings'] = star_ratings
# Extract credits more efficiently
credits_elem = prog.find('credits')
if credits_elem is not None:
credits = {}
for credit_type in ['director', 'actor', 'writer', 'adapter', 'producer', 'composer', 'editor', 'presenter', 'commentator', 'guest']:
if credit_type == 'actor':
# Handle actors with roles and guest status
actors = []
for actor_elem in credits_elem.findall('actor'):
if actor_elem.text and actor_elem.text.strip():
actor_data = {'name': actor_elem.text.strip()}
if actor_elem.get('role'):
actor_data['role'] = actor_elem.get('role')
if actor_elem.get('guest') == 'yes':
actor_data['guest'] = True
actors.append(actor_data)
if actors:
credits['actor'] = actors
else:
names = [e.text.strip() for e in credits_elem.findall(credit_type) if e.text and e.text.strip()]
if names:
credits[credit_type] = names
for credit_type in ['director', 'actor', 'writer', 'presenter', 'producer']:
names = [e.text.strip() for e in credits_elem.findall(credit_type) if e.text and e.text.strip()]
if names:
credits[credit_type] = names
if credits:
custom_props['credits'] = credits
# Extract other common program metadata
date_elem = prog.find('date')
if date_elem is not None and date_elem.text:
custom_props['date'] = date_elem.text.strip()
custom_props['year'] = date_elem.text.strip()[:4] # Just the year part
country_elem = prog.find('country')
if country_elem is not None and country_elem.text:
custom_props['country'] = country_elem.text.strip()
# Extract language information (new)
language_elem = prog.find('language')
if language_elem is not None and language_elem.text:
custom_props['language'] = language_elem.text.strip()
orig_language_elem = prog.find('orig-language')
if orig_language_elem is not None and orig_language_elem.text:
custom_props['original_language'] = orig_language_elem.text.strip()
# Extract length (new)
length_elem = prog.find('length')
if length_elem is not None and length_elem.text:
try:
length_value = int(length_elem.text.strip())
length_units = length_elem.get('units', 'minutes')
custom_props['length'] = {'value': length_value, 'units': length_units}
except ValueError:
pass
# Extract video information (new)
video_elem = prog.find('video')
if video_elem is not None:
video_info = {}
for video_attr in ['present', 'colour', 'aspect', 'quality']:
attr_elem = video_elem.find(video_attr)
if attr_elem is not None and attr_elem.text:
video_info[video_attr] = attr_elem.text.strip()
if video_info:
custom_props['video'] = video_info
# Extract audio information (new)
audio_elem = prog.find('audio')
if audio_elem is not None:
audio_info = {}
for audio_attr in ['present', 'stereo']:
attr_elem = audio_elem.find(audio_attr)
if attr_elem is not None and attr_elem.text:
audio_info[audio_attr] = attr_elem.text.strip()
if audio_info:
custom_props['audio'] = audio_info
# Extract subtitles information (new)
subtitles = []
for subtitle_elem in prog.findall('subtitles'):
subtitle_data = {}
if subtitle_elem.get('type'):
subtitle_data['type'] = subtitle_elem.get('type')
lang_elem = subtitle_elem.find('language')
if lang_elem is not None and lang_elem.text:
subtitle_data['language'] = lang_elem.text.strip()
if subtitle_data:
subtitles.append(subtitle_data)
if subtitles:
custom_props['subtitles'] = subtitles
# Extract reviews (new)
reviews = []
for review_elem in prog.findall('review'):
if review_elem.text and review_elem.text.strip():
review_data = {'content': review_elem.text.strip()}
if review_elem.get('type'):
review_data['type'] = review_elem.get('type')
if review_elem.get('source'):
review_data['source'] = review_elem.get('source')
if review_elem.get('reviewer'):
review_data['reviewer'] = review_elem.get('reviewer')
reviews.append(review_data)
if reviews:
custom_props['reviews'] = reviews
# Extract images (new)
images = []
for image_elem in prog.findall('image'):
if image_elem.text and image_elem.text.strip():
image_data = {'url': image_elem.text.strip()}
for attr in ['type', 'size', 'orient', 'system']:
if image_elem.get(attr):
image_data[attr] = image_elem.get(attr)
images.append(image_data)
if images:
custom_props['images'] = images
icon_elem = prog.find('icon')
if icon_elem is not None and icon_elem.get('src'):
custom_props['icon'] = icon_elem.get('src')
# Simpler approach for boolean flags - expanded list
for kw in ['previously-shown', 'premiere', 'new', 'live', 'last-chance']:
# Simpler approach for boolean flags
for kw in ['previously-shown', 'premiere', 'new']:
if prog.find(kw) is not None:
custom_props[kw.replace('-', '_')] = True
# Extract premiere and last-chance text content if available
premiere_elem = prog.find('premiere')
if premiere_elem is not None:
custom_props['premiere'] = True
if premiere_elem.text and premiere_elem.text.strip():
custom_props['premiere_text'] = premiere_elem.text.strip()
last_chance_elem = prog.find('last-chance')
if last_chance_elem is not None:
custom_props['last_chance'] = True
if last_chance_elem.text and last_chance_elem.text.strip():
custom_props['last_chance_text'] = last_chance_elem.text.strip()
# Extract previously-shown details
prev_shown_elem = prog.find('previously-shown')
if prev_shown_elem is not None:
custom_props['previously_shown'] = True
prev_shown_data = {}
if prev_shown_elem.get('start'):
prev_shown_data['start'] = prev_shown_elem.get('start')
if prev_shown_elem.get('channel'):
prev_shown_data['channel'] = prev_shown_elem.get('channel')
if prev_shown_data:
custom_props['previously_shown_details'] = prev_shown_data
return custom_props
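For reference, a tiny XMLTV programme and (roughly) what the extractor above produces for it; only a few of the supported fields are exercised:

from lxml import etree

prog = etree.fromstring(
    '<programme channel="demo.tv" start="20250101200000 +0000" stop="20250101210000 +0000">'
    '<title>Example Show</title>'
    '<credits><actor role="Host">Jane Doe</actor></credits>'
    '<new/>'
    '</programme>'
)
# extract_custom_properties(prog) would include approximately:
# {'credits': {'actor': [{'name': 'Jane Doe', 'role': 'Host'}]}, 'new': True}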
def clear_element(elem):
"""Clear an XML element and its parent to free memory."""
try:
@ -2136,20 +1753,3 @@ def detect_file_format(file_path=None, content=None):
# If we reach here, we couldn't reliably determine the format
return format_type, is_compressed, file_extension
def generate_dummy_epg(source):
"""
DEPRECATED: This function is no longer used.
Dummy EPG programs are now generated on-demand when they are requested
(during XMLTV export or EPG grid display), rather than being pre-generated
and stored in the database.
See: apps/output/views.py - generate_custom_dummy_programs()
This function remains for backward compatibility but should not be called.
"""
logger.warning(f"generate_dummy_epg() called for {source.name} but this function is deprecated. "
f"Dummy EPG programs are now generated on-demand.")
return True

View file

@ -1,7 +1,7 @@
from rest_framework import viewsets, status
from rest_framework.response import Response
from rest_framework.views import APIView
from apps.accounts.permissions import Authenticated, permission_classes_by_action
from rest_framework.permissions import IsAuthenticated
from django.http import JsonResponse, HttpResponseForbidden, HttpResponse
import logging
from drf_yasg.utils import swagger_auto_schema
@ -17,30 +17,22 @@ from django.views import View
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from apps.m3u.models import M3UAccountProfile
# Configure logger
logger = logging.getLogger(__name__)
@login_required
def hdhr_dashboard_view(request):
"""Render the HDHR management page."""
hdhr_devices = HDHRDevice.objects.all()
return render(request, "hdhr/hdhr.html", {"hdhr_devices": hdhr_devices})
# 🔹 1) HDHomeRun Device API
class HDHRDeviceViewSet(viewsets.ModelViewSet):
"""Handles CRUD operations for HDHomeRun devices"""
queryset = HDHRDevice.objects.all()
serializer_class = HDHRDeviceSerializer
def get_permissions(self):
try:
return [perm() for perm in permission_classes_by_action[self.action]]
except KeyError:
return [Authenticated()]
permission_classes = [IsAuthenticated]
# 🔹 2) Discover API
@ -49,19 +41,48 @@ class DiscoverAPIView(APIView):
@swagger_auto_schema(
operation_description="Retrieve HDHomeRun device discovery information",
responses={200: openapi.Response("HDHR Discovery JSON")},
responses={200: openapi.Response("HDHR Discovery JSON")}
)
def get(self, request, profile=None):
uri_parts = ["hdhr"]
if profile is not None:
uri_parts.append(profile)
base_url = request.build_absolute_uri(f'/{"/".join(uri_parts)}/').rstrip("/")
base_url = request.build_absolute_uri(f'/{"/".join(uri_parts)}/').rstrip('/')
device = HDHRDevice.objects.first()
# Calculate tuner count using centralized function
from apps.m3u.utils import calculate_tuner_count
tuner_count = calculate_tuner_count(minimum=1, unlimited_default=10)
# Calculate tuner count from the active profiles of active M3U accounts (excluding the default "custom Default" profile)
profiles = M3UAccountProfile.objects.filter(
is_active=True,
m3u_account__is_active=True # Only include profiles from enabled M3U accounts
).exclude(id=1)
# 1. Check if any profile has unlimited streams (max_streams=0)
has_unlimited = profiles.filter(max_streams=0).exists()
# 2. Calculate tuner count from limited profiles
limited_tuners = 0
if not has_unlimited:
limited_tuners = profiles.filter(max_streams__gt=0).aggregate(
total=models.Sum('max_streams')
).get('total', 0) or 0
# 3. Add custom stream count to tuner count
custom_stream_count = Stream.objects.filter(is_custom=True).count()
logger.debug(f"Found {custom_stream_count} custom streams")
# 4. Calculate final tuner count
if has_unlimited:
# If there are unlimited profiles, start with 10 plus custom streams
tuner_count = 10 + custom_stream_count
else:
# Otherwise use the limited profile sum plus custom streams
tuner_count = limited_tuners + custom_stream_count
# 5. Ensure minimum of 2 tuners
tuner_count = max(2, tuner_count)
logger.debug(f"Calculated tuner count: {tuner_count} (limited profiles: {limited_tuners}, custom streams: {custom_stream_count}, unlimited: {has_unlimited})")
# Create a unique DeviceID for the HDHomeRun device based on profile ID or a default value
device_ID = "12345678" # Default DeviceID
@ -102,17 +123,17 @@ class LineupAPIView(APIView):
@swagger_auto_schema(
operation_description="Retrieve the available channel lineup",
responses={200: openapi.Response("Channel Lineup JSON")},
responses={200: openapi.Response("Channel Lineup JSON")}
)
def get(self, request, profile=None):
if profile is not None:
channel_profile = ChannelProfile.objects.get(name=profile)
channels = Channel.objects.filter(
channelprofilemembership__channel_profile=channel_profile,
channelprofilemembership__enabled=True,
).order_by("channel_number")
channelprofilemembership__enabled=True
).order_by('channel_number')
else:
channels = Channel.objects.all().order_by("channel_number")
channels = Channel.objects.all().order_by('channel_number')
lineup = []
for ch in channels:
@ -125,15 +146,13 @@ class LineupAPIView(APIView):
else:
formatted_channel_number = ""
lineup.append(
{
"GuideNumber": formatted_channel_number,
"GuideName": ch.name,
"URL": request.build_absolute_uri(f"/proxy/ts/stream/{ch.uuid}"),
"Guide_ID": formatted_channel_number,
"Station": formatted_channel_number,
}
)
lineup.append({
"GuideNumber": formatted_channel_number,
"GuideName": ch.name,
"URL": request.build_absolute_uri(f"/proxy/ts/stream/{ch.uuid}"),
"Guide_ID": formatted_channel_number,
"Station": formatted_channel_number,
})
return JsonResponse(lineup, safe=False)
@ -143,14 +162,14 @@ class LineupStatusAPIView(APIView):
@swagger_auto_schema(
operation_description="Retrieve the HDHomeRun lineup status",
responses={200: openapi.Response("Lineup Status JSON")},
responses={200: openapi.Response("Lineup Status JSON")}
)
def get(self, request, profile=None):
data = {
"ScanInProgress": 0,
"ScanPossible": 0,
"Source": "Cable",
"SourceList": ["Cable"],
"SourceList": ["Cable"]
}
return JsonResponse(data)
@ -161,10 +180,10 @@ class HDHRDeviceXMLAPIView(APIView):
@swagger_auto_schema(
operation_description="Retrieve the HDHomeRun device XML configuration",
responses={200: openapi.Response("HDHR Device XML")},
responses={200: openapi.Response("HDHR Device XML")}
)
def get(self, request):
base_url = request.build_absolute_uri("/hdhr/").rstrip("/")
base_url = request.build_absolute_uri('/hdhr/').rstrip('/')
xml_response = f"""<?xml version="1.0" encoding="utf-8"?>
<root>

View file

@ -1,7 +1,7 @@
from rest_framework import viewsets, status
from rest_framework.response import Response
from rest_framework.views import APIView
from apps.accounts.permissions import Authenticated, permission_classes_by_action
from rest_framework.permissions import IsAuthenticated
from django.http import JsonResponse, HttpResponseForbidden, HttpResponse
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
@ -16,26 +16,18 @@ from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
@login_required
def hdhr_dashboard_view(request):
"""Render the HDHR management page."""
hdhr_devices = HDHRDevice.objects.all()
return render(request, "hdhr/hdhr.html", {"hdhr_devices": hdhr_devices})
# 🔹 1) HDHomeRun Device API
class HDHRDeviceViewSet(viewsets.ModelViewSet):
"""Handles CRUD operations for HDHomeRun devices"""
queryset = HDHRDevice.objects.all()
serializer_class = HDHRDeviceSerializer
def get_permissions(self):
try:
return [perm() for perm in permission_classes_by_action[self.action]]
except KeyError:
return [Authenticated()]
permission_classes = [IsAuthenticated]
# 🔹 2) Discover API
@ -44,10 +36,10 @@ class DiscoverAPIView(APIView):
@swagger_auto_schema(
operation_description="Retrieve HDHomeRun device discovery information",
responses={200: openapi.Response("HDHR Discovery JSON")},
responses={200: openapi.Response("HDHR Discovery JSON")}
)
def get(self, request):
base_url = request.build_absolute_uri("/hdhr/").rstrip("/")
base_url = request.build_absolute_uri('/hdhr/').rstrip('/')
device = HDHRDevice.objects.first()
if not device:
@ -83,15 +75,15 @@ class LineupAPIView(APIView):
@swagger_auto_schema(
operation_description="Retrieve the available channel lineup",
responses={200: openapi.Response("Channel Lineup JSON")},
responses={200: openapi.Response("Channel Lineup JSON")}
)
def get(self, request):
channels = Channel.objects.all().order_by("channel_number")
channels = Channel.objects.all().order_by('channel_number')
lineup = [
{
"GuideNumber": str(ch.channel_number),
"GuideName": ch.name,
"URL": request.build_absolute_uri(f"/proxy/ts/stream/{ch.uuid}"),
"URL": request.build_absolute_uri(f"/proxy/ts/stream/{ch.uuid}")
}
for ch in channels
]
@ -104,14 +96,14 @@ class LineupStatusAPIView(APIView):
@swagger_auto_schema(
operation_description="Retrieve the HDHomeRun lineup status",
responses={200: openapi.Response("Lineup Status JSON")},
responses={200: openapi.Response("Lineup Status JSON")}
)
def get(self, request):
data = {
"ScanInProgress": 0,
"ScanPossible": 0,
"Source": "Cable",
"SourceList": ["Cable"],
"SourceList": ["Cable"]
}
return JsonResponse(data)
@ -122,10 +114,10 @@ class HDHRDeviceXMLAPIView(APIView):
@swagger_auto_schema(
operation_description="Retrieve the HDHomeRun device XML configuration",
responses={200: openapi.Response("HDHR Device XML")},
responses={200: openapi.Response("HDHR Device XML")}
)
def get(self, request):
base_url = request.build_absolute_uri("/hdhr/").rstrip("/")
base_url = request.build_absolute_uri('/hdhr/').rstrip('/')
xml_response = f"""<?xml version="1.0" encoding="utf-8"?>
<root>

View file

@ -1,8 +1,6 @@
from django.contrib import admin
from django.utils.html import format_html
from .models import M3UAccount, M3UFilter, ServerGroup, UserAgent, M3UAccountProfile
import json
from .models import M3UAccount, M3UFilter, ServerGroup, UserAgent
class M3UFilterInline(admin.TabularInline):
model = M3UFilter
@ -10,181 +8,50 @@ class M3UFilterInline(admin.TabularInline):
verbose_name = "M3U Filter"
verbose_name_plural = "M3U Filters"
@admin.register(M3UAccount)
class M3UAccountAdmin(admin.ModelAdmin):
list_display = (
"name",
"server_url",
"server_group",
"max_streams",
"priority",
"is_active",
"user_agent_display",
"uploaded_file_link",
"created_at",
"updated_at",
)
list_filter = ("is_active", "server_group")
search_fields = ("name", "server_url", "server_group__name")
list_display = ('name', 'server_url', 'server_group', 'max_streams', 'is_active', 'user_agent_display', 'uploaded_file_link', 'created_at', 'updated_at')
list_filter = ('is_active', 'server_group')
search_fields = ('name', 'server_url', 'server_group__name')
inlines = [M3UFilterInline]
actions = ["activate_accounts", "deactivate_accounts"]
actions = ['activate_accounts', 'deactivate_accounts']
# Handle both ForeignKey and ManyToManyField cases for UserAgent
def user_agent_display(self, obj):
if hasattr(obj, "user_agent"): # ForeignKey case
if hasattr(obj, 'user_agent'): # ForeignKey case
return obj.user_agent.user_agent if obj.user_agent else "None"
elif hasattr(obj, "user_agents"): # ManyToManyField case
elif hasattr(obj, 'user_agents'): # ManyToManyField case
return ", ".join([ua.user_agent for ua in obj.user_agents.all()]) or "None"
return "None"
user_agent_display.short_description = "User Agent(s)"
def vod_enabled_display(self, obj):
"""Display whether VOD is enabled for this account"""
if obj.custom_properties:
custom_props = obj.custom_properties or {}
return "Yes" if custom_props.get('enable_vod', False) else "No"
return "No"
vod_enabled_display.short_description = "VOD Enabled"
vod_enabled_display.boolean = True
def uploaded_file_link(self, obj):
if obj.uploaded_file:
return format_html(
"<a href='{}' target='_blank'>Download M3U</a>", obj.uploaded_file.url
)
return format_html("<a href='{}' target='_blank'>Download M3U</a>", obj.uploaded_file.url)
return "No file uploaded"
uploaded_file_link.short_description = "Uploaded File"
@admin.action(description="Activate selected accounts")
@admin.action(description='Activate selected accounts')
def activate_accounts(self, request, queryset):
queryset.update(is_active=True)
@admin.action(description="Deactivate selected accounts")
@admin.action(description='Deactivate selected accounts')
def deactivate_accounts(self, request, queryset):
queryset.update(is_active=False)
# Add ManyToManyField for Django Admin (if applicable)
if hasattr(M3UAccount, "user_agents"):
filter_horizontal = ("user_agents",) # Only for ManyToManyField
if hasattr(M3UAccount, 'user_agents'):
filter_horizontal = ('user_agents',) # Only for ManyToManyField
@admin.register(M3UFilter)
class M3UFilterAdmin(admin.ModelAdmin):
list_display = ("m3u_account", "filter_type", "regex_pattern", "exclude")
list_filter = ("filter_type", "exclude")
search_fields = ("regex_pattern",)
ordering = ("m3u_account",)
list_display = ('m3u_account', 'filter_type', 'regex_pattern', 'exclude')
list_filter = ('filter_type', 'exclude')
search_fields = ('regex_pattern',)
ordering = ('m3u_account',)
@admin.register(ServerGroup)
class ServerGroupAdmin(admin.ModelAdmin):
list_display = ("name",)
search_fields = ("name",)
list_display = ('name',)
search_fields = ('name',)
@admin.register(M3UAccountProfile)
class M3UAccountProfileAdmin(admin.ModelAdmin):
list_display = (
"name",
"m3u_account",
"is_default",
"is_active",
"max_streams",
"current_viewers",
"account_status_display",
"account_expiration_display",
"last_refresh_display",
)
list_filter = ("is_active", "is_default", "m3u_account__account_type")
search_fields = ("name", "m3u_account__name")
readonly_fields = ("account_info_display",)
def account_status_display(self, obj):
"""Display account status from custom properties"""
status = obj.get_account_status()
if status:
# Create colored status display
color_map = {
'Active': 'green',
'Expired': 'red',
'Disabled': 'red',
'Banned': 'red',
}
color = color_map.get(status, 'black')
return format_html(
'<span style="color: {};">{}</span>',
color,
status
)
return "Unknown"
account_status_display.short_description = "Account Status"
def account_expiration_display(self, obj):
"""Display account expiration from custom properties"""
expiration = obj.get_account_expiration()
if expiration:
from datetime import datetime
if expiration < datetime.now():
return format_html(
'<span style="color: red;">{}</span>',
expiration.strftime('%Y-%m-%d %H:%M')
)
else:
return format_html(
'<span style="color: green;">{}</span>',
expiration.strftime('%Y-%m-%d %H:%M')
)
return "Unknown"
account_expiration_display.short_description = "Expires"
def last_refresh_display(self, obj):
"""Display last refresh time from custom properties"""
last_refresh = obj.get_last_refresh()
if last_refresh:
return last_refresh.strftime('%Y-%m-%d %H:%M:%S')
return "Never"
last_refresh_display.short_description = "Last Refresh"
def account_info_display(self, obj):
"""Display formatted account information from custom properties"""
if not obj.custom_properties:
return "No account information available"
html_parts = []
# User Info
user_info = obj.custom_properties.get('user_info', {})
if user_info:
html_parts.append("<h3>User Information:</h3>")
html_parts.append("<ul>")
for key, value in user_info.items():
if key == 'exp_date' and value:
try:
from datetime import datetime
exp_date = datetime.fromtimestamp(float(value))
value = exp_date.strftime('%Y-%m-%d %H:%M:%S')
except (ValueError, TypeError):
pass
html_parts.append(f"<li><strong>{key}:</strong> {value}</li>")
html_parts.append("</ul>")
# Server Info
server_info = obj.custom_properties.get('server_info', {})
if server_info:
html_parts.append("<h3>Server Information:</h3>")
html_parts.append("<ul>")
for key, value in server_info.items():
html_parts.append(f"<li><strong>{key}:</strong> {value}</li>")
html_parts.append("</ul>")
# Last Refresh
last_refresh = obj.custom_properties.get('last_refresh')
if last_refresh:
html_parts.append(f"<p><strong>Last Refresh:</strong> {last_refresh}</p>")
return format_html(''.join(html_parts)) if html_parts else "No account information available"
account_info_display.short_description = "Account Information"
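Reviewer note: a minimal, self-contained sketch of the coloured status rendering that account_status_display implements above. The status value here is invented; in the admin it comes from profile.get_account_status().

from django.utils.html import format_html

# Hypothetical status value for illustration only.
color_map = {"Active": "green", "Expired": "red", "Disabled": "red", "Banned": "red"}
status = "Expired"
print(format_html('<span style="color: {};">{}</span>', color_map.get(status, "black"), status))
# -> <span style="color: red;">Expired</span>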
View file
@ -1,44 +1,18 @@
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .api_views import (
M3UAccountViewSet,
M3UFilterViewSet,
ServerGroupViewSet,
RefreshM3UAPIView,
RefreshSingleM3UAPIView,
RefreshAccountInfoAPIView,
UserAgentViewSet,
M3UAccountProfileViewSet,
)
from .api_views import M3UAccountViewSet, M3UFilterViewSet, ServerGroupViewSet, RefreshM3UAPIView, RefreshSingleM3UAPIView, UserAgentViewSet, M3UAccountProfileViewSet
app_name = "m3u"
app_name = 'm3u'
router = DefaultRouter()
router.register(r"accounts", M3UAccountViewSet, basename="m3u-account")
router.register(
r"accounts\/(?P<account_id>\d+)\/profiles",
M3UAccountProfileViewSet,
basename="m3u-account-profiles",
)
router.register(
r"accounts\/(?P<account_id>\d+)\/filters",
M3UFilterViewSet,
basename="m3u-filters",
)
router.register(r"server-groups", ServerGroupViewSet, basename="server-group")
router.register(r'accounts', M3UAccountViewSet, basename='m3u-account')
router.register(r'accounts\/(?P<account_id>\d+)\/profiles', M3UAccountProfileViewSet, basename='m3u-account-profiles')
router.register(r'filters', M3UFilterViewSet, basename='m3u-filter')
router.register(r'server-groups', ServerGroupViewSet, basename='server-group')
urlpatterns = [
path("refresh/", RefreshM3UAPIView.as_view(), name="m3u_refresh"),
path(
"refresh/<int:account_id>/",
RefreshSingleM3UAPIView.as_view(),
name="m3u_refresh_single",
),
path(
"refresh-account-info/<int:profile_id>/",
RefreshAccountInfoAPIView.as_view(),
name="m3u_refresh_account_info",
),
path('refresh/', RefreshM3UAPIView.as_view(), name='m3u_refresh'),
path('refresh/<int:account_id>/', RefreshSingleM3UAPIView.as_view(), name='m3u_refresh_single'),
]
urlpatterns += router.urls
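For orientation, a hedged sketch of the route shapes the registrations above produce. The /api/m3u/ prefix is an assumption about where this router is mounted in the project urlconf, and the pattern below is a simplified fragment of what DefaultRouter generates, not the full generated regex.

import re

# Illustrative URL shapes only (assuming the router is included under /api/m3u/):
#   /api/m3u/accounts/                   -> M3UAccountViewSet
#   /api/m3u/accounts/3/profiles/        -> M3UAccountProfileViewSet (account_id=3)
#   /api/m3u/accounts/3/filters/         -> M3UFilterViewSet (account_id=3)
#   /api/m3u/refresh/3/                  -> RefreshSingleM3UAPIView
#   /api/m3u/refresh-account-info/7/     -> RefreshAccountInfoAPIView
route = re.compile(r"^accounts\/(?P<account_id>\d+)\/profiles/$")
print(route.match("accounts/3/profiles/").group("account_id"))  # -> 3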
View file
@ -1,11 +1,7 @@
from rest_framework import viewsets, status
from rest_framework.response import Response
from rest_framework.views import APIView
from apps.accounts.permissions import (
Authenticated,
permission_classes_by_action,
permission_classes_by_method,
)
from rest_framework.permissions import IsAuthenticated
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
from django.shortcuts import get_object_or_404
@ -15,14 +11,13 @@ import os
from rest_framework.decorators import action
from django.conf import settings
from .tasks import refresh_m3u_groups
import json
# Import all models, including UserAgent.
from .models import M3UAccount, M3UFilter, ServerGroup, M3UAccountProfile
from core.models import UserAgent
from apps.channels.models import ChannelGroupM3UAccount
from core.serializers import UserAgentSerializer
from apps.vod.models import M3UVODCategoryRelation
# Import all serializers, including the UserAgentSerializer.
from .serializers import (
M3UAccountSerializer,
M3UFilterSerializer,
@ -30,112 +25,80 @@ from .serializers import (
M3UAccountProfileSerializer,
)
from .tasks import refresh_single_m3u_account, refresh_m3u_accounts, refresh_account_info
import json
from .tasks import refresh_single_m3u_account, refresh_m3u_accounts
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
class M3UAccountViewSet(viewsets.ModelViewSet):
"""Handles CRUD operations for M3U accounts"""
queryset = M3UAccount.objects.prefetch_related("channel_group")
queryset = M3UAccount.objects.prefetch_related('channel_group')
serializer_class = M3UAccountSerializer
def get_permissions(self):
try:
return [perm() for perm in permission_classes_by_action[self.action]]
except KeyError:
return [Authenticated()]
permission_classes = [IsAuthenticated]
def create(self, request, *args, **kwargs):
# Handle file upload first, if any
file_path = None
if "file" in request.FILES:
file = request.FILES["file"]
if 'file' in request.FILES:
file = request.FILES['file']
file_name = file.name
file_path = os.path.join("/data/uploads/m3us", file_name)
file_path = os.path.join('/data/uploads/m3us', file_name)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, "wb+") as destination:
with open(file_path, 'wb+') as destination:
for chunk in file.chunks():
destination.write(chunk)
# Add file_path to the request data so it's available during creation
request.data._mutable = True # Allow modification of the request data
request.data["file_path"] = (
file_path # Include the file path if a file was uploaded
)
request.data['file_path'] = file_path # Include the file path if a file was uploaded
# Handle the user_agent field - convert "null" string to None
if "user_agent" in request.data and request.data["user_agent"] == "null":
request.data["user_agent"] = None
if 'user_agent' in request.data and request.data['user_agent'] == 'null':
request.data['user_agent'] = None
# Handle server_url appropriately
if "server_url" in request.data and not request.data["server_url"]:
request.data.pop("server_url")
if 'server_url' in request.data and not request.data['server_url']:
request.data.pop('server_url')
request.data._mutable = False # Make the request data immutable again
# Now call super().create() to create the instance
response = super().create(request, *args, **kwargs)
account_type = response.data.get("account_type")
account_id = response.data.get("id")
# Notify frontend that a new playlist was created
from core.utils import send_websocket_update
send_websocket_update('updates', 'update', {
'type': 'playlist_created',
'playlist_id': account_id
})
if account_type == M3UAccount.Types.XC:
refresh_m3u_groups(account_id)
# Check if VOD is enabled
enable_vod = request.data.get("enable_vod", False)
if enable_vod:
from apps.vod.tasks import refresh_categories
refresh_categories(account_id)
print(response.data.get('account_type'))
if response.data.get('account_type') == M3UAccount.Types.XC:
refresh_m3u_groups(response.data.get('id'))
# After the instance is created, return the response
return response
def update(self, request, *args, **kwargs):
instance = self.get_object()
old_vod_enabled = False
# Check current VOD setting
if instance.custom_properties:
custom_props = instance.custom_properties or {}
old_vod_enabled = custom_props.get("enable_vod", False)
# Handle file upload first, if any
file_path = None
if "file" in request.FILES:
file = request.FILES["file"]
if 'file' in request.FILES:
file = request.FILES['file']
file_name = file.name
file_path = os.path.join("/data/uploads/m3us", file_name)
file_path = os.path.join('/data/uploads/m3us', file_name)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, "wb+") as destination:
with open(file_path, 'wb+') as destination:
for chunk in file.chunks():
destination.write(chunk)
# Add file_path to the request data so it's available during creation
request.data._mutable = True # Allow modification of the request data
request.data["file_path"] = (
file_path # Include the file path if a file was uploaded
)
request.data['file_path'] = file_path # Include the file path if a file was uploaded
# Handle the user_agent field - convert "null" string to None
if "user_agent" in request.data and request.data["user_agent"] == "null":
request.data["user_agent"] = None
if 'user_agent' in request.data and request.data['user_agent'] == 'null':
request.data['user_agent'] = None
# Handle server_url appropriately
if "server_url" in request.data and not request.data["server_url"]:
request.data.pop("server_url")
if 'server_url' in request.data and not request.data['server_url']:
request.data.pop('server_url')
request.data._mutable = False # Make the request data immutable again
if instance.file_path and os.path.exists(instance.file_path):
@ -144,58 +107,6 @@ class M3UAccountViewSet(viewsets.ModelViewSet):
# Now call super().update() to update the instance
response = super().update(request, *args, **kwargs)
# Check if VOD setting changed and trigger refresh if needed
new_vod_enabled = request.data.get("enable_vod", old_vod_enabled)
if (
instance.account_type == M3UAccount.Types.XC
and not old_vod_enabled
and new_vod_enabled
):
# Create Uncategorized categories immediately so they're available in the UI
from apps.vod.models import VODCategory, M3UVODCategoryRelation
# Create movie Uncategorized category
movie_category, _ = VODCategory.objects.get_or_create(
name="Uncategorized",
category_type="movie",
defaults={}
)
# Create series Uncategorized category
series_category, _ = VODCategory.objects.get_or_create(
name="Uncategorized",
category_type="series",
defaults={}
)
# Create relations for both categories (disabled by default until first refresh)
account_custom_props = instance.custom_properties or {}
auto_enable_new = account_custom_props.get("auto_enable_new_groups_vod", True)
M3UVODCategoryRelation.objects.get_or_create(
category=movie_category,
m3u_account=instance,
defaults={
'enabled': auto_enable_new,
'custom_properties': {}
}
)
M3UVODCategoryRelation.objects.get_or_create(
category=series_category,
m3u_account=instance,
defaults={
'enabled': auto_enable_new,
'custom_properties': {}
}
)
# Trigger full VOD refresh
from apps.vod.tasks import refresh_vod_content
refresh_vod_content.delay(instance.id)
# After the instance is updated, return the response
return response
@ -204,281 +115,75 @@ class M3UAccountViewSet(viewsets.ModelViewSet):
instance = self.get_object()
# Check if we're toggling is_active
if (
"is_active" in request.data
and instance.is_active != request.data["is_active"]
):
if 'is_active' in request.data and instance.is_active != request.data['is_active']:
# Set appropriate status based on new is_active value
if request.data["is_active"]:
request.data["status"] = M3UAccount.Status.IDLE
if request.data['is_active']:
request.data['status'] = M3UAccount.Status.IDLE
else:
request.data["status"] = M3UAccount.Status.DISABLED
request.data['status'] = M3UAccount.Status.DISABLED
# Continue with regular partial update
return super().partial_update(request, *args, **kwargs)
@action(detail=True, methods=["post"], url_path="refresh-vod")
def refresh_vod(self, request, pk=None):
"""Trigger VOD content refresh for XtreamCodes accounts"""
account = self.get_object()
if account.account_type != M3UAccount.Types.XC:
return Response(
{"error": "VOD refresh is only available for XtreamCodes accounts"},
status=status.HTTP_400_BAD_REQUEST,
)
# Check if VOD is enabled
vod_enabled = False
if account.custom_properties:
custom_props = account.custom_properties or {}
vod_enabled = custom_props.get("enable_vod", False)
if not vod_enabled:
return Response(
{"error": "VOD is not enabled for this account"},
status=status.HTTP_400_BAD_REQUEST,
)
try:
from apps.vod.tasks import refresh_vod_content
refresh_vod_content.delay(account.id)
return Response(
{"message": f"VOD refresh initiated for account {account.name}"},
status=status.HTTP_202_ACCEPTED,
)
except Exception as e:
return Response(
{"error": f"Failed to initiate VOD refresh: {str(e)}"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
@action(detail=True, methods=["patch"], url_path="group-settings")
def update_group_settings(self, request, pk=None):
"""Update auto channel sync settings for M3U account groups"""
account = self.get_object()
group_settings = request.data.get("group_settings", [])
category_settings = request.data.get("category_settings", [])
try:
for setting in group_settings:
group_id = setting.get("channel_group")
enabled = setting.get("enabled", True)
auto_sync = setting.get("auto_channel_sync", False)
sync_start = setting.get("auto_sync_channel_start")
custom_properties = setting.get("custom_properties", {})
if group_id:
ChannelGroupM3UAccount.objects.update_or_create(
channel_group_id=group_id,
m3u_account=account,
defaults={
"enabled": enabled,
"auto_channel_sync": auto_sync,
"auto_sync_channel_start": sync_start,
"custom_properties": custom_properties,
},
)
for setting in category_settings:
category_id = setting.get("id")
enabled = setting.get("enabled", True)
custom_properties = setting.get("custom_properties", {})
if category_id:
M3UVODCategoryRelation.objects.update_or_create(
category_id=category_id,
m3u_account=account,
defaults={
"enabled": enabled,
"custom_properties": custom_properties,
},
)
return Response({"message": "Group settings updated successfully"})
except Exception as e:
return Response(
{"error": f"Failed to update group settings: {str(e)}"},
status=status.HTTP_400_BAD_REQUEST,
)
class M3UFilterViewSet(viewsets.ModelViewSet):
"""Handles CRUD operations for M3U filters"""
queryset = M3UFilter.objects.all()
serializer_class = M3UFilterSerializer
def get_permissions(self):
try:
return [perm() for perm in permission_classes_by_action[self.action]]
except KeyError:
return [Authenticated()]
def get_queryset(self):
m3u_account_id = self.kwargs["account_id"]
return M3UFilter.objects.filter(m3u_account_id=m3u_account_id)
def perform_create(self, serializer):
# Get the account ID from the URL
account_id = self.kwargs["account_id"]
# # Get the M3UAccount instance for the account_id
# m3u_account = M3UAccount.objects.get(id=account_id)
# Save the 'm3u_account' in the serializer context
serializer.context["m3u_account"] = account_id
# Perform the actual save
serializer.save(m3u_account_id=account_id)
permission_classes = [IsAuthenticated]
class ServerGroupViewSet(viewsets.ModelViewSet):
"""Handles CRUD operations for Server Groups"""
queryset = ServerGroup.objects.all()
serializer_class = ServerGroupSerializer
def get_permissions(self):
try:
return [perm() for perm in permission_classes_by_action[self.action]]
except KeyError:
return [Authenticated()]
permission_classes = [IsAuthenticated]
class RefreshM3UAPIView(APIView):
"""Triggers refresh for all active M3U accounts"""
def get_permissions(self):
try:
return [
perm() for perm in permission_classes_by_method[self.request.method]
]
except KeyError:
return [Authenticated()]
@swagger_auto_schema(
operation_description="Triggers a refresh of all active M3U accounts",
responses={202: "M3U refresh initiated"},
responses={202: "M3U refresh initiated"}
)
def post(self, request, format=None):
refresh_m3u_accounts.delay()
return Response(
{"success": True, "message": "M3U refresh initiated."},
status=status.HTTP_202_ACCEPTED,
)
return Response({'success': True, 'message': 'M3U refresh initiated.'}, status=status.HTTP_202_ACCEPTED)
class RefreshSingleM3UAPIView(APIView):
"""Triggers refresh for a single M3U account"""
def get_permissions(self):
try:
return [
perm() for perm in permission_classes_by_method[self.request.method]
]
except KeyError:
return [Authenticated()]
@swagger_auto_schema(
operation_description="Triggers a refresh of a single M3U account",
responses={202: "M3U account refresh initiated"},
responses={202: "M3U account refresh initiated"}
)
def post(self, request, account_id, format=None):
refresh_single_m3u_account.delay(account_id)
return Response(
{
"success": True,
"message": f"M3U account {account_id} refresh initiated.",
},
status=status.HTTP_202_ACCEPTED,
)
class RefreshAccountInfoAPIView(APIView):
"""Triggers account info refresh for a single M3U account"""
def get_permissions(self):
try:
return [
perm() for perm in permission_classes_by_method[self.request.method]
]
except KeyError:
return [Authenticated()]
@swagger_auto_schema(
operation_description="Triggers a refresh of account information for a specific M3U profile",
responses={202: "Account info refresh initiated", 400: "Profile not found or not XtreamCodes"},
)
def post(self, request, profile_id, format=None):
try:
from .models import M3UAccountProfile
profile = M3UAccountProfile.objects.get(id=profile_id)
account = profile.m3u_account
if account.account_type != M3UAccount.Types.XC:
return Response(
{
"success": False,
"error": "Account info refresh is only available for XtreamCodes accounts",
},
status=status.HTTP_400_BAD_REQUEST,
)
refresh_account_info.delay(profile_id)
return Response(
{
"success": True,
"message": f"Account info refresh initiated for profile {profile.name}.",
},
status=status.HTTP_202_ACCEPTED,
)
except M3UAccountProfile.DoesNotExist:
return Response(
{
"success": False,
"error": "Profile not found",
},
status=status.HTTP_404_NOT_FOUND,
)
return Response({'success': True, 'message': f'M3U account {account_id} refresh initiated.'},
status=status.HTTP_202_ACCEPTED)
class UserAgentViewSet(viewsets.ModelViewSet):
"""Handles CRUD operations for User Agents"""
queryset = UserAgent.objects.all()
serializer_class = UserAgentSerializer
def get_permissions(self):
try:
return [perm() for perm in permission_classes_by_action[self.action]]
except KeyError:
return [Authenticated()]
permission_classes = [IsAuthenticated]
class M3UAccountProfileViewSet(viewsets.ModelViewSet):
queryset = M3UAccountProfile.objects.all()
serializer_class = M3UAccountProfileSerializer
def get_permissions(self):
try:
return [perm() for perm in permission_classes_by_action[self.action]]
except KeyError:
return [Authenticated()]
permission_classes = [IsAuthenticated]
def get_queryset(self):
m3u_account_id = self.kwargs["account_id"]
m3u_account_id = self.kwargs['account_id']
return M3UAccountProfile.objects.filter(m3u_account_id=m3u_account_id)
def perform_create(self, serializer):
# Get the account ID from the URL
account_id = self.kwargs["account_id"]
account_id = self.kwargs['account_id']
# Get the M3UAccount instance for the account_id
m3u_account = M3UAccount.objects.get(id=account_id)
# Save the 'm3u_account' in the serializer context
serializer.context["m3u_account"] = m3u_account
serializer.context['m3u_account'] = m3u_account
# Perform the actual save
serializer.save(m3u_account_id=m3u_account)
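A note on the permission pattern used throughout the views above: get_permissions() looks the current viewset action (or HTTP method) up in a mapping and falls back to Authenticated. The sketch below only shows the shape such a mapping takes; the real definitions live in apps.accounts.permissions and the class and entries here are hypothetical stand-ins.

from rest_framework.permissions import IsAuthenticated

class Authenticated(IsAuthenticated):
    """Hypothetical stand-in for apps.accounts.permissions.Authenticated."""
    pass

# Keyed by DRF viewset action; values are lists of permission classes.
permission_classes_by_action = {
    "list": [Authenticated],
    "retrieve": [Authenticated],
    "create": [Authenticated],  # could be swapped for a stricter class per action
}

# Keyed by HTTP method, used by the plain APIViews (e.g. RefreshM3UAPIView).
permission_classes_by_method = {
    "POST": [Authenticated],
}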
View file
@ -4,13 +4,6 @@ from .models import M3UAccount, M3UFilter
import re
class M3UAccountForm(forms.ModelForm):
enable_vod = forms.BooleanField(
required=False,
initial=False,
label="Enable VOD Content",
help_text="Parse and import VOD (movies/series) content for XtreamCodes accounts"
)
class Meta:
model = M3UAccount
fields = [
@ -20,34 +13,8 @@ class M3UAccountForm(forms.ModelForm):
'server_group',
'max_streams',
'is_active',
'enable_vod',
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Set initial value for enable_vod from custom_properties
if self.instance and self.instance.custom_properties:
custom_props = self.instance.custom_properties or {}
self.fields['enable_vod'].initial = custom_props.get('enable_vod', False)
def save(self, commit=True):
instance = super().save(commit=False)
# Handle enable_vod field
enable_vod = self.cleaned_data.get('enable_vod', False)
# Parse existing custom_properties
custom_props = instance.custom_properties or {}
# Update VOD preference
custom_props['enable_vod'] = enable_vod
instance.custom_properties = custom_props
if commit:
instance.save()
return instance
def clean_uploaded_file(self):
uploaded_file = self.cleaned_data.get('uploaded_file')
if uploaded_file:
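The form above keeps the enable_vod checkbox out of the model's columns and persists it inside custom_properties instead. A minimal sketch of that round-trip, assuming at least one M3UAccount row exists and custom_properties is the JSONField variant:

from apps.m3u.models import M3UAccount

account = M3UAccount.objects.first()   # assumed to exist for this example
props = account.custom_properties or {}
props["enable_vod"] = True             # what M3UAccountForm.save() stores
account.custom_properties = props
account.save()
print(account.custom_properties.get("enable_vod", False))  # -> True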
View file
@ -3,7 +3,6 @@
from django.db import migrations
from core.models import CoreSettings
def create_custom_account(apps, schema_editor):
default_user_agent_id = CoreSettings.get_default_user_agent_id()
@ -19,7 +18,7 @@ def create_custom_account(apps, schema_editor):
M3UAccountProfile = apps.get_model("m3u", "M3UAccountProfile")
M3UAccountProfile.objects.create(
m3u_account=m3u_account,
name=f"{m3u_account.name} Default",
name=f'{m3u_account.name} Default',
max_streams=m3u_account.max_streams,
is_default=True,
is_active=True,
@ -27,12 +26,10 @@ def create_custom_account(apps, schema_editor):
replace_pattern="$1",
)
class Migration(migrations.Migration):
dependencies = [
("m3u", "0002_m3uaccount_locked"),
("core", "0004_preload_core_settings"),
('m3u', '0002_m3uaccount_locked'),
]
operations = [
View file
@ -7,29 +7,24 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("django_celery_beat", "0019_alter_periodictasks_options"),
("m3u", "0004_m3uaccount_stream_profile"),
('django_celery_beat', '0019_alter_periodictasks_options'),
('m3u', '0004_m3uaccount_stream_profile'),
]
operations = [
migrations.AddField(
model_name="m3uaccount",
name="custom_properties",
model_name='m3uaccount',
name='custom_properties',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name="m3uaccount",
name="refresh_interval",
model_name='m3uaccount',
name='refresh_interval',
field=models.IntegerField(default=24),
),
migrations.AddField(
model_name="m3uaccount",
name="refresh_task",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="django_celery_beat.periodictask",
),
model_name='m3uaccount',
name='refresh_task',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='django_celery_beat.periodictask'),
),
]
View file
@ -1,18 +0,0 @@
# Generated by Django 5.1.6 on 2025-07-22 21:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('m3u', '0012_alter_m3uaccount_refresh_interval'),
]
operations = [
migrations.AlterField(
model_name='m3ufilter',
name='filter_type',
field=models.CharField(choices=[('group', 'Group'), ('name', 'Stream Name'), ('url', 'Stream URL')], default='group', help_text='Filter based on either group title or stream name.', max_length=50),
),
]
View file
@ -1,22 +0,0 @@
# Generated by Django 5.1.6 on 2025-07-31 17:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('m3u', '0013_alter_m3ufilter_filter_type'),
]
operations = [
migrations.AlterModelOptions(
name='m3ufilter',
options={'ordering': ['order']},
),
migrations.AddField(
model_name='m3ufilter',
name='order',
field=models.PositiveIntegerField(default=0),
),
]
View file
@ -1,22 +0,0 @@
# Generated by Django 5.2.4 on 2025-08-02 16:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('m3u', '0014_alter_m3ufilter_options_m3ufilter_order'),
]
operations = [
migrations.AlterModelOptions(
name='m3ufilter',
options={},
),
migrations.AddField(
model_name='m3ufilter',
name='custom_properties',
field=models.TextField(blank=True, null=True),
),
]
View file
@ -1,18 +0,0 @@
# Generated by Django 5.2.4 on 2025-08-20 22:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('m3u', '0015_alter_m3ufilter_options_m3ufilter_custom_properties'),
]
operations = [
migrations.AddField(
model_name='m3uaccount',
name='priority',
field=models.PositiveIntegerField(default=0, help_text='Priority for VOD provider selection (higher numbers = higher priority). Used when multiple providers offer the same content.'),
),
]
View file
@ -1,28 +0,0 @@
# Generated by Django 5.2.4 on 2025-09-02 15:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('m3u', '0016_m3uaccount_priority'),
]
operations = [
migrations.AlterField(
model_name='m3uaccount',
name='custom_properties',
field=models.JSONField(blank=True, default=dict, null=True),
),
migrations.AlterField(
model_name='m3uaccount',
name='server_url',
field=models.URLField(blank=True, help_text='The base URL of the M3U server (optional if a file is uploaded)', max_length=1000, null=True),
),
migrations.AlterField(
model_name='m3ufilter',
name='custom_properties',
field=models.JSONField(blank=True, default=dict, null=True),
),
]
View file
@ -1,18 +0,0 @@
# Generated by Django 5.2.4 on 2025-09-09 20:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('m3u', '0017_alter_m3uaccount_custom_properties_and_more'),
]
operations = [
migrations.AddField(
model_name='m3uaccountprofile',
name='custom_properties',
field=models.JSONField(blank=True, default=dict, help_text='Custom properties for storing account information from provider (e.g., XC account details, expiration dates)', null=True),
),
]
View file
@ -7,8 +7,7 @@ from apps.channels.models import StreamProfile
from django_celery_beat.models import PeriodicTask
from core.models import CoreSettings, UserAgent
CUSTOM_M3U_ACCOUNT_NAME = "custom"
CUSTOM_M3U_ACCOUNT_NAME="custom"
class M3UAccount(models.Model):
class Types(models.TextChoices):
@ -26,78 +25,84 @@ class M3UAccount(models.Model):
"""Represents an M3U Account for IPTV streams."""
name = models.CharField(
max_length=255, unique=True, help_text="Unique name for this M3U account"
max_length=255,
unique=True,
help_text="Unique name for this M3U account"
)
server_url = models.URLField(
max_length=1000,
blank=True,
null=True,
help_text="The base URL of the M3U server (optional if a file is uploaded)",
help_text="The base URL of the M3U server (optional if a file is uploaded)"
)
file_path = models.CharField(
max_length=255,
blank=True,
null=True
)
file_path = models.CharField(max_length=255, blank=True, null=True)
server_group = models.ForeignKey(
"ServerGroup",
'ServerGroup',
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="m3u_accounts",
help_text="The server group this M3U account belongs to",
related_name='m3u_accounts',
help_text="The server group this M3U account belongs to"
)
max_streams = models.PositiveIntegerField(
default=0, help_text="Maximum number of concurrent streams (0 for unlimited)"
default=0,
help_text="Maximum number of concurrent streams (0 for unlimited)"
)
is_active = models.BooleanField(
default=True, help_text="Set to false to deactivate this M3U account"
default=True,
help_text="Set to false to deactivate this M3U account"
)
created_at = models.DateTimeField(
auto_now_add=True, help_text="Time when this account was created"
auto_now_add=True,
help_text="Time when this account was created"
)
updated_at = models.DateTimeField(
null=True,
blank=True,
help_text="Time when this account was last successfully refreshed",
null=True, blank=True,
help_text="Time when this account was last successfully refreshed"
)
status = models.CharField(
max_length=20, choices=Status.choices, default=Status.IDLE
max_length=20,
choices=Status.choices,
default=Status.IDLE
)
last_message = models.TextField(
null=True,
blank=True,
help_text="Last status message, including success results or error information",
help_text="Last status message, including success results or error information"
)
user_agent = models.ForeignKey(
"core.UserAgent",
'core.UserAgent',
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="m3u_accounts",
help_text="The User-Agent associated with this M3U account.",
related_name='m3u_accounts',
help_text="The User-Agent associated with this M3U account."
)
locked = models.BooleanField(
default=False, help_text="Protected - can't be deleted or modified"
default=False,
help_text="Protected - can't be deleted or modified"
)
stream_profile = models.ForeignKey(
StreamProfile,
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="m3u_accounts",
related_name='m3u_accounts'
)
account_type = models.CharField(choices=Types.choices, default=Types.STADNARD)
username = models.CharField(max_length=255, null=True, blank=True)
password = models.CharField(max_length=255, null=True, blank=True)
custom_properties = models.JSONField(default=dict, blank=True, null=True)
custom_properties = models.TextField(null=True, blank=True)
refresh_interval = models.IntegerField(default=0)
refresh_task = models.ForeignKey(
PeriodicTask, on_delete=models.SET_NULL, null=True, blank=True
)
stale_stream_days = models.PositiveIntegerField(
default=7,
help_text="Number of days after which a stream will be removed if not seen in the M3U source.",
)
priority = models.PositiveIntegerField(
default=0,
help_text="Priority for VOD provider selection (higher numbers = higher priority). Used when multiple providers offer the same content.",
help_text="Number of days after which a stream will be removed if not seen in the M3U source."
)
def __str__(self):
@ -129,19 +134,17 @@ class M3UAccount(models.Model):
def get_user_agent(self):
user_agent = self.user_agent
if not user_agent:
user_agent = UserAgent.objects.get(
id=CoreSettings.get_default_user_agent_id()
)
user_agent = UserAgent.objects.get(id=CoreSettings.get_default_user_agent_id())
return user_agent
def save(self, *args, **kwargs):
# Prevent auto_now behavior by handling updated_at manually
if "update_fields" in kwargs and "updated_at" not in kwargs["update_fields"]:
if 'update_fields' in kwargs and 'updated_at' not in kwargs['update_fields']:
# Don't modify updated_at for regular updates
kwargs.setdefault("update_fields", [])
if "updated_at" in kwargs["update_fields"]:
kwargs["update_fields"].remove("updated_at")
kwargs.setdefault('update_fields', [])
if 'updated_at' in kwargs['update_fields']:
kwargs['update_fields'].remove('updated_at')
super().save(*args, **kwargs)
# def get_channel_groups(self):
@ -155,40 +158,35 @@ class M3UAccount(models.Model):
# """Return all streams linked to this account with enabled ChannelGroups."""
# return self.streams.filter(channel_group__in=ChannelGroup.objects.filter(m3u_account__enabled=True))
class M3UFilter(models.Model):
"""Defines filters for M3U accounts based on stream name or group title."""
FILTER_TYPE_CHOICES = (
("group", "Group"),
("name", "Stream Name"),
("url", "Stream URL"),
('group', 'Group Title'),
('name', 'Stream Name'),
)
m3u_account = models.ForeignKey(
M3UAccount,
on_delete=models.CASCADE,
related_name="filters",
help_text="The M3U account this filter is applied to.",
related_name='filters',
help_text="The M3U account this filter is applied to."
)
filter_type = models.CharField(
max_length=50,
choices=FILTER_TYPE_CHOICES,
default="group",
help_text="Filter based on either group title or stream name.",
default='group',
help_text="Filter based on either group title or stream name."
)
regex_pattern = models.CharField(
max_length=200, help_text="A regex pattern to match streams or groups."
max_length=200,
help_text="A regex pattern to match streams or groups."
)
exclude = models.BooleanField(
default=True,
help_text="If True, matching items are excluded; if False, only matches are included.",
help_text="If True, matching items are excluded; if False, only matches are included."
)
order = models.PositiveIntegerField(default=0)
custom_properties = models.JSONField(default=dict, blank=True, null=True)
def applies_to(self, stream_name, group_name):
target = group_name if self.filter_type == "group" else stream_name
target = group_name if self.filter_type == 'group' else stream_name
return bool(re.search(self.regex_pattern, target, re.IGNORECASE))
def clean(self):
@ -198,9 +196,7 @@ class M3UFilter(models.Model):
raise ValidationError(f"Invalid regex pattern: {self.regex_pattern}")
def __str__(self):
filter_type_display = dict(self.FILTER_TYPE_CHOICES).get(
self.filter_type, "Unknown"
)
filter_type_display = dict(self.FILTER_TYPE_CHOICES).get(self.filter_type, 'Unknown')
exclude_status = "Exclude" if self.exclude else "Include"
return f"[{self.m3u_account.name}] {filter_type_display}: {self.regex_pattern} ({exclude_status})"
@ -226,35 +222,40 @@ class M3UFilter(models.Model):
class ServerGroup(models.Model):
"""Represents a logical grouping of servers or channels."""
name = models.CharField(
max_length=100, unique=True, help_text="Unique name for this server group."
max_length=100,
unique=True,
help_text="Unique name for this server group."
)
def __str__(self):
return self.name
from django.db import models
class M3UAccountProfile(models.Model):
"""Represents a profile associated with an M3U Account."""
m3u_account = models.ForeignKey(
"M3UAccount",
'M3UAccount',
on_delete=models.CASCADE,
related_name="profiles",
help_text="The M3U account this profile belongs to.",
related_name='profiles',
help_text="The M3U account this profile belongs to."
)
name = models.CharField(
max_length=255, help_text="Name for the M3U account profile"
max_length=255,
help_text="Name for the M3U account profile"
)
is_default = models.BooleanField(
default=False, help_text="Set to false to deactivate this profile"
default=False,
help_text="Set to false to deactivate this profile"
)
max_streams = models.PositiveIntegerField(
default=0, help_text="Maximum number of concurrent streams (0 for unlimited)"
default=0,
help_text="Maximum number of concurrent streams (0 for unlimited)"
)
is_active = models.BooleanField(
default=True, help_text="Set to false to deactivate this profile"
default=True,
help_text="Set to false to deactivate this profile"
)
search_pattern = models.CharField(
max_length=255,
@ -263,95 +264,22 @@ class M3UAccountProfile(models.Model):
max_length=255,
)
current_viewers = models.PositiveIntegerField(default=0)
custom_properties = models.JSONField(
default=dict,
blank=True,
null=True,
help_text="Custom properties for storing account information from provider (e.g., XC account details, expiration dates)"
)
class Meta:
constraints = [
models.UniqueConstraint(
fields=["m3u_account", "name"], name="unique_account_name"
)
models.UniqueConstraint(fields=['m3u_account', 'name'], name='unique_account_name')
]
def __str__(self):
return f"{self.name} ({self.m3u_account.name})"
def get_account_expiration(self):
"""Get account expiration date from custom properties if available"""
if not self.custom_properties:
return None
user_info = self.custom_properties.get('user_info', {})
exp_date = user_info.get('exp_date')
if exp_date:
try:
from datetime import datetime
# XC exp_date is typically a Unix timestamp
if isinstance(exp_date, (int, float)):
return datetime.fromtimestamp(exp_date)
elif isinstance(exp_date, str):
# Try to parse as timestamp first, then as ISO date
try:
return datetime.fromtimestamp(float(exp_date))
except ValueError:
return datetime.fromisoformat(exp_date)
except (ValueError, TypeError):
pass
return None
def get_account_status(self):
"""Get account status from custom properties if available"""
if not self.custom_properties:
return None
user_info = self.custom_properties.get('user_info', {})
return user_info.get('status')
def get_max_connections(self):
"""Get maximum connections from custom properties if available"""
if not self.custom_properties:
return None
user_info = self.custom_properties.get('user_info', {})
return user_info.get('max_connections')
def get_active_connections(self):
"""Get active connections from custom properties if available"""
if not self.custom_properties:
return None
user_info = self.custom_properties.get('user_info', {})
return user_info.get('active_cons')
def get_last_refresh(self):
"""Get last refresh timestamp from custom properties if available"""
if not self.custom_properties:
return None
last_refresh = self.custom_properties.get('last_refresh')
if last_refresh:
try:
from datetime import datetime
return datetime.fromisoformat(last_refresh)
except (ValueError, TypeError):
pass
return None
@receiver(models.signals.post_save, sender=M3UAccount)
def create_profile_for_m3u_account(sender, instance, created, **kwargs):
"""Automatically create an M3UAccountProfile when M3UAccount is created."""
if created:
M3UAccountProfile.objects.create(
m3u_account=instance,
name=f"{instance.name} Default",
name=f'{instance.name} Default',
max_streams=instance.max_streams,
is_default=True,
is_active=True,
@ -364,5 +292,6 @@ def create_profile_for_m3u_account(sender, instance, created, **kwargs):
is_default=True,
)
profile.max_streams = instance.max_streams
profile.save()
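To make the filter semantics above concrete, a small self-contained illustration of what M3UFilter.applies_to() evaluates; the pattern and names below are invented.

import re

regex_pattern = r"sports|espn"   # hypothetical M3UFilter.regex_pattern
filter_type = "group"            # match against the group title rather than the stream name
stream_name, group_name = "ESPN 2 FHD", "US Sports"

target = group_name if filter_type == "group" else stream_name
print(bool(re.search(regex_pattern, target, re.IGNORECASE)))  # -> True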
View file
@ -1,106 +1,41 @@
from core.utils import validate_flexible_url
from rest_framework import serializers, status
from rest_framework import serializers
from rest_framework.response import Response
from .models import M3UAccount, M3UFilter, ServerGroup, M3UAccountProfile
from core.models import UserAgent
from apps.channels.models import ChannelGroup, ChannelGroupM3UAccount
from apps.channels.serializers import (
ChannelGroupM3UAccountSerializer,
)
from apps.channels.serializers import ChannelGroupM3UAccountSerializer, ChannelGroupSerializer
import logging
import json
logger = logging.getLogger(__name__)
class M3UFilterSerializer(serializers.ModelSerializer):
"""Serializer for M3U Filters"""
channel_groups = ChannelGroupM3UAccountSerializer(source='m3u_account', many=True)
class Meta:
model = M3UFilter
fields = [
"id",
"filter_type",
"regex_pattern",
"exclude",
"order",
"custom_properties",
]
fields = ['id', 'filter_type', 'regex_pattern', 'exclude', 'channel_groups']
from rest_framework import serializers
from .models import M3UAccountProfile
class M3UAccountProfileSerializer(serializers.ModelSerializer):
account = serializers.SerializerMethodField()
def get_account(self, obj):
"""Include basic account information for frontend use"""
return {
'id': obj.m3u_account.id,
'name': obj.m3u_account.name,
'account_type': obj.m3u_account.account_type,
'is_xtream_codes': obj.m3u_account.account_type == 'XC'
}
class Meta:
model = M3UAccountProfile
fields = [
"id",
"name",
"max_streams",
"is_active",
"is_default",
"current_viewers",
"search_pattern",
"replace_pattern",
"custom_properties",
"account",
]
read_only_fields = ["id", "account"]
extra_kwargs = {
'search_pattern': {'required': False, 'allow_blank': True},
'replace_pattern': {'required': False, 'allow_blank': True},
}
fields = ['id', 'name', 'max_streams', 'is_active', 'is_default', 'current_viewers', 'search_pattern', 'replace_pattern']
read_only_fields = ['id']
def create(self, validated_data):
m3u_account = self.context.get("m3u_account")
m3u_account = self.context.get('m3u_account')
# Use the m3u_account when creating the profile
validated_data["m3u_account_id"] = m3u_account.id
validated_data['m3u_account_id'] = m3u_account.id
return super().create(validated_data)
def validate(self, data):
"""Custom validation to handle default profiles"""
# For updates to existing instances
if self.instance and self.instance.is_default:
# For default profiles, search_pattern and replace_pattern are not required
# and we don't want to validate them since they shouldn't be changed
return data
# For non-default profiles or new profiles, ensure required fields are present
if not data.get('search_pattern'):
raise serializers.ValidationError({
'search_pattern': ['This field is required for non-default profiles.']
})
if not data.get('replace_pattern'):
raise serializers.ValidationError({
'replace_pattern': ['This field is required for non-default profiles.']
})
return data
def update(self, instance, validated_data):
if instance.is_default:
# For default profiles, only allow updating name and custom_properties (for notes)
allowed_fields = {'name', 'custom_properties'}
# Remove any fields that aren't allowed for default profiles
disallowed_fields = set(validated_data.keys()) - allowed_fields
if disallowed_fields:
raise serializers.ValidationError(
f"Default profiles can only modify name and notes. "
f"Cannot modify: {', '.join(disallowed_fields)}"
)
raise serializers.ValidationError("Default profiles cannot be modified.")
return super().update(instance, validated_data)
def destroy(self, request, *args, **kwargs):
@ -108,15 +43,13 @@ class M3UAccountProfileSerializer(serializers.ModelSerializer):
if instance.is_default:
return Response(
{"error": "Default profiles cannot be deleted."},
status=status.HTTP_400_BAD_REQUEST,
status=status.HTTP_400_BAD_REQUEST
)
return super().destroy(request, *args, **kwargs)
class M3UAccountSerializer(serializers.ModelSerializer):
"""Serializer for M3U Account"""
filters = serializers.SerializerMethodField()
filters = M3UFilterSerializer(many=True, read_only=True)
# Include user_agent as a mandatory field using its primary key.
user_agent = serializers.PrimaryKeyRelatedField(
queryset=UserAgent.objects.all(),
@ -124,96 +57,28 @@ class M3UAccountSerializer(serializers.ModelSerializer):
allow_null=True,
)
profiles = M3UAccountProfileSerializer(many=True, read_only=True)
read_only_fields = ["locked", "created_at", "updated_at"]
read_only_fields = ['locked', 'created_at', 'updated_at']
# channel_groups = serializers.SerializerMethodField()
channel_groups = ChannelGroupM3UAccountSerializer(
source="channel_group", many=True, required=False
)
server_url = serializers.CharField(
required=False,
allow_blank=True,
allow_null=True,
validators=[validate_flexible_url],
)
enable_vod = serializers.BooleanField(required=False, write_only=True)
auto_enable_new_groups_live = serializers.BooleanField(required=False, write_only=True)
auto_enable_new_groups_vod = serializers.BooleanField(required=False, write_only=True)
auto_enable_new_groups_series = serializers.BooleanField(required=False, write_only=True)
channel_groups = ChannelGroupM3UAccountSerializer(source='channel_group', many=True, required=False)
class Meta:
model = M3UAccount
fields = [
"id",
"name",
"server_url",
"file_path",
"server_group",
"max_streams",
"is_active",
"created_at",
"updated_at",
"filters",
"user_agent",
"profiles",
"locked",
"channel_groups",
"refresh_interval",
"custom_properties",
"account_type",
"username",
"password",
"stale_stream_days",
"priority",
"status",
"last_message",
"enable_vod",
"auto_enable_new_groups_live",
"auto_enable_new_groups_vod",
"auto_enable_new_groups_series",
'id', 'name', 'server_url', 'file_path', 'server_group',
'max_streams', 'is_active', 'created_at', 'updated_at', 'filters', 'user_agent', 'profiles', 'locked',
'channel_groups', 'refresh_interval', 'custom_properties', 'account_type', 'username', 'password', 'stale_stream_days',
'status', 'last_message',
]
extra_kwargs = {
"password": {
"required": False,
"allow_blank": True,
'password': {
'required': False,
'allow_blank': True,
},
}
def to_representation(self, instance):
data = super().to_representation(instance)
# Parse custom_properties to get VOD preference and auto_enable_new_groups settings
custom_props = instance.custom_properties or {}
data["enable_vod"] = custom_props.get("enable_vod", False)
data["auto_enable_new_groups_live"] = custom_props.get("auto_enable_new_groups_live", True)
data["auto_enable_new_groups_vod"] = custom_props.get("auto_enable_new_groups_vod", True)
data["auto_enable_new_groups_series"] = custom_props.get("auto_enable_new_groups_series", True)
return data
def update(self, instance, validated_data):
# Handle enable_vod preference and auto_enable_new_groups settings
enable_vod = validated_data.pop("enable_vod", None)
auto_enable_new_groups_live = validated_data.pop("auto_enable_new_groups_live", None)
auto_enable_new_groups_vod = validated_data.pop("auto_enable_new_groups_vod", None)
auto_enable_new_groups_series = validated_data.pop("auto_enable_new_groups_series", None)
# Get existing custom_properties
custom_props = instance.custom_properties or {}
# Update preferences
if enable_vod is not None:
custom_props["enable_vod"] = enable_vod
if auto_enable_new_groups_live is not None:
custom_props["auto_enable_new_groups_live"] = auto_enable_new_groups_live
if auto_enable_new_groups_vod is not None:
custom_props["auto_enable_new_groups_vod"] = auto_enable_new_groups_vod
if auto_enable_new_groups_series is not None:
custom_props["auto_enable_new_groups_series"] = auto_enable_new_groups_series
validated_data["custom_properties"] = custom_props
# Pop out channel group memberships so we can handle them manually
channel_group_data = validated_data.pop("channel_group", [])
channel_group_data = validated_data.pop('channel_group', [])
# First, update the M3UAccount itself
for attr, value in validated_data.items():
@ -223,12 +88,13 @@ class M3UAccountSerializer(serializers.ModelSerializer):
# Prepare a list of memberships to update
memberships_to_update = []
for group_data in channel_group_data:
group = group_data.get("channel_group")
enabled = group_data.get("enabled")
group = group_data.get('channel_group')
enabled = group_data.get('enabled')
try:
membership = ChannelGroupM3UAccount.objects.get(
m3u_account=instance, channel_group=group
m3u_account=instance,
channel_group=group
)
membership.enabled = enabled
memberships_to_update.append(membership)
@ -237,39 +103,13 @@ class M3UAccountSerializer(serializers.ModelSerializer):
# Perform the bulk update
if memberships_to_update:
ChannelGroupM3UAccount.objects.bulk_update(
memberships_to_update, ["enabled"]
)
ChannelGroupM3UAccount.objects.bulk_update(memberships_to_update, ['enabled'])
return instance
def create(self, validated_data):
# Handle enable_vod preference and auto_enable_new_groups settings during creation
enable_vod = validated_data.pop("enable_vod", False)
auto_enable_new_groups_live = validated_data.pop("auto_enable_new_groups_live", True)
auto_enable_new_groups_vod = validated_data.pop("auto_enable_new_groups_vod", True)
auto_enable_new_groups_series = validated_data.pop("auto_enable_new_groups_series", True)
# Parse existing custom_properties or create new
custom_props = validated_data.get("custom_properties", {})
# Set preferences (default to True for auto_enable_new_groups)
custom_props["enable_vod"] = enable_vod
custom_props["auto_enable_new_groups_live"] = auto_enable_new_groups_live
custom_props["auto_enable_new_groups_vod"] = auto_enable_new_groups_vod
custom_props["auto_enable_new_groups_series"] = auto_enable_new_groups_series
validated_data["custom_properties"] = custom_props
return super().create(validated_data)
def get_filters(self, obj):
filters = obj.filters.order_by("order")
return M3UFilterSerializer(filters, many=True).data
class ServerGroupSerializer(serializers.ModelSerializer):
"""Serializer for Server Group"""
class Meta:
model = ServerGroup
fields = ["id", "name"]
fields = ['id', 'name']
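A hedged sketch of the custom_properties round-trip the serializer above implements for its write-only flags. The field names are taken from the serializer; the payload values are invented and may not satisfy every validator (e.g. user_agent), which is why the errors branch is included.

from apps.m3u.serializers import M3UAccountSerializer

payload = {"name": "Provider A", "account_type": "XC", "enable_vod": True}
serializer = M3UAccountSerializer(data=payload)
if serializer.is_valid():
    account = serializer.save()
    # enable_vod is write-only, so create() folds it into custom_properties ...
    print(account.custom_properties["enable_vod"])           # -> True
    # ... and to_representation() surfaces it again on read:
    print(M3UAccountSerializer(account).data["enable_vod"])  # -> True
else:
    print(serializer.errors)  # e.g. a missing required field, depending on validators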
File diff suppressed because it is too large
View file
@ -1,40 +1,9 @@
# apps/m3u/utils.py
import threading
import logging
from django.db import models
lock = threading.Lock()
# Dictionary to track usage: {m3u_account_id: current_usage}
active_streams_map = {}
logger = logging.getLogger(__name__)
def normalize_stream_url(url):
"""
Normalize stream URLs for compatibility with FFmpeg.
Handles VLC-specific syntax like udp://@239.0.0.1:1234 by removing the @ symbol.
FFmpeg doesn't recognize the @ prefix for multicast addresses.
Args:
url (str): The stream URL to normalize
Returns:
str: The normalized URL
"""
if not url:
return url
# Handle VLC-style UDP multicast URLs: udp://@239.0.0.1:1234 -> udp://239.0.0.1:1234
# The @ symbol in VLC means "listen on all interfaces" but FFmpeg doesn't use this syntax
if url.startswith('udp://@'):
normalized = url.replace('udp://@', 'udp://', 1)
logger.debug(f"Normalized VLC-style UDP URL: {url} -> {normalized}")
return normalized
# Could add other normalizations here in the future (rtp://@, etc.)
return url
def increment_stream_count(account):
with lock:
@ -55,64 +24,3 @@ def decrement_stream_count(account):
active_streams_map[account.id] = current_usage
account.active_streams = current_usage
account.save(update_fields=['active_streams'])
def calculate_tuner_count(minimum=1, unlimited_default=10):
"""
Calculate tuner/connection count from active M3U profiles and custom streams.
This is the centralized function used by both HDHR and XtreamCodes APIs.
Args:
minimum (int): Minimum number to return (default: 1)
unlimited_default (int): Default value when unlimited profiles exist (default: 10)
Returns:
int: Calculated tuner/connection count
"""
try:
from apps.m3u.models import M3UAccountProfile
from apps.channels.models import Stream
# Calculate tuner count from active profiles from active M3U accounts (excluding default "custom Default" profile)
profiles = M3UAccountProfile.objects.filter(
is_active=True,
m3u_account__is_active=True, # Only include profiles from enabled M3U accounts
).exclude(id=1)
# 1. Check if any profile has unlimited streams (max_streams=0)
has_unlimited = profiles.filter(max_streams=0).exists()
# 2. Calculate tuner count from limited profiles
limited_tuners = 0
if not has_unlimited:
limited_tuners = (
profiles.filter(max_streams__gt=0)
.aggregate(total=models.Sum("max_streams"))
.get("total", 0)
or 0
)
# 3. Add custom stream count to tuner count
custom_stream_count = Stream.objects.filter(is_custom=True).count()
logger.debug(f"Found {custom_stream_count} custom streams")
# 4. Calculate final tuner count
if has_unlimited:
# If there are unlimited profiles, start with unlimited_default plus custom streams
tuner_count = unlimited_default + custom_stream_count
else:
# Otherwise use the limited profile sum plus custom streams
tuner_count = limited_tuners + custom_stream_count
# 5. Ensure minimum number
tuner_count = max(minimum, tuner_count)
logger.debug(
f"Calculated tuner count: {tuner_count} (limited profiles: {limited_tuners}, custom streams: {custom_stream_count}, unlimited: {has_unlimited})"
)
return tuner_count
except Exception as e:
logger.error(f"Error calculating tuner count: {e}")
return minimum # Fallback to minimum value
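Usage sketch for the two helpers above; the printed numbers depend on the profiles and custom streams actually configured.

from apps.m3u.utils import calculate_tuner_count, normalize_stream_url

print(normalize_stream_url("udp://@239.0.0.1:1234"))    # -> udp://239.0.0.1:1234
print(normalize_stream_url("http://example.com/live"))  # unchanged

# Sum of max_streams over limited active profiles (or unlimited_default when any
# profile is unlimited), plus the custom stream count, floored at `minimum`.
print(calculate_tuner_count(minimum=2, unlimited_default=10))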
View file
@ -3,7 +3,6 @@ from django.views import View
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
from apps.m3u.models import M3UAccount
import json
View file
@ -14,26 +14,3 @@ class OutputM3UTest(TestCase):
self.assertEqual(response.status_code, 200)
content = response.content.decode()
self.assertIn("#EXTM3U", content)
def test_generate_m3u_response_post_empty_body(self):
"""
Test that a POST request with an empty body returns 200 OK.
"""
url = reverse('output:generate_m3u')
response = self.client.post(url, data=None, content_type='application/x-www-form-urlencoded')
content = response.content.decode()
self.assertEqual(response.status_code, 200, "POST with empty body should return 200 OK")
self.assertIn("#EXTM3U", content)
def test_generate_m3u_response_post_with_body(self):
"""
Test that a POST request with a non-empty body returns 403 Forbidden.
"""
url = reverse('output:generate_m3u')
response = self.client.post(url, data={'evilstring': 'muhahaha'})
self.assertEqual(response.status_code, 403, "POST with body should return 403 Forbidden")
self.assertIn("POST requests with body are not allowed, body is:", response.content.decode())
View file
@ -1,14 +1,16 @@
from django.urls import path, re_path, include
from .views import m3u_endpoint, epg_endpoint, xc_get, xc_movie_stream, xc_series_stream
from .views import generate_m3u, generate_epg
from core.views import stream_view
app_name = "output"
app_name = 'output'
urlpatterns = [
# Allow `/m3u`, `/m3u/`, `/m3u/profile_name`, and `/m3u/profile_name/`
re_path(r"^m3u(?:/(?P<profile_name>[^/]+))?/?$", m3u_endpoint, name="m3u_endpoint"),
re_path(r'^m3u(?:/(?P<profile_name>[^/]+))?/?$', generate_m3u, name='generate_m3u'),
# Allow `/epg`, `/epg/`, `/epg/profile_name`, and `/epg/profile_name/`
re_path(r"^epg(?:/(?P<profile_name>[^/]+))?/?$", epg_endpoint, name="epg_endpoint"),
re_path(r'^epg(?:/(?P<profile_name>[^/]+))?/?$', generate_epg, name='generate_epg'),
# Allow both `/stream/<int:stream_id>` and `/stream/<int:stream_id>/`
re_path(r"^stream/(?P<channel_uuid>[0-9a-fA-F\-]+)/?$", stream_view, name="stream"),
re_path(r'^stream/(?P<channel_uuid>[0-9a-fA-F\-]+)/?$', stream_view, name='stream'),
]
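The optional-segment regex above accepts both the bare endpoint and a profile-qualified form, with or without a trailing slash; a quick standalone check:

import re

pattern = re.compile(r"^m3u(?:/(?P<profile_name>[^/]+))?/?$")
for path in ("m3u", "m3u/", "m3u/living", "m3u/living/"):
    m = pattern.match(path)
    print(path, "->", m.group("profile_name") if m else None)
# m3u -> None, m3u/ -> None, m3u/living -> living, m3u/living/ -> living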
File diff suppressed because it is too large
View file
@ -1,2 +0,0 @@
default_app_config = "apps.plugins.apps.PluginsConfig"
View file
@ -1,22 +0,0 @@
from django.urls import path
from .api_views import (
PluginsListAPIView,
PluginReloadAPIView,
PluginSettingsAPIView,
PluginRunAPIView,
PluginEnabledAPIView,
PluginImportAPIView,
PluginDeleteAPIView,
)
app_name = "plugins"
urlpatterns = [
path("plugins/", PluginsListAPIView.as_view(), name="list"),
path("plugins/reload/", PluginReloadAPIView.as_view(), name="reload"),
path("plugins/import/", PluginImportAPIView.as_view(), name="import"),
path("plugins/<str:key>/delete/", PluginDeleteAPIView.as_view(), name="delete"),
path("plugins/<str:key>/settings/", PluginSettingsAPIView.as_view(), name="settings"),
path("plugins/<str:key>/run/", PluginRunAPIView.as_view(), name="run"),
path("plugins/<str:key>/enabled/", PluginEnabledAPIView.as_view(), name="enabled"),
]
View file
@ -1,306 +0,0 @@
import logging
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework.decorators import api_view
from django.conf import settings
from django.core.files.uploadedfile import UploadedFile
import io
import os
import zipfile
import shutil
import tempfile
from apps.accounts.permissions import (
Authenticated,
permission_classes_by_method,
)
from .loader import PluginManager
from .models import PluginConfig
logger = logging.getLogger(__name__)
class PluginsListAPIView(APIView):
def get_permissions(self):
try:
return [
perm() for perm in permission_classes_by_method[self.request.method]
]
except KeyError:
return [Authenticated()]
def get(self, request):
pm = PluginManager.get()
# Ensure registry is up-to-date on each request
pm.discover_plugins()
return Response({"plugins": pm.list_plugins()})
class PluginReloadAPIView(APIView):
def get_permissions(self):
try:
return [
perm() for perm in permission_classes_by_method[self.request.method]
]
except KeyError:
return [Authenticated()]
def post(self, request):
pm = PluginManager.get()
pm.discover_plugins()
return Response({"success": True, "count": len(pm._registry)})
class PluginImportAPIView(APIView):
def get_permissions(self):
try:
return [
perm() for perm in permission_classes_by_method[self.request.method]
]
except KeyError:
return [Authenticated()]
def post(self, request):
file: UploadedFile = request.FILES.get("file")
if not file:
return Response({"success": False, "error": "Missing 'file' upload"}, status=status.HTTP_400_BAD_REQUEST)
pm = PluginManager.get()
plugins_dir = pm.plugins_dir
try:
zf = zipfile.ZipFile(file)
except zipfile.BadZipFile:
return Response({"success": False, "error": "Invalid zip file"}, status=status.HTTP_400_BAD_REQUEST)
# Extract to a temporary directory first to avoid server reload thrash
tmp_root = tempfile.mkdtemp(prefix="plugin_import_")
try:
file_members = [m for m in zf.infolist() if not m.is_dir()]
if not file_members:
shutil.rmtree(tmp_root, ignore_errors=True)
return Response({"success": False, "error": "Archive is empty"}, status=status.HTTP_400_BAD_REQUEST)
for member in file_members:
name = member.filename
if not name or name.endswith("/"):
continue
# Normalize and prevent path traversal
norm = os.path.normpath(name)
if norm.startswith("..") or os.path.isabs(norm):
shutil.rmtree(tmp_root, ignore_errors=True)
return Response({"success": False, "error": "Unsafe path in archive"}, status=status.HTTP_400_BAD_REQUEST)
dest_path = os.path.join(tmp_root, norm)
os.makedirs(os.path.dirname(dest_path), exist_ok=True)
with zf.open(member, 'r') as src, open(dest_path, 'wb') as dst:
shutil.copyfileobj(src, dst)
# Find candidate directory containing plugin.py or __init__.py
candidates = []
for dirpath, dirnames, filenames in os.walk(tmp_root):
has_pluginpy = "plugin.py" in filenames
has_init = "__init__.py" in filenames
if has_pluginpy or has_init:
depth = len(os.path.relpath(dirpath, tmp_root).split(os.sep))
candidates.append((0 if has_pluginpy else 1, depth, dirpath))
if not candidates:
shutil.rmtree(tmp_root, ignore_errors=True)
return Response({"success": False, "error": "Invalid plugin: missing plugin.py or package __init__.py"}, status=status.HTTP_400_BAD_REQUEST)
candidates.sort()
chosen = candidates[0][2]
# Determine plugin key: prefer chosen folder name; if chosen is tmp_root, use zip base name
base_name = os.path.splitext(getattr(file, "name", "plugin"))[0]
plugin_key = os.path.basename(chosen.rstrip(os.sep))
if chosen.rstrip(os.sep) == tmp_root.rstrip(os.sep):
plugin_key = base_name
plugin_key = plugin_key.replace(" ", "_").lower()
final_dir = os.path.join(plugins_dir, plugin_key)
if os.path.exists(final_dir):
# If final dir exists but contains a valid plugin, refuse; otherwise clear it
if os.path.exists(os.path.join(final_dir, "plugin.py")) or os.path.exists(os.path.join(final_dir, "__init__.py")):
shutil.rmtree(tmp_root, ignore_errors=True)
return Response({"success": False, "error": f"Plugin '{plugin_key}' already exists"}, status=status.HTTP_400_BAD_REQUEST)
try:
shutil.rmtree(final_dir)
except Exception:
pass
# Move chosen directory into final location
if chosen.rstrip(os.sep) == tmp_root.rstrip(os.sep):
# Move all contents into final_dir
os.makedirs(final_dir, exist_ok=True)
for item in os.listdir(tmp_root):
shutil.move(os.path.join(tmp_root, item), os.path.join(final_dir, item))
else:
shutil.move(chosen, final_dir)
# Cleanup temp
shutil.rmtree(tmp_root, ignore_errors=True)
target_dir = final_dir
finally:
try:
shutil.rmtree(tmp_root, ignore_errors=True)
except Exception:
pass
# Reload discovery and validate plugin entry
pm.discover_plugins()
plugin = pm._registry.get(plugin_key)
if not plugin:
# Cleanup the copied folder to avoid leaving invalid plugin behind
try:
shutil.rmtree(target_dir, ignore_errors=True)
except Exception:
pass
return Response({"success": False, "error": "Invalid plugin: missing Plugin class in plugin.py or __init__.py"}, status=status.HTTP_400_BAD_REQUEST)
# Extra validation: ensure Plugin.run exists
instance = getattr(plugin, "instance", None)
run_method = getattr(instance, "run", None)
if not callable(run_method):
try:
shutil.rmtree(target_dir, ignore_errors=True)
except Exception:
pass
return Response({"success": False, "error": "Invalid plugin: Plugin class must define a callable run(action, params, context)"}, status=status.HTTP_400_BAD_REQUEST)
# Find DB config to return enabled/ever_enabled
try:
cfg = PluginConfig.objects.get(key=plugin_key)
enabled = cfg.enabled
ever_enabled = getattr(cfg, "ever_enabled", False)
except PluginConfig.DoesNotExist:
enabled = False
ever_enabled = False
return Response({
"success": True,
"plugin": {
"key": plugin.key,
"name": plugin.name,
"version": plugin.version,
"description": plugin.description,
"enabled": enabled,
"ever_enabled": ever_enabled,
"fields": plugin.fields or [],
"actions": plugin.actions or [],
}
})
class PluginSettingsAPIView(APIView):
def get_permissions(self):
try:
return [
perm() for perm in permission_classes_by_method[self.request.method]
]
except KeyError:
return [Authenticated()]
def post(self, request, key):
pm = PluginManager.get()
data = request.data or {}
settings = data.get("settings", {})
try:
updated = pm.update_settings(key, settings)
return Response({"success": True, "settings": updated})
except Exception as e:
return Response({"success": False, "error": str(e)}, status=status.HTTP_400_BAD_REQUEST)
class PluginRunAPIView(APIView):
def get_permissions(self):
try:
return [
perm() for perm in permission_classes_by_method[self.request.method]
]
except KeyError:
return [Authenticated()]
def post(self, request, key):
pm = PluginManager.get()
action = request.data.get("action")
params = request.data.get("params", {})
if not action:
return Response({"success": False, "error": "Missing 'action'"}, status=status.HTTP_400_BAD_REQUEST)
# Respect plugin enabled flag
try:
cfg = PluginConfig.objects.get(key=key)
if not cfg.enabled:
return Response({"success": False, "error": "Plugin is disabled"}, status=status.HTTP_403_FORBIDDEN)
except PluginConfig.DoesNotExist:
return Response({"success": False, "error": "Plugin not found"}, status=status.HTTP_404_NOT_FOUND)
try:
result = pm.run_action(key, action, params)
return Response({"success": True, "result": result})
except PermissionError as e:
return Response({"success": False, "error": str(e)}, status=status.HTTP_403_FORBIDDEN)
except Exception as e:
logger.exception("Plugin action failed")
return Response({"success": False, "error": str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class PluginEnabledAPIView(APIView):
def get_permissions(self):
try:
return [
perm() for perm in permission_classes_by_method[self.request.method]
]
except KeyError:
return [Authenticated()]
def post(self, request, key):
enabled = request.data.get("enabled")
if enabled is None:
return Response({"success": False, "error": "Missing 'enabled' boolean"}, status=status.HTTP_400_BAD_REQUEST)
try:
cfg = PluginConfig.objects.get(key=key)
cfg.enabled = bool(enabled)
# Mark that this plugin has been enabled at least once
if cfg.enabled and not cfg.ever_enabled:
cfg.ever_enabled = True
cfg.save(update_fields=["enabled", "ever_enabled", "updated_at"])
return Response({"success": True, "enabled": cfg.enabled, "ever_enabled": cfg.ever_enabled})
except PluginConfig.DoesNotExist:
return Response({"success": False, "error": "Plugin not found"}, status=status.HTTP_404_NOT_FOUND)
class PluginDeleteAPIView(APIView):
def get_permissions(self):
try:
return [
perm() for perm in permission_classes_by_method[self.request.method]
]
except KeyError:
return [Authenticated()]
def delete(self, request, key):
pm = PluginManager.get()
plugins_dir = pm.plugins_dir
target_dir = os.path.join(plugins_dir, key)
# Safety: ensure path inside plugins_dir
abs_plugins = os.path.abspath(plugins_dir) + os.sep
abs_target = os.path.abspath(target_dir)
if not abs_target.startswith(abs_plugins):
return Response({"success": False, "error": "Invalid plugin path"}, status=status.HTTP_400_BAD_REQUEST)
# Remove files
if os.path.isdir(target_dir):
try:
shutil.rmtree(target_dir)
except Exception as e:
return Response({"success": False, "error": f"Failed to delete plugin files: {e}"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
# Remove DB record
try:
PluginConfig.objects.filter(key=key).delete()
except Exception:
pass
# Reload registry
pm.discover_plugins()
return Response({"success": True})
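Putting these views together, a typical import → enable → run round trip could look like the sketch below. The URL prefix, token, archive name and action id are illustrative assumptions; plugins are imported disabled, so the enable step is required before the run endpoint stops returning 403.
# Sketch: upload a plugin zip, enable it, then trigger one of its actions.
# Base URL, auth header, archive name and action id are illustrative assumptions.
import requests

BASE = "http://localhost:9191/api/plugins"
HEADERS = {"Authorization": "Bearer <token>"}

with open("my_plugin.zip", "rb") as fh:
    resp = requests.post(f"{BASE}/plugins/import/", headers=HEADERS,
                         files={"file": fh}, timeout=60)
resp.raise_for_status()
key = resp.json()["plugin"]["key"]

# Imported plugins start disabled; PluginRunAPIView rejects disabled plugins.
requests.post(f"{BASE}/plugins/{key}/enabled/", headers=HEADERS,
              json={"enabled": True}, timeout=10).raise_for_status()

run = requests.post(f"{BASE}/plugins/{key}/run/", headers=HEADERS,
                    json={"action": "say_hello", "params": {"name": "world"}},
                    timeout=60)
print(run.json())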

View file

@@ -1,54 +0,0 @@
from django.apps import AppConfig
import os
import sys
from django.db.models.signals import post_migrate
class PluginsConfig(AppConfig):
name = "apps.plugins"
verbose_name = "Plugins"
def ready(self):
"""Wire up plugin discovery without hitting the DB during app init.
- Skip during common management commands that don't need discovery.
- Register post_migrate handler to sync plugin registry to DB after migrations.
- Do an in-memory discovery (no DB) so registry is available early.
"""
try:
# Allow explicit opt-out via env var
if os.environ.get("DISPATCHARR_SKIP_PLUGIN_AUTODISCOVERY", "").lower() in ("1", "true", "yes"):
return
argv = sys.argv[1:] if len(sys.argv) > 1 else []
mgmt_cmds_to_skip = {
# Skip immediate discovery for these commands
"makemigrations", "collectstatic", "check", "test", "shell", "showmigrations",
}
if argv and argv[0] in mgmt_cmds_to_skip:
return
# Run discovery with DB sync after the plugins app has been migrated
def _post_migrate_discover(sender=None, app_config=None, **kwargs):
try:
if app_config and getattr(app_config, 'label', None) != 'plugins':
return
from .loader import PluginManager
PluginManager.get().discover_plugins(sync_db=True)
except Exception:
import logging
logging.getLogger(__name__).exception("Plugin discovery failed in post_migrate")
post_migrate.connect(
_post_migrate_discover,
dispatch_uid="apps.plugins.post_migrate_discover",
)
# Perform non-DB discovery now to populate in-memory registry.
from .loader import PluginManager
PluginManager.get().discover_plugins(sync_db=False)
except Exception:
# Avoid breaking startup due to plugin errors
import logging
logging.getLogger(__name__).exception("Plugin discovery wiring failed during app ready")
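Because ready() only performs an in-memory discovery and defers the DB sync to post_migrate, a DB-synced rescan can also be triggered by hand. The sketch below assumes a Django shell with the plugins app already migrated.
# Sketch: manual, DB-synced plugin discovery from a Django shell
# (`python manage.py shell`); assumes migrations have been applied.
from apps.plugins.loader import PluginManager

pm = PluginManager.get()
pm.discover_plugins(sync_db=True)   # scans DISPATCHARR_PLUGINS_DIR (default /data/plugins)
for entry in pm.list_plugins():
    print(entry["key"], entry["version"] or "-", "missing" if entry["missing"] else "ok")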

View file

@@ -1,254 +0,0 @@
import importlib
import json
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional
from django.db import transaction
from .models import PluginConfig
logger = logging.getLogger(__name__)
@dataclass
class LoadedPlugin:
key: str
name: str
version: str = ""
description: str = ""
module: Any = None
instance: Any = None
fields: List[Dict[str, Any]] = field(default_factory=list)
actions: List[Dict[str, Any]] = field(default_factory=list)
class PluginManager:
"""Singleton manager that discovers and runs plugins from /data/plugins."""
_instance: Optional["PluginManager"] = None
@classmethod
def get(cls) -> "PluginManager":
if not cls._instance:
cls._instance = PluginManager()
return cls._instance
def __init__(self) -> None:
self.plugins_dir = os.environ.get("DISPATCHARR_PLUGINS_DIR", "/data/plugins")
self._registry: Dict[str, LoadedPlugin] = {}
# Ensure plugins directory exists
os.makedirs(self.plugins_dir, exist_ok=True)
if self.plugins_dir not in sys.path:
sys.path.append(self.plugins_dir)
def discover_plugins(self, *, sync_db: bool = True) -> Dict[str, LoadedPlugin]:
if sync_db:
logger.info(f"Discovering plugins in {self.plugins_dir}")
else:
logger.debug(f"Discovering plugins (no DB sync) in {self.plugins_dir}")
self._registry.clear()
try:
for entry in sorted(os.listdir(self.plugins_dir)):
path = os.path.join(self.plugins_dir, entry)
if not os.path.isdir(path):
continue
plugin_key = entry.replace(" ", "_").lower()
try:
self._load_plugin(plugin_key, path)
except Exception:
logger.exception(f"Failed to load plugin '{plugin_key}' from {path}")
logger.info(f"Discovered {len(self._registry)} plugin(s)")
except FileNotFoundError:
logger.warning(f"Plugins directory not found: {self.plugins_dir}")
# Sync DB records (optional)
if sync_db:
try:
self._sync_db_with_registry()
except Exception:
# Defer sync if database is not ready (e.g., first startup before migrate)
logger.exception("Deferring plugin DB sync; database not ready yet")
return self._registry
def _load_plugin(self, key: str, path: str):
# Plugin can be a package and/or contain plugin.py. Prefer plugin.py when present.
has_pkg = os.path.exists(os.path.join(path, "__init__.py"))
has_pluginpy = os.path.exists(os.path.join(path, "plugin.py"))
if not (has_pkg or has_pluginpy):
logger.debug(f"Skipping {path}: no plugin.py or package")
return
candidate_modules = []
if has_pluginpy:
candidate_modules.append(f"{key}.plugin")
if has_pkg:
candidate_modules.append(key)
module = None
plugin_cls = None
last_error = None
for module_name in candidate_modules:
try:
logger.debug(f"Importing plugin module {module_name}")
module = importlib.import_module(module_name)
plugin_cls = getattr(module, "Plugin", None)
if plugin_cls is not None:
break
else:
logger.warning(f"Module {module_name} has no Plugin class")
except Exception as e:
last_error = e
logger.exception(f"Error importing module {module_name}")
if plugin_cls is None:
if last_error:
raise last_error
else:
logger.warning(f"No Plugin class found for {key}; skipping")
return
instance = plugin_cls()
name = getattr(instance, "name", key)
version = getattr(instance, "version", "")
description = getattr(instance, "description", "")
fields = getattr(instance, "fields", [])
actions = getattr(instance, "actions", [])
self._registry[key] = LoadedPlugin(
key=key,
name=name,
version=version,
description=description,
module=module,
instance=instance,
fields=fields,
actions=actions,
)
def _sync_db_with_registry(self):
with transaction.atomic():
for key, lp in self._registry.items():
obj, _ = PluginConfig.objects.get_or_create(
key=key,
defaults={
"name": lp.name,
"version": lp.version,
"description": lp.description,
"settings": {},
},
)
# Update meta if changed
changed = False
if obj.name != lp.name:
obj.name = lp.name
changed = True
if obj.version != lp.version:
obj.version = lp.version
changed = True
if obj.description != lp.description:
obj.description = lp.description
changed = True
if changed:
obj.save()
def list_plugins(self) -> List[Dict[str, Any]]:
from .models import PluginConfig
plugins: List[Dict[str, Any]] = []
try:
configs = {c.key: c for c in PluginConfig.objects.all()}
except Exception as e:
# Database might not be migrated yet; fall back to registry only
logger.warning("PluginConfig table unavailable; listing registry only: %s", e)
configs = {}
# First, include all discovered plugins
for key, lp in self._registry.items():
conf = configs.get(key)
plugins.append(
{
"key": key,
"name": lp.name,
"version": lp.version,
"description": lp.description,
"enabled": conf.enabled if conf else False,
"ever_enabled": getattr(conf, "ever_enabled", False) if conf else False,
"fields": lp.fields or [],
"settings": (conf.settings if conf else {}),
"actions": lp.actions or [],
"missing": False,
}
)
# Then, include any DB-only configs (files missing or failed to load)
discovered_keys = set(self._registry.keys())
for key, conf in configs.items():
if key in discovered_keys:
continue
plugins.append(
{
"key": key,
"name": conf.name,
"version": conf.version,
"description": conf.description,
"enabled": conf.enabled,
"ever_enabled": getattr(conf, "ever_enabled", False),
"fields": [],
"settings": conf.settings or {},
"actions": [],
"missing": True,
}
)
return plugins
def get_plugin(self, key: str) -> Optional[LoadedPlugin]:
return self._registry.get(key)
def update_settings(self, key: str, settings: Dict[str, Any]) -> Dict[str, Any]:
cfg = PluginConfig.objects.get(key=key)
cfg.settings = settings or {}
cfg.save(update_fields=["settings", "updated_at"])
return cfg.settings
def run_action(self, key: str, action_id: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
lp = self.get_plugin(key)
if not lp or not lp.instance:
raise ValueError(f"Plugin '{key}' not found")
cfg = PluginConfig.objects.get(key=key)
if not cfg.enabled:
raise PermissionError(f"Plugin '{key}' is disabled")
params = params or {}
# Provide a context object to the plugin
context = {
"settings": cfg.settings or {},
"logger": logger,
"actions": {a.get("id"): a for a in (lp.actions or [])},
}
# Run either via Celery if plugin provides a delayed method, or inline
run_method = getattr(lp.instance, "run", None)
if not callable(run_method):
raise ValueError(f"Plugin '{key}' has no runnable 'run' method")
try:
result = run_method(action_id, params, context)
except Exception:
logger.exception(f"Plugin '{key}' action '{action_id}' failed")
raise
# Normalize return
if isinstance(result, dict):
return result
return {"status": "ok", "result": result}
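For reference, a directory satisfying the contract this loader enforces -- a Plugin class exposing name/version/description, optional fields/actions, and a callable run(action, params, context) -- might look like the following minimal, hypothetical plugin.
# /data/plugins/hello_world/plugin.py -- minimal, hypothetical plugin matching
# the interface read by _load_plugin() and invoked by run_action().
class Plugin:
    name = "Hello World"
    version = "0.1.0"
    description = "Echoes a greeting to the plugin logger"

    fields = [
        {"id": "greeting", "label": "Greeting", "type": "string", "default": "Hello"},
    ]
    actions = [
        {"id": "say_hello", "label": "Say hello", "description": "Log a greeting"},
    ]

    def run(self, action, params, context):
        # context carries the persisted settings, a logger, and the action map
        logger = context["logger"]
        greeting = context["settings"].get("greeting", "Hello")
        if action == "say_hello":
            logger.info(f"{greeting}, {params.get('name', 'world')}!")
            return {"status": "ok", "greeting": greeting}
        return {"status": "error", "error": f"unknown action '{action}'"}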

View file

@@ -1,29 +0,0 @@
# Generated by Django 5.2.4 on 2025-09-13 13:51
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='PluginConfig',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(max_length=128, unique=True)),
('name', models.CharField(max_length=255)),
('version', models.CharField(blank=True, default='', max_length=64)),
('description', models.TextField(blank=True, default='')),
('enabled', models.BooleanField(default=False)),
('ever_enabled', models.BooleanField(default=False)),
('settings', models.JSONField(blank=True, default=dict)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]

View file

@@ -1 +0,0 @@
# This file marks the migrations package for the plugins app.

View file

@@ -1,19 +0,0 @@
from django.db import models
class PluginConfig(models.Model):
"""Stores discovered plugins and their persisted settings."""
key = models.CharField(max_length=128, unique=True)
name = models.CharField(max_length=255)
version = models.CharField(max_length=64, blank=True, default="")
description = models.TextField(blank=True, default="")
enabled = models.BooleanField(default=False)
# Tracks whether this plugin has ever been enabled at least once
ever_enabled = models.BooleanField(default=False)
settings = models.JSONField(default=dict, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self) -> str:
return f"{self.name} ({self.key})"
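A small sketch of inspecting the persisted plugin state this model stores, run from a Django shell; nothing beyond the model above is assumed.
# Sketch: listing persisted plugin state from a Django shell.
from apps.plugins.models import PluginConfig

for cfg in PluginConfig.objects.order_by("key"):
    state = "enabled" if cfg.enabled else "disabled"
    print(f"{cfg.key:<24} {state:<9} settings={cfg.settings}")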

View file

@@ -1,28 +0,0 @@
from rest_framework import serializers
class PluginActionSerializer(serializers.Serializer):
id = serializers.CharField()
label = serializers.CharField()
description = serializers.CharField(required=False, allow_blank=True)
class PluginFieldSerializer(serializers.Serializer):
id = serializers.CharField()
label = serializers.CharField()
type = serializers.ChoiceField(choices=["string", "number", "boolean", "select"]) # simple types
default = serializers.JSONField(required=False)
help_text = serializers.CharField(required=False, allow_blank=True)
options = serializers.ListField(child=serializers.DictField(), required=False)
class PluginSerializer(serializers.Serializer):
key = serializers.CharField()
name = serializers.CharField()
version = serializers.CharField(allow_blank=True)
description = serializers.CharField(allow_blank=True)
enabled = serializers.BooleanField()
fields = PluginFieldSerializer(many=True)
settings = serializers.JSONField()
actions = PluginActionSerializer(many=True)

View file

@@ -1,6 +1,4 @@
"""Shared configuration between proxy types"""
import time
from django.db import connection
class BaseConfig:
DEFAULT_USER_AGENT = 'VLC/3.0.20 LibVLC/3.0.20' # Will only be used if connection to settings fails
@@ -11,56 +9,8 @@ class BaseConfig:
CONNECTION_TIMEOUT = 10 # seconds to wait for initial connection
MAX_STREAM_SWITCHES = 10 # Maximum number of stream switch attempts before giving up
BUFFER_CHUNK_SIZE = 188 * 1361 # ~256KB
BUFFERING_TIMEOUT = 15 # Seconds to wait for buffering before switching streams
BUFFER_SPEED = 1 # What speed to consider the stream buffering, 1x is normal speed, 2x is double speed, etc.
# Cache for proxy settings (class-level, shared across all instances)
_proxy_settings_cache = None
_proxy_settings_cache_time = 0
_proxy_settings_cache_ttl = 10 # Cache for 10 seconds
@classmethod
def get_proxy_settings(cls):
"""Get proxy settings from CoreSettings JSON data with fallback to defaults (cached)"""
# Check if cache is still valid
now = time.time()
if cls._proxy_settings_cache is not None and (now - cls._proxy_settings_cache_time) < cls._proxy_settings_cache_ttl:
return cls._proxy_settings_cache
# Cache miss or expired - fetch from database
try:
from core.models import CoreSettings
settings = CoreSettings.get_proxy_settings()
cls._proxy_settings_cache = settings
cls._proxy_settings_cache_time = now
return settings
except Exception:
# Return defaults if database query fails
return {
"buffering_timeout": 15,
"buffering_speed": 1.0,
"redis_chunk_ttl": 60,
"channel_shutdown_delay": 0,
"channel_init_grace_period": 5,
}
finally:
# Always close the connection after reading settings
try:
connection.close()
except Exception:
pass
@classmethod
def get_redis_chunk_ttl(cls):
"""Get Redis chunk TTL from database or default"""
settings = cls.get_proxy_settings()
return settings.get("redis_chunk_ttl", 60)
@property
def REDIS_CHUNK_TTL(self):
return self.get_redis_chunk_ttl()
# Redis settings
REDIS_CHUNK_TTL = 60 # Number in seconds - Chunks expire after 1 minute
class HLSConfig(BaseConfig):
MIN_SEGMENTS = 12
@@ -82,8 +32,6 @@ class TSConfig(BaseConfig):
INITIAL_BEHIND_CHUNKS = 4 # How many chunks behind to start a client (4 chunks = ~1MB)
CHUNK_BATCH_SIZE = 5 # How many chunks to fetch in one batch
KEEPALIVE_INTERVAL = 0.5 # Seconds between keepalive packets when at buffer head
# Chunk read timeout
CHUNK_TIMEOUT = 5 # Seconds to wait for each chunk read
# Streaming settings
TARGET_BITRATE = 8000000 # Target bitrate (8 Mbps)
@@ -92,14 +40,21 @@ class TSConfig(BaseConfig):
# Resource management
CLEANUP_INTERVAL = 60 # Check for inactive channels every 60 seconds
CHANNEL_SHUTDOWN_DELAY = 0 # How long to wait after last client before shutdown (seconds)
# Client tracking settings
CLIENT_RECORD_TTL = 60 # How long client records persist in Redis (seconds). Client will be considered MIA after this time.
CLIENT_RECORD_TTL = 5 # How long client records persist in Redis (seconds). Client will be considered MIA after this time.
CLEANUP_CHECK_INTERVAL = 1 # How often to check for disconnected clients (seconds)
CLIENT_HEARTBEAT_INTERVAL = 5 # How often to send client heartbeats (seconds)
GHOST_CLIENT_MULTIPLIER = 6.0 # How many heartbeat intervals before client considered ghost (6 would mean 36 seconds if heartbeat interval is 6)
CHANNEL_INIT_GRACE_PERIOD = 5 # How long to wait for first client after initialization (seconds)
CLIENT_HEARTBEAT_INTERVAL = 1 # How often to send client heartbeats (seconds)
GHOST_CLIENT_MULTIPLIER = 5.0 # How many heartbeat intervals before client considered ghost (5 would mean 5 seconds if heartbeat interval is 1)
CLIENT_WAIT_TIMEOUT = 30 # Seconds to wait for client to connect
# TS packets are 188 bytes
# Make chunk size a multiple of TS packet size for perfect alignment
# ~1MB is ideal for streaming (matches typical media buffer sizes)
# Stream health and recovery settings
MAX_HEALTH_RECOVERY_ATTEMPTS = 2 # Maximum times to attempt recovery for a single stream
MAX_RECONNECT_ATTEMPTS = 3 # Maximum reconnects to try before switching streams
@@ -109,47 +64,3 @@ class TSConfig(BaseConfig):
# Database-dependent settings with fallbacks
@classmethod
def get_channel_shutdown_delay(cls):
"""Get channel shutdown delay from database or default"""
settings = cls.get_proxy_settings()
return settings.get("channel_shutdown_delay", 0)
@classmethod
def get_buffering_timeout(cls):
"""Get buffering timeout from database or default"""
settings = cls.get_proxy_settings()
return settings.get("buffering_timeout", 15)
@classmethod
def get_buffering_speed(cls):
"""Get buffering speed threshold from database or default"""
settings = cls.get_proxy_settings()
return settings.get("buffering_speed", 1.0)
@classmethod
def get_channel_init_grace_period(cls):
"""Get channel init grace period from database or default"""
settings = cls.get_proxy_settings()
return settings.get("channel_init_grace_period", 5)
# Dynamic property access for these settings
@property
def CHANNEL_SHUTDOWN_DELAY(self):
return self.get_channel_shutdown_delay()
@property
def BUFFERING_TIMEOUT(self):
return self.get_buffering_timeout()
@property
def BUFFERING_SPEED(self):
return self.get_buffering_speed()
@property
def CHANNEL_INIT_GRACE_PERIOD(self):
return self.get_channel_init_grace_period()
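The settings accessors above all rely on a class-level cache that is refreshed at most every _proxy_settings_cache_ttl seconds; the standalone sketch below illustrates the same pattern with a stand-in fetch function in place of the CoreSettings query.
# Standalone sketch of the class-level TTL cache used by get_proxy_settings();
# `fetch` stands in for the CoreSettings database read.
import time

class CachedProxySettings:
    _cache = None
    _cache_time = 0.0
    _cache_ttl = 10  # seconds, mirroring _proxy_settings_cache_ttl

    @classmethod
    def get(cls, fetch):
        now = time.time()
        if cls._cache is not None and (now - cls._cache_time) < cls._cache_ttl:
            return cls._cache          # fresh enough: no database round trip
        cls._cache = fetch()           # miss or expired: refresh and re-stamp
        cls._cache_time = now
        return cls._cache

print(CachedProxySettings.get(lambda: {"buffering_timeout": 15, "buffering_speed": 1.0}))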

View file

@@ -10,7 +10,6 @@ import gc # Add import for garbage collection
from core.utils import RedisClient
from apps.proxy.ts_proxy.channel_status import ChannelStatus
from core.utils import send_websocket_update
from apps.proxy.vod_proxy.connection_manager import get_connection_manager
logger = logging.getLogger(__name__)
@@ -60,13 +59,3 @@ def fetch_channel_stats():
# Explicitly clean up large data structures
all_channels = None
gc.collect()
@shared_task
def cleanup_vod_connections():
"""Clean up stale VOD connections"""
try:
connection_manager = get_connection_manager()
connection_manager.cleanup_stale_connections(max_age_seconds=3600) # 1 hour
logger.info("VOD connection cleanup completed")
except Exception as e:
logger.error(f"Error in VOD connection cleanup: {e}", exc_info=True)
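The removed cleanup task is a plain Celery shared task, so it would normally be driven by a beat schedule; a hedged sketch of such an entry is below. The schedule name, task path and 30-minute cadence are assumptions based on this file's imports, not values taken from the project's settings.
# Sketch: a possible Celery beat entry for the VOD cleanup task above.
# The task path and cadence are assumptions for illustration.
from celery.schedules import crontab

CELERY_BEAT_SCHEDULE = {
    "cleanup-vod-connections": {
        "task": "apps.proxy.ts_proxy.tasks.cleanup_vod_connections",
        "schedule": crontab(minute="*/30"),
    },
}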

View file

@@ -264,63 +264,6 @@ class ChannelStatus:
'last_data_age': time.time() - manager.last_data_time
}
# Add FFmpeg stream information
video_codec = metadata.get(ChannelMetadataField.VIDEO_CODEC.encode('utf-8'))
if video_codec:
info['video_codec'] = video_codec.decode('utf-8')
resolution = metadata.get(ChannelMetadataField.RESOLUTION.encode('utf-8'))
if resolution:
info['resolution'] = resolution.decode('utf-8')
source_fps = metadata.get(ChannelMetadataField.SOURCE_FPS.encode('utf-8'))
if source_fps:
info['source_fps'] = float(source_fps.decode('utf-8'))
pixel_format = metadata.get(ChannelMetadataField.PIXEL_FORMAT.encode('utf-8'))
if pixel_format:
info['pixel_format'] = pixel_format.decode('utf-8')
source_bitrate = metadata.get(ChannelMetadataField.SOURCE_BITRATE.encode('utf-8'))
if source_bitrate:
info['source_bitrate'] = float(source_bitrate.decode('utf-8'))
audio_codec = metadata.get(ChannelMetadataField.AUDIO_CODEC.encode('utf-8'))
if audio_codec:
info['audio_codec'] = audio_codec.decode('utf-8')
sample_rate = metadata.get(ChannelMetadataField.SAMPLE_RATE.encode('utf-8'))
if sample_rate:
info['sample_rate'] = int(sample_rate.decode('utf-8'))
audio_channels = metadata.get(ChannelMetadataField.AUDIO_CHANNELS.encode('utf-8'))
if audio_channels:
info['audio_channels'] = audio_channels.decode('utf-8')
audio_bitrate = metadata.get(ChannelMetadataField.AUDIO_BITRATE.encode('utf-8'))
if audio_bitrate:
info['audio_bitrate'] = float(audio_bitrate.decode('utf-8'))
# Add FFmpeg performance stats
ffmpeg_speed = metadata.get(ChannelMetadataField.FFMPEG_SPEED.encode('utf-8'))
if ffmpeg_speed:
info['ffmpeg_speed'] = float(ffmpeg_speed.decode('utf-8'))
ffmpeg_fps = metadata.get(ChannelMetadataField.FFMPEG_FPS.encode('utf-8'))
if ffmpeg_fps:
info['ffmpeg_fps'] = float(ffmpeg_fps.decode('utf-8'))
actual_fps = metadata.get(ChannelMetadataField.ACTUAL_FPS.encode('utf-8'))
if actual_fps:
info['actual_fps'] = float(actual_fps.decode('utf-8'))
ffmpeg_bitrate = metadata.get(ChannelMetadataField.FFMPEG_BITRATE.encode('utf-8'))
if ffmpeg_bitrate:
info['ffmpeg_bitrate'] = float(ffmpeg_bitrate.decode('utf-8'))
stream_type = metadata.get(ChannelMetadataField.STREAM_TYPE.encode('utf-8'))
if stream_type:
info['stream_type'] = stream_type.decode('utf-8')
return info
@staticmethod
@@ -479,31 +422,6 @@ class ChannelStatus:
except ValueError:
logger.warning(f"Invalid m3u_profile_id format in Redis: {m3u_profile_id_bytes}")
# Add stream info to basic info as well
video_codec = metadata.get(ChannelMetadataField.VIDEO_CODEC.encode('utf-8'))
if video_codec:
info['video_codec'] = video_codec.decode('utf-8')
resolution = metadata.get(ChannelMetadataField.RESOLUTION.encode('utf-8'))
if resolution:
info['resolution'] = resolution.decode('utf-8')
source_fps = metadata.get(ChannelMetadataField.SOURCE_FPS.encode('utf-8'))
if source_fps:
info['source_fps'] = float(source_fps.decode('utf-8'))
ffmpeg_speed = metadata.get(ChannelMetadataField.FFMPEG_SPEED.encode('utf-8'))
if ffmpeg_speed:
info['ffmpeg_speed'] = float(ffmpeg_speed.decode('utf-8'))
audio_codec = metadata.get(ChannelMetadataField.AUDIO_CODEC.encode('utf-8'))
if audio_codec:
info['audio_codec'] = audio_codec.decode('utf-8')
audio_channels = metadata.get(ChannelMetadataField.AUDIO_CHANNELS.encode('utf-8'))
if audio_channels:
info['audio_channels'] = audio_channels.decode('utf-8')
stream_type = metadata.get(ChannelMetadataField.STREAM_TYPE.encode('utf-8'))
if stream_type:
info['stream_type'] = stream_type.decode('utf-8')
return info
except Exception as e:
logger.error(f"Error getting channel info: {e}", exc_info=True) # Added exc_info for better debugging
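Each of the removed blocks repeats the same "read an optional bytes field from the Redis hash, decode it, convert it" step; a small helper capturing that pattern is sketched below. The helper name and the fake metadata dict are illustrative, not part of the project.
# Sketch of the repeated decode/convert pattern used above; names and the
# sample metadata dict are illustrative only.
def read_field(metadata, field, convert=str):
    raw = metadata.get(field.encode("utf-8"))
    return convert(raw.decode("utf-8")) if raw is not None else None

metadata = {b"video_codec": b"h264", b"source_fps": b"29.97"}
print(read_field(metadata, "video_codec"))         # 'h264'
print(read_field(metadata, "source_fps", float))   # 29.97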

View file

@@ -4,15 +4,13 @@ import threading
import logging
import time
import json
import gevent
from typing import Set, Optional
from apps.proxy.config import TSConfig as Config
from redis.exceptions import ConnectionError, TimeoutError
from .constants import EventType, ChannelState, ChannelMetadataField
from .constants import EventType
from .config_helper import ConfigHelper
from .redis_keys import RedisKeys
from .utils import get_logger
from core.utils import send_websocket_update
logger = get_logger()
@@ -26,7 +24,6 @@ class ClientManager:
self.lock = threading.Lock()
self.last_active_time = time.time()
self.worker_id = worker_id # Store worker ID as instance variable
self._heartbeat_running = True # Flag to control heartbeat thread
# STANDARDIZED KEYS: Move client set under channel namespace
self.client_set_key = RedisKeys.clients(channel_id)
@@ -34,78 +31,39 @@ class ClientManager:
self.heartbeat_interval = ConfigHelper.get('CLIENT_HEARTBEAT_INTERVAL', 10)
self.last_heartbeat_time = {}
# Get ProxyServer instance for ownership checks
from .server import ProxyServer
self.proxy_server = ProxyServer.get_instance()
# Start heartbeat thread for local clients
self._start_heartbeat_thread()
self._registered_clients = set() # Track already registered client IDs
def _trigger_stats_update(self):
"""Trigger a channel stats update via WebSocket"""
try:
# Import here to avoid potential import issues
from apps.proxy.ts_proxy.channel_status import ChannelStatus
import redis
from django.conf import settings
# Get all channels from Redis using settings
redis_url = getattr(settings, 'REDIS_URL', 'redis://localhost:6379/0')
redis_client = redis.Redis.from_url(redis_url, decode_responses=True)
all_channels = []
cursor = 0
while True:
cursor, keys = redis_client.scan(cursor, match="ts_proxy:channel:*:clients", count=100)
for key in keys:
# Extract channel ID from key
parts = key.split(':')
if len(parts) >= 4:
ch_id = parts[2]
channel_info = ChannelStatus.get_basic_channel_info(ch_id)
if channel_info:
all_channels.append(channel_info)
if cursor == 0:
break
# Send WebSocket update using existing infrastructure
send_websocket_update(
"updates",
"update",
{
"success": True,
"type": "channel_stats",
"stats": json.dumps({'channels': all_channels, 'count': len(all_channels)})
}
)
except Exception as e:
logger.debug(f"Failed to trigger stats update: {e}")
def _start_heartbeat_thread(self):
"""Start thread to regularly refresh client presence in Redis for local clients"""
"""Start thread to regularly refresh client presence in Redis"""
def heartbeat_task():
no_clients_count = 0 # Track consecutive empty cycles
max_empty_cycles = 3 # Exit after this many consecutive empty checks
logger.debug(f"Started heartbeat thread for channel {self.channel_id} (interval: {self.heartbeat_interval}s)")
while self._heartbeat_running:
while True:
try:
# Wait for the interval, but check stop flag frequently for quick shutdown
# Sleep in 1-second increments to allow faster response to stop signal
for _ in range(int(self.heartbeat_interval)):
if not self._heartbeat_running:
break
time.sleep(1)
# Final check before doing work
if not self._heartbeat_running:
break
# Wait for the interval
time.sleep(self.heartbeat_interval)
# Send heartbeat for all local clients
with self.lock:
# Skip this cycle if we have no local clients
if not self.clients:
if not self.clients or not self.redis_client:
# No clients left, increment our counter
no_clients_count += 1
# If we've seen no clients for several consecutive checks, exit the thread
if no_clients_count >= max_empty_cycles:
logger.info(f"No clients for channel {self.channel_id} after {no_clients_count} consecutive checks, exiting heartbeat thread")
return # This exits the thread
# Skip this cycle if we have no clients
continue
else:
# Reset counter when we see clients
no_clients_count = 0
# IMPROVED GHOST DETECTION: Check for stale clients before sending heartbeats
current_time = time.time()
@@ -176,20 +134,11 @@ class ClientManager:
except Exception as e:
logger.error(f"Error in client heartbeat thread: {e}")
logger.debug(f"Heartbeat thread exiting for channel {self.channel_id}")
thread = threading.Thread(target=heartbeat_task, daemon=True)
thread.name = f"client-heartbeat-{self.channel_id}"
thread.start()
logger.debug(f"Started client heartbeat thread for channel {self.channel_id} (interval: {self.heartbeat_interval}s)")
def stop(self):
"""Stop the heartbeat thread and cleanup"""
logger.debug(f"Stopping ClientManager for channel {self.channel_id}")
self._heartbeat_running = False
# Give the thread a moment to exit gracefully
# Note: We don't join() here because it's a daemon thread and will exit on its own
def _execute_redis_command(self, command_func):
"""Execute Redis command with error handling"""
if not self.redis_client:
@@ -288,9 +237,6 @@ class ClientManager:
json.dumps(event_data)
)
# Trigger channel stats update via WebSocket
self._trigger_stats_update()
# Get total clients across all workers
total_clients = self.get_total_client_count()
logger.info(f"New client connected: {client_id} (local: {len(self.clients)}, total: {total_clients})")
@@ -305,8 +251,6 @@ class ClientManager:
def remove_client(self, client_id):
"""Remove a client from this channel and Redis"""
client_ip = None
with self.lock:
if client_id in self.clients:
self.clients.remove(client_id)
@@ -317,14 +261,6 @@ class ClientManager:
self.last_active_time = time.time()
if self.redis_client:
# Get client IP before removing the data
client_key = f"ts_proxy:channel:{self.channel_id}:clients:{client_id}"
client_data = self.redis_client.hgetall(client_key)
if client_data and b'ip_address' in client_data:
client_ip = client_data[b'ip_address'].decode('utf-8')
elif client_data and 'ip_address' in client_data:
client_ip = client_data['ip_address']
# Remove from channel's client set
self.redis_client.srem(self.client_set_key, client_id)
@@ -343,33 +279,16 @@ class ClientManager:
self._notify_owner_of_activity()
# Check if we're the owner - if so, handle locally; if not, publish event
am_i_owner = self.proxy_server and self.proxy_server.am_i_owner(self.channel_id)
if am_i_owner:
# We're the owner - handle the disconnect directly
logger.debug(f"Owner handling CLIENT_DISCONNECTED for client {client_id} locally (not publishing)")
if remaining == 0:
# Trigger shutdown check directly via ProxyServer method
logger.debug(f"No clients left - triggering immediate shutdown check")
# Spawn greenlet to avoid blocking
import gevent
gevent.spawn(self.proxy_server.handle_client_disconnect, self.channel_id)
else:
# We're not the owner - publish event so owner can handle it
logger.debug(f"Non-owner publishing CLIENT_DISCONNECTED event for client {client_id} on channel {self.channel_id} from worker {self.worker_id}")
event_data = json.dumps({
"event": EventType.CLIENT_DISCONNECTED,
"channel_id": self.channel_id,
"client_id": client_id,
"worker_id": self.worker_id or "unknown",
"timestamp": time.time(),
"remaining_clients": remaining
})
self.redis_client.publish(RedisKeys.events_channel(self.channel_id), event_data)
# Trigger channel stats update via WebSocket
self._trigger_stats_update()
# Publish client disconnected event
event_data = json.dumps({
"event": EventType.CLIENT_DISCONNECTED, # Use constant instead of string
"channel_id": self.channel_id,
"client_id": client_id,
"worker_id": self.worker_id or "unknown",
"timestamp": time.time(),
"remaining_clients": remaining
})
self.redis_client.publish(RedisKeys.events_channel(self.channel_id), event_data)
total_clients = self.get_total_client_count()
logger.info(f"Client disconnected: {client_id} (local: {len(self.clients)}, total: {total_clients})")
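The relationship between the two client-tracking constants changed in this hunk is simple arithmetic: a client is treated as a ghost after GHOST_CLIENT_MULTIPLIER heartbeat intervals with no heartbeat, as the short worked example below shows (values taken from this diff).
# Worked example of the ghost-client window implied by the settings in this diff.
CLIENT_HEARTBEAT_INTERVAL = 1     # seconds between heartbeats
GHOST_CLIENT_MULTIPLIER = 5.0     # intervals of silence before a client is a ghost

ghost_window = CLIENT_HEARTBEAT_INTERVAL * GHOST_CLIENT_MULTIPLIER
print(f"client considered a ghost after {ghost_window:.0f}s without a heartbeat")  # 5s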

View file

@@ -34,7 +34,7 @@ class ConfigHelper:
@staticmethod
def channel_shutdown_delay():
"""Get channel shutdown delay in seconds"""
return Config.get_channel_shutdown_delay()
return ConfigHelper.get('CHANNEL_SHUTDOWN_DELAY', 0)
@staticmethod
def initial_behind_chunks():
@@ -54,7 +54,7 @@ class ConfigHelper:
@staticmethod
def redis_chunk_ttl():
"""Get Redis chunk TTL in seconds"""
return Config.get_redis_chunk_ttl()
return ConfigHelper.get('REDIS_CHUNK_TTL', 60)
@staticmethod
def chunk_size():
@@ -85,27 +85,3 @@ class ConfigHelper:
def failover_grace_period():
"""Get extra time (in seconds) to allow for stream switching before disconnecting clients"""
return ConfigHelper.get('FAILOVER_GRACE_PERIOD', 20) # Default to 20 seconds
@staticmethod
def buffering_timeout():
"""Get buffering timeout in seconds"""
return Config.get_buffering_timeout()
@staticmethod
def buffering_speed():
"""Get buffering speed threshold"""
return Config.get_buffering_speed()
@staticmethod
def channel_init_grace_period():
"""Get channel initialization grace period in seconds"""
return Config.get_channel_init_grace_period()
@staticmethod
def chunk_timeout():
"""
Get chunk timeout in seconds (used for both socket and HTTP read timeouts).
This controls how long we wait for each chunk before timing out.
Set this higher (e.g., 30s) for slow providers that may have intermittent delays.
"""
return ConfigHelper.get('CHUNK_TIMEOUT', 5) # Default 5 seconds
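ConfigHelper.get() itself is not shown in this hunk; the stand-in below only illustrates the "named setting with a fallback default" lookup that the helpers above delegate to, and is an assumption rather than the project's implementation.
# Stand-in sketch of a "setting with default" lookup like ConfigHelper.get();
# not the project's implementation, just the shape of the fallback behaviour.
from apps.proxy.config import TSConfig as Config

def get_setting(name, default=None):
    return getattr(Config, name, default)

print(get_setting("CLIENT_HEARTBEAT_INTERVAL", 10))  # defined on TSConfig
print(get_setting("FAILOVER_GRACE_PERIOD", 20))      # falls back if undefined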

View file

@@ -18,7 +18,6 @@ class ChannelState:
ERROR = "error"
STOPPING = "stopping"
STOPPED = "stopped"
BUFFERING = "buffering"
# Event types
class EventType:
@@ -33,8 +32,6 @@ class EventType:
# Stream types
class StreamType:
HLS = "hls"
RTSP = "rtsp"
UDP = "udp"
TS = "ts"
UNKNOWN = "unknown"
@@ -66,33 +63,6 @@ class ChannelMetadataField:
STREAM_SWITCH_TIME = "stream_switch_time"
STREAM_SWITCH_REASON = "stream_switch_reason"
# FFmpeg performance metrics
FFMPEG_SPEED = "ffmpeg_speed"
FFMPEG_FPS = "ffmpeg_fps"
ACTUAL_FPS = "actual_fps"
FFMPEG_OUTPUT_BITRATE = "ffmpeg_output_bitrate"
FFMPEG_STATS_UPDATED = "ffmpeg_stats_updated"
# Video stream info
VIDEO_CODEC = "video_codec"
RESOLUTION = "resolution"
WIDTH = "width"
HEIGHT = "height"
SOURCE_FPS = "source_fps"
PIXEL_FORMAT = "pixel_format"
VIDEO_BITRATE = "video_bitrate"
# Audio stream info
AUDIO_CODEC = "audio_codec"
SAMPLE_RATE = "sample_rate"
AUDIO_CHANNELS = "audio_channels"
AUDIO_BITRATE = "audio_bitrate"
# Stream format info
STREAM_TYPE = "stream_type"
# Stream info timestamp
STREAM_INFO_UPDATED = "stream_info_updated"
# Client metadata fields
CONNECTED_AT = "connected_at"
LAST_ACTIVE = "last_active"
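The metadata field names above are the keys the proxy writes into each channel's Redis hash; a small hedged sketch of reading one of them back is shown below. The Redis URL, the exact metadata key layout, and the constants module path are assumptions for illustration.
# Sketch: reading a stream-info field by its constant name.
# Redis URL, key layout and module path are assumptions for illustration.
import redis
from apps.proxy.ts_proxy.constants import ChannelMetadataField

r = redis.Redis.from_url("redis://localhost:6379/0")
metadata_key = "ts_proxy:channel:<channel-uuid>:metadata"  # illustrative layout

codec = r.hget(metadata_key, ChannelMetadataField.VIDEO_CODEC)
print(codec.decode("utf-8") if codec else "no codec recorded yet")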

Some files were not shown because too many files have changed in this diff