Merge branch 'dev' of https://github.com/Dispatcharr/Dispatcharr into pr/stlalpha/488

SergeantPanda 2025-12-02 13:39:06 -06:00
commit f1320c9a5d
122 changed files with 15791 additions and 3215 deletions


@@ -2,42 +2,37 @@ name: Base Image Build
on:
push:
branches: [ main, dev ]
branches: [main, dev]
paths:
- 'docker/DispatcharrBase'
- '.github/workflows/base-image.yml'
- 'requirements.txt'
pull_request:
branches: [ main, dev ]
branches: [main, dev]
paths:
- 'docker/DispatcharrBase'
- '.github/workflows/base-image.yml'
- 'requirements.txt'
workflow_dispatch: # Allow manual triggering
workflow_dispatch: # Allow manual triggering
permissions:
contents: write # For managing releases and pushing tags
packages: write # For publishing to GitHub Container Registry
contents: write # For managing releases and pushing tags
packages: write # For publishing to GitHub Container Registry
jobs:
build-base-image:
runs-on: ubuntu-latest
prepare:
runs-on: ubuntu-24.04
outputs:
repo_owner: ${{ steps.meta.outputs.repo_owner }}
repo_name: ${{ steps.meta.outputs.repo_name }}
branch_tag: ${{ steps.meta.outputs.branch_tag }}
timestamp: ${{ steps.timestamp.outputs.timestamp }}
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Generate timestamp for build
id: timestamp
run: |
@@ -66,13 +61,111 @@ jobs:
echo "branch_tag=base-${BRANCH}" >> $GITHUB_OUTPUT
fi
docker:
needs: [prepare]
strategy:
fail-fast: false
matrix:
platform: [amd64, arm64]
include:
- platform: amd64
runner: ubuntu-24.04
- platform: arm64
runner: ubuntu-24.04-arm
runs-on: ${{ matrix.runner }}
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Configure Git
run: |
git config user.name "GitHub Actions"
git config user.email "actions@github.com"
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
registry: docker.io
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push Docker base image
uses: docker/build-push-action@v4
with:
context: .
file: ./docker/DispatcharrBase
push: true
platforms: linux/amd64,linux/arm64
push: ${{ github.event_name != 'pull_request' }}
platforms: linux/${{ matrix.platform }}
tags: |
ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:base
ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:base-${{ steps.timestamp.outputs.timestamp }}
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }}
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
build-args: |
REPO_OWNER=${{ needs.prepare.outputs.repo_owner }}
REPO_NAME=${{ needs.prepare.outputs.repo_name }}
BRANCH=${{ github.ref_name }}
REPO_URL=https://github.com/${{ github.repository }}
TIMESTAMP=${{ needs.prepare.outputs.timestamp }}
create-manifest:
needs: [prepare, docker]
runs-on: ubuntu-24.04
if: ${{ github.event_name != 'pull_request' }}
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
registry: docker.io
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Create multi-arch manifest tags
run: |
set -euo pipefail
OWNER=${{ needs.prepare.outputs.repo_owner }}
REPO=${{ needs.prepare.outputs.repo_name }}
BRANCH_TAG=${{ needs.prepare.outputs.branch_tag }}
TIMESTAMP=${{ needs.prepare.outputs.timestamp }}
echo "Creating multi-arch manifest for ${OWNER}/${REPO}"
# GitHub Container Registry manifests
# branch tag (e.g. base or base-dev)
docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \
ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-arm64
# branch + timestamp tag
docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \
ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-arm64
# Docker Hub manifests
# branch tag (e.g. base or base-dev)
docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-arm64
# branch + timestamp tag
docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-arm64


@@ -2,19 +2,84 @@ name: CI Pipeline
on:
push:
branches: [ dev ]
branches: [dev]
pull_request:
branches: [ dev ]
workflow_dispatch: # Allow manual triggering
branches: [dev]
workflow_dispatch:
# Add explicit permissions for the workflow
permissions:
contents: write # For managing releases and pushing tags
packages: write # For publishing to GitHub Container Registry
contents: write
packages: write
jobs:
build:
runs-on: ubuntu-latest
prepare:
runs-on: ubuntu-24.04
# compute a single timestamp, version, and repo metadata for the entire workflow
outputs:
repo_owner: ${{ steps.meta.outputs.repo_owner }}
repo_name: ${{ steps.meta.outputs.repo_name }}
branch_tag: ${{ steps.meta.outputs.branch_tag }}
version: ${{ steps.version.outputs.version }}
timestamp: ${{ steps.timestamp.outputs.timestamp }}
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Generate timestamp for build
id: timestamp
run: |
TIMESTAMP=$(date -u +'%Y%m%d%H%M%S')
echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT
- name: Extract version info
id: version
run: |
VERSION=$(python -c "import version; print(version.__version__)")
echo "version=${VERSION}" >> $GITHUB_OUTPUT
- name: Set repository and image metadata
id: meta
run: |
REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')
echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT
REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]')
echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT
if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
echo "branch_tag=latest" >> $GITHUB_OUTPUT
echo "is_main=true" >> $GITHUB_OUTPUT
elif [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
echo "branch_tag=dev" >> $GITHUB_OUTPUT
echo "is_main=false" >> $GITHUB_OUTPUT
else
BRANCH=$(echo "${{ github.ref }}" | sed 's/refs\/heads\///' | sed 's/[^a-zA-Z0-9]/-/g')
echo "branch_tag=${BRANCH}" >> $GITHUB_OUTPUT
echo "is_main=false" >> $GITHUB_OUTPUT
fi
if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then
echo "is_fork=true" >> $GITHUB_OUTPUT
else
echo "is_fork=false" >> $GITHUB_OUTPUT
fi
docker:
needs: [prepare]
strategy:
fail-fast: false
matrix:
platform: [amd64, arm64]
include:
- platform: amd64
runner: ubuntu-24.04
- platform: arm64
runner: ubuntu-24.04-arm
runs-on: ${{ matrix.runner }}
# no per-job outputs here; shared metadata comes from the `prepare` job
steps:
- uses: actions/checkout@v3
with:
@@ -45,66 +110,85 @@ jobs:
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Generate timestamp for build
id: timestamp
run: |
TIMESTAMP=$(date -u +'%Y%m%d%H%M%S')
echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
registry: docker.io
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract version info
id: version
run: |
VERSION=$(python -c "import version; print(version.__version__)")
echo "version=${VERSION}" >> $GITHUB_OUTPUT
echo "sha_short=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT
- name: Set repository and image metadata
id: meta
run: |
# Get lowercase repository owner
REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')
echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT
# Get repository name
REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]')
echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT
# Determine branch name
if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
echo "branch_tag=latest" >> $GITHUB_OUTPUT
echo "is_main=true" >> $GITHUB_OUTPUT
elif [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
echo "branch_tag=dev" >> $GITHUB_OUTPUT
echo "is_main=false" >> $GITHUB_OUTPUT
else
# For other branches, use the branch name
BRANCH=$(echo "${{ github.ref }}" | sed 's/refs\/heads\///' | sed 's/[^a-zA-Z0-9]/-/g')
echo "branch_tag=${BRANCH}" >> $GITHUB_OUTPUT
echo "is_main=false" >> $GITHUB_OUTPUT
fi
# Determine if this is from a fork
if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then
echo "is_fork=true" >> $GITHUB_OUTPUT
else
echo "is_fork=false" >> $GITHUB_OUTPUT
fi
# use metadata from the prepare job
- name: Build and push Docker image
uses: docker/build-push-action@v4
with:
context: .
push: ${{ github.event_name != 'pull_request' }}
platforms: linux/amd64,linux/arm64
# Build only the platform for this matrix job to avoid running amd64
# stages under qemu on an arm64 runner (and vice-versa). This makes
# the matrix runner's platform the one built by buildx.
platforms: linux/${{ matrix.platform }}
# push arch-specific tags from each matrix job (they will be combined
# into a multi-arch manifest in a follow-up job)
tags: |
ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.meta.outputs.branch_tag }}
ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.version.outputs.version }}-${{ steps.timestamp.outputs.timestamp }}
ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.version.outputs.sha_short }}
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }}
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
build-args: |
REPO_OWNER=${{ steps.meta.outputs.repo_owner }}
REPO_NAME=${{ steps.meta.outputs.repo_name }}
REPO_OWNER=${{ needs.prepare.outputs.repo_owner }}
REPO_NAME=${{ needs.prepare.outputs.repo_name }}
BASE_TAG=base
BRANCH=${{ github.ref_name }}
REPO_URL=https://github.com/${{ github.repository }}
TIMESTAMP=${{ steps.timestamp.outputs.timestamp }}
TIMESTAMP=${{ needs.prepare.outputs.timestamp }}
file: ./docker/Dockerfile
create-manifest:
# wait for prepare and all matrix builds to finish
needs: [prepare, docker]
runs-on: ubuntu-24.04
if: ${{ github.event_name != 'pull_request' }}
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
registry: docker.io
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Create multi-arch manifest tags
run: |
set -euo pipefail
OWNER=${{ needs.prepare.outputs.repo_owner }}
REPO=${{ needs.prepare.outputs.repo_name }}
BRANCH_TAG=${{ needs.prepare.outputs.branch_tag }}
VERSION=${{ needs.prepare.outputs.version }}
TIMESTAMP=${{ needs.prepare.outputs.timestamp }}
echo "Creating multi-arch manifest for ${OWNER}/${REPO}"
# branch tag (e.g. latest or dev)
docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \
ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-arm64
# version + timestamp tag
docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP} \
ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP}-arm64
# also create the same manifest tags on Docker Hub
docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-arm64
docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP} \
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP}-arm64
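Both this workflow and the release workflow read the project version with `python -c "import version; print(version.__version__)"` and later commit `version.py`. A minimal sketch of the module shape those steps assume is shown below; the version value is only an example and the real module may define additional metadata.

```python
# version.py (repo root) -- hypothetical minimal shape assumed by the CI and release steps.
# The release workflow bumps this value before committing and tagging.
__version__ = "0.13.0"
```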


@@ -15,16 +15,21 @@ on:
# Add explicit permissions for the workflow
permissions:
contents: write # For managing releases and pushing tags
packages: write # For publishing to GitHub Container Registry
contents: write # For managing releases and pushing tags
packages: write # For publishing to GitHub Container Registry
jobs:
release:
runs-on: ubuntu-latest
prepare:
runs-on: ubuntu-24.04
outputs:
new_version: ${{ steps.update_version.outputs.new_version }}
repo_owner: ${{ steps.meta.outputs.repo_owner }}
repo_name: ${{ steps.meta.outputs.repo_name }}
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Configure Git
run: |
@@ -38,14 +43,49 @@ jobs:
NEW_VERSION=$(python -c "import version; print(f'{version.__version__}')")
echo "new_version=${NEW_VERSION}" >> $GITHUB_OUTPUT
- name: Set lowercase repo owner
id: repo_owner
- name: Update Changelog
run: |
python scripts/update_changelog.py ${{ steps.update_version.outputs.new_version }}
- name: Set repository metadata
id: meta
run: |
REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')
echo "lowercase=${REPO_OWNER}" >> $GITHUB_OUTPUT
echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]')
echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT
- name: Commit and Tag
run: |
git add version.py CHANGELOG.md
git commit -m "Release v${{ steps.update_version.outputs.new_version }}"
git tag -a "v${{ steps.update_version.outputs.new_version }}" -m "Release v${{ steps.update_version.outputs.new_version }}"
git push origin main --tags
docker:
needs: [prepare]
strategy:
fail-fast: false
matrix:
platform: [amd64, arm64]
include:
- platform: amd64
runner: ubuntu-24.04
- platform: arm64
runner: ubuntu-24.04-arm
runs-on: ${{ matrix.runner }}
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
ref: main
- name: Configure Git
run: |
git config user.name "GitHub Actions"
git config user.email "actions@github.com"
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
@@ -57,36 +97,88 @@ jobs:
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Commit and Tag
run: |
git add version.py
git commit -m "Release v${{ steps.update_version.outputs.new_version }}"
git tag -a "v${{ steps.update_version.outputs.new_version }}" -m "Release v${{ steps.update_version.outputs.new_version }}"
git push origin main --tags
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
registry: docker.io
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and Push Release Image
- name: Build and push Docker image
uses: docker/build-push-action@v4
with:
context: .
push: true
platforms: linux/amd64,linux/arm64, #linux/arm/v7 # Multi-arch support for releases
platforms: linux/${{ matrix.platform }}
tags: |
ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest
ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }}
ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest-amd64
ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest-arm64
ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }}-amd64
ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }}-arm64
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:latest-${{ matrix.platform }}
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.new_version }}-${{ matrix.platform }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:latest-${{ matrix.platform }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.new_version }}-${{ matrix.platform }}
build-args: |
REPO_OWNER=${{ needs.prepare.outputs.repo_owner }}
REPO_NAME=${{ needs.prepare.outputs.repo_name }}
BRANCH=${{ github.ref_name }}
REPO_URL=https://github.com/${{ github.repository }}
file: ./docker/Dockerfile
create-manifest:
needs: [prepare, docker]
runs-on: ubuntu-24.04
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
registry: docker.io
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Create multi-arch manifest tags
run: |
set -euo pipefail
OWNER=${{ needs.prepare.outputs.repo_owner }}
REPO=${{ needs.prepare.outputs.repo_name }}
VERSION=${{ needs.prepare.outputs.new_version }}
echo "Creating multi-arch manifest for ${OWNER}/${REPO}"
# GitHub Container Registry manifests
# latest tag
docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:latest \
ghcr.io/${OWNER}/${REPO}:latest-amd64 ghcr.io/${OWNER}/${REPO}:latest-arm64
# version tag
docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${VERSION} \
ghcr.io/${OWNER}/${REPO}:${VERSION}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-arm64
# Docker Hub manifests
# latest tag
docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest \
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest-arm64
# version tag
docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION} \
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-arm64
create-release:
needs: [prepare, create-manifest]
runs-on: ubuntu-24.04
steps:
- name: Create GitHub Release
uses: softprops/action-gh-release@v1
with:
tag_name: v${{ steps.update_version.outputs.new_version }}
name: Release v${{ steps.update_version.outputs.new_version }}
tag_name: v${{ needs.prepare.outputs.new_version }}
name: Release v${{ needs.prepare.outputs.new_version }}
draft: false
prerelease: false
token: ${{ secrets.GITHUB_TOKEN }}
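The new `Update Changelog` step runs `python scripts/update_changelog.py <version>` before the release commit. That script is not shown in this diff; the sketch below is only a hypothetical illustration of the usual Keep a Changelog promotion (opening a dated section for the new version beneath `## [Unreleased]`), not the project's actual implementation.

```python
#!/usr/bin/env python3
"""Hypothetical sketch of scripts/update_changelog.py (the real script may differ)."""
import datetime
import sys

def promote_unreleased(text: str, version: str) -> str:
    # Keep an empty Unreleased section and open a dated section for this release.
    today = datetime.date.today().isoformat()
    return text.replace("## [Unreleased]", f"## [Unreleased]\n\n## [{version}] - {today}", 1)

if __name__ == "__main__":
    new_version = sys.argv[1]  # e.g. "0.13.0", passed in by the workflow
    with open("CHANGELOG.md", "r+", encoding="utf-8") as fh:
        updated = promote_unreleased(fh.read(), new_version)
        fh.seek(0)
        fh.write(updated)
        fh.truncate()
```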

CHANGELOG.md (new file, 839 lines)

@@ -0,0 +1,839 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [0.13.0] - 2025-12-02
### Added
- `CHANGELOG.md` file following Keep a Changelog format to document all notable changes and project history
- System event logging and viewer: Comprehensive logging system that tracks internal application events (M3U refreshes, EPG updates, stream switches, errors) with a dedicated UI viewer for filtering and reviewing historical events. Improves monitoring, troubleshooting, and understanding system behavior
- M3U/EPG endpoint caching: Implements caching for frequently requested M3U playlists and EPG data to reduce database load and improve response times for clients (a caching sketch follows this list)
- Search icon to name headers for the channels and streams tables (#686)
- Comprehensive logging for user authentication events and network access restrictions
- Validation for EPG objects and payloads in updateEPG functions to prevent errors from invalid data
- Referrerpolicy to YouTube iframes in series and VOD modals for better compatibility
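As a rough illustration of the endpoint caching noted above, a Django view can serve a generated playlist from the cache and only rebuild it on a miss. The view name, cache key scheme, TTL, and helper below are assumptions for the sketch, not Dispatcharr's actual code.

```python
from django.core.cache import cache
from django.http import HttpResponse

CACHE_TTL = 300  # seconds; illustrative value only

def build_m3u_playlist(request):
    # Placeholder for the real, database-heavy playlist generation.
    return "#EXTM3U\n"

def cached_m3u_view(request):
    key = f"m3u:{request.get_full_path()}"
    body = cache.get(key)
    if body is None:
        body = build_m3u_playlist(request)
        cache.set(key, body, CACHE_TTL)
    return HttpResponse(body, content_type="application/x-mpegurl")
```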
### Changed
- XC player API now returns server_info for unknown actions to align with provider behavior
- XC player API refactored to streamline action handling and ensure consistent responses
- Date parsing logic in generate_custom_dummy_programs improved to handle empty or invalid inputs
- DVR cards now reflect date and time formats chosen by user - Thanks [@Biologisten](https://github.com/Biologisten)
- "Uncategorized" categories and relations now automatically created for VOD accounts to improve content management (#627)
- Improved minimum horizontal size in the stats page for better usability on smaller displays
- M3U and EPG generation now handles missing channel profiles with appropriate error logging
### Fixed
- Episode URLs in series modal now use UUID instead of ID, fixing broken links (#684, #694)
- Stream preview now respects selected M3U profile instead of always using default profile (#690)
- Channel groups filter in M3UGroupFilter component now filters out non-existent groups (prevents blank webui when editing M3U after a group was removed)
- Stream order now preserved in PATCH/PUT responses from ChannelSerializer, ensuring consistent ordering across all API operations - Thanks [@FiveBoroughs](https://github.com/FiveBoroughs) (#643)
- XC client compatibility: float channel numbers now converted to integers
- M3U account and profile modals now scrollable on mobile devices for improved usability
## [0.12.0] - 2025-11-19
### Added
- RTSP stream support with automatic protocol detection when a proxy profile requires it. The proxy now forces FFmpeg for RTSP sources and properly handles RTSP URLs - Thanks [@ragchuck](https://github.com/ragchuck) (#184)
- UDP stream support, including correct handling when a proxy profile specifies a UDP source. The proxy now skips HTTP-specific headers (like `user_agent`) for non-HTTP protocols and performs manual redirect handling to improve reliability (#617)
- Separate VOD logos system with a new `VODLogo` model, database migration, dedicated API/viewset, and server-paginated UI. This separates movie/series logos from channel logos, making cleanup safer and enabling independent bulk operations
### Changed
- Background profile refresh now uses a rate-limiting/backoff strategy to avoid provider bans
- Bulk channel editing now validates all requested changes up front and applies updates in a single database transaction
- ProxyServer shutdown & ghost-client handling improved to avoid initializing channels for transient clients and prevent duplicate reinitialization during rapid reconnects
- URL / Stream validation expanded to support credentials on non-FQDN hosts, skips HTTP-only checks for RTSP/RTP/UDP streams, and improved host/port normalization
- TV guide scrolling & timeline synchronization improved with mouse-wheel scrolling, synchronized timeline position with guide navigation, and improved mobile momentum scrolling (#252)
- EPG Source dropdown now sorts alphabetically - Thanks [@0x53c65c0a8bd30fff](https://github.com/0x53c65c0a8bd30fff)
- M3U POST handling restored and improved for clients (e.g., Smarters) that request playlists using HTTP POST - Thanks [@maluueu](https://github.com/maluueu)
- Login form revamped with branding, cleaner layout, loading state, "Remember Me" option, and focused sign-in flow
- Series & VOD now have copy-link buttons in modals for easier URL sharing
- `get_host_and_port` now prioritizes verified port sources and handles reverse-proxy edge cases more accurately (#618)
### Fixed
- EXTINF parsing overhauled to correctly extract attributes such as `tvg-id`, `tvg-name`, and `group-title`, even when values include quotes or commas (#637); a simplified parsing sketch follows this list
- Websocket payload size reduced during EPG processing to avoid UI freezes, blank screens, or memory spikes in the browser (#327)
- Logo management UI fixes including confirmation dialogs, header checkbox reset, delete button reliability, and full client refetch after cleanup
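A simplified sketch of the attribute extraction described in the EXTINF fix above; the real parser handles more edge cases than this.

```python
import re

ATTR_RE = re.compile(r'([A-Za-z0-9-]+)="([^"]*)"')

def parse_extinf(line: str) -> dict:
    # Quoted attribute values may themselves contain commas, so split the title
    # off at the last comma instead of the first one.
    attrs = dict(ATTR_RE.findall(line))
    title = line.rsplit(",", 1)[-1].strip()
    return {"attributes": attrs, "title": title}

line = '#EXTINF:-1 tvg-id="news.hd" tvg-name="News, HD" group-title="News",News HD'
print(parse_extinf(line))
# {'attributes': {'tvg-id': 'news.hd', 'tvg-name': 'News, HD', 'group-title': 'News'}, 'title': 'News HD'}
```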
## [0.11.2] - 2025-11-04
### Added
- Custom Dummy EPG improvements:
- Support for using an existing Custom Dummy EPG as a template for creating new EPGs
- Custom fallback templates for unmatched patterns
- `{endtime}` as an available output placeholder and renamed `{time}` → `{starttime}` (#590)
- Support for date placeholders that respect both source and output timezones (#597)
- Ability to bulk assign Custom Dummy EPGs to multiple channels
- "Include New Tag" option to mark programs as new in Dummy EPG output
- Support for month strings in date parsing
- Ability to set custom posters and channel logos via regex patterns for Custom Dummy EPGs
- Improved DST handling by calculating offsets based on the actual program date, not today's date
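A short worked example of the DST rule above, using only the standard library (not Dispatcharr's code): the same local wall-clock time carries a different UTC offset depending on the program's date, so the offset has to be derived from that date rather than from today.

```python
from datetime import datetime
from zoneinfo import ZoneInfo

tz = ZoneInfo("America/Chicago")
winter = datetime(2025, 1, 15, 20, 0, tzinfo=tz)  # CST, UTC-06:00
summer = datetime(2025, 7, 15, 20, 0, tzinfo=tz)  # CDT, UTC-05:00
print(winter.utcoffset(), summer.utcoffset())
# -1 day, 18:00:00 -1 day, 19:00:00  (i.e. -06:00 vs -05:00)
```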
### Changed
- Stream model maximum URL length increased from 2000 to 4096 characters (#585)
- Groups now sorted during `xc_get_live_categories` based on the order they first appear (by lowest channel number)
- Client TTL settings updated and periodic refresh implemented during active streaming to maintain accurate connection tracking
- `ProgramData.sub_title` field changed from `CharField` to `TextField` to allow subtitles longer than 255 characters (#579)
- Startup improved by verifying `/data` directory ownership and automatically fixing permissions if needed. Pre-creates `/data/models` during initialization (#614)
- Port detection enhanced to check `request.META.get("SERVER_PORT")` before falling back to defaults, ensuring correct port when generating M3U, EPG, and logo URLs - Thanks [@lasharor](https://github.com/lasharor)
### Fixed
- Custom Dummy EPG frontend DST calculation now uses program date instead of current date
- Channel titles no longer truncated early after an apostrophe - Thanks [@0x53c65c0a8bd30fff](https://github.com/0x53c65c0a8bd30fff)
## [0.11.1] - 2025-10-22
### Fixed
- uWSGI not receiving environmental variables
- LXC unable to access daemons launched by uWSGI ([#575](https://github.com/Dispatcharr/Dispatcharr/issues/575), [#576](https://github.com/Dispatcharr/Dispatcharr/issues/576), [#577](https://github.com/Dispatcharr/Dispatcharr/issues/577))
## [0.11.0] - 2025-10-22
### Added
- Custom Dummy EPG system:
- Regex pattern matching and name source selection
- Support for custom upcoming and ended programs
- Timezone-aware with source and local timezone selection
- Option to include categories and date/live tags in Dummy EPG output
- (#293)
- Auto-Enable & Category Improvements:
- Auto-enable settings for new groups and categories in M3U and VOD components (#208)
- IPv6 CIDR validation in Settings - Thanks [@jordandalley](https://github.com/jordandalley) (#236)
- Custom logo support for channel groups in Auto Sync Channels (#555)
- Tooltips added to the Stream Table
### Changed
- Celery and uWSGI now have configurable `nice` levels (defaults: `uWSGI=0`, `Celery=5`) to prioritize streaming when needed. (#571)
- Directory creation and ownership management refactored in init scripts to avoid unnecessary recursive `chown` operations and improve boot speed
- HTTP streamer switched to threaded model with piped output for improved robustness
- Chunk timeout configuration improved and StreamManager timeout handling enhanced
- Proxy timeout values reduced to avoid unnecessary waiting
- Resource cleanup improved to prevent "Too many open files" errors
- Proxy settings caching implemented and database connections properly closed after use
- EPG program fetching optimized with chunked retrieval and explicit ordering to reduce memory usage during output
- EPG output now sorted by channel number for consistent presentation
- Stream Table buttons reordered for better usability
- Database connection handling improved throughout the codebase to reduce overall connection count
### Fixed
- Crash when resizing columns in the Channel Table (#516)
- Errors when saving stream settings (#535)
- Preview and edit bugs for custom streams where profile and group selections did not display correctly
- `channel_id` and `channel.uuid` now converted to strings before processing to fix manual switching when the uWSGI worker was not the stream owner (#269)
- Stream locking and connection search issues when switching channels; increased search timeout to reduce premature failures (#503)
- Stream Table buttons no longer shift into multiple rows when selecting many streams
- Custom stream previews
- Custom Stream settings not loading properly (#186)
- Orphaned categories now automatically removed for VOD and Series during M3U refresh (#540)
## [0.10.4] - 2025-10-08
### Added
- "Assign TVG-ID from EPG" functionality with frontend actions for single-channel and batch operations
- Confirmation dialogs in `ChannelBatchForm` for setting names, logos, TVG-IDs, and clearing EPG assignments
- "Clear EPG" button to `ChannelBatchForm` for easy reset of assignments
- Batch editing of channel logos - Thanks [@EmeraldPi](https://github.com/EmeraldPi)
- Ability to set logo name from URL - Thanks [@EmeraldPi](https://github.com/EmeraldPi)
- Proper timestamp tracking for channel creation and updates; `XC Get Live Streams` now uses this information
- Time Zone Settings added to the application ([#482](https://github.com/Dispatcharr/Dispatcharr/issues/482), [#347](https://github.com/Dispatcharr/Dispatcharr/issues/347))
- Comskip settings support including comskip.ini upload and custom directory selection (#418)
- Manual recording scheduling for channels without EPG data (#162)
### Changed
- Default M3U account type is now set to XC for new accounts
- Performance optimization: Only fetch playlists and channel profiles after a successful M3U refresh (rather than every status update)
- Playlist retrieval now includes current connection counts and improved session handling during VOD session start
- Improved stream selection logic when all profiles have reached max connections (retries faster)
### Fixed
- Large EPGs now fully parse all channels
- Duplicate channel outputs for streamer profiles set to "All"
- Streamer profiles with "All" assigned now receive all eligible channels
- PostgreSQL btree index errors from logo URL validation during channel creation (#519)
- M3U processing lock not releasing when no streams found during XC refresh, which also skipped VOD scanning (#449)
- Float conversion errors by normalizing decimal format during VOD scanning (#526)
- Direct URL ordering in M3U output to use correct stream sequence (#528)
- Adding multiple M3U accounts without refreshing modified only the first entry (#397)
- UI state bug where new playlist creation was not notified to frontend ("Fetching Groups" stuck)
- Minor FFmpeg task and stream termination bugs in DVR module
- Input escaping issue where single quotes were interpreted as code delimiters (#406)
## [0.10.3] - 2025-10-04
### Added
- Logo management UI improvements where Channel editor now uses the Logo Manager modal, allowing users to add logos by URL directly from the edit form - Thanks [@EmeraldPi](https://github.com/EmeraldPi)
### Changed
- FFmpeg base container rebuilt with improved native build support - Thanks [@EmeraldPi](https://github.com/EmeraldPi)
- GitHub Actions workflow updated to use native runners instead of QEMU emulation for more reliable multi-architecture builds
### Fixed
- EPG parsing stability when large EPG files would not fully parse all channels. Parser now uses `iterparse` with `recover=True` for both channel and program-level parsing, ensuring complete and resilient XML processing even when Cloudflare injects additional root elements
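A minimal sketch of the recovering-parser approach described above, with an illustrative malformed feed; the real parser also streams programme elements and does considerably more bookkeeping.

```python
from io import BytesIO
from lxml import etree

# recover=True lets lxml keep parsing past malformed markup, such as stray
# elements injected around the XMLTV payload.
xmltv = b'<tv><channel id="a.one"><display-name>A One</display-name></channel><injected></tv>'
for _, channel in etree.iterparse(BytesIO(xmltv), events=("end",), tag="channel", recover=True):
    print(channel.get("id"))
    channel.clear()  # release parsed nodes to keep memory flat on large files
```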
## [0.10.2] - 2025-10-03
### Added
- `m3u_id` parameter to `generate_hash_key` and updated related calls
- Support for `x-tvg-url` and `url-tvg` generation with preserved query parameters (#345)
- Exact Gracenote ID matching for EPG channel mapping (#291)
- Recovery handling for XMLTV parser errors
- `nice -n 5` added to Celery commands for better process priority management
### Changed
- Default M3U hash key changed to URL only for new installs
- M3U profile retrieval now includes current connection counts and improved session handling during VOD session start
- Improved stream selection logic when all profiles have reached max connections (retries faster)
- XMLTV parsing refactored to use `iterparse` for `<tv>` element
- Release workflow refactored to run on native architecture
- Docker build system improvements:
- Split install/build steps
- Switch from Yarn → NPM
- Updated to Node.js 24 (frontend build)
- Improved ARM build reliability
- Pushes to DockerHub with combined manifest
- Removed redundant tags and improved build organization
### Fixed
- Cloudflare-hosted EPG feeds breaking parsing (#497)
- Bulk channel creation now preserves the order channels were selected in (no longer reversed)
- M3U hash settings not saving properly
- VOD selecting the wrong M3U profile at session start (#461)
- Redundant `h` removed from 12-hour time format in settings page
## [0.10.1] - 2025-09-24
### Added
- Virtualized rendering for TV Guide for smoother performance when displaying large guides - Thanks [@stlalpha](https://github.com/stlalpha) (#438)
- Enhanced channel/program mapping to reuse EPG data across multiple channels that share the same TVG-ID
### Changed
- `URL` field length in EPGSource model increased from 200 → 1000 characters to support long URLs with tokens
- Improved URL transformation logic with more advanced regex during profile refreshes
- During EPG scanning, the first display name for a channel is now used instead of the last
- `whiteSpace` style changed from `nowrap` to `pre` in StreamsTable for better text formatting
### Fixed
- EPG channel parsing failure when channel `URL` exceeded 500 characters by adding validation during scanning (#452)
- Frontend incorrectly saving case-sensitive setting as a JSON string for stream filters
## [0.10.0] - 2025-09-18
### Added
- Channel Creation Improvements:
- Ability to specify channel number during channel creation ([#377](https://github.com/Dispatcharr/Dispatcharr/issues/377), [#169](https://github.com/Dispatcharr/Dispatcharr/issues/169))
- Asynchronous bulk channel creation from stream IDs with WebSocket progress updates
- WebSocket notifications when channels are created
- EPG Auto-Matching (Rewritten & Enhanced):
- Completely refactored for improved accuracy and efficiency
- Can now be applied to selected channels or triggered directly from the channel edit form
- Uses stricter matching logic with support from sentence transformers
- Added progress notifications during the matching process
- Implemented memory cleanup for ML models after matching operations
- Removed deprecated matching scripts
- Logo & EPG Management:
- Ability in channel edit form and bulk channel editor to set logos and names from assigned EPG (#157)
- Improved logo update flow: frontend refreshes on changes, store updates after bulk changes, progress shown via notifications
- Table Enhancements:
- All tables now support adjustable column resizing (#295)
- Channels and Streams tables persist column widths and center divider position to local storage
- Improved sizing and layout for user-agents, stream profiles, logos, M3U, and EPG tables
### Changed
- Simplified VOD and series access: removed user-level restrictions on M3U accounts
- Skip disabled M3U accounts when choosing streams during playback (#402)
- Enhanced `UserViewSet` queryset to prefetch related channel profiles for better performance
- Auto-focus added to EPG filter input
- Category API retrieval now sorts by name
- Increased default column size for EPG fields and removed max size on group/EPG columns
- Standardized EPG column header to display `(EPG ID - TVG-ID)`
### Fixed
- Bug during VOD cleanup where all VODs not from the current M3U scan could be deleted
- Logos not being set correctly in some cases
- Bug where not setting a channel number caused an error when creating a channel (#422)
- Bug where clicking "Add Channel" with a channel selected opened the edit form instead
- Bug where a newly created channel could reuse streams from another channel due to form not clearing properly
- VOD page not displaying correct order while changing pages
- `ReferenceError: setIsInitialized is not defined` when logging into web UI
- `cannot access local variable 'total_chunks' where it is not associated with a value` during VOD refresh
## [0.9.1] - 2025-09-13
### Fixed
- Broken migrations affecting the plugins system
- DVR and plugin paths to ensure proper functionality (#381)
## [0.9.0] - 2025-09-12
### Added
- **Video on Demand (VOD) System:**
- Complete VOD infrastructure with support for movies and TV series
- Advanced VOD metadata including IMDB/TMDB integration, trailers, cast information
- Smart VOD categorization with filtering by type (movies vs series)
- Multi-provider VOD support with priority-based selection
- VOD streaming proxy with connection tracking and statistics
- Season/episode organization for TV series with expandable episode details
- VOD statistics and monitoring integrated with existing stats dashboard
- Optimized VOD parsing and category filtering
- Dedicated VOD page with movies and series tabs
- Rich VOD modals with backdrop images, trailers, and metadata
- Episode management with season-based organization
- Play button integration with external player support
- VOD statistics cards similar to channel cards
- **Plugin System:**
- Extensible Plugin Framework - Developers can build custom functionality without modifying Dispatcharr core
- Plugin Discovery & Management - Automatic detection of installed plugins, with enable/disable controls in the UI
- Backend API Support - New APIs for listing, loading, and managing plugins programmatically
- Plugin Registry - Structured models for plugin metadata (name, version, author, description)
- UI Enhancements - Dedicated Plugins page in the admin panel for centralized plugin management
- Documentation & Scaffolding - Initial documentation and scaffolding to accelerate plugin development
- **DVR System:**
- Refreshed DVR page for managing scheduled and completed recordings
- Global pre/post padding controls surfaced in Settings
- Playback support for completed recordings directly in the UI
- DVR table view includes title, channel, time, and padding adjustments for clear scheduling
- Improved population of DVR listings, fixing intermittent blank screen issues
- Comskip integration for automated commercial detection and skipping in recordings
- User-configurable comskip toggle in Settings
- **Enhanced Channel Management:**
- EPG column added to channels table for better organization
- EPG filtering by channel assignment and source name
- Channel batch renaming for efficient bulk channel name updates
- Auto channel sync improvements with custom stream profile override
- Channel logo management overhaul with background loading
- Date and time format customization in settings - Thanks [@Biologisten](https://github.com/Biologisten)
- Auto-refresh intervals for statistics with better UI controls
- M3U profile notes field for better organization
- XC account information retrieval and display with account refresh functionality and notifications
### Changed
- JSONB field conversion for custom properties (replacing text fields) for better performance
- Database encoding converted from ASCII to UTF8 for better character support
- Batch processing for M3U updates and channel operations
- Query optimization with prefetch_related to eliminate N+1 queries
- Reduced API calls by fetching all data at once instead of per-category
- Buffering speed setting now affects UI indicators
- Swagger endpoint accessible with or without trailing slash
- EPG source names displayed before channel names in edit forms
- Logo loading improvements with background processing
- Channel card enhancements with better status indicators
- Group column width optimization
- Better content-type detection for streams
- Improved headers with content-range and total length
- Enhanced user-agent handling for M3U accounts
- HEAD request support with connection keep-alive
- Progress tracking improvements for clients with new sessions
- Server URL length increased to 1000 characters for token support
- Prettier formatting applied to all frontend code
- String quote standardization and code formatting improvements
### Fixed
- Logo loading issues in channel edit forms resolved
- M3U download error handling and user feedback improved
- Unique constraint violations fixed during stream rehashing
- Channel stats fetching moved from Celery beat task to configurable API calls
- Speed badge colors now use configurable buffering speed setting
- Channel cards properly close when streams stop
- Active streams labeling updated from "Active Channels"
- WebSocket updates for client connect/disconnect events
- Null value handling before database saves
- Empty string scrubbing for cleaner data
- Group relationship cleanup for removed M3U groups
- Logo cleanup for unused files with proper batch processing
- Recordings start 5 mins after show starts (#102)
### Closed
- [#350](https://github.com/Dispatcharr/Dispatcharr/issues/350): Allow DVR recordings to be played via the UI
- [#349](https://github.com/Dispatcharr/Dispatcharr/issues/349): DVR screen doesn't populate consistently
- [#340](https://github.com/Dispatcharr/Dispatcharr/issues/340): Global find and replace
- [#311](https://github.com/Dispatcharr/Dispatcharr/issues/311): Stat's "Current Speed" does not reflect "Buffering Speed" setting
- [#304](https://github.com/Dispatcharr/Dispatcharr/issues/304): Name ignored when uploading logo
- [#300](https://github.com/Dispatcharr/Dispatcharr/issues/300): Updating Logo throws error
- [#286](https://github.com/Dispatcharr/Dispatcharr/issues/286): 2 Value/Column EPG in Channel Edit
- [#280](https://github.com/Dispatcharr/Dispatcharr/issues/280): Add general text field in M3U/XS profiles
- [#190](https://github.com/Dispatcharr/Dispatcharr/issues/190): Show which stream is being used and allow it to be altered in channel properties
- [#155](https://github.com/Dispatcharr/Dispatcharr/issues/155): Additional column with EPG assignment information / Allow filtering by EPG assignment
- [#138](https://github.com/Dispatcharr/Dispatcharr/issues/138): Bulk Channel Edit Functions
## [0.8.0] - 2025-08-19
### Added
- Channel & Stream Enhancements:
- Preview streams under a channel, with stream logo and name displayed in the channel card
- Advanced stats for channel streams
- Stream qualities displayed in the channel table
- Stream stats now saved to the database
- URL badges can now be clicked to copy stream links to the clipboard
- M3U Filtering for Streams:
- Streams for an M3U account can now be filtered using flexible parameters
- Apply filters based on stream name, group title, or stream URL (via regex)
- Filters support both inclusion and exclusion logic for precise control
- Multiple filters can be layered with a priority order for complex rules
- Ability to reverse the sort order for auto channel sync
- Custom validator for URL fields now allows non-FQDN hostnames (#63); a validation sketch follows this list
- Membership creation added in `UpdateChannelMembershipAPIView` if not found (#275)
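An illustrative validator in the spirit of the non-FQDN change above; the scheme set and function name are assumptions for the sketch, not the project's actual validator.

```python
from urllib.parse import urlsplit

from django.core.exceptions import ValidationError

def validate_stream_url(value: str) -> None:
    parts = urlsplit(value)
    if parts.scheme not in {"http", "https"}:  # illustrative scheme set
        raise ValidationError(f"Unsupported scheme: {parts.scheme!r}")
    # Accept single-label hosts such as "nas" or "tvheadend" instead of
    # requiring a fully qualified domain name.
    if not parts.hostname:
        raise ValidationError("URL must include a host")

validate_stream_url("http://nas:8080/playlist.m3u")  # passes despite the non-FQDN host
```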
### Changed
- Bumped Postgres to version 17
- Updated dependencies in `requirements.txt` for compatibility and improvements
- Improved chunked extraction to prevent memory issues - Thanks [@pantherale0](https://github.com/pantherale0)
### Fixed
- XML escaping for channel ID in `generate_dummy_epg` function
- Bug where creating a channel from a stream not displayed in the table used an invalid stream name
- Debian install script - Thanks [@deku-m](https://github.com/deku-m)
## [0.7.1] - 2025-07-29
### Added
- Natural sorting for channel names during auto channel sync
- Ability to sort auto sync order by provider order (default), channel name, TVG ID, or last updated time
- Auto-created channels can now be assigned to specific channel profiles (#255)
- Channel profiles are now fetched automatically after a successful M3U refresh
- Uses only whole numbers when assigning the next available channel number
### Changed
- Logo upload behavior changed to wait for the Create button before saving
- Uses the channel name as the display name in EPG output for improved readability
- Ensures channels are only added to a selected profile if one is explicitly chosen
### Fixed
- Logo Manager prevents redundant messages from the file scanner by properly tracking uploaded logos in Redis
- Fixed an issue preventing logo uploads via URL
- Adds internal support for assigning multiple profiles via API
## [0.7.0] - 2025-07-19
### Added
- **Logo Manager:**
- Complete logo management system with filtering, search, and usage tracking
- Upload logos directly through the UI
- Automatically scan `/data/logos` for existing files (#69)
- View which channels use each logo
- Bulk delete unused logos with cleanup
- Enhanced display with hover effects and improved sizing
- Improved logo fetching with timeouts and user-agent headers to prevent hanging
- **Group Manager:**
- Comprehensive group management interface (#128)
- Search and filter groups with ease
- Bulk operations for cleanup
- Filter channels by group membership
- Automatically clean up unused groups
- **Auto Channel Sync:**
- Automatic channel synchronization from M3U sources (#147)
- Configure auto-sync settings per M3U account group
- Set starting channel numbers by group
- Override group names during sync
- Apply regex match and replace for channel names
- Filter channels by regex match on stream name
- Track auto-created vs manually added channels
- Smart updates preserve UUIDs and existing links
- Stream rehashing with WebSocket notifications
- Better error handling for blocked rehash attempts
- Lock acquisition to prevent conflicts
- Real-time progress tracking
### Changed
- Persist table page sizes in local storage (streams & channels)
- Smoother pagination and improved UX
- Fixed z-index issues during table refreshes
- Improved XC client with connection pooling
- Better error handling for API and JSON decode failures
- Smarter handling of empty content and blocking responses
- Improved EPG XML generation with richer metadata
- Better support for keywords, languages, ratings, and credits
- Better form layouts and responsive buttons
- Enhanced confirmation dialogs and feedback
### Fixed
- Channel table now correctly restores page size from local storage
- Resolved WebSocket message formatting issues
- Fixed logo uploads and edits
- Corrected ESLint issues across the codebase
- Fixed HTML validation errors in menus
- Optimized logo fetching with proper timeouts and headers ([#101](https://github.com/Dispatcharr/Dispatcharr/issues/101), [#217](https://github.com/Dispatcharr/Dispatcharr/issues/217))
## [0.6.2] - 2025-07-10
### Fixed
- **Streaming & Connection Stability:**
- Provider timeout issues - Slow but responsive providers no longer cause channel lockups
- Added chunk and process timeouts - Prevents hanging during stream processing and transcoding
- Improved connection handling - Enhanced process management and socket closure detection for safer streaming
- Enhanced health monitoring - Health monitor now properly notifies main thread without attempting reconnections
- **User Interface & Experience:**
- Touch screen compatibility - Web player can now be properly closed on touch devices
- Improved user management - Added support for first/last names, login tracking, and standardized table formatting
- Improved logging - Enhanced log messages with channel IDs for better debugging
- Code cleanup - Removed unused imports, variables, and dead links
## [0.6.1] - 2025-06-27
### Added
- Dynamic parameter options for M3U and EPG URLs (#207)
- Support for 'num' property in channel number extraction (fixes channel creation from XC streams not having channel numbers)
### Changed
- EPG generation now uses streaming responses to prevent client timeouts during large EPG file generation (#179); a streaming-response sketch follows this list
- Improved reliability when downloading EPG data from external sources
- Better program positioning - Programs that start before the current view now have proper text positioning (#223)
- Better mobile support - Improved sizing and layout for mobile devices across multiple tables
- Responsive stats cards - Better calculation for card layout and improved filling on different screen sizes (#218)
- Enhanced table rendering - M3U and EPG tables now render better on small screens
- Optimized spacing - Removed unnecessary padding and blank space throughout the interface
- Better settings layout - Improved minimum widths and mobile support for settings pages
- Always show 2 decimal places for FFmpeg speed values
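A hypothetical sketch of the streaming-response approach mentioned in the first item of this list; names and the XML details are illustrative, not the project's actual view.

```python
from django.http import StreamingHttpResponse

def epg_view(request, channels):
    # `channels` is assumed to be an iterable of (channel_id, display_name) pairs.
    def xmltv_chunks():
        yield '<?xml version="1.0" encoding="UTF-8"?>\n<tv>\n'
        for channel_id, name in channels:
            yield f'  <channel id="{channel_id}"><display-name>{name}</display-name></channel>\n'
        yield '</tv>\n'

    # Chunks reach the client as they are produced, so large guides no longer
    # have to be rendered fully in memory before the response starts.
    return StreamingHttpResponse(xmltv_chunks(), content_type="application/xml")
```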
### Fixed
- TV Guide now properly filters channels based on selected channel group
- Resolved loading issues - Fixed channels and groups not loading correctly in the TV Guide
- Stream profile fixes - Resolved issue with setting stream profile to 'use default'
- Single channel editing - When only one channel is selected, the correct channel editor now opens
- Bulk edit improvements - Added "no change" options for bulk editing operations
- Bulk channel editor now properly saves changes (#222)
- Link form improvements - Better sizing and rendering of link forms with proper layering
- Confirmation dialogs added with warning suppression for user deletion, channel profile deletion, and M3U profile deletion
## [0.6.0] - 2025-06-19
### Added
- **User Management & Access Control:**
- Complete user management system with user levels and channel access controls
- Network access control with CIDR validation and IP-based restrictions (a CIDR-check sketch follows this list)
- Logout functionality and improved loading states for authenticated users
- **Xtream Codes Output:**
- Xtream Codes support enables easy output to IPTV clients (#195)
- **Stream Management & Monitoring:**
- FFmpeg statistics integration - Real-time display of video/audio codec info, resolution, speed, and stream type
- Automatic stream switching when buffering is detected
- Enhanced stream profile management with better connection tracking
- Improved stream state detection, including buffering as an active state
- **Channel Management:**
- Bulk channel editing for channel group, stream profile, and user access level
- **Enhanced M3U & EPG Features:**
- Dynamic `tvg-id` source selection for M3U and EPG (`tvg_id`, `gracenote`, or `channel_number`)
- Direct URL support in M3U output via `direct=true` parameter
- Flexible EPG output with a configurable day limit via `days=#` parameter
- Support for LIVE tags and `dd_progrid` numbering in EPG processing
- Proxy settings configuration with UI integration and improved validation
- Stream retention controls - Set stale stream days to `0` to disable retention completely (#123)
- Tuner flexibility - Minimum of 1 tuner now allowed for HDHomeRun output
- Fallback IP geolocation provider (#127) - Thanks [@maluueu](https://github.com/maluueu)
- POST method now allowed for M3U output, enabling support for Smarters IPTV - Thanks [@maluueu](https://github.com/maluueu)
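A standard-library sketch of the CIDR-based network access check mentioned above; the allowed networks and function name are illustrative, not project code.

```python
import ipaddress

ALLOWED_NETWORKS = [ipaddress.ip_network(n) for n in ("192.168.0.0/16", "10.0.0.0/8", "fd00::/8")]

def client_allowed(client_ip: str) -> bool:
    addr = ipaddress.ip_address(client_ip)
    return any(addr in net for net in ALLOWED_NETWORKS)

print(client_allowed("192.168.1.20"))  # True
print(client_allowed("203.0.113.7"))   # False
```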
### Changed
- Improved channel cards with better status indicators and tooltips
- Clearer error messaging for unsupported codecs in the web player
- Network access warnings to prevent accidental lockouts
- Case-insensitive M3U parsing for improved compatibility
- Better EPG processing with improved channel matching
- Replaced Mantine React Table with custom implementations
- Improved tooltips and parameter wrapping for cleaner interfaces
- Better badge colors and status indicators
- Stronger form validation and user feedback
- Streamlined settings management using JSON configs
- Default value population for clean installs
- Environment-specific configuration support for multiple deployment scenarios
### Fixed
- FFmpeg process cleanup - Ensures FFmpeg fully exits before marking connection closed
- Resolved stream profile update issues in statistics display
- Fixed M3U profile ID behavior when switching streams
- Corrected stream switching logic - Redis is only updated on successful switches
- Fixed connection counting - Excludes the current profile from available connection counts
- Fixed custom stream channel creation when no group is assigned (#122)
- Resolved EPG auto-matching deadlock when many channels match simultaneously - Thanks [@xham3](https://github.com/xham3)
## [0.5.2] - 2025-06-03
### Added
- Direct Logo Support: Added ability to bypass logo caching by adding `?cachedlogos=false` to the end of M3U and EPG URLs (#109)
### Changed
- Dynamic Resource Management: Auto-scales Celery workers based on demand, reducing overall memory and CPU usage while still allowing high-demand tasks to complete quickly (#111)
- Enhanced Logging:
- Improved logging for M3U processing
- Better error output from XML parser for easier troubleshooting
### Fixed
- XMLTV Parsing: Added `remove_blank_text=True` to lxml parser to prevent crashes with poorly formatted XMLTV files (#115)
- Stats Display: Refactored channel info retrieval for safer decoding and improved error logging, fixing intermittent issues with statistics not displaying properly
## [0.5.1] - 2025-05-28
### Added
- Support for ZIP-compressed EPG files
- Automatic extraction of compressed files after downloading
- Intelligent file type detection for EPG sources:
- Reads the first bits of files to determine file type
- If a compressed file is detected, it peeks inside to find XML files
- Random descriptions for dummy channels in the TV guide
- Support for decimal channel numbers (converted from integer to float) - Thanks [@MooseyOnTheLoosey](https://github.com/MooseyOnTheLoosey)
- Show channels without EPG data in TV Guide
- Profile name added to HDHR-friendly name and device ID (allows adding multiple HDHR profiles to Plex)
### Changed
- About 30% faster EPG processing
- Significantly improved memory usage for large EPG files
- Improved timezone handling
- Cleaned up cached files when deleting EPG sources
- Performance improvements when processing extremely large M3U files
- Improved batch processing with better cleanup
- Enhanced WebSocket update handling for large operations
- Redis configured for better performance (no longer saves to disk)
- Improved memory management for Celery tasks
- Separated beat schedules with a file scanning interval set to 20 seconds
- Improved authentication error handling with user redirection to the login page
- Improved channel card formatting for different screen resolutions (can now actually read the channel stats card on mobile)
- Decreased line height for status messages in the EPG and M3U tables for better appearance on smaller screens
- Updated the EPG form to match the M3U form for consistency
### Fixed
- Profile selection issues that previously caused WebUI crashes
- Issue with `tvc-guide-id` (Gracenote ID) in bulk channel creation
- Bug when uploading an M3U with the default user-agent set
- Bug where multiple channel initializations could occur, causing zombie streams and performance issues (choppy streams)
- Better error handling for buffer overflow issues
- Fixed various memory leaks
- Bug in the TV Guide that would crash the web UI when selecting a profile to filter by
- Multiple minor bug fixes and code cleanup
## [0.5.0] - 2025-05-15
### Added
- **XtreamCodes Support:**
- Initial XtreamCodes client support
- Option to add EPG source with XC account
- Improved XC login and authentication
- Improved error handling for XC connections
- **Hardware Acceleration:**
- Detection of hardware acceleration capabilities with recommendations (available in logs after startup)
- Improved support for NVIDIA, Intel (QSV), and VAAPI acceleration methods
- Added necessary drivers and libraries for hardware acceleration
- Automatically assigns required permissions for hardware acceleration
- Thanks to [@BXWeb](https://github.com/BXWeb), @chris.r3x, [@rykr](https://github.com/rykr), @j3111, [@jesmannstl](https://github.com/jesmannstl), @jimmycarbone, [@gordlaben](https://github.com/gordlaben), [@roofussummers](https://github.com/roofussummers), [@slamanna212](https://github.com/slamanna212)
- **M3U and EPG Management:**
- Enhanced M3U profile creation with live regex results
- Added stale stream detection with configurable thresholds
- Improved status messaging for M3U and EPG operations:
- Shows download speed with estimated time remaining
- Shows parsing time remaining
- Added "Pending Setup" status for M3U's requiring group selection
- Improved handling of M3U group filtering
- **UI Improvements:**
- Added configurable table sizes
- Enhanced video player with loading and error states
- Improved WebSocket connection handling with authentication
- Added confirmation dialogs for critical operations
- Auto-assign numbers now configurable by selection
- Added bulk editing of channel profile membership (select multiple channels, then click the profile toggle on any selected channel to apply the change to all)
- **Infrastructure & Performance:**
- Standardized and improved the logging system
- New environment variable to set logging level: `DISPATCHARR_LOG_LEVEL` (default: `INFO`, available: `TRACE`, `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`); see the example after this list
- Introduced a new base image build process: updates are now significantly smaller (typically under 15MB unless the base image changes)
- Improved environment variable handling in container
- Support for Gracenote ID (`tvc-guide-stationid`) - Thanks [@rykr](https://github.com/rykr)
- Improved file upload handling with size limits removed
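A minimal sketch of how a variable like `DISPATCHARR_LOG_LEVEL` is typically consumed; the wiring below is illustrative rather than Dispatcharr's own startup code, and because stdlib logging has no `TRACE` level this generic version falls back to `INFO` for it:

```python
import logging
import os

# Read the level name from the environment, defaulting to INFO as documented above.
level_name = os.environ.get("DISPATCHARR_LOG_LEVEL", "INFO").upper()
# Unknown names (including Dispatcharr's TRACE) fall back to INFO in this sketch.
logging.basicConfig(level=getattr(logging, level_name, logging.INFO))
logging.getLogger("example").info("Logging configured at %s", level_name)
```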
### Fixed
- Issues with profiles not loading correctly
- Problems with stream previews in tables
- Channel creation and editing workflows
- Logo display issues
- WebSocket connection problems
- Multiple React-related errors and warnings
- Pagination and filtering issues in tables
## [0.4.1] - 2025-05-01
### Changed
- Optimized uWSGI configuration settings for better server performance
- Improved asynchronous processing by converting additional timers to gevent
- Enhanced EPG (Electronic Program Guide) downloading with proper user agent headers
### Fixed
- Issue with "add streams to channel" functionality to correctly follow disabled state logic
## [0.4.0] - 2025-05-01
### Added
- URL copy buttons for stream and channel URLs
- Manual stream switching ability
- EPG auto-match notifications - Users now receive feedback about how many matches were found
- Informative tooltips throughout the interface, including stream profiles and user-agent details
- Display of connected time for each client
- Current M3U profile information to stats
- Better logging of which channel each client is receiving chunks from
### Changed
- Table System Rewrite: Completely refactored channel and stream tables for dramatically improved performance with large datasets
- Improved Concurrency: Replaced time.sleep with gevent.sleep for better performance when handling multiple streams
- Improved table interactions:
- Restored alternating row colors and hover effects
- Added shift-click support for multiple row selection
- Preserved drag-and-drop functionality
- Adjusted logo display to prevent layout shifts with different sized logos
- Improved sticky headers in tables
- Fixed spacing and padding in EPG and M3U tables for better readability on smaller displays
- Stream URL handling improved for search/replace patterns
- Enhanced stream lock management for better reliability
- Added stream name to channel status for better visibility
- Properly track current stream ID during stream switches
- Improved EPG cache handling and cleanup of old cache files
- Corrected content type for M3U file (using m3u instead of m3u8)
- Fixed logo URL handling in M3U generation
- Enhanced tuner count calculation to include only active M3U accounts
- Increased thread stack size in uwsgi configuration
- Changed proxy to use uwsgi socket
- Added build timestamp to version information
- Reduced excessive logging during M3U/EPG file importing
- Improved store variable handling to increase application efficiency
- Frontend now being built by Yarn instead of NPM
### Fixed
- Issues with channel statistics randomly not working
- Stream ordering in channel selection
- M3U profile name added to stream names for better identification
- Channel form not updating some properties after saving
- Issue with setting logos to default
- Channel creation from streams
- Channel group saving
- Improved error handling throughout the application
- Bugs in deleting stream profiles
- Resolved mimetype detection issues
- Fixed form display issues
- Added proper requerying after form submissions and item deletions
- Bug overwriting tvg-id when loading TV Guide
- Bug that prevented large M3Us and EPGs from uploading
- Typo in Stream Profile header column for Description - Thanks [@LoudSoftware](https://github.com/LoudSoftware)
- Typo in m3u input processing (tv-chno instead of tvg-chno) - Thanks @www2a
## [0.3.3] - 2025-04-18
### Fixed
- Issue with dummy EPG calculating hours above 24; time values now remain within a valid 24-hour format (see the sketch after this list)
- Auto-import functionality now properly processes old files that hadn't been imported yet, rather than ignoring them
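A tiny illustration of the wrap-around fix above — purely illustrative, not the project's dummy-EPG code:

```python
def wrap_hour(hour: int) -> int:
    # Keep generated guide hours on a 24-hour clock (e.g. 26 -> 2).
    return hour % 24

assert wrap_hour(26) == 2
assert wrap_hour(23) == 23
```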
## [0.3.2] - 2025-04-16
### Fixed
- Issue with stream ordering for channels - resolved problem where stream objects were incorrectly processed when assigning order in channel configurations
## [0.3.1] - 2025-04-16
### Added
- Keys for navigation links in the sidebar to resolve DOM errors when loading the web UI
- Channels set to a 'dummy' EPG now appear in the TV Guide
### Fixed
- Issue preventing dummy EPG from being set
- Channel numbers not saving properly
- EPGs not refreshing when linking EPG to channel
- Improved error messages in notifications
## [0.3.0] - 2025-04-15
### Added
- URL validation for redirect profile:
- Validates stream URLs before redirecting clients
- Prevents clients from being redirected to unavailable streams
- Now tries alternate streams when primary stream validation fails
- Dynamic tuner configuration for HDHomeRun devices (see the sketch after this list):
- TunerCount is now dynamically created based on profile max connections
- Sets minimum of 2 tuners, up to 10 for unlimited profiles
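A hedged sketch of the tuner-count rule above; the function name and the use of 0/None to mean "unlimited" are assumptions, and only the bounds (minimum of 2, 10 for unlimited profiles) come from this entry:

```python
def hdhr_tuner_count(profile_max_connections):
    # Unlimited profiles (assumed here to be represented as 0 or None) advertise 10 tuners.
    if not profile_max_connections:
        return 10
    # Limited profiles advertise their max connections, but never fewer than 2 tuners.
    return max(2, profile_max_connections)
```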
### Changed
- More robust stream switching:
- Clients now wait properly if a stream is in the switching state
- Improved reliability during stream transitions
- Performance enhancements:
- Increased workers and threads for uwsgi for better concurrency
### Fixed
- Issue with multiple dead streams in a row - System now properly handles cases where several sequential streams are unavailable
- Broken links to compose files in documentation
## [0.2.1] - 2025-04-13
### Fixed
- Stream preview (not channel)
- Streaming wouldn't work when using default user-agent for an M3U
- WebSockets and M3U profile form issues
## [0.2.0] - 2025-04-12
Initial beta public release.

View file

@ -20,30 +20,88 @@ class TokenObtainPairView(TokenObtainPairView):
def post(self, request, *args, **kwargs):
# Custom logic here
if not network_access_allowed(request, "UI"):
# Log blocked login attempt due to network restrictions
from core.utils import log_system_event
username = request.data.get("username", 'unknown')
client_ip = request.META.get('REMOTE_ADDR', 'unknown')
user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')
log_system_event(
event_type='login_failed',
user=username,
client_ip=client_ip,
user_agent=user_agent,
reason='Network access denied',
)
return Response({"error": "Forbidden"}, status=status.HTTP_403_FORBIDDEN)
# Get the response from the parent class first
response = super().post(request, *args, **kwargs)
username = request.data.get("username")
# If login was successful, update last_login
if response.status_code == 200:
username = request.data.get("username")
if username:
from django.utils import timezone
try:
user = User.objects.get(username=username)
user.last_login = timezone.now()
user.save(update_fields=['last_login'])
except User.DoesNotExist:
pass # User doesn't exist, but login somehow succeeded
# Log login attempt
from core.utils import log_system_event
client_ip = request.META.get('REMOTE_ADDR', 'unknown')
user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')
return response
try:
response = super().post(request, *args, **kwargs)
# If login was successful, update last_login and log success
if response.status_code == 200:
if username:
from django.utils import timezone
try:
user = User.objects.get(username=username)
user.last_login = timezone.now()
user.save(update_fields=['last_login'])
# Log successful login
log_system_event(
event_type='login_success',
user=username,
client_ip=client_ip,
user_agent=user_agent,
)
except User.DoesNotExist:
pass # User doesn't exist, but login somehow succeeded
else:
# Log failed login attempt
log_system_event(
event_type='login_failed',
user=username or 'unknown',
client_ip=client_ip,
user_agent=user_agent,
reason='Invalid credentials',
)
return response
except Exception as e:
# If parent class raises an exception (e.g., validation error), log failed attempt
log_system_event(
event_type='login_failed',
user=username or 'unknown',
client_ip=client_ip,
user_agent=user_agent,
reason=f'Authentication error: {str(e)[:100]}',
)
raise # Re-raise the exception to maintain normal error flow
class TokenRefreshView(TokenRefreshView):
def post(self, request, *args, **kwargs):
# Custom logic here
if not network_access_allowed(request, "UI"):
# Log blocked token refresh attempt due to network restrictions
from core.utils import log_system_event
client_ip = request.META.get('REMOTE_ADDR', 'unknown')
user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')
log_system_event(
event_type='login_failed',
user='token_refresh',
client_ip=client_ip,
user_agent=user_agent,
reason='Network access denied (token refresh)',
)
return Response({"error": "Unauthorized"}, status=status.HTTP_403_FORBIDDEN)
return super().post(request, *args, **kwargs)
@ -80,6 +138,15 @@ def initialize_superuser(request):
class AuthViewSet(viewsets.ViewSet):
"""Handles user login and logout"""
def get_permissions(self):
"""
Login doesn't require auth, but logout does
"""
if self.action == 'logout':
from rest_framework.permissions import IsAuthenticated
return [IsAuthenticated()]
return []
@swagger_auto_schema(
operation_description="Authenticate and log in a user",
request_body=openapi.Schema(
@ -100,6 +167,11 @@ class AuthViewSet(viewsets.ViewSet):
password = request.data.get("password")
user = authenticate(request, username=username, password=password)
# Get client info for logging
from core.utils import log_system_event
client_ip = request.META.get('REMOTE_ADDR', 'unknown')
user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')
if user:
login(request, user)
# Update last_login timestamp
@ -107,6 +179,14 @@ class AuthViewSet(viewsets.ViewSet):
user.last_login = timezone.now()
user.save(update_fields=['last_login'])
# Log successful login
log_system_event(
event_type='login_success',
user=username,
client_ip=client_ip,
user_agent=user_agent,
)
return Response(
{
"message": "Login successful",
@ -118,6 +198,15 @@ class AuthViewSet(viewsets.ViewSet):
},
}
)
# Log failed login attempt
log_system_event(
event_type='login_failed',
user=username or 'unknown',
client_ip=client_ip,
user_agent=user_agent,
reason='Invalid credentials',
)
return Response({"error": "Invalid credentials"}, status=400)
@swagger_auto_schema(
@ -126,6 +215,19 @@ class AuthViewSet(viewsets.ViewSet):
)
def logout(self, request):
"""Logs out the authenticated user"""
# Log logout event before actually logging out
from core.utils import log_system_event
username = request.user.username if request.user and request.user.is_authenticated else 'unknown'
client_ip = request.META.get('REMOTE_ADDR', 'unknown')
user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')
log_system_event(
event_type='logout',
user=username,
client_ip=client_ip,
user_agent=user_agent,
)
logout(request)
return Response({"message": "Logout successful"})

View file

@ -13,12 +13,14 @@ from .api_views import (
UpdateChannelMembershipAPIView,
BulkUpdateChannelMembershipAPIView,
RecordingViewSet,
RecurringRecordingRuleViewSet,
GetChannelStreamsAPIView,
SeriesRulesAPIView,
DeleteSeriesRuleAPIView,
EvaluateSeriesRulesAPIView,
BulkRemoveSeriesRecordingsAPIView,
BulkDeleteUpcomingRecordingsAPIView,
ComskipConfigAPIView,
)
app_name = 'channels' # for DRF routing
@ -30,6 +32,7 @@ router.register(r'channels', ChannelViewSet, basename='channel')
router.register(r'logos', LogoViewSet, basename='logo')
router.register(r'profiles', ChannelProfileViewSet, basename='profile')
router.register(r'recordings', RecordingViewSet, basename='recording')
router.register(r'recurring-rules', RecurringRecordingRuleViewSet, basename='recurring-rule')
urlpatterns = [
# Bulk delete is a single APIView, not a ViewSet
@ -46,6 +49,7 @@ urlpatterns = [
path('series-rules/bulk-remove/', BulkRemoveSeriesRecordingsAPIView.as_view(), name='bulk_remove_series_recordings'),
path('series-rules/<str:tvg_id>/', DeleteSeriesRuleAPIView.as_view(), name='delete_series_rule'),
path('recordings/bulk-delete-upcoming/', BulkDeleteUpcomingRecordingsAPIView.as_view(), name='bulk_delete_upcoming_recordings'),
path('dvr/comskip-config/', ComskipConfigAPIView.as_view(), name='comskip_config'),
]
urlpatterns += router.urls

View file

@ -28,6 +28,7 @@ from .models import (
ChannelProfile,
ChannelProfileMembership,
Recording,
RecurringRecordingRule,
)
from .serializers import (
StreamSerializer,
@ -38,8 +39,17 @@ from .serializers import (
BulkChannelProfileMembershipSerializer,
ChannelProfileSerializer,
RecordingSerializer,
RecurringRecordingRuleSerializer,
)
from .tasks import (
match_epg_channels,
evaluate_series_rules,
evaluate_series_rules_impl,
match_single_channel_epg,
match_selected_channels_epg,
sync_recurring_rule_impl,
purge_recurring_rule_impl,
)
from .tasks import match_epg_channels, evaluate_series_rules, evaluate_series_rules_impl, match_single_channel_epg, match_selected_channels_epg
import django_filters
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.filters import SearchFilter, OrderingFilter
@ -49,10 +59,12 @@ from django.db.models import Q
from django.http import StreamingHttpResponse, FileResponse, Http404
from django.utils import timezone
import mimetypes
from django.conf import settings
from rest_framework.pagination import PageNumberPagination
logger = logging.getLogger(__name__)
@ -423,8 +435,8 @@ class ChannelViewSet(viewsets.ModelViewSet):
@action(detail=False, methods=["patch"], url_path="edit/bulk")
def edit_bulk(self, request):
"""
Bulk edit channels.
Expects a list of channels with their updates.
Bulk edit channels efficiently.
Validates all updates first, then applies in a single transaction.
"""
data = request.data
if not isinstance(data, list):
@ -433,63 +445,94 @@ class ChannelViewSet(viewsets.ModelViewSet):
status=status.HTTP_400_BAD_REQUEST,
)
updated_channels = []
errors = []
# Extract IDs and validate presence
channel_updates = {}
missing_ids = []
for channel_data in data:
for i, channel_data in enumerate(data):
channel_id = channel_data.get("id")
if not channel_id:
errors.append({"error": "Channel ID is required"})
continue
missing_ids.append(f"Item {i}: Channel ID is required")
else:
channel_updates[channel_id] = channel_data
try:
channel = Channel.objects.get(id=channel_id)
if missing_ids:
return Response(
{"errors": missing_ids},
status=status.HTTP_400_BAD_REQUEST,
)
# Handle channel_group_id properly - convert string to integer if needed
if 'channel_group_id' in channel_data:
group_id = channel_data['channel_group_id']
if group_id is not None:
try:
channel_data['channel_group_id'] = int(group_id)
except (ValueError, TypeError):
channel_data['channel_group_id'] = None
# Fetch all channels at once (one query)
channels_dict = {
c.id: c for c in Channel.objects.filter(id__in=channel_updates.keys())
}
# Use the serializer to validate and update
serializer = ChannelSerializer(
channel, data=channel_data, partial=True
)
# Validate and prepare updates
validated_updates = []
errors = []
if serializer.is_valid():
updated_channel = serializer.save()
updated_channels.append(updated_channel)
else:
errors.append({
"channel_id": channel_id,
"errors": serializer.errors
})
for channel_id, channel_data in channel_updates.items():
channel = channels_dict.get(channel_id)
except Channel.DoesNotExist:
if not channel:
errors.append({
"channel_id": channel_id,
"error": "Channel not found"
})
except Exception as e:
continue
# Handle channel_group_id conversion
if 'channel_group_id' in channel_data:
group_id = channel_data['channel_group_id']
if group_id is not None:
try:
channel_data['channel_group_id'] = int(group_id)
except (ValueError, TypeError):
channel_data['channel_group_id'] = None
# Validate with serializer
serializer = ChannelSerializer(
channel, data=channel_data, partial=True
)
if serializer.is_valid():
validated_updates.append((channel, serializer.validated_data))
else:
errors.append({
"channel_id": channel_id,
"error": str(e)
"errors": serializer.errors
})
if errors:
return Response(
{"errors": errors, "updated_count": len(updated_channels)},
{"errors": errors, "updated_count": len(validated_updates)},
status=status.HTTP_400_BAD_REQUEST,
)
# Serialize the updated channels for response
serialized_channels = ChannelSerializer(updated_channels, many=True).data
# Apply all updates in a transaction
with transaction.atomic():
for channel, validated_data in validated_updates:
for key, value in validated_data.items():
setattr(channel, key, value)
# Single bulk_update query instead of individual saves
channels_to_update = [channel for channel, _ in validated_updates]
if channels_to_update:
Channel.objects.bulk_update(
channels_to_update,
fields=list(validated_updates[0][1].keys()),
batch_size=100
)
# Return the updated objects (already in memory)
serialized_channels = ChannelSerializer(
[channel for channel, _ in validated_updates],
many=True,
context=self.get_serializer_context()
).data
return Response({
"message": f"Successfully updated {len(updated_channels)} channels",
"message": f"Successfully updated {len(validated_updates)} channels",
"channels": serialized_channels
})
@ -555,6 +598,37 @@ class ChannelViewSet(viewsets.ModelViewSet):
"channel_count": len(channel_ids)
})
@action(detail=False, methods=["post"], url_path="set-tvg-ids-from-epg")
def set_tvg_ids_from_epg(self, request):
"""
Trigger a Celery task to set channel TVG-IDs from EPG data
"""
from .tasks import set_channels_tvg_ids_from_epg
data = request.data
channel_ids = data.get("channel_ids", [])
if not channel_ids:
return Response(
{"error": "channel_ids is required"},
status=status.HTTP_400_BAD_REQUEST,
)
if not isinstance(channel_ids, list):
return Response(
{"error": "channel_ids must be a list"},
status=status.HTTP_400_BAD_REQUEST,
)
# Start the Celery task
task = set_channels_tvg_ids_from_epg.delay(channel_ids)
return Response({
"message": f"Started EPG TVG-ID setting task for {len(channel_ids)} channels",
"task_id": task.id,
"channel_count": len(channel_ids)
})
@action(detail=False, methods=["get"], url_path="ids")
def get_ids(self, request, *args, **kwargs):
# Get the filtered queryset
@ -704,10 +778,14 @@ class ChannelViewSet(viewsets.ModelViewSet):
channel_data["channel_group_id"] = channel_group.id
if stream.logo_url:
logo, _ = Logo.objects.get_or_create(
url=stream.logo_url, defaults={"name": stream.name or stream.tvg_id}
)
channel_data["logo_id"] = logo.id
# Import validation function
from apps.channels.tasks import validate_logo_url
validated_logo_url = validate_logo_url(stream.logo_url)
if validated_logo_url:
logo, _ = Logo.objects.get_or_create(
url=validated_logo_url, defaults={"name": stream.name or stream.tvg_id}
)
channel_data["logo_id"] = logo.id
# Attempt to find existing EPGs with the same tvg-id
epgs = EPGData.objects.filter(tvg_id=stream.tvg_id)
@ -940,19 +1018,27 @@ class ChannelViewSet(viewsets.ModelViewSet):
channel.epg_data = epg_data
channel.save(update_fields=["epg_data"])
# Explicitly trigger program refresh for this EPG
from apps.epg.tasks import parse_programs_for_tvg_id
# Only trigger program refresh for non-dummy EPG sources
status_message = None
if epg_data.epg_source.source_type != 'dummy':
# Explicitly trigger program refresh for this EPG
from apps.epg.tasks import parse_programs_for_tvg_id
task_result = parse_programs_for_tvg_id.delay(epg_data.id)
task_result = parse_programs_for_tvg_id.delay(epg_data.id)
# Prepare response with task status info
status_message = "EPG refresh queued"
if task_result.result == "Task already running":
status_message = "EPG refresh already in progress"
# Prepare response with task status info
status_message = "EPG refresh queued"
if task_result.result == "Task already running":
status_message = "EPG refresh already in progress"
# Build response message
message = f"EPG data set to {epg_data.tvg_id} for channel {channel.name}"
if status_message:
message += f". {status_message}"
return Response(
{
"message": f"EPG data set to {epg_data.tvg_id} for channel {channel.name}. {status_message}.",
"message": message,
"channel": self.get_serializer(channel).data,
"task_status": status_message,
}
@ -984,8 +1070,15 @@ class ChannelViewSet(viewsets.ModelViewSet):
def batch_set_epg(self, request):
"""Efficiently associate multiple channels with EPG data at once."""
associations = request.data.get("associations", [])
channels_updated = 0
programs_refreshed = 0
if not associations:
return Response(
{"error": "associations list is required"},
status=status.HTTP_400_BAD_REQUEST,
)
# Extract channel IDs upfront
channel_updates = {}
unique_epg_ids = set()
for assoc in associations:
@ -995,32 +1088,58 @@ class ChannelViewSet(viewsets.ModelViewSet):
if not channel_id:
continue
try:
# Get the channel
channel = Channel.objects.get(id=channel_id)
channel_updates[channel_id] = epg_data_id
if epg_data_id:
unique_epg_ids.add(epg_data_id)
# Set the EPG data
channel.epg_data_id = epg_data_id
channel.save(update_fields=["epg_data"])
channels_updated += 1
# Batch fetch all channels (single query)
channels_dict = {
c.id: c for c in Channel.objects.filter(id__in=channel_updates.keys())
}
# Track unique EPG data IDs
if epg_data_id:
unique_epg_ids.add(epg_data_id)
except Channel.DoesNotExist:
# Collect channels to update
channels_to_update = []
for channel_id, epg_data_id in channel_updates.items():
if channel_id not in channels_dict:
logger.error(f"Channel with ID {channel_id} not found")
except Exception as e:
logger.error(
f"Error setting EPG data for channel {channel_id}: {str(e)}"
continue
channel = channels_dict[channel_id]
channel.epg_data_id = epg_data_id
channels_to_update.append(channel)
# Bulk update all channels (single query)
if channels_to_update:
with transaction.atomic():
Channel.objects.bulk_update(
channels_to_update,
fields=["epg_data_id"],
batch_size=100
)
# Trigger program refresh for unique EPG data IDs
from apps.epg.tasks import parse_programs_for_tvg_id
channels_updated = len(channels_to_update)
# Trigger program refresh for unique EPG data IDs (skip dummy EPGs)
from apps.epg.tasks import parse_programs_for_tvg_id
from apps.epg.models import EPGData
# Batch fetch EPG data (single query)
epg_data_dict = {
epg.id: epg
for epg in EPGData.objects.filter(id__in=unique_epg_ids).select_related('epg_source')
}
programs_refreshed = 0
for epg_id in unique_epg_ids:
parse_programs_for_tvg_id.delay(epg_id)
programs_refreshed += 1
epg_data = epg_data_dict.get(epg_id)
if not epg_data:
logger.error(f"EPGData with ID {epg_id} not found")
continue
# Only refresh non-dummy EPG sources
if epg_data.epg_source.source_type != 'dummy':
parse_programs_for_tvg_id.delay(epg_id)
programs_refreshed += 1
return Response(
{
@ -1185,7 +1304,7 @@ class CleanupUnusedLogosAPIView(APIView):
return [Authenticated()]
@swagger_auto_schema(
operation_description="Delete all logos that are not used by any channels, movies, or series",
operation_description="Delete all channel logos that are not used by any channels",
request_body=openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
@ -1199,24 +1318,11 @@ class CleanupUnusedLogosAPIView(APIView):
responses={200: "Cleanup completed"},
)
def post(self, request):
"""Delete all logos with no channel, movie, or series associations"""
"""Delete all channel logos with no channel associations"""
delete_files = request.data.get("delete_files", False)
# Find logos that are not used by channels, movies, or series
filter_conditions = Q(channels__isnull=True)
# Add VOD conditions if models are available
try:
filter_conditions &= Q(movie__isnull=True)
except:
pass
try:
filter_conditions &= Q(series__isnull=True)
except:
pass
unused_logos = Logo.objects.filter(filter_conditions)
# Find logos that are not used by any channels
unused_logos = Logo.objects.filter(channels__isnull=True)
deleted_count = unused_logos.count()
logo_names = list(unused_logos.values_list('name', flat=True))
local_files_deleted = 0
@ -1288,13 +1394,6 @@ class LogoViewSet(viewsets.ModelViewSet):
# Start with basic prefetch for channels
queryset = Logo.objects.prefetch_related('channels').order_by('name')
# Try to prefetch VOD relations if available
try:
queryset = queryset.prefetch_related('movie', 'series')
except:
# VOD app might not be available, continue without VOD prefetch
pass
# Filter by specific IDs
ids = self.request.query_params.getlist('ids')
if ids:
@ -1307,62 +1406,14 @@ class LogoViewSet(viewsets.ModelViewSet):
pass # Invalid IDs, return empty queryset
queryset = Logo.objects.none()
# Filter by usage - now includes VOD content
# Filter by usage
used_filter = self.request.query_params.get('used', None)
if used_filter == 'true':
# Logo is used if it has any channels, movies, or series
filter_conditions = Q(channels__isnull=False)
# Add VOD conditions if models are available
try:
filter_conditions |= Q(movie__isnull=False)
except:
pass
try:
filter_conditions |= Q(series__isnull=False)
except:
pass
queryset = queryset.filter(filter_conditions).distinct()
# Logo is used if it has any channels
queryset = queryset.filter(channels__isnull=False).distinct()
elif used_filter == 'false':
# Logo is unused if it has no channels, movies, or series
filter_conditions = Q(channels__isnull=True)
# Add VOD conditions if models are available
try:
filter_conditions &= Q(movie__isnull=True)
except:
pass
try:
filter_conditions &= Q(series__isnull=True)
except:
pass
queryset = queryset.filter(filter_conditions)
# Filter for channel assignment (unused + channel-used, exclude VOD-only)
channel_assignable = self.request.query_params.get('channel_assignable', None)
if channel_assignable == 'true':
# Include logos that are either:
# 1. Completely unused, OR
# 2. Used by channels (but may also be used by VOD)
# Exclude logos that are ONLY used by VOD content
unused_condition = Q(channels__isnull=True)
channel_used_condition = Q(channels__isnull=False)
# Add VOD conditions if models are available
try:
unused_condition &= Q(movie__isnull=True) & Q(series__isnull=True)
except:
pass
# Combine: unused OR used by channels
filter_conditions = unused_condition | channel_used_condition
queryset = queryset.filter(filter_conditions).distinct()
# Logo is unused if it has no channels
queryset = queryset.filter(channels__isnull=True)
# Filter by name
name_filter = self.request.query_params.get('name', None)
@ -1653,6 +1704,41 @@ class BulkUpdateChannelMembershipAPIView(APIView):
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class RecurringRecordingRuleViewSet(viewsets.ModelViewSet):
queryset = RecurringRecordingRule.objects.all().select_related("channel")
serializer_class = RecurringRecordingRuleSerializer
def get_permissions(self):
return [IsAdmin()]
def perform_create(self, serializer):
rule = serializer.save()
try:
sync_recurring_rule_impl(rule.id, drop_existing=True)
except Exception as err:
logger.warning(f"Failed to initialize recurring rule {rule.id}: {err}")
return rule
def perform_update(self, serializer):
rule = serializer.save()
try:
if rule.enabled:
sync_recurring_rule_impl(rule.id, drop_existing=True)
else:
purge_recurring_rule_impl(rule.id)
except Exception as err:
logger.warning(f"Failed to resync recurring rule {rule.id}: {err}")
return rule
def perform_destroy(self, instance):
rule_id = instance.id
super().perform_destroy(instance)
try:
purge_recurring_rule_impl(rule_id)
except Exception as err:
logger.warning(f"Failed to purge recordings for rule {rule_id}: {err}")
class RecordingViewSet(viewsets.ModelViewSet):
queryset = Recording.objects.all()
serializer_class = RecordingSerializer
@ -1832,6 +1918,49 @@ class RecordingViewSet(viewsets.ModelViewSet):
return response
class ComskipConfigAPIView(APIView):
"""Upload or inspect the custom comskip.ini used by DVR processing."""
parser_classes = [MultiPartParser, FormParser]
def get_permissions(self):
return [IsAdmin()]
def get(self, request):
path = CoreSettings.get_dvr_comskip_custom_path()
exists = bool(path and os.path.exists(path))
return Response({"path": path, "exists": exists})
def post(self, request):
uploaded = request.FILES.get("file") or request.FILES.get("comskip_ini")
if not uploaded:
return Response({"error": "No file provided"}, status=status.HTTP_400_BAD_REQUEST)
name = (uploaded.name or "").lower()
if not name.endswith(".ini"):
return Response({"error": "Only .ini files are allowed"}, status=status.HTTP_400_BAD_REQUEST)
if uploaded.size and uploaded.size > 1024 * 1024:
return Response({"error": "File too large (limit 1MB)"}, status=status.HTTP_400_BAD_REQUEST)
dest_dir = os.path.join(settings.MEDIA_ROOT, "comskip")
os.makedirs(dest_dir, exist_ok=True)
dest_path = os.path.join(dest_dir, "comskip.ini")
try:
with open(dest_path, "wb") as dest:
for chunk in uploaded.chunks():
dest.write(chunk)
except Exception as e:
logger.error(f"Failed to save uploaded comskip.ini: {e}")
return Response({"error": "Unable to save file"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
# Persist path setting so DVR processing picks it up immediately
CoreSettings.set_dvr_comskip_custom_path(dest_path)
return Response({"success": True, "path": dest_path, "exists": os.path.exists(dest_path)})
class BulkDeleteUpcomingRecordingsAPIView(APIView):
"""Delete all upcoming (future) recordings."""
def get_permissions(self):

View file

@ -0,0 +1,31 @@
# Generated by Django 5.0.14 on 2025-09-18 14:56
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dispatcharr_channels', '0025_alter_channelgroupm3uaccount_custom_properties_and_more'),
]
operations = [
migrations.CreateModel(
name='RecurringRecordingRule',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('days_of_week', models.JSONField(default=list)),
('start_time', models.TimeField()),
('end_time', models.TimeField()),
('enabled', models.BooleanField(default=True)),
('name', models.CharField(blank=True, max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('channel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recurring_rules', to='dispatcharr_channels.channel')),
],
options={
'ordering': ['channel', 'start_time'],
},
),
]

View file

@ -0,0 +1,23 @@
# Generated by Django 5.2.4 on 2025-10-05 20:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dispatcharr_channels', '0026_recurringrecordingrule'),
]
operations = [
migrations.AddField(
model_name='recurringrecordingrule',
name='end_date',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='recurringrecordingrule',
name='start_date',
field=models.DateField(blank=True, null=True),
),
]

View file

@ -0,0 +1,25 @@
# Generated by Django 5.2.4 on 2025-10-06 22:55
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dispatcharr_channels', '0027_recurringrecordingrule_end_date_and_more'),
]
operations = [
migrations.AddField(
model_name='channel',
name='created_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, help_text='Timestamp when this channel was created'),
preserve_default=False,
),
migrations.AddField(
model_name='channel',
name='updated_at',
field=models.DateTimeField(auto_now=True, help_text='Timestamp when this channel was last updated'),
),
]

View file

@ -0,0 +1,54 @@
# Generated migration to backfill stream_hash for existing custom streams
from django.db import migrations
import hashlib
def backfill_custom_stream_hashes(apps, schema_editor):
"""
Generate stream_hash for all custom streams that don't have one.
Uses stream ID to create a stable hash that won't change when name/url is edited.
"""
Stream = apps.get_model('dispatcharr_channels', 'Stream')
custom_streams_without_hash = Stream.objects.filter(
is_custom=True,
stream_hash__isnull=True
)
updated_count = 0
for stream in custom_streams_without_hash:
# Generate a stable hash using the stream's ID
# This ensures the hash never changes even if name/url is edited
unique_string = f"custom_stream_{stream.id}"
stream.stream_hash = hashlib.sha256(unique_string.encode()).hexdigest()
stream.save(update_fields=['stream_hash'])
updated_count += 1
if updated_count > 0:
print(f"Backfilled stream_hash for {updated_count} custom streams")
else:
print("No custom streams needed stream_hash backfill")
def reverse_backfill(apps, schema_editor):
"""
Reverse migration - clear stream_hash for custom streams.
Note: This will break preview functionality for custom streams.
"""
Stream = apps.get_model('dispatcharr_channels', 'Stream')
custom_streams = Stream.objects.filter(is_custom=True)
count = custom_streams.update(stream_hash=None)
print(f"Cleared stream_hash for {count} custom streams")
class Migration(migrations.Migration):
dependencies = [
('dispatcharr_channels', '0028_channel_created_at_channel_updated_at'),
]
operations = [
migrations.RunPython(backfill_custom_stream_hashes, reverse_backfill),
]

View file

@ -0,0 +1,18 @@
# Generated by Django 5.2.4 on 2025-10-28 20:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dispatcharr_channels', '0029_backfill_custom_stream_hashes'),
]
operations = [
migrations.AlterField(
model_name='stream',
name='url',
field=models.URLField(blank=True, max_length=4096, null=True),
),
]

View file

@ -55,7 +55,7 @@ class Stream(models.Model):
"""
name = models.CharField(max_length=255, default="Default Stream")
url = models.URLField(max_length=2000, blank=True, null=True)
url = models.URLField(max_length=4096, blank=True, null=True)
m3u_account = models.ForeignKey(
M3UAccount,
on_delete=models.CASCADE,
@ -119,11 +119,11 @@ class Stream(models.Model):
return self.name or self.url or f"Stream ID {self.id}"
@classmethod
def generate_hash_key(cls, name, url, tvg_id, keys=None):
def generate_hash_key(cls, name, url, tvg_id, keys=None, m3u_id=None):
if keys is None:
keys = CoreSettings.get_m3u_hash_key().split(",")
stream_parts = {"name": name, "url": url, "tvg_id": tvg_id}
stream_parts = {"name": name, "url": url, "tvg_id": tvg_id, "m3u_id": m3u_id}
hash_parts = {key: stream_parts[key] for key in keys if key in stream_parts}
@ -152,8 +152,14 @@ class Stream(models.Model):
stream = cls.objects.create(**fields_to_update)
return stream, True # True means it was created
# @TODO: honor stream's stream profile
def get_stream_profile(self):
"""
Get the stream profile for this stream.
Uses the stream's own profile if set, otherwise returns the default.
"""
if self.stream_profile:
return self.stream_profile
stream_profile = StreamProfile.objects.get(
id=CoreSettings.get_default_stream_profile_id()
)
@ -303,6 +309,15 @@ class Channel(models.Model):
help_text="The M3U account that auto-created this channel"
)
created_at = models.DateTimeField(
auto_now_add=True,
help_text="Timestamp when this channel was created"
)
updated_at = models.DateTimeField(
auto_now=True,
help_text="Timestamp when this channel was last updated"
)
def clean(self):
# Enforce unique channel_number within a given group
existing = Channel.objects.filter(
@ -601,3 +616,35 @@ class Recording(models.Model):
def __str__(self):
return f"{self.channel.name} - {self.start_time} to {self.end_time}"
class RecurringRecordingRule(models.Model):
"""Rule describing a recurring manual DVR schedule."""
channel = models.ForeignKey(
"Channel",
on_delete=models.CASCADE,
related_name="recurring_rules",
)
days_of_week = models.JSONField(default=list)
start_time = models.TimeField()
end_time = models.TimeField()
enabled = models.BooleanField(default=True)
name = models.CharField(max_length=255, blank=True)
start_date = models.DateField(null=True, blank=True)
end_date = models.DateField(null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
ordering = ["channel", "start_time"]
def __str__(self):
channel_name = getattr(self.channel, "name", str(self.channel_id))
return f"Recurring rule for {channel_name}"
def cleaned_days(self):
try:
return sorted({int(d) for d in (self.days_of_week or []) if 0 <= int(d) <= 6})
except Exception:
return []

View file

@ -1,4 +1,6 @@
import json
from datetime import datetime
from rest_framework import serializers
from .models import (
Stream,
@ -10,6 +12,7 @@ from .models import (
ChannelProfile,
ChannelProfileMembership,
Recording,
RecurringRecordingRule,
)
from apps.epg.serializers import EPGDataSerializer
from core.models import StreamProfile
@ -61,47 +64,15 @@ class LogoSerializer(serializers.ModelSerializer):
return reverse("api:channels:logo-cache", args=[obj.id])
def get_channel_count(self, obj):
"""Get the number of channels, movies, and series using this logo"""
channel_count = obj.channels.count()
# Safely get movie count
try:
movie_count = obj.movie.count() if hasattr(obj, 'movie') else 0
except AttributeError:
movie_count = 0
# Safely get series count
try:
series_count = obj.series.count() if hasattr(obj, 'series') else 0
except AttributeError:
series_count = 0
return channel_count + movie_count + series_count
"""Get the number of channels using this logo"""
return obj.channels.count()
def get_is_used(self, obj):
"""Check if this logo is used by any channels, movies, or series"""
# Check if used by channels
if obj.channels.exists():
return True
# Check if used by movies (handle case where VOD app might not be available)
try:
if hasattr(obj, 'movie') and obj.movie.exists():
return True
except AttributeError:
pass
# Check if used by series (handle case where VOD app might not be available)
try:
if hasattr(obj, 'series') and obj.series.exists():
return True
except AttributeError:
pass
return False
"""Check if this logo is used by any channels"""
return obj.channels.exists()
def get_channel_names(self, obj):
"""Get the names of channels, movies, and series using this logo (limited to first 5)"""
"""Get the names of channels using this logo (limited to first 5)"""
names = []
# Get channel names
@ -109,28 +80,6 @@ class LogoSerializer(serializers.ModelSerializer):
for channel in channels:
names.append(f"Channel: {channel.name}")
# Get movie names (only if we haven't reached limit)
if len(names) < 5:
try:
if hasattr(obj, 'movie'):
remaining_slots = 5 - len(names)
movies = obj.movie.all()[:remaining_slots]
for movie in movies:
names.append(f"Movie: {movie.name}")
except AttributeError:
pass
# Get series names (only if we haven't reached limit)
if len(names) < 5:
try:
if hasattr(obj, 'series'):
remaining_slots = 5 - len(names)
series = obj.series.all()[:remaining_slots]
for series_item in series:
names.append(f"Series: {series_item.name}")
except AttributeError:
pass
# Calculate total count for "more" message
total_count = self.get_channel_count(obj)
if total_count > 5:
@ -345,8 +294,17 @@ class ChannelSerializer(serializers.ModelSerializer):
if include_streams:
self.fields["streams"] = serializers.SerializerMethodField()
return super().to_representation(instance)
return super().to_representation(instance)
else:
# Fix: For PATCH/PUT responses, ensure streams are ordered
representation = super().to_representation(instance)
if "streams" in representation:
representation["streams"] = list(
instance.streams.all()
.order_by("channelstream__order")
.values_list("id", flat=True)
)
return representation
def get_logo(self, obj):
return LogoSerializer(obj.logo).data
@ -454,6 +412,13 @@ class RecordingSerializer(serializers.ModelSerializer):
start_time = data.get("start_time")
end_time = data.get("end_time")
if start_time and timezone.is_naive(start_time):
start_time = timezone.make_aware(start_time, timezone.get_current_timezone())
data["start_time"] = start_time
if end_time and timezone.is_naive(end_time):
end_time = timezone.make_aware(end_time, timezone.get_current_timezone())
data["end_time"] = end_time
# If this is an EPG-based recording (program provided), apply global pre/post offsets
try:
cp = data.get("custom_properties") or {}
@ -497,3 +462,56 @@ class RecordingSerializer(serializers.ModelSerializer):
raise serializers.ValidationError("End time must be after start time.")
return data
class RecurringRecordingRuleSerializer(serializers.ModelSerializer):
class Meta:
model = RecurringRecordingRule
fields = "__all__"
read_only_fields = ["created_at", "updated_at"]
def validate_days_of_week(self, value):
if not value:
raise serializers.ValidationError("Select at least one day of the week")
cleaned = []
for entry in value:
try:
iv = int(entry)
except (TypeError, ValueError):
raise serializers.ValidationError("Days of week must be integers 0-6")
if iv < 0 or iv > 6:
raise serializers.ValidationError("Days of week must be between 0 (Monday) and 6 (Sunday)")
cleaned.append(iv)
return sorted(set(cleaned))
def validate(self, attrs):
start = attrs.get("start_time") or getattr(self.instance, "start_time", None)
end = attrs.get("end_time") or getattr(self.instance, "end_time", None)
start_date = attrs.get("start_date") if "start_date" in attrs else getattr(self.instance, "start_date", None)
end_date = attrs.get("end_date") if "end_date" in attrs else getattr(self.instance, "end_date", None)
if start_date is None:
existing_start = getattr(self.instance, "start_date", None)
if existing_start is None:
raise serializers.ValidationError("Start date is required")
if start_date and end_date and end_date < start_date:
raise serializers.ValidationError("End date must be on or after start date")
if end_date is None:
existing_end = getattr(self.instance, "end_date", None)
if existing_end is None:
raise serializers.ValidationError("End date is required")
if start and end and start_date and end_date:
start_dt = datetime.combine(start_date, start)
end_dt = datetime.combine(end_date, end)
if end_dt <= start_dt:
raise serializers.ValidationError("End datetime must be after start datetime")
elif start and end and end == start:
raise serializers.ValidationError("End time must be different from start time")
# Normalize empty strings to None for dates
if attrs.get("end_date") == "":
attrs["end_date"] = None
if attrs.get("start_date") == "":
attrs["start_date"] = None
return super().validate(attrs)
def create(self, validated_data):
return super().create(validated_data)

View file

@ -45,6 +45,20 @@ def set_default_m3u_account(sender, instance, **kwargs):
else:
raise ValueError("No default M3UAccount found.")
@receiver(post_save, sender=Stream)
def generate_custom_stream_hash(sender, instance, created, **kwargs):
"""
Generate a stable stream_hash for custom streams after creation.
Uses the stream's ID to ensure the hash never changes even if name/url is edited.
"""
if instance.is_custom and not instance.stream_hash and created:
import hashlib
# Use stream ID for a stable, unique hash that never changes
unique_string = f"custom_stream_{instance.id}"
instance.stream_hash = hashlib.sha256(unique_string.encode()).hexdigest()
# Use update to avoid triggering signals again
Stream.objects.filter(id=instance.id).update(stream_hash=instance.stream_hash)
@receiver(post_save, sender=Channel)
def refresh_epg_programs(sender, instance, created, **kwargs):
"""

View file

@ -7,6 +7,8 @@ import requests
import time
import json
import subprocess
import signal
from zoneinfo import ZoneInfo
from datetime import datetime, timedelta
import gc
@ -28,6 +30,23 @@ from urllib.parse import quote
logger = logging.getLogger(__name__)
# PostgreSQL btree index has a limit of ~2704 bytes (1/3 of 8KB page size)
# We use 2000 as a safe maximum to account for multibyte characters
def validate_logo_url(logo_url, max_length=2000):
"""
Fast validation for logo URLs during bulk creation.
Returns None if URL is too long (would exceed PostgreSQL btree index limit),
original URL otherwise.
PostgreSQL btree indexes have a maximum size of ~2704 bytes. URLs longer than
this cannot be indexed and would cause database errors. These are typically
base64-encoded images embedded in URLs.
"""
if logo_url and len(logo_url) > max_length:
logger.warning(f"Logo URL too long ({len(logo_url)} > {max_length}), skipping: {logo_url[:100]}...")
return None
return logo_url
def send_epg_matching_progress(total_channels, matched_channels, current_channel_name="", stage="matching"):
"""
Send EPG matching progress via WebSocket
@ -227,6 +246,17 @@ def match_channels_to_epg(channels_data, epg_data, region_code=None, use_ml=True
logger.info(f"Channel {chan['id']} '{chan['name']}' => EPG found by secondary tvg_id={chan['tvg_id']}")
continue
# Step 2.5: Exact Gracenote ID match
normalized_gracenote_id = chan.get("gracenote_id", "")
if normalized_gracenote_id:
epg_by_gracenote_id = next((epg for epg in epg_data if epg["tvg_id"] == normalized_gracenote_id), None)
if epg_by_gracenote_id:
chan["epg_data_id"] = epg_by_gracenote_id["id"]
channels_to_update.append(chan)
matched_channels.append((chan['id'], fallback_name, f"gracenote:{epg_by_gracenote_id['tvg_id']}"))
logger.info(f"Channel {chan['id']} '{fallback_name}' => EPG found by exact gracenote_id={normalized_gracenote_id}")
continue
# Step 3: Name-based fuzzy matching
if not chan["norm_chan"]:
logger.debug(f"Channel {chan['id']} '{chan['name']}' => empty after normalization, skipping")
@ -429,11 +459,14 @@ def match_epg_channels():
channels_data = []
for channel in channels_without_epg:
normalized_tvg_id = channel.tvg_id.strip().lower() if channel.tvg_id else ""
normalized_gracenote_id = channel.tvc_guide_stationid.strip().lower() if channel.tvc_guide_stationid else ""
channels_data.append({
"id": channel.id,
"name": channel.name,
"tvg_id": normalized_tvg_id,
"original_tvg_id": channel.tvg_id,
"gracenote_id": normalized_gracenote_id,
"original_gracenote_id": channel.tvc_guide_stationid,
"fallback_name": normalized_tvg_id if normalized_tvg_id else channel.name,
"norm_chan": normalize_name(channel.name) # Always use channel name for fuzzy matching!
})
@ -573,11 +606,14 @@ def match_selected_channels_epg(channel_ids):
channels_data = []
for channel in channels_without_epg:
normalized_tvg_id = channel.tvg_id.strip().lower() if channel.tvg_id else ""
normalized_gracenote_id = channel.tvc_guide_stationid.strip().lower() if channel.tvc_guide_stationid else ""
channels_data.append({
"id": channel.id,
"name": channel.name,
"tvg_id": normalized_tvg_id,
"original_tvg_id": channel.tvg_id,
"gracenote_id": normalized_gracenote_id,
"original_gracenote_id": channel.tvc_guide_stationid,
"fallback_name": normalized_tvg_id if normalized_tvg_id else channel.name,
"norm_chan": normalize_name(channel.name)
})
@ -694,16 +730,19 @@ def match_single_channel_epg(channel_id):
# Prepare single channel data for matching (same format as bulk matching)
normalized_tvg_id = channel.tvg_id.strip().lower() if channel.tvg_id else ""
normalized_gracenote_id = channel.tvc_guide_stationid.strip().lower() if channel.tvc_guide_stationid else ""
channel_data = {
"id": channel.id,
"name": channel.name,
"tvg_id": normalized_tvg_id,
"original_tvg_id": channel.tvg_id,
"gracenote_id": normalized_gracenote_id,
"original_gracenote_id": channel.tvc_guide_stationid,
"fallback_name": normalized_tvg_id if normalized_tvg_id else channel.name,
"norm_chan": normalize_name(channel.name) # Always use channel name for fuzzy matching!
}
logger.info(f"Channel data prepared: name='{channel.name}', tvg_id='{normalized_tvg_id}', norm_chan='{channel_data['norm_chan']}'")
logger.info(f"Channel data prepared: name='{channel.name}', tvg_id='{normalized_tvg_id}', gracenote_id='{normalized_gracenote_id}', norm_chan='{channel_data['norm_chan']}'")
# Debug: Test what the normalization does to preserve call signs
test_name = "NBC 11 (KVLY) - Fargo" # Example for testing
@ -1095,6 +1134,148 @@ def reschedule_upcoming_recordings_for_offset_change():
return reschedule_upcoming_recordings_for_offset_change_impl()
def _notify_recordings_refresh():
try:
from core.utils import send_websocket_update
send_websocket_update('updates', 'update', {"success": True, "type": "recordings_refreshed"})
except Exception:
pass
def purge_recurring_rule_impl(rule_id: int) -> int:
"""Remove all future recordings created by a recurring rule."""
from django.utils import timezone
from .models import Recording
now = timezone.now()
try:
removed, _ = Recording.objects.filter(
start_time__gte=now,
custom_properties__rule__id=rule_id,
).delete()
except Exception:
removed = 0
if removed:
_notify_recordings_refresh()
return removed
def sync_recurring_rule_impl(rule_id: int, drop_existing: bool = True, horizon_days: int = 14) -> int:
"""Ensure recordings exist for a recurring rule within the scheduling horizon."""
from django.utils import timezone
from .models import RecurringRecordingRule, Recording
rule = RecurringRecordingRule.objects.filter(pk=rule_id).select_related("channel").first()
now = timezone.now()
removed = 0
if drop_existing:
removed = purge_recurring_rule_impl(rule_id)
if not rule or not rule.enabled:
return 0
days = rule.cleaned_days()
if not days:
return 0
tz_name = CoreSettings.get_system_time_zone()
try:
tz = ZoneInfo(tz_name)
except Exception:
logger.warning("Invalid or unsupported time zone '%s'; falling back to Server default", tz_name)
tz = timezone.get_current_timezone()
start_limit = rule.start_date or now.date()
end_limit = rule.end_date
horizon = now + timedelta(days=horizon_days)
start_window = max(start_limit, now.date())
if drop_existing and end_limit:
end_window = end_limit
else:
end_window = horizon.date()
if end_limit and end_limit < end_window:
end_window = end_limit
if end_window < start_window:
return 0
total_created = 0
for offset in range((end_window - start_window).days + 1):
target_date = start_window + timedelta(days=offset)
if target_date.weekday() not in days:
continue
if end_limit and target_date > end_limit:
continue
try:
start_dt = timezone.make_aware(datetime.combine(target_date, rule.start_time), tz)
end_dt = timezone.make_aware(datetime.combine(target_date, rule.end_time), tz)
except Exception:
continue
if end_dt <= start_dt:
end_dt = end_dt + timedelta(days=1)
if start_dt <= now:
continue
exists = Recording.objects.filter(
channel=rule.channel,
start_time=start_dt,
custom_properties__rule__id=rule.id,
).exists()
if exists:
continue
description = rule.name or f"Recurring recording for {rule.channel.name}"
cp = {
"rule": {
"type": "recurring",
"id": rule.id,
"days_of_week": days,
"name": rule.name or "",
},
"status": "scheduled",
"description": description,
"program": {
"title": rule.name or rule.channel.name,
"description": description,
"start_time": start_dt.isoformat(),
"end_time": end_dt.isoformat(),
},
}
try:
Recording.objects.create(
channel=rule.channel,
start_time=start_dt,
end_time=end_dt,
custom_properties=cp,
)
total_created += 1
except Exception as err:
logger.warning(f"Failed to create recurring recording for rule {rule.id}: {err}")
if removed or total_created:
_notify_recordings_refresh()
return total_created
@shared_task
def rebuild_recurring_rule(rule_id: int, horizon_days: int = 14):
return sync_recurring_rule_impl(rule_id, drop_existing=True, horizon_days=horizon_days)
@shared_task
def maintain_recurring_recordings():
from .models import RecurringRecordingRule
total = 0
for rule_id in RecurringRecordingRule.objects.filter(enabled=True).values_list("id", flat=True):
try:
total += sync_recurring_rule_impl(rule_id, drop_existing=False)
except Exception as err:
logger.warning(f"Recurring rule maintenance failed for {rule_id}: {err}")
return total
@shared_task
def purge_recurring_rule(rule_id: int):
return purge_recurring_rule_impl(rule_id)
@shared_task
def _safe_name(s):
try:
@ -1253,6 +1434,18 @@ def run_recording(recording_id, channel_id, start_time_str, end_time_str):
logger.info(f"Starting recording for channel {channel.name}")
# Log system event for recording start
try:
from core.utils import log_system_event
log_system_event(
'recording_start',
channel_id=channel.uuid,
channel_name=channel.name,
recording_id=recording_id
)
except Exception as e:
logger.error(f"Could not log recording start event: {e}")
# Try to resolve the Recording row up front
recording_obj = None
try:
@ -1646,6 +1839,20 @@ def run_recording(recording_id, channel_id, start_time_str, end_time_str):
# After the loop, the file and response are closed automatically.
logger.info(f"Finished recording for channel {channel.name}")
# Log system event for recording end
try:
from core.utils import log_system_event
log_system_event(
'recording_end',
channel_id=channel.uuid,
channel_name=channel.name,
recording_id=recording_id,
interrupted=interrupted,
bytes_written=bytes_written
)
except Exception as e:
logger.error(f"Could not log recording end event: {e}")
# Remux TS to MKV container
remux_success = False
try:
@ -1817,6 +2024,7 @@ def comskip_process_recording(recording_id: int):
Safe to call even if comskip is not installed; stores status in custom_properties.comskip.
"""
import shutil
from django.db import DatabaseError
from .models import Recording
# Helper to broadcast status over websocket
def _ws(status: str, extra: dict | None = None):
@ -1834,7 +2042,33 @@ def comskip_process_recording(recording_id: int):
except Recording.DoesNotExist:
return "not_found"
cp = rec.custom_properties or {}
cp = rec.custom_properties.copy() if isinstance(rec.custom_properties, dict) else {}
def _persist_custom_properties():
"""Persist updated custom_properties without raising if the row disappeared."""
try:
updated = Recording.objects.filter(pk=recording_id).update(custom_properties=cp)
if not updated:
logger.warning(
"Recording %s vanished before comskip status could be saved",
recording_id,
)
return False
except DatabaseError as db_err:
logger.warning(
"Failed to persist comskip status for recording %s: %s",
recording_id,
db_err,
)
return False
except Exception as unexpected:
logger.warning(
"Unexpected error while saving comskip status for recording %s: %s",
recording_id,
unexpected,
)
return False
return True
file_path = (cp or {}).get("file_path")
if not file_path or not os.path.exists(file_path):
return "no_file"
@ -1845,8 +2079,7 @@ def comskip_process_recording(recording_id: int):
comskip_bin = shutil.which("comskip")
if not comskip_bin:
cp["comskip"] = {"status": "skipped", "reason": "comskip_not_installed"}
rec.custom_properties = cp
rec.save(update_fields=["custom_properties"])
_persist_custom_properties()
_ws('skipped', {"reason": "comskip_not_installed"})
return "comskip_missing"
@ -1858,24 +2091,59 @@ def comskip_process_recording(recording_id: int):
try:
cmd = [comskip_bin, "--output", os.path.dirname(file_path)]
# Prefer system ini if present to squelch warning and get sane defaults
for ini_path in ("/etc/comskip/comskip.ini", "/app/docker/comskip.ini"):
if os.path.exists(ini_path):
# Prefer user-specified INI, fall back to known defaults
ini_candidates = []
try:
custom_ini = CoreSettings.get_dvr_comskip_custom_path()
if custom_ini:
ini_candidates.append(custom_ini)
except Exception as ini_err:
logger.debug(f"Unable to load custom comskip.ini path: {ini_err}")
ini_candidates.extend(["/etc/comskip/comskip.ini", "/app/docker/comskip.ini"])
selected_ini = None
for ini_path in ini_candidates:
if ini_path and os.path.exists(ini_path):
selected_ini = ini_path
cmd.extend([f"--ini={ini_path}"])
break
cmd.append(file_path)
subprocess.run(cmd, check=True)
subprocess.run(
cmd,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
)
except subprocess.CalledProcessError as e:
stderr_tail = (e.stderr or "").strip().splitlines()
stderr_tail = stderr_tail[-5:] if stderr_tail else []
detail = {
"status": "error",
"reason": "comskip_failed",
"returncode": e.returncode,
}
if e.returncode and e.returncode < 0:
try:
detail["signal"] = signal.Signals(-e.returncode).name
except Exception:
detail["signal"] = f"signal_{-e.returncode}"
if stderr_tail:
detail["stderr"] = "\n".join(stderr_tail)
if selected_ini:
detail["ini_path"] = selected_ini
cp["comskip"] = detail
_persist_custom_properties()
_ws('error', {"reason": "comskip_failed", "returncode": e.returncode})
return "comskip_failed"
except Exception as e:
cp["comskip"] = {"status": "error", "reason": f"comskip_failed: {e}"}
rec.custom_properties = cp
rec.save(update_fields=["custom_properties"])
_persist_custom_properties()
_ws('error', {"reason": str(e)})
return "comskip_failed"
if not os.path.exists(edl_path):
cp["comskip"] = {"status": "error", "reason": "edl_not_found"}
rec.custom_properties = cp
rec.save(update_fields=["custom_properties"])
_persist_custom_properties()
_ws('error', {"reason": "edl_not_found"})
return "no_edl"
@ -1893,8 +2161,7 @@ def comskip_process_recording(recording_id: int):
duration = _ffprobe_duration(file_path)
if duration is None:
cp["comskip"] = {"status": "error", "reason": "duration_unknown"}
rec.custom_properties = cp
rec.save(update_fields=["custom_properties"])
_persist_custom_properties()
_ws('error', {"reason": "duration_unknown"})
return "no_duration"
@ -1923,9 +2190,14 @@ def comskip_process_recording(recording_id: int):
keep.append((cur, duration))
if not commercials or sum((e - s) for s, e in commercials) <= 0.5:
cp["comskip"] = {"status": "completed", "skipped": True, "edl": os.path.basename(edl_path)}
rec.custom_properties = cp
rec.save(update_fields=["custom_properties"])
cp["comskip"] = {
"status": "completed",
"skipped": True,
"edl": os.path.basename(edl_path),
}
if selected_ini:
cp["comskip"]["ini_path"] = selected_ini
_persist_custom_properties()
_ws('skipped', {"reason": "no_commercials", "commercials": 0})
return "no_commercials"
@ -1949,7 +2221,8 @@ def comskip_process_recording(recording_id: int):
list_path = os.path.join(workdir, "concat_list.txt")
with open(list_path, "w") as lf:
for pth in parts:
lf.write(f"file '{pth}'\n")
escaped = pth.replace("'", "'\\''")
lf.write(f"file '{escaped}'\n")
output_path = os.path.join(workdir, f"{os.path.splitext(os.path.basename(file_path))[0]}.cut.mkv")
subprocess.run([
@ -1975,14 +2248,14 @@ def comskip_process_recording(recording_id: int):
"segments_kept": len(parts),
"commercials": len(commercials),
}
rec.custom_properties = cp
rec.save(update_fields=["custom_properties"])
if selected_ini:
cp["comskip"]["ini_path"] = selected_ini
_persist_custom_properties()
_ws('completed', {"commercials": len(commercials), "segments_kept": len(parts)})
return "ok"
except Exception as e:
cp["comskip"] = {"status": "error", "reason": str(e)}
rec.custom_properties = cp
rec.save(update_fields=["custom_properties"])
_persist_custom_properties()
_ws('error', {"reason": str(e)})
return f"error:{e}"
def _resolve_poster_for_program(channel_name, program):
@ -2236,7 +2509,9 @@ def bulk_create_channels_from_streams(self, stream_ids, channel_profile_ids=None
for i in range(0, total_streams, batch_size):
batch_stream_ids = stream_ids[i:i + batch_size]
batch_streams = Stream.objects.filter(id__in=batch_stream_ids)
# Fetch streams and preserve the order from batch_stream_ids
batch_streams_dict = {stream.id: stream for stream in Stream.objects.filter(id__in=batch_stream_ids)}
batch_streams = [batch_streams_dict[stream_id] for stream_id in batch_stream_ids if stream_id in batch_streams_dict]
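# A minimal sketch of the order-preserving lookup above: filter(id__in=...)
# does not guarantee input order, so rows are keyed by id and re-emitted in
# the order the caller supplied (model and ids are placeholders).
def fetch_in_order(model, ids):
    by_id = {obj.id: obj for obj in model.objects.filter(id__in=ids)}
    return [by_id[i] for i in ids if i in by_id]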
# Send progress update
send_websocket_update('updates', 'update', {
@ -2311,15 +2586,16 @@ def bulk_create_channels_from_streams(self, stream_ids, channel_profile_ids=None
# Store profile IDs for this channel
profile_map.append(channel_profile_ids)
# Handle logo
if stream.logo_url:
# Handle logo - validate URL length to avoid PostgreSQL btree index errors
validated_logo_url = validate_logo_url(stream.logo_url) if stream.logo_url else None
if validated_logo_url:
logos_to_create.append(
Logo(
url=stream.logo_url,
url=validated_logo_url,
name=stream.name or stream.tvg_id,
)
)
logo_map.append(stream.logo_url)
logo_map.append(validated_logo_url)
else:
logo_map.append(None)
@ -2689,3 +2965,98 @@ def set_channels_logos_from_epg(self, channel_ids):
'error': str(e)
})
raise
@shared_task(bind=True)
def set_channels_tvg_ids_from_epg(self, channel_ids):
"""
Celery task to set channel TVG-IDs from EPG data for multiple channels
"""
from core.utils import send_websocket_update
task_id = self.request.id
total_channels = len(channel_ids)
updated_count = 0
errors = []
try:
logger.info(f"Starting EPG TVG-ID setting task for {total_channels} channels")
# Send initial progress
send_websocket_update('updates', 'update', {
'type': 'epg_tvg_id_setting_progress',
'task_id': task_id,
'progress': 0,
'total': total_channels,
'status': 'running',
'message': 'Starting EPG TVG-ID setting...'
})
batch_size = 100
for i in range(0, total_channels, batch_size):
batch_ids = channel_ids[i:i + batch_size]
batch_updates = []
# Get channels and their EPG data
channels = Channel.objects.filter(id__in=batch_ids).select_related('epg_data')
for channel in channels:
try:
if channel.epg_data and channel.epg_data.tvg_id:
if channel.tvg_id != channel.epg_data.tvg_id:
channel.tvg_id = channel.epg_data.tvg_id
batch_updates.append(channel)
updated_count += 1
except Exception as e:
errors.append(f"Channel {channel.id}: {str(e)}")
logger.error(f"Error processing channel {channel.id}: {e}")
# Bulk update the batch
if batch_updates:
Channel.objects.bulk_update(batch_updates, ['tvg_id'])
# Send progress update
progress = min(i + batch_size, total_channels)
send_websocket_update('updates', 'update', {
'type': 'epg_tvg_id_setting_progress',
'task_id': task_id,
'progress': progress,
'total': total_channels,
'status': 'running',
'message': f'Updated {updated_count} channel TVG-IDs...',
'updated_count': updated_count
})
# Send completion notification
send_websocket_update('updates', 'update', {
'type': 'epg_tvg_id_setting_progress',
'task_id': task_id,
'progress': total_channels,
'total': total_channels,
'status': 'completed',
'message': f'Successfully updated {updated_count} channel TVG-IDs from EPG data',
'updated_count': updated_count,
'error_count': len(errors),
'errors': errors
})
logger.info(f"EPG TVG-ID setting task completed. Updated {updated_count} channels")
return {
'status': 'completed',
'updated_count': updated_count,
'error_count': len(errors),
'errors': errors
}
except Exception as e:
logger.error(f"EPG TVG-ID setting task failed: {e}")
send_websocket_update('updates', 'update', {
'type': 'epg_tvg_id_setting_progress',
'task_id': task_id,
'progress': 0,
'total': total_channels,
'status': 'failed',
'message': f'Task failed: {str(e)}',
'error': str(e)
})
raise
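# A brief usage sketch (channel ids are hypothetical); progress arrives on the
# 'updates' websocket group as 'epg_tvg_id_setting_progress' messages emitted above.
result = set_channels_tvg_ids_from_epg.delay([101, 102, 103])
print(result.id)  # Celery task id, matched against task_id in the progress payload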

View file

View file

@ -0,0 +1,40 @@
from datetime import datetime, timedelta
from django.test import TestCase
from django.utils import timezone
from apps.channels.models import Channel, RecurringRecordingRule, Recording
from apps.channels.tasks import sync_recurring_rule_impl, purge_recurring_rule_impl
class RecurringRecordingRuleTasksTests(TestCase):
def test_sync_recurring_rule_creates_and_purges_recordings(self):
now = timezone.now()
channel = Channel.objects.create(channel_number=1, name='Test Channel')
start_time = (now + timedelta(minutes=15)).time().replace(second=0, microsecond=0)
end_time = (now + timedelta(minutes=75)).time().replace(second=0, microsecond=0)
rule = RecurringRecordingRule.objects.create(
channel=channel,
days_of_week=[now.weekday()],
start_time=start_time,
end_time=end_time,
)
created = sync_recurring_rule_impl(rule.id, drop_existing=True, horizon_days=1)
self.assertEqual(created, 1)
recording = Recording.objects.filter(custom_properties__rule__id=rule.id).first()
self.assertIsNotNone(recording)
self.assertEqual(recording.channel, channel)
self.assertEqual(recording.custom_properties.get('rule', {}).get('id'), rule.id)
expected_start = timezone.make_aware(
datetime.combine(recording.start_time.date(), start_time),
timezone.get_current_timezone(),
)
self.assertLess(abs((recording.start_time - expected_start).total_seconds()), 60)
removed = purge_recurring_rule_impl(rule.id)
self.assertEqual(removed, 1)
self.assertFalse(Recording.objects.filter(custom_properties__rule__id=rule.id).exists())

View file

@ -147,23 +147,37 @@ class EPGGridAPIView(APIView):
f"EPGGridAPIView: Found {count} program(s), including recently ended, currently running, and upcoming shows."
)
# Generate dummy programs for channels that have no EPG data
# Generate dummy programs for channels that have no EPG data OR dummy EPG sources
from apps.channels.models import Channel
from apps.epg.models import EPGSource
from django.db.models import Q
# Get channels with no EPG data
# Get channels with no EPG data at all (standard dummy)
channels_without_epg = Channel.objects.filter(Q(epg_data__isnull=True))
channels_count = channels_without_epg.count()
# Log more detailed information about channels missing EPG data
if channels_count > 0:
# Get channels with custom dummy EPG sources (generate on-demand with patterns)
channels_with_custom_dummy = Channel.objects.filter(
epg_data__epg_source__source_type='dummy'
).distinct()
# Log what we found
without_count = channels_without_epg.count()
custom_count = channels_with_custom_dummy.count()
if without_count > 0:
channel_names = [f"{ch.name} (ID: {ch.id})" for ch in channels_without_epg]
logger.warning(
f"EPGGridAPIView: Missing EPG data for these channels: {', '.join(channel_names)}"
logger.debug(
f"EPGGridAPIView: Channels needing standard dummy EPG: {', '.join(channel_names)}"
)
if custom_count > 0:
channel_names = [f"{ch.name} (ID: {ch.id})" for ch in channels_with_custom_dummy]
logger.debug(
f"EPGGridAPIView: Channels needing custom dummy EPG: {', '.join(channel_names)}"
)
logger.debug(
f"EPGGridAPIView: Found {channels_count} channels with no EPG data."
f"EPGGridAPIView: Found {without_count} channels needing standard dummy, {custom_count} needing custom dummy EPG."
)
# Serialize the regular programs
@ -205,12 +219,91 @@ class EPGGridAPIView(APIView):
# Generate and append dummy programs
dummy_programs = []
for channel in channels_without_epg:
# Use the channel UUID as tvg_id for dummy programs to match in the guide
# Import the function from output.views
from apps.output.views import generate_dummy_programs as gen_dummy_progs
# Handle channels with CUSTOM dummy EPG sources (with patterns)
for channel in channels_with_custom_dummy:
# For dummy EPGs, ALWAYS use channel UUID to ensure unique programs per channel
# This prevents multiple channels assigned to the same dummy EPG from showing identical data
# Each channel gets its own unique program data even if they share the same EPG source
dummy_tvg_id = str(channel.uuid)
try:
# Create programs every 4 hours for the next 24 hours
# Get the custom dummy EPG source
epg_source = channel.epg_data.epg_source if channel.epg_data else None
logger.debug(f"Generating custom dummy programs for channel: {channel.name} (ID: {channel.id})")
# Determine which name to parse based on custom properties
name_to_parse = channel.name
if epg_source and epg_source.custom_properties:
custom_props = epg_source.custom_properties
name_source = custom_props.get('name_source')
if name_source == 'stream':
# Get the stream index (1-based from user, convert to 0-based)
stream_index = custom_props.get('stream_index', 1) - 1
# Get streams ordered by channelstream order
channel_streams = channel.streams.all().order_by('channelstream__order')
if channel_streams.exists() and 0 <= stream_index < channel_streams.count():
stream = list(channel_streams)[stream_index]
name_to_parse = stream.name
logger.debug(f"Using stream name for parsing: {name_to_parse} (stream index: {stream_index})")
else:
logger.warning(f"Stream index {stream_index} not found for channel {channel.name}, falling back to channel name")
elif name_source == 'channel':
logger.debug(f"Using channel name for parsing: {name_to_parse}")
# Generate programs using custom patterns from the dummy EPG source
# Use the same tvg_id that will be set in the program data
generated = gen_dummy_progs(
channel_id=dummy_tvg_id,
channel_name=name_to_parse,
num_days=1,
program_length_hours=4,
epg_source=epg_source
)
# Custom dummy should always return data (either from patterns or fallback)
if generated:
logger.debug(f"Generated {len(generated)} custom dummy programs for {channel.name}")
# Convert generated programs to API format
for program in generated:
dummy_program = {
"id": f"dummy-custom-{channel.id}-{program['start_time'].hour}",
"epg": {"tvg_id": dummy_tvg_id, "name": channel.name},
"start_time": program['start_time'].isoformat(),
"end_time": program['end_time'].isoformat(),
"title": program['title'],
"description": program['description'],
"tvg_id": dummy_tvg_id,
"sub_title": None,
"custom_properties": None,
}
dummy_programs.append(dummy_program)
else:
logger.warning(f"No programs generated for custom dummy EPG channel: {channel.name}")
except Exception as e:
logger.error(
f"Error creating custom dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}"
)
# Handle channels with NO EPG data (standard dummy with humorous descriptions)
for channel in channels_without_epg:
# For channels with no EPG, use UUID to ensure uniqueness (matches frontend logic)
# The frontend uses: tvgRecord?.tvg_id ?? channel.uuid
# Since there's no EPG data, it will fall back to UUID
dummy_tvg_id = str(channel.uuid)
try:
logger.debug(f"Generating standard dummy programs for channel: {channel.name} (ID: {channel.id})")
# Create programs every 4 hours for the next 24 hours with humorous descriptions
for hour_offset in range(0, 24, 4):
# Use timedelta for time arithmetic instead of replace() to avoid hour overflow
start_time = now + timedelta(hours=hour_offset)
@ -238,7 +331,7 @@ class EPGGridAPIView(APIView):
# Create a dummy program in the same format as regular programs
dummy_program = {
"id": f"dummy-{channel.id}-{hour_offset}", # Create a unique ID
"id": f"dummy-standard-{channel.id}-{hour_offset}",
"epg": {"tvg_id": dummy_tvg_id, "name": channel.name},
"start_time": start_time.isoformat(),
"end_time": end_time.isoformat(),
@ -252,7 +345,7 @@ class EPGGridAPIView(APIView):
except Exception as e:
logger.error(
f"Error creating dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}"
f"Error creating standard dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}"
)
# Combine regular and dummy programs
@ -284,7 +377,22 @@ class EPGImportAPIView(APIView):
)
def post(self, request, format=None):
logger.info("EPGImportAPIView: Received request to import EPG data.")
refresh_epg_data.delay(request.data.get("id", None)) # Trigger Celery task
epg_id = request.data.get("id", None)
# Check if this is a dummy EPG source
try:
from .models import EPGSource
epg_source = EPGSource.objects.get(id=epg_id)
if epg_source.source_type == 'dummy':
logger.info(f"EPGImportAPIView: Skipping refresh for dummy EPG source {epg_id}")
return Response(
{"success": False, "message": "Dummy EPG sources do not require refreshing."},
status=status.HTTP_400_BAD_REQUEST,
)
except EPGSource.DoesNotExist:
pass # Let the task handle the missing source
refresh_epg_data.delay(epg_id) # Trigger Celery task
logger.info("EPGImportAPIView: Task dispatched to refresh EPG data.")
return Response(
{"success": True, "message": "EPG data import initiated."},
@ -308,3 +416,4 @@ class EPGDataViewSet(viewsets.ReadOnlyModelViewSet):
return [perm() for perm in permission_classes_by_action[self.action]]
except KeyError:
return [Authenticated()]

View file

@ -0,0 +1,23 @@
# Generated by Django 5.2.4 on 2025-10-17 17:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('epg', '0017_alter_epgsource_url'),
]
operations = [
migrations.AddField(
model_name='epgsource',
name='custom_properties',
field=models.JSONField(blank=True, default=dict, help_text='Custom properties for dummy EPG configuration (regex patterns, timezone, duration, etc.)', null=True),
),
migrations.AlterField(
model_name='epgsource',
name='source_type',
field=models.CharField(choices=[('xmltv', 'XMLTV URL'), ('schedules_direct', 'Schedules Direct API'), ('dummy', 'Custom Dummy EPG')], max_length=20),
),
]

View file

@ -0,0 +1,18 @@
# Generated by Django 5.2.4 on 2025-10-22 21:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('epg', '0018_epgsource_custom_properties_and_more'),
]
operations = [
migrations.AlterField(
model_name='programdata',
name='sub_title',
field=models.TextField(blank=True, null=True),
),
]

View file

@ -0,0 +1,119 @@
# Generated migration to replace {time} placeholders with {starttime}
import re
from django.db import migrations
def migrate_time_placeholders(apps, schema_editor):
"""
Replace {time} with {starttime} and {time24} with {starttime24}
in all dummy EPG source custom_properties templates.
"""
EPGSource = apps.get_model('epg', 'EPGSource')
# Fields that contain templates with placeholders
template_fields = [
'title_template',
'description_template',
'upcoming_title_template',
'upcoming_description_template',
'ended_title_template',
'ended_description_template',
'channel_logo_url',
'program_poster_url',
]
# Get all dummy EPG sources
dummy_sources = EPGSource.objects.filter(source_type='dummy')
updated_count = 0
for source in dummy_sources:
if not source.custom_properties:
continue
modified = False
custom_props = source.custom_properties.copy()
for field in template_fields:
if field in custom_props and custom_props[field]:
original_value = custom_props[field]
# Replace {time24} before {time} so the longer placeholder is handled by its
# own rule; the r'\{time\}' pattern requires an immediate closing brace, so it
# can never partially rewrite {time24}.
new_value = original_value
new_value = re.sub(r'\{time24\}', '{starttime24}', new_value)
new_value = re.sub(r'\{time\}', '{starttime}', new_value)
if new_value != original_value:
custom_props[field] = new_value
modified = True
if modified:
source.custom_properties = custom_props
source.save(update_fields=['custom_properties'])
updated_count += 1
if updated_count > 0:
print(f"Migration complete: Updated {updated_count} dummy EPG source(s) with new placeholder names.")
else:
print("No dummy EPG sources needed placeholder updates.")
def reverse_migration(apps, schema_editor):
"""
Reverse the migration by replacing {starttime} back to {time}.
"""
EPGSource = apps.get_model('epg', 'EPGSource')
template_fields = [
'title_template',
'description_template',
'upcoming_title_template',
'upcoming_description_template',
'ended_title_template',
'ended_description_template',
'channel_logo_url',
'program_poster_url',
]
dummy_sources = EPGSource.objects.filter(source_type='dummy')
updated_count = 0
for source in dummy_sources:
if not source.custom_properties:
continue
modified = False
custom_props = source.custom_properties.copy()
for field in template_fields:
if field in custom_props and custom_props[field]:
original_value = custom_props[field]
# Reverse the replacements
new_value = original_value
new_value = re.sub(r'\{starttime24\}', '{time24}', new_value)
new_value = re.sub(r'\{starttime\}', '{time}', new_value)
if new_value != original_value:
custom_props[field] = new_value
modified = True
if modified:
source.custom_properties = custom_props
source.save(update_fields=['custom_properties'])
updated_count += 1
if updated_count > 0:
print(f"Reverse migration complete: Reverted {updated_count} dummy EPG source(s) to old placeholder names.")
class Migration(migrations.Migration):
dependencies = [
('epg', '0019_alter_programdata_sub_title'),
]
operations = [
migrations.RunPython(migrate_time_placeholders, reverse_migration),
]

View file

@ -8,6 +8,7 @@ class EPGSource(models.Model):
SOURCE_TYPE_CHOICES = [
('xmltv', 'XMLTV URL'),
('schedules_direct', 'Schedules Direct API'),
('dummy', 'Custom Dummy EPG'),
]
STATUS_IDLE = 'idle'
@ -38,6 +39,12 @@ class EPGSource(models.Model):
refresh_task = models.ForeignKey(
PeriodicTask, on_delete=models.SET_NULL, null=True, blank=True
)
custom_properties = models.JSONField(
default=dict,
blank=True,
null=True,
help_text="Custom properties for dummy EPG configuration (regex patterns, timezone, duration, etc.)"
)
status = models.CharField(
max_length=20,
choices=STATUS_CHOICES,
@ -148,7 +155,7 @@ class ProgramData(models.Model):
start_time = models.DateTimeField()
end_time = models.DateTimeField()
title = models.CharField(max_length=255)
sub_title = models.CharField(max_length=255, blank=True, null=True)
sub_title = models.TextField(blank=True, null=True)
description = models.TextField(blank=True, null=True)
tvg_id = models.CharField(max_length=255, null=True, blank=True)
custom_properties = models.JSONField(default=dict, blank=True, null=True)

View file

@ -4,7 +4,7 @@ from .models import EPGSource, EPGData, ProgramData
from apps.channels.models import Channel
class EPGSourceSerializer(serializers.ModelSerializer):
epg_data_ids = serializers.SerializerMethodField()
epg_data_count = serializers.SerializerMethodField()
read_only_fields = ['created_at', 'updated_at']
url = serializers.CharField(
required=False,
@ -28,11 +28,13 @@ class EPGSourceSerializer(serializers.ModelSerializer):
'last_message',
'created_at',
'updated_at',
'epg_data_ids'
'custom_properties',
'epg_data_count'
]
def get_epg_data_ids(self, obj):
return list(obj.epgs.values_list('id', flat=True))
def get_epg_data_count(self, obj):
"""Return the count of EPG data entries instead of all IDs to prevent large payloads"""
return obj.epgs.count()
class ProgramDataSerializer(serializers.ModelSerializer):
class Meta:

View file

@ -1,9 +1,9 @@
from django.db.models.signals import post_save, post_delete, pre_save
from django.dispatch import receiver
from .models import EPGSource
from .models import EPGSource, EPGData
from .tasks import refresh_epg_data, delete_epg_refresh_task_by_id
from django_celery_beat.models import PeriodicTask, IntervalSchedule
from core.utils import is_protected_path
from core.utils import is_protected_path, send_websocket_update
import json
import logging
import os
@ -12,15 +12,77 @@ logger = logging.getLogger(__name__)
@receiver(post_save, sender=EPGSource)
def trigger_refresh_on_new_epg_source(sender, instance, created, **kwargs):
# Trigger refresh only if the source is newly created and active
if created and instance.is_active:
# Trigger refresh only if the source is newly created, active, and not a dummy EPG
if created and instance.is_active and instance.source_type != 'dummy':
refresh_epg_data.delay(instance.id)
@receiver(post_save, sender=EPGSource)
def create_dummy_epg_data(sender, instance, created, **kwargs):
"""
Automatically create EPGData for dummy EPG sources when they are created.
This allows channels to be assigned to dummy EPGs immediately without
requiring a refresh first.
"""
if instance.source_type == 'dummy':
# Ensure dummy EPGs always have idle status and no status message
if instance.status != EPGSource.STATUS_IDLE or instance.last_message:
instance.status = EPGSource.STATUS_IDLE
instance.last_message = None
instance.save(update_fields=['status', 'last_message'])
# Create a URL-friendly tvg_id from the dummy EPG name
# Replace spaces and special characters with underscores
friendly_tvg_id = instance.name.replace(' ', '_').replace('-', '_')
# Remove any characters that aren't alphanumeric or underscores
friendly_tvg_id = ''.join(c for c in friendly_tvg_id if c.isalnum() or c == '_')
# Convert to lowercase for consistency
friendly_tvg_id = friendly_tvg_id.lower()
# Prefix with 'dummy_' to make it clear this is a dummy EPG
friendly_tvg_id = f"dummy_{friendly_tvg_id}"
# Create or update the EPGData record
epg_data, data_created = EPGData.objects.get_or_create(
tvg_id=friendly_tvg_id,
epg_source=instance,
defaults={
'name': instance.name,
'icon_url': None
}
)
# Update name if it changed and record already existed
if not data_created and epg_data.name != instance.name:
epg_data.name = instance.name
epg_data.save(update_fields=['name'])
if data_created:
logger.info(f"Auto-created EPGData for dummy EPG source: {instance.name} (ID: {instance.id})")
# Send websocket update to notify frontend that EPG data has been created
# This allows the channel form to immediately show the new dummy EPG without refreshing
send_websocket_update('updates', 'update', {
'type': 'epg_data_created',
'source_id': instance.id,
'source_name': instance.name,
'epg_data_id': epg_data.id
})
else:
logger.debug(f"EPGData already exists for dummy EPG source: {instance.name} (ID: {instance.id})")
@receiver(post_save, sender=EPGSource)
def create_or_update_refresh_task(sender, instance, **kwargs):
"""
Create or update a Celery Beat periodic task when an EPGSource is created/updated.
Skip creating tasks for dummy EPG sources as they don't need refreshing.
"""
# Skip task creation for dummy EPGs
if instance.source_type == 'dummy':
# If there's an existing task, disable it
if instance.refresh_task:
instance.refresh_task.enabled = False
instance.refresh_task.save(update_fields=['enabled'])
return
task_name = f"epg_source-refresh-{instance.id}"
interval, _ = IntervalSchedule.objects.get_or_create(
every=int(instance.refresh_interval),
@ -80,7 +142,14 @@ def delete_refresh_task(sender, instance, **kwargs):
def update_status_on_active_change(sender, instance, **kwargs):
"""
When an EPGSource's is_active field changes, update the status accordingly.
For dummy EPGs, always force the status to idle and clear the status message.
"""
# Dummy EPGs should always be idle with no status message
if instance.source_type == 'dummy':
instance.status = EPGSource.STATUS_IDLE
instance.last_message = None
return
if instance.pk: # Only for existing records, not new ones
try:
# Get the current record from the database

View file

@ -24,7 +24,7 @@ from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from .models import EPGSource, EPGData, ProgramData
from core.utils import acquire_task_lock, release_task_lock, send_websocket_update, cleanup_memory
from core.utils import acquire_task_lock, release_task_lock, send_websocket_update, cleanup_memory, log_system_event
logger = logging.getLogger(__name__)
@ -133,8 +133,9 @@ def delete_epg_refresh_task_by_id(epg_id):
@shared_task
def refresh_all_epg_data():
logger.info("Starting refresh_epg_data task.")
active_sources = EPGSource.objects.filter(is_active=True)
logger.debug(f"Found {active_sources.count()} active EPGSource(s).")
# Exclude dummy EPG sources from refresh - they don't need refreshing
active_sources = EPGSource.objects.filter(is_active=True).exclude(source_type='dummy')
logger.debug(f"Found {active_sources.count()} active EPGSource(s) (excluding dummy EPGs).")
for source in active_sources:
refresh_epg_data(source.id)
@ -180,6 +181,13 @@ def refresh_epg_data(source_id):
gc.collect()
return
# Skip refresh for dummy EPG sources - they don't need refreshing
if source.source_type == 'dummy':
logger.info(f"Skipping refresh for dummy EPG source {source.name} (ID: {source_id})")
release_task_lock('refresh_epg_data', source_id)
gc.collect()
return
# Continue with the normal processing...
logger.info(f"Processing EPGSource: {source.name} (type: {source.source_type})")
if source.source_type == 'xmltv':
@ -877,7 +885,7 @@ def parse_channels_only(source):
# Change iterparse to look for both channel and programme elements
logger.debug(f"Creating iterparse context for channels and programmes")
channel_parser = etree.iterparse(source_file, events=('end',), tag=('channel', 'programme'), remove_blank_text=True)
channel_parser = etree.iterparse(source_file, events=('end',), tag=('channel', 'programme'), remove_blank_text=True, recover=True)
if process:
logger.debug(f"[parse_channels_only] Memory after creating iterparse: {process.memory_info().rss / 1024 / 1024:.2f} MB")
@ -1149,6 +1157,12 @@ def parse_programs_for_tvg_id(epg_id):
epg = EPGData.objects.get(id=epg_id)
epg_source = epg.epg_source
# Skip program parsing for dummy EPG sources - they don't have program data files
if epg_source.source_type == 'dummy':
logger.info(f"Skipping program parsing for dummy EPG source {epg_source.name} (ID: {epg_id})")
release_task_lock('parse_epg_programs', epg_id)
return
if not Channel.objects.filter(epg_data=epg).exists():
logger.info(f"No channels matched to EPG {epg.tvg_id}")
release_task_lock('parse_epg_programs', epg_id)
@ -1242,7 +1256,7 @@ def parse_programs_for_tvg_id(epg_id):
source_file = open(file_path, 'rb')
# Stream parse the file using lxml's iterparse
program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True)
program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True, recover=True)
for _, elem in program_parser:
if elem.get('channel') == epg.tvg_id:
@ -1482,6 +1496,15 @@ def parse_programs_for_source(epg_source, tvg_id=None):
epg_source.updated_at = timezone.now()
epg_source.save(update_fields=['status', 'last_message', 'updated_at'])
# Log system event for EPG refresh
log_system_event(
event_type='epg_refresh',
source_name=epg_source.name,
programs=program_count,
channels=channel_count,
updated=updated_count,
)
# Send completion notification with status
send_epg_update(epg_source.id, "parsing_programs", 100,
status="success",
@ -1943,3 +1966,20 @@ def detect_file_format(file_path=None, content=None):
# If we reach here, we couldn't reliably determine the format
return format_type, is_compressed, file_extension
def generate_dummy_epg(source):
"""
DEPRECATED: This function is no longer used.
Dummy EPG programs are now generated on-demand when they are requested
(during XMLTV export or EPG grid display), rather than being pre-generated
and stored in the database.
See: apps/output/views.py - generate_custom_dummy_programs()
This function remains for backward compatibility but should not be called.
"""
logger.warning(f"generate_dummy_epg() called for {source.name} but this function is deprecated. "
f"Dummy EPG programs are now generated on-demand.")
return True

View file

@ -81,6 +81,13 @@ class M3UAccountViewSet(viewsets.ModelViewSet):
account_type = response.data.get("account_type")
account_id = response.data.get("id")
# Notify frontend that a new playlist was created
from core.utils import send_websocket_update
send_websocket_update('updates', 'update', {
'type': 'playlist_created',
'playlist_id': account_id
})
if account_type == M3UAccount.Types.XC:
refresh_m3u_groups(account_id)
@ -145,6 +152,46 @@ class M3UAccountViewSet(viewsets.ModelViewSet):
and not old_vod_enabled
and new_vod_enabled
):
# Create Uncategorized categories immediately so they're available in the UI
from apps.vod.models import VODCategory, M3UVODCategoryRelation
# Create movie Uncategorized category
movie_category, _ = VODCategory.objects.get_or_create(
name="Uncategorized",
category_type="movie",
defaults={}
)
# Create series Uncategorized category
series_category, _ = VODCategory.objects.get_or_create(
name="Uncategorized",
category_type="series",
defaults={}
)
# Create relations for both categories (disabled by default until first refresh)
account_custom_props = instance.custom_properties or {}
auto_enable_new = account_custom_props.get("auto_enable_new_groups_vod", True)
M3UVODCategoryRelation.objects.get_or_create(
category=movie_category,
m3u_account=instance,
defaults={
'enabled': auto_enable_new,
'custom_properties': {}
}
)
M3UVODCategoryRelation.objects.get_or_create(
category=series_category,
m3u_account=instance,
defaults={
'enabled': auto_enable_new,
'custom_properties': {}
}
)
# Trigger full VOD refresh
from apps.vod.tasks import refresh_vod_content
refresh_vod_content.delay(instance.id)

View file

@ -136,6 +136,9 @@ class M3UAccountSerializer(serializers.ModelSerializer):
validators=[validate_flexible_url],
)
enable_vod = serializers.BooleanField(required=False, write_only=True)
auto_enable_new_groups_live = serializers.BooleanField(required=False, write_only=True)
auto_enable_new_groups_vod = serializers.BooleanField(required=False, write_only=True)
auto_enable_new_groups_series = serializers.BooleanField(required=False, write_only=True)
class Meta:
model = M3UAccount
@ -164,6 +167,9 @@ class M3UAccountSerializer(serializers.ModelSerializer):
"status",
"last_message",
"enable_vod",
"auto_enable_new_groups_live",
"auto_enable_new_groups_vod",
"auto_enable_new_groups_series",
]
extra_kwargs = {
"password": {
@ -175,23 +181,36 @@ class M3UAccountSerializer(serializers.ModelSerializer):
def to_representation(self, instance):
data = super().to_representation(instance)
# Parse custom_properties to get VOD preference
# Parse custom_properties to get VOD preference and auto_enable_new_groups settings
custom_props = instance.custom_properties or {}
data["enable_vod"] = custom_props.get("enable_vod", False)
data["auto_enable_new_groups_live"] = custom_props.get("auto_enable_new_groups_live", True)
data["auto_enable_new_groups_vod"] = custom_props.get("auto_enable_new_groups_vod", True)
data["auto_enable_new_groups_series"] = custom_props.get("auto_enable_new_groups_series", True)
return data
def update(self, instance, validated_data):
# Handle enable_vod preference
# Handle enable_vod preference and auto_enable_new_groups settings
enable_vod = validated_data.pop("enable_vod", None)
auto_enable_new_groups_live = validated_data.pop("auto_enable_new_groups_live", None)
auto_enable_new_groups_vod = validated_data.pop("auto_enable_new_groups_vod", None)
auto_enable_new_groups_series = validated_data.pop("auto_enable_new_groups_series", None)
# Get existing custom_properties
custom_props = instance.custom_properties or {}
# Update preferences
if enable_vod is not None:
# Get existing custom_properties
custom_props = instance.custom_properties or {}
# Update VOD preference
custom_props["enable_vod"] = enable_vod
validated_data["custom_properties"] = custom_props
if auto_enable_new_groups_live is not None:
custom_props["auto_enable_new_groups_live"] = auto_enable_new_groups_live
if auto_enable_new_groups_vod is not None:
custom_props["auto_enable_new_groups_vod"] = auto_enable_new_groups_vod
if auto_enable_new_groups_series is not None:
custom_props["auto_enable_new_groups_series"] = auto_enable_new_groups_series
validated_data["custom_properties"] = custom_props
# Pop out channel group memberships so we can handle them manually
channel_group_data = validated_data.pop("channel_group", [])
@ -225,14 +244,20 @@ class M3UAccountSerializer(serializers.ModelSerializer):
return instance
def create(self, validated_data):
# Handle enable_vod preference during creation
# Handle enable_vod preference and auto_enable_new_groups settings during creation
enable_vod = validated_data.pop("enable_vod", False)
auto_enable_new_groups_live = validated_data.pop("auto_enable_new_groups_live", True)
auto_enable_new_groups_vod = validated_data.pop("auto_enable_new_groups_vod", True)
auto_enable_new_groups_series = validated_data.pop("auto_enable_new_groups_series", True)
# Parse existing custom_properties or create new
custom_props = validated_data.get("custom_properties", {})
# Set VOD preference
# Set preferences (default to True for auto_enable_new_groups)
custom_props["enable_vod"] = enable_vod
custom_props["auto_enable_new_groups_live"] = auto_enable_new_groups_live
custom_props["auto_enable_new_groups_vod"] = auto_enable_new_groups_vod
custom_props["auto_enable_new_groups_series"] = auto_enable_new_groups_series
validated_data["custom_properties"] = custom_props
return super().create(validated_data)
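# Illustrative shape of the custom_properties written above for a new account
# (values shown are the defaults applied when the client omits the flags):
#   {
#       "enable_vod": False,
#       "auto_enable_new_groups_live": True,
#       "auto_enable_new_groups_vod": True,
#       "auto_enable_new_groups_series": True,
#   }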

View file

@ -24,11 +24,13 @@ from core.utils import (
acquire_task_lock,
release_task_lock,
natural_sort_key,
log_system_event,
)
from core.models import CoreSettings, UserAgent
from asgiref.sync import async_to_sync
from core.xtream_codes import Client as XCClient
from core.utils import send_websocket_update
from .utils import normalize_stream_url
logger = logging.getLogger(__name__)
@ -219,6 +221,10 @@ def fetch_m3u_lines(account, use_cache=False):
# Has HTTP URLs, might be a simple M3U without headers
is_valid_m3u = True
logger.info("Content validated as M3U: contains HTTP URLs")
elif any(line.strip().startswith(('rtsp', 'rtp', 'udp')) for line in content_lines):
# Has RTSP/RTP/UDP URLs, might be a simple M3U without headers
is_valid_m3u = True
logger.info("Content validated as M3U: contains RTSP/RTP/UDP URLs")
if not is_valid_m3u:
# Log what we actually received for debugging
@ -434,25 +440,51 @@ def get_case_insensitive_attr(attributes, key, default=""):
def parse_extinf_line(line: str) -> dict:
"""
Parse an EXTINF line from an M3U file.
This function removes the "#EXTINF:" prefix, then splits the remaining
string on the first comma that is not enclosed in quotes.
This function removes the "#EXTINF:" prefix, then extracts all key="value" attributes,
and treats everything after the last attribute as the display name.
Returns a dictionary with:
- 'attributes': a dict of attribute key/value pairs (e.g. tvg-id, tvg-logo, group-title)
- 'display_name': the text after the comma (the fallback display name)
- 'display_name': the text after the attributes (the fallback display name)
- 'name': the tvg-name value if present, else tvc-guide-title, else the display name.
"""
if not line.startswith("#EXTINF:"):
return None
content = line[len("#EXTINF:") :].strip()
# Split on the first comma that is not inside quotes.
parts = re.split(r',(?=(?:[^"]*"[^"]*")*[^"]*$)', content, maxsplit=1)
if len(parts) != 2:
return None
attributes_part, display_name = parts[0], parts[1].strip()
attrs = dict(re.findall(r'([^\s]+)=["\']([^"\']+)["\']', attributes_part))
# Use tvg-name attribute if available; otherwise, use the display name.
name = get_case_insensitive_attr(attrs, "tvg-name", display_name)
# Single pass: extract all attributes AND track the last attribute position
# This regex matches both key="value" and key='value' patterns
attrs = {}
last_attr_end = 0
# Use a single regex that handles both quote types; note that a backreference
# does not work inside a character class, so the value is matched lazily with .*?
for match in re.finditer(r'([^\s]+)=(["\'])(.*?)\2', content):
key = match.group(1)
value = match.group(3)
attrs[key] = value
last_attr_end = match.end()
# Everything after the last attribute (skipping leading comma and whitespace) is the display name
if last_attr_end > 0:
remaining = content[last_attr_end:].strip()
# Remove leading comma if present
if remaining.startswith(','):
remaining = remaining[1:].strip()
display_name = remaining
else:
# No attributes found, try the old comma-split method as fallback
parts = content.split(',', 1)
if len(parts) == 2:
display_name = parts[1].strip()
else:
display_name = content.strip()
# Use tvg-name attribute if available; otherwise try tvc-guide-title, then fall back to display name.
name = get_case_insensitive_attr(attrs, "tvg-name", None)
if not name:
name = get_case_insensitive_attr(attrs, "tvc-guide-title", None)
if not name:
name = display_name
return {"attributes": attrs, "display_name": display_name, "name": name}
@ -488,25 +520,29 @@ def process_groups(account, groups):
}
logger.info(f"Currently {len(existing_groups)} existing groups")
group_objs = []
# Check if we should auto-enable new groups based on account settings
account_custom_props = account.custom_properties or {}
auto_enable_new_groups_live = account_custom_props.get("auto_enable_new_groups_live", True)
# Separate existing groups from groups that need to be created
existing_group_objs = []
groups_to_create = []
for group_name, custom_props in groups.items():
logger.debug(f"Handling group for M3U account {account.id}: {group_name}")
if group_name not in existing_groups:
groups_to_create.append(
ChannelGroup(
name=group_name,
)
)
if group_name in existing_groups:
existing_group_objs.append(existing_groups[group_name])
else:
group_objs.append(existing_groups[group_name])
groups_to_create.append(ChannelGroup(name=group_name))
# Create new groups and fetch them back with IDs
newly_created_group_objs = []
if groups_to_create:
logger.debug(f"Creating {len(groups_to_create)} groups")
created = ChannelGroup.bulk_create_and_fetch(groups_to_create)
logger.debug(f"Created {len(created)} groups")
group_objs.extend(created)
logger.info(f"Creating {len(groups_to_create)} new groups for account {account.id}")
newly_created_group_objs = list(ChannelGroup.bulk_create_and_fetch(groups_to_create))
logger.debug(f"Successfully created {len(newly_created_group_objs)} new groups")
# Combine all groups
all_group_objs = existing_group_objs + newly_created_group_objs
# Get existing relationships for this account
existing_relationships = {
@ -536,7 +572,7 @@ def process_groups(account, groups):
relations_to_delete.append(rel)
logger.debug(f"Marking relationship for deletion: group '{group_name}' no longer exists in source for account {account.id}")
for group in group_objs:
for group in all_group_objs:
custom_props = groups.get(group.name, {})
if group.name in existing_relationships:
@ -566,35 +602,17 @@ def process_groups(account, groups):
else:
logger.debug(f"xc_id unchanged for group '{group.name}' - account {account.id}")
else:
# Create new relationship - but check if there's an existing relationship that might have user settings
# This can happen if the group was temporarily removed and is now back
try:
potential_existing = ChannelGroupM3UAccount.objects.filter(
m3u_account=account,
channel_group=group
).first()
# Create new relationship - this group is new to this M3U account
# Use the auto_enable setting to determine if it should start enabled
if not auto_enable_new_groups_live:
logger.info(f"Group '{group.name}' is new to account {account.id} - creating relationship but DISABLED (auto_enable_new_groups_live=False)")
if potential_existing:
# Merge with existing custom properties to preserve user settings
existing_custom_props = potential_existing.custom_properties or {}
# Merge new properties with existing ones
merged_custom_props = existing_custom_props.copy()
merged_custom_props.update(custom_props)
custom_props = merged_custom_props
logger.debug(f"Merged custom properties for existing relationship: group '{group.name}' - account {account.id}")
except Exception as e:
logger.debug(f"Could not check for existing relationship: {str(e)}")
# Fall back to using just the new custom properties
pass
# Create new relationship
relations_to_create.append(
ChannelGroupM3UAccount(
channel_group=group,
m3u_account=account,
custom_properties=custom_props,
enabled=True, # Default to enabled
enabled=auto_enable_new_groups_live,
)
)
@ -774,7 +792,7 @@ def process_xc_category_direct(account_id, batch, groups, hash_keys):
group_title = group_name
stream_hash = Stream.generate_hash_key(
name, url, tvg_id, hash_keys
name, url, tvg_id, hash_keys, m3u_id=account_id
)
stream_props = {
"name": name,
@ -908,6 +926,12 @@ def process_m3u_batch_direct(account_id, batch, groups, hash_keys):
for stream_info in batch:
try:
name, url = stream_info["name"], stream_info["url"]
# Validate URL length - maximum of 4096 characters
if url and len(url) > 4096:
logger.warning(f"Skipping stream '{name}': URL too long ({len(url)} characters, max 4096)")
continue
tvg_id, tvg_logo = get_case_insensitive_attr(
stream_info["attributes"], "tvg-id", ""
), get_case_insensitive_attr(stream_info["attributes"], "tvg-logo", "")
@ -942,7 +966,7 @@ def process_m3u_batch_direct(account_id, batch, groups, hash_keys):
)
continue
stream_hash = Stream.generate_hash_key(name, url, tvg_id, hash_keys)
stream_hash = Stream.generate_hash_key(name, url, tvg_id, hash_keys, m3u_id=account_id)
stream_props = {
"name": name,
"url": url,
@ -1194,52 +1218,14 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False):
auth_result = xc_client.authenticate()
logger.debug(f"Authentication response: {auth_result}")
# Save account information to all active profiles
# Queue async profile refresh task to run in background
# This prevents any delay in the main refresh process
try:
from apps.m3u.models import M3UAccountProfile
profiles = M3UAccountProfile.objects.filter(
m3u_account=account,
is_active=True
)
# Update each profile with account information using its own transformed credentials
for profile in profiles:
try:
# Get transformed credentials for this specific profile
profile_url, profile_username, profile_password = get_transformed_credentials(account, profile)
# Create a separate XC client for this profile's credentials
with XCClient(
profile_url,
profile_username,
profile_password,
user_agent_string
) as profile_client:
# Authenticate with this profile's credentials
if profile_client.authenticate():
# Get account information specific to this profile's credentials
profile_account_info = profile_client.get_account_info()
# Merge with existing custom_properties if they exist
existing_props = profile.custom_properties or {}
existing_props.update(profile_account_info)
profile.custom_properties = existing_props
profile.save(update_fields=['custom_properties'])
logger.info(f"Updated account information for profile '{profile.name}' with transformed credentials")
else:
logger.warning(f"Failed to authenticate profile '{profile.name}' with transformed credentials")
except Exception as profile_error:
logger.error(f"Failed to update account information for profile '{profile.name}': {str(profile_error)}")
# Continue with other profiles even if one fails
logger.info(f"Processed account information for {profiles.count()} profiles for account {account.name}")
except Exception as save_error:
logger.warning(f"Failed to process profile account information: {str(save_error)}")
# Don't fail the whole process if saving account info fails
logger.info(f"Queueing background profile refresh for account {account.name}")
refresh_account_profiles.delay(account.id)
except Exception as e:
logger.warning(f"Failed to queue profile refresh task: {str(e)}")
# Don't fail the main refresh if profile refresh can't be queued
except Exception as e:
error_msg = f"Failed to authenticate with XC server: {str(e)}"
@ -1381,10 +1367,12 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False):
)
problematic_lines.append((line_index + 1, line[:200]))
elif extinf_data and line.startswith("http"):
elif extinf_data and (line.startswith("http") or line.startswith("rtsp") or line.startswith("rtp") or line.startswith("udp")):
url_count += 1
# Normalize UDP URLs only (e.g., remove VLC-specific @ prefix)
normalized_url = normalize_stream_url(line) if line.startswith("udp") else line
# Associate URL with the last EXTINF line
extinf_data[-1]["url"] = line
extinf_data[-1]["url"] = normalized_url
valid_stream_count += 1
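# Illustrative effect of the UDP normalization above (normalize_stream_url is
# assumed, per the comment, to strip the VLC-style '@' host prefix):
#   "udp://@239.255.0.1:1234"  ->  "udp://239.255.0.1:1234"
# HTTP/RTSP/RTP URLs are passed through unchanged.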
# Periodically log progress for large files
@ -1562,7 +1550,7 @@ def sync_auto_channels(account_id, scan_start_time=None):
# Get force_dummy_epg, group_override, and regex patterns from group custom_properties
group_custom_props = {}
force_dummy_epg = False
force_dummy_epg = False # Backward compatibility: legacy option to disable EPG
override_group_id = None
name_regex_pattern = None
name_replace_pattern = None
@ -1571,6 +1559,8 @@ def sync_auto_channels(account_id, scan_start_time=None):
channel_sort_order = None
channel_sort_reverse = False
stream_profile_id = None
custom_logo_id = None
custom_epg_id = None # New option: select specific EPG source (takes priority over force_dummy_epg)
if group_relation.custom_properties:
group_custom_props = group_relation.custom_properties
force_dummy_epg = group_custom_props.get("force_dummy_epg", False)
@ -1581,11 +1571,13 @@ def sync_auto_channels(account_id, scan_start_time=None):
)
name_match_regex = group_custom_props.get("name_match_regex")
channel_profile_ids = group_custom_props.get("channel_profile_ids")
custom_epg_id = group_custom_props.get("custom_epg_id")
channel_sort_order = group_custom_props.get("channel_sort_order")
channel_sort_reverse = group_custom_props.get(
"channel_sort_reverse", False
)
stream_profile_id = group_custom_props.get("stream_profile_id")
custom_logo_id = group_custom_props.get("custom_logo_id")
# Determine which group to use for created channels
target_group = channel_group
@ -1840,7 +1832,25 @@ def sync_auto_channels(account_id, scan_start_time=None):
# Handle logo updates
current_logo = None
if stream.logo_url:
if custom_logo_id:
# Use the custom logo specified in group settings
from apps.channels.models import Logo
try:
current_logo = Logo.objects.get(id=custom_logo_id)
except Logo.DoesNotExist:
logger.warning(
f"Custom logo with ID {custom_logo_id} not found for existing channel, falling back to stream logo"
)
# Fall back to stream logo if custom logo not found
if stream.logo_url:
current_logo, _ = Logo.objects.get_or_create(
url=stream.logo_url,
defaults={
"name": stream.name or stream.tvg_id or "Unknown"
},
)
elif stream.logo_url:
# No custom logo configured, use stream logo
from apps.channels.models import Logo
current_logo, _ = Logo.objects.get_or_create(
@ -1856,10 +1866,42 @@ def sync_auto_channels(account_id, scan_start_time=None):
# Handle EPG data updates
current_epg_data = None
if stream.tvg_id and not force_dummy_epg:
if custom_epg_id:
# Use the custom EPG specified in group settings (e.g., a dummy EPG)
from apps.epg.models import EPGSource
try:
epg_source = EPGSource.objects.get(id=custom_epg_id)
# For dummy EPGs, select the first (and typically only) EPGData entry from this source
if epg_source.source_type == 'dummy':
current_epg_data = EPGData.objects.filter(
epg_source=epg_source
).first()
if not current_epg_data:
logger.warning(
f"No EPGData found for dummy EPG source {epg_source.name} (ID: {custom_epg_id})"
)
else:
# For non-dummy sources, try to find existing EPGData by tvg_id
if stream.tvg_id:
current_epg_data = EPGData.objects.filter(
tvg_id=stream.tvg_id,
epg_source=epg_source
).first()
except EPGSource.DoesNotExist:
logger.warning(
f"Custom EPG source with ID {custom_epg_id} not found for existing channel, falling back to auto-match"
)
# Fall back to auto-match by tvg_id
if stream.tvg_id and not force_dummy_epg:
current_epg_data = EPGData.objects.filter(
tvg_id=stream.tvg_id
).first()
elif stream.tvg_id and not force_dummy_epg:
# Auto-match EPG by tvg_id (original behavior)
current_epg_data = EPGData.objects.filter(
tvg_id=stream.tvg_id
).first()
# If force_dummy_epg is True and no custom_epg_id, current_epg_data stays None
if existing_channel.epg_data != current_epg_data:
existing_channel.epg_data = current_epg_data
@ -1949,19 +1991,81 @@ def sync_auto_channels(account_id, scan_start_time=None):
ChannelProfileMembership.objects.bulk_create(memberships)
# Try to match EPG data
if stream.tvg_id and not force_dummy_epg:
if custom_epg_id:
# Use the custom EPG specified in group settings (e.g., a dummy EPG)
from apps.epg.models import EPGSource
try:
epg_source = EPGSource.objects.get(id=custom_epg_id)
# For dummy EPGs, select the first (and typically only) EPGData entry from this source
if epg_source.source_type == 'dummy':
epg_data = EPGData.objects.filter(
epg_source=epg_source
).first()
if epg_data:
channel.epg_data = epg_data
channel.save(update_fields=["epg_data"])
else:
logger.warning(
f"No EPGData found for dummy EPG source {epg_source.name} (ID: {custom_epg_id})"
)
else:
# For non-dummy sources, try to find existing EPGData by tvg_id
if stream.tvg_id:
epg_data = EPGData.objects.filter(
tvg_id=stream.tvg_id,
epg_source=epg_source
).first()
if epg_data:
channel.epg_data = epg_data
channel.save(update_fields=["epg_data"])
except EPGSource.DoesNotExist:
logger.warning(
f"Custom EPG source with ID {custom_epg_id} not found, falling back to auto-match"
)
# Fall back to auto-match by tvg_id
if stream.tvg_id and not force_dummy_epg:
epg_data = EPGData.objects.filter(
tvg_id=stream.tvg_id
).first()
if epg_data:
channel.epg_data = epg_data
channel.save(update_fields=["epg_data"])
elif stream.tvg_id and not force_dummy_epg:
# Auto-match EPG by tvg_id (original behavior)
epg_data = EPGData.objects.filter(
tvg_id=stream.tvg_id
).first()
if epg_data:
channel.epg_data = epg_data
channel.save(update_fields=["epg_data"])
elif stream.tvg_id and force_dummy_epg:
elif force_dummy_epg:
# Force dummy EPG with no custom EPG selected (set to None)
channel.epg_data = None
channel.save(update_fields=["epg_data"])
# Handle logo
if stream.logo_url:
if custom_logo_id:
# Use the custom logo specified in group settings
from apps.channels.models import Logo
try:
custom_logo = Logo.objects.get(id=custom_logo_id)
channel.logo = custom_logo
channel.save(update_fields=["logo"])
except Logo.DoesNotExist:
logger.warning(
f"Custom logo with ID {custom_logo_id} not found, falling back to stream logo"
)
# Fall back to stream logo if custom logo not found
if stream.logo_url:
logo, _ = Logo.objects.get_or_create(
url=stream.logo_url,
defaults={
"name": stream.name or stream.tvg_id or "Unknown"
},
)
channel.logo = logo
channel.save(update_fields=["logo"])
elif stream.logo_url:
from apps.channels.models import Logo
logo, _ = Logo.objects.get_or_create(
@ -2128,6 +2232,106 @@ def get_transformed_credentials(account, profile=None):
return base_url, base_username, base_password
@shared_task
def refresh_account_profiles(account_id):
"""Refresh account information for all active profiles of an XC account.
This task runs asynchronously in the background after account refresh completes.
It includes rate limiting delays between profile authentications to prevent provider bans.
"""
from django.conf import settings
import time
try:
account = M3UAccount.objects.get(id=account_id, is_active=True)
if account.account_type != M3UAccount.Types.XC:
logger.debug(f"Account {account_id} is not XC type, skipping profile refresh")
return f"Account {account_id} is not an XtreamCodes account"
from apps.m3u.models import M3UAccountProfile
profiles = M3UAccountProfile.objects.filter(
m3u_account=account,
is_active=True
)
if not profiles.exists():
logger.info(f"No active profiles found for account {account.name}")
return f"No active profiles for account {account_id}"
# Get user agent for this account
try:
user_agent_string = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
if account.user_agent_id:
from core.models import UserAgent
ua_obj = UserAgent.objects.get(id=account.user_agent_id)
if ua_obj and hasattr(ua_obj, "user_agent") and ua_obj.user_agent:
user_agent_string = ua_obj.user_agent
except Exception as e:
logger.warning(f"Error getting user agent, using fallback: {str(e)}")
logger.debug(f"Using user agent for profile refresh: {user_agent_string}")
# Get rate limiting delay from settings
profile_delay = getattr(settings, 'XC_PROFILE_REFRESH_DELAY', 2.5)
profiles_updated = 0
profiles_failed = 0
logger.info(f"Starting background refresh for {profiles.count()} profiles of account {account.name}")
for idx, profile in enumerate(profiles):
try:
# Add delay between profiles to prevent rate limiting (except for first profile)
if idx > 0:
logger.info(f"Waiting {profile_delay}s before refreshing next profile to avoid rate limiting")
time.sleep(profile_delay)
# Get transformed credentials for this specific profile
profile_url, profile_username, profile_password = get_transformed_credentials(account, profile)
# Create a separate XC client for this profile's credentials
with XCClient(
profile_url,
profile_username,
profile_password,
user_agent_string
) as profile_client:
# Authenticate with this profile's credentials
if profile_client.authenticate():
# Get account information specific to this profile's credentials
profile_account_info = profile_client.get_account_info()
# Merge with existing custom_properties if they exist
existing_props = profile.custom_properties or {}
existing_props.update(profile_account_info)
profile.custom_properties = existing_props
profile.save(update_fields=['custom_properties'])
profiles_updated += 1
logger.info(f"Updated account information for profile '{profile.name}' ({profiles_updated}/{profiles.count()})")
else:
profiles_failed += 1
logger.warning(f"Failed to authenticate profile '{profile.name}' with transformed credentials")
except Exception as profile_error:
profiles_failed += 1
logger.error(f"Failed to update account information for profile '{profile.name}': {str(profile_error)}")
# Continue with other profiles even if one fails
result_msg = f"Profile refresh complete for account {account.name}: {profiles_updated} updated, {profiles_failed} failed"
logger.info(result_msg)
return result_msg
except M3UAccount.DoesNotExist:
error_msg = f"Account {account_id} not found"
logger.error(error_msg)
return error_msg
except Exception as e:
error_msg = f"Error refreshing profiles for account {account_id}: {str(e)}"
logger.error(error_msg)
return error_msg
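For context, a minimal sketch of how this task might be queued once the main account refresh finishes, assuming standard Celery semantics for @shared_task functions (the import path and the 5-second countdown are illustrative, not taken from this diff):

# Hypothetical call site for the task defined above. .apply_async() and .delay()
# are the standard Celery entry points for @shared_task functions; the countdown
# simply pushes the work slightly past the end of the main refresh.
from apps.m3u.tasks import refresh_account_profiles  # assumed module path

def queue_profile_refresh(account_id: int) -> None:
    refresh_account_profiles.apply_async(args=[account_id], countdown=5)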
@shared_task
def refresh_account_info(profile_id):
"""Refresh only the account information for a specific M3U profile."""
@ -2523,76 +2727,75 @@ def refresh_single_m3u_account(account_id):
if not all_xc_streams:
logger.warning("No streams collected from XC groups")
return f"No streams found for XC account {account_id}", None
else:
# Now batch by stream count (like standard M3U processing)
batches = [
all_xc_streams[i : i + BATCH_SIZE]
for i in range(0, len(all_xc_streams), BATCH_SIZE)
]
# Now batch by stream count (like standard M3U processing)
batches = [
all_xc_streams[i : i + BATCH_SIZE]
for i in range(0, len(all_xc_streams), BATCH_SIZE)
]
logger.info(f"Processing {len(all_xc_streams)} XC streams in {len(batches)} batches")
logger.info(f"Processing {len(all_xc_streams)} XC streams in {len(batches)} batches")
# Use threading for XC stream processing - now with consistent batch sizes
max_workers = min(4, len(batches))
logger.debug(f"Using {max_workers} threads for XC stream processing")
# Use threading for XC stream processing - now with consistent batch sizes
max_workers = min(4, len(batches))
logger.debug(f"Using {max_workers} threads for XC stream processing")
with ThreadPoolExecutor(max_workers=max_workers) as executor:
# Submit stream batch processing tasks (reuse standard M3U processing)
future_to_batch = {
executor.submit(process_m3u_batch_direct, account_id, batch, existing_groups, hash_keys): i
for i, batch in enumerate(batches)
}
with ThreadPoolExecutor(max_workers=max_workers) as executor:
# Submit stream batch processing tasks (reuse standard M3U processing)
future_to_batch = {
executor.submit(process_m3u_batch_direct, account_id, batch, existing_groups, hash_keys): i
for i, batch in enumerate(batches)
}
completed_batches = 0
total_batches = len(batches)
completed_batches = 0
total_batches = len(batches)
# Process completed batches as they finish
for future in as_completed(future_to_batch):
batch_idx = future_to_batch[future]
try:
result = future.result()
completed_batches += 1
# Process completed batches as they finish
for future in as_completed(future_to_batch):
batch_idx = future_to_batch[future]
try:
result = future.result()
completed_batches += 1
# Extract stream counts from result
if isinstance(result, str):
try:
created_match = re.search(r"(\d+) created", result)
updated_match = re.search(r"(\d+) updated", result)
if created_match and updated_match:
created_count = int(created_match.group(1))
updated_count = int(updated_match.group(1))
streams_created += created_count
streams_updated += updated_count
except (AttributeError, ValueError):
pass
# Extract stream counts from result
if isinstance(result, str):
try:
created_match = re.search(r"(\d+) created", result)
updated_match = re.search(r"(\d+) updated", result)
if created_match and updated_match:
created_count = int(created_match.group(1))
updated_count = int(updated_match.group(1))
streams_created += created_count
streams_updated += updated_count
except (AttributeError, ValueError):
pass
# Send progress update
progress = int((completed_batches / total_batches) * 100)
current_elapsed = time.time() - start_time
# Send progress update
progress = int((completed_batches / total_batches) * 100)
current_elapsed = time.time() - start_time
if progress > 0:
estimated_total = (current_elapsed / progress) * 100
time_remaining = max(0, estimated_total - current_elapsed)
else:
time_remaining = 0
if progress > 0:
estimated_total = (current_elapsed / progress) * 100
time_remaining = max(0, estimated_total - current_elapsed)
else:
time_remaining = 0
send_m3u_update(
account_id,
"parsing",
progress,
elapsed_time=current_elapsed,
time_remaining=time_remaining,
streams_processed=streams_created + streams_updated,
)
send_m3u_update(
account_id,
"parsing",
progress,
elapsed_time=current_elapsed,
time_remaining=time_remaining,
streams_processed=streams_created + streams_updated,
)
logger.debug(f"XC thread batch {completed_batches}/{total_batches} completed")
logger.debug(f"XC thread batch {completed_batches}/{total_batches} completed")
except Exception as e:
logger.error(f"Error in XC thread batch {batch_idx}: {str(e)}")
completed_batches += 1 # Still count it to avoid hanging
except Exception as e:
logger.error(f"Error in XC thread batch {batch_idx}: {str(e)}")
completed_batches += 1 # Still count it to avoid hanging
logger.info(f"XC thread-based processing completed for account {account_id}")
logger.info(f"XC thread-based processing completed for account {account_id}")
# Ensure all database transactions are committed before cleanup
logger.info(
@ -2638,6 +2841,17 @@ def refresh_single_m3u_account(account_id):
account.updated_at = timezone.now()
account.save(update_fields=["status", "last_message", "updated_at"])
# Log system event for M3U refresh
log_system_event(
event_type='m3u_refresh',
account_name=account.name,
elapsed_time=round(elapsed_time, 2),
streams_created=streams_created,
streams_updated=streams_updated,
streams_deleted=streams_deleted,
total_processed=streams_processed,
)
# Send final update with complete metrics and explicitly include success status
send_m3u_update(
account_id,
@ -2673,7 +2887,16 @@ def refresh_single_m3u_account(account_id):
release_task_lock("refresh_single_m3u_account", account_id)
# Aggressive garbage collection
del existing_groups, extinf_data, groups, batches
# Only delete variables if they exist
if 'existing_groups' in locals():
del existing_groups
if 'extinf_data' in locals():
del extinf_data
if 'groups' in locals():
del groups
if 'batches' in locals():
del batches
from core.utils import cleanup_memory
cleanup_memory(log_usage=True, force_collection=True)


@ -8,6 +8,34 @@ lock = threading.Lock()
active_streams_map = {}
logger = logging.getLogger(__name__)
def normalize_stream_url(url):
"""
Normalize stream URLs for compatibility with FFmpeg.
Handles VLC-specific syntax like udp://@239.0.0.1:1234 by removing the @ symbol.
FFmpeg doesn't recognize the @ prefix for multicast addresses.
Args:
url (str): The stream URL to normalize
Returns:
str: The normalized URL
"""
if not url:
return url
# Handle VLC-style UDP multicast URLs: udp://@239.0.0.1:1234 -> udp://239.0.0.1:1234
# The @ symbol in VLC means "listen on all interfaces" but FFmpeg doesn't use this syntax
if url.startswith('udp://@'):
normalized = url.replace('udp://@', 'udp://', 1)
logger.debug(f"Normalized VLC-style UDP URL: {url} -> {normalized}")
return normalized
# Could add other normalizations here in the future (rtp://@, etc.)
return url
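The intended behaviour can be summarised with a few illustrative checks (these are not tests from the repository, just a sketch of the contract described in the docstring, assuming normalize_stream_url above is in scope):

# Illustrative expectations for normalize_stream_url
assert normalize_stream_url("udp://@239.0.0.1:1234") == "udp://239.0.0.1:1234"   # VLC-style prefix stripped
assert normalize_stream_url("udp://239.0.0.1:1234") == "udp://239.0.0.1:1234"    # already FFmpeg-friendly
assert normalize_stream_url("http://example.com/live.ts") == "http://example.com/live.ts"  # non-UDP left untouched
assert normalize_stream_url("") == ""  # falsy input returned unchanged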
def increment_stream_count(account):
with lock:
current_usage = active_streams_map.get(account.id, 0)


@ -14,3 +14,26 @@ class OutputM3UTest(TestCase):
self.assertEqual(response.status_code, 200)
content = response.content.decode()
self.assertIn("#EXTM3U", content)
def test_generate_m3u_response_post_empty_body(self):
"""
Test that a POST request with an empty body returns 200 OK.
"""
url = reverse('output:generate_m3u')
response = self.client.post(url, data=None, content_type='application/x-www-form-urlencoded')
content = response.content.decode()
self.assertEqual(response.status_code, 200, "POST with empty body should return 200 OK")
self.assertIn("#EXTM3U", content)
def test_generate_m3u_response_post_with_body(self):
"""
Test that a POST request with a non-empty body returns 403 Forbidden.
"""
url = reverse('output:generate_m3u')
response = self.client.post(url, data={'evilstring': 'muhahaha'})
self.assertEqual(response.status_code, 403, "POST with body should return 403 Forbidden")
self.assertIn("POST requests with body are not allowed, body is:", response.content.decode())

File diff suppressed because it is too large


@ -1,4 +1,6 @@
"""Shared configuration between proxy types"""
import time
from django.db import connection
class BaseConfig:
DEFAULT_USER_AGENT = 'VLC/3.0.20 LibVLC/3.0.20' # Will only be used if connection to settings fail
@ -12,13 +14,29 @@ class BaseConfig:
BUFFERING_TIMEOUT = 15 # Seconds to wait for buffering before switching streams
BUFFER_SPEED = 1 # Speed below which the stream is considered to be buffering; 1x is normal speed, 2x is double speed, etc.
# Cache for proxy settings (class-level, shared across all instances)
_proxy_settings_cache = None
_proxy_settings_cache_time = 0
_proxy_settings_cache_ttl = 10 # Cache for 10 seconds
@classmethod
def get_proxy_settings(cls):
"""Get proxy settings from CoreSettings JSON data with fallback to defaults"""
"""Get proxy settings from CoreSettings JSON data with fallback to defaults (cached)"""
# Check if cache is still valid
now = time.time()
if cls._proxy_settings_cache is not None and (now - cls._proxy_settings_cache_time) < cls._proxy_settings_cache_ttl:
return cls._proxy_settings_cache
# Cache miss or expired - fetch from database
try:
from core.models import CoreSettings
return CoreSettings.get_proxy_settings()
settings = CoreSettings.get_proxy_settings()
cls._proxy_settings_cache = settings
cls._proxy_settings_cache_time = now
return settings
except Exception:
# Return defaults if database query fails
return {
"buffering_timeout": 15,
"buffering_speed": 1.0,
@ -27,6 +45,13 @@ class BaseConfig:
"channel_init_grace_period": 5,
}
finally:
# Always close the connection after reading settings
try:
connection.close()
except Exception:
pass
@classmethod
def get_redis_chunk_ttl(cls):
"""Get Redis chunk TTL from database or default"""
@ -69,10 +94,10 @@ class TSConfig(BaseConfig):
CLEANUP_INTERVAL = 60 # Check for inactive channels every 60 seconds
# Client tracking settings
CLIENT_RECORD_TTL = 5 # How long client records persist in Redis (seconds). Client will be considered MIA after this time.
CLIENT_RECORD_TTL = 60 # How long client records persist in Redis (seconds). Client will be considered MIA after this time.
CLEANUP_CHECK_INTERVAL = 1 # How often to check for disconnected clients (seconds)
CLIENT_HEARTBEAT_INTERVAL = 1 # How often to send client heartbeats (seconds)
GHOST_CLIENT_MULTIPLIER = 5.0 # How many heartbeat intervals before client considered ghost (5 would mean 5 seconds if heartbeat interval is 1)
CLIENT_HEARTBEAT_INTERVAL = 5 # How often to send client heartbeats (seconds)
GHOST_CLIENT_MULTIPLIER = 6.0 # How many heartbeat intervals before client considered ghost (6 would mean 30 seconds with the 5-second heartbeat interval)
CLIENT_WAIT_TIMEOUT = 30 # Seconds to wait for client to connect
# Stream health and recovery settings
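To make the retuned client-tracking defaults concrete, the effective ghost-detection window works out as shown below (constants copied from above; the helper function is purely illustrative):

# A client is treated as a ghost once it has missed GHOST_CLIENT_MULTIPLIER
# consecutive heartbeats, so the window is multiplier * interval.
CLIENT_HEARTBEAT_INTERVAL = 5   # seconds, as set above
GHOST_CLIENT_MULTIPLIER = 6.0   # missed intervals, as set above

def ghost_window_seconds(interval=CLIENT_HEARTBEAT_INTERVAL, multiplier=GHOST_CLIENT_MULTIPLIER):
    return interval * multiplier

print(ghost_window_seconds())   # 30.0, comfortably inside the 60s CLIENT_RECORD_TTL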


@ -8,7 +8,7 @@ import gevent
from typing import Set, Optional
from apps.proxy.config import TSConfig as Config
from redis.exceptions import ConnectionError, TimeoutError
from .constants import EventType
from .constants import EventType, ChannelState, ChannelMetadataField
from .config_helper import ConfigHelper
from .redis_keys import RedisKeys
from .utils import get_logger
@ -26,6 +26,7 @@ class ClientManager:
self.lock = threading.Lock()
self.last_active_time = time.time()
self.worker_id = worker_id # Store worker ID as instance variable
self._heartbeat_running = True # Flag to control heartbeat thread
# STANDARDIZED KEYS: Move client set under channel namespace
self.client_set_key = RedisKeys.clients(channel_id)
@ -33,6 +34,10 @@ class ClientManager:
self.heartbeat_interval = ConfigHelper.get('CLIENT_HEARTBEAT_INTERVAL', 10)
self.last_heartbeat_time = {}
# Get ProxyServer instance for ownership checks
from .server import ProxyServer
self.proxy_server = ProxyServer.get_instance()
# Start heartbeat thread for local clients
self._start_heartbeat_thread()
self._registered_clients = set() # Track already registered client IDs
@ -77,56 +82,28 @@ class ClientManager:
logger.debug(f"Failed to trigger stats update: {e}")
def _start_heartbeat_thread(self):
"""Start thread to regularly refresh client presence in Redis"""
"""Start thread to regularly refresh client presence in Redis for local clients"""
def heartbeat_task():
no_clients_count = 0 # Track consecutive empty cycles
max_empty_cycles = 3 # Exit after this many consecutive empty checks
logger.debug(f"Started heartbeat thread for channel {self.channel_id} (interval: {self.heartbeat_interval}s)")
while True:
while self._heartbeat_running:
try:
# Wait for the interval
gevent.sleep(self.heartbeat_interval)
# Wait for the interval, but check stop flag frequently for quick shutdown
# Sleep in 1-second increments to allow faster response to stop signal
for _ in range(int(self.heartbeat_interval)):
if not self._heartbeat_running:
break
time.sleep(1)
# Final check before doing work
if not self._heartbeat_running:
break
# Send heartbeat for all local clients
with self.lock:
if not self.clients or not self.redis_client:
# No clients left, increment our counter
no_clients_count += 1
# Check if we're in a shutdown delay period before exiting
in_shutdown_delay = False
if self.redis_client:
try:
disconnect_key = RedisKeys.last_client_disconnect(self.channel_id)
disconnect_time_bytes = self.redis_client.get(disconnect_key)
if disconnect_time_bytes:
disconnect_time = float(disconnect_time_bytes.decode('utf-8'))
elapsed = time.time() - disconnect_time
shutdown_delay = ConfigHelper.channel_shutdown_delay()
if elapsed < shutdown_delay:
in_shutdown_delay = True
logger.debug(f"Channel {self.channel_id} in shutdown delay: {elapsed:.1f}s of {shutdown_delay}s elapsed")
except Exception as e:
logger.debug(f"Error checking shutdown delay: {e}")
# Only exit if we've seen no clients for several consecutive checks AND we're not in shutdown delay
if no_clients_count >= max_empty_cycles and not in_shutdown_delay:
logger.info(f"No clients for channel {self.channel_id} after {no_clients_count} consecutive checks and not in shutdown delay, exiting heartbeat thread")
return # This exits the thread
# Skip this cycle if we have no clients but continue if in shutdown delay
if not in_shutdown_delay:
continue
else:
# Reset counter during shutdown delay to prevent premature exit
no_clients_count = 0
continue
else:
# Reset counter when we see clients
no_clients_count = 0
# Skip this cycle if we have no local clients
if not self.clients:
continue
# IMPROVED GHOST DETECTION: Check for stale clients before sending heartbeats
current_time = time.time()
@ -197,11 +174,20 @@ class ClientManager:
except Exception as e:
logger.error(f"Error in client heartbeat thread: {e}")
logger.debug(f"Heartbeat thread exiting for channel {self.channel_id}")
thread = threading.Thread(target=heartbeat_task, daemon=True)
thread.name = f"client-heartbeat-{self.channel_id}"
thread.start()
logger.debug(f"Started client heartbeat thread for channel {self.channel_id} (interval: {self.heartbeat_interval}s)")
def stop(self):
"""Stop the heartbeat thread and cleanup"""
logger.debug(f"Stopping ClientManager for channel {self.channel_id}")
self._heartbeat_running = False
# The heartbeat loop checks _heartbeat_running every second, so it will exit on its own shortly.
# Note: we don't join() here because it's a daemon thread.
def _execute_redis_command(self, command_func):
"""Execute Redis command with error handling"""
if not self.redis_client:
@ -355,16 +341,30 @@ class ClientManager:
self._notify_owner_of_activity()
# Publish client disconnected event
event_data = json.dumps({
"event": EventType.CLIENT_DISCONNECTED, # Use constant instead of string
"channel_id": self.channel_id,
"client_id": client_id,
"worker_id": self.worker_id or "unknown",
"timestamp": time.time(),
"remaining_clients": remaining
})
self.redis_client.publish(RedisKeys.events_channel(self.channel_id), event_data)
# Check if we're the owner - if so, handle locally; if not, publish event
am_i_owner = self.proxy_server and self.proxy_server.am_i_owner(self.channel_id)
if am_i_owner:
# We're the owner - handle the disconnect directly
logger.debug(f"Owner handling CLIENT_DISCONNECTED for client {client_id} locally (not publishing)")
if remaining == 0:
# Trigger shutdown check directly via ProxyServer method
logger.debug(f"No clients left - triggering immediate shutdown check")
# Spawn greenlet to avoid blocking
import gevent
gevent.spawn(self.proxy_server.handle_client_disconnect, self.channel_id)
else:
# We're not the owner - publish event so owner can handle it
logger.debug(f"Non-owner publishing CLIENT_DISCONNECTED event for client {client_id} on channel {self.channel_id} from worker {self.worker_id}")
event_data = json.dumps({
"event": EventType.CLIENT_DISCONNECTED,
"channel_id": self.channel_id,
"client_id": client_id,
"worker_id": self.worker_id or "unknown",
"timestamp": time.time(),
"remaining_clients": remaining
})
self.redis_client.publish(RedisKeys.events_channel(self.channel_id), event_data)
# Trigger channel stats update via WebSocket
self._trigger_stats_update()


@ -100,3 +100,12 @@ class ConfigHelper:
def channel_init_grace_period():
"""Get channel initialization grace period in seconds"""
return Config.get_channel_init_grace_period()
@staticmethod
def chunk_timeout():
"""
Get chunk timeout in seconds (used for both socket and HTTP read timeouts).
This controls how long we wait for each chunk before timing out.
Set this higher (e.g., 30s) for slow providers that may have intermittent delays.
"""
return ConfigHelper.get('CHUNK_TIMEOUT', 5) # Default 5 seconds
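A short sketch of how this helper is meant to be consumed when opening an upstream HTTP connection; the 5-second connect timeout and the import path are assumptions, not taken from this diff:

# Use the configured chunk timeout as the read timeout on a streaming request.
import requests
from apps.proxy.ts_proxy.config_helper import ConfigHelper  # assumed module path

def open_upstream(url: str) -> requests.Response:
    # (connect timeout, read timeout); the read side honours CHUNK_TIMEOUT.
    return requests.get(url, stream=True, timeout=(5, ConfigHelper.chunk_timeout()))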


@ -33,6 +33,8 @@ class EventType:
# Stream types
class StreamType:
HLS = "hls"
RTSP = "rtsp"
UDP = "udp"
TS = "ts"
UNKNOWN = "unknown"


@ -0,0 +1,138 @@
"""
HTTP Stream Reader - Thread-based HTTP stream reader that writes to a pipe.
This allows us to use the same fetch_chunk() path for both transcode and HTTP streams.
"""
import threading
import os
import requests
from requests.adapters import HTTPAdapter
from .utils import get_logger
logger = get_logger()
class HTTPStreamReader:
"""Thread-based HTTP stream reader that writes to a pipe"""
def __init__(self, url, user_agent=None, chunk_size=8192):
self.url = url
self.user_agent = user_agent
self.chunk_size = chunk_size
self.session = None
self.response = None
self.thread = None
self.pipe_read = None
self.pipe_write = None
self.running = False
def start(self):
"""Start the HTTP stream reader thread"""
# Create a pipe (works on Windows and Unix)
self.pipe_read, self.pipe_write = os.pipe()
# Start the reader thread
self.running = True
self.thread = threading.Thread(target=self._read_stream, daemon=True)
self.thread.start()
logger.info(f"Started HTTP stream reader thread for {self.url}")
return self.pipe_read
def _read_stream(self):
"""Thread worker that reads HTTP stream and writes to pipe"""
try:
# Build headers
headers = {}
if self.user_agent:
headers['User-Agent'] = self.user_agent
logger.info(f"HTTP reader connecting to {self.url}")
# Create session
self.session = requests.Session()
# Disable retries for faster failure detection
adapter = HTTPAdapter(max_retries=0, pool_connections=1, pool_maxsize=1)
self.session.mount('http://', adapter)
self.session.mount('https://', adapter)
# Stream the URL
self.response = self.session.get(
self.url,
headers=headers,
stream=True,
timeout=(5, 30) # 5s connect, 30s read
)
if self.response.status_code != 200:
logger.error(f"HTTP {self.response.status_code} from {self.url}")
return
logger.info(f"HTTP reader connected successfully, streaming data...")
# Stream chunks to pipe
chunk_count = 0
for chunk in self.response.iter_content(chunk_size=self.chunk_size):
if not self.running:
break
if chunk:
try:
# Write binary data to pipe
os.write(self.pipe_write, chunk)
chunk_count += 1
# Log progress periodically
if chunk_count % 1000 == 0:
logger.debug(f"HTTP reader streamed {chunk_count} chunks")
except OSError as e:
logger.error(f"Pipe write error: {e}")
break
logger.info("HTTP stream ended")
except requests.exceptions.RequestException as e:
logger.error(f"HTTP reader request error: {e}")
except Exception as e:
logger.error(f"HTTP reader unexpected error: {e}", exc_info=True)
finally:
self.running = False
# Close write end of pipe to signal EOF
try:
if self.pipe_write is not None:
os.close(self.pipe_write)
self.pipe_write = None
except:
pass
def stop(self):
"""Stop the HTTP stream reader"""
logger.info("Stopping HTTP stream reader")
self.running = False
# Close response
if self.response:
try:
self.response.close()
except:
pass
# Close session
if self.session:
try:
self.session.close()
except:
pass
# Close write end of pipe
if self.pipe_write is not None:
try:
os.close(self.pipe_write)
self.pipe_write = None
except:
pass
# Wait for thread
if self.thread and self.thread.is_alive():
self.thread.join(timeout=2.0)
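A minimal driver sketch based only on the start()/stop() surface above; the URL, user agent, and read size are placeholders:

import os

# Start the reader, consume the read end of its pipe the same way a transcode
# process's stdout would be consumed, then shut everything down.
reader = HTTPStreamReader("http://example.com/live.ts", user_agent="VLC/3.0.20 LibVLC/3.0.20")
pipe_fd = reader.start()                   # read end of the pipe
try:
    while True:
        chunk = os.read(pipe_fd, 188 * 64) # TS packets are 188 bytes; the read size is illustrative
        if not chunk:                      # EOF: the reader closed the write end
            break
        # ... hand the chunk to the buffer / fetch_chunk() path ...
finally:
    reader.stop()
    os.close(pipe_fd)                      # the class only closes the write end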


@ -19,7 +19,7 @@ import gevent # Add gevent import
from typing import Dict, Optional, Set
from apps.proxy.config import TSConfig as Config
from apps.channels.models import Channel, Stream
from core.utils import RedisClient
from core.utils import RedisClient, log_system_event
from redis.exceptions import ConnectionError, TimeoutError
from .stream_manager import StreamManager
from .stream_buffer import StreamBuffer
@ -131,6 +131,8 @@ class ProxyServer:
max_retries = 10
base_retry_delay = 1 # Start with 1 second delay
max_retry_delay = 30 # Cap at 30 seconds
pubsub_client = None
pubsub = None
while True:
try:
@ -192,35 +194,11 @@ class ProxyServer:
self.redis_client.delete(disconnect_key)
elif event_type == EventType.CLIENT_DISCONNECTED:
logger.debug(f"Owner received {EventType.CLIENT_DISCONNECTED} event for channel {channel_id}")
# Check if any clients remain
if channel_id in self.client_managers:
# VERIFY REDIS CLIENT COUNT DIRECTLY
client_set_key = RedisKeys.clients(channel_id)
total = self.redis_client.scard(client_set_key) or 0
if total == 0:
logger.debug(f"No clients left after disconnect event - stopping channel {channel_id}")
# Set the disconnect timer for other workers to see
disconnect_key = RedisKeys.last_client_disconnect(channel_id)
self.redis_client.setex(disconnect_key, 60, str(time.time()))
# Get configured shutdown delay or default
shutdown_delay = ConfigHelper.channel_shutdown_delay()
if shutdown_delay > 0:
logger.info(f"Waiting {shutdown_delay}s before stopping channel...")
gevent.sleep(shutdown_delay) # REPLACE: time.sleep(shutdown_delay)
# Re-check client count before stopping
total = self.redis_client.scard(client_set_key) or 0
if total > 0:
logger.info(f"New clients connected during shutdown delay - aborting shutdown")
self.redis_client.delete(disconnect_key)
return
# Stop the channel directly
self.stop_channel(channel_id)
client_id = data.get("client_id")
worker_id = data.get("worker_id")
logger.debug(f"Owner received {EventType.CLIENT_DISCONNECTED} event for channel {channel_id}, client {client_id} from worker {worker_id}")
# Delegate to dedicated method
self.handle_client_disconnect(channel_id)
elif event_type == EventType.STREAM_SWITCH:
@ -339,20 +317,27 @@ class ProxyServer:
logger.error(f"Error in event listener: {e}. Retrying in {final_delay:.1f}s (attempt {retry_count})")
gevent.sleep(final_delay) # REPLACE: time.sleep(final_delay)
# Try to clean up the old connection
try:
if 'pubsub' in locals():
pubsub.close()
if 'pubsub_client' in locals():
pubsub_client.close()
except:
pass
except Exception as e:
logger.error(f"Error in event listener: {e}")
# Add a short delay to prevent rapid retries on persistent errors
gevent.sleep(5) # REPLACE: time.sleep(5)
finally:
# Always clean up PubSub connections in all error paths
try:
if pubsub:
pubsub.close()
pubsub = None
except Exception as e:
logger.debug(f"Error closing pubsub: {e}")
try:
if pubsub_client:
pubsub_client.close()
pubsub_client = None
except Exception as e:
logger.debug(f"Error closing pubsub_client: {e}")
thread = threading.Thread(target=event_listener, daemon=True)
thread.name = "redis-event-listener"
thread.start()
@ -486,17 +471,18 @@ class ProxyServer:
)
return True
# Create buffer and client manager instances
buffer = StreamBuffer(channel_id, redis_client=self.redis_client)
client_manager = ClientManager(
channel_id,
redis_client=self.redis_client,
worker_id=self.worker_id
)
# Create buffer and client manager instances (or reuse if they exist)
if channel_id not in self.stream_buffers:
buffer = StreamBuffer(channel_id, redis_client=self.redis_client)
self.stream_buffers[channel_id] = buffer
# Store in local tracking
self.stream_buffers[channel_id] = buffer
self.client_managers[channel_id] = client_manager
if channel_id not in self.client_managers:
client_manager = ClientManager(
channel_id,
redis_client=self.redis_client,
worker_id=self.worker_id
)
self.client_managers[channel_id] = client_manager
# IMPROVED: Set initializing state in Redis BEFORE any other operations
if self.redis_client:
@ -550,13 +536,15 @@ class ProxyServer:
logger.info(f"Channel {channel_id} already owned by worker {current_owner}")
logger.info(f"This worker ({self.worker_id}) will read from Redis buffer only")
# Create buffer but not stream manager
buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client)
self.stream_buffers[channel_id] = buffer
# Create buffer but not stream manager (only if not already exists)
if channel_id not in self.stream_buffers:
buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client)
self.stream_buffers[channel_id] = buffer
# Create client manager with channel_id and redis_client
client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id)
self.client_managers[channel_id] = client_manager
# Create client manager with channel_id and redis_client (only if not already exists)
if channel_id not in self.client_managers:
client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id)
self.client_managers[channel_id] = client_manager
return True
@ -571,13 +559,15 @@ class ProxyServer:
# Another worker just acquired ownership
logger.info(f"Another worker just acquired ownership of channel {channel_id}")
# Create buffer but not stream manager
buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client)
self.stream_buffers[channel_id] = buffer
# Create buffer but not stream manager (only if not already exists)
if channel_id not in self.stream_buffers:
buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client)
self.stream_buffers[channel_id] = buffer
# Create client manager with channel_id and redis_client
client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id)
self.client_managers[channel_id] = client_manager
# Create client manager with channel_id and redis_client (only if not already exists)
if channel_id not in self.client_managers:
client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id)
self.client_managers[channel_id] = client_manager
return True
@ -596,7 +586,7 @@ class ProxyServer:
if channel_user_agent:
metadata["user_agent"] = channel_user_agent
# CRITICAL FIX: Make sure stream_id is always set in metadata and properly logged
# Make sure stream_id is always set in metadata and properly logged
if channel_stream_id:
metadata["stream_id"] = str(channel_stream_id)
logger.info(f"Storing stream_id {channel_stream_id} in metadata for channel {channel_id}")
@ -632,13 +622,37 @@ class ProxyServer:
logger.info(f"Created StreamManager for channel {channel_id} with stream ID {channel_stream_id}")
self.stream_managers[channel_id] = stream_manager
# Create client manager with channel_id, redis_client AND worker_id
client_manager = ClientManager(
channel_id=channel_id,
redis_client=self.redis_client,
worker_id=self.worker_id
)
self.client_managers[channel_id] = client_manager
# Log channel start event
try:
channel_obj = Channel.objects.get(uuid=channel_id)
# Get stream name if stream_id is available
stream_name = None
if channel_stream_id:
try:
stream_obj = Stream.objects.get(id=channel_stream_id)
stream_name = stream_obj.name
except Exception:
pass
log_system_event(
'channel_start',
channel_id=channel_id,
channel_name=channel_obj.name,
stream_name=stream_name,
stream_id=channel_stream_id
)
except Exception as e:
logger.error(f"Could not log channel start event: {e}")
# Create client manager with channel_id, redis_client AND worker_id (only if not already exists)
if channel_id not in self.client_managers:
client_manager = ClientManager(
channel_id=channel_id,
redis_client=self.redis_client,
worker_id=self.worker_id
)
self.client_managers[channel_id] = client_manager
# Start stream manager thread only for the owner
thread = threading.Thread(target=stream_manager.run, daemon=True)
@ -688,9 +702,10 @@ class ProxyServer:
state = metadata.get(b'state', b'unknown').decode('utf-8')
owner = metadata.get(b'owner', b'').decode('utf-8')
# States that indicate the channel is running properly
# States that indicate the channel is running properly or shutting down
valid_states = [ChannelState.ACTIVE, ChannelState.WAITING_FOR_CLIENTS,
ChannelState.CONNECTING, ChannelState.BUFFERING, ChannelState.INITIALIZING]
ChannelState.CONNECTING, ChannelState.BUFFERING, ChannelState.INITIALIZING,
ChannelState.STOPPING]
# If the channel is in a valid state, check if the owner is still active
if state in valid_states:
@ -703,12 +718,24 @@ class ProxyServer:
else:
# This is a zombie channel - owner is gone but metadata still exists
logger.warning(f"Detected zombie channel {channel_id} - owner {owner} is no longer active")
# Check if there are any clients connected
client_set_key = RedisKeys.clients(channel_id)
client_count = self.redis_client.scard(client_set_key) or 0
if client_count > 0:
logger.warning(f"Zombie channel {channel_id} has {client_count} clients - attempting ownership takeover")
# Could potentially take ownership here in the future
# For now, just clean it up to be safe
else:
logger.warning(f"Zombie channel {channel_id} has no clients - cleaning up")
self._clean_zombie_channel(channel_id, metadata)
return False
elif state in [ChannelState.STOPPING, ChannelState.STOPPED, ChannelState.ERROR]:
# These states indicate the channel should be reinitialized
logger.info(f"Channel {channel_id} exists but in terminal state: {state}")
return True
elif state in [ChannelState.STOPPED, ChannelState.ERROR]:
# These terminal states indicate the channel should be cleaned up and reinitialized
logger.info(f"Channel {channel_id} in terminal state {state} - returning False to trigger cleanup")
return False
else:
# Unknown or initializing state, check how long it's been in this state
if b'state_changed_at' in metadata:
@ -772,6 +799,44 @@ class ProxyServer:
logger.error(f"Error cleaning zombie channel {channel_id}: {e}", exc_info=True)
return False
def handle_client_disconnect(self, channel_id):
"""
Handle client disconnect event - check if channel should shut down.
Can be called directly by owner or via PubSub from non-owner workers.
"""
if channel_id not in self.client_managers:
return
try:
# VERIFY REDIS CLIENT COUNT DIRECTLY
client_set_key = RedisKeys.clients(channel_id)
total = self.redis_client.scard(client_set_key) or 0
if total == 0:
logger.debug(f"No clients left after disconnect event - stopping channel {channel_id}")
# Set the disconnect timer for other workers to see
disconnect_key = RedisKeys.last_client_disconnect(channel_id)
self.redis_client.setex(disconnect_key, 60, str(time.time()))
# Get configured shutdown delay or default
shutdown_delay = ConfigHelper.channel_shutdown_delay()
if shutdown_delay > 0:
logger.info(f"Waiting {shutdown_delay}s before stopping channel...")
gevent.sleep(shutdown_delay)
# Re-check client count before stopping
total = self.redis_client.scard(client_set_key) or 0
if total > 0:
logger.info(f"New clients connected during shutdown delay - aborting shutdown")
self.redis_client.delete(disconnect_key)
return
# Stop the channel directly
self.stop_channel(channel_id)
except Exception as e:
logger.error(f"Error handling client disconnect for channel {channel_id}: {e}")
def stop_channel(self, channel_id):
"""Stop a channel with proper ownership handling"""
try:
@ -819,6 +884,41 @@ class ProxyServer:
self.release_ownership(channel_id)
logger.info(f"Released ownership of channel {channel_id}")
# Log channel stop event (after cleanup, before releasing ownership section ends)
try:
channel_obj = Channel.objects.get(uuid=channel_id)
# Calculate runtime and get total bytes from metadata
runtime = None
total_bytes = None
if self.redis_client:
metadata_key = RedisKeys.channel_metadata(channel_id)
metadata = self.redis_client.hgetall(metadata_key)
if metadata:
# Calculate runtime from init_time
if b'init_time' in metadata:
try:
init_time = float(metadata[b'init_time'].decode('utf-8'))
runtime = round(time.time() - init_time, 2)
except Exception:
pass
# Get total bytes transferred
if b'total_bytes' in metadata:
try:
total_bytes = int(metadata[b'total_bytes'].decode('utf-8'))
except Exception:
pass
log_system_event(
'channel_stop',
channel_id=channel_id,
channel_name=channel_obj.name,
runtime=runtime,
total_bytes=total_bytes
)
except Exception as e:
logger.error(f"Could not log channel stop event: {e}")
# Always clean up local resources - WITH SAFE CHECKS
if channel_id in self.stream_managers:
del self.stream_managers[channel_id]
@ -846,6 +946,10 @@ class ProxyServer:
# Clean up client manager - SAFE CHECK HERE TOO
if channel_id in self.client_managers:
try:
client_manager = self.client_managers[channel_id]
# Stop the heartbeat thread before deleting
if hasattr(client_manager, 'stop'):
client_manager.stop()
del self.client_managers[channel_id]
logger.info(f"Removed client manager for channel {channel_id}")
except KeyError:
@ -920,6 +1024,15 @@ class ProxyServer:
if channel_id in self.client_managers:
client_manager = self.client_managers[channel_id]
total_clients = client_manager.get_total_client_count()
else:
# This can happen during reconnection attempts or crashes
# Check Redis directly for any connected clients
if self.redis_client:
client_set_key = RedisKeys.clients(channel_id)
total_clients = self.redis_client.scard(client_set_key) or 0
if total_clients == 0:
logger.warning(f"Channel {channel_id} is missing client_manager but we're the owner with 0 clients - will trigger cleanup")
# Log client count periodically
if time.time() % 30 < 1: # Every ~30 seconds
@ -927,7 +1040,14 @@ class ProxyServer:
# If in connecting or waiting_for_clients state, check grace period
if channel_state in [ChannelState.CONNECTING, ChannelState.WAITING_FOR_CLIENTS]:
# Get connection ready time from metadata
# Check if channel is already stopping
if self.redis_client:
stop_key = RedisKeys.channel_stopping(channel_id)
if self.redis_client.exists(stop_key):
logger.debug(f"Channel {channel_id} is already stopping - skipping monitor shutdown")
continue
# Get connection_ready_time from metadata (indicates if channel reached ready state)
connection_ready_time = None
if metadata and b'connection_ready_time' in metadata:
try:
@ -935,17 +1055,60 @@ class ProxyServer:
except (ValueError, TypeError):
pass
# If still connecting, give it more time
if channel_state == ChannelState.CONNECTING:
logger.debug(f"Channel {channel_id} still connecting - not checking for clients yet")
continue
if total_clients == 0:
# Check if we have a connection_attempt timestamp (set when CONNECTING starts)
connection_attempt_time = None
attempt_key = RedisKeys.connection_attempt(channel_id)
if self.redis_client:
attempt_value = self.redis_client.get(attempt_key)
if attempt_value:
try:
connection_attempt_time = float(attempt_value.decode('utf-8'))
except (ValueError, TypeError):
pass
# If waiting for clients, check grace period
if connection_ready_time:
# Also get init time as a fallback
init_time = None
if metadata and b'init_time' in metadata:
try:
init_time = float(metadata[b'init_time'].decode('utf-8'))
except (ValueError, TypeError):
pass
# Use whichever timestamp we have (prefer connection_attempt as it's more recent)
start_time = connection_attempt_time or init_time
if start_time:
# Check which timeout to apply based on channel lifecycle
if connection_ready_time:
# Already reached ready - use shutdown_delay
time_since_ready = time.time() - connection_ready_time
shutdown_delay = ConfigHelper.channel_shutdown_delay()
if time_since_ready > shutdown_delay:
logger.warning(
f"Channel {channel_id} in {channel_state} state with 0 clients for {time_since_ready:.1f}s "
f"(after reaching ready, shutdown_delay: {shutdown_delay}s) - stopping channel"
)
self.stop_channel(channel_id)
continue
else:
# Never reached ready - use grace_period timeout
time_since_start = time.time() - start_time
connecting_timeout = ConfigHelper.channel_init_grace_period()
if time_since_start > connecting_timeout:
logger.warning(
f"Channel {channel_id} stuck in {channel_state} state for {time_since_start:.1f}s "
f"with no clients (timeout: {connecting_timeout}s) - stopping channel due to upstream issues"
)
self.stop_channel(channel_id)
continue
elif connection_ready_time:
# We have clients now, but check grace period for state transition
grace_period = ConfigHelper.channel_init_grace_period()
time_since_ready = time.time() - connection_ready_time
# Add this debug log
logger.debug(f"GRACE PERIOD CHECK: Channel {channel_id} in {channel_state} state, "
f"time_since_ready={time_since_ready:.1f}s, grace_period={grace_period}s, "
f"total_clients={total_clients}")
@ -954,16 +1117,9 @@ class ProxyServer:
# Still within grace period
logger.debug(f"Channel {channel_id} in grace period - {time_since_ready:.1f}s of {grace_period}s elapsed")
continue
elif total_clients == 0:
# Grace period expired with no clients
logger.info(f"Grace period expired ({time_since_ready:.1f}s > {grace_period}s) with no clients - stopping channel {channel_id}")
self.stop_channel(channel_id)
else:
# Grace period expired but we have clients - mark channel as active
# Grace period expired with clients - mark channel as active
logger.info(f"Grace period expired with {total_clients} clients - marking channel {channel_id} as active")
old_state = "unknown"
if metadata and b'state' in metadata:
old_state = metadata[b'state'].decode('utf-8')
if self.update_channel_state(channel_id, ChannelState.ACTIVE, {
"grace_period_ended_at": str(time.time()),
"clients_at_activation": str(total_clients)
@ -971,6 +1127,13 @@ class ProxyServer:
logger.info(f"Channel {channel_id} activated with {total_clients} clients after grace period")
# If active and no clients, start normal shutdown procedure
elif channel_state not in [ChannelState.CONNECTING, ChannelState.WAITING_FOR_CLIENTS] and total_clients == 0:
# Check if channel is already stopping
if self.redis_client:
stop_key = RedisKeys.channel_stopping(channel_id)
if self.redis_client.exists(stop_key):
logger.debug(f"Channel {channel_id} is already stopping - skipping monitor shutdown")
continue
# Check if there's a pending no-clients timeout
disconnect_key = RedisKeys.last_client_disconnect(channel_id)
disconnect_time = None
@ -1030,14 +1193,30 @@ class ProxyServer:
continue
# Check for local client count - if zero, clean up our local resources
if self.client_managers[channel_id].get_client_count() == 0:
# We're not the owner, and we have no local clients - clean up our resources
logger.debug(f"Non-owner cleanup: Channel {channel_id} has no local clients, cleaning up local resources")
if channel_id in self.client_managers:
if self.client_managers[channel_id].get_client_count() == 0:
# We're not the owner, and we have no local clients - clean up our resources
logger.debug(f"Non-owner cleanup: Channel {channel_id} has no local clients, cleaning up local resources")
self._cleanup_local_resources(channel_id)
else:
# This shouldn't happen, but clean up anyway
logger.warning(f"Non-owner cleanup: Channel {channel_id} has no client_manager entry, cleaning up local resources")
self._cleanup_local_resources(channel_id)
except Exception as e:
logger.error(f"Error in cleanup thread: {e}", exc_info=True)
# Periodically check for orphaned channels (every 30 seconds)
if hasattr(self, '_last_orphan_check'):
if time.time() - self._last_orphan_check > 30:
try:
self._check_orphaned_metadata()
self._last_orphan_check = time.time()
except Exception as orphan_error:
logger.error(f"Error checking orphaned metadata: {orphan_error}", exc_info=True)
else:
self._last_orphan_check = time.time()
gevent.sleep(ConfigHelper.cleanup_check_interval()) # REPLACE: time.sleep(ConfigHelper.cleanup_check_interval())
thread = threading.Thread(target=cleanup_task, daemon=True)
@ -1059,10 +1238,6 @@ class ProxyServer:
try:
channel_id = key.decode('utf-8').split(':')[2]
# Skip channels we already have locally
if channel_id in self.stream_buffers:
continue
# Check if this channel has an owner
owner = self.get_channel_owner(channel_id)
@ -1077,13 +1252,84 @@ class ProxyServer:
else:
# Orphaned channel with no clients - clean it up
logger.info(f"Cleaning up orphaned channel {channel_id}")
self._clean_redis_keys(channel_id)
# If we have it locally, stop it properly to clean up processes
if channel_id in self.stream_managers or channel_id in self.client_managers:
logger.info(f"Orphaned channel {channel_id} is local - calling stop_channel")
self.stop_channel(channel_id)
else:
# Just clean up Redis keys for remote channels
self._clean_redis_keys(channel_id)
except Exception as e:
logger.error(f"Error processing channel key {key}: {e}")
except Exception as e:
logger.error(f"Error checking orphaned channels: {e}")
def _check_orphaned_metadata(self):
"""
Check for metadata entries that have no owner and no clients.
This catches zombie channels that weren't cleaned up properly.
"""
if not self.redis_client:
return
try:
# Get all channel metadata keys
channel_pattern = "ts_proxy:channel:*:metadata"
channel_keys = self.redis_client.keys(channel_pattern)
for key in channel_keys:
try:
channel_id = key.decode('utf-8').split(':')[2]
# Get metadata first
metadata = self.redis_client.hgetall(key)
if not metadata:
# Empty metadata - clean it up
logger.warning(f"Found empty metadata for channel {channel_id} - cleaning up")
# If we have it locally, stop it properly
if channel_id in self.stream_managers or channel_id in self.client_managers:
self.stop_channel(channel_id)
else:
self._clean_redis_keys(channel_id)
continue
# Get owner
owner = metadata.get(b'owner', b'').decode('utf-8') if b'owner' in metadata else ''
# Check if owner is still alive
owner_alive = False
if owner:
owner_heartbeat_key = f"ts_proxy:worker:{owner}:heartbeat"
owner_alive = self.redis_client.exists(owner_heartbeat_key)
# Check client count
client_set_key = RedisKeys.clients(channel_id)
client_count = self.redis_client.scard(client_set_key) or 0
# If no owner and no clients, clean it up
if not owner_alive and client_count == 0:
state = metadata.get(b'state', b'unknown').decode('utf-8') if b'state' in metadata else 'unknown'
logger.warning(f"Found orphaned metadata for channel {channel_id} (state: {state}, owner: {owner}, clients: {client_count}) - cleaning up")
# If we have it locally, stop it properly to clean up transcode/proxy processes
if channel_id in self.stream_managers or channel_id in self.client_managers:
logger.info(f"Channel {channel_id} is local - calling stop_channel to clean up processes")
self.stop_channel(channel_id)
else:
# Just clean up Redis keys for remote channels
self._clean_redis_keys(channel_id)
elif not owner_alive and client_count > 0:
# Owner is gone but clients remain - just log for now
logger.warning(f"Found orphaned channel {channel_id} with {client_count} clients but no owner - may need ownership takeover")
except Exception as e:
logger.error(f"Error processing metadata key {key}: {e}", exc_info=True)
except Exception as e:
logger.error(f"Error checking orphaned metadata: {e}", exc_info=True)
def _clean_redis_keys(self, channel_id):
"""Clean up all Redis keys for a channel more efficiently"""
# Release the channel, stream, and profile keys from the channel


@ -14,6 +14,7 @@ from ..server import ProxyServer
from ..redis_keys import RedisKeys
from ..constants import EventType, ChannelState, ChannelMetadataField
from ..url_utils import get_stream_info_for_switch
from core.utils import log_system_event
logger = logging.getLogger("ts_proxy")
@ -597,32 +598,41 @@ class ChannelService:
@staticmethod
def _update_stream_stats_in_db(stream_id, **stats):
"""Update stream stats in database"""
from django.db import connection
try:
from apps.channels.models import Stream
from django.utils import timezone
stream = Stream.objects.get(id=stream_id)
# Get existing stats or create new dict
current_stats = stream.stream_stats or {}
# Update with new stats
for key, value in stats.items():
if value is not None:
current_stats[key] = value
# Save updated stats and timestamp
stream.stream_stats = current_stats
stream.stream_stats_updated_at = timezone.now()
stream.save(update_fields=['stream_stats', 'stream_stats_updated_at'])
logger.debug(f"Updated stream stats in database for stream {stream_id}: {stats}")
return True
except Exception as e:
logger.error(f"Error updating stream stats in database for stream {stream_id}: {e}")
return False
finally:
# Always close database connection after update
try:
connection.close()
except Exception:
pass
# Helper methods for Redis operations
@staticmethod
@ -678,7 +688,7 @@ class ChannelService:
switch_request = {
"event": EventType.STREAM_SWITCH,
"channel_id": channel_id,
"channel_id": str(channel_id),
"url": new_url,
"user_agent": user_agent,
"stream_id": stream_id,
@ -691,6 +701,7 @@ class ChannelService:
RedisKeys.events_channel(channel_id),
json.dumps(switch_request)
)
return True
@staticmethod
@ -703,7 +714,7 @@ class ChannelService:
stop_request = {
"event": EventType.CHANNEL_STOP,
"channel_id": channel_id,
"channel_id": str(channel_id),
"requester_worker_id": proxy_server.worker_id,
"timestamp": time.time()
}
@ -726,7 +737,7 @@ class ChannelService:
stop_request = {
"event": EventType.CLIENT_STOP,
"channel_id": channel_id,
"channel_id": str(channel_id),
"client_id": client_id,
"requester_worker_id": proxy_server.worker_id,
"timestamp": time.time()


@ -303,6 +303,14 @@ class StreamBuffer:
# Retrieve chunks
chunks = self.get_chunks_exact(client_index, chunk_count)
# Check if we got significantly fewer chunks than expected (likely due to expiration)
# Only check if we expected multiple chunks and got none or very few
if chunk_count > 3 and len(chunks) == 0 and chunks_behind > 10:
# Chunks are missing - likely expired from Redis
# Return empty list to signal client should skip forward
logger.debug(f"Chunks missing for client at index {client_index}, buffer at {self.index} ({chunks_behind} behind)")
return [], client_index
# Check total size
total_size = sum(len(c) for c in chunks)
@ -316,7 +324,7 @@ class StreamBuffer:
additional_size = sum(len(c) for c in more_chunks)
if total_size + additional_size <= MAX_SIZE:
chunks.extend(more_chunks)
chunk_count += additional
chunk_count += len(more_chunks) # Fixed: count actual additional chunks retrieved
return chunks, client_index + chunk_count


@ -8,6 +8,8 @@ import logging
import threading
import gevent # Add this import at the top of your file
from apps.proxy.config import TSConfig as Config
from apps.channels.models import Channel
from core.utils import log_system_event
from .server import ProxyServer
from .utils import create_ts_packet, get_logger
from .redis_keys import RedisKeys
@ -52,6 +54,10 @@ class StreamGenerator:
self.last_stats_bytes = 0
self.current_rate = 0.0
# TTL refresh tracking
self.last_ttl_refresh = time.time()
self.ttl_refresh_interval = 3 # Refresh TTL every 3 seconds of active streaming
def generate(self):
"""
Generator function that produces the stream content for the client.
@ -84,6 +90,20 @@ class StreamGenerator:
if not self._setup_streaming():
return
# Log client connect event
try:
channel_obj = Channel.objects.get(uuid=self.channel_id)
log_system_event(
'client_connect',
channel_id=self.channel_id,
channel_name=channel_obj.name,
client_ip=self.client_ip,
client_id=self.client_id,
user_agent=self.client_user_agent[:100] if self.client_user_agent else None
)
except Exception as e:
logger.error(f"Could not log client connect event: {e}")
# Main streaming loop
for chunk in self._stream_data_generator():
yield chunk
@ -204,6 +224,18 @@ class StreamGenerator:
self.empty_reads += 1
self.consecutive_empty += 1
# Check if we're too far behind (chunks expired from Redis)
chunks_behind = self.buffer.index - self.local_index
if chunks_behind > 50: # If more than 50 chunks behind, jump forward
# Calculate new position: stay a few chunks behind current buffer
initial_behind = ConfigHelper.initial_behind_chunks()
new_index = max(self.local_index, self.buffer.index - initial_behind)
logger.warning(f"[{self.client_id}] Client too far behind ({chunks_behind} chunks), jumping from {self.local_index} to {new_index}")
self.local_index = new_index
self.consecutive_empty = 0 # Reset since we're repositioning
continue # Try again immediately with new position
if self._should_send_keepalive(self.local_index):
keepalive_packet = create_ts_packet('keepalive')
logger.debug(f"[{self.client_id}] Sending keepalive packet while waiting at buffer head")
@ -324,7 +356,20 @@ class StreamGenerator:
ChannelMetadataField.STATS_UPDATED_AT: str(current_time)
}
proxy_server.redis_client.hset(client_key, mapping=stats)
# No need to set expiration as client heartbeat will refresh this key
# Refresh TTL periodically while actively streaming
# This provides proof-of-life independent of heartbeat thread
if current_time - self.last_ttl_refresh > self.ttl_refresh_interval:
try:
# Refresh TTL on client key
proxy_server.redis_client.expire(client_key, Config.CLIENT_RECORD_TTL)
# Also refresh the client set TTL
client_set_key = f"ts_proxy:channel:{self.channel_id}:clients"
proxy_server.redis_client.expire(client_set_key, Config.CLIENT_RECORD_TTL)
self.last_ttl_refresh = current_time
logger.debug(f"[{self.client_id}] Refreshed client TTL (active streaming)")
except Exception as ttl_error:
logger.debug(f"[{self.client_id}] Failed to refresh TTL: {ttl_error}")
except Exception as e:
logger.warning(f"[{self.client_id}] Failed to store stats in Redis: {e}")
@ -410,6 +455,22 @@ class StreamGenerator:
total_clients = client_manager.get_total_client_count()
logger.info(f"[{self.client_id}] Disconnected after {elapsed:.2f}s (local: {local_clients}, total: {total_clients})")
# Log client disconnect event
try:
channel_obj = Channel.objects.get(uuid=self.channel_id)
log_system_event(
'client_disconnect',
channel_id=self.channel_id,
channel_name=channel_obj.name,
client_ip=self.client_ip,
client_id=self.client_id,
user_agent=self.client_user_agent[:100] if self.client_user_agent else None,
duration=round(elapsed, 2),
bytes_sent=self.bytes_sent
)
except Exception as e:
logger.error(f"Could not log client disconnect event: {e}")
# Schedule channel shutdown if no clients left
if not stream_released: # Only if we haven't already released the stream
self._schedule_channel_shutdown_if_needed(local_clients)
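For reference, the catch-up logic in the read loop above amounts to the following arithmetic; the numbers are made up, and initial_behind stands in for ConfigHelper.initial_behind_chunks():

# Worked example of the "too far behind" resync from _stream_data_generator.
buffer_index = 1200    # newest chunk index in the ring buffer
local_index = 1100     # where this client last read
initial_behind = 10    # assumed value of ConfigHelper.initial_behind_chunks()

chunks_behind = buffer_index - local_index                   # 100 > 50, so the client has fallen behind
new_index = max(local_index, buffer_index - initial_behind)  # 1190: rejoin just behind the live edge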


@ -9,11 +9,14 @@ import subprocess
import gevent
import re
from typing import Optional, List
from django.db import connection
from django.shortcuts import get_object_or_404
from urllib3.exceptions import ReadTimeoutError
from apps.proxy.config import TSConfig as Config
from apps.channels.models import Channel, Stream
from apps.m3u.models import M3UAccount, M3UAccountProfile
from core.models import UserAgent, CoreSettings
from core.utils import log_system_event
from .stream_buffer import StreamBuffer
from .utils import detect_stream_type, get_logger
from .redis_keys import RedisKeys
@ -91,11 +94,13 @@ class StreamManager:
self.tried_stream_ids.add(self.current_stream_id)
logger.info(f"Loaded stream ID {self.current_stream_id} from Redis for channel {buffer.channel_id}")
else:
logger.warning(f"No stream_id found in Redis for channel {channel_id}")
logger.warning(f"No stream_id found in Redis for channel {channel_id}. "
f"Stream switching will rely on URL comparison to avoid selecting the same stream.")
except Exception as e:
logger.warning(f"Error loading stream ID from Redis: {e}")
else:
logger.warning(f"Unable to get stream ID for channel {channel_id} - stream switching may not work correctly")
logger.warning(f"Unable to get stream ID for channel {channel_id}. "
f"Stream switching will rely on URL comparison to avoid selecting the same stream.")
logger.info(f"Initialized stream manager for channel {buffer.channel_id}")
@ -111,6 +116,9 @@ class StreamManager:
self.stderr_reader_thread = None
self.ffmpeg_input_phase = True # Track if we're still reading input info
# Add HTTP reader thread property
self.http_reader = None
def _create_session(self):
"""Create and configure requests session with optimal settings"""
session = requests.Session()
@ -220,11 +228,12 @@ class StreamManager:
# Continue with normal flow
# Check stream type before connecting
stream_type = detect_stream_type(self.url)
if self.transcode == False and stream_type == StreamType.HLS:
logger.info(f"Detected HLS stream: {self.url} for channel {self.channel_id}")
logger.info(f"HLS streams will be handled with FFmpeg for now - future version will support HLS natively for channel {self.channel_id}")
# Enable transcoding for HLS streams
self.stream_type = detect_stream_type(self.url)
if self.transcode == False and self.stream_type in (StreamType.HLS, StreamType.RTSP, StreamType.UDP):
stream_type_name = "HLS" if self.stream_type == StreamType.HLS else ("RTSP/RTP" if self.stream_type == StreamType.RTSP else "UDP")
logger.info(f"Detected {stream_type_name} stream: {self.url} for channel {self.channel_id}")
logger.info(f"{stream_type_name} streams require FFmpeg for channel {self.channel_id}")
# Enable transcoding for HLS, RTSP/RTP, and UDP streams
self.transcode = True
# We'll override the stream profile selection with ffmpeg in the transcoding section
self.force_ffmpeg = True
@ -252,6 +261,20 @@ class StreamManager:
# Store connection start time to measure success duration
connection_start_time = time.time()
# Log reconnection event if this is a retry (not first attempt)
if self.retry_count > 0:
try:
channel_obj = Channel.objects.get(uuid=self.channel_id)
log_system_event(
'channel_reconnect',
channel_id=self.channel_id,
channel_name=channel_obj.name,
attempt=self.retry_count + 1,
max_attempts=self.max_retries
)
except Exception as e:
logger.error(f"Could not log reconnection event: {e}")
# Successfully connected - read stream data until disconnect/error
self._process_stream_data()
# If we get here, the connection was closed/failed
@ -281,6 +304,20 @@ class StreamManager:
if self.retry_count >= self.max_retries:
url_failed = True
logger.warning(f"Maximum retry attempts ({self.max_retries}) reached for URL: {self.url} for channel: {self.channel_id}")
# Log connection error event
try:
channel_obj = Channel.objects.get(uuid=self.channel_id)
log_system_event(
'channel_error',
channel_id=self.channel_id,
channel_name=channel_obj.name,
error_type='connection_failed',
url=self.url[:100] if self.url else None,
attempts=self.max_retries
)
except Exception as e:
logger.error(f"Could not log connection error event: {e}")
else:
# Wait with exponential backoff before retrying
timeout = min(.25 * self.retry_count, 3) # Cap at 3 seconds
@ -294,6 +331,21 @@ class StreamManager:
if self.retry_count >= self.max_retries:
url_failed = True
# Log connection error event with exception details
try:
channel_obj = Channel.objects.get(uuid=self.channel_id)
log_system_event(
'channel_error',
channel_id=self.channel_id,
channel_name=channel_obj.name,
error_type='connection_exception',
error_message=str(e)[:200],
url=self.url[:100] if self.url else None,
attempts=self.max_retries
)
except Exception as log_error:
logger.error(f"Could not log connection error event: {log_error}")
else:
# Wait with exponential backoff before retrying
timeout = min(.25 * self.retry_count, 3) # Cap at 3 seconds
@ -378,6 +430,12 @@ class StreamManager:
except Exception as e:
logger.error(f"Failed to update channel state in Redis: {e} for channel {self.channel_id}", exc_info=True)
# Close database connection for this thread
try:
connection.close()
except Exception:
pass
logger.info(f"Stream manager stopped for channel {self.channel_id}")
def _establish_transcode_connection(self):
@ -407,7 +465,7 @@ class StreamManager:
from core.models import StreamProfile
try:
stream_profile = StreamProfile.objects.get(name='ffmpeg', locked=True)
logger.info("Using FFmpeg stream profile for HLS content")
logger.info("Using FFmpeg stream profile for unsupported proxy content (HLS/RTSP/UDP)")
except StreamProfile.DoesNotExist:
# Fall back to channel's profile if FFmpeg not found
stream_profile = channel.get_stream_profile()
@ -417,6 +475,13 @@ class StreamManager:
# Build and start transcode command
self.transcode_cmd = stream_profile.build_command(self.url, self.user_agent)
# For UDP streams, remove any user_agent parameters from the command
if hasattr(self, 'stream_type') and self.stream_type == StreamType.UDP:
# Filter out any arguments that contain the user_agent value or related headers
self.transcode_cmd = [arg for arg in self.transcode_cmd if self.user_agent not in arg and 'user-agent' not in arg.lower() and 'user_agent' not in arg.lower()]
logger.debug(f"Removed user_agent parameters from UDP stream command for channel: {self.channel_id}")
logger.debug(f"Starting transcode process: {self.transcode_cmd} for channel: {self.channel_id}")
# Modified to capture stderr instead of discarding it
@ -681,6 +746,19 @@ class StreamManager:
# Reset buffering state
self.buffering = False
self.buffering_start_time = None
# Log failover event
try:
channel_obj = Channel.objects.get(uuid=self.channel_id)
log_system_event(
'channel_failover',
channel_id=self.channel_id,
channel_name=channel_obj.name,
reason='buffering_timeout',
duration=buffering_duration
)
except Exception as e:
logger.error(f"Could not log failover event: {e}")
else:
logger.error(f"Failed to switch to next stream for channel {self.channel_id} after buffering timeout")
else:
@ -688,6 +766,19 @@ class StreamManager:
self.buffering = True
self.buffering_start_time = time.time()
logger.warning(f"Buffering started for channel {self.channel_id} - speed: {ffmpeg_speed}x")
# Log system event for buffering
try:
channel_obj = Channel.objects.get(uuid=self.channel_id)
log_system_event(
'channel_buffering',
channel_id=self.channel_id,
channel_name=channel_obj.name,
speed=ffmpeg_speed
)
except Exception as e:
logger.error(f"Could not log buffering event: {e}")
# Log buffering warning
logger.debug(f"FFmpeg speed on channel {self.channel_id} is below {self.buffering_speed} ({ffmpeg_speed}x) - buffering detected")
# Set channel state to buffering
@ -737,9 +828,9 @@ class StreamManager:
def _establish_http_connection(self):
"""Establish a direct HTTP connection to the stream"""
"""Establish HTTP connection using thread-based reader (same as transcode path)"""
try:
logger.debug(f"Using TS Proxy to connect to stream: {self.url}")
logger.debug(f"Using HTTP streamer thread to connect to stream: {self.url}")
# Check if we already have active HTTP connections
if self.current_response or self.current_session:
@ -756,41 +847,39 @@ class StreamManager:
logger.debug(f"Closing existing transcode process before establishing HTTP connection for channel {self.channel_id}")
self._close_socket()
# Create new session for each connection attempt
session = self._create_session()
self.current_session = session
# Use HTTPStreamReader to fetch stream and pipe to a readable file descriptor
# This allows us to use the same fetch_chunk() path as transcode
from .http_streamer import HTTPStreamReader
# Stream the URL with proper timeout handling
response = session.get(
self.url,
stream=True,
timeout=(10, 60) # 10s connect timeout, 60s read timeout
# Create and start the HTTP stream reader
self.http_reader = HTTPStreamReader(
url=self.url,
user_agent=self.user_agent,
chunk_size=self.chunk_size
)
self.current_response = response
if response.status_code == 200:
self.connected = True
self.healthy = True
logger.info(f"Successfully connected to stream source for channel {self.channel_id}")
# Start the reader thread and get the read end of the pipe
pipe_fd = self.http_reader.start()
# Store connection start time for stability tracking
self.connection_start_time = time.time()
# Wrap the file descriptor in a file object (same as transcode stdout)
import os
self.socket = os.fdopen(pipe_fd, 'rb', buffering=0)
self.connected = True
self.healthy = True
# Set channel state to waiting for clients
self._set_waiting_for_clients()
logger.info(f"Successfully started HTTP streamer thread for channel {self.channel_id}")
# Store connection start time for stability tracking
self.connection_start_time = time.time()
# Set channel state to waiting for clients
self._set_waiting_for_clients()
return True
else:
logger.error(f"Failed to connect to stream for channel {self.channel_id}: HTTP {response.status_code}")
self._close_connection()
return False
except requests.exceptions.RequestException as e:
logger.error(f"HTTP request error: {e}")
self._close_connection()
return False
except Exception as e:
logger.error(f"Error establishing HTTP connection for channel {self.channel_id}: {e}", exc_info=True)
self._close_connection()
self._close_socket()
return False
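# HTTPStreamReader itself is not shown in this hunk; a minimal, standalone
# sketch of the idea it implements (a thread reads the HTTP response and
# writes into an os.pipe(), so the manager can read the pipe exactly like
# FFmpeg stdout). Class and parameter names here are illustrative, not the
# project's implementation:
import os
import threading
import requests

class PipeBackedHTTPReader:
    def __init__(self, url, user_agent, chunk_size=188 * 64):
        self.url = url
        self.user_agent = user_agent
        self.chunk_size = chunk_size
        self._read_fd = None
        self._write_fd = None
        self._thread = None

    def start(self):
        self._read_fd, self._write_fd = os.pipe()
        self._thread = threading.Thread(target=self._pump, daemon=True)
        self._thread.start()
        # Caller wraps this with os.fdopen(fd, 'rb', buffering=0)
        return self._read_fd

    def _pump(self):
        try:
            with requests.get(self.url, headers={"User-Agent": self.user_agent},
                              stream=True, timeout=(10, 60)) as resp:
                resp.raise_for_status()
                for chunk in resp.iter_content(chunk_size=self.chunk_size):
                    if chunk:
                        os.write(self._write_fd, chunk)
        except Exception:
            pass  # the reading side simply sees EOF and applies its retry logic
        finally:
            os.close(self._write_fd)  # signals EOF to the reading side

    def stop(self):
        # A real implementation also needs a way to abort a blocked read,
        # e.g. by closing the underlying response/session.
        if self._thread:
            self._thread.join(timeout=1.0)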
def _update_bytes_processed(self, chunk_size):
@ -818,48 +907,19 @@ class StreamManager:
logger.error(f"Error updating bytes processed: {e}")
def _process_stream_data(self):
"""Process stream data until disconnect or error"""
"""Process stream data until disconnect or error - unified path for both transcode and HTTP"""
try:
if self.transcode:
# Handle transcoded stream data
while self.running and self.connected and not self.stop_requested and not self.needs_stream_switch:
if self.fetch_chunk():
self.last_data_time = time.time()
else:
if not self.running:
break
gevent.sleep(0.1) # REPLACE time.sleep(0.1)
else:
# Handle direct HTTP connection
chunk_count = 0
try:
for chunk in self.current_response.iter_content(chunk_size=self.chunk_size):
# Check if we've been asked to stop
if self.stop_requested or self.url_switching or self.needs_stream_switch:
break
if chunk:
# Track chunk size before adding to buffer
chunk_size = len(chunk)
self._update_bytes_processed(chunk_size)
# Add chunk to buffer with TS packet alignment
success = self.buffer.add_chunk(chunk)
if success:
self.last_data_time = time.time()
chunk_count += 1
# Update last data timestamp in Redis
if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client:
last_data_key = RedisKeys.last_data(self.buffer.channel_id)
self.buffer.redis_client.set(last_data_key, str(time.time()), ex=60)
except (AttributeError, ConnectionError) as e:
if self.stop_requested or self.url_switching:
logger.debug(f"Expected connection error during shutdown/URL switch for channel {self.channel_id}: {e}")
else:
logger.error(f"Unexpected stream error for channel {self.channel_id}: {e}")
raise
# Both transcode and HTTP now use the same subprocess/socket approach
# This gives us perfect control: check flags between chunks, timeout just returns False
while self.running and self.connected and not self.stop_requested and not self.needs_stream_switch:
if self.fetch_chunk():
self.last_data_time = time.time()
else:
# fetch_chunk() returned False - could be timeout, no data, or error
if not self.running:
break
# Brief sleep before retry to avoid tight loop
gevent.sleep(0.1)
except Exception as e:
logger.error(f"Error processing stream data for channel {self.channel_id}: {e}", exc_info=True)
@ -948,6 +1008,7 @@ class StreamManager:
# Import both models for proper resource management
from apps.channels.models import Stream, Channel
from django.db import connection
# Update stream profile if we're switching streams
if self.current_stream_id and stream_id and self.current_stream_id != stream_id:
@ -965,9 +1026,17 @@ class StreamManager:
logger.debug(f"Updated m3u profile for channel {self.channel_id} to use profile from stream {stream_id}")
else:
logger.warning(f"Failed to update stream profile for channel {self.channel_id}")
except Exception as e:
logger.error(f"Error updating stream profile for channel {self.channel_id}: {e}")
finally:
# Always close database connection after profile update
try:
connection.close()
except Exception:
pass
# CRITICAL: Set a flag to prevent immediate reconnection with old URL
self.url_switching = True
self.url_switch_start_time = time.time()
@ -1005,6 +1074,19 @@ class StreamManager:
except Exception as e:
logger.warning(f"Failed to reset buffer position: {e}")
# Log stream switch event
try:
channel_obj = Channel.objects.get(uuid=self.channel_id)
log_system_event(
'stream_switch',
channel_id=self.channel_id,
channel_name=channel_obj.name,
new_url=new_url[:100] if new_url else None,
stream_id=stream_id
)
except Exception as e:
logger.error(f"Could not log stream switch event: {e}")
return True
except Exception as e:
logger.error(f"Error during URL update for channel {self.channel_id}: {e}", exc_info=True)
@ -1123,6 +1205,19 @@ class StreamManager:
if connection_result:
self.connection_start_time = time.time()
logger.info(f"Reconnect successful for channel {self.channel_id}")
# Log reconnection event
try:
channel_obj = Channel.objects.get(uuid=self.channel_id)
log_system_event(
'channel_reconnect',
channel_id=self.channel_id,
channel_name=channel_obj.name,
reason='health_monitor'
)
except Exception as e:
logger.error(f"Could not log reconnection event: {e}")
return True
else:
logger.warning(f"Reconnect failed for channel {self.channel_id}")
@ -1183,6 +1278,15 @@ class StreamManager:
if self.current_response or self.current_session:
self._close_connection()
# Stop HTTP reader thread if it exists
if hasattr(self, 'http_reader') and self.http_reader:
try:
logger.debug(f"Stopping HTTP reader thread for channel {self.channel_id}")
self.http_reader.stop()
self.http_reader = None
except Exception as e:
logger.debug(f"Error stopping HTTP reader for channel {self.channel_id}: {e}")
# Otherwise handle socket and transcode resources
if self.socket:
try:
@ -1191,25 +1295,17 @@ class StreamManager:
logger.debug(f"Error closing socket for channel {self.channel_id}: {e}")
pass
# Enhanced transcode process cleanup with immediate termination
if self.transcode_process:
try:
logger.debug(f"Killing transcode process for channel {self.channel_id}")
self.transcode_process.kill()

# Give it a very short time to die
try:
    self.transcode_process.wait(timeout=0.5)
except subprocess.TimeoutExpired:
    logger.error(f"Failed to kill transcode process even with force for channel {self.channel_id}")
except Exception as e:
logger.debug(f"Error terminating transcode process for channel {self.channel_id}: {e}")
@ -1219,6 +1315,30 @@ class StreamManager:
except Exception as e:
logger.error(f"Final kill attempt failed for channel {self.channel_id}: {e}")
# Explicitly close all subprocess pipes to prevent file descriptor leaks
try:
if self.transcode_process.stdin:
self.transcode_process.stdin.close()
if self.transcode_process.stdout:
self.transcode_process.stdout.close()
if self.transcode_process.stderr:
self.transcode_process.stderr.close()
logger.debug(f"Closed all subprocess pipes for channel {self.channel_id}")
except Exception as e:
logger.debug(f"Error closing subprocess pipes for channel {self.channel_id}: {e}")
# Join stderr reader thread to ensure it's fully terminated
if hasattr(self, 'stderr_reader_thread') and self.stderr_reader_thread and self.stderr_reader_thread.is_alive():
try:
logger.debug(f"Waiting for stderr reader thread to terminate for channel {self.channel_id}")
self.stderr_reader_thread.join(timeout=2.0)
if self.stderr_reader_thread.is_alive():
logger.warning(f"Stderr reader thread did not terminate within timeout for channel {self.channel_id}")
except Exception as e:
logger.debug(f"Error joining stderr reader thread for channel {self.channel_id}: {e}")
finally:
self.stderr_reader_thread = None
self.transcode_process = None
self.transcode_process_active = False # Reset the flag
@ -1250,7 +1370,7 @@ class StreamManager:
try:
# Set timeout for chunk reads
chunk_timeout = ConfigHelper.chunk_timeout()  # Use centralized timeout configuration
try:
# Handle different socket types with timeout
@ -1333,7 +1453,17 @@ class StreamManager:
# Only update if not already past connecting
if not current_state or current_state in [ChannelState.INITIALIZING, ChannelState.CONNECTING]:
# NEW CODE: Check if buffer has enough chunks
# IMPORTANT: Read from Redis, not local buffer.index, because in a multi-worker setup
# each worker has its own StreamBuffer instance with potentially stale local index
buffer_index_key = RedisKeys.buffer_index(channel_id)
current_buffer_index = 0
try:
redis_index = redis_client.get(buffer_index_key)
if redis_index:
current_buffer_index = int(redis_index)
except Exception as e:
logger.error(f"Error reading buffer index from Redis: {e}")
initial_chunks_needed = ConfigHelper.initial_behind_chunks()
if current_buffer_index < initial_chunks_needed:
@ -1381,10 +1511,21 @@ class StreamManager:
# Clean up completed timers
self._buffer_check_timers = [t for t in self._buffer_check_timers if t.is_alive()]
if hasattr(self.buffer, 'channel_id') and hasattr(self.buffer, 'redis_client'):
channel_id = self.buffer.channel_id
redis_client = self.buffer.redis_client
# IMPORTANT: Read from Redis, not local buffer.index
buffer_index_key = RedisKeys.buffer_index(channel_id)
current_buffer_index = 0
try:
redis_index = redis_client.get(buffer_index_key)
if redis_index:
current_buffer_index = int(redis_index)
except Exception as e:
logger.error(f"Error reading buffer index from Redis: {e}")
initial_chunks_needed = ConfigHelper.initial_behind_chunks() # Use ConfigHelper for consistency
if current_buffer_index >= initial_chunks_needed:
# We now have enough buffer, call _set_waiting_for_clients again
@ -1409,6 +1550,7 @@ class StreamManager:
def _try_next_stream(self):
"""
Try to switch to the next available stream for this channel.
Will iterate through multiple alternate streams if needed to find one with a different URL.
Returns:
bool: True if successfully switched to a new stream, False otherwise
@ -1434,60 +1576,71 @@ class StreamManager:
logger.warning(f"All {len(alternate_streams)} alternate streams have been tried for channel {self.channel_id}")
return False
# Get the next stream to try
next_stream = untried_streams[0]
stream_id = next_stream['stream_id']
profile_id = next_stream['profile_id'] # This is the M3U profile ID we need
# IMPROVED: Try multiple streams until we find one with a different URL
for next_stream in untried_streams:
stream_id = next_stream['stream_id']
profile_id = next_stream['profile_id'] # This is the M3U profile ID we need
# Add to tried streams
self.tried_stream_ids.add(stream_id)
# Add to tried streams
self.tried_stream_ids.add(stream_id)
# Get stream info including URL using the profile_id we already have
logger.info(f"Trying next stream ID {stream_id} with profile ID {profile_id} for channel {self.channel_id}")
stream_info = get_stream_info_for_switch(self.channel_id, stream_id)
# Get stream info including URL using the profile_id we already have
logger.info(f"Trying next stream ID {stream_id} with profile ID {profile_id} for channel {self.channel_id}")
stream_info = get_stream_info_for_switch(self.channel_id, stream_id)
if 'error' in stream_info or not stream_info.get('url'):
logger.error(f"Error getting info for stream {stream_id} for channel {self.channel_id}: {stream_info.get('error', 'No URL')}")
return False
if 'error' in stream_info or not stream_info.get('url'):
logger.error(f"Error getting info for stream {stream_id} for channel {self.channel_id}: {stream_info.get('error', 'No URL')}")
continue # Try next stream instead of giving up
# Update URL and user agent
new_url = stream_info['url']
new_user_agent = stream_info['user_agent']
new_transcode = stream_info['transcode']
# Update URL and user agent
new_url = stream_info['url']
new_user_agent = stream_info['user_agent']
new_transcode = stream_info['transcode']
logger.info(f"Switching from URL {self.url} to {new_url} for channel {self.channel_id}")
# CRITICAL FIX: Check if the new URL is the same as current URL
# This can happen when current_stream_id is None and we accidentally select the same stream
if new_url == self.url:
logger.warning(f"Stream ID {stream_id} generates the same URL as current stream ({new_url}). "
f"Skipping this stream and trying next alternative.")
continue # Try next stream instead of giving up
# IMPORTANT: Just update the URL, don't stop the channel or release resources
switch_result = self.update_url(new_url, stream_id, profile_id)
if not switch_result:
logger.error(f"Failed to update URL for stream ID {stream_id} for channel {self.channel_id}")
return False
logger.info(f"Switching from URL {self.url} to {new_url} for channel {self.channel_id}")
# Update stream ID tracking
self.current_stream_id = stream_id
# IMPORTANT: Just update the URL, don't stop the channel or release resources
switch_result = self.update_url(new_url, stream_id, profile_id)
if not switch_result:
logger.error(f"Failed to update URL for stream ID {stream_id} for channel {self.channel_id}")
continue # Try next stream
# Store the new user agent and transcode settings
self.user_agent = new_user_agent
self.transcode = new_transcode
# Update stream ID tracking
self.current_stream_id = stream_id
# Update stream metadata in Redis - use the profile_id we got from get_alternate_streams
if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client:
metadata_key = RedisKeys.channel_metadata(self.channel_id)
self.buffer.redis_client.hset(metadata_key, mapping={
ChannelMetadataField.URL: new_url,
ChannelMetadataField.USER_AGENT: new_user_agent,
ChannelMetadataField.STREAM_PROFILE: stream_info['stream_profile'],
ChannelMetadataField.M3U_PROFILE: str(profile_id), # Use the profile_id from get_alternate_streams
ChannelMetadataField.STREAM_ID: str(stream_id),
ChannelMetadataField.STREAM_SWITCH_TIME: str(time.time()),
ChannelMetadataField.STREAM_SWITCH_REASON: "max_retries_exceeded"
})
# Store the new user agent and transcode settings
self.user_agent = new_user_agent
self.transcode = new_transcode
# Log the switch
logger.info(f"Stream metadata updated for channel {self.channel_id} to stream ID {stream_id} with M3U profile {profile_id}")
# Update stream metadata in Redis - use the profile_id we got from get_alternate_streams
if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client:
metadata_key = RedisKeys.channel_metadata(self.channel_id)
self.buffer.redis_client.hset(metadata_key, mapping={
ChannelMetadataField.URL: new_url,
ChannelMetadataField.USER_AGENT: new_user_agent,
ChannelMetadataField.STREAM_PROFILE: stream_info['stream_profile'],
ChannelMetadataField.M3U_PROFILE: str(profile_id), # Use the profile_id from get_alternate_streams
ChannelMetadataField.STREAM_ID: str(stream_id),
ChannelMetadataField.STREAM_SWITCH_TIME: str(time.time()),
ChannelMetadataField.STREAM_SWITCH_REASON: "max_retries_exceeded"
})
logger.info(f"Successfully switched to stream ID {stream_id} with URL {new_url} for channel {self.channel_id}")
return True
# Log the switch
logger.info(f"Stream metadata updated for channel {self.channel_id} to stream ID {stream_id} with M3U profile {profile_id}")
logger.info(f"Successfully switched to stream ID {stream_id} with URL {new_url} for channel {self.channel_id}")
return True
# If we get here, we tried all streams but none worked
logger.error(f"Tried {len(untried_streams)} alternate streams but none were suitable for channel {self.channel_id}")
return False
except Exception as e:
logger.error(f"Error trying next stream for channel {self.channel_id}: {e}", exc_info=True)

View file

@ -8,7 +8,7 @@ from typing import Optional, Tuple, List
from django.shortcuts import get_object_or_404
from apps.channels.models import Channel, Stream
from apps.m3u.models import M3UAccount, M3UAccountProfile
from core.models import UserAgent, CoreSettings
from core.models import UserAgent, CoreSettings, StreamProfile
from .utils import get_logger
from uuid import UUID
import requests
@ -26,16 +26,100 @@ def get_stream_object(id: str):
def generate_stream_url(channel_id: str) -> Tuple[str, str, bool, Optional[int]]:
"""
Generate the appropriate stream URL for a channel based on its profile settings.
Generate the appropriate stream URL for a channel or stream based on its profile settings.
Args:
channel_id: The UUID of the channel
channel_id: The UUID of the channel or stream hash
Returns:
Tuple[str, str, bool, Optional[int]]: (stream_url, user_agent, transcode_flag, profile_id)
"""
try:
channel = get_stream_object(channel_id)
channel_or_stream = get_stream_object(channel_id)
# Handle direct stream preview (custom streams)
if isinstance(channel_or_stream, Stream):
from core.utils import RedisClient
stream = channel_or_stream
logger.info(f"Previewing stream directly: {stream.id} ({stream.name})")
# For custom streams, we need to get the M3U account and profile
m3u_account = stream.m3u_account
if not m3u_account:
logger.error(f"Stream {stream.id} has no M3U account")
return None, None, False, None
# Get active profiles for this M3U account
m3u_profiles = m3u_account.profiles.filter(is_active=True)
default_profile = next((obj for obj in m3u_profiles if obj.is_default), None)
if not default_profile:
logger.error(f"No default active profile found for M3U account {m3u_account.id}")
return None, None, False, None
# Check profiles in order: default first, then others
profiles = [default_profile] + [obj for obj in m3u_profiles if not obj.is_default]
# Try to find an available profile with connection capacity
redis_client = RedisClient.get_client()
selected_profile = None
for profile in profiles:
logger.debug(f"Evaluating M3U profile {profile.id} for stream preview")
# Check connection availability
if redis_client:
profile_connections_key = f"profile_connections:{profile.id}"
current_connections = int(redis_client.get(profile_connections_key) or 0)
# Check if profile has available slots (or unlimited connections)
if profile.max_streams == 0 or current_connections < profile.max_streams:
selected_profile = profile
logger.debug(f"Selected profile {profile.id} with {current_connections}/{profile.max_streams} connections for stream preview")
break
else:
logger.debug(f"Profile {profile.id} at max connections: {current_connections}/{profile.max_streams}")
else:
# No Redis available, use first active profile
selected_profile = profile
break
if not selected_profile:
logger.error(f"No profiles available with connection capacity for M3U account {m3u_account.id}")
return None, None, False, None
# Get the appropriate user agent
stream_user_agent = m3u_account.get_user_agent().user_agent
if stream_user_agent is None:
stream_user_agent = UserAgent.objects.get(id=CoreSettings.get_default_user_agent_id())
logger.debug(f"No user agent found for account, using default: {stream_user_agent}")
# Get stream URL with the selected profile's URL transformation
stream_url = transform_url(stream.url, selected_profile.search_pattern, selected_profile.replace_pattern)
# Check if the stream has its own stream_profile set, otherwise use default
if stream.stream_profile:
stream_profile = stream.stream_profile
logger.debug(f"Using stream's own stream profile: {stream_profile.name}")
else:
stream_profile = StreamProfile.objects.get(
id=CoreSettings.get_default_stream_profile_id()
)
logger.debug(f"Using default stream profile: {stream_profile.name}")
# Check if transcoding is needed
if stream_profile is None or stream_profile.is_proxy():
transcode = False
else:
transcode = True
stream_profile_id = stream_profile.id
return stream_url, stream_user_agent, transcode, stream_profile_id
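# transform_url() is not shown in this hunk; a minimal sketch assuming the
# usual regex search/replace semantics of M3U profile patterns (the _sketch
# suffix marks it as illustrative, not the project's function):
import re

def transform_url_sketch(url, search_pattern, replace_pattern):
    if not search_pattern:
        return url
    return re.sub(search_pattern, replace_pattern, url)

# transform_url_sketch("http://a.example/live/1.ts", r"a\.example", "b.example")
#   -> "http://b.example/live/1.ts"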
# Handle channel preview (existing logic)
channel = channel_or_stream
# Get stream and profile for this channel
# Note: get_stream now returns 3 values (stream_id, profile_id, error_reason)
@ -351,6 +435,9 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
"""
Validate if a stream URL is accessible without downloading the full content.
Note: UDP/RTP/RTSP streams are automatically considered valid as they cannot
be validated via HTTP methods.
Args:
url (str): The URL to validate
user_agent (str): User agent to use for the request
@ -359,6 +446,12 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
Returns:
tuple: (is_valid, final_url, status_code, message)
"""
# Check if URL uses non-HTTP protocols (UDP/RTP/RTSP)
# These cannot be validated via HTTP methods, so we skip validation
if url.startswith(('udp://', 'rtp://', 'rtsp://')):
logger.info(f"Skipping HTTP validation for non-HTTP protocol: {url}")
return True, url, 200, "Non-HTTP protocol (UDP/RTP/RTSP) - validation skipped"
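# Example of the early return above (no network request is made):
#   validate_stream_url("udp://239.0.0.1:1234")
#     -> (True, "udp://239.0.0.1:1234", 200,
#         "Non-HTTP protocol (UDP/RTP/RTSP) - validation skipped")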
try:
# Create session with proper headers
session = requests.Session()

View file

@ -7,19 +7,27 @@ logger = logging.getLogger("ts_proxy")
def detect_stream_type(url):
"""
Detect if stream URL is HLS or TS format.
Detect if stream URL is HLS, RTSP/RTP, UDP, or TS format.
Args:
url (str): The stream URL to analyze
Returns:
str: 'hls' or 'ts' depending on detected format
str: 'hls', 'rtsp', 'udp', or 'ts' depending on detected format
"""
if not url:
return 'unknown'
url_lower = url.lower()
# Check for UDP streams (requires FFmpeg)
if url_lower.startswith('udp://'):
return 'udp'
# Check for RTSP/RTP streams (requires FFmpeg)
if url_lower.startswith('rtsp://') or url_lower.startswith('rtp://'):
return 'rtsp'
# Look for common HLS indicators
if (url_lower.endswith('.m3u8') or
'.m3u8?' in url_lower or

View file

@ -4,7 +4,7 @@ import time
import random
import re
import pathlib
from django.http import StreamingHttpResponse, JsonResponse, HttpResponseRedirect
from django.http import StreamingHttpResponse, JsonResponse, HttpResponseRedirect, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import get_object_or_404
from apps.proxy.config import TSConfig as Config
@ -84,11 +84,18 @@ def stream_ts(request, channel_id):
if state_field in metadata:
channel_state = metadata[state_field].decode("utf-8")
if channel_state:
# Channel is being initialized or already active - no need for reinitialization
# Active/running states - channel is operational, don't reinitialize
if channel_state in [
ChannelState.ACTIVE,
ChannelState.WAITING_FOR_CLIENTS,
ChannelState.BUFFERING,
ChannelState.INITIALIZING,
ChannelState.CONNECTING,
ChannelState.STOPPING,
]:
needs_initialization = False
logger.debug(
f"[{client_id}] Channel {channel_id} already in state {channel_state}, skipping initialization"
f"[{client_id}] Channel {channel_id} in state {channel_state}, skipping initialization"
)
# Special handling for initializing/connecting states
@ -98,19 +105,34 @@ def stream_ts(request, channel_id):
]:
channel_initializing = True
logger.debug(
f"[{client_id}] Channel {channel_id} is still initializing, client will wait for completion"
f"[{client_id}] Channel {channel_id} is still initializing, client will wait"
)
# Terminal states - channel needs cleanup before reinitialization
elif channel_state in [
ChannelState.ERROR,
ChannelState.STOPPED,
]:
needs_initialization = True
logger.info(
f"[{client_id}] Channel {channel_id} in terminal state {channel_state}, will reinitialize"
)
# Unknown/empty state - check if owner is alive
else:
# Only check for owner if channel is in a valid state
owner_field = ChannelMetadataField.OWNER.encode("utf-8")
if owner_field in metadata:
owner = metadata[owner_field].decode("utf-8")
owner_heartbeat_key = f"ts_proxy:worker:{owner}:heartbeat"
if proxy_server.redis_client.exists(owner_heartbeat_key):
# Owner is still active with unknown state - don't reinitialize
needs_initialization = False
logger.debug(
f"[{client_id}] Channel {channel_id} has active owner {owner}"
f"[{client_id}] Channel {channel_id} has active owner {owner}, skipping init"
)
else:
# Owner dead - needs reinitialization
needs_initialization = True
logger.warning(
f"[{client_id}] Channel {channel_id} owner {owner} is dead, will reinitialize"
)
# Start initialization if needed
@ -127,9 +149,9 @@ def stream_ts(request, channel_id):
)
ChannelService.stop_channel(channel_id)
# Use max retry attempts and connection timeout from config
max_retries = ConfigHelper.max_retries()
retry_timeout = ConfigHelper.connection_timeout()
# Use fixed retry interval and timeout
retry_timeout = 3 # 3 seconds total timeout
retry_interval = 0.1 # 100ms between attempts
wait_start_time = time.time()
stream_url = None
@ -137,54 +159,69 @@ def stream_ts(request, channel_id):
transcode = False
profile_value = None
error_reason = None
attempt = 0
should_retry = True
# Try to get a stream with configured retries
for attempt in range(max_retries):
# Try to get a stream with fixed interval retries
while should_retry and time.time() - wait_start_time < retry_timeout:
attempt += 1
stream_url, stream_user_agent, transcode, profile_value = (
generate_stream_url(channel_id)
)
if stream_url is not None:
logger.info(
f"[{client_id}] Successfully obtained stream for channel {channel_id}"
f"[{client_id}] Successfully obtained stream for channel {channel_id} after {attempt} attempts"
)
break
# If we failed because there are no streams assigned, don't retry
_, _, error_reason = channel.get_stream()
if error_reason and "maximum connection limits" not in error_reason:
logger.warning(
f"[{client_id}] Can't retry - error not related to connection limits: {error_reason}"
# On first failure, check if the error is retryable
if attempt == 1:
_, _, error_reason = channel.get_stream()
if error_reason and "maximum connection limits" not in error_reason:
logger.warning(
f"[{client_id}] Can't retry - error not related to connection limits: {error_reason}"
)
should_retry = False
break
# Check if we have time remaining for another sleep cycle
elapsed_time = time.time() - wait_start_time
remaining_time = retry_timeout - elapsed_time
# If we don't have enough time for the next sleep interval, break
# but only after we've already made an attempt (the while condition will try one more time)
if remaining_time <= retry_interval:
logger.info(
f"[{client_id}] Insufficient time ({remaining_time:.1f}s) for another sleep cycle, will make one final attempt"
)
break
# Don't exceed the overall connection timeout
if time.time() - wait_start_time > retry_timeout:
logger.warning(
f"[{client_id}] Connection wait timeout exceeded ({retry_timeout}s)"
)
break
# Wait before retrying (using exponential backoff with a cap)
wait_time = min(0.5 * (2**attempt), 2.0) # Caps at 2 seconds
# Wait before retrying
logger.info(
f"[{client_id}] Waiting {wait_time:.1f}s for a connection to become available (attempt {attempt+1}/{max_retries})"
f"[{client_id}] Waiting {retry_interval*1000:.0f}ms for a connection to become available (attempt {attempt}, {remaining_time:.1f}s remaining)"
)
gevent.sleep(
wait_time
) # FIXED: Using gevent.sleep instead of time.sleep
gevent.sleep(retry_interval)
retry_interval += 0.025 # Increase wait time by 25ms for next attempt
# Make one final attempt if we still don't have a stream, should retry, and haven't exceeded timeout
if stream_url is None and should_retry and time.time() - wait_start_time < retry_timeout:
attempt += 1
logger.info(
f"[{client_id}] Making final attempt {attempt} at timeout boundary"
)
stream_url, stream_user_agent, transcode, profile_value = (
generate_stream_url(channel_id)
)
if stream_url is not None:
logger.info(
f"[{client_id}] Successfully obtained stream on final attempt for channel {channel_id}"
)
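# A compact, illustrative sketch of the time-budgeted retry loop used above
# (3 s total budget, ~100 ms between attempts, interval growing by 25 ms per
# attempt, plus one final attempt at the boundary); attempt_fn stands in for
# generate_stream_url and is hypothetical:
import time
import gevent

def retry_with_budget(attempt_fn, budget=3.0, interval=0.1, step=0.025):
    start = time.time()
    attempts = 0
    while time.time() - start < budget:
        attempts += 1
        result = attempt_fn()
        if result is not None:
            return result, attempts
        if budget - (time.time() - start) <= interval:
            break  # not enough time left for another full sleep cycle
        gevent.sleep(interval)
        interval += step
    return None, attempts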
if stream_url is None:
# Make sure to release any stream locks that might have been acquired
if hasattr(channel, "streams") and channel.streams.exists():
for stream in channel.streams.all():
try:
stream.release_stream()
logger.info(
f"[{client_id}] Released stream {stream.id} for channel {channel_id}"
)
except Exception as e:
logger.error(f"[{client_id}] Error releasing stream: {e}")
# Release the channel's stream lock if one was acquired
# Note: Only call this if get_stream() actually assigned a stream
# In our case, if stream_url is None, no stream was ever assigned, so don't release
# Get the specific error message if available
wait_duration = f"{int(time.time() - wait_start_time)}s"
@ -193,6 +230,9 @@ def stream_ts(request, channel_id):
if error_reason
else "No available streams for this channel"
)
logger.info(
f"[{client_id}] Failed to obtain stream after {attempt} attempts over {wait_duration}: {error_msg}"
)
return JsonResponse(
{"error": error_msg, "waited": wait_duration}, status=503
) # 503 Service Unavailable is appropriate here
@ -274,6 +314,15 @@ def stream_ts(request, channel_id):
logger.info(
f"[{client_id}] Redirecting to validated URL: {final_url} ({message})"
)
# For non-HTTP protocols (RTSP/RTP/UDP), we need to manually create the redirect
# because Django's HttpResponseRedirect blocks them for security
if final_url.startswith(('rtsp://', 'rtp://', 'udp://')):
logger.info(f"[{client_id}] Using manual redirect for non-HTTP protocol")
response = HttpResponse(status=301)
response['Location'] = final_url
return response
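# Background: django.http.HttpResponseRedirect only permits the
# http/https/ftp schemes (HttpResponseRedirectBase.allowed_schemes) and
# raises DisallowedRedirect for anything else, hence the hand-built
# 301 response for rtsp/rtp/udp above.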
return HttpResponseRedirect(final_url)
else:
logger.error(
@ -478,24 +527,33 @@ def stream_xc(request, username, password, channel_id):
print(f"Fetchin channel with ID: {channel_id}")
if user.user_level < 10:
filters = {
"id": int(channel_id),
"channelprofilemembership__enabled": True,
"user_level__lte": user.user_level,
}
user_profile_count = user.channel_profiles.count()
if user.channel_profiles.count() > 0:
channel_profiles = user.channel_profiles.all()
filters["channelprofilemembership__channel_profile__in"] = channel_profiles
# If user has ALL profiles or NO profiles, give unrestricted access
if user_profile_count == 0:
# No profile filtering - user sees all channels based on user_level
filters = {
"id": int(channel_id),
"user_level__lte": user.user_level
}
channel = Channel.objects.filter(**filters).first()
else:
# User has specific limited profiles assigned
filters = {
"id": int(channel_id),
"channelprofilemembership__enabled": True,
"user_level__lte": user.user_level,
"channelprofilemembership__channel_profile__in": user.channel_profiles.all()
}
channel = Channel.objects.filter(**filters).distinct().first()
if not channel:
return JsonResponse({"error": "Not found"}, status=404)
else:
channel = get_object_or_404(Channel, id=channel_id)
# @TODO: we've got the file 'type' via extension, support this when we support multiple outputs
return stream_ts(request._request, str(channel.uuid))
@csrf_exempt

View file

@ -540,11 +540,9 @@ class RedisBackedVODConnection:
}
return {}
def cleanup(self, connection_manager=None, current_worker_id=None):
    """Smart cleanup based on worker ownership and active streams"""
# Always clean up local resources first
if self.local_response:
self.local_response.close()
self.local_response = None
@ -552,38 +550,72 @@ class RedisBackedVODConnection:
self.local_session.close()
self.local_session = None
# Remove from Redis
if self.redis_client:
try:
# Use pipeline for atomic cleanup operations
pipe = self.redis_client.pipeline()
# Get current connection state to check ownership and active streams
state = self._get_connection_state()
# 1. Remove main connection state (now contains consolidated data)
pipe.delete(self.connection_key)
if not state:
logger.info(f"[{self.session_id}] No connection state found - local cleanup only")
return
# 2. Remove distributed lock
pipe.delete(self.lock_key)
# Check if there are active streams
if state.active_streams > 0:
# There are active streams - check ownership
if current_worker_id and state.worker_id == current_worker_id:
logger.info(f"[{self.session_id}] Active streams present ({state.active_streams}) and we own them - local cleanup only")
else:
logger.info(f"[{self.session_id}] Active streams present ({state.active_streams}) but owned by worker {state.worker_id} - local cleanup only")
return
# Execute all cleanup operations
pipe.execute()
# No active streams - we can clean up Redis state
if not self.redis_client:
logger.info(f"[{self.session_id}] No Redis client - local cleanup only")
return
logger.info(f"[{self.session_id}] Cleaned up all Redis keys (consolidated connection state, locks)")
# Acquire lock and do final check before cleanup to prevent race conditions
if not self._acquire_lock():
logger.warning(f"[{self.session_id}] Could not acquire lock for cleanup - skipping")
return
# Decrement profile connections if we have the state and connection manager
if state and state.m3u_profile_id and connection_manager:
logger.info(f"[{self.session_id}] Decrementing profile connection count for profile {state.m3u_profile_id}")
connection_manager._decrement_profile_connections(state.m3u_profile_id)
logger.info(f"[{self.session_id}] Profile connection count decremented for profile {state.m3u_profile_id}")
else:
if not state:
logger.warning(f"[{self.session_id}] No connection state found during cleanup - cannot decrement profile connections")
elif not state.m3u_profile_id:
logger.warning(f"[{self.session_id}] No profile ID in connection state - cannot decrement profile connections")
elif not connection_manager:
logger.warning(f"[{self.session_id}] No connection manager provided - cannot decrement profile connections")
try:
# Re-check active streams with lock held to prevent race conditions
current_state = self._get_connection_state()
if not current_state:
logger.info(f"[{self.session_id}] Connection state no longer exists - cleanup already done")
return
except Exception as e:
logger.error(f"[{self.session_id}] Error cleaning up Redis state: {e}")
if current_state.active_streams > 0:
logger.info(f"[{self.session_id}] Active streams now present ({current_state.active_streams}) - skipping cleanup")
return
# Use pipeline for atomic cleanup operations
pipe = self.redis_client.pipeline()
# 1. Remove main connection state (contains consolidated data)
pipe.delete(self.connection_key)
# 2. Remove distributed lock (will be released below anyway)
pipe.delete(self.lock_key)
# Execute all cleanup operations
pipe.execute()
logger.info(f"[{self.session_id}] Cleaned up Redis keys (verified no active streams)")
# Decrement profile connections if we have the state and connection manager
if state.m3u_profile_id and connection_manager:
connection_manager._decrement_profile_connections(state.m3u_profile_id)
logger.info(f"[{self.session_id}] Profile connection count decremented for profile {state.m3u_profile_id}")
else:
if not state.m3u_profile_id:
logger.warning(f"[{self.session_id}] No profile ID in connection state - cannot decrement profile connections")
elif not connection_manager:
logger.warning(f"[{self.session_id}] No connection manager provided - cannot decrement profile connections")
except Exception as e:
logger.error(f"[{self.session_id}] Error cleaning up Redis state: {e}")
finally:
# Always release the lock
self._release_lock()
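# _acquire_lock()/_release_lock() are not shown in this hunk; a hedged sketch
# of the usual Redis pattern they imply (SET NX with a TTL, delete only while
# we still hold our token). Names and the TTL are illustrative:
import uuid

def acquire_redis_lock(redis_client, lock_key, ttl_seconds=10):
    token = str(uuid.uuid4())
    # SET key value NX EX ttl -> only succeeds if the key does not exist yet
    if redis_client.set(lock_key, token, nx=True, ex=ttl_seconds):
        return token
    return None

def release_redis_lock(redis_client, lock_key, token):
    # Best-effort check-and-delete; a production version would use a Lua
    # script (or redis.lock.Lock) to make this atomic.
    current = redis_client.get(lock_key)
    if current in (token, token.encode()):
        redis_client.delete(lock_key)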
# Modify the VODConnectionManager to use Redis-backed connections
@ -694,6 +726,15 @@ class MultiWorkerVODConnectionManager:
logger.info(f"[{client_id}] Worker {self.worker_id} - Found matching idle session: {matching_session_id}")
effective_session_id = matching_session_id
client_id = matching_session_id # Update client_id for logging consistency
# IMMEDIATELY reserve this session by incrementing active streams to prevent cleanup
temp_connection = RedisBackedVODConnection(effective_session_id, self.redis_client)
if temp_connection.increment_active_streams():
logger.info(f"[{client_id}] Reserved idle session - incremented active streams")
else:
logger.warning(f"[{client_id}] Failed to reserve idle session - falling back to new session")
effective_session_id = session_id
matching_session_id = None # Clear the match so we create a new connection
else:
logger.info(f"[{client_id}] Worker {self.worker_id} - No matching idle session found, using new session")
effective_session_id = session_id
@ -761,14 +802,20 @@ class MultiWorkerVODConnectionManager:
else:
logger.info(f"[{client_id}] Worker {self.worker_id} - Using existing Redis-backed connection")
# Update session activity in consolidated connection state
# Transfer ownership to current worker and update session activity
if redis_connection._acquire_lock():
try:
state = redis_connection._get_connection_state()
if state:
old_worker = state.worker_id
state.last_activity = time.time()
state.worker_id = self.worker_id # Track which worker last accessed this
state.worker_id = self.worker_id # Transfer ownership to current worker
redis_connection._save_connection_state(state)
if old_worker != self.worker_id:
logger.info(f"[{client_id}] Ownership transferred from worker {old_worker} to {self.worker_id}")
else:
logger.debug(f"[{client_id}] Worker {self.worker_id} retaining ownership")
finally:
redis_connection._release_lock()
@ -788,8 +835,13 @@ class MultiWorkerVODConnectionManager:
try:
logger.info(f"[{client_id}] Worker {self.worker_id} - Starting Redis-backed stream")
# Increment active streams
redis_connection.increment_active_streams()
# Increment active streams (unless we already did it for session reuse)
if not matching_session_id:
# New session - increment active streams
redis_connection.increment_active_streams()
else:
# Reused session - we already incremented when reserving the session
logger.debug(f"[{client_id}] Using pre-reserved session - active streams already incremented")
bytes_sent = 0
chunk_count = 0
@ -819,13 +871,13 @@ class MultiWorkerVODConnectionManager:
redis_connection.decrement_active_streams()
decremented = True
# Schedule cleanup if no active streams after normal completion
# Schedule smart cleanup if no active streams after normal completion
if not redis_connection.has_active_streams():
def delayed_cleanup():
time.sleep(1) # Wait 1 second
if not redis_connection.has_active_streams():
logger.info(f"[{client_id}] Worker {self.worker_id} - Cleaning up idle Redis connection after normal completion")
redis_connection.cleanup(connection_manager=self)
# Smart cleanup: check active streams and ownership
logger.info(f"[{client_id}] Worker {self.worker_id} - Checking for smart cleanup after normal completion")
redis_connection.cleanup(connection_manager=self, current_worker_id=self.worker_id)
import threading
cleanup_thread = threading.Thread(target=delayed_cleanup)
@ -838,13 +890,13 @@ class MultiWorkerVODConnectionManager:
redis_connection.decrement_active_streams()
decremented = True
# Schedule cleanup if no active streams
# Schedule smart cleanup if no active streams
if not redis_connection.has_active_streams():
def delayed_cleanup():
time.sleep(1) # Wait 1 second
if not redis_connection.has_active_streams():
logger.info(f"[{client_id}] Worker {self.worker_id} - Cleaning up idle Redis connection")
redis_connection.cleanup(connection_manager=self)
# Smart cleanup: check active streams and ownership
logger.info(f"[{client_id}] Worker {self.worker_id} - Checking for smart cleanup after client disconnect")
redis_connection.cleanup(connection_manager=self, current_worker_id=self.worker_id)
import threading
cleanup_thread = threading.Thread(target=delayed_cleanup)
@ -856,7 +908,8 @@ class MultiWorkerVODConnectionManager:
if not decremented:
redis_connection.decrement_active_streams()
decremented = True
redis_connection.cleanup(connection_manager=self)
# Smart cleanup on error - immediate cleanup since we're in error state
redis_connection.cleanup(connection_manager=self, current_worker_id=self.worker_id)
yield b"Error: Stream interrupted"
finally:

View file

@ -176,14 +176,15 @@ class VODStreamView(View):
logger.error(f"[VOD-ERROR] No stream URL available for {content_type} {content_id}")
return HttpResponse("No stream URL available", status=503)
# Get M3U profile
m3u_profile = self._get_m3u_profile(m3u_account, profile_id)
# Get M3U profile (returns profile and current connection count)
profile_result = self._get_m3u_profile(m3u_account, profile_id, session_id)
if not m3u_profile:
if not profile_result or not profile_result[0]:
logger.error(f"[VOD-ERROR] No suitable M3U profile found for {content_type} {content_id}")
return HttpResponse("No available stream", status=503)
logger.info(f"[VOD-PROFILE] Using M3U profile: {m3u_profile.id} (max_streams: {m3u_profile.max_streams}, current: {m3u_profile.current_viewers})")
m3u_profile, current_connections = profile_result
logger.info(f"[VOD-PROFILE] Using M3U profile: {m3u_profile.id} (max_streams: {m3u_profile.max_streams}, current: {current_connections})")
# Connection tracking is handled by the connection manager
# Transform URL based on profile
@ -279,11 +280,13 @@ class VODStreamView(View):
logger.error(f"[VOD-HEAD] No stream URL available for {content_type} {content_id}")
return HttpResponse("No stream URL available", status=503)
# Get M3U profile
m3u_profile = self._get_m3u_profile(m3u_account, profile_id)
if not m3u_profile:
logger.error(f"[VOD-HEAD] No M3U profile found")
return HttpResponse("Profile not found", status=404)
# Get M3U profile (returns profile and current connection count)
profile_result = self._get_m3u_profile(m3u_account, profile_id, session_id)
if not profile_result or not profile_result[0]:
logger.error(f"[VOD-HEAD] No M3U profile found or all profiles at capacity")
return HttpResponse("No available stream", status=503)
m3u_profile, current_connections = profile_result
# Transform URL if needed
final_stream_url = self._transform_url(stream_url, m3u_profile)
@ -517,10 +520,63 @@ class VODStreamView(View):
logger.error(f"[VOD-URL] Error getting stream URL from relation: {e}", exc_info=True)
return None
def _get_m3u_profile(self, m3u_account, profile_id, session_id=None):
    """Get appropriate M3U profile for streaming using Redis-based viewer counts
Args:
m3u_account: M3UAccount instance
profile_id: Optional specific profile ID requested
session_id: Optional session ID to check for existing connections
Returns:
tuple: (M3UAccountProfile, current_connections) or None if no profile found
"""
try:
# If specific profile requested, try to use it
from core.utils import RedisClient
redis_client = RedisClient.get_client()
if not redis_client:
logger.warning("Redis not available, falling back to default profile")
default_profile = M3UAccountProfile.objects.filter(
m3u_account=m3u_account,
is_active=True,
is_default=True
).first()
return (default_profile, 0) if default_profile else None
# Check if this session already has an active connection
if session_id:
persistent_connection_key = f"vod_persistent_connection:{session_id}"
connection_data = redis_client.hgetall(persistent_connection_key)
if connection_data:
# Decode Redis hash data
decoded_data = {}
for k, v in connection_data.items():
k_str = k.decode('utf-8') if isinstance(k, bytes) else k
v_str = v.decode('utf-8') if isinstance(v, bytes) else v
decoded_data[k_str] = v_str
existing_profile_id = decoded_data.get('m3u_profile_id')
if existing_profile_id:
try:
existing_profile = M3UAccountProfile.objects.get(
id=int(existing_profile_id),
m3u_account=m3u_account,
is_active=True
)
# Get current connections for logging
profile_connections_key = f"profile_connections:{existing_profile.id}"
current_connections = int(redis_client.get(profile_connections_key) or 0)
logger.info(f"[PROFILE-SELECTION] Session {session_id} reusing existing profile {existing_profile.id}: {current_connections}/{existing_profile.max_streams} connections")
return (existing_profile, current_connections)
except (M3UAccountProfile.DoesNotExist, ValueError):
logger.warning(f"[PROFILE-SELECTION] Session {session_id} has invalid profile ID {existing_profile_id}, selecting new profile")
except Exception as e:
logger.warning(f"[PROFILE-SELECTION] Error checking existing profile for session {session_id}: {e}")
else:
logger.debug(f"[PROFILE-SELECTION] Session {session_id} exists but has no profile ID stored") # If specific profile requested, try to use it
if profile_id:
try:
profile = M3UAccountProfile.objects.get(
@ -528,24 +584,46 @@ class VODStreamView(View):
m3u_account=m3u_account,
is_active=True
)
if profile.current_viewers < profile.max_streams or profile.max_streams == 0:
return profile
except M3UAccountProfile.DoesNotExist:
pass
# Check Redis-based current connections
profile_connections_key = f"profile_connections:{profile.id}"
current_connections = int(redis_client.get(profile_connections_key) or 0)
# Find available profile ordered by current usage (least loaded first)
profiles = M3UAccountProfile.objects.filter(
if profile.max_streams == 0 or current_connections < profile.max_streams:
logger.info(f"[PROFILE-SELECTION] Using requested profile {profile.id}: {current_connections}/{profile.max_streams} connections")
return (profile, current_connections)
else:
logger.warning(f"[PROFILE-SELECTION] Requested profile {profile.id} is at capacity: {current_connections}/{profile.max_streams}")
except M3UAccountProfile.DoesNotExist:
logger.warning(f"[PROFILE-SELECTION] Requested profile {profile_id} not found")
# Get active profiles ordered by priority (default first)
m3u_profiles = M3UAccountProfile.objects.filter(
m3u_account=m3u_account,
is_active=True
).order_by('current_viewers')
)
default_profile = m3u_profiles.filter(is_default=True).first()
if not default_profile:
logger.error(f"[PROFILE-SELECTION] No default profile found for M3U account {m3u_account.id}")
return None
# Check profiles in order: default first, then others
profiles = [default_profile] + list(m3u_profiles.filter(is_default=False))
for profile in profiles:
# Check if profile has available connection slots
if profile.current_viewers < profile.max_streams or profile.max_streams == 0:
return profile
profile_connections_key = f"profile_connections:{profile.id}"
current_connections = int(redis_client.get(profile_connections_key) or 0)
# Fallback to default profile even if over limit
return profiles.filter(is_default=True).first()
# Check if profile has available connection slots
if profile.max_streams == 0 or current_connections < profile.max_streams:
logger.info(f"[PROFILE-SELECTION] Selected profile {profile.id} ({profile.name}): {current_connections}/{profile.max_streams} connections")
return (profile, current_connections)
else:
logger.debug(f"[PROFILE-SELECTION] Profile {profile.id} at capacity: {current_connections}/{profile.max_streams}")
# All profiles are at capacity - return None to trigger error response
logger.error(f"[PROFILE-SELECTION] All profiles at capacity for M3U account {m3u_account.id}, rejecting request")
return None
except Exception as e:
logger.error(f"Error getting M3U profile: {e}")

View file

@ -6,6 +6,7 @@ from .api_views import (
SeriesViewSet,
VODCategoryViewSet,
UnifiedContentViewSet,
VODLogoViewSet,
)
app_name = 'vod'
@ -16,5 +17,6 @@ router.register(r'episodes', EpisodeViewSet, basename='episode')
router.register(r'series', SeriesViewSet, basename='series')
router.register(r'categories', VODCategoryViewSet, basename='vodcategory')
router.register(r'all', UnifiedContentViewSet, basename='unified-content')
router.register(r'vodlogos', VODLogoViewSet, basename='vodlogo')
urlpatterns = router.urls

View file

@ -3,23 +3,29 @@ from rest_framework.response import Response
from rest_framework.decorators import action
from rest_framework.filters import SearchFilter, OrderingFilter
from rest_framework.pagination import PageNumberPagination
from rest_framework.permissions import AllowAny
from django_filters.rest_framework import DjangoFilterBackend
from django.shortcuts import get_object_or_404
from django.http import StreamingHttpResponse, HttpResponse, FileResponse
from django.db.models import Q
import django_filters
import logging
import os
import requests
from apps.accounts.permissions import (
Authenticated,
permission_classes_by_action,
)
from .models import (
Series, VODCategory, Movie, Episode,
M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation
Series, VODCategory, Movie, Episode, VODLogo,
M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation, M3UVODCategoryRelation
)
from .serializers import (
MovieSerializer,
EpisodeSerializer,
SeriesSerializer,
VODCategorySerializer,
VODLogoSerializer,
M3UMovieRelationSerializer,
M3USeriesRelationSerializer,
M3UEpisodeRelationSerializer
@ -470,6 +476,59 @@ class VODCategoryViewSet(viewsets.ReadOnlyModelViewSet):
except KeyError:
return [Authenticated()]
def list(self, request, *args, **kwargs):
"""Override list to ensure Uncategorized categories and relations exist for all XC accounts with VOD enabled"""
from apps.m3u.models import M3UAccount
# Ensure Uncategorized categories exist
movie_category, _ = VODCategory.objects.get_or_create(
name="Uncategorized",
category_type="movie",
defaults={}
)
series_category, _ = VODCategory.objects.get_or_create(
name="Uncategorized",
category_type="series",
defaults={}
)
# Get all active XC accounts with VOD enabled
xc_accounts = M3UAccount.objects.filter(
account_type=M3UAccount.Types.XC,
is_active=True
)
for account in xc_accounts:
if account.custom_properties:
custom_props = account.custom_properties or {}
vod_enabled = custom_props.get("enable_vod", False)
if vod_enabled:
# Ensure relations exist for this account
auto_enable_new = custom_props.get("auto_enable_new_groups_vod", True)
M3UVODCategoryRelation.objects.get_or_create(
category=movie_category,
m3u_account=account,
defaults={
'enabled': auto_enable_new,
'custom_properties': {}
}
)
M3UVODCategoryRelation.objects.get_or_create(
category=series_category,
m3u_account=account,
defaults={
'enabled': auto_enable_new,
'custom_properties': {}
}
)
# Now proceed with normal list operation
return super().list(request, *args, **kwargs)
class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet):
"""ViewSet that combines Movies and Series for unified 'All' view"""
@ -564,7 +623,7 @@ class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet):
logo.url as logo_url,
'movie' as content_type
FROM vod_movie movies
LEFT JOIN dispatcharr_channels_logo logo ON movies.logo_id = logo.id
LEFT JOIN vod_vodlogo logo ON movies.logo_id = logo.id
WHERE {where_conditions[0]}
UNION ALL
@ -586,7 +645,7 @@ class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet):
logo.url as logo_url,
'series' as content_type
FROM vod_series series
LEFT JOIN dispatcharr_channels_logo logo ON series.logo_id = logo.id
LEFT JOIN vod_vodlogo logo ON series.logo_id = logo.id
WHERE {where_conditions[1]}
)
SELECT * FROM unified_content
@ -613,10 +672,10 @@ class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet):
'id': item_dict['logo_id'],
'name': item_dict['logo_name'],
'url': item_dict['logo_url'],
'cache_url': f"/media/logo_cache/{item_dict['logo_id']}.png" if item_dict['logo_id'] else None,
'channel_count': 0, # We don't need this for VOD
'is_used': True,
'channel_names': [] # We don't need this for VOD
'cache_url': f"/api/vod/vodlogos/{item_dict['logo_id']}/cache/",
'movie_count': 0, # We don't calculate this in raw SQL
'series_count': 0, # We don't calculate this in raw SQL
'is_used': True
}
# Convert to the format expected by frontend
@ -668,4 +727,173 @@ class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet):
logger.error(f"Error in UnifiedContentViewSet.list(): {e}")
import traceback
logger.error(traceback.format_exc())
return Response({'error': str(e)}, status=500)
return Response({'error': str(e)}, status=500)
class VODLogoPagination(PageNumberPagination):
page_size = 100
page_size_query_param = "page_size"
max_page_size = 1000
class VODLogoViewSet(viewsets.ModelViewSet):
"""ViewSet for VOD Logo management"""
queryset = VODLogo.objects.all()
serializer_class = VODLogoSerializer
pagination_class = VODLogoPagination
filter_backends = [SearchFilter, OrderingFilter]
search_fields = ['name', 'url']
ordering_fields = ['name', 'id']
ordering = ['name']
def get_permissions(self):
try:
return [perm() for perm in permission_classes_by_action[self.action]]
except KeyError:
if self.action == 'cache':
return [AllowAny()]
return [Authenticated()]
def get_queryset(self):
"""Optimize queryset with prefetch and add filtering"""
queryset = VODLogo.objects.prefetch_related('movie', 'series').order_by('name')
# Filter by specific IDs
ids = self.request.query_params.getlist('ids')
if ids:
try:
id_list = [int(id_str) for id_str in ids if id_str.isdigit()]
if id_list:
queryset = queryset.filter(id__in=id_list)
except (ValueError, TypeError):
queryset = VODLogo.objects.none()
# Filter by usage
used_filter = self.request.query_params.get('used', None)
if used_filter == 'true':
# Return logos that are used by movies OR series
queryset = queryset.filter(
Q(movie__isnull=False) | Q(series__isnull=False)
).distinct()
elif used_filter == 'false':
# Return logos that are NOT used by either
queryset = queryset.filter(
movie__isnull=True,
series__isnull=True
)
elif used_filter == 'movies':
# Return logos that are used by movies (may also be used by series)
queryset = queryset.filter(movie__isnull=False).distinct()
elif used_filter == 'series':
# Return logos that are used by series (may also be used by movies)
queryset = queryset.filter(series__isnull=False).distinct()
# Filter by name
name_query = self.request.query_params.get('name', None)
if name_query:
queryset = queryset.filter(name__icontains=name_query)
# No pagination mode
if self.request.query_params.get('no_pagination', 'false').lower() == 'true':
self.pagination_class = None
return queryset
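A minimal client-side sketch of how these query parameters combine, assuming the viewset is routed at /api/vod/vodlogos/ (consistent with the cache URL used elsewhere in this file) and that the caller supplies valid credentials; the token header is a placeholder:

import requests

BASE = "http://localhost:9191/api/vod/vodlogos/"   # assumed route; adjust to your deployment
HEADERS = {"Authorization": "Bearer <token>"}       # placeholder credentials

# Logos used only by series, name-filtered, with pagination disabled
resp = requests.get(
    BASE,
    params={"used": "series", "name": "HBO", "no_pagination": "true"},
    headers=HEADERS,
    timeout=10,
)
resp.raise_for_status()
for logo in resp.json():        # bare list because no_pagination=true
    print(logo["id"], logo["name"])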
@action(detail=True, methods=["get"], permission_classes=[AllowAny])
def cache(self, request, pk=None):
"""Streams the VOD logo file, whether it's local or remote."""
logo = self.get_object()
if not logo.url:
return HttpResponse(status=404)
# Check if this is a local file path
if logo.url.startswith('/data/'):
# It's a local file
file_path = logo.url
if not os.path.exists(file_path):
logger.error(f"VOD logo file not found: {file_path}")
return HttpResponse(status=404)
try:
return FileResponse(open(file_path, 'rb'), content_type='image/png')
except Exception as e:
logger.error(f"Error serving VOD logo file {file_path}: {str(e)}")
return HttpResponse(status=500)
else:
# It's a remote URL - proxy it
try:
response = requests.get(logo.url, stream=True, timeout=10)
response.raise_for_status()
content_type = response.headers.get('Content-Type', 'image/png')
return StreamingHttpResponse(
response.iter_content(chunk_size=8192),
content_type=content_type
)
except requests.exceptions.RequestException as e:
logger.error(f"Error fetching remote VOD logo {logo.url}: {str(e)}")
return HttpResponse(status=404)
@action(detail=False, methods=["delete"], url_path="bulk-delete")
def bulk_delete(self, request):
"""Delete multiple VOD logos at once"""
logo_ids = request.data.get('logo_ids', [])
if not logo_ids:
return Response(
{"error": "No logo IDs provided"},
status=status.HTTP_400_BAD_REQUEST
)
try:
# Get logos to delete
logos = VODLogo.objects.filter(id__in=logo_ids)
deleted_count = logos.count()
# Delete them
logos.delete()
return Response({
"deleted_count": deleted_count,
"message": f"Successfully deleted {deleted_count} VOD logo(s)"
})
except Exception as e:
logger.error(f"Error during bulk VOD logo deletion: {str(e)}")
return Response(
{"error": str(e)},
status=status.HTTP_500_INTERNAL_SERVER_ERROR
)
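A hedged sketch of driving bulk-delete from a script; the route prefix and auth header are assumptions, while the logo_ids payload shape matches the handler above:

import requests

resp = requests.delete(
    "http://localhost:9191/api/vod/vodlogos/bulk-delete/",  # assumed route prefix
    json={"logo_ids": [12, 34, 56]},
    headers={"Authorization": "Bearer <token>"},             # placeholder credentials
    timeout=10,
)
print(resp.json())  # e.g. {"deleted_count": 3, "message": "Successfully deleted 3 VOD logo(s)"}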
@action(detail=False, methods=["post"])
def cleanup(self, request):
"""Delete all VOD logos that are not used by any movies or series"""
try:
# Find unused logos
unused_logos = VODLogo.objects.filter(
movie__isnull=True,
series__isnull=True
)
deleted_count = unused_logos.count()
logo_names = list(unused_logos.values_list('name', flat=True))
# Delete them
unused_logos.delete()
logger.info(f"Cleaned up {deleted_count} unused VOD logos: {logo_names}")
return Response({
"deleted_count": deleted_count,
"deleted_logos": logo_names,
"message": f"Successfully deleted {deleted_count} unused VOD logo(s)"
})
except Exception as e:
logger.error(f"Error during VOD logo cleanup: {str(e)}")
return Response(
{"error": str(e)},
status=status.HTTP_500_INTERNAL_SERVER_ERROR
)

View file

@ -0,0 +1,264 @@
# Generated by Django 5.2.4 on 2025-11-06 23:01
import django.db.models.deletion
from django.db import migrations, models
def migrate_vod_logos_forward(apps, schema_editor):
"""
Migrate VOD logos from the Logo table to the new VODLogo table.
This copies all logos referenced by movies or series to VODLogo.
Uses pure SQL for maximum performance.
"""
from django.db import connection
print("\n" + "="*80)
print("Starting VOD logo migration...")
print("="*80)
with connection.cursor() as cursor:
# Step 1: Copy unique logos from Logo table to VODLogo table
# Only copy logos that are used by movies or series
print("Copying logos to VODLogo table...")
cursor.execute("""
INSERT INTO vod_vodlogo (name, url)
SELECT DISTINCT l.name, l.url
FROM dispatcharr_channels_logo l
WHERE l.id IN (
SELECT DISTINCT logo_id FROM vod_movie WHERE logo_id IS NOT NULL
UNION
SELECT DISTINCT logo_id FROM vod_series WHERE logo_id IS NOT NULL
)
ON CONFLICT (url) DO NOTHING
""")
print(f"Created VODLogo entries")
# Step 2: Update movies to point to VODLogo IDs using JOIN
print("Updating movie references...")
cursor.execute("""
UPDATE vod_movie m
SET logo_id = v.id
FROM dispatcharr_channels_logo l
INNER JOIN vod_vodlogo v ON l.url = v.url
WHERE m.logo_id = l.id
AND m.logo_id IS NOT NULL
""")
movie_count = cursor.rowcount
print(f"Updated {movie_count} movies with new VOD logo references")
# Step 3: Update series to point to VODLogo IDs using JOIN
print("Updating series references...")
cursor.execute("""
UPDATE vod_series s
SET logo_id = v.id
FROM dispatcharr_channels_logo l
INNER JOIN vod_vodlogo v ON l.url = v.url
WHERE s.logo_id = l.id
AND s.logo_id IS NOT NULL
""")
series_count = cursor.rowcount
print(f"Updated {series_count} series with new VOD logo references")
print("="*80)
print("VOD logo migration completed successfully!")
print(f"Summary: Updated {movie_count} movies and {series_count} series")
print("="*80 + "\n")
def migrate_vod_logos_backward(apps, schema_editor):
"""
Reverse migration - moves VODLogos back to Logo table.
This recreates Logo entries for all VODLogos and updates Movie/Series references.
"""
Logo = apps.get_model('dispatcharr_channels', 'Logo')
VODLogo = apps.get_model('vod', 'VODLogo')
Movie = apps.get_model('vod', 'Movie')
Series = apps.get_model('vod', 'Series')
print("\n" + "="*80)
print("REVERSE: Moving VOD logos back to Logo table...")
print("="*80)
# Get all VODLogos
vod_logos = VODLogo.objects.all()
print(f"Found {vod_logos.count()} VOD logos to reverse migrate")
# Create Logo entries for each VODLogo
logos_to_create = []
vod_to_logo_mapping = {} # VODLogo ID -> Logo ID
for vod_logo in vod_logos:
# Check if a Logo with this URL already exists
existing_logo = Logo.objects.filter(url=vod_logo.url).first()
if existing_logo:
# Logo already exists, just map to it
vod_to_logo_mapping[vod_logo.id] = existing_logo.id
print(f"Logo already exists for URL: {vod_logo.url[:50]}... (using existing)")
else:
# Create new Logo entry
new_logo = Logo(name=vod_logo.name, url=vod_logo.url)
logos_to_create.append(new_logo)
# Bulk create new Logo entries
if logos_to_create:
print(f"Creating {len(logos_to_create)} new Logo entries...")
Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True)
print("Logo entries created")
# Get the created Logo instances with their IDs
for vod_logo in vod_logos:
if vod_logo.id not in vod_to_logo_mapping:
try:
logo = Logo.objects.get(url=vod_logo.url)
vod_to_logo_mapping[vod_logo.id] = logo.id
except Logo.DoesNotExist:
print(f"Warning: Could not find Logo for URL: {vod_logo.url[:100]}...")
print(f"Created mapping for {len(vod_to_logo_mapping)} VOD logos -> Logos")
# Update movies to point back to Logo table
movie_count = 0
for movie in Movie.objects.exclude(logo__isnull=True):
if movie.logo_id in vod_to_logo_mapping:
movie.logo_id = vod_to_logo_mapping[movie.logo_id]
movie.save(update_fields=['logo_id'])
movie_count += 1
print(f"Updated {movie_count} movies to use Logo table")
# Update series to point back to Logo table
series_count = 0
for series in Series.objects.exclude(logo__isnull=True):
if series.logo_id in vod_to_logo_mapping:
series.logo_id = vod_to_logo_mapping[series.logo_id]
series.save(update_fields=['logo_id'])
series_count += 1
print(f"Updated {series_count} series to use Logo table")
# Delete VODLogos (they're now redundant)
vod_logo_count = vod_logos.count()
vod_logos.delete()
print(f"Deleted {vod_logo_count} VOD logos")
print("="*80)
print("Reverse migration completed!")
print(f"Summary: Created/reused {len(vod_to_logo_mapping)} logos, updated {movie_count} movies and {series_count} series")
print("="*80 + "\n")
def cleanup_migrated_logos(apps, schema_editor):
"""
Delete Logo entries that were successfully migrated to VODLogo.
Uses an efficient JOIN-based approach, with a LEFT JOIN to exclude logos still used by channels.
"""
from django.db import connection
print("\n" + "="*80)
print("Cleaning up migrated Logo entries...")
print("="*80)
with connection.cursor() as cursor:
# Single efficient query using JOINs:
# - JOIN with vod_vodlogo to find migrated logos
# - LEFT JOIN with channels to find which aren't used
cursor.execute("""
DELETE FROM dispatcharr_channels_logo
WHERE id IN (
SELECT l.id
FROM dispatcharr_channels_logo l
INNER JOIN vod_vodlogo v ON l.url = v.url
LEFT JOIN dispatcharr_channels_channel c ON c.logo_id = l.id
WHERE c.id IS NULL
)
""")
deleted_count = cursor.rowcount
print(f"✓ Deleted {deleted_count} migrated Logo entries (not used by channels)")
print("="*80 + "\n")
class Migration(migrations.Migration):
dependencies = [
('vod', '0002_add_last_seen_with_default'),
('dispatcharr_channels', '0013_alter_logo_url'), # Ensure Logo table exists
]
operations = [
# Step 1: Create the VODLogo model
migrations.CreateModel(
name='VODLogo',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('url', models.TextField(unique=True)),
],
options={
'verbose_name': 'VOD Logo',
'verbose_name_plural': 'VOD Logos',
},
),
# Step 2: Remove foreign key constraints temporarily (so we can change the IDs)
# We need to find and drop the actual constraint names dynamically
migrations.RunSQL(
sql=[
# Drop movie logo constraint (find it dynamically)
"""
DO $$
DECLARE
constraint_name text;
BEGIN
SELECT conname INTO constraint_name
FROM pg_constraint
WHERE conrelid = 'vod_movie'::regclass
AND conname LIKE '%logo_id%fk%';
IF constraint_name IS NOT NULL THEN
EXECUTE 'ALTER TABLE vod_movie DROP CONSTRAINT ' || constraint_name;
END IF;
END $$;
""",
# Drop series logo constraint (find it dynamically)
"""
DO $$
DECLARE
constraint_name text;
BEGIN
SELECT conname INTO constraint_name
FROM pg_constraint
WHERE conrelid = 'vod_series'::regclass
AND conname LIKE '%logo_id%fk%';
IF constraint_name IS NOT NULL THEN
EXECUTE 'ALTER TABLE vod_series DROP CONSTRAINT ' || constraint_name;
END IF;
END $$;
""",
],
# The AlterField operations will recreate the constraints pointing to VODLogo,
# so we don't need to manually recreate the dropped constraints in reverse
reverse_sql=migrations.RunSQL.noop,
),
# Step 3: Migrate the data (this copies logos and updates references)
migrations.RunPython(migrate_vod_logos_forward, migrate_vod_logos_backward),
# Step 4: Now we can safely alter the foreign keys to point to VODLogo
migrations.AlterField(
model_name='movie',
name='logo',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='movie', to='vod.vodlogo'),
),
migrations.AlterField(
model_name='series',
name='logo',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='series', to='vod.vodlogo'),
),
# Step 5: Clean up migrated Logo entries
migrations.RunPython(cleanup_migrated_logos, migrations.RunPython.noop),
]

View file

@ -4,10 +4,22 @@ from django.utils import timezone
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from apps.m3u.models import M3UAccount
from apps.channels.models import Logo
import uuid
class VODLogo(models.Model):
"""Logo model specifically for VOD content (movies and series)"""
name = models.CharField(max_length=255)
url = models.TextField(unique=True)
def __str__(self):
return self.name
class Meta:
verbose_name = 'VOD Logo'
verbose_name_plural = 'VOD Logos'
class VODCategory(models.Model):
"""Categories for organizing VODs (e.g., Action, Comedy, Drama)"""
@ -69,7 +81,7 @@ class Series(models.Model):
year = models.IntegerField(blank=True, null=True)
rating = models.CharField(max_length=10, blank=True, null=True)
genre = models.CharField(max_length=255, blank=True, null=True)
logo = models.ForeignKey(Logo, on_delete=models.SET_NULL, null=True, blank=True, related_name='series')
logo = models.ForeignKey(VODLogo, on_delete=models.SET_NULL, null=True, blank=True, related_name='series')
# Metadata IDs for deduplication - these should be globally unique when present
tmdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="TMDB ID for metadata")
@ -108,7 +120,7 @@ class Movie(models.Model):
rating = models.CharField(max_length=10, blank=True, null=True)
genre = models.CharField(max_length=255, blank=True, null=True)
duration_secs = models.IntegerField(blank=True, null=True, help_text="Duration in seconds")
logo = models.ForeignKey(Logo, on_delete=models.SET_NULL, null=True, blank=True, related_name='movie')
logo = models.ForeignKey(VODLogo, on_delete=models.SET_NULL, null=True, blank=True, related_name='movie')
# Metadata IDs for deduplication - these should be globally unique when present
tmdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="TMDB ID for metadata")

View file

@ -1,12 +1,79 @@
from rest_framework import serializers
from django.urls import reverse
from .models import (
Series, VODCategory, Movie, Episode,
Series, VODCategory, Movie, Episode, VODLogo,
M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation, M3UVODCategoryRelation
)
from apps.channels.serializers import LogoSerializer
from apps.m3u.serializers import M3UAccountSerializer
class VODLogoSerializer(serializers.ModelSerializer):
cache_url = serializers.SerializerMethodField()
movie_count = serializers.SerializerMethodField()
series_count = serializers.SerializerMethodField()
is_used = serializers.SerializerMethodField()
item_names = serializers.SerializerMethodField()
class Meta:
model = VODLogo
fields = ["id", "name", "url", "cache_url", "movie_count", "series_count", "is_used", "item_names"]
def validate_url(self, value):
"""Validate that the URL is unique for creation or update"""
if self.instance and self.instance.url == value:
return value
if VODLogo.objects.filter(url=value).exists():
raise serializers.ValidationError("A VOD logo with this URL already exists.")
return value
def create(self, validated_data):
"""Handle logo creation with proper URL validation"""
return VODLogo.objects.create(**validated_data)
def update(self, instance, validated_data):
"""Handle logo updates"""
for attr, value in validated_data.items():
setattr(instance, attr, value)
instance.save()
return instance
def get_cache_url(self, obj):
request = self.context.get("request")
if request:
return request.build_absolute_uri(
reverse("api:vod:vodlogo-cache", args=[obj.id])
)
return reverse("api:vod:vodlogo-cache", args=[obj.id])
def get_movie_count(self, obj):
"""Get the number of movies using this logo"""
return obj.movie.count() if hasattr(obj, 'movie') else 0
def get_series_count(self, obj):
"""Get the number of series using this logo"""
return obj.series.count() if hasattr(obj, 'series') else 0
def get_is_used(self, obj):
"""Check if this logo is used by any movies or series"""
return (hasattr(obj, 'movie') and obj.movie.exists()) or (hasattr(obj, 'series') and obj.series.exists())
def get_item_names(self, obj):
"""Get the list of movies and series using this logo"""
names = []
if hasattr(obj, 'movie'):
for movie in obj.movie.all()[:10]: # Limit to 10 items for performance
names.append(f"Movie: {movie.name}")
if hasattr(obj, 'series'):
for series in obj.series.all()[:10]: # Limit to 10 items for performance
names.append(f"Series: {series.name}")
return names
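Assuming a logo referenced by one movie and one series, the serializer above would yield a payload roughly like the following (illustrative values; the cache_url host depends on the request):

example = {
    "id": 42,
    "name": "Example Poster",
    "url": "http://cdn.example.com/poster.jpg",
    "cache_url": "http://localhost:9191/api/vod/vodlogos/42/cache/",
    "movie_count": 1,
    "series_count": 1,
    "is_used": True,
    "item_names": ["Movie: Example Movie", "Series: Example Series"],
}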
class M3UVODCategoryRelationSerializer(serializers.ModelSerializer):
category = serializers.IntegerField(source="category.id")
m3u_account = serializers.IntegerField(source="m3u_account.id")
@ -31,7 +98,7 @@ class VODCategorySerializer(serializers.ModelSerializer):
]
class SeriesSerializer(serializers.ModelSerializer):
logo = LogoSerializer(read_only=True)
logo = VODLogoSerializer(read_only=True)
episode_count = serializers.SerializerMethodField()
class Meta:
@ -43,7 +110,7 @@ class SeriesSerializer(serializers.ModelSerializer):
class MovieSerializer(serializers.ModelSerializer):
logo = LogoSerializer(read_only=True)
logo = VODLogoSerializer(read_only=True)
class Meta:
model = Movie
@ -225,7 +292,7 @@ class M3UEpisodeRelationSerializer(serializers.ModelSerializer):
class EnhancedSeriesSerializer(serializers.ModelSerializer):
"""Enhanced serializer for series with provider information"""
logo = LogoSerializer(read_only=True)
logo = VODLogoSerializer(read_only=True)
providers = M3USeriesRelationSerializer(source='m3u_relations', many=True, read_only=True)
episode_count = serializers.SerializerMethodField()

View file

@ -5,10 +5,9 @@ from django.db.models import Q
from apps.m3u.models import M3UAccount
from core.xtream_codes import Client as XtreamCodesClient
from .models import (
VODCategory, Series, Movie, Episode,
VODCategory, Series, Movie, Episode, VODLogo,
M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation, M3UVODCategoryRelation
)
from apps.channels.models import Logo
from datetime import datetime
import logging
import json
@ -128,6 +127,37 @@ def refresh_movies(client, account, categories_by_provider, relations, scan_star
"""Refresh movie content using single API call for all movies"""
logger.info(f"Refreshing movies for account {account.name}")
# Ensure "Uncategorized" category exists for movies without a category
uncategorized_category, created = VODCategory.objects.get_or_create(
name="Uncategorized",
category_type="movie",
defaults={}
)
# Ensure there's a relation for the Uncategorized category
account_custom_props = account.custom_properties or {}
auto_enable_new = account_custom_props.get("auto_enable_new_groups_vod", True)
uncategorized_relation, rel_created = M3UVODCategoryRelation.objects.get_or_create(
category=uncategorized_category,
m3u_account=account,
defaults={
'enabled': auto_enable_new,
'custom_properties': {}
}
)
if created:
logger.info(f"Created 'Uncategorized' category for movies")
if rel_created:
logger.info(f"Created relation for 'Uncategorized' category (enabled={auto_enable_new})")
# Add uncategorized category to relations dict for easy access
relations[uncategorized_category.id] = uncategorized_relation
# Add to categories_by_provider with a special key for items without category
categories_by_provider['__uncategorized__'] = uncategorized_category
# Get all movies in a single API call
logger.info("Fetching all movies from provider...")
all_movies_data = client.get_vod_streams() # No category_id = get all movies
@ -151,6 +181,37 @@ def refresh_series(client, account, categories_by_provider, relations, scan_star
"""Refresh series content using single API call for all series"""
logger.info(f"Refreshing series for account {account.name}")
# Ensure "Uncategorized" category exists for series without a category
uncategorized_category, created = VODCategory.objects.get_or_create(
name="Uncategorized",
category_type="series",
defaults={}
)
# Ensure there's a relation for the Uncategorized category
account_custom_props = account.custom_properties or {}
auto_enable_new = account_custom_props.get("auto_enable_new_groups_series", True)
uncategorized_relation, rel_created = M3UVODCategoryRelation.objects.get_or_create(
category=uncategorized_category,
m3u_account=account,
defaults={
'enabled': auto_enable_new,
'custom_properties': {}
}
)
if created:
logger.info(f"Created 'Uncategorized' category for series")
if rel_created:
logger.info(f"Created relation for 'Uncategorized' category (enabled={auto_enable_new})")
# Add uncategorized category to relations dict for easy access
relations[uncategorized_category.id] = uncategorized_relation
# Add to categories_by_provider with a special key for items without category
categories_by_provider['__uncategorized__'] = uncategorized_category
# Get all series in a single API call
logger.info("Fetching all series from provider...")
all_series_data = client.get_series() # No category_id = get all series
@ -187,16 +248,28 @@ def batch_create_categories(categories_data, category_type, account):
logger.debug(f"Found {len(existing_categories)} existing categories")
# Check if we should auto-enable new categories based on account settings
account_custom_props = account.custom_properties or {}
if category_type == 'movie':
auto_enable_new = account_custom_props.get("auto_enable_new_groups_vod", True)
else: # series
auto_enable_new = account_custom_props.get("auto_enable_new_groups_series", True)
# Create missing categories in batch
new_categories = []
for name in category_names:
if name not in existing_categories:
# Always create new categories
new_categories.append(VODCategory(name=name, category_type=category_type))
else:
# Existing category - create relationship with enabled based on auto_enable setting
# (category exists globally but is new to this account)
relations_to_create.append(M3UVODCategoryRelation(
category=existing_categories[name],
m3u_account=account,
custom_properties={},
enabled=auto_enable_new,
))
logger.debug(f"{len(new_categories)} new categories found")
@ -204,24 +277,69 @@ def batch_create_categories(categories_data, category_type, account):
if new_categories:
logger.debug("Creating new categories...")
created_categories = VODCategory.bulk_create_and_fetch(new_categories, ignore_conflicts=True)
created_categories = list(VODCategory.bulk_create_and_fetch(new_categories, ignore_conflicts=True))
# Create relations for newly created categories with enabled based on auto_enable setting
for cat in created_categories:
if not auto_enable_new:
logger.info(f"New {category_type} category '{cat.name}' created but DISABLED - auto_enable_new_groups is disabled for account {account.id}")
relations_to_create.append(
M3UVODCategoryRelation(
category=cat,
m3u_account=account,
custom_properties={},
enabled=auto_enable_new,
)
)
# Convert to dictionary for easy lookup
newly_created = {cat.name: cat for cat in created_categories}
relations_to_create += [
M3UVODCategoryRelation(
category=cat,
m3u_account=account,
custom_properties={},
) for cat in newly_created.values()
]
existing_categories.update(newly_created)
# Create missing relations
logger.debug("Updating category account relations...")
M3UVODCategoryRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True)
# Delete orphaned category relationships (categories no longer in the M3U source)
# Exclude "Uncategorized" from cleanup as it's a special category we manage
current_category_ids = set(existing_categories[name].id for name in category_names)
existing_relations = M3UVODCategoryRelation.objects.filter(
m3u_account=account,
category__category_type=category_type
).select_related('category')
relations_to_delete = [
rel for rel in existing_relations
if rel.category_id not in current_category_ids and rel.category.name != "Uncategorized"
]
if relations_to_delete:
M3UVODCategoryRelation.objects.filter(
id__in=[rel.id for rel in relations_to_delete]
).delete()
logger.info(f"Deleted {len(relations_to_delete)} orphaned {category_type} category relationships for account {account.id}: {[rel.category.name for rel in relations_to_delete]}")
# Check if any of the deleted relationships left categories with no remaining associations
orphaned_category_ids = []
for rel in relations_to_delete:
category = rel.category
# Check if this category has any remaining M3U account relationships
remaining_relationships = M3UVODCategoryRelation.objects.filter(
category=category
).exists()
# If no relationships remain, it's safe to delete the category
if not remaining_relationships:
orphaned_category_ids.append(category.id)
logger.debug(f"Category '{category.name}' has no remaining associations and will be deleted")
# Delete orphaned categories
if orphaned_category_ids:
VODCategory.objects.filter(id__in=orphaned_category_ids).delete()
logger.info(f"Deleted {len(orphaned_category_ids)} orphaned {category_type} categories with no remaining associations")
# 🔑 Fetch all relations for this account, for all categories
# relations = { rel.id: rel for rel in M3UVODCategoryRelation.objects
# .filter(category__in=existing_categories.values(), m3u_account=account)
@ -276,7 +394,16 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N
logger.debug("Skipping disabled category")
continue
else:
logger.warning(f"No category ID provided for movie {name}")
# Assign to Uncategorized category if no category_id provided
logger.debug(f"No category ID provided for movie {name}, assigning to 'Uncategorized'")
category = categories.get('__uncategorized__')
if category:
movie_data['_category_id'] = category.id
# Check if uncategorized is disabled
relation = relations.get(category.id, None)
if relation and not relation.enabled:
logger.debug("Skipping disabled 'Uncategorized' category")
continue
# Extract metadata
year = extract_year_from_data(movie_data, 'name')
@ -303,7 +430,7 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N
# Prepare movie properties
description = movie_data.get('description') or movie_data.get('plot') or ''
rating = movie_data.get('rating') or movie_data.get('vote_average') or ''
rating = normalize_rating(movie_data.get('rating') or movie_data.get('vote_average'))
genre = movie_data.get('genre') or movie_data.get('category_name') or ''
duration_secs = extract_duration_from_data(movie_data)
trailer_raw = movie_data.get('trailer') or movie_data.get('youtube_trailer') or ''
@ -347,7 +474,7 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N
# Get existing logos
existing_logos = {
logo.url: logo for logo in Logo.objects.filter(url__in=logo_urls)
logo.url: logo for logo in VODLogo.objects.filter(url__in=logo_urls)
} if logo_urls else {}
# Create missing logos
@ -355,20 +482,20 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N
for logo_url in logo_urls:
if logo_url not in existing_logos:
movie_name = logo_url_to_name.get(logo_url, 'Unknown Movie')
logos_to_create.append(Logo(url=logo_url, name=movie_name))
logos_to_create.append(VODLogo(url=logo_url, name=movie_name))
if logos_to_create:
try:
Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True)
VODLogo.objects.bulk_create(logos_to_create, ignore_conflicts=True)
# Refresh existing_logos with newly created ones
new_logo_urls = [logo.url for logo in logos_to_create]
newly_created = {
logo.url: logo for logo in Logo.objects.filter(url__in=new_logo_urls)
logo.url: logo for logo in VODLogo.objects.filter(url__in=new_logo_urls)
}
existing_logos.update(newly_created)
logger.info(f"Created {len(newly_created)} new logos for movies")
logger.info(f"Created {len(newly_created)} new VOD logos for movies")
except Exception as e:
logger.warning(f"Failed to create logos: {e}")
logger.warning(f"Failed to create VOD logos: {e}")
# Get existing movies based on our keys
existing_movies = {}
@ -578,7 +705,16 @@ def process_series_batch(account, batch, categories, relations, scan_start_time=
logger.debug("Skipping disabled category")
continue
else:
logger.warning(f"No category ID provided for series {name}")
# Assign to Uncategorized category if no category_id provided
logger.debug(f"No category ID provided for series {name}, assigning to 'Uncategorized'")
category = categories.get('__uncategorized__')
if category:
series_data['_category_id'] = category.id
# Check if uncategorized is disabled
relation = relations.get(category.id, None)
if relation and not relation.enabled:
logger.debug("Skipping disabled 'Uncategorized' category")
continue
# Extract metadata
year = extract_year(series_data.get('releaseDate', ''))
@ -608,7 +744,7 @@ def process_series_batch(account, batch, categories, relations, scan_start_time=
# Prepare series properties
description = series_data.get('plot', '')
rating = series_data.get('rating', '')
rating = normalize_rating(series_data.get('rating'))
genre = series_data.get('genre', '')
logo_url = series_data.get('cover') or ''
@ -669,7 +805,7 @@ def process_series_batch(account, batch, categories, relations, scan_start_time=
# Get existing logos
existing_logos = {
logo.url: logo for logo in Logo.objects.filter(url__in=logo_urls)
logo.url: logo for logo in VODLogo.objects.filter(url__in=logo_urls)
} if logo_urls else {}
# Create missing logos
@ -677,20 +813,20 @@ def process_series_batch(account, batch, categories, relations, scan_start_time=
for logo_url in logo_urls:
if logo_url not in existing_logos:
series_name = logo_url_to_name.get(logo_url, 'Unknown Series')
logos_to_create.append(Logo(url=logo_url, name=series_name))
logos_to_create.append(VODLogo(url=logo_url, name=series_name))
if logos_to_create:
try:
Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True)
VODLogo.objects.bulk_create(logos_to_create, ignore_conflicts=True)
# Refresh existing_logos with newly created ones
new_logo_urls = [logo.url for logo in logos_to_create]
newly_created = {
logo.url: logo for logo in Logo.objects.filter(url__in=new_logo_urls)
logo.url: logo for logo in VODLogo.objects.filter(url__in=new_logo_urls)
}
existing_logos.update(newly_created)
logger.info(f"Created {len(newly_created)} new logos for series")
logger.info(f"Created {len(newly_created)} new VOD logos for series")
except Exception as e:
logger.warning(f"Failed to create logos: {e}")
logger.warning(f"Failed to create VOD logos: {e}")
# Get existing series based on our keys - same pattern as movies
existing_series = {}
@ -896,6 +1032,33 @@ def extract_duration_from_data(movie_data):
return duration_secs
def normalize_rating(rating_value):
"""Normalize rating value by converting commas to decimals and validating as float"""
if not rating_value:
return None
try:
# Convert to string for processing
rating_str = str(rating_value).strip()
if not rating_str:
return None
# Replace comma with decimal point (European format)
rating_str = rating_str.replace(',', '.')
# Try to convert to float
rating_float = float(rating_str)
# Return as string to maintain compatibility with existing code
# but ensure it's a valid numeric format
return str(rating_float)
except (ValueError, TypeError, AttributeError):
# If conversion fails, discard the rating
logger.debug(f"Invalid rating value discarded: {rating_value}")
return None
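A quick standalone check of the normalization behaviour described above, re-implemented locally so it runs outside Django (the real helper additionally logs discarded values):

def _normalize(value):
    if not value:
        return None
    try:
        text = str(value).strip().replace(",", ".")
        return str(float(text)) if text else None
    except (ValueError, TypeError):
        return None

assert _normalize("7,5") == "7.5"   # European decimal comma becomes a dot
assert _normalize(8) == "8.0"       # numeric input is stringified
assert _normalize("N/A") is None    # non-numeric ratings are discarded
assert _normalize("") is None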
def extract_year(date_string):
"""Extract year from date string"""
if not date_string:
@ -1021,9 +1184,9 @@ def refresh_series_episodes(account, series, external_series_id, episodes_data=N
if should_update_field(series.description, info.get('plot')):
series.description = extract_string_from_array_or_string(info.get('plot'))
updated = True
if (info.get('rating') and str(info.get('rating')).strip() and
(not series.rating or not str(series.rating).strip())):
series.rating = info.get('rating')
normalized_rating = normalize_rating(info.get('rating'))
if normalized_rating and (not series.rating or not str(series.rating).strip()):
series.rating = normalized_rating
updated = True
if should_update_field(series.genre, info.get('genre')):
series.genre = extract_string_from_array_or_string(info.get('genre'))
@ -1124,7 +1287,7 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None)
# Extract episode metadata
description = info.get('plot') or info.get('overview', '') if info else ''
rating = info.get('rating', '') if info else ''
rating = normalize_rating(info.get('rating')) if info else None
air_date = extract_date_from_data(info) if info else None
duration_secs = info.get('duration_secs') if info else None
tmdb_id = info.get('tmdb_id') if info else None
@ -1341,21 +1504,21 @@ def cleanup_orphaned_vod_content(stale_days=0, scan_start_time=None, account_id=
stale_episode_count = stale_episode_relations.count()
stale_episode_relations.delete()
# Clean up movies with no relations (orphaned) - only if no account_id specified (global cleanup)
if not account_id:
orphaned_movies = Movie.objects.filter(m3u_relations__isnull=True)
orphaned_movie_count = orphaned_movies.count()
# Clean up movies with no relations (orphaned)
# Safe to delete even during account-specific cleanup because if ANY account
# has a relation, m3u_relations will not be null
orphaned_movies = Movie.objects.filter(m3u_relations__isnull=True)
orphaned_movie_count = orphaned_movies.count()
if orphaned_movie_count > 0:
logger.info(f"Deleting {orphaned_movie_count} orphaned movies with no M3U relations")
orphaned_movies.delete()
# Clean up series with no relations (orphaned) - only if no account_id specified (global cleanup)
orphaned_series = Series.objects.filter(m3u_relations__isnull=True)
orphaned_series_count = orphaned_series.count()
# Clean up series with no relations (orphaned)
orphaned_series = Series.objects.filter(m3u_relations__isnull=True)
orphaned_series_count = orphaned_series.count()
if orphaned_series_count > 0:
logger.info(f"Deleting {orphaned_series_count} orphaned series with no M3U relations")
orphaned_series.delete()
else:
# When cleaning up for specific account, we don't remove orphaned content
# as other accounts might still reference it
orphaned_movie_count = 0
orphaned_series_count = 0
# Episodes will be cleaned up via CASCADE when series are deleted
@ -1797,8 +1960,9 @@ def refresh_movie_advanced_data(m3u_movie_relation_id, force_refresh=False):
if info.get('plot') and info.get('plot') != movie.description:
movie.description = info.get('plot')
updated = True
if info.get('rating') and info.get('rating') != movie.rating:
movie.rating = info.get('rating')
normalized_rating = normalize_rating(info.get('rating'))
if normalized_rating and normalized_rating != movie.rating:
movie.rating = normalized_rating
updated = True
if info.get('genre') and info.get('genre') != movie.genre:
movie.genre = info.get('genre')
@ -1915,7 +2079,7 @@ def refresh_movie_advanced_data(m3u_movie_relation_id, force_refresh=False):
def validate_logo_reference(obj, obj_type="object"):
"""
Validate that a logo reference exists in the database.
Validate that a VOD logo reference exists in the database.
If not, set it to None to prevent foreign key constraint violations.
Args:
@ -1935,9 +2099,9 @@ def validate_logo_reference(obj, obj_type="object"):
try:
# Verify the logo exists in the database
Logo.objects.get(pk=obj.logo.pk)
VODLogo.objects.get(pk=obj.logo.pk)
return True
except Logo.DoesNotExist:
logger.warning(f"Logo with ID {obj.logo.pk} does not exist in database for {obj_type} '{getattr(obj, 'name', 'Unknown')}', setting to None")
except VODLogo.DoesNotExist:
logger.warning(f"VOD Logo with ID {obj.logo.pk} does not exist in database for {obj_type} '{getattr(obj, 'name', 'Unknown')}', setting to None")
obj.logo = None
return False

View file

@ -2,7 +2,16 @@
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .api_views import UserAgentViewSet, StreamProfileViewSet, CoreSettingsViewSet, environment, version, rehash_streams_endpoint
from .api_views import (
UserAgentViewSet,
StreamProfileViewSet,
CoreSettingsViewSet,
environment,
version,
rehash_streams_endpoint,
TimezoneListView,
get_system_events
)
router = DefaultRouter()
router.register(r'useragents', UserAgentViewSet, basename='useragent')
@ -12,5 +21,7 @@ urlpatterns = [
path('settings/env/', environment, name='token_refresh'),
path('version/', version, name='version'),
path('rehash-streams/', rehash_streams_endpoint, name='rehash_streams'),
path('timezones/', TimezoneListView.as_view(), name='timezones'),
path('system-events/', get_system_events, name='system_events'),
path('', include(router.urls)),
]

View file

@ -5,10 +5,12 @@ import ipaddress
import logging
from rest_framework import viewsets, status
from rest_framework.response import Response
from rest_framework.views import APIView
from django.shortcuts import get_object_or_404
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import api_view, permission_classes, action
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
from .models import (
UserAgent,
StreamProfile,
@ -328,25 +330,130 @@ def rehash_streams_endpoint(request):
# Get the current hash keys from settings
hash_key_setting = CoreSettings.objects.get(key=STREAM_HASH_KEY)
hash_keys = hash_key_setting.value.split(",")
# Queue the rehash task
task = rehash_streams.delay(hash_keys)
return Response({
"success": True,
"message": "Stream rehashing task has been queued",
"task_id": task.id
}, status=status.HTTP_200_OK)
except CoreSettings.DoesNotExist:
return Response({
"success": False,
"message": "Hash key settings not found"
}, status=status.HTTP_400_BAD_REQUEST)
except Exception as e:
logger.error(f"Error triggering rehash streams: {e}")
return Response({
"success": False,
"message": "Failed to trigger rehash task"
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
# ─────────────────────────────
# Timezone List API
# ─────────────────────────────
class TimezoneListView(APIView):
"""
API endpoint that returns all available timezones supported by pytz.
Returns a list of timezone names grouped by region for easy selection.
This is a general utility endpoint that can be used throughout the application.
"""
def get_permissions(self):
return [Authenticated()]
@swagger_auto_schema(
operation_description="Get list of all supported timezones",
responses={200: openapi.Response('List of timezones with grouping by region')}
)
def get(self, request):
import pytz
# Get all common timezones (excludes deprecated ones)
all_timezones = sorted(pytz.common_timezones)
# Group by region for better UX
grouped = {}
for tz in all_timezones:
if '/' in tz:
region = tz.split('/')[0]
if region not in grouped:
grouped[region] = []
grouped[region].append(tz)
else:
# Handle special zones like UTC, GMT, etc.
if 'Other' not in grouped:
grouped['Other'] = []
grouped['Other'].append(tz)
return Response({
'timezones': all_timezones,
'grouped': grouped,
'count': len(all_timezones)
})
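The grouping is plain pytz; a standalone reproduction of the same logic, which the view above already depends on:

import pytz

grouped = {}
for tz in sorted(pytz.common_timezones):
    region = tz.split("/")[0] if "/" in tz else "Other"
    grouped.setdefault(region, []).append(tz)

print(len(pytz.common_timezones), "timezones")
print(grouped["Europe"][:3])   # e.g. ['Europe/Amsterdam', 'Europe/Andorra', 'Europe/Astrakhan']
print(grouped["Other"][:3])    # zones without a region prefix, e.g. ['GMT', 'UTC']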
# ─────────────────────────────
# System Events API
# ─────────────────────────────
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def get_system_events(request):
"""
Get recent system events (channel start/stop, buffering, client connections, etc.)
Query Parameters:
limit: Number of events to return per page (default: 100, max: 1000)
offset: Number of events to skip (for pagination, default: 0)
event_type: Filter by specific event type (optional)
"""
from core.models import SystemEvent
try:
# Get pagination params
limit = min(int(request.GET.get('limit', 100)), 1000)
offset = int(request.GET.get('offset', 0))
# Start with all events
events = SystemEvent.objects.all()
# Filter by event_type if provided
event_type = request.GET.get('event_type')
if event_type:
events = events.filter(event_type=event_type)
# Get total count before applying pagination
total_count = events.count()
# Apply offset and limit for pagination
events = events[offset:offset + limit]
# Serialize the data
events_data = [{
'id': event.id,
'event_type': event.event_type,
'event_type_display': event.get_event_type_display(),
'timestamp': event.timestamp.isoformat(),
'channel_id': str(event.channel_id) if event.channel_id else None,
'channel_name': event.channel_name,
'details': event.details
} for event in events]
return Response({
'events': events_data,
'count': len(events_data),
'total': total_count,
'offset': offset,
'limit': limit
})
except Exception as e:
logger.error(f"Error fetching system events: {e}")
return Response({
'error': 'Failed to fetch system events'
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
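Assuming the core API urls are mounted at /api/core/ (an assumption; only the relative system-events/ path is registered above), a paginated fetch might look like:

import requests

resp = requests.get(
    "http://localhost:9191/api/core/system-events/",   # assumed mount point for the core urls
    params={"limit": 50, "offset": 0, "event_type": "channel_start"},
    headers={"Authorization": "Bearer <token>"},        # placeholder credentials
    timeout=10,
)
data = resp.json()
print(data["total"], "matching events,", data["count"], "returned in this page")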

View file

@ -8,7 +8,7 @@ def preload_core_settings(apps, schema_editor):
CoreSettings.objects.create(
key=slugify("M3U Hash Key"),
name="M3U Hash Key",
value="name,url,tvg_id",
value="url",
)
class Migration(migrations.Migration):

View file

@ -0,0 +1,28 @@
# Generated by Django 5.2.4 on 2025-11-20 20:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0016_update_dvr_template_paths'),
]
operations = [
migrations.CreateModel(
name='SystemEvent',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('event_type', models.CharField(choices=[('channel_start', 'Channel Started'), ('channel_stop', 'Channel Stopped'), ('channel_buffering', 'Channel Buffering'), ('channel_failover', 'Channel Failover'), ('channel_reconnect', 'Channel Reconnected'), ('channel_error', 'Channel Error'), ('client_connect', 'Client Connected'), ('client_disconnect', 'Client Disconnected'), ('recording_start', 'Recording Started'), ('recording_end', 'Recording Ended'), ('stream_switch', 'Stream Switched'), ('m3u_refresh', 'M3U Refreshed'), ('m3u_download', 'M3U Downloaded'), ('epg_refresh', 'EPG Refreshed'), ('epg_download', 'EPG Downloaded')], db_index=True, max_length=50)),
('timestamp', models.DateTimeField(auto_now_add=True, db_index=True)),
('channel_id', models.UUIDField(blank=True, db_index=True, null=True)),
('channel_name', models.CharField(blank=True, max_length=255, null=True)),
('details', models.JSONField(blank=True, default=dict)),
],
options={
'ordering': ['-timestamp'],
'indexes': [models.Index(fields=['-timestamp'], name='core_system_timesta_c6c3d1_idx'), models.Index(fields=['event_type', '-timestamp'], name='core_system_event_t_4267d9_idx')],
},
),
]

View file

@ -0,0 +1,18 @@
# Generated by Django 5.2.4 on 2025-11-21 15:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0017_systemevent'),
]
operations = [
migrations.AlterField(
model_name='systemevent',
name='event_type',
field=models.CharField(choices=[('channel_start', 'Channel Started'), ('channel_stop', 'Channel Stopped'), ('channel_buffering', 'Channel Buffering'), ('channel_failover', 'Channel Failover'), ('channel_reconnect', 'Channel Reconnected'), ('channel_error', 'Channel Error'), ('client_connect', 'Client Connected'), ('client_disconnect', 'Client Disconnected'), ('recording_start', 'Recording Started'), ('recording_end', 'Recording Ended'), ('stream_switch', 'Stream Switched'), ('m3u_refresh', 'M3U Refreshed'), ('m3u_download', 'M3U Downloaded'), ('epg_refresh', 'EPG Refreshed'), ('epg_download', 'EPG Downloaded'), ('login_success', 'Login Successful'), ('login_failed', 'Login Failed'), ('logout', 'User Logged Out'), ('m3u_blocked', 'M3U Download Blocked'), ('epg_blocked', 'EPG Download Blocked')], db_index=True, max_length=50),
),
]

View file

@ -1,4 +1,5 @@
# core/models.py
from django.conf import settings
from django.db import models
from django.utils.text import slugify
from django.core.exceptions import ValidationError
@ -158,8 +159,10 @@ DVR_TV_FALLBACK_DIR_KEY = slugify("DVR TV Fallback Dir")
DVR_TV_FALLBACK_TEMPLATE_KEY = slugify("DVR TV Fallback Template")
DVR_MOVIE_FALLBACK_TEMPLATE_KEY = slugify("DVR Movie Fallback Template")
DVR_COMSKIP_ENABLED_KEY = slugify("DVR Comskip Enabled")
DVR_COMSKIP_CUSTOM_PATH_KEY = slugify("DVR Comskip Custom Path")
DVR_PRE_OFFSET_MINUTES_KEY = slugify("DVR Pre-Offset Minutes")
DVR_POST_OFFSET_MINUTES_KEY = slugify("DVR Post-Offset Minutes")
SYSTEM_TIME_ZONE_KEY = slugify("System Time Zone")
class CoreSettings(models.Model):
@ -274,6 +277,27 @@ class CoreSettings(models.Model):
except cls.DoesNotExist:
return False
@classmethod
def get_dvr_comskip_custom_path(cls):
"""Return configured comskip.ini path or empty string if unset."""
try:
return cls.objects.get(key=DVR_COMSKIP_CUSTOM_PATH_KEY).value
except cls.DoesNotExist:
return ""
@classmethod
def set_dvr_comskip_custom_path(cls, path: str | None):
"""Persist the comskip.ini path setting, normalizing nulls to empty string."""
value = (path or "").strip()
obj, _ = cls.objects.get_or_create(
key=DVR_COMSKIP_CUSTOM_PATH_KEY,
defaults={"name": "DVR Comskip Custom Path", "value": value},
)
if obj.value != value:
obj.value = value
obj.save(update_fields=["value"])
return value
@classmethod
def get_dvr_pre_offset_minutes(cls):
"""Minutes to start recording before scheduled start (default 0)."""
@ -302,6 +326,30 @@ class CoreSettings(models.Model):
except Exception:
return 0
@classmethod
def get_system_time_zone(cls):
"""Return configured system time zone or fall back to Django settings."""
try:
value = cls.objects.get(key=SYSTEM_TIME_ZONE_KEY).value
if value:
return value
except cls.DoesNotExist:
pass
return getattr(settings, "TIME_ZONE", "UTC") or "UTC"
@classmethod
def set_system_time_zone(cls, tz_name: str | None):
"""Persist the desired system time zone identifier."""
value = (tz_name or "").strip() or getattr(settings, "TIME_ZONE", "UTC") or "UTC"
obj, _ = cls.objects.get_or_create(
key=SYSTEM_TIME_ZONE_KEY,
defaults={"name": "System Time Zone", "value": value},
)
if obj.value != value:
obj.value = value
obj.save(update_fields=["value"])
return value
@classmethod
def get_dvr_series_rules(cls):
"""Return list of series recording rules. Each: {tvg_id, title, mode: 'all'|'new'}"""
@ -327,3 +375,48 @@ class CoreSettings(models.Model):
return rules
except Exception:
return rules
class SystemEvent(models.Model):
"""
Tracks system events like channel start/stop, buffering, failover, client connections.
Maintains a rolling history based on max_system_events setting.
"""
EVENT_TYPES = [
('channel_start', 'Channel Started'),
('channel_stop', 'Channel Stopped'),
('channel_buffering', 'Channel Buffering'),
('channel_failover', 'Channel Failover'),
('channel_reconnect', 'Channel Reconnected'),
('channel_error', 'Channel Error'),
('client_connect', 'Client Connected'),
('client_disconnect', 'Client Disconnected'),
('recording_start', 'Recording Started'),
('recording_end', 'Recording Ended'),
('stream_switch', 'Stream Switched'),
('m3u_refresh', 'M3U Refreshed'),
('m3u_download', 'M3U Downloaded'),
('epg_refresh', 'EPG Refreshed'),
('epg_download', 'EPG Downloaded'),
('login_success', 'Login Successful'),
('login_failed', 'Login Failed'),
('logout', 'User Logged Out'),
('m3u_blocked', 'M3U Download Blocked'),
('epg_blocked', 'EPG Download Blocked'),
]
event_type = models.CharField(max_length=50, choices=EVENT_TYPES, db_index=True)
timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
channel_id = models.UUIDField(null=True, blank=True, db_index=True)
channel_name = models.CharField(max_length=255, null=True, blank=True)
details = models.JSONField(default=dict, blank=True)
class Meta:
ordering = ['-timestamp']
indexes = [
models.Index(fields=['-timestamp']),
models.Index(fields=['event_type', '-timestamp']),
]
def __str__(self):
return f"{self.event_type} - {self.channel_name or 'N/A'} @ {self.timestamp}"

View file

@ -513,7 +513,7 @@ def rehash_streams(keys):
for obj in batch:
# Generate new hash
new_hash = Stream.generate_hash_key(obj.name, obj.url, obj.tvg_id, keys)
new_hash = Stream.generate_hash_key(obj.name, obj.url, obj.tvg_id, keys, m3u_id=obj.m3u_account_id)
# Check if this hash already exists in our tracking dict or in database
if new_hash in hash_keys:

View file

@ -377,12 +377,59 @@ def validate_flexible_url(value):
import re
# More flexible pattern for non-FQDN hostnames with paths
# Matches: http://hostname, http://hostname/, http://hostname:port/path/to/file.xml
non_fqdn_pattern = r'^https?://[a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?(\:[0-9]+)?(/[^\s]*)?$'
# Matches: http://hostname, https://hostname/, http://hostname:port/path/to/file.xml, rtp://192.168.2.1, rtsp://192.168.178.1, udp://239.0.0.1:1234
# Also matches FQDNs for rtsp/rtp/udp protocols: rtsp://FQDN/path?query=value
# Also supports authentication: rtsp://user:pass@hostname/path
non_fqdn_pattern = r'^(rts?p|https?|udp)://([a-zA-Z0-9_\-\.]+:[^\s@]+@)?([a-zA-Z0-9]([a-zA-Z0-9\-\.]{0,61}[a-zA-Z0-9])?|[0-9.]+)?(\:[0-9]+)?(/[^\s]*)?$'
non_fqdn_match = re.match(non_fqdn_pattern, value)
if non_fqdn_match:
return # Accept non-FQDN hostnames
return # Accept non-FQDN hostnames and rtsp/rtp/udp URLs with optional authentication
# If it doesn't match our flexible patterns, raise the original error
raise ValidationError("Enter a valid URL.")
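A self-contained check of the widened pattern against the URL shapes the comment mentions; the pattern string is copied verbatim from above:

import re

pattern = r'^(rts?p|https?|udp)://([a-zA-Z0-9_\-\.]+:[^\s@]+@)?([a-zA-Z0-9]([a-zA-Z0-9\-\.]{0,61}[a-zA-Z0-9])?|[0-9.]+)?(\:[0-9]+)?(/[^\s]*)?$'

samples = [
    "http://hostname",
    "http://hostname:8080/path/to/file.xml",
    "rtp://192.168.2.1",
    "rtsp://user:pass@camera.local/stream1",
    "udp://239.0.0.1:1234",
]
for url in samples:
    print(url, "->", bool(re.match(pattern, url)))  # all five match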
def log_system_event(event_type, channel_id=None, channel_name=None, **details):
"""
Log a system event and maintain the configured max history.
Args:
event_type: Type of event (e.g., 'channel_start', 'client_connect')
channel_id: Optional UUID of the channel
channel_name: Optional name of the channel
**details: Additional details to store in the event (stored as JSON)
Example:
log_system_event('channel_start', channel_id=uuid, channel_name='CNN',
stream_url='http://...', user='admin')
"""
from core.models import SystemEvent, CoreSettings
try:
# Create the event
SystemEvent.objects.create(
event_type=event_type,
channel_id=channel_id,
channel_name=channel_name,
details=details
)
# Get max events from settings (default 100)
try:
max_events_setting = CoreSettings.objects.filter(key='max-system-events').first()
max_events = int(max_events_setting.value) if max_events_setting else 100
except Exception:
max_events = 100
# Delete old events beyond the limit (keep it efficient with a single query)
total_count = SystemEvent.objects.count()
if total_count > max_events:
# Get the ID of the event at the cutoff point
cutoff_event = SystemEvent.objects.values_list('id', flat=True)[max_events]
# Delete all events with ID less than cutoff (older events)
SystemEvent.objects.filter(id__lt=cutoff_event).delete()
except Exception as e:
# Don't let event logging break the main application
logger.error(f"Failed to log system event {event_type}: {e}")

View file

@ -68,15 +68,16 @@ install_packages() {
echo ">>> Installing system packages..."
apt-get update
declare -a packages=(
git curl wget build-essential gcc libpcre3-dev libpq-dev
git curl wget build-essential gcc libpq-dev
python3-dev python3-venv python3-pip nginx redis-server
postgresql postgresql-contrib ffmpeg procps streamlink
sudo
)
apt-get install -y --no-install-recommends "${packages[@]}"
if ! command -v node >/dev/null 2>&1; then
echo ">>> Installing Node.js..."
curl -sL https://deb.nodesource.com/setup_23.x | bash -
curl -sL https://deb.nodesource.com/setup_24.x | bash -
apt-get install -y nodejs
fi
@ -186,7 +187,32 @@ EOSU
}
##############################################################################
# 8) Django Migrations & Static
# 8) Create Directories
##############################################################################
create_directories() {
mkdir -p /data/logos
mkdir -p /data/recordings
mkdir -p /data/uploads/m3us
mkdir -p /data/uploads/epgs
mkdir -p /data/m3us
mkdir -p /data/epgs
mkdir -p /data/plugins
mkdir -p /data/db
# Needs to own ALL of /data except db
chown -R $DISPATCH_USER:$DISPATCH_GROUP /data
chown -R postgres:postgres /data/db
chmod +x /data
mkdir -p "$APP_DIR"/logo_cache
mkdir -p "$APP_DIR"/media
chown -R $DISPATCH_USER:$DISPATCH_GROUP "$APP_DIR"/logo_cache
chown -R $DISPATCH_USER:$DISPATCH_GROUP "$APP_DIR"/media
}
##############################################################################
# 9) Django Migrations & Static
##############################################################################
django_migrate_collectstatic() {
@ -204,7 +230,7 @@ EOSU
}
##############################################################################
# 9) Configure Services & Nginx
# 10) Configure Services & Nginx
##############################################################################
configure_services() {
@ -360,7 +386,7 @@ EOF
}
##############################################################################
# 10) Start Services
# 11) Start Services
##############################################################################
start_services() {
@ -371,7 +397,7 @@ start_services() {
}
##############################################################################
# 11) Summary
# 12) Summary
##############################################################################
show_summary() {
@ -408,10 +434,11 @@ main() {
clone_dispatcharr_repo
setup_python_env
build_frontend
create_directories
django_migrate_collectstatic
configure_services
start_services
show_summary
}
main "$@"
main "$@"

View file

@ -50,13 +50,21 @@ app.conf.update(
)
# Add memory cleanup after task completion
#@task_postrun.connect # Use the imported signal
@task_postrun.connect # Use the imported signal
def cleanup_task_memory(**kwargs):
"""Clean up memory after each task completes"""
"""Clean up memory and database connections after each task completes"""
from django.db import connection
# Get task name from kwargs
task_name = kwargs.get('task').name if kwargs.get('task') else ''
# Only run cleanup for memory-intensive tasks
# Close database connection for this Celery worker process
try:
connection.close()
except Exception:
pass
# Only run memory cleanup for memory-intensive tasks
memory_intensive_tasks = [
'apps.m3u.tasks.refresh_single_m3u_account',
'apps.m3u.tasks.refresh_m3u_accounts',

View file

@ -51,6 +51,11 @@ EPG_BATCH_SIZE = 1000 # Number of records to process in a batch
EPG_MEMORY_LIMIT = 512 # Memory limit in MB before forcing garbage collection
EPG_ENABLE_MEMORY_MONITORING = True # Whether to monitor memory usage during processing
# XtreamCodes Rate Limiting Settings
# Delay between profile authentications when refreshing multiple profiles
# This prevents providers from temporarily banning users with many profiles
XC_PROFILE_REFRESH_DELAY = float(os.environ.get('XC_PROFILE_REFRESH_DELAY', '2.5')) # seconds between profile refreshes
# Database optimization settings
DATABASE_STATEMENT_TIMEOUT = 300 # Seconds before timing out long-running queries
DATABASE_CONN_MAX_AGE = (
@ -134,6 +139,7 @@ else:
"PASSWORD": os.environ.get("POSTGRES_PASSWORD", "secret"),
"HOST": os.environ.get("POSTGRES_HOST", "localhost"),
"PORT": int(os.environ.get("POSTGRES_PORT", 5432)),
"CONN_MAX_AGE": DATABASE_CONN_MAX_AGE,
}
}
@ -211,6 +217,10 @@ CELERY_BEAT_SCHEDULE = {
"task": "core.tasks.scan_and_process_files", # Direct task call
"schedule": 20.0, # Every 20 seconds
},
"maintain-recurring-recordings": {
"task": "apps.channels.tasks.maintain_recurring_recordings",
"schedule": 3600.0, # Once an hour ensure recurring schedules stay ahead
},
}
MEDIA_ROOT = BASE_DIR / "media"

View file

@ -4,11 +4,15 @@ ARG REPO_NAME=dispatcharr
ARG BASE_TAG=base
# --- Build frontend ---
FROM node:20 AS frontend-builder
FROM node:24 AS frontend-builder
WORKDIR /app/frontend
COPY ./frontend /app/frontend
RUN corepack enable && corepack prepare yarn@stable --activate && \
yarn install && yarn build && \
# remove any node_modules that may have been copied from the host (x86)
RUN rm -rf node_modules || true; \
npm install --no-audit --progress=false;
RUN npm run build; \
rm -rf node_modules .cache
# --- Redeclare build arguments for the next stage ---

View file

@ -14,6 +14,15 @@ services:
- REDIS_HOST=localhost
- CELERY_BROKER_URL=redis://localhost:6379/0
- DISPATCHARR_LOG_LEVEL=info
# Process Priority Configuration (Optional)
# Lower values = higher priority. Range: -20 (highest) to 19 (lowest)
# Negative values require cap_add: SYS_NICE (uncomment below)
#- UWSGI_NICE_LEVEL=-5 # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority)
#- CELERY_NICE_LEVEL=5 # Celery/EPG/Background tasks (default: 5, low priority)
#
# Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0)
#cap_add:
# - SYS_NICE
# Optional for hardware acceleration
#devices:
# - /dev/dri:/dev/dri # For Intel/AMD GPU acceleration (VA-API)

View file

@ -18,3 +18,12 @@ services:
- REDIS_HOST=localhost
- CELERY_BROKER_URL=redis://localhost:6379/0
- DISPATCHARR_LOG_LEVEL=trace
# Process Priority Configuration (Optional)
# Lower values = higher priority. Range: -20 (highest) to 19 (lowest)
# Negative values require cap_add: SYS_NICE (uncomment below)
#- UWSGI_NICE_LEVEL=-5 # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority)
#- CELERY_NICE_LEVEL=5 # Celery/EPG/Background tasks (default: 5, low priority)
#
# Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0)
#cap_add:
# - SYS_NICE

View file

@ -17,6 +17,15 @@ services:
- REDIS_HOST=localhost
- CELERY_BROKER_URL=redis://localhost:6379/0
- DISPATCHARR_LOG_LEVEL=debug
# Process Priority Configuration (Optional)
# Lower values = higher priority. Range: -20 (highest) to 19 (lowest)
# Negative values require cap_add: SYS_NICE (uncomment below)
#- UWSGI_NICE_LEVEL=-5 # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority)
#- CELERY_NICE_LEVEL=5 # Celery/EPG/Background tasks (default: 5, low priority)
#
# Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0)
#cap_add:
# - SYS_NICE
pgadmin:
image: dpage/pgadmin4

View file

@ -17,6 +17,15 @@ services:
- REDIS_HOST=redis
- CELERY_BROKER_URL=redis://redis:6379/0
- DISPATCHARR_LOG_LEVEL=info
# Process Priority Configuration (Optional)
# Lower values = higher priority. Range: -20 (highest) to 19 (lowest)
# Negative values require cap_add: SYS_NICE (uncomment below)
#- UWSGI_NICE_LEVEL=-5 # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority)
#- CELERY_NICE_LEVEL=5 # Celery/EPG/Background tasks (default: 5, low priority)
#
# Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0)
#cap_add:
# - SYS_NICE
# Optional for hardware acceleration
#group_add:
# - video
@ -53,7 +62,7 @@ services:
command: >
bash -c "
cd /app &&
celery -A dispatcharr worker -l info
nice -n 5 celery -A dispatcharr worker -l info
"
db:

View file

@ -40,6 +40,18 @@ export REDIS_DB=${REDIS_DB:-0}
export DISPATCHARR_PORT=${DISPATCHARR_PORT:-9191}
export LIBVA_DRIVERS_PATH='/usr/local/lib/x86_64-linux-gnu/dri'
export LD_LIBRARY_PATH='/usr/local/lib'
# Process priority configuration
# UWSGI_NICE_LEVEL: Absolute nice value for uWSGI/streaming (default: 0 = normal priority)
# CELERY_NICE_LEVEL: Absolute nice value for Celery/background tasks (default: 5 = low priority)
# Note: The script will automatically calculate the relative offset for Celery since it's spawned by uWSGI
export UWSGI_NICE_LEVEL=${UWSGI_NICE_LEVEL:-0}
CELERY_NICE_ABSOLUTE=${CELERY_NICE_LEVEL:-5}
# Calculate relative nice value for Celery (since nice is relative to parent process)
# Celery is spawned by uWSGI, so we need to add the offset to reach the desired absolute value
export CELERY_NICE_LEVEL=$((CELERY_NICE_ABSOLUTE - UWSGI_NICE_LEVEL))
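# Example: with UWSGI_NICE_LEVEL=-5 and CELERY_NICE_LEVEL=5, the exported relative value
# is 5 - (-5) = 10, so Celery children spawned by uWSGI (already running at nice -5)
# land at the desired absolute nice value of 5.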
# Set LIBVA_DRIVER_NAME if user has specified it
if [ -v LIBVA_DRIVER_NAME ]; then
export LIBVA_DRIVER_NAME
@ -78,6 +90,7 @@ if [[ ! -f /etc/profile.d/dispatcharr.sh ]]; then
DISPATCHARR_ENV DISPATCHARR_DEBUG DISPATCHARR_LOG_LEVEL
REDIS_HOST REDIS_DB POSTGRES_DIR DISPATCHARR_PORT
DISPATCHARR_VERSION DISPATCHARR_TIMESTAMP LIBVA_DRIVERS_PATH LIBVA_DRIVER_NAME LD_LIBRARY_PATH
CELERY_NICE_LEVEL UWSGI_NICE_LEVEL
)
# Process each variable for both profile.d and environment
@ -96,7 +109,16 @@ fi
chmod +x /etc/profile.d/dispatcharr.sh
pip install django-filter
# Ensure root's .bashrc sources the profile.d scripts for interactive non-login shells
if ! grep -q "profile.d/dispatcharr.sh" /root/.bashrc 2>/dev/null; then
cat >> /root/.bashrc << 'EOF'
# Source Dispatcharr environment variables
if [ -f /etc/profile.d/dispatcharr.sh ]; then
. /etc/profile.d/dispatcharr.sh
fi
EOF
fi
# Run init scripts
echo "Starting user setup..."
@ -161,10 +183,12 @@ if [ "$DISPATCHARR_DEBUG" != "true" ]; then
uwsgi_args+=" --disable-logging"
fi
# Launch uwsgi -p passes environment variables to the process
su -p - $POSTGRES_USER -c "cd /app && uwsgi $uwsgi_args &"
uwsgi_pid=$(pgrep uwsgi | sort | head -n1)
echo "✅ uwsgi started with PID $uwsgi_pid"
# Launch uwsgi with configurable nice level (default: 0 for normal priority)
# Users can override via UWSGI_NICE_LEVEL environment variable in docker-compose
# Start with nice as root, then use su -p to drop privileges to the dispatch user
# This preserves both the nice value and environment variables
nice -n $UWSGI_NICE_LEVEL su -p - "$POSTGRES_USER" -c "cd /app && exec uwsgi $uwsgi_args" & uwsgi_pid=$!
echo "✅ uwsgi started with PID $uwsgi_pid (nice $UWSGI_NICE_LEVEL)"
pids+=("$uwsgi_pid")
# sed -i 's/protected-mode yes/protected-mode no/g' /etc/redis/redis.conf
@ -209,7 +233,7 @@ echo "🔍 Running hardware acceleration check..."
# Wait for at least one process to exit and log the process that exited first
if [ ${#pids[@]} -gt 0 ]; then
echo "⏳ Waiting for processes to exit..."
echo "⏳ Dispatcharr is running. Monitoring processes..."
while kill -0 "${pids[@]}" 2>/dev/null; do
sleep 1 # Wait for a second before checking again
done

View file

@ -1,25 +1,67 @@
#!/bin/bash
mkdir -p /data/logos
mkdir -p /data/recordings
mkdir -p /data/uploads/m3us
mkdir -p /data/uploads/epgs
mkdir -p /data/m3us
mkdir -p /data/epgs
mkdir -p /data/plugins
mkdir -p /app/logo_cache
mkdir -p /app/media
# Define directories that need to exist and be owned by PUID:PGID
DATA_DIRS=(
"/data/logos"
"/data/recordings"
"/data/uploads/m3us"
"/data/uploads/epgs"
"/data/m3us"
"/data/epgs"
"/data/plugins"
"/data/models"
)
APP_DIRS=(
"/app/logo_cache"
"/app/media"
)
# Create all directories
for dir in "${DATA_DIRS[@]}" "${APP_DIRS[@]}"; do
mkdir -p "$dir"
done
# Ensure /app itself is owned by PUID:PGID (needed for uwsgi socket creation)
if [ "$(id -u)" = "0" ] && [ -d "/app" ]; then
if [ "$(stat -c '%u:%g' /app)" != "$PUID:$PGID" ]; then
echo "Fixing ownership for /app (non-recursive)"
chown $PUID:$PGID /app
fi
fi
sed -i "s/NGINX_PORT/${DISPATCHARR_PORT}/g" /etc/nginx/sites-enabled/default
# NOTE: macOS doesn't run as root, so only manage permissions
# if this script is running as root
if [ "$(id -u)" = "0" ]; then
# Needs to own ALL of /data except db, we handle that below
chown -R $PUID:$PGID /data
chown -R $PUID:$PGID /app
# Fix data directories (non-recursive to avoid touching user files)
for dir in "${DATA_DIRS[@]}"; do
if [ -d "$dir" ] && [ "$(stat -c '%u:%g' "$dir")" != "$PUID:$PGID" ]; then
echo "Fixing ownership for $dir"
chown $PUID:$PGID "$dir"
fi
done
# Fix app directories (recursive since they're managed by the app)
for dir in "${APP_DIRS[@]}"; do
if [ -d "$dir" ] && [ "$(stat -c '%u:%g' "$dir")" != "$PUID:$PGID" ]; then
echo "Fixing ownership for $dir (recursive)"
chown -R $PUID:$PGID "$dir"
fi
done
# Database permissions
if [ -d /data/db ] && [ "$(stat -c '%u' /data/db)" != "$(id -u postgres)" ]; then
echo "Fixing ownership for /data/db"
chown -R postgres:postgres /data/db
fi
# Fix /data directory ownership (non-recursive)
if [ -d "/data" ] && [ "$(stat -c '%u:%g' /data)" != "$PUID:$PGID" ]; then
echo "Fixing ownership for /data (non-recursive)"
chown $PUID:$PGID /data
fi
# Permissions
chown -R postgres:postgres /data/db
chmod +x /data
fi
fi

View file

@ -7,9 +7,10 @@ exec-before = python /app/scripts/wait_for_redis.py
; Start Redis first
attach-daemon = redis-server
; Then start other services
attach-daemon = celery -A dispatcharr worker --autoscale=6,1
attach-daemon = celery -A dispatcharr beat
; Then start other services with configurable nice level (default: 5 for low priority)
; Users can override via CELERY_NICE_LEVEL environment variable in docker-compose
attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr worker --autoscale=6,1
attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr beat
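; Example: assuming the entrypoint exports UWSGI_NICE_LEVEL=-5 and an absolute Celery level
; of 5, it sets CELERY_NICE_LEVEL=10; uWSGI expands $(CELERY_NICE_LEVEL) here, so the Celery
; daemons start with a nice value 10 higher than uWSGI's, i.e. at absolute nice 5.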
attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application
attach-daemon = cd /app/frontend && npm run dev

View file

@ -9,9 +9,10 @@ exec-pre = python /app/scripts/wait_for_redis.py
; Start Redis first
attach-daemon = redis-server
; Then start other services
attach-daemon = celery -A dispatcharr worker --autoscale=6,1
attach-daemon = celery -A dispatcharr beat
; Then start other services with configurable nice level (default: 5 for low priority)
; Users can override via CELERY_NICE_LEVEL environment variable in docker-compose
attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr worker --autoscale=6,1
attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr beat
attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application
attach-daemon = cd /app/frontend && npm run dev

View file

@ -9,9 +9,10 @@ exec-pre = python /app/scripts/wait_for_redis.py
; Start Redis first
attach-daemon = redis-server
; Then start other services
attach-daemon = celery -A dispatcharr worker --autoscale=6,1
attach-daemon = celery -A dispatcharr beat
; Then start other services with configurable nice level (default: 5 for low priority)
; Users can override via CELERY_NICE_LEVEL environment variable in docker-compose
attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr worker --autoscale=6,1
attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr beat
attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application
# Core settings

File diff suppressed because it is too large

View file

@ -1,5 +1,5 @@
{
"name": "vite",
"name": "frontend",
"private": true,
"version": "0.0.0",
"type": "module",
@ -24,14 +24,14 @@
"@mantine/hooks": "~8.0.1",
"@mantine/notifications": "~8.0.1",
"@tanstack/react-table": "^8.21.2",
"allotment": "^1.20.3",
"allotment": "^1.20.4",
"dayjs": "^1.11.13",
"formik": "^2.4.6",
"hls.js": "^1.5.20",
"lucide-react": "^0.511.0",
"mpegts.js": "^1.8.0",
"react": "^19.0.0",
"react-dom": "^19.0.0",
"react": "^19.1.0",
"react-dom": "^19.1.0",
"react-draggable": "^4.4.6",
"react-pro-sidebar": "^1.1.0",
"react-router-dom": "^7.3.0",
@ -45,12 +45,15 @@
},
"devDependencies": {
"@eslint/js": "^9.21.0",
"@swc/core": "npm:@swc/wasm@1.13.20",
"@swc/wasm": "^1.13.20",
"@testing-library/dom": "^10.4.1",
"@testing-library/jest-dom": "^6.8.0",
"@testing-library/react": "^16.3.0",
"@testing-library/user-event": "^14.6.1",
"@types/react": "^19.0.10",
"@types/react-dom": "^19.0.4",
"@vitejs/plugin-react-swc": "^3.8.0",
"@types/react": "^19.1.0",
"@types/react-dom": "^19.1.0",
"@vitejs/plugin-react-swc": "^4.1.0",
"eslint": "^9.21.0",
"eslint-plugin-react-hooks": "^5.1.0",
"eslint-plugin-react-refresh": "^0.4.19",
@ -59,5 +62,10 @@
"prettier": "^3.5.3",
"vite": "^6.2.0",
"vitest": "^3.2.4"
},
"resolutions": {
"vite": "7.1.7",
"react": "19.1.0",
"react-dom": "19.1.0"
}
}

View file

@ -112,15 +112,21 @@ const App = () => {
height: 0,
}}
navbar={{
width: open ? drawerWidth : miniDrawerWidth,
width: isAuthenticated
? open
? drawerWidth
: miniDrawerWidth
: 0,
}}
>
<Sidebar
drawerWidth
miniDrawerWidth
collapsed={!open}
toggleDrawer={toggleDrawer}
/>
{isAuthenticated && (
<Sidebar
drawerWidth={drawerWidth}
miniDrawerWidth={miniDrawerWidth}
collapsed={!open}
toggleDrawer={toggleDrawer}
/>
)}
<AppShell.Main>
<Box

View file

@ -36,7 +36,6 @@ export const WebsocketProvider = ({ children }) => {
const updateEPG = useEPGsStore((s) => s.updateEPG);
const updateEPGProgress = useEPGsStore((s) => s.updateEPGProgress);
const playlists = usePlaylistsStore((s) => s.playlists);
const updatePlaylist = usePlaylistsStore((s) => s.updatePlaylist);
// Calculate reconnection delay with exponential backoff
@ -247,10 +246,14 @@ export const WebsocketProvider = ({ children }) => {
// Update the playlist status whenever we receive a status update
// Not just when progress is 100% or status is pending_setup
if (parsedEvent.data.status && parsedEvent.data.account) {
// Check if playlists is an object with IDs as keys or an array
const playlist = Array.isArray(playlists)
? playlists.find((p) => p.id === parsedEvent.data.account)
: playlists[parsedEvent.data.account];
// Get fresh playlists from store to avoid stale state from React render cycle
const currentPlaylists = usePlaylistsStore.getState().playlists;
const isArray = Array.isArray(currentPlaylists);
const playlist = isArray
? currentPlaylists.find(
(p) => p.id === parsedEvent.data.account
)
: currentPlaylists[parsedEvent.data.account];
if (playlist) {
// When we receive a "success" status with 100% progress, this is a completed refresh
@ -273,19 +276,19 @@ export const WebsocketProvider = ({ children }) => {
'M3U refresh completed successfully:',
updateData
);
fetchPlaylists(); // Refresh playlists to ensure UI is up-to-date
fetchChannelProfiles(); // Ensure channel profiles are updated
}
updatePlaylist(updateData);
fetchPlaylists(); // Refresh playlists to ensure UI is up-to-date
fetchChannelProfiles(); // Ensure channel profiles are updated
} else {
// Log when playlist can't be found for debugging purposes
console.warn(
`Received update for unknown playlist ID: ${parsedEvent.data.account}`,
Array.isArray(playlists)
? 'playlists is array'
: 'playlists is object',
Object.keys(playlists).length
// Playlist not in store yet - this happens when backend sends websocket
// updates immediately after creating the playlist, before the API response
// returns. The frontend will receive a 'playlist_created' event shortly
// which will trigger a fetchPlaylists() to sync the store.
console.log(
`Received update for playlist ID ${parsedEvent.data.account} not yet in store. ` +
`Waiting for playlist_created event to sync...`
);
}
}
@ -566,14 +569,22 @@ export const WebsocketProvider = ({ children }) => {
break;
case 'epg_refresh':
// Update the store with progress information
updateEPGProgress(parsedEvent.data);
// If we have source_id/account info, update the EPG source status
if (parsedEvent.data.source_id || parsedEvent.data.account) {
// If we have source/account info, check if EPG exists before processing
if (parsedEvent.data.source || parsedEvent.data.account) {
const sourceId =
parsedEvent.data.source_id || parsedEvent.data.account;
parsedEvent.data.source || parsedEvent.data.account;
const epg = epgs[sourceId];
// Only update progress if the EPG still exists in the store
// This prevents crashes when receiving updates for deleted EPGs
if (epg) {
// Update the store with progress information
updateEPGProgress(parsedEvent.data);
} else {
// EPG was deleted, ignore this update
console.debug(`Ignoring EPG refresh update for deleted EPG ${sourceId}`);
break;
}
if (epg) {
// Check for any indication of an error (either via status or error field)
@ -639,6 +650,16 @@ export const WebsocketProvider = ({ children }) => {
}
break;
case 'epg_data_created':
// A new EPG data entry was created (e.g., for a dummy EPG)
// Fetch EPG data so the channel form can immediately assign it
try {
await fetchEPGData();
} catch (e) {
console.warn('Failed to refresh EPG data after creation:', e);
}
break;
case 'stream_rehash':
// Handle stream rehash progress updates
if (parsedEvent.data.action === 'starting') {
@ -739,6 +760,14 @@ export const WebsocketProvider = ({ children }) => {
break;
case 'playlist_created':
// Backend signals that a new playlist has been created and we should refresh
console.log(
'Playlist created event received, refreshing playlists...'
);
fetchPlaylists();
break;
case 'bulk_channel_creation_progress': {
// Handle progress updates with persistent notifications like stream rehash
const data = parsedEvent.data;

View file

@ -170,7 +170,7 @@ export default class API {
static async logout() {
return await request(`${host}/api/accounts/auth/logout/`, {
auth: false,
auth: true, // Send JWT token so backend can identify the user
method: 'POST',
});
}
@ -462,7 +462,16 @@ export default class API {
}
);
// Don't automatically update the store here - let the caller handle it
// Show success notification
if (response.message) {
notifications.show({
title: 'Channels Updated',
message: response.message,
color: 'green',
autoClose: 4000,
});
}
return response;
} catch (e) {
errorNotification('Failed to update channels', e);
@ -562,6 +571,29 @@ export default class API {
}
}
static async setChannelTvgIdsFromEpg(channelIds) {
try {
const response = await request(
`${host}/api/channels/channels/set-tvg-ids-from-epg/`,
{
method: 'POST',
body: { channel_ids: channelIds },
}
);
notifications.show({
title: 'Task Started',
message: response.message,
color: 'blue',
});
return response;
} catch (e) {
errorNotification('Failed to start EPG TVG-ID setting task', e);
throw e;
}
}
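// Example usage (hypothetical channel IDs): start the background TVG-ID task for three channels
// await API.setChannelTvgIdsFromEpg([101, 102, 103]);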
static async assignChannelNumbers(channelIds, startingNum = 1) {
try {
const response = await request(`${host}/api/channels/channels/assign/`, {
@ -1021,8 +1053,20 @@ export default class API {
}
static async updateEPG(values, isToggle = false) {
// Validate that values is an object
if (!values || typeof values !== 'object') {
console.error('updateEPG called with invalid values:', values);
return;
}
const { id, ...payload } = values;
// Validate that we have an ID and payload is an object
if (!id || typeof payload !== 'object') {
console.error('updateEPG: invalid id or payload', { id, payload });
return;
}
try {
// If this is just toggling the active state, make a simpler request
if (
@ -1095,6 +1139,21 @@ export default class API {
}
}
static async getTimezones() {
try {
const response = await request(`${host}/api/core/timezones/`);
return response;
} catch (e) {
errorNotification('Failed to retrieve timezones', e);
// Return fallback data instead of throwing
return {
timezones: ['UTC', 'US/Eastern', 'US/Central', 'US/Mountain', 'US/Pacific'],
grouped: {},
count: 5
};
}
}
static async getStreamProfiles() {
try {
const response = await request(`${host}/api/core/streamprofiles/`);
@ -1927,6 +1986,77 @@ export default class API {
}
}
// VOD Logo Methods
static async getVODLogos(params = {}) {
try {
// Transform usage filter to match backend expectations
const apiParams = { ...params };
if (apiParams.usage === 'used') {
apiParams.used = 'true';
delete apiParams.usage;
} else if (apiParams.usage === 'unused') {
apiParams.used = 'false';
delete apiParams.usage;
} else if (apiParams.usage === 'movies') {
apiParams.used = 'movies';
delete apiParams.usage;
} else if (apiParams.usage === 'series') {
apiParams.used = 'series';
delete apiParams.usage;
}
const queryParams = new URLSearchParams(apiParams);
const response = await request(
`${host}/api/vod/vodlogos/?${queryParams.toString()}`
);
return response;
} catch (e) {
errorNotification('Failed to retrieve VOD logos', e);
throw e;
}
}
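// Example usage (hypothetical filter value): list logos referenced only by series
// const seriesLogos = await API.getVODLogos({ usage: 'series' });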
static async deleteVODLogo(id) {
try {
await request(`${host}/api/vod/vodlogos/${id}/`, {
method: 'DELETE',
});
return true;
} catch (e) {
errorNotification('Failed to delete VOD logo', e);
throw e;
}
}
static async deleteVODLogos(ids) {
try {
await request(`${host}/api/vod/vodlogos/bulk-delete/`, {
method: 'DELETE',
body: { logo_ids: ids },
});
return true;
} catch (e) {
errorNotification('Failed to delete VOD logos', e);
throw e;
}
}
static async cleanupUnusedVODLogos() {
try {
const response = await request(`${host}/api/vod/vodlogos/cleanup/`, {
method: 'POST',
});
return response;
} catch (e) {
errorNotification('Failed to cleanup unused VOD logos', e);
throw e;
}
}
static async getChannelProfiles() {
try {
const response = await request(`${host}/api/channels/profiles/`);
@ -2050,6 +2180,83 @@ export default class API {
}
}
static async updateRecording(id, values) {
try {
const response = await request(`${host}/api/channels/recordings/${id}/`, {
method: 'PATCH',
body: values,
});
useChannelsStore.getState().fetchRecordings();
return response;
} catch (e) {
errorNotification(`Failed to update recording ${id}`, e);
}
}
static async getComskipConfig() {
try {
return await request(`${host}/api/channels/dvr/comskip-config/`);
} catch (e) {
errorNotification('Failed to retrieve comskip configuration', e);
}
}
static async uploadComskipIni(file) {
try {
const formData = new FormData();
formData.append('file', file);
return await request(`${host}/api/channels/dvr/comskip-config/`, {
method: 'POST',
body: formData,
});
} catch (e) {
errorNotification('Failed to upload comskip.ini', e);
}
}
static async listRecurringRules() {
try {
const response = await request(`${host}/api/channels/recurring-rules/`);
return response;
} catch (e) {
errorNotification('Failed to retrieve recurring DVR rules', e);
}
}
static async createRecurringRule(payload) {
try {
const response = await request(`${host}/api/channels/recurring-rules/`, {
method: 'POST',
body: payload,
});
return response;
} catch (e) {
errorNotification('Failed to create recurring DVR rule', e);
}
}
static async updateRecurringRule(ruleId, payload) {
try {
const response = await request(`${host}/api/channels/recurring-rules/${ruleId}/`, {
method: 'PATCH',
body: payload,
});
return response;
} catch (e) {
errorNotification(`Failed to update recurring rule ${ruleId}`, e);
}
}
static async deleteRecurringRule(ruleId) {
try {
await request(`${host}/api/channels/recurring-rules/${ruleId}/`, {
method: 'DELETE',
});
} catch (e) {
errorNotification(`Failed to delete recurring rule ${ruleId}`, e);
}
}
static async deleteRecording(id) {
try {
await request(`${host}/api/channels/recordings/${id}/`, { method: 'DELETE' });
@ -2194,9 +2401,15 @@ export default class API {
// If successful, requery channels to update UI
if (response.success) {
// Build message based on whether EPG sources need refreshing
let message = `Updated ${response.channels_updated} channel${response.channels_updated !== 1 ? 's' : ''}`;
if (response.programs_refreshed > 0) {
message += `, refreshing ${response.programs_refreshed} EPG source${response.programs_refreshed !== 1 ? 's' : ''}`;
}
notifications.show({
title: 'EPG Association',
message: `Updated ${response.channels_updated} channels, refreshing ${response.programs_refreshed} EPG sources.`,
message: message,
color: 'blue',
});
@ -2457,4 +2670,21 @@ export default class API {
errorNotification('Failed to update playback position', e);
}
}
static async getSystemEvents(limit = 100, offset = 0, eventType = null) {
try {
const params = new URLSearchParams();
params.append('limit', limit);
params.append('offset', offset);
if (eventType) {
params.append('event_type', eventType);
}
const response = await request(
`${host}/api/core/system-events/?${params.toString()}`
);
return response;
} catch (e) {
errorNotification('Failed to retrieve system events', e);
}
}
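// Example usage (hypothetical event type): fetch the 50 most recent failed logins
// const { events, total } = await API.getSystemEvents(50, 0, 'login_failed');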
}

View file

@ -17,7 +17,9 @@ import {
Table,
Divider,
} from '@mantine/core';
import { Play } from 'lucide-react';
import { Play, Copy } from 'lucide-react';
import { notifications } from '@mantine/notifications';
import { copyToClipboard } from '../utils';
import useVODStore from '../store/useVODStore';
import useVideoStore from '../store/useVideoStore';
import useSettingsStore from '../store/settings';
@ -262,6 +264,39 @@ const SeriesModal = ({ series, opened, onClose }) => {
showVideo(streamUrl, 'vod', episode);
};
const getEpisodeStreamUrl = (episode) => {
let streamUrl = `/proxy/vod/episode/${episode.uuid}`;
// Add selected provider as query parameter if available
if (selectedProvider) {
// Use stream_id for most specific selection, fallback to account_id
if (selectedProvider.stream_id) {
streamUrl += `?stream_id=${encodeURIComponent(selectedProvider.stream_id)}`;
} else {
streamUrl += `?m3u_account_id=${selectedProvider.m3u_account.id}`;
}
}
if (env_mode === 'dev') {
streamUrl = `${window.location.protocol}//${window.location.hostname}:5656${streamUrl}`;
} else {
streamUrl = `${window.location.origin}${streamUrl}`;
}
return streamUrl;
};
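// Example result (hypothetical UUID and stream id):
//   /proxy/vod/episode/3f2a9c1e-...?stream_id=12345
//   or, when env_mode === 'dev', http://<hostname>:5656/proxy/vod/episode/3f2a9c1e-...?stream_id=12345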
const handleCopyEpisodeLink = async (episode) => {
const streamUrl = getEpisodeStreamUrl(episode);
const success = await copyToClipboard(streamUrl);
notifications.show({
title: success ? 'Link Copied!' : 'Copy Failed',
message: success
? 'Episode link copied to clipboard'
: 'Failed to copy link to clipboard',
color: success ? 'green' : 'red',
});
};
const handleEpisodeRowClick = (episode) => {
setExpandedEpisode(expandedEpisode === episode.id ? null : episode.id);
};
@ -611,20 +646,34 @@ const SeriesModal = ({ series, opened, onClose }) => {
</Text>
</Table.Td>
<Table.Td>
<ActionIcon
variant="filled"
color="blue"
size="sm"
disabled={
providers.length > 0 && !selectedProvider
}
onClick={(e) => {
e.stopPropagation();
handlePlayEpisode(episode);
}}
>
<Play size={12} />
</ActionIcon>
<Group spacing="xs">
<ActionIcon
variant="filled"
color="blue"
size="sm"
disabled={
providers.length > 0 &&
!selectedProvider
}
onClick={(e) => {
e.stopPropagation();
handlePlayEpisode(episode);
}}
>
<Play size={12} />
</ActionIcon>
<ActionIcon
variant="outline"
color="gray"
size="sm"
onClick={(e) => {
e.stopPropagation();
handleCopyEpisodeLink(episode);
}}
>
<Copy size={12} />
</ActionIcon>
</Group>
</Table.Td>
</Table.Tr>
{expandedEpisode === episode.id && (
@ -879,7 +928,8 @@ const SeriesModal = ({ series, opened, onClose }) => {
src={trailerUrl}
title="YouTube Trailer"
frameBorder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
referrerPolicy="strict-origin-when-cross-origin"
allowFullScreen
style={{
position: 'absolute',

View file

@ -188,8 +188,8 @@ const Sidebar = ({ collapsed, toggleDrawer, drawerWidth, miniDrawerWidth }) => {
}
};
const onLogout = () => {
logout();
const onLogout = async () => {
await logout();
window.location.reload();
};

View file

@ -0,0 +1,333 @@
import React, { useState, useEffect, useCallback } from 'react';
import {
ActionIcon,
Box,
Button,
Card,
Group,
NumberInput,
Pagination,
Select,
Stack,
Text,
Title,
} from '@mantine/core';
import { useElementSize } from '@mantine/hooks';
import {
ChevronDown,
CirclePlay,
Download,
Gauge,
HardDriveDownload,
List,
LogIn,
LogOut,
RefreshCw,
Shield,
ShieldAlert,
SquareX,
Timer,
Users,
Video,
XCircle,
} from 'lucide-react';
import dayjs from 'dayjs';
import API from '../api';
import useLocalStorage from '../hooks/useLocalStorage';
const SystemEvents = () => {
const [events, setEvents] = useState([]);
const [totalEvents, setTotalEvents] = useState(0);
const [isExpanded, setIsExpanded] = useState(false);
const { ref: cardRef, width: cardWidth } = useElementSize();
const isNarrow = cardWidth < 650;
const [isLoading, setIsLoading] = useState(false);
const [dateFormatSetting] = useLocalStorage('date-format', 'mdy');
const dateFormat = dateFormatSetting === 'mdy' ? 'MM/DD' : 'DD/MM';
const [eventsRefreshInterval, setEventsRefreshInterval] = useLocalStorage(
'events-refresh-interval',
0
);
const [eventsLimit, setEventsLimit] = useLocalStorage('events-limit', 100);
const [currentPage, setCurrentPage] = useState(1);
// Calculate offset based on current page and limit
const offset = (currentPage - 1) * eventsLimit;
const totalPages = Math.ceil(totalEvents / eventsLimit);
const fetchEvents = useCallback(async () => {
try {
setIsLoading(true);
const response = await API.getSystemEvents(eventsLimit, offset);
if (response && response.events) {
setEvents(response.events);
setTotalEvents(response.total || 0);
}
} catch (error) {
console.error('Error fetching system events:', error);
} finally {
setIsLoading(false);
}
}, [eventsLimit, offset]);
// Fetch events on mount and when eventsRefreshInterval changes
useEffect(() => {
fetchEvents();
// Set up polling if interval is set and events section is expanded
if (eventsRefreshInterval > 0 && isExpanded) {
const interval = setInterval(fetchEvents, eventsRefreshInterval * 1000);
return () => clearInterval(interval);
}
}, [fetchEvents, eventsRefreshInterval, isExpanded]);
// Reset to first page when limit changes
useEffect(() => {
setCurrentPage(1);
}, [eventsLimit]);
const getEventIcon = (eventType) => {
switch (eventType) {
case 'channel_start':
return <CirclePlay size={16} />;
case 'channel_stop':
return <SquareX size={16} />;
case 'channel_reconnect':
return <RefreshCw size={16} />;
case 'channel_buffering':
return <Timer size={16} />;
case 'channel_failover':
return <HardDriveDownload size={16} />;
case 'client_connect':
return <Users size={16} />;
case 'client_disconnect':
return <Users size={16} />;
case 'recording_start':
return <Video size={16} />;
case 'recording_end':
return <Video size={16} />;
case 'stream_switch':
return <HardDriveDownload size={16} />;
case 'm3u_refresh':
return <RefreshCw size={16} />;
case 'm3u_download':
return <Download size={16} />;
case 'epg_refresh':
return <RefreshCw size={16} />;
case 'epg_download':
return <Download size={16} />;
case 'login_success':
return <LogIn size={16} />;
case 'login_failed':
return <ShieldAlert size={16} />;
case 'logout':
return <LogOut size={16} />;
case 'm3u_blocked':
return <XCircle size={16} />;
case 'epg_blocked':
return <XCircle size={16} />;
default:
return <Gauge size={16} />;
}
};
const getEventColor = (eventType) => {
switch (eventType) {
case 'channel_start':
case 'client_connect':
case 'recording_start':
case 'login_success':
return 'green';
case 'channel_reconnect':
return 'yellow';
case 'channel_stop':
case 'client_disconnect':
case 'recording_end':
case 'logout':
return 'gray';
case 'channel_buffering':
return 'yellow';
case 'channel_failover':
case 'channel_error':
return 'orange';
case 'stream_switch':
return 'blue';
case 'm3u_refresh':
case 'epg_refresh':
return 'cyan';
case 'm3u_download':
case 'epg_download':
return 'teal';
case 'login_failed':
case 'm3u_blocked':
case 'epg_blocked':
return 'red';
default:
return 'gray';
}
};
return (
<Card
ref={cardRef}
shadow="sm"
padding="sm"
radius="md"
withBorder
style={{
color: '#fff',
backgroundColor: '#27272A',
width: '100%',
maxWidth: isExpanded ? '100%' : '800px',
marginLeft: 'auto',
marginRight: 'auto',
transition: 'max-width 0.3s ease',
}}
>
<Group justify="space-between" mb={isExpanded ? 'sm' : 0}>
<Group gap="xs">
<Gauge size={20} />
<Title order={4}>System Events</Title>
</Group>
<Group gap="xs">
{(isExpanded || !isNarrow) && (
<>
<NumberInput
size="xs"
label="Events Per Page"
value={eventsLimit}
onChange={(value) => setEventsLimit(value || 10)}
min={10}
max={1000}
step={10}
style={{ width: 130 }}
/>
<Select
size="xs"
label="Auto Refresh"
value={eventsRefreshInterval.toString()}
onChange={(value) => setEventsRefreshInterval(parseInt(value))}
data={[
{ value: '0', label: 'Manual' },
{ value: '5', label: '5s' },
{ value: '10', label: '10s' },
{ value: '30', label: '30s' },
{ value: '60', label: '1m' },
]}
style={{ width: 120 }}
/>
<Button
size="xs"
variant="subtle"
onClick={fetchEvents}
loading={isLoading}
style={{ marginTop: 'auto' }}
>
Refresh
</Button>
</>
)}
<ActionIcon
variant="subtle"
onClick={() => setIsExpanded(!isExpanded)}
>
<ChevronDown
size={18}
style={{
transform: isExpanded ? 'rotate(180deg)' : 'rotate(0deg)',
transition: 'transform 0.2s',
}}
/>
</ActionIcon>
</Group>
</Group>
{isExpanded && (
<>
{totalEvents > eventsLimit && (
<Group justify="space-between" align="center" mt="sm" mb="xs">
<Text size="xs" c="dimmed">
Showing {offset + 1}-
{Math.min(offset + eventsLimit, totalEvents)} of {totalEvents}
</Text>
<Pagination
total={totalPages}
value={currentPage}
onChange={setCurrentPage}
size="sm"
/>
</Group>
)}
<Stack
gap="xs"
mt="sm"
style={{
maxHeight: '60vh',
overflowY: 'auto',
}}
>
{events.length === 0 ? (
<Text size="sm" c="dimmed" ta="center" py="xl">
No events recorded yet
</Text>
) : (
events.map((event) => (
<Box
key={event.id}
p="xs"
style={{
backgroundColor: '#1A1B1E',
borderRadius: '4px',
borderLeft: `3px solid var(--mantine-color-${getEventColor(event.event_type)}-6)`,
}}
>
<Group justify="space-between" wrap="nowrap">
<Group gap="xs" style={{ flex: 1, minWidth: 0 }}>
<Box c={`${getEventColor(event.event_type)}.6`}>
{getEventIcon(event.event_type)}
</Box>
<Stack gap={2} style={{ flex: 1, minWidth: 0 }}>
<Group gap="xs" wrap="nowrap">
<Text size="sm" fw={500}>
{event.event_type_display || event.event_type}
</Text>
{event.channel_name && (
<Text
size="sm"
c="dimmed"
truncate
style={{ maxWidth: '300px' }}
>
{event.channel_name}
</Text>
)}
</Group>
{event.details &&
Object.keys(event.details).length > 0 && (
<Text size="xs" c="dimmed">
{Object.entries(event.details)
.filter(
([key]) =>
!['stream_url', 'new_url'].includes(key)
)
.map(([key, value]) => `${key}: ${value}`)
.join(', ')}
</Text>
)}
</Stack>
</Group>
<Text size="xs" c="dimmed" style={{ whiteSpace: 'nowrap' }}>
{dayjs(event.timestamp).format(`${dateFormat} HH:mm:ss`)}
</Text>
</Group>
</Box>
))
)}
</Stack>
</>
)}
</Card>
);
};
export default SystemEvents;

View file

@ -13,7 +13,9 @@ import {
Stack,
Modal,
} from '@mantine/core';
import { Play } from 'lucide-react';
import { Play, Copy } from 'lucide-react';
import { notifications } from '@mantine/notifications';
import { copyToClipboard } from '../utils';
import useVODStore from '../store/useVODStore';
import useVideoStore from '../store/useVideoStore';
import useSettingsStore from '../store/settings';
@ -232,9 +234,9 @@ const VODModal = ({ vod, opened, onClose }) => {
}
}, [opened]);
const handlePlayVOD = () => {
const getStreamUrl = () => {
const vodToPlay = detailedVOD || vod;
if (!vodToPlay) return;
if (!vodToPlay) return null;
let streamUrl = `/proxy/vod/movie/${vod.uuid}`;
@ -253,9 +255,29 @@ const VODModal = ({ vod, opened, onClose }) => {
} else {
streamUrl = `${window.location.origin}${streamUrl}`;
}
return streamUrl;
};
const handlePlayVOD = () => {
const streamUrl = getStreamUrl();
if (!streamUrl) return;
const vodToPlay = detailedVOD || vod;
showVideo(streamUrl, 'vod', vodToPlay);
};
const handleCopyLink = async () => {
const streamUrl = getStreamUrl();
if (!streamUrl) return;
const success = await copyToClipboard(streamUrl);
notifications.show({
title: success ? 'Link Copied!' : 'Copy Failed',
message: success
? 'Stream link copied to clipboard'
: 'Failed to copy link to clipboard',
color: success ? 'green' : 'red',
});
};
// Helper to get embeddable YouTube URL
const getEmbedUrl = (url) => {
if (!url) return '';
@ -486,6 +508,16 @@ const VODModal = ({ vod, opened, onClose }) => {
Watch Trailer
</Button>
)}
<Button
leftSection={<Copy size={16} />}
variant="outline"
color="gray"
size="sm"
onClick={handleCopyLink}
style={{ alignSelf: 'flex-start' }}
>
Copy Link
</Button>
</Group>
</Stack>
</Flex>
@ -662,7 +694,8 @@ const VODModal = ({ vod, opened, onClose }) => {
src={trailerUrl}
title="YouTube Trailer"
frameBorder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
referrerPolicy="strict-origin-when-cross-origin"
allowFullScreen
style={{
position: 'absolute',

View file

@ -11,6 +11,7 @@ import logo from '../../images/logo.png';
import { useChannelLogoSelection } from '../../hooks/useSmartLogos';
import useLogosStore from '../../store/logos';
import LazyLogo from '../LazyLogo';
import LogoForm from './Logo';
import {
Box,
Button,
@ -37,7 +38,7 @@ import {
import { notifications } from '@mantine/notifications';
import { ListOrdered, SquarePlus, SquareX, X, Zap } from 'lucide-react';
import useEPGsStore from '../../store/epgs';
import { Dropzone } from '@mantine/dropzone';
import { FixedSizeList as List } from 'react-window';
import { USER_LEVELS, USER_LEVEL_LABELS } from '../../constants';
@ -71,7 +72,7 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => {
const tvgs = useEPGsStore((s) => s.tvgs);
const tvgsById = useEPGsStore((s) => s.tvgsById);
const [logoPreview, setLogoPreview] = useState(null);
const [logoModalOpen, setLogoModalOpen] = useState(false);
const [channelStreams, setChannelStreams] = useState([]);
const [channelGroupModelOpen, setChannelGroupModalOpen] = useState(false);
const [epgPopoverOpened, setEpgPopoverOpened] = useState(false);
@ -97,33 +98,12 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => {
setChannelStreams(Array.from(streamSet));
};
const handleLogoChange = async (files) => {
if (files.length === 1) {
const file = files[0];
// Validate file size on frontend first
if (file.size > 5 * 1024 * 1024) {
// 5MB
notifications.show({
title: 'Error',
message: 'File too large. Maximum size is 5MB.',
color: 'red',
});
return;
}
try {
const retval = await API.uploadLogo(file);
// Note: API.uploadLogo already adds the logo to the store, no need to fetch
setLogoPreview(retval.cache_url);
formik.setFieldValue('logo_id', retval.id);
} catch (error) {
console.error('Logo upload failed:', error);
// Error notification is already handled in API.uploadLogo
}
} else {
setLogoPreview(null);
const handleLogoSuccess = ({ logo }) => {
if (logo && logo.id) {
formik.setFieldValue('logo_id', logo.id);
ensureLogosLoaded(); // Refresh logos
}
setLogoModalOpen(false);
};
const handleAutoMatchEpg = async () => {
@ -283,6 +263,34 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => {
}
};
const handleSetTvgIdFromEpg = () => {
const epgDataId = formik.values.epg_data_id;
if (!epgDataId) {
notifications.show({
title: 'No EPG Selected',
message: 'Please select an EPG source first.',
color: 'orange',
});
return;
}
const tvg = tvgsById[epgDataId];
if (tvg && tvg.tvg_id) {
formik.setFieldValue('tvg_id', tvg.tvg_id);
notifications.show({
title: 'Success',
message: `TVG-ID set to "${tvg.tvg_id}"`,
color: 'green',
});
} else {
notifications.show({
title: 'No TVG-ID Available',
message: 'No TVG-ID found in the selected EPG data.',
color: 'orange',
});
}
};
const formik = useFormik({
initialValues: {
name: '',
@ -809,35 +817,13 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => {
</Stack>
</Group>
<Group>
<Divider size="xs" style={{ flex: 1 }} />
<Text size="xs" c="dimmed">
OR
</Text>
<Divider size="xs" style={{ flex: 1 }} />
</Group>
<Stack>
<Text size="sm">Upload Logo</Text>
<Dropzone
onDrop={handleLogoChange}
onReject={(files) => console.log('rejected files', files)}
maxSize={5 * 1024 ** 2}
>
<Group
justify="center"
gap="xl"
mih={40}
style={{ pointerEvents: 'none' }}
>
<Text size="sm" inline>
Drag images here or click to select files
</Text>
</Group>
</Dropzone>
<Center></Center>
</Stack>
<Button
onClick={() => setLogoModalOpen(true)}
fullWidth
variant="default"
>
Upload or Create Logo
</Button>
</Stack>
<Divider size="sm" orientation="vertical" />
@ -865,7 +851,23 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => {
<TextInput
id="tvg_id"
name="tvg_id"
label="TVG-ID"
label={
<Group gap="xs">
<span>TVG-ID</span>
{formik.values.epg_data_id && (
<Button
size="xs"
variant="transparent"
onClick={handleSetTvgIdFromEpg}
title="Set TVG-ID from EPG data"
p={0}
h="auto"
>
Use EPG TVG-ID
</Button>
)}
</Group>
}
value={formik.values.tvg_id}
onChange={formik.handleChange}
error={formik.errors.tvg_id ? formik.touched.tvg_id : ''}
@ -1046,8 +1048,10 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => {
type="submit"
variant="default"
disabled={formik.isSubmitting}
loading={formik.isSubmitting}
loaderProps={{ type: 'dots' }}
>
Submit
{formik.isSubmitting ? 'Saving...' : 'Submit'}
</Button>
</Flex>
</form>
@ -1057,6 +1061,12 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => {
isOpen={channelGroupModelOpen}
onClose={handleChannelGroupModalClose}
/>
<LogoForm
isOpen={logoModalOpen}
onClose={() => setLogoModalOpen(false)}
onSuccess={handleLogoSuccess}
/>
</>
);
};

View file

@ -2,6 +2,7 @@ import React, { useState, useEffect, useMemo, useRef } from 'react';
import useChannelsStore from '../../store/channels';
import API from '../../api';
import useStreamProfilesStore from '../../store/streamProfiles';
import useEPGsStore from '../../store/epgs';
import ChannelGroupForm from './ChannelGroup';
import {
Box,
@ -29,36 +30,160 @@ import { FixedSizeList as List } from 'react-window';
import { useForm } from '@mantine/form';
import { notifications } from '@mantine/notifications';
import { USER_LEVELS, USER_LEVEL_LABELS } from '../../constants';
import { useChannelLogoSelection } from '../../hooks/useSmartLogos';
import LazyLogo from '../LazyLogo';
import logo from '../../images/logo.png';
import ConfirmationDialog from '../ConfirmationDialog';
import useWarningsStore from '../../store/warnings';
const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
const theme = useMantineTheme();
const groupListRef = useRef(null);
const logoListRef = useRef(null);
const channelGroups = useChannelsStore((s) => s.channelGroups);
const {
logos: channelLogos,
ensureLogosLoaded,
isLoading: logosLoading,
} = useChannelLogoSelection();
useEffect(() => {
ensureLogosLoaded();
}, [ensureLogosLoaded]);
const streamProfiles = useStreamProfilesStore((s) => s.profiles);
const epgs = useEPGsStore((s) => s.epgs);
const tvgs = useEPGsStore((s) => s.tvgs);
const fetchEPGs = useEPGsStore((s) => s.fetchEPGs);
const [channelGroupModelOpen, setChannelGroupModalOpen] = useState(false);
const [selectedChannelGroup, setSelectedChannelGroup] = useState('-1');
const [selectedLogoId, setSelectedLogoId] = useState('-1');
const [isSubmitting, setIsSubmitting] = useState(false);
const [regexFind, setRegexFind] = useState('');
const [regexReplace, setRegexReplace] = useState('');
const [selectedDummyEpgId, setSelectedDummyEpgId] = useState(null);
const [groupPopoverOpened, setGroupPopoverOpened] = useState(false);
const [groupFilter, setGroupFilter] = useState('');
const groupOptions = Object.values(channelGroups);
const [logoPopoverOpened, setLogoPopoverOpened] = useState(false);
const [logoFilter, setLogoFilter] = useState('');
// Confirmation dialog states
const [confirmSetNamesOpen, setConfirmSetNamesOpen] = useState(false);
const [confirmSetLogosOpen, setConfirmSetLogosOpen] = useState(false);
const [confirmSetTvgIdsOpen, setConfirmSetTvgIdsOpen] = useState(false);
const [confirmBatchUpdateOpen, setConfirmBatchUpdateOpen] = useState(false);
const isWarningSuppressed = useWarningsStore((s) => s.isWarningSuppressed);
const suppressWarning = useWarningsStore((s) => s.suppressWarning);
// Fetch EPG sources when modal opens
useEffect(() => {
if (isOpen) {
fetchEPGs();
}
}, [isOpen, fetchEPGs]);
// Get dummy EPG sources
const dummyEpgSources = useMemo(() => {
return Object.values(epgs).filter((epg) => epg.source_type === 'dummy');
}, [epgs]);
const form = useForm({
mode: 'uncontrolled',
initialValues: {
channel_group: '(no change)',
logo: '(no change)',
stream_profile_id: '-1',
user_level: '-1',
},
});
// Build confirmation message based on selected changes
const getConfirmationMessage = () => {
const changes = [];
const values = form.getValues();
// Check for regex name changes
if (regexFind.trim().length > 0) {
changes.push(
`• Name Change: Apply regex find "${regexFind}" replace with "${regexReplace || ''}"`
);
}
// Check channel group
if (selectedChannelGroup && selectedChannelGroup !== '-1') {
const groupName = channelGroups[selectedChannelGroup]?.name || 'Unknown';
changes.push(`• Channel Group: ${groupName}`);
}
// Check logo
if (selectedLogoId && selectedLogoId !== '-1') {
if (selectedLogoId === '0') {
changes.push(`• Logo: Use Default`);
} else {
const logoName = channelLogos[selectedLogoId]?.name || 'Selected Logo';
changes.push(`• Logo: ${logoName}`);
}
}
// Check stream profile
if (values.stream_profile_id && values.stream_profile_id !== '-1') {
if (values.stream_profile_id === '0') {
changes.push(`• Stream Profile: Use Default`);
} else {
const profileName =
streamProfiles[values.stream_profile_id]?.name || 'Selected Profile';
changes.push(`• Stream Profile: ${profileName}`);
}
}
// Check user level
if (values.user_level && values.user_level !== '-1') {
const userLevelLabel =
USER_LEVEL_LABELS[values.user_level] || values.user_level;
changes.push(`• User Level: ${userLevelLabel}`);
}
// Check dummy EPG
if (selectedDummyEpgId) {
if (selectedDummyEpgId === 'clear') {
changes.push(`• EPG: Clear Assignment (use default dummy)`);
} else {
const epgName = epgs[selectedDummyEpgId]?.name || 'Selected EPG';
changes.push(`• Dummy EPG: ${epgName}`);
}
}
return changes;
};
const handleSubmit = () => {
const changes = getConfirmationMessage();
// If no changes detected, show notification
if (changes.length === 0) {
notifications.show({
title: 'No Changes',
message: 'Please select at least one field to update.',
color: 'orange',
});
return;
}
// Skip warning if suppressed
if (isWarningSuppressed('batch-update-channels')) {
return onSubmit();
}
setConfirmBatchUpdateOpen(true);
};
const onSubmit = async () => {
setConfirmBatchUpdateOpen(false);
setIsSubmitting(true);
const values = {
@ -70,6 +195,15 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
delete values.channel_group_id;
}
if (selectedLogoId && selectedLogoId !== '-1') {
if (selectedLogoId === '0') {
values.logo_id = null;
} else {
values.logo_id = parseInt(selectedLogoId);
}
}
delete values.logo;
// Handle stream profile ID - convert special values
if (!values.stream_profile_id || values.stream_profile_id === '-1') {
delete values.stream_profile_id;
@ -90,6 +224,7 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
try {
const applyRegex = regexFind.trim().length > 0;
// First, handle standard field updates (name, group, logo, etc.)
if (applyRegex) {
// Build per-channel updates to apply unique names via regex
let flags = 'g';
@ -117,10 +252,48 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
});
await API.bulkUpdateChannels(updates);
} else {
} else if (Object.keys(values).length > 0) {
await API.updateChannels(channelIds, values);
}
// Then, handle EPG assignment if a dummy EPG was selected
if (selectedDummyEpgId) {
if (selectedDummyEpgId === 'clear') {
// Clear EPG assignments
const associations = channelIds.map((id) => ({
channel_id: id,
epg_data_id: null,
}));
await API.batchSetEPG(associations);
} else {
// Assign the selected dummy EPG
const selectedEpg = epgs[selectedDummyEpgId];
if (selectedEpg && selectedEpg.epg_data_count > 0) {
// Convert to number for comparison since Select returns string
const epgSourceId = parseInt(selectedDummyEpgId, 10);
// Check if we already have EPG data loaded in the store
let epgData = tvgs.find((data) => data.epg_source === epgSourceId);
// If not in store, fetch it
if (!epgData) {
const epgDataList = await API.getEPGData();
epgData = epgDataList.find(
(data) => data.epg_source === epgSourceId
);
}
if (epgData) {
const associations = channelIds.map((id) => ({
channel_id: id,
epg_data_id: epgData.id,
}));
await API.batchSetEPG(associations);
}
}
}
}
// Refresh both the channels table data and the main channels store
await Promise.all([
API.requeryChannels(),
@ -144,6 +317,15 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
return;
}
// Skip warning if suppressed
if (isWarningSuppressed('batch-set-names-from-epg')) {
return executeSetNamesFromEpg();
}
setConfirmSetNamesOpen(true);
};
const executeSetNamesFromEpg = async () => {
try {
// Start the backend task
await API.setChannelNamesFromEpg(channelIds);
@ -157,6 +339,7 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
});
// Close the modal since the task is now running in background
setConfirmSetNamesOpen(false);
onClose();
} catch (error) {
console.error('Failed to start EPG name setting task:', error);
@ -165,6 +348,7 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
message: 'Failed to start EPG name setting task.',
color: 'red',
});
setConfirmSetNamesOpen(false);
}
};
@ -178,6 +362,15 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
return;
}
// Skip warning if suppressed
if (isWarningSuppressed('batch-set-logos-from-epg')) {
return executeSetLogosFromEpg();
}
setConfirmSetLogosOpen(true);
};
const executeSetLogosFromEpg = async () => {
try {
// Start the backend task
await API.setChannelLogosFromEpg(channelIds);
@ -191,6 +384,7 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
});
// Close the modal since the task is now running in background
setConfirmSetLogosOpen(false);
onClose();
} catch (error) {
console.error('Failed to start EPG logo setting task:', error);
@ -199,6 +393,52 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
message: 'Failed to start EPG logo setting task.',
color: 'red',
});
setConfirmSetLogosOpen(false);
}
};
const handleSetTvgIdsFromEpg = async () => {
if (!channelIds || channelIds.length === 0) {
notifications.show({
title: 'No Channels Selected',
message: 'No channels to update.',
color: 'orange',
});
return;
}
// Skip warning if suppressed
if (isWarningSuppressed('batch-set-tvg-ids-from-epg')) {
return executeSetTvgIdsFromEpg();
}
setConfirmSetTvgIdsOpen(true);
};
const executeSetTvgIdsFromEpg = async () => {
try {
// Start the backend task
await API.setChannelTvgIdsFromEpg(channelIds);
// The task will send WebSocket updates for progress
// Just show that it started successfully
notifications.show({
title: 'Task Started',
message: `Started setting TVG-IDs from EPG for ${channelIds.length} channels. Progress will be shown in notifications.`,
color: 'blue',
});
// Close the modal since the task is now running in background
setConfirmSetTvgIdsOpen(false);
onClose();
} catch (error) {
console.error('Failed to start EPG TVG-ID setting task:', error);
notifications.show({
title: 'Error',
message: 'Failed to start EPG TVG-ID setting task.',
color: 'red',
});
setConfirmSetTvgIdsOpen(false);
}
};
@ -242,6 +482,18 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
),
];
const logoOptions = useMemo(() => {
return [
{ id: '-1', name: '(no change)' },
{ id: '0', name: 'Use Default', isDefault: true },
...Object.values(channelLogos),
];
}, [channelLogos]);
const filteredLogos = logoOptions.filter((logo) =>
logo.name.toLowerCase().includes(logoFilter.toLowerCase())
);
if (!isOpen) {
return <></>;
}
@ -260,7 +512,7 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
}
styles={{ content: { '--mantine-color-body': '#27272A' } }}
>
<form onSubmit={form.onSubmit(onSubmit)}>
<form onSubmit={form.onSubmit(handleSubmit)}>
<Group justify="space-between" align="top">
<Stack gap="5" style={{ flex: 1 }}>
<Paper withBorder p="xs" radius="md">
@ -317,10 +569,39 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
>
Set Logos from EPG
</Button>
<Button
size="xs"
variant="light"
onClick={handleSetTvgIdsFromEpg}
style={{ flex: 1 }}
>
Set TVG-IDs from EPG
</Button>
</Group>
<Divider my="xs" />
<Stack gap="xs">
<Text size="xs" fw={600}>
Assign Dummy EPG
</Text>
<Select
size="xs"
placeholder="Select a dummy EPG..."
data={[
{ value: 'clear', label: '(Clear EPG Assignment)' },
...dummyEpgSources.map((epg) => ({
value: String(epg.id),
label: epg.name,
})),
]}
value={selectedDummyEpgId}
onChange={setSelectedDummyEpgId}
clearable
/>
</Stack>
<Text size="xs" c="dimmed" mt="xs">
Updates channel names and logos based on their assigned EPG
data
Updates channel names, logos, and TVG-IDs based on their
assigned EPG data, or assigns a custom dummy EPG to selected
channels
</Text>
</Paper>
@ -445,6 +726,163 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
</Popover.Dropdown>
</Popover>
<Group style={{ width: '100%' }} align="flex-end" gap="xs">
<Popover
opened={logoPopoverOpened}
onChange={setLogoPopoverOpened}
withArrow
>
<Popover.Target>
<TextInput
label="Logo"
readOnly
{...form.getInputProps('logo')}
key={form.key('logo')}
onClick={() => setLogoPopoverOpened(true)}
size="xs"
style={{ flex: 1 }}
rightSection={
selectedLogoId !== '-1' && (
<ActionIcon
size="xs"
variant="subtle"
onClick={(e) => {
e.stopPropagation();
setSelectedLogoId('-1');
form.setValues({ logo: '(no change)' });
}}
>
<X size={12} />
</ActionIcon>
)
}
/>
</Popover.Target>
<Popover.Dropdown onMouseDown={(e) => e.stopPropagation()}>
<Group>
<TextInput
placeholder="Filter"
value={logoFilter}
onChange={(event) =>
setLogoFilter(event.currentTarget.value)
}
mb="xs"
size="xs"
/>
{logosLoading && (
<Text size="xs" c="dimmed">
Loading...
</Text>
)}
</Group>
<ScrollArea style={{ height: 200 }}>
{filteredLogos.length === 0 ? (
<Center style={{ height: 200 }}>
<Text size="sm" c="dimmed">
{logoFilter
? 'No logos match your filter'
: 'No logos available'}
</Text>
</Center>
) : (
<List
height={200}
itemCount={filteredLogos.length}
itemSize={55}
style={{ width: '100%' }}
ref={logoListRef}
>
{({ index, style }) => {
const item = filteredLogos[index];
return (
<div
style={{
...style,
cursor: 'pointer',
padding: '5px',
borderRadius: '4px',
}}
onClick={() => {
setSelectedLogoId(item.id);
form.setValues({
logo: item.name,
});
setLogoPopoverOpened(false);
}}
onMouseEnter={(e) => {
e.currentTarget.style.backgroundColor =
'rgb(68, 68, 68)';
}}
onMouseLeave={(e) => {
e.currentTarget.style.backgroundColor =
'transparent';
}}
>
<Center
style={{
flexDirection: 'column',
gap: '2px',
}}
>
{item.isDefault ? (
<img
src={logo}
height="30"
style={{
maxWidth: 80,
objectFit: 'contain',
}}
alt="Default Logo"
/>
) : item.id > 0 ? (
<img
src={item.cache_url || logo}
height="30"
style={{
maxWidth: 80,
objectFit: 'contain',
}}
alt={item.name || 'Logo'}
onError={(e) => {
if (e.target.src !== logo) {
e.target.src = logo;
}
}}
/>
) : (
<Box h={30} />
)}
<Text
size="xs"
c="dimmed"
ta="center"
style={{
maxWidth: 80,
overflow: 'hidden',
textOverflow: 'ellipsis',
whiteSpace: 'nowrap',
}}
>
{item.name}
</Text>
</Center>
</div>
);
}}
</List>
)}
</ScrollArea>
</Popover.Dropdown>
</Popover>
{selectedLogoId > 0 && (
<LazyLogo
logoId={selectedLogoId}
alt="channel logo"
style={{ height: 24, marginBottom: 5 }}
/>
)}
</Group>
<Select
id="stream_profile_id"
label="Stream Profile"
@ -485,8 +923,14 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
</Stack>
</Group>
<Flex mih={50} gap="xs" justify="flex-end" align="flex-end">
<Button type="submit" variant="default" disabled={isSubmitting}>
Submit
<Button
type="submit"
variant="default"
disabled={isSubmitting}
loading={isSubmitting}
loaderProps={{ type: 'dots' }}
>
{isSubmitting ? 'Saving...' : 'Submit'}
</Button>
</Flex>
</form>
@ -496,6 +940,110 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
isOpen={channelGroupModelOpen}
onClose={handleChannelGroupModalClose}
/>
<ConfirmationDialog
opened={confirmSetNamesOpen}
onClose={() => setConfirmSetNamesOpen(false)}
onConfirm={executeSetNamesFromEpg}
title="Confirm Set Names from EPG"
message={
<div style={{ whiteSpace: 'pre-line' }}>
{`Are you sure you want to set names from EPG for ${channelIds?.length || 0} selected channels?
This will replace the current channel names with the names from their assigned EPG data.
This action cannot be undone.`}
</div>
}
confirmLabel="Set Names"
cancelLabel="Cancel"
actionKey="batch-set-names-from-epg"
onSuppressChange={suppressWarning}
size="md"
/>
<ConfirmationDialog
opened={confirmSetLogosOpen}
onClose={() => setConfirmSetLogosOpen(false)}
onConfirm={executeSetLogosFromEpg}
title="Confirm Set Logos from EPG"
message={
<div style={{ whiteSpace: 'pre-line' }}>
{`Are you sure you want to set logos from EPG for ${channelIds?.length || 0} selected channels?
This will replace the current channel logos with logos from their assigned EPG data. New logos will be created if needed.
This action cannot be undone.`}
</div>
}
confirmLabel="Set Logos"
cancelLabel="Cancel"
actionKey="batch-set-logos-from-epg"
onSuppressChange={suppressWarning}
size="md"
/>
<ConfirmationDialog
opened={confirmSetTvgIdsOpen}
onClose={() => setConfirmSetTvgIdsOpen(false)}
onConfirm={executeSetTvgIdsFromEpg}
title="Confirm Set TVG-IDs from EPG"
message={
<div style={{ whiteSpace: 'pre-line' }}>
{`Are you sure you want to set TVG-IDs from EPG for ${channelIds?.length || 0} selected channels?
This will replace the current TVG-IDs with the TVG-IDs from their assigned EPG data.
This action cannot be undone.`}
</div>
}
confirmLabel="Set TVG-IDs"
cancelLabel="Cancel"
actionKey="batch-set-tvg-ids-from-epg"
onSuppressChange={suppressWarning}
size="md"
/>
<ConfirmationDialog
opened={confirmBatchUpdateOpen}
onClose={() => setConfirmBatchUpdateOpen(false)}
onConfirm={onSubmit}
title="Confirm Batch Update"
message={
<div>
<Text mb="md">
You are about to apply the following changes to{' '}
<strong>{channelIds?.length || 0}</strong> selected channel
{(channelIds?.length || 0) !== 1 ? 's' : ''}:
</Text>
<Paper
withBorder
p="sm"
style={{ backgroundColor: 'rgba(0, 0, 0, 0.2)' }}
>
<Stack gap="xs">
{getConfirmationMessage().map((change, index) => (
<Text
key={index}
size="sm"
style={{ fontFamily: 'monospace' }}
>
{change}
</Text>
))}
</Stack>
</Paper>
<Text mt="md" size="sm" c="dimmed">
This action cannot be undone.
</Text>
</div>
}
confirmLabel="Apply Changes"
cancelLabel="Cancel"
actionKey="batch-update-channels"
onSuppressChange={suppressWarning}
size="md"
/>
</>
);
};

View file

@ -1,729 +0,0 @@
import React, { useState, useEffect, useRef, useMemo } from 'react';
import { useFormik } from 'formik';
import * as Yup from 'yup';
import useChannelsStore from '../../store/channels';
import API from '../../api';
import useStreamProfilesStore from '../../store/streamProfiles';
import useStreamsStore from '../../store/streams';
import { useChannelLogoSelection } from '../../hooks/useSmartLogos';
import LazyLogo from '../LazyLogo';
import ChannelGroupForm from './ChannelGroup';
import usePlaylistsStore from '../../store/playlists';
import logo from '../../images/logo.png';
import {
Box,
Button,
Modal,
TextInput,
NativeSelect,
Text,
Group,
ActionIcon,
Center,
Grid,
Flex,
Select,
Divider,
Stack,
useMantineTheme,
Popover,
ScrollArea,
Tooltip,
NumberInput,
Image,
UnstyledButton,
} from '@mantine/core';
import { ListOrdered, SquarePlus, SquareX, X } from 'lucide-react';
import useEPGsStore from '../../store/epgs';
import { Dropzone } from '@mantine/dropzone';
import { notifications } from '@mantine/notifications';
import { FixedSizeList as List } from 'react-window';
const ChannelsForm = ({ channel = null, isOpen, onClose }) => {
const theme = useMantineTheme();
const listRef = useRef(null);
const logoListRef = useRef(null);
const groupListRef = useRef(null);
const channelGroups = useChannelsStore((s) => s.channelGroups);
const { logos, ensureLogosLoaded } = useChannelLogoSelection();
const streams = useStreamsStore((state) => state.streams);
const streamProfiles = useStreamProfilesStore((s) => s.profiles);
const playlists = usePlaylistsStore((s) => s.playlists);
const epgs = useEPGsStore((s) => s.epgs);
const tvgs = useEPGsStore((s) => s.tvgs);
const tvgsById = useEPGsStore((s) => s.tvgsById);
const [logoPreview, setLogoPreview] = useState(null);
const [channelStreams, setChannelStreams] = useState([]);
const [channelGroupModelOpen, setChannelGroupModalOpen] = useState(false);
const [epgPopoverOpened, setEpgPopoverOpened] = useState(false);
const [logoPopoverOpened, setLogoPopoverOpened] = useState(false);
const [selectedEPG, setSelectedEPG] = useState('');
const [tvgFilter, setTvgFilter] = useState('');
const [logoFilter, setLogoFilter] = useState('');
const [groupPopoverOpened, setGroupPopoverOpened] = useState(false);
const [groupFilter, setGroupFilter] = useState('');
const groupOptions = Object.values(channelGroups);
const addStream = (stream) => {
const streamSet = new Set(channelStreams);
streamSet.add(stream);
setChannelStreams(Array.from(streamSet));
};
const removeStream = (stream) => {
const streamSet = new Set(channelStreams);
streamSet.delete(stream);
setChannelStreams(Array.from(streamSet));
};
const handleLogoChange = async (files) => {
if (files.length === 1) {
const file = files[0];
// Validate file size on frontend first
if (file.size > 5 * 1024 * 1024) {
// 5MB
notifications.show({
title: 'Error',
message: 'File too large. Maximum size is 5MB.',
color: 'red',
});
return;
}
try {
const retval = await API.uploadLogo(file);
// Note: API.uploadLogo already adds the logo to the store, no need to fetch
setLogoPreview(retval.cache_url);
formik.setFieldValue('logo_id', retval.id);
} catch (error) {
console.error('Logo upload failed:', error);
// Error notification is already handled in API.uploadLogo
}
} else {
setLogoPreview(null);
}
};
const formik = useFormik({
initialValues: {
name: '',
channel_number: '', // Change from 0 to empty string for consistency
channel_group_id:
Object.keys(channelGroups).length > 0
? Object.keys(channelGroups)[0]
: '',
stream_profile_id: '0',
tvg_id: '',
tvc_guide_stationid: '',
epg_data_id: '',
logo_id: '',
},
validationSchema: Yup.object({
name: Yup.string().required('Name is required'),
channel_group_id: Yup.string().required('Channel group is required'),
}),
onSubmit: async (values, { setSubmitting }) => {
let response;
try {
const formattedValues = { ...values };
// Convert empty or "0" stream_profile_id to null for the API
if (
!formattedValues.stream_profile_id ||
formattedValues.stream_profile_id === '0'
) {
formattedValues.stream_profile_id = null;
}
// Ensure tvg_id is properly included (no empty strings)
formattedValues.tvg_id = formattedValues.tvg_id || null;
// Ensure tvc_guide_stationid is properly included (no empty strings)
formattedValues.tvc_guide_stationid =
formattedValues.tvc_guide_stationid || null;
if (channel) {
// If there's an EPG to set, use our enhanced endpoint
if (values.epg_data_id !== (channel.epg_data_id ?? '')) {
// Use the special endpoint to set EPG and trigger refresh
const epgResponse = await API.setChannelEPG(
channel.id,
values.epg_data_id
);
// Remove epg_data_id from values since we've handled it separately
const { epg_data_id, ...otherValues } = formattedValues;
// Update other channel fields if needed
if (Object.keys(otherValues).length > 0) {
response = await API.updateChannel({
id: channel.id,
...otherValues,
streams: channelStreams.map((stream) => stream.id),
});
}
} else {
// No EPG change, regular update
response = await API.updateChannel({
id: channel.id,
...formattedValues,
streams: channelStreams.map((stream) => stream.id),
});
}
} else {
// New channel creation - use the standard method
response = await API.addChannel({
...formattedValues,
streams: channelStreams.map((stream) => stream.id),
});
}
} catch (error) {
console.error('Error saving channel:', error);
}
formik.resetForm();
API.requeryChannels();
// Refresh channel profiles to update the membership information
useChannelsStore.getState().fetchChannelProfiles();
setSubmitting(false);
setTvgFilter('');
setLogoFilter('');
onClose();
},
});
useEffect(() => {
if (channel) {
if (channel.epg_data_id) {
const epgSource = epgs[tvgsById[channel.epg_data_id]?.epg_source];
setSelectedEPG(epgSource ? `${epgSource.id}` : '');
}
formik.setValues({
name: channel.name || '',
channel_number:
channel.channel_number !== null ? channel.channel_number : '',
channel_group_id: channel.channel_group_id
? `${channel.channel_group_id}`
: '',
stream_profile_id: channel.stream_profile_id
? `${channel.stream_profile_id}`
: '0',
tvg_id: channel.tvg_id || '',
tvc_guide_stationid: channel.tvc_guide_stationid || '',
epg_data_id: channel.epg_data_id ?? '',
logo_id: channel.logo_id ? `${channel.logo_id}` : '',
});
setChannelStreams(channel.streams || []);
} else {
formik.resetForm();
setTvgFilter('');
setLogoFilter('');
}
}, [channel, tvgsById, channelGroups]);
// Memoize logo options to prevent infinite re-renders during background loading
const logoOptions = useMemo(() => {
return [{ id: '0', name: 'Default' }].concat(Object.values(logos));
}, [logos]); // Only depend on logos object
const renderLogoOption = ({ option, checked }) => {
return (
<Center style={{ width: '100%' }}>
<img src={logos[option.value].cache_url} width="30" />
</Center>
);
};
// Update the handler for when channel group modal is closed
const handleChannelGroupModalClose = (newGroup) => {
setChannelGroupModalOpen(false);
// If a new group was created and returned, update the form with it
if (newGroup && newGroup.id) {
// Preserve all current form values while updating just the channel_group_id
formik.setValues({
...formik.values,
channel_group_id: `${newGroup.id}`,
});
}
};
if (!isOpen) {
return <></>;
}
const filteredTvgs = tvgs
.filter((tvg) => tvg.epg_source == selectedEPG)
.filter(
(tvg) =>
tvg.name.toLowerCase().includes(tvgFilter.toLowerCase()) ||
tvg.tvg_id.toLowerCase().includes(tvgFilter.toLowerCase())
);
const filteredLogos = logoOptions.filter((logo) =>
logo.name.toLowerCase().includes(logoFilter.toLowerCase())
);
const filteredGroups = groupOptions.filter((group) =>
group.name.toLowerCase().includes(groupFilter.toLowerCase())
);
return (
<Modal
opened={isOpen}
onClose={onClose}
size={1000}
title={
<Group gap="5">
<ListOrdered size="20" />
<Text>Channels</Text>
</Group>
}
styles={{ content: { '--mantine-color-body': '#27272A' } }}
>
<form onSubmit={formik.handleSubmit}>
<Group justify="space-between" align="top">
<Stack gap="5" style={{ flex: 1 }}>
<TextInput
id="name"
name="name"
label="Channel Name"
value={formik.values.name}
onChange={formik.handleChange}
error={formik.errors.name ? formik.touched.name : ''}
size="xs"
/>
<Flex gap="sm">
<Popover
opened={groupPopoverOpened}
onChange={setGroupPopoverOpened}
// position="bottom-start"
withArrow
>
<Popover.Target>
<TextInput
id="channel_group_id"
name="channel_group_id"
label="Channel Group"
readOnly
value={
channelGroups[formik.values.channel_group_id]
? channelGroups[formik.values.channel_group_id].name
: ''
}
onClick={() => setGroupPopoverOpened(true)}
size="xs"
/>
</Popover.Target>
<Popover.Dropdown onMouseDown={(e) => e.stopPropagation()}>
<Group>
<TextInput
placeholder="Filter"
value={groupFilter}
onChange={(event) =>
setGroupFilter(event.currentTarget.value)
}
mb="xs"
size="xs"
/>
</Group>
<ScrollArea style={{ height: 200 }}>
<List
height={200} // Set max height for visible items
itemCount={filteredGroups.length}
itemSize={20} // Adjust row height for each item
width={200}
ref={groupListRef}
>
{({ index, style }) => (
<Box
style={{ ...style, height: 20, overflow: 'hidden' }}
>
<Tooltip
openDelay={500}
label={filteredGroups[index].name}
size="xs"
>
<UnstyledButton
onClick={() => {
formik.setFieldValue(
'channel_group_id',
filteredGroups[index].id
);
setGroupPopoverOpened(false);
}}
>
<Text
size="xs"
style={{
whiteSpace: 'nowrap',
overflow: 'hidden',
textOverflow: 'ellipsis',
}}
>
{filteredGroups[index].name}
</Text>
</UnstyledButton>
</Tooltip>
</Box>
)}
</List>
</ScrollArea>
</Popover.Dropdown>
</Popover>
{/* <Select
id="channel_group_id"
name="channel_group_id"
label="Channel Group"
value={formik.values.channel_group_id}
searchable
onChange={(value) => {
formik.setFieldValue('channel_group_id', value); // Update Formik's state with the new value
}}
error={
formik.errors.channel_group_id
? formik.touched.channel_group_id
: ''
}
data={Object.values(channelGroups).map((option, index) => ({
value: `${option.id}`,
label: option.name,
}))}
size="xs"
style={{ flex: 1 }}
/> */}
<Flex align="flex-end">
<ActionIcon
color={theme.tailwind.green[5]}
onClick={() => setChannelGroupModalOpen(true)}
title="Create new group"
size="small"
variant="transparent"
style={{ marginBottom: 5 }}
>
<SquarePlus size="20" />
</ActionIcon>
</Flex>
</Flex>
<Select
id="stream_profile_id"
label="Stream Profile"
name="stream_profile_id"
value={formik.values.stream_profile_id}
onChange={(value) => {
formik.setFieldValue('stream_profile_id', value); // Update Formik's state with the new value
}}
error={
formik.errors.stream_profile_id
? formik.touched.stream_profile_id
: ''
}
data={[{ value: '0', label: '(use default)' }].concat(
streamProfiles.map((option) => ({
value: `${option.id}`,
label: option.name,
}))
)}
size="xs"
/>
</Stack>
<Divider size="sm" orientation="vertical" />
<Stack justify="flex-start" style={{ flex: 1 }}>
<Group justify="space-between">
<Popover
opened={logoPopoverOpened}
onChange={(opened) => {
setLogoPopoverOpened(opened);
if (opened) {
ensureLogosLoaded();
}
}}
// position="bottom-start"
withArrow
>
<Popover.Target>
<TextInput
id="logo_id"
name="logo_id"
label="Logo"
readOnly
value={logos[formik.values.logo_id]?.name || 'Default'}
onClick={() => setLogoPopoverOpened(true)}
size="xs"
/>
</Popover.Target>
<Popover.Dropdown onMouseDown={(e) => e.stopPropagation()}>
<Group>
<TextInput
placeholder="Filter"
value={logoFilter}
onChange={(event) =>
setLogoFilter(event.currentTarget.value)
}
mb="xs"
size="xs"
/>
</Group>
<ScrollArea style={{ height: 200 }}>
<List
height={200} // Set max height for visible items
itemCount={filteredLogos.length}
itemSize={20} // Adjust row height for each item
width="100%"
ref={logoListRef}
>
{({ index, style }) => (
<div style={style}>
<Center>
<img
src={filteredLogos[index].cache_url || logo}
height="20"
style={{ maxWidth: 80 }}
onClick={() => {
formik.setFieldValue(
'logo_id',
filteredLogos[index].id
);
}}
/>
</Center>
</div>
)}
</List>
</ScrollArea>
</Popover.Dropdown>
</Popover>
<LazyLogo
logoId={formik.values.logo_id}
alt="channel logo"
style={{ height: 40 }}
/>
</Group>
<Group>
<Divider size="xs" style={{ flex: 1 }} />
<Text size="xs" c="dimmed">
OR
</Text>
<Divider size="xs" style={{ flex: 1 }} />
</Group>
<Stack>
<Text size="sm">Upload Logo</Text>
<Dropzone
onDrop={handleLogoChange}
onReject={(files) => console.log('rejected files', files)}
maxSize={5 * 1024 ** 2}
>
<Group
justify="center"
gap="xl"
mih={40}
style={{ pointerEvents: 'none' }}
>
<Text size="sm" inline>
Drag images here or click to select files
</Text>
</Group>
</Dropzone>
<Center></Center>
</Stack>
</Stack>
<Divider size="sm" orientation="vertical" />
<Stack gap="5" style={{ flex: 1 }} justify="flex-start">
<NumberInput
id="channel_number"
name="channel_number"
label="Channel # (blank to auto-assign)"
value={formik.values.channel_number}
onChange={(value) =>
formik.setFieldValue('channel_number', value)
}
error={
formik.errors.channel_number
? formik.touched.channel_number
: ''
}
size="xs"
/>
<TextInput
id="tvg_id"
name="tvg_id"
label="TVG-ID"
value={formik.values.tvg_id}
onChange={formik.handleChange}
error={formik.errors.tvg_id ? formik.touched.tvg_id : ''}
size="xs"
/>
<TextInput
id="tvc_guide_stationid"
name="tvc_guide_stationid"
label="Gracenote StationId"
value={formik.values.tvc_guide_stationid}
onChange={formik.handleChange}
error={
formik.errors.tvc_guide_stationid
? formik.touched.tvc_guide_stationid
: ''
}
size="xs"
/>
<Popover
opened={epgPopoverOpened}
onChange={setEpgPopoverOpened}
// position="bottom-start"
withArrow
>
<Popover.Target>
<TextInput
id="epg_data_id"
name="epg_data_id"
label={
<Group style={{ width: '100%' }}>
<Box>EPG</Box>
<Button
size="xs"
variant="transparent"
onClick={() =>
formik.setFieldValue('epg_data_id', null)
}
>
Use Dummy
</Button>
</Group>
}
readOnly
value={
formik.values.epg_data_id
? tvgsById[formik.values.epg_data_id].name
: 'Dummy'
}
onClick={() => setEpgPopoverOpened(true)}
size="xs"
rightSection={
<Tooltip label="Use dummy EPG">
<ActionIcon
// color={theme.tailwind.green[5]}
color="white"
onClick={(e) => {
e.stopPropagation();
formik.setFieldValue('epg_data_id', null);
}}
title="Create new group"
size="small"
variant="transparent"
>
<X size="20" />
</ActionIcon>
</Tooltip>
}
/>
</Popover.Target>
<Popover.Dropdown onMouseDown={(e) => e.stopPropagation()}>
<Group>
<Select
label="Source"
value={selectedEPG}
onChange={setSelectedEPG}
data={Object.values(epgs).map((epg) => ({
value: `${epg.id}`,
label: epg.name,
}))}
size="xs"
mb="xs"
/>
{/* Filter Input */}
<TextInput
label="Filter"
value={tvgFilter}
onChange={(event) =>
setTvgFilter(event.currentTarget.value)
}
mb="xs"
size="xs"
/>
</Group>
<ScrollArea style={{ height: 200 }}>
<List
height={200} // Set max height for visible items
itemCount={filteredTvgs.length}
itemSize={40} // Adjust row height for each item
width="100%"
ref={listRef}
>
{({ index, style }) => (
<div style={style}>
<Button
key={filteredTvgs[index].id}
variant="subtle"
color="gray"
fullWidth
justify="left"
size="xs"
onClick={() => {
if (filteredTvgs[index].id == '0') {
formik.setFieldValue('epg_data_id', null);
} else {
formik.setFieldValue(
'epg_data_id',
filteredTvgs[index].id
);
}
setEpgPopoverOpened(false);
}}
>
{filteredTvgs[index].tvg_id}
</Button>
</div>
)}
</List>
</ScrollArea>
</Popover.Dropdown>
</Popover>
</Stack>
</Group>
<Flex mih={50} gap="xs" justify="flex-end" align="flex-end">
<Button
type="submit"
variant="default"
disabled={formik.isSubmitting}
>
Submit
</Button>
</Flex>
</form>
</Modal>
);
};
export default ChannelsForm;

File diff suppressed because it is too large

View file

@ -1,31 +1,23 @@
// Modal.js
import React, { useState, useEffect } from 'react';
import API from '../../api';
import useEPGsStore from '../../store/epgs';
import {
LoadingOverlay,
TextInput,
Button,
Checkbox,
Modal,
Flex,
NativeSelect,
NumberInput,
Space,
Grid,
Group,
FileInput,
Title,
Text,
Divider,
Stack,
Group,
Divider,
Box,
Text,
} from '@mantine/core';
import { isNotEmpty, useForm } from '@mantine/form';
import { notifications } from '@mantine/notifications';
const EPG = ({ epg = null, isOpen, onClose }) => {
const epgs = useEPGsStore((state) => state.epgs);
// Remove the file state and handler since we're not supporting file uploads
const [sourceType, setSourceType] = useState('xmltv');
const form = useForm({
@ -49,13 +41,19 @@ const EPG = ({ epg = null, isOpen, onClose }) => {
const values = form.getValues();
if (epg?.id) {
// Remove file from API call
// Validate that we have a valid EPG object before updating
if (!epg || typeof epg !== 'object' || !epg.id) {
notifications.show({
title: 'Error',
message: 'Invalid EPG data. Please close and reopen this form.',
color: 'red',
});
return;
}
await API.updateEPG({ id: epg.id, ...values });
} else {
// Remove file from API call
await API.addEPG({
...values,
});
await API.addEPG(values);
}
form.reset();
@ -73,11 +71,12 @@ const EPG = ({ epg = null, isOpen, onClose }) => {
refresh_interval: epg.refresh_interval,
};
form.setValues(values);
setSourceType(epg.source_type); // Update source type state
setSourceType(epg.source_type);
} else {
form.reset();
setSourceType('xmltv'); // Reset to xmltv
setSourceType('xmltv');
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [epg]);
// Function to handle source type changes
@ -156,7 +155,7 @@ const EPG = ({ epg = null, isOpen, onClose }) => {
description="API key for services that require authentication"
{...form.getInputProps('api_key')}
key={form.key('api_key')}
disabled={sourceType !== 'schedules_direct'} // Use the state variable
disabled={sourceType !== 'schedules_direct'}
/>
{/* Put checkbox at the same level as Refresh Interval */}
@ -171,8 +170,8 @@ const EPG = ({ epg = null, isOpen, onClose }) => {
style={{
display: 'flex',
alignItems: 'center',
height: '30px', // Reduced height
marginTop: '-4px', // Slight negative margin to move it up
height: '30px',
marginTop: '-4px',
}}
>
<Checkbox

View file

@ -16,11 +16,20 @@ import {
Box,
MultiSelect,
Tooltip,
Popover,
ScrollArea,
Center,
} from '@mantine/core';
import { Info } from 'lucide-react';
import useChannelsStore from '../../store/channels';
import useStreamProfilesStore from '../../store/streamProfiles';
import { CircleCheck, CircleX } from 'lucide-react';
import { useChannelLogoSelection } from '../../hooks/useSmartLogos';
import { FixedSizeList as List } from 'react-window';
import LazyLogo from '../LazyLogo';
import LogoForm from './Logo';
import logo from '../../images/logo.png';
import API from '../../api';
// Custom item component for MultiSelect with tooltip
const OptionWithTooltip = forwardRef(
@ -33,12 +42,33 @@ const OptionWithTooltip = forwardRef(
)
);
const LiveGroupFilter = ({ playlist, groupStates, setGroupStates }) => {
const LiveGroupFilter = ({
playlist,
groupStates,
setGroupStates,
autoEnableNewGroupsLive,
setAutoEnableNewGroupsLive,
}) => {
const channelGroups = useChannelsStore((s) => s.channelGroups);
const profiles = useChannelsStore((s) => s.profiles);
const streamProfiles = useStreamProfilesStore((s) => s.profiles);
const fetchStreamProfiles = useStreamProfilesStore((s) => s.fetchProfiles);
const [groupFilter, setGroupFilter] = useState('');
const [epgSources, setEpgSources] = useState([]);
// Logo selection functionality
const {
logos: channelLogos,
ensureLogosLoaded,
isLoading: logosLoading,
} = useChannelLogoSelection();
const [logoModalOpen, setLogoModalOpen] = useState(false);
const [currentEditingGroupId, setCurrentEditingGroupId] = useState(null);
// Ensure logos are loaded when component mounts
useEffect(() => {
ensureLogosLoaded();
}, [ensureLogosLoaded]);
// Fetch stream profiles when component mounts
useEffect(() => {
@ -47,6 +77,19 @@ const LiveGroupFilter = ({ playlist, groupStates, setGroupStates }) => {
}
}, [streamProfiles.length, fetchStreamProfiles]);
// Fetch EPG sources when component mounts
useEffect(() => {
const fetchEPGSources = async () => {
try {
const sources = await API.getEPGs();
setEpgSources(sources || []);
} catch (error) {
console.error('Failed to fetch EPG sources:', error);
}
};
fetchEPGSources();
}, []);
useEffect(() => {
if (Object.keys(channelGroups).length === 0) {
return;
@ -62,7 +105,7 @@ const LiveGroupFilter = ({ playlist, groupStates, setGroupStates }) => {
typeof group.custom_properties === 'string'
? JSON.parse(group.custom_properties)
: group.custom_properties;
} catch (e) {
} catch {
customProps = {};
}
}
@ -109,21 +152,27 @@ const LiveGroupFilter = ({ playlist, groupStates, setGroupStates }) => {
);
};
// Toggle force_dummy_epg in custom_properties for a group
const toggleForceDummyEPG = (id) => {
setGroupStates(
groupStates.map((state) => {
if (state.channel_group == id) {
const customProps = { ...(state.custom_properties || {}) };
customProps.force_dummy_epg = !customProps.force_dummy_epg;
return {
...state,
custom_properties: customProps,
};
}
return state;
})
);
// Handle logo selection from LogoForm
const handleLogoSuccess = ({ logo }) => {
if (logo && logo.id && currentEditingGroupId !== null) {
setGroupStates(
groupStates.map((state) => {
if (state.channel_group === currentEditingGroupId) {
return {
...state,
custom_properties: {
...state.custom_properties,
custom_logo_id: logo.id,
},
};
}
return state;
})
);
ensureLogosLoaded(); // Refresh logos
}
setLogoModalOpen(false);
setCurrentEditingGroupId(null);
};
const selectAll = () => {
@ -159,6 +208,16 @@ const LiveGroupFilter = ({ playlist, groupStates, setGroupStates }) => {
</Text>
</Alert>
<Checkbox
label="Automatically enable new groups discovered on future scans"
checked={autoEnableNewGroupsLive}
onChange={(event) =>
setAutoEnableNewGroupsLive(event.currentTarget.checked)
}
size="sm"
description="When disabled, new groups from the M3U source will be created but disabled by default. You can enable them manually later."
/>
<Flex gap="sm">
<TextInput
placeholder="Filter groups..."
@ -254,10 +313,10 @@ const LiveGroupFilter = ({ playlist, groupStates, setGroupStates }) => {
placeholder="Select options..."
data={[
{
value: 'force_dummy_epg',
label: 'Force Dummy EPG',
value: 'force_epg',
label: 'Force EPG Source',
description:
'Assign a dummy EPG to all channels in this group if no EPG is matched',
'Force a specific EPG source for all auto-synced channels, or disable EPG assignment entirely',
},
{
value: 'group_override',
@ -295,12 +354,22 @@ const LiveGroupFilter = ({ playlist, groupStates, setGroupStates }) => {
description:
'Assign a specific stream profile to all channels in this group during auto sync',
},
{
value: 'custom_logo',
label: 'Custom Logo',
description:
'Assign a custom logo to all auto-synced channels in this group',
},
]}
itemComponent={OptionWithTooltip}
value={(() => {
const selectedValues = [];
if (group.custom_properties?.force_dummy_epg) {
selectedValues.push('force_dummy_epg');
if (
group.custom_properties?.custom_epg_id !==
undefined ||
group.custom_properties?.force_dummy_epg
) {
selectedValues.push('force_epg');
}
if (
group.custom_properties?.group_override !==
@ -340,6 +409,12 @@ const LiveGroupFilter = ({ playlist, groupStates, setGroupStates }) => {
) {
selectedValues.push('stream_profile_assignment');
}
if (
group.custom_properties?.custom_logo_id !==
undefined
) {
selectedValues.push('custom_logo');
}
return selectedValues;
})()}
onChange={(values) => {
@ -353,13 +428,25 @@ const LiveGroupFilter = ({ playlist, groupStates, setGroupStates }) => {
...(state.custom_properties || {}),
};
// Handle force_dummy_epg
if (
selectedOptions.includes('force_dummy_epg')
) {
newCustomProps.force_dummy_epg = true;
// Handle force_epg
if (selectedOptions.includes('force_epg')) {
// Migrate from old force_dummy_epg if present
if (
newCustomProps.force_dummy_epg &&
newCustomProps.custom_epg_id === undefined
) {
// Migrate: force_dummy_epg=true becomes custom_epg_id=null
newCustomProps.custom_epg_id = null;
delete newCustomProps.force_dummy_epg;
} else if (
newCustomProps.custom_epg_id === undefined
) {
// New configuration: initialize with null (no EPG/default dummy)
newCustomProps.custom_epg_id = null;
}
} else {
delete newCustomProps.force_dummy_epg;
// Only remove custom_epg_id when deselected
delete newCustomProps.custom_epg_id;
}
// Handle group_override
@ -459,6 +546,17 @@ const LiveGroupFilter = ({ playlist, groupStates, setGroupStates }) => {
delete newCustomProps.stream_profile_id;
}
// Handle custom_logo
if (selectedOptions.includes('custom_logo')) {
if (
newCustomProps.custom_logo_id === undefined
) {
newCustomProps.custom_logo_id = null;
}
} else {
delete newCustomProps.custom_logo_id;
}
return {
...state,
custom_properties: newCustomProps,
@ -785,6 +883,317 @@ const LiveGroupFilter = ({ playlist, groupStates, setGroupStates }) => {
/>
</Tooltip>
)}
{/* Show logo selector only if custom_logo is selected */}
{group.custom_properties?.custom_logo_id !==
undefined && (
<Box>
<Group justify="space-between">
<Popover
opened={group.logoPopoverOpened || false}
onChange={(opened) => {
setGroupStates(
groupStates.map((state) => {
if (
state.channel_group ===
group.channel_group
) {
return {
...state,
logoPopoverOpened: opened,
};
}
return state;
})
);
if (opened) {
ensureLogosLoaded();
}
}}
withArrow
>
<Popover.Target>
<TextInput
label="Custom Logo"
readOnly
value={
channelLogos[
group.custom_properties?.custom_logo_id
]?.name || 'Default'
}
onClick={() => {
setGroupStates(
groupStates.map((state) => {
if (
state.channel_group ===
group.channel_group
) {
return {
...state,
logoPopoverOpened: true,
};
}
return {
...state,
logoPopoverOpened: false,
};
})
);
}}
size="xs"
/>
</Popover.Target>
<Popover.Dropdown
onMouseDown={(e) => e.stopPropagation()}
>
<Group>
<TextInput
placeholder="Filter logos..."
size="xs"
value={group.logoFilter || ''}
onChange={(e) => {
const val = e.currentTarget.value;
setGroupStates(
groupStates.map((state) =>
state.channel_group ===
group.channel_group
? {
...state,
logoFilter: val,
}
: state
)
);
}}
/>
{logosLoading && (
<Text size="xs" c="dimmed">
Loading...
</Text>
)}
</Group>
<ScrollArea style={{ height: 200 }}>
{(() => {
const logoOptions = [
{ id: '0', name: 'Default' },
...Object.values(channelLogos),
];
const filteredLogos = logoOptions.filter(
(logo) =>
logo.name
.toLowerCase()
.includes(
(
group.logoFilter || ''
).toLowerCase()
)
);
if (filteredLogos.length === 0) {
return (
<Center style={{ height: 200 }}>
<Text size="sm" c="dimmed">
{group.logoFilter
? 'No logos match your filter'
: 'No logos available'}
</Text>
</Center>
);
}
return (
<List
height={200}
itemCount={filteredLogos.length}
itemSize={55}
style={{ width: '100%' }}
>
{({ index, style }) => {
const logoItem = filteredLogos[index];
return (
<div
style={{
...style,
cursor: 'pointer',
padding: '5px',
borderRadius: '4px',
}}
onClick={() => {
setGroupStates(
groupStates.map((state) => {
if (
state.channel_group ===
group.channel_group
) {
return {
...state,
custom_properties: {
...state.custom_properties,
custom_logo_id:
logoItem.id,
},
logoPopoverOpened: false,
};
}
return state;
})
);
}}
onMouseEnter={(e) => {
e.currentTarget.style.backgroundColor =
'rgb(68, 68, 68)';
}}
onMouseLeave={(e) => {
e.currentTarget.style.backgroundColor =
'transparent';
}}
>
<Center
style={{
flexDirection: 'column',
gap: '2px',
}}
>
<img
src={
logoItem.cache_url || logo
}
height="30"
style={{
maxWidth: 80,
objectFit: 'contain',
}}
alt={logoItem.name || 'Logo'}
onError={(e) => {
if (e.target.src !== logo) {
e.target.src = logo;
}
}}
/>
<Text
size="xs"
c="dimmed"
ta="center"
style={{
maxWidth: 80,
overflow: 'hidden',
textOverflow: 'ellipsis',
whiteSpace: 'nowrap',
}}
>
{logoItem.name || 'Default'}
</Text>
</Center>
</div>
);
}}
</List>
);
})()}
</ScrollArea>
</Popover.Dropdown>
</Popover>
<Stack gap="xs" align="center">
<LazyLogo
logoId={group.custom_properties?.custom_logo_id}
alt="custom logo"
style={{ height: 40 }}
/>
</Stack>
</Group>
<Button
onClick={() => {
setCurrentEditingGroupId(group.channel_group);
setLogoModalOpen(true);
}}
fullWidth
variant="default"
size="xs"
mt="xs"
>
Upload or Create Logo
</Button>
</Box>
)}
{/* Show EPG selector when force_epg is selected */}
{(group.custom_properties?.custom_epg_id !== undefined ||
group.custom_properties?.force_dummy_epg) && (
<Tooltip
label="Force a specific EPG source for all auto-synced channels in this group. For dummy EPGs, all channels will share the same EPG data. For regular EPG sources (XMLTV, Schedules Direct), channels will be matched by their tvg_id within that source. Select 'No EPG' to disable EPG assignment."
withArrow
>
<Select
label="EPG Source"
placeholder="No EPG (Disabled)"
value={(() => {
// Handle migration from force_dummy_epg
if (
group.custom_properties?.custom_epg_id !==
undefined
) {
// Convert to string, use '0' for null/no EPG
return group.custom_properties.custom_epg_id ===
null
? '0'
: group.custom_properties.custom_epg_id.toString();
} else if (
group.custom_properties?.force_dummy_epg
) {
// Show "No EPG" for old force_dummy_epg configs
return '0';
}
return '0';
})()}
onChange={(value) => {
// Convert back: '0' means no EPG (null)
const newValue =
value === '0' ? null : parseInt(value);
setGroupStates(
groupStates.map((state) => {
if (
state.channel_group === group.channel_group
) {
return {
...state,
custom_properties: {
...state.custom_properties,
custom_epg_id: newValue,
},
};
}
return state;
})
);
}}
data={[
{ value: '0', label: 'No EPG (Disabled)' },
...[...epgSources]
.sort((a, b) => a.name.localeCompare(b.name))
.map((source) => ({
value: source.id.toString(),
label: `${source.name} (${
source.source_type === 'dummy'
? 'Dummy'
: source.source_type === 'xmltv'
? 'XMLTV'
: source.source_type ===
'schedules_direct'
? 'Schedules Direct'
: source.source_type
})`,
})),
]}
clearable
searchable
size="xs"
/>
</Tooltip>
)}
</>
)}
</Stack>
@ -792,6 +1201,16 @@ const LiveGroupFilter = ({ playlist, groupStates, setGroupStates }) => {
))}
</SimpleGrid>
</Box>
{/* Logo Upload Modal */}
<LogoForm
isOpen={logoModalOpen}
onClose={() => {
setLogoModalOpen(false);
setCurrentEditingGroupId(null);
}}
onSuccess={handleLogoSuccess}
/>
</Stack>
);
};

View file

@ -1,7 +1,24 @@
import React, { useState, useEffect } from 'react';
import { useNavigate } from 'react-router-dom';
import useAuthStore from '../../store/auth';
import { Paper, Title, TextInput, Button, Center, Stack } from '@mantine/core';
import API from '../../api';
import {
Paper,
Title,
TextInput,
Button,
Center,
Stack,
Text,
Image,
Group,
Divider,
Modal,
Anchor,
Code,
Checkbox,
} from '@mantine/core';
import logo from '../../assets/logo.png';
const LoginForm = () => {
const login = useAuthStore((s) => s.login);
@ -11,12 +28,69 @@ const LoginForm = () => {
const navigate = useNavigate(); // Hook to navigate to other routes
const [formData, setFormData] = useState({ username: '', password: '' });
const [rememberMe, setRememberMe] = useState(false);
const [savePassword, setSavePassword] = useState(false);
const [forgotPasswordOpened, setForgotPasswordOpened] = useState(false);
const [version, setVersion] = useState(null);
const [isLoading, setIsLoading] = useState(false);
// useEffect(() => {
// if (isAuthenticated) {
// navigate('/channels');
// }
// }, [isAuthenticated, navigate]);
// Simple base64 encoding/decoding for localStorage
  // Note: This is obfuscation, not encryption. Use the browser's password manager for real security.
const encodePassword = (password) => {
try {
return btoa(password);
} catch (error) {
console.error('Encoding error:', error);
return null;
}
};
const decodePassword = (encoded) => {
try {
return atob(encoded);
} catch (error) {
console.error('Decoding error:', error);
return '';
}
};
useEffect(() => {
// Fetch version info
API.getVersion().then((data) => {
setVersion(data?.version);
});
}, []);
useEffect(() => {
// Load saved username if it exists
const savedUsername = localStorage.getItem(
'dispatcharr_remembered_username'
);
const savedPassword = localStorage.getItem('dispatcharr_saved_password');
if (savedUsername) {
setFormData((prev) => ({ ...prev, username: savedUsername }));
setRememberMe(true);
if (savedPassword) {
try {
const decrypted = decodePassword(savedPassword);
if (decrypted) {
setFormData((prev) => ({ ...prev, password: decrypted }));
setSavePassword(true);
}
} catch {
// If decoding fails, just skip
}
}
}
}, []);
useEffect(() => {
if (isAuthenticated) {
navigate('/channels');
}
}, [isAuthenticated, navigate]);
const handleInputChange = (e) => {
setFormData({
@ -27,13 +101,38 @@ const LoginForm = () => {
const handleSubmit = async (e) => {
e.preventDefault();
await login(formData);
setIsLoading(true);
try {
await login(formData);
// Save username if remember me is checked
if (rememberMe) {
localStorage.setItem(
'dispatcharr_remembered_username',
formData.username
);
// Save password if save password is checked
if (savePassword) {
const encoded = encodePassword(formData.password);
if (encoded) {
localStorage.setItem('dispatcharr_saved_password', encoded);
}
} else {
localStorage.removeItem('dispatcharr_saved_password');
}
} else {
localStorage.removeItem('dispatcharr_remembered_username');
localStorage.removeItem('dispatcharr_saved_password');
}
await initData();
navigate('/channels');
// Navigation will happen automatically via the useEffect or route protection
} catch (e) {
console.log(`Failed to login: ${e}`);
await logout();
setIsLoading(false);
}
};
@ -45,11 +144,29 @@ const LoginForm = () => {
>
<Paper
elevation={3}
style={{ padding: 30, width: '100%', maxWidth: 400 }}
style={{
padding: 30,
width: '100%',
maxWidth: 500,
position: 'relative',
}}
>
<Title order={4} align="center">
Login
</Title>
<Stack align="center" spacing="lg">
<Image
src={logo}
alt="Dispatcharr Logo"
width={120}
height={120}
fit="contain"
/>
<Title order={2} align="center">
Dispatcharr
</Title>
<Text size="sm" color="dimmed" align="center">
Welcome back! Please log in to continue.
</Text>
<Divider style={{ width: '100%' }} />
</Stack>
<form onSubmit={handleSubmit}>
<Stack>
<TextInput
@ -69,12 +186,124 @@ const LoginForm = () => {
// required
/>
<Button type="submit" mt="sm">
Login
<Group justify="space-between" align="center">
<Group align="center" spacing="xs">
<Checkbox
label="Remember me"
checked={rememberMe}
onChange={(e) => setRememberMe(e.currentTarget.checked)}
size="sm"
/>
{rememberMe && (
<Checkbox
label="Save password"
checked={savePassword}
onChange={(e) => setSavePassword(e.currentTarget.checked)}
size="sm"
/>
)}
</Group>
<Anchor
size="sm"
component="button"
type="button"
onClick={(e) => {
e.preventDefault();
setForgotPasswordOpened(true);
}}
>
Forgot password?
</Anchor>
</Group>
<div
style={{
position: 'relative',
height: '0',
overflow: 'visible',
marginBottom: '-4px',
}}
>
{savePassword && (
<Text
size="xs"
color="red"
style={{
marginTop: '-10px',
marginBottom: '0',
lineHeight: '1.2',
}}
>
Password will be stored locally without encryption. Only
use on trusted devices.
</Text>
)}
</div>
<Button
type="submit"
fullWidth
loading={isLoading}
disabled={isLoading}
loaderProps={{ type: 'dots' }}
>
{isLoading ? 'Logging you in...' : 'Login'}
</Button>
</Stack>
</form>
{version && (
<Text
size="xs"
color="dimmed"
style={{
position: 'absolute',
bottom: 6,
right: 30,
}}
>
v{version}
</Text>
)}
</Paper>
<Modal
opened={forgotPasswordOpened}
onClose={() => setForgotPasswordOpened(false)}
title="Reset Your Password"
centered
>
<Stack spacing="md">
<Text>
To reset your password, your administrator needs to run a Django
management command:
</Text>
<div>
<Text weight={500} size="sm" mb={8}>
If running with Docker:
</Text>
<Code block>
docker exec &lt;container_name&gt; python manage.py changepassword
&lt;username&gt;
</Code>
</div>
<div>
<Text weight={500} size="sm" mb={8}>
If running locally:
</Text>
<Code block>python manage.py changepassword &lt;username&gt;</Code>
</div>
<Text size="sm" color="dimmed">
The command will prompt for a new password. Replace
<code>&lt;container_name&gt;</code> with your Docker container name
and <code>&lt;username&gt;</code> with the account username.
</Text>
<Text size="sm" color="dimmed" italic>
Please contact your system administrator to perform a password
reset.
</Text>
</Stack>
</Modal>
</Center>
);
};

View file

@ -106,13 +106,12 @@ const LogoForm = ({ logo = null, isOpen, onClose, onSuccess }) => {
onSuccess?.({ type: 'create', logo: newLogo }); // Call onSuccess for creates
} else {
// File was uploaded and logo was already created
// Note: API.uploadLogo already calls addLogo() in the store, so no need to call onSuccess
notifications.show({
title: 'Success',
message: 'Logo uploaded successfully',
color: 'green',
});
// No onSuccess call needed - API.uploadLogo already updated the store
onSuccess?.({ type: 'create', logo: uploadResponse });
}
onClose();
} catch (error) {
@ -211,6 +210,24 @@ const LogoForm = ({ logo = null, isOpen, onClose, onSuccess }) => {
}
};
const handleUrlBlur = (event) => {
const urlValue = event.target.value;
if (urlValue) {
try {
const url = new URL(urlValue);
const pathname = url.pathname;
const filename = pathname.substring(pathname.lastIndexOf('/') + 1);
const nameWithoutExtension = filename.replace(/\.[^/.]+$/, '');
if (nameWithoutExtension) {
formik.setFieldValue('name', nameWithoutExtension);
}
} catch (error) {
// If the URL is invalid, do nothing.
// The validation schema will catch this.
}
}
};
// Clean up object URLs when component unmounts or preview changes
useEffect(() => {
return () => {
@ -323,6 +340,7 @@ const LogoForm = ({ logo = null, isOpen, onClose, onSuccess }) => {
placeholder="https://example.com/logo.png"
{...formik.getFieldProps('url')}
onChange={handleUrlChange}
onBlur={handleUrlBlur}
error={formik.touched.url && formik.errors.url}
disabled={!!selectedFile} // Disable when file is selected
/>

View file

@ -23,7 +23,6 @@ import {
} from '@mantine/core';
import M3UGroupFilter from './M3UGroupFilter';
import useChannelsStore from '../../store/channels';
import usePlaylistsStore from '../../store/playlists';
import { notifications } from '@mantine/notifications';
import { isNotEmpty, useForm } from '@mantine/form';
import useEPGsStore from '../../store/epgs';
@ -40,7 +39,6 @@ const M3U = ({
const userAgents = useUserAgentsStore((s) => s.userAgents);
const fetchChannelGroups = useChannelsStore((s) => s.fetchChannelGroups);
const fetchPlaylists = usePlaylistsStore((s) => s.fetchPlaylists);
const fetchEPGs = useEPGsStore((s) => s.fetchEPGs);
const fetchCategories = useVODStore((s) => s.fetchCategories);
@ -61,7 +59,7 @@ const M3U = ({
is_active: true,
max_streams: 0,
refresh_interval: 24,
account_type: 'STD',
account_type: 'XC',
create_epg: false,
username: '',
password: '',
@ -171,8 +169,14 @@ const M3U = ({
return;
}
// Fetch the updated playlist details (this also updates the store via API)
const updatedPlaylist = await API.getPlaylist(newPlaylist.id);
await Promise.all([fetchChannelGroups(), fetchPlaylists(), fetchEPGs()]);
// Note: We don't call fetchPlaylists() here because API.addPlaylist()
// already added the playlist to the store. Calling fetchPlaylists() creates
// a race condition where the store is temporarily cleared/replaced while
// websocket updates for the new playlist's refresh task are arriving.
await Promise.all([fetchChannelGroups(), fetchEPGs()]);
// If this is an XC account with VOD enabled, also fetch VOD categories
if (values.account_type === 'XC' && values.enable_vod) {
@ -199,6 +203,11 @@ const M3U = ({
const closeGroupFilter = () => {
setGroupFilterModalOpen(false);
// After group filter setup for a new account, reset everything
form.reset();
setFile(null);
setPlaylist(null);
onClose();
};
const closeFilter = () => {
@ -217,7 +226,17 @@ const M3U = ({
return (
<>
<Modal size={700} opened={isOpen} onClose={close} title="M3U Account">
<Modal
size={700}
opened={isOpen}
onClose={close}
title="M3U Account"
scrollAreaComponent={Modal.NativeScrollArea}
lockScroll={false}
withinPortal={true}
trapFocus={false}
yOffset="2vh"
>
<LoadingOverlay
visible={form.submitting}
overlayBlur={2}

View file

@ -253,7 +253,16 @@ const M3UFilters = ({ playlist, isOpen, onClose }) => {
return (
<>
<Modal opened={isOpen} onClose={onClose} title="Filters" size="lg">
<Modal
opened={isOpen}
onClose={onClose}
title="Filters"
size="lg"
scrollAreaComponent={Modal.NativeScrollArea}
lockScroll={false}
withinPortal={true}
yOffset="2vh"
>
<Alert
icon={<Info size={16} />}
color="blue"

View file

@ -55,6 +55,21 @@ const M3UGroupFilter = ({ playlist = null, isOpen, onClose }) => {
const [isLoading, setIsLoading] = useState(false);
const [movieCategoryStates, setMovieCategoryStates] = useState([]);
const [seriesCategoryStates, setSeriesCategoryStates] = useState([]);
const [autoEnableNewGroupsLive, setAutoEnableNewGroupsLive] = useState(true);
const [autoEnableNewGroupsVod, setAutoEnableNewGroupsVod] = useState(true);
const [autoEnableNewGroupsSeries, setAutoEnableNewGroupsSeries] =
useState(true);
useEffect(() => {
if (!playlist) return;
// Initialize account-level settings
setAutoEnableNewGroupsLive(playlist.auto_enable_new_groups_live ?? true);
setAutoEnableNewGroupsVod(playlist.auto_enable_new_groups_vod ?? true);
setAutoEnableNewGroupsSeries(
playlist.auto_enable_new_groups_series ?? true
);
}, [playlist]);
useEffect(() => {
if (Object.keys(channelGroups).length === 0) {
@ -62,27 +77,29 @@ const M3UGroupFilter = ({ playlist = null, isOpen, onClose }) => {
}
setGroupStates(
playlist.channel_groups.map((group) => {
// Parse custom_properties if present
let customProps = {};
if (group.custom_properties) {
try {
customProps =
typeof group.custom_properties === 'string'
? JSON.parse(group.custom_properties)
: group.custom_properties;
} catch (e) {
customProps = {};
playlist.channel_groups
.filter((group) => channelGroups[group.channel_group]) // Filter out groups that don't exist
.map((group) => {
// Parse custom_properties if present
let customProps = {};
if (group.custom_properties) {
try {
customProps =
typeof group.custom_properties === 'string'
? JSON.parse(group.custom_properties)
: group.custom_properties;
} catch (e) {
customProps = {};
}
}
}
return {
...group,
name: channelGroups[group.channel_group].name,
auto_channel_sync: group.auto_channel_sync || false,
auto_sync_channel_start: group.auto_sync_channel_start || 1.0,
custom_properties: customProps,
};
})
return {
...group,
name: channelGroups[group.channel_group].name,
auto_channel_sync: group.auto_channel_sync || false,
auto_sync_channel_start: group.auto_sync_channel_start || 1.0,
custom_properties: customProps,
};
})
);
}, [playlist, channelGroups]);
@ -116,6 +133,14 @@ const M3UGroupFilter = ({ playlist = null, isOpen, onClose }) => {
}))
.filter((state) => state.enabled !== state.original_enabled);
// Update account-level settings via the proper account endpoint
await API.updatePlaylist({
id: playlist.id,
auto_enable_new_groups_live: autoEnableNewGroupsLive,
auto_enable_new_groups_vod: autoEnableNewGroupsVod,
auto_enable_new_groups_series: autoEnableNewGroupsSeries,
});
// Update group settings via API endpoint
await API.updateM3UGroupSettings(
playlist.id,
@ -161,6 +186,10 @@ const M3UGroupFilter = ({ playlist = null, isOpen, onClose }) => {
title="M3U Group Filter & Auto Channel Sync"
size={1000}
styles={{ content: { '--mantine-color-body': '#27272A' } }}
scrollAreaComponent={Modal.NativeScrollArea}
lockScroll={false}
withinPortal={true}
yOffset="2vh"
>
<LoadingOverlay visible={isLoading} overlayBlur={2} />
<Stack>
@ -176,6 +205,8 @@ const M3UGroupFilter = ({ playlist = null, isOpen, onClose }) => {
playlist={playlist}
groupStates={groupStates}
setGroupStates={setGroupStates}
autoEnableNewGroupsLive={autoEnableNewGroupsLive}
setAutoEnableNewGroupsLive={setAutoEnableNewGroupsLive}
/>
</Tabs.Panel>
@ -185,6 +216,8 @@ const M3UGroupFilter = ({ playlist = null, isOpen, onClose }) => {
categoryStates={movieCategoryStates}
setCategoryStates={setMovieCategoryStates}
type="movie"
autoEnableNewGroups={autoEnableNewGroupsVod}
setAutoEnableNewGroups={setAutoEnableNewGroupsVod}
/>
</Tabs.Panel>
@ -194,6 +227,8 @@ const M3UGroupFilter = ({ playlist = null, isOpen, onClose }) => {
categoryStates={seriesCategoryStates}
setCategoryStates={setSeriesCategoryStates}
type="series"
autoEnableNewGroups={autoEnableNewGroupsSeries}
setAutoEnableNewGroups={setAutoEnableNewGroupsSeries}
/>
</Tabs.Panel>
</Tabs>

View file

@ -192,7 +192,15 @@ const M3UProfiles = ({ playlist = null, isOpen, onClose }) => {
return (
<>
<Modal opened={isOpen} onClose={onClose} title="Profiles">
<Modal
opened={isOpen}
onClose={onClose}
title="Profiles"
scrollAreaComponent={Modal.NativeScrollArea}
lockScroll={false}
withinPortal={true}
yOffset="2vh"
>
{profilesArray
.sort((a, b) => {
// Always put default profile first

View file

@ -1,117 +1,424 @@
// Modal.js
import React from 'react';
import React, { useEffect, useMemo, useState } from 'react';
import dayjs from 'dayjs';
import API from '../../api';
import { Button, Modal, Flex, Select, Alert } from '@mantine/core';
import useChannelsStore from '../../store/channels';
import { DateTimePicker } from '@mantine/dates';
import {
Alert,
Button,
Modal,
Select,
Stack,
SegmentedControl,
MultiSelect,
Group,
TextInput,
} from '@mantine/core';
import { DateTimePicker, TimeInput, DatePickerInput } from '@mantine/dates';
import { CircleAlert } from 'lucide-react';
import { isNotEmpty, useForm } from '@mantine/form';
import useChannelsStore from '../../store/channels';
import { notifications } from '@mantine/notifications';
const DVR = ({ recording = null, channel = null, isOpen, onClose }) => {
const DAY_OPTIONS = [
{ value: '6', label: 'Sun' },
{ value: '0', label: 'Mon' },
{ value: '1', label: 'Tue' },
{ value: '2', label: 'Wed' },
{ value: '3', label: 'Thu' },
{ value: '4', label: 'Fri' },
{ value: '5', label: 'Sat' },
];
const asDate = (value) => {
if (!value) return null;
if (value instanceof Date) return value;
const parsed = new Date(value);
return Number.isNaN(parsed.getTime()) ? null : parsed;
};
const toIsoIfDate = (value) => {
const dt = asDate(value);
return dt ? dt.toISOString() : value;
};
// Accepts "h:mm A"/"hh:mm A"/"HH:mm"/Date, returns "HH:mm"
const toTimeString = (value) => {
if (!value) return '00:00';
if (typeof value === 'string') {
const parsed = dayjs(value, ['HH:mm', 'hh:mm A', 'h:mm A', 'HH:mm:ss'], true);
if (parsed.isValid()) return parsed.format('HH:mm');
return value;
}
const dt = asDate(value);
if (!dt) return '00:00';
return dayjs(dt).format('HH:mm');
};
const toDateString = (value) => {
const dt = asDate(value);
if (!dt) return null;
const year = dt.getFullYear();
const month = String(dt.getMonth() + 1).padStart(2, '0');
const day = String(dt.getDate()).padStart(2, '0');
return `${year}-${month}-${day}`;
};
const createRoundedDate = (minutesAhead = 0) => {
const dt = new Date();
dt.setSeconds(0);
dt.setMilliseconds(0);
dt.setMinutes(Math.ceil(dt.getMinutes() / 30) * 30);
if (minutesAhead) dt.setMinutes(dt.getMinutes() + minutesAhead);
return dt;
};
// robust onChange for TimeInput (string or event)
const timeChange = (setter) => (valOrEvent) => {
if (typeof valOrEvent === 'string') setter(valOrEvent);
else if (valOrEvent?.currentTarget) setter(valOrEvent.currentTarget.value);
};
const RecordingModal = ({ recording = null, channel = null, isOpen, onClose }) => {
const channels = useChannelsStore((s) => s.channels);
const fetchRecordings = useChannelsStore((s) => s.fetchRecordings);
const fetchRecurringRules = useChannelsStore((s) => s.fetchRecurringRules);
let startTime = new Date();
startTime.setMinutes(Math.ceil(startTime.getMinutes() / 30) * 30);
startTime.setSeconds(0);
startTime.setMilliseconds(0);
const [mode, setMode] = useState('single');
const [submitting, setSubmitting] = useState(false);
let endTime = new Date();
endTime.setMinutes(Math.ceil(endTime.getMinutes() / 30) * 30);
endTime.setSeconds(0);
endTime.setMilliseconds(0);
endTime.setHours(endTime.getHours() + 1);
const defaultStart = createRoundedDate();
const defaultEnd = createRoundedDate(60);
const defaultDate = new Date();
const form = useForm({
mode: 'uncontrolled',
// One-time form
const singleForm = useForm({
mode: 'controlled',
initialValues: {
channel_id: recording
? recording.channel_id
: channel
? `${channel.id}`
: '',
start_time: recording ? recording.start_time : startTime,
end_time: recording ? recording.end_time : endTime,
channel_id: recording ? `${recording.channel}` : channel ? `${channel.id}` : '',
start_time: recording ? asDate(recording.start_time) || defaultStart : defaultStart,
end_time: recording ? asDate(recording.end_time) || defaultEnd : defaultEnd,
},
validate: {
channel_id: isNotEmpty('Select a channel'),
start_time: isNotEmpty('Select a start time'),
end_time: isNotEmpty('Select an end time'),
end_time: (value, values) => {
const start = asDate(values.start_time);
const end = asDate(value);
if (!end) return 'Select an end time';
if (start && end <= start) return 'End time must be after start time';
return null;
},
},
});
const onSubmit = async () => {
const { channel_id, ...values } = form.getValues();
// Recurring form stores times as "HH:mm" strings for stable editing
const recurringForm = useForm({
mode: 'controlled',
validateInputOnChange: false,
validateInputOnBlur: true,
initialValues: {
channel_id: channel ? `${channel.id}` : '',
days_of_week: [],
start_time: dayjs(defaultStart).format('HH:mm'),
end_time: dayjs(defaultEnd).format('HH:mm'),
rule_name: '',
start_date: defaultDate,
end_date: defaultDate,
},
validate: {
channel_id: isNotEmpty('Select a channel'),
days_of_week: (value) => (value && value.length ? null : 'Pick at least one day'),
start_time: (value) => (value ? null : 'Select a start time'),
end_time: (value, values) => {
if (!value) return 'Select an end time';
const start = dayjs(values.start_time, ['HH:mm', 'hh:mm A', 'h:mm A'], true);
const end = dayjs(value, ['HH:mm', 'hh:mm A', 'h:mm A'], true);
if (start.isValid() && end.isValid() && end.diff(start, 'minute') === 0) {
return 'End time must differ from start time';
}
return null;
},
end_date: (value, values) => {
const end = asDate(value);
const start = asDate(values.start_date);
if (!end) return 'Select an end date';
if (start && end < start) return 'End date cannot be before start date';
return null;
},
},
});
console.log(values);
useEffect(() => {
if (!isOpen) return;
await API.createRecording({
...values,
channel: channel_id,
const freshStart = createRoundedDate();
const freshEnd = createRoundedDate(60);
const freshDate = new Date();
if (recording && recording.id) {
setMode('single');
singleForm.setValues({
channel_id: `${recording.channel}`,
start_time: asDate(recording.start_time) || defaultStart,
end_time: asDate(recording.end_time) || defaultEnd,
});
} else {
// Reset forms for fresh open
singleForm.setValues({
channel_id: channel ? `${channel.id}` : '',
start_time: freshStart,
end_time: freshEnd,
});
const startStr = dayjs(freshStart).format('HH:mm');
recurringForm.setValues({
channel_id: channel ? `${channel.id}` : '',
days_of_week: [],
start_time: startStr,
end_time: dayjs(freshEnd).format('HH:mm'),
rule_name: channel?.name || '',
start_date: freshDate,
end_date: freshDate,
});
setMode('single');
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [isOpen, recording, channel]);
const channelOptions = useMemo(() => {
const list = Object.values(channels || {});
list.sort((a, b) => {
const aNum = Number(a.channel_number) || 0;
const bNum = Number(b.channel_number) || 0;
if (aNum === bNum) return (a.name || '').localeCompare(b.name || '');
return aNum - bNum;
});
return list.map((item) => ({ value: `${item.id}`, label: item.name || `Channel ${item.id}` }));
}, [channels]);
form.reset();
onClose();
const resetForms = () => {
singleForm.reset();
recurringForm.reset();
setMode('single');
};
if (!isOpen) {
return <></>;
}
const handleClose = () => {
resetForms();
onClose?.();
};
const handleSingleSubmit = async (values) => {
try {
setSubmitting(true);
if (recording && recording.id) {
await API.updateRecording(recording.id, {
channel: values.channel_id,
start_time: toIsoIfDate(values.start_time),
end_time: toIsoIfDate(values.end_time),
});
notifications.show({
title: 'Recording updated',
message: 'Recording schedule updated successfully',
color: 'green',
autoClose: 2500,
});
} else {
await API.createRecording({
channel: values.channel_id,
start_time: toIsoIfDate(values.start_time),
end_time: toIsoIfDate(values.end_time),
});
notifications.show({
title: 'Recording scheduled',
message: 'One-time recording added to DVR queue',
color: 'green',
autoClose: 2500,
});
}
await fetchRecordings();
handleClose();
} catch (error) {
      console.error('Failed to save recording', error);
} finally {
setSubmitting(false);
}
};
const handleRecurringSubmit = async (values) => {
try {
setSubmitting(true);
await API.createRecurringRule({
channel: values.channel_id,
days_of_week: (values.days_of_week || []).map((d) => Number(d)),
start_time: toTimeString(values.start_time),
end_time: toTimeString(values.end_time),
start_date: toDateString(values.start_date),
end_date: toDateString(values.end_date),
name: values.rule_name?.trim() || '',
});
await Promise.all([fetchRecurringRules(), fetchRecordings()]);
notifications.show({
title: 'Recurring rule saved',
message: 'Future slots will be scheduled automatically',
color: 'green',
autoClose: 2500,
});
handleClose();
} catch (error) {
console.error('Failed to create recurring rule', error);
} finally {
setSubmitting(false);
}
};
const onSubmit =
mode === 'single'
? singleForm.onSubmit(handleSingleSubmit)
: recurringForm.onSubmit(handleRecurringSubmit);
if (!isOpen) return null;
return (
<Modal opened={isOpen} onClose={onClose} title="Channel Recording">
<Modal opened={isOpen} onClose={handleClose} title="Channel Recording">
<Alert
variant="light"
color="yellow"
title="Scheduling Conflicts"
icon={<CircleAlert />}
style={{ paddingBottom: 5 }}
style={{ paddingBottom: 5, marginBottom: 12 }}
>
Recordings may fail if active streams or overlapping recordings use up
all available streams
Recordings may fail if active streams or overlapping recordings use up all available tuners.
</Alert>
<form onSubmit={form.onSubmit(onSubmit)}>
<Select
{...form.getInputProps('channel_id')}
label="Channel"
key={form.key('channel_id')}
searchable
data={Object.values(channels).map((channel) => ({
value: `${channel.id}`,
label: channel.name,
}))}
<Stack gap="md">
<SegmentedControl
value={mode}
onChange={setMode}
disabled={Boolean(recording && recording.id)}
data={[
{ value: 'single', label: 'One-time' },
{ value: 'recurring', label: 'Recurring' },
]}
/>
<DateTimePicker
{...form.getInputProps('start_time')}
key={form.key('start_time')}
id="start_time"
label="Start Time"
valueFormat="M/DD/YYYY hh:mm A"
/>
<form onSubmit={onSubmit}>
<Stack gap="md">
{mode === 'single' ? (
<Select
{...singleForm.getInputProps('channel_id')}
key={singleForm.key('channel_id')}
label="Channel"
placeholder="Select channel"
searchable
data={channelOptions}
/>
) : (
<Select
{...recurringForm.getInputProps('channel_id')}
key={recurringForm.key('channel_id')}
label="Channel"
placeholder="Select channel"
searchable
data={channelOptions}
/>
)}
<DateTimePicker
{...form.getInputProps('end_time')}
key={form.key('end_time')}
id="end_time"
label="End Time"
valueFormat="M/DD/YYYY hh:mm A"
/>
{mode === 'single' ? (
<>
<DateTimePicker
{...singleForm.getInputProps('start_time')}
key={singleForm.key('start_time')}
label="Start"
valueFormat="MMM D, YYYY h:mm A"
timeInputProps={{ format: '12', withSeconds: false, amLabel: 'AM', pmLabel: 'PM' }}
/>
<DateTimePicker
{...singleForm.getInputProps('end_time')}
key={singleForm.key('end_time')}
label="End"
valueFormat="MMM D, YYYY h:mm A"
timeInputProps={{ format: '12', withSeconds: false, amLabel: 'AM', pmLabel: 'PM' }}
/>
</>
) : (
<>
<TextInput
{...recurringForm.getInputProps('rule_name')}
key={recurringForm.key('rule_name')}
label="Rule name"
placeholder="Morning News, Football Sundays, ..."
/>
<MultiSelect
{...recurringForm.getInputProps('days_of_week')}
key={recurringForm.key('days_of_week')}
label="Every"
placeholder="Select days"
data={DAY_OPTIONS}
searchable
clearable
nothingFoundMessage="No match"
/>
<Flex mih={50} gap="xs" justify="flex-end" align="flex-end">
<Button
type="submit"
variant="contained"
size="small"
disabled={form.submitting}
>
Submit
</Button>
</Flex>
</form>
<Group grow>
<DatePickerInput
label="Start date"
value={recurringForm.values.start_date}
onChange={(value) =>
recurringForm.setFieldValue('start_date', value || new Date())
}
valueFormat="MMM D, YYYY"
/>
<DatePickerInput
label="End date"
value={recurringForm.values.end_date}
onChange={(value) => recurringForm.setFieldValue('end_date', value)}
valueFormat="MMM D, YYYY"
minDate={recurringForm.values.start_date || undefined}
/>
</Group>
<Group grow>
<TimeInput
label="Start time"
value={recurringForm.values.start_time}
onChange={timeChange((val) =>
recurringForm.setFieldValue('start_time', toTimeString(val))
)}
onBlur={() => recurringForm.validateField('start_time')}
withSeconds={false}
format="12" // shows 12-hour (so "00:00" renders "12:00 AM")
inputMode="numeric"
amLabel="AM"
pmLabel="PM"
/>
<TimeInput
label="End time"
value={recurringForm.values.end_time}
onChange={timeChange((val) =>
recurringForm.setFieldValue('end_time', toTimeString(val))
)}
onBlur={() => recurringForm.validateField('end_time')}
withSeconds={false}
format="12"
inputMode="numeric"
amLabel="AM"
pmLabel="PM"
/>
</Group>
</>
)}
<Group justify="flex-end">
<Button type="submit" loading={submitting}>
{mode === 'single' ? 'Schedule Recording' : 'Save Rule'}
</Button>
</Group>
</Stack>
</form>
</Stack>
</Modal>
);
};
export default DVR;
export default RecordingModal;

View file

@ -25,10 +25,22 @@ const Stream = ({ stream = null, isOpen, onClose }) => {
}),
onSubmit: async (values, { setSubmitting, resetForm }) => {
console.log(values);
// Convert string IDs back to integers for the API
const payload = {
...values,
channel_group: values.channel_group
? parseInt(values.channel_group, 10)
: null,
stream_profile_id: values.stream_profile_id
? parseInt(values.stream_profile_id, 10)
: null,
};
if (stream?.id) {
await API.updateStream({ id: stream.id, ...values });
await API.updateStream({ id: stream.id, ...payload });
} else {
await API.addStream(values);
await API.addStream(payload);
}
resetForm();
@ -42,12 +54,18 @@ const Stream = ({ stream = null, isOpen, onClose }) => {
formik.setValues({
name: stream.name,
url: stream.url,
channel_group: stream.channel_group,
stream_profile_id: stream.stream_profile_id,
// Convert IDs to strings to match Select component values
channel_group: stream.channel_group
? String(stream.channel_group)
: null,
stream_profile_id: stream.stream_profile_id
? String(stream.stream_profile_id)
: '',
});
} else {
formik.resetForm();
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [stream]);
if (!isOpen) {
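// A minimal sketch of the string <-> integer round trip used in this form: Mantine
// Select values are strings, while the API expects numeric IDs. The helper names
// below are assumptions; the diff inlines parseInt/String calls instead.
const toApiId = (value) => (value ? parseInt(value, 10) : null);
const toSelectValue = (id) =>
  id !== null && id !== undefined ? String(id) : null;

// Example payload built from form values (field names match the form above):
// const payload = {
//   ...values,
//   channel_group: toApiId(values.channel_group),
//   stream_profile_id: toApiId(values.stream_profile_id),
// };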

View file

@ -10,6 +10,7 @@ import {
Text,
Divider,
Box,
Checkbox,
} from '@mantine/core';
import { CircleCheck, CircleX } from 'lucide-react';
import useVODStore from '../../store/useVODStore';
@ -19,6 +20,8 @@ const VODCategoryFilter = ({
categoryStates,
setCategoryStates,
type,
autoEnableNewGroups,
setAutoEnableNewGroups,
}) => {
const categories = useVODStore((s) => s.categories);
const [filter, setFilter] = useState('');
@ -85,6 +88,16 @@ const VODCategoryFilter = ({
return (
<Stack style={{ paddingTop: 10 }}>
<Checkbox
label={`Automatically enable new ${type === 'movie' ? 'movie' : 'series'} categories discovered on future scans`}
checked={autoEnableNewGroups}
onChange={(event) =>
setAutoEnableNewGroups(event.currentTarget.checked)
}
size="sm"
description="When disabled, new categories from the provider will be created but disabled by default. You can enable them manually later."
/>
<Flex gap="sm">
<TextInput
placeholder="Filter categories..."

View file

@ -23,6 +23,7 @@ import {
ArrowUpNarrowWide,
ArrowUpDown,
ArrowDownWideNarrow,
Search,
} from 'lucide-react';
import {
Box,
@ -307,6 +308,7 @@ const ChannelsTable = ({}) => {
const [channelToDelete, setChannelToDelete] = useState(null);
// Column sizing state for resizable columns
// Store in localStorage but with empty object as default
const [columnSizing, setColumnSizing] = useLocalStorage(
'channels-table-column-sizing',
{}
@ -882,7 +884,12 @@ const ChannelsTable = ({}) => {
),
},
],
[selectedProfileId, channelGroups, logos, theme, columnSizing]
// Note: columnSizing is intentionally excluded from dependencies to prevent
// columns from being recreated during drag operations (which causes infinite loops).
// The column.size values are only used for INITIAL sizing - TanStack Table manages
// the actual sizes through its own state after initialization.
// eslint-disable-next-line react-hooks/exhaustive-deps
[selectedProfileId, channelGroups, logos, theme]
);
const renderHeaderCell = (header) => {
@ -943,6 +950,7 @@ const ChannelsTable = ({}) => {
size="xs"
variant="unstyled"
className="table-input-header"
leftSection={<Search size={14} opacity={0.5} />}
/>
<Center>
{React.createElement(sortingIcon, {
@ -979,17 +987,18 @@ const ChannelsTable = ({}) => {
filters,
pagination,
sorting,
columnSizing,
setColumnSizing,
manualPagination: true,
manualSorting: true,
manualFiltering: true,
enableRowSelection: true,
onRowSelectionChange: onRowSelectionChange,
state: {
columnSizing,
pagination,
sorting,
},
onColumnSizingChange: setColumnSizing,
columnResizeMode: 'onChange',
getExpandedRowHeight: (row) => {
return 20 + 28 * row.original.streams.length;
},
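// A minimal sketch (plain @tanstack/react-table, without the app's table helper) of
// the controlled column-sizing pattern described in the comments above: sizes live
// in React state and the memoized columns array deliberately does not depend on them.
import { useState } from 'react';
import { useReactTable, getCoreRowModel } from '@tanstack/react-table';

function useSizedTable(data, columns) {
  // The real table persists this object with useLocalStorage instead of useState.
  const [columnSizing, setColumnSizing] = useState({});
  return useReactTable({
    data,
    columns, // memoized elsewhere WITHOUT columnSizing in its dependency array
    getCoreRowModel: getCoreRowModel(),
    columnResizeMode: 'onChange',
    state: { columnSizing },
    onColumnSizingChange: setColumnSizing,
  });
}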

View file

@ -2,6 +2,7 @@ import React, { useEffect, useMemo, useRef, useState } from 'react';
import API from '../../api';
import useEPGsStore from '../../store/epgs';
import EPGForm from '../forms/EPG';
import DummyEPGForm from '../forms/DummyEPG';
import { TableHelper } from '../../helpers';
import {
ActionIcon,
@ -17,6 +18,7 @@ import {
Progress,
Stack,
Group,
Menu,
} from '@mantine/core';
import { notifications } from '@mantine/notifications';
import {
@ -27,6 +29,7 @@ import {
SquareMinus,
SquarePen,
SquarePlus,
ChevronDown,
} from 'lucide-react';
import dayjs from 'dayjs';
import useSettingsStore from '../../store/settings';
@ -62,6 +65,7 @@ const getStatusColor = (status) => {
const RowActions = ({ tableSize, row, editEPG, deleteEPG, refreshEPG }) => {
const iconSize =
tableSize == 'default' ? 'sm' : tableSize == 'compact' ? 'xs' : 'md';
const isDummyEPG = row.original.source_type === 'dummy';
return (
<>
@ -88,7 +92,7 @@ const RowActions = ({ tableSize, row, editEPG, deleteEPG, refreshEPG }) => {
size={iconSize} // Use standardized icon size
color="blue.5" // Red color for delete actions
onClick={() => refreshEPG(row.original.id)}
disabled={!row.original.is_active}
disabled={!row.original.is_active || isDummyEPG}
>
<RefreshCcw size={tableSize === 'compact' ? 16 : 18} />{' '}
{/* Small icon size */}
@ -100,6 +104,7 @@ const RowActions = ({ tableSize, row, editEPG, deleteEPG, refreshEPG }) => {
const EPGsTable = () => {
const [epg, setEPG] = useState(null);
const [epgModalOpen, setEPGModalOpen] = useState(false);
const [dummyEpgModalOpen, setDummyEpgModalOpen] = useState(false);
const [rowSelection, setRowSelection] = useState([]);
const [confirmDeleteOpen, setConfirmDeleteOpen] = useState(false);
const [deleteTarget, setDeleteTarget] = useState(null);
@ -126,6 +131,12 @@ const EPGsTable = () => {
const toggleActive = async (epg) => {
try {
// Validate that epg is a valid object with an id
if (!epg || typeof epg !== 'object' || !epg.id) {
console.error('toggleActive called with invalid epg:', epg);
return;
}
// Send only the is_active field to trigger our special handling
await API.updateEPG(
{
@ -176,8 +187,6 @@ const EPGsTable = () => {
);
};
console.log(epgs);
const columns = useMemo(
//column definitions...
() => [
@ -224,11 +233,14 @@ const EPGsTable = () => {
size: 100,
cell: ({ row }) => {
const data = row.original;
const isDummyEPG = data.source_type === 'dummy';
// Dummy EPGs always show idle status
const displayStatus = isDummyEPG ? 'idle' : data.status;
// Always show status text, even when there's progress happening
return (
<Text size="sm" fw={500} c={getStatusColor(data.status)}>
{formatStatusText(data.status)}
<Text size="sm" fw={500} c={getStatusColor(displayStatus)}>
{formatStatusText(displayStatus)}
</Text>
);
},
@ -241,6 +253,12 @@ const EPGsTable = () => {
grow: true,
cell: ({ row }) => {
const data = row.original;
const isDummyEPG = data.source_type === 'dummy';
// Dummy EPGs don't have status messages
if (isDummyEPG) {
return null;
}
// Check if there's an active progress for this EPG - show progress first if active
if (
@ -305,15 +323,19 @@ const EPGsTable = () => {
mantineTableBodyCellProps: {
align: 'left',
},
cell: ({ row, cell }) => (
<Box sx={{ display: 'flex', justifyContent: 'center' }}>
<Switch
size="xs"
checked={cell.getValue()}
onChange={() => toggleActive(row.original)}
/>
</Box>
),
cell: ({ row, cell }) => {
const isDummyEPG = row.original.source_type === 'dummy';
return (
<Box sx={{ display: 'flex', justifyContent: 'center' }}>
<Switch
size="xs"
checked={cell.getValue()}
onChange={() => toggleActive(row.original)}
disabled={isDummyEPG}
/>
</Box>
);
},
},
{
id: 'actions',
@ -329,9 +351,24 @@ const EPGsTable = () => {
const editEPG = async (epg = null) => {
setEPG(epg);
// Open the appropriate modal based on source type
if (epg?.source_type === 'dummy') {
setDummyEpgModalOpen(true);
} else {
setEPGModalOpen(true);
}
};
const createStandardEPG = () => {
setEPG(null);
setEPGModalOpen(true);
};
const createDummyEPG = () => {
setEPG(null);
setDummyEpgModalOpen(true);
};
const deleteEPG = async (id) => {
// Get EPG details for the confirmation dialog
const epgObj = epgs[id];
@ -365,6 +402,11 @@ const EPGsTable = () => {
setEPGModalOpen(false);
};
const closeDummyEPGForm = () => {
setEPG(null);
setDummyEpgModalOpen(false);
};
useEffect(() => {
setData(
Object.values(epgs).sort((a, b) => {
@ -522,21 +564,31 @@ const EPGsTable = () => {
>
EPGs
</Text>
<Button
leftSection={<SquarePlus size={18} />}
variant="light"
size="xs"
onClick={() => editEPG()}
p={5}
color="green"
style={{
borderWidth: '1px',
borderColor: 'green',
color: 'white',
}}
>
Add EPG
</Button>
<Menu shadow="md" width={200}>
<Menu.Target>
<Button
leftSection={<SquarePlus size={18} />}
rightSection={<ChevronDown size={16} />}
variant="light"
size="xs"
p={5}
color="green"
style={{
borderWidth: '1px',
borderColor: 'green',
color: 'white',
}}
>
Add EPG
</Button>
</Menu.Target>
<Menu.Dropdown>
<Menu.Item onClick={createStandardEPG}>
Standard EPG Source
</Menu.Item>
<Menu.Item onClick={createDummyEPG}>Dummy EPG Source</Menu.Item>
</Menu.Dropdown>
</Menu>
</Flex>
<Paper
@ -579,6 +631,11 @@ const EPGsTable = () => {
</Box>
<EPGForm epg={epg} isOpen={epgModalOpen} onClose={closeEPGForm} />
<DummyEPGForm
epg={epg}
isOpen={dummyEpgModalOpen}
onClose={closeDummyEPGForm}
/>
<ConfirmationDialog
opened={confirmDeleteOpen}
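// Sketch of the source_type gating added throughout this table. The helper is an
// assumption for clarity; the diff inlines `source_type === 'dummy'` checks instead.
const isDummyEPG = (epg) => epg?.source_type === 'dummy';

// Dummy sources are generated locally rather than fetched, so the UI above
// disables their refresh button and active toggle and always reports them as idle:
//   disabled={!epg.is_active || isDummyEPG(epg)}
//   const displayStatus = isDummyEPG(epg) ? 'idle' : epg.status;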
