Mirror of https://github.com/Dispatcharr/Dispatcharr.git (synced 2026-01-22 18:28:00 +00:00)

Merge pull request #527 from Dispatcharr/dev: Pulling dev into DVR-Update

Commit 4c3bbd8f90: 37 changed files with 4714 additions and 1259 deletions
.github/workflows/base-image.yml (vendored, 135 changes)

@@ -2,42 +2,37 @@ name: Base Image Build

on:
  push:
    branches: [ main, dev ]
    branches: [main, dev]
    paths:
      - 'docker/DispatcharrBase'
      - '.github/workflows/base-image.yml'
      - 'requirements.txt'
  pull_request:
    branches: [ main, dev ]
    branches: [main, dev]
    paths:
      - 'docker/DispatcharrBase'
      - '.github/workflows/base-image.yml'
      - 'requirements.txt'
  workflow_dispatch: # Allow manual triggering
  workflow_dispatch: # Allow manual triggering

permissions:
  contents: write # For managing releases and pushing tags
  packages: write # For publishing to GitHub Container Registry
  contents: write # For managing releases and pushing tags
  packages: write # For publishing to GitHub Container Registry

jobs:
  build-base-image:
    runs-on: ubuntu-latest
  prepare:
    runs-on: ubuntu-24.04
    outputs:
      repo_owner: ${{ steps.meta.outputs.repo_owner }}
      repo_name: ${{ steps.meta.outputs.repo_name }}
      branch_tag: ${{ steps.meta.outputs.branch_tag }}
      timestamp: ${{ steps.timestamp.outputs.timestamp }}
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Generate timestamp for build
        id: timestamp
        run: |

@@ -66,13 +61,111 @@ jobs:
            echo "branch_tag=base-${BRANCH}" >> $GITHUB_OUTPUT
          fi

  docker:
    needs: [prepare]
    strategy:
      fail-fast: false
      matrix:
        platform: [amd64, arm64]
        include:
          - platform: amd64
            runner: ubuntu-24.04
          - platform: arm64
            runner: ubuntu-24.04-arm
    runs-on: ${{ matrix.runner }}
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Configure Git
        run: |
          git config user.name "GitHub Actions"
          git config user.email "actions@github.com"

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          registry: docker.io
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build and push Docker base image
        uses: docker/build-push-action@v4
        with:
          context: .
          file: ./docker/DispatcharrBase
          push: true
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name != 'pull_request' }}
          platforms: linux/${{ matrix.platform }}
          tags: |
            ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:base
            ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:base-${{ steps.timestamp.outputs.timestamp }}
            ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }}
            ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }}
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
          build-args: |
            REPO_OWNER=${{ needs.prepare.outputs.repo_owner }}
            REPO_NAME=${{ needs.prepare.outputs.repo_name }}
            BRANCH=${{ github.ref_name }}
            REPO_URL=https://github.com/${{ github.repository }}
            TIMESTAMP=${{ needs.prepare.outputs.timestamp }}

  create-manifest:
    needs: [prepare, docker]
    runs-on: ubuntu-24.04
    if: ${{ github.event_name != 'pull_request' }}
    steps:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          registry: docker.io
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Create multi-arch manifest tags
        run: |
          set -euo pipefail
          OWNER=${{ needs.prepare.outputs.repo_owner }}
          REPO=${{ needs.prepare.outputs.repo_name }}
          BRANCH_TAG=${{ needs.prepare.outputs.branch_tag }}
          TIMESTAMP=${{ needs.prepare.outputs.timestamp }}

          echo "Creating multi-arch manifest for ${OWNER}/${REPO}"

          # GitHub Container Registry manifests
          # branch tag (e.g. base or base-dev)
          docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \
            ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-arm64

          # branch + timestamp tag
          docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \
            ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-arm64

          # Docker Hub manifests
          # branch tag (e.g. base or base-dev)
          docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-arm64

          # branch + timestamp tag
          docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-arm64
.github/workflows/ci.yml (vendored, 200 changes)

@@ -2,19 +2,84 @@ name: CI Pipeline

on:
  push:
    branches: [ dev ]
    branches: [dev]
  pull_request:
    branches: [ dev ]
  workflow_dispatch: # Allow manual triggering
    branches: [dev]
  workflow_dispatch:

# Add explicit permissions for the workflow
permissions:
  contents: write # For managing releases and pushing tags
  packages: write # For publishing to GitHub Container Registry
  contents: write
  packages: write

jobs:
  build:
    runs-on: ubuntu-latest
  prepare:
    runs-on: ubuntu-24.04
    # compute a single timestamp, version, and repo metadata for the entire workflow
    outputs:
      repo_owner: ${{ steps.meta.outputs.repo_owner }}
      repo_name: ${{ steps.meta.outputs.repo_name }}
      branch_tag: ${{ steps.meta.outputs.branch_tag }}
      version: ${{ steps.version.outputs.version }}
      timestamp: ${{ steps.timestamp.outputs.timestamp }}

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Generate timestamp for build
        id: timestamp
        run: |
          TIMESTAMP=$(date -u +'%Y%m%d%H%M%S')
          echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT

      - name: Extract version info
        id: version
        run: |
          VERSION=$(python -c "import version; print(version.__version__)")
          echo "version=${VERSION}" >> $GITHUB_OUTPUT

      - name: Set repository and image metadata
        id: meta
        run: |
          REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')
          echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT

          REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]')
          echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT

          if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
            echo "branch_tag=latest" >> $GITHUB_OUTPUT
            echo "is_main=true" >> $GITHUB_OUTPUT
          elif [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
            echo "branch_tag=dev" >> $GITHUB_OUTPUT
            echo "is_main=false" >> $GITHUB_OUTPUT
          else
            BRANCH=$(echo "${{ github.ref }}" | sed 's/refs\/heads\///' | sed 's/[^a-zA-Z0-9]/-/g')
            echo "branch_tag=${BRANCH}" >> $GITHUB_OUTPUT
            echo "is_main=false" >> $GITHUB_OUTPUT
          fi

          if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then
            echo "is_fork=true" >> $GITHUB_OUTPUT
          else
            echo "is_fork=false" >> $GITHUB_OUTPUT
          fi

  docker:
    needs: [prepare]
    strategy:
      fail-fast: false
      matrix:
        platform: [amd64, arm64]
        include:
          - platform: amd64
            runner: ubuntu-24.04
          - platform: arm64
            runner: ubuntu-24.04-arm
    runs-on: ${{ matrix.runner }}
    # no per-job outputs here; shared metadata comes from the `prepare` job
    steps:
      - uses: actions/checkout@v3
        with:

@@ -45,66 +110,85 @@ jobs:
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Generate timestamp for build
        id: timestamp
        run: |
          TIMESTAMP=$(date -u +'%Y%m%d%H%M%S')
          echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          registry: docker.io
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Extract version info
        id: version
        run: |
          VERSION=$(python -c "import version; print(version.__version__)")
          echo "version=${VERSION}" >> $GITHUB_OUTPUT
          echo "sha_short=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT

      - name: Set repository and image metadata
        id: meta
        run: |
          # Get lowercase repository owner
          REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')
          echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT

          # Get repository name
          REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]')
          echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT

          # Determine branch name
          if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
            echo "branch_tag=latest" >> $GITHUB_OUTPUT
            echo "is_main=true" >> $GITHUB_OUTPUT
          elif [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
            echo "branch_tag=dev" >> $GITHUB_OUTPUT
            echo "is_main=false" >> $GITHUB_OUTPUT
          else
            # For other branches, use the branch name
            BRANCH=$(echo "${{ github.ref }}" | sed 's/refs\/heads\///' | sed 's/[^a-zA-Z0-9]/-/g')
            echo "branch_tag=${BRANCH}" >> $GITHUB_OUTPUT
            echo "is_main=false" >> $GITHUB_OUTPUT
          fi

          # Determine if this is from a fork
          if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then
            echo "is_fork=true" >> $GITHUB_OUTPUT
          else
            echo "is_fork=false" >> $GITHUB_OUTPUT
          fi
      # use metadata from the prepare job

      - name: Build and push Docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          push: ${{ github.event_name != 'pull_request' }}
          platforms: linux/amd64,linux/arm64
          # Build only the platform for this matrix job to avoid running amd64
          # stages under qemu on an arm64 runner (and vice-versa). This makes
          # the matrix runner's platform the one built by buildx.
          platforms: linux/${{ matrix.platform }}
          # push arch-specific tags from each matrix job (they will be combined
          # into a multi-arch manifest in a follow-up job)
          tags: |
            ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.meta.outputs.branch_tag }}
            ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.version.outputs.version }}-${{ steps.timestamp.outputs.timestamp }}
            ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.version.outputs.sha_short }}
            ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }}
            ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }}
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
          build-args: |
            REPO_OWNER=${{ steps.meta.outputs.repo_owner }}
            REPO_NAME=${{ steps.meta.outputs.repo_name }}
            REPO_OWNER=${{ needs.prepare.outputs.repo_owner }}
            REPO_NAME=${{ needs.prepare.outputs.repo_name }}
            BASE_TAG=base
            BRANCH=${{ github.ref_name }}
            REPO_URL=https://github.com/${{ github.repository }}
            TIMESTAMP=${{ steps.timestamp.outputs.timestamp }}
            TIMESTAMP=${{ needs.prepare.outputs.timestamp }}
          file: ./docker/Dockerfile

  create-manifest:
    # wait for prepare and all matrix builds to finish
    needs: [prepare, docker]
    runs-on: ubuntu-24.04
    if: ${{ github.event_name != 'pull_request' }}
    steps:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          registry: docker.io
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Create multi-arch manifest tags
        run: |
          set -euo pipefail
          OWNER=${{ needs.prepare.outputs.repo_owner }}
          REPO=${{ needs.prepare.outputs.repo_name }}
          BRANCH_TAG=${{ needs.prepare.outputs.branch_tag }}
          VERSION=${{ needs.prepare.outputs.version }}
          TIMESTAMP=${{ needs.prepare.outputs.timestamp }}

          echo "Creating multi-arch manifest for ${OWNER}/${REPO}"

          # branch tag (e.g. latest or dev)
          docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \
            ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-arm64

          # version + timestamp tag
          docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP} \
            ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP}-arm64

          # also create Docker Hub manifests using the same username
          docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-arm64

          docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP} \
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP}-arm64
.github/workflows/release.yml (vendored, 138 changes)

@@ -15,16 +15,21 @@ on:

# Add explicit permissions for the workflow
permissions:
  contents: write # For managing releases and pushing tags
  packages: write # For publishing to GitHub Container Registry
  contents: write # For managing releases and pushing tags
  packages: write # For publishing to GitHub Container Registry

jobs:
  release:
    runs-on: ubuntu-latest
  prepare:
    runs-on: ubuntu-24.04
    outputs:
      new_version: ${{ steps.update_version.outputs.new_version }}
      repo_owner: ${{ steps.meta.outputs.repo_owner }}
      repo_name: ${{ steps.meta.outputs.repo_name }}
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Configure Git
        run: |

@@ -38,14 +43,45 @@ jobs:
          NEW_VERSION=$(python -c "import version; print(f'{version.__version__}')")
          echo "new_version=${NEW_VERSION}" >> $GITHUB_OUTPUT

      - name: Set lowercase repo owner
        id: repo_owner
      - name: Set repository metadata
        id: meta
        run: |
          REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')
          echo "lowercase=${REPO_OWNER}" >> $GITHUB_OUTPUT
          echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
          REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]')
          echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT

      - name: Commit and Tag
        run: |
          git add version.py
          git commit -m "Release v${{ steps.update_version.outputs.new_version }}"
          git tag -a "v${{ steps.update_version.outputs.new_version }}" -m "Release v${{ steps.update_version.outputs.new_version }}"
          git push origin main --tags

  docker:
    needs: [prepare]
    strategy:
      fail-fast: false
      matrix:
        platform: [amd64, arm64]
        include:
          - platform: amd64
            runner: ubuntu-24.04
          - platform: arm64
            runner: ubuntu-24.04-arm
    runs-on: ${{ matrix.runner }}
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          token: ${{ secrets.GITHUB_TOKEN }}
          ref: main

      - name: Configure Git
        run: |
          git config user.name "GitHub Actions"
          git config user.email "actions@github.com"

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

@@ -57,36 +93,88 @@ jobs:
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Commit and Tag
        run: |
          git add version.py
          git commit -m "Release v${{ steps.update_version.outputs.new_version }}"
          git tag -a "v${{ steps.update_version.outputs.new_version }}" -m "Release v${{ steps.update_version.outputs.new_version }}"
          git push origin main --tags
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          registry: docker.io
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build and Push Release Image
      - name: Build and push Docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          push: true
          platforms: linux/amd64,linux/arm64, #linux/arm/v7 # Multi-arch support for releases
          platforms: linux/${{ matrix.platform }}
          tags: |
            ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest
            ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }}
            ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest-amd64
            ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest-arm64
            ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }}-amd64
            ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }}-arm64
            ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:latest-${{ matrix.platform }}
            ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.new_version }}-${{ matrix.platform }}
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:latest-${{ matrix.platform }}
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.new_version }}-${{ matrix.platform }}
          build-args: |
            REPO_OWNER=${{ needs.prepare.outputs.repo_owner }}
            REPO_NAME=${{ needs.prepare.outputs.repo_name }}
            BRANCH=${{ github.ref_name }}
            REPO_URL=https://github.com/${{ github.repository }}
          file: ./docker/Dockerfile

  create-manifest:
    needs: [prepare, docker]
    runs-on: ubuntu-24.04
    steps:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          registry: docker.io
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Create multi-arch manifest tags
        run: |
          set -euo pipefail
          OWNER=${{ needs.prepare.outputs.repo_owner }}
          REPO=${{ needs.prepare.outputs.repo_name }}
          VERSION=${{ needs.prepare.outputs.new_version }}

          echo "Creating multi-arch manifest for ${OWNER}/${REPO}"

          # GitHub Container Registry manifests
          # latest tag
          docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:latest \
            ghcr.io/${OWNER}/${REPO}:latest-amd64 ghcr.io/${OWNER}/${REPO}:latest-arm64

          # version tag
          docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${VERSION} \
            ghcr.io/${OWNER}/${REPO}:${VERSION}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-arm64

          # Docker Hub manifests
          # latest tag
          docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest \
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest-arm64

          # version tag
          docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION} \
            docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-arm64

  create-release:
    needs: [prepare, create-manifest]
    runs-on: ubuntu-24.04
    steps:
      - name: Create GitHub Release
        uses: softprops/action-gh-release@v1
        with:
          tag_name: v${{ steps.update_version.outputs.new_version }}
          name: Release v${{ steps.update_version.outputs.new_version }}
          tag_name: v${{ needs.prepare.outputs.new_version }}
          name: Release v${{ needs.prepare.outputs.new_version }}
          draft: false
          prerelease: false
          token: ${{ secrets.GITHUB_TOKEN }}
@@ -567,6 +567,37 @@ class ChannelViewSet(viewsets.ModelViewSet):
            "channel_count": len(channel_ids)
        })

    @action(detail=False, methods=["post"], url_path="set-tvg-ids-from-epg")
    def set_tvg_ids_from_epg(self, request):
        """
        Trigger a Celery task to set channel TVG-IDs from EPG data
        """
        from .tasks import set_channels_tvg_ids_from_epg

        data = request.data
        channel_ids = data.get("channel_ids", [])

        if not channel_ids:
            return Response(
                {"error": "channel_ids is required"},
                status=status.HTTP_400_BAD_REQUEST,
            )

        if not isinstance(channel_ids, list):
            return Response(
                {"error": "channel_ids must be a list"},
                status=status.HTTP_400_BAD_REQUEST,
            )

        # Start the Celery task
        task = set_channels_tvg_ids_from_epg.delay(channel_ids)

        return Response({
            "message": f"Started EPG TVG-ID setting task for {len(channel_ids)} channels",
            "task_id": task.id,
            "channel_count": len(channel_ids)
        })

    @action(detail=False, methods=["get"], url_path="ids")
    def get_ids(self, request, *args, **kwargs):
        # Get the filtered queryset
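For reference, a hedged usage sketch of the new action above. Only the url_path "set-tvg-ids-from-epg" and the expected request/response keys come from this hunk; the host, the router prefix, and the token-based auth header are assumptions for illustration.

# Hypothetical client call; adjust base URL, prefix, and auth to your deployment.
import requests

BASE_URL = "http://dispatcharr.local:9191"   # assumed host/port
TOKEN = "<api-token>"                        # assumed credential

resp = requests.post(
    f"{BASE_URL}/api/channels/channels/set-tvg-ids-from-epg/",  # assumed router prefix
    json={"channel_ids": [1, 2, 3]},
    headers={"Authorization": f"Bearer {TOKEN}"},
)
resp.raise_for_status()
print(resp.json())  # per the view: message, task_id, channel_count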
@@ -119,11 +119,11 @@ class Stream(models.Model):
        return self.name or self.url or f"Stream ID {self.id}"

    @classmethod
    def generate_hash_key(cls, name, url, tvg_id, keys=None):
    def generate_hash_key(cls, name, url, tvg_id, keys=None, m3u_id=None):
        if keys is None:
            keys = CoreSettings.get_m3u_hash_key().split(",")

        stream_parts = {"name": name, "url": url, "tvg_id": tvg_id}
        stream_parts = {"name": name, "url": url, "tvg_id": tvg_id, "m3u_id": m3u_id}

        hash_parts = {key: stream_parts[key] for key in keys if key in stream_parts}
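The generate_hash_key() change above only affects hashes once "m3u_id" appears in the configured hash-key list; identical streams delivered by two different M3U accounts then stop colliding. A minimal standalone sketch of that effect, with an assumed md5-over-joined-parts scheme (the model's actual hashing is not shown in this hunk):

import hashlib

def demo_hash(name, url, tvg_id, keys, m3u_id=None):
    # Assumed scheme for illustration: join the selected parts and hash them.
    parts = {"name": name, "url": url, "tvg_id": tvg_id, "m3u_id": m3u_id}
    selected = [str(parts[k]) for k in keys if k in parts]
    return hashlib.md5("|".join(selected).encode()).hexdigest()

keys = ["name", "url", "tvg_id", "m3u_id"]
a = demo_hash("News HD", "http://example.invalid/1.ts", "news.hd", keys, m3u_id=1)
b = demo_hash("News HD", "http://example.invalid/1.ts", "news.hd", keys, m3u_id=2)
assert a != b  # same stream metadata, different account, distinct hashes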
@@ -229,6 +229,17 @@ def match_channels_to_epg(channels_data, epg_data, region_code=None, use_ml=True
            logger.info(f"Channel {chan['id']} '{chan['name']}' => EPG found by secondary tvg_id={chan['tvg_id']}")
            continue

        # Step 2.5: Exact Gracenote ID match
        normalized_gracenote_id = chan.get("gracenote_id", "")
        if normalized_gracenote_id:
            epg_by_gracenote_id = next((epg for epg in epg_data if epg["tvg_id"] == normalized_gracenote_id), None)
            if epg_by_gracenote_id:
                chan["epg_data_id"] = epg_by_gracenote_id["id"]
                channels_to_update.append(chan)
                matched_channels.append((chan['id'], fallback_name, f"gracenote:{epg_by_gracenote_id['tvg_id']}"))
                logger.info(f"Channel {chan['id']} '{fallback_name}' => EPG found by exact gracenote_id={normalized_gracenote_id}")
                continue

        # Step 3: Name-based fuzzy matching
        if not chan["norm_chan"]:
            logger.debug(f"Channel {chan['id']} '{chan['name']}' => empty after normalization, skipping")

@@ -431,11 +442,14 @@ def match_epg_channels():
    channels_data = []
    for channel in channels_without_epg:
        normalized_tvg_id = channel.tvg_id.strip().lower() if channel.tvg_id else ""
        normalized_gracenote_id = channel.tvc_guide_stationid.strip().lower() if channel.tvc_guide_stationid else ""
        channels_data.append({
            "id": channel.id,
            "name": channel.name,
            "tvg_id": normalized_tvg_id,
            "original_tvg_id": channel.tvg_id,
            "gracenote_id": normalized_gracenote_id,
            "original_gracenote_id": channel.tvc_guide_stationid,
            "fallback_name": normalized_tvg_id if normalized_tvg_id else channel.name,
            "norm_chan": normalize_name(channel.name) # Always use channel name for fuzzy matching!
        })

@@ -575,11 +589,14 @@ def match_selected_channels_epg(channel_ids):
    channels_data = []
    for channel in channels_without_epg:
        normalized_tvg_id = channel.tvg_id.strip().lower() if channel.tvg_id else ""
        normalized_gracenote_id = channel.tvc_guide_stationid.strip().lower() if channel.tvc_guide_stationid else ""
        channels_data.append({
            "id": channel.id,
            "name": channel.name,
            "tvg_id": normalized_tvg_id,
            "original_tvg_id": channel.tvg_id,
            "gracenote_id": normalized_gracenote_id,
            "original_gracenote_id": channel.tvc_guide_stationid,
            "fallback_name": normalized_tvg_id if normalized_tvg_id else channel.name,
            "norm_chan": normalize_name(channel.name)
        })

@@ -696,16 +713,19 @@ def match_single_channel_epg(channel_id):

    # Prepare single channel data for matching (same format as bulk matching)
    normalized_tvg_id = channel.tvg_id.strip().lower() if channel.tvg_id else ""
    normalized_gracenote_id = channel.tvc_guide_stationid.strip().lower() if channel.tvc_guide_stationid else ""
    channel_data = {
        "id": channel.id,
        "name": channel.name,
        "tvg_id": normalized_tvg_id,
        "original_tvg_id": channel.tvg_id,
        "gracenote_id": normalized_gracenote_id,
        "original_gracenote_id": channel.tvc_guide_stationid,
        "fallback_name": normalized_tvg_id if normalized_tvg_id else channel.name,
        "norm_chan": normalize_name(channel.name) # Always use channel name for fuzzy matching!
    }

    logger.info(f"Channel data prepared: name='{channel.name}', tvg_id='{normalized_tvg_id}', norm_chan='{channel_data['norm_chan']}'")
    logger.info(f"Channel data prepared: name='{channel.name}', tvg_id='{normalized_tvg_id}', gracenote_id='{normalized_gracenote_id}', norm_chan='{channel_data['norm_chan']}'")

    # Debug: Test what the normalization does to preserve call signs
    test_name = "NBC 11 (KVLY) - Fargo" # Example for testing

@@ -2446,7 +2466,9 @@ def bulk_create_channels_from_streams(self, stream_ids, channel_profile_ids=None

    for i in range(0, total_streams, batch_size):
        batch_stream_ids = stream_ids[i:i + batch_size]
        batch_streams = Stream.objects.filter(id__in=batch_stream_ids)
        # Fetch streams and preserve the order from batch_stream_ids
        batch_streams_dict = {stream.id: stream for stream in Stream.objects.filter(id__in=batch_stream_ids)}
        batch_streams = [batch_streams_dict[stream_id] for stream_id in batch_stream_ids if stream_id in batch_streams_dict]

        # Send progress update
        send_websocket_update('updates', 'update', {

@@ -2899,3 +2921,98 @@ def set_channels_logos_from_epg(self, channel_ids):
            'error': str(e)
        })
        raise


@shared_task(bind=True)
def set_channels_tvg_ids_from_epg(self, channel_ids):
    """
    Celery task to set channel TVG-IDs from EPG data for multiple channels
    """
    from core.utils import send_websocket_update

    task_id = self.request.id
    total_channels = len(channel_ids)
    updated_count = 0
    errors = []

    try:
        logger.info(f"Starting EPG TVG-ID setting task for {total_channels} channels")

        # Send initial progress
        send_websocket_update('updates', 'update', {
            'type': 'epg_tvg_id_setting_progress',
            'task_id': task_id,
            'progress': 0,
            'total': total_channels,
            'status': 'running',
            'message': 'Starting EPG TVG-ID setting...'
        })

        batch_size = 100
        for i in range(0, total_channels, batch_size):
            batch_ids = channel_ids[i:i + batch_size]
            batch_updates = []

            # Get channels and their EPG data
            channels = Channel.objects.filter(id__in=batch_ids).select_related('epg_data')

            for channel in channels:
                try:
                    if channel.epg_data and channel.epg_data.tvg_id:
                        if channel.tvg_id != channel.epg_data.tvg_id:
                            channel.tvg_id = channel.epg_data.tvg_id
                            batch_updates.append(channel)
                            updated_count += 1
                except Exception as e:
                    errors.append(f"Channel {channel.id}: {str(e)}")
                    logger.error(f"Error processing channel {channel.id}: {e}")

            # Bulk update the batch
            if batch_updates:
                Channel.objects.bulk_update(batch_updates, ['tvg_id'])

            # Send progress update
            progress = min(i + batch_size, total_channels)
            send_websocket_update('updates', 'update', {
                'type': 'epg_tvg_id_setting_progress',
                'task_id': task_id,
                'progress': progress,
                'total': total_channels,
                'status': 'running',
                'message': f'Updated {updated_count} channel TVG-IDs...',
                'updated_count': updated_count
            })

        # Send completion notification
        send_websocket_update('updates', 'update', {
            'type': 'epg_tvg_id_setting_progress',
            'task_id': task_id,
            'progress': total_channels,
            'total': total_channels,
            'status': 'completed',
            'message': f'Successfully updated {updated_count} channel TVG-IDs from EPG data',
            'updated_count': updated_count,
            'error_count': len(errors),
            'errors': errors
        })

        logger.info(f"EPG TVG-ID setting task completed. Updated {updated_count} channels")
        return {
            'status': 'completed',
            'updated_count': updated_count,
            'error_count': len(errors),
            'errors': errors
        }

    except Exception as e:
        logger.error(f"EPG TVG-ID setting task failed: {e}")
        send_websocket_update('updates', 'update', {
            'type': 'epg_tvg_id_setting_progress',
            'task_id': task_id,
            'progress': 0,
            'total': total_channels,
            'status': 'failed',
            'message': f'Task failed: {str(e)}',
            'error': str(e)
        })
        raise
apps/epg/migrations/0017_alter_epgsource_url.py (normal file, 18 changes)

@@ -0,0 +1,18 @@
# Generated by Django 5.2.4 on 2025-09-24 21:07

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('epg', '0016_epgdata_icon_url'),
    ]

    operations = [
        migrations.AlterField(
            model_name='epgsource',
            name='url',
            field=models.URLField(blank=True, max_length=1000, null=True),
        ),
    ]

@@ -28,7 +28,7 @@ class EPGSource(models.Model):
    name = models.CharField(max_length=255, unique=True)
    source_type = models.CharField(max_length=20, choices=SOURCE_TYPE_CHOICES)
    url = models.URLField(blank=True, null=True) # For XMLTV
    url = models.URLField(max_length=1000, blank=True, null=True) # For XMLTV
    api_key = models.CharField(max_length=255, blank=True, null=True) # For Schedules Direct
    is_active = models.BooleanField(default=True)
    file_path = models.CharField(max_length=1024, blank=True, null=True)
@@ -28,6 +28,23 @@ from core.utils import acquire_task_lock, release_task_lock, send_websocket_upda

logger = logging.getLogger(__name__)


def validate_icon_url_fast(icon_url, max_length=None):
    """
    Fast validation for icon URLs during parsing.
    Returns None if URL is too long, original URL otherwise.
    If max_length is None, gets it dynamically from the EPGData model field.
    """
    if max_length is None:
        # Get max_length dynamically from the model field
        max_length = EPGData._meta.get_field('icon_url').max_length

    if icon_url and len(icon_url) > max_length:
        logger.warning(f"Icon URL too long ({len(icon_url)} > {max_length}), skipping: {icon_url[:100]}...")
        return None
    return icon_url


MAX_EXTRACT_CHUNK_SIZE = 65536 # 64kb (base2)


@@ -831,6 +848,7 @@ def parse_channels_only(source):
    processed_channels = 0
    batch_size = 500 # Process in batches to limit memory usage
    progress = 0 # Initialize progress variable here
    icon_url_max_length = EPGData._meta.get_field('icon_url').max_length # Get max length for icon_url field

    # Track memory at key points
    if process:

@@ -859,7 +877,7 @@ def parse_channels_only(source):

    # Change iterparse to look for both channel and programme elements
    logger.debug(f"Creating iterparse context for channels and programmes")
    channel_parser = etree.iterparse(source_file, events=('end',), tag=('channel', 'programme'), remove_blank_text=True)
    channel_parser = etree.iterparse(source_file, events=('end',), tag=('channel', 'programme'), remove_blank_text=True, recover=True)
    if process:
        logger.debug(f"[parse_channels_only] Memory after creating iterparse: {process.memory_info().rss / 1024 / 1024:.2f} MB")

@@ -875,10 +893,11 @@ def parse_channels_only(source):
    display_name = None
    icon_url = None
    for child in elem:
        if child.tag == 'display-name' and child.text:
        if display_name is None and child.tag == 'display-name' and child.text:
            display_name = child.text.strip()
        elif child.tag == 'icon':
            icon_url = child.get('src', '').strip()
            raw_icon_url = child.get('src', '').strip()
            icon_url = validate_icon_url_fast(raw_icon_url, icon_url_max_length)
        if display_name and icon_url:
            break # No need to continue if we have both

@@ -1223,7 +1242,7 @@ def parse_programs_for_tvg_id(epg_id):
    source_file = open(file_path, 'rb')

    # Stream parse the file using lxml's iterparse
    program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True)
    program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True, recover=True)

    for _, elem in program_parser:
        if elem.get('channel') == epg.tvg_id:
@@ -774,7 +774,7 @@ def process_xc_category_direct(account_id, batch, groups, hash_keys):
            group_title = group_name

            stream_hash = Stream.generate_hash_key(
                name, url, tvg_id, hash_keys
                name, url, tvg_id, hash_keys, m3u_id=account_id
            )
            stream_props = {
                "name": name,

@@ -903,6 +903,8 @@ def process_m3u_batch_direct(account_id, batch, groups, hash_keys):
    stream_hashes = {}

    logger.debug(f"Processing batch of {len(batch)} for M3U account {account_id}")
    if compiled_filters:
        logger.debug(f"Using compiled filters: {[f[1].regex_pattern for f in compiled_filters]}")
    for stream_info in batch:
        try:
            name, url = stream_info["name"], stream_info["url"]

@@ -912,10 +914,10 @@ def process_m3u_batch_direct(account_id, batch, groups, hash_keys):
            group_title = get_case_insensitive_attr(
                stream_info["attributes"], "group-title", "Default Group"
            )

            logger.debug(f"Processing stream: {name} - {url} in group {group_title}")
            include = True
            for pattern, filter in compiled_filters:
                logger.debug(f"Checking filter patterh {pattern}")
                logger.trace(f"Checking filter pattern {pattern}")
                target = name
                if filter.filter_type == "url":
                    target = url

@@ -940,7 +942,7 @@ def process_m3u_batch_direct(account_id, batch, groups, hash_keys):
                )
                continue

            stream_hash = Stream.generate_hash_key(name, url, tvg_id, hash_keys)
            stream_hash = Stream.generate_hash_key(name, url, tvg_id, hash_keys, m3u_id=account_id)
            stream_props = {
                "name": name,
                "url": url,

@@ -2071,13 +2073,13 @@ def get_transformed_credentials(account, profile=None):
    base_url = account.server_url
    base_username = account.username
    base_password = account.password # Build a complete URL with credentials (similar to how IPTV URLs are structured)
    # Format: http://server.com:port/username/password/rest_of_path
    # Format: http://server.com:port/live/username/password/1234.ts
    if base_url and base_username and base_password:
        # Remove trailing slash from server URL if present
        clean_server_url = base_url.rstrip('/')

        # Build the complete URL with embedded credentials
        complete_url = f"{clean_server_url}/{base_username}/{base_password}/"
        complete_url = f"{clean_server_url}/live/{base_username}/{base_password}/1234.ts"
        logger.debug(f"Built complete URL: {complete_url}")

        # Apply profile-specific transformations if profile is provided

@@ -2091,14 +2093,14 @@ def get_transformed_credentials(account, profile=None):
            logger.info(f"Transformed complete URL: {complete_url} -> {transformed_complete_url}")

            # Extract components from the transformed URL
            # Pattern: http://server.com:port/username/password/
            # Pattern: http://server.com:port/live/username/password/1234.ts
            parsed_url = urllib.parse.urlparse(transformed_complete_url)
            path_parts = [part for part in parsed_url.path.split('/') if part]

            if len(path_parts) >= 2:
                # Extract username and password from path
                transformed_username = path_parts[0]
                transformed_password = path_parts[1]
                transformed_username = path_parts[1]
                transformed_password = path_parts[2]

                # Rebuild server URL without the username/password path
                transformed_url = f"{parsed_url.scheme}://{parsed_url.netloc}"
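The credential hunks above change the synthetic URL shape from server/username/password/ to server/live/username/password/1234.ts, which is why the username and password now come from path_parts[1] and path_parts[2] instead of [0] and [1]. A small standalone sketch of that parsing; the host and credentials are made up for illustration:

import urllib.parse

# Assumed URL shape from the comment in the hunk; values are illustrative only.
complete_url = "http://iptv.example.invalid:8080/live/alice/s3cret/1234.ts"

parsed = urllib.parse.urlparse(complete_url)
path_parts = [part for part in parsed.path.split("/") if part]
# path_parts == ["live", "alice", "s3cret", "1234.ts"]

username = path_parts[1]   # was path_parts[0] with the old /username/password/ shape
password = path_parts[2]   # was path_parts[1]
server = f"{parsed.scheme}://{parsed.netloc}"
print(server, username, password)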
@@ -62,7 +62,7 @@ def generate_m3u(request, profile_name=None, user=None):
            channel_profiles
        )

        channels = Channel.objects.filter(**filters).order_by("channel_number")
        channels = Channel.objects.filter(**filters).distinct().order_by("channel_number")
    else:
        channels = Channel.objects.filter(user_level__lte=user.user_level).order_by(
            "channel_number"

@@ -95,7 +95,22 @@ def generate_m3u(request, profile_name=None, user=None):
    # Options: 'channel_number' (default), 'tvg_id', 'gracenote'
    tvg_id_source = request.GET.get('tvg_id_source', 'channel_number').lower()

    m3u_content = "#EXTM3U\n"
    # Build EPG URL with query parameters if needed
    epg_base_url = build_absolute_uri_with_port(request, reverse('output:epg_endpoint', args=[profile_name]) if profile_name else reverse('output:epg_endpoint'))

    # Optionally preserve certain query parameters
    preserved_params = ['tvg_id_source', 'cachedlogos', 'days']
    query_params = {k: v for k, v in request.GET.items() if k in preserved_params}
    if query_params:
        from urllib.parse import urlencode
        epg_url = f"{epg_base_url}?{urlencode(query_params)}"
    else:
        epg_url = epg_base_url

    # Add x-tvg-url and url-tvg attribute for EPG URL
    m3u_content = f'#EXTM3U x-tvg-url="{epg_url}" url-tvg="{epg_url}"\n'

    # Start building M3U content
    for channel in channels:
        group_title = channel.channel_group.name if channel.channel_group else "Default"
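A worked illustration of the new M3U header logic above, using made-up values; in the view the base URL actually comes from reverse('output:epg_endpoint') plus build_absolute_uri_with_port(), not a hard-coded string:

from urllib.parse import urlencode

# Illustrative stand-ins for the request and the resolved EPG endpoint.
epg_base_url = "http://dispatcharr.example.invalid:9191/output/epg"
preserved_params = ['tvg_id_source', 'cachedlogos', 'days']
request_get = {"tvg_id_source": "gracenote", "days": "3", "unrelated": "x"}

query_params = {k: v for k, v in request_get.items() if k in preserved_params}
epg_url = f"{epg_base_url}?{urlencode(query_params)}" if query_params else epg_base_url
m3u_content = f'#EXTM3U x-tvg-url="{epg_url}" url-tvg="{epg_url}"\n'
print(m3u_content)
# -> #EXTM3U x-tvg-url="http://dispatcharr.example.invalid:9191/output/epg?tvg_id_source=gracenote&days=3" url-tvg="..." (same URL repeated)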
@@ -311,7 +326,7 @@ def generate_epg(request, profile_name=None, user=None):
            channel_profiles
        )

        channels = Channel.objects.filter(**filters).order_by("channel_number")
        channels = Channel.objects.filter(**filters).distinct().order_by("channel_number")
    else:
        channels = Channel.objects.filter(user_level__lte=user.user_level).order_by(
            "channel_number"

@@ -895,7 +910,7 @@ def xc_get_live_streams(request, user, category_id=None):
        if category_id is not None:
            filters["channel_group__id"] = category_id

        channels = Channel.objects.filter(**filters).order_by("channel_number")
        channels = Channel.objects.filter(**filters).distinct().order_by("channel_number")
    else:
        if not category_id:
            channels = Channel.objects.filter(user_level__lte=user.user_level).order_by("channel_number")

@@ -951,7 +966,10 @@ def xc_get_epg(request, user, short=False):
            channel_profiles = user.channel_profiles.all()
            filters["channelprofilemembership__channel_profile__in"] = channel_profiles

        channel = get_object_or_404(Channel, **filters)
        # Use filter().first() with distinct instead of get_object_or_404 to handle multiple profile memberships
        channel = Channel.objects.filter(**filters).distinct().first()
        if not channel:
            raise Http404()
    else:
        channel = get_object_or_404(Channel, id=channel_id)
@@ -127,9 +127,9 @@ def stream_ts(request, channel_id):
        )
        ChannelService.stop_channel(channel_id)

    # Use max retry attempts and connection timeout from config
    max_retries = ConfigHelper.max_retries()
    retry_timeout = ConfigHelper.connection_timeout()
    # Use fixed retry interval and timeout
    retry_timeout = 1.5 # 1.5 seconds total timeout
    retry_interval = 0.1 # 100ms between attempts
    wait_start_time = time.time()

    stream_url = None

@@ -137,16 +137,18 @@ def stream_ts(request, channel_id):
    transcode = False
    profile_value = None
    error_reason = None
    attempt = 0

    # Try to get a stream with configured retries
    for attempt in range(max_retries):
    # Try to get a stream with fixed interval retries
    while time.time() - wait_start_time < retry_timeout:
        attempt += 1
        stream_url, stream_user_agent, transcode, profile_value = (
            generate_stream_url(channel_id)
        )

        if stream_url is not None:
            logger.info(
                f"[{client_id}] Successfully obtained stream for channel {channel_id}"
                f"[{client_id}] Successfully obtained stream for channel {channel_id} after {attempt} attempts"
            )
            break

@@ -158,21 +160,15 @@ def stream_ts(request, channel_id):
            )
            break

        # Don't exceed the overall connection timeout
        if time.time() - wait_start_time > retry_timeout:
            logger.warning(
                f"[{client_id}] Connection wait timeout exceeded ({retry_timeout}s)"
        # Wait 100ms before retrying
        elapsed_time = time.time() - wait_start_time
        remaining_time = retry_timeout - elapsed_time
        if remaining_time > retry_interval:
            logger.info(
                f"[{client_id}] Waiting {retry_interval*1000:.0f}ms for a connection to become available (attempt {attempt}, {remaining_time:.1f}s remaining)"
            )
            break

        # Wait before retrying (using exponential backoff with a cap)
        wait_time = min(0.5 * (2**attempt), 2.0) # Caps at 2 seconds
        logger.info(
            f"[{client_id}] Waiting {wait_time:.1f}s for a connection to become available (attempt {attempt+1}/{max_retries})"
        )
        gevent.sleep(
            wait_time
        ) # FIXED: Using gevent.sleep instead of time.sleep
            gevent.sleep(retry_interval)
            retry_interval += 0.025 # Increase wait time by 25ms for next attempt

    if stream_url is None:
        # Make sure to release any stream locks that might have been acquired
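The retry hunks above replace config-driven exponential backoff with a fixed 1.5 s budget and a 100 ms starting interval that grows by 25 ms per attempt. A hedged sketch of that schedule with a simulated clock, just to show roughly how many attempts fit in the budget (the real code calls generate_stream_url() each pass and sleeps with gevent.sleep()):

# Simulated schedule only; assumes attempts themselves take no time.
retry_timeout = 1.5    # seconds, total budget
retry_interval = 0.1   # 100 ms initial wait
elapsed = 0.0
attempt = 0

while elapsed < retry_timeout:
    attempt += 1
    # ... attempt to obtain a stream URL here ...
    remaining = retry_timeout - elapsed
    if remaining <= retry_interval:
        break                      # not enough budget left for another wait
    elapsed += retry_interval      # stand-in for gevent.sleep(retry_interval)
    retry_interval += 0.025        # grow the wait by 25 ms each attempt

print(attempt)  # 8 attempts in this idealized trace; fewer in practice, since each attempt also takes time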
|
|
@ -540,11 +540,9 @@ class RedisBackedVODConnection:
|
|||
}
|
||||
return {}
|
||||
|
||||
def cleanup(self, connection_manager=None):
|
||||
"""Clean up local resources and Redis state"""
|
||||
# Get connection state before cleanup to handle profile decrementing
|
||||
state = self._get_connection_state()
|
||||
|
||||
def cleanup(self, connection_manager=None, current_worker_id=None):
|
||||
"""Smart cleanup based on worker ownership and active streams"""
|
||||
# Always clean up local resources first
|
||||
if self.local_response:
|
||||
self.local_response.close()
|
||||
self.local_response = None
|
||||
|
|
@ -552,38 +550,72 @@ class RedisBackedVODConnection:
|
|||
self.local_session.close()
|
||||
self.local_session = None
|
||||
|
||||
# Remove from Redis
|
||||
if self.redis_client:
|
||||
try:
|
||||
# Use pipeline for atomic cleanup operations
|
||||
pipe = self.redis_client.pipeline()
|
||||
# Get current connection state to check ownership and active streams
|
||||
state = self._get_connection_state()
|
||||
|
||||
# 1. Remove main connection state (now contains consolidated data)
|
||||
pipe.delete(self.connection_key)
|
||||
if not state:
|
||||
logger.info(f"[{self.session_id}] No connection state found - local cleanup only")
|
||||
return
|
||||
|
||||
# 2. Remove distributed lock
|
||||
pipe.delete(self.lock_key)
|
||||
# Check if there are active streams
|
||||
if state.active_streams > 0:
|
||||
# There are active streams - check ownership
|
||||
if current_worker_id and state.worker_id == current_worker_id:
|
||||
logger.info(f"[{self.session_id}] Active streams present ({state.active_streams}) and we own them - local cleanup only")
|
||||
else:
|
||||
logger.info(f"[{self.session_id}] Active streams present ({state.active_streams}) but owned by worker {state.worker_id} - local cleanup only")
|
||||
return
|
||||
|
||||
# Execute all cleanup operations
|
||||
pipe.execute()
|
||||
# No active streams - we can clean up Redis state
|
||||
if not self.redis_client:
|
||||
logger.info(f"[{self.session_id}] No Redis client - local cleanup only")
|
||||
return
|
||||
|
||||
logger.info(f"[{self.session_id}] Cleaned up all Redis keys (consolidated connection state, locks)")
|
||||
# Acquire lock and do final check before cleanup to prevent race conditions
|
||||
if not self._acquire_lock():
|
||||
logger.warning(f"[{self.session_id}] Could not acquire lock for cleanup - skipping")
|
||||
return
|
||||
|
||||
# Decrement profile connections if we have the state and connection manager
|
||||
if state and state.m3u_profile_id and connection_manager:
|
||||
logger.info(f"[{self.session_id}] Decrementing profile connection count for profile {state.m3u_profile_id}")
|
||||
connection_manager._decrement_profile_connections(state.m3u_profile_id)
|
||||
logger.info(f"[{self.session_id}] Profile connection count decremented for profile {state.m3u_profile_id}")
|
||||
else:
|
||||
if not state:
|
||||
logger.warning(f"[{self.session_id}] No connection state found during cleanup - cannot decrement profile connections")
|
||||
elif not state.m3u_profile_id:
|
||||
logger.warning(f"[{self.session_id}] No profile ID in connection state - cannot decrement profile connections")
|
||||
elif not connection_manager:
|
||||
logger.warning(f"[{self.session_id}] No connection manager provided - cannot decrement profile connections")
|
||||
try:
|
||||
# Re-check active streams with lock held to prevent race conditions
|
||||
current_state = self._get_connection_state()
|
||||
if not current_state:
|
||||
logger.info(f"[{self.session_id}] Connection state no longer exists - cleanup already done")
|
||||
return
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"[{self.session_id}] Error cleaning up Redis state: {e}")
|
||||
if current_state.active_streams > 0:
|
||||
logger.info(f"[{self.session_id}] Active streams now present ({current_state.active_streams}) - skipping cleanup")
|
||||
return
|
||||
|
||||
# Use pipeline for atomic cleanup operations
|
||||
pipe = self.redis_client.pipeline()
|
||||
|
||||
# 1. Remove main connection state (contains consolidated data)
|
||||
pipe.delete(self.connection_key)
|
||||
|
||||
# 2. Remove distributed lock (will be released below anyway)
|
||||
pipe.delete(self.lock_key)
|
||||
|
||||
# Execute all cleanup operations
|
||||
pipe.execute()
|
||||
|
||||
logger.info(f"[{self.session_id}] Cleaned up Redis keys (verified no active streams)")
|
||||
|
||||
# Decrement profile connections if we have the state and connection manager
|
||||
if state.m3u_profile_id and connection_manager:
|
||||
connection_manager._decrement_profile_connections(state.m3u_profile_id)
|
||||
logger.info(f"[{self.session_id}] Profile connection count decremented for profile {state.m3u_profile_id}")
|
||||
else:
|
||||
if not state.m3u_profile_id:
|
||||
logger.warning(f"[{self.session_id}] No profile ID in connection state - cannot decrement profile connections")
|
||||
elif not connection_manager:
|
||||
logger.warning(f"[{self.session_id}] No connection manager provided - cannot decrement profile connections")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"[{self.session_id}] Error cleaning up Redis state: {e}")
|
||||
finally:
|
||||
# Always release the lock
|
||||
self._release_lock()
|
||||
|
||||
|
||||
# Modify the VODConnectionManager to use Redis-backed connections
|
||||
|
|
@ -694,6 +726,15 @@ class MultiWorkerVODConnectionManager:
|
|||
logger.info(f"[{client_id}] Worker {self.worker_id} - Found matching idle session: {matching_session_id}")
|
||||
effective_session_id = matching_session_id
|
||||
client_id = matching_session_id # Update client_id for logging consistency
|
||||
|
||||
# IMMEDIATELY reserve this session by incrementing active streams to prevent cleanup
|
||||
temp_connection = RedisBackedVODConnection(effective_session_id, self.redis_client)
|
||||
if temp_connection.increment_active_streams():
|
||||
logger.info(f"[{client_id}] Reserved idle session - incremented active streams")
|
||||
else:
|
||||
logger.warning(f"[{client_id}] Failed to reserve idle session - falling back to new session")
|
||||
effective_session_id = session_id
|
||||
matching_session_id = None # Clear the match so we create a new connection
|
||||
else:
|
||||
logger.info(f"[{client_id}] Worker {self.worker_id} - No matching idle session found, using new session")
|
||||
effective_session_id = session_id
|
||||
|
|
@ -761,14 +802,20 @@ class MultiWorkerVODConnectionManager:
|
|||
else:
|
||||
logger.info(f"[{client_id}] Worker {self.worker_id} - Using existing Redis-backed connection")
|
||||
|
||||
# Update session activity in consolidated connection state
|
||||
# Transfer ownership to current worker and update session activity
|
||||
if redis_connection._acquire_lock():
|
||||
try:
|
||||
state = redis_connection._get_connection_state()
|
||||
if state:
|
||||
old_worker = state.worker_id
|
||||
state.last_activity = time.time()
|
||||
state.worker_id = self.worker_id # Track which worker last accessed this
|
||||
state.worker_id = self.worker_id # Transfer ownership to current worker
|
||||
redis_connection._save_connection_state(state)
|
||||
|
||||
if old_worker != self.worker_id:
|
||||
logger.info(f"[{client_id}] Ownership transferred from worker {old_worker} to {self.worker_id}")
|
||||
else:
|
||||
logger.debug(f"[{client_id}] Worker {self.worker_id} retaining ownership")
|
||||
finally:
|
||||
redis_connection._release_lock()
|
||||
|
||||
|
|
@ -788,8 +835,13 @@ class MultiWorkerVODConnectionManager:
|
|||
try:
|
||||
logger.info(f"[{client_id}] Worker {self.worker_id} - Starting Redis-backed stream")
|
||||
|
||||
# Increment active streams
|
||||
redis_connection.increment_active_streams()
|
||||
# Increment active streams (unless we already did it for session reuse)
|
||||
if not matching_session_id:
|
||||
# New session - increment active streams
|
||||
redis_connection.increment_active_streams()
|
||||
else:
|
||||
# Reused session - we already incremented when reserving the session
|
||||
logger.debug(f"[{client_id}] Using pre-reserved session - active streams already incremented")
|
||||
|
||||
bytes_sent = 0
|
||||
chunk_count = 0
|
||||
|
|
@ -819,13 +871,13 @@ class MultiWorkerVODConnectionManager:
|
|||
redis_connection.decrement_active_streams()
|
||||
decremented = True
|
||||
|
||||
# Schedule cleanup if no active streams after normal completion
|
||||
# Schedule smart cleanup if no active streams after normal completion
|
||||
if not redis_connection.has_active_streams():
|
||||
def delayed_cleanup():
|
||||
time.sleep(1) # Wait 1 second
|
||||
if not redis_connection.has_active_streams():
|
||||
logger.info(f"[{client_id}] Worker {self.worker_id} - Cleaning up idle Redis connection after normal completion")
|
||||
redis_connection.cleanup(connection_manager=self)
|
||||
# Smart cleanup: check active streams and ownership
|
||||
logger.info(f"[{client_id}] Worker {self.worker_id} - Checking for smart cleanup after normal completion")
|
||||
redis_connection.cleanup(connection_manager=self, current_worker_id=self.worker_id)
|
||||
|
||||
import threading
|
||||
cleanup_thread = threading.Thread(target=delayed_cleanup)
|
||||
|
|
@ -838,13 +890,13 @@ class MultiWorkerVODConnectionManager:
|
|||
redis_connection.decrement_active_streams()
|
||||
decremented = True
|
||||
|
||||
# Schedule cleanup if no active streams
|
||||
# Schedule smart cleanup if no active streams
|
||||
if not redis_connection.has_active_streams():
|
||||
def delayed_cleanup():
|
||||
time.sleep(1) # Wait 1 second
|
||||
if not redis_connection.has_active_streams():
|
||||
logger.info(f"[{client_id}] Worker {self.worker_id} - Cleaning up idle Redis connection")
|
||||
redis_connection.cleanup(connection_manager=self)
|
||||
# Smart cleanup: check active streams and ownership
|
||||
logger.info(f"[{client_id}] Worker {self.worker_id} - Checking for smart cleanup after client disconnect")
|
||||
redis_connection.cleanup(connection_manager=self, current_worker_id=self.worker_id)
|
||||
|
||||
import threading
|
||||
cleanup_thread = threading.Thread(target=delayed_cleanup)
|
||||
|
|
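Both completion paths above replace the plain delayed cleanup with a "smart" cleanup that passes the current worker id, so the connection is only torn down on behalf of its owner and only if no stream reattached during the grace period. A runnable sketch of that delayed re-check, with a fake connection object in place of RedisBackedVODConnection:

```python
import threading
import time

def schedule_smart_cleanup(connection, current_worker_id, delay=1.0):
    def delayed_cleanup():
        time.sleep(delay)  # grace period for an immediate reconnect
        if not connection.has_active_streams():
            connection.cleanup(current_worker_id=current_worker_id)

    t = threading.Thread(target=delayed_cleanup, daemon=True)
    t.start()
    return t

class FakeConnection:
    """Stand-in so the sketch runs without Redis or a connection manager."""
    def __init__(self):
        self.active = 0
        self.cleaned = False
    def has_active_streams(self):
        return self.active > 0
    def cleanup(self, current_worker_id=None):
        self.cleaned = True

conn = FakeConnection()
schedule_smart_cleanup(conn, "worker-1", delay=0.1).join()
print(conn.cleaned)  # True: nothing reattached during the grace period
```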
@ -856,7 +908,8 @@ class MultiWorkerVODConnectionManager:
|
|||
if not decremented:
|
||||
redis_connection.decrement_active_streams()
|
||||
decremented = True
|
||||
redis_connection.cleanup(connection_manager=self)
|
||||
# Smart cleanup on error - immediate cleanup since we're in error state
|
||||
redis_connection.cleanup(connection_manager=self, current_worker_id=self.worker_id)
|
||||
yield b"Error: Stream interrupted"
|
||||
|
||||
finally:
|
||||
|
|
|
|||
|
|
@ -176,14 +176,15 @@ class VODStreamView(View):
|
|||
logger.error(f"[VOD-ERROR] No stream URL available for {content_type} {content_id}")
|
||||
return HttpResponse("No stream URL available", status=503)
|
||||
|
||||
# Get M3U profile
|
||||
m3u_profile = self._get_m3u_profile(m3u_account, profile_id)
|
||||
# Get M3U profile (returns profile and current connection count)
|
||||
profile_result = self._get_m3u_profile(m3u_account, profile_id, session_id)
|
||||
|
||||
if not m3u_profile:
|
||||
if not profile_result or not profile_result[0]:
|
||||
logger.error(f"[VOD-ERROR] No suitable M3U profile found for {content_type} {content_id}")
|
||||
return HttpResponse("No available stream", status=503)
|
||||
|
||||
logger.info(f"[VOD-PROFILE] Using M3U profile: {m3u_profile.id} (max_streams: {m3u_profile.max_streams}, current: {m3u_profile.current_viewers})")
|
||||
m3u_profile, current_connections = profile_result
|
||||
logger.info(f"[VOD-PROFILE] Using M3U profile: {m3u_profile.id} (max_streams: {m3u_profile.max_streams}, current: {current_connections})")
|
||||
|
||||
# Connection tracking is handled by the connection manager
|
||||
# Transform URL based on profile
|
||||
|
|
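The GET handler now treats the profile lookup as a `(profile, current_connections)` pair and answers 503 when nothing has a free slot. A short sketch of that calling contract, with a stand-in selector in place of `_get_m3u_profile`:

```python
def resolve_profile_or_503(get_profile, m3u_account, profile_id, session_id):
    profile_result = get_profile(m3u_account, profile_id, session_id)
    if not profile_result or not profile_result[0]:
        return None, (503, "No available stream")  # all profiles at capacity
    m3u_profile, current_connections = profile_result
    return (m3u_profile, current_connections), None

# Stand-in selectors: one free slot, then nothing available.
print(resolve_profile_or_503(lambda *a: ("profile-7", 2), "acct", None, "sess"))
print(resolve_profile_or_503(lambda *a: None, "acct", None, "sess"))
```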
@ -279,11 +280,13 @@ class VODStreamView(View):
|
|||
logger.error(f"[VOD-HEAD] No stream URL available for {content_type} {content_id}")
|
||||
return HttpResponse("No stream URL available", status=503)
|
||||
|
||||
# Get M3U profile
|
||||
m3u_profile = self._get_m3u_profile(m3u_account, profile_id)
|
||||
if not m3u_profile:
|
||||
logger.error(f"[VOD-HEAD] No M3U profile found")
|
||||
return HttpResponse("Profile not found", status=404)
|
||||
# Get M3U profile (returns profile and current connection count)
|
||||
profile_result = self._get_m3u_profile(m3u_account, profile_id, session_id)
|
||||
if not profile_result or not profile_result[0]:
|
||||
logger.error(f"[VOD-HEAD] No M3U profile found or all profiles at capacity")
|
||||
return HttpResponse("No available stream", status=503)
|
||||
|
||||
m3u_profile, current_connections = profile_result
|
||||
|
||||
# Transform URL if needed
|
||||
final_stream_url = self._transform_url(stream_url, m3u_profile)
|
||||
|
|
@ -517,10 +520,63 @@ class VODStreamView(View):
|
|||
logger.error(f"[VOD-URL] Error getting stream URL from relation: {e}", exc_info=True)
|
||||
return None
|
||||
|
||||
def _get_m3u_profile(self, m3u_account, profile_id):
|
||||
"""Get appropriate M3U profile for streaming"""
|
||||
def _get_m3u_profile(self, m3u_account, profile_id, session_id=None):
|
||||
"""Get appropriate M3U profile for streaming using Redis-based viewer counts
|
||||
|
||||
Args:
|
||||
m3u_account: M3UAccount instance
|
||||
profile_id: Optional specific profile ID requested
|
||||
session_id: Optional session ID to check for existing connections
|
||||
|
||||
Returns:
|
||||
tuple: (M3UAccountProfile, current_connections) or None if no profile found
|
||||
"""
|
||||
try:
|
||||
# If specific profile requested, try to use it
|
||||
from core.utils import RedisClient
|
||||
redis_client = RedisClient.get_client()
|
||||
|
||||
if not redis_client:
|
||||
logger.warning("Redis not available, falling back to default profile")
|
||||
default_profile = M3UAccountProfile.objects.filter(
|
||||
m3u_account=m3u_account,
|
||||
is_active=True,
|
||||
is_default=True
|
||||
).first()
|
||||
return (default_profile, 0) if default_profile else None
|
||||
|
||||
# Check if this session already has an active connection
|
||||
if session_id:
|
||||
persistent_connection_key = f"vod_persistent_connection:{session_id}"
|
||||
connection_data = redis_client.hgetall(persistent_connection_key)
|
||||
|
||||
if connection_data:
|
||||
# Decode Redis hash data
|
||||
decoded_data = {}
|
||||
for k, v in connection_data.items():
|
||||
k_str = k.decode('utf-8') if isinstance(k, bytes) else k
|
||||
v_str = v.decode('utf-8') if isinstance(v, bytes) else v
|
||||
decoded_data[k_str] = v_str
|
||||
|
||||
existing_profile_id = decoded_data.get('m3u_profile_id')
|
||||
if existing_profile_id:
|
||||
try:
|
||||
existing_profile = M3UAccountProfile.objects.get(
|
||||
id=int(existing_profile_id),
|
||||
m3u_account=m3u_account,
|
||||
is_active=True
|
||||
)
|
||||
# Get current connections for logging
|
||||
profile_connections_key = f"profile_connections:{existing_profile.id}"
|
||||
current_connections = int(redis_client.get(profile_connections_key) or 0)
|
||||
|
||||
logger.info(f"[PROFILE-SELECTION] Session {session_id} reusing existing profile {existing_profile.id}: {current_connections}/{existing_profile.max_streams} connections")
|
||||
return (existing_profile, current_connections)
|
||||
except (M3UAccountProfile.DoesNotExist, ValueError):
|
||||
logger.warning(f"[PROFILE-SELECTION] Session {session_id} has invalid profile ID {existing_profile_id}, selecting new profile")
|
||||
except Exception as e:
|
||||
logger.warning(f"[PROFILE-SELECTION] Error checking existing profile for session {session_id}: {e}")
|
||||
else:
|
||||
logger.debug(f"[PROFILE-SELECTION] Session {session_id} exists but has no profile ID stored") # If specific profile requested, try to use it
|
||||
if profile_id:
|
||||
try:
|
||||
profile = M3UAccountProfile.objects.get(
|
||||
|
|
@ -528,24 +584,46 @@ class VODStreamView(View):
|
|||
m3u_account=m3u_account,
|
||||
is_active=True
|
||||
)
|
||||
if profile.current_viewers < profile.max_streams or profile.max_streams == 0:
|
||||
return profile
|
||||
except M3UAccountProfile.DoesNotExist:
|
||||
pass
|
||||
# Check Redis-based current connections
|
||||
profile_connections_key = f"profile_connections:{profile.id}"
|
||||
current_connections = int(redis_client.get(profile_connections_key) or 0)
|
||||
|
||||
# Find available profile ordered by current usage (least loaded first)
|
||||
profiles = M3UAccountProfile.objects.filter(
|
||||
if profile.max_streams == 0 or current_connections < profile.max_streams:
|
||||
logger.info(f"[PROFILE-SELECTION] Using requested profile {profile.id}: {current_connections}/{profile.max_streams} connections")
|
||||
return (profile, current_connections)
|
||||
else:
|
||||
logger.warning(f"[PROFILE-SELECTION] Requested profile {profile.id} is at capacity: {current_connections}/{profile.max_streams}")
|
||||
except M3UAccountProfile.DoesNotExist:
|
||||
logger.warning(f"[PROFILE-SELECTION] Requested profile {profile_id} not found")
|
||||
|
||||
# Get active profiles ordered by priority (default first)
|
||||
m3u_profiles = M3UAccountProfile.objects.filter(
|
||||
m3u_account=m3u_account,
|
||||
is_active=True
|
||||
).order_by('current_viewers')
|
||||
)
|
||||
|
||||
default_profile = m3u_profiles.filter(is_default=True).first()
|
||||
if not default_profile:
|
||||
logger.error(f"[PROFILE-SELECTION] No default profile found for M3U account {m3u_account.id}")
|
||||
return None
|
||||
|
||||
# Check profiles in order: default first, then others
|
||||
profiles = [default_profile] + list(m3u_profiles.filter(is_default=False))
|
||||
|
||||
for profile in profiles:
|
||||
# Check if profile has available connection slots
|
||||
if profile.current_viewers < profile.max_streams or profile.max_streams == 0:
|
||||
return profile
|
||||
profile_connections_key = f"profile_connections:{profile.id}"
|
||||
current_connections = int(redis_client.get(profile_connections_key) or 0)
|
||||
|
||||
# Fallback to default profile even if over limit
|
||||
return profiles.filter(is_default=True).first()
|
||||
# Check if profile has available connection slots
|
||||
if profile.max_streams == 0 or current_connections < profile.max_streams:
|
||||
logger.info(f"[PROFILE-SELECTION] Selected profile {profile.id} ({profile.name}): {current_connections}/{profile.max_streams} connections")
|
||||
return (profile, current_connections)
|
||||
else:
|
||||
logger.debug(f"[PROFILE-SELECTION] Profile {profile.id} at capacity: {current_connections}/{profile.max_streams}")
|
||||
|
||||
# All profiles are at capacity - return None to trigger error response
|
||||
logger.error(f"[PROFILE-SELECTION] All profiles at capacity for M3U account {m3u_account.id}, rejecting request")
|
||||
return None
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting M3U profile: {e}")
|
||||
|
|
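Stripped of the Django and logging detail, the selection order above is: reuse the profile already pinned to the session, then the requested profile, then the default profile followed by the rest, each checked against a `profile_connections:<id>` counter, with `max_streams == 0` meaning unlimited and `None` returned when everything is full. A self-contained sketch of that capacity walk, using a dict in place of Redis and simplified profile objects:

```python
from dataclasses import dataclass

@dataclass
class Profile:
    id: int
    name: str
    max_streams: int       # 0 means unlimited
    is_default: bool = False

def select_profile(profiles, counters):
    ordered = sorted(profiles, key=lambda p: not p.is_default)  # default first
    for profile in ordered:
        current = int(counters.get(f"profile_connections:{profile.id}", 0))
        if profile.max_streams == 0 or current < profile.max_streams:
            return profile, current
    return None  # every profile at capacity -> caller answers 503

profiles = [Profile(1, "default", 2, True), Profile(2, "backup", 1)]
counters = {"profile_connections:1": 2, "profile_connections:2": 0}
print(select_profile(profiles, counters))   # backup profile with 0 connections
counters["profile_connections:2"] = 1
print(select_profile(profiles, counters))   # None: everything is full
```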
|
|||
|
|
@ -8,7 +8,7 @@ def preload_core_settings(apps, schema_editor):
|
|||
CoreSettings.objects.create(
|
||||
key=slugify("M3U Hash Key"),
|
||||
name="M3U Hash Key",
|
||||
value="name,url,tvg_id",
|
||||
value="url",
|
||||
)
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
|
|
|||
|
|
@ -513,7 +513,7 @@ def rehash_streams(keys):
|
|||
|
||||
for obj in batch:
|
||||
# Generate new hash
|
||||
new_hash = Stream.generate_hash_key(obj.name, obj.url, obj.tvg_id, keys)
|
||||
new_hash = Stream.generate_hash_key(obj.name, obj.url, obj.tvg_id, keys, m3u_id=obj.m3u_account_id)
|
||||
|
||||
# Check if this hash already exists in our tracking dict or in database
|
||||
if new_hash in hash_keys:
|
||||
|
|
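Together with the migration above (the default hash key drops to just `url`) and the new `m3u_id` option exposed in settings, the rehash task now feeds the owning account id into the hash, so identical URLs carried by two M3U accounts no longer collapse into one stream. An illustrative hash in the spirit of `Stream.generate_hash_key(..., m3u_id=...)`; the real field handling may differ:

```python
import hashlib

def generate_hash_key(name, url, tvg_id, keys, m3u_id=None):
    parts = {"name": name, "url": url, "tvg_id": tvg_id, "m3u_id": m3u_id}
    material = "|".join(str(parts[k]) for k in keys)
    return hashlib.sha256(material.encode("utf-8")).hexdigest()

same_url = "http://example.invalid/stream.ts"  # hypothetical URL
a = generate_hash_key("News", same_url, "news.1", ["url", "m3u_id"], m3u_id=1)
b = generate_hash_key("News", same_url, "news.1", ["url", "m3u_id"], m3u_id=2)
print(a != b)  # True: per-account hashes even though the URL is identical
```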
|
|||
|
|
@ -4,11 +4,15 @@ ARG REPO_NAME=dispatcharr
|
|||
ARG BASE_TAG=base
|
||||
|
||||
# --- Build frontend ---
|
||||
FROM node:20 AS frontend-builder
|
||||
|
||||
FROM node:24 AS frontend-builder
|
||||
|
||||
WORKDIR /app/frontend
|
||||
COPY ./frontend /app/frontend
|
||||
RUN corepack enable && corepack prepare yarn@stable --activate && \
|
||||
yarn install && yarn build && \
|
||||
# remove any node_modules that may have been copied from the host (x86)
|
||||
RUN rm -rf node_modules || true; \
|
||||
npm install --no-audit --progress=false;
|
||||
RUN npm run build; \
|
||||
rm -rf node_modules .cache
|
||||
|
||||
# --- Redeclare build arguments for the next stage ---
|
||||
|
|
|
|||
|
|
@ -9,7 +9,6 @@ services:
|
|||
- 9191:9191
|
||||
volumes:
|
||||
- dispatcharr_data:/data
|
||||
- ./data:/data
|
||||
environment:
|
||||
- DISPATCHARR_ENV=aio
|
||||
- REDIS_HOST=localhost
|
||||
|
|
|
|||
|
|
@ -53,7 +53,7 @@ services:
|
|||
command: >
|
||||
bash -c "
|
||||
cd /app &&
|
||||
celery -A dispatcharr worker -l info
|
||||
nice -n 5 celery -A dispatcharr worker -l info
|
||||
"
|
||||
|
||||
db:
|
||||
|
|
|
|||
|
|
@ -8,8 +8,8 @@ exec-before = python /app/scripts/wait_for_redis.py
|
|||
; Start Redis first
|
||||
attach-daemon = redis-server
|
||||
; Then start other services
|
||||
attach-daemon = celery -A dispatcharr worker --autoscale=6,1
|
||||
attach-daemon = celery -A dispatcharr beat
|
||||
attach-daemon = nice -n 5 celery -A dispatcharr worker --autoscale=6,1
|
||||
attach-daemon = nice -n 5 celery -A dispatcharr beat
|
||||
attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application
|
||||
attach-daemon = cd /app/frontend && npm run dev
|
||||
|
||||
|
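The compose file and all three uWSGI profiles now start the Celery worker and beat under `nice -n 5`, so background jobs give way to the streaming path when CPU is tight. For comparison, the same priority drop when a worker is spawned from Python can be expressed with `os.nice` in a `preexec_fn`; the command line below is illustrative only:

```python
import os
import subprocess

def start_niced_worker():
    # Equivalent intent to `nice -n 5 celery -A dispatcharr worker -l info`.
    return subprocess.Popen(
        ["celery", "-A", "dispatcharr", "worker", "-l", "info"],
        preexec_fn=lambda: os.nice(5),  # lower scheduling priority (POSIX only)
    )
```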
|
|
|||
|
|
@ -10,8 +10,8 @@ exec-pre = python /app/scripts/wait_for_redis.py
|
|||
; Start Redis first
|
||||
attach-daemon = redis-server
|
||||
; Then start other services
|
||||
attach-daemon = celery -A dispatcharr worker --autoscale=6,1
|
||||
attach-daemon = celery -A dispatcharr beat
|
||||
attach-daemon = nice -n 5 celery -A dispatcharr worker --autoscale=6,1
|
||||
attach-daemon = nice -n 5 celery -A dispatcharr beat
|
||||
attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application
|
||||
attach-daemon = cd /app/frontend && npm run dev
|
||||
|
||||
|
|
|
|||
|
|
@ -10,8 +10,8 @@ exec-pre = python /app/scripts/wait_for_redis.py
|
|||
; Start Redis first
|
||||
attach-daemon = redis-server
|
||||
; Then start other services
|
||||
attach-daemon = celery -A dispatcharr worker --autoscale=6,1
|
||||
attach-daemon = celery -A dispatcharr beat
|
||||
attach-daemon = nice -n 5 celery -A dispatcharr worker --autoscale=6,1
|
||||
attach-daemon = nice -n 5 celery -A dispatcharr beat
|
||||
attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application
|
||||
|
||||
# Core settings
|
||||
|
|
|
|||
2229
frontend/package-lock.json
generated
File diff suppressed because it is too large
|
|
@ -1,5 +1,5 @@
|
|||
{
|
||||
"name": "vite",
|
||||
"name": "frontend",
|
||||
"private": true,
|
||||
"version": "0.0.0",
|
||||
"type": "module",
|
||||
|
|
@ -7,7 +7,9 @@
|
|||
"dev": "vite --host",
|
||||
"build": "vite build",
|
||||
"lint": "eslint .",
|
||||
"preview": "vite preview"
|
||||
"preview": "vite preview",
|
||||
"test": "vitest --run",
|
||||
"test:watch": "vitest"
|
||||
},
|
||||
"dependencies": {
|
||||
"@dnd-kit/core": "^6.3.1",
|
||||
|
|
@ -22,14 +24,14 @@
|
|||
"@mantine/hooks": "~8.0.1",
|
||||
"@mantine/notifications": "~8.0.1",
|
||||
"@tanstack/react-table": "^8.21.2",
|
||||
"allotment": "^1.20.3",
|
||||
"allotment": "^1.20.4",
|
||||
"dayjs": "^1.11.13",
|
||||
"formik": "^2.4.6",
|
||||
"hls.js": "^1.5.20",
|
||||
"lucide-react": "^0.511.0",
|
||||
"mpegts.js": "^1.8.0",
|
||||
"react": "^19.0.0",
|
||||
"react-dom": "^19.0.0",
|
||||
"react": "^19.1.0",
|
||||
"react-dom": "^19.1.0",
|
||||
"react-draggable": "^4.4.6",
|
||||
"react-pro-sidebar": "^1.1.0",
|
||||
"react-router-dom": "^7.3.0",
|
||||
|
|
@ -43,14 +45,27 @@
|
|||
},
|
||||
"devDependencies": {
|
||||
"@eslint/js": "^9.21.0",
|
||||
"@types/react": "^19.0.10",
|
||||
"@types/react-dom": "^19.0.4",
|
||||
"@vitejs/plugin-react-swc": "^3.8.0",
|
||||
"@swc/core": "npm:@swc/wasm@1.13.20",
|
||||
"@swc/wasm": "^1.13.20",
|
||||
"@testing-library/dom": "^10.4.1",
|
||||
"@testing-library/jest-dom": "^6.8.0",
|
||||
"@testing-library/react": "^16.3.0",
|
||||
"@testing-library/user-event": "^14.6.1",
|
||||
"@types/react": "^19.1.0",
|
||||
"@types/react-dom": "^19.1.0",
|
||||
"@vitejs/plugin-react-swc": "^4.1.0",
|
||||
"eslint": "^9.21.0",
|
||||
"eslint-plugin-react-hooks": "^5.1.0",
|
||||
"eslint-plugin-react-refresh": "^0.4.19",
|
||||
"globals": "^15.15.0",
|
||||
"jsdom": "^27.0.0",
|
||||
"prettier": "^3.5.3",
|
||||
"vite": "^6.2.0"
|
||||
"vite": "^6.2.0",
|
||||
"vitest": "^3.2.4"
|
||||
},
|
||||
"resolutions": {
|
||||
"vite": "7.1.7",
|
||||
"react": "19.1.0",
|
||||
"react-dom": "19.1.0"
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -562,6 +562,29 @@ export default class API {
|
|||
}
|
||||
}
|
||||
|
||||
static async setChannelTvgIdsFromEpg(channelIds) {
|
||||
try {
|
||||
const response = await request(
|
||||
`${host}/api/channels/channels/set-tvg-ids-from-epg/`,
|
||||
{
|
||||
method: 'POST',
|
||||
body: { channel_ids: channelIds },
|
||||
}
|
||||
);
|
||||
|
||||
notifications.show({
|
||||
title: 'Task Started',
|
||||
message: response.message,
|
||||
color: 'blue',
|
||||
});
|
||||
|
||||
return response;
|
||||
} catch (e) {
|
||||
errorNotification('Failed to start EPG TVG-ID setting task', e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
static async assignChannelNumbers(channelIds, startingNum = 1) {
|
||||
try {
|
||||
const response = await request(`${host}/api/channels/channels/assign/`, {
|
||||
|
|
|
|||
|
|
@ -11,6 +11,7 @@ import logo from '../../images/logo.png';
|
|||
import { useChannelLogoSelection } from '../../hooks/useSmartLogos';
|
||||
import useLogosStore from '../../store/logos';
|
||||
import LazyLogo from '../LazyLogo';
|
||||
import LogoForm from './Logo';
|
||||
import {
|
||||
Box,
|
||||
Button,
|
||||
|
|
@ -37,7 +38,7 @@ import {
|
|||
import { notifications } from '@mantine/notifications';
|
||||
import { ListOrdered, SquarePlus, SquareX, X, Zap } from 'lucide-react';
|
||||
import useEPGsStore from '../../store/epgs';
|
||||
import { Dropzone } from '@mantine/dropzone';
|
||||
|
||||
import { FixedSizeList as List } from 'react-window';
|
||||
import { USER_LEVELS, USER_LEVEL_LABELS } from '../../constants';
|
||||
|
||||
|
|
@ -71,7 +72,7 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => {
|
|||
const tvgs = useEPGsStore((s) => s.tvgs);
|
||||
const tvgsById = useEPGsStore((s) => s.tvgsById);
|
||||
|
||||
const [logoPreview, setLogoPreview] = useState(null);
|
||||
const [logoModalOpen, setLogoModalOpen] = useState(false);
|
||||
const [channelStreams, setChannelStreams] = useState([]);
|
||||
const [channelGroupModelOpen, setChannelGroupModalOpen] = useState(false);
|
||||
const [epgPopoverOpened, setEpgPopoverOpened] = useState(false);
|
||||
|
|
@ -97,33 +98,12 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => {
|
|||
setChannelStreams(Array.from(streamSet));
|
||||
};
|
||||
|
||||
const handleLogoChange = async (files) => {
|
||||
if (files.length === 1) {
|
||||
const file = files[0];
|
||||
|
||||
// Validate file size on frontend first
|
||||
if (file.size > 5 * 1024 * 1024) {
|
||||
// 5MB
|
||||
notifications.show({
|
||||
title: 'Error',
|
||||
message: 'File too large. Maximum size is 5MB.',
|
||||
color: 'red',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const retval = await API.uploadLogo(file);
|
||||
// Note: API.uploadLogo already adds the logo to the store, no need to fetch
|
||||
setLogoPreview(retval.cache_url);
|
||||
formik.setFieldValue('logo_id', retval.id);
|
||||
} catch (error) {
|
||||
console.error('Logo upload failed:', error);
|
||||
// Error notification is already handled in API.uploadLogo
|
||||
}
|
||||
} else {
|
||||
setLogoPreview(null);
|
||||
const handleLogoSuccess = ({ logo }) => {
|
||||
if (logo && logo.id) {
|
||||
formik.setFieldValue('logo_id', logo.id);
|
||||
ensureLogosLoaded(); // Refresh logos
|
||||
}
|
||||
setLogoModalOpen(false);
|
||||
};
|
||||
|
||||
const handleAutoMatchEpg = async () => {
|
||||
|
|
@ -283,6 +263,34 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => {
|
|||
}
|
||||
};
|
||||
|
||||
const handleSetTvgIdFromEpg = () => {
|
||||
const epgDataId = formik.values.epg_data_id;
|
||||
if (!epgDataId) {
|
||||
notifications.show({
|
||||
title: 'No EPG Selected',
|
||||
message: 'Please select an EPG source first.',
|
||||
color: 'orange',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
const tvg = tvgsById[epgDataId];
|
||||
if (tvg && tvg.tvg_id) {
|
||||
formik.setFieldValue('tvg_id', tvg.tvg_id);
|
||||
notifications.show({
|
||||
title: 'Success',
|
||||
message: `TVG-ID set to "${tvg.tvg_id}"`,
|
||||
color: 'green',
|
||||
});
|
||||
} else {
|
||||
notifications.show({
|
||||
title: 'No TVG-ID Available',
|
||||
message: 'No TVG-ID found in the selected EPG data.',
|
||||
color: 'orange',
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
const formik = useFormik({
|
||||
initialValues: {
|
||||
name: '',
|
||||
|
|
@ -809,35 +817,13 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => {
|
|||
</Stack>
|
||||
</Group>
|
||||
|
||||
<Group>
|
||||
<Divider size="xs" style={{ flex: 1 }} />
|
||||
<Text size="xs" c="dimmed">
|
||||
OR
|
||||
</Text>
|
||||
<Divider size="xs" style={{ flex: 1 }} />
|
||||
</Group>
|
||||
|
||||
<Stack>
|
||||
<Text size="sm">Upload Logo</Text>
|
||||
<Dropzone
|
||||
onDrop={handleLogoChange}
|
||||
onReject={(files) => console.log('rejected files', files)}
|
||||
maxSize={5 * 1024 ** 2}
|
||||
>
|
||||
<Group
|
||||
justify="center"
|
||||
gap="xl"
|
||||
mih={40}
|
||||
style={{ pointerEvents: 'none' }}
|
||||
>
|
||||
<Text size="sm" inline>
|
||||
Drag images here or click to select files
|
||||
</Text>
|
||||
</Group>
|
||||
</Dropzone>
|
||||
|
||||
<Center></Center>
|
||||
</Stack>
|
||||
<Button
|
||||
onClick={() => setLogoModalOpen(true)}
|
||||
fullWidth
|
||||
variant="default"
|
||||
>
|
||||
Upload or Create Logo
|
||||
</Button>
|
||||
</Stack>
|
||||
|
||||
<Divider size="sm" orientation="vertical" />
|
||||
|
|
@ -865,7 +851,23 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => {
|
|||
<TextInput
|
||||
id="tvg_id"
|
||||
name="tvg_id"
|
||||
label="TVG-ID"
|
||||
label={
|
||||
<Group gap="xs">
|
||||
<span>TVG-ID</span>
|
||||
{formik.values.epg_data_id && (
|
||||
<Button
|
||||
size="xs"
|
||||
variant="transparent"
|
||||
onClick={handleSetTvgIdFromEpg}
|
||||
title="Set TVG-ID from EPG data"
|
||||
p={0}
|
||||
h="auto"
|
||||
>
|
||||
Use EPG TVG-ID
|
||||
</Button>
|
||||
)}
|
||||
</Group>
|
||||
}
|
||||
value={formik.values.tvg_id}
|
||||
onChange={formik.handleChange}
|
||||
error={formik.errors.tvg_id ? formik.touched.tvg_id : ''}
|
||||
|
|
@ -1057,6 +1059,12 @@ const ChannelForm = ({ channel = null, isOpen, onClose }) => {
|
|||
isOpen={channelGroupModelOpen}
|
||||
onClose={handleChannelGroupModalClose}
|
||||
/>
|
||||
|
||||
<LogoForm
|
||||
isOpen={logoModalOpen}
|
||||
onClose={() => setLogoModalOpen(false)}
|
||||
onSuccess={handleLogoSuccess}
|
||||
/>
|
||||
</>
|
||||
);
|
||||
};
|
||||
|
|
|
|||
|
|
@ -29,18 +29,34 @@ import { FixedSizeList as List } from 'react-window';
|
|||
import { useForm } from '@mantine/form';
|
||||
import { notifications } from '@mantine/notifications';
|
||||
import { USER_LEVELS, USER_LEVEL_LABELS } from '../../constants';
|
||||
import { useChannelLogoSelection } from '../../hooks/useSmartLogos';
|
||||
import LazyLogo from '../LazyLogo';
|
||||
import logo from '../../images/logo.png';
|
||||
import ConfirmationDialog from '../ConfirmationDialog';
|
||||
import useWarningsStore from '../../store/warnings';
|
||||
|
||||
const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
|
||||
const theme = useMantineTheme();
|
||||
|
||||
const groupListRef = useRef(null);
|
||||
const logoListRef = useRef(null);
|
||||
|
||||
const channelGroups = useChannelsStore((s) => s.channelGroups);
|
||||
const {
|
||||
logos: channelLogos,
|
||||
ensureLogosLoaded,
|
||||
isLoading: logosLoading,
|
||||
} = useChannelLogoSelection();
|
||||
|
||||
useEffect(() => {
|
||||
ensureLogosLoaded();
|
||||
}, [ensureLogosLoaded]);
|
||||
|
||||
const streamProfiles = useStreamProfilesStore((s) => s.profiles);
|
||||
|
||||
const [channelGroupModelOpen, setChannelGroupModalOpen] = useState(false);
|
||||
const [selectedChannelGroup, setSelectedChannelGroup] = useState('-1');
|
||||
const [selectedLogoId, setSelectedLogoId] = useState('-1');
|
||||
const [isSubmitting, setIsSubmitting] = useState(false);
|
||||
const [regexFind, setRegexFind] = useState('');
|
||||
const [regexReplace, setRegexReplace] = useState('');
|
||||
|
|
@ -49,10 +65,21 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
|
|||
const [groupFilter, setGroupFilter] = useState('');
|
||||
const groupOptions = Object.values(channelGroups);
|
||||
|
||||
const [logoPopoverOpened, setLogoPopoverOpened] = useState(false);
|
||||
const [logoFilter, setLogoFilter] = useState('');
|
||||
// Confirmation dialog states
|
||||
const [confirmSetNamesOpen, setConfirmSetNamesOpen] = useState(false);
|
||||
const [confirmSetLogosOpen, setConfirmSetLogosOpen] = useState(false);
|
||||
const [confirmSetTvgIdsOpen, setConfirmSetTvgIdsOpen] = useState(false);
|
||||
const [confirmClearEpgsOpen, setConfirmClearEpgsOpen] = useState(false);
|
||||
const isWarningSuppressed = useWarningsStore((s) => s.isWarningSuppressed);
|
||||
const suppressWarning = useWarningsStore((s) => s.suppressWarning);
|
||||
|
||||
const form = useForm({
|
||||
mode: 'uncontrolled',
|
||||
initialValues: {
|
||||
channel_group: '(no change)',
|
||||
logo: '(no change)',
|
||||
stream_profile_id: '-1',
|
||||
user_level: '-1',
|
||||
},
|
||||
|
|
@ -70,6 +97,15 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
|
|||
delete values.channel_group_id;
|
||||
}
|
||||
|
||||
if (selectedLogoId && selectedLogoId !== '-1') {
|
||||
if (selectedLogoId === '0') {
|
||||
values.logo_id = null;
|
||||
} else {
|
||||
values.logo_id = parseInt(selectedLogoId);
|
||||
}
|
||||
}
|
||||
delete values.logo;
|
||||
|
||||
// Handle stream profile ID - convert special values
|
||||
if (!values.stream_profile_id || values.stream_profile_id === '-1') {
|
||||
delete values.stream_profile_id;
|
||||
|
|
@ -144,6 +180,15 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
|
|||
return;
|
||||
}
|
||||
|
||||
// Skip warning if suppressed
|
||||
if (isWarningSuppressed('batch-set-names-from-epg')) {
|
||||
return executeSetNamesFromEpg();
|
||||
}
|
||||
|
||||
setConfirmSetNamesOpen(true);
|
||||
};
|
||||
|
||||
const executeSetNamesFromEpg = async () => {
|
||||
try {
|
||||
// Start the backend task
|
||||
await API.setChannelNamesFromEpg(channelIds);
|
||||
|
|
@ -157,6 +202,7 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
|
|||
});
|
||||
|
||||
// Close the modal since the task is now running in background
|
||||
setConfirmSetNamesOpen(false);
|
||||
onClose();
|
||||
} catch (error) {
|
||||
console.error('Failed to start EPG name setting task:', error);
|
||||
|
|
@ -165,6 +211,7 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
|
|||
message: 'Failed to start EPG name setting task.',
|
||||
color: 'red',
|
||||
});
|
||||
setConfirmSetNamesOpen(false);
|
||||
}
|
||||
};
|
||||
|
||||
|
|
@ -178,6 +225,15 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
|
|||
return;
|
||||
}
|
||||
|
||||
// Skip warning if suppressed
|
||||
if (isWarningSuppressed('batch-set-logos-from-epg')) {
|
||||
return executeSetLogosFromEpg();
|
||||
}
|
||||
|
||||
setConfirmSetLogosOpen(true);
|
||||
};
|
||||
|
||||
const executeSetLogosFromEpg = async () => {
|
||||
try {
|
||||
// Start the backend task
|
||||
await API.setChannelLogosFromEpg(channelIds);
|
||||
|
|
@ -191,6 +247,7 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
|
|||
});
|
||||
|
||||
// Close the modal since the task is now running in background
|
||||
setConfirmSetLogosOpen(false);
|
||||
onClose();
|
||||
} catch (error) {
|
||||
console.error('Failed to start EPG logo setting task:', error);
|
||||
|
|
@ -199,6 +256,95 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
|
|||
message: 'Failed to start EPG logo setting task.',
|
||||
color: 'red',
|
||||
});
|
||||
setConfirmSetLogosOpen(false);
|
||||
}
|
||||
};
|
||||
|
||||
const handleSetTvgIdsFromEpg = async () => {
|
||||
if (!channelIds || channelIds.length === 0) {
|
||||
notifications.show({
|
||||
title: 'No Channels Selected',
|
||||
message: 'No channels to update.',
|
||||
color: 'orange',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Skip warning if suppressed
|
||||
if (isWarningSuppressed('batch-set-tvg-ids-from-epg')) {
|
||||
return executeSetTvgIdsFromEpg();
|
||||
}
|
||||
|
||||
setConfirmSetTvgIdsOpen(true);
|
||||
};
|
||||
|
||||
const executeSetTvgIdsFromEpg = async () => {
|
||||
try {
|
||||
// Start the backend task
|
||||
await API.setChannelTvgIdsFromEpg(channelIds);
|
||||
|
||||
// The task will send WebSocket updates for progress
|
||||
// Just show that it started successfully
|
||||
notifications.show({
|
||||
title: 'Task Started',
|
||||
message: `Started setting TVG-IDs from EPG for ${channelIds.length} channels. Progress will be shown in notifications.`,
|
||||
color: 'blue',
|
||||
});
|
||||
|
||||
// Close the modal since the task is now running in background
|
||||
setConfirmSetTvgIdsOpen(false);
|
||||
onClose();
|
||||
} catch (error) {
|
||||
console.error('Failed to start EPG TVG-ID setting task:', error);
|
||||
notifications.show({
|
||||
title: 'Error',
|
||||
message: 'Failed to start EPG TVG-ID setting task.',
|
||||
color: 'red',
|
||||
});
|
||||
setConfirmSetTvgIdsOpen(false);
|
||||
}
|
||||
};
|
||||
|
||||
const handleClearEpgs = async () => {
|
||||
if (!channelIds || channelIds.length === 0) {
|
||||
notifications.show({
|
||||
title: 'No Channels Selected',
|
||||
message: 'No channels to update.',
|
||||
color: 'orange',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Skip warning if suppressed
|
||||
if (isWarningSuppressed('batch-clear-epgs')) {
|
||||
return executeClearEpgs();
|
||||
}
|
||||
|
||||
setConfirmClearEpgsOpen(true);
|
||||
};
|
||||
|
||||
const executeClearEpgs = async () => {
|
||||
try {
|
||||
// Clear EPG assignments (set to null/dummy) using existing batchSetEPG API
|
||||
const associations = channelIds.map((id) => ({
|
||||
channel_id: id,
|
||||
epg_data_id: null,
|
||||
}));
|
||||
|
||||
await API.batchSetEPG(associations);
|
||||
|
||||
// batchSetEPG already shows a notification and refreshes channels
|
||||
// Close the modal
|
||||
setConfirmClearEpgsOpen(false);
|
||||
onClose();
|
||||
} catch (error) {
|
||||
console.error('Failed to clear EPG assignments:', error);
|
||||
notifications.show({
|
||||
title: 'Error',
|
||||
message: 'Failed to clear EPG assignments.',
|
||||
color: 'red',
|
||||
});
|
||||
setConfirmClearEpgsOpen(false);
|
||||
}
|
||||
};
|
||||
|
||||
|
|
@ -242,6 +388,18 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
|
|||
),
|
||||
];
|
||||
|
||||
const logoOptions = useMemo(() => {
|
||||
return [
|
||||
{ id: '-1', name: '(no change)' },
|
||||
{ id: '0', name: 'Use Default', isDefault: true },
|
||||
...Object.values(channelLogos),
|
||||
];
|
||||
}, [channelLogos]);
|
||||
|
||||
const filteredLogos = logoOptions.filter((logo) =>
|
||||
logo.name.toLowerCase().includes(logoFilter.toLowerCase())
|
||||
);
|
||||
|
||||
if (!isOpen) {
|
||||
return <></>;
|
||||
}
|
||||
|
|
@ -317,10 +475,29 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
|
|||
>
|
||||
Set Logos from EPG
|
||||
</Button>
|
||||
<Button
|
||||
size="xs"
|
||||
variant="light"
|
||||
onClick={handleSetTvgIdsFromEpg}
|
||||
style={{ flex: 1 }}
|
||||
>
|
||||
Set TVG-IDs from EPG
|
||||
</Button>
|
||||
</Group>
|
||||
<Group gap="xs" wrap="nowrap" mt="xs">
|
||||
<Button
|
||||
size="xs"
|
||||
variant="light"
|
||||
color="red"
|
||||
onClick={handleClearEpgs}
|
||||
style={{ flex: 1 }}
|
||||
>
|
||||
Clear EPG (Set to Dummy)
|
||||
</Button>
|
||||
</Group>
|
||||
<Text size="xs" c="dimmed" mt="xs">
|
||||
Updates channel names and logos based on their assigned EPG
|
||||
data
|
||||
Updates channel names, logos, and TVG-IDs based on their
|
||||
assigned EPG data, or clear EPG assignments to use dummy EPG
|
||||
</Text>
|
||||
</Paper>
|
||||
|
||||
|
|
@ -445,6 +622,163 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
|
|||
</Popover.Dropdown>
|
||||
</Popover>
|
||||
|
||||
<Group style={{ width: '100%' }} align="flex-end" gap="xs">
|
||||
<Popover
|
||||
opened={logoPopoverOpened}
|
||||
onChange={setLogoPopoverOpened}
|
||||
withArrow
|
||||
>
|
||||
<Popover.Target>
|
||||
<TextInput
|
||||
label="Logo"
|
||||
readOnly
|
||||
{...form.getInputProps('logo')}
|
||||
key={form.key('logo')}
|
||||
onClick={() => setLogoPopoverOpened(true)}
|
||||
size="xs"
|
||||
style={{ flex: 1 }}
|
||||
rightSection={
|
||||
selectedLogoId !== '-1' && (
|
||||
<ActionIcon
|
||||
size="xs"
|
||||
variant="subtle"
|
||||
onClick={(e) => {
|
||||
e.stopPropagation();
|
||||
setSelectedLogoId('-1');
|
||||
form.setValues({ logo: '(no change)' });
|
||||
}}
|
||||
>
|
||||
<X size={12} />
|
||||
</ActionIcon>
|
||||
)
|
||||
}
|
||||
/>
|
||||
</Popover.Target>
|
||||
<Popover.Dropdown onMouseDown={(e) => e.stopPropagation()}>
|
||||
<Group>
|
||||
<TextInput
|
||||
placeholder="Filter"
|
||||
value={logoFilter}
|
||||
onChange={(event) =>
|
||||
setLogoFilter(event.currentTarget.value)
|
||||
}
|
||||
mb="xs"
|
||||
size="xs"
|
||||
/>
|
||||
{logosLoading && (
|
||||
<Text size="xs" c="dimmed">
|
||||
Loading...
|
||||
</Text>
|
||||
)}
|
||||
</Group>
|
||||
<ScrollArea style={{ height: 200 }}>
|
||||
{filteredLogos.length === 0 ? (
|
||||
<Center style={{ height: 200 }}>
|
||||
<Text size="sm" c="dimmed">
|
||||
{logoFilter
|
||||
? 'No logos match your filter'
|
||||
: 'No logos available'}
|
||||
</Text>
|
||||
</Center>
|
||||
) : (
|
||||
<List
|
||||
height={200}
|
||||
itemCount={filteredLogos.length}
|
||||
itemSize={55}
|
||||
style={{ width: '100%' }}
|
||||
ref={logoListRef}
|
||||
>
|
||||
{({ index, style }) => {
|
||||
const item = filteredLogos[index];
|
||||
return (
|
||||
<div
|
||||
style={{
|
||||
...style,
|
||||
cursor: 'pointer',
|
||||
padding: '5px',
|
||||
borderRadius: '4px',
|
||||
}}
|
||||
onClick={() => {
|
||||
setSelectedLogoId(item.id);
|
||||
form.setValues({
|
||||
logo: item.name,
|
||||
});
|
||||
setLogoPopoverOpened(false);
|
||||
}}
|
||||
onMouseEnter={(e) => {
|
||||
e.currentTarget.style.backgroundColor =
|
||||
'rgb(68, 68, 68)';
|
||||
}}
|
||||
onMouseLeave={(e) => {
|
||||
e.currentTarget.style.backgroundColor =
|
||||
'transparent';
|
||||
}}
|
||||
>
|
||||
<Center
|
||||
style={{
|
||||
flexDirection: 'column',
|
||||
gap: '2px',
|
||||
}}
|
||||
>
|
||||
{item.isDefault ? (
|
||||
<img
|
||||
src={logo}
|
||||
height="30"
|
||||
style={{
|
||||
maxWidth: 80,
|
||||
objectFit: 'contain',
|
||||
}}
|
||||
alt="Default Logo"
|
||||
/>
|
||||
) : item.id > 0 ? (
|
||||
<img
|
||||
src={item.cache_url || logo}
|
||||
height="30"
|
||||
style={{
|
||||
maxWidth: 80,
|
||||
objectFit: 'contain',
|
||||
}}
|
||||
alt={item.name || 'Logo'}
|
||||
onError={(e) => {
|
||||
if (e.target.src !== logo) {
|
||||
e.target.src = logo;
|
||||
}
|
||||
}}
|
||||
/>
|
||||
) : (
|
||||
<Box h={30} />
|
||||
)}
|
||||
<Text
|
||||
size="xs"
|
||||
c="dimmed"
|
||||
ta="center"
|
||||
style={{
|
||||
maxWidth: 80,
|
||||
overflow: 'hidden',
|
||||
textOverflow: 'ellipsis',
|
||||
whiteSpace: 'nowrap',
|
||||
}}
|
||||
>
|
||||
{item.name}
|
||||
</Text>
|
||||
</Center>
|
||||
</div>
|
||||
);
|
||||
}}
|
||||
</List>
|
||||
)}
|
||||
</ScrollArea>
|
||||
</Popover.Dropdown>
|
||||
</Popover>
|
||||
{selectedLogoId > 0 && (
|
||||
<LazyLogo
|
||||
logoId={selectedLogoId}
|
||||
alt="channel logo"
|
||||
style={{ height: 24, marginBottom: 5 }}
|
||||
/>
|
||||
)}
|
||||
</Group>
|
||||
|
||||
<Select
|
||||
id="stream_profile_id"
|
||||
label="Stream Profile"
|
||||
|
|
@ -496,6 +830,90 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => {
|
|||
isOpen={channelGroupModelOpen}
|
||||
onClose={handleChannelGroupModalClose}
|
||||
/>
|
||||
|
||||
<ConfirmationDialog
|
||||
opened={confirmSetNamesOpen}
|
||||
onClose={() => setConfirmSetNamesOpen(false)}
|
||||
onConfirm={executeSetNamesFromEpg}
|
||||
title="Confirm Set Names from EPG"
|
||||
message={
|
||||
<div style={{ whiteSpace: 'pre-line' }}>
|
||||
{`Are you sure you want to set names from EPG for ${channelIds?.length || 0} selected channels?
|
||||
|
||||
This will replace the current channel names with the names from their assigned EPG data.
|
||||
|
||||
This action cannot be undone.`}
|
||||
</div>
|
||||
}
|
||||
confirmLabel="Set Names"
|
||||
cancelLabel="Cancel"
|
||||
actionKey="batch-set-names-from-epg"
|
||||
onSuppressChange={suppressWarning}
|
||||
size="md"
|
||||
/>
|
||||
|
||||
<ConfirmationDialog
|
||||
opened={confirmSetLogosOpen}
|
||||
onClose={() => setConfirmSetLogosOpen(false)}
|
||||
onConfirm={executeSetLogosFromEpg}
|
||||
title="Confirm Set Logos from EPG"
|
||||
message={
|
||||
<div style={{ whiteSpace: 'pre-line' }}>
|
||||
{`Are you sure you want to set logos from EPG for ${channelIds?.length || 0} selected channels?
|
||||
|
||||
This will replace the current channel logos with logos from their assigned EPG data. New logos will be created if needed.
|
||||
|
||||
This action cannot be undone.`}
|
||||
</div>
|
||||
}
|
||||
confirmLabel="Set Logos"
|
||||
cancelLabel="Cancel"
|
||||
actionKey="batch-set-logos-from-epg"
|
||||
onSuppressChange={suppressWarning}
|
||||
size="md"
|
||||
/>
|
||||
|
||||
<ConfirmationDialog
|
||||
opened={confirmSetTvgIdsOpen}
|
||||
onClose={() => setConfirmSetTvgIdsOpen(false)}
|
||||
onConfirm={executeSetTvgIdsFromEpg}
|
||||
title="Confirm Set TVG-IDs from EPG"
|
||||
message={
|
||||
<div style={{ whiteSpace: 'pre-line' }}>
|
||||
{`Are you sure you want to set TVG-IDs from EPG for ${channelIds?.length || 0} selected channels?
|
||||
|
||||
This will replace the current TVG-IDs with the TVG-IDs from their assigned EPG data.
|
||||
|
||||
This action cannot be undone.`}
|
||||
</div>
|
||||
}
|
||||
confirmLabel="Set TVG-IDs"
|
||||
cancelLabel="Cancel"
|
||||
actionKey="batch-set-tvg-ids-from-epg"
|
||||
onSuppressChange={suppressWarning}
|
||||
size="md"
|
||||
/>
|
||||
|
||||
<ConfirmationDialog
|
||||
opened={confirmClearEpgsOpen}
|
||||
onClose={() => setConfirmClearEpgsOpen(false)}
|
||||
onConfirm={executeClearEpgs}
|
||||
title="Confirm Clear EPG Assignments"
|
||||
message={
|
||||
<div style={{ whiteSpace: 'pre-line' }}>
|
||||
{`Are you sure you want to clear EPG assignments for ${channelIds?.length || 0} selected channels?
|
||||
|
||||
This will set all selected channels to use dummy EPG data.
|
||||
|
||||
This action cannot be undone.`}
|
||||
</div>
|
||||
}
|
||||
confirmLabel="Clear EPGs"
|
||||
cancelLabel="Cancel"
|
||||
actionKey="batch-clear-epgs"
|
||||
onSuppressChange={suppressWarning}
|
||||
size="md"
|
||||
/>
|
||||
</>
|
||||
);
|
||||
};
|
||||
|
|
|
|||
|
|
@ -106,13 +106,12 @@ const LogoForm = ({ logo = null, isOpen, onClose, onSuccess }) => {
|
|||
onSuccess?.({ type: 'create', logo: newLogo }); // Call onSuccess for creates
|
||||
} else {
|
||||
// File was uploaded and logo was already created
|
||||
// Note: API.uploadLogo already calls addLogo() in the store, so no need to call onSuccess
|
||||
notifications.show({
|
||||
title: 'Success',
|
||||
message: 'Logo uploaded successfully',
|
||||
color: 'green',
|
||||
});
|
||||
// No onSuccess call needed - API.uploadLogo already updated the store
|
||||
onSuccess?.({ type: 'create', logo: uploadResponse });
|
||||
}
|
||||
onClose();
|
||||
} catch (error) {
|
||||
|
|
@ -211,6 +210,24 @@ const LogoForm = ({ logo = null, isOpen, onClose, onSuccess }) => {
|
|||
}
|
||||
};
|
||||
|
||||
const handleUrlBlur = (event) => {
|
||||
const urlValue = event.target.value;
|
||||
if (urlValue) {
|
||||
try {
|
||||
const url = new URL(urlValue);
|
||||
const pathname = url.pathname;
|
||||
const filename = pathname.substring(pathname.lastIndexOf('/') + 1);
|
||||
const nameWithoutExtension = filename.replace(/\.[^/.]+$/, '');
|
||||
if (nameWithoutExtension) {
|
||||
formik.setFieldValue('name', nameWithoutExtension);
|
||||
}
|
||||
} catch (error) {
|
||||
// If the URL is invalid, do nothing.
|
||||
// The validation schema will catch this.
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Clean up object URLs when component unmounts or preview changes
|
||||
useEffect(() => {
|
||||
return () => {
|
||||
|
|
@ -323,6 +340,7 @@ const LogoForm = ({ logo = null, isOpen, onClose, onSuccess }) => {
|
|||
placeholder="https://example.com/logo.png"
|
||||
{...formik.getFieldProps('url')}
|
||||
onChange={handleUrlChange}
|
||||
onBlur={handleUrlBlur}
|
||||
error={formik.touched.url && formik.errors.url}
|
||||
disabled={!!selectedFile} // Disable when file is selected
|
||||
/>
|
||||
|
|
|
|||
|
|
@ -51,8 +51,7 @@ const M3UFilter = ({ filter = null, m3u, isOpen, onClose }) => {
|
|||
values.custom_properties = setCustomProperty(
|
||||
filter ? filter.custom_properties : {},
|
||||
'case_sensitive',
|
||||
values.case_sensitive,
|
||||
true
|
||||
values.case_sensitive
|
||||
);
|
||||
|
||||
delete values.case_sensitive;
|
||||
|
|
|
|||
|
|
@ -282,7 +282,7 @@ const StreamsTable = () => {
|
|||
cell: ({ getValue }) => (
|
||||
<Box
|
||||
style={{
|
||||
whiteSpace: 'nowrap',
|
||||
whiteSpace: 'pre',
|
||||
overflow: 'hidden',
|
||||
textOverflow: 'ellipsis',
|
||||
}}
|
||||
|
|
@ -301,7 +301,7 @@ const StreamsTable = () => {
|
|||
cell: ({ getValue }) => (
|
||||
<Box
|
||||
style={{
|
||||
whiteSpace: 'nowrap',
|
||||
whiteSpace: 'pre',
|
||||
overflow: 'hidden',
|
||||
textOverflow: 'ellipsis',
|
||||
}}
|
||||
|
|
|
|||
File diff suppressed because it is too large
|
|
@ -199,6 +199,8 @@ const SettingsPage = () => {
|
|||
// Add a new state to track the dialog type
|
||||
const [rehashDialogType, setRehashDialogType] = useState(null); // 'save' or 'rehash'
|
||||
|
||||
// Store pending changed settings when showing the dialog
|
||||
const [pendingChangedSettings, setPendingChangedSettings] = useState(null);
|
||||
const [comskipFile, setComskipFile] = useState(null);
|
||||
const [comskipUploadLoading, setComskipUploadLoading] = useState(false);
|
||||
const [comskipConfig, setComskipConfig] = useState({
|
||||
|
|
@ -422,6 +424,8 @@ const SettingsPage = () => {
|
|||
|
||||
// If M3U hash key changed, show warning (unless suppressed)
|
||||
if (m3uHashKeyChanged && !isWarningSuppressed('rehash-streams')) {
|
||||
// Store the changed settings before showing dialog
|
||||
setPendingChangedSettings(changedSettings);
|
||||
setRehashDialogType('save'); // Set dialog type to save
|
||||
setRehashConfirmOpen(true);
|
||||
return;
|
||||
|
|
@ -565,23 +569,28 @@ const SettingsPage = () => {
|
|||
const executeSettingsSaveAndRehash = async () => {
|
||||
setRehashConfirmOpen(false);
|
||||
|
||||
// First save the settings
|
||||
const values = form.getValues();
|
||||
const changedSettings = {};
|
||||
// Use the stored pending values that were captured before the dialog was shown
|
||||
const changedSettings = pendingChangedSettings || {};
|
||||
|
||||
for (const settingKey in values) {
|
||||
if (String(values[settingKey]) !== String(settings[settingKey].value)) {
|
||||
changedSettings[settingKey] = `${values[settingKey]}`;
|
||||
// Update each changed setting in the backend (create if missing)
|
||||
for (const updatedKey in changedSettings) {
|
||||
const existing = settings[updatedKey];
|
||||
if (existing && existing.id) {
|
||||
await API.updateSetting({
|
||||
...existing,
|
||||
value: changedSettings[updatedKey],
|
||||
});
|
||||
} else {
|
||||
await API.createSetting({
|
||||
key: updatedKey,
|
||||
name: updatedKey.replace(/-/g, ' '),
|
||||
value: changedSettings[updatedKey],
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Update each changed setting in the backend
|
||||
for (const updatedKey in changedSettings) {
|
||||
await API.updateSetting({
|
||||
...settings[updatedKey],
|
||||
value: changedSettings[updatedKey],
|
||||
});
|
||||
}
|
||||
// Clear the pending values
|
||||
setPendingChangedSettings(null);
|
||||
};
|
||||
|
||||
const executeRehashStreamsOnly = async () => {
|
||||
|
|
@ -661,7 +670,7 @@ const SettingsPage = () => {
|
|||
data={[
|
||||
{
|
||||
value: '12h',
|
||||
label: '12h hour time',
|
||||
label: '12 hour time',
|
||||
},
|
||||
{
|
||||
value: '24h',
|
||||
|
|
@ -959,6 +968,10 @@ const SettingsPage = () => {
|
|||
value: 'tvg_id',
|
||||
label: 'TVG-ID',
|
||||
},
|
||||
{
|
||||
value: 'm3u_id',
|
||||
label: 'M3U ID',
|
||||
},
|
||||
]}
|
||||
{...form.getInputProps('m3u-hash-key')}
|
||||
key={form.key('m3u-hash-key')}
|
||||
|
|
@ -1180,6 +1193,8 @@ const SettingsPage = () => {
|
|||
onClose={() => {
|
||||
setRehashConfirmOpen(false);
|
||||
setRehashDialogType(null);
|
||||
// Clear pending values when dialog is cancelled
|
||||
setPendingChangedSettings(null);
|
||||
}}
|
||||
onConfirm={handleRehashConfirm}
|
||||
title={
|
||||
|
|
|
|||
100
frontend/src/pages/__tests__/guideUtils.test.js
Normal file
|
|
@ -0,0 +1,100 @@
|
|||
import { describe, it, expect } from 'vitest';
|
||||
import dayjs from 'dayjs';
|
||||
import {
|
||||
PROGRAM_HEIGHT,
|
||||
EXPANDED_PROGRAM_HEIGHT,
|
||||
buildChannelIdMap,
|
||||
mapProgramsByChannel,
|
||||
computeRowHeights,
|
||||
} from '../guideUtils.js';
|
||||
|
||||
describe('guideUtils', () => {
|
||||
describe('buildChannelIdMap', () => {
|
||||
it('maps tvg ids from epg records and falls back to channel uuid', () => {
|
||||
const channels = [
|
||||
{ id: 1, epg_data_id: 'epg-1', uuid: 'uuid-1' },
|
||||
{ id: 2, epg_data_id: null, uuid: 'uuid-2' },
|
||||
];
|
||||
const tvgsById = {
|
||||
'epg-1': { tvg_id: 'alpha' },
|
||||
};
|
||||
|
||||
const map = buildChannelIdMap(channels, tvgsById);
|
||||
|
||||
expect(map.get('alpha')).toBe(1);
|
||||
expect(map.get('uuid-2')).toBe(2);
|
||||
});
|
||||
});
|
||||
|
||||
describe('mapProgramsByChannel', () => {
|
||||
it('groups programs by channel and sorts them by start time', () => {
|
||||
const programs = [
|
||||
{
|
||||
id: 10,
|
||||
tvg_id: 'alpha',
|
||||
start_time: dayjs('2025-01-01T02:00:00Z').toISOString(),
|
||||
end_time: dayjs('2025-01-01T03:00:00Z').toISOString(),
|
||||
title: 'Late Show',
|
||||
},
|
||||
{
|
||||
id: 11,
|
||||
tvg_id: 'alpha',
|
||||
start_time: dayjs('2025-01-01T01:00:00Z').toISOString(),
|
||||
end_time: dayjs('2025-01-01T02:00:00Z').toISOString(),
|
||||
title: 'Evening News',
|
||||
},
|
||||
{
|
||||
id: 20,
|
||||
tvg_id: 'beta',
|
||||
start_time: dayjs('2025-01-01T00:00:00Z').toISOString(),
|
||||
end_time: dayjs('2025-01-01T01:00:00Z').toISOString(),
|
||||
title: 'Morning Show',
|
||||
},
|
||||
];
|
||||
|
||||
const channelIdByTvgId = new Map([
|
||||
['alpha', 1],
|
||||
['beta', 2],
|
||||
]);
|
||||
|
||||
const map = mapProgramsByChannel(programs, channelIdByTvgId);
|
||||
|
||||
expect(map.get(1)).toHaveLength(2);
|
||||
expect(map.get(1)?.map((item) => item.id)).toEqual([11, 10]);
|
||||
expect(map.get(2)).toHaveLength(1);
|
||||
expect(map.get(2)?.[0].startMs).toBeTypeOf('number');
|
||||
expect(map.get(2)?.[0].endMs).toBeTypeOf('number');
|
||||
});
|
||||
});
|
||||
|
||||
describe('computeRowHeights', () => {
|
||||
it('returns program heights with expanded rows when needed', () => {
|
||||
const filteredChannels = [
|
||||
{ id: 1 },
|
||||
{ id: 2 },
|
||||
];
|
||||
|
||||
const programsByChannel = new Map([
|
||||
[1, [{ id: 10 }, { id: 11 }]],
|
||||
[2, [{ id: 20 }]],
|
||||
]);
|
||||
|
||||
const collapsed = computeRowHeights(
|
||||
filteredChannels,
|
||||
programsByChannel,
|
||||
null
|
||||
);
|
||||
expect(collapsed).toEqual([PROGRAM_HEIGHT, PROGRAM_HEIGHT]);
|
||||
|
||||
const expanded = computeRowHeights(
|
||||
filteredChannels,
|
||||
programsByChannel,
|
||||
10
|
||||
);
|
||||
expect(expanded).toEqual([
|
||||
EXPANDED_PROGRAM_HEIGHT,
|
||||
PROGRAM_HEIGHT,
|
||||
]);
|
||||
});
|
||||
});
|
||||
});
|
||||
79
frontend/src/pages/guideUtils.js
Normal file
|
|
@ -0,0 +1,79 @@
|
|||
import dayjs from 'dayjs';
|
||||
|
||||
export const PROGRAM_HEIGHT = 90;
|
||||
export const EXPANDED_PROGRAM_HEIGHT = 180;
|
||||
|
||||
export function buildChannelIdMap(channels, tvgsById) {
|
||||
const map = new Map();
|
||||
channels.forEach((channel) => {
|
||||
const tvgRecord = channel.epg_data_id
|
||||
? tvgsById[channel.epg_data_id]
|
||||
: null;
|
||||
const tvgId = tvgRecord?.tvg_id ?? channel.uuid;
|
||||
if (tvgId) {
|
||||
const tvgKey = String(tvgId);
|
||||
if (!map.has(tvgKey)) {
|
||||
map.set(tvgKey, []);
|
||||
}
|
||||
map.get(tvgKey).push(channel.id);
|
||||
}
|
||||
});
|
||||
return map;
|
||||
}
|
||||
|
||||
export function mapProgramsByChannel(programs, channelIdByTvgId) {
|
||||
if (!programs?.length || !channelIdByTvgId?.size) {
|
||||
return new Map();
|
||||
}
|
||||
|
||||
const map = new Map();
|
||||
programs.forEach((program) => {
|
||||
const channelIds = channelIdByTvgId.get(String(program.tvg_id));
|
||||
if (!channelIds || channelIds.length === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
const startMs = program.startMs ?? dayjs(program.start_time).valueOf();
|
||||
const endMs = program.endMs ?? dayjs(program.end_time).valueOf();
|
||||
|
||||
const programData = {
|
||||
...program,
|
||||
startMs,
|
||||
endMs,
|
||||
};
|
||||
|
||||
// Add this program to all channels that share the same TVG ID
|
||||
channelIds.forEach((channelId) => {
|
||||
if (!map.has(channelId)) {
|
||||
map.set(channelId, []);
|
||||
}
|
||||
map.get(channelId).push(programData);
|
||||
});
|
||||
});
|
||||
|
||||
map.forEach((list) => {
|
||||
list.sort((a, b) => a.startMs - b.startMs);
|
||||
});
|
||||
|
||||
return map;
|
||||
}
|
||||
|
||||
export function computeRowHeights(
|
||||
filteredChannels,
|
||||
programsByChannelId,
|
||||
expandedProgramId,
|
||||
defaultHeight = PROGRAM_HEIGHT,
|
||||
expandedHeight = EXPANDED_PROGRAM_HEIGHT
|
||||
) {
|
||||
if (!filteredChannels?.length) {
|
||||
return [];
|
||||
}
|
||||
|
||||
return filteredChannels.map((channel) => {
|
||||
const channelPrograms = programsByChannelId.get(channel.id) || [];
|
||||
const expanded = channelPrograms.some(
|
||||
(program) => program.id === expandedProgramId
|
||||
);
|
||||
return expanded ? expandedHeight : defaultHeight;
|
||||
});
|
||||
}
|
||||
42
frontend/src/test/setupTests.js
Normal file
|
|
@ -0,0 +1,42 @@
|
|||
import '@testing-library/jest-dom/vitest';
|
||||
import { afterEach, vi } from 'vitest';
|
||||
import { cleanup } from '@testing-library/react';
|
||||
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
});
|
||||
|
||||
if (typeof window !== 'undefined' && !window.matchMedia) {
|
||||
window.matchMedia = vi.fn().mockImplementation((query) => ({
|
||||
matches: false,
|
||||
media: query,
|
||||
onchange: null,
|
||||
addListener: vi.fn(),
|
||||
removeListener: vi.fn(),
|
||||
addEventListener: vi.fn(),
|
||||
removeEventListener: vi.fn(),
|
||||
dispatchEvent: vi.fn(),
|
||||
}));
|
||||
}
|
||||
|
||||
if (typeof window !== 'undefined' && !window.ResizeObserver) {
|
||||
class ResizeObserver {
|
||||
constructor(callback) {
|
||||
this.callback = callback;
|
||||
}
|
||||
observe() {}
|
||||
unobserve() {}
|
||||
disconnect() {}
|
||||
}
|
||||
|
||||
window.ResizeObserver = ResizeObserver;
|
||||
}
|
||||
|
||||
if (typeof window !== 'undefined') {
|
||||
if (!window.requestAnimationFrame) {
|
||||
window.requestAnimationFrame = (cb) => setTimeout(cb, 16);
|
||||
}
|
||||
if (!window.cancelAnimationFrame) {
|
||||
window.cancelAnimationFrame = (id) => clearTimeout(id);
|
||||
}
|
||||
}
|
||||
|
|
@ -26,4 +26,10 @@ export default defineConfig({
|
|||
// },
|
||||
// },
|
||||
},
|
||||
|
||||
test: {
|
||||
environment: 'jsdom',
|
||||
setupFiles: ['./src/test/setupTests.js'],
|
||||
globals: true,
|
||||
},
|
||||
});
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
"""
|
||||
Dispatcharr version information.
|
||||
"""
|
||||
__version__ = '0.9.1' # Follow semantic versioning (MAJOR.MINOR.PATCH)
|
||||
__version__ = '0.10.3' # Follow semantic versioning (MAJOR.MINOR.PATCH)
|
||||
__timestamp__ = None # Set during CI/CD build process
|
||||
|
|
|
|||