Mirror of https://github.com/Dispatcharr/Dispatcharr.git (synced 2026-01-23 02:35:14 +00:00)
Compare commits
475 commits
Commit list: 475 commits, ranging from 8521df94ad to 7e5be6094f as listed. Only abbreviated SHA1 hashes were captured for this comparison; the author, message, and date columns of the commit table are empty.
255 changed files with 39550 additions and 9761 deletions
@@ -31,3 +31,4 @@
LICENSE
README.md
data/
docker/data/
.github/workflows/base-image.yml (vendored, 87 lines changed)
@@ -101,6 +101,28 @@ jobs:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Extract metadata for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: |
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}
labels: |
org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}
org.opencontainers.image.description=Your ultimate IPTV & stream Management companion.
org.opencontainers.image.url=https://github.com/${{ github.repository }}
org.opencontainers.image.source=https://github.com/${{ github.repository }}
org.opencontainers.image.version=${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}
org.opencontainers.image.created=${{ needs.prepare.outputs.timestamp }}
org.opencontainers.image.revision=${{ github.sha }}
org.opencontainers.image.licenses=See repository
org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/
org.opencontainers.image.vendor=${{ needs.prepare.outputs.repo_owner }}
org.opencontainers.image.authors=${{ github.actor }}
maintainer=${{ github.actor }}
build_version=DispatcharrBase version: ${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}

- name: Build and push Docker base image
uses: docker/build-push-action@v4
with:
@@ -113,6 +135,7 @@ jobs:
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
labels: ${{ steps.meta.outputs.labels }}
build-args: |
REPO_OWNER=${{ needs.prepare.outputs.repo_owner }}
REPO_NAME=${{ needs.prepare.outputs.repo_name }}
@@ -154,18 +177,74 @@ jobs:

# GitHub Container Registry manifests
# branch tag (e.g. base or base-dev)
docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \
docker buildx imagetools create \
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
--annotation "index:org.opencontainers.image.licenses=See repository" \
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
--annotation "index:maintainer=${{ github.actor }}" \
--annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \
--tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \
ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-arm64

# branch + timestamp tag
docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \
docker buildx imagetools create \
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
--annotation "index:org.opencontainers.image.licenses=See repository" \
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
--annotation "index:maintainer=${{ github.actor }}" \
--annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \
--tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \
ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-arm64

# Docker Hub manifests
# branch tag (e.g. base or base-dev)
docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \
docker buildx imagetools create \
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
--annotation "index:org.opencontainers.image.licenses=See repository" \
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
--annotation "index:maintainer=${{ github.actor }}" \
--annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \
--tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-arm64

# branch + timestamp tag
docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \
docker buildx imagetools create \
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
--annotation "index:org.opencontainers.image.licenses=See repository" \
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
--annotation "index:maintainer=${{ github.actor }}" \
--annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \
--tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-arm64
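The hunks above replace the plain `imagetools create` calls with annotated ones, so the metadata lives on the multi-arch OCI image index itself rather than only on the per-platform manifests. A minimal sketch of how the pushed index could be spot-checked after a run; the tag used here is illustrative, not taken from the workflow outputs:

# Sketch: print the index-level annotations of a pushed multi-arch tag.
# Assumes Docker with buildx is installed and the tag exists; adjust "ref".
import json
import subprocess

ref = "ghcr.io/dispatcharr/dispatcharrbase:base"  # hypothetical example tag

raw = subprocess.run(
    ["docker", "buildx", "imagetools", "inspect", "--raw", ref],
    check=True, capture_output=True, text=True,
).stdout

index = json.loads(raw)
# These correspond to the --annotation "index:..." flags added in the workflow.
for key, value in (index.get("annotations") or {}).items():
    print(f"{key} = {value}")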
.github/workflows/ci.yml (vendored, 89 lines changed)
@@ -3,6 +3,8 @@ name: CI Pipeline
on:
push:
branches: [dev]
paths-ignore:
- '**.md'
pull_request:
branches: [dev]
workflow_dispatch:
@@ -117,7 +119,27 @@ jobs:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

# use metadata from the prepare job
- name: Extract metadata for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: |
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}
labels: |
org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}
org.opencontainers.image.description=Your ultimate IPTV & stream Management companion.
org.opencontainers.image.url=https://github.com/${{ github.repository }}
org.opencontainers.image.source=https://github.com/${{ github.repository }}
org.opencontainers.image.version=${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}
org.opencontainers.image.created=${{ needs.prepare.outputs.timestamp }}
org.opencontainers.image.revision=${{ github.sha }}
org.opencontainers.image.licenses=See repository
org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/
org.opencontainers.image.vendor=${{ needs.prepare.outputs.repo_owner }}
org.opencontainers.image.authors=${{ github.actor }}
maintainer=${{ github.actor }}
build_version=Dispatcharr version: ${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}

- name: Build and push Docker image
uses: docker/build-push-action@v4
@@ -135,6 +157,7 @@ jobs:
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
labels: ${{ steps.meta.outputs.labels }}
build-args: |
REPO_OWNER=${{ needs.prepare.outputs.repo_owner }}
REPO_NAME=${{ needs.prepare.outputs.repo_name }}
@@ -179,16 +202,72 @@ jobs:
echo "Creating multi-arch manifest for ${OWNER}/${REPO}"

# branch tag (e.g. latest or dev)
docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \
docker buildx imagetools create \
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.version=${BRANCH_TAG}" \
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
--annotation "index:org.opencontainers.image.licenses=See repository" \
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
--annotation "index:maintainer=${{ github.actor }}" \
--annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \
--tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \
ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-arm64

# version + timestamp tag
docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP} \
docker buildx imagetools create \
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.version=${VERSION}-${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
--annotation "index:org.opencontainers.image.licenses=See repository" \
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
--annotation "index:maintainer=${{ github.actor }}" \
--annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \
--tag ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP} \
ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP}-arm64

# also create Docker Hub manifests using the same username
docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \
docker buildx imagetools create \
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.version=${BRANCH_TAG}" \
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
--annotation "index:org.opencontainers.image.licenses=See repository" \
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
--annotation "index:maintainer=${{ github.actor }}" \
--annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \
--tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-arm64

docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP} \
docker buildx imagetools create \
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.version=${VERSION}-${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
--annotation "index:org.opencontainers.image.licenses=See repository" \
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
--annotation "index:maintainer=${{ github.actor }}" \
--annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \
--tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP} \
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP}-arm64
.github/workflows/frontend-tests.yml (new file, vendored, 41 lines)
@@ -0,0 +1,41 @@
name: Frontend Tests

on:
  push:
    branches: [main, dev]
    paths:
      - 'frontend/**'
      - '.github/workflows/frontend-tests.yml'
  pull_request:
    branches: [main, dev]
    paths:
      - 'frontend/**'
      - '.github/workflows/frontend-tests.yml'

jobs:
  test:
    runs-on: ubuntu-latest

    defaults:
      run:
        working-directory: ./frontend

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '24'
          cache: 'npm'
          cache-dependency-path: './frontend/package-lock.json'

      - name: Install dependencies
        run: npm ci

      # - name: Run linter
      #   run: npm run lint

      - name: Run tests
        run: npm test
.github/workflows/release.yml (vendored, 83 lines changed)
@@ -25,6 +25,7 @@ jobs:
new_version: ${{ steps.update_version.outputs.new_version }}
repo_owner: ${{ steps.meta.outputs.repo_owner }}
repo_name: ${{ steps.meta.outputs.repo_name }}
timestamp: ${{ steps.timestamp.outputs.timestamp }}
steps:
- uses: actions/checkout@v3
with:
@@ -43,6 +44,10 @@ jobs:
NEW_VERSION=$(python -c "import version; print(f'{version.__version__}')")
echo "new_version=${NEW_VERSION}" >> $GITHUB_OUTPUT

- name: Update Changelog
run: |
python scripts/update_changelog.py ${{ steps.update_version.outputs.new_version }}

- name: Set repository metadata
id: meta
run: |
@@ -52,9 +57,15 @@ jobs:
REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]')
echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT

- name: Generate timestamp for build
id: timestamp
run: |
TIMESTAMP=$(date -u +'%Y%m%d%H%M%S')
echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT

- name: Commit and Tag
run: |
git add version.py
git add version.py CHANGELOG.md
git commit -m "Release v${{ steps.update_version.outputs.new_version }}"
git tag -a "v${{ steps.update_version.outputs.new_version }}" -m "Release v${{ steps.update_version.outputs.new_version }}"
git push origin main --tags
@@ -100,6 +111,28 @@ jobs:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Extract metadata for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: |
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}
labels: |
org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}
org.opencontainers.image.description=Your ultimate IPTV & stream Management companion.
org.opencontainers.image.url=https://github.com/${{ github.repository }}
org.opencontainers.image.source=https://github.com/${{ github.repository }}
org.opencontainers.image.version=${{ needs.prepare.outputs.new_version }}
org.opencontainers.image.created=${{ needs.prepare.outputs.timestamp }}
org.opencontainers.image.revision=${{ github.sha }}
org.opencontainers.image.licenses=See repository
org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/
org.opencontainers.image.vendor=${{ needs.prepare.outputs.repo_owner }}
org.opencontainers.image.authors=${{ github.actor }}
maintainer=${{ github.actor }}
build_version=Dispatcharr version: ${{ needs.prepare.outputs.new_version }} Build date: ${{ needs.prepare.outputs.timestamp }}

- name: Build and push Docker image
uses: docker/build-push-action@v4
with:
@@ -111,6 +144,7 @@ jobs:
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.new_version }}-${{ matrix.platform }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:latest-${{ matrix.platform }}
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.new_version }}-${{ matrix.platform }}
labels: ${{ steps.meta.outputs.labels }}
build-args: |
REPO_OWNER=${{ needs.prepare.outputs.repo_owner }}
REPO_NAME=${{ needs.prepare.outputs.repo_name }}
@@ -145,25 +179,48 @@ jobs:
OWNER=${{ needs.prepare.outputs.repo_owner }}
REPO=${{ needs.prepare.outputs.repo_name }}
VERSION=${{ needs.prepare.outputs.new_version }}
TIMESTAMP=${{ needs.prepare.outputs.timestamp }}

echo "Creating multi-arch manifest for ${OWNER}/${REPO}"

# GitHub Container Registry manifests
# latest tag
docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:latest \
ghcr.io/${OWNER}/${REPO}:latest-amd64 ghcr.io/${OWNER}/${REPO}:latest-arm64

# version tag
docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${VERSION} \
# Create one manifest with both latest and version tags
docker buildx imagetools create \
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.version=${VERSION}" \
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
--annotation "index:org.opencontainers.image.licenses=See repository" \
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
--annotation "index:maintainer=${{ github.actor }}" \
--annotation "index:build_version=Dispatcharr version: ${VERSION} Build date: ${TIMESTAMP}" \
--tag ghcr.io/${OWNER}/${REPO}:latest \
--tag ghcr.io/${OWNER}/${REPO}:${VERSION} \
ghcr.io/${OWNER}/${REPO}:${VERSION}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-arm64

# Docker Hub manifests
# latest tag
docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest \
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest-arm64

# version tag
docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION} \
# Create one manifest with both latest and version tags
docker buildx imagetools create \
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
--annotation "index:org.opencontainers.image.version=${VERSION}" \
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
--annotation "index:org.opencontainers.image.licenses=See repository" \
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
--annotation "index:maintainer=${{ github.actor }}" \
--annotation "index:build_version=Dispatcharr version: ${VERSION} Build date: ${TIMESTAMP}" \
--tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest \
--tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION} \
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-arm64

create-release:
.gitignore (vendored, 3 lines changed)
@@ -18,4 +18,5 @@ dump.rdb
debugpy*
uwsgi.sock
package-lock.json
models
models
.idea
CHANGELOG.md (new file, 1014 lines): file diff suppressed because it is too large.
@@ -20,30 +20,88 @@ class TokenObtainPairView(TokenObtainPairView):
def post(self, request, *args, **kwargs):
# Custom logic here
if not network_access_allowed(request, "UI"):
# Log blocked login attempt due to network restrictions
from core.utils import log_system_event
username = request.data.get("username", 'unknown')
client_ip = request.META.get('REMOTE_ADDR', 'unknown')
user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')
log_system_event(
event_type='login_failed',
user=username,
client_ip=client_ip,
user_agent=user_agent,
reason='Network access denied',
)
return Response({"error": "Forbidden"}, status=status.HTTP_403_FORBIDDEN)

# Get the response from the parent class first
response = super().post(request, *args, **kwargs)
username = request.data.get("username")

# If login was successful, update last_login
if response.status_code == 200:
username = request.data.get("username")
if username:
from django.utils import timezone
try:
user = User.objects.get(username=username)
user.last_login = timezone.now()
user.save(update_fields=['last_login'])
except User.DoesNotExist:
pass  # User doesn't exist, but login somehow succeeded
# Log login attempt
from core.utils import log_system_event
client_ip = request.META.get('REMOTE_ADDR', 'unknown')
user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')

return response
try:
response = super().post(request, *args, **kwargs)

# If login was successful, update last_login and log success
if response.status_code == 200:
if username:
from django.utils import timezone
try:
user = User.objects.get(username=username)
user.last_login = timezone.now()
user.save(update_fields=['last_login'])

# Log successful login
log_system_event(
event_type='login_success',
user=username,
client_ip=client_ip,
user_agent=user_agent,
)
except User.DoesNotExist:
pass  # User doesn't exist, but login somehow succeeded
else:
# Log failed login attempt
log_system_event(
event_type='login_failed',
user=username or 'unknown',
client_ip=client_ip,
user_agent=user_agent,
reason='Invalid credentials',
)

return response

except Exception as e:
# If parent class raises an exception (e.g., validation error), log failed attempt
log_system_event(
event_type='login_failed',
user=username or 'unknown',
client_ip=client_ip,
user_agent=user_agent,
reason=f'Authentication error: {str(e)[:100]}',
)
raise  # Re-raise the exception to maintain normal error flow


class TokenRefreshView(TokenRefreshView):
def post(self, request, *args, **kwargs):
# Custom logic here
if not network_access_allowed(request, "UI"):
# Log blocked token refresh attempt due to network restrictions
from core.utils import log_system_event
client_ip = request.META.get('REMOTE_ADDR', 'unknown')
user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')
log_system_event(
event_type='login_failed',
user='token_refresh',
client_ip=client_ip,
user_agent=user_agent,
reason='Network access denied (token refresh)',
)
return Response({"error": "Unauthorized"}, status=status.HTTP_403_FORBIDDEN)

return super().post(request, *args, **kwargs)
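The new logging calls go through core.utils.log_system_event, whose implementation is not part of this diff. Purely as an illustration of the call shape used above, here is a hypothetical sketch that mirrors the keyword arguments seen at the call sites (event_type, user, client_ip, user_agent, reason); the project's real helper may differ:

# Hypothetical sketch only; not Dispatcharr's actual core.utils.log_system_event.
import logging

logger = logging.getLogger("system_events")

def log_system_event(event_type, user=None, client_ip=None, user_agent=None, **extra):
    """Record a security-relevant event such as login_success, login_failed, or logout."""
    logger.info(
        "event=%s user=%s ip=%s agent=%s extra=%s",
        event_type, user, client_ip, user_agent, extra,
    )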
@@ -80,6 +138,15 @@ def initialize_superuser(request):
class AuthViewSet(viewsets.ViewSet):
"""Handles user login and logout"""

def get_permissions(self):
"""
Login doesn't require auth, but logout does
"""
if self.action == 'logout':
from rest_framework.permissions import IsAuthenticated
return [IsAuthenticated()]
return []

@swagger_auto_schema(
operation_description="Authenticate and log in a user",
request_body=openapi.Schema(
@@ -100,6 +167,11 @@ class AuthViewSet(viewsets.ViewSet):
password = request.data.get("password")
user = authenticate(request, username=username, password=password)

# Get client info for logging
from core.utils import log_system_event
client_ip = request.META.get('REMOTE_ADDR', 'unknown')
user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')

if user:
login(request, user)
# Update last_login timestamp
@@ -107,6 +179,14 @@ class AuthViewSet(viewsets.ViewSet):
user.last_login = timezone.now()
user.save(update_fields=['last_login'])

# Log successful login
log_system_event(
event_type='login_success',
user=username,
client_ip=client_ip,
user_agent=user_agent,
)

return Response(
{
"message": "Login successful",
@@ -118,6 +198,15 @@ class AuthViewSet(viewsets.ViewSet):
},
}
)

# Log failed login attempt
log_system_event(
event_type='login_failed',
user=username or 'unknown',
client_ip=client_ip,
user_agent=user_agent,
reason='Invalid credentials',
)
return Response({"error": "Invalid credentials"}, status=400)

@swagger_auto_schema(
@@ -126,6 +215,19 @@ class AuthViewSet(viewsets.ViewSet):
)
def logout(self, request):
"""Logs out the authenticated user"""
# Log logout event before actually logging out
from core.utils import log_system_event
username = request.user.username if request.user and request.user.is_authenticated else 'unknown'
client_ip = request.META.get('REMOTE_ADDR', 'unknown')
user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')

log_system_event(
event_type='logout',
user=username,
client_ip=client_ip,
user_agent=user_agent,
)

logout(request)
return Response({"message": "Logout successful"})
@@ -27,6 +27,7 @@ urlpatterns = [
path('core/', include(('core.api_urls', 'core'), namespace='core')),
path('plugins/', include(('apps.plugins.api_urls', 'plugins'), namespace='plugins')),
path('vod/', include(('apps.vod.api_urls', 'vod'), namespace='vod')),
path('backups/', include(('apps.backups.api_urls', 'backups'), namespace='backups')),
# path('output/', include(('apps.output.api_urls', 'output'), namespace='output')),
#path('player/', include(('apps.player.api_urls', 'player'), namespace='player')),
#path('settings/', include(('apps.settings.api_urls', 'settings'), namespace='settings')),
apps/backups/__init__.py (new file, empty)
apps/backups/api_urls.py (new file, 18 lines)
@@ -0,0 +1,18 @@
from django.urls import path

from . import api_views

app_name = "backups"

urlpatterns = [
    path("", api_views.list_backups, name="backup-list"),
    path("create/", api_views.create_backup, name="backup-create"),
    path("upload/", api_views.upload_backup, name="backup-upload"),
    path("schedule/", api_views.get_schedule, name="backup-schedule-get"),
    path("schedule/update/", api_views.update_schedule, name="backup-schedule-update"),
    path("status/<str:task_id>/", api_views.backup_status, name="backup-status"),
    path("<str:filename>/download-token/", api_views.get_download_token, name="backup-download-token"),
    path("<str:filename>/download/", api_views.download_backup, name="backup-download"),
    path("<str:filename>/delete/", api_views.delete_backup, name="backup-delete"),
    path("<str:filename>/restore/", api_views.restore_backup, name="backup-restore"),
]
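Combined with the path('backups/', include(...)) entry added to the project urlconf above, these routes live under the API's backups/ prefix and can be resolved by namespace. A small sketch; the resulting absolute path depends on where the project mounts its API urlconf:

# Sketch: resolving backup endpoints by name through the "backups" namespace.
from django.urls import reverse

status_url = reverse("backups:backup-status", kwargs={"task_id": "some-task-id"})
restore_url = reverse("backups:backup-restore", kwargs={"filename": "backup.zip"})
print(status_url, restore_url)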
apps/backups/api_views.py (new file, 364 lines)
@ -0,0 +1,364 @@
|
|||
import hashlib
|
||||
import hmac
|
||||
import logging
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
from celery.result import AsyncResult
|
||||
from django.conf import settings
|
||||
from django.http import HttpResponse, StreamingHttpResponse, Http404
|
||||
from rest_framework import status
|
||||
from rest_framework.decorators import api_view, permission_classes, parser_classes
|
||||
from rest_framework.permissions import IsAdminUser, AllowAny
|
||||
from rest_framework.parsers import MultiPartParser, FormParser
|
||||
from rest_framework.response import Response
|
||||
|
||||
from . import services
|
||||
from .tasks import create_backup_task, restore_backup_task
|
||||
from .scheduler import get_schedule_settings, update_schedule_settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _generate_task_token(task_id: str) -> str:
|
||||
"""Generate a signed token for task status access without auth."""
|
||||
secret = settings.SECRET_KEY.encode()
|
||||
return hmac.new(secret, task_id.encode(), hashlib.sha256).hexdigest()[:32]
|
||||
|
||||
|
||||
def _verify_task_token(task_id: str, token: str) -> bool:
|
||||
"""Verify a task token is valid."""
|
||||
expected = _generate_task_token(task_id)
|
||||
return hmac.compare_digest(expected, token)
|
||||
|
||||
|
||||
@api_view(["GET"])
|
||||
@permission_classes([IsAdminUser])
|
||||
def list_backups(request):
|
||||
"""List all available backup files."""
|
||||
try:
|
||||
backups = services.list_backups()
|
||||
return Response(backups, status=status.HTTP_200_OK)
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Failed to list backups: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
|
||||
|
||||
@api_view(["POST"])
|
||||
@permission_classes([IsAdminUser])
|
||||
def create_backup(request):
|
||||
"""Create a new backup (async via Celery)."""
|
||||
try:
|
||||
task = create_backup_task.delay()
|
||||
return Response(
|
||||
{
|
||||
"detail": "Backup started",
|
||||
"task_id": task.id,
|
||||
"task_token": _generate_task_token(task.id),
|
||||
},
|
||||
status=status.HTTP_202_ACCEPTED,
|
||||
)
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Failed to start backup: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
|
||||
|
||||
@api_view(["GET"])
|
||||
@permission_classes([AllowAny])
|
||||
def backup_status(request, task_id):
|
||||
"""Check the status of a backup/restore task.
|
||||
|
||||
Requires either:
|
||||
- Valid admin authentication, OR
|
||||
- Valid task_token query parameter
|
||||
"""
|
||||
# Check for token-based auth (for restore when session is invalidated)
|
||||
token = request.query_params.get("token")
|
||||
if token:
|
||||
if not _verify_task_token(task_id, token):
|
||||
return Response(
|
||||
{"detail": "Invalid task token"},
|
||||
status=status.HTTP_403_FORBIDDEN,
|
||||
)
|
||||
else:
|
||||
# Fall back to admin auth check
|
||||
if not request.user.is_authenticated or not request.user.is_staff:
|
||||
return Response(
|
||||
{"detail": "Authentication required"},
|
||||
status=status.HTTP_401_UNAUTHORIZED,
|
||||
)
|
||||
|
||||
try:
|
||||
result = AsyncResult(task_id)
|
||||
|
||||
if result.ready():
|
||||
task_result = result.get()
|
||||
if task_result.get("status") == "completed":
|
||||
return Response({
|
||||
"state": "completed",
|
||||
"result": task_result,
|
||||
})
|
||||
else:
|
||||
return Response({
|
||||
"state": "failed",
|
||||
"error": task_result.get("error", "Unknown error"),
|
||||
})
|
||||
elif result.failed():
|
||||
return Response({
|
||||
"state": "failed",
|
||||
"error": str(result.result),
|
||||
})
|
||||
else:
|
||||
return Response({
|
||||
"state": result.state.lower(),
|
||||
})
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Failed to get task status: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
|
||||
|
||||
@api_view(["GET"])
|
||||
@permission_classes([IsAdminUser])
|
||||
def get_download_token(request, filename):
|
||||
"""Get a signed token for downloading a backup file."""
|
||||
try:
|
||||
# Security: prevent path traversal
|
||||
if ".." in filename or "/" in filename or "\\" in filename:
|
||||
raise Http404("Invalid filename")
|
||||
|
||||
backup_dir = services.get_backup_dir()
|
||||
backup_file = backup_dir / filename
|
||||
|
||||
if not backup_file.exists():
|
||||
raise Http404("Backup file not found")
|
||||
|
||||
token = _generate_task_token(filename)
|
||||
return Response({"token": token})
|
||||
except Http404:
|
||||
raise
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Failed to generate token: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
|
||||
|
||||
@api_view(["GET"])
|
||||
@permission_classes([AllowAny])
|
||||
def download_backup(request, filename):
|
||||
"""Download a backup file.
|
||||
|
||||
Requires either:
|
||||
- Valid admin authentication, OR
|
||||
- Valid download_token query parameter
|
||||
"""
|
||||
# Check for token-based auth (avoids CORS preflight issues)
|
||||
token = request.query_params.get("token")
|
||||
if token:
|
||||
if not _verify_task_token(filename, token):
|
||||
return Response(
|
||||
{"detail": "Invalid download token"},
|
||||
status=status.HTTP_403_FORBIDDEN,
|
||||
)
|
||||
else:
|
||||
# Fall back to admin auth check
|
||||
if not request.user.is_authenticated or not request.user.is_staff:
|
||||
return Response(
|
||||
{"detail": "Authentication required"},
|
||||
status=status.HTTP_401_UNAUTHORIZED,
|
||||
)
|
||||
|
||||
try:
|
||||
# Security: prevent path traversal by checking for suspicious characters
|
||||
if ".." in filename or "/" in filename or "\\" in filename:
|
||||
raise Http404("Invalid filename")
|
||||
|
||||
backup_dir = services.get_backup_dir()
|
||||
backup_file = (backup_dir / filename).resolve()
|
||||
|
||||
# Security: ensure the resolved path is still within backup_dir
|
||||
if not str(backup_file).startswith(str(backup_dir.resolve())):
|
||||
raise Http404("Invalid filename")
|
||||
|
||||
if not backup_file.exists() or not backup_file.is_file():
|
||||
raise Http404("Backup file not found")
|
||||
|
||||
file_size = backup_file.stat().st_size
|
||||
|
||||
# Use X-Accel-Redirect for nginx (AIO container) - nginx serves file directly
|
||||
# Fall back to streaming for non-nginx deployments
|
||||
use_nginx_accel = os.environ.get("USE_NGINX_ACCEL", "").lower() == "true"
|
||||
logger.info(f"[DOWNLOAD] File: {filename}, Size: {file_size}, USE_NGINX_ACCEL: {use_nginx_accel}")
|
||||
|
||||
if use_nginx_accel:
|
||||
# X-Accel-Redirect: Django returns immediately, nginx serves file
|
||||
logger.info(f"[DOWNLOAD] Using X-Accel-Redirect: /protected-backups/{filename}")
|
||||
response = HttpResponse()
|
||||
response["X-Accel-Redirect"] = f"/protected-backups/{filename}"
|
||||
response["Content-Type"] = "application/zip"
|
||||
response["Content-Length"] = file_size
|
||||
response["Content-Disposition"] = f'attachment; filename="{filename}"'
|
||||
return response
|
||||
else:
|
||||
# Streaming fallback for non-nginx deployments
|
||||
logger.info(f"[DOWNLOAD] Using streaming fallback (no nginx)")
|
||||
def file_iterator(file_path, chunk_size=2 * 1024 * 1024):
|
||||
with open(file_path, "rb") as f:
|
||||
while chunk := f.read(chunk_size):
|
||||
yield chunk
|
||||
|
||||
response = StreamingHttpResponse(
|
||||
file_iterator(backup_file),
|
||||
content_type="application/zip",
|
||||
)
|
||||
response["Content-Length"] = file_size
|
||||
response["Content-Disposition"] = f'attachment; filename="{filename}"'
|
||||
        return response
    except Http404:
        raise
    except Exception as e:
        return Response(
            {"detail": f"Download failed: {str(e)}"},
            status=status.HTTP_500_INTERNAL_SERVER_ERROR,
        )


@api_view(["DELETE"])
@permission_classes([IsAdminUser])
def delete_backup(request, filename):
    """Delete a backup file."""
    try:
        # Security: prevent path traversal
        if ".." in filename or "/" in filename or "\\" in filename:
            raise Http404("Invalid filename")

        services.delete_backup(filename)
        return Response(
            {"detail": "Backup deleted successfully"},
            status=status.HTTP_204_NO_CONTENT,
        )
    except FileNotFoundError:
        raise Http404("Backup file not found")
    except Exception as e:
        return Response(
            {"detail": f"Delete failed: {str(e)}"},
            status=status.HTTP_500_INTERNAL_SERVER_ERROR,
        )


@api_view(["POST"])
@permission_classes([IsAdminUser])
@parser_classes([MultiPartParser, FormParser])
def upload_backup(request):
    """Upload a backup file for restoration."""
    uploaded = request.FILES.get("file")
    if not uploaded:
        return Response(
            {"detail": "No file uploaded"},
            status=status.HTTP_400_BAD_REQUEST,
        )

    try:
        backup_dir = services.get_backup_dir()
        filename = uploaded.name or "uploaded-backup.zip"

        # Ensure unique filename
        backup_file = backup_dir / filename
        counter = 1
        while backup_file.exists():
            name_parts = filename.rsplit(".", 1)
            if len(name_parts) == 2:
                backup_file = backup_dir / f"{name_parts[0]}-{counter}.{name_parts[1]}"
            else:
                backup_file = backup_dir / f"{filename}-{counter}"
            counter += 1

        # Save uploaded file
        with backup_file.open("wb") as f:
            for chunk in uploaded.chunks():
                f.write(chunk)

        return Response(
            {
                "detail": "Backup uploaded successfully",
                "filename": backup_file.name,
            },
            status=status.HTTP_201_CREATED,
        )
    except Exception as e:
        return Response(
            {"detail": f"Upload failed: {str(e)}"},
            status=status.HTTP_500_INTERNAL_SERVER_ERROR,
        )


@api_view(["POST"])
@permission_classes([IsAdminUser])
def restore_backup(request, filename):
    """Restore from a backup file (async via Celery). WARNING: This will flush the database!"""
    try:
        # Security: prevent path traversal
        if ".." in filename or "/" in filename or "\\" in filename:
            raise Http404("Invalid filename")

        backup_dir = services.get_backup_dir()
        backup_file = backup_dir / filename

        if not backup_file.exists():
            raise Http404("Backup file not found")

        task = restore_backup_task.delay(filename)
        return Response(
            {
                "detail": "Restore started",
                "task_id": task.id,
                "task_token": _generate_task_token(task.id),
            },
            status=status.HTTP_202_ACCEPTED,
        )
    except Http404:
        raise
    except Exception as e:
        return Response(
            {"detail": f"Failed to start restore: {str(e)}"},
            status=status.HTTP_500_INTERNAL_SERVER_ERROR,
        )


@api_view(["GET"])
@permission_classes([IsAdminUser])
def get_schedule(request):
    """Get backup schedule settings."""
    try:
        settings = get_schedule_settings()
        return Response(settings)
    except Exception as e:
        return Response(
            {"detail": f"Failed to get schedule: {str(e)}"},
            status=status.HTTP_500_INTERNAL_SERVER_ERROR,
        )


@api_view(["PUT"])
@permission_classes([IsAdminUser])
def update_schedule(request):
    """Update backup schedule settings."""
    try:
        settings = update_schedule_settings(request.data)
        return Response(settings)
    except ValueError as e:
        return Response(
            {"detail": str(e)},
            status=status.HTTP_400_BAD_REQUEST,
        )
    except Exception as e:
        return Response(
            {"detail": f"Failed to update schedule: {str(e)}"},
            status=status.HTTP_500_INTERNAL_SERVER_ERROR,
        )
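For reference, a sketch of the kind of payload update_schedule above accepts via request.data; the field names come from the validation in apps/backups/scheduler.py shown later in this diff, and the values here are only examples.

# Example payload for the update_schedule endpoint (values are illustrative).
# Field names mirror update_schedule_settings() in apps/backups/scheduler.py.
payload = {
    "enabled": True,
    "frequency": "weekly",   # "daily" or "weekly"
    "time": "03:00",         # HH:MM, validated by update_schedule_settings()
    "day_of_week": 0,        # 0-6 (Sunday-Saturday, per the scheduler's error message)
    "retention_count": 3,    # keep the 3 most recent backups
}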
apps/backups/apps.py (new file, 7 lines)
@@ -0,0 +1,7 @@
from django.apps import AppConfig


class BackupsConfig(AppConfig):
    default_auto_field = "django.db.models.BigAutoField"
    name = "apps.backups"
    verbose_name = "Backups"
apps/backups/migrations/__init__.py (new file, empty)
apps/backups/models.py (new file, empty)

apps/backups/scheduler.py (new file, 202 lines)
@@ -0,0 +1,202 @@
import json
import logging

from django_celery_beat.models import PeriodicTask, CrontabSchedule

from core.models import CoreSettings

logger = logging.getLogger(__name__)

BACKUP_SCHEDULE_TASK_NAME = "backup-scheduled-task"

DEFAULTS = {
    "schedule_enabled": True,
    "schedule_frequency": "daily",
    "schedule_time": "03:00",
    "schedule_day_of_week": 0,  # Sunday
    "retention_count": 3,
    "schedule_cron_expression": "",
}


def _get_backup_settings():
    """Get all backup settings from CoreSettings grouped JSON."""
    try:
        settings_obj = CoreSettings.objects.get(key="backup_settings")
        return settings_obj.value if isinstance(settings_obj.value, dict) else DEFAULTS.copy()
    except CoreSettings.DoesNotExist:
        return DEFAULTS.copy()


def _update_backup_settings(updates: dict) -> None:
    """Update backup settings in the grouped JSON."""
    obj, created = CoreSettings.objects.get_or_create(
        key="backup_settings",
        defaults={"name": "Backup Settings", "value": DEFAULTS.copy()}
    )
    current = obj.value if isinstance(obj.value, dict) else {}
    current.update(updates)
    obj.value = current
    obj.save()


def get_schedule_settings() -> dict:
    """Get all backup schedule settings."""
    settings = _get_backup_settings()
    return {
        "enabled": bool(settings.get("schedule_enabled", DEFAULTS["schedule_enabled"])),
        "frequency": str(settings.get("schedule_frequency", DEFAULTS["schedule_frequency"])),
        "time": str(settings.get("schedule_time", DEFAULTS["schedule_time"])),
        "day_of_week": int(settings.get("schedule_day_of_week", DEFAULTS["schedule_day_of_week"])),
        "retention_count": int(settings.get("retention_count", DEFAULTS["retention_count"])),
        "cron_expression": str(settings.get("schedule_cron_expression", DEFAULTS["schedule_cron_expression"])),
    }


def update_schedule_settings(data: dict) -> dict:
    """Update backup schedule settings and sync the PeriodicTask."""
    # Validate
    if "frequency" in data and data["frequency"] not in ("daily", "weekly"):
        raise ValueError("frequency must be 'daily' or 'weekly'")

    if "time" in data:
        try:
            hour, minute = data["time"].split(":")
            int(hour)
            int(minute)
        except (ValueError, AttributeError):
            raise ValueError("time must be in HH:MM format")

    if "day_of_week" in data:
        day = int(data["day_of_week"])
        if day < 0 or day > 6:
            raise ValueError("day_of_week must be 0-6 (Sunday-Saturday)")

    if "retention_count" in data:
        count = int(data["retention_count"])
        if count < 0:
            raise ValueError("retention_count must be >= 0")

    # Update settings with proper key names
    updates = {}
    if "enabled" in data:
        updates["schedule_enabled"] = bool(data["enabled"])
    if "frequency" in data:
        updates["schedule_frequency"] = str(data["frequency"])
    if "time" in data:
        updates["schedule_time"] = str(data["time"])
    if "day_of_week" in data:
        updates["schedule_day_of_week"] = int(data["day_of_week"])
    if "retention_count" in data:
        updates["retention_count"] = int(data["retention_count"])
    if "cron_expression" in data:
        updates["schedule_cron_expression"] = str(data["cron_expression"])

    _update_backup_settings(updates)

    # Sync the periodic task
    _sync_periodic_task()

    return get_schedule_settings()


def _sync_periodic_task() -> None:
    """Create, update, or delete the scheduled backup task based on settings."""
    settings = get_schedule_settings()

    if not settings["enabled"]:
        # Delete the task if it exists
        task = PeriodicTask.objects.filter(name=BACKUP_SCHEDULE_TASK_NAME).first()
        if task:
            old_crontab = task.crontab
            task.delete()
            _cleanup_orphaned_crontab(old_crontab)
            logger.info("Backup schedule disabled, removed periodic task")
        return

    # Get old crontab before creating new one
    old_crontab = None
    try:
        old_task = PeriodicTask.objects.get(name=BACKUP_SCHEDULE_TASK_NAME)
        old_crontab = old_task.crontab
    except PeriodicTask.DoesNotExist:
        pass

    # Check if using cron expression (advanced mode)
    if settings["cron_expression"]:
        # Parse cron expression: "minute hour day month weekday"
        try:
            parts = settings["cron_expression"].split()
            if len(parts) != 5:
                raise ValueError("Cron expression must have 5 parts: minute hour day month weekday")

            minute, hour, day_of_month, month_of_year, day_of_week = parts

            crontab, _ = CrontabSchedule.objects.get_or_create(
                minute=minute,
                hour=hour,
                day_of_week=day_of_week,
                day_of_month=day_of_month,
                month_of_year=month_of_year,
                timezone=CoreSettings.get_system_time_zone(),
            )
        except Exception as e:
            logger.error(f"Invalid cron expression '{settings['cron_expression']}': {e}")
            raise ValueError(f"Invalid cron expression: {e}")
    else:
        # Use simple frequency-based scheduling
        # Parse time
        hour, minute = settings["time"].split(":")

        # Build crontab based on frequency
        system_tz = CoreSettings.get_system_time_zone()
        if settings["frequency"] == "daily":
            crontab, _ = CrontabSchedule.objects.get_or_create(
                minute=minute,
                hour=hour,
                day_of_week="*",
                day_of_month="*",
                month_of_year="*",
                timezone=system_tz,
            )
        else:  # weekly
            crontab, _ = CrontabSchedule.objects.get_or_create(
                minute=minute,
                hour=hour,
                day_of_week=str(settings["day_of_week"]),
                day_of_month="*",
                month_of_year="*",
                timezone=system_tz,
            )

    # Create or update the periodic task
    task, created = PeriodicTask.objects.update_or_create(
        name=BACKUP_SCHEDULE_TASK_NAME,
        defaults={
            "task": "apps.backups.tasks.scheduled_backup_task",
            "crontab": crontab,
            "enabled": True,
            "kwargs": json.dumps({"retention_count": settings["retention_count"]}),
        },
    )

    # Clean up old crontab if it changed and is orphaned
    if old_crontab and old_crontab.id != crontab.id:
        _cleanup_orphaned_crontab(old_crontab)

    action = "Created" if created else "Updated"
    logger.info(f"{action} backup schedule: {settings['frequency']} at {settings['time']}")


def _cleanup_orphaned_crontab(crontab_schedule):
    """Delete old CrontabSchedule if no other tasks are using it."""
    if crontab_schedule is None:
        return

    # Check if any other tasks are using this crontab
    if PeriodicTask.objects.filter(crontab=crontab_schedule).exists():
        logger.debug(f"CrontabSchedule {crontab_schedule.id} still in use, not deleting")
        return

    logger.debug(f"Cleaning up orphaned CrontabSchedule: {crontab_schedule.id}")
    crontab_schedule.delete()
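If a cron_expression is set, the frequency/time fields are ignored in favour of the five-field expression parsed above; a minimal sketch of driving that path directly (values are illustrative, and a configured Django project with django-celery-beat is assumed):

# Illustrative: enable an advanced cron-based schedule ("30 2 * * 1" = 02:30
# every Monday, in the minute/hour/day/month/weekday order parsed above).
from apps.backups.scheduler import update_schedule_settings

settings = update_schedule_settings({
    "enabled": True,
    "cron_expression": "30 2 * * 1",
    "retention_count": 5,
})
print(settings["cron_expression"])  # "30 2 * * 1"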
apps/backups/services.py (new file, 350 lines)
@@ -0,0 +1,350 @@
import datetime
import json
import os
import shutil
import subprocess
import tempfile
from pathlib import Path
from zipfile import ZipFile, ZIP_DEFLATED
import logging
import pytz

from django.conf import settings
from core.models import CoreSettings

logger = logging.getLogger(__name__)


def get_backup_dir() -> Path:
    """Get the backup directory, creating it if necessary."""
    backup_dir = Path(settings.BACKUP_ROOT)
    backup_dir.mkdir(parents=True, exist_ok=True)
    return backup_dir


def _is_postgresql() -> bool:
    """Check if we're using PostgreSQL."""
    return settings.DATABASES["default"]["ENGINE"] == "django.db.backends.postgresql"


def _get_pg_env() -> dict:
    """Get environment variables for PostgreSQL commands."""
    db_config = settings.DATABASES["default"]
    env = os.environ.copy()
    env["PGPASSWORD"] = db_config.get("PASSWORD", "")
    return env


def _get_pg_args() -> list[str]:
    """Get common PostgreSQL command arguments."""
    db_config = settings.DATABASES["default"]
    return [
        "-h", db_config.get("HOST", "localhost"),
        "-p", str(db_config.get("PORT", 5432)),
        "-U", db_config.get("USER", "postgres"),
        "-d", db_config.get("NAME", "dispatcharr"),
    ]


def _dump_postgresql(output_file: Path) -> None:
    """Dump PostgreSQL database using pg_dump."""
    logger.info("Dumping PostgreSQL database with pg_dump...")

    cmd = [
        "pg_dump",
        *_get_pg_args(),
        "-Fc",  # Custom format for pg_restore
        "-v",  # Verbose
        "-f", str(output_file),
    ]

    result = subprocess.run(
        cmd,
        env=_get_pg_env(),
        capture_output=True,
        text=True,
    )

    if result.returncode != 0:
        logger.error(f"pg_dump failed: {result.stderr}")
        raise RuntimeError(f"pg_dump failed: {result.stderr}")

    logger.debug(f"pg_dump output: {result.stderr}")


def _clean_postgresql_schema() -> None:
    """Drop and recreate the public schema to ensure a completely clean restore."""
    logger.info("[PG_CLEAN] Dropping and recreating public schema...")

    # Commands to drop and recreate schema
    sql_commands = "DROP SCHEMA IF EXISTS public CASCADE; CREATE SCHEMA public; GRANT ALL ON SCHEMA public TO public;"

    cmd = [
        "psql",
        *_get_pg_args(),
        "-c", sql_commands,
    ]

    result = subprocess.run(
        cmd,
        env=_get_pg_env(),
        capture_output=True,
        text=True,
    )

    if result.returncode != 0:
        logger.error(f"[PG_CLEAN] Failed to clean schema: {result.stderr}")
        raise RuntimeError(f"Failed to clean PostgreSQL schema: {result.stderr}")

    logger.info("[PG_CLEAN] Schema cleaned successfully")


def _restore_postgresql(dump_file: Path) -> None:
    """Restore PostgreSQL database using pg_restore."""
    logger.info("[PG_RESTORE] Starting pg_restore...")
    logger.info(f"[PG_RESTORE] Dump file: {dump_file}")

    # Drop and recreate schema to ensure a completely clean restore
    _clean_postgresql_schema()

    pg_args = _get_pg_args()
    logger.info(f"[PG_RESTORE] Connection args: {pg_args}")

    cmd = [
        "pg_restore",
        "--no-owner",  # Skip ownership commands (we already created schema)
        *pg_args,
        "-v",  # Verbose
        str(dump_file),
    ]

    logger.info(f"[PG_RESTORE] Running command: {' '.join(cmd)}")

    result = subprocess.run(
        cmd,
        env=_get_pg_env(),
        capture_output=True,
        text=True,
    )

    logger.info(f"[PG_RESTORE] Return code: {result.returncode}")

    # pg_restore may return non-zero even on partial success
    # Check for actual errors vs warnings
    if result.returncode != 0:
        # Some errors during restore are expected (e.g., "does not exist" when cleaning)
        # Only fail on critical errors
        stderr = result.stderr.lower()
        if "fatal" in stderr or "could not connect" in stderr:
            logger.error(f"[PG_RESTORE] Failed critically: {result.stderr}")
            raise RuntimeError(f"pg_restore failed: {result.stderr}")
        else:
            logger.warning(f"[PG_RESTORE] Completed with warnings: {result.stderr[:500]}...")

    logger.info("[PG_RESTORE] Completed successfully")


def _dump_sqlite(output_file: Path) -> None:
    """Dump SQLite database using sqlite3 .backup command."""
    logger.info("Dumping SQLite database with sqlite3 .backup...")
    db_path = Path(settings.DATABASES["default"]["NAME"])

    if not db_path.exists():
        raise FileNotFoundError(f"SQLite database not found: {db_path}")

    # Use sqlite3 .backup command via stdin for reliable execution
    result = subprocess.run(
        ["sqlite3", str(db_path)],
        input=f".backup '{output_file}'\n",
        capture_output=True,
        text=True,
    )

    if result.returncode != 0:
        logger.error(f"sqlite3 backup failed: {result.stderr}")
        raise RuntimeError(f"sqlite3 backup failed: {result.stderr}")

    # Verify the backup file was created
    if not output_file.exists():
        raise RuntimeError("sqlite3 backup failed: output file not created")

    logger.info(f"sqlite3 backup completed successfully: {output_file}")


def _restore_sqlite(dump_file: Path) -> None:
    """Restore SQLite database by replacing the database file."""
    logger.info("Restoring SQLite database...")
    db_path = Path(settings.DATABASES["default"]["NAME"])
    backup_current = None

    # Backup current database before overwriting
    if db_path.exists():
        backup_current = db_path.with_suffix(".db.bak")
        shutil.copy2(db_path, backup_current)
        logger.info(f"Backed up current database to {backup_current}")

    # Ensure parent directory exists
    db_path.parent.mkdir(parents=True, exist_ok=True)

    # The backup file from _dump_sqlite is a complete SQLite database file
    # We can simply copy it over the existing database
    shutil.copy2(dump_file, db_path)

    # Verify the restore worked by checking if sqlite3 can read it
    result = subprocess.run(
        ["sqlite3", str(db_path)],
        input=".tables\n",
        capture_output=True,
        text=True,
    )

    if result.returncode != 0:
        logger.error(f"sqlite3 verification failed: {result.stderr}")
        # Try to restore from backup
        if backup_current and backup_current.exists():
            shutil.copy2(backup_current, db_path)
            logger.info("Restored original database from backup")
        raise RuntimeError(f"sqlite3 restore verification failed: {result.stderr}")

    logger.info("sqlite3 restore completed successfully")


def create_backup() -> Path:
    """
    Create a backup archive containing database dump and data directories.
    Returns the path to the created backup file.
    """
    backup_dir = get_backup_dir()

    # Use system timezone for filename (user-friendly), but keep internal timestamps as UTC
    system_tz_name = CoreSettings.get_system_time_zone()
    try:
        system_tz = pytz.timezone(system_tz_name)
        now_local = datetime.datetime.now(datetime.UTC).astimezone(system_tz)
        timestamp = now_local.strftime("%Y.%m.%d.%H.%M.%S")
    except Exception as e:
        logger.warning(f"Failed to use system timezone {system_tz_name}: {e}, falling back to UTC")
        timestamp = datetime.datetime.now(datetime.UTC).strftime("%Y.%m.%d.%H.%M.%S")

    backup_name = f"dispatcharr-backup-{timestamp}.zip"
    backup_file = backup_dir / backup_name

    logger.info(f"Creating backup: {backup_name}")

    with tempfile.TemporaryDirectory(prefix="dispatcharr-backup-") as temp_dir:
        temp_path = Path(temp_dir)

        # Determine database type and dump accordingly
        if _is_postgresql():
            db_dump_file = temp_path / "database.dump"
            _dump_postgresql(db_dump_file)
            db_type = "postgresql"
        else:
            db_dump_file = temp_path / "database.sqlite3"
            _dump_sqlite(db_dump_file)
            db_type = "sqlite"

        # Create ZIP archive with compression and ZIP64 support for large files
        with ZipFile(backup_file, "w", compression=ZIP_DEFLATED, allowZip64=True) as zip_file:
            # Add database dump
            zip_file.write(db_dump_file, db_dump_file.name)

            # Add metadata
            metadata = {
                "format": "dispatcharr-backup",
                "version": 2,
                "database_type": db_type,
                "database_file": db_dump_file.name,
                "created_at": datetime.datetime.now(datetime.UTC).isoformat(),
            }
            zip_file.writestr("metadata.json", json.dumps(metadata, indent=2))

    logger.info(f"Backup created successfully: {backup_file}")
    return backup_file


def restore_backup(backup_file: Path) -> None:
    """
    Restore from a backup archive.
    WARNING: This will overwrite the database!
    """
    if not backup_file.exists():
        raise FileNotFoundError(f"Backup file not found: {backup_file}")

    logger.info(f"Restoring from backup: {backup_file}")

    with tempfile.TemporaryDirectory(prefix="dispatcharr-restore-") as temp_dir:
        temp_path = Path(temp_dir)

        # Extract backup
        logger.debug("Extracting backup archive...")
        with ZipFile(backup_file, "r") as zip_file:
            zip_file.extractall(temp_path)

        # Read metadata
        metadata_file = temp_path / "metadata.json"
        if not metadata_file.exists():
            raise ValueError("Invalid backup: missing metadata.json")

        with open(metadata_file) as f:
            metadata = json.load(f)

        # Restore database
        _restore_database(temp_path, metadata)

    logger.info("Restore completed successfully")


def _restore_database(temp_path: Path, metadata: dict) -> None:
    """Restore database from backup."""
    db_type = metadata.get("database_type", "postgresql")
    db_file = metadata.get("database_file", "database.dump")
    dump_file = temp_path / db_file

    if not dump_file.exists():
        raise ValueError(f"Invalid backup: missing {db_file}")

    current_db_type = "postgresql" if _is_postgresql() else "sqlite"

    if db_type != current_db_type:
        raise ValueError(
            f"Database type mismatch: backup is {db_type}, "
            f"but current database is {current_db_type}"
        )

    if db_type == "postgresql":
        _restore_postgresql(dump_file)
    else:
        _restore_sqlite(dump_file)


def list_backups() -> list[dict]:
    """List all available backup files with metadata."""
    backup_dir = get_backup_dir()
    backups = []

    for backup_file in sorted(backup_dir.glob("dispatcharr-backup-*.zip"), reverse=True):
        # Use UTC timezone so frontend can convert to user's local time
        created_time = datetime.datetime.fromtimestamp(backup_file.stat().st_mtime, datetime.UTC)
        backups.append({
            "name": backup_file.name,
            "size": backup_file.stat().st_size,
            "created": created_time.isoformat(),
        })

    return backups


def delete_backup(filename: str) -> None:
    """Delete a backup file."""
    backup_dir = get_backup_dir()
    backup_file = backup_dir / filename

    if not backup_file.exists():
        raise FileNotFoundError(f"Backup file not found: {filename}")

    if not backup_file.is_file():
        raise ValueError(f"Invalid backup file: {filename}")

    backup_file.unlink()
    logger.info(f"Deleted backup: {filename}")
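Since create_backup() writes a flat archive (one database dump plus metadata.json), a backup can be inspected without restoring it; a small sketch using only the standard library (the file name is an example):

# Sketch: peek at a backup produced by create_backup() above.
import json
from zipfile import ZipFile

with ZipFile("dispatcharr-backup-2025.10.06.03.00.00.zip") as zf:
    meta = json.loads(zf.read("metadata.json"))
    print(meta["database_type"], meta["database_file"], meta["created_at"])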
apps/backups/tasks.py (new file, 106 lines)
@@ -0,0 +1,106 @@
import logging
import traceback
from celery import shared_task

from . import services

logger = logging.getLogger(__name__)


def _cleanup_old_backups(retention_count: int) -> int:
    """Delete old backups, keeping only the most recent N. Returns count deleted."""
    if retention_count <= 0:
        return 0

    backups = services.list_backups()
    if len(backups) <= retention_count:
        return 0

    # Backups are sorted newest first, so delete from the end
    to_delete = backups[retention_count:]
    deleted = 0

    for backup in to_delete:
        try:
            services.delete_backup(backup["name"])
            deleted += 1
            logger.info(f"[CLEANUP] Deleted old backup: {backup['name']}")
        except Exception as e:
            logger.error(f"[CLEANUP] Failed to delete {backup['name']}: {e}")

    return deleted


@shared_task(bind=True)
def create_backup_task(self):
    """Celery task to create a backup asynchronously."""
    try:
        logger.info(f"[BACKUP] Starting backup task {self.request.id}")
        backup_file = services.create_backup()
        logger.info(f"[BACKUP] Task {self.request.id} completed: {backup_file.name}")
        return {
            "status": "completed",
            "filename": backup_file.name,
            "size": backup_file.stat().st_size,
        }
    except Exception as e:
        logger.error(f"[BACKUP] Task {self.request.id} failed: {str(e)}")
        logger.error(f"[BACKUP] Traceback: {traceback.format_exc()}")
        return {
            "status": "failed",
            "error": str(e),
        }


@shared_task(bind=True)
def restore_backup_task(self, filename: str):
    """Celery task to restore a backup asynchronously."""
    try:
        logger.info(f"[RESTORE] Starting restore task {self.request.id} for {filename}")
        backup_dir = services.get_backup_dir()
        backup_file = backup_dir / filename
        logger.info(f"[RESTORE] Backup file path: {backup_file}")
        services.restore_backup(backup_file)
        logger.info(f"[RESTORE] Task {self.request.id} completed successfully")
        return {
            "status": "completed",
            "filename": filename,
        }
    except Exception as e:
        logger.error(f"[RESTORE] Task {self.request.id} failed: {str(e)}")
        logger.error(f"[RESTORE] Traceback: {traceback.format_exc()}")
        return {
            "status": "failed",
            "error": str(e),
        }


@shared_task(bind=True)
def scheduled_backup_task(self, retention_count: int = 0):
    """Celery task for scheduled backups with optional retention cleanup."""
    try:
        logger.info(f"[SCHEDULED] Starting scheduled backup task {self.request.id}")

        # Create backup
        backup_file = services.create_backup()
        logger.info(f"[SCHEDULED] Backup created: {backup_file.name}")

        # Cleanup old backups if retention is set
        deleted = 0
        if retention_count > 0:
            deleted = _cleanup_old_backups(retention_count)
            logger.info(f"[SCHEDULED] Cleanup complete, deleted {deleted} old backup(s)")

        return {
            "status": "completed",
            "filename": backup_file.name,
            "size": backup_file.stat().st_size,
            "deleted_count": deleted,
        }
    except Exception as e:
        logger.error(f"[SCHEDULED] Task {self.request.id} failed: {str(e)}")
        logger.error(f"[SCHEDULED] Traceback: {traceback.format_exc()}")
        return {
            "status": "failed",
            "error": str(e),
        }
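list_backups() returns entries newest-first, so the slice in _cleanup_old_backups() keeps the first retention_count entries and deletes the rest; a small illustration with made-up names:

# Illustration of the retention slice used by _cleanup_old_backups().
backups = [  # newest first, as returned by services.list_backups()
    {"name": "dispatcharr-backup-2025.10.08.03.00.00.zip"},
    {"name": "dispatcharr-backup-2025.10.07.03.00.00.zip"},
    {"name": "dispatcharr-backup-2025.10.06.03.00.00.zip"},
    {"name": "dispatcharr-backup-2025.10.05.03.00.00.zip"},
]
retention_count = 3
to_delete = backups[retention_count:]  # only the oldest entry is removed
print([b["name"] for b in to_delete])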
apps/backups/tests.py (new file, 1163 lines)
File diff suppressed because it is too large.
@@ -13,12 +13,14 @@ from .api_views import (
    UpdateChannelMembershipAPIView,
    BulkUpdateChannelMembershipAPIView,
    RecordingViewSet,
    RecurringRecordingRuleViewSet,
    GetChannelStreamsAPIView,
    SeriesRulesAPIView,
    DeleteSeriesRuleAPIView,
    EvaluateSeriesRulesAPIView,
    BulkRemoveSeriesRecordingsAPIView,
    BulkDeleteUpcomingRecordingsAPIView,
    ComskipConfigAPIView,
)

app_name = 'channels'  # for DRF routing

@@ -30,6 +32,7 @@ router.register(r'channels', ChannelViewSet, basename='channel')
router.register(r'logos', LogoViewSet, basename='logo')
router.register(r'profiles', ChannelProfileViewSet, basename='profile')
router.register(r'recordings', RecordingViewSet, basename='recording')
router.register(r'recurring-rules', RecurringRecordingRuleViewSet, basename='recurring-rule')

urlpatterns = [
    # Bulk delete is a single APIView, not a ViewSet

@@ -44,8 +47,9 @@ urlpatterns = [
    path('series-rules/', SeriesRulesAPIView.as_view(), name='series_rules'),
    path('series-rules/evaluate/', EvaluateSeriesRulesAPIView.as_view(), name='evaluate_series_rules'),
    path('series-rules/bulk-remove/', BulkRemoveSeriesRecordingsAPIView.as_view(), name='bulk_remove_series_recordings'),
    path('series-rules/<str:tvg_id>/', DeleteSeriesRuleAPIView.as_view(), name='delete_series_rule'),
    path('series-rules/<path:tvg_id>/', DeleteSeriesRuleAPIView.as_view(), name='delete_series_rule'),
    path('recordings/bulk-delete-upcoming/', BulkDeleteUpcomingRecordingsAPIView.as_view(), name='bulk_delete_upcoming_recordings'),
    path('dvr/comskip-config/', ComskipConfigAPIView.as_view(), name='comskip_config'),
]

urlpatterns += router.urls
File diff suppressed because it is too large.
apps/channels/migrations/0026_recurringrecordingrule.py (new file, 31 lines)
@@ -0,0 +1,31 @@
# Generated by Django 5.0.14 on 2025-09-18 14:56

import django.db.models.deletion
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('dispatcharr_channels', '0025_alter_channelgroupm3uaccount_custom_properties_and_more'),
    ]

    operations = [
        migrations.CreateModel(
            name='RecurringRecordingRule',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('days_of_week', models.JSONField(default=list)),
                ('start_time', models.TimeField()),
                ('end_time', models.TimeField()),
                ('enabled', models.BooleanField(default=True)),
                ('name', models.CharField(blank=True, max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('channel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recurring_rules', to='dispatcharr_channels.channel')),
            ],
            options={
                'ordering': ['channel', 'start_time'],
            },
        ),
    ]
@@ -0,0 +1,23 @@
# Generated by Django 5.2.4 on 2025-10-05 20:50

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('dispatcharr_channels', '0026_recurringrecordingrule'),
    ]

    operations = [
        migrations.AddField(
            model_name='recurringrecordingrule',
            name='end_date',
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='recurringrecordingrule',
            name='start_date',
            field=models.DateField(blank=True, null=True),
        ),
    ]
@@ -0,0 +1,25 @@
# Generated by Django 5.2.4 on 2025-10-06 22:55

import django.utils.timezone
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('dispatcharr_channels', '0027_recurringrecordingrule_end_date_and_more'),
    ]

    operations = [
        migrations.AddField(
            model_name='channel',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, help_text='Timestamp when this channel was created'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='channel',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, help_text='Timestamp when this channel was last updated'),
        ),
    ]
@@ -0,0 +1,54 @@
# Generated migration to backfill stream_hash for existing custom streams

from django.db import migrations
import hashlib


def backfill_custom_stream_hashes(apps, schema_editor):
    """
    Generate stream_hash for all custom streams that don't have one.
    Uses stream ID to create a stable hash that won't change when name/url is edited.
    """
    Stream = apps.get_model('dispatcharr_channels', 'Stream')

    custom_streams_without_hash = Stream.objects.filter(
        is_custom=True,
        stream_hash__isnull=True
    )

    updated_count = 0
    for stream in custom_streams_without_hash:
        # Generate a stable hash using the stream's ID
        # This ensures the hash never changes even if name/url is edited
        unique_string = f"custom_stream_{stream.id}"
        stream.stream_hash = hashlib.sha256(unique_string.encode()).hexdigest()
        stream.save(update_fields=['stream_hash'])
        updated_count += 1

    if updated_count > 0:
        print(f"Backfilled stream_hash for {updated_count} custom streams")
    else:
        print("No custom streams needed stream_hash backfill")


def reverse_backfill(apps, schema_editor):
    """
    Reverse migration - clear stream_hash for custom streams.
    Note: This will break preview functionality for custom streams.
    """
    Stream = apps.get_model('dispatcharr_channels', 'Stream')

    custom_streams = Stream.objects.filter(is_custom=True)
    count = custom_streams.update(stream_hash=None)
    print(f"Cleared stream_hash for {count} custom streams")


class Migration(migrations.Migration):

    dependencies = [
        ('dispatcharr_channels', '0028_channel_created_at_channel_updated_at'),
    ]

    operations = [
        migrations.RunPython(backfill_custom_stream_hashes, reverse_backfill),
    ]
apps/channels/migrations/0030_alter_stream_url.py (new file, 18 lines)
@@ -0,0 +1,18 @@
# Generated by Django 5.2.4 on 2025-10-28 20:00

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('dispatcharr_channels', '0029_backfill_custom_stream_hashes'),
    ]

    operations = [
        migrations.AlterField(
            model_name='stream',
            name='url',
            field=models.URLField(blank=True, max_length=4096, null=True),
        ),
    ]
@@ -0,0 +1,29 @@
# Generated by Django 5.2.9 on 2026-01-09 18:19

import datetime
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('dispatcharr_channels', '0030_alter_stream_url'),
    ]

    operations = [
        migrations.AddField(
            model_name='channelgroupm3uaccount',
            name='is_stale',
            field=models.BooleanField(db_index=True, default=False, help_text='Whether this group relationship is stale (not seen in recent refresh, pending deletion)'),
        ),
        migrations.AddField(
            model_name='channelgroupm3uaccount',
            name='last_seen',
            field=models.DateTimeField(db_index=True, default=datetime.datetime.now, help_text='Last time this group was seen in the M3U source during a refresh'),
        ),
        migrations.AddField(
            model_name='stream',
            name='is_stale',
            field=models.BooleanField(db_index=True, default=False, help_text='Whether this stream is stale (not seen in recent refresh, pending deletion)'),
        ),
    ]
@@ -55,7 +55,7 @@ class Stream(models.Model):
    """

    name = models.CharField(max_length=255, default="Default Stream")
    url = models.URLField(max_length=2000, blank=True, null=True)
    url = models.URLField(max_length=4096, blank=True, null=True)
    m3u_account = models.ForeignKey(
        M3UAccount,
        on_delete=models.CASCADE,

@@ -94,6 +99,11 @@ class Stream(models.Model):
        db_index=True,
    )
    last_seen = models.DateTimeField(db_index=True, default=datetime.now)
    is_stale = models.BooleanField(
        default=False,
        db_index=True,
        help_text="Whether this stream is stale (not seen in recent refresh, pending deletion)"
    )
    custom_properties = models.JSONField(default=dict, blank=True, null=True)

    # Stream statistics fields

@@ -119,11 +124,11 @@ class Stream(models.Model):
        return self.name or self.url or f"Stream ID {self.id}"

    @classmethod
    def generate_hash_key(cls, name, url, tvg_id, keys=None, m3u_id=None):
    def generate_hash_key(cls, name, url, tvg_id, keys=None, m3u_id=None, group=None):
        if keys is None:
            keys = CoreSettings.get_m3u_hash_key().split(",")

        stream_parts = {"name": name, "url": url, "tvg_id": tvg_id, "m3u_id": m3u_id}
        stream_parts = {"name": name, "url": url, "tvg_id": tvg_id, "m3u_id": m3u_id, "group": group}

        hash_parts = {key: stream_parts[key] for key in keys if key in stream_parts}

@@ -152,8 +157,14 @@ class Stream(models.Model):
        stream = cls.objects.create(**fields_to_update)
        return stream, True  # True means it was created

    # @TODO: honor stream's stream profile
    def get_stream_profile(self):
        """
        Get the stream profile for this stream.
        Uses the stream's own profile if set, otherwise returns the default.
        """
        if self.stream_profile:
            return self.stream_profile

        stream_profile = StreamProfile.objects.get(
            id=CoreSettings.get_default_stream_profile_id()
        )

@@ -303,6 +314,15 @@ class Channel(models.Model):
        help_text="The M3U account that auto-created this channel"
    )

    created_at = models.DateTimeField(
        auto_now_add=True,
        help_text="Timestamp when this channel was created"
    )
    updated_at = models.DateTimeField(
        auto_now=True,
        help_text="Timestamp when this channel was last updated"
    )

    def clean(self):
        # Enforce unique channel_number within a given group
        existing = Channel.objects.filter(

@@ -574,6 +594,16 @@ class ChannelGroupM3UAccount(models.Model):
        blank=True,
        help_text='Starting channel number for auto-created channels in this group'
    )
    last_seen = models.DateTimeField(
        default=datetime.now,
        db_index=True,
        help_text='Last time this group was seen in the M3U source during a refresh'
    )
    is_stale = models.BooleanField(
        default=False,
        db_index=True,
        help_text='Whether this group relationship is stale (not seen in recent refresh, pending deletion)'
    )

    class Meta:
        unique_together = ("channel_group", "m3u_account")

@@ -601,3 +631,35 @@ class Recording(models.Model):

    def __str__(self):
        return f"{self.channel.name} - {self.start_time} to {self.end_time}"


class RecurringRecordingRule(models.Model):
    """Rule describing a recurring manual DVR schedule."""

    channel = models.ForeignKey(
        "Channel",
        on_delete=models.CASCADE,
        related_name="recurring_rules",
    )
    days_of_week = models.JSONField(default=list)
    start_time = models.TimeField()
    end_time = models.TimeField()
    enabled = models.BooleanField(default=True)
    name = models.CharField(max_length=255, blank=True)
    start_date = models.DateField(null=True, blank=True)
    end_date = models.DateField(null=True, blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        ordering = ["channel", "start_time"]

    def __str__(self):
        channel_name = getattr(self.channel, "name", str(self.channel_id))
        return f"Recurring rule for {channel_name}"

    def cleaned_days(self):
        try:
            return sorted({int(d) for d in (self.days_of_week or []) if 0 <= int(d) <= 6})
        except Exception:
            return []
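A quick illustration of what cleaned_days() above does with a typical days_of_week payload (values made up); note that a single non-numeric entry makes the whole method fall back to an empty list:

# Standalone reproduction of RecurringRecordingRule.cleaned_days().
days_of_week = ["3", 1, 1, 9]
try:
    cleaned = sorted({int(d) for d in (days_of_week or []) if 0 <= int(d) <= 6})
except Exception:
    cleaned = []
print(cleaned)  # [1, 3] - duplicates and out-of-range values are dropped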
@@ -1,4 +1,6 @@
import json
from datetime import datetime

from rest_framework import serializers
from .models import (
    Stream,

@@ -10,6 +12,7 @@ from .models import (
    ChannelProfile,
    ChannelProfileMembership,
    Recording,
    RecurringRecordingRule,
)
from apps.epg.serializers import EPGDataSerializer
from core.models import StreamProfile

@@ -61,47 +64,15 @@ class LogoSerializer(serializers.ModelSerializer):
        return reverse("api:channels:logo-cache", args=[obj.id])

    def get_channel_count(self, obj):
        """Get the number of channels, movies, and series using this logo"""
        channel_count = obj.channels.count()

        # Safely get movie count
        try:
            movie_count = obj.movie.count() if hasattr(obj, 'movie') else 0
        except AttributeError:
            movie_count = 0

        # Safely get series count
        try:
            series_count = obj.series.count() if hasattr(obj, 'series') else 0
        except AttributeError:
            series_count = 0

        return channel_count + movie_count + series_count
        """Get the number of channels using this logo"""
        return obj.channels.count()

    def get_is_used(self, obj):
        """Check if this logo is used by any channels, movies, or series"""
        # Check if used by channels
        if obj.channels.exists():
            return True

        # Check if used by movies (handle case where VOD app might not be available)
        try:
            if hasattr(obj, 'movie') and obj.movie.exists():
                return True
        except AttributeError:
            pass

        # Check if used by series (handle case where VOD app might not be available)
        try:
            if hasattr(obj, 'series') and obj.series.exists():
                return True
        except AttributeError:
            pass

        return False
        """Check if this logo is used by any channels"""
        return obj.channels.exists()

    def get_channel_names(self, obj):
        """Get the names of channels, movies, and series using this logo (limited to first 5)"""
        """Get the names of channels using this logo (limited to first 5)"""
        names = []

        # Get channel names

@@ -109,28 +80,6 @@ class LogoSerializer(serializers.ModelSerializer):
        for channel in channels:
            names.append(f"Channel: {channel.name}")

        # Get movie names (only if we haven't reached limit)
        if len(names) < 5:
            try:
                if hasattr(obj, 'movie'):
                    remaining_slots = 5 - len(names)
                    movies = obj.movie.all()[:remaining_slots]
                    for movie in movies:
                        names.append(f"Movie: {movie.name}")
            except AttributeError:
                pass

        # Get series names (only if we haven't reached limit)
        if len(names) < 5:
            try:
                if hasattr(obj, 'series'):
                    remaining_slots = 5 - len(names)
                    series = obj.series.all()[:remaining_slots]
                    for series_item in series:
                        names.append(f"Series: {series_item.name}")
            except AttributeError:
                pass

        # Calculate total count for "more" message
        total_count = self.get_channel_count(obj)
        if total_count > 5:

@@ -170,6 +119,7 @@ class StreamSerializer(serializers.ModelSerializer):
            "current_viewers",
            "updated_at",
            "last_seen",
            "is_stale",
            "stream_profile_id",
            "is_custom",
            "channel_group",

@@ -206,7 +156,7 @@ class ChannelGroupM3UAccountSerializer(serializers.ModelSerializer):

    class Meta:
        model = ChannelGroupM3UAccount
        fields = ["m3u_accounts", "channel_group", "enabled", "auto_channel_sync", "auto_sync_channel_start", "custom_properties"]
        fields = ["m3u_accounts", "channel_group", "enabled", "auto_channel_sync", "auto_sync_channel_start", "custom_properties", "is_stale", "last_seen"]

    def to_representation(self, instance):
        data = super().to_representation(instance)

@@ -230,8 +180,8 @@ class ChannelGroupM3UAccountSerializer(serializers.ModelSerializer):
# Channel Group
#
class ChannelGroupSerializer(serializers.ModelSerializer):
    channel_count = serializers.IntegerField(read_only=True)
    m3u_account_count = serializers.IntegerField(read_only=True)
    channel_count = serializers.SerializerMethodField()
    m3u_account_count = serializers.SerializerMethodField()
    m3u_accounts = ChannelGroupM3UAccountSerializer(
        many=True,
        read_only=True

@@ -241,6 +191,14 @@ class ChannelGroupSerializer(serializers.ModelSerializer):
        model = ChannelGroup
        fields = ["id", "name", "channel_count", "m3u_account_count", "m3u_accounts"]

    def get_channel_count(self, obj):
        """Get count of channels in this group"""
        return obj.channels.count()

    def get_m3u_account_count(self, obj):
        """Get count of M3U accounts associated with this group"""
        return obj.m3u_accounts.count()


class ChannelProfileSerializer(serializers.ModelSerializer):
    channels = serializers.SerializerMethodField()

@@ -345,8 +303,17 @@ class ChannelSerializer(serializers.ModelSerializer):

        if include_streams:
            self.fields["streams"] = serializers.SerializerMethodField()

        return super().to_representation(instance)
            return super().to_representation(instance)
        else:
            # Fix: For PATCH/PUT responses, ensure streams are ordered
            representation = super().to_representation(instance)
            if "streams" in representation:
                representation["streams"] = list(
                    instance.streams.all()
                    .order_by("channelstream__order")
                    .values_list("id", flat=True)
                )
            return representation

    def get_logo(self, obj):
        return LogoSerializer(obj.logo).data

@@ -454,6 +421,13 @@ class RecordingSerializer(serializers.ModelSerializer):
        start_time = data.get("start_time")
        end_time = data.get("end_time")

        if start_time and timezone.is_naive(start_time):
            start_time = timezone.make_aware(start_time, timezone.get_current_timezone())
            data["start_time"] = start_time
        if end_time and timezone.is_naive(end_time):
            end_time = timezone.make_aware(end_time, timezone.get_current_timezone())
            data["end_time"] = end_time

        # If this is an EPG-based recording (program provided), apply global pre/post offsets
        try:
            cp = data.get("custom_properties") or {}

@@ -497,3 +471,56 @@ class RecordingSerializer(serializers.ModelSerializer):
            raise serializers.ValidationError("End time must be after start time.")

        return data


class RecurringRecordingRuleSerializer(serializers.ModelSerializer):
    class Meta:
        model = RecurringRecordingRule
        fields = "__all__"
        read_only_fields = ["created_at", "updated_at"]

    def validate_days_of_week(self, value):
        if not value:
            raise serializers.ValidationError("Select at least one day of the week")
        cleaned = []
        for entry in value:
            try:
                iv = int(entry)
            except (TypeError, ValueError):
                raise serializers.ValidationError("Days of week must be integers 0-6")
            if iv < 0 or iv > 6:
                raise serializers.ValidationError("Days of week must be between 0 (Monday) and 6 (Sunday)")
            cleaned.append(iv)
        return sorted(set(cleaned))

    def validate(self, attrs):
        start = attrs.get("start_time") or getattr(self.instance, "start_time", None)
        end = attrs.get("end_time") or getattr(self.instance, "end_time", None)
        start_date = attrs.get("start_date") if "start_date" in attrs else getattr(self.instance, "start_date", None)
        end_date = attrs.get("end_date") if "end_date" in attrs else getattr(self.instance, "end_date", None)
        if start_date is None:
            existing_start = getattr(self.instance, "start_date", None)
            if existing_start is None:
                raise serializers.ValidationError("Start date is required")
        if start_date and end_date and end_date < start_date:
            raise serializers.ValidationError("End date must be on or after start date")
        if end_date is None:
            existing_end = getattr(self.instance, "end_date", None)
            if existing_end is None:
                raise serializers.ValidationError("End date is required")
        if start and end and start_date and end_date:
            start_dt = datetime.combine(start_date, start)
            end_dt = datetime.combine(end_date, end)
            if end_dt <= start_dt:
                raise serializers.ValidationError("End datetime must be after start datetime")
        elif start and end and end == start:
            raise serializers.ValidationError("End time must be different from start time")
        # Normalize empty strings to None for dates
        if attrs.get("end_date") == "":
            attrs["end_date"] = None
        if attrs.get("start_date") == "":
            attrs["start_date"] = None
        return super().validate(attrs)

    def create(self, validated_data):
        return super().create(validated_data)
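Unlike the model's cleaned_days(), the serializer's validate_days_of_week() above rejects out-of-range or non-integer entries instead of silently dropping them; a rough standalone sketch of that behaviour (no DRF required):

# Rough sketch mirroring validate_days_of_week(): accepts ints or numeric
# strings 0-6, dedupes and sorts, and raises on anything else.
def clean_days_of_week(value):
    cleaned = []
    for entry in value:
        iv = int(entry)  # TypeError/ValueError becomes a ValidationError in the serializer
        if iv < 0 or iv > 6:
            raise ValueError("Days of week must be between 0 (Monday) and 6 (Sunday)")
        cleaned.append(iv)
    return sorted(set(cleaned))

print(clean_days_of_week(["1", 3, 3, "5"]))  # [1, 3, 5]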
@@ -45,6 +45,20 @@ def set_default_m3u_account(sender, instance, **kwargs):
    else:
        raise ValueError("No default M3UAccount found.")

@receiver(post_save, sender=Stream)
def generate_custom_stream_hash(sender, instance, created, **kwargs):
    """
    Generate a stable stream_hash for custom streams after creation.
    Uses the stream's ID to ensure the hash never changes even if name/url is edited.
    """
    if instance.is_custom and not instance.stream_hash and created:
        import hashlib
        # Use stream ID for a stable, unique hash that never changes
        unique_string = f"custom_stream_{instance.id}"
        instance.stream_hash = hashlib.sha256(unique_string.encode()).hexdigest()
        # Use update to avoid triggering signals again
        Stream.objects.filter(id=instance.id).update(stream_hash=instance.stream_hash)

@receiver(post_save, sender=Channel)
def refresh_epg_programs(sender, instance, created, **kwargs):
    """
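The stable hash used by this signal (and by the 0029 backfill migration earlier in the diff) is simply a SHA-256 of a fixed prefix plus the stream's primary key; for reference (the id value is an arbitrary example):

# The stable custom-stream hash, as computed in generate_custom_stream_hash()
# and the 0029 backfill migration. The id value is an arbitrary example.
import hashlib

stream_id = 42
stream_hash = hashlib.sha256(f"custom_stream_{stream_id}".encode()).hexdigest()
print(stream_hash)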
@@ -7,6 +7,8 @@ import requests
import time
import json
import subprocess
import signal
from zoneinfo import ZoneInfo
from datetime import datetime, timedelta
import gc

@@ -28,6 +30,23 @@ from urllib.parse import quote

logger = logging.getLogger(__name__)

# PostgreSQL btree index has a limit of ~2704 bytes (1/3 of 8KB page size)
# We use 2000 as a safe maximum to account for multibyte characters
def validate_logo_url(logo_url, max_length=2000):
    """
    Fast validation for logo URLs during bulk creation.
    Returns None if URL is too long (would exceed PostgreSQL btree index limit),
    original URL otherwise.

    PostgreSQL btree indexes have a maximum size of ~2704 bytes. URLs longer than
    this cannot be indexed and would cause database errors. These are typically
    base64-encoded images embedded in URLs.
    """
    if logo_url and len(logo_url) > max_length:
        logger.warning(f"Logo URL too long ({len(logo_url)} > {max_length}), skipping: {logo_url[:100]}...")
        return None
    return logo_url

def send_epg_matching_progress(total_channels, matched_channels, current_channel_name="", stage="matching"):
    """
    Send EPG matching progress via WebSocket

@@ -276,7 +295,11 @@ def match_channels_to_epg(channels_data, epg_data, region_code=None, use_ml=True
                if score > 50:  # Only show decent matches
                    logger.debug(f"  EPG '{row['name']}' (norm: '{row['norm_name']}') => score: {score} (base: {base_score}, bonus: {bonus})")

                if score > best_score:
                # When scores are equal, prefer higher priority EPG source
                row_priority = row.get('epg_source_priority', 0)
                best_priority = best_epg.get('epg_source_priority', 0) if best_epg else -1

                if score > best_score or (score == best_score and row_priority > best_priority):
                    best_score = score
                    best_epg = row
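The tie-break above keeps the best-scoring EPG row and, on equal scores, prefers the row from the higher-priority source; a compact illustration with made-up rows:

# Made-up candidate rows illustrating the score/priority tie-break above.
rows = [
    {"name": "ESPN (source A)", "score": 87, "epg_source_priority": 0},
    {"name": "ESPN (source B)", "score": 87, "epg_source_priority": 5},
]
best_score, best_epg = -1, None
for row in rows:
    row_priority = row.get("epg_source_priority", 0)
    best_priority = best_epg.get("epg_source_priority", 0) if best_epg else -1
    if row["score"] > best_score or (row["score"] == best_score and row_priority > best_priority):
        best_score, best_epg = row["score"], row
print(best_epg["name"])  # ESPN (source B)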
||||
|
||||
|
|
@ -452,9 +475,9 @@ def match_epg_channels():
|
|||
"norm_chan": normalize_name(channel.name) # Always use channel name for fuzzy matching!
|
||||
})
|
||||
|
||||
# Get all EPG data
|
||||
# Get all EPG data from active sources, ordered by source priority (highest first) so we prefer higher priority matches
|
||||
epg_data = []
|
||||
for epg in EPGData.objects.all():
|
||||
for epg in EPGData.objects.select_related('epg_source').filter(epg_source__is_active=True):
|
||||
normalized_tvg_id = epg.tvg_id.strip().lower() if epg.tvg_id else ""
|
||||
epg_data.append({
|
||||
'id': epg.id,
|
||||
|
|
@ -463,9 +486,13 @@ def match_epg_channels():
|
|||
'name': epg.name,
|
||||
'norm_name': normalize_name(epg.name),
|
||||
'epg_source_id': epg.epg_source.id if epg.epg_source else None,
|
||||
'epg_source_priority': epg.epg_source.priority if epg.epg_source else 0,
|
||||
})
|
||||
|
||||
logger.info(f"Processing {len(channels_data)} channels against {len(epg_data)} EPG entries")
|
||||
# Sort EPG data by source priority (highest first) so we prefer higher priority matches
|
||||
epg_data.sort(key=lambda x: x['epg_source_priority'], reverse=True)
|
||||
|
||||
logger.info(f"Processing {len(channels_data)} channels against {len(epg_data)} EPG entries (from active sources only)")
|
||||
|
||||
# Run EPG matching with progress updates - automatically uses conservative thresholds for bulk operations
|
||||
result = match_channels_to_epg(channels_data, epg_data, region_code, use_ml=True, send_progress=True)
|
||||
|
|
@ -599,9 +626,9 @@ def match_selected_channels_epg(channel_ids):
|
|||
"norm_chan": normalize_name(channel.name)
|
||||
})
|
||||
|
||||
# Get all EPG data
|
||||
# Get all EPG data from active sources, ordered by source priority (highest first) so we prefer higher priority matches
|
||||
epg_data = []
|
||||
for epg in EPGData.objects.all():
|
||||
for epg in EPGData.objects.select_related('epg_source').filter(epg_source__is_active=True):
|
||||
normalized_tvg_id = epg.tvg_id.strip().lower() if epg.tvg_id else ""
|
||||
epg_data.append({
|
||||
'id': epg.id,
|
||||
|
|
@ -610,9 +637,13 @@ def match_selected_channels_epg(channel_ids):
|
|||
'name': epg.name,
|
||||
'norm_name': normalize_name(epg.name),
|
||||
'epg_source_id': epg.epg_source.id if epg.epg_source else None,
|
||||
'epg_source_priority': epg.epg_source.priority if epg.epg_source else 0,
|
||||
})
|
||||
|
||||
logger.info(f"Processing {len(channels_data)} selected channels against {len(epg_data)} EPG entries")
|
||||
# Sort EPG data by source priority (highest first) so we prefer higher priority matches
|
||||
epg_data.sort(key=lambda x: x['epg_source_priority'], reverse=True)
|
||||
|
||||
logger.info(f"Processing {len(channels_data)} selected channels against {len(epg_data)} EPG entries (from active sources only)")
|
||||
|
||||
# Run EPG matching with progress updates - automatically uses appropriate thresholds
|
||||
result = match_channels_to_epg(channels_data, epg_data, region_code, use_ml=True, send_progress=True)
|
||||
|
|
@ -730,9 +761,10 @@ def match_single_channel_epg(channel_id):
|
|||
test_normalized = normalize_name(test_name)
|
||||
logger.debug(f"DEBUG normalization example: '{test_name}' → '{test_normalized}' (call sign preserved)")
|
||||
|
||||
# Get all EPG data for matching - must include norm_name field
|
||||
# Get all EPG data for matching from active sources - must include norm_name field
|
||||
# Ordered by source priority (highest first) so we prefer higher priority matches
|
||||
epg_data_list = []
|
||||
for epg in EPGData.objects.filter(name__isnull=False).exclude(name=''):
|
||||
for epg in EPGData.objects.select_related('epg_source').filter(epg_source__is_active=True, name__isnull=False).exclude(name=''):
|
||||
normalized_epg_tvg_id = epg.tvg_id.strip().lower() if epg.tvg_id else ""
|
||||
epg_data_list.append({
|
||||
'id': epg.id,
|
||||
|
|
@ -741,10 +773,14 @@ def match_single_channel_epg(channel_id):
|
|||
'name': epg.name,
|
||||
'norm_name': normalize_name(epg.name),
|
||||
'epg_source_id': epg.epg_source.id if epg.epg_source else None,
|
||||
'epg_source_priority': epg.epg_source.priority if epg.epg_source else 0,
|
||||
})
|
||||
|
||||
# Sort EPG data by source priority (highest first) so we prefer higher priority matches
|
||||
epg_data_list.sort(key=lambda x: x['epg_source_priority'], reverse=True)
|
||||
|
||||
if not epg_data_list:
|
||||
return {"matched": False, "message": "No EPG data available for matching"}
|
||||
return {"matched": False, "message": "No EPG data available for matching (from active sources)"}
|
||||
|
||||
logger.info(f"Matching single channel '{channel.name}' against {len(epg_data_list)} EPG entries")
|
||||
|
||||
|
|
@@ -1115,6 +1151,148 @@ def reschedule_upcoming_recordings_for_offset_change():

    return reschedule_upcoming_recordings_for_offset_change_impl()


def _notify_recordings_refresh():
    try:
        from core.utils import send_websocket_update
        send_websocket_update('updates', 'update', {"success": True, "type": "recordings_refreshed"})
    except Exception:
        pass


def purge_recurring_rule_impl(rule_id: int) -> int:
    """Remove all future recordings created by a recurring rule."""
    from django.utils import timezone
    from .models import Recording

    now = timezone.now()
    try:
        removed, _ = Recording.objects.filter(
            start_time__gte=now,
            custom_properties__rule__id=rule_id,
        ).delete()
    except Exception:
        removed = 0
    if removed:
        _notify_recordings_refresh()
    return removed


def sync_recurring_rule_impl(rule_id: int, drop_existing: bool = True, horizon_days: int = 14) -> int:
    """Ensure recordings exist for a recurring rule within the scheduling horizon."""
    from django.utils import timezone
    from .models import RecurringRecordingRule, Recording

    rule = RecurringRecordingRule.objects.filter(pk=rule_id).select_related("channel").first()
    now = timezone.now()
    removed = 0
    if drop_existing:
        removed = purge_recurring_rule_impl(rule_id)

    if not rule or not rule.enabled:
        return 0

    days = rule.cleaned_days()
    if not days:
        return 0

    tz_name = CoreSettings.get_system_time_zone()
    try:
        tz = ZoneInfo(tz_name)
    except Exception:
        logger.warning("Invalid or unsupported time zone '%s'; falling back to Server default", tz_name)
        tz = timezone.get_current_timezone()
    start_limit = rule.start_date or now.date()
    end_limit = rule.end_date
    horizon = now + timedelta(days=horizon_days)
    start_window = max(start_limit, now.date())
    if drop_existing and end_limit:
        end_window = end_limit
    else:
        end_window = horizon.date()
    if end_limit and end_limit < end_window:
        end_window = end_limit
    if end_window < start_window:
        return 0
    total_created = 0

    for offset in range((end_window - start_window).days + 1):
        target_date = start_window + timedelta(days=offset)
        if target_date.weekday() not in days:
            continue
        if end_limit and target_date > end_limit:
            continue
        try:
            start_dt = timezone.make_aware(datetime.combine(target_date, rule.start_time), tz)
            end_dt = timezone.make_aware(datetime.combine(target_date, rule.end_time), tz)
        except Exception:
            continue
        if end_dt <= start_dt:
            end_dt = end_dt + timedelta(days=1)
        if start_dt <= now:
            continue
        exists = Recording.objects.filter(
            channel=rule.channel,
            start_time=start_dt,
            custom_properties__rule__id=rule.id,
        ).exists()
        if exists:
            continue
        description = rule.name or f"Recurring recording for {rule.channel.name}"
        cp = {
            "rule": {
                "type": "recurring",
                "id": rule.id,
                "days_of_week": days,
                "name": rule.name or "",
            },
            "status": "scheduled",
            "description": description,
            "program": {
                "title": rule.name or rule.channel.name,
                "description": description,
                "start_time": start_dt.isoformat(),
                "end_time": end_dt.isoformat(),
            },
        }
        try:
            Recording.objects.create(
                channel=rule.channel,
                start_time=start_dt,
                end_time=end_dt,
                custom_properties=cp,
            )
            total_created += 1
        except Exception as err:
            logger.warning(f"Failed to create recurring recording for rule {rule.id}: {err}")

    if removed or total_created:
        _notify_recordings_refresh()

    return total_created


@shared_task
def rebuild_recurring_rule(rule_id: int, horizon_days: int = 14):
    return sync_recurring_rule_impl(rule_id, drop_existing=True, horizon_days=horizon_days)


@shared_task
def maintain_recurring_recordings():
    from .models import RecurringRecordingRule

    total = 0
    for rule_id in RecurringRecordingRule.objects.filter(enabled=True).values_list("id", flat=True):
        try:
            total += sync_recurring_rule_impl(rule_id, drop_existing=False)
        except Exception as err:
            logger.warning(f"Recurring rule maintenance failed for {rule_id}: {err}")
    return total


@shared_task
def purge_recurring_rule(rule_id: int):
    return purge_recurring_rule_impl(rule_id)


@shared_task
def _safe_name(s):
    try:
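One way to keep the 14-day horizon rolling forward is to run maintain_recurring_recordings from Celery Beat. A hedged sketch using django_celery_beat (already used elsewhere in this codebase); the schedule name and the 6-hour interval are illustrative assumptions, not values taken from the repository's own beat configuration:

from django_celery_beat.models import IntervalSchedule, PeriodicTask

# Illustrative only: register a periodic maintenance run every 6 hours.
schedule, _ = IntervalSchedule.objects.get_or_create(every=6, period=IntervalSchedule.HOURS)
PeriodicTask.objects.update_or_create(
    name="recurring-recordings-maintenance",  # hypothetical task name
    defaults={
        "interval": schedule,
        "task": "apps.channels.tasks.maintain_recurring_recordings",
        "enabled": True,
    },
)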
@@ -1273,6 +1451,18 @@ def run_recording(recording_id, channel_id, start_time_str, end_time_str):

    logger.info(f"Starting recording for channel {channel.name}")

    # Log system event for recording start
    try:
        from core.utils import log_system_event
        log_system_event(
            'recording_start',
            channel_id=channel.uuid,
            channel_name=channel.name,
            recording_id=recording_id
        )
    except Exception as e:
        logger.error(f"Could not log recording start event: {e}")

    # Try to resolve the Recording row up front
    recording_obj = None
    try:
@@ -1666,6 +1856,20 @@ def run_recording(recording_id, channel_id, start_time_str, end_time_str):

    # After the loop, the file and response are closed automatically.
    logger.info(f"Finished recording for channel {channel.name}")

    # Log system event for recording end
    try:
        from core.utils import log_system_event
        log_system_event(
            'recording_end',
            channel_id=channel.uuid,
            channel_name=channel.name,
            recording_id=recording_id,
            interrupted=interrupted,
            bytes_written=bytes_written
        )
    except Exception as e:
        logger.error(f"Could not log recording end event: {e}")

    # Remux TS to MKV container
    remux_success = False
    try:
@@ -1837,6 +2041,7 @@ def comskip_process_recording(recording_id: int):

    Safe to call even if comskip is not installed; stores status in custom_properties.comskip.
    """
    import shutil
    from django.db import DatabaseError
    from .models import Recording
    # Helper to broadcast status over websocket
    def _ws(status: str, extra: dict | None = None):
@@ -1854,7 +2059,33 @@ def comskip_process_recording(recording_id: int):

    except Recording.DoesNotExist:
        return "not_found"

    cp = rec.custom_properties or {}
    cp = rec.custom_properties.copy() if isinstance(rec.custom_properties, dict) else {}

    def _persist_custom_properties():
        """Persist updated custom_properties without raising if the row disappeared."""
        try:
            updated = Recording.objects.filter(pk=recording_id).update(custom_properties=cp)
            if not updated:
                logger.warning(
                    "Recording %s vanished before comskip status could be saved",
                    recording_id,
                )
                return False
        except DatabaseError as db_err:
            logger.warning(
                "Failed to persist comskip status for recording %s: %s",
                recording_id,
                db_err,
            )
            return False
        except Exception as unexpected:
            logger.warning(
                "Unexpected error while saving comskip status for recording %s: %s",
                recording_id,
                unexpected,
            )
            return False
        return True

    file_path = (cp or {}).get("file_path")
    if not file_path or not os.path.exists(file_path):
        return "no_file"
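The _persist_custom_properties helper writes through Recording.objects.filter(pk=...).update(...) instead of rec.save(): a queryset update touches only the one column, returns 0 rather than raising when the row was deleted mid-run, and never re-inserts a vanished record. The same pattern in a generic, illustrative form (the model, field and logger arguments are placeholders, not project API):

from django.db import DatabaseError

def save_json_status(model, pk, field, value, log):
    # Best-effort status write: returns False instead of raising when the row
    # is gone or the database hiccups, mirroring _persist_custom_properties above.
    try:
        return bool(model.objects.filter(pk=pk).update(**{field: value}))
    except DatabaseError as err:
        log.warning("Could not persist %s for %s %s: %s", field, model.__name__, pk, err)
        return False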
@@ -1865,8 +2096,7 @@ def comskip_process_recording(recording_id: int):

    comskip_bin = shutil.which("comskip")
    if not comskip_bin:
        cp["comskip"] = {"status": "skipped", "reason": "comskip_not_installed"}
        rec.custom_properties = cp
        rec.save(update_fields=["custom_properties"])
        _persist_custom_properties()
        _ws('skipped', {"reason": "comskip_not_installed"})
        return "comskip_missing"

@@ -1878,24 +2108,59 @@ def comskip_process_recording(recording_id: int):

    try:
        cmd = [comskip_bin, "--output", os.path.dirname(file_path)]
        # Prefer system ini if present to squelch warning and get sane defaults
        for ini_path in ("/etc/comskip/comskip.ini", "/app/docker/comskip.ini"):
            if os.path.exists(ini_path):
        # Prefer user-specified INI, fall back to known defaults
        ini_candidates = []
        try:
            custom_ini = CoreSettings.get_dvr_comskip_custom_path()
            if custom_ini:
                ini_candidates.append(custom_ini)
        except Exception as ini_err:
            logger.debug(f"Unable to load custom comskip.ini path: {ini_err}")
        ini_candidates.extend(["/etc/comskip/comskip.ini", "/app/docker/comskip.ini"])
        selected_ini = None
        for ini_path in ini_candidates:
            if ini_path and os.path.exists(ini_path):
                selected_ini = ini_path
                cmd.extend([f"--ini={ini_path}"])
                break
        cmd.append(file_path)
        subprocess.run(cmd, check=True)
        subprocess.run(
            cmd,
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
        )
    except subprocess.CalledProcessError as e:
        stderr_tail = (e.stderr or "").strip().splitlines()
        stderr_tail = stderr_tail[-5:] if stderr_tail else []
        detail = {
            "status": "error",
            "reason": "comskip_failed",
            "returncode": e.returncode,
        }
        if e.returncode and e.returncode < 0:
            try:
                detail["signal"] = signal.Signals(-e.returncode).name
            except Exception:
                detail["signal"] = f"signal_{-e.returncode}"
        if stderr_tail:
            detail["stderr"] = "\n".join(stderr_tail)
        if selected_ini:
            detail["ini_path"] = selected_ini
        cp["comskip"] = detail
        _persist_custom_properties()
        _ws('error', {"reason": "comskip_failed", "returncode": e.returncode})
        return "comskip_failed"
    except Exception as e:
        cp["comskip"] = {"status": "error", "reason": f"comskip_failed: {e}"}
        rec.custom_properties = cp
        rec.save(update_fields=["custom_properties"])
        _persist_custom_properties()
        _ws('error', {"reason": str(e)})
        return "comskip_failed"

    if not os.path.exists(edl_path):
        cp["comskip"] = {"status": "error", "reason": "edl_not_found"}
        rec.custom_properties = cp
        rec.save(update_fields=["custom_properties"])
        _persist_custom_properties()
        _ws('error', {"reason": "edl_not_found"})
        return "no_edl"
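The code above relies on comskip having written an EDL file next to the recording; each EDL line is a whitespace-separated "start end action" triple in seconds, with action 0 meaning cut. A small sketch of turning such a file into the (start, end) commercial spans the later steps work with; this is illustrative only, not the project's actual parser:

def parse_edl(edl_path: str) -> list[tuple[float, float]]:
    # Read comskip's EDL output into (start_sec, end_sec) commercial spans,
    # skipping malformed lines rather than aborting the whole run.
    spans: list[tuple[float, float]] = []
    with open(edl_path, "r") as fh:
        for line in fh:
            parts = line.split()
            if len(parts) < 3:
                continue
            try:
                start, end, action = float(parts[0]), float(parts[1]), int(float(parts[2]))
            except ValueError:
                continue
            if action == 0 and end > start:
                spans.append((start, end))
    return spans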
@@ -1913,8 +2178,7 @@ def comskip_process_recording(recording_id: int):

    duration = _ffprobe_duration(file_path)
    if duration is None:
        cp["comskip"] = {"status": "error", "reason": "duration_unknown"}
        rec.custom_properties = cp
        rec.save(update_fields=["custom_properties"])
        _persist_custom_properties()
        _ws('error', {"reason": "duration_unknown"})
        return "no_duration"
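_ffprobe_duration is only referenced in this hunk; a rough, stand-alone equivalent would ask ffprobe for the container duration and return None when it cannot be read. The real helper may differ in flags and error handling:

import json
import subprocess

def ffprobe_duration_sketch(path: str) -> float | None:
    # Ask ffprobe for the container duration in seconds; None on any failure.
    try:
        out = subprocess.run(
            ["ffprobe", "-v", "error", "-show_entries", "format=duration", "-of", "json", path],
            capture_output=True, text=True, check=True,
        ).stdout
        return float(json.loads(out)["format"]["duration"])
    except (OSError, subprocess.CalledProcessError, KeyError, ValueError):
        return None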
@@ -1943,9 +2207,14 @@ def comskip_process_recording(recording_id: int):

    keep.append((cur, duration))

    if not commercials or sum((e - s) for s, e in commercials) <= 0.5:
        cp["comskip"] = {"status": "completed", "skipped": True, "edl": os.path.basename(edl_path)}
        rec.custom_properties = cp
        rec.save(update_fields=["custom_properties"])
        cp["comskip"] = {
            "status": "completed",
            "skipped": True,
            "edl": os.path.basename(edl_path),
        }
        if selected_ini:
            cp["comskip"]["ini_path"] = selected_ini
        _persist_custom_properties()
        _ws('skipped', {"reason": "no_commercials", "commercials": 0})
        return "no_commercials"
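With the commercial spans and the total duration in hand, the kept segments are simply the gaps between commercials plus whatever follows the last one. An illustrative inversion (not the project's exact loop), dropping slivers shorter than a second so ffmpeg is not asked to cut near-empty segments:

def invert_commercials(commercials, duration, min_len=1.0):
    # Walk the sorted commercial spans and keep whatever lies between them,
    # plus the tail after the last commercial, skipping segments < min_len seconds.
    keep, cur = [], 0.0
    for start, end in sorted(commercials):
        if start - cur >= min_len:
            keep.append((cur, start))
        cur = max(cur, end)
    if duration - cur >= min_len:
        keep.append((cur, duration))
    return keep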
@@ -1969,7 +2238,8 @@ def comskip_process_recording(recording_id: int):

        list_path = os.path.join(workdir, "concat_list.txt")
        with open(list_path, "w") as lf:
            for pth in parts:
                lf.write(f"file '{pth}'\n")
                escaped = pth.replace("'", "'\\''")
                lf.write(f"file '{escaped}'\n")

        output_path = os.path.join(workdir, f"{os.path.splitext(os.path.basename(file_path))[0]}.cut.mkv")
        subprocess.run([
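The escaping line exists because ffmpeg's concat demuxer reads single-quoted paths, and a literal apostrophe inside one must be written as '\'': close the quote, emit an escaped quote, reopen. A quick illustration with a made-up file name:

path = "/recordings/It's Always Sunny.part1.ts"  # hypothetical path
escaped = path.replace("'", "'\\''")
print(f"file '{escaped}'")
# -> file '/recordings/It'\''s Always Sunny.part1.ts'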
@@ -1995,14 +2265,14 @@ def comskip_process_recording(recording_id: int):

            "segments_kept": len(parts),
            "commercials": len(commercials),
        }
        rec.custom_properties = cp
        rec.save(update_fields=["custom_properties"])
        if selected_ini:
            cp["comskip"]["ini_path"] = selected_ini
        _persist_custom_properties()
        _ws('completed', {"commercials": len(commercials), "segments_kept": len(parts)})
        return "ok"
    except Exception as e:
        cp["comskip"] = {"status": "error", "reason": str(e)}
        rec.custom_properties = cp
        rec.save(update_fields=["custom_properties"])
        _persist_custom_properties()
        _ws('error', {"reason": str(e)})
        return f"error:{e}"

def _resolve_poster_for_program(channel_name, program):
@@ -2333,15 +2603,16 @@ def bulk_create_channels_from_streams(self, stream_ids, channel_profile_ids=None

            # Store profile IDs for this channel
            profile_map.append(channel_profile_ids)

            # Handle logo
            if stream.logo_url:
            # Handle logo - validate URL length to avoid PostgreSQL btree index errors
            validated_logo_url = validate_logo_url(stream.logo_url) if stream.logo_url else None
            if validated_logo_url:
                logos_to_create.append(
                    Logo(
                        url=stream.logo_url,
                        url=validated_logo_url,
                        name=stream.name or stream.tvg_id,
                    )
                )
                logo_map.append(stream.logo_url)
                logo_map.append(validated_logo_url)
            else:
                logo_map.append(None)
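PostgreSQL will not index values larger than roughly a third of an 8 KB page (about 2700 bytes), so an extremely long logo URL can make inserts fail once the url column is indexed. A sketch of what a validator along the lines of validate_logo_url might do; the length cap and the scheme check here are assumptions for illustration, not the project's actual rules:

MAX_LOGO_URL_LENGTH = 2000  # illustrative cap, comfortably under the btree limit

def validate_logo_url_sketch(url):
    # Return a usable URL, or None so the caller simply skips creating a Logo row.
    if not url:
        return None
    url = url.strip()
    if not url.lower().startswith(("http://", "https://")):
        return None
    if len(url) > MAX_LOGO_URL_LENGTH:
        return None
    return url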
@@ -2408,7 +2679,38 @@ def bulk_create_channels_from_streams(self, stream_ids, channel_profile_ids=None

            )

            # Handle channel profile membership
            if profile_ids:
            # Semantics:
            # - None: add to ALL profiles (backward compatible default)
            # - Empty array []: add to NO profiles
            # - Sentinel [0] or 0 in array: add to ALL profiles (explicit)
            # - [1,2,...]: add to specified profile IDs only
            if profile_ids is None:
                # Omitted -> add to all profiles (backward compatible)
                all_profiles = ChannelProfile.objects.all()
                channel_profile_memberships.extend([
                    ChannelProfileMembership(
                        channel_profile=profile,
                        channel=channel,
                        enabled=True
                    )
                    for profile in all_profiles
                ])
            elif isinstance(profile_ids, list) and len(profile_ids) == 0:
                # Empty array -> add to no profiles
                pass
            elif isinstance(profile_ids, list) and 0 in profile_ids:
                # Sentinel 0 -> add to all profiles (explicit)
                all_profiles = ChannelProfile.objects.all()
                channel_profile_memberships.extend([
                    ChannelProfileMembership(
                        channel_profile=profile,
                        channel=channel,
                        enabled=True
                    )
                    for profile in all_profiles
                ])
            else:
                # Specific profile IDs
                try:
                    specific_profiles = ChannelProfile.objects.filter(id__in=profile_ids)
                    channel_profile_memberships.extend([

@@ -2424,17 +2726,6 @@ def bulk_create_channels_from_streams(self, stream_ids, channel_profile_ids=None

                        'channel_id': channel.id,
                        'error': f'Failed to add to profiles: {str(e)}'
                    })
            else:
                # Add to all profiles by default
                all_profiles = ChannelProfile.objects.all()
                channel_profile_memberships.extend([
                    ChannelProfileMembership(
                        channel_profile=profile,
                        channel=channel,
                        enabled=True
                    )
                    for profile in all_profiles
                ])

        # Bulk update channels with logos
        if update:
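The four channel_profile_ids shapes spelled out in the comments above look like this at the call site; the .delay(...) invocation form is assumed for illustration:

from apps.channels.tasks import bulk_create_channels_from_streams

# All profiles (omitted / backward-compatible default)
bulk_create_channels_from_streams.delay(stream_ids=[1, 2, 3], channel_profile_ids=None)
# No profiles at all
bulk_create_channels_from_streams.delay(stream_ids=[1, 2, 3], channel_profile_ids=[])
# All profiles, requested explicitly via the 0 sentinel
bulk_create_channels_from_streams.delay(stream_ids=[1, 2, 3], channel_profile_ids=[0])
# Only the profiles with IDs 4 and 7
bulk_create_channels_from_streams.delay(stream_ids=[1, 2, 3], channel_profile_ids=[4, 7])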
@ -2711,3 +3002,98 @@ def set_channels_logos_from_epg(self, channel_ids):
|
|||
'error': str(e)
|
||||
})
|
||||
raise
|
||||
|
||||
|
||||
@shared_task(bind=True)
|
||||
def set_channels_tvg_ids_from_epg(self, channel_ids):
|
||||
"""
|
||||
Celery task to set channel TVG-IDs from EPG data for multiple channels
|
||||
"""
|
||||
from core.utils import send_websocket_update
|
||||
|
||||
task_id = self.request.id
|
||||
total_channels = len(channel_ids)
|
||||
updated_count = 0
|
||||
errors = []
|
||||
|
||||
try:
|
||||
logger.info(f"Starting EPG TVG-ID setting task for {total_channels} channels")
|
||||
|
||||
# Send initial progress
|
||||
send_websocket_update('updates', 'update', {
|
||||
'type': 'epg_tvg_id_setting_progress',
|
||||
'task_id': task_id,
|
||||
'progress': 0,
|
||||
'total': total_channels,
|
||||
'status': 'running',
|
||||
'message': 'Starting EPG TVG-ID setting...'
|
||||
})
|
||||
|
||||
batch_size = 100
|
||||
for i in range(0, total_channels, batch_size):
|
||||
batch_ids = channel_ids[i:i + batch_size]
|
||||
batch_updates = []
|
||||
|
||||
# Get channels and their EPG data
|
||||
channels = Channel.objects.filter(id__in=batch_ids).select_related('epg_data')
|
||||
|
||||
for channel in channels:
|
||||
try:
|
||||
if channel.epg_data and channel.epg_data.tvg_id:
|
||||
if channel.tvg_id != channel.epg_data.tvg_id:
|
||||
channel.tvg_id = channel.epg_data.tvg_id
|
||||
batch_updates.append(channel)
|
||||
updated_count += 1
|
||||
except Exception as e:
|
||||
errors.append(f"Channel {channel.id}: {str(e)}")
|
||||
logger.error(f"Error processing channel {channel.id}: {e}")
|
||||
|
||||
# Bulk update the batch
|
||||
if batch_updates:
|
||||
Channel.objects.bulk_update(batch_updates, ['tvg_id'])
|
||||
|
||||
# Send progress update
|
||||
progress = min(i + batch_size, total_channels)
|
||||
send_websocket_update('updates', 'update', {
|
||||
'type': 'epg_tvg_id_setting_progress',
|
||||
'task_id': task_id,
|
||||
'progress': progress,
|
||||
'total': total_channels,
|
||||
'status': 'running',
|
||||
'message': f'Updated {updated_count} channel TVG-IDs...',
|
||||
'updated_count': updated_count
|
||||
})
|
||||
|
||||
# Send completion notification
|
||||
send_websocket_update('updates', 'update', {
|
||||
'type': 'epg_tvg_id_setting_progress',
|
||||
'task_id': task_id,
|
||||
'progress': total_channels,
|
||||
'total': total_channels,
|
||||
'status': 'completed',
|
||||
'message': f'Successfully updated {updated_count} channel TVG-IDs from EPG data',
|
||||
'updated_count': updated_count,
|
||||
'error_count': len(errors),
|
||||
'errors': errors
|
||||
})
|
||||
|
||||
logger.info(f"EPG TVG-ID setting task completed. Updated {updated_count} channels")
|
||||
return {
|
||||
'status': 'completed',
|
||||
'updated_count': updated_count,
|
||||
'error_count': len(errors),
|
||||
'errors': errors
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"EPG TVG-ID setting task failed: {e}")
|
||||
send_websocket_update('updates', 'update', {
|
||||
'type': 'epg_tvg_id_setting_progress',
|
||||
'task_id': task_id,
|
||||
'progress': 0,
|
||||
'total': total_channels,
|
||||
'status': 'failed',
|
||||
'message': f'Task failed: {str(e)}',
|
||||
'error': str(e)
|
||||
})
|
||||
raise
|
||||
|
|
|
|||
0    apps/channels/tests/__init__.py    Normal file
211  apps/channels/tests/test_channel_api.py    Normal file

@@ -0,0 +1,211 @@
|
|||
from django.test import TestCase
|
||||
from django.contrib.auth import get_user_model
|
||||
from rest_framework.test import APIClient
|
||||
from rest_framework import status
|
||||
|
||||
from apps.channels.models import Channel, ChannelGroup
|
||||
|
||||
User = get_user_model()
|
||||
|
||||
|
||||
class ChannelBulkEditAPITests(TestCase):
|
||||
def setUp(self):
|
||||
# Create a test admin user (user_level >= 10) and authenticate
|
||||
self.user = User.objects.create_user(username="testuser", password="testpass123")
|
||||
self.user.user_level = 10 # Set admin level
|
||||
self.user.save()
|
||||
self.client = APIClient()
|
||||
self.client.force_authenticate(user=self.user)
|
||||
self.bulk_edit_url = "/api/channels/channels/edit/bulk/"
|
||||
|
||||
# Create test channel group
|
||||
self.group1 = ChannelGroup.objects.create(name="Test Group 1")
|
||||
self.group2 = ChannelGroup.objects.create(name="Test Group 2")
|
||||
|
||||
# Create test channels
|
||||
self.channel1 = Channel.objects.create(
|
||||
channel_number=1.0,
|
||||
name="Channel 1",
|
||||
tvg_id="channel1",
|
||||
channel_group=self.group1
|
||||
)
|
||||
self.channel2 = Channel.objects.create(
|
||||
channel_number=2.0,
|
||||
name="Channel 2",
|
||||
tvg_id="channel2",
|
||||
channel_group=self.group1
|
||||
)
|
||||
self.channel3 = Channel.objects.create(
|
||||
channel_number=3.0,
|
||||
name="Channel 3",
|
||||
tvg_id="channel3"
|
||||
)
|
||||
|
||||
def test_bulk_edit_success(self):
|
||||
"""Test successful bulk update of multiple channels"""
|
||||
data = [
|
||||
{"id": self.channel1.id, "name": "Updated Channel 1"},
|
||||
{"id": self.channel2.id, "name": "Updated Channel 2", "channel_number": 22.0},
|
||||
]
|
||||
|
||||
response = self.client.patch(self.bulk_edit_url, data, format="json")
|
||||
|
||||
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
||||
self.assertEqual(response.data["message"], "Successfully updated 2 channels")
|
||||
self.assertEqual(len(response.data["channels"]), 2)
|
||||
|
||||
# Verify database changes
|
||||
self.channel1.refresh_from_db()
|
||||
self.channel2.refresh_from_db()
|
||||
self.assertEqual(self.channel1.name, "Updated Channel 1")
|
||||
self.assertEqual(self.channel2.name, "Updated Channel 2")
|
||||
self.assertEqual(self.channel2.channel_number, 22.0)
|
||||
|
||||
def test_bulk_edit_with_empty_validated_data_first(self):
|
||||
"""
|
||||
Test the bug fix: when first channel has empty validated_data.
|
||||
This was causing: ValueError: Field names must be given to bulk_update()
|
||||
"""
|
||||
# Create a channel with data that will be "unchanged" (empty validated_data)
|
||||
# We'll send the same data it already has
|
||||
data = [
|
||||
# First channel: no actual changes (this would create empty validated_data)
|
||||
{"id": self.channel1.id},
|
||||
# Second channel: has changes
|
||||
{"id": self.channel2.id, "name": "Updated Channel 2"},
|
||||
]
|
||||
|
||||
response = self.client.patch(self.bulk_edit_url, data, format="json")
|
||||
|
||||
# Should not crash with ValueError
|
||||
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
||||
self.assertEqual(response.data["message"], "Successfully updated 2 channels")
|
||||
|
||||
# Verify the channel with changes was updated
|
||||
self.channel2.refresh_from_db()
|
||||
self.assertEqual(self.channel2.name, "Updated Channel 2")
|
||||
|
||||
def test_bulk_edit_all_empty_updates(self):
|
||||
"""Test when all channels have empty updates (no actual changes)"""
|
||||
data = [
|
||||
{"id": self.channel1.id},
|
||||
{"id": self.channel2.id},
|
||||
]
|
||||
|
||||
response = self.client.patch(self.bulk_edit_url, data, format="json")
|
||||
|
||||
# Should succeed without calling bulk_update
|
||||
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
||||
self.assertEqual(response.data["message"], "Successfully updated 2 channels")
|
||||
|
||||
def test_bulk_edit_mixed_fields(self):
|
||||
"""Test bulk update where different channels update different fields"""
|
||||
data = [
|
||||
{"id": self.channel1.id, "name": "New Name 1"},
|
||||
{"id": self.channel2.id, "channel_number": 99.0},
|
||||
{"id": self.channel3.id, "tvg_id": "new_tvg_id", "name": "New Name 3"},
|
||||
]
|
||||
|
||||
response = self.client.patch(self.bulk_edit_url, data, format="json")
|
||||
|
||||
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
||||
self.assertEqual(response.data["message"], "Successfully updated 3 channels")
|
||||
|
||||
# Verify all updates
|
||||
self.channel1.refresh_from_db()
|
||||
self.channel2.refresh_from_db()
|
||||
self.channel3.refresh_from_db()
|
||||
|
||||
self.assertEqual(self.channel1.name, "New Name 1")
|
||||
self.assertEqual(self.channel2.channel_number, 99.0)
|
||||
self.assertEqual(self.channel3.tvg_id, "new_tvg_id")
|
||||
self.assertEqual(self.channel3.name, "New Name 3")
|
||||
|
||||
def test_bulk_edit_with_channel_group(self):
|
||||
"""Test bulk update with channel_group_id changes"""
|
||||
data = [
|
||||
{"id": self.channel1.id, "channel_group_id": self.group2.id},
|
||||
{"id": self.channel3.id, "channel_group_id": self.group1.id},
|
||||
]
|
||||
|
||||
response = self.client.patch(self.bulk_edit_url, data, format="json")
|
||||
|
||||
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
||||
|
||||
# Verify group changes
|
||||
self.channel1.refresh_from_db()
|
||||
self.channel3.refresh_from_db()
|
||||
self.assertEqual(self.channel1.channel_group, self.group2)
|
||||
self.assertEqual(self.channel3.channel_group, self.group1)
|
||||
|
||||
def test_bulk_edit_nonexistent_channel(self):
|
||||
"""Test bulk update with a channel that doesn't exist"""
|
||||
nonexistent_id = 99999
|
||||
data = [
|
||||
{"id": nonexistent_id, "name": "Should Fail"},
|
||||
{"id": self.channel1.id, "name": "Should Still Update"},
|
||||
]
|
||||
|
||||
response = self.client.patch(self.bulk_edit_url, data, format="json")
|
||||
|
||||
# Should return 400 with errors
|
||||
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
|
||||
self.assertIn("errors", response.data)
|
||||
self.assertEqual(len(response.data["errors"]), 1)
|
||||
self.assertEqual(response.data["errors"][0]["channel_id"], nonexistent_id)
|
||||
self.assertEqual(response.data["errors"][0]["error"], "Channel not found")
|
||||
|
||||
# The valid channel should still be updated
|
||||
self.assertEqual(response.data["updated_count"], 1)
|
||||
|
||||
def test_bulk_edit_validation_error(self):
|
||||
"""Test bulk update with invalid data (validation error)"""
|
||||
data = [
|
||||
{"id": self.channel1.id, "channel_number": "invalid_number"},
|
||||
]
|
||||
|
||||
response = self.client.patch(self.bulk_edit_url, data, format="json")
|
||||
|
||||
# Should return 400 with validation errors
|
||||
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
|
||||
self.assertIn("errors", response.data)
|
||||
self.assertEqual(len(response.data["errors"]), 1)
|
||||
self.assertIn("channel_number", response.data["errors"][0]["errors"])
|
||||
|
||||
def test_bulk_edit_empty_channel_updates(self):
|
||||
"""Test bulk update with empty list"""
|
||||
data = []
|
||||
|
||||
response = self.client.patch(self.bulk_edit_url, data, format="json")
|
||||
|
||||
# Empty list is accepted and returns success with 0 updates
|
||||
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
||||
self.assertEqual(response.data["message"], "Successfully updated 0 channels")
|
||||
|
||||
def test_bulk_edit_missing_channel_updates(self):
|
||||
"""Test bulk update without proper format (dict instead of list)"""
|
||||
data = {"channel_updates": {}}
|
||||
|
||||
response = self.client.patch(self.bulk_edit_url, data, format="json")
|
||||
|
||||
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
|
||||
self.assertEqual(response.data["error"], "Expected a list of channel updates")
|
||||
|
||||
def test_bulk_edit_preserves_other_fields(self):
|
||||
"""Test that bulk update only changes specified fields"""
|
||||
original_channel_number = self.channel1.channel_number
|
||||
original_tvg_id = self.channel1.tvg_id
|
||||
|
||||
data = [
|
||||
{"id": self.channel1.id, "name": "Only Name Changed"},
|
||||
]
|
||||
|
||||
response = self.client.patch(self.bulk_edit_url, data, format="json")
|
||||
|
||||
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
||||
|
||||
# Verify only name changed, other fields preserved
|
||||
self.channel1.refresh_from_db()
|
||||
self.assertEqual(self.channel1.name, "Only Name Changed")
|
||||
self.assertEqual(self.channel1.channel_number, original_channel_number)
|
||||
self.assertEqual(self.channel1.tvg_id, original_tvg_id)
|
||||
40   apps/channels/tests/test_recurring_rules.py    Normal file

@@ -0,0 +1,40 @@
|
|||
from datetime import datetime, timedelta
|
||||
from django.test import TestCase
|
||||
from django.utils import timezone
|
||||
|
||||
from apps.channels.models import Channel, RecurringRecordingRule, Recording
|
||||
from apps.channels.tasks import sync_recurring_rule_impl, purge_recurring_rule_impl
|
||||
|
||||
|
||||
class RecurringRecordingRuleTasksTests(TestCase):
|
||||
def test_sync_recurring_rule_creates_and_purges_recordings(self):
|
||||
now = timezone.now()
|
||||
channel = Channel.objects.create(channel_number=1, name='Test Channel')
|
||||
|
||||
start_time = (now + timedelta(minutes=15)).time().replace(second=0, microsecond=0)
|
||||
end_time = (now + timedelta(minutes=75)).time().replace(second=0, microsecond=0)
|
||||
|
||||
rule = RecurringRecordingRule.objects.create(
|
||||
channel=channel,
|
||||
days_of_week=[now.weekday()],
|
||||
start_time=start_time,
|
||||
end_time=end_time,
|
||||
)
|
||||
|
||||
created = sync_recurring_rule_impl(rule.id, drop_existing=True, horizon_days=1)
|
||||
self.assertEqual(created, 1)
|
||||
|
||||
recording = Recording.objects.filter(custom_properties__rule__id=rule.id).first()
|
||||
self.assertIsNotNone(recording)
|
||||
self.assertEqual(recording.channel, channel)
|
||||
self.assertEqual(recording.custom_properties.get('rule', {}).get('id'), rule.id)
|
||||
|
||||
expected_start = timezone.make_aware(
|
||||
datetime.combine(recording.start_time.date(), start_time),
|
||||
timezone.get_current_timezone(),
|
||||
)
|
||||
self.assertLess(abs((recording.start_time - expected_start).total_seconds()), 60)
|
||||
|
||||
removed = purge_recurring_rule_impl(rule.id)
|
||||
self.assertEqual(removed, 1)
|
||||
self.assertFalse(Recording.objects.filter(custom_properties__rule__id=rule.id).exists())
|
||||
|
|
@ -147,23 +147,37 @@ class EPGGridAPIView(APIView):
|
|||
f"EPGGridAPIView: Found {count} program(s), including recently ended, currently running, and upcoming shows."
|
||||
)
|
||||
|
||||
# Generate dummy programs for channels that have no EPG data
|
||||
# Generate dummy programs for channels that have no EPG data OR dummy EPG sources
|
||||
from apps.channels.models import Channel
|
||||
from apps.epg.models import EPGSource
|
||||
from django.db.models import Q
|
||||
|
||||
# Get channels with no EPG data
|
||||
# Get channels with no EPG data at all (standard dummy)
|
||||
channels_without_epg = Channel.objects.filter(Q(epg_data__isnull=True))
|
||||
channels_count = channels_without_epg.count()
|
||||
|
||||
# Log more detailed information about channels missing EPG data
|
||||
if channels_count > 0:
|
||||
# Get channels with custom dummy EPG sources (generate on-demand with patterns)
|
||||
channels_with_custom_dummy = Channel.objects.filter(
|
||||
epg_data__epg_source__source_type='dummy'
|
||||
).distinct()
|
||||
|
||||
# Log what we found
|
||||
without_count = channels_without_epg.count()
|
||||
custom_count = channels_with_custom_dummy.count()
|
||||
|
||||
if without_count > 0:
|
||||
channel_names = [f"{ch.name} (ID: {ch.id})" for ch in channels_without_epg]
|
||||
logger.warning(
|
||||
f"EPGGridAPIView: Missing EPG data for these channels: {', '.join(channel_names)}"
|
||||
logger.debug(
|
||||
f"EPGGridAPIView: Channels needing standard dummy EPG: {', '.join(channel_names)}"
|
||||
)
|
||||
|
||||
if custom_count > 0:
|
||||
channel_names = [f"{ch.name} (ID: {ch.id})" for ch in channels_with_custom_dummy]
|
||||
logger.debug(
|
||||
f"EPGGridAPIView: Channels needing custom dummy EPG: {', '.join(channel_names)}"
|
||||
)
|
||||
|
||||
logger.debug(
|
||||
f"EPGGridAPIView: Found {channels_count} channels with no EPG data."
|
||||
f"EPGGridAPIView: Found {without_count} channels needing standard dummy, {custom_count} needing custom dummy EPG."
|
||||
)
|
||||
|
||||
# Serialize the regular programs
|
||||
|
|
@ -205,12 +219,91 @@ class EPGGridAPIView(APIView):
|
|||
|
||||
# Generate and append dummy programs
|
||||
dummy_programs = []
|
||||
for channel in channels_without_epg:
|
||||
# Use the channel UUID as tvg_id for dummy programs to match in the guide
|
||||
|
||||
# Import the function from output.views
|
||||
from apps.output.views import generate_dummy_programs as gen_dummy_progs
|
||||
|
||||
# Handle channels with CUSTOM dummy EPG sources (with patterns)
|
||||
for channel in channels_with_custom_dummy:
|
||||
# For dummy EPGs, ALWAYS use channel UUID to ensure unique programs per channel
|
||||
# This prevents multiple channels assigned to the same dummy EPG from showing identical data
|
||||
# Each channel gets its own unique program data even if they share the same EPG source
|
||||
dummy_tvg_id = str(channel.uuid)
|
||||
|
||||
try:
|
||||
# Create programs every 4 hours for the next 24 hours
|
||||
# Get the custom dummy EPG source
|
||||
epg_source = channel.epg_data.epg_source if channel.epg_data else None
|
||||
|
||||
logger.debug(f"Generating custom dummy programs for channel: {channel.name} (ID: {channel.id})")
|
||||
|
||||
# Determine which name to parse based on custom properties
|
||||
name_to_parse = channel.name
|
||||
if epg_source and epg_source.custom_properties:
|
||||
custom_props = epg_source.custom_properties
|
||||
name_source = custom_props.get('name_source')
|
||||
|
||||
if name_source == 'stream':
|
||||
# Get the stream index (1-based from user, convert to 0-based)
|
||||
stream_index = custom_props.get('stream_index', 1) - 1
|
||||
|
||||
# Get streams ordered by channelstream order
|
||||
channel_streams = channel.streams.all().order_by('channelstream__order')
|
||||
|
||||
if channel_streams.exists() and 0 <= stream_index < channel_streams.count():
|
||||
stream = list(channel_streams)[stream_index]
|
||||
name_to_parse = stream.name
|
||||
logger.debug(f"Using stream name for parsing: {name_to_parse} (stream index: {stream_index})")
|
||||
else:
|
||||
logger.warning(f"Stream index {stream_index} not found for channel {channel.name}, falling back to channel name")
|
||||
elif name_source == 'channel':
|
||||
logger.debug(f"Using channel name for parsing: {name_to_parse}")
|
||||
|
||||
# Generate programs using custom patterns from the dummy EPG source
|
||||
# Use the same tvg_id that will be set in the program data
|
||||
generated = gen_dummy_progs(
|
||||
channel_id=dummy_tvg_id,
|
||||
channel_name=name_to_parse,
|
||||
num_days=1,
|
||||
program_length_hours=4,
|
||||
epg_source=epg_source
|
||||
)
|
||||
|
||||
# Custom dummy should always return data (either from patterns or fallback)
|
||||
if generated:
|
||||
logger.debug(f"Generated {len(generated)} custom dummy programs for {channel.name}")
|
||||
# Convert generated programs to API format
|
||||
for program in generated:
|
||||
dummy_program = {
|
||||
"id": f"dummy-custom-{channel.id}-{program['start_time'].hour}",
|
||||
"epg": {"tvg_id": dummy_tvg_id, "name": channel.name},
|
||||
"start_time": program['start_time'].isoformat(),
|
||||
"end_time": program['end_time'].isoformat(),
|
||||
"title": program['title'],
|
||||
"description": program['description'],
|
||||
"tvg_id": dummy_tvg_id,
|
||||
"sub_title": None,
|
||||
"custom_properties": None,
|
||||
}
|
||||
dummy_programs.append(dummy_program)
|
||||
else:
|
||||
logger.warning(f"No programs generated for custom dummy EPG channel: {channel.name}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Error creating custom dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}"
|
||||
)
|
||||
|
||||
# Handle channels with NO EPG data (standard dummy with humorous descriptions)
|
||||
for channel in channels_without_epg:
|
||||
# For channels with no EPG, use UUID to ensure uniqueness (matches frontend logic)
|
||||
# The frontend uses: tvgRecord?.tvg_id ?? channel.uuid
|
||||
# Since there's no EPG data, it will fall back to UUID
|
||||
dummy_tvg_id = str(channel.uuid)
|
||||
|
||||
try:
|
||||
logger.debug(f"Generating standard dummy programs for channel: {channel.name} (ID: {channel.id})")
|
||||
|
||||
# Create programs every 4 hours for the next 24 hours with humorous descriptions
|
||||
for hour_offset in range(0, 24, 4):
|
||||
# Use timedelta for time arithmetic instead of replace() to avoid hour overflow
|
||||
start_time = now + timedelta(hours=hour_offset)
|
||||
|
|
@ -238,7 +331,7 @@ class EPGGridAPIView(APIView):
|
|||
|
||||
# Create a dummy program in the same format as regular programs
|
||||
dummy_program = {
|
||||
"id": f"dummy-{channel.id}-{hour_offset}", # Create a unique ID
|
||||
"id": f"dummy-standard-{channel.id}-{hour_offset}",
|
||||
"epg": {"tvg_id": dummy_tvg_id, "name": channel.name},
|
||||
"start_time": start_time.isoformat(),
|
||||
"end_time": end_time.isoformat(),
|
||||
|
|
@ -252,7 +345,7 @@ class EPGGridAPIView(APIView):
|
|||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Error creating dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}"
|
||||
f"Error creating standard dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}"
|
||||
)
|
||||
|
||||
# Combine regular and dummy programs
|
||||
|
|
@ -284,7 +377,22 @@ class EPGImportAPIView(APIView):
|
|||
)
|
||||
def post(self, request, format=None):
|
||||
logger.info("EPGImportAPIView: Received request to import EPG data.")
|
||||
refresh_epg_data.delay(request.data.get("id", None)) # Trigger Celery task
|
||||
epg_id = request.data.get("id", None)
|
||||
|
||||
# Check if this is a dummy EPG source
|
||||
try:
|
||||
from .models import EPGSource
|
||||
epg_source = EPGSource.objects.get(id=epg_id)
|
||||
if epg_source.source_type == 'dummy':
|
||||
logger.info(f"EPGImportAPIView: Skipping refresh for dummy EPG source {epg_id}")
|
||||
return Response(
|
||||
{"success": False, "message": "Dummy EPG sources do not require refreshing."},
|
||||
status=status.HTTP_400_BAD_REQUEST,
|
||||
)
|
||||
except EPGSource.DoesNotExist:
|
||||
pass # Let the task handle the missing source
|
||||
|
||||
refresh_epg_data.delay(epg_id) # Trigger Celery task
|
||||
logger.info("EPGImportAPIView: Task dispatched to refresh EPG data.")
|
||||
return Response(
|
||||
{"success": True, "message": "EPG data import initiated."},
|
||||
|
|
@ -308,3 +416,4 @@ class EPGDataViewSet(viewsets.ReadOnlyModelViewSet):
|
|||
return [perm() for perm in permission_classes_by_action[self.action]]
|
||||
except KeyError:
|
||||
return [Authenticated()]
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,23 @@
|
|||
# Generated by Django 5.2.4 on 2025-10-17 17:02
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('epg', '0017_alter_epgsource_url'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='epgsource',
|
||||
name='custom_properties',
|
||||
field=models.JSONField(blank=True, default=dict, help_text='Custom properties for dummy EPG configuration (regex patterns, timezone, duration, etc.)', null=True),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='epgsource',
|
||||
name='source_type',
|
||||
field=models.CharField(choices=[('xmltv', 'XMLTV URL'), ('schedules_direct', 'Schedules Direct API'), ('dummy', 'Custom Dummy EPG')], max_length=20),
|
||||
),
|
||||
]
|
||||
18
apps/epg/migrations/0019_alter_programdata_sub_title.py
Normal file
18
apps/epg/migrations/0019_alter_programdata_sub_title.py
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
# Generated by Django 5.2.4 on 2025-10-22 21:59
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('epg', '0018_epgsource_custom_properties_and_more'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='programdata',
|
||||
name='sub_title',
|
||||
field=models.TextField(blank=True, null=True),
|
||||
),
|
||||
]
|
||||
|
|
@ -0,0 +1,119 @@
|
|||
# Generated migration to replace {time} placeholders with {starttime}
|
||||
|
||||
import re
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
def migrate_time_placeholders(apps, schema_editor):
|
||||
"""
|
||||
Replace {time} with {starttime} and {time24} with {starttime24}
|
||||
in all dummy EPG source custom_properties templates.
|
||||
"""
|
||||
EPGSource = apps.get_model('epg', 'EPGSource')
|
||||
|
||||
# Fields that contain templates with placeholders
|
||||
template_fields = [
|
||||
'title_template',
|
||||
'description_template',
|
||||
'upcoming_title_template',
|
||||
'upcoming_description_template',
|
||||
'ended_title_template',
|
||||
'ended_description_template',
|
||||
'channel_logo_url',
|
||||
'program_poster_url',
|
||||
]
|
||||
|
||||
# Get all dummy EPG sources
|
||||
dummy_sources = EPGSource.objects.filter(source_type='dummy')
|
||||
|
||||
updated_count = 0
|
||||
for source in dummy_sources:
|
||||
if not source.custom_properties:
|
||||
continue
|
||||
|
||||
modified = False
|
||||
custom_props = source.custom_properties.copy()
|
||||
|
||||
for field in template_fields:
|
||||
if field in custom_props and custom_props[field]:
|
||||
original_value = custom_props[field]
|
||||
|
||||
# Replace {time24} first (before {time}) to avoid double replacement
|
||||
# e.g., {time24} shouldn't become {starttime24} via {time} -> {starttime}
|
||||
new_value = original_value
|
||||
new_value = re.sub(r'\{time24\}', '{starttime24}', new_value)
|
||||
new_value = re.sub(r'\{time\}', '{starttime}', new_value)
|
||||
|
||||
if new_value != original_value:
|
||||
custom_props[field] = new_value
|
||||
modified = True
|
||||
|
||||
if modified:
|
||||
source.custom_properties = custom_props
|
||||
source.save(update_fields=['custom_properties'])
|
||||
updated_count += 1
|
||||
|
||||
if updated_count > 0:
|
||||
print(f"Migration complete: Updated {updated_count} dummy EPG source(s) with new placeholder names.")
|
||||
else:
|
||||
print("No dummy EPG sources needed placeholder updates.")
|
||||
|
||||
|
||||
def reverse_migration(apps, schema_editor):
|
||||
"""
|
||||
Reverse the migration by replacing {starttime} back to {time}.
|
||||
"""
|
||||
EPGSource = apps.get_model('epg', 'EPGSource')
|
||||
|
||||
template_fields = [
|
||||
'title_template',
|
||||
'description_template',
|
||||
'upcoming_title_template',
|
||||
'upcoming_description_template',
|
||||
'ended_title_template',
|
||||
'ended_description_template',
|
||||
'channel_logo_url',
|
||||
'program_poster_url',
|
||||
]
|
||||
|
||||
dummy_sources = EPGSource.objects.filter(source_type='dummy')
|
||||
|
||||
updated_count = 0
|
||||
for source in dummy_sources:
|
||||
if not source.custom_properties:
|
||||
continue
|
||||
|
||||
modified = False
|
||||
custom_props = source.custom_properties.copy()
|
||||
|
||||
for field in template_fields:
|
||||
if field in custom_props and custom_props[field]:
|
||||
original_value = custom_props[field]
|
||||
|
||||
# Reverse the replacements
|
||||
new_value = original_value
|
||||
new_value = re.sub(r'\{starttime24\}', '{time24}', new_value)
|
||||
new_value = re.sub(r'\{starttime\}', '{time}', new_value)
|
||||
|
||||
if new_value != original_value:
|
||||
custom_props[field] = new_value
|
||||
modified = True
|
||||
|
||||
if modified:
|
||||
source.custom_properties = custom_props
|
||||
source.save(update_fields=['custom_properties'])
|
||||
updated_count += 1
|
||||
|
||||
if updated_count > 0:
|
||||
print(f"Reverse migration complete: Reverted {updated_count} dummy EPG source(s) to old placeholder names.")
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('epg', '0019_alter_programdata_sub_title'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(migrate_time_placeholders, reverse_migration),
|
||||
]
|
||||
18
apps/epg/migrations/0021_epgsource_priority.py
Normal file
18
apps/epg/migrations/0021_epgsource_priority.py
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
# Generated by Django 5.2.4 on 2025-12-05 15:24
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('epg', '0020_migrate_time_to_starttime_placeholders'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='epgsource',
|
||||
name='priority',
|
||||
field=models.PositiveIntegerField(default=0, help_text='Priority for EPG matching (higher numbers = higher priority). Used when multiple EPG sources have matching entries for a channel.'),
|
||||
),
|
||||
]
|
||||
|
|
@ -8,6 +8,7 @@ class EPGSource(models.Model):
|
|||
SOURCE_TYPE_CHOICES = [
|
||||
('xmltv', 'XMLTV URL'),
|
||||
('schedules_direct', 'Schedules Direct API'),
|
||||
('dummy', 'Custom Dummy EPG'),
|
||||
]
|
||||
|
||||
STATUS_IDLE = 'idle'
|
||||
|
|
@ -38,6 +39,16 @@ class EPGSource(models.Model):
|
|||
refresh_task = models.ForeignKey(
|
||||
PeriodicTask, on_delete=models.SET_NULL, null=True, blank=True
|
||||
)
|
||||
custom_properties = models.JSONField(
|
||||
default=dict,
|
||||
blank=True,
|
||||
null=True,
|
||||
help_text="Custom properties for dummy EPG configuration (regex patterns, timezone, duration, etc.)"
|
||||
)
|
||||
priority = models.PositiveIntegerField(
|
||||
default=0,
|
||||
help_text="Priority for EPG matching (higher numbers = higher priority). Used when multiple EPG sources have matching entries for a channel."
|
||||
)
|
||||
status = models.CharField(
|
||||
max_length=20,
|
||||
choices=STATUS_CHOICES,
|
||||
|
|
@ -148,7 +159,7 @@ class ProgramData(models.Model):
|
|||
start_time = models.DateTimeField()
|
||||
end_time = models.DateTimeField()
|
||||
title = models.CharField(max_length=255)
|
||||
sub_title = models.CharField(max_length=255, blank=True, null=True)
|
||||
sub_title = models.TextField(blank=True, null=True)
|
||||
description = models.TextField(blank=True, null=True)
|
||||
tvg_id = models.CharField(max_length=255, null=True, blank=True)
|
||||
custom_properties = models.JSONField(default=dict, blank=True, null=True)
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ from .models import EPGSource, EPGData, ProgramData
|
|||
from apps.channels.models import Channel
|
||||
|
||||
class EPGSourceSerializer(serializers.ModelSerializer):
|
||||
epg_data_ids = serializers.SerializerMethodField()
|
||||
epg_data_count = serializers.SerializerMethodField()
|
||||
read_only_fields = ['created_at', 'updated_at']
|
||||
url = serializers.CharField(
|
||||
required=False,
|
||||
|
|
@ -24,15 +24,18 @@ class EPGSourceSerializer(serializers.ModelSerializer):
|
|||
'is_active',
|
||||
'file_path',
|
||||
'refresh_interval',
|
||||
'priority',
|
||||
'status',
|
||||
'last_message',
|
||||
'created_at',
|
||||
'updated_at',
|
||||
'epg_data_ids'
|
||||
'custom_properties',
|
||||
'epg_data_count'
|
||||
]
|
||||
|
||||
def get_epg_data_ids(self, obj):
|
||||
return list(obj.epgs.values_list('id', flat=True))
|
||||
def get_epg_data_count(self, obj):
|
||||
"""Return the count of EPG data entries instead of all IDs to prevent large payloads"""
|
||||
return obj.epgs.count()
|
||||
|
||||
class ProgramDataSerializer(serializers.ModelSerializer):
|
||||
class Meta:
|
||||
|
|
|
|||
|
|
@ -1,9 +1,9 @@
|
|||
from django.db.models.signals import post_save, post_delete, pre_save
|
||||
from django.dispatch import receiver
|
||||
from .models import EPGSource
|
||||
from .models import EPGSource, EPGData
|
||||
from .tasks import refresh_epg_data, delete_epg_refresh_task_by_id
|
||||
from django_celery_beat.models import PeriodicTask, IntervalSchedule
|
||||
from core.utils import is_protected_path
|
||||
from core.utils import is_protected_path, send_websocket_update
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
|
|
@ -12,15 +12,77 @@ logger = logging.getLogger(__name__)
|
|||
|
||||
@receiver(post_save, sender=EPGSource)
|
||||
def trigger_refresh_on_new_epg_source(sender, instance, created, **kwargs):
|
||||
# Trigger refresh only if the source is newly created and active
|
||||
if created and instance.is_active:
|
||||
# Trigger refresh only if the source is newly created, active, and not a dummy EPG
|
||||
if created and instance.is_active and instance.source_type != 'dummy':
|
||||
refresh_epg_data.delay(instance.id)
|
||||
|
||||
@receiver(post_save, sender=EPGSource)
|
||||
def create_dummy_epg_data(sender, instance, created, **kwargs):
|
||||
"""
|
||||
Automatically create EPGData for dummy EPG sources when they are created.
|
||||
This allows channels to be assigned to dummy EPGs immediately without
|
||||
requiring a refresh first.
|
||||
"""
|
||||
if instance.source_type == 'dummy':
|
||||
# Ensure dummy EPGs always have idle status and no status message
|
||||
if instance.status != EPGSource.STATUS_IDLE or instance.last_message:
|
||||
instance.status = EPGSource.STATUS_IDLE
|
||||
instance.last_message = None
|
||||
instance.save(update_fields=['status', 'last_message'])
|
||||
|
||||
# Create a URL-friendly tvg_id from the dummy EPG name
|
||||
# Replace spaces and special characters with underscores
|
||||
friendly_tvg_id = instance.name.replace(' ', '_').replace('-', '_')
|
||||
# Remove any characters that aren't alphanumeric or underscores
|
||||
friendly_tvg_id = ''.join(c for c in friendly_tvg_id if c.isalnum() or c == '_')
|
||||
# Convert to lowercase for consistency
|
||||
friendly_tvg_id = friendly_tvg_id.lower()
|
||||
# Prefix with 'dummy_' to make it clear this is a dummy EPG
|
||||
friendly_tvg_id = f"dummy_{friendly_tvg_id}"
|
||||
|
||||
# Create or update the EPGData record
|
||||
epg_data, data_created = EPGData.objects.get_or_create(
|
||||
tvg_id=friendly_tvg_id,
|
||||
epg_source=instance,
|
||||
defaults={
|
||||
'name': instance.name,
|
||||
'icon_url': None
|
||||
}
|
||||
)
|
||||
|
||||
# Update name if it changed and record already existed
|
||||
if not data_created and epg_data.name != instance.name:
|
||||
epg_data.name = instance.name
|
||||
epg_data.save(update_fields=['name'])
|
||||
|
||||
if data_created:
|
||||
logger.info(f"Auto-created EPGData for dummy EPG source: {instance.name} (ID: {instance.id})")
|
||||
|
||||
# Send websocket update to notify frontend that EPG data has been created
|
||||
# This allows the channel form to immediately show the new dummy EPG without refreshing
|
||||
send_websocket_update('updates', 'update', {
|
||||
'type': 'epg_data_created',
|
||||
'source_id': instance.id,
|
||||
'source_name': instance.name,
|
||||
'epg_data_id': epg_data.id
|
||||
})
|
||||
else:
|
||||
logger.debug(f"EPGData already exists for dummy EPG source: {instance.name} (ID: {instance.id})")
|
||||
|
||||
@receiver(post_save, sender=EPGSource)
|
||||
def create_or_update_refresh_task(sender, instance, **kwargs):
|
||||
"""
|
||||
Create or update a Celery Beat periodic task when an EPGSource is created/updated.
|
||||
Skip creating tasks for dummy EPG sources as they don't need refreshing.
|
||||
"""
|
||||
# Skip task creation for dummy EPGs
|
||||
if instance.source_type == 'dummy':
|
||||
# If there's an existing task, disable it
|
||||
if instance.refresh_task:
|
||||
instance.refresh_task.enabled = False
|
||||
instance.refresh_task.save(update_fields=['enabled'])
|
||||
return
|
||||
|
||||
task_name = f"epg_source-refresh-{instance.id}"
|
||||
interval, _ = IntervalSchedule.objects.get_or_create(
|
||||
every=int(instance.refresh_interval),
|
||||
|
|
@ -80,7 +142,14 @@ def delete_refresh_task(sender, instance, **kwargs):
|
|||
def update_status_on_active_change(sender, instance, **kwargs):
|
||||
"""
|
||||
When an EPGSource's is_active field changes, update the status accordingly.
|
||||
For dummy EPGs, always ensure status is idle and no status message.
|
||||
"""
|
||||
# Dummy EPGs should always be idle with no status message
|
||||
if instance.source_type == 'dummy':
|
||||
instance.status = EPGSource.STATUS_IDLE
|
||||
instance.last_message = None
|
||||
return
|
||||
|
||||
if instance.pk: # Only for existing records, not new ones
|
||||
try:
|
||||
# Get the current record from the database
|
||||
|
|
|
|||
|
|
@ -24,7 +24,7 @@ from asgiref.sync import async_to_sync
|
|||
from channels.layers import get_channel_layer
|
||||
|
||||
from .models import EPGSource, EPGData, ProgramData
|
||||
from core.utils import acquire_task_lock, release_task_lock, send_websocket_update, cleanup_memory
|
||||
from core.utils import acquire_task_lock, release_task_lock, send_websocket_update, cleanup_memory, log_system_event
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
|
@ -133,8 +133,9 @@ def delete_epg_refresh_task_by_id(epg_id):
|
|||
@shared_task
|
||||
def refresh_all_epg_data():
|
||||
logger.info("Starting refresh_epg_data task.")
|
||||
active_sources = EPGSource.objects.filter(is_active=True)
|
||||
logger.debug(f"Found {active_sources.count()} active EPGSource(s).")
|
||||
# Exclude dummy EPG sources from refresh - they don't need refreshing
|
||||
active_sources = EPGSource.objects.filter(is_active=True).exclude(source_type='dummy')
|
||||
logger.debug(f"Found {active_sources.count()} active EPGSource(s) (excluding dummy EPGs).")
|
||||
|
||||
for source in active_sources:
|
||||
refresh_epg_data(source.id)
|
||||
|
|
@ -180,6 +181,13 @@ def refresh_epg_data(source_id):
|
|||
gc.collect()
|
||||
return
|
||||
|
||||
# Skip refresh for dummy EPG sources - they don't need refreshing
|
||||
if source.source_type == 'dummy':
|
||||
logger.info(f"Skipping refresh for dummy EPG source {source.name} (ID: {source_id})")
|
||||
release_task_lock('refresh_epg_data', source_id)
|
||||
gc.collect()
|
||||
return
|
||||
|
||||
# Continue with the normal processing...
|
||||
logger.info(f"Processing EPGSource: {source.name} (type: {source.source_type})")
|
||||
if source.source_type == 'xmltv':
|
||||
|
|
@ -278,11 +286,12 @@ def fetch_xmltv(source):
|
|||
logger.info(f"Fetching XMLTV data from source: {source.name}")
|
||||
try:
|
||||
# Get default user agent from settings
|
||||
default_user_agent_setting = CoreSettings.objects.filter(key='default-user-agent').first()
|
||||
stream_settings = CoreSettings.get_stream_settings()
|
||||
user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:138.0) Gecko/20100101 Firefox/138.0" # Fallback default
|
||||
if default_user_agent_setting and default_user_agent_setting.value:
|
||||
default_user_agent_id = stream_settings.get('default_user_agent')
|
||||
if default_user_agent_id:
|
||||
try:
|
||||
user_agent_obj = UserAgent.objects.filter(id=int(default_user_agent_setting.value)).first()
|
||||
user_agent_obj = UserAgent.objects.filter(id=int(default_user_agent_id)).first()
|
||||
if user_agent_obj and user_agent_obj.user_agent:
|
||||
user_agent = user_agent_obj.user_agent
|
||||
logger.debug(f"Using default user agent: {user_agent}")
|
||||
|
|
@ -1149,6 +1158,12 @@ def parse_programs_for_tvg_id(epg_id):
|
|||
epg = EPGData.objects.get(id=epg_id)
|
||||
epg_source = epg.epg_source
|
||||
|
||||
# Skip program parsing for dummy EPG sources - they don't have program data files
|
||||
if epg_source.source_type == 'dummy':
|
||||
logger.info(f"Skipping program parsing for dummy EPG source {epg_source.name} (ID: {epg_id})")
|
||||
release_task_lock('parse_epg_programs', epg_id)
|
||||
return
|
||||
|
||||
if not Channel.objects.filter(epg_data=epg).exists():
|
||||
logger.info(f"No channels matched to EPG {epg.tvg_id}")
|
||||
release_task_lock('parse_epg_programs', epg_id)
|
||||
|
|
@ -1379,11 +1394,23 @@ def parse_programs_for_tvg_id(epg_id):
|
|||
|
||||
|
||||
def parse_programs_for_source(epg_source, tvg_id=None):
|
||||
"""
|
||||
Parse programs for all MAPPED channels from an EPG source in a single pass.
|
||||
|
||||
This is an optimized version that:
|
||||
1. Only processes EPG entries that are actually mapped to channels
|
||||
2. Parses the XML file ONCE instead of once per channel
|
||||
3. Skips programmes for unmapped channels entirely during parsing
|
||||
|
||||
This dramatically improves performance when an EPG source has many channels
|
||||
but only a fraction are mapped.
|
||||
"""
|
||||
# Send initial programs parsing notification
|
||||
send_epg_update(epg_source.id, "parsing_programs", 0)
|
||||
should_log_memory = False
|
||||
process = None
|
||||
initial_memory = 0
|
||||
source_file = None
|
||||
|
||||
# Add memory tracking only in trace mode or higher
|
||||
try:
|
||||
|
|
@ -1403,91 +1430,251 @@ def parse_programs_for_source(epg_source, tvg_id=None):
|
|||
should_log_memory = False
|
||||
|
||||
try:
|
||||
# Process EPG entries in batches rather than all at once
|
||||
batch_size = 20 # Process fewer channels at once to reduce memory usage
|
||||
epg_count = EPGData.objects.filter(epg_source=epg_source).count()
|
||||
# Only get EPG entries that are actually mapped to channels
|
||||
mapped_epg_ids = set(
|
||||
Channel.objects.filter(
|
||||
epg_data__epg_source=epg_source,
|
||||
epg_data__isnull=False
|
||||
).values_list('epg_data_id', flat=True)
|
||||
)
|
||||
|
||||
if epg_count == 0:
|
||||
logger.info(f"No EPG entries found for source: {epg_source.name}")
|
||||
# Update status - this is not an error, just no entries
|
||||
if not mapped_epg_ids:
|
||||
total_epg_count = EPGData.objects.filter(epg_source=epg_source).count()
|
||||
logger.info(f"No channels mapped to any EPG entries from source: {epg_source.name} "
|
||||
f"(source has {total_epg_count} EPG entries, 0 mapped)")
|
||||
# Update status - this is not an error, just no mapped entries
|
||||
epg_source.status = 'success'
|
||||
epg_source.save(update_fields=['status'])
|
||||
epg_source.last_message = f"No channels mapped to this EPG source ({total_epg_count} entries available)"
|
||||
epg_source.save(update_fields=['status', 'last_message'])
|
||||
send_epg_update(epg_source.id, "parsing_programs", 100, status="success")
|
||||
return True
|
||||
|
||||
logger.info(f"Parsing programs for {epg_count} EPG entries from source: {epg_source.name}")
|
||||
# Get the mapped EPG entries with their tvg_ids
|
||||
mapped_epgs = EPGData.objects.filter(id__in=mapped_epg_ids).values('id', 'tvg_id')
|
||||
tvg_id_to_epg_id = {epg['tvg_id']: epg['id'] for epg in mapped_epgs if epg['tvg_id']}
|
||||
mapped_tvg_ids = set(tvg_id_to_epg_id.keys())
|
||||
|
||||
failed_entries = []
|
||||
program_count = 0
|
||||
channel_count = 0
|
||||
updated_count = 0
|
||||
processed = 0
|
||||
# Process in batches using cursor-based approach to limit memory usage
|
||||
last_id = 0
|
||||
while True:
|
||||
# Get a batch of EPG entries
|
||||
batch_entries = list(EPGData.objects.filter(
|
||||
epg_source=epg_source,
|
||||
id__gt=last_id
|
||||
).order_by('id')[:batch_size])
|
||||
total_epg_count = EPGData.objects.filter(epg_source=epg_source).count()
|
||||
mapped_count = len(mapped_tvg_ids)
|
||||
|
||||
if not batch_entries:
|
||||
break # No more entries to process
|
||||
logger.info(f"Parsing programs for {mapped_count} MAPPED channels from source: {epg_source.name} "
|
||||
f"(skipping {total_epg_count - mapped_count} unmapped EPG entries)")
|
||||
|
||||
# Update last_id for next iteration
|
||||
last_id = batch_entries[-1].id
|
||||
# Get the file path
|
||||
file_path = epg_source.extracted_file_path if epg_source.extracted_file_path else epg_source.file_path
|
||||
if not file_path:
|
||||
file_path = epg_source.get_cache_file()
|
||||
|
||||
# Process this batch
|
||||
for epg in batch_entries:
|
||||
if epg.tvg_id:
|
||||
try:
|
||||
result = parse_programs_for_tvg_id(epg.id)
|
||||
if result == "Task already running":
|
||||
logger.info(f"Program parse for {epg.id} already in progress, skipping")
|
||||
# Check if the file exists
|
||||
if not os.path.exists(file_path):
|
||||
logger.error(f"EPG file not found at: {file_path}")
|
||||
|
||||
processed += 1
|
||||
progress = min(95, int((processed / epg_count) * 100)) if epg_count > 0 else 50
|
||||
send_epg_update(epg_source.id, "parsing_programs", progress)
|
||||
except Exception as e:
|
||||
logger.error(f"Error parsing programs for tvg_id={epg.tvg_id}: {e}", exc_info=True)
|
||||
failed_entries.append(f"{epg.tvg_id}: {str(e)}")
|
||||
if epg_source.url:
|
||||
# Update the file path in the database
|
||||
new_path = epg_source.get_cache_file()
|
||||
logger.info(f"Updating file_path from '{file_path}' to '{new_path}'")
|
||||
epg_source.file_path = new_path
|
||||
epg_source.save(update_fields=['file_path'])
|
||||
logger.info(f"Fetching new EPG data from URL: {epg_source.url}")
|
||||
|
||||
# Force garbage collection after each batch
|
||||
batch_entries = None # Remove reference to help garbage collection
|
||||
# Fetch new data before continuing
|
||||
fetch_success = fetch_xmltv(epg_source)
|
||||
|
||||
if not fetch_success:
|
||||
logger.error(f"Failed to fetch EPG data for source: {epg_source.name}")
|
||||
epg_source.status = 'error'
|
||||
epg_source.last_message = f"Failed to download EPG data"
|
||||
epg_source.save(update_fields=['status', 'last_message'])
|
||||
send_epg_update(epg_source.id, "parsing_programs", 100, status="error", error="Failed to download EPG file")
|
||||
return False
|
||||
|
||||
# Update file_path with the new location
|
||||
file_path = epg_source.extracted_file_path if epg_source.extracted_file_path else epg_source.file_path
|
||||
else:
|
||||
logger.error(f"No URL provided for EPG source {epg_source.name}, cannot fetch new data")
|
||||
epg_source.status = 'error'
|
||||
epg_source.last_message = f"No URL provided, cannot fetch EPG data"
|
||||
epg_source.save(update_fields=['status', 'last_message'])
|
||||
send_epg_update(epg_source.id, "parsing_programs", 100, status="error", error="No URL provided")
|
||||
return False
|
||||
|
||||
# SINGLE PASS PARSING: Parse the XML file once and collect all programs in memory
|
||||
# We parse FIRST, then do an atomic delete+insert to avoid race conditions
|
||||
# where clients might see empty/partial EPG data during the transition
|
||||
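# Compact sketch of the atomic swap this comment describes, assuming the
# ProgramData model already imported in this module and a prepared list of
# unsaved instances; readers never observe the table between delete and insert.
from django.db import transaction

def atomic_replace_programs(mapped_epg_ids, new_rows, batch_size=1000):
    with transaction.atomic():
        ProgramData.objects.filter(epg_id__in=mapped_epg_ids).delete()
        for i in range(0, len(new_rows), batch_size):
            ProgramData.objects.bulk_create(new_rows[i:i + batch_size])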
all_programs_to_create = []
|
||||
programs_by_channel = {tvg_id: 0 for tvg_id in mapped_tvg_ids} # Track count per channel
|
||||
total_programs = 0
|
||||
skipped_programs = 0
|
||||
last_progress_update = 0
|
||||
|
||||
try:
|
||||
logger.debug(f"Opening file for single-pass parsing: {file_path}")
|
||||
source_file = open(file_path, 'rb')
|
||||
|
||||
# Stream parse the file using lxml's iterparse
|
||||
program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True, recover=True)
|
||||
|
||||
for _, elem in program_parser:
|
||||
channel_id = elem.get('channel')
|
||||
|
||||
# Skip programmes for unmapped channels immediately
|
||||
if channel_id not in mapped_tvg_ids:
|
||||
skipped_programs += 1
|
||||
# Clear element to free memory
|
||||
clear_element(elem)
|
||||
continue
|
||||
|
||||
# This programme is for a mapped channel - process it
|
||||
try:
|
||||
start_time = parse_xmltv_time(elem.get('start'))
|
||||
end_time = parse_xmltv_time(elem.get('stop'))
|
||||
title = None
|
||||
desc = None
|
||||
sub_title = None
|
||||
|
||||
# Efficiently process child elements
|
||||
for child in elem:
|
||||
if child.tag == 'title':
|
||||
title = child.text or 'No Title'
|
||||
elif child.tag == 'desc':
|
||||
desc = child.text or ''
|
||||
elif child.tag == 'sub-title':
|
||||
sub_title = child.text or ''
|
||||
|
||||
if not title:
|
||||
title = 'No Title'
|
||||
|
||||
# Extract custom properties
|
||||
custom_props = extract_custom_properties(elem)
|
||||
custom_properties_json = custom_props if custom_props else None
|
||||
|
||||
epg_id = tvg_id_to_epg_id[channel_id]
|
||||
all_programs_to_create.append(ProgramData(
|
||||
epg_id=epg_id,
|
||||
start_time=start_time,
|
||||
end_time=end_time,
|
||||
title=title,
|
||||
description=desc,
|
||||
sub_title=sub_title,
|
||||
tvg_id=channel_id,
|
||||
custom_properties=custom_properties_json
|
||||
))
|
||||
total_programs += 1
|
||||
programs_by_channel[channel_id] += 1
|
||||
|
||||
# Clear the element to free memory
|
||||
clear_element(elem)
|
||||
|
||||
# Send progress update (estimate based on programs processed)
|
||||
if total_programs - last_progress_update >= 5000:
|
||||
last_progress_update = total_programs
|
||||
# Cap at 70% during parsing phase (save 30% for DB operations)
|
||||
progress = min(70, 10 + int((total_programs / max(total_programs + 10000, 1)) * 60))
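# e.g. with total_programs = 10000 this gives min(70, 10 + int((10000 / 20000) * 60)) = 40; the estimate rises toward the 70% cap as more programmes are parsed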
|
||||
send_epg_update(epg_source.id, "parsing_programs", progress,
|
||||
processed=total_programs, channels=mapped_count)
|
||||
|
||||
# Periodic garbage collection during parsing
|
||||
if total_programs % 5000 == 0:
|
||||
gc.collect()
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing program for {channel_id}: {e}", exc_info=True)
|
||||
clear_element(elem)
|
||||
continue
|
||||
|
||||
except etree.XMLSyntaxError as xml_error:
|
||||
logger.error(f"XML syntax error parsing program data: {xml_error}")
|
||||
epg_source.status = EPGSource.STATUS_ERROR
|
||||
epg_source.last_message = f"XML parsing error: {str(xml_error)}"
|
||||
epg_source.save(update_fields=['status', 'last_message'])
|
||||
send_epg_update(epg_source.id, "parsing_programs", 100, status="error", message=str(xml_error))
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.error(f"Error parsing XML for programs: {e}", exc_info=True)
|
||||
raise
|
||||
finally:
|
||||
if source_file:
|
||||
source_file.close()
|
||||
source_file = None
|
||||
|
||||
# Now perform atomic delete + bulk insert
|
||||
# This ensures clients never see empty/partial EPG data
|
||||
logger.info(f"Parsed {total_programs} programs, performing atomic database update...")
|
||||
send_epg_update(epg_source.id, "parsing_programs", 75, message="Updating database...")
|
||||
|
||||
batch_size = 1000
|
||||
try:
|
||||
with transaction.atomic():
|
||||
# Delete existing programs for mapped EPGs
|
||||
deleted_count = ProgramData.objects.filter(epg_id__in=mapped_epg_ids).delete()[0]
|
||||
logger.debug(f"Deleted {deleted_count} existing programs")
|
||||
|
||||
# Clean up orphaned programs for unmapped EPG entries
|
||||
unmapped_epg_ids = list(EPGData.objects.filter(
|
||||
epg_source=epg_source
|
||||
).exclude(id__in=mapped_epg_ids).values_list('id', flat=True))
|
||||
|
||||
if unmapped_epg_ids:
|
||||
orphaned_count = ProgramData.objects.filter(epg_id__in=unmapped_epg_ids).delete()[0]
|
||||
if orphaned_count > 0:
|
||||
logger.info(f"Cleaned up {orphaned_count} orphaned programs for {len(unmapped_epg_ids)} unmapped EPG entries")
|
||||
|
||||
# Bulk insert all new programs in batches within the same transaction
|
||||
for i in range(0, len(all_programs_to_create), batch_size):
|
||||
batch = all_programs_to_create[i:i + batch_size]
|
||||
ProgramData.objects.bulk_create(batch)
|
||||
|
||||
# Update progress during insertion
|
||||
progress = 75 + int((i / len(all_programs_to_create)) * 20) if all_programs_to_create else 95
|
||||
if i % (batch_size * 5) == 0:
|
||||
send_epg_update(epg_source.id, "parsing_programs", min(95, progress),
|
||||
message=f"Inserting programs... {i}/{len(all_programs_to_create)}")
|
||||
|
||||
logger.info(f"Atomic update complete: deleted {deleted_count}, inserted {total_programs} programs")
|
||||
|
||||
except Exception as db_error:
|
||||
logger.error(f"Database error during atomic update: {db_error}", exc_info=True)
|
||||
epg_source.status = EPGSource.STATUS_ERROR
|
||||
epg_source.last_message = f"Database error: {str(db_error)}"
|
||||
epg_source.save(update_fields=['status', 'last_message'])
|
||||
send_epg_update(epg_source.id, "parsing_programs", 100, status="error", message=str(db_error))
|
||||
return False
|
||||
finally:
|
||||
# Clear the large list to free memory
|
||||
all_programs_to_create = None
|
||||
gc.collect()
|
||||
|
||||
# If there were failures, include them in the message but continue
|
||||
if failed_entries:
|
||||
epg_source.status = EPGSource.STATUS_SUCCESS # Still mark as success if some processed
|
||||
error_summary = f"Failed to parse {len(failed_entries)} of {epg_count} entries"
|
||||
stats_summary = f"Processed {program_count} programs across {channel_count} channels. Updated: {updated_count}."
|
||||
epg_source.last_message = f"{stats_summary} Warning: {error_summary}"
|
||||
epg_source.updated_at = timezone.now()
|
||||
epg_source.save(update_fields=['status', 'last_message', 'updated_at'])
|
||||
# Count channels that actually got programs
|
||||
channels_with_programs = sum(1 for count in programs_by_channel.values() if count > 0)
|
||||
|
||||
# Send completion notification with mixed status
|
||||
send_epg_update(epg_source.id, "parsing_programs", 100,
|
||||
status="success",
|
||||
message=epg_source.last_message)
|
||||
|
||||
# Explicitly release memory of large lists before returning
|
||||
del failed_entries
|
||||
gc.collect()
|
||||
|
||||
return True
|
||||
|
||||
# If all successful, set a comprehensive success message
|
||||
# Success message
|
||||
epg_source.status = EPGSource.STATUS_SUCCESS
|
||||
epg_source.last_message = f"Successfully processed {program_count} programs across {channel_count} channels. Updated: {updated_count}."
|
||||
epg_source.last_message = (
|
||||
f"Parsed {total_programs:,} programs for {channels_with_programs} channels "
|
||||
f"(skipped {skipped_programs:,} programs for {total_epg_count - mapped_count} unmapped channels)"
|
||||
)
|
||||
epg_source.updated_at = timezone.now()
|
||||
epg_source.save(update_fields=['status', 'last_message', 'updated_at'])
|
||||
|
||||
# Log system event for EPG refresh
|
||||
log_system_event(
|
||||
event_type='epg_refresh',
|
||||
source_name=epg_source.name,
|
||||
programs=total_programs,
|
||||
channels=channels_with_programs,
|
||||
skipped_programs=skipped_programs,
|
||||
unmapped_channels=total_epg_count - mapped_count,
|
||||
)
|
||||
|
||||
# Send completion notification with status
|
||||
send_epg_update(epg_source.id, "parsing_programs", 100,
|
||||
status="success",
|
||||
message=epg_source.last_message)
|
||||
message=epg_source.last_message,
|
||||
updated_at=epg_source.updated_at.isoformat())
|
||||
|
||||
logger.info(f"Completed parsing all programs for source: {epg_source.name}")
|
||||
logger.info(f"Completed parsing programs for source: {epg_source.name} - "
|
||||
f"{total_programs:,} programs for {channels_with_programs} channels, "
|
||||
f"skipped {skipped_programs:,} programs for unmapped channels")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
|
|
@@ -1502,14 +1689,19 @@ def parse_programs_for_source(epg_source, tvg_id=None):
|
|||
return False
|
||||
finally:
|
||||
# Final memory cleanup and tracking
|
||||
|
||||
if source_file:
|
||||
try:
|
||||
source_file.close()
|
||||
except:
|
||||
pass
|
||||
source_file = None
|
||||
|
||||
# Explicitly release any remaining large data structures
|
||||
failed_entries = None
|
||||
program_count = None
|
||||
channel_count = None
|
||||
updated_count = None
|
||||
processed = None
|
||||
programs_to_create = None
|
||||
programs_by_channel = None
|
||||
mapped_epg_ids = None
|
||||
mapped_tvg_ids = None
|
||||
tvg_id_to_epg_id = None
|
||||
gc.collect()
|
||||
|
||||
# Add comprehensive memory cleanup at the end
|
||||
|
|
@@ -1523,12 +1715,13 @@ def fetch_schedules_direct(source):
|
|||
logger.info(f"Fetching Schedules Direct data from source: {source.name}")
|
||||
try:
|
||||
# Get default user agent from settings
|
||||
default_user_agent_setting = CoreSettings.objects.filter(key='default-user-agent').first()
|
||||
stream_settings = CoreSettings.get_stream_settings()
|
||||
user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:138.0) Gecko/20100101 Firefox/138.0" # Fallback default
|
||||
default_user_agent_id = stream_settings.get('default_user_agent')
|
||||
|
||||
if default_user_agent_setting and default_user_agent_setting.value:
|
||||
if default_user_agent_id:
|
||||
try:
|
||||
user_agent_obj = UserAgent.objects.filter(id=int(default_user_agent_setting.value)).first()
|
||||
user_agent_obj = UserAgent.objects.filter(id=int(default_user_agent_id)).first()
|
||||
if user_agent_obj and user_agent_obj.user_agent:
|
||||
user_agent = user_agent_obj.user_agent
|
||||
logger.debug(f"Using default user agent: {user_agent}")
|
||||
|
|
@@ -1943,3 +2136,20 @@ def detect_file_format(file_path=None, content=None):
|
|||
|
||||
# If we reach here, we couldn't reliably determine the format
|
||||
return format_type, is_compressed, file_extension
|
||||
|
||||
|
||||
def generate_dummy_epg(source):
|
||||
"""
|
||||
DEPRECATED: This function is no longer used.
|
||||
|
||||
Dummy EPG programs are now generated on-demand when they are requested
|
||||
(during XMLTV export or EPG grid display), rather than being pre-generated
|
||||
and stored in the database.
|
||||
|
||||
See: apps/output/views.py - generate_custom_dummy_programs()
|
||||
|
||||
This function remains for backward compatibility but should not be called.
|
||||
"""
|
||||
logger.warning(f"generate_dummy_epg() called for {source.name} but this function is deprecated. "
|
||||
f"Dummy EPG programs are now generated on-demand.")
|
||||
return True
|
||||
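# Purely illustrative sketch of the on-demand idea referenced in the docstring
# above: build placeholder programmes around "now" at request time instead of
# storing them. The real generate_custom_dummy_programs() in apps/output/views.py
# may use a different shape and field set.
from datetime import datetime, timedelta, timezone as dt_timezone

def dummy_programs_for_channel(channel_name, hours=24, block_hours=1):
    now = datetime.now(dt_timezone.utc).replace(minute=0, second=0, microsecond=0)
    programs = []
    for offset in range(0, hours, block_hours):
        start = now + timedelta(hours=offset)
        programs.append({
            "title": channel_name,
            "start": start,
            "stop": start + timedelta(hours=block_hours),
        })
    return programs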
@@ -81,6 +81,13 @@ class M3UAccountViewSet(viewsets.ModelViewSet):
|
|||
account_type = response.data.get("account_type")
|
||||
account_id = response.data.get("id")
|
||||
|
||||
# Notify frontend that a new playlist was created
|
||||
from core.utils import send_websocket_update
|
||||
send_websocket_update('updates', 'update', {
|
||||
'type': 'playlist_created',
|
||||
'playlist_id': account_id
|
||||
})
|
||||
|
||||
if account_type == M3UAccount.Types.XC:
|
||||
refresh_m3u_groups(account_id)
|
||||
|
||||
|
|
@@ -145,6 +152,46 @@ class M3UAccountViewSet(viewsets.ModelViewSet):
|
|||
and not old_vod_enabled
|
||||
and new_vod_enabled
|
||||
):
|
||||
# Create Uncategorized categories immediately so they're available in the UI
|
||||
from apps.vod.models import VODCategory, M3UVODCategoryRelation
|
||||
|
||||
# Create movie Uncategorized category
|
||||
movie_category, _ = VODCategory.objects.get_or_create(
|
||||
name="Uncategorized",
|
||||
category_type="movie",
|
||||
defaults={}
|
||||
)
|
||||
|
||||
# Create series Uncategorized category
|
||||
series_category, _ = VODCategory.objects.get_or_create(
|
||||
name="Uncategorized",
|
||||
category_type="series",
|
||||
defaults={}
|
||||
)
|
||||
|
||||
# Create relations for both categories (disabled by default until first refresh)
|
||||
account_custom_props = instance.custom_properties or {}
|
||||
auto_enable_new = account_custom_props.get("auto_enable_new_groups_vod", True)
|
||||
|
||||
M3UVODCategoryRelation.objects.get_or_create(
|
||||
category=movie_category,
|
||||
m3u_account=instance,
|
||||
defaults={
|
||||
'enabled': auto_enable_new,
|
||||
'custom_properties': {}
|
||||
}
|
||||
)
|
||||
|
||||
M3UVODCategoryRelation.objects.get_or_create(
|
||||
category=series_category,
|
||||
m3u_account=instance,
|
||||
defaults={
|
||||
'enabled': auto_enable_new,
|
||||
'custom_properties': {}
|
||||
}
|
||||
)
|
||||
|
||||
# Trigger full VOD refresh
|
||||
from apps.vod.tasks import refresh_vod_content
|
||||
|
||||
refresh_vod_content.delay(instance.id)
|
||||
@@ -136,6 +136,9 @@ class M3UAccountSerializer(serializers.ModelSerializer):
|
|||
validators=[validate_flexible_url],
|
||||
)
|
||||
enable_vod = serializers.BooleanField(required=False, write_only=True)
|
||||
auto_enable_new_groups_live = serializers.BooleanField(required=False, write_only=True)
|
||||
auto_enable_new_groups_vod = serializers.BooleanField(required=False, write_only=True)
|
||||
auto_enable_new_groups_series = serializers.BooleanField(required=False, write_only=True)
|
||||
|
||||
class Meta:
|
||||
model = M3UAccount
|
||||
|
|
@@ -164,6 +167,9 @@ class M3UAccountSerializer(serializers.ModelSerializer):
|
|||
"status",
|
||||
"last_message",
|
||||
"enable_vod",
|
||||
"auto_enable_new_groups_live",
|
||||
"auto_enable_new_groups_vod",
|
||||
"auto_enable_new_groups_series",
|
||||
]
|
||||
extra_kwargs = {
|
||||
"password": {
|
||||
|
|
@@ -175,23 +181,36 @@ class M3UAccountSerializer(serializers.ModelSerializer):
|
|||
def to_representation(self, instance):
|
||||
data = super().to_representation(instance)
|
||||
|
||||
# Parse custom_properties to get VOD preference
|
||||
# Parse custom_properties to get VOD preference and auto_enable_new_groups settings
|
||||
custom_props = instance.custom_properties or {}
|
||||
|
||||
data["enable_vod"] = custom_props.get("enable_vod", False)
|
||||
data["auto_enable_new_groups_live"] = custom_props.get("auto_enable_new_groups_live", True)
|
||||
data["auto_enable_new_groups_vod"] = custom_props.get("auto_enable_new_groups_vod", True)
|
||||
data["auto_enable_new_groups_series"] = custom_props.get("auto_enable_new_groups_series", True)
|
||||
return data
|
||||
|
||||
def update(self, instance, validated_data):
|
||||
# Handle enable_vod preference
|
||||
# Handle enable_vod preference and auto_enable_new_groups settings
|
||||
enable_vod = validated_data.pop("enable_vod", None)
|
||||
auto_enable_new_groups_live = validated_data.pop("auto_enable_new_groups_live", None)
|
||||
auto_enable_new_groups_vod = validated_data.pop("auto_enable_new_groups_vod", None)
|
||||
auto_enable_new_groups_series = validated_data.pop("auto_enable_new_groups_series", None)
|
||||
|
||||
# Get existing custom_properties
|
||||
custom_props = instance.custom_properties or {}
|
||||
|
||||
# Update preferences
|
||||
if enable_vod is not None:
|
||||
# Get existing custom_properties
|
||||
custom_props = instance.custom_properties or {}
|
||||
|
||||
# Update VOD preference
|
||||
custom_props["enable_vod"] = enable_vod
|
||||
validated_data["custom_properties"] = custom_props
|
||||
if auto_enable_new_groups_live is not None:
|
||||
custom_props["auto_enable_new_groups_live"] = auto_enable_new_groups_live
|
||||
if auto_enable_new_groups_vod is not None:
|
||||
custom_props["auto_enable_new_groups_vod"] = auto_enable_new_groups_vod
|
||||
if auto_enable_new_groups_series is not None:
|
||||
custom_props["auto_enable_new_groups_series"] = auto_enable_new_groups_series
|
||||
|
||||
validated_data["custom_properties"] = custom_props
|
||||
|
||||
# Pop out channel group memberships so we can handle them manually
|
||||
channel_group_data = validated_data.pop("channel_group", [])
|
||||
|
|
@@ -225,14 +244,20 @@ class M3UAccountSerializer(serializers.ModelSerializer):
|
|||
return instance
|
||||
|
||||
def create(self, validated_data):
|
||||
# Handle enable_vod preference during creation
|
||||
# Handle enable_vod preference and auto_enable_new_groups settings during creation
|
||||
enable_vod = validated_data.pop("enable_vod", False)
|
||||
auto_enable_new_groups_live = validated_data.pop("auto_enable_new_groups_live", True)
|
||||
auto_enable_new_groups_vod = validated_data.pop("auto_enable_new_groups_vod", True)
|
||||
auto_enable_new_groups_series = validated_data.pop("auto_enable_new_groups_series", True)
|
||||
|
||||
# Parse existing custom_properties or create new
|
||||
custom_props = validated_data.get("custom_properties", {})
|
||||
|
||||
# Set VOD preference
|
||||
# Set preferences (default to True for auto_enable_new_groups)
|
||||
custom_props["enable_vod"] = enable_vod
|
||||
custom_props["auto_enable_new_groups_live"] = auto_enable_new_groups_live
|
||||
custom_props["auto_enable_new_groups_vod"] = auto_enable_new_groups_vod
|
||||
custom_props["auto_enable_new_groups_series"] = auto_enable_new_groups_series
|
||||
validated_data["custom_properties"] = custom_props
|
||||
|
||||
return super().create(validated_data)
|
||||
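# Generic sketch of the serializer pattern above: expose a write-only flag,
# fold it into the JSON custom_properties column on save, and surface it again
# in to_representation(). Names are placeholders; mix into a ModelSerializer.
from rest_framework import serializers

class PreferenceFlagMixin(serializers.Serializer):
    enable_feature = serializers.BooleanField(required=False, write_only=True)

    def to_representation(self, instance):
        data = super().to_representation(instance)
        data["enable_feature"] = (instance.custom_properties or {}).get("enable_feature", False)
        return data

    def update(self, instance, validated_data):
        flag = validated_data.pop("enable_feature", None)
        props = instance.custom_properties or {}
        if flag is not None:
            props["enable_feature"] = flag
        validated_data["custom_properties"] = props
        return super().update(instance, validated_data)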
@@ -24,11 +24,13 @@ from core.utils import (
|
|||
acquire_task_lock,
|
||||
release_task_lock,
|
||||
natural_sort_key,
|
||||
log_system_event,
|
||||
)
|
||||
from core.models import CoreSettings, UserAgent
|
||||
from asgiref.sync import async_to_sync
|
||||
from core.xtream_codes import Client as XCClient
|
||||
from core.utils import send_websocket_update
|
||||
from .utils import normalize_stream_url
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
|
@@ -219,6 +221,10 @@ def fetch_m3u_lines(account, use_cache=False):
|
|||
# Has HTTP URLs, might be a simple M3U without headers
|
||||
is_valid_m3u = True
|
||||
logger.info("Content validated as M3U: contains HTTP URLs")
|
||||
elif any(line.strip().startswith(('rtsp', 'rtp', 'udp')) for line in content_lines):
|
||||
# Has RTSP/RTP/UDP URLs, might be a simple M3U without headers
|
||||
is_valid_m3u = True
|
||||
logger.info("Content validated as M3U: contains RTSP/RTP/UDP URLs")
|
||||
|
||||
if not is_valid_m3u:
|
||||
# Log what we actually received for debugging
|
||||
|
|
@@ -434,25 +440,51 @@ def get_case_insensitive_attr(attributes, key, default=""):
|
|||
def parse_extinf_line(line: str) -> dict:
|
||||
"""
|
||||
Parse an EXTINF line from an M3U file.
|
||||
This function removes the "#EXTINF:" prefix, then splits the remaining
|
||||
string on the first comma that is not enclosed in quotes.
|
||||
This function removes the "#EXTINF:" prefix, then extracts all key="value" attributes,
|
||||
and treats everything after the last attribute as the display name.
|
||||
|
||||
Returns a dictionary with:
|
||||
- 'attributes': a dict of attribute key/value pairs (e.g. tvg-id, tvg-logo, group-title)
|
||||
- 'display_name': the text after the comma (the fallback display name)
|
||||
- 'display_name': the text after the attributes (the fallback display name)
|
||||
- 'name': the value from tvg-name (if present) or the display name otherwise.
|
||||
"""
|
||||
if not line.startswith("#EXTINF:"):
|
||||
return None
|
||||
content = line[len("#EXTINF:") :].strip()
|
||||
# Split on the first comma that is not inside quotes.
|
||||
parts = re.split(r',(?=(?:[^"]*"[^"]*")*[^"]*$)', content, maxsplit=1)
|
||||
if len(parts) != 2:
|
||||
return None
|
||||
attributes_part, display_name = parts[0], parts[1].strip()
|
||||
attrs = dict(re.findall(r'([^\s]+)=["\']([^"\']+)["\']', attributes_part))
|
||||
# Use tvg-name attribute if available; otherwise, use the display name.
|
||||
name = get_case_insensitive_attr(attrs, "tvg-name", display_name)
|
||||
|
||||
# Single pass: extract all attributes AND track the last attribute position
|
||||
# This regex matches both key="value" and key='value' patterns
|
||||
attrs = {}
|
||||
last_attr_end = 0
|
||||
|
||||
# Use a single regex that handles both quote types
|
||||
for match in re.finditer(r'([^\s]+)=(["\'])([^\2]*?)\2', content):
|
||||
key = match.group(1)
|
||||
value = match.group(3)
|
||||
attrs[key] = value
|
||||
last_attr_end = match.end()
|
||||
|
||||
# Everything after the last attribute (skipping leading comma and whitespace) is the display name
|
||||
if last_attr_end > 0:
|
||||
remaining = content[last_attr_end:].strip()
|
||||
# Remove leading comma if present
|
||||
if remaining.startswith(','):
|
||||
remaining = remaining[1:].strip()
|
||||
display_name = remaining
|
||||
else:
|
||||
# No attributes found, try the old comma-split method as fallback
|
||||
parts = content.split(',', 1)
|
||||
if len(parts) == 2:
|
||||
display_name = parts[1].strip()
|
||||
else:
|
||||
display_name = content.strip()
|
||||
|
||||
# Use tvg-name attribute if available; otherwise try tvc-guide-title, then fall back to display name.
|
||||
name = get_case_insensitive_attr(attrs, "tvg-name", None)
|
||||
if not name:
|
||||
name = get_case_insensitive_attr(attrs, "tvc-guide-title", None)
|
||||
if not name:
|
||||
name = display_name
|
||||
return {"attributes": attrs, "display_name": display_name, "name": name}
|
||||
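# Compact standalone mirror of the attribute-first parsing above, shown on a
# sample line; the regex and fallbacks are simplified relative to the full function.
import re

def parse_extinf_minimal(line):
    content = line[len("#EXTINF:"):].strip()
    attrs, last_end = {}, 0
    for m in re.finditer(r'([^\s]+)=(["\'])(.*?)\2', content):
        attrs[m.group(1)] = m.group(3)
        last_end = m.end()
    display = content[last_end:].lstrip(', ').strip() if last_end else content.split(',', 1)[-1].strip()
    return attrs, display

# parse_extinf_minimal('#EXTINF:-1 tvg-id="news.hd" group-title="News",News HD')
# -> ({'tvg-id': 'news.hd', 'group-title': 'News'}, 'News HD')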
|
||||
|
||||
|
|
@@ -481,32 +513,48 @@ def check_field_lengths(streams_to_create):
|
|||
|
||||
|
||||
@shared_task
|
||||
def process_groups(account, groups):
|
||||
def process_groups(account, groups, scan_start_time=None):
|
||||
"""Process groups and update their relationships with the M3U account.
|
||||
|
||||
Args:
|
||||
account: M3UAccount instance
|
||||
groups: Dict of group names to custom properties
|
||||
scan_start_time: Timestamp when the scan started (for consistent last_seen marking)
|
||||
"""
|
||||
# Use scan_start_time if provided, otherwise current time
|
||||
# This ensures consistency with stream processing and cleanup logic
|
||||
if scan_start_time is None:
|
||||
scan_start_time = timezone.now()
|
||||
|
||||
existing_groups = {
|
||||
group.name: group
|
||||
for group in ChannelGroup.objects.filter(name__in=groups.keys())
|
||||
}
|
||||
logger.info(f"Currently {len(existing_groups)} existing groups")
|
||||
|
||||
group_objs = []
|
||||
# Check if we should auto-enable new groups based on account settings
|
||||
account_custom_props = account.custom_properties or {}
|
||||
auto_enable_new_groups_live = account_custom_props.get("auto_enable_new_groups_live", True)
|
||||
|
||||
# Separate existing groups from groups that need to be created
|
||||
existing_group_objs = []
|
||||
groups_to_create = []
|
||||
|
||||
for group_name, custom_props in groups.items():
|
||||
logger.debug(f"Handling group for M3U account {account.id}: {group_name}")
|
||||
|
||||
if group_name not in existing_groups:
|
||||
groups_to_create.append(
|
||||
ChannelGroup(
|
||||
name=group_name,
|
||||
)
|
||||
)
|
||||
if group_name in existing_groups:
|
||||
existing_group_objs.append(existing_groups[group_name])
|
||||
else:
|
||||
group_objs.append(existing_groups[group_name])
|
||||
groups_to_create.append(ChannelGroup(name=group_name))
|
||||
|
||||
# Create new groups and fetch them back with IDs
|
||||
newly_created_group_objs = []
|
||||
if groups_to_create:
|
||||
logger.debug(f"Creating {len(groups_to_create)} groups")
|
||||
created = ChannelGroup.bulk_create_and_fetch(groups_to_create)
|
||||
logger.debug(f"Created {len(created)} groups")
|
||||
group_objs.extend(created)
|
||||
logger.info(f"Creating {len(groups_to_create)} new groups for account {account.id}")
|
||||
newly_created_group_objs = list(ChannelGroup.bulk_create_and_fetch(groups_to_create))
|
||||
logger.debug(f"Successfully created {len(newly_created_group_objs)} new groups")
|
||||
|
||||
# Combine all groups
|
||||
all_group_objs = existing_group_objs + newly_created_group_objs
|
||||
|
||||
# Get existing relationships for this account
|
||||
existing_relationships = {
|
||||
|
|
@@ -517,26 +565,10 @@ def process_groups(account, groups):
|
|||
).select_related('channel_group')
|
||||
}
|
||||
|
||||
# Get ALL existing relationships for this account to identify orphaned ones
|
||||
all_existing_relationships = {
|
||||
rel.channel_group.name: rel
|
||||
for rel in ChannelGroupM3UAccount.objects.filter(
|
||||
m3u_account=account
|
||||
).select_related('channel_group')
|
||||
}
|
||||
|
||||
relations_to_create = []
|
||||
relations_to_update = []
|
||||
relations_to_delete = []
|
||||
|
||||
# Find orphaned relationships (groups that no longer exist in the source)
|
||||
current_group_names = set(groups.keys())
|
||||
for group_name, rel in all_existing_relationships.items():
|
||||
if group_name not in current_group_names:
|
||||
relations_to_delete.append(rel)
|
||||
logger.debug(f"Marking relationship for deletion: group '{group_name}' no longer exists in source for account {account.id}")
|
||||
|
||||
for group in group_objs:
|
||||
for group in all_group_objs:
|
||||
custom_props = groups.get(group.name, {})
|
||||
|
||||
if group.name in existing_relationships:
|
||||
|
|
@@ -561,40 +593,30 @@ def process_groups(account, groups):
|
|||
del updated_custom_props["xc_id"]
|
||||
|
||||
existing_rel.custom_properties = updated_custom_props
|
||||
existing_rel.last_seen = scan_start_time
|
||||
existing_rel.is_stale = False
|
||||
relations_to_update.append(existing_rel)
|
||||
logger.debug(f"Updated xc_id for group '{group.name}' from '{existing_xc_id}' to '{new_xc_id}' - account {account.id}")
|
||||
else:
|
||||
# Update last_seen even if xc_id hasn't changed
|
||||
existing_rel.last_seen = scan_start_time
|
||||
existing_rel.is_stale = False
|
||||
relations_to_update.append(existing_rel)
|
||||
logger.debug(f"xc_id unchanged for group '{group.name}' - account {account.id}")
|
||||
else:
|
||||
# Create new relationship - but check if there's an existing relationship that might have user settings
|
||||
# This can happen if the group was temporarily removed and is now back
|
||||
try:
|
||||
potential_existing = ChannelGroupM3UAccount.objects.filter(
|
||||
m3u_account=account,
|
||||
channel_group=group
|
||||
).first()
|
||||
# Create new relationship - this group is new to this M3U account
|
||||
# Use the auto_enable setting to determine if it should start enabled
|
||||
if not auto_enable_new_groups_live:
|
||||
logger.info(f"Group '{group.name}' is new to account {account.id} - creating relationship but DISABLED (auto_enable_new_groups_live=False)")
|
||||
|
||||
if potential_existing:
|
||||
# Merge with existing custom properties to preserve user settings
|
||||
existing_custom_props = potential_existing.custom_properties or {}
|
||||
|
||||
# Merge new properties with existing ones
|
||||
merged_custom_props = existing_custom_props.copy()
|
||||
merged_custom_props.update(custom_props)
|
||||
custom_props = merged_custom_props
|
||||
logger.debug(f"Merged custom properties for existing relationship: group '{group.name}' - account {account.id}")
|
||||
except Exception as e:
|
||||
logger.debug(f"Could not check for existing relationship: {str(e)}")
|
||||
# Fall back to using just the new custom properties
|
||||
pass
|
||||
|
||||
# Create new relationship
|
||||
relations_to_create.append(
|
||||
ChannelGroupM3UAccount(
|
||||
channel_group=group,
|
||||
m3u_account=account,
|
||||
custom_properties=custom_props,
|
||||
enabled=True, # Default to enabled
|
||||
enabled=auto_enable_new_groups_live,
|
||||
last_seen=scan_start_time,
|
||||
is_stale=False,
|
||||
)
|
||||
)
|
||||
|
||||
|
|
@@ -605,15 +627,38 @@ def process_groups(account, groups):
|
|||
|
||||
# Bulk update existing relationships
|
||||
if relations_to_update:
|
||||
ChannelGroupM3UAccount.objects.bulk_update(relations_to_update, ['custom_properties'])
|
||||
logger.info(f"Updated {len(relations_to_update)} existing group relationships with new xc_id values for account {account.id}")
|
||||
ChannelGroupM3UAccount.objects.bulk_update(relations_to_update, ['custom_properties', 'last_seen', 'is_stale'])
|
||||
logger.info(f"Updated {len(relations_to_update)} existing group relationships for account {account.id}")
|
||||
|
||||
# Delete orphaned relationships
|
||||
if relations_to_delete:
|
||||
ChannelGroupM3UAccount.objects.filter(
|
||||
id__in=[rel.id for rel in relations_to_delete]
|
||||
).delete()
|
||||
logger.info(f"Deleted {len(relations_to_delete)} orphaned group relationships for account {account.id}: {[rel.channel_group.name for rel in relations_to_delete]}")
|
||||
|
||||
def cleanup_stale_group_relationships(account, scan_start_time):
|
||||
"""
|
||||
Remove group relationships that haven't been seen since the stale retention period.
|
||||
This follows the same logic as stream cleanup for consistency.
|
||||
"""
|
||||
# Calculate cutoff date for stale group relationships
|
||||
stale_cutoff = scan_start_time - timezone.timedelta(days=account.stale_stream_days)
|
||||
logger.info(
|
||||
f"Removing group relationships not seen since {stale_cutoff} for M3U account {account.id}"
|
||||
)
|
||||
|
||||
# Find stale relationships
|
||||
stale_relationships = ChannelGroupM3UAccount.objects.filter(
|
||||
m3u_account=account,
|
||||
last_seen__lt=stale_cutoff
|
||||
).select_related('channel_group')
|
||||
|
||||
relations_to_delete = list(stale_relationships)
|
||||
deleted_count = len(relations_to_delete)
|
||||
|
||||
if deleted_count > 0:
|
||||
logger.info(
|
||||
f"Found {deleted_count} stale group relationships for account {account.id}: "
|
||||
f"{[rel.channel_group.name for rel in relations_to_delete]}"
|
||||
)
|
||||
|
||||
# Delete the stale relationships
|
||||
stale_relationships.delete()
|
||||
|
||||
# Check if any of the deleted relationships left groups with no remaining associations
|
||||
orphaned_group_ids = []
|
||||
|
|
@@ -638,6 +683,10 @@ def process_groups(account, groups):
|
|||
deleted_groups = list(ChannelGroup.objects.filter(id__in=orphaned_group_ids).values_list('name', flat=True))
|
||||
ChannelGroup.objects.filter(id__in=orphaned_group_ids).delete()
|
||||
logger.info(f"Deleted {len(orphaned_group_ids)} orphaned groups that had no remaining associations: {deleted_groups}")
|
||||
else:
|
||||
logger.debug(f"No stale group relationships found for account {account.id}")
|
||||
|
||||
return deleted_count
|
||||
|
||||
|
||||
def collect_xc_streams(account_id, enabled_groups):
|
||||
|
|
@@ -774,7 +823,7 @@ def process_xc_category_direct(account_id, batch, groups, hash_keys):
|
|||
group_title = group_name
|
||||
|
||||
stream_hash = Stream.generate_hash_key(
|
||||
name, url, tvg_id, hash_keys, m3u_id=account_id
|
||||
name, url, tvg_id, hash_keys, m3u_id=account_id, group=group_title
|
||||
)
|
||||
stream_props = {
|
||||
"name": name,
|
||||
|
|
@@ -785,6 +834,7 @@ def process_xc_category_direct(account_id, batch, groups, hash_keys):
|
|||
"channel_group_id": int(group_id),
|
||||
"stream_hash": stream_hash,
|
||||
"custom_properties": stream,
|
||||
"is_stale": False,
|
||||
}
|
||||
|
||||
if stream_hash not in stream_hashes:
|
||||
|
|
@@ -820,10 +870,12 @@ def process_xc_category_direct(account_id, batch, groups, hash_keys):
|
|||
setattr(obj, key, value)
|
||||
obj.last_seen = timezone.now()
|
||||
obj.updated_at = timezone.now() # Update timestamp only for changed streams
|
||||
obj.is_stale = False
|
||||
streams_to_update.append(obj)
|
||||
else:
|
||||
# Always update last_seen, even if nothing else changed
|
||||
obj.last_seen = timezone.now()
|
||||
obj.is_stale = False
|
||||
# Don't update updated_at for unchanged streams
|
||||
streams_to_update.append(obj)
|
||||
|
||||
|
|
@@ -834,6 +886,7 @@ def process_xc_category_direct(account_id, batch, groups, hash_keys):
|
|||
stream_props["updated_at"] = (
|
||||
timezone.now()
|
||||
) # Set initial updated_at for new streams
|
||||
stream_props["is_stale"] = False
|
||||
streams_to_create.append(Stream(**stream_props))
|
||||
|
||||
try:
|
||||
|
|
@@ -845,7 +898,7 @@ def process_xc_category_direct(account_id, batch, groups, hash_keys):
|
|||
# Simplified bulk update for better performance
|
||||
Stream.objects.bulk_update(
|
||||
streams_to_update,
|
||||
['name', 'url', 'logo_url', 'tvg_id', 'custom_properties', 'last_seen', 'updated_at'],
|
||||
['name', 'url', 'logo_url', 'tvg_id', 'custom_properties', 'last_seen', 'updated_at', 'is_stale'],
|
||||
batch_size=150 # Smaller batch size for XC processing
|
||||
)
|
||||
|
||||
|
|
@@ -908,6 +961,12 @@ def process_m3u_batch_direct(account_id, batch, groups, hash_keys):
|
|||
for stream_info in batch:
|
||||
try:
|
||||
name, url = stream_info["name"], stream_info["url"]
|
||||
|
||||
# Validate URL length - maximum of 4096 characters
|
||||
if url and len(url) > 4096:
|
||||
logger.warning(f"Skipping stream '{name}': URL too long ({len(url)} characters, max 4096)")
|
||||
continue
|
||||
|
||||
tvg_id, tvg_logo = get_case_insensitive_attr(
|
||||
stream_info["attributes"], "tvg-id", ""
|
||||
), get_case_insensitive_attr(stream_info["attributes"], "tvg-logo", "")
|
||||
|
|
@@ -942,7 +1001,7 @@ def process_m3u_batch_direct(account_id, batch, groups, hash_keys):
|
|||
)
|
||||
continue
|
||||
|
||||
stream_hash = Stream.generate_hash_key(name, url, tvg_id, hash_keys, m3u_id=account_id)
|
||||
stream_hash = Stream.generate_hash_key(name, url, tvg_id, hash_keys, m3u_id=account_id, group=group_title)
|
||||
stream_props = {
|
||||
"name": name,
|
||||
"url": url,
|
||||
|
|
@@ -952,6 +1011,7 @@ def process_m3u_batch_direct(account_id, batch, groups, hash_keys):
|
|||
"channel_group_id": int(groups.get(group_title)),
|
||||
"stream_hash": stream_hash,
|
||||
"custom_properties": stream_info["attributes"],
|
||||
"is_stale": False,
|
||||
}
|
||||
|
||||
if stream_hash not in stream_hashes:
|
||||
|
|
@@ -991,11 +1051,15 @@ def process_m3u_batch_direct(account_id, batch, groups, hash_keys):
|
|||
obj.custom_properties = stream_props["custom_properties"]
|
||||
obj.updated_at = timezone.now()
|
||||
|
||||
# Always mark as not stale since we saw it in this refresh
|
||||
obj.is_stale = False
|
||||
|
||||
streams_to_update.append(obj)
|
||||
else:
|
||||
# New stream
|
||||
stream_props["last_seen"] = timezone.now()
|
||||
stream_props["updated_at"] = timezone.now()
|
||||
stream_props["is_stale"] = False
|
||||
streams_to_create.append(Stream(**stream_props))
|
||||
|
||||
try:
|
||||
|
|
@@ -1007,7 +1071,7 @@ def process_m3u_batch_direct(account_id, batch, groups, hash_keys):
|
|||
# Update all streams in a single bulk operation
|
||||
Stream.objects.bulk_update(
|
||||
streams_to_update,
|
||||
['name', 'url', 'logo_url', 'tvg_id', 'custom_properties', 'last_seen', 'updated_at'],
|
||||
['name', 'url', 'logo_url', 'tvg_id', 'custom_properties', 'last_seen', 'updated_at', 'is_stale'],
|
||||
batch_size=200
|
||||
)
|
||||
except Exception as e:
|
||||
|
|
@@ -1068,7 +1132,15 @@ def cleanup_streams(account_id, scan_start_time=timezone.now):
|
|||
|
||||
|
||||
@shared_task
|
||||
def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False):
|
||||
def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False, scan_start_time=None):
|
||||
"""Refresh M3U groups for an account.
|
||||
|
||||
Args:
|
||||
account_id: ID of the M3U account
|
||||
use_cache: Whether to use cached M3U file
|
||||
full_refresh: Whether this is part of a full refresh
|
||||
scan_start_time: Timestamp when the scan started (for consistent last_seen marking)
|
||||
"""
|
||||
if not acquire_task_lock("refresh_m3u_account_groups", account_id):
|
||||
return f"Task already running for account_id={account_id}.", None
|
||||
|
||||
|
|
@@ -1194,52 +1266,14 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False):
|
|||
auth_result = xc_client.authenticate()
|
||||
logger.debug(f"Authentication response: {auth_result}")
|
||||
|
||||
# Save account information to all active profiles
|
||||
# Queue async profile refresh task to run in background
|
||||
# This prevents any delay in the main refresh process
|
||||
try:
|
||||
from apps.m3u.models import M3UAccountProfile
|
||||
|
||||
profiles = M3UAccountProfile.objects.filter(
|
||||
m3u_account=account,
|
||||
is_active=True
|
||||
)
|
||||
|
||||
# Update each profile with account information using its own transformed credentials
|
||||
for profile in profiles:
|
||||
try:
|
||||
# Get transformed credentials for this specific profile
|
||||
profile_url, profile_username, profile_password = get_transformed_credentials(account, profile)
|
||||
|
||||
# Create a separate XC client for this profile's credentials
|
||||
with XCClient(
|
||||
profile_url,
|
||||
profile_username,
|
||||
profile_password,
|
||||
user_agent_string
|
||||
) as profile_client:
|
||||
# Authenticate with this profile's credentials
|
||||
if profile_client.authenticate():
|
||||
# Get account information specific to this profile's credentials
|
||||
profile_account_info = profile_client.get_account_info()
|
||||
|
||||
# Merge with existing custom_properties if they exist
|
||||
existing_props = profile.custom_properties or {}
|
||||
existing_props.update(profile_account_info)
|
||||
profile.custom_properties = existing_props
|
||||
profile.save(update_fields=['custom_properties'])
|
||||
|
||||
logger.info(f"Updated account information for profile '{profile.name}' with transformed credentials")
|
||||
else:
|
||||
logger.warning(f"Failed to authenticate profile '{profile.name}' with transformed credentials")
|
||||
|
||||
except Exception as profile_error:
|
||||
logger.error(f"Failed to update account information for profile '{profile.name}': {str(profile_error)}")
|
||||
# Continue with other profiles even if one fails
|
||||
|
||||
logger.info(f"Processed account information for {profiles.count()} profiles for account {account.name}")
|
||||
|
||||
except Exception as save_error:
|
||||
logger.warning(f"Failed to process profile account information: {str(save_error)}")
|
||||
# Don't fail the whole process if saving account info fails
|
||||
logger.info(f"Queueing background profile refresh for account {account.name}")
|
||||
refresh_account_profiles.delay(account.id)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to queue profile refresh task: {str(e)}")
|
||||
# Don't fail the main refresh if profile refresh can't be queued
|
||||
|
||||
except Exception as e:
|
||||
error_msg = f"Failed to authenticate with XC server: {str(e)}"
|
||||
|
|
@@ -1381,10 +1415,12 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False):
|
|||
)
|
||||
problematic_lines.append((line_index + 1, line[:200]))
|
||||
|
||||
elif extinf_data and line.startswith("http"):
|
||||
elif extinf_data and (line.startswith("http") or line.startswith("rtsp") or line.startswith("rtp") or line.startswith("udp")):
|
||||
url_count += 1
|
||||
# Normalize UDP URLs only (e.g., remove VLC-specific @ prefix)
|
||||
normalized_url = normalize_stream_url(line) if line.startswith("udp") else line
|
||||
# Associate URL with the last EXTINF line
|
||||
extinf_data[-1]["url"] = line
|
||||
extinf_data[-1]["url"] = normalized_url
|
||||
valid_stream_count += 1
|
||||
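# Illustrative sketch of the UDP normalization mentioned above (dropping the
# VLC-style "@" after the scheme); the real normalize_stream_url() in .utils
# may handle additional cases.
def normalize_udp_url(url):
    if url.startswith("udp://@"):
        return "udp://" + url[len("udp://@"):]
    return url

# normalize_udp_url("udp://@239.0.0.1:1234") -> "udp://239.0.0.1:1234"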
|
||||
# Periodically log progress for large files
|
||||
|
|
@@ -1431,7 +1467,7 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False):
|
|||
|
||||
send_m3u_update(account_id, "processing_groups", 0)
|
||||
|
||||
process_groups(account, groups)
|
||||
process_groups(account, groups, scan_start_time)
|
||||
|
||||
release_task_lock("refresh_m3u_account_groups", account_id)
|
||||
|
||||
|
|
@@ -1562,7 +1598,7 @@ def sync_auto_channels(account_id, scan_start_time=None):
|
|||
|
||||
# Get force_dummy_epg, group_override, and regex patterns from group custom_properties
|
||||
group_custom_props = {}
|
||||
force_dummy_epg = False
|
||||
force_dummy_epg = False # Backward compatibility: legacy option to disable EPG
|
||||
override_group_id = None
|
||||
name_regex_pattern = None
|
||||
name_replace_pattern = None
|
||||
|
|
@@ -1571,6 +1607,8 @@ def sync_auto_channels(account_id, scan_start_time=None):
|
|||
channel_sort_order = None
|
||||
channel_sort_reverse = False
|
||||
stream_profile_id = None
|
||||
custom_logo_id = None
|
||||
custom_epg_id = None # New option: select specific EPG source (takes priority over force_dummy_epg)
|
||||
if group_relation.custom_properties:
|
||||
group_custom_props = group_relation.custom_properties
|
||||
force_dummy_epg = group_custom_props.get("force_dummy_epg", False)
|
||||
|
|
@@ -1581,11 +1619,13 @@ def sync_auto_channels(account_id, scan_start_time=None):
|
|||
)
|
||||
name_match_regex = group_custom_props.get("name_match_regex")
|
||||
channel_profile_ids = group_custom_props.get("channel_profile_ids")
|
||||
custom_epg_id = group_custom_props.get("custom_epg_id")
|
||||
channel_sort_order = group_custom_props.get("channel_sort_order")
|
||||
channel_sort_reverse = group_custom_props.get(
|
||||
"channel_sort_reverse", False
|
||||
)
|
||||
stream_profile_id = group_custom_props.get("stream_profile_id")
|
||||
custom_logo_id = group_custom_props.get("custom_logo_id")
|
||||
|
||||
# Determine which group to use for created channels
|
||||
target_group = channel_group
|
||||
|
|
@@ -1840,7 +1880,25 @@ def sync_auto_channels(account_id, scan_start_time=None):
|
|||
|
||||
# Handle logo updates
|
||||
current_logo = None
|
||||
if stream.logo_url:
|
||||
if custom_logo_id:
|
||||
# Use the custom logo specified in group settings
|
||||
from apps.channels.models import Logo
|
||||
try:
|
||||
current_logo = Logo.objects.get(id=custom_logo_id)
|
||||
except Logo.DoesNotExist:
|
||||
logger.warning(
|
||||
f"Custom logo with ID {custom_logo_id} not found for existing channel, falling back to stream logo"
|
||||
)
|
||||
# Fall back to stream logo if custom logo not found
|
||||
if stream.logo_url:
|
||||
current_logo, _ = Logo.objects.get_or_create(
|
||||
url=stream.logo_url,
|
||||
defaults={
|
||||
"name": stream.name or stream.tvg_id or "Unknown"
|
||||
},
|
||||
)
|
||||
elif stream.logo_url:
|
||||
# No custom logo configured, use stream logo
|
||||
from apps.channels.models import Logo
|
||||
|
||||
current_logo, _ = Logo.objects.get_or_create(
|
||||
|
|
@@ -1856,10 +1914,42 @@ def sync_auto_channels(account_id, scan_start_time=None):
|
|||
|
||||
# Handle EPG data updates
|
||||
current_epg_data = None
|
||||
if stream.tvg_id and not force_dummy_epg:
|
||||
if custom_epg_id:
|
||||
# Use the custom EPG specified in group settings (e.g., a dummy EPG)
|
||||
from apps.epg.models import EPGSource
|
||||
try:
|
||||
epg_source = EPGSource.objects.get(id=custom_epg_id)
|
||||
# For dummy EPGs, select the first (and typically only) EPGData entry from this source
|
||||
if epg_source.source_type == 'dummy':
|
||||
current_epg_data = EPGData.objects.filter(
|
||||
epg_source=epg_source
|
||||
).first()
|
||||
if not current_epg_data:
|
||||
logger.warning(
|
||||
f"No EPGData found for dummy EPG source {epg_source.name} (ID: {custom_epg_id})"
|
||||
)
|
||||
else:
|
||||
# For non-dummy sources, try to find existing EPGData by tvg_id
|
||||
if stream.tvg_id:
|
||||
current_epg_data = EPGData.objects.filter(
|
||||
tvg_id=stream.tvg_id,
|
||||
epg_source=epg_source
|
||||
).first()
|
||||
except EPGSource.DoesNotExist:
|
||||
logger.warning(
|
||||
f"Custom EPG source with ID {custom_epg_id} not found for existing channel, falling back to auto-match"
|
||||
)
|
||||
# Fall back to auto-match by tvg_id
|
||||
if stream.tvg_id and not force_dummy_epg:
|
||||
current_epg_data = EPGData.objects.filter(
|
||||
tvg_id=stream.tvg_id
|
||||
).first()
|
||||
elif stream.tvg_id and not force_dummy_epg:
|
||||
# Auto-match EPG by tvg_id (original behavior)
|
||||
current_epg_data = EPGData.objects.filter(
|
||||
tvg_id=stream.tvg_id
|
||||
).first()
|
||||
# If force_dummy_epg is True and no custom_epg_id, current_epg_data stays None
|
||||
|
||||
if existing_channel.epg_data != current_epg_data:
|
||||
existing_channel.epg_data = current_epg_data
|
||||
|
|
@@ -1949,19 +2039,81 @@ def sync_auto_channels(account_id, scan_start_time=None):
|
|||
ChannelProfileMembership.objects.bulk_create(memberships)
|
||||
|
||||
# Try to match EPG data
|
||||
if stream.tvg_id and not force_dummy_epg:
|
||||
if custom_epg_id:
|
||||
# Use the custom EPG specified in group settings (e.g., a dummy EPG)
|
||||
from apps.epg.models import EPGSource
|
||||
try:
|
||||
epg_source = EPGSource.objects.get(id=custom_epg_id)
|
||||
# For dummy EPGs, select the first (and typically only) EPGData entry from this source
|
||||
if epg_source.source_type == 'dummy':
|
||||
epg_data = EPGData.objects.filter(
|
||||
epg_source=epg_source
|
||||
).first()
|
||||
if epg_data:
|
||||
channel.epg_data = epg_data
|
||||
channel.save(update_fields=["epg_data"])
|
||||
else:
|
||||
logger.warning(
|
||||
f"No EPGData found for dummy EPG source {epg_source.name} (ID: {custom_epg_id})"
|
||||
)
|
||||
else:
|
||||
# For non-dummy sources, try to find existing EPGData by tvg_id
|
||||
if stream.tvg_id:
|
||||
epg_data = EPGData.objects.filter(
|
||||
tvg_id=stream.tvg_id,
|
||||
epg_source=epg_source
|
||||
).first()
|
||||
if epg_data:
|
||||
channel.epg_data = epg_data
|
||||
channel.save(update_fields=["epg_data"])
|
||||
except EPGSource.DoesNotExist:
|
||||
logger.warning(
|
||||
f"Custom EPG source with ID {custom_epg_id} not found, falling back to auto-match"
|
||||
)
|
||||
# Fall back to auto-match by tvg_id
|
||||
if stream.tvg_id and not force_dummy_epg:
|
||||
epg_data = EPGData.objects.filter(
|
||||
tvg_id=stream.tvg_id
|
||||
).first()
|
||||
if epg_data:
|
||||
channel.epg_data = epg_data
|
||||
channel.save(update_fields=["epg_data"])
|
||||
elif stream.tvg_id and not force_dummy_epg:
|
||||
# Auto-match EPG by tvg_id (original behavior)
|
||||
epg_data = EPGData.objects.filter(
|
||||
tvg_id=stream.tvg_id
|
||||
).first()
|
||||
if epg_data:
|
||||
channel.epg_data = epg_data
|
||||
channel.save(update_fields=["epg_data"])
|
||||
elif stream.tvg_id and force_dummy_epg:
|
||||
elif force_dummy_epg:
|
||||
# Force dummy EPG with no custom EPG selected (set to None)
|
||||
channel.epg_data = None
|
||||
channel.save(update_fields=["epg_data"])
|
||||
|
||||
# Handle logo
|
||||
if stream.logo_url:
|
||||
if custom_logo_id:
|
||||
# Use the custom logo specified in group settings
|
||||
from apps.channels.models import Logo
|
||||
try:
|
||||
custom_logo = Logo.objects.get(id=custom_logo_id)
|
||||
channel.logo = custom_logo
|
||||
channel.save(update_fields=["logo"])
|
||||
except Logo.DoesNotExist:
|
||||
logger.warning(
|
||||
f"Custom logo with ID {custom_logo_id} not found, falling back to stream logo"
|
||||
)
|
||||
# Fall back to stream logo if custom logo not found
|
||||
if stream.logo_url:
|
||||
logo, _ = Logo.objects.get_or_create(
|
||||
url=stream.logo_url,
|
||||
defaults={
|
||||
"name": stream.name or stream.tvg_id or "Unknown"
|
||||
},
|
||||
)
|
||||
channel.logo = logo
|
||||
channel.save(update_fields=["logo"])
|
||||
elif stream.logo_url:
|
||||
from apps.channels.models import Logo
|
||||
|
||||
logo, _ = Logo.objects.get_or_create(
|
||||
|
|
@@ -2128,6 +2280,106 @@ def get_transformed_credentials(account, profile=None):
|
|||
return base_url, base_username, base_password
|
||||
|
||||
|
||||
@shared_task
|
||||
def refresh_account_profiles(account_id):
|
||||
"""Refresh account information for all active profiles of an XC account.
|
||||
|
||||
This task runs asynchronously in the background after account refresh completes.
|
||||
It includes rate limiting delays between profile authentications to prevent provider bans.
|
||||
"""
|
||||
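# Minimal sketch of the rate limiting described in the docstring above: sleep a
# configurable delay between profile authentications so the provider is not
# hammered. The delay value mirrors the XC_PROFILE_REFRESH_DELAY fallback below.
import time

def refresh_profiles_with_delay(profiles, refresh_one, delay_seconds=2.5):
    for idx, profile in enumerate(profiles):
        if idx > 0:  # no delay before the first profile
            time.sleep(delay_seconds)
        refresh_one(profile)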
from django.conf import settings
|
||||
import time
|
||||
|
||||
try:
|
||||
account = M3UAccount.objects.get(id=account_id, is_active=True)
|
||||
|
||||
if account.account_type != M3UAccount.Types.XC:
|
||||
logger.debug(f"Account {account_id} is not XC type, skipping profile refresh")
|
||||
return f"Account {account_id} is not an XtreamCodes account"
|
||||
|
||||
from apps.m3u.models import M3UAccountProfile
|
||||
|
||||
profiles = M3UAccountProfile.objects.filter(
|
||||
m3u_account=account,
|
||||
is_active=True
|
||||
)
|
||||
|
||||
if not profiles.exists():
|
||||
logger.info(f"No active profiles found for account {account.name}")
|
||||
return f"No active profiles for account {account_id}"
|
||||
|
||||
# Get user agent for this account
|
||||
try:
|
||||
user_agent_string = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
|
||||
if account.user_agent_id:
|
||||
from core.models import UserAgent
|
||||
ua_obj = UserAgent.objects.get(id=account.user_agent_id)
|
||||
if ua_obj and hasattr(ua_obj, "user_agent") and ua_obj.user_agent:
|
||||
user_agent_string = ua_obj.user_agent
|
||||
except Exception as e:
|
||||
logger.warning(f"Error getting user agent, using fallback: {str(e)}")
|
||||
logger.debug(f"Using user agent for profile refresh: {user_agent_string}")
|
||||
# Get rate limiting delay from settings
|
||||
profile_delay = getattr(settings, 'XC_PROFILE_REFRESH_DELAY', 2.5)
|
||||
|
||||
profiles_updated = 0
|
||||
profiles_failed = 0
|
||||
|
||||
logger.info(f"Starting background refresh for {profiles.count()} profiles of account {account.name}")
|
||||
|
||||
for idx, profile in enumerate(profiles):
|
||||
try:
|
||||
# Add delay between profiles to prevent rate limiting (except for first profile)
|
||||
if idx > 0:
|
||||
logger.info(f"Waiting {profile_delay}s before refreshing next profile to avoid rate limiting")
|
||||
time.sleep(profile_delay)
|
||||
|
||||
# Get transformed credentials for this specific profile
|
||||
profile_url, profile_username, profile_password = get_transformed_credentials(account, profile)
|
||||
|
||||
# Create a separate XC client for this profile's credentials
|
||||
with XCClient(
|
||||
profile_url,
|
||||
profile_username,
|
||||
profile_password,
|
||||
user_agent_string
|
||||
) as profile_client:
|
||||
# Authenticate with this profile's credentials
|
||||
if profile_client.authenticate():
|
||||
# Get account information specific to this profile's credentials
|
||||
profile_account_info = profile_client.get_account_info()
|
||||
|
||||
# Merge with existing custom_properties if they exist
|
||||
existing_props = profile.custom_properties or {}
|
||||
existing_props.update(profile_account_info)
|
||||
profile.custom_properties = existing_props
|
||||
profile.save(update_fields=['custom_properties'])
|
||||
|
||||
profiles_updated += 1
|
||||
logger.info(f"Updated account information for profile '{profile.name}' ({profiles_updated}/{profiles.count()})")
|
||||
else:
|
||||
profiles_failed += 1
|
||||
logger.warning(f"Failed to authenticate profile '{profile.name}' with transformed credentials")
|
||||
|
||||
except Exception as profile_error:
|
||||
profiles_failed += 1
|
||||
logger.error(f"Failed to update account information for profile '{profile.name}': {str(profile_error)}")
|
||||
# Continue with other profiles even if one fails
|
||||
|
||||
result_msg = f"Profile refresh complete for account {account.name}: {profiles_updated} updated, {profiles_failed} failed"
|
||||
logger.info(result_msg)
|
||||
return result_msg
|
||||
|
||||
except M3UAccount.DoesNotExist:
|
||||
error_msg = f"Account {account_id} not found"
|
||||
logger.error(error_msg)
|
||||
return error_msg
|
||||
except Exception as e:
|
||||
error_msg = f"Error refreshing profiles for account {account_id}: {str(e)}"
|
||||
logger.error(error_msg)
|
||||
return error_msg
|
||||
|
||||
|
||||
@shared_task
|
||||
def refresh_account_info(profile_id):
|
||||
"""Refresh only the account information for a specific M3U profile."""
|
||||
|
|
@@ -2322,7 +2574,7 @@ def refresh_single_m3u_account(account_id):
|
|||
if not extinf_data:
|
||||
try:
|
||||
logger.info(f"Calling refresh_m3u_groups for account {account_id}")
|
||||
result = refresh_m3u_groups(account_id, full_refresh=True)
|
||||
result = refresh_m3u_groups(account_id, full_refresh=True, scan_start_time=refresh_start_timestamp)
|
||||
logger.trace(f"refresh_m3u_groups result: {result}")
|
||||
|
||||
# Check for completely empty result or missing groups
|
||||
|
|
@ -2523,76 +2775,75 @@ def refresh_single_m3u_account(account_id):

        if not all_xc_streams:
            logger.warning("No streams collected from XC groups")
            return f"No streams found for XC account {account_id}", None
        else:
            # Now batch by stream count (like standard M3U processing)
            batches = [
                all_xc_streams[i : i + BATCH_SIZE]
                for i in range(0, len(all_xc_streams), BATCH_SIZE)
            ]
            logger.info(f"Processing {len(all_xc_streams)} XC streams in {len(batches)} batches")

            # Use threading for XC stream processing - now with consistent batch sizes
            max_workers = min(4, len(batches))
            logger.debug(f"Using {max_workers} threads for XC stream processing")

            with ThreadPoolExecutor(max_workers=max_workers) as executor:
                # Submit stream batch processing tasks (reuse standard M3U processing)
                future_to_batch = {
                    executor.submit(process_m3u_batch_direct, account_id, batch, existing_groups, hash_keys): i
                    for i, batch in enumerate(batches)
                }

                completed_batches = 0
                total_batches = len(batches)

                # Process completed batches as they finish
                for future in as_completed(future_to_batch):
                    batch_idx = future_to_batch[future]
                    try:
                        result = future.result()
                        completed_batches += 1

                        # Extract stream counts from result
                        if isinstance(result, str):
                            try:
                                created_match = re.search(r"(\d+) created", result)
                                updated_match = re.search(r"(\d+) updated", result)
                                if created_match and updated_match:
                                    created_count = int(created_match.group(1))
                                    updated_count = int(updated_match.group(1))
                                    streams_created += created_count
                                    streams_updated += updated_count
                            except (AttributeError, ValueError):
                                pass

                        # Send progress update
                        progress = int((completed_batches / total_batches) * 100)
                        current_elapsed = time.time() - start_time
                        if progress > 0:
                            estimated_total = (current_elapsed / progress) * 100
                            time_remaining = max(0, estimated_total - current_elapsed)
                        else:
                            time_remaining = 0

                        send_m3u_update(
                            account_id,
                            "parsing",
                            progress,
                            elapsed_time=current_elapsed,
                            time_remaining=time_remaining,
                            streams_processed=streams_created + streams_updated,
                        )
                        logger.debug(f"XC thread batch {completed_batches}/{total_batches} completed")
                    except Exception as e:
                        logger.error(f"Error in XC thread batch {batch_idx}: {str(e)}")
                        completed_batches += 1  # Still count it to avoid hanging

            logger.info(f"XC thread-based processing completed for account {account_id}")

        # Ensure all database transactions are committed before cleanup
        logger.info(
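The time-remaining figure sent with each progress update is a linear extrapolation from the fraction of batches completed. A minimal sketch of that arithmetic (function name illustrative):

def estimate_remaining(elapsed_s, progress_pct):
    """Linear ETA: if 25% of the work took 30s, the remaining 75% is assumed to take 90s."""
    if progress_pct <= 0:
        return 0.0
    estimated_total = (elapsed_s / progress_pct) * 100   # 30 / 25 * 100 -> 120s
    return max(0.0, estimated_total - elapsed_s)         # 120 - 30 -> 90s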
@ -2603,9 +2854,26 @@ def refresh_single_m3u_account(account_id):
            id=-1
        ).exists()  # This will never find anything but ensures DB sync

        # Mark streams that weren't seen in this refresh as stale (pending deletion)
        stale_stream_count = Stream.objects.filter(
            m3u_account=account,
            last_seen__lt=refresh_start_timestamp
        ).update(is_stale=True)
        logger.info(f"Marked {stale_stream_count} streams as stale for account {account_id}")

        # Mark group relationships that weren't seen in this refresh as stale (pending deletion)
        stale_group_count = ChannelGroupM3UAccount.objects.filter(
            m3u_account=account,
            last_seen__lt=refresh_start_timestamp
        ).update(is_stale=True)
        logger.info(f"Marked {stale_group_count} group relationships as stale for account {account_id}")

        # Now run cleanup
        streams_deleted = cleanup_streams(account_id, refresh_start_timestamp)

        # Cleanup stale group relationships (follows same retention policy as streams)
        cleanup_stale_group_relationships(account, refresh_start_timestamp)

        # Run auto channel sync after successful refresh
        auto_sync_message = ""
        try:
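The two update(is_stale=True) calls above are the mark phase of a mark-and-sweep pass: anything whose last_seen predates the scan start gets flagged, and the cleanup helpers sweep only flagged rows. A compact illustration of the predicate (timestamps are made up):

from datetime import datetime, timedelta, timezone

refresh_start = datetime(2024, 1, 1, 12, 0, tzinfo=timezone.utc)
last_seen = refresh_start - timedelta(minutes=5)   # stream was not touched by this refresh

is_stale = last_seen < refresh_start               # True -> marked now, swept later by cleanup_streams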
@ -2638,6 +2906,17 @@ def refresh_single_m3u_account(account_id):
        account.updated_at = timezone.now()
        account.save(update_fields=["status", "last_message", "updated_at"])

        # Log system event for M3U refresh
        log_system_event(
            event_type='m3u_refresh',
            account_name=account.name,
            elapsed_time=round(elapsed_time, 2),
            streams_created=streams_created,
            streams_updated=streams_updated,
            streams_deleted=streams_deleted,
            total_processed=streams_processed,
        )

        # Send final update with complete metrics and explicitly include success status
        send_m3u_update(
            account_id,
@ -2673,7 +2952,16 @@ def refresh_single_m3u_account(account_id):
        release_task_lock("refresh_single_m3u_account", account_id)

        # Aggressive garbage collection
        del existing_groups, extinf_data, groups, batches
        # Only delete variables if they exist
        if 'existing_groups' in locals():
            del existing_groups
        if 'extinf_data' in locals():
            del extinf_data
        if 'groups' in locals():
            del groups
        if 'batches' in locals():
            del batches

        from core.utils import cleanup_memory

        cleanup_memory(log_usage=True, force_collection=True)
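The guarded deletes above avoid a NameError when an early failure means some of the large locals were never bound; cleanup_memory then forces collection. A rough stand-in for what such a helper typically does (this is a sketch, not the actual core.utils.cleanup_memory):

import gc
import logging

def cleanup_memory_sketch(log_usage=False, force_collection=True):
    """Illustrative stand-in: optionally log that cleanup ran, then force a full GC pass."""
    if log_usage:
        logging.getLogger(__name__).debug("running memory cleanup")
    if force_collection:
        return gc.collect()   # number of unreachable objects collected
    return 0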
@ -8,6 +8,34 @@ lock = threading.Lock()
active_streams_map = {}
logger = logging.getLogger(__name__)


def normalize_stream_url(url):
    """
    Normalize stream URLs for compatibility with FFmpeg.

    Handles VLC-specific syntax like udp://@239.0.0.1:1234 by removing the @ symbol.
    FFmpeg doesn't recognize the @ prefix for multicast addresses.

    Args:
        url (str): The stream URL to normalize

    Returns:
        str: The normalized URL
    """
    if not url:
        return url

    # Handle VLC-style UDP multicast URLs: udp://@239.0.0.1:1234 -> udp://239.0.0.1:1234
    # The @ symbol in VLC means "listen on all interfaces" but FFmpeg doesn't use this syntax
    if url.startswith('udp://@'):
        normalized = url.replace('udp://@', 'udp://', 1)
        logger.debug(f"Normalized VLC-style UDP URL: {url} -> {normalized}")
        return normalized

    # Could add other normalizations here in the future (rtp://@, etc.)
    return url


def increment_stream_count(account):
    with lock:
        current_usage = active_streams_map.get(account.id, 0)
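Usage of normalize_stream_url defined above is straightforward; only the VLC-style udp://@ prefix is rewritten and everything else passes through unchanged:

assert normalize_stream_url("udp://@239.0.0.1:1234") == "udp://239.0.0.1:1234"
assert normalize_stream_url("http://example.com/stream.ts") == "http://example.com/stream.ts"
assert normalize_stream_url("") == ""   # falsy input is returned as-is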
@ -14,3 +14,26 @@ class OutputM3UTest(TestCase):
        self.assertEqual(response.status_code, 200)
        content = response.content.decode()
        self.assertIn("#EXTM3U", content)

    def test_generate_m3u_response_post_empty_body(self):
        """
        Test that a POST request with an empty body returns 200 OK.
        """
        url = reverse('output:generate_m3u')

        response = self.client.post(url, data=None, content_type='application/x-www-form-urlencoded')
        content = response.content.decode()

        self.assertEqual(response.status_code, 200, "POST with empty body should return 200 OK")
        self.assertIn("#EXTM3U", content)

    def test_generate_m3u_response_post_with_body(self):
        """
        Test that a POST request with a non-empty body returns 403 Forbidden.
        """
        url = reverse('output:generate_m3u')

        response = self.client.post(url, data={'evilstring': 'muhahaha'})

        self.assertEqual(response.status_code, 403, "POST with body should return 403 Forbidden")
        self.assertIn("POST requests with body are not allowed, body is:", response.content.decode())
apps/output/views.py — 2184 changed lines (diff suppressed because it is too large)
@ -1,4 +1,6 @@
"""Shared configuration between proxy types"""
import time
from django.db import connection

class BaseConfig:
    DEFAULT_USER_AGENT = 'VLC/3.0.20 LibVLC/3.0.20'  # Will only be used if connection to settings fail

@ -12,13 +14,29 @@ class BaseConfig:
    BUFFERING_TIMEOUT = 15  # Seconds to wait for buffering before switching streams
    BUFFER_SPEED = 1  # What speed to consider the stream buffering, 1x is normal speed, 2x is double speed, etc.

    # Cache for proxy settings (class-level, shared across all instances)
    _proxy_settings_cache = None
    _proxy_settings_cache_time = 0
    _proxy_settings_cache_ttl = 10  # Cache for 10 seconds

    @classmethod
    def get_proxy_settings(cls):
        """Get proxy settings from CoreSettings JSON data with fallback to defaults"""
        """Get proxy settings from CoreSettings JSON data with fallback to defaults (cached)"""
        # Check if cache is still valid
        now = time.time()
        if cls._proxy_settings_cache is not None and (now - cls._proxy_settings_cache_time) < cls._proxy_settings_cache_ttl:
            return cls._proxy_settings_cache

        # Cache miss or expired - fetch from database
        try:
            from core.models import CoreSettings
            return CoreSettings.get_proxy_settings()
            settings = CoreSettings.get_proxy_settings()
            cls._proxy_settings_cache = settings
            cls._proxy_settings_cache_time = now
            return settings

        except Exception:
            # Return defaults if database query fails
            return {
                "buffering_timeout": 15,
                "buffering_speed": 1.0,

@ -27,6 +45,13 @@ class BaseConfig:
                "channel_init_grace_period": 5,
            }

        finally:
            # Always close the connection after reading settings
            try:
                connection.close()
            except Exception:
                pass

    @classmethod
    def get_redis_chunk_ttl(cls):
        """Get Redis chunk TTL from database or default"""
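The class-level cache above trades up to _proxy_settings_cache_ttl seconds of staleness for a single database read per TTL window. The same idea as a small reusable helper, shown here only as a sketch:

import time

class TTLCache:
    """Cache one value produced by `loader` for `ttl` seconds."""
    def __init__(self, loader, ttl=10):
        self._loader = loader
        self._ttl = ttl
        self._value = None
        self._stamp = 0.0

    def get(self):
        now = time.time()
        if self._value is None or (now - self._stamp) >= self._ttl:
            self._value = self._loader()   # e.g. a database read
            self._stamp = now
        return self._value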
@ -69,10 +94,10 @@ class TSConfig(BaseConfig):
    CLEANUP_INTERVAL = 60  # Check for inactive channels every 60 seconds

    # Client tracking settings
    CLIENT_RECORD_TTL = 5  # How long client records persist in Redis (seconds). Client will be considered MIA after this time.
    CLIENT_RECORD_TTL = 60  # How long client records persist in Redis (seconds). Client will be considered MIA after this time.
    CLEANUP_CHECK_INTERVAL = 1  # How often to check for disconnected clients (seconds)
    CLIENT_HEARTBEAT_INTERVAL = 1  # How often to send client heartbeats (seconds)
    GHOST_CLIENT_MULTIPLIER = 5.0  # How many heartbeat intervals before client considered ghost (5 would mean 5 seconds if heartbeat interval is 1)
    CLIENT_HEARTBEAT_INTERVAL = 5  # How often to send client heartbeats (seconds)
    GHOST_CLIENT_MULTIPLIER = 6.0  # How many heartbeat intervals before client considered ghost (6 would mean 36 seconds if heartbeat interval is 6)
    CLIENT_WAIT_TIMEOUT = 30  # Seconds to wait for client to connect

    # Stream health and recovery settings
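With the new values the timing budget works out as follows (a quick worked example; note the 36-second figure in the inline comment assumes a 6-second interval rather than the configured 5):

CLIENT_HEARTBEAT_INTERVAL = 5   # seconds between heartbeats
GHOST_CLIENT_MULTIPLIER = 6.0   # missed-heartbeat budget

ghost_timeout = CLIENT_HEARTBEAT_INTERVAL * GHOST_CLIENT_MULTIPLIER   # 30.0 seconds
# A client silent for ~30s is treated as a ghost, comfortably inside the 60s
# CLIENT_RECORD_TTL, so Redis does not expire the record before the check runs.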
@ -8,7 +8,7 @@ import gevent
from typing import Set, Optional
from apps.proxy.config import TSConfig as Config
from redis.exceptions import ConnectionError, TimeoutError
from .constants import EventType
from .constants import EventType, ChannelState, ChannelMetadataField
from .config_helper import ConfigHelper
from .redis_keys import RedisKeys
from .utils import get_logger

@ -26,6 +26,7 @@ class ClientManager:
        self.lock = threading.Lock()
        self.last_active_time = time.time()
        self.worker_id = worker_id  # Store worker ID as instance variable
        self._heartbeat_running = True  # Flag to control heartbeat thread

        # STANDARDIZED KEYS: Move client set under channel namespace
        self.client_set_key = RedisKeys.clients(channel_id)

@ -33,6 +34,10 @@ class ClientManager:
        self.heartbeat_interval = ConfigHelper.get('CLIENT_HEARTBEAT_INTERVAL', 10)
        self.last_heartbeat_time = {}

        # Get ProxyServer instance for ownership checks
        from .server import ProxyServer
        self.proxy_server = ProxyServer.get_instance()

        # Start heartbeat thread for local clients
        self._start_heartbeat_thread()
        self._registered_clients = set()  # Track already registered client IDs

@ -43,9 +48,11 @@ class ClientManager:
        # Import here to avoid potential import issues
        from apps.proxy.ts_proxy.channel_status import ChannelStatus
        import redis
        from django.conf import settings

        # Get all channels from Redis
        redis_client = redis.Redis.from_url('redis://localhost:6379', decode_responses=True)
        # Get all channels from Redis using settings
        redis_url = getattr(settings, 'REDIS_URL', 'redis://localhost:6379/0')
        redis_client = redis.Redis.from_url(redis_url, decode_responses=True)
        all_channels = []
        cursor = 0
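The cursor = 0 above is the start of a Redis SCAN loop (the remainder falls outside this hunk). A minimal sketch of how such a loop usually iterates channel keys; the key pattern here is an assumption for illustration:

def scan_keys(redis_client, pattern="ts_proxy:channel:*", count=100):
    """Iterate matching keys incrementally instead of issuing a blocking KEYS call."""
    cursor = 0
    keys = []
    while True:
        cursor, batch = redis_client.scan(cursor=cursor, match=pattern, count=count)
        keys.extend(batch)
        if cursor == 0:
            break
    return keys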
@ -77,56 +84,28 @@ class ClientManager:
|
|||
logger.debug(f"Failed to trigger stats update: {e}")
|
||||
|
||||
def _start_heartbeat_thread(self):
|
||||
"""Start thread to regularly refresh client presence in Redis"""
|
||||
"""Start thread to regularly refresh client presence in Redis for local clients"""
|
||||
def heartbeat_task():
|
||||
no_clients_count = 0 # Track consecutive empty cycles
|
||||
max_empty_cycles = 3 # Exit after this many consecutive empty checks
|
||||
|
||||
logger.debug(f"Started heartbeat thread for channel {self.channel_id} (interval: {self.heartbeat_interval}s)")
|
||||
|
||||
while True:
|
||||
while self._heartbeat_running:
|
||||
try:
|
||||
# Wait for the interval
|
||||
gevent.sleep(self.heartbeat_interval)
|
||||
# Wait for the interval, but check stop flag frequently for quick shutdown
|
||||
# Sleep in 1-second increments to allow faster response to stop signal
|
||||
for _ in range(int(self.heartbeat_interval)):
|
||||
if not self._heartbeat_running:
|
||||
break
|
||||
time.sleep(1)
|
||||
|
||||
# Final check before doing work
|
||||
if not self._heartbeat_running:
|
||||
break
|
||||
|
||||
# Send heartbeat for all local clients
|
||||
with self.lock:
|
||||
if not self.clients or not self.redis_client:
|
||||
# No clients left, increment our counter
|
||||
no_clients_count += 1
|
||||
|
||||
# Check if we're in a shutdown delay period before exiting
|
||||
in_shutdown_delay = False
|
||||
if self.redis_client:
|
||||
try:
|
||||
disconnect_key = RedisKeys.last_client_disconnect(self.channel_id)
|
||||
disconnect_time_bytes = self.redis_client.get(disconnect_key)
|
||||
if disconnect_time_bytes:
|
||||
disconnect_time = float(disconnect_time_bytes.decode('utf-8'))
|
||||
elapsed = time.time() - disconnect_time
|
||||
shutdown_delay = ConfigHelper.channel_shutdown_delay()
|
||||
|
||||
if elapsed < shutdown_delay:
|
||||
in_shutdown_delay = True
|
||||
logger.debug(f"Channel {self.channel_id} in shutdown delay: {elapsed:.1f}s of {shutdown_delay}s elapsed")
|
||||
except Exception as e:
|
||||
logger.debug(f"Error checking shutdown delay: {e}")
|
||||
|
||||
# Only exit if we've seen no clients for several consecutive checks AND we're not in shutdown delay
|
||||
if no_clients_count >= max_empty_cycles and not in_shutdown_delay:
|
||||
logger.info(f"No clients for channel {self.channel_id} after {no_clients_count} consecutive checks and not in shutdown delay, exiting heartbeat thread")
|
||||
return # This exits the thread
|
||||
|
||||
# Skip this cycle if we have no clients but continue if in shutdown delay
|
||||
if not in_shutdown_delay:
|
||||
continue
|
||||
else:
|
||||
# Reset counter during shutdown delay to prevent premature exit
|
||||
no_clients_count = 0
|
||||
continue
|
||||
else:
|
||||
# Reset counter when we see clients
|
||||
no_clients_count = 0
|
||||
# Skip this cycle if we have no local clients
|
||||
if not self.clients:
|
||||
continue
|
||||
|
||||
# IMPROVED GHOST DETECTION: Check for stale clients before sending heartbeats
|
||||
current_time = time.time()
|
||||
|
|
@ -197,11 +176,20 @@ class ClientManager:
|
|||
except Exception as e:
|
||||
logger.error(f"Error in client heartbeat thread: {e}")
|
||||
|
||||
logger.debug(f"Heartbeat thread exiting for channel {self.channel_id}")
|
||||
|
||||
thread = threading.Thread(target=heartbeat_task, daemon=True)
|
||||
thread.name = f"client-heartbeat-{self.channel_id}"
|
||||
thread.start()
|
||||
logger.debug(f"Started client heartbeat thread for channel {self.channel_id} (interval: {self.heartbeat_interval}s)")
|
||||
|
||||
def stop(self):
|
||||
"""Stop the heartbeat thread and cleanup"""
|
||||
logger.debug(f"Stopping ClientManager for channel {self.channel_id}")
|
||||
self._heartbeat_running = False
|
||||
# Give the thread a moment to exit gracefully
|
||||
# Note: We don't join() here because it's a daemon thread and will exit on its own
|
||||
|
||||
def _execute_redis_command(self, command_func):
|
||||
"""Execute Redis command with error handling"""
|
||||
if not self.redis_client:
|
||||
|
|
@ -355,16 +343,30 @@ class ClientManager:
|
|||
|
||||
self._notify_owner_of_activity()
|
||||
|
||||
# Publish client disconnected event
|
||||
event_data = json.dumps({
|
||||
"event": EventType.CLIENT_DISCONNECTED, # Use constant instead of string
|
||||
"channel_id": self.channel_id,
|
||||
"client_id": client_id,
|
||||
"worker_id": self.worker_id or "unknown",
|
||||
"timestamp": time.time(),
|
||||
"remaining_clients": remaining
|
||||
})
|
||||
self.redis_client.publish(RedisKeys.events_channel(self.channel_id), event_data)
|
||||
# Check if we're the owner - if so, handle locally; if not, publish event
|
||||
am_i_owner = self.proxy_server and self.proxy_server.am_i_owner(self.channel_id)
|
||||
|
||||
if am_i_owner:
|
||||
# We're the owner - handle the disconnect directly
|
||||
logger.debug(f"Owner handling CLIENT_DISCONNECTED for client {client_id} locally (not publishing)")
|
||||
if remaining == 0:
|
||||
# Trigger shutdown check directly via ProxyServer method
|
||||
logger.debug(f"No clients left - triggering immediate shutdown check")
|
||||
# Spawn greenlet to avoid blocking
|
||||
import gevent
|
||||
gevent.spawn(self.proxy_server.handle_client_disconnect, self.channel_id)
|
||||
else:
|
||||
# We're not the owner - publish event so owner can handle it
|
||||
logger.debug(f"Non-owner publishing CLIENT_DISCONNECTED event for client {client_id} on channel {self.channel_id} from worker {self.worker_id}")
|
||||
event_data = json.dumps({
|
||||
"event": EventType.CLIENT_DISCONNECTED,
|
||||
"channel_id": self.channel_id,
|
||||
"client_id": client_id,
|
||||
"worker_id": self.worker_id or "unknown",
|
||||
"timestamp": time.time(),
|
||||
"remaining_clients": remaining
|
||||
})
|
||||
self.redis_client.publish(RedisKeys.events_channel(self.channel_id), event_data)
|
||||
|
||||
# Trigger channel stats update via WebSocket
|
||||
self._trigger_stats_update()
|
||||
|
|
|
|||
|
|
@ -100,3 +100,12 @@ class ConfigHelper:
    def channel_init_grace_period():
        """Get channel initialization grace period in seconds"""
        return Config.get_channel_init_grace_period()

    @staticmethod
    def chunk_timeout():
        """
        Get chunk timeout in seconds (used for both socket and HTTP read timeouts).
        This controls how long we wait for each chunk before timing out.
        Set this higher (e.g., 30s) for slow providers that may have intermittent delays.
        """
        return ConfigHelper.get('CHUNK_TIMEOUT', 5)  # Default 5 seconds
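Callers would use the helper wherever a per-chunk read deadline is needed; for example (the socket here is purely illustrative, not project code):

import socket

timeout_s = ConfigHelper.chunk_timeout()                  # 5 by default; raise for slow providers
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout_s)                                # the same value can feed an HTTP read timeout
sock.close()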
@ -33,6 +33,8 @@ class EventType:

# Stream types
class StreamType:
    HLS = "hls"
    RTSP = "rtsp"
    UDP = "udp"
    TS = "ts"
    UNKNOWN = "unknown"
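A hypothetical helper showing how these constants might be used to classify a source URL; the mapping rules below are illustrative and not the project's actual detection logic:

def guess_stream_type(url: str) -> str:
    if url.startswith("udp://"):
        return StreamType.UDP
    if url.startswith("rtsp://"):
        return StreamType.RTSP
    if ".m3u8" in url:
        return StreamType.HLS
    if url.endswith(".ts"):
        return StreamType.TS
    return StreamType.UNKNOWN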
apps/proxy/ts_proxy/http_streamer.py — new file, 138 lines
@ -0,0 +1,138 @@
"""
HTTP Stream Reader - Thread-based HTTP stream reader that writes to a pipe.
This allows us to use the same fetch_chunk() path for both transcode and HTTP streams.
"""

import threading
import os
import requests
from requests.adapters import HTTPAdapter
from .utils import get_logger

logger = get_logger()


class HTTPStreamReader:
    """Thread-based HTTP stream reader that writes to a pipe"""

    def __init__(self, url, user_agent=None, chunk_size=8192):
        self.url = url
        self.user_agent = user_agent
        self.chunk_size = chunk_size
        self.session = None
        self.response = None
        self.thread = None
        self.pipe_read = None
        self.pipe_write = None
        self.running = False

    def start(self):
        """Start the HTTP stream reader thread"""
        # Create a pipe (works on Windows and Unix)
        self.pipe_read, self.pipe_write = os.pipe()

        # Start the reader thread
        self.running = True
        self.thread = threading.Thread(target=self._read_stream, daemon=True)
        self.thread.start()

        logger.info(f"Started HTTP stream reader thread for {self.url}")
        return self.pipe_read

    def _read_stream(self):
        """Thread worker that reads HTTP stream and writes to pipe"""
        try:
            # Build headers
            headers = {}
            if self.user_agent:
                headers['User-Agent'] = self.user_agent

            logger.info(f"HTTP reader connecting to {self.url}")

            # Create session
            self.session = requests.Session()

            # Disable retries for faster failure detection
            adapter = HTTPAdapter(max_retries=0, pool_connections=1, pool_maxsize=1)
            self.session.mount('http://', adapter)
            self.session.mount('https://', adapter)

            # Stream the URL
            self.response = self.session.get(
                self.url,
                headers=headers,
                stream=True,
                timeout=(5, 30)  # 5s connect, 30s read
            )

            if self.response.status_code != 200:
                logger.error(f"HTTP {self.response.status_code} from {self.url}")
                return

            logger.info(f"HTTP reader connected successfully, streaming data...")

            # Stream chunks to pipe
            chunk_count = 0
            for chunk in self.response.iter_content(chunk_size=self.chunk_size):
                if not self.running:
                    break

                if chunk:
                    try:
                        # Write binary data to pipe
                        os.write(self.pipe_write, chunk)
                        chunk_count += 1

                        # Log progress periodically
                        if chunk_count % 1000 == 0:
                            logger.debug(f"HTTP reader streamed {chunk_count} chunks")
                    except OSError as e:
                        logger.error(f"Pipe write error: {e}")
                        break

            logger.info("HTTP stream ended")

        except requests.exceptions.RequestException as e:
            logger.error(f"HTTP reader request error: {e}")
        except Exception as e:
            logger.error(f"HTTP reader unexpected error: {e}", exc_info=True)
        finally:
            self.running = False
            # Close write end of pipe to signal EOF
            try:
                if self.pipe_write is not None:
                    os.close(self.pipe_write)
                    self.pipe_write = None
            except:
                pass

    def stop(self):
        """Stop the HTTP stream reader"""
        logger.info("Stopping HTTP stream reader")
        self.running = False

        # Close response
        if self.response:
            try:
                self.response.close()
            except:
                pass

        # Close session
        if self.session:
            try:
                self.session.close()
            except:
                pass

        # Close write end of pipe
        if self.pipe_write is not None:
            try:
                os.close(self.pipe_write)
                self.pipe_write = None
            except:
                pass

        # Wait for thread
        if self.thread and self.thread.is_alive():
            self.thread.join(timeout=2.0)
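Typical use of HTTPStreamReader as defined above: start it, read raw bytes from the returned pipe descriptor, then stop it. A minimal consumer sketch (the URL and the handle() callback are placeholders):

import os

reader = HTTPStreamReader("http://example.com/stream.ts", user_agent="VLC/3.0.20 LibVLC/3.0.20")
fd = reader.start()                      # read end of the pipe
try:
    while True:
        chunk = os.read(fd, 188 * 64)    # TS packets are 188 bytes; read in multiples
        if not chunk:                    # EOF once the reader closes the write end
            break
        handle(chunk)                    # placeholder for the real consumer
finally:
    reader.stop()
    os.close(fd)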
@ -19,7 +19,7 @@ import gevent # Add gevent import
|
|||
from typing import Dict, Optional, Set
|
||||
from apps.proxy.config import TSConfig as Config
|
||||
from apps.channels.models import Channel, Stream
|
||||
from core.utils import RedisClient
|
||||
from core.utils import RedisClient, log_system_event
|
||||
from redis.exceptions import ConnectionError, TimeoutError
|
||||
from .stream_manager import StreamManager
|
||||
from .stream_buffer import StreamBuffer
|
||||
|
|
@ -131,6 +131,8 @@ class ProxyServer:
|
|||
max_retries = 10
|
||||
base_retry_delay = 1 # Start with 1 second delay
|
||||
max_retry_delay = 30 # Cap at 30 seconds
|
||||
pubsub_client = None
|
||||
pubsub = None
|
||||
|
||||
while True:
|
||||
try:
|
||||
|
|
@ -192,35 +194,11 @@ class ProxyServer:
|
|||
self.redis_client.delete(disconnect_key)
|
||||
|
||||
elif event_type == EventType.CLIENT_DISCONNECTED:
|
||||
logger.debug(f"Owner received {EventType.CLIENT_DISCONNECTED} event for channel {channel_id}")
|
||||
# Check if any clients remain
|
||||
if channel_id in self.client_managers:
|
||||
# VERIFY REDIS CLIENT COUNT DIRECTLY
|
||||
client_set_key = RedisKeys.clients(channel_id)
|
||||
total = self.redis_client.scard(client_set_key) or 0
|
||||
|
||||
if total == 0:
|
||||
logger.debug(f"No clients left after disconnect event - stopping channel {channel_id}")
|
||||
# Set the disconnect timer for other workers to see
|
||||
disconnect_key = RedisKeys.last_client_disconnect(channel_id)
|
||||
self.redis_client.setex(disconnect_key, 60, str(time.time()))
|
||||
|
||||
# Get configured shutdown delay or default
|
||||
shutdown_delay = ConfigHelper.channel_shutdown_delay()
|
||||
|
||||
if shutdown_delay > 0:
|
||||
logger.info(f"Waiting {shutdown_delay}s before stopping channel...")
|
||||
gevent.sleep(shutdown_delay) # REPLACE: time.sleep(shutdown_delay)
|
||||
|
||||
# Re-check client count before stopping
|
||||
total = self.redis_client.scard(client_set_key) or 0
|
||||
if total > 0:
|
||||
logger.info(f"New clients connected during shutdown delay - aborting shutdown")
|
||||
self.redis_client.delete(disconnect_key)
|
||||
return
|
||||
|
||||
# Stop the channel directly
|
||||
self.stop_channel(channel_id)
|
||||
client_id = data.get("client_id")
|
||||
worker_id = data.get("worker_id")
|
||||
logger.debug(f"Owner received {EventType.CLIENT_DISCONNECTED} event for channel {channel_id}, client {client_id} from worker {worker_id}")
|
||||
# Delegate to dedicated method
|
||||
self.handle_client_disconnect(channel_id)
|
||||
|
||||
|
||||
elif event_type == EventType.STREAM_SWITCH:
|
||||
|
|
@ -339,20 +317,27 @@ class ProxyServer:
|
|||
logger.error(f"Error in event listener: {e}. Retrying in {final_delay:.1f}s (attempt {retry_count})")
|
||||
gevent.sleep(final_delay) # REPLACE: time.sleep(final_delay)
|
||||
|
||||
# Try to clean up the old connection
|
||||
try:
|
||||
if 'pubsub' in locals():
|
||||
pubsub.close()
|
||||
if 'pubsub_client' in locals():
|
||||
pubsub_client.close()
|
||||
except:
|
||||
pass
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in event listener: {e}")
|
||||
# Add a short delay to prevent rapid retries on persistent errors
|
||||
gevent.sleep(5) # REPLACE: time.sleep(5)
|
||||
|
||||
finally:
|
||||
# Always clean up PubSub connections in all error paths
|
||||
try:
|
||||
if pubsub:
|
||||
pubsub.close()
|
||||
pubsub = None
|
||||
except Exception as e:
|
||||
logger.debug(f"Error closing pubsub: {e}")
|
||||
|
||||
try:
|
||||
if pubsub_client:
|
||||
pubsub_client.close()
|
||||
pubsub_client = None
|
||||
except Exception as e:
|
||||
logger.debug(f"Error closing pubsub_client: {e}")
|
||||
|
||||
thread = threading.Thread(target=event_listener, daemon=True)
|
||||
thread.name = "redis-event-listener"
|
||||
thread.start()
|
||||
|
|
@ -486,17 +471,18 @@ class ProxyServer:
|
|||
)
|
||||
return True
|
||||
|
||||
# Create buffer and client manager instances
|
||||
buffer = StreamBuffer(channel_id, redis_client=self.redis_client)
|
||||
client_manager = ClientManager(
|
||||
channel_id,
|
||||
redis_client=self.redis_client,
|
||||
worker_id=self.worker_id
|
||||
)
|
||||
# Create buffer and client manager instances (or reuse if they exist)
|
||||
if channel_id not in self.stream_buffers:
|
||||
buffer = StreamBuffer(channel_id, redis_client=self.redis_client)
|
||||
self.stream_buffers[channel_id] = buffer
|
||||
|
||||
# Store in local tracking
|
||||
self.stream_buffers[channel_id] = buffer
|
||||
self.client_managers[channel_id] = client_manager
|
||||
if channel_id not in self.client_managers:
|
||||
client_manager = ClientManager(
|
||||
channel_id,
|
||||
redis_client=self.redis_client,
|
||||
worker_id=self.worker_id
|
||||
)
|
||||
self.client_managers[channel_id] = client_manager
|
||||
|
||||
# IMPROVED: Set initializing state in Redis BEFORE any other operations
|
||||
if self.redis_client:
|
||||
|
|
@ -550,13 +536,15 @@ class ProxyServer:
|
|||
logger.info(f"Channel {channel_id} already owned by worker {current_owner}")
|
||||
logger.info(f"This worker ({self.worker_id}) will read from Redis buffer only")
|
||||
|
||||
# Create buffer but not stream manager
|
||||
buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client)
|
||||
self.stream_buffers[channel_id] = buffer
|
||||
# Create buffer but not stream manager (only if not already exists)
|
||||
if channel_id not in self.stream_buffers:
|
||||
buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client)
|
||||
self.stream_buffers[channel_id] = buffer
|
||||
|
||||
# Create client manager with channel_id and redis_client
|
||||
client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id)
|
||||
self.client_managers[channel_id] = client_manager
|
||||
# Create client manager with channel_id and redis_client (only if not already exists)
|
||||
if channel_id not in self.client_managers:
|
||||
client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id)
|
||||
self.client_managers[channel_id] = client_manager
|
||||
|
||||
return True
|
||||
|
||||
|
|
@ -571,13 +559,15 @@ class ProxyServer:
|
|||
# Another worker just acquired ownership
|
||||
logger.info(f"Another worker just acquired ownership of channel {channel_id}")
|
||||
|
||||
# Create buffer but not stream manager
|
||||
buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client)
|
||||
self.stream_buffers[channel_id] = buffer
|
||||
# Create buffer but not stream manager (only if not already exists)
|
||||
if channel_id not in self.stream_buffers:
|
||||
buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client)
|
||||
self.stream_buffers[channel_id] = buffer
|
||||
|
||||
# Create client manager with channel_id and redis_client
|
||||
client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id)
|
||||
self.client_managers[channel_id] = client_manager
|
||||
# Create client manager with channel_id and redis_client (only if not already exists)
|
||||
if channel_id not in self.client_managers:
|
||||
client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id)
|
||||
self.client_managers[channel_id] = client_manager
|
||||
|
||||
return True
|
||||
|
||||
|
|
@ -596,7 +586,7 @@ class ProxyServer:
|
|||
if channel_user_agent:
|
||||
metadata["user_agent"] = channel_user_agent
|
||||
|
||||
# CRITICAL FIX: Make sure stream_id is always set in metadata and properly logged
|
||||
# Make sure stream_id is always set in metadata and properly logged
|
||||
if channel_stream_id:
|
||||
metadata["stream_id"] = str(channel_stream_id)
|
||||
logger.info(f"Storing stream_id {channel_stream_id} in metadata for channel {channel_id}")
|
||||
|
|
@ -632,13 +622,37 @@ class ProxyServer:
|
|||
logger.info(f"Created StreamManager for channel {channel_id} with stream ID {channel_stream_id}")
|
||||
self.stream_managers[channel_id] = stream_manager
|
||||
|
||||
# Create client manager with channel_id, redis_client AND worker_id
|
||||
client_manager = ClientManager(
|
||||
channel_id=channel_id,
|
||||
redis_client=self.redis_client,
|
||||
worker_id=self.worker_id
|
||||
)
|
||||
self.client_managers[channel_id] = client_manager
|
||||
# Log channel start event
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=channel_id)
|
||||
|
||||
# Get stream name if stream_id is available
|
||||
stream_name = None
|
||||
if channel_stream_id:
|
||||
try:
|
||||
stream_obj = Stream.objects.get(id=channel_stream_id)
|
||||
stream_name = stream_obj.name
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
log_system_event(
|
||||
'channel_start',
|
||||
channel_id=channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
stream_name=stream_name,
|
||||
stream_id=channel_stream_id
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not log channel start event: {e}")
|
||||
|
||||
# Create client manager with channel_id, redis_client AND worker_id (only if not already exists)
|
||||
if channel_id not in self.client_managers:
|
||||
client_manager = ClientManager(
|
||||
channel_id=channel_id,
|
||||
redis_client=self.redis_client,
|
||||
worker_id=self.worker_id
|
||||
)
|
||||
self.client_managers[channel_id] = client_manager
|
||||
|
||||
# Start stream manager thread only for the owner
|
||||
thread = threading.Thread(target=stream_manager.run, daemon=True)
|
||||
|
|
@ -688,9 +702,10 @@ class ProxyServer:
|
|||
state = metadata.get(b'state', b'unknown').decode('utf-8')
|
||||
owner = metadata.get(b'owner', b'').decode('utf-8')
|
||||
|
||||
# States that indicate the channel is running properly
|
||||
# States that indicate the channel is running properly or shutting down
|
||||
valid_states = [ChannelState.ACTIVE, ChannelState.WAITING_FOR_CLIENTS,
|
||||
ChannelState.CONNECTING, ChannelState.BUFFERING, ChannelState.INITIALIZING]
|
||||
ChannelState.CONNECTING, ChannelState.BUFFERING, ChannelState.INITIALIZING,
|
||||
ChannelState.STOPPING]
|
||||
|
||||
# If the channel is in a valid state, check if the owner is still active
|
||||
if state in valid_states:
|
||||
|
|
@ -703,12 +718,24 @@ class ProxyServer:
|
|||
else:
|
||||
# This is a zombie channel - owner is gone but metadata still exists
|
||||
logger.warning(f"Detected zombie channel {channel_id} - owner {owner} is no longer active")
|
||||
|
||||
# Check if there are any clients connected
|
||||
client_set_key = RedisKeys.clients(channel_id)
|
||||
client_count = self.redis_client.scard(client_set_key) or 0
|
||||
|
||||
if client_count > 0:
|
||||
logger.warning(f"Zombie channel {channel_id} has {client_count} clients - attempting ownership takeover")
|
||||
# Could potentially take ownership here in the future
|
||||
# For now, just clean it up to be safe
|
||||
else:
|
||||
logger.warning(f"Zombie channel {channel_id} has no clients - cleaning up")
|
||||
|
||||
self._clean_zombie_channel(channel_id, metadata)
|
||||
return False
|
||||
elif state in [ChannelState.STOPPING, ChannelState.STOPPED, ChannelState.ERROR]:
|
||||
# These states indicate the channel should be reinitialized
|
||||
logger.info(f"Channel {channel_id} exists but in terminal state: {state}")
|
||||
return True
|
||||
elif state in [ChannelState.STOPPED, ChannelState.ERROR]:
|
||||
# These terminal states indicate the channel should be cleaned up and reinitialized
|
||||
logger.info(f"Channel {channel_id} in terminal state {state} - returning False to trigger cleanup")
|
||||
return False
|
||||
else:
|
||||
# Unknown or initializing state, check how long it's been in this state
|
||||
if b'state_changed_at' in metadata:
|
||||
|
|
@ -772,6 +799,44 @@ class ProxyServer:
|
|||
logger.error(f"Error cleaning zombie channel {channel_id}: {e}", exc_info=True)
|
||||
return False
|
||||
|
||||
def handle_client_disconnect(self, channel_id):
|
||||
"""
|
||||
Handle client disconnect event - check if channel should shut down.
|
||||
Can be called directly by owner or via PubSub from non-owner workers.
|
||||
"""
|
||||
if channel_id not in self.client_managers:
|
||||
return
|
||||
|
||||
try:
|
||||
# VERIFY REDIS CLIENT COUNT DIRECTLY
|
||||
client_set_key = RedisKeys.clients(channel_id)
|
||||
total = self.redis_client.scard(client_set_key) or 0
|
||||
|
||||
if total == 0:
|
||||
logger.debug(f"No clients left after disconnect event - stopping channel {channel_id}")
|
||||
# Set the disconnect timer for other workers to see
|
||||
disconnect_key = RedisKeys.last_client_disconnect(channel_id)
|
||||
self.redis_client.setex(disconnect_key, 60, str(time.time()))
|
||||
|
||||
# Get configured shutdown delay or default
|
||||
shutdown_delay = ConfigHelper.channel_shutdown_delay()
|
||||
|
||||
if shutdown_delay > 0:
|
||||
logger.info(f"Waiting {shutdown_delay}s before stopping channel...")
|
||||
gevent.sleep(shutdown_delay)
|
||||
|
||||
# Re-check client count before stopping
|
||||
total = self.redis_client.scard(client_set_key) or 0
|
||||
if total > 0:
|
||||
logger.info(f"New clients connected during shutdown delay - aborting shutdown")
|
||||
self.redis_client.delete(disconnect_key)
|
||||
return
|
||||
|
||||
# Stop the channel directly
|
||||
self.stop_channel(channel_id)
|
||||
except Exception as e:
|
||||
logger.error(f"Error handling client disconnect for channel {channel_id}: {e}")
|
||||
|
||||
def stop_channel(self, channel_id):
|
||||
"""Stop a channel with proper ownership handling"""
|
||||
try:
|
||||
|
|
@ -819,6 +884,41 @@ class ProxyServer:
|
|||
self.release_ownership(channel_id)
|
||||
logger.info(f"Released ownership of channel {channel_id}")
|
||||
|
||||
# Log channel stop event (after cleanup, before releasing ownership section ends)
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=channel_id)
|
||||
|
||||
# Calculate runtime and get total bytes from metadata
|
||||
runtime = None
|
||||
total_bytes = None
|
||||
if self.redis_client:
|
||||
metadata_key = RedisKeys.channel_metadata(channel_id)
|
||||
metadata = self.redis_client.hgetall(metadata_key)
|
||||
if metadata:
|
||||
# Calculate runtime from init_time
|
||||
if b'init_time' in metadata:
|
||||
try:
|
||||
init_time = float(metadata[b'init_time'].decode('utf-8'))
|
||||
runtime = round(time.time() - init_time, 2)
|
||||
except Exception:
|
||||
pass
|
||||
# Get total bytes transferred
|
||||
if b'total_bytes' in metadata:
|
||||
try:
|
||||
total_bytes = int(metadata[b'total_bytes'].decode('utf-8'))
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
log_system_event(
|
||||
'channel_stop',
|
||||
channel_id=channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
runtime=runtime,
|
||||
total_bytes=total_bytes
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not log channel stop event: {e}")
|
||||
|
||||
# Always clean up local resources - WITH SAFE CHECKS
|
||||
if channel_id in self.stream_managers:
|
||||
del self.stream_managers[channel_id]
|
||||
|
|
@ -846,6 +946,10 @@ class ProxyServer:
|
|||
# Clean up client manager - SAFE CHECK HERE TOO
|
||||
if channel_id in self.client_managers:
|
||||
try:
|
||||
client_manager = self.client_managers[channel_id]
|
||||
# Stop the heartbeat thread before deleting
|
||||
if hasattr(client_manager, 'stop'):
|
||||
client_manager.stop()
|
||||
del self.client_managers[channel_id]
|
||||
logger.info(f"Removed client manager for channel {channel_id}")
|
||||
except KeyError:
|
||||
|
|
@ -920,6 +1024,15 @@ class ProxyServer:
|
|||
if channel_id in self.client_managers:
|
||||
client_manager = self.client_managers[channel_id]
|
||||
total_clients = client_manager.get_total_client_count()
|
||||
else:
|
||||
# This can happen during reconnection attempts or crashes
|
||||
# Check Redis directly for any connected clients
|
||||
if self.redis_client:
|
||||
client_set_key = RedisKeys.clients(channel_id)
|
||||
total_clients = self.redis_client.scard(client_set_key) or 0
|
||||
|
||||
if total_clients == 0:
|
||||
logger.warning(f"Channel {channel_id} is missing client_manager but we're the owner with 0 clients - will trigger cleanup")
|
||||
|
||||
# Log client count periodically
|
||||
if time.time() % 30 < 1: # Every ~30 seconds
|
||||
|
|
@ -927,7 +1040,14 @@ class ProxyServer:
|
|||
|
||||
# If in connecting or waiting_for_clients state, check grace period
|
||||
if channel_state in [ChannelState.CONNECTING, ChannelState.WAITING_FOR_CLIENTS]:
|
||||
# Get connection ready time from metadata
|
||||
# Check if channel is already stopping
|
||||
if self.redis_client:
|
||||
stop_key = RedisKeys.channel_stopping(channel_id)
|
||||
if self.redis_client.exists(stop_key):
|
||||
logger.debug(f"Channel {channel_id} is already stopping - skipping monitor shutdown")
|
||||
continue
|
||||
|
||||
# Get connection_ready_time from metadata (indicates if channel reached ready state)
|
||||
connection_ready_time = None
|
||||
if metadata and b'connection_ready_time' in metadata:
|
||||
try:
|
||||
|
|
@ -935,17 +1055,60 @@ class ProxyServer:
|
|||
except (ValueError, TypeError):
|
||||
pass
|
||||
|
||||
# If still connecting, give it more time
|
||||
if channel_state == ChannelState.CONNECTING:
|
||||
logger.debug(f"Channel {channel_id} still connecting - not checking for clients yet")
|
||||
continue
|
||||
if total_clients == 0:
|
||||
# Check if we have a connection_attempt timestamp (set when CONNECTING starts)
|
||||
connection_attempt_time = None
|
||||
attempt_key = RedisKeys.connection_attempt(channel_id)
|
||||
if self.redis_client:
|
||||
attempt_value = self.redis_client.get(attempt_key)
|
||||
if attempt_value:
|
||||
try:
|
||||
connection_attempt_time = float(attempt_value.decode('utf-8'))
|
||||
except (ValueError, TypeError):
|
||||
pass
|
||||
|
||||
# If waiting for clients, check grace period
|
||||
if connection_ready_time:
|
||||
# Also get init time as a fallback
|
||||
init_time = None
|
||||
if metadata and b'init_time' in metadata:
|
||||
try:
|
||||
init_time = float(metadata[b'init_time'].decode('utf-8'))
|
||||
except (ValueError, TypeError):
|
||||
pass
|
||||
|
||||
# Use whichever timestamp we have (prefer connection_attempt as it's more recent)
|
||||
start_time = connection_attempt_time or init_time
|
||||
|
||||
if start_time:
|
||||
# Check which timeout to apply based on channel lifecycle
|
||||
if connection_ready_time:
|
||||
# Already reached ready - use shutdown_delay
|
||||
time_since_ready = time.time() - connection_ready_time
|
||||
shutdown_delay = ConfigHelper.channel_shutdown_delay()
|
||||
|
||||
if time_since_ready > shutdown_delay:
|
||||
logger.warning(
|
||||
f"Channel {channel_id} in {channel_state} state with 0 clients for {time_since_ready:.1f}s "
|
||||
f"(after reaching ready, shutdown_delay: {shutdown_delay}s) - stopping channel"
|
||||
)
|
||||
self.stop_channel(channel_id)
|
||||
continue
|
||||
else:
|
||||
# Never reached ready - use grace_period timeout
|
||||
time_since_start = time.time() - start_time
|
||||
connecting_timeout = ConfigHelper.channel_init_grace_period()
|
||||
|
||||
if time_since_start > connecting_timeout:
|
||||
logger.warning(
|
||||
f"Channel {channel_id} stuck in {channel_state} state for {time_since_start:.1f}s "
|
||||
f"with no clients (timeout: {connecting_timeout}s) - stopping channel due to upstream issues"
|
||||
)
|
||||
self.stop_channel(channel_id)
|
||||
continue
|
||||
elif connection_ready_time:
|
||||
# We have clients now, but check grace period for state transition
|
||||
grace_period = ConfigHelper.channel_init_grace_period()
|
||||
time_since_ready = time.time() - connection_ready_time
|
||||
|
||||
# Add this debug log
|
||||
logger.debug(f"GRACE PERIOD CHECK: Channel {channel_id} in {channel_state} state, "
|
||||
f"time_since_ready={time_since_ready:.1f}s, grace_period={grace_period}s, "
|
||||
f"total_clients={total_clients}")
|
||||
|
|
@ -954,16 +1117,9 @@ class ProxyServer:
|
|||
# Still within grace period
|
||||
logger.debug(f"Channel {channel_id} in grace period - {time_since_ready:.1f}s of {grace_period}s elapsed")
|
||||
continue
|
||||
elif total_clients == 0:
|
||||
# Grace period expired with no clients
|
||||
logger.info(f"Grace period expired ({time_since_ready:.1f}s > {grace_period}s) with no clients - stopping channel {channel_id}")
|
||||
self.stop_channel(channel_id)
|
||||
else:
|
||||
# Grace period expired but we have clients - mark channel as active
|
||||
# Grace period expired with clients - mark channel as active
|
||||
logger.info(f"Grace period expired with {total_clients} clients - marking channel {channel_id} as active")
|
||||
old_state = "unknown"
|
||||
if metadata and b'state' in metadata:
|
||||
old_state = metadata[b'state'].decode('utf-8')
|
||||
if self.update_channel_state(channel_id, ChannelState.ACTIVE, {
|
||||
"grace_period_ended_at": str(time.time()),
|
||||
"clients_at_activation": str(total_clients)
|
||||
|
|
@ -971,6 +1127,13 @@ class ProxyServer:
|
|||
logger.info(f"Channel {channel_id} activated with {total_clients} clients after grace period")
|
||||
# If active and no clients, start normal shutdown procedure
|
||||
elif channel_state not in [ChannelState.CONNECTING, ChannelState.WAITING_FOR_CLIENTS] and total_clients == 0:
|
||||
# Check if channel is already stopping
|
||||
if self.redis_client:
|
||||
stop_key = RedisKeys.channel_stopping(channel_id)
|
||||
if self.redis_client.exists(stop_key):
|
||||
logger.debug(f"Channel {channel_id} is already stopping - skipping monitor shutdown")
|
||||
continue
|
||||
|
||||
# Check if there's a pending no-clients timeout
|
||||
disconnect_key = RedisKeys.last_client_disconnect(channel_id)
|
||||
disconnect_time = None
|
||||
|
|
@ -1030,14 +1193,30 @@ class ProxyServer:
|
|||
continue
|
||||
|
||||
# Check for local client count - if zero, clean up our local resources
|
||||
if self.client_managers[channel_id].get_client_count() == 0:
|
||||
# We're not the owner, and we have no local clients - clean up our resources
|
||||
logger.debug(f"Non-owner cleanup: Channel {channel_id} has no local clients, cleaning up local resources")
|
||||
if channel_id in self.client_managers:
|
||||
if self.client_managers[channel_id].get_client_count() == 0:
|
||||
# We're not the owner, and we have no local clients - clean up our resources
|
||||
logger.debug(f"Non-owner cleanup: Channel {channel_id} has no local clients, cleaning up local resources")
|
||||
self._cleanup_local_resources(channel_id)
|
||||
else:
|
||||
# This shouldn't happen, but clean up anyway
|
||||
logger.warning(f"Non-owner cleanup: Channel {channel_id} has no client_manager entry, cleaning up local resources")
|
||||
self._cleanup_local_resources(channel_id)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in cleanup thread: {e}", exc_info=True)
|
||||
|
||||
# Periodically check for orphaned channels (every 30 seconds)
|
||||
if hasattr(self, '_last_orphan_check'):
|
||||
if time.time() - self._last_orphan_check > 30:
|
||||
try:
|
||||
self._check_orphaned_metadata()
|
||||
self._last_orphan_check = time.time()
|
||||
except Exception as orphan_error:
|
||||
logger.error(f"Error checking orphaned metadata: {orphan_error}", exc_info=True)
|
||||
else:
|
||||
self._last_orphan_check = time.time()
|
||||
|
||||
gevent.sleep(ConfigHelper.cleanup_check_interval()) # REPLACE: time.sleep(ConfigHelper.cleanup_check_interval())
|
||||
|
||||
thread = threading.Thread(target=cleanup_task, daemon=True)
|
||||
|
|
@ -1059,10 +1238,6 @@ class ProxyServer:
|
|||
try:
|
||||
channel_id = key.decode('utf-8').split(':')[2]
|
||||
|
||||
# Skip channels we already have locally
|
||||
if channel_id in self.stream_buffers:
|
||||
continue
|
||||
|
||||
# Check if this channel has an owner
|
||||
owner = self.get_channel_owner(channel_id)
|
||||
|
||||
|
|
@ -1077,13 +1252,84 @@ class ProxyServer:
|
|||
else:
|
||||
# Orphaned channel with no clients - clean it up
|
||||
logger.info(f"Cleaning up orphaned channel {channel_id}")
|
||||
self._clean_redis_keys(channel_id)
|
||||
|
||||
# If we have it locally, stop it properly to clean up processes
|
||||
if channel_id in self.stream_managers or channel_id in self.client_managers:
|
||||
logger.info(f"Orphaned channel {channel_id} is local - calling stop_channel")
|
||||
self.stop_channel(channel_id)
|
||||
else:
|
||||
# Just clean up Redis keys for remote channels
|
||||
self._clean_redis_keys(channel_id)
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing channel key {key}: {e}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error checking orphaned channels: {e}")
|
||||
|
||||
def _check_orphaned_metadata(self):
|
||||
"""
|
||||
Check for metadata entries that have no owner and no clients.
|
||||
This catches zombie channels that weren't cleaned up properly.
|
||||
"""
|
||||
if not self.redis_client:
|
||||
return
|
||||
|
||||
try:
|
||||
# Get all channel metadata keys
|
||||
channel_pattern = "ts_proxy:channel:*:metadata"
|
||||
channel_keys = self.redis_client.keys(channel_pattern)
|
||||
|
||||
for key in channel_keys:
|
||||
try:
|
||||
channel_id = key.decode('utf-8').split(':')[2]
|
||||
|
||||
# Get metadata first
|
||||
metadata = self.redis_client.hgetall(key)
|
||||
if not metadata:
|
||||
# Empty metadata - clean it up
|
||||
logger.warning(f"Found empty metadata for channel {channel_id} - cleaning up")
|
||||
# If we have it locally, stop it properly
|
||||
if channel_id in self.stream_managers or channel_id in self.client_managers:
|
||||
self.stop_channel(channel_id)
|
||||
else:
|
||||
self._clean_redis_keys(channel_id)
|
||||
continue
|
||||
|
||||
# Get owner
|
||||
owner = metadata.get(b'owner', b'').decode('utf-8') if b'owner' in metadata else ''
|
||||
|
||||
# Check if owner is still alive
|
||||
owner_alive = False
|
||||
if owner:
|
||||
owner_heartbeat_key = f"ts_proxy:worker:{owner}:heartbeat"
|
||||
owner_alive = self.redis_client.exists(owner_heartbeat_key)
|
||||
|
||||
# Check client count
|
||||
client_set_key = RedisKeys.clients(channel_id)
|
||||
client_count = self.redis_client.scard(client_set_key) or 0
|
||||
|
||||
# If no owner and no clients, clean it up
|
||||
if not owner_alive and client_count == 0:
|
||||
state = metadata.get(b'state', b'unknown').decode('utf-8') if b'state' in metadata else 'unknown'
|
||||
logger.warning(f"Found orphaned metadata for channel {channel_id} (state: {state}, owner: {owner}, clients: {client_count}) - cleaning up")
|
||||
|
||||
# If we have it locally, stop it properly to clean up transcode/proxy processes
|
||||
if channel_id in self.stream_managers or channel_id in self.client_managers:
|
||||
logger.info(f"Channel {channel_id} is local - calling stop_channel to clean up processes")
|
||||
self.stop_channel(channel_id)
|
||||
else:
|
||||
# Just clean up Redis keys for remote channels
|
||||
self._clean_redis_keys(channel_id)
|
||||
elif not owner_alive and client_count > 0:
|
||||
# Owner is gone but clients remain - just log for now
|
||||
logger.warning(f"Found orphaned channel {channel_id} with {client_count} clients but no owner - may need ownership takeover")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing metadata key {key}: {e}", exc_info=True)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error checking orphaned metadata: {e}", exc_info=True)
|
||||
|
||||
def _clean_redis_keys(self, channel_id):
|
||||
"""Clean up all Redis keys for a channel more efficiently"""
|
||||
# Release the channel, stream, and profile keys from the channel
|
||||
|
|
|
|||
|
|
@ -14,6 +14,8 @@ from ..server import ProxyServer
|
|||
from ..redis_keys import RedisKeys
|
||||
from ..constants import EventType, ChannelState, ChannelMetadataField
|
||||
from ..url_utils import get_stream_info_for_switch
|
||||
from core.utils import log_system_event
|
||||
from .log_parsers import LogParserFactory
|
||||
|
||||
logger = logging.getLogger("ts_proxy")
|
||||
|
||||
|
|
@ -418,124 +420,51 @@ class ChannelService:
|
|||
|
||||
@staticmethod
|
||||
def parse_and_store_stream_info(channel_id, stream_info_line, stream_type="video", stream_id=None):
|
||||
"""Parse FFmpeg stream info line and store in Redis metadata and database"""
|
||||
"""
|
||||
Parse stream info from FFmpeg/VLC/Streamlink logs and store in Redis/DB.
|
||||
Uses specialized parsers for each streaming tool.
|
||||
"""
|
||||
try:
|
||||
if stream_type == "input":
|
||||
# Example lines:
|
||||
# Input #0, mpegts, from 'http://example.com/stream.ts':
|
||||
# Input #0, hls, from 'http://example.com/stream.m3u8':
|
||||
# Use factory to parse the line based on stream type
|
||||
parsed_data = LogParserFactory.parse(stream_type, stream_info_line)
|
||||
|
||||
if not parsed_data:
|
||||
return
|
||||
|
||||
# Extract input format (e.g., "mpegts", "hls", "flv", etc.)
|
||||
input_match = re.search(r'Input #\d+,\s*([^,]+)', stream_info_line)
|
||||
input_format = input_match.group(1).strip() if input_match else None
|
||||
# Update Redis and database with parsed data
|
||||
ChannelService._update_stream_info_in_redis(
|
||||
channel_id,
|
||||
parsed_data.get('video_codec'),
|
||||
parsed_data.get('resolution'),
|
||||
parsed_data.get('width'),
|
||||
parsed_data.get('height'),
|
||||
parsed_data.get('source_fps'),
|
||||
parsed_data.get('pixel_format'),
|
||||
parsed_data.get('video_bitrate'),
|
||||
parsed_data.get('audio_codec'),
|
||||
parsed_data.get('sample_rate'),
|
||||
parsed_data.get('audio_channels'),
|
||||
parsed_data.get('audio_bitrate'),
|
||||
parsed_data.get('stream_type')
|
||||
)
|
||||
|
||||
# Store in Redis if we have valid data
|
||||
if input_format:
|
||||
ChannelService._update_stream_info_in_redis(channel_id, None, None, None, None, None, None, None, None, None, None, None, input_format)
|
||||
# Save to database if stream_id is provided
|
||||
if stream_id:
|
||||
ChannelService._update_stream_stats_in_db(stream_id, stream_type=input_format)
|
||||
|
||||
logger.debug(f"Input format info - Format: {input_format} for channel {channel_id}")
|
||||
|
||||
elif stream_type == "video":
|
||||
# Example line:
|
||||
# Stream #0:0: Video: h264 (Main), yuv420p(tv, progressive), 1280x720 [SAR 1:1 DAR 16:9], q=2-31, 2000 kb/s, 29.97 fps, 90k tbn
|
||||
|
||||
# Extract video codec (e.g., "h264", "mpeg2video", etc.)
|
||||
codec_match = re.search(r'Video:\s*([a-zA-Z0-9_]+)', stream_info_line)
|
||||
video_codec = codec_match.group(1) if codec_match else None
|
||||
|
||||
# Extract resolution (e.g., "1280x720") - be more specific to avoid hex values
|
||||
# Look for resolution patterns that are realistic video dimensions
|
||||
resolution_match = re.search(r'\b(\d{3,5})x(\d{3,5})\b', stream_info_line)
|
||||
if resolution_match:
|
||||
width = int(resolution_match.group(1))
|
||||
height = int(resolution_match.group(2))
|
||||
# Validate that these look like reasonable video dimensions
|
||||
if 100 <= width <= 10000 and 100 <= height <= 10000:
|
||||
resolution = f"{width}x{height}"
|
||||
else:
|
||||
width = height = resolution = None
|
||||
else:
|
||||
width = height = resolution = None
|
||||
|
||||
# Extract source FPS (e.g., "29.97 fps")
|
||||
fps_match = re.search(r'(\d+(?:\.\d+)?)\s*fps', stream_info_line)
|
||||
source_fps = float(fps_match.group(1)) if fps_match else None
|
||||
|
||||
# Extract pixel format (e.g., "yuv420p")
|
||||
pixel_format_match = re.search(r'Video:\s*[^,]+,\s*([^,(]+)', stream_info_line)
|
||||
pixel_format = None
|
||||
if pixel_format_match:
|
||||
pf = pixel_format_match.group(1).strip()
|
||||
# Clean up pixel format (remove extra info in parentheses)
|
||||
if '(' in pf:
|
||||
pf = pf.split('(')[0].strip()
|
||||
pixel_format = pf
|
||||
|
||||
# Extract bitrate if present (e.g., "2000 kb/s")
|
||||
video_bitrate = None
|
||||
bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', stream_info_line)
|
||||
if bitrate_match:
|
||||
video_bitrate = float(bitrate_match.group(1))
|
||||
|
||||
# Store in Redis if we have valid data
|
||||
if any(x is not None for x in [video_codec, resolution, source_fps, pixel_format, video_bitrate]):
|
||||
ChannelService._update_stream_info_in_redis(channel_id, video_codec, resolution, width, height, source_fps, pixel_format, video_bitrate, None, None, None, None, None)
|
||||
# Save to database if stream_id is provided
|
||||
if stream_id:
|
||||
ChannelService._update_stream_stats_in_db(
|
||||
stream_id,
|
||||
video_codec=video_codec,
|
||||
resolution=resolution,
|
||||
source_fps=source_fps,
|
||||
pixel_format=pixel_format,
|
||||
video_bitrate=video_bitrate
|
||||
)
|
||||
|
||||
logger.info(f"Video stream info - Codec: {video_codec}, Resolution: {resolution}, "
|
||||
f"Source FPS: {source_fps}, Pixel Format: {pixel_format}, "
|
||||
f"Video Bitrate: {video_bitrate} kb/s")
|
||||
|
||||
elif stream_type == "audio":
|
||||
# Example line:
|
||||
# Stream #0:1[0x101]: Audio: aac (LC) ([15][0][0][0] / 0x000F), 48000 Hz, stereo, fltp, 64 kb/s
|
||||
|
||||
# Extract audio codec (e.g., "aac", "mp3", etc.)
|
||||
codec_match = re.search(r'Audio:\s*([a-zA-Z0-9_]+)', stream_info_line)
|
||||
audio_codec = codec_match.group(1) if codec_match else None
|
||||
|
||||
# Extract sample rate (e.g., "48000 Hz")
|
||||
sample_rate_match = re.search(r'(\d+)\s*Hz', stream_info_line)
|
||||
sample_rate = int(sample_rate_match.group(1)) if sample_rate_match else None
|
||||
|
||||
# Extract channel layout (e.g., "stereo", "5.1", "mono")
|
||||
# Look for common channel layouts
|
||||
channel_match = re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', stream_info_line, re.IGNORECASE)
|
||||
channels = channel_match.group(1) if channel_match else None
|
||||
|
||||
# Extract audio bitrate if present (e.g., "64 kb/s")
|
||||
audio_bitrate = None
|
||||
bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', stream_info_line)
|
||||
if bitrate_match:
|
||||
audio_bitrate = float(bitrate_match.group(1))
|
||||
|
||||
# Store in Redis if we have valid data
|
||||
if any(x is not None for x in [audio_codec, sample_rate, channels, audio_bitrate]):
|
||||
ChannelService._update_stream_info_in_redis(channel_id, None, None, None, None, None, None, None, audio_codec, sample_rate, channels, audio_bitrate, None)
|
||||
# Save to database if stream_id is provided
|
||||
if stream_id:
|
||||
ChannelService._update_stream_stats_in_db(
|
||||
stream_id,
|
||||
audio_codec=audio_codec,
|
||||
sample_rate=sample_rate,
|
||||
audio_channels=channels,
|
||||
audio_bitrate=audio_bitrate
|
||||
)
|
||||
if stream_id:
|
||||
ChannelService._update_stream_stats_in_db(
|
||||
stream_id,
|
||||
video_codec=parsed_data.get('video_codec'),
|
||||
resolution=parsed_data.get('resolution'),
|
||||
source_fps=parsed_data.get('source_fps'),
|
||||
pixel_format=parsed_data.get('pixel_format'),
|
||||
video_bitrate=parsed_data.get('video_bitrate'),
|
||||
audio_codec=parsed_data.get('audio_codec'),
|
||||
sample_rate=parsed_data.get('sample_rate'),
|
||||
audio_channels=parsed_data.get('audio_channels'),
|
||||
audio_bitrate=parsed_data.get('audio_bitrate'),
|
||||
stream_type=parsed_data.get('stream_type')
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error parsing FFmpeg {stream_type} stream info: {e}")
|
||||
logger.debug(f"Error parsing {stream_type} stream info: {e}")
|
||||
|
||||
@staticmethod
|
||||
def _update_stream_info_in_redis(channel_id, codec, resolution, width, height, fps, pixel_format, video_bitrate, audio_codec=None, sample_rate=None, channels=None, audio_bitrate=None, input_format=None):
|
||||
|
|
@@ -597,32 +526,41 @@ class ChannelService:
    @staticmethod
    def _update_stream_stats_in_db(stream_id, **stats):
        """Update stream stats in database"""
        from django.db import connection

        try:
            from apps.channels.models import Stream
            from django.utils import timezone

            stream = Stream.objects.get(id=stream_id)

            # Get existing stats or create new dict
            current_stats = stream.stream_stats or {}

            # Update with new stats
            for key, value in stats.items():
                if value is not None:
                    current_stats[key] = value

            # Save updated stats and timestamp
            stream.stream_stats = current_stats
            stream.stream_stats_updated_at = timezone.now()
            stream.save(update_fields=['stream_stats', 'stream_stats_updated_at'])

            logger.debug(f"Updated stream stats in database for stream {stream_id}: {stats}")
            return True

        except Exception as e:
            logger.error(f"Error updating stream stats in database for stream {stream_id}: {e}")
            return False

        finally:
            # Always close database connection after update
            try:
                connection.close()
            except Exception:
                pass
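As a rough usage sketch (not part of this diff): the keyword names mirror the stats fields used elsewhere in this changeset, and the stream id and values are placeholders.

# Hypothetical call site; stream_id=42 and the values are illustrative only.
ChannelService._update_stream_stats_in_db(
    42,
    video_codec='h264',
    resolution='1920x1080',
    source_fps=29.97,
    audio_codec='aac',
    sample_rate=48000,
)
# None values are skipped by the helper, so partial updates merge into the
# existing stream.stream_stats JSON rather than overwriting it.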
|
||||
|
||||
# Helper methods for Redis operations
|
||||
|
||||
@staticmethod
|
||||
|
|
@ -678,7 +616,7 @@ class ChannelService:
|
|||
|
||||
switch_request = {
|
||||
"event": EventType.STREAM_SWITCH,
|
||||
"channel_id": channel_id,
|
||||
"channel_id": str(channel_id),
|
||||
"url": new_url,
|
||||
"user_agent": user_agent,
|
||||
"stream_id": stream_id,
|
||||
|
|
@ -691,6 +629,7 @@ class ChannelService:
|
|||
RedisKeys.events_channel(channel_id),
|
||||
json.dumps(switch_request)
|
||||
)
|
||||
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
|
|
@ -703,7 +642,7 @@ class ChannelService:
|
|||
|
||||
stop_request = {
|
||||
"event": EventType.CHANNEL_STOP,
|
||||
"channel_id": channel_id,
|
||||
"channel_id": str(channel_id),
|
||||
"requester_worker_id": proxy_server.worker_id,
|
||||
"timestamp": time.time()
|
||||
}
|
||||
|
|
@ -726,7 +665,7 @@ class ChannelService:
|
|||
|
||||
stop_request = {
|
||||
"event": EventType.CLIENT_STOP,
|
||||
"channel_id": channel_id,
|
||||
"channel_id": str(channel_id),
|
||||
"client_id": client_id,
|
||||
"requester_worker_id": proxy_server.worker_id,
|
||||
"timestamp": time.time()
|
||||
|
|
|
|||
apps/proxy/ts_proxy/services/log_parsers.py (new file, 410 lines)
@@ -0,0 +1,410 @@
"""Log parsers for FFmpeg, Streamlink, and VLC output."""
|
||||
import re
|
||||
import logging
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Optional, Dict, Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BaseLogParser(ABC):
|
||||
"""Base class for log parsers"""
|
||||
|
||||
# Map of stream_type -> method_name that this parser handles
|
||||
STREAM_TYPE_METHODS: Dict[str, str] = {}
|
||||
|
||||
@abstractmethod
|
||||
def can_parse(self, line: str) -> Optional[str]:
|
||||
"""
|
||||
Check if this parser can handle the line.
|
||||
Returns the stream_type if it can parse, None otherwise.
|
||||
e.g., 'video', 'audio', 'vlc_video', 'vlc_audio', 'streamlink'
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
pass
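A minimal sketch (not part of the diff) of how an additional tool could plug into this hierarchy; the class name, stream type key, and matching logic below are hypothetical.

class ExampleToolLogParser(BaseLogParser):
    """Hypothetical parser showing the BaseLogParser contract."""

    STREAM_TYPE_METHODS = {'exampletool_video': 'parse_video_stream'}

    def can_parse(self, line: str) -> Optional[str]:
        # Claim the line only when it looks like our tool's output
        return 'exampletool_video' if 'exampletool:' in line.lower() else None

    def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]:
        return None

    def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]:
        match = re.search(r'\b(\d{3,5})x(\d{3,5})\b', line)
        if match:
            return {'resolution': f"{match.group(1)}x{match.group(2)}"}
        return None

    def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]:
        return None

Registering such a parser would mean adding an instance to LogParserFactory._parsers further down in this file.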
|
||||
|
||||
|
||||
class FFmpegLogParser(BaseLogParser):
|
||||
"""Parser for FFmpeg log output"""
|
||||
|
||||
STREAM_TYPE_METHODS = {
|
||||
'input': 'parse_input_format',
|
||||
'video': 'parse_video_stream',
|
||||
'audio': 'parse_audio_stream'
|
||||
}
|
||||
|
||||
def can_parse(self, line: str) -> Optional[str]:
|
||||
"""Check if this is an FFmpeg line we can parse"""
|
||||
lower = line.lower()
|
||||
|
||||
# Input format detection
|
||||
if lower.startswith('input #'):
|
||||
return 'input'
|
||||
|
||||
# Stream info (only during input phase, but we'll let stream_manager handle phase tracking)
|
||||
if 'stream #' in lower:
|
||||
if 'video:' in lower:
|
||||
return 'video'
|
||||
elif 'audio:' in lower:
|
||||
return 'audio'
|
||||
|
||||
return None
|
||||
|
||||
def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
"""Parse FFmpeg input format (e.g., mpegts, hls)"""
|
||||
try:
|
||||
input_match = re.search(r'Input #\d+,\s*([^,]+)', line)
|
||||
input_format = input_match.group(1).strip() if input_match else None
|
||||
|
||||
if input_format:
|
||||
logger.debug(f"Input format info - Format: {input_format}")
|
||||
return {'stream_type': input_format}
|
||||
except Exception as e:
|
||||
logger.debug(f"Error parsing FFmpeg input format: {e}")
|
||||
|
||||
return None
|
||||
|
||||
def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
"""Parse FFmpeg video stream info"""
|
||||
try:
|
||||
result = {}
|
||||
|
||||
# Extract codec, resolution, fps, pixel format, bitrate
|
||||
codec_match = re.search(r'Video:\s*([a-zA-Z0-9_]+)', line)
|
||||
if codec_match:
|
||||
result['video_codec'] = codec_match.group(1)
|
||||
|
||||
resolution_match = re.search(r'\b(\d{3,5})x(\d{3,5})\b', line)
|
||||
if resolution_match:
|
||||
width = int(resolution_match.group(1))
|
||||
height = int(resolution_match.group(2))
|
||||
if 100 <= width <= 10000 and 100 <= height <= 10000:
|
||||
result['resolution'] = f"{width}x{height}"
|
||||
result['width'] = width
|
||||
result['height'] = height
|
||||
|
||||
fps_match = re.search(r'(\d+(?:\.\d+)?)\s*fps', line)
|
||||
if fps_match:
|
||||
result['source_fps'] = float(fps_match.group(1))
|
||||
|
||||
pixel_format_match = re.search(r'Video:\s*[^,]+,\s*([^,(]+)', line)
|
||||
if pixel_format_match:
|
||||
pf = pixel_format_match.group(1).strip()
|
||||
if '(' in pf:
|
||||
pf = pf.split('(')[0].strip()
|
||||
result['pixel_format'] = pf
|
||||
|
||||
bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', line)
|
||||
if bitrate_match:
|
||||
result['video_bitrate'] = float(bitrate_match.group(1))
|
||||
|
||||
if result:
|
||||
logger.info(f"Video stream info - Codec: {result.get('video_codec')}, "
|
||||
f"Resolution: {result.get('resolution')}, "
|
||||
f"Source FPS: {result.get('source_fps')}, "
|
||||
f"Pixel Format: {result.get('pixel_format')}, "
|
||||
f"Video Bitrate: {result.get('video_bitrate')} kb/s")
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error parsing FFmpeg video stream info: {e}")
|
||||
|
||||
return None
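For reference, a sketch of what this method would return for the sample FFmpeg line quoted earlier in this diff (values derived from the regexes above, not captured output):

parser = FFmpegLogParser()
line = ("Stream #0:0: Video: h264 (Main), yuv420p(tv, progressive), "
        "1280x720 [SAR 1:1 DAR 16:9], q=2-31, 2000 kb/s, 29.97 fps, 90k tbn")
parser.parse_video_stream(line)
# -> {'video_codec': 'h264', 'resolution': '1280x720', 'width': 1280, 'height': 720,
#     'source_fps': 29.97, 'pixel_format': 'yuv420p', 'video_bitrate': 2000.0}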
|
||||
|
||||
def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
"""Parse FFmpeg audio stream info"""
|
||||
try:
|
||||
result = {}
|
||||
|
||||
codec_match = re.search(r'Audio:\s*([a-zA-Z0-9_]+)', line)
|
||||
if codec_match:
|
||||
result['audio_codec'] = codec_match.group(1)
|
||||
|
||||
sample_rate_match = re.search(r'(\d+)\s*Hz', line)
|
||||
if sample_rate_match:
|
||||
result['sample_rate'] = int(sample_rate_match.group(1))
|
||||
|
||||
channel_match = re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', line, re.IGNORECASE)
|
||||
if channel_match:
|
||||
result['audio_channels'] = channel_match.group(1)
|
||||
|
||||
bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', line)
|
||||
if bitrate_match:
|
||||
result['audio_bitrate'] = float(bitrate_match.group(1))
|
||||
|
||||
if result:
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error parsing FFmpeg audio stream info: {e}")
|
||||
|
||||
return None
|
||||
|
||||
|
||||
class VLCLogParser(BaseLogParser):
|
||||
"""Parser for VLC log output"""
|
||||
|
||||
STREAM_TYPE_METHODS = {
|
||||
'vlc_video': 'parse_video_stream',
|
||||
'vlc_audio': 'parse_audio_stream'
|
||||
}
|
||||
|
||||
def can_parse(self, line: str) -> Optional[str]:
|
||||
"""Check if this is a VLC line we can parse"""
|
||||
lower = line.lower()
|
||||
|
||||
# VLC TS demux codec detection
|
||||
if 'ts demux debug' in lower and 'type=' in lower:
|
||||
if 'video' in lower:
|
||||
return 'vlc_video'
|
||||
elif 'audio' in lower:
|
||||
return 'vlc_audio'
|
||||
|
||||
# VLC decoder output
|
||||
if 'decoder' in lower and ('channels:' in lower or 'samplerate:' in lower or 'x' in line or 'fps' in lower):
|
||||
if 'audio' in lower or 'channels:' in lower or 'samplerate:' in lower:
|
||||
return 'vlc_audio'
|
||||
else:
|
||||
return 'vlc_video'
|
||||
|
||||
# VLC transcode output for resolution/FPS
|
||||
if 'stream_out_transcode' in lower and ('source fps' in lower or ('source ' in lower and 'x' in line)):
|
||||
return 'vlc_video'
|
||||
|
||||
return None
|
||||
|
||||
def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
return None
|
||||
|
||||
def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
"""Parse VLC TS demux output and decoder info for video"""
|
||||
try:
|
||||
lower = line.lower()
|
||||
result = {}
|
||||
|
||||
# Codec detection from TS demux
|
||||
video_codec_map = {
|
||||
('avc', 'h.264', 'type=0x1b'): "h264",
|
||||
('hevc', 'h.265', 'type=0x24'): "hevc",
|
||||
('mpeg-2', 'type=0x02'): "mpeg2video",
|
||||
('mpeg-4', 'type=0x10'): "mpeg4"
|
||||
}
|
||||
|
||||
for patterns, codec in video_codec_map.items():
|
||||
if any(p in lower for p in patterns):
|
||||
result['video_codec'] = codec
|
||||
break
|
||||
|
||||
# Extract FPS from transcode output: "source fps 30/1"
|
||||
fps_fraction_match = re.search(r'source fps\s+(\d+)/(\d+)', lower)
|
||||
if fps_fraction_match:
|
||||
numerator = int(fps_fraction_match.group(1))
|
||||
denominator = int(fps_fraction_match.group(2))
|
||||
if denominator > 0:
|
||||
result['source_fps'] = numerator / denominator
|
||||
|
||||
# Extract resolution from transcode output: "source 1280x720"
|
||||
source_res_match = re.search(r'source\s+(\d{3,4})x(\d{3,4})', lower)
|
||||
if source_res_match:
|
||||
width = int(source_res_match.group(1))
|
||||
height = int(source_res_match.group(2))
|
||||
if 100 <= width <= 10000 and 100 <= height <= 10000:
|
||||
result['resolution'] = f"{width}x{height}"
|
||||
result['width'] = width
|
||||
result['height'] = height
|
||||
else:
|
||||
# Fallback: generic resolution pattern
|
||||
resolution_match = re.search(r'(\d{3,4})x(\d{3,4})', line)
|
||||
if resolution_match:
|
||||
width = int(resolution_match.group(1))
|
||||
height = int(resolution_match.group(2))
|
||||
if 100 <= width <= 10000 and 100 <= height <= 10000:
|
||||
result['resolution'] = f"{width}x{height}"
|
||||
result['width'] = width
|
||||
result['height'] = height
|
||||
|
||||
# Fallback: try to extract FPS from generic format
|
||||
if 'source_fps' not in result:
|
||||
fps_match = re.search(r'(\d+\.?\d*)\s*fps', lower)
|
||||
if fps_match:
|
||||
result['source_fps'] = float(fps_match.group(1))
|
||||
|
||||
return result if result else None
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error parsing VLC video stream info: {e}")
|
||||
|
||||
return None
|
||||
|
||||
def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
"""Parse VLC TS demux output and decoder info for audio"""
|
||||
try:
|
||||
lower = line.lower()
|
||||
result = {}
|
||||
|
||||
# Codec detection from TS demux
|
||||
audio_codec_map = {
|
||||
('type=0xf', 'adts'): "aac",
|
||||
('type=0x03', 'type=0x04'): "mp3",
|
||||
('type=0x06', 'type=0x81'): "ac3",
|
||||
('type=0x0b', 'lpcm'): "pcm"
|
||||
}
|
||||
|
||||
for patterns, codec in audio_codec_map.items():
|
||||
if any(p in lower for p in patterns):
|
||||
result['audio_codec'] = codec
|
||||
break
|
||||
|
||||
# VLC decoder format: "AAC channels: 2 samplerate: 48000"
|
||||
if 'channels:' in lower:
|
||||
channels_match = re.search(r'channels:\s*(\d+)', lower)
|
||||
if channels_match:
|
||||
num_channels = int(channels_match.group(1))
|
||||
# Convert number to name
|
||||
channel_names = {1: 'mono', 2: 'stereo', 6: '5.1', 8: '7.1'}
|
||||
result['audio_channels'] = channel_names.get(num_channels, str(num_channels))
|
||||
|
||||
if 'samplerate:' in lower:
|
||||
samplerate_match = re.search(r'samplerate:\s*(\d+)', lower)
|
||||
if samplerate_match:
|
||||
result['sample_rate'] = int(samplerate_match.group(1))
|
||||
|
||||
# Try to extract sample rate (Hz format)
|
||||
sample_rate_match = re.search(r'(\d+)\s*hz', lower)
|
||||
if sample_rate_match and 'sample_rate' not in result:
|
||||
result['sample_rate'] = int(sample_rate_match.group(1))
|
||||
|
||||
# Try to extract channels (word format)
|
||||
if 'audio_channels' not in result:
|
||||
channel_match = re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', lower)
|
||||
if channel_match:
|
||||
result['audio_channels'] = channel_match.group(1)
|
||||
|
||||
return result if result else None
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"[VLC AUDIO PARSER] Error parsing VLC audio stream info: {e}")
|
||||
|
||||
return None
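A quick illustration (the log line is a plausible VLC decoder message, not captured output):

parser = VLCLogParser()
parser.parse_audio_stream("main decoder debug: AAC channels: 2 samplerate: 48000")
# -> {'audio_channels': 'stereo', 'sample_rate': 48000}
# The numeric channel count is mapped to a layout name via channel_names above.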
|
||||
|
||||
|
||||
class StreamlinkLogParser(BaseLogParser):
|
||||
"""Parser for Streamlink log output"""
|
||||
|
||||
STREAM_TYPE_METHODS = {
|
||||
'streamlink': 'parse_video_stream'
|
||||
}
|
||||
|
||||
def can_parse(self, line: str) -> Optional[str]:
|
||||
"""Check if this is a Streamlink line we can parse"""
|
||||
lower = line.lower()
|
||||
|
||||
if 'opening stream:' in lower or 'available streams:' in lower:
|
||||
return 'streamlink'
|
||||
|
||||
return None
|
||||
|
||||
def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
return None
|
||||
|
||||
def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
"""Parse Streamlink quality/resolution"""
|
||||
try:
|
||||
quality_match = re.search(r'(\d+p|\d+x\d+)', line)
|
||||
if quality_match:
|
||||
quality = quality_match.group(1)
|
||||
|
||||
if 'x' in quality:
|
||||
resolution = quality
|
||||
width, height = map(int, quality.split('x'))
|
||||
else:
|
||||
resolutions = {
|
||||
'2160p': ('3840x2160', 3840, 2160),
|
||||
'1080p': ('1920x1080', 1920, 1080),
|
||||
'720p': ('1280x720', 1280, 720),
|
||||
'480p': ('854x480', 854, 480),
|
||||
'360p': ('640x360', 640, 360)
|
||||
}
|
||||
resolution, width, height = resolutions.get(quality, ('1920x1080', 1920, 1080))
|
||||
|
||||
return {
|
||||
'video_codec': 'h264',
|
||||
'resolution': resolution,
|
||||
'width': width,
|
||||
'height': height,
|
||||
'pixel_format': 'yuv420p'
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error parsing Streamlink video info: {e}")
|
||||
|
||||
return None
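For instance (a sketch using a typical Streamlink log line; the codec and pixel format are the nominal defaults assumed above):

parser = StreamlinkLogParser()
parser.parse_video_stream("[cli][info] Opening stream: 720p (hls)")
# -> {'video_codec': 'h264', 'resolution': '1280x720', 'width': 1280,
#     'height': 720, 'pixel_format': 'yuv420p'}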
|
||||
|
||||
def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
return None
|
||||
|
||||
|
||||
class LogParserFactory:
    """Factory to get the appropriate log parser"""

    _parsers = {
        'ffmpeg': FFmpegLogParser(),
        'vlc': VLCLogParser(),
        'streamlink': StreamlinkLogParser()
    }

    @classmethod
    def _get_parser_and_method(cls, stream_type: str) -> Optional[tuple[BaseLogParser, str]]:
        """Determine parser and method from stream_type"""
        # Check each parser to see if it handles this stream_type
        for parser in cls._parsers.values():
            method_name = parser.STREAM_TYPE_METHODS.get(stream_type)
            if method_name:
                return (parser, method_name)

        return None

    @classmethod
    def parse(cls, stream_type: str, line: str) -> Optional[Dict[str, Any]]:
        """
        Parse a log line based on stream type.
        Returns parsed data or None if parsing fails.
        """
        result = cls._get_parser_and_method(stream_type)
        if not result:
            return None

        parser, method_name = result
        method = getattr(parser, method_name, None)
        if method:
            return method(line)

        return None

    @classmethod
    def auto_parse(cls, line: str) -> Optional[tuple[str, Dict[str, Any]]]:
        """
        Automatically detect which parser can handle this line and parse it.
        Returns (stream_type, parsed_data) or None if no parser can handle it.
        """
        # Try each parser to see if it can handle this line
        for parser in cls._parsers.values():
            stream_type = parser.can_parse(line)
            if stream_type:
                # Parser can handle this line, now parse it
                parsed_data = cls.parse(stream_type, line)
                if parsed_data:
                    return (stream_type, parsed_data)

        return None
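Putting the factory together, a rough usage sketch (the log lines are illustrative samples taken from comments in this diff, not captured output):

# Direct routing when the caller already knows the stream type:
LogParserFactory.parse('input', "Input #0, mpegts, from 'http://example.com/stream.ts':")
# -> {'stream_type': 'mpegts'}

# Auto-detection when the source tool is unknown:
LogParserFactory.auto_parse("Stream #0:1: Audio: aac (LC), 48000 Hz, stereo, fltp, 64 kb/s")
# -> ('audio', {'audio_codec': 'aac', 'sample_rate': 48000,
#               'audio_channels': 'stereo', 'audio_bitrate': 64.0})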
|
||||
|
|
@@ -303,6 +303,14 @@ class StreamBuffer:
        # Retrieve chunks
        chunks = self.get_chunks_exact(client_index, chunk_count)

        # Check if we got significantly fewer chunks than expected (likely due to expiration)
        # Only check if we expected multiple chunks and got none or very few
        if chunk_count > 3 and len(chunks) == 0 and chunks_behind > 10:
            # Chunks are missing - likely expired from Redis
            # Return empty list to signal client should skip forward
            logger.debug(f"Chunks missing for client at index {client_index}, buffer at {self.index} ({chunks_behind} behind)")
            return [], client_index

        # Check total size
        total_size = sum(len(c) for c in chunks)

@@ -316,7 +324,7 @@ class StreamBuffer:
            additional_size = sum(len(c) for c in more_chunks)
            if total_size + additional_size <= MAX_SIZE:
                chunks.extend(more_chunks)
                chunk_count += additional
                chunk_count += len(more_chunks)  # Fixed: count actual additional chunks retrieved

        return chunks, client_index + chunk_count
@@ -8,6 +8,8 @@ import logging
import threading
import gevent  # used for cooperative sleeps in the streaming loop
from apps.proxy.config import TSConfig as Config
from apps.channels.models import Channel
from core.utils import log_system_event
from .server import ProxyServer
from .utils import create_ts_packet, get_logger
from .redis_keys import RedisKeys
@@ -52,6 +54,10 @@ class StreamGenerator:
        self.last_stats_bytes = 0
        self.current_rate = 0.0

        # TTL refresh tracking
        self.last_ttl_refresh = time.time()
        self.ttl_refresh_interval = 3  # Refresh TTL every 3 seconds of active streaming

    def generate(self):
        """
        Generator function that produces the stream content for the client.
@ -84,6 +90,20 @@ class StreamGenerator:
|
|||
if not self._setup_streaming():
|
||||
return
|
||||
|
||||
# Log client connect event
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=self.channel_id)
|
||||
log_system_event(
|
||||
'client_connect',
|
||||
channel_id=self.channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
client_ip=self.client_ip,
|
||||
client_id=self.client_id,
|
||||
user_agent=self.client_user_agent[:100] if self.client_user_agent else None
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not log client connect event: {e}")
|
||||
|
||||
# Main streaming loop
|
||||
for chunk in self._stream_data_generator():
|
||||
yield chunk
|
||||
|
|
@ -204,6 +224,18 @@ class StreamGenerator:
|
|||
self.empty_reads += 1
|
||||
self.consecutive_empty += 1
|
||||
|
||||
# Check if we're too far behind (chunks expired from Redis)
|
||||
chunks_behind = self.buffer.index - self.local_index
|
||||
if chunks_behind > 50: # If more than 50 chunks behind, jump forward
|
||||
# Calculate new position: stay a few chunks behind current buffer
|
||||
initial_behind = ConfigHelper.initial_behind_chunks()
|
||||
new_index = max(self.local_index, self.buffer.index - initial_behind)
|
||||
|
||||
logger.warning(f"[{self.client_id}] Client too far behind ({chunks_behind} chunks), jumping from {self.local_index} to {new_index}")
|
||||
self.local_index = new_index
|
||||
self.consecutive_empty = 0 # Reset since we're repositioning
|
||||
continue # Try again immediately with new position
|
||||
|
||||
if self._should_send_keepalive(self.local_index):
|
||||
keepalive_packet = create_ts_packet('keepalive')
|
||||
logger.debug(f"[{self.client_id}] Sending keepalive packet while waiting at buffer head")
|
||||
|
|
@ -324,7 +356,20 @@ class StreamGenerator:
|
|||
ChannelMetadataField.STATS_UPDATED_AT: str(current_time)
|
||||
}
|
||||
proxy_server.redis_client.hset(client_key, mapping=stats)
|
||||
# No need to set expiration as client heartbeat will refresh this key
|
||||
|
||||
# Refresh TTL periodically while actively streaming
|
||||
# This provides proof-of-life independent of heartbeat thread
|
||||
if current_time - self.last_ttl_refresh > self.ttl_refresh_interval:
|
||||
try:
|
||||
# Refresh TTL on client key
|
||||
proxy_server.redis_client.expire(client_key, Config.CLIENT_RECORD_TTL)
|
||||
# Also refresh the client set TTL
|
||||
client_set_key = f"ts_proxy:channel:{self.channel_id}:clients"
|
||||
proxy_server.redis_client.expire(client_set_key, Config.CLIENT_RECORD_TTL)
|
||||
self.last_ttl_refresh = current_time
|
||||
logger.debug(f"[{self.client_id}] Refreshed client TTL (active streaming)")
|
||||
except Exception as ttl_error:
|
||||
logger.debug(f"[{self.client_id}] Failed to refresh TTL: {ttl_error}")
|
||||
except Exception as e:
|
||||
logger.warning(f"[{self.client_id}] Failed to store stats in Redis: {e}")
|
||||
|
||||
|
|
@ -410,6 +455,22 @@ class StreamGenerator:
|
|||
total_clients = client_manager.get_total_client_count()
|
||||
logger.info(f"[{self.client_id}] Disconnected after {elapsed:.2f}s (local: {local_clients}, total: {total_clients})")
|
||||
|
||||
# Log client disconnect event
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=self.channel_id)
|
||||
log_system_event(
|
||||
'client_disconnect',
|
||||
channel_id=self.channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
client_ip=self.client_ip,
|
||||
client_id=self.client_id,
|
||||
user_agent=self.client_user_agent[:100] if self.client_user_agent else None,
|
||||
duration=round(elapsed, 2),
|
||||
bytes_sent=self.bytes_sent
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not log client disconnect event: {e}")
|
||||
|
||||
# Schedule channel shutdown if no clients left
|
||||
if not stream_released: # Only if we haven't already released the stream
|
||||
self._schedule_channel_shutdown_if_needed(local_clients)
|
||||
|
|
|
|||
|
|
@@ -9,11 +9,14 @@ import subprocess
import gevent
import re
from typing import Optional, List
from django.db import connection
from django.shortcuts import get_object_or_404
from urllib3.exceptions import ReadTimeoutError
from apps.proxy.config import TSConfig as Config
from apps.channels.models import Channel, Stream
from apps.m3u.models import M3UAccount, M3UAccountProfile
from core.models import UserAgent, CoreSettings
from core.utils import log_system_event
from .stream_buffer import StreamBuffer
from .utils import detect_stream_type, get_logger
from .redis_keys import RedisKeys
@ -91,17 +94,23 @@ class StreamManager:
|
|||
self.tried_stream_ids.add(self.current_stream_id)
|
||||
logger.info(f"Loaded stream ID {self.current_stream_id} from Redis for channel {buffer.channel_id}")
|
||||
else:
|
||||
logger.warning(f"No stream_id found in Redis for channel {channel_id}")
|
||||
logger.warning(f"No stream_id found in Redis for channel {channel_id}. "
|
||||
f"Stream switching will rely on URL comparison to avoid selecting the same stream.")
|
||||
except Exception as e:
|
||||
logger.warning(f"Error loading stream ID from Redis: {e}")
|
||||
else:
|
||||
logger.warning(f"Unable to get stream ID for channel {channel_id} - stream switching may not work correctly")
|
||||
logger.warning(f"Unable to get stream ID for channel {channel_id}. "
|
||||
f"Stream switching will rely on URL comparison to avoid selecting the same stream.")
|
||||
|
||||
logger.info(f"Initialized stream manager for channel {buffer.channel_id}")
|
||||
|
||||
# Add this flag for tracking transcoding process status
|
||||
self.transcode_process_active = False
|
||||
|
||||
# Track stream command for efficient log parser routing
|
||||
self.stream_command = None
|
||||
self.parser_type = None # Will be set when transcode process starts
|
||||
|
||||
# Add tracking for data throughput
|
||||
self.bytes_processed = 0
|
||||
self.last_bytes_update = time.time()
|
||||
|
|
@ -111,6 +120,9 @@ class StreamManager:
|
|||
self.stderr_reader_thread = None
|
||||
self.ffmpeg_input_phase = True # Track if we're still reading input info
|
||||
|
||||
# Add HTTP reader thread property
|
||||
self.http_reader = None
|
||||
|
||||
def _create_session(self):
|
||||
"""Create and configure requests session with optimal settings"""
|
||||
session = requests.Session()
|
||||
|
|
@ -220,11 +232,12 @@ class StreamManager:
|
|||
# Continue with normal flow
|
||||
|
||||
# Check stream type before connecting
|
||||
stream_type = detect_stream_type(self.url)
|
||||
if self.transcode == False and stream_type == StreamType.HLS:
|
||||
logger.info(f"Detected HLS stream: {self.url} for channel {self.channel_id}")
|
||||
logger.info(f"HLS streams will be handled with FFmpeg for now - future version will support HLS natively for channel {self.channel_id}")
|
||||
# Enable transcoding for HLS streams
|
||||
self.stream_type = detect_stream_type(self.url)
|
||||
if self.transcode == False and self.stream_type in (StreamType.HLS, StreamType.RTSP, StreamType.UDP):
|
||||
stream_type_name = "HLS" if self.stream_type == StreamType.HLS else ("RTSP/RTP" if self.stream_type == StreamType.RTSP else "UDP")
|
||||
logger.info(f"Detected {stream_type_name} stream: {self.url} for channel {self.channel_id}")
|
||||
logger.info(f"{stream_type_name} streams require FFmpeg for channel {self.channel_id}")
|
||||
# Enable transcoding for HLS, RTSP/RTP, and UDP streams
|
||||
self.transcode = True
|
||||
# We'll override the stream profile selection with ffmpeg in the transcoding section
|
||||
self.force_ffmpeg = True
|
||||
|
|
@ -252,6 +265,20 @@ class StreamManager:
|
|||
# Store connection start time to measure success duration
|
||||
connection_start_time = time.time()
|
||||
|
||||
# Log reconnection event if this is a retry (not first attempt)
|
||||
if self.retry_count > 0:
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=self.channel_id)
|
||||
log_system_event(
|
||||
'channel_reconnect',
|
||||
channel_id=self.channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
attempt=self.retry_count + 1,
|
||||
max_attempts=self.max_retries
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not log reconnection event: {e}")
|
||||
|
||||
# Successfully connected - read stream data until disconnect/error
|
||||
self._process_stream_data()
|
||||
# If we get here, the connection was closed/failed
|
||||
|
|
@ -281,6 +308,20 @@ class StreamManager:
|
|||
if self.retry_count >= self.max_retries:
|
||||
url_failed = True
|
||||
logger.warning(f"Maximum retry attempts ({self.max_retries}) reached for URL: {self.url} for channel: {self.channel_id}")
|
||||
|
||||
# Log connection error event
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=self.channel_id)
|
||||
log_system_event(
|
||||
'channel_error',
|
||||
channel_id=self.channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
error_type='connection_failed',
|
||||
url=self.url[:100] if self.url else None,
|
||||
attempts=self.max_retries
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not log connection error event: {e}")
|
||||
else:
|
||||
# Wait with exponential backoff before retrying
|
||||
timeout = min(.25 * self.retry_count, 3) # Cap at 3 seconds
|
||||
|
|
@ -294,6 +335,21 @@ class StreamManager:
|
|||
|
||||
if self.retry_count >= self.max_retries:
|
||||
url_failed = True
|
||||
|
||||
# Log connection error event with exception details
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=self.channel_id)
|
||||
log_system_event(
|
||||
'channel_error',
|
||||
channel_id=self.channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
error_type='connection_exception',
|
||||
error_message=str(e)[:200],
|
||||
url=self.url[:100] if self.url else None,
|
||||
attempts=self.max_retries
|
||||
)
|
||||
except Exception as log_error:
|
||||
logger.error(f"Could not log connection error event: {log_error}")
|
||||
else:
|
||||
# Wait with exponential backoff before retrying
|
||||
timeout = min(.25 * self.retry_count, 3) # Cap at 3 seconds
|
||||
|
|
@ -378,6 +434,12 @@ class StreamManager:
|
|||
except Exception as e:
|
||||
logger.error(f"Failed to update channel state in Redis: {e} for channel {self.channel_id}", exc_info=True)
|
||||
|
||||
# Close database connection for this thread
|
||||
try:
|
||||
connection.close()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
logger.info(f"Stream manager stopped for channel {self.channel_id}")
|
||||
|
||||
def _establish_transcode_connection(self):
|
||||
|
|
@ -407,7 +469,7 @@ class StreamManager:
|
|||
from core.models import StreamProfile
|
||||
try:
|
||||
stream_profile = StreamProfile.objects.get(name='ffmpeg', locked=True)
|
||||
logger.info("Using FFmpeg stream profile for HLS content")
|
||||
logger.info("Using FFmpeg stream profile for unsupported proxy content (HLS/RTSP/UDP)")
|
||||
except StreamProfile.DoesNotExist:
|
||||
# Fall back to channel's profile if FFmpeg not found
|
||||
stream_profile = channel.get_stream_profile()
|
||||
|
|
@ -417,6 +479,28 @@ class StreamManager:
|
|||
|
||||
# Build and start transcode command
|
||||
self.transcode_cmd = stream_profile.build_command(self.url, self.user_agent)
|
||||
|
||||
# Store stream command for efficient log parser routing
|
||||
self.stream_command = stream_profile.command
|
||||
# Map actual commands to parser types for direct routing
|
||||
command_to_parser = {
|
||||
'ffmpeg': 'ffmpeg',
|
||||
'cvlc': 'vlc',
|
||||
'vlc': 'vlc',
|
||||
'streamlink': 'streamlink'
|
||||
}
|
||||
self.parser_type = command_to_parser.get(self.stream_command.lower())
|
||||
if self.parser_type:
|
||||
logger.debug(f"Using {self.parser_type} parser for log parsing (command: {self.stream_command})")
|
||||
else:
|
||||
logger.debug(f"Unknown stream command '{self.stream_command}', will use auto-detection for log parsing")
|
||||
|
||||
# For UDP streams, remove any user_agent parameters from the command
|
||||
if hasattr(self, 'stream_type') and self.stream_type == StreamType.UDP:
|
||||
# Filter out any arguments that contain the user_agent value or related headers
|
||||
self.transcode_cmd = [arg for arg in self.transcode_cmd if self.user_agent not in arg and 'user-agent' not in arg.lower() and 'user_agent' not in arg.lower()]
|
||||
logger.debug(f"Removed user_agent parameters from UDP stream command for channel: {self.channel_id}")
|
||||
|
||||
logger.debug(f"Starting transcode process: {self.transcode_cmd} for channel: {self.channel_id}")
|
||||
|
||||
# Modified to capture stderr instead of discarding it
|
||||
|
|
@ -580,35 +664,51 @@ class StreamManager:
|
|||
if content_lower.startswith('output #') or 'encoder' in content_lower:
|
||||
self.ffmpeg_input_phase = False
|
||||
|
||||
# Only parse stream info if we're still in the input phase
|
||||
if ("stream #" in content_lower and
|
||||
("video:" in content_lower or "audio:" in content_lower) and
|
||||
self.ffmpeg_input_phase):
|
||||
# Route to appropriate parser based on known command type
|
||||
from .services.log_parsers import LogParserFactory
|
||||
from .services.channel_service import ChannelService
|
||||
|
||||
from .services.channel_service import ChannelService
|
||||
if "video:" in content_lower:
|
||||
ChannelService.parse_and_store_stream_info(self.channel_id, content, "video", self.current_stream_id)
|
||||
elif "audio:" in content_lower:
|
||||
ChannelService.parse_and_store_stream_info(self.channel_id, content, "audio", self.current_stream_id)
|
||||
parse_result = None
|
||||
|
||||
# If we know the parser type, use direct routing for efficiency
|
||||
if self.parser_type:
|
||||
# Get the appropriate parser and check what it can parse
|
||||
parser = LogParserFactory._parsers.get(self.parser_type)
|
||||
if parser:
|
||||
stream_type = parser.can_parse(content)
|
||||
if stream_type:
|
||||
# Parser can handle this line, parse it directly
|
||||
parsed_data = LogParserFactory.parse(stream_type, content)
|
||||
if parsed_data:
|
||||
parse_result = (stream_type, parsed_data)
|
||||
else:
|
||||
# Unknown command type - use auto-detection as fallback
|
||||
parse_result = LogParserFactory.auto_parse(content)
|
||||
|
||||
if parse_result:
|
||||
stream_type, parsed_data = parse_result
|
||||
# For FFmpeg, only parse during input phase
|
||||
if stream_type in ['video', 'audio', 'input']:
|
||||
if self.ffmpeg_input_phase:
|
||||
ChannelService.parse_and_store_stream_info(self.channel_id, content, stream_type, self.current_stream_id)
|
||||
else:
|
||||
# VLC and Streamlink can be parsed anytime
|
||||
ChannelService.parse_and_store_stream_info(self.channel_id, content, stream_type, self.current_stream_id)
|
||||
|
||||
# Determine log level based on content
|
||||
if any(keyword in content_lower for keyword in ['error', 'failed', 'cannot', 'invalid', 'corrupt']):
|
||||
logger.error(f"FFmpeg stderr for channel {self.channel_id}: {content}")
|
||||
logger.error(f"Stream process error for channel {self.channel_id}: {content}")
|
||||
elif any(keyword in content_lower for keyword in ['warning', 'deprecated', 'ignoring']):
|
||||
logger.warning(f"FFmpeg stderr for channel {self.channel_id}: {content}")
|
||||
logger.warning(f"Stream process warning for channel {self.channel_id}: {content}")
|
||||
elif content.startswith('frame=') or 'fps=' in content or 'speed=' in content:
|
||||
# Stats lines - log at trace level to avoid spam
|
||||
logger.trace(f"FFmpeg stats for channel {self.channel_id}: {content}")
|
||||
logger.trace(f"Stream stats for channel {self.channel_id}: {content}")
|
||||
elif any(keyword in content_lower for keyword in ['input', 'output', 'stream', 'video', 'audio']):
|
||||
# Stream info - log at info level
|
||||
logger.info(f"FFmpeg info for channel {self.channel_id}: {content}")
|
||||
if content.startswith('Input #0'):
|
||||
# If it's input 0, parse stream info
|
||||
from .services.channel_service import ChannelService
|
||||
ChannelService.parse_and_store_stream_info(self.channel_id, content, "input", self.current_stream_id)
|
||||
logger.info(f"Stream info for channel {self.channel_id}: {content}")
|
||||
else:
|
||||
# Everything else at debug level
|
||||
logger.debug(f"FFmpeg stderr for channel {self.channel_id}: {content}")
|
||||
logger.debug(f"Stream process output for channel {self.channel_id}: {content}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error logging stderr content for channel {self.channel_id}: {e}")
|
||||
|
|
@ -681,6 +781,19 @@ class StreamManager:
|
|||
# Reset buffering state
|
||||
self.buffering = False
|
||||
self.buffering_start_time = None
|
||||
|
||||
# Log failover event
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=self.channel_id)
|
||||
log_system_event(
|
||||
'channel_failover',
|
||||
channel_id=self.channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
reason='buffering_timeout',
|
||||
duration=buffering_duration
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not log failover event: {e}")
|
||||
else:
|
||||
logger.error(f"Failed to switch to next stream for channel {self.channel_id} after buffering timeout")
|
||||
else:
|
||||
|
|
@ -688,6 +801,19 @@ class StreamManager:
|
|||
self.buffering = True
|
||||
self.buffering_start_time = time.time()
|
||||
logger.warning(f"Buffering started for channel {self.channel_id} - speed: {ffmpeg_speed}x")
|
||||
|
||||
# Log system event for buffering
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=self.channel_id)
|
||||
log_system_event(
|
||||
'channel_buffering',
|
||||
channel_id=self.channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
speed=ffmpeg_speed
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not log buffering event: {e}")
|
||||
|
||||
# Log buffering warning
|
||||
logger.debug(f"FFmpeg speed on channel {self.channel_id} is below {self.buffering_speed} ({ffmpeg_speed}x) - buffering detected")
|
||||
# Set channel state to buffering
|
||||
|
|
@ -737,9 +863,9 @@ class StreamManager:
|
|||
|
||||
|
||||
def _establish_http_connection(self):
|
||||
"""Establish a direct HTTP connection to the stream"""
|
||||
"""Establish HTTP connection using thread-based reader (same as transcode path)"""
|
||||
try:
|
||||
logger.debug(f"Using TS Proxy to connect to stream: {self.url}")
|
||||
logger.debug(f"Using HTTP streamer thread to connect to stream: {self.url}")
|
||||
|
||||
# Check if we already have active HTTP connections
|
||||
if self.current_response or self.current_session:
|
||||
|
|
@ -756,41 +882,39 @@ class StreamManager:
|
|||
logger.debug(f"Closing existing transcode process before establishing HTTP connection for channel {self.channel_id}")
|
||||
self._close_socket()
|
||||
|
||||
# Create new session for each connection attempt
|
||||
session = self._create_session()
|
||||
self.current_session = session
|
||||
# Use HTTPStreamReader to fetch stream and pipe to a readable file descriptor
|
||||
# This allows us to use the same fetch_chunk() path as transcode
|
||||
from .http_streamer import HTTPStreamReader
|
||||
|
||||
# Stream the URL with proper timeout handling
|
||||
response = session.get(
|
||||
self.url,
|
||||
stream=True,
|
||||
timeout=(10, 60) # 10s connect timeout, 60s read timeout
|
||||
# Create and start the HTTP stream reader
|
||||
self.http_reader = HTTPStreamReader(
|
||||
url=self.url,
|
||||
user_agent=self.user_agent,
|
||||
chunk_size=self.chunk_size
|
||||
)
|
||||
self.current_response = response
|
||||
|
||||
if response.status_code == 200:
|
||||
self.connected = True
|
||||
self.healthy = True
|
||||
logger.info(f"Successfully connected to stream source for channel {self.channel_id}")
|
||||
# Start the reader thread and get the read end of the pipe
|
||||
pipe_fd = self.http_reader.start()
|
||||
|
||||
# Store connection start time for stability tracking
|
||||
self.connection_start_time = time.time()
|
||||
# Wrap the file descriptor in a file object (same as transcode stdout)
|
||||
import os
|
||||
self.socket = os.fdopen(pipe_fd, 'rb', buffering=0)
|
||||
self.connected = True
|
||||
self.healthy = True
|
||||
|
||||
# Set channel state to waiting for clients
|
||||
self._set_waiting_for_clients()
|
||||
logger.info(f"Successfully started HTTP streamer thread for channel {self.channel_id}")
|
||||
|
||||
# Store connection start time for stability tracking
|
||||
self.connection_start_time = time.time()
|
||||
|
||||
# Set channel state to waiting for clients
|
||||
self._set_waiting_for_clients()
|
||||
|
||||
return True
|
||||
|
||||
return True
|
||||
else:
|
||||
logger.error(f"Failed to connect to stream for channel {self.channel_id}: HTTP {response.status_code}")
|
||||
self._close_connection()
|
||||
return False
|
||||
except requests.exceptions.RequestException as e:
|
||||
logger.error(f"HTTP request error: {e}")
|
||||
self._close_connection()
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.error(f"Error establishing HTTP connection for channel {self.channel_id}: {e}", exc_info=True)
|
||||
self._close_connection()
|
||||
self._close_socket()
|
||||
return False
|
||||
|
||||
def _update_bytes_processed(self, chunk_size):
|
||||
|
|
@ -818,48 +942,19 @@ class StreamManager:
|
|||
logger.error(f"Error updating bytes processed: {e}")
|
||||
|
||||
def _process_stream_data(self):
|
||||
"""Process stream data until disconnect or error"""
|
||||
"""Process stream data until disconnect or error - unified path for both transcode and HTTP"""
|
||||
try:
|
||||
if self.transcode:
|
||||
# Handle transcoded stream data
|
||||
while self.running and self.connected and not self.stop_requested and not self.needs_stream_switch:
|
||||
if self.fetch_chunk():
|
||||
self.last_data_time = time.time()
|
||||
else:
|
||||
if not self.running:
|
||||
break
|
||||
gevent.sleep(0.1) # REPLACE time.sleep(0.1)
|
||||
else:
|
||||
# Handle direct HTTP connection
|
||||
chunk_count = 0
|
||||
try:
|
||||
for chunk in self.current_response.iter_content(chunk_size=self.chunk_size):
|
||||
# Check if we've been asked to stop
|
||||
if self.stop_requested or self.url_switching or self.needs_stream_switch:
|
||||
break
|
||||
|
||||
if chunk:
|
||||
# Track chunk size before adding to buffer
|
||||
chunk_size = len(chunk)
|
||||
self._update_bytes_processed(chunk_size)
|
||||
|
||||
# Add chunk to buffer with TS packet alignment
|
||||
success = self.buffer.add_chunk(chunk)
|
||||
|
||||
if success:
|
||||
self.last_data_time = time.time()
|
||||
chunk_count += 1
|
||||
|
||||
# Update last data timestamp in Redis
|
||||
if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client:
|
||||
last_data_key = RedisKeys.last_data(self.buffer.channel_id)
|
||||
self.buffer.redis_client.set(last_data_key, str(time.time()), ex=60)
|
||||
except (AttributeError, ConnectionError) as e:
|
||||
if self.stop_requested or self.url_switching:
|
||||
logger.debug(f"Expected connection error during shutdown/URL switch for channel {self.channel_id}: {e}")
|
||||
else:
|
||||
logger.error(f"Unexpected stream error for channel {self.channel_id}: {e}")
|
||||
raise
|
||||
# Both transcode and HTTP now use the same subprocess/socket approach
|
||||
# This gives us perfect control: check flags between chunks, timeout just returns False
|
||||
while self.running and self.connected and not self.stop_requested and not self.needs_stream_switch:
|
||||
if self.fetch_chunk():
|
||||
self.last_data_time = time.time()
|
||||
else:
|
||||
# fetch_chunk() returned False - could be timeout, no data, or error
|
||||
if not self.running:
|
||||
break
|
||||
# Brief sleep before retry to avoid tight loop
|
||||
gevent.sleep(0.1)
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing stream data for channel {self.channel_id}: {e}", exc_info=True)
|
||||
|
||||
|
|
@ -948,6 +1043,7 @@ class StreamManager:
|
|||
|
||||
# Import both models for proper resource management
|
||||
from apps.channels.models import Stream, Channel
|
||||
from django.db import connection
|
||||
|
||||
# Update stream profile if we're switching streams
|
||||
if self.current_stream_id and stream_id and self.current_stream_id != stream_id:
|
||||
|
|
@ -965,9 +1061,17 @@ class StreamManager:
|
|||
logger.debug(f"Updated m3u profile for channel {self.channel_id} to use profile from stream {stream_id}")
|
||||
else:
|
||||
logger.warning(f"Failed to update stream profile for channel {self.channel_id}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error updating stream profile for channel {self.channel_id}: {e}")
|
||||
|
||||
finally:
|
||||
# Always close database connection after profile update
|
||||
try:
|
||||
connection.close()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# CRITICAL: Set a flag to prevent immediate reconnection with old URL
|
||||
self.url_switching = True
|
||||
self.url_switch_start_time = time.time()
|
||||
|
|
@ -1005,6 +1109,19 @@ class StreamManager:
|
|||
except Exception as e:
|
||||
logger.warning(f"Failed to reset buffer position: {e}")
|
||||
|
||||
# Log stream switch event
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=self.channel_id)
|
||||
log_system_event(
|
||||
'stream_switch',
|
||||
channel_id=self.channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
new_url=new_url[:100] if new_url else None,
|
||||
stream_id=stream_id
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not log stream switch event: {e}")
|
||||
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Error during URL update for channel {self.channel_id}: {e}", exc_info=True)
|
||||
|
|
@ -1123,6 +1240,19 @@ class StreamManager:
|
|||
if connection_result:
|
||||
self.connection_start_time = time.time()
|
||||
logger.info(f"Reconnect successful for channel {self.channel_id}")
|
||||
|
||||
# Log reconnection event
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=self.channel_id)
|
||||
log_system_event(
|
||||
'channel_reconnect',
|
||||
channel_id=self.channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
reason='health_monitor'
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not log reconnection event: {e}")
|
||||
|
||||
return True
|
||||
else:
|
||||
logger.warning(f"Reconnect failed for channel {self.channel_id}")
|
||||
|
|
@ -1183,6 +1313,15 @@ class StreamManager:
|
|||
if self.current_response or self.current_session:
|
||||
self._close_connection()
|
||||
|
||||
# Stop HTTP reader thread if it exists
|
||||
if hasattr(self, 'http_reader') and self.http_reader:
|
||||
try:
|
||||
logger.debug(f"Stopping HTTP reader thread for channel {self.channel_id}")
|
||||
self.http_reader.stop()
|
||||
self.http_reader = None
|
||||
except Exception as e:
|
||||
logger.debug(f"Error stopping HTTP reader for channel {self.channel_id}: {e}")
|
||||
|
||||
# Otherwise handle socket and transcode resources
|
||||
if self.socket:
|
||||
try:
|
||||
|
|
@ -1191,25 +1330,17 @@ class StreamManager:
|
|||
logger.debug(f"Error closing socket for channel {self.channel_id}: {e}")
|
||||
pass
|
||||
|
||||
# Enhanced transcode process cleanup with more aggressive termination
|
||||
# Enhanced transcode process cleanup with immediate termination
|
||||
if self.transcode_process:
|
||||
try:
|
||||
# First try polite termination
|
||||
logger.debug(f"Terminating transcode process for channel {self.channel_id}")
|
||||
self.transcode_process.terminate()
|
||||
logger.debug(f"Killing transcode process for channel {self.channel_id}")
|
||||
self.transcode_process.kill()
|
||||
|
||||
# Give it a short time to terminate gracefully
|
||||
# Give it a very short time to die
|
||||
try:
|
||||
self.transcode_process.wait(timeout=1.0)
|
||||
self.transcode_process.wait(timeout=0.5)
|
||||
except subprocess.TimeoutExpired:
|
||||
# If it doesn't terminate quickly, kill it
|
||||
logger.warning(f"Transcode process didn't terminate within timeout, killing forcefully for channel {self.channel_id}")
|
||||
self.transcode_process.kill()
|
||||
|
||||
try:
|
||||
self.transcode_process.wait(timeout=1.0)
|
||||
except subprocess.TimeoutExpired:
|
||||
logger.error(f"Failed to kill transcode process even with force for channel {self.channel_id}")
|
||||
logger.error(f"Failed to kill transcode process even with force for channel {self.channel_id}")
|
||||
except Exception as e:
|
||||
logger.debug(f"Error terminating transcode process for channel {self.channel_id}: {e}")
|
||||
|
||||
|
|
@ -1219,6 +1350,30 @@ class StreamManager:
|
|||
except Exception as e:
|
||||
logger.error(f"Final kill attempt failed for channel {self.channel_id}: {e}")
|
||||
|
||||
# Explicitly close all subprocess pipes to prevent file descriptor leaks
|
||||
try:
|
||||
if self.transcode_process.stdin:
|
||||
self.transcode_process.stdin.close()
|
||||
if self.transcode_process.stdout:
|
||||
self.transcode_process.stdout.close()
|
||||
if self.transcode_process.stderr:
|
||||
self.transcode_process.stderr.close()
|
||||
logger.debug(f"Closed all subprocess pipes for channel {self.channel_id}")
|
||||
except Exception as e:
|
||||
logger.debug(f"Error closing subprocess pipes for channel {self.channel_id}: {e}")
|
||||
|
||||
# Join stderr reader thread to ensure it's fully terminated
|
||||
if hasattr(self, 'stderr_reader_thread') and self.stderr_reader_thread and self.stderr_reader_thread.is_alive():
|
||||
try:
|
||||
logger.debug(f"Waiting for stderr reader thread to terminate for channel {self.channel_id}")
|
||||
self.stderr_reader_thread.join(timeout=2.0)
|
||||
if self.stderr_reader_thread.is_alive():
|
||||
logger.warning(f"Stderr reader thread did not terminate within timeout for channel {self.channel_id}")
|
||||
except Exception as e:
|
||||
logger.debug(f"Error joining stderr reader thread for channel {self.channel_id}: {e}")
|
||||
finally:
|
||||
self.stderr_reader_thread = None
|
||||
|
||||
self.transcode_process = None
|
||||
self.transcode_process_active = False # Reset the flag
|
||||
|
||||
|
|
@ -1250,7 +1405,7 @@ class StreamManager:
|
|||
|
||||
try:
|
||||
# Set timeout for chunk reads
|
||||
chunk_timeout = ConfigHelper.get('CHUNK_TIMEOUT', 10) # Default 10 seconds
|
||||
chunk_timeout = ConfigHelper.chunk_timeout() # Use centralized timeout configuration
|
||||
|
||||
try:
|
||||
# Handle different socket types with timeout
|
||||
|
|
@ -1333,7 +1488,17 @@ class StreamManager:
|
|||
# Only update if not already past connecting
|
||||
if not current_state or current_state in [ChannelState.INITIALIZING, ChannelState.CONNECTING]:
|
||||
# NEW CODE: Check if buffer has enough chunks
|
||||
current_buffer_index = getattr(self.buffer, 'index', 0)
|
||||
# IMPORTANT: Read from Redis, not local buffer.index, because in multi-worker setup
|
||||
# each worker has its own StreamBuffer instance with potentially stale local index
|
||||
buffer_index_key = RedisKeys.buffer_index(channel_id)
|
||||
current_buffer_index = 0
|
||||
try:
|
||||
redis_index = redis_client.get(buffer_index_key)
|
||||
if redis_index:
|
||||
current_buffer_index = int(redis_index)
|
||||
except Exception as e:
|
||||
logger.error(f"Error reading buffer index from Redis: {e}")
|
||||
|
||||
initial_chunks_needed = ConfigHelper.initial_behind_chunks()
|
||||
|
||||
if current_buffer_index < initial_chunks_needed:
|
||||
|
|
@@ -1381,10 +1546,21 @@ class StreamManager:
# Clean up completed timers
self._buffer_check_timers = [t for t in self._buffer_check_timers if t.is_alive()]

if hasattr(self.buffer, 'index') and hasattr(self.buffer, 'channel_id'):
    current_buffer_index = self.buffer.index
    initial_chunks_needed = getattr(Config, 'INITIAL_BEHIND_CHUNKS', 10)
if hasattr(self.buffer, 'channel_id') and hasattr(self.buffer, 'redis_client'):
    channel_id = self.buffer.channel_id
    redis_client = self.buffer.redis_client

    # IMPORTANT: Read from Redis, not local buffer.index
    buffer_index_key = RedisKeys.buffer_index(channel_id)
    current_buffer_index = 0
    try:
        redis_index = redis_client.get(buffer_index_key)
        if redis_index:
            current_buffer_index = int(redis_index)
    except Exception as e:
        logger.error(f"Error reading buffer index from Redis: {e}")

    initial_chunks_needed = ConfigHelper.initial_behind_chunks()  # Use ConfigHelper for consistency

    if current_buffer_index >= initial_chunks_needed:
        # We now have enough buffer, call _set_waiting_for_clients again
|
||||
|
|
@@ -1409,6 +1585,7 @@ class StreamManager:
def _try_next_stream(self):
    """
    Try to switch to the next available stream for this channel.
    Will iterate through multiple alternate streams if needed to find one with a different URL.

    Returns:
        bool: True if successfully switched to a new stream, False otherwise
|
||||
|
|
@ -1434,60 +1611,71 @@ class StreamManager:
|
|||
logger.warning(f"All {len(alternate_streams)} alternate streams have been tried for channel {self.channel_id}")
|
||||
return False
|
||||
|
||||
# Get the next stream to try
|
||||
next_stream = untried_streams[0]
|
||||
stream_id = next_stream['stream_id']
|
||||
profile_id = next_stream['profile_id'] # This is the M3U profile ID we need
|
||||
# IMPROVED: Try multiple streams until we find one with a different URL
|
||||
for next_stream in untried_streams:
|
||||
stream_id = next_stream['stream_id']
|
||||
profile_id = next_stream['profile_id'] # This is the M3U profile ID we need
|
||||
|
||||
# Add to tried streams
|
||||
self.tried_stream_ids.add(stream_id)
|
||||
# Add to tried streams
|
||||
self.tried_stream_ids.add(stream_id)
|
||||
|
||||
# Get stream info including URL using the profile_id we already have
|
||||
logger.info(f"Trying next stream ID {stream_id} with profile ID {profile_id} for channel {self.channel_id}")
|
||||
stream_info = get_stream_info_for_switch(self.channel_id, stream_id)
|
||||
# Get stream info including URL using the profile_id we already have
|
||||
logger.info(f"Trying next stream ID {stream_id} with profile ID {profile_id} for channel {self.channel_id}")
|
||||
stream_info = get_stream_info_for_switch(self.channel_id, stream_id)
|
||||
|
||||
if 'error' in stream_info or not stream_info.get('url'):
|
||||
logger.error(f"Error getting info for stream {stream_id} for channel {self.channel_id}: {stream_info.get('error', 'No URL')}")
|
||||
return False
|
||||
if 'error' in stream_info or not stream_info.get('url'):
|
||||
logger.error(f"Error getting info for stream {stream_id} for channel {self.channel_id}: {stream_info.get('error', 'No URL')}")
|
||||
continue # Try next stream instead of giving up
|
||||
|
||||
# Update URL and user agent
|
||||
new_url = stream_info['url']
|
||||
new_user_agent = stream_info['user_agent']
|
||||
new_transcode = stream_info['transcode']
|
||||
# Update URL and user agent
|
||||
new_url = stream_info['url']
|
||||
new_user_agent = stream_info['user_agent']
|
||||
new_transcode = stream_info['transcode']
|
||||
|
||||
logger.info(f"Switching from URL {self.url} to {new_url} for channel {self.channel_id}")
|
||||
# CRITICAL FIX: Check if the new URL is the same as current URL
|
||||
# This can happen when current_stream_id is None and we accidentally select the same stream
|
||||
if new_url == self.url:
|
||||
logger.warning(f"Stream ID {stream_id} generates the same URL as current stream ({new_url}). "
|
||||
f"Skipping this stream and trying next alternative.")
|
||||
continue # Try next stream instead of giving up
|
||||
|
||||
# IMPORTANT: Just update the URL, don't stop the channel or release resources
|
||||
switch_result = self.update_url(new_url, stream_id, profile_id)
|
||||
if not switch_result:
|
||||
logger.error(f"Failed to update URL for stream ID {stream_id} for channel {self.channel_id}")
|
||||
return False
|
||||
logger.info(f"Switching from URL {self.url} to {new_url} for channel {self.channel_id}")
|
||||
|
||||
# Update stream ID tracking
|
||||
self.current_stream_id = stream_id
|
||||
# IMPORTANT: Just update the URL, don't stop the channel or release resources
|
||||
switch_result = self.update_url(new_url, stream_id, profile_id)
|
||||
if not switch_result:
|
||||
logger.error(f"Failed to update URL for stream ID {stream_id} for channel {self.channel_id}")
|
||||
continue # Try next stream
|
||||
|
||||
# Store the new user agent and transcode settings
|
||||
self.user_agent = new_user_agent
|
||||
self.transcode = new_transcode
|
||||
# Update stream ID tracking
|
||||
self.current_stream_id = stream_id
|
||||
|
||||
# Update stream metadata in Redis - use the profile_id we got from get_alternate_streams
|
||||
if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client:
|
||||
metadata_key = RedisKeys.channel_metadata(self.channel_id)
|
||||
self.buffer.redis_client.hset(metadata_key, mapping={
|
||||
ChannelMetadataField.URL: new_url,
|
||||
ChannelMetadataField.USER_AGENT: new_user_agent,
|
||||
ChannelMetadataField.STREAM_PROFILE: stream_info['stream_profile'],
|
||||
ChannelMetadataField.M3U_PROFILE: str(profile_id), # Use the profile_id from get_alternate_streams
|
||||
ChannelMetadataField.STREAM_ID: str(stream_id),
|
||||
ChannelMetadataField.STREAM_SWITCH_TIME: str(time.time()),
|
||||
ChannelMetadataField.STREAM_SWITCH_REASON: "max_retries_exceeded"
|
||||
})
|
||||
# Store the new user agent and transcode settings
|
||||
self.user_agent = new_user_agent
|
||||
self.transcode = new_transcode
|
||||
|
||||
# Log the switch
|
||||
logger.info(f"Stream metadata updated for channel {self.channel_id} to stream ID {stream_id} with M3U profile {profile_id}")
|
||||
# Update stream metadata in Redis - use the profile_id we got from get_alternate_streams
|
||||
if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client:
|
||||
metadata_key = RedisKeys.channel_metadata(self.channel_id)
|
||||
self.buffer.redis_client.hset(metadata_key, mapping={
|
||||
ChannelMetadataField.URL: new_url,
|
||||
ChannelMetadataField.USER_AGENT: new_user_agent,
|
||||
ChannelMetadataField.STREAM_PROFILE: stream_info['stream_profile'],
|
||||
ChannelMetadataField.M3U_PROFILE: str(profile_id), # Use the profile_id from get_alternate_streams
|
||||
ChannelMetadataField.STREAM_ID: str(stream_id),
|
||||
ChannelMetadataField.STREAM_SWITCH_TIME: str(time.time()),
|
||||
ChannelMetadataField.STREAM_SWITCH_REASON: "max_retries_exceeded"
|
||||
})
|
||||
|
||||
logger.info(f"Successfully switched to stream ID {stream_id} with URL {new_url} for channel {self.channel_id}")
|
||||
return True
|
||||
# Log the switch
|
||||
logger.info(f"Stream metadata updated for channel {self.channel_id} to stream ID {stream_id} with M3U profile {profile_id}")
|
||||
|
||||
logger.info(f"Successfully switched to stream ID {stream_id} with URL {new_url} for channel {self.channel_id}")
|
||||
return True
|
||||
|
||||
# If we get here, we tried all streams but none worked
|
||||
logger.error(f"Tried {len(untried_streams)} alternate streams but none were suitable for channel {self.channel_id}")
|
||||
return False
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error trying next stream for channel {self.channel_id}: {e}", exc_info=True)
|
||||
|
|
|
|||
|
|
@@ -8,7 +8,7 @@ from typing import Optional, Tuple, List
from django.shortcuts import get_object_or_404
from apps.channels.models import Channel, Stream
from apps.m3u.models import M3UAccount, M3UAccountProfile
from core.models import UserAgent, CoreSettings
from core.models import UserAgent, CoreSettings, StreamProfile
from .utils import get_logger
from uuid import UUID
import requests
|
||||
|
|
@ -26,16 +26,100 @@ def get_stream_object(id: str):
|
|||
|
||||
def generate_stream_url(channel_id: str) -> Tuple[str, str, bool, Optional[int]]:
|
||||
"""
|
||||
Generate the appropriate stream URL for a channel based on its profile settings.
|
||||
Generate the appropriate stream URL for a channel or stream based on its profile settings.
|
||||
|
||||
Args:
|
||||
channel_id: The UUID of the channel
|
||||
channel_id: The UUID of the channel or stream hash
|
||||
|
||||
Returns:
|
||||
Tuple[str, str, bool, Optional[int]]: (stream_url, user_agent, transcode_flag, profile_id)
|
||||
"""
|
||||
try:
|
||||
channel = get_stream_object(channel_id)
|
||||
channel_or_stream = get_stream_object(channel_id)
|
||||
|
||||
# Handle direct stream preview (custom streams)
|
||||
if isinstance(channel_or_stream, Stream):
|
||||
from core.utils import RedisClient
|
||||
|
||||
stream = channel_or_stream
|
||||
logger.info(f"Previewing stream directly: {stream.id} ({stream.name})")
|
||||
|
||||
# For custom streams, we need to get the M3U account and profile
|
||||
m3u_account = stream.m3u_account
|
||||
if not m3u_account:
|
||||
logger.error(f"Stream {stream.id} has no M3U account")
|
||||
return None, None, False, None
|
||||
|
||||
# Get active profiles for this M3U account
|
||||
m3u_profiles = m3u_account.profiles.filter(is_active=True)
|
||||
default_profile = next((obj for obj in m3u_profiles if obj.is_default), None)
|
||||
|
||||
if not default_profile:
|
||||
logger.error(f"No default active profile found for M3U account {m3u_account.id}")
|
||||
return None, None, False, None
|
||||
|
||||
# Check profiles in order: default first, then others
|
||||
profiles = [default_profile] + [obj for obj in m3u_profiles if not obj.is_default]
|
||||
|
||||
# Try to find an available profile with connection capacity
|
||||
redis_client = RedisClient.get_client()
|
||||
selected_profile = None
|
||||
|
||||
for profile in profiles:
|
||||
logger.info(profile)
|
||||
|
||||
# Check connection availability
|
||||
if redis_client:
|
||||
profile_connections_key = f"profile_connections:{profile.id}"
|
||||
current_connections = int(redis_client.get(profile_connections_key) or 0)
|
||||
|
||||
# Check if profile has available slots (or unlimited connections)
|
||||
if profile.max_streams == 0 or current_connections < profile.max_streams:
|
||||
selected_profile = profile
|
||||
logger.debug(f"Selected profile {profile.id} with {current_connections}/{profile.max_streams} connections for stream preview")
|
||||
break
|
||||
else:
|
||||
logger.debug(f"Profile {profile.id} at max connections: {current_connections}/{profile.max_streams}")
|
||||
else:
|
||||
# No Redis available, use first active profile
|
||||
selected_profile = profile
|
||||
break
|
||||
|
||||
if not selected_profile:
|
||||
logger.error(f"No profiles available with connection capacity for M3U account {m3u_account.id}")
|
||||
return None, None, False, None
|
||||
|
||||
# Get the appropriate user agent
|
||||
stream_user_agent = m3u_account.get_user_agent().user_agent
|
||||
if stream_user_agent is None:
|
||||
stream_user_agent = UserAgent.objects.get(id=CoreSettings.get_default_user_agent_id())
|
||||
logger.debug(f"No user agent found for account, using default: {stream_user_agent}")
|
||||
|
||||
# Get stream URL with the selected profile's URL transformation
|
||||
stream_url = transform_url(stream.url, selected_profile.search_pattern, selected_profile.replace_pattern)
|
||||
|
||||
# Check if the stream has its own stream_profile set, otherwise use default
|
||||
if stream.stream_profile:
|
||||
stream_profile = stream.stream_profile
|
||||
logger.debug(f"Using stream's own stream profile: {stream_profile.name}")
|
||||
else:
|
||||
stream_profile = StreamProfile.objects.get(
|
||||
id=CoreSettings.get_default_stream_profile_id()
|
||||
)
|
||||
logger.debug(f"Using default stream profile: {stream_profile.name}")
|
||||
|
||||
# Check if transcoding is needed
|
||||
if stream_profile is None or stream_profile.is_proxy():
|
||||
transcode = False
|
||||
else:
|
||||
transcode = True
|
||||
|
||||
stream_profile_id = stream_profile.id
|
||||
|
||||
return stream_url, stream_user_agent, transcode, stream_profile_id
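# --- Illustrative sketch (not part of the diff) ---
# Profile selection above checks a per-profile connection counter in Redis and treats
# max_streams == 0 as "unlimited". The same capacity check in isolation, assuming a
# redis-py client and the profile_connections:<id> counter key used above:
def first_profile_with_capacity(profiles, redis_client):
    for profile in profiles:
        if redis_client is None:
            return profile  # no Redis: best effort, take the first active profile
        current = int(redis_client.get(f"profile_connections:{profile.id}") or 0)
        if profile.max_streams == 0 or current < profile.max_streams:
            return profile  # unlimited, or still below the configured cap
    return None  # every profile is at its connection limit
# --- End sketch ---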
|
||||
|
||||
# Handle channel preview (existing logic)
|
||||
channel = channel_or_stream
|
||||
|
||||
# Get stream and profile for this channel
|
||||
# Note: get_stream now returns 3 values (stream_id, profile_id, error_reason)
|
||||
|
|
@ -351,6 +435,9 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
|
|||
"""
|
||||
Validate if a stream URL is accessible without downloading the full content.
|
||||
|
||||
Note: UDP/RTP/RTSP streams are automatically considered valid as they cannot
|
||||
be validated via HTTP methods.
|
||||
|
||||
Args:
|
||||
url (str): The URL to validate
|
||||
user_agent (str): User agent to use for the request
|
||||
|
|
@ -359,6 +446,12 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
|
|||
Returns:
|
||||
tuple: (is_valid, final_url, status_code, message)
|
||||
"""
|
||||
# Check if URL uses non-HTTP protocols (UDP/RTP/RTSP)
|
||||
# These cannot be validated via HTTP methods, so we skip validation
|
||||
if url.startswith(('udp://', 'rtp://', 'rtsp://')):
|
||||
logger.info(f"Skipping HTTP validation for non-HTTP protocol: {url}")
|
||||
return True, url, 200, "Non-HTTP protocol (UDP/RTP/RTSP) - validation skipped"
|
||||
|
||||
try:
|
||||
# Create session with proper headers
|
||||
session = requests.Session()
|
||||
|
|
@ -369,16 +462,21 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
|
|||
session.headers.update(headers)
|
||||
|
||||
# Make HEAD request first as it's faster and doesn't download content
|
||||
head_response = session.head(
|
||||
url,
|
||||
timeout=timeout,
|
||||
allow_redirects=True
|
||||
)
|
||||
head_request_success = True
|
||||
try:
|
||||
head_response = session.head(
|
||||
url,
|
||||
timeout=timeout,
|
||||
allow_redirects=True
|
||||
)
|
||||
except requests.exceptions.RequestException as e:
|
||||
head_request_success = False
|
||||
logger.warning(f"Request error (HEAD), assuming HEAD not supported: {str(e)}")
|
||||
|
||||
# If HEAD not supported, server will return 405 or other error
|
||||
if 200 <= head_response.status_code < 300:
|
||||
if head_request_success and (200 <= head_response.status_code < 300):
|
||||
# HEAD request successful
|
||||
return True, head_response.url, head_response.status_code, "Valid (HEAD request)"
|
||||
return True, url, head_response.status_code, "Valid (HEAD request)"
|
||||
|
||||
# Try a GET request with stream=True to avoid downloading all content
|
||||
get_response = session.get(
|
||||
|
|
@ -391,7 +489,7 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
|
|||
# IMPORTANT: Check status code first before checking content
|
||||
if not (200 <= get_response.status_code < 300):
|
||||
logger.warning(f"Stream validation failed with HTTP status {get_response.status_code}")
|
||||
return False, get_response.url, get_response.status_code, f"Invalid HTTP status: {get_response.status_code}"
|
||||
return False, url, get_response.status_code, f"Invalid HTTP status: {get_response.status_code}"
|
||||
|
||||
# Only check content if status code is valid
|
||||
try:
|
||||
|
|
@ -445,7 +543,7 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
|
|||
get_response.close()
|
||||
|
||||
# If we have content, consider it valid even with unrecognized content type
|
||||
return is_valid, get_response.url, get_response.status_code, message
|
||||
return is_valid, url, get_response.status_code, message
|
||||
|
||||
except requests.exceptions.Timeout:
|
||||
return False, url, 0, "Timeout connecting to stream"
|
||||
|
|
|
|||
|
|
@ -7,19 +7,27 @@ logger = logging.getLogger("ts_proxy")
|
|||
|
||||
def detect_stream_type(url):
|
||||
"""
|
||||
Detect if stream URL is HLS or TS format.
|
||||
Detect if stream URL is HLS, RTSP/RTP, UDP, or TS format.
|
||||
|
||||
Args:
|
||||
url (str): The stream URL to analyze
|
||||
|
||||
Returns:
|
||||
str: 'hls' or 'ts' depending on detected format
|
||||
str: 'hls', 'rtsp', 'udp', or 'ts' depending on detected format
|
||||
"""
|
||||
if not url:
|
||||
return 'unknown'
|
||||
|
||||
url_lower = url.lower()
|
||||
|
||||
# Check for UDP streams (requires FFmpeg)
|
||||
if url_lower.startswith('udp://'):
|
||||
return 'udp'
|
||||
|
||||
# Check for RTSP/RTP streams (requires FFmpeg)
|
||||
if url_lower.startswith('rtsp://') or url_lower.startswith('rtp://'):
|
||||
return 'rtsp'
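# Illustrative examples (not part of the diff) of what detect_stream_type() returns for the
# checks above and the HLS check that follows, assuming the function as shown:
#   detect_stream_type('udp://239.255.0.1:1234')          -> 'udp'
#   detect_stream_type('rtsp://cam.local/stream1')        -> 'rtsp'
#   detect_stream_type('rtp://239.255.0.1:5004')          -> 'rtsp'
#   detect_stream_type('http://host/live/playlist.m3u8')  -> 'hls'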
|
||||
|
||||
# Look for common HLS indicators
|
||||
if (url_lower.endswith('.m3u8') or
|
||||
'.m3u8?' in url_lower or
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ import time
|
|||
import random
|
||||
import re
|
||||
import pathlib
|
||||
from django.http import StreamingHttpResponse, JsonResponse, HttpResponseRedirect
|
||||
from django.http import StreamingHttpResponse, JsonResponse, HttpResponseRedirect, HttpResponse
|
||||
from django.views.decorators.csrf import csrf_exempt
|
||||
from django.shortcuts import get_object_or_404
|
||||
from apps.proxy.config import TSConfig as Config
|
||||
|
|
@ -84,11 +84,18 @@ def stream_ts(request, channel_id):
|
|||
if state_field in metadata:
|
||||
channel_state = metadata[state_field].decode("utf-8")
|
||||
|
||||
if channel_state:
|
||||
# Channel is being initialized or already active - no need for reinitialization
|
||||
# Active/running states - channel is operational, don't reinitialize
|
||||
if channel_state in [
|
||||
ChannelState.ACTIVE,
|
||||
ChannelState.WAITING_FOR_CLIENTS,
|
||||
ChannelState.BUFFERING,
|
||||
ChannelState.INITIALIZING,
|
||||
ChannelState.CONNECTING,
|
||||
ChannelState.STOPPING,
|
||||
]:
|
||||
needs_initialization = False
|
||||
logger.debug(
|
||||
f"[{client_id}] Channel {channel_id} already in state {channel_state}, skipping initialization"
|
||||
f"[{client_id}] Channel {channel_id} in state {channel_state}, skipping initialization"
|
||||
)
|
||||
|
||||
# Special handling for initializing/connecting states
|
||||
|
|
@ -98,19 +105,34 @@ def stream_ts(request, channel_id):
|
|||
]:
|
||||
channel_initializing = True
|
||||
logger.debug(
|
||||
f"[{client_id}] Channel {channel_id} is still initializing, client will wait for completion"
|
||||
f"[{client_id}] Channel {channel_id} is still initializing, client will wait"
|
||||
)
|
||||
# Terminal states - channel needs cleanup before reinitialization
|
||||
elif channel_state in [
|
||||
ChannelState.ERROR,
|
||||
ChannelState.STOPPED,
|
||||
]:
|
||||
needs_initialization = True
|
||||
logger.info(
|
||||
f"[{client_id}] Channel {channel_id} in terminal state {channel_state}, will reinitialize"
|
||||
)
|
||||
# Unknown/empty state - check if owner is alive
|
||||
else:
|
||||
# Only check for owner if channel is in a valid state
|
||||
owner_field = ChannelMetadataField.OWNER.encode("utf-8")
|
||||
if owner_field in metadata:
|
||||
owner = metadata[owner_field].decode("utf-8")
|
||||
owner_heartbeat_key = f"ts_proxy:worker:{owner}:heartbeat"
|
||||
if proxy_server.redis_client.exists(owner_heartbeat_key):
|
||||
# Owner is still active, so we don't need to reinitialize
|
||||
# Owner is still active with unknown state - don't reinitialize
|
||||
needs_initialization = False
|
||||
logger.debug(
|
||||
f"[{client_id}] Channel {channel_id} has active owner {owner}"
|
||||
f"[{client_id}] Channel {channel_id} has active owner {owner}, skipping init"
|
||||
)
|
||||
else:
|
||||
# Owner dead - needs reinitialization
|
||||
needs_initialization = True
|
||||
logger.warning(
|
||||
f"[{client_id}] Channel {channel_id} owner {owner} is dead, will reinitialize"
|
||||
)
|
||||
|
||||
# Start initialization if needed
|
||||
|
|
@ -128,7 +150,7 @@ def stream_ts(request, channel_id):
|
|||
ChannelService.stop_channel(channel_id)
|
||||
|
||||
# Use fixed retry interval and timeout
|
||||
retry_timeout = 1.5 # 1.5 seconds total timeout
|
||||
retry_timeout = 3 # 3 seconds total timeout
|
||||
retry_interval = 0.1 # 100ms between attempts
|
||||
wait_start_time = time.time()
|
||||
|
||||
|
|
@ -138,9 +160,10 @@ def stream_ts(request, channel_id):
|
|||
profile_value = None
|
||||
error_reason = None
|
||||
attempt = 0
|
||||
should_retry = True
|
||||
|
||||
# Try to get a stream with fixed interval retries
|
||||
while time.time() - wait_start_time < retry_timeout:
|
||||
while should_retry and time.time() - wait_start_time < retry_timeout:
|
||||
attempt += 1
|
||||
stream_url, stream_user_agent, transcode, profile_value = (
|
||||
generate_stream_url(channel_id)
|
||||
|
|
@ -152,35 +175,53 @@ def stream_ts(request, channel_id):
|
|||
)
|
||||
break
|
||||
|
||||
# If we failed because there are no streams assigned, don't retry
|
||||
_, _, error_reason = channel.get_stream()
|
||||
if error_reason and "maximum connection limits" not in error_reason:
|
||||
logger.warning(
|
||||
f"[{client_id}] Can't retry - error not related to connection limits: {error_reason}"
|
||||
# On first failure, check if the error is retryable
|
||||
if attempt == 1:
|
||||
_, _, error_reason = channel.get_stream()
|
||||
if error_reason and "maximum connection limits" not in error_reason:
|
||||
logger.warning(
|
||||
f"[{client_id}] Can't retry - error not related to connection limits: {error_reason}"
|
||||
)
|
||||
should_retry = False
|
||||
break
|
||||
|
||||
# Check if we have time remaining for another sleep cycle
|
||||
elapsed_time = time.time() - wait_start_time
|
||||
remaining_time = retry_timeout - elapsed_time
|
||||
|
||||
# If we don't have enough time for the next sleep interval, break
|
||||
# but only after we've already made an attempt (the while condition will try one more time)
|
||||
if remaining_time <= retry_interval:
|
||||
logger.info(
|
||||
f"[{client_id}] Insufficient time ({remaining_time:.1f}s) for another sleep cycle, will make one final attempt"
|
||||
)
|
||||
break
|
||||
|
||||
# Wait 100ms before retrying
|
||||
elapsed_time = time.time() - wait_start_time
|
||||
remaining_time = retry_timeout - elapsed_time
|
||||
if remaining_time > retry_interval:
|
||||
# Wait before retrying
|
||||
logger.info(
|
||||
f"[{client_id}] Waiting {retry_interval*1000:.0f}ms for a connection to become available (attempt {attempt}, {remaining_time:.1f}s remaining)"
|
||||
)
|
||||
gevent.sleep(retry_interval)
|
||||
retry_interval += 0.025 # Increase wait time by 25ms for next attempt
|
||||
|
||||
# Make one final attempt if we still don't have a stream, should retry, and haven't exceeded timeout
|
||||
if stream_url is None and should_retry and time.time() - wait_start_time < retry_timeout:
|
||||
attempt += 1
|
||||
logger.info(
|
||||
f"[{client_id}] Making final attempt {attempt} at timeout boundary"
|
||||
)
|
||||
stream_url, stream_user_agent, transcode, profile_value = (
|
||||
generate_stream_url(channel_id)
|
||||
)
|
||||
if stream_url is not None:
|
||||
logger.info(
|
||||
f"[{client_id}] Waiting {retry_interval*1000:.0f}ms for a connection to become available (attempt {attempt}, {remaining_time:.1f}s remaining)"
|
||||
f"[{client_id}] Successfully obtained stream on final attempt for channel {channel_id}"
|
||||
)
|
||||
gevent.sleep(retry_interval)
|
||||
retry_interval += 0.025 # Increase wait time by 25ms for next attempt
|
||||
|
||||
if stream_url is None:
|
||||
# Make sure to release any stream locks that might have been acquired
|
||||
if hasattr(channel, "streams") and channel.streams.exists():
|
||||
for stream in channel.streams.all():
|
||||
try:
|
||||
stream.release_stream()
|
||||
logger.info(
|
||||
f"[{client_id}] Released stream {stream.id} for channel {channel_id}"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"[{client_id}] Error releasing stream: {e}")
|
||||
# Release the channel's stream lock if one was acquired
|
||||
# Note: Only call this if get_stream() actually assigned a stream
|
||||
# In our case, if stream_url is None, no stream was ever assigned, so don't release
|
||||
|
||||
# Get the specific error message if available
|
||||
wait_duration = f"{int(time.time() - wait_start_time)}s"
|
||||
|
|
@ -189,6 +230,9 @@ def stream_ts(request, channel_id):
|
|||
if error_reason
|
||||
else "No available streams for this channel"
|
||||
)
|
||||
logger.info(
|
||||
f"[{client_id}] Failed to obtain stream after {attempt} attempts over {wait_duration}: {error_msg}"
|
||||
)
|
||||
return JsonResponse(
|
||||
{"error": error_msg, "waited": wait_duration}, status=503
|
||||
) # 503 Service Unavailable is appropriate here
|
||||
|
|
@ -270,6 +314,15 @@ def stream_ts(request, channel_id):
|
|||
logger.info(
|
||||
f"[{client_id}] Redirecting to validated URL: {final_url} ({message})"
|
||||
)
|
||||
|
||||
# For non-HTTP protocols (RTSP/RTP/UDP), we need to manually create the redirect
|
||||
# because Django's HttpResponseRedirect blocks them for security
|
||||
if final_url.startswith(('rtsp://', 'rtp://', 'udp://')):
|
||||
logger.info(f"[{client_id}] Using manual redirect for non-HTTP protocol")
|
||||
response = HttpResponse(status=301)
|
||||
response['Location'] = final_url
|
||||
return response
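# Note (not part of the diff): as of current Django versions, HttpResponseRedirect validates
# the target scheme and raises DisallowedRedirect for anything outside its allowed_schemes
# (by default http, https and ftp). That is why rtsp/rtp/udp targets are answered with a
# hand-built 301 and an explicit Location header instead of HttpResponseRedirect.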
|
||||
|
||||
return HttpResponseRedirect(final_url)
|
||||
else:
|
||||
logger.error(
|
||||
|
|
@@ -474,24 +527,33 @@ def stream_xc(request, username, password, channel_id):
    print(f"Fetching channel with ID: {channel_id}")
    if user.user_level < 10:
        filters = {
            "id": int(channel_id),
            "channelprofilemembership__enabled": True,
            "user_level__lte": user.user_level,
        }
        user_profile_count = user.channel_profiles.count()

        if user.channel_profiles.count() > 0:
            channel_profiles = user.channel_profiles.all()
            filters["channelprofilemembership__channel_profile__in"] = channel_profiles
        # If user has ALL profiles or NO profiles, give unrestricted access
        if user_profile_count == 0:
            # No profile filtering - user sees all channels based on user_level
            filters = {
                "id": int(channel_id),
                "user_level__lte": user.user_level
            }
            channel = Channel.objects.filter(**filters).first()
        else:
            # User has specific limited profiles assigned
            filters = {
                "id": int(channel_id),
                "channelprofilemembership__enabled": True,
                "user_level__lte": user.user_level,
                "channelprofilemembership__channel_profile__in": user.channel_profiles.all()
            }
            channel = Channel.objects.filter(**filters).distinct().first()

        channel = Channel.objects.filter(**filters).distinct().first()
        if not channel:
            return JsonResponse({"error": "Not found"}, status=404)
    else:
        channel = get_object_or_404(Channel, id=channel_id)

    # @TODO: we've got the file 'type' via extension, support this when we support multiple outputs
    return stream_ts(request._request, channel.uuid)
    return stream_ts(request._request, str(channel.uuid))
|
||||
|
||||
|
||||
@csrf_exempt
|
||||
|
|
|
|||
|
|
@ -97,7 +97,11 @@ class PersistentVODConnection:
|
|||
# First check if we have a pre-stored content length from HEAD request
|
||||
try:
|
||||
import redis
|
||||
r = redis.StrictRedis(host='localhost', port=6379, db=0, decode_responses=True)
|
||||
from django.conf import settings
|
||||
redis_host = getattr(settings, 'REDIS_HOST', 'localhost')
|
||||
redis_port = int(getattr(settings, 'REDIS_PORT', 6379))
|
||||
redis_db = int(getattr(settings, 'REDIS_DB', 0))
|
||||
r = redis.StrictRedis(host=redis_host, port=redis_port, db=redis_db, decode_responses=True)
|
||||
content_length_key = f"vod_content_length:{self.session_id}"
|
||||
stored_length = r.get(content_length_key)
|
||||
if stored_length:
|
||||
|
|
|
|||
|
|
@ -24,6 +24,11 @@ from apps.m3u.models import M3UAccountProfile
|
|||
logger = logging.getLogger("vod_proxy")
|
||||
|
||||
|
||||
def get_vod_client_stop_key(client_id):
|
||||
"""Get the Redis key for signaling a VOD client to stop"""
|
||||
return f"vod_proxy:client:{client_id}:stop"
|
||||
|
||||
|
||||
def infer_content_type_from_url(url: str) -> Optional[str]:
|
||||
"""
|
||||
Infer MIME type from file extension in URL
|
||||
|
|
@ -352,12 +357,12 @@ class RedisBackedVODConnection:
|
|||
|
||||
logger.info(f"[{self.session_id}] Making request #{state.request_count} to {'final' if state.final_url else 'original'} URL")
|
||||
|
||||
# Make request
|
||||
# Make request (10s connect, 10s read timeout - keeps lock time reasonable if client disconnects)
|
||||
response = self.local_session.get(
|
||||
target_url,
|
||||
headers=headers,
|
||||
stream=True,
|
||||
timeout=(10, 30),
|
||||
timeout=(10, 10),
|
||||
allow_redirects=allow_redirects
|
||||
)
|
||||
response.raise_for_status()
|
||||
|
|
@ -707,6 +712,10 @@ class MultiWorkerVODConnectionManager:
|
|||
content_name = content_obj.name if hasattr(content_obj, 'name') else str(content_obj)
|
||||
client_id = session_id
|
||||
|
||||
# Track whether we incremented profile connections (for cleanup on error)
|
||||
profile_connections_incremented = False
|
||||
redis_connection = None
|
||||
|
||||
logger.info(f"[{client_id}] Worker {self.worker_id} - Redis-backed streaming request for {content_type} {content_name}")
|
||||
|
||||
try:
|
||||
|
|
@ -797,6 +806,7 @@ class MultiWorkerVODConnectionManager:
|
|||
|
||||
# Increment profile connections after successful connection creation
|
||||
self._increment_profile_connections(m3u_profile)
|
||||
profile_connections_incremented = True
|
||||
|
||||
logger.info(f"[{client_id}] Worker {self.worker_id} - Created consolidated connection with session metadata")
|
||||
else:
|
||||
|
|
@ -832,6 +842,7 @@ class MultiWorkerVODConnectionManager:
|
|||
# Create streaming generator
|
||||
def stream_generator():
|
||||
decremented = False
|
||||
stop_signal_detected = False
|
||||
try:
|
||||
logger.info(f"[{client_id}] Worker {self.worker_id} - Starting Redis-backed stream")
|
||||
|
||||
|
|
@ -846,14 +857,25 @@ class MultiWorkerVODConnectionManager:
|
|||
bytes_sent = 0
|
||||
chunk_count = 0
|
||||
|
||||
# Get the stop signal key for this client
|
||||
stop_key = get_vod_client_stop_key(client_id)
|
||||
|
||||
for chunk in upstream_response.iter_content(chunk_size=8192):
|
||||
if chunk:
|
||||
yield chunk
|
||||
bytes_sent += len(chunk)
|
||||
chunk_count += 1
|
||||
|
||||
# Update activity every 100 chunks in consolidated connection state
|
||||
# Check for stop signal every 100 chunks
|
||||
if chunk_count % 100 == 0:
|
||||
# Check if stop signal has been set
|
||||
if self.redis_client and self.redis_client.exists(stop_key):
|
||||
logger.info(f"[{client_id}] Worker {self.worker_id} - Stop signal detected, terminating stream")
|
||||
# Delete the stop key
|
||||
self.redis_client.delete(stop_key)
|
||||
stop_signal_detected = True
|
||||
break
|
||||
|
||||
# Update the connection state
|
||||
logger.debug(f"Client: [{client_id}] Worker: {self.worker_id} sent {chunk_count} chunks for VOD: {content_name}")
|
||||
if redis_connection._acquire_lock():
|
||||
|
|
@ -867,7 +889,10 @@ class MultiWorkerVODConnectionManager:
|
|||
finally:
|
||||
redis_connection._release_lock()
|
||||
|
||||
logger.info(f"[{client_id}] Worker {self.worker_id} - Redis-backed stream completed: {bytes_sent} bytes sent")
|
||||
if stop_signal_detected:
|
||||
logger.info(f"[{client_id}] Worker {self.worker_id} - Stream stopped by signal: {bytes_sent} bytes sent")
|
||||
else:
|
||||
logger.info(f"[{client_id}] Worker {self.worker_id} - Redis-backed stream completed: {bytes_sent} bytes sent")
|
||||
redis_connection.decrement_active_streams()
|
||||
decremented = True
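# --- Illustrative sketch (not part of the diff) ---
# The stop mechanism above is cooperative: an admin endpoint sets a short-lived Redis key,
# and the streaming generator polls that key every 100 chunks and breaks out when it appears.
# The pattern in isolation, assuming a redis-py client and the vod_proxy:client:<id>:stop key
# defined earlier in this diff:
def relay_chunks(upstream_iter, redis_client, client_id, check_every=100):
    stop_key = f"vod_proxy:client:{client_id}:stop"
    for count, chunk in enumerate(upstream_iter, start=1):
        yield chunk
        if count % check_every == 0 and redis_client.exists(stop_key):
            redis_client.delete(stop_key)  # consume the signal so it does not linger
            break                          # terminate the stream cooperatively
# --- End sketch ---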
|
||||
|
||||
|
|
@ -1004,6 +1029,19 @@ class MultiWorkerVODConnectionManager:
|
|||
|
||||
except Exception as e:
|
||||
logger.error(f"[{client_id}] Worker {self.worker_id} - Error in Redis-backed stream_content_with_session: {e}", exc_info=True)
|
||||
|
||||
# Decrement profile connections if we incremented them but failed before streaming started
|
||||
if profile_connections_incremented:
|
||||
logger.info(f"[{client_id}] Connection error occurred after profile increment - decrementing profile connections")
|
||||
self._decrement_profile_connections(m3u_profile.id)
|
||||
|
||||
# Also clean up the Redis connection state since we won't be using it
|
||||
if redis_connection:
|
||||
try:
|
||||
redis_connection.cleanup(connection_manager=self, current_worker_id=self.worker_id)
|
||||
except Exception as cleanup_error:
|
||||
logger.error(f"[{client_id}] Error during cleanup after connection failure: {cleanup_error}")
|
||||
|
||||
return HttpResponse(f"Streaming error: {str(e)}", status=500)
|
||||
|
||||
def _apply_timeshift_parameters(self, original_url, utc_start=None, utc_end=None, offset=None):
|
||||
|
|
|
|||
|
|
@ -21,4 +21,7 @@ urlpatterns = [
|
|||
|
||||
# VOD Stats
|
||||
path('stats/', views.VODStatsView.as_view(), name='vod_stats'),
|
||||
|
||||
# Stop VOD client connection
|
||||
path('stop_client/', views.stop_vod_client, name='stop_vod_client'),
|
||||
]
|
||||
|
|
|
|||
|
|
@ -15,7 +15,7 @@ from django.views import View
|
|||
from apps.vod.models import Movie, Series, Episode
|
||||
from apps.m3u.models import M3UAccount, M3UAccountProfile
|
||||
from apps.proxy.vod_proxy.connection_manager import VODConnectionManager
|
||||
from apps.proxy.vod_proxy.multi_worker_connection_manager import MultiWorkerVODConnectionManager, infer_content_type_from_url
|
||||
from apps.proxy.vod_proxy.multi_worker_connection_manager import MultiWorkerVODConnectionManager, infer_content_type_from_url, get_vod_client_stop_key
|
||||
from .utils import get_client_info, create_vod_response
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
|
@ -329,7 +329,11 @@ class VODStreamView(View):
|
|||
# Store the total content length in Redis for the persistent connection to use
|
||||
try:
|
||||
import redis
|
||||
r = redis.StrictRedis(host='localhost', port=6379, db=0, decode_responses=True)
|
||||
from django.conf import settings
|
||||
redis_host = getattr(settings, 'REDIS_HOST', 'localhost')
|
||||
redis_port = int(getattr(settings, 'REDIS_PORT', 6379))
|
||||
redis_db = int(getattr(settings, 'REDIS_DB', 0))
|
||||
r = redis.StrictRedis(host=redis_host, port=redis_port, db=redis_db, decode_responses=True)
|
||||
content_length_key = f"vod_content_length:{session_id}"
|
||||
r.set(content_length_key, total_size, ex=1800) # Store for 30 minutes
|
||||
logger.info(f"[VOD-HEAD] Stored total content length {total_size} for session {session_id}")
|
||||
|
|
@ -1011,3 +1015,59 @@ class VODStatsView(View):
|
|||
except Exception as e:
|
||||
logger.error(f"Error getting VOD stats: {e}")
|
||||
return JsonResponse({'error': str(e)}, status=500)
|
||||
|
||||
|
||||
from rest_framework.decorators import api_view, permission_classes
|
||||
from apps.accounts.permissions import IsAdmin
|
||||
|
||||
|
||||
@csrf_exempt
|
||||
@api_view(["POST"])
|
||||
@permission_classes([IsAdmin])
|
||||
def stop_vod_client(request):
|
||||
"""Stop a specific VOD client connection using stop signal mechanism"""
|
||||
try:
|
||||
# Parse request body
|
||||
import json
|
||||
try:
|
||||
data = json.loads(request.body)
|
||||
except json.JSONDecodeError:
|
||||
return JsonResponse({'error': 'Invalid JSON'}, status=400)
|
||||
|
||||
client_id = data.get('client_id')
|
||||
if not client_id:
|
||||
return JsonResponse({'error': 'No client_id provided'}, status=400)
|
||||
|
||||
logger.info(f"Request to stop VOD client: {client_id}")
|
||||
|
||||
# Get Redis client
|
||||
connection_manager = MultiWorkerVODConnectionManager.get_instance()
|
||||
redis_client = connection_manager.redis_client
|
||||
|
||||
if not redis_client:
|
||||
return JsonResponse({'error': 'Redis not available'}, status=500)
|
||||
|
||||
# Check if connection exists
|
||||
connection_key = f"vod_persistent_connection:{client_id}"
|
||||
connection_data = redis_client.hgetall(connection_key)
|
||||
if not connection_data:
|
||||
logger.warning(f"VOD connection not found: {client_id}")
|
||||
return JsonResponse({'error': 'Connection not found'}, status=404)
|
||||
|
||||
# Set a stop signal key that the worker will check
|
||||
stop_key = get_vod_client_stop_key(client_id)
|
||||
redis_client.setex(stop_key, 60, "true") # 60 second TTL
|
||||
|
||||
logger.info(f"Set stop signal for VOD client: {client_id}")
|
||||
|
||||
return JsonResponse({
|
||||
'message': 'VOD client stop signal sent',
|
||||
'client_id': client_id,
|
||||
'stop_key': stop_key
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error stopping VOD client: {e}", exc_info=True)
|
||||
return JsonResponse({'error': str(e)}, status=500)
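# --- Illustrative usage (not part of the diff) ---
# Calling the endpoint above from an admin session. The URL prefix in front of stop_client/
# depends on how this urls.py module is included, so host and path here are only examples:
import requests

resp = requests.post(
    "http://dispatcharr.local/proxy/vod/stop_client/",  # hypothetical host and prefix
    json={"client_id": "some-session-uuid"},            # placeholder client/session id
    headers={"Authorization": "Bearer <admin JWT>"},    # IsAdmin permission is required
    timeout=5,
)
print(resp.status_code, resp.json())
# --- End sketch ---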
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ from .api_views import (
|
|||
SeriesViewSet,
|
||||
VODCategoryViewSet,
|
||||
UnifiedContentViewSet,
|
||||
VODLogoViewSet,
|
||||
)
|
||||
|
||||
app_name = 'vod'
|
||||
|
|
@ -16,5 +17,6 @@ router.register(r'episodes', EpisodeViewSet, basename='episode')
|
|||
router.register(r'series', SeriesViewSet, basename='series')
|
||||
router.register(r'categories', VODCategoryViewSet, basename='vodcategory')
|
||||
router.register(r'all', UnifiedContentViewSet, basename='unified-content')
|
||||
router.register(r'vodlogos', VODLogoViewSet, basename='vodlogo')
|
||||
|
||||
urlpatterns = router.urls
|
||||
|
|
|
|||
|
|
@ -3,23 +3,29 @@ from rest_framework.response import Response
|
|||
from rest_framework.decorators import action
|
||||
from rest_framework.filters import SearchFilter, OrderingFilter
|
||||
from rest_framework.pagination import PageNumberPagination
|
||||
from rest_framework.permissions import AllowAny
|
||||
from django_filters.rest_framework import DjangoFilterBackend
|
||||
from django.shortcuts import get_object_or_404
|
||||
from django.http import StreamingHttpResponse, HttpResponse, FileResponse
|
||||
from django.db.models import Q
|
||||
import django_filters
|
||||
import logging
|
||||
import os
|
||||
import requests
|
||||
from apps.accounts.permissions import (
|
||||
Authenticated,
|
||||
permission_classes_by_action,
|
||||
)
|
||||
from .models import (
|
||||
Series, VODCategory, Movie, Episode,
|
||||
M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation
|
||||
Series, VODCategory, Movie, Episode, VODLogo,
|
||||
M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation, M3UVODCategoryRelation
|
||||
)
|
||||
from .serializers import (
|
||||
MovieSerializer,
|
||||
EpisodeSerializer,
|
||||
SeriesSerializer,
|
||||
VODCategorySerializer,
|
||||
VODLogoSerializer,
|
||||
M3UMovieRelationSerializer,
|
||||
M3USeriesRelationSerializer,
|
||||
M3UEpisodeRelationSerializer
|
||||
|
|
@ -56,7 +62,7 @@ class MovieFilter(django_filters.FilterSet):
|
|||
|
||||
# Handle the format 'category_name|category_type'
|
||||
if '|' in value:
|
||||
category_name, category_type = value.split('|', 1)
|
||||
category_name, category_type = value.rsplit('|', 1)
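# Example (not part of the diff) of why rsplit('|', 1) is used here: category names may
# themselves contain '|', and only the trailing segment is the category type, e.g.
#   "Action | Adventure|movie".rsplit('|', 1) -> ["Action | Adventure", "movie"]
#   "Action | Adventure|movie".split('|', 1)  -> ["Action ", " Adventure|movie"]  (wrong)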
|
||||
return queryset.filter(
|
||||
m3u_relations__category__name=category_name,
|
||||
m3u_relations__category__category_type=category_type
|
||||
|
|
@ -213,7 +219,7 @@ class SeriesFilter(django_filters.FilterSet):
|
|||
|
||||
# Handle the format 'category_name|category_type'
|
||||
if '|' in value:
|
||||
category_name, category_type = value.split('|', 1)
|
||||
category_name, category_type = value.rsplit('|', 1)
|
||||
return queryset.filter(
|
||||
m3u_relations__category__name=category_name,
|
||||
m3u_relations__category__category_type=category_type
|
||||
|
|
@ -470,6 +476,59 @@ class VODCategoryViewSet(viewsets.ReadOnlyModelViewSet):
|
|||
except KeyError:
|
||||
return [Authenticated()]
|
||||
|
||||
def list(self, request, *args, **kwargs):
|
||||
"""Override list to ensure Uncategorized categories and relations exist for all XC accounts with VOD enabled"""
|
||||
from apps.m3u.models import M3UAccount
|
||||
|
||||
# Ensure Uncategorized categories exist
|
||||
movie_category, _ = VODCategory.objects.get_or_create(
|
||||
name="Uncategorized",
|
||||
category_type="movie",
|
||||
defaults={}
|
||||
)
|
||||
|
||||
series_category, _ = VODCategory.objects.get_or_create(
|
||||
name="Uncategorized",
|
||||
category_type="series",
|
||||
defaults={}
|
||||
)
|
||||
|
||||
# Get all active XC accounts with VOD enabled
|
||||
xc_accounts = M3UAccount.objects.filter(
|
||||
account_type=M3UAccount.Types.XC,
|
||||
is_active=True
|
||||
)
|
||||
|
||||
for account in xc_accounts:
|
||||
if account.custom_properties:
|
||||
custom_props = account.custom_properties or {}
|
||||
vod_enabled = custom_props.get("enable_vod", False)
|
||||
|
||||
if vod_enabled:
|
||||
# Ensure relations exist for this account
|
||||
auto_enable_new = custom_props.get("auto_enable_new_groups_vod", True)
|
||||
|
||||
M3UVODCategoryRelation.objects.get_or_create(
|
||||
category=movie_category,
|
||||
m3u_account=account,
|
||||
defaults={
|
||||
'enabled': auto_enable_new,
|
||||
'custom_properties': {}
|
||||
}
|
||||
)
|
||||
|
||||
M3UVODCategoryRelation.objects.get_or_create(
|
||||
category=series_category,
|
||||
m3u_account=account,
|
||||
defaults={
|
||||
'enabled': auto_enable_new,
|
||||
'custom_properties': {}
|
||||
}
|
||||
)
|
||||
|
||||
# Now proceed with normal list operation
|
||||
return super().list(request, *args, **kwargs)
|
||||
|
||||
|
||||
class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet):
|
||||
"""ViewSet that combines Movies and Series for unified 'All' view"""
|
||||
|
|
@ -529,7 +588,7 @@ class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet):
|
|||
|
||||
if category:
|
||||
if '|' in category:
|
||||
cat_name, cat_type = category.split('|', 1)
|
||||
cat_name, cat_type = category.rsplit('|', 1)
|
||||
if cat_type == 'movie':
|
||||
where_conditions[0] += " AND movies.id IN (SELECT movie_id FROM vod_m3umovierelation mmr JOIN vod_vodcategory c ON mmr.category_id = c.id WHERE c.name = %s)"
|
||||
where_conditions[1] = "1=0" # Exclude series
|
||||
|
|
@ -564,7 +623,7 @@ class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet):
|
|||
logo.url as logo_url,
|
||||
'movie' as content_type
|
||||
FROM vod_movie movies
|
||||
LEFT JOIN dispatcharr_channels_logo logo ON movies.logo_id = logo.id
|
||||
LEFT JOIN vod_vodlogo logo ON movies.logo_id = logo.id
|
||||
WHERE {where_conditions[0]}
|
||||
|
||||
UNION ALL
|
||||
|
|
@ -586,7 +645,7 @@ class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet):
|
|||
logo.url as logo_url,
|
||||
'series' as content_type
|
||||
FROM vod_series series
|
||||
LEFT JOIN dispatcharr_channels_logo logo ON series.logo_id = logo.id
|
||||
LEFT JOIN vod_vodlogo logo ON series.logo_id = logo.id
|
||||
WHERE {where_conditions[1]}
|
||||
)
|
||||
SELECT * FROM unified_content
|
||||
|
|
@ -613,10 +672,10 @@ class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet):
|
|||
'id': item_dict['logo_id'],
|
||||
'name': item_dict['logo_name'],
|
||||
'url': item_dict['logo_url'],
|
||||
'cache_url': f"/media/logo_cache/{item_dict['logo_id']}.png" if item_dict['logo_id'] else None,
|
||||
'channel_count': 0, # We don't need this for VOD
|
||||
'is_used': True,
|
||||
'channel_names': [] # We don't need this for VOD
|
||||
'cache_url': f"/api/vod/vodlogos/{item_dict['logo_id']}/cache/",
|
||||
'movie_count': 0, # We don't calculate this in raw SQL
|
||||
'series_count': 0, # We don't calculate this in raw SQL
|
||||
'is_used': True
|
||||
}
|
||||
|
||||
# Convert to the format expected by frontend
|
||||
|
|
@ -668,4 +727,173 @@ class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet):
|
|||
logger.error(f"Error in UnifiedContentViewSet.list(): {e}")
|
||||
import traceback
|
||||
logger.error(traceback.format_exc())
|
||||
return Response({'error': str(e)}, status=500)
|
||||
return Response({'error': str(e)}, status=500)
|
||||
|
||||
|
||||
class VODLogoPagination(PageNumberPagination):
|
||||
page_size = 100
|
||||
page_size_query_param = "page_size"
|
||||
max_page_size = 1000
|
||||
|
||||
|
||||
class VODLogoViewSet(viewsets.ModelViewSet):
|
||||
"""ViewSet for VOD Logo management"""
|
||||
queryset = VODLogo.objects.all()
|
||||
serializer_class = VODLogoSerializer
|
||||
pagination_class = VODLogoPagination
|
||||
filter_backends = [SearchFilter, OrderingFilter]
|
||||
search_fields = ['name', 'url']
|
||||
ordering_fields = ['name', 'id']
|
||||
ordering = ['name']
|
||||
|
||||
def get_permissions(self):
|
||||
try:
|
||||
return [perm() for perm in permission_classes_by_action[self.action]]
|
||||
except KeyError:
|
||||
if self.action == 'cache':
|
||||
return [AllowAny()]
|
||||
return [Authenticated()]
|
||||
|
||||
def get_queryset(self):
|
||||
"""Optimize queryset with prefetch and add filtering"""
|
||||
queryset = VODLogo.objects.prefetch_related('movie', 'series').order_by('name')
|
||||
|
||||
# Filter by specific IDs
|
||||
ids = self.request.query_params.getlist('ids')
|
||||
if ids:
|
||||
try:
|
||||
id_list = [int(id_str) for id_str in ids if id_str.isdigit()]
|
||||
if id_list:
|
||||
queryset = queryset.filter(id__in=id_list)
|
||||
except (ValueError, TypeError):
|
||||
queryset = VODLogo.objects.none()
|
||||
|
||||
# Filter by usage
|
||||
used_filter = self.request.query_params.get('used', None)
|
||||
if used_filter == 'true':
|
||||
# Return logos that are used by movies OR series
|
||||
queryset = queryset.filter(
|
||||
Q(movie__isnull=False) | Q(series__isnull=False)
|
||||
).distinct()
|
||||
elif used_filter == 'false':
|
||||
# Return logos that are NOT used by either
|
||||
queryset = queryset.filter(
|
||||
movie__isnull=True,
|
||||
series__isnull=True
|
||||
)
|
||||
elif used_filter == 'movies':
|
||||
# Return logos that are used by movies (may also be used by series)
|
||||
queryset = queryset.filter(movie__isnull=False).distinct()
|
||||
elif used_filter == 'series':
|
||||
# Return logos that are used by series (may also be used by movies)
|
||||
queryset = queryset.filter(series__isnull=False).distinct()
|
||||
|
||||
|
||||
# Filter by name
|
||||
name_query = self.request.query_params.get('name', None)
|
||||
if name_query:
|
||||
queryset = queryset.filter(name__icontains=name_query)
|
||||
|
||||
# No pagination mode
|
||||
if self.request.query_params.get('no_pagination', 'false').lower() == 'true':
|
||||
self.pagination_class = None
|
||||
|
||||
return queryset
|
||||
|
||||
@action(detail=True, methods=["get"], permission_classes=[AllowAny])
|
||||
def cache(self, request, pk=None):
|
||||
"""Streams the VOD logo file, whether it's local or remote."""
|
||||
logo = self.get_object()
|
||||
|
||||
if not logo.url:
|
||||
return HttpResponse(status=404)
|
||||
|
||||
# Check if this is a local file path
|
||||
if logo.url.startswith('/data/'):
|
||||
# It's a local file
|
||||
file_path = logo.url
|
||||
if not os.path.exists(file_path):
|
||||
logger.error(f"VOD logo file not found: {file_path}")
|
||||
return HttpResponse(status=404)
|
||||
|
||||
try:
|
||||
return FileResponse(open(file_path, 'rb'), content_type='image/png')
|
||||
except Exception as e:
|
||||
logger.error(f"Error serving VOD logo file {file_path}: {str(e)}")
|
||||
return HttpResponse(status=500)
|
||||
else:
|
||||
# It's a remote URL - proxy it
|
||||
try:
|
||||
response = requests.get(logo.url, stream=True, timeout=10)
|
||||
response.raise_for_status()
|
||||
|
||||
content_type = response.headers.get('Content-Type', 'image/png')
|
||||
|
||||
return StreamingHttpResponse(
|
||||
response.iter_content(chunk_size=8192),
|
||||
content_type=content_type
|
||||
)
|
||||
except requests.exceptions.RequestException as e:
|
||||
logger.error(f"Error fetching remote VOD logo {logo.url}: {str(e)}")
|
||||
return HttpResponse(status=404)
|
||||
|
||||
@action(detail=False, methods=["delete"], url_path="bulk-delete")
|
||||
def bulk_delete(self, request):
|
||||
"""Delete multiple VOD logos at once"""
|
||||
logo_ids = request.data.get('logo_ids', [])
|
||||
|
||||
if not logo_ids:
|
||||
return Response(
|
||||
{"error": "No logo IDs provided"},
|
||||
status=status.HTTP_400_BAD_REQUEST
|
||||
)
|
||||
|
||||
try:
|
||||
# Get logos to delete
|
||||
logos = VODLogo.objects.filter(id__in=logo_ids)
|
||||
deleted_count = logos.count()
|
||||
|
||||
# Delete them
|
||||
logos.delete()
|
||||
|
||||
return Response({
|
||||
"deleted_count": deleted_count,
|
||||
"message": f"Successfully deleted {deleted_count} VOD logo(s)"
|
||||
})
|
||||
except Exception as e:
|
||||
logger.error(f"Error during bulk VOD logo deletion: {str(e)}")
|
||||
return Response(
|
||||
{"error": str(e)},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR
|
||||
)
|
||||
|
||||
@action(detail=False, methods=["post"])
|
||||
def cleanup(self, request):
|
||||
"""Delete all VOD logos that are not used by any movies or series"""
|
||||
try:
|
||||
# Find unused logos
|
||||
unused_logos = VODLogo.objects.filter(
|
||||
movie__isnull=True,
|
||||
series__isnull=True
|
||||
)
|
||||
|
||||
deleted_count = unused_logos.count()
|
||||
logo_names = list(unused_logos.values_list('name', flat=True))
|
||||
|
||||
# Delete them
|
||||
unused_logos.delete()
|
||||
|
||||
logger.info(f"Cleaned up {deleted_count} unused VOD logos: {logo_names}")
|
||||
|
||||
return Response({
|
||||
"deleted_count": deleted_count,
|
||||
"deleted_logos": logo_names,
|
||||
"message": f"Successfully deleted {deleted_count} unused VOD logo(s)"
|
||||
})
|
||||
except Exception as e:
|
||||
logger.error(f"Error during VOD logo cleanup: {str(e)}")
|
||||
return Response(
|
||||
{"error": str(e)},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR
|
||||
)
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,264 @@
|
|||
# Generated by Django 5.2.4 on 2025-11-06 23:01
|
||||
|
||||
import django.db.models.deletion
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
def migrate_vod_logos_forward(apps, schema_editor):
|
||||
"""
|
||||
Migrate VOD logos from the Logo table to the new VODLogo table.
|
||||
This copies all logos referenced by movies or series to VODLogo.
|
||||
Uses pure SQL for maximum performance.
|
||||
"""
|
||||
from django.db import connection
|
||||
|
||||
print("\n" + "="*80)
|
||||
print("Starting VOD logo migration...")
|
||||
print("="*80)
|
||||
|
||||
with connection.cursor() as cursor:
|
||||
# Step 1: Copy unique logos from Logo table to VODLogo table
|
||||
# Only copy logos that are used by movies or series
|
||||
print("Copying logos to VODLogo table...")
|
||||
cursor.execute("""
|
||||
INSERT INTO vod_vodlogo (name, url)
|
||||
SELECT DISTINCT l.name, l.url
|
||||
FROM dispatcharr_channels_logo l
|
||||
WHERE l.id IN (
|
||||
SELECT DISTINCT logo_id FROM vod_movie WHERE logo_id IS NOT NULL
|
||||
UNION
|
||||
SELECT DISTINCT logo_id FROM vod_series WHERE logo_id IS NOT NULL
|
||||
)
|
||||
ON CONFLICT (url) DO NOTHING
|
||||
""")
|
||||
print(f"Created VODLogo entries")
|
||||
|
||||
# Step 2: Update movies to point to VODLogo IDs using JOIN
|
||||
print("Updating movie references...")
|
||||
cursor.execute("""
|
||||
UPDATE vod_movie m
|
||||
SET logo_id = v.id
|
||||
FROM dispatcharr_channels_logo l
|
||||
INNER JOIN vod_vodlogo v ON l.url = v.url
|
||||
WHERE m.logo_id = l.id
|
||||
AND m.logo_id IS NOT NULL
|
||||
""")
|
||||
movie_count = cursor.rowcount
|
||||
print(f"Updated {movie_count} movies with new VOD logo references")
|
||||
|
||||
# Step 3: Update series to point to VODLogo IDs using JOIN
|
||||
print("Updating series references...")
|
||||
cursor.execute("""
|
||||
UPDATE vod_series s
|
||||
SET logo_id = v.id
|
||||
FROM dispatcharr_channels_logo l
|
||||
INNER JOIN vod_vodlogo v ON l.url = v.url
|
||||
WHERE s.logo_id = l.id
|
||||
AND s.logo_id IS NOT NULL
|
||||
""")
|
||||
series_count = cursor.rowcount
|
||||
print(f"Updated {series_count} series with new VOD logo references")
|
||||
|
||||
print("="*80)
|
||||
print("VOD logo migration completed successfully!")
|
||||
print(f"Summary: Updated {movie_count} movies and {series_count} series")
|
||||
print("="*80 + "\n")
|
||||
|
||||
|
||||
def migrate_vod_logos_backward(apps, schema_editor):
|
||||
"""
|
||||
Reverse migration - moves VODLogos back to Logo table.
|
||||
This recreates Logo entries for all VODLogos and updates Movie/Series references.
|
||||
"""
|
||||
Logo = apps.get_model('dispatcharr_channels', 'Logo')
|
||||
VODLogo = apps.get_model('vod', 'VODLogo')
|
||||
Movie = apps.get_model('vod', 'Movie')
|
||||
Series = apps.get_model('vod', 'Series')
|
||||
|
||||
print("\n" + "="*80)
|
||||
print("REVERSE: Moving VOD logos back to Logo table...")
|
||||
print("="*80)
|
||||
|
||||
# Get all VODLogos
|
||||
vod_logos = VODLogo.objects.all()
|
||||
print(f"Found {vod_logos.count()} VOD logos to reverse migrate")
|
||||
|
||||
# Create Logo entries for each VODLogo
|
||||
logos_to_create = []
|
||||
vod_to_logo_mapping = {} # VODLogo ID -> Logo ID
|
||||
|
||||
for vod_logo in vod_logos:
|
||||
# Check if a Logo with this URL already exists
|
||||
existing_logo = Logo.objects.filter(url=vod_logo.url).first()
|
||||
|
||||
if existing_logo:
|
||||
# Logo already exists, just map to it
|
||||
vod_to_logo_mapping[vod_logo.id] = existing_logo.id
|
||||
print(f"Logo already exists for URL: {vod_logo.url[:50]}... (using existing)")
|
||||
else:
|
||||
# Create new Logo entry
|
||||
new_logo = Logo(name=vod_logo.name, url=vod_logo.url)
|
||||
logos_to_create.append(new_logo)
|
||||
|
||||
# Bulk create new Logo entries
|
||||
if logos_to_create:
|
||||
print(f"Creating {len(logos_to_create)} new Logo entries...")
|
||||
Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True)
|
||||
print("Logo entries created")
|
||||
|
||||
# Get the created Logo instances with their IDs
|
||||
for vod_logo in vod_logos:
|
||||
if vod_logo.id not in vod_to_logo_mapping:
|
||||
try:
|
||||
logo = Logo.objects.get(url=vod_logo.url)
|
||||
vod_to_logo_mapping[vod_logo.id] = logo.id
|
||||
except Logo.DoesNotExist:
|
||||
print(f"Warning: Could not find Logo for URL: {vod_logo.url[:100]}...")
|
||||
|
||||
print(f"Created mapping for {len(vod_to_logo_mapping)} VOD logos -> Logos")
|
||||
|
||||
# Update movies to point back to Logo table
|
||||
movie_count = 0
|
||||
for movie in Movie.objects.exclude(logo__isnull=True):
|
||||
if movie.logo_id in vod_to_logo_mapping:
|
||||
movie.logo_id = vod_to_logo_mapping[movie.logo_id]
|
||||
movie.save(update_fields=['logo_id'])
|
||||
movie_count += 1
|
||||
print(f"Updated {movie_count} movies to use Logo table")
|
||||
|
||||
# Update series to point back to Logo table
|
||||
series_count = 0
|
||||
for series in Series.objects.exclude(logo__isnull=True):
|
||||
if series.logo_id in vod_to_logo_mapping:
|
||||
series.logo_id = vod_to_logo_mapping[series.logo_id]
|
||||
series.save(update_fields=['logo_id'])
|
||||
series_count += 1
|
||||
print(f"Updated {series_count} series to use Logo table")
|
||||
|
||||
# Delete VODLogos (they're now redundant)
|
||||
vod_logo_count = vod_logos.count()
|
||||
vod_logos.delete()
|
||||
print(f"Deleted {vod_logo_count} VOD logos")
|
||||
|
||||
print("="*80)
|
||||
print("Reverse migration completed!")
|
||||
print(f"Summary: Created/reused {len(vod_to_logo_mapping)} logos, updated {movie_count} movies and {series_count} series")
|
||||
print("="*80 + "\n")
|
||||
|
||||
|
||||
def cleanup_migrated_logos(apps, schema_editor):
|
||||
"""
|
||||
Delete Logo entries that were successfully migrated to VODLogo.
|
||||
|
||||
Uses efficient JOIN-based approach with LEFT JOIN to exclude channel usage.
|
||||
"""
|
||||
from django.db import connection
|
||||
|
||||
print("\n" + "="*80)
|
||||
print("Cleaning up migrated Logo entries...")
|
||||
print("="*80)
|
||||
|
||||
with connection.cursor() as cursor:
|
||||
# Single efficient query using JOINs:
|
||||
# - JOIN with vod_vodlogo to find migrated logos
|
||||
# - LEFT JOIN with channels to find which aren't used
|
||||
cursor.execute("""
|
||||
DELETE FROM dispatcharr_channels_logo
|
||||
WHERE id IN (
|
||||
SELECT l.id
|
||||
FROM dispatcharr_channels_logo l
|
||||
INNER JOIN vod_vodlogo v ON l.url = v.url
|
||||
LEFT JOIN dispatcharr_channels_channel c ON c.logo_id = l.id
|
||||
WHERE c.id IS NULL
|
||||
)
|
||||
""")
|
||||
deleted_count = cursor.rowcount
|
||||
|
||||
print(f"✓ Deleted {deleted_count} migrated Logo entries (not used by channels)")
|
||||
print("="*80 + "\n")
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('vod', '0002_add_last_seen_with_default'),
|
||||
('dispatcharr_channels', '0013_alter_logo_url'), # Ensure Logo table exists
|
||||
]
|
||||
|
||||
operations = [
|
||||
# Step 1: Create the VODLogo model
|
||||
migrations.CreateModel(
|
||||
name='VODLogo',
|
||||
fields=[
|
||||
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
|
||||
('name', models.CharField(max_length=255)),
|
||||
('url', models.TextField(unique=True)),
|
||||
],
|
||||
options={
|
||||
'verbose_name': 'VOD Logo',
|
||||
'verbose_name_plural': 'VOD Logos',
|
||||
},
|
||||
),
|
||||
|
||||
# Step 2: Remove foreign key constraints temporarily (so we can change the IDs)
|
||||
# We need to find and drop the actual constraint names dynamically
|
||||
migrations.RunSQL(
|
||||
sql=[
|
||||
# Drop movie logo constraint (find it dynamically)
|
||||
"""
|
||||
DO $$
|
||||
DECLARE
|
||||
constraint_name text;
|
||||
BEGIN
|
||||
SELECT conname INTO constraint_name
|
||||
FROM pg_constraint
|
||||
WHERE conrelid = 'vod_movie'::regclass
|
||||
AND conname LIKE '%logo_id%fk%';
|
||||
|
||||
IF constraint_name IS NOT NULL THEN
|
||||
EXECUTE 'ALTER TABLE vod_movie DROP CONSTRAINT ' || constraint_name;
|
||||
END IF;
|
||||
END $$;
|
||||
""",
|
||||
# Drop series logo constraint (find it dynamically)
|
||||
"""
|
||||
DO $$
|
||||
DECLARE
|
||||
constraint_name text;
|
||||
BEGIN
|
||||
SELECT conname INTO constraint_name
|
||||
FROM pg_constraint
|
||||
WHERE conrelid = 'vod_series'::regclass
|
||||
AND conname LIKE '%logo_id%fk%';
|
||||
|
||||
IF constraint_name IS NOT NULL THEN
|
||||
EXECUTE 'ALTER TABLE vod_series DROP CONSTRAINT ' || constraint_name;
|
||||
END IF;
|
||||
END $$;
|
||||
""",
|
||||
],
|
||||
reverse_sql=[
|
||||
# The AlterField operations will recreate the constraints pointing to VODLogo,
|
||||
# so we don't need to manually recreate them in reverse
|
||||
migrations.RunSQL.noop,
|
||||
],
|
||||
),
|
||||
|
||||
# Step 3: Migrate the data (this copies logos and updates references)
|
||||
migrations.RunPython(migrate_vod_logos_forward, migrate_vod_logos_backward),
|
||||
|
||||
# Step 4: Now we can safely alter the foreign keys to point to VODLogo
|
||||
migrations.AlterField(
|
||||
model_name='movie',
|
||||
name='logo',
|
||||
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='movie', to='vod.vodlogo'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='series',
|
||||
name='logo',
|
||||
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='series', to='vod.vodlogo'),
|
||||
),
|
||||
|
||||
# Step 5: Clean up migrated Logo entries
|
||||
migrations.RunPython(cleanup_migrated_logos, migrations.RunPython.noop),
|
||||
]
|
||||
|
|
@@ -4,10 +4,22 @@ from django.utils import timezone
|
|||
from django.contrib.contenttypes.fields import GenericForeignKey
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
from apps.m3u.models import M3UAccount
|
||||
from apps.channels.models import Logo
|
||||
import uuid
|
||||
|
||||
|
||||
class VODLogo(models.Model):
|
||||
"""Logo model specifically for VOD content (movies and series)"""
|
||||
name = models.CharField(max_length=255)
|
||||
url = models.TextField(unique=True)
|
||||
|
||||
def __str__(self):
|
||||
return self.name
|
||||
|
||||
class Meta:
|
||||
verbose_name = 'VOD Logo'
|
||||
verbose_name_plural = 'VOD Logos'
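A minimal usage sketch: because url is unique, VOD logos are deduplicated by URL and shared between movies and series (values below are hypothetical):

logo, created = VODLogo.objects.get_or_create(
    url="http://example.com/posters/movie.jpg",
    defaults={"name": "Example Poster"},
)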
|
||||
|
||||
|
||||
class VODCategory(models.Model):
|
||||
"""Categories for organizing VODs (e.g., Action, Comedy, Drama)"""
|
||||
|
||||
|
|
@@ -69,7 +81,7 @@ class Series(models.Model):
|
|||
year = models.IntegerField(blank=True, null=True)
|
||||
rating = models.CharField(max_length=10, blank=True, null=True)
|
||||
genre = models.CharField(max_length=255, blank=True, null=True)
|
||||
logo = models.ForeignKey(Logo, on_delete=models.SET_NULL, null=True, blank=True, related_name='series')
|
||||
logo = models.ForeignKey(VODLogo, on_delete=models.SET_NULL, null=True, blank=True, related_name='series')
|
||||
|
||||
# Metadata IDs for deduplication - these should be globally unique when present
|
||||
tmdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="TMDB ID for metadata")
|
||||
|
|
@@ -108,7 +120,7 @@ class Movie(models.Model):
|
|||
rating = models.CharField(max_length=10, blank=True, null=True)
|
||||
genre = models.CharField(max_length=255, blank=True, null=True)
|
||||
duration_secs = models.IntegerField(blank=True, null=True, help_text="Duration in seconds")
|
||||
logo = models.ForeignKey(Logo, on_delete=models.SET_NULL, null=True, blank=True, related_name='movie')
|
||||
logo = models.ForeignKey(VODLogo, on_delete=models.SET_NULL, null=True, blank=True, related_name='movie')
|
||||
|
||||
# Metadata IDs for deduplication - these should be globally unique when present
|
||||
tmdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="TMDB ID for metadata")
|
||||
|
|
@@ -233,10 +245,13 @@ class M3UMovieRelation(models.Model):
|
|||
"""Get the full stream URL for this movie from this provider"""
|
||||
# Build URL dynamically for XtreamCodes accounts
|
||||
if self.m3u_account.account_type == 'XC':
|
||||
server_url = self.m3u_account.server_url.rstrip('/')
|
||||
from core.xtream_codes import Client as XCClient
|
||||
# Use XC client's URL normalization to handle malformed URLs
|
||||
# (e.g., URLs with /player_api.php or query parameters)
|
||||
normalized_url = XCClient(self.m3u_account.server_url, '', '')._normalize_url(self.m3u_account.server_url)
|
||||
username = self.m3u_account.username
|
||||
password = self.m3u_account.password
|
||||
return f"{server_url}/movie/{username}/{password}/{self.stream_id}.{self.container_extension or 'mp4'}"
|
||||
return f"{normalized_url}/movie/{username}/{password}/{self.stream_id}.{self.container_extension or 'mp4'}"
|
||||
else:
|
||||
# For other account types, we would need another way to build URLs
|
||||
return None
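The resulting stream URL has this shape (illustrative values; the real host and credentials come from the M3U account):

normalized_url = "http://provider.example:8080"   # output of _normalize_url()
username, password, stream_id, ext = "user", "secret", 12345, "mkv"
url = f"{normalized_url}/movie/{username}/{password}/{stream_id}.{ext or 'mp4'}"
# -> "http://provider.example:8080/movie/user/secret/12345.mkv"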
|
||||
|
|
@@ -273,10 +288,12 @@ class M3UEpisodeRelation(models.Model):
|
|||
|
||||
if self.m3u_account.account_type == 'XC':
|
||||
# For XtreamCodes accounts, build the URL dynamically
|
||||
server_url = self.m3u_account.server_url.rstrip('/')
|
||||
# Use XC client's URL normalization to handle malformed URLs
|
||||
# (e.g., URLs with /player_api.php or query parameters)
|
||||
normalized_url = XtreamCodesClient(self.m3u_account.server_url, '', '')._normalize_url(self.m3u_account.server_url)
|
||||
username = self.m3u_account.username
|
||||
password = self.m3u_account.password
|
||||
return f"{server_url}/series/{username}/{password}/{self.stream_id}.{self.container_extension or 'mp4'}"
|
||||
return f"{normalized_url}/series/{username}/{password}/{self.stream_id}.{self.container_extension or 'mp4'}"
|
||||
else:
|
||||
# We might support non XC accounts in the future
|
||||
# For now, return None
|
||||
|
|
|
|||
|
|
@@ -1,12 +1,79 @@
|
|||
from rest_framework import serializers
|
||||
from django.urls import reverse
|
||||
from .models import (
|
||||
Series, VODCategory, Movie, Episode,
|
||||
Series, VODCategory, Movie, Episode, VODLogo,
|
||||
M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation, M3UVODCategoryRelation
|
||||
)
|
||||
from apps.channels.serializers import LogoSerializer
|
||||
from apps.m3u.serializers import M3UAccountSerializer
|
||||
|
||||
|
||||
class VODLogoSerializer(serializers.ModelSerializer):
|
||||
cache_url = serializers.SerializerMethodField()
|
||||
movie_count = serializers.SerializerMethodField()
|
||||
series_count = serializers.SerializerMethodField()
|
||||
is_used = serializers.SerializerMethodField()
|
||||
item_names = serializers.SerializerMethodField()
|
||||
|
||||
class Meta:
|
||||
model = VODLogo
|
||||
fields = ["id", "name", "url", "cache_url", "movie_count", "series_count", "is_used", "item_names"]
|
||||
|
||||
def validate_url(self, value):
|
||||
"""Validate that the URL is unique for creation or update"""
|
||||
if self.instance and self.instance.url == value:
|
||||
return value
|
||||
|
||||
if VODLogo.objects.filter(url=value).exists():
|
||||
raise serializers.ValidationError("A VOD logo with this URL already exists.")
|
||||
|
||||
return value
|
||||
|
||||
def create(self, validated_data):
|
||||
"""Handle logo creation with proper URL validation"""
|
||||
return VODLogo.objects.create(**validated_data)
|
||||
|
||||
def update(self, instance, validated_data):
|
||||
"""Handle logo updates"""
|
||||
for attr, value in validated_data.items():
|
||||
setattr(instance, attr, value)
|
||||
instance.save()
|
||||
return instance
|
||||
|
||||
def get_cache_url(self, obj):
|
||||
request = self.context.get("request")
|
||||
if request:
|
||||
return request.build_absolute_uri(
|
||||
reverse("api:vod:vodlogo-cache", args=[obj.id])
|
||||
)
|
||||
return reverse("api:vod:vodlogo-cache", args=[obj.id])
|
||||
|
||||
def get_movie_count(self, obj):
|
||||
"""Get the number of movies using this logo"""
|
||||
return obj.movie.count() if hasattr(obj, 'movie') else 0
|
||||
|
||||
def get_series_count(self, obj):
|
||||
"""Get the number of series using this logo"""
|
||||
return obj.series.count() if hasattr(obj, 'series') else 0
|
||||
|
||||
def get_is_used(self, obj):
|
||||
"""Check if this logo is used by any movies or series"""
|
||||
return (hasattr(obj, 'movie') and obj.movie.exists()) or (hasattr(obj, 'series') and obj.series.exists())
|
||||
|
||||
def get_item_names(self, obj):
|
||||
"""Get the list of movies and series using this logo"""
|
||||
names = []
|
||||
|
||||
if hasattr(obj, 'movie'):
|
||||
for movie in obj.movie.all()[:10]: # Limit to 10 items for performance
|
||||
names.append(f"Movie: {movie.name}")
|
||||
|
||||
if hasattr(obj, 'series'):
|
||||
for series in obj.series.all()[:10]: # Limit to 10 items for performance
|
||||
names.append(f"Series: {series.name}")
|
||||
|
||||
return names
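For illustration, the serialized payload looks roughly like this (hypothetical values; cache_url depends on the registered route):

example = {
    "id": 1,
    "name": "Example Poster",
    "url": "http://example.com/posters/movie.jpg",
    "cache_url": "http://dispatcharr.local/api/vod/logos/1/cache/",  # assumed URL layout
    "movie_count": 2,
    "series_count": 0,
    "is_used": True,
    "item_names": ["Movie: Example Movie", "Movie: Example Movie (4K)"],
}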
|
||||
|
||||
|
||||
class M3UVODCategoryRelationSerializer(serializers.ModelSerializer):
|
||||
category = serializers.IntegerField(source="category.id")
|
||||
m3u_account = serializers.IntegerField(source="m3u_account.id")
|
||||
|
|
@@ -31,7 +98,7 @@ class VODCategorySerializer(serializers.ModelSerializer):
|
|||
]
|
||||
|
||||
class SeriesSerializer(serializers.ModelSerializer):
|
||||
logo = LogoSerializer(read_only=True)
|
||||
logo = VODLogoSerializer(read_only=True)
|
||||
episode_count = serializers.SerializerMethodField()
|
||||
|
||||
class Meta:
|
||||
|
|
@@ -43,7 +110,7 @@ class SeriesSerializer(serializers.ModelSerializer):
|
|||
|
||||
|
||||
class MovieSerializer(serializers.ModelSerializer):
|
||||
logo = LogoSerializer(read_only=True)
|
||||
logo = VODLogoSerializer(read_only=True)
|
||||
|
||||
class Meta:
|
||||
model = Movie
|
||||
|
|
@@ -225,7 +292,7 @@ class M3UEpisodeRelationSerializer(serializers.ModelSerializer):
|
|||
|
||||
class EnhancedSeriesSerializer(serializers.ModelSerializer):
|
||||
"""Enhanced serializer for series with provider information"""
|
||||
logo = LogoSerializer(read_only=True)
|
||||
logo = VODLogoSerializer(read_only=True)
|
||||
providers = M3USeriesRelationSerializer(source='m3u_relations', many=True, read_only=True)
|
||||
episode_count = serializers.SerializerMethodField()
|
||||
|
||||
|
|
|
|||
|
|
@@ -5,10 +5,9 @@ from django.db.models import Q
|
|||
from apps.m3u.models import M3UAccount
|
||||
from core.xtream_codes import Client as XtreamCodesClient
|
||||
from .models import (
|
||||
VODCategory, Series, Movie, Episode,
|
||||
VODCategory, Series, Movie, Episode, VODLogo,
|
||||
M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation, M3UVODCategoryRelation
|
||||
)
|
||||
from apps.channels.models import Logo
|
||||
from datetime import datetime
|
||||
import logging
|
||||
import json
|
||||
|
|
@@ -128,6 +127,37 @@ def refresh_movies(client, account, categories_by_provider, relations, scan_star
|
|||
"""Refresh movie content using single API call for all movies"""
|
||||
logger.info(f"Refreshing movies for account {account.name}")
|
||||
|
||||
# Ensure "Uncategorized" category exists for movies without a category
|
||||
uncategorized_category, created = VODCategory.objects.get_or_create(
|
||||
name="Uncategorized",
|
||||
category_type="movie",
|
||||
defaults={}
|
||||
)
|
||||
|
||||
# Ensure there's a relation for the Uncategorized category
|
||||
account_custom_props = account.custom_properties or {}
|
||||
auto_enable_new = account_custom_props.get("auto_enable_new_groups_vod", True)
|
||||
|
||||
uncategorized_relation, rel_created = M3UVODCategoryRelation.objects.get_or_create(
|
||||
category=uncategorized_category,
|
||||
m3u_account=account,
|
||||
defaults={
|
||||
'enabled': auto_enable_new,
|
||||
'custom_properties': {}
|
||||
}
|
||||
)
|
||||
|
||||
if created:
|
||||
logger.info(f"Created 'Uncategorized' category for movies")
|
||||
if rel_created:
|
||||
logger.info(f"Created relation for 'Uncategorized' category (enabled={auto_enable_new})")
|
||||
|
||||
# Add uncategorized category to relations dict for easy access
|
||||
relations[uncategorized_category.id] = uncategorized_relation
|
||||
|
||||
# Add to categories_by_provider with a special key for items without category
|
||||
categories_by_provider['__uncategorized__'] = uncategorized_category
|
||||
|
||||
# Get all movies in a single API call
|
||||
logger.info("Fetching all movies from provider...")
|
||||
all_movies_data = client.get_vod_streams() # No category_id = get all movies
|
||||
|
|
@@ -151,6 +181,37 @@ def refresh_series(client, account, categories_by_provider, relations, scan_star
|
|||
"""Refresh series content using single API call for all series"""
|
||||
logger.info(f"Refreshing series for account {account.name}")
|
||||
|
||||
# Ensure "Uncategorized" category exists for series without a category
|
||||
uncategorized_category, created = VODCategory.objects.get_or_create(
|
||||
name="Uncategorized",
|
||||
category_type="series",
|
||||
defaults={}
|
||||
)
|
||||
|
||||
# Ensure there's a relation for the Uncategorized category
|
||||
account_custom_props = account.custom_properties or {}
|
||||
auto_enable_new = account_custom_props.get("auto_enable_new_groups_series", True)
|
||||
|
||||
uncategorized_relation, rel_created = M3UVODCategoryRelation.objects.get_or_create(
|
||||
category=uncategorized_category,
|
||||
m3u_account=account,
|
||||
defaults={
|
||||
'enabled': auto_enable_new,
|
||||
'custom_properties': {}
|
||||
}
|
||||
)
|
||||
|
||||
if created:
|
||||
logger.info(f"Created 'Uncategorized' category for series")
|
||||
if rel_created:
|
||||
logger.info(f"Created relation for 'Uncategorized' category (enabled={auto_enable_new})")
|
||||
|
||||
# Add uncategorized category to relations dict for easy access
|
||||
relations[uncategorized_category.id] = uncategorized_relation
|
||||
|
||||
# Add to categories_by_provider with a special key for items without category
|
||||
categories_by_provider['__uncategorized__'] = uncategorized_category
|
||||
|
||||
# Get all series in a single API call
|
||||
logger.info("Fetching all series from provider...")
|
||||
all_series_data = client.get_series() # No category_id = get all series
|
||||
|
|
@@ -187,16 +248,28 @@ def batch_create_categories(categories_data, category_type, account):
|
|||
|
||||
logger.debug(f"Found {len(existing_categories)} existing categories")
|
||||
|
||||
# Check if we should auto-enable new categories based on account settings
|
||||
account_custom_props = account.custom_properties or {}
|
||||
if category_type == 'movie':
|
||||
auto_enable_new = account_custom_props.get("auto_enable_new_groups_vod", True)
|
||||
else: # series
|
||||
auto_enable_new = account_custom_props.get("auto_enable_new_groups_series", True)
|
||||
|
||||
# Create missing categories in batch
|
||||
new_categories = []
|
||||
|
||||
for name in category_names:
|
||||
if name not in existing_categories:
|
||||
# Always create new categories
|
||||
new_categories.append(VODCategory(name=name, category_type=category_type))
|
||||
else:
|
||||
# Existing category - create relationship with enabled based on auto_enable setting
|
||||
# (category exists globally but is new to this account)
|
||||
relations_to_create.append(M3UVODCategoryRelation(
|
||||
category=existing_categories[name],
|
||||
m3u_account=account,
|
||||
custom_properties={},
|
||||
enabled=auto_enable_new,
|
||||
))
|
||||
|
||||
logger.debug(f"{len(new_categories)} new categories found")
|
||||
|
|
@@ -204,24 +277,69 @@ def batch_create_categories(categories_data, category_type, account):
|
|||
|
||||
if new_categories:
|
||||
logger.debug("Creating new categories...")
|
||||
created_categories = VODCategory.bulk_create_and_fetch(new_categories, ignore_conflicts=True)
|
||||
created_categories = list(VODCategory.bulk_create_and_fetch(new_categories, ignore_conflicts=True))
|
||||
|
||||
# Create relations for newly created categories with enabled based on auto_enable setting
|
||||
for cat in created_categories:
|
||||
if not auto_enable_new:
|
||||
logger.info(f"New {category_type} category '{cat.name}' created but DISABLED - auto_enable_new_groups is disabled for account {account.id}")
|
||||
|
||||
relations_to_create.append(
|
||||
M3UVODCategoryRelation(
|
||||
category=cat,
|
||||
m3u_account=account,
|
||||
custom_properties={},
|
||||
enabled=auto_enable_new,
|
||||
)
|
||||
)
|
||||
|
||||
# Convert to dictionary for easy lookup
|
||||
newly_created = {cat.name: cat for cat in created_categories}
|
||||
|
||||
relations_to_create += [
|
||||
M3UVODCategoryRelation(
|
||||
category=cat,
|
||||
m3u_account=account,
|
||||
custom_properties={},
|
||||
) for cat in newly_created.values()
|
||||
]
|
||||
|
||||
existing_categories.update(newly_created)
|
||||
|
||||
# Create missing relations
|
||||
logger.debug("Updating category account relations...")
|
||||
M3UVODCategoryRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True)
|
||||
|
||||
# Delete orphaned category relationships (categories no longer in the M3U source)
|
||||
# Exclude "Uncategorized" from cleanup as it's a special category we manage
|
||||
current_category_ids = set(existing_categories[name].id for name in category_names)
|
||||
existing_relations = M3UVODCategoryRelation.objects.filter(
|
||||
m3u_account=account,
|
||||
category__category_type=category_type
|
||||
).select_related('category')
|
||||
|
||||
relations_to_delete = [
|
||||
rel for rel in existing_relations
|
||||
if rel.category_id not in current_category_ids and rel.category.name != "Uncategorized"
|
||||
]
|
||||
|
||||
if relations_to_delete:
|
||||
M3UVODCategoryRelation.objects.filter(
|
||||
id__in=[rel.id for rel in relations_to_delete]
|
||||
).delete()
|
||||
logger.info(f"Deleted {len(relations_to_delete)} orphaned {category_type} category relationships for account {account.id}: {[rel.category.name for rel in relations_to_delete]}")
|
||||
|
||||
# Check if any of the deleted relationships left categories with no remaining associations
|
||||
orphaned_category_ids = []
|
||||
for rel in relations_to_delete:
|
||||
category = rel.category
|
||||
|
||||
# Check if this category has any remaining M3U account relationships
|
||||
remaining_relationships = M3UVODCategoryRelation.objects.filter(
|
||||
category=category
|
||||
).exists()
|
||||
|
||||
# If no relationships remain, it's safe to delete the category
|
||||
if not remaining_relationships:
|
||||
orphaned_category_ids.append(category.id)
|
||||
logger.debug(f"Category '{category.name}' has no remaining associations and will be deleted")
|
||||
|
||||
# Delete orphaned categories
|
||||
if orphaned_category_ids:
|
||||
VODCategory.objects.filter(id__in=orphaned_category_ids).delete()
|
||||
logger.info(f"Deleted {len(orphaned_category_ids)} orphaned {category_type} categories with no remaining associations")
|
||||
|
||||
# 🔑 Fetch all relations for this account, for all categories
|
||||
# relations = { rel.id: rel for rel in M3UVODCategoryRelation.objects
|
||||
# .filter(category__in=existing_categories.values(), m3u_account=account)
|
||||
|
|
@@ -276,17 +394,26 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N
|
|||
logger.debug("Skipping disabled category")
|
||||
continue
|
||||
else:
|
||||
logger.warning(f"No category ID provided for movie {name}")
|
||||
# Assign to Uncategorized category if no category_id provided
|
||||
logger.debug(f"No category ID provided for movie {name}, assigning to 'Uncategorized'")
|
||||
category = categories.get('__uncategorized__')
|
||||
if category:
|
||||
movie_data['_category_id'] = category.id
|
||||
# Check if uncategorized is disabled
|
||||
relation = relations.get(category.id, None)
|
||||
if relation and not relation.enabled:
|
||||
logger.debug("Skipping disabled 'Uncategorized' category")
|
||||
continue
|
||||
|
||||
# Extract metadata
|
||||
year = extract_year_from_data(movie_data, 'name')
|
||||
tmdb_id = movie_data.get('tmdb_id') or movie_data.get('tmdb')
|
||||
imdb_id = movie_data.get('imdb_id') or movie_data.get('imdb')
|
||||
|
||||
# Clean empty string IDs
|
||||
if tmdb_id == '':
|
||||
# Clean empty string IDs and zero values (some providers use 0 to indicate no ID)
|
||||
if tmdb_id == '' or tmdb_id == 0 or tmdb_id == '0':
|
||||
tmdb_id = None
|
||||
if imdb_id == '':
|
||||
if imdb_id == '' or imdb_id == 0 or imdb_id == '0':
|
||||
imdb_id = None
|
||||
|
||||
# Create a unique key for this movie (priority: TMDB > IMDB > name+year)
|
||||
|
|
@@ -303,7 +430,7 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N
|
|||
|
||||
# Prepare movie properties
|
||||
description = movie_data.get('description') or movie_data.get('plot') or ''
|
||||
rating = movie_data.get('rating') or movie_data.get('vote_average') or ''
|
||||
rating = normalize_rating(movie_data.get('rating') or movie_data.get('vote_average'))
|
||||
genre = movie_data.get('genre') or movie_data.get('category_name') or ''
|
||||
duration_secs = extract_duration_from_data(movie_data)
|
||||
trailer_raw = movie_data.get('trailer') or movie_data.get('youtube_trailer') or ''
|
||||
|
|
@@ -347,7 +474,7 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N
|
|||
|
||||
# Get existing logos
|
||||
existing_logos = {
|
||||
logo.url: logo for logo in Logo.objects.filter(url__in=logo_urls)
|
||||
logo.url: logo for logo in VODLogo.objects.filter(url__in=logo_urls)
|
||||
} if logo_urls else {}
|
||||
|
||||
# Create missing logos
|
||||
|
|
@@ -355,20 +482,20 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N
|
|||
for logo_url in logo_urls:
|
||||
if logo_url not in existing_logos:
|
||||
movie_name = logo_url_to_name.get(logo_url, 'Unknown Movie')
|
||||
logos_to_create.append(Logo(url=logo_url, name=movie_name))
|
||||
logos_to_create.append(VODLogo(url=logo_url, name=movie_name))
|
||||
|
||||
if logos_to_create:
|
||||
try:
|
||||
Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True)
|
||||
VODLogo.objects.bulk_create(logos_to_create, ignore_conflicts=True)
|
||||
# Refresh existing_logos with newly created ones
|
||||
new_logo_urls = [logo.url for logo in logos_to_create]
|
||||
newly_created = {
|
||||
logo.url: logo for logo in Logo.objects.filter(url__in=new_logo_urls)
|
||||
logo.url: logo for logo in VODLogo.objects.filter(url__in=new_logo_urls)
|
||||
}
|
||||
existing_logos.update(newly_created)
|
||||
logger.info(f"Created {len(newly_created)} new logos for movies")
|
||||
logger.info(f"Created {len(newly_created)} new VOD logos for movies")
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to create logos: {e}")
|
||||
logger.warning(f"Failed to create VOD logos: {e}")
|
||||
|
||||
# Get existing movies based on our keys
|
||||
existing_movies = {}
|
||||
|
|
@@ -487,26 +614,41 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N
|
|||
# First, create new movies and get their IDs
|
||||
created_movies = {}
|
||||
if movies_to_create:
|
||||
Movie.objects.bulk_create(movies_to_create, ignore_conflicts=True)
|
||||
# Bulk query to check which movies already exist
|
||||
tmdb_ids = [m.tmdb_id for m in movies_to_create if m.tmdb_id]
|
||||
imdb_ids = [m.imdb_id for m in movies_to_create if m.imdb_id]
|
||||
name_year_pairs = [(m.name, m.year) for m in movies_to_create if not m.tmdb_id and not m.imdb_id]
|
||||
|
||||
# Get the newly created movies with their IDs
|
||||
# We need to re-fetch them to get the primary keys
|
||||
existing_by_tmdb = {m.tmdb_id: m for m in Movie.objects.filter(tmdb_id__in=tmdb_ids)} if tmdb_ids else {}
|
||||
existing_by_imdb = {m.imdb_id: m for m in Movie.objects.filter(imdb_id__in=imdb_ids)} if imdb_ids else {}
|
||||
|
||||
existing_by_name_year = {}
|
||||
if name_year_pairs:
|
||||
for movie in Movie.objects.filter(tmdb_id__isnull=True, imdb_id__isnull=True):
|
||||
key = (movie.name, movie.year)
|
||||
if key in name_year_pairs:
|
||||
existing_by_name_year[key] = movie
|
||||
|
||||
# Check each movie against the bulk query results
|
||||
movies_actually_created = []
|
||||
for movie in movies_to_create:
|
||||
# Find the movie by its unique identifiers
|
||||
if movie.tmdb_id:
|
||||
db_movie = Movie.objects.filter(tmdb_id=movie.tmdb_id).first()
|
||||
elif movie.imdb_id:
|
||||
db_movie = Movie.objects.filter(imdb_id=movie.imdb_id).first()
|
||||
else:
|
||||
db_movie = Movie.objects.filter(
|
||||
name=movie.name,
|
||||
year=movie.year,
|
||||
tmdb_id__isnull=True,
|
||||
imdb_id__isnull=True
|
||||
).first()
|
||||
existing = None
|
||||
if movie.tmdb_id and movie.tmdb_id in existing_by_tmdb:
|
||||
existing = existing_by_tmdb[movie.tmdb_id]
|
||||
elif movie.imdb_id and movie.imdb_id in existing_by_imdb:
|
||||
existing = existing_by_imdb[movie.imdb_id]
|
||||
elif not movie.tmdb_id and not movie.imdb_id:
|
||||
existing = existing_by_name_year.get((movie.name, movie.year))
|
||||
|
||||
if db_movie:
|
||||
created_movies[id(movie)] = db_movie
|
||||
if existing:
|
||||
created_movies[id(movie)] = existing
|
||||
else:
|
||||
movies_actually_created.append(movie)
|
||||
created_movies[id(movie)] = movie
|
||||
|
||||
# Bulk create only movies that don't exist
|
||||
if movies_actually_created:
|
||||
Movie.objects.bulk_create(movies_actually_created)
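The lookups above apply a fixed identity priority; a hypothetical helper expressing the same rule (not part of the code above):

def movie_identity(movie):
    # Deduplication priority: TMDB id, then IMDB id, then (name, year)
    if movie.tmdb_id:
        return ("tmdb", movie.tmdb_id)
    if movie.imdb_id:
        return ("imdb", movie.imdb_id)
    return ("name_year", movie.name, movie.year)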
|
||||
|
||||
# Update existing movies
|
||||
if movies_to_update:
|
||||
|
|
@@ -522,12 +664,16 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N
|
|||
movie.logo = movie._logo_to_update
|
||||
movie.save(update_fields=['logo'])
|
||||
|
||||
# Update relations to reference the correct movie objects
|
||||
# Update relations to reference the correct movie objects (with PKs)
|
||||
for relation in relations_to_create:
|
||||
if id(relation.movie) in created_movies:
|
||||
relation.movie = created_movies[id(relation.movie)]
|
||||
|
||||
# Handle relations
|
||||
for relation in relations_to_update:
|
||||
if id(relation.movie) in created_movies:
|
||||
relation.movie = created_movies[id(relation.movie)]
|
||||
|
||||
# All movies now have PKs, safe to bulk create/update relations
|
||||
if relations_to_create:
|
||||
M3UMovieRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True)
|
||||
|
||||
|
|
@@ -578,7 +724,16 @@ def process_series_batch(account, batch, categories, relations, scan_start_time=
|
|||
logger.debug("Skipping disabled category")
|
||||
continue
|
||||
else:
|
||||
logger.warning(f"No category ID provided for series {name}")
|
||||
# Assign to Uncategorized category if no category_id provided
|
||||
logger.debug(f"No category ID provided for series {name}, assigning to 'Uncategorized'")
|
||||
category = categories.get('__uncategorized__')
|
||||
if category:
|
||||
series_data['_category_id'] = category.id
|
||||
# Check if uncategorized is disabled
|
||||
relation = relations.get(category.id, None)
|
||||
if relation and not relation.enabled:
|
||||
logger.debug("Skipping disabled 'Uncategorized' category")
|
||||
continue
|
||||
|
||||
# Extract metadata
|
||||
year = extract_year(series_data.get('releaseDate', ''))
|
||||
|
|
@@ -588,10 +743,10 @@ def process_series_batch(account, batch, categories, relations, scan_start_time=
|
|||
tmdb_id = series_data.get('tmdb') or series_data.get('tmdb_id')
|
||||
imdb_id = series_data.get('imdb') or series_data.get('imdb_id')
|
||||
|
||||
# Clean empty string IDs
|
||||
if tmdb_id == '':
|
||||
# Clean empty string IDs and zero values (some providers use 0 to indicate no ID)
|
||||
if tmdb_id == '' or tmdb_id == 0 or tmdb_id == '0':
|
||||
tmdb_id = None
|
||||
if imdb_id == '':
|
||||
if imdb_id == '' or imdb_id == 0 or imdb_id == '0':
|
||||
imdb_id = None
|
||||
|
||||
# Create a unique key for this series (priority: TMDB > IMDB > name+year)
|
||||
|
|
@@ -608,7 +763,7 @@ def process_series_batch(account, batch, categories, relations, scan_start_time=
|
|||
|
||||
# Prepare series properties
|
||||
description = series_data.get('plot', '')
|
||||
rating = series_data.get('rating', '')
|
||||
rating = normalize_rating(series_data.get('rating'))
|
||||
genre = series_data.get('genre', '')
|
||||
logo_url = series_data.get('cover') or ''
|
||||
|
||||
|
|
@@ -669,7 +824,7 @@ def process_series_batch(account, batch, categories, relations, scan_start_time=
|
|||
|
||||
# Get existing logos
|
||||
existing_logos = {
|
||||
logo.url: logo for logo in Logo.objects.filter(url__in=logo_urls)
|
||||
logo.url: logo for logo in VODLogo.objects.filter(url__in=logo_urls)
|
||||
} if logo_urls else {}
|
||||
|
||||
# Create missing logos
|
||||
|
|
@@ -677,20 +832,20 @@ def process_series_batch(account, batch, categories, relations, scan_start_time=
|
|||
for logo_url in logo_urls:
|
||||
if logo_url not in existing_logos:
|
||||
series_name = logo_url_to_name.get(logo_url, 'Unknown Series')
|
||||
logos_to_create.append(Logo(url=logo_url, name=series_name))
|
||||
logos_to_create.append(VODLogo(url=logo_url, name=series_name))
|
||||
|
||||
if logos_to_create:
|
||||
try:
|
||||
Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True)
|
||||
VODLogo.objects.bulk_create(logos_to_create, ignore_conflicts=True)
|
||||
# Refresh existing_logos with newly created ones
|
||||
new_logo_urls = [logo.url for logo in logos_to_create]
|
||||
newly_created = {
|
||||
logo.url: logo for logo in Logo.objects.filter(url__in=new_logo_urls)
|
||||
logo.url: logo for logo in VODLogo.objects.filter(url__in=new_logo_urls)
|
||||
}
|
||||
existing_logos.update(newly_created)
|
||||
logger.info(f"Created {len(newly_created)} new logos for series")
|
||||
logger.info(f"Created {len(newly_created)} new VOD logos for series")
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to create logos: {e}")
|
||||
logger.warning(f"Failed to create VOD logos: {e}")
|
||||
|
||||
# Get existing series based on our keys - same pattern as movies
|
||||
existing_series = {}
|
||||
|
|
@@ -809,26 +964,41 @@ def process_series_batch(account, batch, categories, relations, scan_start_time=
|
|||
# First, create new series and get their IDs
|
||||
created_series = {}
|
||||
if series_to_create:
|
||||
Series.objects.bulk_create(series_to_create, ignore_conflicts=True)
|
||||
# Bulk query to check which series already exist
|
||||
tmdb_ids = [s.tmdb_id for s in series_to_create if s.tmdb_id]
|
||||
imdb_ids = [s.imdb_id for s in series_to_create if s.imdb_id]
|
||||
name_year_pairs = [(s.name, s.year) for s in series_to_create if not s.tmdb_id and not s.imdb_id]
|
||||
|
||||
# Get the newly created series with their IDs
|
||||
# We need to re-fetch them to get the primary keys
|
||||
existing_by_tmdb = {s.tmdb_id: s for s in Series.objects.filter(tmdb_id__in=tmdb_ids)} if tmdb_ids else {}
|
||||
existing_by_imdb = {s.imdb_id: s for s in Series.objects.filter(imdb_id__in=imdb_ids)} if imdb_ids else {}
|
||||
|
||||
existing_by_name_year = {}
|
||||
if name_year_pairs:
|
||||
for series in Series.objects.filter(tmdb_id__isnull=True, imdb_id__isnull=True):
|
||||
key = (series.name, series.year)
|
||||
if key in name_year_pairs:
|
||||
existing_by_name_year[key] = series
|
||||
|
||||
# Check each series against the bulk query results
|
||||
series_actually_created = []
|
||||
for series in series_to_create:
|
||||
# Find the series by its unique identifiers
|
||||
if series.tmdb_id:
|
||||
db_series = Series.objects.filter(tmdb_id=series.tmdb_id).first()
|
||||
elif series.imdb_id:
|
||||
db_series = Series.objects.filter(imdb_id=series.imdb_id).first()
|
||||
else:
|
||||
db_series = Series.objects.filter(
|
||||
name=series.name,
|
||||
year=series.year,
|
||||
tmdb_id__isnull=True,
|
||||
imdb_id__isnull=True
|
||||
).first()
|
||||
existing = None
|
||||
if series.tmdb_id and series.tmdb_id in existing_by_tmdb:
|
||||
existing = existing_by_tmdb[series.tmdb_id]
|
||||
elif series.imdb_id and series.imdb_id in existing_by_imdb:
|
||||
existing = existing_by_imdb[series.imdb_id]
|
||||
elif not series.tmdb_id and not series.imdb_id:
|
||||
existing = existing_by_name_year.get((series.name, series.year))
|
||||
|
||||
if db_series:
|
||||
created_series[id(series)] = db_series
|
||||
if existing:
|
||||
created_series[id(series)] = existing
|
||||
else:
|
||||
series_actually_created.append(series)
|
||||
created_series[id(series)] = series
|
||||
|
||||
# Bulk create only series that don't exist
|
||||
if series_actually_created:
|
||||
Series.objects.bulk_create(series_actually_created)
|
||||
|
||||
# Update existing series
|
||||
if series_to_update:
|
||||
|
|
@@ -844,12 +1014,16 @@ def process_series_batch(account, batch, categories, relations, scan_start_time=
|
|||
series.logo = series._logo_to_update
|
||||
series.save(update_fields=['logo'])
|
||||
|
||||
# Update relations to reference the correct series objects
|
||||
# Update relations to reference the correct series objects (with PKs)
|
||||
for relation in relations_to_create:
|
||||
if id(relation.series) in created_series:
|
||||
relation.series = created_series[id(relation.series)]
|
||||
|
||||
# Handle relations
|
||||
for relation in relations_to_update:
|
||||
if id(relation.series) in created_series:
|
||||
relation.series = created_series[id(relation.series)]
|
||||
|
||||
# All series now have PKs, safe to bulk create/update relations
|
||||
if relations_to_create:
|
||||
M3USeriesRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True)
|
||||
|
||||
|
|
@@ -896,6 +1070,33 @@ def extract_duration_from_data(movie_data):
|
|||
return duration_secs
|
||||
|
||||
|
||||
def normalize_rating(rating_value):
|
||||
"""Normalize rating value by converting commas to decimals and validating as float"""
|
||||
if not rating_value:
|
||||
return None
|
||||
|
||||
try:
|
||||
# Convert to string for processing
|
||||
rating_str = str(rating_value).strip()
|
||||
|
||||
if not rating_str or rating_str == '':
|
||||
return None
|
||||
|
||||
# Replace comma with decimal point (European format)
|
||||
rating_str = rating_str.replace(',', '.')
|
||||
|
||||
# Try to convert to float
|
||||
rating_float = float(rating_str)
|
||||
|
||||
# Return as string to maintain compatibility with existing code
|
||||
# but ensure it's a valid numeric format
|
||||
return str(rating_float)
|
||||
except (ValueError, TypeError, AttributeError):
|
||||
# If conversion fails, discard the rating
|
||||
logger.debug(f"Invalid rating value discarded: {rating_value}")
|
||||
return None
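As written above, the function behaves like this (illustrative checks):

assert normalize_rating("7,5") == "7.5"    # European decimal comma becomes a point
assert normalize_rating("8.1") == "8.1"
assert normalize_rating("") is None        # empty values are dropped
assert normalize_rating("N/A") is None     # non-numeric values are discarded
assert normalize_rating(0) is None         # falsy input short-circuits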
|
||||
|
||||
|
||||
def extract_year(date_string):
|
||||
"""Extract year from date string"""
|
||||
if not date_string:
|
||||
|
|
@@ -1021,9 +1222,9 @@ def refresh_series_episodes(account, series, external_series_id, episodes_data=N
|
|||
if should_update_field(series.description, info.get('plot')):
|
||||
series.description = extract_string_from_array_or_string(info.get('plot'))
|
||||
updated = True
|
||||
if (info.get('rating') and str(info.get('rating')).strip() and
|
||||
(not series.rating or not str(series.rating).strip())):
|
||||
series.rating = info.get('rating')
|
||||
normalized_rating = normalize_rating(info.get('rating'))
|
||||
if normalized_rating and (not series.rating or not str(series.rating).strip()):
|
||||
series.rating = normalized_rating
|
||||
updated = True
|
||||
if should_update_field(series.genre, info.get('genre')):
|
||||
series.genre = extract_string_from_array_or_string(info.get('genre'))
|
||||
|
|
@@ -1069,7 +1270,13 @@ def refresh_series_episodes(account, series, external_series_id, episodes_data=N
|
|||
|
||||
|
||||
def batch_process_episodes(account, series, episodes_data, scan_start_time=None):
|
||||
"""Process episodes in batches for better performance"""
|
||||
"""Process episodes in batches for better performance.
|
||||
|
||||
Note: Multiple streams can represent the same episode (e.g., different languages
|
||||
or qualities). Each stream has a unique stream_id, but they share the same
|
||||
season/episode number. We create one Episode record per (series, season, episode)
|
||||
and multiple M3UEpisodeRelation records pointing to it.
|
||||
"""
|
||||
if not episodes_data:
|
||||
return
|
||||
|
||||
|
|
@@ -1086,12 +1293,13 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None)
|
|||
logger.info(f"Batch processing {len(all_episodes_data)} episodes for series {series.name}")
|
||||
|
||||
# Extract episode identifiers
|
||||
episode_keys = []
|
||||
# Note: episode_keys may have duplicates when multiple streams represent same episode
|
||||
episode_keys = set() # Use set to track unique episode keys
|
||||
episode_ids = []
|
||||
for episode_data in all_episodes_data:
|
||||
season_num = episode_data['_season_number']
|
||||
episode_num = episode_data.get('episode_num', 0)
|
||||
episode_keys.append((series.id, season_num, episode_num))
|
||||
episode_keys.add((series.id, season_num, episode_num))
|
||||
episode_ids.append(str(episode_data.get('id')))
|
||||
|
||||
# Pre-fetch existing episodes
|
||||
|
|
@@ -1114,17 +1322,30 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None)
|
|||
relations_to_create = []
|
||||
relations_to_update = []
|
||||
|
||||
# Track episodes we're creating in this batch to avoid duplicates
|
||||
# Key: (series_id, season_number, episode_number) -> Episode object
|
||||
episodes_pending_creation = {}
|
||||
|
||||
for episode_data in all_episodes_data:
|
||||
try:
|
||||
episode_id = str(episode_data.get('id'))
|
||||
episode_name = episode_data.get('title', 'Unknown Episode')
|
||||
season_number = episode_data['_season_number']
|
||||
episode_number = episode_data.get('episode_num', 0)
|
||||
# Ensure season and episode numbers are integers (API may return strings)
|
||||
try:
|
||||
season_number = int(episode_data['_season_number'])
|
||||
except (ValueError, TypeError) as e:
|
||||
logger.warning(f"Invalid season_number '{episode_data.get('_season_number')}' for episode '{episode_name}': {e}")
|
||||
season_number = 0
|
||||
try:
|
||||
episode_number = int(episode_data.get('episode_num', 0))
|
||||
except (ValueError, TypeError) as e:
|
||||
logger.warning(f"Invalid episode_num '{episode_data.get('episode_num')}' for episode '{episode_name}': {e}")
|
||||
episode_number = 0
|
||||
info = episode_data.get('info', {})
|
||||
|
||||
# Extract episode metadata
|
||||
description = info.get('plot') or info.get('overview', '') if info else ''
|
||||
rating = info.get('rating', '') if info else ''
|
||||
rating = normalize_rating(info.get('rating')) if info else None
|
||||
air_date = extract_date_from_data(info) if info else None
|
||||
duration_secs = info.get('duration_secs') if info else None
|
||||
tmdb_id = info.get('tmdb_id') if info else None
|
||||
|
|
@@ -1143,10 +1364,15 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None)
|
|||
if backdrop:
|
||||
custom_props['backdrop_path'] = [backdrop]
|
||||
|
||||
# Find existing episode
|
||||
# Find existing episode - check DB first, then pending creations
|
||||
episode_key = (series.id, season_number, episode_number)
|
||||
episode = existing_episodes.get(episode_key)
|
||||
|
||||
# Check if we already have this episode pending creation (multiple streams for same episode)
|
||||
if not episode and episode_key in episodes_pending_creation:
|
||||
episode = episodes_pending_creation[episode_key]
|
||||
logger.debug(f"Reusing pending episode for S{season_number}E{episode_number} (stream_id: {episode_id})")
|
||||
|
||||
if episode:
|
||||
# Update existing episode
|
||||
updated = False
|
||||
|
|
@@ -1175,7 +1401,9 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None)
|
|||
episode.custom_properties = custom_props if custom_props else None
|
||||
updated = True
|
||||
|
||||
if updated:
|
||||
# Only add to update list if episode has a PK (exists in DB) and isn't already in list
|
||||
# Episodes pending creation don't have PKs yet and will be created via bulk_create
|
||||
if updated and episode.pk and episode not in episodes_to_update:
|
||||
episodes_to_update.append(episode)
|
||||
else:
|
||||
# Create new episode
|
||||
|
|
@@ -1193,6 +1421,8 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None)
|
|||
custom_properties=custom_props if custom_props else None
|
||||
)
|
||||
episodes_to_create.append(episode)
|
||||
# Track this episode so subsequent streams with same season/episode can reuse it
|
||||
episodes_pending_creation[episode_key] = episode
|
||||
|
||||
# Handle episode relation
|
||||
if episode_id in existing_relations:
|
||||
|
|
@@ -1226,9 +1456,43 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None)
|
|||
|
||||
# Execute batch operations
|
||||
with transaction.atomic():
|
||||
# Create new episodes
|
||||
# Create new episodes - use ignore_conflicts in case of race conditions
|
||||
if episodes_to_create:
|
||||
Episode.objects.bulk_create(episodes_to_create)
|
||||
Episode.objects.bulk_create(episodes_to_create, ignore_conflicts=True)
|
||||
|
||||
# Re-fetch the created episodes to get their PKs
|
||||
# We need to do this because bulk_create with ignore_conflicts doesn't set PKs
|
||||
created_episode_keys = [
|
||||
(ep.series_id, ep.season_number, ep.episode_number)
|
||||
for ep in episodes_to_create
|
||||
]
|
||||
db_episodes = Episode.objects.filter(series=series)
|
||||
episode_pk_map = {
|
||||
(ep.series_id, ep.season_number, ep.episode_number): ep
|
||||
for ep in db_episodes
|
||||
}
|
||||
|
||||
# Update relations to point to the actual DB episodes with PKs
|
||||
for relation in relations_to_create:
|
||||
ep = relation.episode
|
||||
key = (ep.series_id, ep.season_number, ep.episode_number)
|
||||
if key in episode_pk_map:
|
||||
relation.episode = episode_pk_map[key]
|
||||
|
||||
# Filter out relations with unsaved episodes (no PK)
|
||||
# This can happen if bulk_create had a conflict and ignore_conflicts=True didn't save the episode
|
||||
valid_relations_to_create = []
|
||||
for relation in relations_to_create:
|
||||
if relation.episode.pk is not None:
|
||||
valid_relations_to_create.append(relation)
|
||||
else:
|
||||
season_num = relation.episode.season_number
|
||||
episode_num = relation.episode.episode_number
|
||||
logger.warning(
|
||||
f"Skipping relation for episode S{season_num}E{episode_num} "
|
||||
f"- episode not saved to database"
|
||||
)
|
||||
relations_to_create = valid_relations_to_create
|
||||
|
||||
# Update existing episodes
|
||||
if episodes_to_update:
|
||||
|
|
@@ -1237,9 +1501,9 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None)
|
|||
'tmdb_id', 'imdb_id', 'custom_properties'
|
||||
])
|
||||
|
||||
# Create new episode relations
|
||||
# Create new episode relations - use ignore_conflicts for stream_id duplicates
|
||||
if relations_to_create:
|
||||
M3UEpisodeRelation.objects.bulk_create(relations_to_create)
|
||||
M3UEpisodeRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True)
|
||||
|
||||
# Update existing episode relations
|
||||
if relations_to_update:
|
||||
|
|
@@ -1341,21 +1605,21 @@ def cleanup_orphaned_vod_content(stale_days=0, scan_start_time=None, account_id=
|
|||
stale_episode_count = stale_episode_relations.count()
|
||||
stale_episode_relations.delete()
|
||||
|
||||
# Clean up movies with no relations (orphaned) - only if no account_id specified (global cleanup)
|
||||
if not account_id:
|
||||
orphaned_movies = Movie.objects.filter(m3u_relations__isnull=True)
|
||||
orphaned_movie_count = orphaned_movies.count()
|
||||
# Clean up movies with no relations (orphaned)
|
||||
# Safe to delete even during account-specific cleanup because if ANY account
|
||||
# has a relation, m3u_relations will not be null
|
||||
orphaned_movies = Movie.objects.filter(m3u_relations__isnull=True)
|
||||
orphaned_movie_count = orphaned_movies.count()
|
||||
if orphaned_movie_count > 0:
|
||||
logger.info(f"Deleting {orphaned_movie_count} orphaned movies with no M3U relations")
|
||||
orphaned_movies.delete()
|
||||
|
||||
# Clean up series with no relations (orphaned) - only if no account_id specified (global cleanup)
|
||||
orphaned_series = Series.objects.filter(m3u_relations__isnull=True)
|
||||
orphaned_series_count = orphaned_series.count()
|
||||
# Clean up series with no relations (orphaned)
|
||||
orphaned_series = Series.objects.filter(m3u_relations__isnull=True)
|
||||
orphaned_series_count = orphaned_series.count()
|
||||
if orphaned_series_count > 0:
|
||||
logger.info(f"Deleting {orphaned_series_count} orphaned series with no M3U relations")
|
||||
orphaned_series.delete()
|
||||
else:
|
||||
# When cleaning up for specific account, we don't remove orphaned content
|
||||
# as other accounts might still reference it
|
||||
orphaned_movie_count = 0
|
||||
orphaned_series_count = 0
|
||||
|
||||
# Episodes will be cleaned up via CASCADE when series are deleted
|
||||
|
||||
|
|
@@ -1797,8 +2061,9 @@ def refresh_movie_advanced_data(m3u_movie_relation_id, force_refresh=False):
|
|||
if info.get('plot') and info.get('plot') != movie.description:
|
||||
movie.description = info.get('plot')
|
||||
updated = True
|
||||
if info.get('rating') and info.get('rating') != movie.rating:
|
||||
movie.rating = info.get('rating')
|
||||
normalized_rating = normalize_rating(info.get('rating'))
|
||||
if normalized_rating and normalized_rating != movie.rating:
|
||||
movie.rating = normalized_rating
|
||||
updated = True
|
||||
if info.get('genre') and info.get('genre') != movie.genre:
|
||||
movie.genre = info.get('genre')
|
||||
|
|
@@ -1915,7 +2180,7 @@ def refresh_movie_advanced_data(m3u_movie_relation_id, force_refresh=False):
|
|||
|
||||
def validate_logo_reference(obj, obj_type="object"):
|
||||
"""
|
||||
Validate that a logo reference exists in the database.
|
||||
Validate that a VOD logo reference exists in the database.
|
||||
If not, set it to None to prevent foreign key constraint violations.
|
||||
|
||||
Args:
|
||||
|
|
@@ -1935,9 +2200,9 @@ def validate_logo_reference(obj, obj_type="object"):
|
|||
|
||||
try:
|
||||
# Verify the logo exists in the database
|
||||
Logo.objects.get(pk=obj.logo.pk)
|
||||
VODLogo.objects.get(pk=obj.logo.pk)
|
||||
return True
|
||||
except Logo.DoesNotExist:
|
||||
logger.warning(f"Logo with ID {obj.logo.pk} does not exist in database for {obj_type} '{getattr(obj, 'name', 'Unknown')}', setting to None")
|
||||
except VODLogo.DoesNotExist:
|
||||
logger.warning(f"VOD Logo with ID {obj.logo.pk} does not exist in database for {obj_type} '{getattr(obj, 'name', 'Unknown')}', setting to None")
|
||||
obj.logo = None
|
||||
return False
|
||||
|
|
|
|||
|
|
@@ -2,7 +2,16 @@
|
|||
|
||||
from django.urls import path, include
|
||||
from rest_framework.routers import DefaultRouter
|
||||
from .api_views import UserAgentViewSet, StreamProfileViewSet, CoreSettingsViewSet, environment, version, rehash_streams_endpoint
|
||||
from .api_views import (
|
||||
UserAgentViewSet,
|
||||
StreamProfileViewSet,
|
||||
CoreSettingsViewSet,
|
||||
environment,
|
||||
version,
|
||||
rehash_streams_endpoint,
|
||||
TimezoneListView,
|
||||
get_system_events
|
||||
)
|
||||
|
||||
router = DefaultRouter()
|
||||
router.register(r'useragents', UserAgentViewSet, basename='useragent')
|
||||
|
|
@@ -12,5 +21,7 @@ urlpatterns = [
|
|||
path('settings/env/', environment, name='token_refresh'),
|
||||
path('version/', version, name='version'),
|
||||
path('rehash-streams/', rehash_streams_endpoint, name='rehash_streams'),
|
||||
path('timezones/', TimezoneListView.as_view(), name='timezones'),
|
||||
path('system-events/', get_system_events, name='system_events'),
|
||||
path('', include(router.urls)),
|
||||
]
|
||||
|
|
|
|||
|
|
@@ -5,16 +5,19 @@ import ipaddress
|
|||
import logging
|
||||
from rest_framework import viewsets, status
|
||||
from rest_framework.response import Response
|
||||
from rest_framework.views import APIView
|
||||
from django.shortcuts import get_object_or_404
|
||||
from rest_framework.permissions import IsAuthenticated
|
||||
from rest_framework.decorators import api_view, permission_classes, action
|
||||
from drf_yasg.utils import swagger_auto_schema
|
||||
from drf_yasg import openapi
|
||||
from .models import (
|
||||
UserAgent,
|
||||
StreamProfile,
|
||||
CoreSettings,
|
||||
STREAM_HASH_KEY,
|
||||
NETWORK_ACCESS,
|
||||
STREAM_SETTINGS_KEY,
|
||||
DVR_SETTINGS_KEY,
|
||||
NETWORK_ACCESS_KEY,
|
||||
PROXY_SETTINGS_KEY,
|
||||
)
|
||||
from .serializers import (
|
||||
|
|
@@ -66,16 +69,28 @@ class CoreSettingsViewSet(viewsets.ModelViewSet):
|
|||
|
||||
def update(self, request, *args, **kwargs):
|
||||
instance = self.get_object()
|
||||
old_value = instance.value
|
||||
response = super().update(request, *args, **kwargs)
|
||||
if instance.key == STREAM_HASH_KEY:
|
||||
if instance.value != request.data["value"]:
|
||||
rehash_streams.delay(request.data["value"].split(","))
|
||||
|
||||
# If DVR pre/post offsets changed, reschedule upcoming recordings
|
||||
try:
|
||||
from core.models import DVR_PRE_OFFSET_MINUTES_KEY, DVR_POST_OFFSET_MINUTES_KEY
|
||||
if instance.key in (DVR_PRE_OFFSET_MINUTES_KEY, DVR_POST_OFFSET_MINUTES_KEY):
|
||||
if instance.value != request.data.get("value"):
|
||||
# If stream settings changed and m3u_hash_key is different, rehash streams
|
||||
if instance.key == STREAM_SETTINGS_KEY:
|
||||
new_value = request.data.get("value", {})
|
||||
if isinstance(new_value, dict) and isinstance(old_value, dict):
|
||||
old_hash = old_value.get("m3u_hash_key", "")
|
||||
new_hash = new_value.get("m3u_hash_key", "")
|
||||
if old_hash != new_hash:
|
||||
hash_keys = new_hash.split(",") if isinstance(new_hash, str) else new_hash
|
||||
rehash_streams.delay(hash_keys)
|
||||
|
||||
# If DVR settings changed and pre/post offsets are different, reschedule upcoming recordings
|
||||
if instance.key == DVR_SETTINGS_KEY:
|
||||
new_value = request.data.get("value", {})
|
||||
if isinstance(new_value, dict) and isinstance(old_value, dict):
|
||||
old_pre = old_value.get("pre_offset_minutes")
|
||||
new_pre = new_value.get("pre_offset_minutes")
|
||||
old_post = old_value.get("post_offset_minutes")
|
||||
new_post = new_value.get("post_offset_minutes")
|
||||
if old_pre != new_pre or old_post != new_post:
|
||||
try:
|
||||
# Prefer async task if Celery is available
|
||||
from apps.channels.tasks import reschedule_upcoming_recordings_for_offset_change
|
||||
|
|
@@ -84,24 +99,23 @@ class CoreSettingsViewSet(viewsets.ModelViewSet):
|
|||
# Fallback to synchronous implementation
|
||||
from apps.channels.tasks import reschedule_upcoming_recordings_for_offset_change_impl
|
||||
reschedule_upcoming_recordings_for_offset_change_impl()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return response
|
||||
|
||||
def create(self, request, *args, **kwargs):
|
||||
response = super().create(request, *args, **kwargs)
|
||||
# If creating DVR pre/post offset settings, also reschedule upcoming recordings
|
||||
# If creating DVR settings with offset values, reschedule upcoming recordings
|
||||
try:
|
||||
key = request.data.get("key")
|
||||
from core.models import DVR_PRE_OFFSET_MINUTES_KEY, DVR_POST_OFFSET_MINUTES_KEY
|
||||
if key in (DVR_PRE_OFFSET_MINUTES_KEY, DVR_POST_OFFSET_MINUTES_KEY):
|
||||
try:
|
||||
from apps.channels.tasks import reschedule_upcoming_recordings_for_offset_change
|
||||
reschedule_upcoming_recordings_for_offset_change.delay()
|
||||
except Exception:
|
||||
from apps.channels.tasks import reschedule_upcoming_recordings_for_offset_change_impl
|
||||
reschedule_upcoming_recordings_for_offset_change_impl()
|
||||
if key == DVR_SETTINGS_KEY:
|
||||
value = request.data.get("value", {})
|
||||
if isinstance(value, dict) and ("pre_offset_minutes" in value or "post_offset_minutes" in value):
|
||||
try:
|
||||
from apps.channels.tasks import reschedule_upcoming_recordings_for_offset_change
|
||||
reschedule_upcoming_recordings_for_offset_change.delay()
|
||||
except Exception:
|
||||
from apps.channels.tasks import reschedule_upcoming_recordings_for_offset_change_impl
|
||||
reschedule_upcoming_recordings_for_offset_change_impl()
|
||||
except Exception:
|
||||
pass
|
||||
return response
|
||||
|
|
@@ -109,13 +123,13 @@ class CoreSettingsViewSet(viewsets.ModelViewSet):
|
|||
def check(self, request, *args, **kwargs):
|
||||
data = request.data
|
||||
|
||||
if data.get("key") == NETWORK_ACCESS:
|
||||
if data.get("key") == NETWORK_ACCESS_KEY:
|
||||
client_ip = ipaddress.ip_address(get_client_ip(request))
|
||||
|
||||
in_network = {}
|
||||
invalid = []
|
||||
|
||||
value = json.loads(data.get("value", "{}"))
|
||||
value = data.get("value", {})
|
||||
for key, val in value.items():
|
||||
in_network[key] = []
|
||||
cidrs = val.split(",")
|
||||
|
|
@@ -141,7 +155,11 @@ class CoreSettingsViewSet(viewsets.ModelViewSet):
|
|||
status=status.HTTP_200_OK,
|
||||
)
|
||||
|
||||
return Response(in_network, status=status.HTTP_200_OK)
|
||||
response_data = {
|
||||
**in_network,
|
||||
"client_ip": str(client_ip)
|
||||
}
|
||||
return Response(response_data, status=status.HTTP_200_OK)
|
||||
|
||||
return Response({}, status=status.HTTP_200_OK)
|
||||
|
||||
|
|
@@ -155,8 +173,8 @@ class ProxySettingsViewSet(viewsets.ViewSet):
|
|||
"""Get or create the proxy settings CoreSettings entry"""
|
||||
try:
|
||||
settings_obj = CoreSettings.objects.get(key=PROXY_SETTINGS_KEY)
|
||||
settings_data = json.loads(settings_obj.value)
|
||||
except (CoreSettings.DoesNotExist, json.JSONDecodeError):
|
||||
settings_data = settings_obj.value
|
||||
except CoreSettings.DoesNotExist:
|
||||
# Create default settings
|
||||
settings_data = {
|
||||
"buffering_timeout": 15,
|
||||
|
|
@@ -169,7 +187,7 @@ class ProxySettingsViewSet(viewsets.ViewSet):
|
|||
key=PROXY_SETTINGS_KEY,
|
||||
defaults={
|
||||
"name": "Proxy Settings",
|
||||
"value": json.dumps(settings_data)
|
||||
"value": settings_data
|
||||
}
|
||||
)
|
||||
return settings_obj, settings_data
|
||||
|
|
@@ -191,8 +209,8 @@ class ProxySettingsViewSet(viewsets.ViewSet):
|
|||
serializer = ProxySettingsSerializer(data=request.data)
|
||||
serializer.is_valid(raise_exception=True)
|
||||
|
||||
# Update the JSON data
|
||||
settings_obj.value = json.dumps(serializer.validated_data)
|
||||
# Update the JSON data - store as dict directly
|
||||
settings_obj.value = serializer.validated_data
|
||||
settings_obj.save()
|
||||
|
||||
return Response(serializer.validated_data)
|
||||
|
|
@@ -207,8 +225,8 @@ class ProxySettingsViewSet(viewsets.ViewSet):
|
|||
serializer = ProxySettingsSerializer(data=updated_data)
|
||||
serializer.is_valid(raise_exception=True)
|
||||
|
||||
# Update the JSON data
|
||||
settings_obj.value = json.dumps(serializer.validated_data)
|
||||
# Update the JSON data - store as dict directly
|
||||
settings_obj.value = serializer.validated_data
|
||||
settings_obj.save()
|
||||
|
||||
return Response(serializer.validated_data)
|
||||
|
|
@@ -326,27 +344,132 @@ def rehash_streams_endpoint(request):
|
|||
"""Trigger the rehash streams task"""
|
||||
try:
|
||||
# Get the current hash keys from settings
|
||||
hash_key_setting = CoreSettings.objects.get(key=STREAM_HASH_KEY)
|
||||
hash_keys = hash_key_setting.value.split(",")
|
||||
|
||||
hash_key = CoreSettings.get_m3u_hash_key()
|
||||
hash_keys = hash_key.split(",") if isinstance(hash_key, str) else hash_key
|
||||
|
||||
# Queue the rehash task
|
||||
task = rehash_streams.delay(hash_keys)
|
||||
|
||||
|
||||
return Response({
|
||||
"success": True,
|
||||
"message": "Stream rehashing task has been queued",
|
||||
"task_id": task.id
|
||||
}, status=status.HTTP_200_OK)
|
||||
|
||||
except CoreSettings.DoesNotExist:
|
||||
|
||||
except Exception as e:
|
||||
return Response({
|
||||
"success": False,
|
||||
"message": "Hash key settings not found"
|
||||
"message": f"Error triggering rehash: {str(e)}"
|
||||
}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error triggering rehash streams: {e}")
|
||||
return Response({
|
||||
"success": False,
|
||||
"message": "Failed to trigger rehash task"
|
||||
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
|
||||
|
||||
# ─────────────────────────────
|
||||
# Timezone List API
|
||||
# ─────────────────────────────
|
||||
class TimezoneListView(APIView):
|
||||
"""
|
||||
API endpoint that returns all available timezones supported by pytz.
|
||||
Returns a list of timezone names grouped by region for easy selection.
|
||||
This is a general utility endpoint that can be used throughout the application.
|
||||
"""
|
||||
|
||||
def get_permissions(self):
|
||||
return [Authenticated()]
|
||||
|
||||
@swagger_auto_schema(
|
||||
operation_description="Get list of all supported timezones",
|
||||
responses={200: openapi.Response('List of timezones with grouping by region')}
|
||||
)
|
||||
def get(self, request):
|
||||
import pytz
|
||||
|
||||
# Get all common timezones (excludes deprecated ones)
|
||||
all_timezones = sorted(pytz.common_timezones)
|
||||
|
||||
# Group by region for better UX
|
||||
grouped = {}
|
||||
for tz in all_timezones:
|
||||
if '/' in tz:
|
||||
region = tz.split('/')[0]
|
||||
if region not in grouped:
|
||||
grouped[region] = []
|
||||
grouped[region].append(tz)
|
||||
else:
|
||||
# Handle special zones like UTC, GMT, etc.
|
||||
if 'Other' not in grouped:
|
||||
grouped['Other'] = []
|
||||
grouped['Other'].append(tz)
|
||||
|
||||
return Response({
|
||||
'timezones': all_timezones,
|
||||
'grouped': grouped,
|
||||
'count': len(all_timezones)
|
||||
})
|
||||
|
||||
|
||||
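For reference, a minimal sketch of the JSON body this view returns, using only the keys visible above ('timezones', 'grouped', 'count'); the values shown are illustrative and the real lists depend on the installed pytz version.

# Illustrative response shape for TimezoneListView.get(); keys taken from the view above.
example_response = {
    "timezones": ["Africa/Abidjan", "America/New_York", "Europe/Berlin", "UTC"],
    "grouped": {
        "Africa": ["Africa/Abidjan"],
        "America": ["America/New_York"],
        "Europe": ["Europe/Berlin"],
        "Other": ["UTC"],  # zones without a '/' are grouped under 'Other'
    },
    "count": 4,
}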
# ─────────────────────────────
|
||||
# System Events API
|
||||
# ─────────────────────────────
|
||||
@api_view(['GET'])
|
||||
@permission_classes([IsAuthenticated])
|
||||
def get_system_events(request):
|
||||
"""
|
||||
Get recent system events (channel start/stop, buffering, client connections, etc.)
|
||||
|
||||
Query Parameters:
|
||||
limit: Number of events to return per page (default: 100, max: 1000)
|
||||
offset: Number of events to skip (for pagination, default: 0)
|
||||
event_type: Filter by specific event type (optional)
|
||||
"""
|
||||
from core.models import SystemEvent
|
||||
|
||||
try:
|
||||
# Get pagination params
|
||||
limit = min(int(request.GET.get('limit', 100)), 1000)
|
||||
offset = int(request.GET.get('offset', 0))
|
||||
|
||||
# Start with all events
|
||||
events = SystemEvent.objects.all()
|
||||
|
||||
# Filter by event_type if provided
|
||||
event_type = request.GET.get('event_type')
|
||||
if event_type:
|
||||
events = events.filter(event_type=event_type)
|
||||
|
||||
# Get total count before applying pagination
|
||||
total_count = events.count()
|
||||
|
||||
# Apply offset and limit for pagination
|
||||
events = events[offset:offset + limit]
|
||||
|
||||
# Serialize the data
|
||||
events_data = [{
|
||||
'id': event.id,
|
||||
'event_type': event.event_type,
|
||||
'event_type_display': event.get_event_type_display(),
|
||||
'timestamp': event.timestamp.isoformat(),
|
||||
'channel_id': str(event.channel_id) if event.channel_id else None,
|
||||
'channel_name': event.channel_name,
|
||||
'details': event.details
|
||||
} for event in events]
|
||||
|
||||
return Response({
|
||||
'events': events_data,
|
||||
'count': len(events_data),
|
||||
'total': total_count,
|
||||
'offset': offset,
|
||||
'limit': limit
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching system events: {e}")
|
||||
return Response({
|
||||
'error': 'Failed to fetch system events'
|
||||
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
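A hedged usage sketch for this endpoint: the URL below is an assumption (the route registration is not part of this diff), but the query parameters and response keys match the view above.

import requests

# Hypothetical base URL and token; limit/offset/event_type and the response keys
# ('events', 'total', 'offset', 'limit') come from get_system_events above.
resp = requests.get(
    "http://dispatcharr.local/api/core/system-events/",  # assumed route
    params={"limit": 50, "offset": 0, "event_type": "channel_start"},
    headers={"Authorization": "Bearer <token>"},
)
data = resp.json()
for event in data["events"]:
    print(event["timestamp"], event["event_type_display"], event["channel_name"])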
|
|
|
|||
|
|
@@ -23,7 +23,7 @@
|
|||
"model": "core.streamprofile",
|
||||
"pk": 1,
|
||||
"fields": {
|
||||
"name": "ffmpeg",
|
||||
"name": "FFmpeg",
|
||||
"command": "ffmpeg",
|
||||
"parameters": "-i {streamUrl} -c:v copy -c:a copy -f mpegts pipe:1",
|
||||
"is_active": true,
|
||||
|
|
@@ -34,11 +34,22 @@
|
|||
"model": "core.streamprofile",
|
||||
"pk": 2,
|
||||
"fields": {
|
||||
"name": "streamlink",
|
||||
"name": "Streamlink",
|
||||
"command": "streamlink",
|
||||
"parameters": "{streamUrl} best --stdout",
|
||||
"is_active": true,
|
||||
"user_agent": "1"
|
||||
}
|
||||
},
|
||||
{
|
||||
"model": "core.streamprofile",
|
||||
"pk": 3,
|
||||
"fields": {
|
||||
"name": "VLC",
|
||||
"command": "cvlc",
|
||||
"parameters": "-vv -I dummy --no-video-title-show --http-user-agent {userAgent} {streamUrl} --sout #standard{access=file,mux=ts,dst=-}",
|
||||
"is_active": true,
|
||||
"user_agent": "1"
|
||||
}
|
||||
}
|
||||
]
|
||||
|
|
|
|||
|
|
@@ -1,13 +1,13 @@
|
|||
# your_app/management/commands/update_column.py
|
||||
|
||||
from django.core.management.base import BaseCommand
|
||||
from core.models import CoreSettings, NETWORK_ACCESS
|
||||
from core.models import CoreSettings, NETWORK_ACCESS_KEY
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = "Reset network access settings"
|
||||
|
||||
def handle(self, *args, **options):
|
||||
setting = CoreSettings.objects.get(key=NETWORK_ACCESS)
|
||||
setting.value = "{}"
|
||||
setting = CoreSettings.objects.get(key=NETWORK_ACCESS_KEY)
|
||||
setting.value = {}
|
||||
setting.save()
|
||||
|
|
|
|||
28
core/migrations/0017_systemevent.py
Normal file
|
|
@@ -0,0 +1,28 @@
|
|||
# Generated by Django 5.2.4 on 2025-11-20 20:47
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('core', '0016_update_dvr_template_paths'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.CreateModel(
|
||||
name='SystemEvent',
|
||||
fields=[
|
||||
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
|
||||
('event_type', models.CharField(choices=[('channel_start', 'Channel Started'), ('channel_stop', 'Channel Stopped'), ('channel_buffering', 'Channel Buffering'), ('channel_failover', 'Channel Failover'), ('channel_reconnect', 'Channel Reconnected'), ('channel_error', 'Channel Error'), ('client_connect', 'Client Connected'), ('client_disconnect', 'Client Disconnected'), ('recording_start', 'Recording Started'), ('recording_end', 'Recording Ended'), ('stream_switch', 'Stream Switched'), ('m3u_refresh', 'M3U Refreshed'), ('m3u_download', 'M3U Downloaded'), ('epg_refresh', 'EPG Refreshed'), ('epg_download', 'EPG Downloaded')], db_index=True, max_length=50)),
|
||||
('timestamp', models.DateTimeField(auto_now_add=True, db_index=True)),
|
||||
('channel_id', models.UUIDField(blank=True, db_index=True, null=True)),
|
||||
('channel_name', models.CharField(blank=True, max_length=255, null=True)),
|
||||
('details', models.JSONField(blank=True, default=dict)),
|
||||
],
|
||||
options={
|
||||
'ordering': ['-timestamp'],
|
||||
'indexes': [models.Index(fields=['-timestamp'], name='core_system_timesta_c6c3d1_idx'), models.Index(fields=['event_type', '-timestamp'], name='core_system_event_t_4267d9_idx')],
|
||||
},
|
||||
),
|
||||
]
|
||||
18
core/migrations/0018_alter_systemevent_event_type.py
Normal file
|
|
@@ -0,0 +1,18 @@
|
|||
# Generated by Django 5.2.4 on 2025-11-21 15:59
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('core', '0017_systemevent'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='systemevent',
|
||||
name='event_type',
|
||||
field=models.CharField(choices=[('channel_start', 'Channel Started'), ('channel_stop', 'Channel Stopped'), ('channel_buffering', 'Channel Buffering'), ('channel_failover', 'Channel Failover'), ('channel_reconnect', 'Channel Reconnected'), ('channel_error', 'Channel Error'), ('client_connect', 'Client Connected'), ('client_disconnect', 'Client Disconnected'), ('recording_start', 'Recording Started'), ('recording_end', 'Recording Ended'), ('stream_switch', 'Stream Switched'), ('m3u_refresh', 'M3U Refreshed'), ('m3u_download', 'M3U Downloaded'), ('epg_refresh', 'EPG Refreshed'), ('epg_download', 'EPG Downloaded'), ('login_success', 'Login Successful'), ('login_failed', 'Login Failed'), ('logout', 'User Logged Out'), ('m3u_blocked', 'M3U Download Blocked'), ('epg_blocked', 'EPG Download Blocked')], db_index=True, max_length=50),
|
||||
),
|
||||
]
|
||||
42
core/migrations/0019_add_vlc_stream_profile.py
Normal file
|
|
@@ -0,0 +1,42 @@
|
|||
# Generated migration to add VLC stream profile
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
def add_vlc_profile(apps, schema_editor):
|
||||
StreamProfile = apps.get_model("core", "StreamProfile")
|
||||
UserAgent = apps.get_model("core", "UserAgent")
|
||||
|
||||
# Check if VLC profile already exists
|
||||
if not StreamProfile.objects.filter(name="VLC").exists():
|
||||
# Get the TiviMate user agent (should be pk=1)
|
||||
try:
|
||||
tivimate_ua = UserAgent.objects.get(pk=1)
|
||||
except UserAgent.DoesNotExist:
|
||||
# Fallback: get first available user agent
|
||||
tivimate_ua = UserAgent.objects.first()
|
||||
if not tivimate_ua:
|
||||
# No user agents exist, skip creating profile
|
||||
return
|
||||
|
||||
StreamProfile.objects.create(
|
||||
name="VLC",
|
||||
command="cvlc",
|
||||
parameters="-vv -I dummy --no-video-title-show --http-user-agent {userAgent} {streamUrl} --sout #standard{access=file,mux=ts,dst=-}",
|
||||
is_active=True,
|
||||
user_agent=tivimate_ua,
|
||||
locked=True, # Make it read-only like ffmpeg/streamlink
|
||||
)
|
||||
|
||||
def remove_vlc_profile(apps, schema_editor):
|
||||
StreamProfile = apps.get_model("core", "StreamProfile")
|
||||
StreamProfile.objects.filter(name="VLC").delete()
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('core', '0018_alter_systemevent_event_type'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(add_vlc_profile, remove_vlc_profile),
|
||||
]
|
||||
267
core/migrations/0020_change_coresettings_value_to_jsonfield.py
Normal file
|
|
@@ -0,0 +1,267 @@
|
|||
# Generated migration to change CoreSettings value field to JSONField and consolidate settings
|
||||
|
||||
import json
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
def convert_string_to_json(apps, schema_editor):
|
||||
"""Convert existing string values to appropriate JSON types before changing column type"""
|
||||
CoreSettings = apps.get_model("core", "CoreSettings")
|
||||
|
||||
for setting in CoreSettings.objects.all():
|
||||
value = setting.value
|
||||
|
||||
if not value:
|
||||
# Empty strings become empty string in JSON
|
||||
setting.value = json.dumps("")
|
||||
setting.save(update_fields=['value'])
|
||||
continue
|
||||
|
||||
# Try to parse as JSON if it looks like JSON (objects/arrays)
|
||||
if value.startswith('{') or value.startswith('['):
|
||||
try:
|
||||
parsed = json.loads(value)
|
||||
# Store as JSON string temporarily (column is still CharField)
|
||||
setting.value = json.dumps(parsed)
|
||||
setting.save(update_fields=['value'])
|
||||
continue
|
||||
except (json.JSONDecodeError, ValueError):
|
||||
pass
|
||||
|
||||
# Try to parse as number
|
||||
try:
|
||||
# Check if it's an integer
|
||||
if '.' not in value and value.lstrip('-').isdigit():
|
||||
setting.value = json.dumps(int(value))
|
||||
setting.save(update_fields=['value'])
|
||||
continue
|
||||
# Check if it's a float
|
||||
float_val = float(value)
|
||||
setting.value = json.dumps(float_val)
|
||||
setting.save(update_fields=['value'])
|
||||
continue
|
||||
except (ValueError, AttributeError):
|
||||
pass
|
||||
|
||||
# Check for booleans
|
||||
if value.lower() in ('true', 'false', '1', '0', 'yes', 'no', 'on', 'off'):
|
||||
bool_val = value.lower() in ('true', '1', 'yes', 'on')
|
||||
setting.value = json.dumps(bool_val)
|
||||
setting.save(update_fields=['value'])
|
||||
continue
|
||||
|
||||
# Default: store as JSON string
|
||||
setting.value = json.dumps(value)
|
||||
setting.save(update_fields=['value'])
|
||||
|
||||
|
||||
def consolidate_settings(apps, schema_editor):
|
||||
"""Consolidate individual setting rows into grouped JSON objects."""
|
||||
CoreSettings = apps.get_model("core", "CoreSettings")
|
||||
|
||||
# Helper to get setting value
|
||||
def get_value(key, default=None):
|
||||
try:
|
||||
obj = CoreSettings.objects.get(key=key)
|
||||
return obj.value if obj.value is not None else default
|
||||
except CoreSettings.DoesNotExist:
|
||||
return default
|
||||
|
||||
# STREAM SETTINGS
|
||||
stream_settings = {
|
||||
"default_user_agent": get_value("default-user-agent"),
|
||||
"default_stream_profile": get_value("default-stream-profile"),
|
||||
"m3u_hash_key": get_value("m3u-hash-key", ""),
|
||||
"preferred_region": get_value("preferred-region"),
|
||||
"auto_import_mapped_files": get_value("auto-import-mapped-files"),
|
||||
}
|
||||
CoreSettings.objects.update_or_create(
|
||||
key="stream_settings",
|
||||
defaults={"name": "Stream Settings", "value": stream_settings}
|
||||
)
|
||||
|
||||
# DVR SETTINGS
|
||||
dvr_settings = {
|
||||
"tv_template": get_value("dvr-tv-template", "TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv"),
|
||||
"movie_template": get_value("dvr-movie-template", "Movies/{title} ({year}).mkv"),
|
||||
"tv_fallback_dir": get_value("dvr-tv-fallback-dir", "TV_Shows"),
|
||||
"tv_fallback_template": get_value("dvr-tv-fallback-template", "TV_Shows/{show}/{start}.mkv"),
|
||||
"movie_fallback_template": get_value("dvr-movie-fallback-template", "Movies/{start}.mkv"),
|
||||
"comskip_enabled": bool(get_value("dvr-comskip-enabled", False)),
|
||||
"comskip_custom_path": get_value("dvr-comskip-custom-path", ""),
|
||||
"pre_offset_minutes": int(get_value("dvr-pre-offset-minutes", 0) or 0),
|
||||
"post_offset_minutes": int(get_value("dvr-post-offset-minutes", 0) or 0),
|
||||
"series_rules": get_value("dvr-series-rules", []),
|
||||
}
|
||||
CoreSettings.objects.update_or_create(
|
||||
key="dvr_settings",
|
||||
defaults={"name": "DVR Settings", "value": dvr_settings}
|
||||
)
|
||||
|
||||
# BACKUP SETTINGS - using underscore keys (not dashes)
|
||||
backup_settings = {
|
||||
"schedule_enabled": get_value("backup_schedule_enabled") if get_value("backup_schedule_enabled") is not None else True,
|
||||
"schedule_frequency": get_value("backup_schedule_frequency") or "daily",
|
||||
"schedule_time": get_value("backup_schedule_time") or "03:00",
|
||||
"schedule_day_of_week": get_value("backup_schedule_day_of_week") if get_value("backup_schedule_day_of_week") is not None else 0,
|
||||
"retention_count": get_value("backup_retention_count") if get_value("backup_retention_count") is not None else 3,
|
||||
"schedule_cron_expression": get_value("backup_schedule_cron_expression") or "",
|
||||
}
|
||||
CoreSettings.objects.update_or_create(
|
||||
key="backup_settings",
|
||||
defaults={"name": "Backup Settings", "value": backup_settings}
|
||||
)
|
||||
|
||||
# SYSTEM SETTINGS
|
||||
system_settings = {
|
||||
"time_zone": get_value("system-time-zone", "UTC"),
|
||||
"max_system_events": int(get_value("max-system-events", 100) or 100),
|
||||
}
|
||||
CoreSettings.objects.update_or_create(
|
||||
key="system_settings",
|
||||
defaults={"name": "System Settings", "value": system_settings}
|
||||
)
|
||||
|
||||
# Rename proxy-settings to proxy_settings (if it exists with old name)
|
||||
try:
|
||||
old_proxy = CoreSettings.objects.get(key="proxy-settings")
|
||||
old_proxy.key = "proxy_settings"
|
||||
old_proxy.save()
|
||||
except CoreSettings.DoesNotExist:
|
||||
pass
|
||||
|
||||
# Ensure proxy_settings exists with defaults if not present
|
||||
proxy_obj, proxy_created = CoreSettings.objects.get_or_create(
|
||||
key="proxy_settings",
|
||||
defaults={
|
||||
"name": "Proxy Settings",
|
||||
"value": {
|
||||
"buffering_timeout": 15,
|
||||
"buffering_speed": 1.0,
|
||||
"redis_chunk_ttl": 60,
|
||||
"channel_shutdown_delay": 0,
|
||||
"channel_init_grace_period": 5,
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
# Rename network-access to network_access (if it exists with old name)
|
||||
try:
|
||||
old_network = CoreSettings.objects.get(key="network-access")
|
||||
old_network.key = "network_access"
|
||||
old_network.save()
|
||||
except CoreSettings.DoesNotExist:
|
||||
pass
|
||||
|
||||
# Ensure network_access exists with defaults if not present
|
||||
network_obj, network_created = CoreSettings.objects.get_or_create(
|
||||
key="network_access",
|
||||
defaults={
|
||||
"name": "Network Access",
|
||||
"value": {}
|
||||
}
|
||||
)
|
||||
# Delete old individual setting rows (keep only the new grouped settings)
|
||||
grouped_keys = ["stream_settings", "dvr_settings", "backup_settings", "system_settings", "proxy_settings", "network_access"]
|
||||
CoreSettings.objects.exclude(key__in=grouped_keys).delete()
|
||||
|
||||
|
||||
def reverse_migration(apps, schema_editor):
|
||||
"""Reverse migration: split grouped settings and convert JSON back to strings"""
|
||||
CoreSettings = apps.get_model("core", "CoreSettings")
|
||||
|
||||
# Helper to create individual setting
|
||||
def create_setting(key, name, value):
|
||||
# Convert value back to string representation for CharField
|
||||
if isinstance(value, str):
|
||||
str_value = value
|
||||
elif isinstance(value, bool):
|
||||
str_value = "true" if value else "false"
|
||||
elif isinstance(value, (int, float)):
|
||||
str_value = str(value)
|
||||
elif isinstance(value, (dict, list)):
|
||||
str_value = json.dumps(value)
|
||||
elif value is None:
|
||||
str_value = ""
|
||||
else:
|
||||
str_value = str(value)
|
||||
|
||||
CoreSettings.objects.update_or_create(
|
||||
key=key,
|
||||
defaults={"name": name, "value": str_value}
|
||||
)
|
||||
|
||||
# Split stream_settings
|
||||
try:
|
||||
stream = CoreSettings.objects.get(key="stream_settings")
|
||||
if isinstance(stream.value, dict):
|
||||
create_setting("default_user_agent", "Default User Agent", stream.value.get("default_user_agent"))
|
||||
create_setting("default_stream_profile", "Default Stream Profile", stream.value.get("default_stream_profile"))
|
||||
create_setting("stream_hash_key", "Stream Hash Key", stream.value.get("m3u_hash_key", ""))
|
||||
create_setting("preferred_region", "Preferred Region", stream.value.get("preferred_region"))
|
||||
create_setting("auto_import_mapped_files", "Auto Import Mapped Files", stream.value.get("auto_import_mapped_files"))
|
||||
stream.delete()
|
||||
except CoreSettings.DoesNotExist:
|
||||
pass
|
||||
|
||||
# Split dvr_settings
|
||||
try:
|
||||
dvr = CoreSettings.objects.get(key="dvr_settings")
|
||||
if isinstance(dvr.value, dict):
|
||||
create_setting("dvr_tv_template", "DVR TV Template", dvr.value.get("tv_template", "TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv"))
|
||||
create_setting("dvr_movie_template", "DVR Movie Template", dvr.value.get("movie_template", "Movies/{title} ({year}).mkv"))
|
||||
create_setting("dvr_tv_fallback_dir", "DVR TV Fallback Dir", dvr.value.get("tv_fallback_dir", "TV_Shows"))
|
||||
create_setting("dvr_tv_fallback_template", "DVR TV Fallback Template", dvr.value.get("tv_fallback_template", "TV_Shows/{show}/{start}.mkv"))
|
||||
create_setting("dvr_movie_fallback_template", "DVR Movie Fallback Template", dvr.value.get("movie_fallback_template", "Movies/{start}.mkv"))
|
||||
create_setting("dvr_comskip_enabled", "DVR Comskip Enabled", dvr.value.get("comskip_enabled", False))
|
||||
create_setting("dvr_comskip_custom_path", "DVR Comskip Custom Path", dvr.value.get("comskip_custom_path", ""))
|
||||
create_setting("dvr_pre_offset_minutes", "DVR Pre Offset Minutes", dvr.value.get("pre_offset_minutes", 0))
|
||||
create_setting("dvr_post_offset_minutes", "DVR Post Offset Minutes", dvr.value.get("post_offset_minutes", 0))
|
||||
create_setting("dvr_series_rules", "DVR Series Rules", dvr.value.get("series_rules", []))
|
||||
dvr.delete()
|
||||
except CoreSettings.DoesNotExist:
|
||||
pass
|
||||
|
||||
# Split backup_settings
|
||||
try:
|
||||
backup = CoreSettings.objects.get(key="backup_settings")
|
||||
if isinstance(backup.value, dict):
|
||||
create_setting("backup_schedule_enabled", "Backup Schedule Enabled", backup.value.get("schedule_enabled", False))
|
||||
create_setting("backup_schedule_frequency", "Backup Schedule Frequency", backup.value.get("schedule_frequency", "weekly"))
|
||||
create_setting("backup_schedule_time", "Backup Schedule Time", backup.value.get("schedule_time", "02:00"))
|
||||
create_setting("backup_schedule_day_of_week", "Backup Schedule Day of Week", backup.value.get("schedule_day_of_week", 0))
|
||||
create_setting("backup_retention_count", "Backup Retention Count", backup.value.get("retention_count", 7))
|
||||
create_setting("backup_schedule_cron_expression", "Backup Schedule Cron Expression", backup.value.get("schedule_cron_expression", ""))
|
||||
backup.delete()
|
||||
except CoreSettings.DoesNotExist:
|
||||
pass
|
||||
|
||||
# Split system_settings
|
||||
try:
|
||||
system = CoreSettings.objects.get(key="system_settings")
|
||||
if isinstance(system.value, dict):
|
||||
create_setting("system_time_zone", "System Time Zone", system.value.get("time_zone", "UTC"))
|
||||
create_setting("max_system_events", "Max System Events", system.value.get("max_system_events", 100))
|
||||
system.delete()
|
||||
except CoreSettings.DoesNotExist:
|
||||
pass
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('core', '0019_add_vlc_stream_profile'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
# First, convert all data to valid JSON strings while column is still CharField
|
||||
migrations.RunPython(convert_string_to_json, migrations.RunPython.noop),
|
||||
# Then change the field type to JSONField
|
||||
migrations.AlterField(
|
||||
model_name='coresettings',
|
||||
name='value',
|
||||
field=models.JSONField(blank=True, default=dict),
|
||||
),
|
||||
# Finally, consolidate individual settings into grouped JSON objects
|
||||
migrations.RunPython(consolidate_settings, reverse_migration),
|
||||
]
|
||||
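To make the effect of consolidate_settings() concrete, a rough sketch of the rows left in CoreSettings after this migration runs; keys and defaults are taken from the functions above, values on an existing install depend on the rows that were migrated, and the dvr_settings and backup_settings groups are abridged here.

# Sketch of the consolidated CoreSettings rows (key -> value dict) after migration 0020.
expected_rows = {
    "stream_settings": {"default_user_agent": None, "default_stream_profile": None,
                        "m3u_hash_key": "", "preferred_region": None,
                        "auto_import_mapped_files": None},
    "dvr_settings": {"tv_template": "TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv",
                     "series_rules": [], "pre_offset_minutes": 0, "post_offset_minutes": 0},  # abridged
    "backup_settings": {"schedule_enabled": True, "schedule_frequency": "daily",
                        "schedule_time": "03:00", "retention_count": 3},  # abridged
    "system_settings": {"time_zone": "UTC", "max_system_events": 100},
    "proxy_settings": {"buffering_timeout": 15, "buffering_speed": 1.0, "redis_chunk_ttl": 60,
                       "channel_shutdown_delay": 0, "channel_init_grace_period": 5},
    "network_access": {},
}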
289
core/models.py
|
|
@@ -1,4 +1,8 @@
|
|||
# core/models.py
|
||||
|
||||
from shlex import split as shlex_split
|
||||
|
||||
from django.conf import settings
|
||||
from django.db import models
|
||||
from django.utils.text import slugify
|
||||
from django.core.exceptions import ValidationError
|
||||
|
|
@@ -132,7 +136,7 @@ class StreamProfile(models.Model):
|
|||
# Split the command and iterate through each part to apply replacements
|
||||
cmd = [self.command] + [
|
||||
self._replace_in_part(part, replacements)
|
||||
for part in self.parameters.split()
|
||||
for part in shlex_split(self.parameters) # use shlex to handle quoted strings
|
||||
]
|
||||
|
||||
return cmd
|
||||
|
|
@@ -144,22 +148,13 @@ class StreamProfile(models.Model):
|
|||
return part
|
||||
|
||||
|
||||
DEFAULT_USER_AGENT_KEY = slugify("Default User-Agent")
|
||||
DEFAULT_STREAM_PROFILE_KEY = slugify("Default Stream Profile")
|
||||
STREAM_HASH_KEY = slugify("M3U Hash Key")
|
||||
PREFERRED_REGION_KEY = slugify("Preferred Region")
|
||||
AUTO_IMPORT_MAPPED_FILES = slugify("Auto-Import Mapped Files")
|
||||
NETWORK_ACCESS = slugify("Network Access")
|
||||
PROXY_SETTINGS_KEY = slugify("Proxy Settings")
|
||||
DVR_TV_TEMPLATE_KEY = slugify("DVR TV Template")
|
||||
DVR_MOVIE_TEMPLATE_KEY = slugify("DVR Movie Template")
|
||||
DVR_SERIES_RULES_KEY = slugify("DVR Series Rules")
|
||||
DVR_TV_FALLBACK_DIR_KEY = slugify("DVR TV Fallback Dir")
|
||||
DVR_TV_FALLBACK_TEMPLATE_KEY = slugify("DVR TV Fallback Template")
|
||||
DVR_MOVIE_FALLBACK_TEMPLATE_KEY = slugify("DVR Movie Fallback Template")
|
||||
DVR_COMSKIP_ENABLED_KEY = slugify("DVR Comskip Enabled")
|
||||
DVR_PRE_OFFSET_MINUTES_KEY = slugify("DVR Pre-Offset Minutes")
|
||||
DVR_POST_OFFSET_MINUTES_KEY = slugify("DVR Post-Offset Minutes")
|
||||
# Setting group keys
|
||||
STREAM_SETTINGS_KEY = "stream_settings"
|
||||
DVR_SETTINGS_KEY = "dvr_settings"
|
||||
BACKUP_SETTINGS_KEY = "backup_settings"
|
||||
PROXY_SETTINGS_KEY = "proxy_settings"
|
||||
NETWORK_ACCESS_KEY = "network_access"
|
||||
SYSTEM_SETTINGS_KEY = "system_settings"
|
||||
|
||||
|
||||
class CoreSettings(models.Model):
|
||||
|
|
@@ -170,160 +165,208 @@ class CoreSettings(models.Model):
|
|||
name = models.CharField(
|
||||
max_length=255,
|
||||
)
|
||||
value = models.CharField(
|
||||
max_length=255,
|
||||
value = models.JSONField(
|
||||
default=dict,
|
||||
blank=True,
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
return "Core Settings"
|
||||
|
||||
# Helper methods to get/set grouped settings
|
||||
@classmethod
|
||||
def _get_group(cls, key, defaults=None):
|
||||
"""Get a settings group, returning defaults if not found."""
|
||||
try:
|
||||
return cls.objects.get(key=key).value or (defaults or {})
|
||||
except cls.DoesNotExist:
|
||||
return defaults or {}
|
||||
|
||||
@classmethod
|
||||
def _update_group(cls, key, name, updates):
|
||||
"""Update specific fields in a settings group."""
|
||||
obj, created = cls.objects.get_or_create(
|
||||
key=key,
|
||||
defaults={"name": name, "value": {}}
|
||||
)
|
||||
current = obj.value if isinstance(obj.value, dict) else {}
|
||||
current.update(updates)
|
||||
obj.value = current
|
||||
obj.save()
|
||||
return current
|
||||
|
||||
# Stream Settings
|
||||
@classmethod
|
||||
def get_stream_settings(cls):
|
||||
"""Get all stream-related settings."""
|
||||
return cls._get_group(STREAM_SETTINGS_KEY, {
|
||||
"default_user_agent": None,
|
||||
"default_stream_profile": None,
|
||||
"m3u_hash_key": "",
|
||||
"preferred_region": None,
|
||||
"auto_import_mapped_files": None,
|
||||
})
|
||||
|
||||
@classmethod
|
||||
def get_default_user_agent_id(cls):
|
||||
"""Retrieve a system profile by name (or return None if not found)."""
|
||||
return cls.objects.get(key=DEFAULT_USER_AGENT_KEY).value
|
||||
return cls.get_stream_settings().get("default_user_agent")
|
||||
|
||||
@classmethod
|
||||
def get_default_stream_profile_id(cls):
|
||||
return cls.objects.get(key=DEFAULT_STREAM_PROFILE_KEY).value
|
||||
return cls.get_stream_settings().get("default_stream_profile")
|
||||
|
||||
@classmethod
|
||||
def get_m3u_hash_key(cls):
|
||||
return cls.objects.get(key=STREAM_HASH_KEY).value
|
||||
return cls.get_stream_settings().get("m3u_hash_key", "")
|
||||
|
||||
@classmethod
|
||||
def get_preferred_region(cls):
|
||||
"""Retrieve the preferred region setting (or return None if not found)."""
|
||||
try:
|
||||
return cls.objects.get(key=PREFERRED_REGION_KEY).value
|
||||
except cls.DoesNotExist:
|
||||
return None
|
||||
return cls.get_stream_settings().get("preferred_region")
|
||||
|
||||
@classmethod
|
||||
def get_auto_import_mapped_files(cls):
|
||||
"""Retrieve the preferred region setting (or return None if not found)."""
|
||||
try:
|
||||
return cls.objects.get(key=AUTO_IMPORT_MAPPED_FILES).value
|
||||
except cls.DoesNotExist:
|
||||
return None
|
||||
return cls.get_stream_settings().get("auto_import_mapped_files")
|
||||
|
||||
# DVR Settings
|
||||
@classmethod
|
||||
def get_proxy_settings(cls):
|
||||
"""Retrieve proxy settings as dict (or return defaults if not found)."""
|
||||
try:
|
||||
import json
|
||||
settings_json = cls.objects.get(key=PROXY_SETTINGS_KEY).value
|
||||
return json.loads(settings_json)
|
||||
except (cls.DoesNotExist, json.JSONDecodeError):
|
||||
# Return defaults if not found or invalid JSON
|
||||
return {
|
||||
"buffering_timeout": 15,
|
||||
"buffering_speed": 1.0,
|
||||
"redis_chunk_ttl": 60,
|
||||
"channel_shutdown_delay": 0,
|
||||
"channel_init_grace_period": 5,
|
||||
}
|
||||
def get_dvr_settings(cls):
|
||||
"""Get all DVR-related settings."""
|
||||
return cls._get_group(DVR_SETTINGS_KEY, {
|
||||
"tv_template": "TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv",
|
||||
"movie_template": "Movies/{title} ({year}).mkv",
|
||||
"tv_fallback_dir": "TV_Shows",
|
||||
"tv_fallback_template": "TV_Shows/{show}/{start}.mkv",
|
||||
"movie_fallback_template": "Movies/{start}.mkv",
|
||||
"comskip_enabled": False,
|
||||
"comskip_custom_path": "",
|
||||
"pre_offset_minutes": 0,
|
||||
"post_offset_minutes": 0,
|
||||
"series_rules": [],
|
||||
})
|
||||
|
||||
@classmethod
|
||||
def get_dvr_tv_template(cls):
|
||||
try:
|
||||
return cls.objects.get(key=DVR_TV_TEMPLATE_KEY).value
|
||||
except cls.DoesNotExist:
|
||||
# Default: relative to recordings root (/data/recordings)
|
||||
return "TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv"
|
||||
return cls.get_dvr_settings().get("tv_template", "TV_Shows/{show}/S{season:02d}E{episode:02d}.mkv")
|
||||
|
||||
@classmethod
|
||||
def get_dvr_movie_template(cls):
|
||||
try:
|
||||
return cls.objects.get(key=DVR_MOVIE_TEMPLATE_KEY).value
|
||||
except cls.DoesNotExist:
|
||||
return "Movies/{title} ({year}).mkv"
|
||||
return cls.get_dvr_settings().get("movie_template", "Movies/{title} ({year}).mkv")
|
||||
|
||||
@classmethod
|
||||
def get_dvr_tv_fallback_dir(cls):
|
||||
"""Folder name to use when a TV episode has no season/episode information.
|
||||
Defaults to 'TV_Show' to match existing behavior but can be overridden in settings.
|
||||
"""
|
||||
try:
|
||||
return cls.objects.get(key=DVR_TV_FALLBACK_DIR_KEY).value or "TV_Shows"
|
||||
except cls.DoesNotExist:
|
||||
return "TV_Shows"
|
||||
return cls.get_dvr_settings().get("tv_fallback_dir", "TV_Shows")
|
||||
|
||||
@classmethod
|
||||
def get_dvr_tv_fallback_template(cls):
|
||||
"""Full path template used when season/episode are missing for a TV airing."""
|
||||
try:
|
||||
return cls.objects.get(key=DVR_TV_FALLBACK_TEMPLATE_KEY).value
|
||||
except cls.DoesNotExist:
|
||||
# default requested by user
|
||||
return "TV_Shows/{show}/{start}.mkv"
|
||||
return cls.get_dvr_settings().get("tv_fallback_template", "TV_Shows/{show}/{start}.mkv")
|
||||
|
||||
@classmethod
|
||||
def get_dvr_movie_fallback_template(cls):
|
||||
"""Full path template used when movie metadata is incomplete."""
|
||||
try:
|
||||
return cls.objects.get(key=DVR_MOVIE_FALLBACK_TEMPLATE_KEY).value
|
||||
except cls.DoesNotExist:
|
||||
return "Movies/{start}.mkv"
|
||||
return cls.get_dvr_settings().get("movie_fallback_template", "Movies/{start}.mkv")
|
||||
|
||||
@classmethod
|
||||
def get_dvr_comskip_enabled(cls):
|
||||
"""Return boolean-like string value ('true'/'false') for comskip enablement."""
|
||||
try:
|
||||
val = cls.objects.get(key=DVR_COMSKIP_ENABLED_KEY).value
|
||||
return str(val).lower() in ("1", "true", "yes", "on")
|
||||
except cls.DoesNotExist:
|
||||
return False
|
||||
return bool(cls.get_dvr_settings().get("comskip_enabled", False))
|
||||
|
||||
@classmethod
|
||||
def get_dvr_comskip_custom_path(cls):
|
||||
return cls.get_dvr_settings().get("comskip_custom_path", "")
|
||||
|
||||
@classmethod
|
||||
def set_dvr_comskip_custom_path(cls, path: str | None):
|
||||
value = (path or "").strip()
|
||||
cls._update_group(DVR_SETTINGS_KEY, "DVR Settings", {"comskip_custom_path": value})
|
||||
return value
|
||||
|
||||
@classmethod
|
||||
def get_dvr_pre_offset_minutes(cls):
|
||||
"""Minutes to start recording before scheduled start (default 0)."""
|
||||
try:
|
||||
val = cls.objects.get(key=DVR_PRE_OFFSET_MINUTES_KEY).value
|
||||
return int(val)
|
||||
except cls.DoesNotExist:
|
||||
return 0
|
||||
except Exception:
|
||||
try:
|
||||
return int(float(val))
|
||||
except Exception:
|
||||
return 0
|
||||
return int(cls.get_dvr_settings().get("pre_offset_minutes", 0) or 0)
|
||||
|
||||
@classmethod
|
||||
def get_dvr_post_offset_minutes(cls):
|
||||
"""Minutes to stop recording after scheduled end (default 0)."""
|
||||
try:
|
||||
val = cls.objects.get(key=DVR_POST_OFFSET_MINUTES_KEY).value
|
||||
return int(val)
|
||||
except cls.DoesNotExist:
|
||||
return 0
|
||||
except Exception:
|
||||
try:
|
||||
return int(float(val))
|
||||
except Exception:
|
||||
return 0
|
||||
return int(cls.get_dvr_settings().get("post_offset_minutes", 0) or 0)
|
||||
|
||||
@classmethod
|
||||
def get_dvr_series_rules(cls):
|
||||
"""Return list of series recording rules. Each: {tvg_id, title, mode: 'all'|'new'}"""
|
||||
import json
|
||||
try:
|
||||
raw = cls.objects.get(key=DVR_SERIES_RULES_KEY).value
|
||||
rules = json.loads(raw) if raw else []
|
||||
if isinstance(rules, list):
|
||||
return rules
|
||||
return []
|
||||
except cls.DoesNotExist:
|
||||
# Initialize empty if missing
|
||||
cls.objects.create(key=DVR_SERIES_RULES_KEY, name="DVR Series Rules", value="[]")
|
||||
return []
|
||||
return cls.get_dvr_settings().get("series_rules", [])
|
||||
|
||||
@classmethod
|
||||
def set_dvr_series_rules(cls, rules):
|
||||
import json
|
||||
try:
|
||||
obj, _ = cls.objects.get_or_create(key=DVR_SERIES_RULES_KEY, defaults={"name": "DVR Series Rules", "value": "[]"})
|
||||
obj.value = json.dumps(rules)
|
||||
obj.save(update_fields=["value"])
|
||||
return rules
|
||||
except Exception:
|
||||
return rules
|
||||
cls._update_group(DVR_SETTINGS_KEY, "DVR Settings", {"series_rules": rules})
|
||||
return rules
|
||||
|
||||
# Proxy Settings
|
||||
@classmethod
|
||||
def get_proxy_settings(cls):
|
||||
"""Get proxy settings."""
|
||||
return cls._get_group(PROXY_SETTINGS_KEY, {
|
||||
"buffering_timeout": 15,
|
||||
"buffering_speed": 1.0,
|
||||
"redis_chunk_ttl": 60,
|
||||
"channel_shutdown_delay": 0,
|
||||
"channel_init_grace_period": 5,
|
||||
})
|
||||
|
||||
# System Settings
|
||||
@classmethod
|
||||
def get_system_settings(cls):
|
||||
"""Get all system-related settings."""
|
||||
return cls._get_group(SYSTEM_SETTINGS_KEY, {
|
||||
"time_zone": getattr(settings, "TIME_ZONE", "UTC") or "UTC",
|
||||
"max_system_events": 100,
|
||||
})
|
||||
|
||||
@classmethod
|
||||
def get_system_time_zone(cls):
|
||||
return cls.get_system_settings().get("time_zone") or getattr(settings, "TIME_ZONE", "UTC") or "UTC"
|
||||
|
||||
@classmethod
|
||||
def set_system_time_zone(cls, tz_name: str | None):
|
||||
value = (tz_name or "").strip() or getattr(settings, "TIME_ZONE", "UTC") or "UTC"
|
||||
cls._update_group(SYSTEM_SETTINGS_KEY, "System Settings", {"time_zone": value})
|
||||
return value
|
||||
|
||||
|
||||
class SystemEvent(models.Model):
|
||||
"""
|
||||
Tracks system events like channel start/stop, buffering, failover, client connections.
|
||||
Maintains a rolling history based on max_system_events setting.
|
||||
"""
|
||||
EVENT_TYPES = [
|
||||
('channel_start', 'Channel Started'),
|
||||
('channel_stop', 'Channel Stopped'),
|
||||
('channel_buffering', 'Channel Buffering'),
|
||||
('channel_failover', 'Channel Failover'),
|
||||
('channel_reconnect', 'Channel Reconnected'),
|
||||
('channel_error', 'Channel Error'),
|
||||
('client_connect', 'Client Connected'),
|
||||
('client_disconnect', 'Client Disconnected'),
|
||||
('recording_start', 'Recording Started'),
|
||||
('recording_end', 'Recording Ended'),
|
||||
('stream_switch', 'Stream Switched'),
|
||||
('m3u_refresh', 'M3U Refreshed'),
|
||||
('m3u_download', 'M3U Downloaded'),
|
||||
('epg_refresh', 'EPG Refreshed'),
|
||||
('epg_download', 'EPG Downloaded'),
|
||||
('login_success', 'Login Successful'),
|
||||
('login_failed', 'Login Failed'),
|
||||
('logout', 'User Logged Out'),
|
||||
('m3u_blocked', 'M3U Download Blocked'),
|
||||
('epg_blocked', 'EPG Download Blocked'),
|
||||
]
|
||||
|
||||
event_type = models.CharField(max_length=50, choices=EVENT_TYPES, db_index=True)
|
||||
timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
|
||||
channel_id = models.UUIDField(null=True, blank=True, db_index=True)
|
||||
channel_name = models.CharField(max_length=255, null=True, blank=True)
|
||||
details = models.JSONField(default=dict, blank=True)
|
||||
|
||||
class Meta:
|
||||
ordering = ['-timestamp']
|
||||
indexes = [
|
||||
models.Index(fields=['-timestamp']),
|
||||
models.Index(fields=['event_type', '-timestamp']),
|
||||
]
|
||||
|
||||
def __str__(self):
|
||||
return f"{self.event_type} - {self.channel_name or 'N/A'} @ {self.timestamp}"
|
||||
|
|
|
|||
|
|
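A minimal usage sketch for the grouped-settings helpers added to CoreSettings above (_get_group, _update_group and the get_*_settings wrappers); the values passed in are illustrative only.

from core.models import CoreSettings, DVR_SETTINGS_KEY

# Read a whole group; the helper falls back to the defaults baked into get_dvr_settings().
dvr = CoreSettings.get_dvr_settings()
print(dvr["tv_template"], dvr["comskip_enabled"])

# Update one field inside a group without touching the rest of the stored dict.
CoreSettings._update_group(DVR_SETTINGS_KEY, "DVR Settings", {"comskip_enabled": True})

# The convenience setters in the diff wrap the same call, e.g. (illustrative path):
CoreSettings.set_dvr_comskip_custom_path("/usr/local/bin/comskip")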
@@ -3,7 +3,7 @@ import json
|
|||
import ipaddress
|
||||
|
||||
from rest_framework import serializers
|
||||
from .models import CoreSettings, UserAgent, StreamProfile, NETWORK_ACCESS
|
||||
from .models import CoreSettings, UserAgent, StreamProfile, NETWORK_ACCESS_KEY
|
||||
|
||||
|
||||
class UserAgentSerializer(serializers.ModelSerializer):
|
||||
|
|
@@ -40,10 +40,10 @@ class CoreSettingsSerializer(serializers.ModelSerializer):
|
|||
fields = "__all__"
|
||||
|
||||
def update(self, instance, validated_data):
|
||||
if instance.key == NETWORK_ACCESS:
|
||||
if instance.key == NETWORK_ACCESS_KEY:
|
||||
errors = False
|
||||
invalid = {}
|
||||
value = json.loads(validated_data.get("value"))
|
||||
value = validated_data.get("value")
|
||||
for key, val in value.items():
|
||||
cidrs = val.split(",")
|
||||
for cidr in cidrs:
|
||||
|
|
|
|||
|
|
@@ -513,7 +513,8 @@ def rehash_streams(keys):
|
|||
|
||||
for obj in batch:
|
||||
# Generate new hash
|
||||
new_hash = Stream.generate_hash_key(obj.name, obj.url, obj.tvg_id, keys, m3u_id=obj.m3u_account_id)
|
||||
group_name = obj.channel_group.name if obj.channel_group else None
|
||||
new_hash = Stream.generate_hash_key(obj.name, obj.url, obj.tvg_id, keys, m3u_id=obj.m3u_account_id, group=group_name)
|
||||
|
||||
# Check if this hash already exists in our tracking dict or in database
|
||||
if new_hash in hash_keys:
|
||||
|
|
|
|||
|
|
@@ -377,12 +377,63 @@ def validate_flexible_url(value):
|
|||
import re
|
||||
|
||||
# More flexible pattern for non-FQDN hostnames with paths
|
||||
# Matches: http://hostname, http://hostname/, http://hostname:port/path/to/file.xml
|
||||
non_fqdn_pattern = r'^https?://[a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?(\:[0-9]+)?(/[^\s]*)?$'
|
||||
# Matches: http://hostname, https://hostname/, http://hostname:port/path/to/file.xml, rtp://192.168.2.1, rtsp://192.168.178.1, udp://239.0.0.1:1234
|
||||
# Also matches FQDNs for rtsp/rtp/udp protocols: rtsp://FQDN/path?query=value
|
||||
# Also supports authentication: rtsp://user:pass@hostname/path
|
||||
non_fqdn_pattern = r'^(rts?p|https?|udp)://([a-zA-Z0-9_\-\.]+:[^\s@]+@)?([a-zA-Z0-9]([a-zA-Z0-9\-\.]{0,61}[a-zA-Z0-9])?|[0-9.]+)?(\:[0-9]+)?(/[^\s]*)?$'
|
||||
non_fqdn_match = re.match(non_fqdn_pattern, value)
|
||||
|
||||
if non_fqdn_match:
|
||||
return # Accept non-FQDN hostnames
|
||||
return # Accept non-FQDN hostnames and rtsp/rtp/udp URLs with optional authentication
|
||||
|
||||
# If it doesn't match our flexible patterns, raise the original error
|
||||
raise ValidationError("Enter a valid URL.")
|
||||
|
||||
|
||||
def log_system_event(event_type, channel_id=None, channel_name=None, **details):
|
||||
"""
|
||||
Log a system event and maintain the configured max history.
|
||||
|
||||
Args:
|
||||
event_type: Type of event (e.g., 'channel_start', 'client_connect')
|
||||
channel_id: Optional UUID of the channel
|
||||
channel_name: Optional name of the channel
|
||||
**details: Additional details to store in the event (stored as JSON)
|
||||
|
||||
Example:
|
||||
log_system_event('channel_start', channel_id=uuid, channel_name='CNN',
|
||||
stream_url='http://...', user='admin')
|
||||
"""
|
||||
from core.models import SystemEvent, CoreSettings
|
||||
|
||||
try:
|
||||
# Create the event
|
||||
SystemEvent.objects.create(
|
||||
event_type=event_type,
|
||||
channel_id=channel_id,
|
||||
channel_name=channel_name,
|
||||
details=details
|
||||
)
|
||||
|
||||
# Get max events from settings (default 100)
|
||||
try:
|
||||
from .models import CoreSettings
|
||||
system_settings = CoreSettings.objects.filter(key='system_settings').first()
|
||||
if system_settings and isinstance(system_settings.value, dict):
|
||||
max_events = int(system_settings.value.get('max_system_events', 100))
|
||||
else:
|
||||
max_events = 100
|
||||
except Exception:
|
||||
max_events = 100
|
||||
|
||||
# Delete old events beyond the limit (keep it efficient with a single query)
|
||||
total_count = SystemEvent.objects.count()
|
||||
if total_count > max_events:
|
||||
# Get the ID of the event at the cutoff point
|
||||
cutoff_event = SystemEvent.objects.values_list('id', flat=True)[max_events]
|
||||
# Delete all events with ID less than cutoff (older events)
|
||||
SystemEvent.objects.filter(id__lt=cutoff_event).delete()
|
||||
|
||||
except Exception as e:
|
||||
# Don't let event logging break the main application
|
||||
logger.error(f"Failed to log system event {event_type}: {e}")
|
||||
|
|
|
|||
|
|
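A quick sanity-check sketch for the broadened validate_flexible_url pattern earlier in this hunk; the pattern string is copied verbatim from the diff and the sample URLs are hypothetical.

import re

pattern = r'^(rts?p|https?|udp)://([a-zA-Z0-9_\-\.]+:[^\s@]+@)?([a-zA-Z0-9]([a-zA-Z0-9\-\.]{0,61}[a-zA-Z0-9])?|[0-9.]+)?(\:[0-9]+)?(/[^\s]*)?$'
for url in (
    "http://tvheadend:9981/playlist.m3u",       # non-FQDN hostname with port and path
    "rtsp://user:secret@camera.local/stream1",  # credentials now accepted
    "udp://239.0.0.1:1234",                     # multicast address with port
):
    print(url, bool(re.match(pattern, url)))    # each of these should print True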
@@ -1,5 +1,6 @@
|
|||
# core/views.py
|
||||
import os
|
||||
from shlex import split as shlex_split
|
||||
import sys
|
||||
import subprocess
|
||||
import logging
|
||||
|
|
@@ -37,7 +38,9 @@ def stream_view(request, channel_uuid):
|
|||
"""
|
||||
try:
|
||||
redis_host = getattr(settings, "REDIS_HOST", "localhost")
|
||||
redis_client = redis.Redis(host=settings.REDIS_HOST, port=6379, db=int(getattr(settings, "REDIS_DB", "0")))
|
||||
redis_port = int(getattr(settings, "REDIS_PORT", 6379))
|
||||
redis_db = int(getattr(settings, "REDIS_DB", "0"))
|
||||
redis_client = redis.Redis(host=redis_host, port=redis_port, db=redis_db)
|
||||
|
||||
# Retrieve the channel by the provided stream_id.
|
||||
channel = Channel.objects.get(uuid=channel_uuid)
|
||||
|
|
@@ -129,7 +132,7 @@ def stream_view(request, channel_uuid):
|
|||
stream_profile = channel.stream_profile
|
||||
if not stream_profile:
|
||||
logger.error("No stream profile set for channel ID=%s, using default", channel.id)
|
||||
stream_profile = StreamProfile.objects.get(id=CoreSettings.objects.get(key="default-stream-profile").value)
|
||||
stream_profile = StreamProfile.objects.get(id=CoreSettings.get_default_stream_profile_id())
|
||||
|
||||
logger.debug("Stream profile used: %s", stream_profile.name)
|
||||
|
||||
|
|
@@ -142,7 +145,7 @@ def stream_view(request, channel_uuid):
|
|||
logger.debug("Formatted parameters: %s", parameters)
|
||||
|
||||
# Build the final command.
|
||||
cmd = [stream_profile.command] + parameters.split()
|
||||
cmd = [stream_profile.command] + shlex_split(parameters)
|
||||
logger.debug("Executing command: %s", cmd)
|
||||
|
||||
try:
|
||||
|
|
|
|||
|
|
@@ -68,15 +68,16 @@ install_packages() {
|
|||
echo ">>> Installing system packages..."
|
||||
apt-get update
|
||||
declare -a packages=(
|
||||
git curl wget build-essential gcc libpcre3-dev libpq-dev
|
||||
git curl wget build-essential gcc libpq-dev
|
||||
python3-dev python3-venv python3-pip nginx redis-server
|
||||
postgresql postgresql-contrib ffmpeg procps streamlink
|
||||
sudo
|
||||
)
|
||||
apt-get install -y --no-install-recommends "${packages[@]}"
|
||||
|
||||
if ! command -v node >/dev/null 2>&1; then
|
||||
echo ">>> Installing Node.js..."
|
||||
curl -sL https://deb.nodesource.com/setup_23.x | bash -
|
||||
curl -sL https://deb.nodesource.com/setup_24.x | bash -
|
||||
apt-get install -y nodejs
|
||||
fi
|
||||
|
||||
|
|
@@ -186,7 +187,32 @@ EOSU
|
|||
}
|
||||
|
||||
##############################################################################
|
||||
# 8) Django Migrations & Static
|
||||
# 8) Create Directories
|
||||
##############################################################################
|
||||
|
||||
create_directories() {
|
||||
mkdir -p /data/logos
|
||||
mkdir -p /data/recordings
|
||||
mkdir -p /data/uploads/m3us
|
||||
mkdir -p /data/uploads/epgs
|
||||
mkdir -p /data/m3us
|
||||
mkdir -p /data/epgs
|
||||
mkdir -p /data/plugins
|
||||
mkdir -p /data/db
|
||||
|
||||
# Needs to own ALL of /data except db
|
||||
chown -R $DISPATCH_USER:$DISPATCH_GROUP /data
|
||||
chown -R postgres:postgres /data/db
|
||||
chmod +x /data
|
||||
|
||||
mkdir -p "$APP_DIR"/logo_cache
|
||||
mkdir -p "$APP_DIR"/media
|
||||
chown -R $DISPATCH_USER:$DISPATCH_GROUP "$APP_DIR"/logo_cache
|
||||
chown -R $DISPATCH_USER:$DISPATCH_GROUP "$APP_DIR"/media
|
||||
}
|
||||
|
||||
##############################################################################
|
||||
# 9) Django Migrations & Static
|
||||
##############################################################################
|
||||
|
||||
django_migrate_collectstatic() {
|
||||
|
|
@@ -204,7 +230,7 @@ EOSU
|
|||
}
|
||||
|
||||
##############################################################################
|
||||
# 9) Configure Services & Nginx
|
||||
# 10) Configure Services & Nginx
|
||||
##############################################################################
|
||||
|
||||
configure_services() {
|
||||
|
|
@@ -360,7 +386,7 @@ EOF
|
|||
}
|
||||
|
||||
##############################################################################
|
||||
# 10) Start Services
|
||||
# 11) Start Services
|
||||
##############################################################################
|
||||
|
||||
start_services() {
|
||||
|
|
@@ -371,7 +397,7 @@ start_services() {
|
|||
}
|
||||
|
||||
##############################################################################
|
||||
# 11) Summary
|
||||
# 12) Summary
|
||||
##############################################################################
|
||||
|
||||
show_summary() {
|
||||
|
|
@@ -408,10 +434,11 @@ main() {
|
|||
clone_dispatcharr_repo
|
||||
setup_python_env
|
||||
build_frontend
|
||||
create_directories
|
||||
django_migrate_collectstatic
|
||||
configure_services
|
||||
start_services
|
||||
show_summary
|
||||
}
|
||||
|
||||
main "$@"
|
||||
main "$@"
|
||||
|
|
@@ -50,13 +50,21 @@ app.conf.update(
|
|||
)
|
||||
|
||||
# Add memory cleanup after task completion
|
||||
#@task_postrun.connect # Use the imported signal
|
||||
@task_postrun.connect # Use the imported signal
|
||||
def cleanup_task_memory(**kwargs):
|
||||
"""Clean up memory after each task completes"""
|
||||
"""Clean up memory and database connections after each task completes"""
|
||||
from django.db import connection
|
||||
|
||||
# Get task name from kwargs
|
||||
task_name = kwargs.get('task').name if kwargs.get('task') else ''
|
||||
|
||||
# Only run cleanup for memory-intensive tasks
|
||||
# Close database connection for this Celery worker process
|
||||
try:
|
||||
connection.close()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Only run memory cleanup for memory-intensive tasks
|
||||
memory_intensive_tasks = [
|
||||
'apps.m3u.tasks.refresh_single_m3u_account',
|
||||
'apps.m3u.tasks.refresh_m3u_accounts',
|
||||
|
|
|
|||
|
|
@@ -73,8 +73,12 @@ class PersistentLock:
|
|||
|
||||
# Example usage (for testing purposes only):
|
||||
if __name__ == "__main__":
|
||||
# Connect to Redis on localhost; adjust connection parameters as needed.
|
||||
client = redis.Redis(host="localhost", port=6379, db=0)
|
||||
import os
|
||||
# Connect to Redis using environment variables; adjust connection parameters as needed.
|
||||
redis_host = os.environ.get("REDIS_HOST", "localhost")
|
||||
redis_port = int(os.environ.get("REDIS_PORT", 6379))
|
||||
redis_db = int(os.environ.get("REDIS_DB", 0))
|
||||
client = redis.Redis(host=redis_host, port=redis_port, db=redis_db)
|
||||
lock = PersistentLock(client, "lock:example_account", lock_timeout=120)
|
||||
|
||||
if lock.acquire():
|
||||
|
|
|
|||
|
|
@@ -4,8 +4,9 @@ from datetime import timedelta
|
|||
|
||||
BASE_DIR = Path(__file__).resolve().parent.parent
|
||||
|
||||
SECRET_KEY = "REPLACE_ME_WITH_A_REAL_SECRET"
|
||||
SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY")
|
||||
REDIS_HOST = os.environ.get("REDIS_HOST", "localhost")
|
||||
REDIS_PORT = int(os.environ.get("REDIS_PORT", 6379))
|
||||
REDIS_DB = os.environ.get("REDIS_DB", "0")
|
||||
|
||||
# Set DEBUG to True for development, False for production
|
||||
|
|
@@ -51,6 +52,11 @@ EPG_BATCH_SIZE = 1000 # Number of records to process in a batch
|
|||
EPG_MEMORY_LIMIT = 512 # Memory limit in MB before forcing garbage collection
|
||||
EPG_ENABLE_MEMORY_MONITORING = True # Whether to monitor memory usage during processing
|
||||
|
||||
# XtreamCodes Rate Limiting Settings
|
||||
# Delay between profile authentications when refreshing multiple profiles
|
||||
# This prevents providers from temporarily banning users with many profiles
|
||||
XC_PROFILE_REFRESH_DELAY = float(os.environ.get('XC_PROFILE_REFRESH_DELAY', '2.5')) # seconds between profile refreshes
|
||||
|
||||
# Database optimization settings
|
||||
DATABASE_STATEMENT_TIMEOUT = 300 # Seconds before timing out long-running queries
|
||||
DATABASE_CONN_MAX_AGE = (
|
||||
|
|
@@ -113,7 +119,7 @@ CHANNEL_LAYERS = {
|
|||
"default": {
|
||||
"BACKEND": "channels_redis.core.RedisChannelLayer",
|
||||
"CONFIG": {
|
||||
"hosts": [(REDIS_HOST, 6379, REDIS_DB)], # Ensure Redis is running
|
||||
"hosts": [(REDIS_HOST, REDIS_PORT, REDIS_DB)], # Ensure Redis is running
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
@@ -134,6 +140,7 @@ else:
|
|||
"PASSWORD": os.environ.get("POSTGRES_PASSWORD", "secret"),
|
||||
"HOST": os.environ.get("POSTGRES_HOST", "localhost"),
|
||||
"PORT": int(os.environ.get("POSTGRES_PORT", 5432)),
|
||||
"CONN_MAX_AGE": DATABASE_CONN_MAX_AGE,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@@ -178,8 +185,10 @@ STATICFILES_DIRS = [
|
|||
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
|
||||
AUTH_USER_MODEL = "accounts.User"
|
||||
|
||||
CELERY_BROKER_URL = os.environ.get("CELERY_BROKER_URL", "redis://localhost:6379/0")
|
||||
CELERY_RESULT_BACKEND = CELERY_BROKER_URL
|
||||
# Build default Redis URL from components for Celery
|
||||
_default_redis_url = f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}"
|
||||
CELERY_BROKER_URL = os.environ.get("CELERY_BROKER_URL", _default_redis_url)
|
||||
CELERY_RESULT_BACKEND = os.environ.get("CELERY_RESULT_BACKEND", CELERY_BROKER_URL)
|
||||
|
||||
# Configure Redis key prefix
|
||||
CELERY_RESULT_BACKEND_TRANSPORT_OPTIONS = {
|
||||
|
|
@@ -211,11 +220,22 @@ CELERY_BEAT_SCHEDULE = {
|
|||
"task": "core.tasks.scan_and_process_files", # Direct task call
|
||||
"schedule": 20.0, # Every 20 seconds
|
||||
},
|
||||
"maintain-recurring-recordings": {
|
||||
"task": "apps.channels.tasks.maintain_recurring_recordings",
|
||||
"schedule": 3600.0, # Once an hour ensure recurring schedules stay ahead
|
||||
},
|
||||
}
|
||||
|
||||
MEDIA_ROOT = BASE_DIR / "media"
|
||||
MEDIA_URL = "/media/"
|
||||
|
||||
# Backup settings
|
||||
BACKUP_ROOT = os.environ.get("BACKUP_ROOT", "/data/backups")
|
||||
BACKUP_DATA_DIRS = [
|
||||
os.environ.get("LOGOS_DIR", "/data/logos"),
|
||||
os.environ.get("UPLOADS_DIR", "/data/uploads"),
|
||||
os.environ.get("PLUGINS_DIR", "/data/plugins"),
|
||||
]
|
||||
|
||||
SERVER_IP = "127.0.0.1"
|
||||
|
||||
|
|
@@ -232,7 +252,7 @@ SIMPLE_JWT = {
|
|||
}
|
||||
|
||||
# Redis connection settings
|
||||
REDIS_URL = "redis://localhost:6379/0"
|
||||
REDIS_URL = os.environ.get("REDIS_URL", f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}")
|
||||
REDIS_SOCKET_TIMEOUT = 60 # Socket timeout in seconds
|
||||
REDIS_SOCKET_CONNECT_TIMEOUT = 5 # Connection timeout in seconds
|
||||
REDIS_HEALTH_CHECK_INTERVAL = 15 # Health check every 15 seconds
|
||||
|
|
|
|||
|
|
@@ -3,7 +3,7 @@ import json
|
|||
import ipaddress
|
||||
from django.http import JsonResponse
|
||||
from django.core.exceptions import ValidationError
|
||||
from core.models import CoreSettings, NETWORK_ACCESS
|
||||
from core.models import CoreSettings, NETWORK_ACCESS_KEY
|
||||
|
||||
|
||||
def json_error_response(message, status=400):
|
||||
|
|
@@ -39,12 +39,15 @@ def get_client_ip(request):
|
|||
|
||||
|
||||
def network_access_allowed(request, settings_key):
|
||||
network_access = json.loads(CoreSettings.objects.get(key=NETWORK_ACCESS).value)
|
||||
try:
|
||||
network_access = CoreSettings.objects.get(key=NETWORK_ACCESS_KEY).value
|
||||
except CoreSettings.DoesNotExist:
|
||||
network_access = {}
|
||||
|
||||
cidrs = (
|
||||
network_access[settings_key].split(",")
|
||||
if settings_key in network_access
|
||||
else ["0.0.0.0/0"]
|
||||
else ["0.0.0.0/0", "::/0"]
|
||||
)
|
||||
|
||||
network_allowed = False
|
||||
|
|
|
|||
|
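The default CIDR list above now includes "::/0" so IPv6 clients are not rejected when no rule is configured; a small sketch of the check this implies, with a hypothetical client address.

import ipaddress

cidrs = ["0.0.0.0/0", "::/0"]                    # defaults when the settings key is absent
client_ip = ipaddress.ip_address("2001:db8::1")  # hypothetical IPv6 client
allowed = any(client_ip in ipaddress.ip_network(cidr) for cidr in cidrs)
print(allowed)  # True: ::/0 matches; the IPv4 network simply does not contain an IPv6 address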
|
@@ -4,26 +4,44 @@ ENV DEBIAN_FRONTEND=noninteractive
|
|||
ENV VIRTUAL_ENV=/dispatcharrpy
|
||||
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
|
||||
|
||||
# --- Install Python 3.13 and system dependencies ---
|
||||
# --- Install Python 3.13 and build dependencies ---
|
||||
# Note: Hardware acceleration (VA-API, VDPAU, NVENC) already included in base ffmpeg image
|
||||
RUN apt-get update && apt-get install --no-install-recommends -y \
|
||||
ca-certificates software-properties-common gnupg2 curl wget \
|
||||
&& add-apt-repository ppa:deadsnakes/ppa \
|
||||
&& apt-get update \
|
||||
&& apt-get install --no-install-recommends -y \
|
||||
python3.13 python3.13-dev python3.13-venv \
|
||||
python3.13 python3.13-dev python3.13-venv libpython3.13 \
|
||||
python-is-python3 python3-pip \
|
||||
libpcre3 libpcre3-dev libpq-dev procps \
|
||||
build-essential gcc pciutils \
|
||||
nginx streamlink comskip\
|
||||
&& apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||
libpcre3 libpcre3-dev libpq-dev procps pciutils \
|
||||
nginx streamlink comskip \
|
||||
vlc-bin vlc-plugin-base \
|
||||
build-essential gcc g++ gfortran libopenblas-dev libopenblas0 ninja-build
|
||||
|
||||
# --- Create Python virtual environment ---
|
||||
RUN python3.13 -m venv $VIRTUAL_ENV && $VIRTUAL_ENV/bin/pip install --upgrade pip
|
||||
|
||||
# --- Install Python dependencies ---
|
||||
COPY requirements.txt /tmp/requirements.txt
|
||||
RUN $VIRTUAL_ENV/bin/pip install --no-cache-dir -r /tmp/requirements.txt && rm /tmp/requirements.txt
|
||||
RUN $VIRTUAL_ENV/bin/pip install --no-cache-dir -r /tmp/requirements.txt && \
|
||||
rm /tmp/requirements.txt
|
||||
|
||||
# --- Build legacy NumPy wheel for old hardware (store for runtime switching) ---
|
||||
RUN $VIRTUAL_ENV/bin/pip install --no-cache-dir build && \
|
||||
cd /tmp && \
|
||||
$VIRTUAL_ENV/bin/pip download --no-binary numpy --no-deps numpy && \
|
||||
tar -xzf numpy-*.tar.gz && \
|
||||
cd numpy-*/ && \
|
||||
$VIRTUAL_ENV/bin/python -m build --wheel -Csetup-args=-Dcpu-baseline="none" -Csetup-args=-Dcpu-dispatch="none" && \
|
||||
mv dist/*.whl /opt/ && \
|
||||
cd / && rm -rf /tmp/numpy-* /tmp/*.tar.gz && \
|
||||
$VIRTUAL_ENV/bin/pip uninstall -y build
|
||||
|
||||
# --- Clean up build dependencies to reduce image size ---
|
||||
RUN apt-get remove -y build-essential gcc g++ gfortran libopenblas-dev libpcre3-dev python3.13-dev ninja-build && \
|
||||
apt-get autoremove -y --purge && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists/* /root/.cache /tmp/*
|
||||
|
||||
# --- Set up Redis 7.x ---
|
||||
RUN curl -fsSL https://packages.redis.io/gpg | gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg && \
|
||||
|
|
|
|||
|
|
@@ -35,9 +35,6 @@ RUN rm -rf /app/frontend
|
|||
# Copy built frontend assets
|
||||
COPY --from=frontend-builder /app/frontend/dist /app/frontend/dist
|
||||
|
||||
# Run Django collectstatic
|
||||
RUN python manage.py collectstatic --noinput
|
||||
|
||||
# Add timestamp argument
|
||||
ARG TIMESTAMP
|
||||
|
||||
|
|
|
|||
|
|
@@ -1,11 +1,65 @@
|
|||
#!/bin/bash
|
||||
docker build --build-arg BRANCH=dev -t dispatcharr/dispatcharr:dev -f Dockerfile ..
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
# Default values
|
||||
VERSION=$(python3 -c "import sys; sys.path.append('..'); import version; print(version.__version__)")
|
||||
REGISTRY="dispatcharr" # Registry or private repo to push to
|
||||
IMAGE="dispatcharr" # Image that we're building
|
||||
BRANCH="dev"
|
||||
ARCH="" # Architectures to build for, e.g. linux/amd64,linux/arm64
|
||||
PUSH=false
|
||||
|
||||
usage() {
|
||||
cat <<- EOF
|
||||
To test locally:
|
||||
./build-dev.sh
|
||||
|
||||
To build and push to registry:
|
||||
./build-dev.sh -p
|
||||
|
||||
To build and push to a private registry:
|
||||
./build-dev.sh -p -r myregistry:5000
|
||||
|
||||
To build for -both- x86_64 and arm_64:
|
||||
./build-dev.sh -p -a linux/amd64,linux/arm64
|
||||
|
||||
Do it all:
|
||||
./build-dev.sh -p -r myregistry:5000 -a linux/amd64,linux/arm64
|
||||
EOF
|
||||
exit 0
|
||||
}
|
||||
|
||||
# Parse options
|
||||
while getopts "pr:a:b:i:h" opt; do
|
||||
case $opt in
|
||||
r) REGISTRY="$OPTARG" ;;
|
||||
a) ARCH="--platform $OPTARG" ;;
|
||||
b) BRANCH="$OPTARG" ;;
|
||||
i) IMAGE="$OPTARG" ;;
|
||||
p) PUSH=true ;;
|
||||
h) usage ;;
|
||||
\?) echo "Invalid option: -$OPTARG" >&2; exit 1 ;;
|
||||
esac
|
||||
done
|
||||
|
||||
BUILD_ARGS="BRANCH=$BRANCH"
|
||||
|
||||
echo docker build --build-arg $BUILD_ARGS $ARCH -t $IMAGE
|
||||
docker build -f Dockerfile --build-arg $BUILD_ARGS $ARCH -t $IMAGE ..
|
||||
docker tag $IMAGE $IMAGE:$BRANCH
|
||||
docker tag $IMAGE $IMAGE:$VERSION
|
||||
|
||||
if [ "$PUSH" = false ]; then
    echo "Please run 'docker push $IMAGE:$BRANCH' and 'docker push $IMAGE:$VERSION' when ready"
else
    for TAG in latest "$VERSION" "$BRANCH"; do
        docker tag "$IMAGE" "$REGISTRY/$IMAGE:$TAG"
        docker push -q "$REGISTRY/$IMAGE:$TAG"
    done
    echo "Images pushed successfully."
fi

# Get version information
VERSION=$(python -c "import sys; sys.path.append('..'); import version; print(version.__version__)")

# Build with version tag
docker build --build-arg BRANCH=dev \
    -t dispatcharr/dispatcharr:dev \
    -t dispatcharr/dispatcharr:${VERSION} \
    -f Dockerfile ..
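Passing --platform to a plain docker build only takes effect with the BuildKit/Buildx backend, so a true multi-architecture build and push usually goes through buildx instead; a minimal sketch mirroring the tags above (the builder name is arbitrary and QEMU/binfmt emulation is assumed to be set up):

    # One-time: create and select a multi-arch capable builder
    docker buildx create --name dispatcharr-builder --use
    # Build both architectures and push in a single step
    docker buildx build --platform linux/amd64,linux/arm64 \
        --build-arg BRANCH=dev \
        -t dispatcharr/dispatcharr:dev \
        -f Dockerfile --push ..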
@@ -14,6 +14,19 @@ services:
      - REDIS_HOST=localhost
      - CELERY_BROKER_URL=redis://localhost:6379/0
      - DISPATCHARR_LOG_LEVEL=info
      # Legacy CPU Support (Optional)
      # Uncomment to enable legacy NumPy build for older CPUs (circa 2009)
      # that lack support for newer baseline CPU features
      #- USE_LEGACY_NUMPY=true
      # Process Priority Configuration (Optional)
      # Lower values = higher priority. Range: -20 (highest) to 19 (lowest)
      # Negative values require cap_add: SYS_NICE (uncomment below)
      #- UWSGI_NICE_LEVEL=-5  # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority)
      #- CELERY_NICE_LEVEL=5  # Celery/EPG/Background tasks (default: 5, low priority)
      #
    # Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0)
    #cap_add:
    #  - SYS_NICE
    # Optional for hardware acceleration
    #devices:
    #  - /dev/dri:/dev/dri  # For Intel/AMD GPU acceleration (VA-API)
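The same optional settings can be exercised without Compose; a minimal docker run sketch using the values from the comments above (the container name and image tag are assumptions, and USE_LEGACY_NUMPY should only be set on CPUs that actually lack the baseline features):

    docker run -d --name dispatcharr \
        --cap-add SYS_NICE \
        -e UWSGI_NICE_LEVEL=-5 \
        -e CELERY_NICE_LEVEL=5 \
        -e USE_LEGACY_NUMPY=true \
        --device /dev/dri:/dev/dri \
        dispatcharr/dispatcharr:dev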
@@ -18,3 +18,16 @@ services:
      - REDIS_HOST=localhost
      - CELERY_BROKER_URL=redis://localhost:6379/0
      - DISPATCHARR_LOG_LEVEL=trace
      # Legacy CPU Support (Optional)
      # Uncomment to enable legacy NumPy build for older CPUs (circa 2009)
      # that lack support for newer baseline CPU features
      #- USE_LEGACY_NUMPY=true
      # Process Priority Configuration (Optional)
      # Lower values = higher priority. Range: -20 (highest) to 19 (lowest)
      # Negative values require cap_add: SYS_NICE (uncomment below)
      #- UWSGI_NICE_LEVEL=-5  # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority)
      #- CELERY_NICE_LEVEL=5  # Celery/EPG/Background tasks (default: 5, low priority)
      #
    # Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0)
    #cap_add:
    #  - SYS_NICE
@@ -17,6 +17,19 @@ services:
      - REDIS_HOST=localhost
      - CELERY_BROKER_URL=redis://localhost:6379/0
      - DISPATCHARR_LOG_LEVEL=debug
      # Legacy CPU Support (Optional)
      # Uncomment to enable legacy NumPy build for older CPUs (circa 2009)
      # that lack support for newer baseline CPU features
      #- USE_LEGACY_NUMPY=true
      # Process Priority Configuration (Optional)
      # Lower values = higher priority. Range: -20 (highest) to 19 (lowest)
      # Negative values require cap_add: SYS_NICE (uncomment below)
      #- UWSGI_NICE_LEVEL=-5  # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority)
      #- CELERY_NICE_LEVEL=5  # Celery/EPG/Background tasks (default: 5, low priority)
      #
    # Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0)
    #cap_add:
    #  - SYS_NICE

  pgadmin:
    image: dpage/pgadmin4
@@ -17,6 +17,19 @@ services:
      - REDIS_HOST=redis
      - CELERY_BROKER_URL=redis://redis:6379/0
      - DISPATCHARR_LOG_LEVEL=info
      # Legacy CPU Support (Optional)
      # Uncomment to enable legacy NumPy build for older CPUs (circa 2009)
      # that lack support for newer baseline CPU features
      #- USE_LEGACY_NUMPY=true
      # Process Priority Configuration (Optional)
      # Lower values = higher priority. Range: -20 (highest) to 19 (lowest)
      # Negative values require cap_add: SYS_NICE (uncomment below)
      #- UWSGI_NICE_LEVEL=-5  # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority)
      #- CELERY_NICE_LEVEL=5  # Celery/EPG/Background tasks (default: 5, low priority)
      #
    # Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0)
    #cap_add:
    #  - SYS_NICE
    # Optional for hardware acceleration
    #group_add:
    #  - video
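Before enabling the hardware-acceleration options it can help to confirm the render device and its owning group on the host; a minimal sketch (the renderD128 node name is typical but may differ on your system):

    # Confirm the DRI nodes exist on the host
    ls -l /dev/dri
    # Show the group name and GID that own the render node; this is what group_add should reference
    stat -c '%G %g' /dev/dri/renderD128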
@@ -27,6 +27,18 @@ echo_with_timestamp() {
    echo "$(date '+%Y-%m-%d %H:%M:%S') - $1"
}

# --- NumPy version switching for legacy hardware ---
if [ "$USE_LEGACY_NUMPY" = "true" ]; then
    # Check if NumPy was compiled with baseline support
    if /dispatcharrpy/bin/python -c "import numpy; numpy.show_config()" 2>&1 | grep -qi "baseline"; then
        echo_with_timestamp "🔧 Switching to legacy NumPy (no CPU baseline)..."
        /dispatcharrpy/bin/pip install --no-cache-dir --force-reinstall --no-deps /opt/numpy-*.whl
        echo_with_timestamp "✅ Legacy NumPy installed"
    else
        echo_with_timestamp "✅ Legacy NumPy (no baseline) already installed, skipping reinstallation"
    fi
fi

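To see which NumPy build is currently active inside a running container, the same probe the script uses can be run by hand; a minimal sketch, assuming the container is named dispatcharr:

    # A standard build mentions "baseline" in its config; the wheel built with -Dcpu-baseline=none does not
    docker exec dispatcharr /dispatcharrpy/bin/python -c "import numpy; numpy.show_config()" | grep -i baseline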
# Set PostgreSQL environment variables
export POSTGRES_DB=${POSTGRES_DB:-dispatcharr}
export POSTGRES_USER=${POSTGRES_USER:-dispatch}
|
|||
export DISPATCHARR_PORT=${DISPATCHARR_PORT:-9191}
|
||||
export LIBVA_DRIVERS_PATH='/usr/local/lib/x86_64-linux-gnu/dri'
|
||||
export LD_LIBRARY_PATH='/usr/local/lib'
|
||||
export SECRET_FILE="/data/jwt"
|
||||
# Ensure Django secret key exists or generate a new one
|
||||
if [ ! -f "$SECRET_FILE" ]; then
|
||||
echo "Generating new Django secret key..."
|
||||
old_umask=$(umask)
|
||||
umask 077
|
||||
tmpfile="$(mktemp "${SECRET_FILE}.XXXXXX")" || { echo "mktemp failed"; exit 1; }
|
||||
python3 - <<'PY' >"$tmpfile" || { echo "secret generation failed"; rm -f "$tmpfile"; exit 1; }
|
||||
import secrets
|
||||
print(secrets.token_urlsafe(64))
|
||||
PY
|
||||
mv -f "$tmpfile" "$SECRET_FILE" || { echo "move failed"; rm -f "$tmpfile"; exit 1; }
|
||||
umask $old_umask
|
||||
fi
|
||||
export DJANGO_SECRET_KEY="$(cat "$SECRET_FILE")"
|
||||
|
||||
# Process priority configuration
|
||||
# UWSGI_NICE_LEVEL: Absolute nice value for uWSGI/streaming (default: 0 = normal priority)
|
||||
# CELERY_NICE_LEVEL: Absolute nice value for Celery/background tasks (default: 5 = low priority)
|
||||
# Note: The script will automatically calculate the relative offset for Celery since it's spawned by uWSGI
|
||||
export UWSGI_NICE_LEVEL=${UWSGI_NICE_LEVEL:-0}
|
||||
CELERY_NICE_ABSOLUTE=${CELERY_NICE_LEVEL:-5}
|
||||
|
||||
# Calculate relative nice value for Celery (since nice is relative to parent process)
|
||||
# Celery is spawned by uWSGI, so we need to add the offset to reach the desired absolute value
|
||||
export CELERY_NICE_LEVEL=$((CELERY_NICE_ABSOLUTE - UWSGI_NICE_LEVEL))
|
||||
|
||||
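A quick worked example of the offset above: with UWSGI_NICE_LEVEL=-5 and a requested Celery absolute nice of 5, the exported CELERY_NICE_LEVEL becomes 10, so a worker spawned from uWSGI (already at nice -5) ends up at the intended absolute value of 5:

    UWSGI_NICE_LEVEL=-5
    CELERY_NICE_ABSOLUTE=5
    CELERY_NICE_LEVEL=$((CELERY_NICE_ABSOLUTE - UWSGI_NICE_LEVEL))  # 5 - (-5) = 10
    # Child nice is applied relative to the parent: -5 (uWSGI) + 10 (offset) = 5 (Celery)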
# Set LIBVA_DRIVER_NAME if user has specified it
if [ -v LIBVA_DRIVER_NAME ]; then
    export LIBVA_DRIVER_NAME
@@ -73,11 +112,12 @@ export POSTGRES_DIR=/data/db
if [[ ! -f /etc/profile.d/dispatcharr.sh ]]; then
    # Define all variables to process
    variables=(
        PATH VIRTUAL_ENV DJANGO_SETTINGS_MODULE PYTHONUNBUFFERED
        PATH VIRTUAL_ENV DJANGO_SETTINGS_MODULE PYTHONUNBUFFERED PYTHONDONTWRITEBYTECODE
        POSTGRES_DB POSTGRES_USER POSTGRES_PASSWORD POSTGRES_HOST POSTGRES_PORT
        DISPATCHARR_ENV DISPATCHARR_DEBUG DISPATCHARR_LOG_LEVEL
        REDIS_HOST REDIS_DB POSTGRES_DIR DISPATCHARR_PORT
        DISPATCHARR_VERSION DISPATCHARR_TIMESTAMP LIBVA_DRIVERS_PATH LIBVA_DRIVER_NAME LD_LIBRARY_PATH
        CELERY_NICE_LEVEL UWSGI_NICE_LEVEL DJANGO_SECRET_KEY
    )

    # Process each variable for both profile.d and environment
@@ -96,7 +136,16 @@ fi

chmod +x /etc/profile.d/dispatcharr.sh

pip install django-filter
# Ensure root's .bashrc sources the profile.d scripts for interactive non-login shells
if ! grep -q "profile.d/dispatcharr.sh" /root/.bashrc 2>/dev/null; then
    cat >> /root/.bashrc << 'EOF'

# Source Dispatcharr environment variables
if [ -f /etc/profile.d/dispatcharr.sh ]; then
    . /etc/profile.d/dispatcharr.sh
fi
EOF
fi

# Run init scripts
echo "Starting user setup..."
@@ -137,9 +186,9 @@ else
    pids+=("$nginx_pid")
fi

cd /app
python manage.py migrate --noinput
python manage.py collectstatic --noinput
# Run Django commands as non-root user to prevent permission issues
su - $POSTGRES_USER -c "cd /app && python manage.py migrate --noinput"
su - $POSTGRES_USER -c "cd /app && python manage.py collectstatic --noinput"

# Select proper uwsgi config based on environment
if [ "$DISPATCHARR_ENV" = "dev" ] && [ "$DISPATCHARR_DEBUG" != "true" ]; then
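If a management command ever needs to be run by hand, the same non-root pattern applies; a minimal sketch (the container name dispatcharr is an assumption, and showmigrations is only an example command):

    # Run a one-off Django command as the unprivileged dispatch user
    docker exec -it dispatcharr su - dispatch -c "cd /app && python manage.py showmigrations"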
@@ -161,10 +210,12 @@ if [ "$DISPATCHARR_DEBUG" != "true" ]; then
    uwsgi_args+=" --disable-logging"
fi

# Launch uwsgi; -p passes environment variables to the process
su -p - $POSTGRES_USER -c "cd /app && uwsgi $uwsgi_args &"
uwsgi_pid=$(pgrep uwsgi | sort | head -n1)
echo "✅ uwsgi started with PID $uwsgi_pid"
# Launch uwsgi with configurable nice level (default: 0 for normal priority)
# Users can override via UWSGI_NICE_LEVEL environment variable in docker-compose
# Start with nice as root, then drop privileges to the dispatch user
# This preserves both the nice value and environment variables
nice -n $UWSGI_NICE_LEVEL su - "$POSTGRES_USER" -c "cd /app && exec /dispatcharrpy/bin/uwsgi $uwsgi_args" & uwsgi_pid=$!
echo "✅ uwsgi started with PID $uwsgi_pid (nice $UWSGI_NICE_LEVEL)"
pids+=("$uwsgi_pid")

# sed -i 's/protected-mode yes/protected-mode no/g' /etc/redis/redis.conf
@@ -209,7 +260,7 @@ echo "🔍 Running hardware acceleration check..."

# Wait for at least one process to exit and log the process that exited first
if [ ${#pids[@]} -gt 0 ]; then
    echo "⏳ Waiting for processes to exit..."
    echo "⏳ Dispatcharr is running. Monitoring processes..."
    while kill -0 "${pids[@]}" 2>/dev/null; do
        sleep 1  # Wait for a second before checking again
    done
@@ -1,25 +1,80 @@
#!/bin/bash

mkdir -p /data/logos
mkdir -p /data/recordings
mkdir -p /data/uploads/m3us
mkdir -p /data/uploads/epgs
mkdir -p /data/m3us
mkdir -p /data/epgs
mkdir -p /data/plugins
mkdir -p /app/logo_cache
mkdir -p /app/media
# Define directories that need to exist and be owned by PUID:PGID
DATA_DIRS=(
    "/data/logos"
    "/data/recordings"
    "/data/uploads/m3us"
    "/data/uploads/epgs"
    "/data/m3us"
    "/data/epgs"
    "/data/plugins"
    "/data/models"
)

APP_DIRS=(
    "/app/logo_cache"
    "/app/media"
    "/app/static"
)

# Create all directories
for dir in "${DATA_DIRS[@]}" "${APP_DIRS[@]}"; do
    mkdir -p "$dir"
done

# Ensure /app itself is owned by PUID:PGID (needed for uwsgi socket creation)
if [ "$(id -u)" = "0" ] && [ -d "/app" ]; then
    if [ "$(stat -c '%u:%g' /app)" != "$PUID:$PGID" ]; then
        echo "Fixing ownership for /app (non-recursive)"
        chown $PUID:$PGID /app
    fi
fi
# Configure nginx port
if ! [[ "$DISPATCHARR_PORT" =~ ^[0-9]+$ ]]; then
    echo "⚠️ Warning: DISPATCHARR_PORT is not a valid integer, using default port 9191"
    DISPATCHARR_PORT=9191
fi
sed -i "s/NGINX_PORT/${DISPATCHARR_PORT}/g" /etc/nginx/sites-enabled/default

# Configure nginx based on IPv6 availability
if ip -6 addr show | grep -q "inet6"; then
    echo "✅ IPv6 is available, enabling IPv6 in nginx"
else
    echo "⚠️ IPv6 not available, disabling IPv6 in nginx"
    sed -i '/listen \[::\]:/d' /etc/nginx/sites-enabled/default
fi

# NOTE: mac doesn't run as root, so only manage permissions
# if this script is running as root
if [ "$(id -u)" = "0" ]; then
    # Needs to own ALL of /data except db, we handle that below
    chown -R $PUID:$PGID /data
    chown -R $PUID:$PGID /app
    # Fix data directories (non-recursive to avoid touching user files)
    for dir in "${DATA_DIRS[@]}"; do
        if [ -d "$dir" ] && [ "$(stat -c '%u:%g' "$dir")" != "$PUID:$PGID" ]; then
            echo "Fixing ownership for $dir"
            chown $PUID:$PGID "$dir"
        fi
    done

    # Fix app directories (recursive since they're managed by the app)
    for dir in "${APP_DIRS[@]}"; do
        if [ -d "$dir" ] && [ "$(stat -c '%u:%g' "$dir")" != "$PUID:$PGID" ]; then
            echo "Fixing ownership for $dir (recursive)"
            chown -R $PUID:$PGID "$dir"
        fi
    done

    # Database permissions
    if [ -d /data/db ] && [ "$(stat -c '%u' /data/db)" != "$(id -u postgres)" ]; then
        echo "Fixing ownership for /data/db"
        chown -R postgres:postgres /data/db
    fi

    # Fix /data directory ownership (non-recursive)
    if [ -d "/data" ] && [ "$(stat -c '%u:%g' /data)" != "$PUID:$PGID" ]; then
        echo "Fixing ownership for /data (non-recursive)"
        chown $PUID:$PGID /data
    fi

    # Permissions
    chown -R postgres:postgres /data/db
    chmod +x /data
fi
fi
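The ownership handling above keys off PUID and PGID, so they are worth setting explicitly when running outside Compose; a minimal run sketch (the host data path and image tag are assumptions; 9191 is the default DISPATCHARR_PORT):

    docker run -d --name dispatcharr \
        -e PUID=1000 -e PGID=1000 \
        -p 9191:9191 \
        -v /srv/dispatcharr/data:/data \
        dispatcharr/dispatcharr:dev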
Some files were not shown because too many files have changed in this diff.